<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:28:14 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-2791] Stuck client on server OOM/lost message</title>
                <link>https://jira.whamcloud.com/browse/LU-2791</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;After recent landings of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1431&quot; title=&quot;Support for larger than 1MB sequential I/O RPCs&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1431&quot;&gt;&lt;del&gt;LU-1431&lt;/del&gt;&lt;/a&gt; and a corresponding crop up of allocation failures due to &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2748&quot; title=&quot;OSD uses kmalloc with high order to allocate a keys&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2748&quot;&gt;&lt;del&gt;LU-2748&lt;/del&gt;&lt;/a&gt; my sanity test hung up like this:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[ 7441.448176] Lustre: DEBUG MARKER: == sanity test 124b: lru resize (performance test) ========================= 02:32:43 (1360481563)
...
[ 7511.366772] ll_ost00_008: page allocation failure. order:5, mode:0x50
[ 7511.367282] Pid: 23333, comm: ll_ost00_008 Not tainted 2.6.32-debug #6
[ 7511.367763] Call Trace:
[ 7511.368120]  [&amp;lt;ffffffff81125bd6&amp;gt;] ? __alloc_pages_nodemask+0x976/0x9e0
[ 7511.368632]  [&amp;lt;ffffffff81160a62&amp;gt;] ? kmem_getpages+0x62/0x170
[ 7511.369397]  [&amp;lt;ffffffff8116349c&amp;gt;] ? fallback_alloc+0x1bc/0x270
[ 7511.389365]  [&amp;lt;ffffffff81162db7&amp;gt;] ? cache_grow+0x4d7/0x520
[ 7511.389816]  [&amp;lt;ffffffff81163188&amp;gt;] ? ____cache_alloc_node+0xa8/0x200
[ 7511.390278]  [&amp;lt;ffffffff81163838&amp;gt;] ? __kmalloc+0x208/0x2a0
[ 7511.390737]  [&amp;lt;ffffffffa04fcc00&amp;gt;] ? cfs_alloc+0x30/0x60 [libcfs]
[ 7511.391245]  [&amp;lt;ffffffffa04fcc00&amp;gt;] ? cfs_alloc+0x30/0x60 [libcfs]
[ 7511.391707]  [&amp;lt;ffffffffa0ac953e&amp;gt;] ? osd_key_init+0x1e/0x5d0 [osd_ldiskfs]
[ 7511.404245]  [&amp;lt;ffffffffa062a3df&amp;gt;] ? keys_fill+0x6f/0x190 [obdclass]
[ 7511.404941]  [&amp;lt;ffffffffa062de8b&amp;gt;] ? lu_context_init+0xab/0x260 [obdclass]
[ 7511.405579]  [&amp;lt;ffffffffa062e05e&amp;gt;] ? lu_env_init+0x1e/0x30 [obdclass]
[ 7511.406197]  [&amp;lt;ffffffffa0d4bf27&amp;gt;] ? ofd_lvbo_init+0x137/0x8e0 [ofd]
[ 7511.419335]  [&amp;lt;ffffffffa04fcb12&amp;gt;] ? cfs_mem_cache_alloc+0x22/0x30 [libcfs]
[ 7511.419900]  [&amp;lt;ffffffffa0767763&amp;gt;] ? ldlm_resource_get+0x383/0x730 [ptlrpc]
[ 7511.420407]  [&amp;lt;ffffffffa07617a5&amp;gt;] ? ldlm_lock_create+0x55/0xa50 [ptlrpc]
[ 7511.420966]  [&amp;lt;ffffffffa0786ae6&amp;gt;] ? ldlm_handle_enqueue0+0x156/0x1090 [ptlrpc]
[ 7511.443020]  [&amp;lt;ffffffffa0787a86&amp;gt;] ? ldlm_handle_enqueue+0x66/0x70 [ptlrpc]
[ 7511.443583]  [&amp;lt;ffffffffa0787a90&amp;gt;] ? ldlm_server_completion_ast+0x0/0x640 [ptlrpc]
[ 7511.459402]  [&amp;lt;ffffffffa0b2b8b0&amp;gt;] ? ost_blocking_ast+0x0/0xca0 [ost]
[ 7511.459950]  [&amp;lt;ffffffffa07843c0&amp;gt;] ? ldlm_server_glimpse_ast+0x0/0x3b0 [ptlrpc]
[ 7511.478957]  [&amp;lt;ffffffffa0b33807&amp;gt;] ? ost_handle+0x1be7/0x4590 [ost]
[ 7511.480742]  [&amp;lt;ffffffffa0508204&amp;gt;] ? libcfs_id2str+0x74/0xb0 [libcfs]
[ 7511.481280]  [&amp;lt;ffffffffa07b6e03&amp;gt;] ? ptlrpc_server_handle_request+0x453/0xe50 [ptlrpc]
[ 7511.486155]  [&amp;lt;ffffffffa04fc65e&amp;gt;] ? cfs_timer_arm+0xe/0x10 [libcfs]
[ 7511.486813]  [&amp;lt;ffffffffa07afe91&amp;gt;] ? ptlrpc_wait_event+0xb1/0x2a0 [ptlrpc]
[ 7511.498204]  [&amp;lt;ffffffff81051f73&amp;gt;] ? __wake_up+0x53/0x70
[ 7511.498685]  [&amp;lt;ffffffffa07b98cd&amp;gt;] ? ptlrpc_main+0xafd/0x17f0 [ptlrpc]
[ 7511.499220]  [&amp;lt;ffffffffa07b8dd0&amp;gt;] ? ptlrpc_main+0x0/0x17f0 [ptlrpc]
[ 7511.499684]  [&amp;lt;ffffffff8100c14a&amp;gt;] ? child_rip+0xa/0x20
[ 7511.500158]  [&amp;lt;ffffffffa07b8dd0&amp;gt;] ? ptlrpc_main+0x0/0x17f0 [ptlrpc]
[ 7511.500689]  [&amp;lt;ffffffffa07b8dd0&amp;gt;] ? ptlrpc_main+0x0/0x17f0 [ptlrpc]
[ 7511.501159]  [&amp;lt;ffffffff8100c140&amp;gt;] ? child_rip+0x0/0x20
...
[ 7511.543594] LustreError: 23333:0:(ldlm_resource.c:1161:ldlm_resource_get()) lvbo_init failed for resource 48519: rc -12
[ 7511.544499] LustreError: 23333:0:(ldlm_resource.c:1161:ldlm_resource_get()) Skipped 92 previous similar messages
[ 7511.546166] LustreError: 21396:0:(ldlm_lock.c:1542:ldlm_fill_lvb()) ### Replied unexpected ost LVB size 0 ns: lustre-OST0001-osc-ffff8800145e1bf0 lock: ffff880048126db0/0x3fda232370b59bd7 lrc: 6/2,0 mode: --/PR res: 48519/9663677440 rrc: 1 type: EXT [0-&amp;gt;18446744073709551615] (req 0-&amp;gt;18446744073709551615) flags: 0x0 nid: local remote: 0x3fda232370b5a179 expref: -99 pid: 3863 timeout: 0 lvb_type: 1
[ 7511.551229] LustreError: 21396:0:(ldlm_lock.c:1542:ldlm_fill_lvb()) ### Replied unexpected ost LVB size 0 ns: lustre-OST0001-osc-ffff8800145e1bf0 lock: ffff880098de4db0/0x3fda232370b5a187 lrc: 6/2,0 mode: --/PR res: 48519/9663677440 rrc: 2 type: EXT [0-&amp;gt;18446744073709551615] (req 0-&amp;gt;18446744073709551615) flags: 0x0 nid: local remote: 0x3fda232370b5a18e expref: -99 pid: 3860 timeout: 0 lvb_type: 1
[ 7512.986352] LustreError: 21397:0:(ldlm_lock.c:1542:ldlm_fill_lvb()) ### Replied unexpected ost LVB size 0 ns: lustre-OST0001-osc-ffff8800145e1bf0 lock: ffff880091bdddb0/0x3fda232370b5a221 lrc: 6/2,0 mode: --/PR res: 48519/9663677440 rrc: 3 type: EXT [0-&amp;gt;18446744073709551615] (req 0-&amp;gt;18446744073709551615) flags: 0x0 nid: local remote: 0x3fda232370b5a228 expref: -99 pid: 3866 timeout: 0 lvb_type: 1
...
[ 7550.196668] Lustre: DEBUG MARKER: == sanity test 132: som avoids glimpse rpc == 02:34:32 (1360481672)
[ 7552.368263] Lustre: DEBUG MARKER: cancel_lru_locks osc start
[ 7552.849425] Lustre: DEBUG MARKER: cancel_lru_locks osc stop
[ 7553.369267] Lustre: Setting parameter lustre-MDT0000.mdt.som in log lustre-MDT0000
[ 7553.370141] Lustre: Skipped 3 previous similar messages
[ 7555.706750] LustreError: 9584:0:(ldlm_resource.c:805:ldlm_resource_complain()) Namespace lustre-OST0001-osc-ffff8800145e1bf0 resource refcount nonzero (3) after lock cleanup; forcing cleanup.
[ 7555.708100] LustreError: 9584:0:(ldlm_resource.c:811:ldlm_resource_complain()) Resource: ffff88000f3c6e78 (48519/9663677440/0/0) (rc: 3)
[ 7555.709025] LustreError: 9584:0:(ldlm_resource.c:1404:ldlm_resource_dump()) --- Resource: ffff88000f3c6e78 (48519/9663677440/0/0) (rc: 4)
[ 7557.280133] LNet: Service thread pid 22395 was inactive for 40.00s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
[ 7557.281500] Pid: 22395, comm: ll_ost00_006
[ 7557.281907] 
[ 7557.281908] Call Trace:
[ 7557.282616]  [&amp;lt;ffffffff814fb054&amp;gt;] ? _spin_lock_irqsave+0x24/0x30
[ 7557.283088]  [&amp;lt;ffffffff814f8ad1&amp;gt;] schedule_timeout+0x191/0x2e0
[ 7557.283548]  [&amp;lt;ffffffff8107bcd0&amp;gt;] ? process_timeout+0x0/0x10
[ 7557.284065]  [&amp;lt;ffffffffa04fc771&amp;gt;] cfs_waitq_timedwait+0x11/0x20 [libcfs]
[ 7557.284586]  [&amp;lt;ffffffffa077fd0d&amp;gt;] ldlm_completion_ast+0x4dd/0x950 [ptlrpc]
[ 7557.285113]  [&amp;lt;ffffffffa077b440&amp;gt;] ? ldlm_expired_completion_wait+0x0/0x390 [ptlrpc]
[ 7557.285846]  [&amp;lt;ffffffff81057d60&amp;gt;] ? default_wake_function+0x0/0x20
[ 7557.286341]  [&amp;lt;ffffffffa077f458&amp;gt;] ldlm_cli_enqueue_local+0x1f8/0x5d0 [ptlrpc]
[ 7557.286875]  [&amp;lt;ffffffffa077f830&amp;gt;] ? ldlm_completion_ast+0x0/0x950 [ptlrpc]
[ 7557.287604]  [&amp;lt;ffffffffa077e1d0&amp;gt;] ? ldlm_blocking_ast+0x0/0x180 [ptlrpc]
[ 7557.288146]  [&amp;lt;ffffffffa0d32610&amp;gt;] ofd_destroy_by_fid+0x160/0x380 [ofd]
[ 7557.288718]  [&amp;lt;ffffffffa077e1d0&amp;gt;] ? ldlm_blocking_ast+0x0/0x180 [ptlrpc]
[ 7557.289296]  [&amp;lt;ffffffffa077f830&amp;gt;] ? ldlm_completion_ast+0x0/0x950 [ptlrpc]
[ 7557.289829]  [&amp;lt;ffffffffa07a77a5&amp;gt;] ? lustre_msg_buf+0x55/0x60 [ptlrpc]
[ 7557.290312]  [&amp;lt;ffffffffa0d33b37&amp;gt;] ofd_destroy+0x187/0x670 [ofd]
[ 7557.290751]  [&amp;lt;ffffffffa0b35732&amp;gt;] ost_handle+0x3b12/0x4590 [ost]
[ 7557.291301]  [&amp;lt;ffffffffa0508204&amp;gt;] ? libcfs_id2str+0x74/0xb0 [libcfs]
[ 7557.291849]  [&amp;lt;ffffffffa07b6e03&amp;gt;] ptlrpc_server_handle_request+0x453/0xe50 [ptlrpc]
[ 7557.292785]  [&amp;lt;ffffffffa04fc65e&amp;gt;] ? cfs_timer_arm+0xe/0x10 [libcfs]
[ 7557.293433]  [&amp;lt;ffffffffa07afe91&amp;gt;] ? ptlrpc_wait_event+0xb1/0x2a0 [ptlrpc]
[ 7557.293961]  [&amp;lt;ffffffff81057d60&amp;gt;] ? default_wake_function+0x0/0x20
[ 7557.294452]  [&amp;lt;ffffffffa07b98cd&amp;gt;] ptlrpc_main+0xafd/0x17f0 [ptlrpc]
[ 7557.294939]  [&amp;lt;ffffffffa07b8dd0&amp;gt;] ? ptlrpc_main+0x0/0x17f0 [ptlrpc]
[ 7557.295390]  [&amp;lt;ffffffff8100c14a&amp;gt;] child_rip+0xa/0x20
[ 7557.295928]  [&amp;lt;ffffffffa07b8dd0&amp;gt;] ? ptlrpc_main+0x0/0x17f0 [ptlrpc]
[ 7557.296475]  [&amp;lt;ffffffffa07b8dd0&amp;gt;] ? ptlrpc_main+0x0/0x17f0 [ptlrpc]
[ 7557.296963]  [&amp;lt;ffffffff8100c140&amp;gt;] ? child_rip+0x0/0x20
[ 7557.298274] 
[ 7557.298613] LustreError: dumping log to /tmp/lustre-log.1360481679.22395
[ 7558.542139] Lustre: e4b81d30-e30c-d5e2-eae1-83e5d43c85e8 is already connected, SOM will be enabled on the next mount
[ 7604.468836] Lustre: lustre-OST0001: haven&apos;t heard from client e4b81d30-e30c-d5e2-eae1-83e5d43c85e8 (at 0@lo) in 52 seconds. I think it&apos;s dead, and I am evicting it. exp ffff8800b3070bf0, cur 1360481727 expire 1360481697 last 1360481675
[ 7604.470498] LustreError: 22092:0:(ofd_lvb.c:145:ofd_lvbo_update()) lustre-OST0001: no lvb when running lvbo_update, res: 48519!
[ 7604.480091] LNet: Service thread pid 22395 completed after 87.19s. This indicates the system was overloaded (too many service threads, or there were not enough hardware resources).
[ 7680.616144] INFO: task ll_cfg_requeue:22093 blocked for more than 120 seconds.
[ 7680.616937] &quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
[ 7680.617711] ll_cfg_requeu D 0000000000000000  3920 22093      2 0x00000000
[ 7680.618207]  ffff880064355ad8 0000000000000046 0000000000000000 0000000000000018
[ 7680.619051]  0000000000000002 0000000064355a90 ffff88002266d000 0000000064355a88
[ 7680.619894]  ffff8800492c08b8 ffff880064355fd8 000000000000fba8 ffff8800492c08b8
[ 7680.620688] Call Trace:
[ 7680.621034]  [&amp;lt;ffffffff814fabbd&amp;gt;] rwsem_down_failed_common+0x8d/0x1d0
[ 7680.621497]  [&amp;lt;ffffffff814fad56&amp;gt;] rwsem_down_read_failed+0x26/0x30
[ 7680.622011]  [&amp;lt;ffffffff8127c104&amp;gt;] call_rwsem_down_read_failed+0x14/0x30
[ 7680.622495]  [&amp;lt;ffffffff814f9ec7&amp;gt;] ? down_read+0x37/0x40
[ 7680.622970]  [&amp;lt;ffffffffa07e1e8e&amp;gt;] sptlrpc_conf_client_adapt+0x6e/0x240 [ptlrpc]
[ 7680.623777]  [&amp;lt;ffffffffa09d09d8&amp;gt;] osc_set_info_async+0x5d8/0x940 [osc]
[ 7680.625332]  [&amp;lt;ffffffffa05e60b0&amp;gt;] class_notify_sptlrpc_conf+0x230/0x4f0 [obdclass]
[ 7680.626146]  [&amp;lt;ffffffffa0b73671&amp;gt;] mgc_process_cfg_log+0x2e1/0x14c0 [mgc]
[ 7680.626651]  [&amp;lt;ffffffffa05fb217&amp;gt;] ? class_handle2object+0x97/0x170 [obdclass]
[ 7680.627169]  [&amp;lt;ffffffffa0b74cb3&amp;gt;] mgc_process_log+0x463/0x1330 [mgc]
[ 7680.627668]  [&amp;lt;ffffffffa04fcbce&amp;gt;] ? cfs_free+0xe/0x10 [libcfs]
[ 7680.629329]  [&amp;lt;ffffffffa0b6fa40&amp;gt;] ? mgc_blocking_ast+0x0/0x770 [mgc]
[ 7680.629868]  [&amp;lt;ffffffffa077f830&amp;gt;] ? ldlm_completion_ast+0x0/0x950 [ptlrpc]
[ 7680.630356]  [&amp;lt;ffffffffa0b766d8&amp;gt;] mgc_requeue_thread+0x348/0x790 [mgc]
[ 7680.630832]  [&amp;lt;ffffffff81057d60&amp;gt;] ? default_wake_function+0x0/0x20
[ 7680.631299]  [&amp;lt;ffffffffa0b76390&amp;gt;] ? mgc_requeue_thread+0x0/0x790 [mgc]
[ 7680.631774]  [&amp;lt;ffffffff8100c14a&amp;gt;] child_rip+0xa/0x20
[ 7680.632245]  [&amp;lt;ffffffffa0b76390&amp;gt;] ? mgc_requeue_thread+0x0/0x790 [mgc]
[ 7680.632707]  [&amp;lt;ffffffffa0b76390&amp;gt;] ? mgc_requeue_thread+0x0/0x790 [mgc]
[ 7680.633176]  [&amp;lt;ffffffff8100c140&amp;gt;] ? child_rip+0x0/0x20
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;and the ll_cfg_requeue thread never recovers repeating this trace forever.&lt;/p&gt;</description>
                <environment></environment>
        <key id="17522">LU-2791</key>
            <summary>Stuck client on server OOM/lost message</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="yong.fan">nasf</assignee>
                                    <reporter username="green">Oleg Drokin</reporter>
                        <labels>
                            <label>LB</label>
                    </labels>
                <created>Sun, 10 Feb 2013 13:13:56 +0000</created>
                <updated>Wed, 13 Mar 2013 08:59:53 +0000</updated>
                            <resolved>Wed, 13 Mar 2013 08:59:53 +0000</resolved>
                                    <version>Lustre 2.4.0</version>
                                    <fixVersion>Lustre 2.4.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>7</watches>
                                                                            <comments>
                            <comment id="52098" author="green" created="Sun, 10 Feb 2013 13:15:22 +0000"  >&lt;p&gt;full dmesg from the run.&lt;/p&gt;</comment>
                            <comment id="52158" author="jlevi" created="Mon, 11 Feb 2013 15:15:13 +0000"  >&lt;p&gt;Jinshan,&lt;br/&gt;
Could you have a look at this one and assign to Fan Yong if he needs to take this one. He is on holiday this week so we wanted to start with you.&lt;/p&gt;</comment>
                            <comment id="53015" author="pjones" created="Tue, 26 Feb 2013 03:42:09 +0000"  >&lt;p&gt;Fanyong could you please look into this one? thanks Peter&lt;/p&gt;</comment>
                            <comment id="53537" author="yong.fan" created="Thu, 7 Mar 2013 10:38:34 +0000"  >&lt;p&gt;The failure for zero-sized ext lock &quot;lvb&quot; is another instance of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2790&quot; title=&quot;Failure to allocated osd keys leads to ofd_intent_policy()) ASSERTION( res_lvb != ((void *)0) ) failed&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2790&quot;&gt;&lt;del&gt;LU-2790&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;====================&lt;br/&gt;
[ 7511.546166] LustreError: 21396:0:(ldlm_lock.c:1542:ldlm_fill_lvb()) ### Replied unexpected ost LVB size 0 ns: lustre-OST0001-osc-ffff8800145e1bf0 lock: ffff880048126db0/0x3fda232370b59bd7 lrc: 6/2,0 mode: -&lt;del&gt;/PR res: 48519/9663677440 rrc: 1 type: EXT &lt;span class=&quot;error&quot;&gt;&amp;#91;0-&amp;gt;18446744073709551615&amp;#93;&lt;/span&gt; (req 0&lt;/del&gt;&amp;gt;18446744073709551615) flags: 0x0 nid: local remote: 0x3fda232370b5a179 expref: -99 pid: 3863 timeout: 0 lvb_type: 1&lt;br/&gt;
[ 7511.551229] LustreError: 21396:0:(ldlm_lock.c:1542:ldlm_fill_lvb()) ### Replied unexpected ost LVB size 0 ns: lustre-OST0001-osc-ffff8800145e1bf0 lock: ffff880098de4db0/0x3fda232370b5a187 lrc: 6/2,0 mode: -&lt;del&gt;/PR res: 48519/9663677440 rrc: 2 type: EXT &lt;span class=&quot;error&quot;&gt;&amp;#91;0-&amp;gt;18446744073709551615&amp;#93;&lt;/span&gt; (req 0&lt;/del&gt;&amp;gt;18446744073709551615) flags: 0x0 nid: local remote: 0x3fda232370b5a18e expref: -99 pid: 3860 timeout: 0 lvb_type: 1&lt;br/&gt;
[ 7512.986352] LustreError: 21397:0:(ldlm_lock.c:1542:ldlm_fill_lvb()) ### Replied unexpected ost LVB size 0 ns: lustre-OST0001-osc-ffff8800145e1bf0 lock: ffff880091bdddb0/0x3fda232370b5a221 lrc: 6/2,0 mode: -&lt;del&gt;/PR res: 48519/9663677440 rrc: 3 type: EXT &lt;span class=&quot;error&quot;&gt;&amp;#91;0-&amp;gt;18446744073709551615&amp;#93;&lt;/span&gt; (req 0&lt;/del&gt;&amp;gt;18446744073709551615) flags: 0x0 nid: local remote: 0x3fda232370b5a228 expref: -99 pid: 3866 timeout: 0 lvb_type: 1&lt;br/&gt;
...&lt;br/&gt;
[ 7555.706750] LustreError: 9584:0:(ldlm_resource.c:805:ldlm_resource_complain()) Namespace lustre-OST0001-osc-ffff8800145e1bf0 resource refcount nonzero (3) after lock cleanup; forcing cleanup.&lt;br/&gt;
[ 7555.708100] LustreError: 9584:0:(ldlm_resource.c:811:ldlm_resource_complain()) Resource: ffff88000f3c6e78 (48519/9663677440/0/0) (rc: 3)&lt;br/&gt;
[ 7555.709025] LustreError: 9584:0:(ldlm_resource.c:1404:ldlm_resource_dump()) &amp;#8212; Resource: ffff88000f3c6e78 (48519/9663677440/0/0) (rc: 4)&lt;br/&gt;
...&lt;br/&gt;
====================&lt;/p&gt;

&lt;p&gt;As shown in the log, there were three failed locks left on client and blocked the client umount thread in test_132: client_disconnect_export() =&amp;gt; ldlm_namespace_free_prior() =&amp;gt; __ldlm_namespace_free()&lt;/p&gt;

&lt;p&gt;The blocked umount thread holds the semaphore &quot;client_obd::cl_sem&quot; via &quot;down_write(&amp;amp;cli-&amp;gt;cl_sem);&quot;, the ll_cfg_requeue threads need read semaphore against the &quot;client_obd::cl_sem&quot; , so blocked.&lt;/p&gt;</comment>
                            <comment id="53542" author="yong.fan" created="Thu, 7 Mar 2013 11:13:43 +0000"  >&lt;p&gt;This is the patch:&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;http://review.whamcloud.com/#change,5634&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#change,5634&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="53904" author="pjones" created="Wed, 13 Mar 2013 08:59:53 +0000"  >&lt;p&gt;Landed for 2.4&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="14521">LU-1431</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="17439">LU-2748</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                            <attachment id="12249" name="log2791.txt.gz" size="55056" author="green" created="Sun, 10 Feb 2013 13:15:22 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvixj:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>6757</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>