<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:03:09 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
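<!--
For example, a full request might look like the following (the issue-xml view
path shown here is the usual JIRA pattern and is an assumption, not taken from
this document):

  https://jira.whamcloud.com/si/jira.issueviews:issue-xml/LU-6777/LU-6777.xml?field=key&field=summary

This would return only the <key> and <summary> elements of the issue.
-->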
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


    <item>
        <title>[LU-6777] replay-single test 34 hung in tgt_txn_stop_cb</title>
        <link>https://jira.whamcloud.com/browse/LU-6777</link>
        <project id="10000" key="LU">Lustre</project>
        <description>&lt;p&gt;Hit this today; other tests might be affected too:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[35924.999839] Lustre: DEBUG MARKER: == replay-single test 34: abort recovery before client does replay (test mds_cleanup_orphans) == 20:34:40 (1435624480)
[35932.384654] Lustre: lustre-OST0000: Export ffff8800b45b67f0 already connecting from 0@lo
[35937.384931] Lustre: lustre-OST0000: Export ffff8800b45b67f0 already connecting from 0@lo
[35942.385306] Lustre: lustre-OST0000: Export ffff8800b45b67f0 already connecting from 0@lo
[35947.384695] Lustre: lustre-OST0000: Export ffff8800b45b67f0 already connecting from 0@lo
[35952.385167] Lustre: lustre-OST0000: Export ffff8800b45b67f0 already connecting from 0@lo
[35955.528070] LNet: Service thread pid 31469 was inactive for 40.00s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
[35955.528898] Pid: 31469, comm: ll_ost00_006
[35955.529197] 
[35955.529198] Call Trace:
[35955.529613]  [&amp;lt;ffffffff8117394a&amp;gt;] ? cache_alloc_debugcheck_after+0x14a/0x210
[35955.529898]  [&amp;lt;ffffffff8151ff50&amp;gt;] __mutex_lock_slowpath+0x120/0x2e0
[35955.530201]  [&amp;lt;ffffffff81520141&amp;gt;] mutex_lock+0x31/0x50
[35955.530511]  [&amp;lt;ffffffffa175e970&amp;gt;] tgt_txn_stop_cb+0x380/0xd60 [ptlrpc]
[35955.530788]  [&amp;lt;ffffffff8151fabe&amp;gt;] ? mutex_unlock+0xe/0x10
[35955.531104]  [&amp;lt;ffffffffa0e6eace&amp;gt;] dt_txn_hook_stop+0x5e/0x90 [obdclass]
[35955.531402]  [&amp;lt;ffffffffa0771730&amp;gt;] osd_trans_stop+0x190/0x590 [osd_ldiskfs]
[35955.531692]  [&amp;lt;ffffffffa0784c85&amp;gt;] ? osd_object_destroy+0x295/0x680 [osd_ldiskfs]
[35955.532199]  [&amp;lt;ffffffffa0a9ecef&amp;gt;] ofd_trans_stop+0x1f/0x60 [ofd]
[35955.532489]  [&amp;lt;ffffffffa0aa10c1&amp;gt;] ofd_object_destroy+0x2d1/0x8e0 [ofd]
[35955.532774]  [&amp;lt;ffffffffa0a9b15d&amp;gt;] ofd_destroy_by_fid+0x35d/0x620 [ofd]
[35955.533139]  [&amp;lt;ffffffffa16d88d0&amp;gt;] ? ldlm_blocking_ast+0x0/0x180 [ptlrpc]
[35955.533457]  [&amp;lt;ffffffffa16da210&amp;gt;] ? ldlm_completion_ast+0x0/0x9b0 [ptlrpc]
[35955.533749]  [&amp;lt;ffffffffa0a949da&amp;gt;] ofd_destroy_hdl+0x2fa/0xb60 [ofd]
[35955.534076]  [&amp;lt;ffffffffa176bbfe&amp;gt;] tgt_request_handle+0xa2e/0x1230 [ptlrpc]
[35955.534398]  [&amp;lt;ffffffffa1718d64&amp;gt;] ptlrpc_main+0xe94/0x19e0 [ptlrpc]
[35955.534699]  [&amp;lt;ffffffffa1717ed0&amp;gt;] ? ptlrpc_main+0x0/0x19e0 [ptlrpc]
[35955.534972]  [&amp;lt;ffffffff8109ce4e&amp;gt;] kthread+0x9e/0xc0
[35955.535293]  [&amp;lt;ffffffff8100c24a&amp;gt;] child_rip+0xa/0x20
[35955.535553]  [&amp;lt;ffffffff8109cdb0&amp;gt;] ? kthread+0x0/0xc0
[35955.535802]  [&amp;lt;ffffffff8100c240&amp;gt;] ? child_rip+0x0/0x20
[35955.536051] 
[35955.536307] LustreError: dumping log to /tmp/lustre-log.1435624510.31469
[35955.558142] Pid: 30313, comm: ll_ost00_005
[35955.558431] 
[35955.558432] Call Trace:
[35955.558878]  [&amp;lt;ffffffff8104e658&amp;gt;] ? __change_page_attr_set_clr+0x808/0xcc0
[35955.559167]  [&amp;lt;ffffffffa037a0f1&amp;gt;] start_this_handle+0x291/0x4b0 [jbd2]
[35955.559475]  [&amp;lt;ffffffffa037a4b1&amp;gt;] ? jbd2_journal_start+0x81/0x100 [jbd2]
[35955.559762]  [&amp;lt;ffffffff8109d2d0&amp;gt;] ? autoremove_wake_function+0x0/0x40
[35955.560067]  [&amp;lt;ffffffffa037a4e5&amp;gt;] jbd2_journal_start+0xb5/0x100 [jbd2]
[35955.560397]  [&amp;lt;ffffffffa0722296&amp;gt;] ldiskfs_journal_start_sb+0x56/0xe0 [ldiskfs]
[35955.560977]  [&amp;lt;ffffffffa0771d0f&amp;gt;] osd_trans_start+0x1df/0x660 [osd_ldiskfs]
[35955.561326]  [&amp;lt;ffffffffa175fabd&amp;gt;] tgt_client_data_update+0x29d/0x680 [ptlrpc]
[35955.561710]  [&amp;lt;ffffffffa1760122&amp;gt;] tgt_client_del+0x282/0x600 [ptlrpc]
[35955.562072]  [&amp;lt;ffffffffa0aacb83&amp;gt;] ? ofd_grant_discard+0xb3/0x1c0 [ofd]
[35955.562360]  [&amp;lt;ffffffffa0a95f8b&amp;gt;] ofd_obd_disconnect+0x1bb/0x200 [ofd]
[35955.562695]  [&amp;lt;ffffffffa16c27f1&amp;gt;] target_handle_disconnect+0x1b1/0x480 [ptlrpc]
[35955.563244]  [&amp;lt;ffffffffa176a729&amp;gt;] tgt_disconnect+0x39/0x160 [ptlrpc]
[35955.563569]  [&amp;lt;ffffffffa176bbfe&amp;gt;] tgt_request_handle+0xa2e/0x1230 [ptlrpc]
[35955.563900]  [&amp;lt;ffffffffa1718d64&amp;gt;] ptlrpc_main+0xe94/0x19e0 [ptlrpc]
[35955.564228]  [&amp;lt;ffffffffa1717ed0&amp;gt;] ? ptlrpc_main+0x0/0x19e0 [ptlrpc]
[35955.564511]  [&amp;lt;ffffffff8109ce4e&amp;gt;] kthread+0x9e/0xc0
[35955.564823]  [&amp;lt;ffffffff8100c24a&amp;gt;] child_rip+0xa/0x20
[35955.565124]  [&amp;lt;ffffffff8109cdb0&amp;gt;] ? kthread+0x0/0xc0
[35955.565400]  [&amp;lt;ffffffff8100c240&amp;gt;] ? child_rip+0x0/0x20
[35955.566732] 
[35955.566924] Pid: 31471, comm: ll_ost00_008
[35955.567187] 
[35955.567188] Call Trace:
[35955.567590]  [&amp;lt;ffffffffa0381ab5&amp;gt;] jbd2_log_wait_commit+0xc5/0x140 [jbd2]
[35955.567899]  [&amp;lt;ffffffff8109d2d0&amp;gt;] ? autoremove_wake_function+0x0/0x40
[35955.568211]  [&amp;lt;ffffffff8152245e&amp;gt;] ? _spin_unlock+0xe/0x10
[35955.568495]  [&amp;lt;ffffffffa03799c4&amp;gt;] jbd2_journal_stop+0x1e4/0x2b0 [jbd2]
[35955.568857]  [&amp;lt;ffffffffa0722208&amp;gt;] __ldiskfs_journal_stop+0x68/0xa0 [ldiskfs]
[35955.569215]  [&amp;lt;ffffffffa0771772&amp;gt;] osd_trans_stop+0x1d2/0x590 [osd_ldiskfs]
[35955.569549]  [&amp;lt;ffffffffa0784c85&amp;gt;] ? osd_object_destroy+0x295/0x680 [osd_ldiskfs]
[35955.570076]  [&amp;lt;ffffffffa0a9ecef&amp;gt;] ofd_trans_stop+0x1f/0x60 [ofd]
[35955.570370]  [&amp;lt;ffffffffa0aa10c1&amp;gt;] ofd_object_destroy+0x2d1/0x8e0 [ofd]
[35955.570653]  [&amp;lt;ffffffffa0a9b15d&amp;gt;] ofd_destroy_by_fid+0x35d/0x620 [ofd]
[35955.570998]  [&amp;lt;ffffffffa16d88d0&amp;gt;] ? ldlm_blocking_ast+0x0/0x180 [ptlrpc]
[35955.571342]  [&amp;lt;ffffffffa16da210&amp;gt;] ? ldlm_completion_ast+0x0/0x9b0 [ptlrpc]
[35955.572135]  [&amp;lt;ffffffffa0a949da&amp;gt;] ofd_destroy_hdl+0x2fa/0xb60 [ofd]
[35955.572478]  [&amp;lt;ffffffffa176bbfe&amp;gt;] tgt_request_handle+0xa2e/0x1230 [ptlrpc]
[35955.572837]  [&amp;lt;ffffffffa1718d64&amp;gt;] ptlrpc_main+0xe94/0x19e0 [ptlrpc]
[35955.573166]  [&amp;lt;ffffffffa1717ed0&amp;gt;] ? ptlrpc_main+0x0/0x19e0 [ptlrpc]
[35955.573487]  [&amp;lt;ffffffff8109ce4e&amp;gt;] kthread+0x9e/0xc0
[35955.573755]  [&amp;lt;ffffffff8100c24a&amp;gt;] child_rip+0xa/0x20
[35955.574008]  [&amp;lt;ffffffff8109cdb0&amp;gt;] ? kthread+0x0/0xc0
[35955.574290]  [&amp;lt;ffffffff8100c240&amp;gt;] ? child_rip+0x0/0x20
[35955.574546] 
[35955.574735] Pid: 25759, comm: ll_ost01_004
[35955.574964] 
[35955.574965] Call Trace:
[35955.575404]  [&amp;lt;ffffffffa0381ab5&amp;gt;] jbd2_log_wait_commit+0xc5/0x140 [jbd2]
[35955.575680]  [&amp;lt;ffffffff8109d2d0&amp;gt;] ? autoremove_wake_function+0x0/0x40
[35955.575953]  [&amp;lt;ffffffff8152245e&amp;gt;] ? _spin_unlock+0xe/0x10
[35955.576247]  [&amp;lt;ffffffffa03799c4&amp;gt;] jbd2_journal_stop+0x1e4/0x2b0 [jbd2]
[35955.576564]  [&amp;lt;ffffffffa0722208&amp;gt;] __ldiskfs_journal_stop+0x68/0xa0 [ldiskfs]
[35955.576888]  [&amp;lt;ffffffffa0771772&amp;gt;] osd_trans_stop+0x1d2/0x590 [osd_ldiskfs]
[35955.577182]  [&amp;lt;ffffffffa0784c85&amp;gt;] ? osd_object_destroy+0x295/0x680 [osd_ldiskfs]
[35955.577715]  [&amp;lt;ffffffffa0a9ecef&amp;gt;] ofd_trans_stop+0x1f/0x60 [ofd]
[35955.577982]  [&amp;lt;ffffffffa0aa10c1&amp;gt;] ofd_object_destroy+0x2d1/0x8e0 [ofd]
[35955.578259]  [&amp;lt;ffffffffa0a9b15d&amp;gt;] ofd_destroy_by_fid+0x35d/0x620 [ofd]
[35955.578604]  [&amp;lt;ffffffffa16d88d0&amp;gt;] ? ldlm_blocking_ast+0x0/0x180 [ptlrpc]
[35955.578910]  [&amp;lt;ffffffffa16da210&amp;gt;] ? ldlm_completion_ast+0x0/0x9b0 [ptlrpc]
[35955.579200]  [&amp;lt;ffffffffa0a949da&amp;gt;] ofd_destroy_hdl+0x2fa/0xb60 [ofd]
[35955.579570]  [&amp;lt;ffffffffa176bbfe&amp;gt;] tgt_request_handle+0xa2e/0x1230 [ptlrpc]
[35955.579883]  [&amp;lt;ffffffffa1718d64&amp;gt;] ptlrpc_main+0xe94/0x19e0 [ptlrpc]
[35955.580184]  [&amp;lt;ffffffffa1717ed0&amp;gt;] ? ptlrpc_main+0x0/0x19e0 [ptlrpc]
[35955.580524]  [&amp;lt;ffffffff8109ce4e&amp;gt;] kthread+0x9e/0xc0
[35955.580801]  [&amp;lt;ffffffff8100c24a&amp;gt;] child_rip+0xa/0x20
[35955.581051]  [&amp;lt;ffffffff8109cdb0&amp;gt;] ? kthread+0x0/0xc0
[35955.581325]  [&amp;lt;ffffffff8100c240&amp;gt;] ? child_rip+0x0/0x20
[35955.581636] 
[35962.384528] Lustre: lustre-OST0000: Export ffff8800b45b67f0 already connecting from 0@lo
[35962.385358] Lustre: Skipped 1 previous similar message
[35977.385175] Lustre: lustre-OST0000: haven&apos;t heard from client lustre-MDT0000-mdtlov_UUID (at (no nid)) in 55 seconds. I think it&apos;s dead, and I am evicting it. exp ffff8800b45b67f0, cur 1435624532 expire 1435624502 last 1435624477
[35984.376066] LNet: Service thread pid 31470 was inactive for 62.00s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
[35984.377655] LNet: Skipped 3 previous similar messages
[35984.378107] Pid: 31470, comm: ll_ost00_007
[35984.378547] 
[35984.378548] Call Trace:
[35984.379309]  [&amp;lt;ffffffff8104e658&amp;gt;] ? __change_page_attr_set_clr+0x808/0xcc0
[35984.379829]  [&amp;lt;ffffffff8109d5fe&amp;gt;] ? prepare_to_wait+0x4e/0x80
[35984.380314]  [&amp;lt;ffffffffa037a0f1&amp;gt;] start_this_handle+0x291/0x4b0 [jbd2]
[35984.380888]  [&amp;lt;ffffffffa037a4b1&amp;gt;] ? jbd2_journal_start+0x81/0x100 [jbd2]
[35984.381462]  [&amp;lt;ffffffff8109d2d0&amp;gt;] ? autoremove_wake_function+0x0/0x40
[35984.381898]  [&amp;lt;ffffffffa037a4e5&amp;gt;] jbd2_journal_start+0xb5/0x100 [jbd2]
[35984.382408]  [&amp;lt;ffffffffa0722296&amp;gt;] ldiskfs_journal_start_sb+0x56/0xe0 [ldiskfs]
[35984.383116]  [&amp;lt;ffffffffa0771d0f&amp;gt;] osd_trans_start+0x1df/0x660 [osd_ldiskfs]
[35984.383603]  [&amp;lt;ffffffffa175fabd&amp;gt;] tgt_client_data_update+0x29d/0x680 [ptlrpc]
[35984.384208]  [&amp;lt;ffffffffa1760bcc&amp;gt;] tgt_client_new+0x41c/0x600 [ptlrpc]
[35984.384660]  [&amp;lt;ffffffffa0a97ec3&amp;gt;] ofd_obd_connect+0x363/0x400 [ofd]
[35984.385225]  [&amp;lt;ffffffffa16c7d84&amp;gt;] target_handle_connect+0xe94/0x2d60 [ptlrpc]
[35984.385699]  [&amp;lt;ffffffff8152245e&amp;gt;] ? _spin_unlock+0xe/0x10
[35984.386116]  [&amp;lt;ffffffffa0d4e34f&amp;gt;] ? cfs_trace_unlock_tcd+0x3f/0xa0 [libcfs]
[35984.386554]  [&amp;lt;ffffffff812918f0&amp;gt;] ? string+0x40/0x100
[35984.387012]  [&amp;lt;ffffffffa176b762&amp;gt;] tgt_request_handle+0x592/0x1230 [ptlrpc]
[35984.387494]  [&amp;lt;ffffffffa1718d64&amp;gt;] ptlrpc_main+0xe94/0x19e0 [ptlrpc]
[35984.387977]  [&amp;lt;ffffffffa1717ed0&amp;gt;] ? ptlrpc_main+0x0/0x19e0 [ptlrpc]
[35984.388423]  [&amp;lt;ffffffff8109ce4e&amp;gt;] kthread+0x9e/0xc0
[35984.388862]  [&amp;lt;ffffffff8100c24a&amp;gt;] child_rip+0xa/0x20
[35984.389257]  [&amp;lt;ffffffff8109cdb0&amp;gt;] ? kthread+0x0/0xc0
[35984.389692]  [&amp;lt;ffffffff8100c240&amp;gt;] ? child_rip+0x0/0x20
[35984.390097] 
[35984.390408] LustreError: dumping log to /tmp/lustre-log.1435624539.31470
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;This is basically current master with a couple of patches that don&apos;t seem related (tag in my source tree master-20150629).&lt;/p&gt;</description>
        <environment></environment>
        <key id="30864">LU-6777</key>
        <summary>replay-single test 34 hung in tgt_txn_stop_cb</summary>
        <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
        <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
        <statusCategory id="3" key="done" colorName="success"/>
        <resolution id="5">Cannot Reproduce</resolution>
        <assignee username="wc-triage">WC Triage</assignee>
        <reporter username="green">Oleg Drokin</reporter>
        <labels>
        </labels>
        <created>Tue, 30 Jun 2015 11:58:32 +0000</created>
        <updated>Mon, 20 Jul 2020 22:35:47 +0000</updated>
        <resolved>Mon, 20 Jul 2020 22:35:47 +0000</resolved>
        <due></due>
        <votes>0</votes>
        <watches>3</watches>
        <comments>
                            <comment id="120327" author="green" created="Fri, 3 Jul 2015 23:31:29 +0000"  >&lt;p&gt;Just hit this again:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[13377.522582] Lustre: DEBUG MARKER: == replay-single test 34: abort recovery before client does replay (test mds_cleanup_orphans) == 16:15:09 (1435954509)
[13383.248379] Lustre: lustre-OST0000: Export ffff88006302f7f0 already connecting from 0@lo
[13388.248401] Lustre: lustre-OST0000: Export ffff88006302f7f0 already connecting from 0@lo
[13393.248253] Lustre: lustre-OST0000: Export ffff88006302f7f0 already connecting from 0@lo
[13398.248428] Lustre: lustre-OST0000: Export ffff88006302f7f0 already connecting from 0@lo
[13403.248426] Lustre: lustre-OST0000: Export ffff88006302f7f0 already connecting from 0@lo
[13404.740050] LNet: Service thread pid 21885 was inactive for 40.00s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
[13404.740866] Pid: 21885, comm: ll_ost00_009
[13404.741420] 
[13404.741421] Call Trace:
[13404.741821]  [&amp;lt;ffffffff8109d157&amp;gt;] ? bit_waitqueue+0x17/0xd0
[13404.742103]  [&amp;lt;ffffffff8117394a&amp;gt;] ? cache_alloc_debugcheck_after+0x14a/0x210
[13404.742401]  [&amp;lt;ffffffff8151ff50&amp;gt;] __mutex_lock_slowpath+0x120/0x2e0
[13404.742677]  [&amp;lt;ffffffff81520141&amp;gt;] mutex_lock+0x31/0x50
[13404.742986]  [&amp;lt;ffffffffa1768673&amp;gt;] tgt_txn_stop_cb+0x7d3/0x13e0 [ptlrpc]
[13404.743273]  [&amp;lt;ffffffff8151fabe&amp;gt;] ? mutex_unlock+0xe/0x10
[13404.743560]  [&amp;lt;ffffffffa0da3dee&amp;gt;] dt_txn_hook_stop+0x5e/0x90 [obdclass]
[13404.743854]  [&amp;lt;ffffffffa076d730&amp;gt;] osd_trans_stop+0x190/0x590 [osd_ldiskfs]
[13404.744197]  [&amp;lt;ffffffffa0780ca5&amp;gt;] ? osd_object_destroy+0x295/0x680 [osd_ldiskfs]
[13404.744686]  [&amp;lt;ffffffffa0a9ccff&amp;gt;] ofd_trans_stop+0x1f/0x60 [ofd]
[13404.744980]  [&amp;lt;ffffffffa0a9f0d1&amp;gt;] ofd_object_destroy+0x2d1/0x8e0 [ofd]
[13404.745657]  [&amp;lt;ffffffffa0a9916d&amp;gt;] ofd_destroy_by_fid+0x35d/0x620 [ofd]
[13404.745877]  [&amp;lt;ffffffffa16daaa0&amp;gt;] ? ldlm_blocking_ast+0x0/0x180 [ptlrpc]
[13404.746135]  [&amp;lt;ffffffffa16dc3e0&amp;gt;] ? ldlm_completion_ast+0x0/0x9b0 [ptlrpc]
[13404.746421]  [&amp;lt;ffffffffa0a929ea&amp;gt;] ofd_destroy_hdl+0x2fa/0xb60 [ofd]
[13404.746631]  [&amp;lt;ffffffffa1771e22&amp;gt;] tgt_request_handle+0xa42/0x1230 [ptlrpc]
[13404.746955]  [&amp;lt;ffffffffa171b084&amp;gt;] ptlrpc_main+0xe94/0x19e0 [ptlrpc]
[13404.747225]  [&amp;lt;ffffffffa171a1f0&amp;gt;] ? ptlrpc_main+0x0/0x19e0 [ptlrpc]
[13404.747413]  [&amp;lt;ffffffff8109ce4e&amp;gt;] kthread+0x9e/0xc0
[13404.747576]  [&amp;lt;ffffffff8100c24a&amp;gt;] child_rip+0xa/0x20
[13404.747740]  [&amp;lt;ffffffff8109cdb0&amp;gt;] ? kthread+0x0/0xc0
[13404.747906]  [&amp;lt;ffffffff8100c240&amp;gt;] ? child_rip+0x0/0x20
[13404.748171] 
[13404.748730] LustreError: dumping log to /tmp/lustre-log.1435954536.21885
[13404.786698] Pid: 14834, comm: ll_ost00_000
[13404.786971] 
[13404.786972] Call Trace:
[13404.787434]  [&amp;lt;ffffffff8104e658&amp;gt;] ? __change_page_attr_set_clr+0x808/0xcc0
[13404.787724]  [&amp;lt;ffffffff8109d5fe&amp;gt;] ? prepare_to_wait+0x4e/0x80
[13404.788052]  [&amp;lt;ffffffffa03760f1&amp;gt;] start_this_handle+0x291/0x4b0 [jbd2]
[13404.788356]  [&amp;lt;ffffffffa03764b1&amp;gt;] ? jbd2_journal_start+0x81/0x100 [jbd2]
[13404.788660]  [&amp;lt;ffffffff8109d2d0&amp;gt;] ? autoremove_wake_function+0x0/0x40
[13404.788996]  [&amp;lt;ffffffffa03764e5&amp;gt;] jbd2_journal_start+0xb5/0x100 [jbd2]
[13404.789326]  [&amp;lt;ffffffffa071e296&amp;gt;] ldiskfs_journal_start_sb+0x56/0xe0 [ldiskfs]
[13404.789817]  [&amp;lt;ffffffffa076dd0f&amp;gt;] osd_trans_start+0x1df/0x660 [osd_ldiskfs]
[13404.790190]  [&amp;lt;ffffffffa17628cd&amp;gt;] tgt_client_data_update+0x29d/0x680 [ptlrpc]
[13404.790538]  [&amp;lt;ffffffffa1762f32&amp;gt;] tgt_client_del+0x282/0x600 [ptlrpc]
[13404.790776]  [&amp;lt;ffffffffa0aaab93&amp;gt;] ? ofd_grant_discard+0xb3/0x1c0 [ofd]
[13404.791082]  [&amp;lt;ffffffffa0a93f9b&amp;gt;] ofd_obd_disconnect+0x1bb/0x200 [ofd]
[13404.791339]  [&amp;lt;ffffffffa16c47f1&amp;gt;] target_handle_disconnect+0x1b1/0x480 [ptlrpc]
[13404.791697]  [&amp;lt;ffffffffa1770939&amp;gt;] tgt_disconnect+0x39/0x160 [ptlrpc]
[13404.791942]  [&amp;lt;ffffffffa1771e22&amp;gt;] tgt_request_handle+0xa42/0x1230 [ptlrpc]
[13404.792267]  [&amp;lt;ffffffffa171b084&amp;gt;] ptlrpc_main+0xe94/0x19e0 [ptlrpc]
[13404.792509]  [&amp;lt;ffffffffa171a1f0&amp;gt;] ? ptlrpc_main+0x0/0x19e0 [ptlrpc]
[13404.792781]  [&amp;lt;ffffffff8109ce4e&amp;gt;] kthread+0x9e/0xc0
[13404.793071]  [&amp;lt;ffffffff8100c24a&amp;gt;] child_rip+0xa/0x20
[13404.793363]  [&amp;lt;ffffffff8109cdb0&amp;gt;] ? kthread+0x0/0xc0
[13404.793643]  [&amp;lt;ffffffff8100c240&amp;gt;] ? child_rip+0x0/0x20
[13404.793918] 
[13413.204070] LNet: Service thread pid 14838 was inactive for 40.00s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
[13413.205041] LNet: Skipped 1 previous similar message
[13413.205330] Pid: 14838, comm: ll_ost01_001
[13413.205605] 
[13413.205606] Call Trace:
[13413.206080]  [&amp;lt;ffffffff8104e658&amp;gt;] ? __change_page_attr_set_clr+0x808/0xcc0
[13413.206418]  [&amp;lt;ffffffff8109d5fe&amp;gt;] ? prepare_to_wait+0x4e/0x80
[13413.206733]  [&amp;lt;ffffffffa03760f1&amp;gt;] start_this_handle+0x291/0x4b0 [jbd2]
[13413.207172]  [&amp;lt;ffffffffa03764b1&amp;gt;] ? jbd2_journal_start+0x81/0x100 [jbd2]
[13413.207509]  [&amp;lt;ffffffff8109d2d0&amp;gt;] ? autoremove_wake_function+0x0/0x40
[13413.207840]  [&amp;lt;ffffffffa03764e5&amp;gt;] jbd2_journal_start+0xb5/0x100 [jbd2]
[13413.209322]  [&amp;lt;ffffffffa071e296&amp;gt;] ldiskfs_journal_start_sb+0x56/0xe0 [ldiskfs]
[13413.209862]  [&amp;lt;ffffffffa076dd0f&amp;gt;] osd_trans_start+0x1df/0x660 [osd_ldiskfs]
[13413.210343]  [&amp;lt;ffffffffa17628cd&amp;gt;] tgt_client_data_update+0x29d/0x680 [ptlrpc]
[13413.210714]  [&amp;lt;ffffffffa1763d0f&amp;gt;] tgt_client_new+0x3ef/0x6c0 [ptlrpc]
[13413.211168]  [&amp;lt;ffffffffa0a95ed3&amp;gt;] ofd_obd_connect+0x363/0x400 [ofd]
[13413.211520]  [&amp;lt;ffffffffa16c9dc4&amp;gt;] target_handle_connect+0xe94/0x2d60 [ptlrpc]
[13413.211862]  [&amp;lt;ffffffff8152245e&amp;gt;] ? _spin_unlock+0xe/0x10
[13413.212180]  [&amp;lt;ffffffffa0c0d34f&amp;gt;] ? cfs_trace_unlock_tcd+0x3f/0xa0 [libcfs]
[13413.212519]  [&amp;lt;ffffffff812918f0&amp;gt;] ? string+0x40/0x100
[13413.212851]  [&amp;lt;ffffffffa1771992&amp;gt;] tgt_request_handle+0x5b2/0x1230 [ptlrpc]
[13413.213256]  [&amp;lt;ffffffffa171b084&amp;gt;] ptlrpc_main+0xe94/0x19e0 [ptlrpc]
[13413.213609]  [&amp;lt;ffffffffa171a1f0&amp;gt;] ? ptlrpc_main+0x0/0x19e0 [ptlrpc]
[13413.213932]  [&amp;lt;ffffffff8109ce4e&amp;gt;] kthread+0x9e/0xc0
[13413.214327]  [&amp;lt;ffffffff8100c24a&amp;gt;] child_rip+0xa/0x20
[13413.214625]  [&amp;lt;ffffffff8109cdb0&amp;gt;] ? kthread+0x0/0xc0
[13413.214921]  [&amp;lt;ffffffff8100c240&amp;gt;] ? child_rip+0x0/0x20
[13413.215241] 
[13413.215465] LustreError: dumping log to /tmp/lustre-log.1435954545.14838
[13413.249117] Lustre: lustre-OST0000: Export ffff88006302f7f0 already connecting from 0@lo
[13413.249883] Lustre: Skipped 1 previous similar message
[13428.248615] Lustre: lustre-OST0000: haven&apos;t heard from client lustre-MDT0000-mdtlov_UUID (at (no nid)) in 55 seconds. I think it&apos;s dead, and I am evicting it. exp ffff88006302f7f0, cur 1435954560 expire 1435954530 last 1435954505
[13463.248336] Lustre: lustre-OST0000: Export ffff8800690837f0 already connecting from 0@lo
[13463.248831] Lustre: Skipped 3 previous similar messages
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="125620" author="green" created="Sat, 29 Aug 2015 14:58:42 +0000"  >&lt;p&gt;Just hit again&lt;/p&gt;</comment>
                            <comment id="275822" author="adilger" created="Mon, 20 Jul 2020 22:35:47 +0000"  >&lt;p&gt;Have not seen this in a long time.&lt;/p&gt;</comment>
        </comments>
        <attachments>
        </attachments>
        <subtasks>
        </subtasks>
        <customfields>
            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                <customfieldname>Development</customfieldname>
                <customfieldvalues>
                </customfieldvalues>
            </customfield>
            <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                <customfieldname>Rank</customfieldname>
                <customfieldvalues>
                    <customfieldvalue>1|hzxguv:</customfieldvalue>
                </customfieldvalues>
            </customfield>
            <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                <customfieldname>Rank (Obsolete)</customfieldname>
                <customfieldvalues>
                    <customfieldvalue>9223372036854775807</customfieldvalue>
                </customfieldvalues>
            </customfield>
            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                <customfieldname>Severity</customfieldname>
                <customfieldvalues>
                    <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>
                </customfieldvalues>
            </customfield>
        </customfields>
    </item>
</channel>
</rss>