<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:34:42 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
<language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-10395] ASSERTION( osd-&gt;od_oi_table != NULL &amp;&amp; osd-&gt;od_oi_count &gt;= 1 ) failed</title>
                <link>https://jira.whamcloud.com/browse/LU-10395</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;Just had this crash in my master-next testing:&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;&#160;
[271899.484182] Lustre: DEBUG MARKER: == replay-single test 26: |X| open(O_CREAT), unlink two, close one, replay, close one (test mds_cleanup_orphans) ====================================================================================================== 05:31:19 (1513333879)
[271900.114927] Turning device loop0 (0x700000) read-only
[271900.159562] Lustre: DEBUG MARKER: mds1 REPLAY BARRIER on lustre-MDT0000
[271900.197289] Lustre: DEBUG MARKER: local REPLAY BARRIER on lustre-MDT0000
[271900.868045] LustreError: 29112:0:(osd_internal.h:899:osd_fid2oi()) ASSERTION( osd-&amp;gt;od_oi_table != NULL &amp;amp;&amp;amp; osd-&amp;gt;od_oi_count &amp;gt;= 1 ) failed: [0xa:0x8:0x0]
[271900.870230] LustreError: 29112:0:(osd_internal.h:899:osd_fid2oi()) LBUG
[271900.870897] Pid: 29112, comm: ll_mgs_0002
[271900.871499] 
Call Trace:
[271900.874098]  [&amp;lt;ffffffffa02927ce&amp;gt;] libcfs_call_trace+0x4e/0x60 [libcfs]
[271900.874904]  [&amp;lt;ffffffffa029285c&amp;gt;] lbug_with_loc+0x4c/0xb0 [libcfs]
[271900.875989]  [&amp;lt;ffffffffa0bd5390&amp;gt;] __osd_oi_lookup+0x2e0/0x390 [osd_ldiskfs]
[271900.876716]  [&amp;lt;ffffffffa0bd715a&amp;gt;] osd_oi_lookup+0xca/0x190 [osd_ldiskfs]
[271900.877452]  [&amp;lt;ffffffffa0bd3112&amp;gt;] osd_fid_lookup+0x4a2/0x1b50 [osd_ldiskfs]
[271900.878132]  [&amp;lt;ffffffff810e3224&amp;gt;] ? lockdep_init_map+0xc4/0x600
[271900.902777]  [&amp;lt;ffffffffa0bd4821&amp;gt;] osd_object_init+0x61/0x180 [osd_ldiskfs]
[271900.903535]  [&amp;lt;ffffffffa03d352f&amp;gt;] lu_object_alloc+0xdf/0x310 [obdclass]
[271900.904230]  [&amp;lt;ffffffffa03d38cc&amp;gt;] lu_object_find_at+0x16c/0x290 [obdclass]
[271900.904930]  [&amp;lt;ffffffffa03d4d88&amp;gt;] dt_locate_at+0x18/0xb0 [obdclass]
[271900.905594]  [&amp;lt;ffffffffa0399140&amp;gt;] llog_osd_open+0x4f0/0xf80 [obdclass]
[271900.906616]  [&amp;lt;ffffffffa038814a&amp;gt;] llog_open+0x13a/0x3b0 [obdclass]
[271900.907360]  [&amp;lt;ffffffffa0647953&amp;gt;] llog_origin_handle_read_header+0x1b3/0x630 [ptlrpc]
[271900.908617]  [&amp;lt;ffffffffa068da13&amp;gt;] tgt_llog_read_header+0x33/0xe0 [ptlrpc]
[271900.909364]  [&amp;lt;ffffffffa069716b&amp;gt;] tgt_request_handle+0x93b/0x13e0 [ptlrpc]
[271900.910077]  [&amp;lt;ffffffffa063c091&amp;gt;] ptlrpc_server_handle_request+0x261/0xaf0 [ptlrpc]
[271900.911298]  [&amp;lt;ffffffffa063fe48&amp;gt;] ptlrpc_main+0xa58/0x1df0 [ptlrpc]
[271900.911970]  [&amp;lt;ffffffff81706467&amp;gt;] ? _raw_spin_unlock_irq+0x27/0x50
[271900.912646]  [&amp;lt;ffffffffa063f3f0&amp;gt;] ? ptlrpc_main+0x0/0x1df0 [ptlrpc]
[271900.914668]  [&amp;lt;ffffffff810a2eba&amp;gt;] kthread+0xea/0xf0
[271900.915359]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread+0x0/0xf0
[271900.915996]  [&amp;lt;ffffffff8170fb98&amp;gt;] ret_from_fork+0x58/0x90
[271900.916627]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread+0x0/0xf0
[271900.917273] 
[271900.917861] Kernel panic - not syncing: LBUG
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;I have a crashdump.&lt;/p&gt;</description>
                <environment></environment>
        <key id="49762">LU-10395</key>
            <summary>ASSERTION( osd-&gt;od_oi_table != NULL &amp;&amp; osd-&gt;od_oi_count &gt;= 1 ) failed</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="bzzz">Alex Zhuravlev</assignee>
                                    <reporter username="green">Oleg Drokin</reporter>
                        <labels>
                    </labels>
                <created>Fri, 15 Dec 2017 15:27:37 +0000</created>
                <updated>Sat, 11 Jul 2020 15:17:21 +0000</updated>
                            <resolved>Thu, 16 Apr 2020 20:03:49 +0000</resolved>
                                    <version>Lustre 2.12.0</version>
                    <version>Lustre 2.13.0</version>
                                    <fixVersion>Lustre 2.14.0</fixVersion>
                    <fixVersion>Lustre 2.12.6</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>6</watches>
                                                                            <comments>
                            <comment id="216646" author="green" created="Mon, 18 Dec 2017 20:42:58 +0000"  >&lt;p&gt;Just had this one happen again.&lt;/p&gt;</comment>
                            <comment id="216690" author="green" created="Tue, 19 Dec 2017 05:07:26 +0000"  >&lt;p&gt;and just once more.&lt;/p&gt;</comment>
                            <comment id="216702" author="yong.fan" created="Tue, 19 Dec 2017 08:02:47 +0000"  >&lt;p&gt;On which branch? any special patch(es)?&lt;/p&gt;</comment>
                            <comment id="217447" author="green" created="Thu, 4 Jan 2018 06:20:49 +0000"  >&lt;p&gt;hit it again on the current master-next (= current master as of the time of this comment):&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[269316.691753] Lustre: DEBUG MARKER: == replay-single test 20c: check that client eviction does not affect file content =================== 23:11:02 (1515039062)
[269316.759984] Lustre: 29811:0:(genops.c:1818:obd_export_evict_by_uuid()) lustre-MDT0000: evicting fbdcce1d-2114-114c-1c77-51be5d58048e at adminstrative request
[269317.806265] LustreError: 11-0: lustre-MDT0000-mdc-ffff8800ac76f800: operation ldlm_enqueue to node 0@lo failed: rc = -107
[269317.809371] LustreError: 29813:0:(file.c:4074:ll_inode_revalidate_fini()) lustre: revalidate FID [0x200000007:0x1:0x0] error: rc = -5
[269319.367417] Lustre: DEBUG MARKER: == replay-single test 21: |X| open(O_CREAT), unlink touch new, replay, close (test mds_cleanup_orphans) ====================================================================================================== 23:11:04 (1515039064)
[269319.575062] Turning device loop0 (0x700000) read-only
[269319.600543] Lustre: DEBUG MARKER: mds1 REPLAY BARRIER on lustre-MDT0000
[269319.615571] Lustre: DEBUG MARKER: local REPLAY BARRIER on lustre-MDT0000
[269320.012251] LustreError: 29273:0:(osd_internal.h:899:osd_fid2oi()) ASSERTION( osd-&amp;gt;od_oi_table != NULL &amp;amp;&amp;amp; osd-&amp;gt;od_oi_count &amp;gt;= 1 ) failed: [0xa:0x2:0x0]
[269320.013895] LustreError: 29273:0:(osd_internal.h:899:osd_fid2oi()) LBUG
[269320.027745] Pid: 29273, comm: ll_mgs_0002
[269320.028226] 
Call Trace:
[269320.029360]  [&amp;lt;ffffffffa026e7ce&amp;gt;] libcfs_call_trace+0x4e/0x60 [libcfs]
[269320.030022]  [&amp;lt;ffffffffa026e85c&amp;gt;] lbug_with_loc+0x4c/0xb0 [libcfs]
[269320.030589]  [&amp;lt;ffffffffa0bac790&amp;gt;] __osd_oi_lookup+0x2e0/0x390 [osd_ldiskfs]
[269320.031065]  [&amp;lt;ffffffffa0bae55a&amp;gt;] osd_oi_lookup+0xca/0x190 [osd_ldiskfs]
[269320.031590]  [&amp;lt;ffffffffa0baa422&amp;gt;] osd_fid_lookup+0x4a2/0x1c40 [osd_ldiskfs]
[269320.032062]  [&amp;lt;ffffffff810e3224&amp;gt;] ? lockdep_init_map+0xc4/0x600
[269320.032692]  [&amp;lt;ffffffffa0babc21&amp;gt;] osd_object_init+0x61/0x180 [osd_ldiskfs]
[269320.033438]  [&amp;lt;ffffffffa03afcff&amp;gt;] lu_object_alloc+0xdf/0x310 [obdclass]
[269320.034136]  [&amp;lt;ffffffffa03b009c&amp;gt;] lu_object_find_at+0x16c/0x290 [obdclass]
[269320.034855]  [&amp;lt;ffffffffa03b1558&amp;gt;] dt_locate_at+0x18/0xb0 [obdclass]
[269320.035565]  [&amp;lt;ffffffffa0377520&amp;gt;] llog_osd_open+0x4f0/0xf80 [obdclass]
[269320.036257]  [&amp;lt;ffffffffa036414a&amp;gt;] llog_open+0x13a/0x3b0 [obdclass]
[269320.037013]  [&amp;lt;ffffffffa05f5a03&amp;gt;] llog_origin_handle_read_header+0x1b3/0x630 [ptlrpc]
[269320.038416]  [&amp;lt;ffffffffa063ba53&amp;gt;] tgt_llog_read_header+0x33/0xe0 [ptlrpc]
[269320.039153]  [&amp;lt;ffffffffa06451ab&amp;gt;] tgt_request_handle+0x93b/0x13e0 [ptlrpc]
[269320.039887]  [&amp;lt;ffffffffa05ea141&amp;gt;] ptlrpc_server_handle_request+0x261/0xaf0 [ptlrpc]
[269320.059312]  [&amp;lt;ffffffffa05edef8&amp;gt;] ptlrpc_main+0xa58/0x1df0 [ptlrpc]
[269320.059852]  [&amp;lt;ffffffff81706467&amp;gt;] ? _raw_spin_unlock_irq+0x27/0x50
[269320.060367]  [&amp;lt;ffffffffa05ed4a0&amp;gt;] ? ptlrpc_main+0x0/0x1df0 [ptlrpc]
[269320.060842]  [&amp;lt;ffffffff810a2eba&amp;gt;] kthread+0xea/0xf0
[269320.061311]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread+0x0/0xf0
[269320.062024]  [&amp;lt;ffffffff8170fb98&amp;gt;] ret_from_fork+0x58/0x90
[269320.062780]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread+0x0/0xf0
[269320.063281] 
[269320.063683] Kernel panic - not syncing: LBUG
[269320.064200] CPU: 0 PID: 29273 Comm: ll_mgs_0002 Tainted: P           OE  ------------   3.10.0-debug #2
[269320.065091] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
[269320.065559]  ffffffffa028e212 00000000626be182 ffff8800a1bf78a0 ffffffff816fd3e4
[269320.066423]  ffff8800a1bf7920 ffffffff816f8c34 ffffffff00000008 ffff8800a1bf7930
[269320.067317]  ffff8800a1bf78d0 00000000626be182 00000000626be182 0000000000000001
[269320.068197] Call Trace:
[269320.068621]  [&amp;lt;ffffffff816fd3e4&amp;gt;] dump_stack+0x19/0x1b
[269320.069060]  [&amp;lt;ffffffff816f8c34&amp;gt;] panic+0xd8/0x1e7
[269320.069528]  [&amp;lt;ffffffffa026e874&amp;gt;] lbug_with_loc+0x64/0xb0 [libcfs]
[269320.070015]  [&amp;lt;ffffffffa0bac790&amp;gt;] __osd_oi_lookup+0x2e0/0x390 [osd_ldiskfs]
[269320.070547]  [&amp;lt;ffffffffa0bae55a&amp;gt;] osd_oi_lookup+0xca/0x190 [osd_ldiskfs]
[269320.071017]  [&amp;lt;ffffffffa0baa422&amp;gt;] osd_fid_lookup+0x4a2/0x1c40 [osd_ldiskfs]
[269320.071510]  [&amp;lt;ffffffff810e3224&amp;gt;] ? lockdep_init_map+0xc4/0x600
[269320.071959]  [&amp;lt;ffffffffa0babc21&amp;gt;] osd_object_init+0x61/0x180 [osd_ldiskfs]
[269320.072477]  [&amp;lt;ffffffffa03afcff&amp;gt;] lu_object_alloc+0xdf/0x310 [obdclass]
[269320.072957]  [&amp;lt;ffffffffa03b009c&amp;gt;] lu_object_find_at+0x16c/0x290 [obdclass]
[269320.073602]  [&amp;lt;ffffffffa03b1558&amp;gt;] dt_locate_at+0x18/0xb0 [obdclass]
[269320.074419]  [&amp;lt;ffffffffa0377520&amp;gt;] llog_osd_open+0x4f0/0xf80 [obdclass]
[269320.075331]  [&amp;lt;ffffffffa036414a&amp;gt;] llog_open+0x13a/0x3b0 [obdclass]
[269320.076242]  [&amp;lt;ffffffffa05f5a03&amp;gt;] llog_origin_handle_read_header+0x1b3/0x630 [ptlrpc]
[269320.077666]  [&amp;lt;ffffffffa063ba53&amp;gt;] tgt_llog_read_header+0x33/0xe0 [ptlrpc]
[269320.078474]  [&amp;lt;ffffffffa06451ab&amp;gt;] tgt_request_handle+0x93b/0x13e0 [ptlrpc]
[269320.079298]  [&amp;lt;ffffffffa05ea141&amp;gt;] ptlrpc_server_handle_request+0x261/0xaf0 [ptlrpc]
[269320.080761]  [&amp;lt;ffffffffa05edef8&amp;gt;] ptlrpc_main+0xa58/0x1df0 [ptlrpc]
[269320.081560]  [&amp;lt;ffffffff81706467&amp;gt;] ? _raw_spin_unlock_irq+0x27/0x50
[269320.082392]  [&amp;lt;ffffffffa05ed4a0&amp;gt;] ? ptlrpc_register_service+0xeb0/0xeb0 [ptlrpc]
[269320.083844]  [&amp;lt;ffffffff810a2eba&amp;gt;] kthread+0xea/0xf0
[269320.084600]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread_create_on_node+0x140/0x140
[269320.085397]  [&amp;lt;ffffffff8170fb98&amp;gt;] ret_from_fork+0x58/0x90
[269320.087581]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread_create_on_node+0x140/0x140
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="217448" author="green" created="Thu, 4 Jan 2018 06:23:40 +0000"  >&lt;p&gt;latest crash&apos;s crashdump is on onyx-68 in /exports/crashdumps/192.168.123.129-2018-01-03-23:11:10&lt;/p&gt;
</comment>
                            <comment id="218775" author="green" created="Sun, 21 Jan 2018 17:55:46 +0000"  >&lt;p&gt;just hit this once more on current master-next&lt;/p&gt;</comment>
                            <comment id="219928" author="green" created="Sun, 4 Feb 2018 05:03:48 +0000"  >&lt;p&gt;just hit this again on master-next:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[151025.517057] Lustre: DEBUG MARKER: == sanity-pfl test complete, duration 805 sec ======================================================== 18:28:23 (1517700503)
[151025.778703] Lustre: Failing over lustre-MDT0000
[151026.031080] LustreError: 13940:0:(osd_internal.h:911:osd_fid2oi()) ASSERTION( osd-&amp;gt;od_oi_table != NULL &amp;amp;&amp;amp; osd-&amp;gt;od_oi_count &amp;gt;= 1 ) failed: [0xa:0x9:0x0]
[151026.033598] LustreError: 13940:0:(osd_internal.h:911:osd_fid2oi()) LBUG
[151026.034402] Pid: 13940, comm: ll_mgs_0000
[151026.035148] 
Call Trace:
[151026.036641]  [&amp;lt;ffffffffa01d77ce&amp;gt;] libcfs_call_trace+0x4e/0x60 [libcfs]
[151026.037462]  [&amp;lt;ffffffffa01d785c&amp;gt;] lbug_with_loc+0x4c/0xb0 [libcfs]
[151026.038313]  [&amp;lt;ffffffffa0b52f80&amp;gt;] __osd_oi_lookup+0x2e0/0x390 [osd_ldiskfs]
[151026.041160]  [&amp;lt;ffffffffa0b54d5a&amp;gt;] osd_oi_lookup+0xca/0x190 [osd_ldiskfs]
[151026.041748]  [&amp;lt;ffffffffa0b50902&amp;gt;] osd_fid_lookup+0x4a2/0x1c60 [osd_ldiskfs]
[151026.042281]  [&amp;lt;ffffffff810e3224&amp;gt;] ? lockdep_init_map+0xc4/0x600
[151026.042780]  [&amp;lt;ffffffffa0b52121&amp;gt;] osd_object_init+0x61/0x110 [osd_ldiskfs]
[151026.043366]  [&amp;lt;ffffffffa039999f&amp;gt;] lu_object_alloc+0xdf/0x310 [obdclass]
[151026.043895]  [&amp;lt;ffffffffa0399d3c&amp;gt;] lu_object_find_at+0x16c/0x290 [obdclass]
[151026.046491]  [&amp;lt;ffffffffa039b308&amp;gt;] dt_locate_at+0x18/0xb0 [obdclass]
[151026.047276]  [&amp;lt;ffffffffa0361800&amp;gt;] llog_osd_open+0x4f0/0xf80 [obdclass]
[151026.047924]  [&amp;lt;ffffffffa034e14a&amp;gt;] llog_open+0x13a/0x3b0 [obdclass]
[151026.048498]  [&amp;lt;ffffffffa061bad3&amp;gt;] llog_origin_handle_read_header+0x1b3/0x630 [ptlrpc]
[151026.049479]  [&amp;lt;ffffffffa0663263&amp;gt;] tgt_llog_read_header+0x33/0xe0 [ptlrpc]
[151026.050493]  [&amp;lt;ffffffffa066c9bb&amp;gt;] tgt_request_handle+0x93b/0x13e0 [ptlrpc]
[151026.051100]  [&amp;lt;ffffffffa0610211&amp;gt;] ptlrpc_server_handle_request+0x261/0xaf0 [ptlrpc]
[151026.052417]  [&amp;lt;ffffffffa0613fc8&amp;gt;] ptlrpc_main+0xa58/0x1df0 [ptlrpc]
[151026.053122]  [&amp;lt;ffffffff81706467&amp;gt;] ? _raw_spin_unlock_irq+0x27/0x50
[151026.053837]  [&amp;lt;ffffffffa0613570&amp;gt;] ? ptlrpc_main+0x0/0x1df0 [ptlrpc]
[151026.054527]  [&amp;lt;ffffffff810a2eba&amp;gt;] kthread+0xea/0xf0
[151026.055231]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread+0x0/0xf0
[151026.055902]  [&amp;lt;ffffffff8170fb98&amp;gt;] ret_from_fork+0x58/0x90
[151026.056556]  [&amp;lt;ffffffff810a2dd0&amp;gt;] ? kthread+0x0/0xf0
[151026.057205] 
[151026.057786] Kernel panic - not syncing: LBUG
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="258073" author="bzzz" created="Mon, 11 Nov 2019 06:12:31 +0000"  >&lt;p&gt;hit this few times locally:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
LustreError: 7249:0:(osd_internal.h:994:osd_fid2oi()) ASSERTION( osd-&amp;gt;od_oi_table != NULL &amp;amp;&amp;amp; osd-&amp;gt;od_oi_count &amp;gt;= 1 )
 lbug_with_loc+0x79/0x80 [libcfs]
 ? osd_oi_delete+0x32c/0x470 [osd_ldiskfs]
 ? osd_destroy+0x1ad/0x810 [osd_ldiskfs]
 ? osd_ref_del+0x1f8/0x700 [osd_ldiskfs]
 ? local_object_unlink+0x50d/0x1020 [obdclass]
 ? nodemap_cache_find_create+0x139/0x530 [ptlrpc]
 ? nodemap_save_config_cache+0x49/0x4d0 [ptlrpc]
 ? lu_object_put+0x230/0x370 [obdclass]
 ? nodemap_config_set_active_mgc+0xd6/0x1d0 [ptlrpc]
 ? mgc_process_log+0x27a8/0x27e0 [mgc]
 ? mgc_requeue_thread+0x278/0x800 [mgc]
 ? wake_up_q+0x60/0x60
 ? kthread+0x100/0x140
 ? mgc_process_config+0x13c0/0x13c0 [mgc]
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;according to the dumped log it was a race between umount and mgc thread.&lt;/p&gt;</comment>
                            <comment id="263543" author="bzzz" created="Wed, 19 Feb 2020 08:29:25 +0000"  >&lt;p&gt;this is a race: MDT umount vs mgs normal processing:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
ll_mgs_0002     D    0 10966      2 0x80000000
Call Trace:
 ? __schedule+0x2ad/0xb00
 schedule+0x34/0x80
 lbug_with_loc+0x79/0x80 [libcfs]
 ? __osd_oi_lookup.isra.0+0x32f/0x3d0 [osd_ldiskfs]
 ? osd_object_init+0x290/0x2110 [osd_ldiskfs]
 ? __raw_spin_lock_init+0x28/0x50
 ? osd_object_alloc+0x117/0x3d0 [osd_ldiskfs]
 ? lu_object_start.isra.0+0x68/0x100 [obdclass]
 ? lu_object_find_at+0x317/0x8d0 [obdclass]
 ? dt_locate_at+0x13/0xa0 [obdclass]
 ? llog_osd_open+0x2b0/0xf90 [obdclass]
 ? __raw_spin_lock_init+0x28/0x50
 ? llog_open+0x307/0x410 [obdclass]
 ? llog_origin_handle_read_header+0x178/0x500 [ptlrpc]
 ? tgt_llog_read_header+0x26/0xb0 [ptlrpc]
 ? tgt_request_handle+0x3fe/0x1770 [ptlrpc]

umount          I    0 11081  11080 0x00000000
Call Trace:
 ? __schedule+0x2ad/0xb00
 schedule+0x34/0x80
 ptlrpc_stop_all_threads+0x55d/0x590 [ptlrpc]
 ? wait_woken+0x90/0x90
 ptlrpc_unregister_service+0xb6/0x1110 [ptlrpc]
 mgs_device_fini+0xa0/0x5c0 [mgs]
 class_cleanup+0x682/0xb50 [obdclass]
 class_process_config+0x153e/0x30f0 [obdclass]
 ? class_manual_cleanup+0xd1/0x670 [obdclass]
 ? class_manual_cleanup+0xd1/0x670 [obdclass]
 ? cache_alloc_debugcheck_after+0x138/0x150
 ? __kmalloc+0x20c/0x2e0
 class_manual_cleanup+0x197/0x670 [obdclass]
 server_put_super+0x1525/0x1d50 [obdclass]
 ? evict_inodes+0x138/0x180
 generic_shutdown_super+0x5f/0xf0
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="263544" author="bzzz" created="Wed, 19 Feb 2020 08:38:39 +0000"  >&lt;p&gt;Patch &lt;a href=&quot;https://review.whamcloud.com/37615&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/37615&lt;/a&gt; should help.&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;LU-10395 osd: stop OI at device shutdown

and not at obd_cleanup(). otherwise a race is possible:

  umount &amp;lt;MDT&amp;gt; stopping OI vs MGS accessing same OSD which

results in the assertion:
 ASSERTION( osd-&amp;gt;od_oi_table != NULL &amp;amp;&amp;amp; osd-&amp;gt;od_oi_count &amp;gt;= 1 )

Signed-off-by: Alex Zhuravlev &amp;lt;bzzz@whamcloud.com&amp;gt;
Change-Id: I24fccea718f2e2663166cfb0ff26571039357535
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="263554" author="aboyko" created="Wed, 19 Feb 2020 11:38:57 +0000"  >&lt;p&gt;I have got a local reproducer for this race. I&apos;ve tried 37615 patch, and it helps.&lt;/p&gt;</comment>
                            <comment id="263637" author="aboyko" created="Thu, 20 Feb 2020 06:17:50 +0000"  >&lt;p&gt;The original shutdown process frees a od_oi_table at the next trace&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[ 2833.653390] Call Trace:
[ 2833.653395]  [&amp;lt;ffffffff816ae7c8&amp;gt;] dump_stack+0x19/0x1b
[ 2833.653398]  [&amp;lt;ffffffff8108ae58&amp;gt;] __warn+0xd8/0x100
[ 2833.653401]  [&amp;lt;ffffffff8108af9d&amp;gt;] warn_slowpath_null+0x1d/0x20
[ 2833.653409]  [&amp;lt;ffffffffc0e95109&amp;gt;] osd_oi_fini+0x39/0x1f0 [osd_ldiskfs]
[ 2833.653418]  [&amp;lt;ffffffffc0eb48ec&amp;gt;] osd_scrub_cleanup+0x6c/0xb0 [osd_ldiskfs]
[ 2833.653437]  [&amp;lt;ffffffffc0e79738&amp;gt;] osd_shutdown+0x118/0x2e0 [osd_ldiskfs]
[ 2833.653528]  [&amp;lt;ffffffffc0e92a37&amp;gt;] osd_process_config+0x277/0x360 [osd_ldiskfs]
[ 2833.653551]  [&amp;lt;ffffffffc10a17cd&amp;gt;] lod_process_config+0x24d/0x15a0 [lod]
[ 2833.653562]  [&amp;lt;ffffffffc04b5fd7&amp;gt;] ? libcfs_debug_msg+0x57/0x80 [libcfs]
[ 2833.653570]  [&amp;lt;ffffffffc0f500e6&amp;gt;] mdd_process_config+0x146/0x5f0 [mdd]
[ 2833.653587]  [&amp;lt;ffffffffc0fbd2e2&amp;gt;] mdt_stack_fini+0x2c2/0xca0 [mdt]
[ 2833.653596]  [&amp;lt;ffffffffc0fbe00b&amp;gt;] mdt_device_fini+0x34b/0x930 [mdt]
[ 2833.653623]  [&amp;lt;ffffffffc06519a8&amp;gt;] class_cleanup+0x9a8/0xc40 [obdclass]
[ 2833.653641]  [&amp;lt;ffffffffc06528cc&amp;gt;] class_process_config+0x65c/0x2830 [obdclass]
[ 2833.653650]  [&amp;lt;ffffffffc04b5fd7&amp;gt;] ? libcfs_debug_msg+0x57/0x80 [libcfs]
[ 2833.653667]  [&amp;lt;ffffffffc0654c66&amp;gt;] class_manual_cleanup+0x1c6/0x710 [obdclass]
[ 2833.653687]  [&amp;lt;ffffffffc0687441&amp;gt;] server_put_super+0xa01/0x1120 [obdclass]
[ 2833.653692]  [&amp;lt;ffffffff81223246&amp;gt;] ? evict_inodes+0xd6/0x140
[ 2833.653695]  [&amp;lt;ffffffff81208085&amp;gt;] generic_shutdown_super+0x75/0x100
[ 2833.653697]  [&amp;lt;ffffffff81208462&amp;gt;] kill_anon_super+0x12/0x20
[ 2833.653714]  [&amp;lt;ffffffffc0657872&amp;gt;] lustre_kill_super+0x32/0x50 [obdclass]
[ 2833.653717]  [&amp;lt;ffffffff8120881e&amp;gt;] deactivate_locked_super+0x4e/0x70
[ 2833.653719]  [&amp;lt;ffffffff81208fa6&amp;gt;] deactivate_super+0x46/0x60
[ 2833.653722]  [&amp;lt;ffffffff8122655f&amp;gt;] cleanup_mnt+0x3f/0x80
[ 2833.653725]  [&amp;lt;ffffffff812265f2&amp;gt;] __cleanup_mnt+0x12/0x20
[ 2833.653727]  [&amp;lt;ffffffff810b087b&amp;gt;] task_work_run+0xbb/0xe0
[ 2833.653730]  [&amp;lt;ffffffff8102ab52&amp;gt;] do_notify_resume+0x92/0xb0
[ 2833.653734]  [&amp;lt;ffffffff816c1a5d&amp;gt;] int_signal+0x12/0x17
[ 2833.653735] ---[ end trace bfc327a09eefce17 ]---
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;Also osd_shutdown frees quota related things, what if some quota requests are processed during shutdown? @Alex Zhuravlev, what do you think, is this case possible?&lt;/p&gt;</comment>
                            <comment id="263638" author="bzzz" created="Thu, 20 Feb 2020 06:28:16 +0000"  >&lt;p&gt;OSD is able to work without quota, there is internal locking in the quota code to deal with possible race, see qsd_fini() and qsd_op_begin()&lt;/p&gt;</comment>
                            <comment id="263672" author="gerrit" created="Thu, 20 Feb 2020 10:21:50 +0000"  >&lt;p&gt;Alexandr Boyko (c17825@cray.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/37635&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/37635&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10395&quot; title=&quot;ASSERTION( osd-&amp;gt;od_oi_table != NULL &amp;amp;&amp;amp; osd-&amp;gt;od_oi_count &amp;gt;= 1 ) failed&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10395&quot;&gt;&lt;del&gt;LU-10395&lt;/del&gt;&lt;/a&gt; tests: add test_280 sanity&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 997da4924fc37125bbf5fbae2c12c186bb05a284&lt;/p&gt;</comment>
                            <comment id="263673" author="aboyko" created="Thu, 20 Feb 2020 10:24:44 +0000"  >&lt;p&gt;I&apos;ve pushed the regression test for issue if it is OK I rebase it to the fix.&lt;/p&gt;</comment>
                            <comment id="263674" author="bzzz" created="Thu, 20 Feb 2020 10:25:41 +0000"  >&lt;p&gt;thanks!&lt;/p&gt;</comment>
                            <comment id="263679" author="aboyko" created="Thu, 20 Feb 2020 11:21:05 +0000"  >&lt;p&gt;Alex, I don&apos;t see how qsd_op_begin() pins qsd at memory during using. Only small checks about !=NULL and qsd_started, even qsd_stopping is missed.&#160;&lt;/p&gt;</comment>
                            <comment id="263878" author="adilger" created="Sun, 23 Feb 2020 22:49:20 +0000"  >&lt;p&gt;+1 on master sanity test_208:&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/52f42644-0100-4d2d-bb11-794a4b7a1bf0&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/52f42644-0100-4d2d-bb11-794a4b7a1bf0&lt;/a&gt;&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[ 8058.688742] LustreError: 12938:0:(osd_internal.h:1010:osd_fid2oi()) ASSERTION( osd-&amp;gt;od_oi_table != NULL &amp;amp;&amp;amp; osd-&amp;gt;od_oi_count &amp;gt;= 1 ) failed: [0xa:0x3:0x0]
[ 8058.691039] LustreError: 12938:0:(osd_internal.h:1010:osd_fid2oi()) LBUG
[ 8058.692144] Pid: 12938, comm: ll_mgs_0002 3.10.0-1062.9.1.el7_lustre.x86_64 #1 SMP Wed Feb 12 09:50:45 UTC 2020
[ 8058.693915] Call Trace:
[ 8058.694388]  [&amp;lt;ffffffffc09b8f7c&amp;gt;] libcfs_call_trace+0x8c/0xc0 [libcfs]
[ 8058.695655]  [&amp;lt;ffffffffc09b902c&amp;gt;] lbug_with_loc+0x4c/0xa0 [libcfs]
[ 8058.696793]  [&amp;lt;ffffffffc11159d0&amp;gt;] __osd_oi_lookup+0x310/0x3c0 [osd_ldiskfs]
[ 8058.698145]  [&amp;lt;ffffffffc1117925&amp;gt;] osd_oi_lookup+0x95/0x1e0 [osd_ldiskfs]
[ 8058.699407]  [&amp;lt;ffffffffc1112ff5&amp;gt;] osd_fid_lookup+0x455/0x1d60 [osd_ldiskfs]
[ 8058.700594]  [&amp;lt;ffffffffc1114961&amp;gt;] osd_object_init+0x61/0x110 [osd_ldiskfs]
[ 8058.701927]  [&amp;lt;ffffffffc0bdbafb&amp;gt;] lu_object_start.isra.31+0x8b/0x120 [obdclass]
[ 8058.703596]  [&amp;lt;ffffffffc0bdfba2&amp;gt;] lu_object_find_at+0x1b2/0x980 [obdclass]
[ 8058.704808]  [&amp;lt;ffffffffc0be0fcd&amp;gt;] dt_locate_at+0x1d/0xb0 [obdclass]
[ 8058.705983]  [&amp;lt;ffffffffc0ba2c4e&amp;gt;] llog_osd_open+0x50e/0xf30 [obdclass]
[ 8058.707231]  [&amp;lt;ffffffffc0b8f08f&amp;gt;] llog_open+0x25f/0x400 [obdclass]
[ 8058.708380]  [&amp;lt;ffffffffc0edb5b6&amp;gt;] llog_origin_handle_read_header+0x1b6/0x630 [ptlrpc]
[ 8058.710027]  [&amp;lt;ffffffffc0f25ca3&amp;gt;] tgt_llog_read_header+0x33/0xe0 [ptlrpc]
[ 8058.711367]  [&amp;lt;ffffffffc0f2f68a&amp;gt;] tgt_request_handle+0x95a/0x1610 [ptlrpc]
[ 8058.712594]  [&amp;lt;ffffffffc0ed1066&amp;gt;] ptlrpc_server_handle_request+0x256/0xb10 [ptlrpc]
[ 8058.714032]  [&amp;lt;ffffffffc0ed5464&amp;gt;] ptlrpc_main+0xbb4/0x1550 [ptlrpc]
[ 8058.715207]  [&amp;lt;ffffffffa32c61f1&amp;gt;] kthread+0xd1/0xe0
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="267055" author="gerrit" created="Tue, 7 Apr 2020 17:19:20 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/37615/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/37615/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10395&quot; title=&quot;ASSERTION( osd-&amp;gt;od_oi_table != NULL &amp;amp;&amp;amp; osd-&amp;gt;od_oi_count &amp;gt;= 1 ) failed&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10395&quot;&gt;&lt;del&gt;LU-10395&lt;/del&gt;&lt;/a&gt; osd: stop OI at device shutdown&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 2789978e1192dbf6d90399c96b5594e0dc049cd9&lt;/p&gt;</comment>
                            <comment id="267056" author="gerrit" created="Tue, 7 Apr 2020 17:20:55 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/38153&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/38153&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10395&quot; title=&quot;ASSERTION( osd-&amp;gt;od_oi_table != NULL &amp;amp;&amp;amp; osd-&amp;gt;od_oi_count &amp;gt;= 1 ) failed&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10395&quot;&gt;&lt;del&gt;LU-10395&lt;/del&gt;&lt;/a&gt; osd: stop OI at device shutdown&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_12&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: a7e345f49985b29d1d6f45a6065af56340102470&lt;/p&gt;</comment>
                            <comment id="267057" author="gerrit" created="Tue, 7 Apr 2020 17:21:06 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/37635/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/37635/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10395&quot; title=&quot;ASSERTION( osd-&amp;gt;od_oi_table != NULL &amp;amp;&amp;amp; osd-&amp;gt;od_oi_count &amp;gt;= 1 ) failed&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10395&quot;&gt;&lt;del&gt;LU-10395&lt;/del&gt;&lt;/a&gt; tests: add test_280 sanity&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: f4eeadee5ba5d4ab9d04918d8d81d18907daa831&lt;/p&gt;</comment>
                            <comment id="267797" author="bzzz" created="Thu, 16 Apr 2020 08:07:49 +0000"  >&lt;p&gt;can we close the ticket now?&lt;/p&gt;</comment>
                            <comment id="275101" author="gerrit" created="Sat, 11 Jul 2020 07:27:56 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/38153/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/38153/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10395&quot; title=&quot;ASSERTION( osd-&amp;gt;od_oi_table != NULL &amp;amp;&amp;amp; osd-&amp;gt;od_oi_count &amp;gt;= 1 ) failed&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10395&quot;&gt;&lt;del&gt;LU-10395&lt;/del&gt;&lt;/a&gt; osd: stop OI at device shutdown&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_12&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: b27a323147d992b510fddcfbef8aaef508be7c87&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzzpjj:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>