<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:41:08 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-11122] NULL pointer dereference in fld_local_lookup()</title>
                <link>https://jira.whamcloud.com/browse/LU-11122</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;MDS nodes were power cycled during hardware maintenance.  After they came back up, got below (some material redacted, see comments below for full console log contents):&lt;/p&gt;

&lt;p&gt;BUG: unable to handle kernel NULL pointer dereference at 0000000000000018&lt;br/&gt;
IP: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffc0cdc392&amp;gt;&amp;#93;&lt;/span&gt; fld_local_lookup+0x52/0x270 &lt;span class=&quot;error&quot;&gt;&amp;#91;fld&amp;#93;&lt;/span&gt;&lt;br/&gt;
CPU: 17 PID: 180501 Comm: orph_cleanup_ls Kdump: loaded Tainted: P           OE  ------------   3.10.0-862.3.2.1chaos.ch6.x86_64 #1&lt;br/&gt;
Call Trace:&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffc06e8f6c&amp;gt;&amp;#93;&lt;/span&gt; ? dmu_tx_hold_object_impl+0x6c/0xc0 &lt;span class=&quot;error&quot;&gt;&amp;#91;zfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffc109ff28&amp;gt;&amp;#93;&lt;/span&gt; osd_fld_lookup+0x48/0xd0 &lt;span class=&quot;error&quot;&gt;&amp;#91;osd_zfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffc10a008a&amp;gt;&amp;#93;&lt;/span&gt; fid_is_on_ost+0xda/0x2f0 &lt;span class=&quot;error&quot;&gt;&amp;#91;osd_zfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffc10a02e9&amp;gt;&amp;#93;&lt;/span&gt; osd_get_name_n_idx+0x49/0xd00 &lt;span class=&quot;error&quot;&gt;&amp;#91;osd_zfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffc109902c&amp;gt;&amp;#93;&lt;/span&gt; ? osd_declare_attr_set+0x14c/0x730 &lt;span class=&quot;error&quot;&gt;&amp;#91;osd_zfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffc0753b7e&amp;gt;&amp;#93;&lt;/span&gt; ? zap_lookup_by_dnode+0x2e/0x30 &lt;span class=&quot;error&quot;&gt;&amp;#91;zfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffc1097510&amp;gt;&amp;#93;&lt;/span&gt; osd_declare_object_destroy+0xe0/0x3e0 &lt;span class=&quot;error&quot;&gt;&amp;#91;osd_zfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffc1139ffe&amp;gt;&amp;#93;&lt;/span&gt; lod_sub_object_declare_destroy+0xce/0x2d0 &lt;span class=&quot;error&quot;&gt;&amp;#91;lod&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffc1129700&amp;gt;&amp;#93;&lt;/span&gt; lod_declare_object_destroy+0x170/0x4a0 &lt;span class=&quot;error&quot;&gt;&amp;#91;lod&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffc1513689&amp;gt;&amp;#93;&lt;/span&gt; ? orph_declare_index_delete+0x179/0x460 &lt;span class=&quot;error&quot;&gt;&amp;#91;mdd&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffc1513f66&amp;gt;&amp;#93;&lt;/span&gt; orph_key_test_and_del+0x5f6/0xd30 &lt;span class=&quot;error&quot;&gt;&amp;#91;mdd&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffc1514c57&amp;gt;&amp;#93;&lt;/span&gt; __mdd_orphan_cleanup+0x5b7/0x840 &lt;span class=&quot;error&quot;&gt;&amp;#91;mdd&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffc15146a0&amp;gt;&amp;#93;&lt;/span&gt; ? orph_key_test_and_del+0xd30/0xd30 &lt;span class=&quot;error&quot;&gt;&amp;#91;mdd&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffbb2c05f1&amp;gt;&amp;#93;&lt;/span&gt; kthread+0xd1/0xe0&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffbb2c0520&amp;gt;&amp;#93;&lt;/span&gt; ? insert_kthread_work+0x40/0x40&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffbb9438b7&amp;gt;&amp;#93;&lt;/span&gt; ret_from_fork_nospec_begin+0x21/0x21&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffbb2c0520&amp;gt;&amp;#93;&lt;/span&gt; ? insert_kthread_work+0x40/0x40&lt;/p&gt;</description>
                <environment>lustre-2.8.2_2.chaos-1.ch6.x86_64&lt;br/&gt;
4 MDTs used in DNE-1 fashion (remote dirs, no striped dirs)&lt;br/&gt;
RHEL 7.5</environment>
        <key id="52646">LU-11122</key>
            <summary>NULL pointer dereference in fld_local_lookup()</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="6" iconUrl="https://jira.whamcloud.com/images/icons/statuses/closed.png" description="The issue is considered finished, the resolution is correct. Issues which are closed can be reopened.">Closed</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="3">Duplicate</resolution>
                                        <assignee username="bzzz">Alex Zhuravlev</assignee>
                                    <reporter username="ofaaland">Olaf Faaland</reporter>
                        <labels>
                            <label>llnl</label>
                    </labels>
                <created>Thu, 5 Jul 2018 19:46:46 +0000</created>
                <updated>Wed, 8 Aug 2018 17:06:11 +0000</updated>
                            <resolved>Wed, 8 Aug 2018 17:05:56 +0000</resolved>
                                                                        <due></due>
                            <votes>0</votes>
                                    <watches>5</watches>
                                                                            <comments>
                            <comment id="229971" author="ofaaland" created="Thu, 5 Jul 2018 19:48:43 +0000"  >&lt;p&gt;This is a production file system, it&apos;s currently down due to this issue.&lt;/p&gt;</comment>
                            <comment id="229972" author="ofaaland" created="Thu, 5 Jul 2018 19:49:28 +0000"  >&lt;p&gt;More complete console log contents:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[2507417.597913] Lustre: lsrza-MDT0003: Recovery over after 1:45, of 1011 clients 0 recovered and 1011 were evicted.
[2507417.634717] LustreError: 162832:0:(client.c:1164:ptlrpc_import_delay_req()) @@@ IMP_CLOSED   req@ffff9eaed88fef00 x1602545796916964/t0(0) o5-&amp;gt;lsrza-OST0003-osc-MDT0003@172.21.3.8@o2ib700:28/4 lens 432/432 e 0 to 0 dl 0 ref 2 fl Rpc:N/0/ffffffff rc 0/-1           
[2507417.634723] LustreError: 162879:0:(osp_precreate.c:900:osp_precreate_cleanup_orphans()) lsrza-OST0005-osc-MDT0003: cannot cleanup orphans: rc = -5
[2507417.634725] LustreError: 162963:0:(osp_precreate.c:900:osp_precreate_cleanup_orphans()) lsrza-OST0007-osc-MDT0003: cannot cleanup orphans: rc = -5
[2507417.634726] LustreError: 162879:0:(osp_precreate.c:900:osp_precreate_cleanup_orphans()) Skipped 1 previous similar message
[2507417.634727] LustreError: 162963:0:(osp_precreate.c:900:osp_precreate_cleanup_orphans()) Skipped 1 previous similar message
[2507417.724392] LustreError: 162832:0:(client.c:1164:ptlrpc_import_delay_req()) Skipped 926 previous similar messages
[2507417.726598] BUG: unable to handle kernel NULL pointer dereference at 0000000000000018
[2507417.726603] IP: [&amp;lt;ffffffffc0cdc392&amp;gt;] fld_local_lookup+0x52/0x270 [fld]
[2507417.726604] PGD 0
[2507417.726605] Oops: 0000 [#1] SMP
&amp;lt;&amp;lt;Long line listing Modules linked in redacted&amp;gt;&amp;gt;
[2507417.726650] CPU: 17 PID: 180501 Comm: orph_cleanup_ls Kdump: loaded Tainted: P           OE  ------------   3.10.0-862.3.2.1chaos.ch6.x86_64 #1
[2507417.726651] Hardware name: Intel Corporation S2600WTTR/S2600WTTR, BIOS SE5C610.86B.01.01.0019.101220160604 10/12/2016
[2507417.726651] task: ffff9eacbe858000 ti: ffff9ea134cdc000 task.ti: ffff9ea134cdc000
[2507417.726655] RIP: 0010:[&amp;lt;ffffffffc0cdc392&amp;gt;]  [&amp;lt;ffffffffc0cdc392&amp;gt;] fld_local_lookup+0x52/0x270 [fld]
[2507417.726655] RSP: 0018:ffff9ea134cdfc00  EFLAGS: 00010286
[2507417.726656] RAX: ffff9e7059e223c0 RBX: ffff9e7059e223c0 RCX: ffff9eaff5e9d430
[2507417.726657] RDX: ffff9e6e32fa4800 RSI: ffffffffc0ce4ec0 RDI: ffff9eb0007e9240
[2507417.726657] RBP: ffff9ea134cdfc38 R08: 00000002c000f1db R09: ffff9e32bfc07a00
[2507417.726658] R10: ffff9e32bfc07a00 R11: ffffffffc03d4208 R12: ffff9eaff5e9d430
[2507417.726658] R13: 0000000000000000 R14: 00000002c000f1e7 R15: ffff9eaf7318c000
[2507417.726659] FS:  0000000000000000(0000) GS:ffff9e70be840000(0000) knlGS:0000000000000000
[2507417.726660] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[2507417.726660] CR2: 0000000000000018 CR3: 0000003f7e378000 CR4: 00000000001607e0
[2507417.726661] Call Trace:
[2507417.726689]  [&amp;lt;ffffffffc06e8f6c&amp;gt;] ? dmu_tx_hold_object_impl+0x6c/0xc0 [zfs]
[2507417.726696]  [&amp;lt;ffffffffc109ff28&amp;gt;] osd_fld_lookup+0x48/0xd0 [osd_zfs]
[2507417.726700]  [&amp;lt;ffffffffc10a008a&amp;gt;] fid_is_on_ost+0xda/0x2f0 [osd_zfs]
[2507417.726703]  [&amp;lt;ffffffffc10a02e9&amp;gt;] osd_get_name_n_idx+0x49/0xd00 [osd_zfs]
[2507417.726706]  [&amp;lt;ffffffffc109902c&amp;gt;] ? osd_declare_attr_set+0x14c/0x730 [osd_zfs]
[2507417.726729]  [&amp;lt;ffffffffc0753b7e&amp;gt;] ? zap_lookup_by_dnode+0x2e/0x30 [zfs]
[2507417.726733]  [&amp;lt;ffffffffc1097510&amp;gt;] osd_declare_object_destroy+0xe0/0x3e0 [osd_zfs]
[2507417.726742]  [&amp;lt;ffffffffc1139ffe&amp;gt;] lod_sub_object_declare_destroy+0xce/0x2d0 [lod]
[2507417.726748]  [&amp;lt;ffffffffc1129700&amp;gt;] lod_declare_object_destroy+0x170/0x4a0 [lod]
[2507417.726756]  [&amp;lt;ffffffffc1513689&amp;gt;] ? orph_declare_index_delete+0x179/0x460 [mdd]
[2507417.726760]  [&amp;lt;ffffffffc1513f66&amp;gt;] orph_key_test_and_del+0x5f6/0xd30 [mdd]
[2507417.726776]  [&amp;lt;ffffffffc1514c57&amp;gt;] __mdd_orphan_cleanup+0x5b7/0x840 [mdd]
[2507417.726780]  [&amp;lt;ffffffffc15146a0&amp;gt;] ? orph_key_test_and_del+0xd30/0xd30 [mdd]
[2507417.726782]  [&amp;lt;ffffffffbb2c05f1&amp;gt;] kthread+0xd1/0xe0
[2507417.726783]  [&amp;lt;ffffffffbb2c0520&amp;gt;] ? insert_kthread_work+0x40/0x40
[2507417.726786]  [&amp;lt;ffffffffbb9438b7&amp;gt;] ret_from_fork_nospec_begin+0x21/0x21
[2507417.726786]  [&amp;lt;ffffffffbb2c0520&amp;gt;] ? insert_kthread_work+0x40/0x40
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="229973" author="ofaaland" created="Thu, 5 Jul 2018 19:50:22 +0000"  >&lt;p&gt;Top of stack:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[2507417.726696] [&amp;lt;ffffffffc109ff28&amp;gt;] osd_fld_lookup+0x48/0xd0 [osd_zfs]
[2507417.726700] [&amp;lt;ffffffffc10a008a&amp;gt;] fid_is_on_ost+0xda/0x2f0 [osd_zfs]
[2507417.726703] [&amp;lt;ffffffffc10a02e9&amp;gt;] osd_get_name_n_idx+0x49/0xd00 [osd_zfs]
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;And point of NULL ptr dereference:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[2507417.726655] RIP: 0010:[&amp;lt;ffffffffc0cdc392&amp;gt;] [&amp;lt;ffffffffc0cdc392&amp;gt;] fld_local_lookup+0x52/0x270 [fld]
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;(gdb) l *(fld_local_lookup+0x52)
0x53c2 is in fld_local_lookup (/usr/src/debug/lustre-2.8.2_2.chaos/lustre/fld/fld_handler.c:218).
213 info = lu_context_key_get(&amp;amp;env-&amp;gt;le_ctx, &amp;amp;fld_thread_key);
214 LASSERT(info != NULL);
215 erange = &amp;amp;info-&amp;gt;fti_lrange;
216
217 /* Lookup it in the cache. */
218 rc = fld_cache_lookup(fld-&amp;gt;lsf_cache, seq, erange);
219 if (rc == 0) {
220 if (unlikely(fld_range_type(erange) != fld_range_type(range) &amp;amp;&amp;amp;
221 !fld_range_is_any(range))) {
222 CERROR(&quot;%s: FLD cache range &quot;DRANGE&quot; does not match&quot;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Variable info cannot be NULL, because it is local, not shared, and the ASSERT verified it was not NULL after it was last assigned. So it seems like line 218 really is where the NULL ptr deref is occurring:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;218 rc = fld_cache_lookup(fld-&amp;gt;lsf_cache, seq, erange);
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;fld was passed in and we have not checked it for NULL, and that&apos;s the only pointer used. So it seems fld must be NULL.&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;</comment>
                            <comment id="229974" author="ofaaland" created="Thu, 5 Jul 2018 19:53:08 +0000"  >&lt;p&gt;For our code, including the tags we build from, see the lustre-release-fe-llnl project in gerrit.&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;</comment>
                            <comment id="229975" author="ofaaland" created="Thu, 5 Jul 2018 19:54:29 +0000"  >&lt;p&gt;Is there a way we can tell the MDT to skip orphan cleanup once, so it forgets about these orphaned objects?&#160; We don&apos;t really care if some&#160;&lt;b&gt;unused&lt;/b&gt; objects are orphaned.&lt;/p&gt;</comment>
                            <comment id="229978" author="jhammond" created="Thu, 5 Jul 2018 20:18:59 +0000"  >&lt;p&gt;&amp;gt; Is there a way we can tell the MDT to skip orphan cleanup once, so it forgets about these orphaned objects?  We don&apos;t really care if some unused objects are orphaned.&lt;/p&gt;

&lt;p&gt;You could mount the backing FS and remove the files from the PENDING directory.&lt;/p&gt;</comment>
                            <comment id="229979" author="wcjohnso" created="Thu, 5 Jul 2018 20:19:02 +0000"  >&lt;p&gt;Hi &lt;a href=&quot;https://jira.whamcloud.com/secure/ViewProfile.jspa?name=ofaaland&quot; class=&quot;user-hover&quot; rel=&quot;ofaaland&quot;&gt;ofaaland&lt;/a&gt;,&lt;/p&gt;

&lt;p&gt;Is this a Lustre only system or do you also have IML installed?&lt;/p&gt;

&lt;p&gt;Regards,&lt;/p&gt;

&lt;p&gt;Will&lt;/p&gt;</comment>
                            <comment id="229980" author="ofaaland" created="Thu, 5 Jul 2018 20:20:23 +0000"  >&lt;p&gt;Lustre only, no IML.&lt;/p&gt;</comment>
                            <comment id="229981" author="ofaaland" created="Thu, 5 Jul 2018 20:24:18 +0000"  >&lt;p&gt;The file system is back up.&#160; This is no longer an emergency.&lt;/p&gt;

&lt;p&gt;In the course of rebooting the nodes and mounting manually to get more information, the messages about orphan cleanup failing stopped appearing after recovery.&#160; It&apos;s been about 6 minutes now since the MDTs completed recovery.&lt;/p&gt;</comment>
                            <comment id="229982" author="ofaaland" created="Thu, 5 Jul 2018 20:25:03 +0000"  >&lt;p&gt;John, thanks for the suggestion.&#160; I&apos;ll try it if this comes up again.&lt;/p&gt;</comment>
                            <comment id="229983" author="ofaaland" created="Thu, 5 Jul 2018 20:26:17 +0000"  >&lt;p&gt;Thank you for responding so quickly.&lt;/p&gt;</comment>
                            <comment id="229985" author="pjones" created="Thu, 5 Jul 2018 20:29:53 +0000"  >&lt;p&gt;No problem. We&apos;ll discuss tomorrow whether further work could be useful to be able to avoid this kind of scenario.&lt;/p&gt;</comment>
                            <comment id="230019" author="pjones" created="Fri, 6 Jul 2018 17:39:08 +0000"  >&lt;p&gt;Alex&lt;/p&gt;

&lt;p&gt;What do you recommend here?&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="230156" author="bzzz" created="Wed, 11 Jul 2018 06:42:48 +0000"  >&lt;p&gt;was it possible that umount was initiated at that time? I see that&#160;2.8.2_2.chaos stops orphan thread in mdd_shutdown() (LCFG_CLEANUP path) while master branch stops orphan thread in the preceding phase (LCFG_PRE_CLEANUP path)&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;</comment>
                            <comment id="230158" author="bzzz" created="Wed, 11 Jul 2018 07:09:20 +0000"  >&lt;p&gt;in&#160; 2.8.2_2.chaos the sequence at umount will be:&lt;/p&gt;

&lt;p&gt;mdt_fini() &lt;del&gt;&amp;gt;&#160;mdt_fld_fini() (&#160;ss&lt;/del&gt;&amp;gt;ss_server_fld = NULL ) -&amp;gt;&#160;mdt_stack_fini() -&amp;gt;&#160;ldo_process_config*LCFG_CLEANUP) -&amp;gt;&#160;mdd_device_shutdown() -&amp;gt;&#160;mdd_generic_thread_stop(mdd_orph_cleanup_thread)&lt;/p&gt;

&lt;p&gt;IOW, ss_server_fld can be reset to NULL while the orphan thread is still running&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;</comment>
                            <comment id="230165" author="bzzz" created="Wed, 11 Jul 2018 12:37:13 +0000"  >&lt;p&gt;please check &lt;a href=&quot;http://review.whamcloud.com/23029&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/23029&lt;/a&gt;&#160;&lt;/p&gt;</comment>
                            <comment id="230207" author="ofaaland" created="Thu, 12 Jul 2018 04:08:51 +0000"  >&lt;p&gt;You&apos;re right, this was in the context of a umount.&#160; That all seems to fit.&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;</comment>
                            <comment id="231060" author="pjones" created="Mon, 30 Jul 2018 13:14:25 +0000"  >&lt;p&gt;Is any further action needed here or can this ticket be marked as a duplicate of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7206&quot; title=&quot;LBUG osp_sync.c:1541:osp_sync_id_traction_fini()) ASSERTION( list_empty(&amp;amp;tr-&amp;gt;otr_wakeup_list) ) failed: &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7206&quot;&gt;&lt;del&gt;LU-7206&lt;/del&gt;&lt;/a&gt;?&lt;/p&gt;</comment>
                            <comment id="231553" author="ofaaland" created="Mon, 6 Aug 2018 23:51:12 +0000"  >&lt;p&gt;Peter, can you backport this to 2.8fe?&#160; This way it gets some automated sanity tests run against it, which we aren&apos;t really set up to do.&lt;/p&gt;</comment>
                            <comment id="231645" author="ofaaland" created="Wed, 8 Aug 2018 17:03:43 +0000"  >&lt;p&gt;Backport reviewed and passed testing.&lt;/p&gt;</comment>
                            <comment id="231646" author="ofaaland" created="Wed, 8 Aug 2018 17:05:01 +0000"  >&lt;p&gt;Dupe of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7206&quot; title=&quot;LBUG osp_sync.c:1541:osp_sync_id_traction_fini()) ASSERTION( list_empty(&amp;amp;tr-&amp;gt;otr_wakeup_list) ) failed: &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7206&quot;&gt;&lt;del&gt;LU-7206&lt;/del&gt;&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="231647" author="ofaaland" created="Wed, 8 Aug 2018 17:05:56 +0000"  >&lt;p&gt;Dupe of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7206&quot; title=&quot;LBUG osp_sync.c:1541:osp_sync_id_traction_fini()) ASSERTION( list_empty(&amp;amp;tr-&amp;gt;otr_wakeup_list) ) failed: &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7206&quot;&gt;&lt;del&gt;LU-7206&lt;/del&gt;&lt;/a&gt;&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzzysv:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10020"><![CDATA[1]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>