<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:33:09 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-10224] recovery-small test_57: timeout</title>
                <link>https://jira.whamcloud.com/browse/LU-10224</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for liuying &amp;lt;emoly.liu@intel.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/d938f700-c59c-11e7-9c63-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/d938f700-c59c-11e7-9c63-52540065bddc&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_57 failed with the following error:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Timeout occurred after 630 mins, last suite running was recovery-small, restarting cluster to continue tests
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[14587.215937] general protection fault: 0000 [#1] SMP 
[14587.226037] CPU: 0 PID: 4274 Comm: lctl Tainted: G           OE  ------------   3.10.0-693.5.2.el7.x86_64 #1
[14587.227003] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
[14587.228344] RIP: 0010:[&amp;lt;ffffffffc0742fa1&amp;gt;]  [&amp;lt;ffffffffc0742fa1&amp;gt;] lprocfs_stats_collect+0xc1/0x140 [obdclass]
[14587.239740] Call Trace:
[14587.240004]  [&amp;lt;ffffffffc0743b87&amp;gt;] lprocfs_stats_seq_show+0x47/0x140 [obdclass]
[14587.240754]  [&amp;lt;ffffffff81226550&amp;gt;] seq_read+0x250/0x3b0
[14587.241283]  [&amp;lt;ffffffff812702cd&amp;gt;] proc_reg_read+0x3d/0x80
[14587.241831]  [&amp;lt;ffffffff81200b1c&amp;gt;] vfs_read+0x9c/0x170
[14587.242347]  [&amp;lt;ffffffff812019df&amp;gt;] SyS_read+0x7f/0xe0
[14587.242854]  [&amp;lt;ffffffff816b5089&amp;gt;] system_call_fastpath+0x16/0x1b
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Please provide additional information about the failure here.&lt;/p&gt;

&lt;p&gt;Info required for matching: recovery-small 57&lt;/p&gt;</description>
                <environment></environment>
        <key id="49232">LU-10224</key>
            <summary>recovery-small test_57: timeout</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="bfaccini">Bruno Faccini</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Fri, 10 Nov 2017 02:33:16 +0000</created>
                <updated>Thu, 3 May 2018 19:17:35 +0000</updated>
                            <resolved>Mon, 11 Dec 2017 13:40:56 +0000</resolved>
                                                    <fixVersion>Lustre 2.11.0</fixVersion>
                    <fixVersion>Lustre 2.10.4</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>8</watches>
                                                                            <comments>
                            <comment id="213318" author="sguminsx" created="Fri, 10 Nov 2017 12:23:47 +0000"  >&lt;p&gt;I believe I also hit this same problem on master:&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/b286f820-c5a4-11e7-a066-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/b286f820-c5a4-11e7-a066-52540065bddc&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="213506" author="adilger" created="Sun, 12 Nov 2017 05:48:00 +0000"  >
&lt;p&gt;Found this in &lt;a href=&quot;https://testing.hpdd.intel.com/sub_tests/5a997ade-c66c-11e7-9c63-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/sub_tests/5a997ade-c66c-11e7-9c63-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;recovery-small test 57: read procfs entries causes kernel crash =================================== 20:33:25 (1510346005)&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;19211.603958&amp;#93;&lt;/span&gt; Lustre: DEBUG MARKER: grep -c /mnt/lustre&apos; &apos; /proc/mounts&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;19211.610675&amp;#93;&lt;/span&gt; Lustre: DEBUG MARKER: lsof -t /mnt/lustre&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;19211.689718&amp;#93;&lt;/span&gt; Lustre: DEBUG MARKER: umount /mnt/lustre 2&amp;gt;&amp;amp;1&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;19211.713815&amp;#93;&lt;/span&gt; general protection fault: 0000 &lt;a href=&quot;#1&quot; target=&quot;_blank&quot; rel=&quot;noopener&quot;&gt;1&lt;/a&gt; SMP&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;19211.723227&amp;#93;&lt;/span&gt; CPU: 1 PID: 1225 Comm: lctl&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;19211.736098&amp;#93;&lt;/span&gt; Call Trace:&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;19211.736355&amp;#93;&lt;/span&gt;  &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffc0745627&amp;gt;&amp;#93;&lt;/span&gt; lprocfs_stats_seq_show+0x47/0x140 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;19211.737040&amp;#93;&lt;/span&gt;  &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81226550&amp;gt;&amp;#93;&lt;/span&gt; seq_read+0x250/0x3b0&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;19211.737536&amp;#93;&lt;/span&gt;  &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff812702cd&amp;gt;&amp;#93;&lt;/span&gt; proc_reg_read+0x3d/0x80&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;19211.738041&amp;#93;&lt;/span&gt;  &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81200b1c&amp;gt;&amp;#93;&lt;/span&gt; vfs_read+0x9c/0x170&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;19211.738527&amp;#93;&lt;/span&gt;  &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff812019df&amp;gt;&amp;#93;&lt;/span&gt; SyS_read+0x7f/0xe0&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;19211.739000&amp;#93;&lt;/span&gt;  &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff816b5089&amp;gt;&amp;#93;&lt;/span&gt; system_call_fastpath+0x16/0x1b&lt;/p&gt;
{noformat}</comment>
                            <comment id="213734" author="yong.fan" created="Wed, 15 Nov 2017 07:30:16 +0000"  >&lt;p&gt;+1 on master:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/6e70c5fa-c9d5-11e7-a066-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/6e70c5fa-c9d5-11e7-a066-52540065bddc&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="214119" author="bfaccini" created="Mon, 20 Nov 2017 12:47:28 +0000"  >&lt;p&gt;+1 on master at &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/065827f2-cc1d-11e7-9c63-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/065827f2-cc1d-11e7-9c63-52540065bddc&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="214425" author="bfaccini" created="Wed, 22 Nov 2017 12:38:47 +0000"  >&lt;p&gt;After having a look to an associated crash-dump, my guess is that there is a possible race between umount process and concurrent &quot;lctl get_param osc.&amp;lt;OSC&amp;gt;.stats&quot;, what recovery-small/test_57 is intended to detect.&lt;/p&gt;

&lt;p&gt;This seems to occur due to osc_precleanup() calling ptlrpc_lprocfs_unregister_obd() before lprocfs_obd_cleanup(), allowing the obd&amp;#45;&amp;gt;obd_svc_stats/lprocfs_stats struct to be freed before the associated proc file and internal structs concurrent users are done and thus can be cleaned. Looks like a revival of LU&amp;#45;106!&lt;/p&gt;

&lt;p&gt;I wonder if this can just be fixed by changing ptlrpc_lprocfs_unregister_obd() and lprocfs_obd_cleanup() order in osc_precleanup() (and may be also in other places where both routines are used the same way/order, like in mdc_precleanup()/lwp_device_fini()/osp_init0()/osp_device_fini()).&lt;/p&gt;</comment>
                            <comment id="214508" author="bfaccini" created="Thu, 23 Nov 2017 09:48:48 +0000"  >&lt;p&gt;Stacks showing the umount/lctl racy scenario in crash-dump :&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;PID: 25606  TASK: ffff88007b579fa0  CPU: 1   COMMAND: &quot;lctl&quot;
bt: seek error: kernel virtual address: ffffffffffffffff  type: &quot;cpu_online_map&quot;
 #0 [ffff880069293b50] machine_kexec at ffffffff8105c4cb
 #1 [ffff880069293bb0] __crash_kexec at ffffffff81104a42
 #2 [ffff880069293c80] crash_kexec at ffffffff81104b30
 #3 [ffff880069293c98] oops_end at ffffffff816ad338
 #4 [ffff880069293cc0] die at ffffffff8102e97b
 #5 [ffff880069293cf0] do_general_protection at ffffffff816accbe
 #6 [ffff880069293d20] general_protection at ffffffff816ac568
    [exception RIP: lprocfs_stats_collect+193]
    RIP: ffffffffc0720a41  RSP: ffff880069293dd8  RFLAGS: 00010246
    RAX: 0000000000007c15  RBX: ffff880069293e20  RCX: 0000000000000006
    RDX: 6f2d313030305953  RSI: 6f2d313030305a53  RDI: 0000000000000006
    RBP: ffff880069293e00   R8: 0000000000000500   R9: 0000000000000100
    R10: 0000000000000000  R11: ffff880069293ca6  R12: ffff88007c15a2c0
    R13: 0000000000000020  R14: ffff880079723000  R15: ffff880068d94d00
    ORIG_RAX: ffffffffffffffff  CS: 0010  SS: 0018
 #7 [ffff880069293e08] lprocfs_stats_seq_show at ffffffffc0721627 [obdclass]
 #8 [ffff880069293e78] seq_read at ffffffff81226550
 #9 [ffff880069293ee8] proc_reg_poll at ffffffff812702cd  &amp;lt;&amp;lt;&amp;lt;&amp;lt; or proc_reg_read() ???????
#10 [ffff880069293f08] vfs_read at ffffffff81200b1c
#11 [ffff880069293f38] sys_read at ffffffff812019df
#12 [ffff880069293f80] system_call_fastpath at ffffffff816b5089
    RIP: 00007f3b2efdc6f0  RSP: 00007fff00fef468  RFLAGS: 00000246
    RAX: 0000000000000000  RBX: ffffffff816b5089  RCX: ffffffffffffffff
    RDX: 0000000000001000  RSI: 0000000001d74250  RDI: 0000000000000003
    RBP: 0000000001d7425a   R8: 0000000001d72430   R9: 0000000000001000
    R10: 0000000000000063  R11: 0000000000000246  R12: 00007fff00ff0610
    R13: 0000000001d74250  R14: 0000000000000001  R15: 0000000001d72410
    ORIG_RAX: 0000000000000000  CS: 0033  SS: 002b

PID: 25600  TASK: ffff88007ad72f70  CPU: 0   COMMAND: &quot;umount&quot;
 #0 [ffff880068ceb810] __schedule at ffffffff816a9005
 #1 [ffff880068ceb878] schedule at ffffffff816a9589
 #2 [ffff880068ceb888] schedule_timeout at ffffffff816a7099
 #3 [ffff880068ceb930] wait_for_completion at ffffffff816a993d
 #4 [ffff880068ceb990] proc_entry_rundown at ffffffff81270885
 #5 [ffff880068ceb9e0] remove_proc_subtree at ffffffff81276e52
 #6 [ffff880068ceba30] proc_remove at ffffffff81276efe
 #7 [ffff880068ceba40] lprocfs_obd_cleanup at ffffffffc071baff [obdclass]
 #8 [ffff880068ceba58] osc_precleanup at ffffffffc0ad8cd7 [osc]
 #9 [ffff880068ceba80] class_cleanup at ffffffffc072f1e2 [obdclass]
#10 [ffff880068cebaf8] class_process_config at ffffffffc0731b26 [obdclass]
#11 [ffff880068cebba8] class_manual_cleanup at ffffffffc0732746 [obdclass]
#12 [ffff880068cebc50] lov_putref at ffffffffc0b43a22 [lov]
#13 [ffff880068cebcb0] lov_disconnect at ffffffffc0b4ae02 [lov]
#14 [ffff880068cebcd8] obd_disconnect at ffffffffc0bc24f3 [lustre]
#15 [ffff880068cebcf8] ll_put_super at ffffffffc0bc5990 [lustre]
#16 [ffff880068cebe38] generic_shutdown_super at ffffffff81203692
#17 [ffff880068cebe60] kill_anon_super at ffffffff81203a62
#18 [ffff880068cebe78] lustre_kill_super at ffffffffc0734eb5 [obdclass]
#19 [ffff880068cebe90] deactivate_locked_super at ffffffff81203e19
#20 [ffff880068cebeb0] deactivate_super at ffffffff81204586
#21 [ffff880068cebec8] cleanup_mnt at ffffffff812217bf
#22 [ffff880068cebee0] __cleanup_mnt at ffffffff81221852
#23 [ffff880068cebef0] task_work_run at ffffffff810ad275
#24 [ffff880068cebf30] do_notify_resume at ffffffff8102ab62
#25 [ffff880068cebf50] int_signal at ffffffff816b533d
    RIP: 00007f403a9cc3e7  RSP: 00007ffc54bf3958  RFLAGS: 00000246
    RAX: 0000000000000000  RBX: 0000556135a43040  RCX: ffffffffffffffff
    RDX: 0000000000000001  RSI: 0000000000000000  RDI: 0000556135a445a0
    RBP: 0000556135a445a0   R8: 0000000000000008   R9: 000000000000000c
    R10: 00007ffc54bf3670  R11: 0000000000000246  R12: 00007f403b542d58
    R13: 0000000000000000  R14: 0000556135a43210  R15: 0000556135a43040
    ORIG_RAX: 00000000000000a6  CS: 0033  SS: 002b
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="214517" author="adilger" created="Thu, 23 Nov 2017 16:19:19 +0000"  >&lt;p&gt;Bruno, thanks for looking into this. &lt;/p&gt;

&lt;p&gt;We used to have proper protection of the /proc entries, but I guess that that was lost during the transition to sysfs or similar. Is there anything that landed just before this bug started appearing that might be the cause?&lt;/p&gt;</comment>
                            <comment id="214518" author="bfaccini" created="Thu, 23 Nov 2017 16:50:11 +0000"  >&lt;p&gt;Andreas, I believe that, according to &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5378&quot; title=&quot;recovery-small test 57 crash on invalid spinlock.&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5378&quot;&gt;&lt;del&gt;LU-5378&lt;/del&gt;&lt;/a&gt;, and the history of Maloo test results, it looks like this is a very old issue but not enough frequent to have been addressed until now.&lt;/p&gt;

&lt;p&gt;Also, during the testing of my fix proposal as a first try for osc lprocfs, I am unable to reproduce the crash with a heavier reproducer than recovery-small/test_57, on a single-node setup :&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# while true ; do umount /mnt/lustre ; mount -t lustre -o rw,flock,user_xattr,lazystatfs 10.2.5.126@tcp:/lustre /mnt/lustre ; done &amp;amp;
# while true ; do lctl get_param osc.*-f*.stats ; done &amp;amp;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;But I am still able to get the same crash by stressing the mdc lprocfs with this reproducer :&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# while true ; do umount /mnt/lustre ; mount -t lustre -o rw,flock,user_xattr,lazystatfs 10.2.5.126@tcp:/lustre /mnt/lustre ; done &amp;amp;
# while true ; do lctl get_param mdc.*.stats ; done &amp;amp;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;and when stressing the osp lprocfs with the same kind of reproducer :&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# while true ; do umount /mnt/lustre-mds1 ; mount -t lustre -o loop /tmp/lustre-mdt1 /mnt/lustre-mds1 ; done &amp;amp;
# while true ; do lctl get_param osp.*.stats ; done &amp;amp;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;So seems that as I suspected before, my fix idea looks valid and also may need to be applied for all the obd_device types I have already identified and listed.&lt;/p&gt;

&lt;p&gt;Will try to push a full patch soon now.&lt;/p&gt;</comment>
                            <comment id="214611" author="gerrit" created="Fri, 24 Nov 2017 16:45:26 +0000"  >&lt;p&gt;Faccini Bruno (bruno.faccini@intel.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/30249&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/30249&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10224&quot; title=&quot;recovery-small test_57: timeout&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10224&quot;&gt;&lt;del&gt;LU-10224&lt;/del&gt;&lt;/a&gt; obd: free obd_svc_stats when all users are gone&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 1d4fc1f20bd701edb45ff3e2a26b734f7ba08e37&lt;/p&gt;</comment>
                            <comment id="215892" author="gerrit" created="Mon, 11 Dec 2017 03:45:44 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/30249/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/30249/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10224&quot; title=&quot;recovery-small test_57: timeout&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10224&quot;&gt;&lt;del&gt;LU-10224&lt;/del&gt;&lt;/a&gt; obd: free obd_svc_stats when all users are gone&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: ffc843a0aacd78495b1cff51344aaee3e32fc2de&lt;/p&gt;</comment>
                            <comment id="215917" author="pjones" created="Mon, 11 Dec 2017 13:40:56 +0000"  >&lt;p&gt;Landed for 2.11&lt;/p&gt;</comment>
                            <comment id="220327" author="gerrit" created="Wed, 7 Feb 2018 16:45:38 +0000"  >&lt;p&gt;Minh Diep (minh.diep@intel.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/31197&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/31197&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10224&quot; title=&quot;recovery-small test_57: timeout&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10224&quot;&gt;&lt;del&gt;LU-10224&lt;/del&gt;&lt;/a&gt; obd: free obd_svc_stats when all users are gone&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_10&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 758bb9b569c14f30c0257fbb7f4f675b6f003040&lt;/p&gt;</comment>
                            <comment id="222010" author="bogl" created="Thu, 1 Mar 2018 03:31:32 +0000"  >&lt;p&gt;another on b2_10:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/91b464c2-1ce1-11e8-a10a-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/91b464c2-1ce1-11e8-a10a-52540065bddc&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="227213" author="gerrit" created="Thu, 3 May 2018 18:16:28 +0000"  >&lt;p&gt;John L. Hammond (john.hammond@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/31197/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/31197/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10224&quot; title=&quot;recovery-small test_57: timeout&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10224&quot;&gt;&lt;del&gt;LU-10224&lt;/del&gt;&lt;/a&gt; obd: free obd_svc_stats when all users are gone&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_10&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: d9c595b530c5bc5f112698151bd1e9f4d74080cb&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                                                <inwardlinks description="is duplicated by">
                                        <issuelink>
            <issuekey id="25663">LU-5378</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="49674">LU-10362</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzznlr:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>