<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:21:39 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-2015] Test failure on test suite obdfilter-survey, subtest test_3a</title>
                <link>https://jira.whamcloud.com/browse/LU-2015</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for Li Wei &amp;lt;liwei@whamcloud.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/6dfc3fae-049c-11e2-bfd4-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/6dfc3fae-049c-11e2-bfd4-52540035b04c&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_3a failed with the following error:&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;test failed to respond and timed out&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;From the OSS console:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;06:39:39:Lustre: DEBUG MARKER: == obdfilter-survey test 3a: Network survey ========================================================== 06:39:38 (1348234778)
06:40:00:Lustre: DEBUG MARKER: grep -c /mnt/ost1&apos; &apos; /proc/mounts
06:40:00:Lustre: DEBUG MARKER: umount -d -f /mnt/ost1
06:40:01:Lustre: 3533:0:(client.c:1905:ptlrpc_expire_one_request()) @@@ Request  sent has timed out for slow reply: [sent 1348234793/real 1348234793]  req@ffff88003d984800 x1413717737622294/t0(0) o400-&amp;gt;MGC10.10.4.186@tcp@10.10.4.186@tcp:26/25 lens 224/224 e 0 to 1 dl 1348234800 ref 1 fl Rpc:XN/0/ffffffff rc 0/-1
06:40:01:Lustre: 3533:0:(client.c:1905:ptlrpc_expire_one_request()) Skipped 1 previous similar message
06:40:01:LustreError: 166-1: MGC10.10.4.186@tcp: Connection to MGS (at 10.10.4.186@tcp) was lost; in progress operations using this service will fail
06:40:24:Lustre: lustre-OST0000 is waiting for obd_unlinked_exports more than 8 seconds. The obd refcount = 4. Is it stuck?
06:40:36:Lustre: lustre-OST0000 is waiting for obd_unlinked_exports more than 16 seconds. The obd refcount = 4. Is it stuck?
06:41:07:Lustre: lustre-OST0000 is waiting for obd_unlinked_exports more than 32 seconds. The obd refcount = 4. Is it stuck?
06:41:07:Lustre: 3532:0:(client.c:1905:ptlrpc_expire_one_request()) @@@ Request  sent has timed out for slow reply: [sent 1348234845/real 1348234845]  req@ffff88003d984800 x1413717737622299/t0(0) o250-&amp;gt;MGC10.10.4.186@tcp@10.10.4.186@tcp:26/25 lens 400/544 e 0 to 1 dl 1348234866 ref 1 fl Rpc:XN/0/ffffffff rc 0/-1
06:41:07:Lustre: 3532:0:(client.c:1905:ptlrpc_expire_one_request()) Skipped 3 previous similar messages
06:42:09:Lustre: lustre-OST0000 is waiting for obd_unlinked_exports more than 64 seconds. The obd refcount = 4. Is it stuck?
06:43:42:Lustre: 3532:0:(client.c:1905:ptlrpc_expire_one_request()) @@@ Request  sent has timed out for slow reply: [sent 1348234990/real 1348234990]  req@ffff88003d984800 x1413717737622304/t0(0) o250-&amp;gt;MGC10.10.4.186@tcp@10.10.4.186@tcp:26/25 lens 400/544 e 0 to 1 dl 1348235015 ref 1 fl Rpc:XN/0/ffffffff rc 0/-1
06:43:42:Lustre: 3532:0:(client.c:1905:ptlrpc_expire_one_request()) Skipped 4 previous similar messages
06:44:24:Lustre: lustre-OST0000 is waiting for obd_unlinked_exports more than 128 seconds. The obd refcount = 4. Is it stuck?
06:47:58:INFO: task umount:13712 blocked for more than 120 seconds.
06:47:58:&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
06:47:58:umount        D 0000000000000000     0 13712  13711 0x00000080
06:47:58: ffff880038249ac8 0000000000000086 ffffffff00000010 ffff880038249a78
06:47:58: ffff880038249a38 0000000000000286 ffffffffa05f0ff0 ffff880052692d54
06:47:58: ffff8800383245f8 ffff880038249fd8 000000000000fb88 ffff8800383245f8
06:47:58:Call Trace:
06:47:58: [&amp;lt;ffffffff814fea92&amp;gt;] schedule_timeout+0x192/0x2e0
06:47:58: [&amp;lt;ffffffff8107e120&amp;gt;] ? process_timeout+0x0/0x10
06:47:58: [&amp;lt;ffffffffa03a373d&amp;gt;] cfs_schedule_timeout_and_set_state+0x1d/0x20 [libcfs]
06:47:58: [&amp;lt;ffffffffa0538828&amp;gt;] obd_exports_barrier+0x98/0x180 [obdclass]
06:47:59: [&amp;lt;ffffffffa0bc6fb2&amp;gt;] ofd_device_fini+0x42/0x230 [ofd]
06:47:59: [&amp;lt;ffffffffa055ddc7&amp;gt;] class_cleanup+0x577/0xdc0 [obdclass]
06:47:59: [&amp;lt;ffffffffa053aa36&amp;gt;] ? class_name2dev+0x56/0xe0 [obdclass]
06:47:59: [&amp;lt;ffffffffa055f6b5&amp;gt;] class_process_config+0x10a5/0x1ca0 [obdclass]
06:47:59: [&amp;lt;ffffffffa03a3be0&amp;gt;] ? cfs_alloc+0x30/0x60 [libcfs]
06:47:59: [&amp;lt;ffffffffa0559043&amp;gt;] ? lustre_cfg_new+0x353/0x7e0 [obdclass]
06:47:59: [&amp;lt;ffffffffa0560427&amp;gt;] class_manual_cleanup+0x177/0x6f0 [obdclass]
06:47:59: [&amp;lt;ffffffffa053aa36&amp;gt;] ? class_name2dev+0x56/0xe0 [obdclass]
06:47:59: [&amp;lt;ffffffffa0569997&amp;gt;] server_put_super+0x5a7/0xcb0 [obdclass]
06:47:59: [&amp;lt;ffffffff8117d34b&amp;gt;] generic_shutdown_super+0x5b/0xe0
06:48:00: [&amp;lt;ffffffff8117d436&amp;gt;] kill_anon_super+0x16/0x60
06:48:00: [&amp;lt;ffffffffa0562066&amp;gt;] lustre_kill_super+0x36/0x60 [obdclass]
06:48:00: [&amp;lt;ffffffff8117e4b0&amp;gt;] deactivate_super+0x70/0x90
06:48:00: [&amp;lt;ffffffff8119a4ff&amp;gt;] mntput_no_expire+0xbf/0x110
06:48:00: [&amp;lt;ffffffff8119af9b&amp;gt;] sys_umount+0x7b/0x3a0
06:48:00: [&amp;lt;ffffffff810d6b12&amp;gt;] ? audit_syscall_entry+0x272/0x2a0
06:48:00: [&amp;lt;ffffffff8100b0f2&amp;gt;] system_call_fastpath+0x16/0x1b
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;This was master with OFD and LDiskFS OSTs.&lt;/p&gt;

&lt;p&gt;Info required for matching: obdfilter-survey 3a&lt;/p&gt;</description>
                <environment>lustre-master build #1560 zfs</environment>
        <key id="16088">LU-2015</key>
            <summary>Test failure on test suite obdfilter-survey, subtest test_3a</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="2" iconUrl="https://jira.whamcloud.com/images/icons/priorities/critical.svg">Critical</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="3">Duplicate</resolution>
                                        <assignee username="mdiep">Minh Diep</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                            <label>zfs</label>
                    </labels>
                <created>Sun, 23 Sep 2012 22:13:40 +0000</created>
                <updated>Fri, 2 Aug 2013 15:52:49 +0000</updated>
                            <resolved>Fri, 2 Aug 2013 15:52:49 +0000</resolved>
                                    <version>Lustre 2.5.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>11</watches>
                                                                            <comments>
                            <comment id="46589" author="adilger" created="Mon, 15 Oct 2012 14:33:40 +0000"  >&lt;p&gt;I hit this same problem with shutting down the MGS during sanity-scrub.sh test_10b() on 2.3.53-27-g1a660e4 (one local patch beyond 773c66 &quot;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2142&quot; title=&quot;&amp;quot;lctl lfsck_start&amp;quot; should start a scrub&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2142&quot;&gt;&lt;del&gt;LU-2142&lt;/del&gt;&lt;/a&gt; scrub: reset completed scrub position if retrigger&quot;), though I don&apos;t know if this has anything to do with the patch, even though it was touching the scrub code:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Lustre: MGS is waiting for obd_unlinked_exports more than 128 seconds. The obd refcount = 5. Is it stuck?
INFO: task umount:23147 blocked for more than 120 seconds.
&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
umount        D 0000000000000000     0 23147  23146 0x00000000
 ffff8800b22cbab8 0000000000000086 0000000000000000 ffff8800b22cba68
 ffff8800b22cba28 ffff88005ed8d000 ffffffffa13a73d1 0000000000000000
 ffff880029f685f8 ffff8800b22cbfd8 000000000000fb88 ffff880029f685f8
Call Trace:
 [&amp;lt;ffffffff814fea92&amp;gt;] schedule_timeout+0x192/0x2e0
 [&amp;lt;ffffffff8107e120&amp;gt;] ? process_timeout+0x0/0x10
 [&amp;lt;ffffffffa120673d&amp;gt;] cfs_schedule_timeout_and_set_state+0x1d/0x20 [libcfs]
 [&amp;lt;ffffffffa132a308&amp;gt;] obd_exports_barrier+0x98/0x180 [obdclass]
 [&amp;lt;ffffffffa0bde8fe&amp;gt;] mgs_device_fini+0xfe/0x5d0 [mgs]
 [&amp;lt;ffffffffa13583c7&amp;gt;] class_cleanup+0x577/0xdc0 [obdclass]
 [&amp;lt;ffffffffa132c516&amp;gt;] ? class_name2dev+0x56/0xe0 [obdclass]
 [&amp;lt;ffffffffa1359cb5&amp;gt;] class_process_config+0x10a5/0x1ca0 [obdclass]
 [&amp;lt;ffffffffa1206be0&amp;gt;] ? cfs_alloc+0x30/0x60 [libcfs]
 [&amp;lt;ffffffffa1353473&amp;gt;] ? lustre_cfg_new+0x353/0x7e0 [obdclass]
 [&amp;lt;ffffffffa135aa29&amp;gt;] class_manual_cleanup+0x179/0x6f0 [obdclass]
 [&amp;lt;ffffffffa132c516&amp;gt;] ? class_name2dev+0x56/0xe0 [obdclass]
 [&amp;lt;ffffffffa1367e5d&amp;gt;] server_put_super+0x4cd/0x12f0 [obdclass]
 [&amp;lt;ffffffff8117d34b&amp;gt;] generic_shutdown_super+0x5b/0xe0
 [&amp;lt;ffffffff8117d436&amp;gt;] kill_anon_super+0x16/0x60
 [&amp;lt;ffffffffa135c8a6&amp;gt;] lustre_kill_super+0x36/0x60 [obdclass]
 [&amp;lt;ffffffff8117e4b0&amp;gt;] deactivate_super+0x70/0x90
 [&amp;lt;ffffffff8119a4ff&amp;gt;] mntput_no_expire+0xbf/0x110
 [&amp;lt;ffffffff8119af9b&amp;gt;] sys_umount+0x7b/0x3a0
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="48566" author="sarah" created="Fri, 30 Nov 2012 01:52:14 +0000"  >&lt;p&gt;Hit the similar issue during interop testing with 2.3 server and lustre-master client&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/2b02c4e6-3987-11e2-9fda-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/2b02c4e6-3987-11e2-9fda-52540035b04c&lt;/a&gt;&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;01:52:18:&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
01:52:18:umount        D 0000000000000000     0 18490  18489 0x00000080
01:52:18: ffff88002f81fac8 0000000000000086 ffffffff00000010 ffff88002f81fa78
01:52:18: ffff88002f81fa38 0000000000000286 ffffffffa067ba50 ffff880076f95348
01:52:18: ffff88003b093058 ffff88002f81ffd8 000000000000fb88 ffff88003b093058
01:52:18:Call Trace:
01:52:18: [&amp;lt;ffffffff814fea92&amp;gt;] schedule_timeout+0x192/0x2e0
01:52:18: [&amp;lt;ffffffff8107e120&amp;gt;] ? process_timeout+0x0/0x10
01:52:18: [&amp;lt;ffffffffa043673d&amp;gt;] cfs_schedule_timeout_and_set_state+0x1d/0x20 [libcfs]
01:52:18: [&amp;lt;ffffffffa05ca168&amp;gt;] obd_exports_barrier+0x98/0x180 [obdclass]
01:52:18: [&amp;lt;ffffffffa0c49692&amp;gt;] ofd_device_fini+0x42/0x240 [ofd]
01:52:18: [&amp;lt;ffffffffa05eb4b7&amp;gt;] class_cleanup+0x577/0xdc0 [obdclass]
01:52:18: [&amp;lt;ffffffffa05cc376&amp;gt;] ? class_name2dev+0x56/0xe0 [obdclass]
01:52:18: [&amp;lt;ffffffffa05ed4ab&amp;gt;] class_process_config+0x102b/0x1c30 [obdclass]
01:52:18: [&amp;lt;ffffffffa0436be0&amp;gt;] ? cfs_alloc+0x30/0x60 [libcfs]
01:52:18: [&amp;lt;ffffffffa05e6eb3&amp;gt;] ? lustre_cfg_new+0x353/0x7e0 [obdclass]
01:52:18: [&amp;lt;ffffffffa05ee229&amp;gt;] class_manual_cleanup+0x179/0x6f0 [obdclass]
01:52:18: [&amp;lt;ffffffffa05cc376&amp;gt;] ? class_name2dev+0x56/0xe0 [obdclass]
01:52:18: [&amp;lt;ffffffffa05f8439&amp;gt;] server_put_super+0x6f9/0xcf0 [obdclass]
01:52:18: [&amp;lt;ffffffff8117d34b&amp;gt;] generic_shutdown_super+0x5b/0xe0
01:52:18: [&amp;lt;ffffffff8117d436&amp;gt;] kill_anon_super+0x16/0x60
01:52:18: [&amp;lt;ffffffffa05efe66&amp;gt;] lustre_kill_super+0x36/0x60 [obdclass]
01:52:18: [&amp;lt;ffffffff8117e4b0&amp;gt;] deactivate_super+0x70/0x90
01:52:18: [&amp;lt;ffffffff8119a4ff&amp;gt;] mntput_no_expire+0xbf/0x110
01:52:18: [&amp;lt;ffffffff8119af9b&amp;gt;] sys_umount+0x7b/0x3a0
01:52:18: [&amp;lt;ffffffff810d6b12&amp;gt;] ? audit_syscall_entry+0x272/0x2a0
01:52:18: [&amp;lt;ffffffff8100b0f2&amp;gt;] system_call_fastpath+0x16/0x1b
01:53:40:Lustre: lustre-OST0000 is waiting for obd_unlinked_exports more than 256 seconds. The obd refcount = 4. Is it stuck?
01:55:42:Lustre: 3436:0:(client.c:1917:ptlrpc_expire_one_request()) @@@ Request  sent has timed out for slow reply: [sent 1354096509/real 1354096509]  req@ffff880070cb7c00 x1419868288058186/t0(0) o250-&amp;gt;MGC10.10.4.154@tcp@10.10.4.154@tcp:26/25 lens 400/544 e 0 to 1 dl 1354096534 ref 1 fl Rpc:XN/0/ffffffff rc 0/-1
01:55:42:Lustre: 3436:0:(client.c:1917:ptlrpc_expire_one_request()) Skipped 8 previous similar messages
01:56:24:INFO: task umount:18490 blocked for more than 120 seconds.
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="53633" author="adilger" created="Fri, 8 Mar 2013 17:57:31 +0000"  >&lt;p&gt;Hit this again in recovery-small.sh test_107&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/229fc768-82ed-11e2-8172-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/229fc768-82ed-11e2-8172-52540035b04c&lt;/a&gt;&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;15:16:28:Lustre: 18053:0:(client.c:1866:ptlrpc_expire_one_request()) @@@ Request sent has timed out for slow reply: &lt;span class=&quot;error&quot;&gt;&amp;#91;sent 1362179756/real 1362179756&amp;#93;&lt;/span&gt;  req@ffff88007c931c00 x1428344221543872/t0(0) o250-&amp;gt;MGC10.10.4.150@tcp@0@lo:26/25 lens 400/544 e 0 to 1 dl 1362179781 ref 1 fl Rpc:XN/0/ffffffff rc 0/-1&lt;br/&gt;
15:16:30:Lustre: 18053:0:(client.c:1866:ptlrpc_expire_one_request()) Skipped 20 previous similar messages&lt;br/&gt;
15:17:13:LustreError: 137-5: UUID &apos;lustre-MDT0000_UUID&apos; is not available for connect (no target)&lt;br/&gt;
15:17:13:LustreError: Skipped 1081 previous similar messages&lt;br/&gt;
15:17:37:INFO: task umount:31658 blocked for more than 120 seconds.&lt;br/&gt;
15:17:37:&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.&lt;br/&gt;
15:17:37:umount        D 0000000000000000     0 31658  31657 0x00000080&lt;br/&gt;
15:17:37: ffff8800545cfa98 0000000000000086 ffffffff00000010 ffff8800545cfa48&lt;br/&gt;
15:17:37: ffff8800545cfa08 ffff8800534ba000 ffffffffa106173e 0000000000000000&lt;br/&gt;
15:17:37: ffff88006483dab8 ffff8800545cffd8 000000000000fb88 ffff88006483dab8&lt;br/&gt;
15:17:37:Call Trace:&lt;br/&gt;
15:17:37: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff814ead12&amp;gt;&amp;#93;&lt;/span&gt; schedule_timeout+0x192/0x2e0&lt;br/&gt;
15:17:37: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8107cb50&amp;gt;&amp;#93;&lt;/span&gt; ? process_timeout+0x0/0x10&lt;br/&gt;
15:17:37: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0ef96bd&amp;gt;&amp;#93;&lt;/span&gt; cfs_schedule_timeout_and_set_state+0x1d/0x20 &lt;span class=&quot;error&quot;&gt;&amp;#91;libcfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
15:17:38: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0fe3a98&amp;gt;&amp;#93;&lt;/span&gt; obd_exports_barrier+0x98/0x170 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
15:17:38: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa058d946&amp;gt;&amp;#93;&lt;/span&gt; mgs_device_fini+0xf6/0x5a0 &lt;span class=&quot;error&quot;&gt;&amp;#91;mgs&amp;#93;&lt;/span&gt;&lt;br/&gt;
15:17:38: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa10109d7&amp;gt;&amp;#93;&lt;/span&gt; class_cleanup+0x577/0xda0 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
15:17:38: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0fe5d36&amp;gt;&amp;#93;&lt;/span&gt; ? class_name2dev+0x56/0xe0 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
15:17:38: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa10122bc&amp;gt;&amp;#93;&lt;/span&gt; class_process_config+0x10bc/0x1c80 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
15:17:38: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0f03df8&amp;gt;&amp;#93;&lt;/span&gt; ? libcfs_log_return+0x28/0x40 &lt;span class=&quot;error&quot;&gt;&amp;#91;libcfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
15:17:39: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa100bb21&amp;gt;&amp;#93;&lt;/span&gt; ? lustre_cfg_new+0x391/0x7e0 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
15:17:39: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa1012ff9&amp;gt;&amp;#93;&lt;/span&gt; class_manual_cleanup+0x179/0x6f0 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
15:17:41: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0fe5d36&amp;gt;&amp;#93;&lt;/span&gt; ? class_name2dev+0x56/0xe0 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
15:17:42: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa101fa4d&amp;gt;&amp;#93;&lt;/span&gt; server_put_super+0x46d/0xf00 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
15:17:42: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811785ab&amp;gt;&amp;#93;&lt;/span&gt; generic_shutdown_super+0x5b/0xe0&lt;br/&gt;
15:17:42: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81178696&amp;gt;&amp;#93;&lt;/span&gt; kill_anon_super+0x16/0x60&lt;br/&gt;
15:17:42: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa1014e56&amp;gt;&amp;#93;&lt;/span&gt; lustre_kill_super+0x36/0x60 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
15:17:42: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81179670&amp;gt;&amp;#93;&lt;/span&gt; deactivate_super+0x70/0x90&lt;br/&gt;
15:17:43: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811955cf&amp;gt;&amp;#93;&lt;/span&gt; mntput_no_expire+0xbf/0x110&lt;br/&gt;
15:17:43: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81195f2b&amp;gt;&amp;#93;&lt;/span&gt; sys_umount+0x7b/0x3a0&lt;br/&gt;
15:17:43: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8100b072&amp;gt;&amp;#93;&lt;/span&gt; system_call_fastpath+0x16/0x1b&lt;br/&gt;
15:22:13:Lustre: MGS is waiting for obd_unlinked_exports more than 1024 seconds. The obd refcount = 5. Is it stuck?&lt;/p&gt;&lt;/blockquote&gt;</comment>
                            <comment id="53657" author="green" created="Sun, 10 Mar 2013 04:08:10 +0000"  >&lt;p&gt;I am hitting this frequently on my test cluster (see &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2939&quot; title=&quot;Lustre: MGS is waiting for obd_unlinked_exports more than 256 seconds. The obd refcount = 5. Is it stuck?&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2939&quot;&gt;&lt;del&gt;LU-2939&lt;/del&gt;&lt;/a&gt;), I have crashdumps for anybody interested to dig into it.&lt;/p&gt;</comment>
                            <comment id="53921" author="utopiabound" created="Wed, 13 Mar 2013 11:58:32 +0000"  >&lt;p&gt;Hit this ldiskfs recovery-small/29a&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/9a6769ca-8ac9-11e2-a80f-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/9a6769ca-8ac9-11e2-a80f-52540035b04c&lt;/a&gt;&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;17:22:19:Lustre: DEBUG MARKER: == recovery-small test 29a: error adding new clients doesn&apos;t cause LBUG (bug 22273) == 17:22:11 (1363047731)
17:22:19:Lustre: DEBUG MARKER: lctl set_param fail_loc=0x80000711
17:22:19:Lustre: DEBUG MARKER: grep -c /mnt/mds1&apos; &apos; /proc/mounts
17:22:19:Lustre: DEBUG MARKER: umount -d /mnt/mds1
17:22:19:Lustre: Failing over lustre-MDT0000
17:22:32:LustreError: 137-5: UUID &apos;lustre-MDT0000_UUID&apos; is not available for connect (no target)
17:22:32:LustreError: Skipped 8 previous similar messages
17:22:34:Lustre: MGS is waiting for obd_unlinked_exports more than 8 seconds. The obd refcount = 5. Is it stuck?
17:22:34:LustreError: 166-1: MGC10.10.16.89@tcp: Connection to MGS (at 0@lo) was lost; in progress operations using this service will fail
17:22:45:Lustre: MGS is waiting for obd_unlinked_exports more than 16 seconds. The obd refcount = 5. Is it stuck?
17:23:20:Lustre: MGS is waiting for obd_unlinked_exports more than 32 seconds. The obd refcount = 5. Is it stuck?
17:23:41:LustreError: 137-5: UUID &apos;lustre-MDT0000_UUID&apos; is not available for connect (no target)
17:23:42:LustreError: Skipped 116 previous similar messages
17:24:25:Lustre: MGS is waiting for obd_unlinked_exports more than 64 seconds. The obd refcount = 5. Is it stuck?
17:25:50:LustreError: 137-5: UUID &apos;lustre-MDT0000_UUID&apos; is not available for connect (no target)
17:25:50:LustreError: Skipped 233 previous similar messages
17:26:33:Lustre: MGS is waiting for obd_unlinked_exports more than 128 seconds. The obd refcount = 5. Is it stuck?
17:28:43:Lustre: 11992:0:(client.c:1866:ptlrpc_expire_one_request()) @@@ Request sent has timed out for slow reply: [sent 1363048086/real 1363048086]  req@ffff880079f23c00 x1429257112860604/t0(0) o250-&amp;gt;MGC10.10.16.89@tcp@0@lo:26/25 lens 400/544 e 0 to 1 dl 1363048111 ref 1 fl Rpc:XN/0/ffffffff rc 0/-1
17:28:43:Lustre: 11992:0:(client.c:1866:ptlrpc_expire_one_request()) Skipped 17 previous similar messages
17:29:37:INFO: task umount:17156 blocked for more than 120 seconds.
17:29:37:&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
17:29:37:umount        D 0000000000000000     0 17156  17155 0x00000080
17:29:37: ffff880071d23a98 0000000000000086 ffffffff00000010 ffff880071d23a48
17:29:38: ffff880071d23a08 ffff88005c2a4400 ffffffffa0d2a73e 0000000000000000
17:29:38: ffff880056f2a5f8 ffff880071d23fd8 000000000000fb88 ffff880056f2a5f8
17:29:38:Call Trace:
17:29:38: [&amp;lt;ffffffff814ead12&amp;gt;] schedule_timeout+0x192/0x2e0
17:29:38: [&amp;lt;ffffffff8107cb50&amp;gt;] ? process_timeout+0x0/0x10
17:29:38: [&amp;lt;ffffffffa0bc26bd&amp;gt;] cfs_schedule_timeout_and_set_state+0x1d/0x20 [libcfs]
17:29:38: [&amp;lt;ffffffffa0caca98&amp;gt;] obd_exports_barrier+0x98/0x170 [obdclass]
17:29:38: [&amp;lt;ffffffffa1725996&amp;gt;] mgs_device_fini+0xf6/0x5a0 [mgs]
17:29:38: [&amp;lt;ffffffffa0cd99c7&amp;gt;] class_cleanup+0x577/0xda0 [obdclass]
17:29:38: [&amp;lt;ffffffffa0caed36&amp;gt;] ? class_name2dev+0x56/0xe0 [obdclass]
17:29:38: [&amp;lt;ffffffffa0cdb2ac&amp;gt;] class_process_config+0x10bc/0x1c80 [obdclass]
17:29:38: [&amp;lt;ffffffffa0bccdf8&amp;gt;] ? libcfs_log_return+0x28/0x40 [libcfs]
17:29:40: [&amp;lt;ffffffffa0cd4b11&amp;gt;] ? lustre_cfg_new+0x391/0x7e0 [obdclass]
17:29:40: [&amp;lt;ffffffffa0cdbfe9&amp;gt;] class_manual_cleanup+0x179/0x6f0 [obdclass]
17:29:40: [&amp;lt;ffffffffa0caed36&amp;gt;] ? class_name2dev+0x56/0xe0 [obdclass]
17:29:40: [&amp;lt;ffffffffa0ce8a3d&amp;gt;] server_put_super+0x46d/0xf00 [obdclass]
17:29:40: [&amp;lt;ffffffff811785ab&amp;gt;] generic_shutdown_super+0x5b/0xe0
17:29:40: [&amp;lt;ffffffff81178696&amp;gt;] kill_anon_super+0x16/0x60
17:29:40: [&amp;lt;ffffffffa0cdde46&amp;gt;] lustre_kill_super+0x36/0x60 [obdclass]
17:29:40: [&amp;lt;ffffffff81179670&amp;gt;] deactivate_super+0x70/0x90
17:29:40: [&amp;lt;ffffffff811955cf&amp;gt;] mntput_no_expire+0xbf/0x110
17:29:40: [&amp;lt;ffffffff81195f2b&amp;gt;] sys_umount+0x7b/0x3a0
17:29:40: [&amp;lt;ffffffff8100b072&amp;gt;] system_call_fastpath+0x16/0x1b
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="56419" author="sarah" created="Tue, 16 Apr 2013 18:45:35 +0000"  >&lt;p&gt;Hit this error in the tag-2.3.64 testing:&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/a2ff5494-a66c-11e2-90ad-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/a2ff5494-a66c-11e2-90ad-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="56958" author="utopiabound" created="Wed, 24 Apr 2013 18:02:58 +0000"  >&lt;p&gt;Failure on replay-single/90 review-zfs&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/6128f8ce-ad04-11e2-bd7c-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/6128f8ce-ad04-11e2-bd7c-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="57528" author="yujian" created="Thu, 2 May 2013 15:35:18 +0000"  >&lt;p&gt;I found that since 2013-04-12, obdfilter-survey had failed with this issue consistently on master branch. The latest build number of master branch which passed the obdfilter-survey test in &quot;full&quot; test session is #1381.&lt;/p&gt;</comment>
                            <comment id="57548" author="pjones" created="Thu, 2 May 2013 18:04:09 +0000"  >&lt;p&gt;Minh&lt;/p&gt;

&lt;p&gt;Could you please see if you can isolate which landing introduced this problem?&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="57549" author="adilger" created="Thu, 2 May 2013 18:05:28 +0000"  >&lt;p&gt;What is the git commit hash for build #1381?  It probably makes sense to just look through the patches that were landed to master between build #1381 and #1382, and if necessary build &amp;amp; run the obdfilter-survey test with git-bisect to isolate it to a specific patch.&lt;/p&gt;</comment>
                            <comment id="57594" author="mdiep" created="Thu, 2 May 2013 22:59:09 +0000"  >&lt;p&gt; 34 UP lwp lustre-MDT0000-lwp-OST0005 lustre-MDT0000-lwp-OST0005_UUID 5&lt;br/&gt;
 35 UP osd-ldiskfs lustre-OST0006-osd lustre-OST0006-osd_UUID 5&lt;br/&gt;
 36 UP obdfilter lustre-OST0006 lustre-OST0006_UUID 10&lt;br/&gt;
 37 UP lwp lustre-MDT0000-lwp-OST0006 lustre-MDT0000-lwp-OST0006_UUID 5&lt;br/&gt;
 38 UP echo_client lustre-OST0000_ecc lustre-OST0000_ecc_UUID 3&lt;br/&gt;
 39 UP echo_client lustre-OST0001_ecc lustre-OST0001_ecc_UUID 3&lt;br/&gt;
 40 UP echo_client lustre-OST0002_ecc lustre-OST0002_ecc_UUID 3&lt;br/&gt;
 41 UP echo_client lustre-OST0003_ecc lustre-OST0003_ecc_UUID 3&lt;br/&gt;
 42 UP echo_client lustre-OST0004_ecc lustre-OST0004_ecc_UUID 3&lt;br/&gt;
 43 UP echo_client lustre-OST0005_ecc lustre-OST0005_ecc_UUID 3&lt;br/&gt;
 44 UP echo_client lustre-OST0006_ecc lustre-OST0006_ecc_UUID 3&lt;/p&gt;

&lt;p&gt;One of the reasons is obdfilter-survey did not clean up the echo_client after finish. I am looking further&lt;/p&gt;</comment>
                            <comment id="57609" author="mdiep" created="Fri, 3 May 2013 06:04:28 +0000"  >&lt;p&gt;obdfilter-survey exited unexpectedly due to this command&lt;/p&gt;


&lt;p&gt;+ echo &apos;=============&amp;gt; Destroy 1 on localhost:lustre-OST0000_ecc&apos;&lt;br/&gt;
+ destroy_objects localhost 38 2 1 /tmp/obdfilter_survey_2013-05-02@20:45_fat-amd-4.lab.whamcloud.com.detail_tmp&lt;br/&gt;
+ local host=localhost&lt;br/&gt;
+ local devno=38&lt;br/&gt;
+ local obj0=2&lt;br/&gt;
+ local nobj=1&lt;br/&gt;
+ local rfile=/tmp/obdfilter_survey_2013-05-02@20:45_fat-amd-4.lab.whamcloud.com.detail_tmp&lt;br/&gt;
+ remote_shell localhost lctl --device 38 destroy 2 1&lt;/p&gt;


&lt;p&gt;&lt;span class=&quot;error&quot;&gt;&amp;#91;root@fat-amd-4 ~&amp;#93;&lt;/span&gt;# cat /tmp/obdfilter_survey_2013-05-02&amp;#64;20\:45_fat-amd-4.lab.whamcloud.com.detail_tmp&lt;br/&gt;
+ host=localhost&lt;br/&gt;
+ shift&lt;br/&gt;
+ cmds=&apos;lctl --device 38 destroy 2 1&apos;&lt;br/&gt;
++ uname -n&lt;br/&gt;
+ &apos;&lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;#39; localhost = localhost -o localhost = fat-amd-4.lab.whamcloud.com &amp;#39;&amp;#93;&lt;/span&gt;&apos;&lt;br/&gt;
+ eval &apos;lctl --device 38 destroy 2 1&apos;&lt;br/&gt;
++ lctl --device 38 destroy 2 1&lt;br/&gt;
destroy: 1 objects&lt;br/&gt;
error: destroy: objid 0x2: No such file or directory&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;root@fat-amd-4 ~&amp;#93;&lt;/span&gt;#              &lt;/p&gt;

&lt;p&gt;but we create objid 0x2!!!!&lt;/p&gt;

&lt;p&gt;=============&amp;gt; Create 1 on localhost:lustre-OST0000_ecc&lt;br/&gt;
++ host=localhost&lt;br/&gt;
++ shift&lt;br/&gt;
++ cmds=&apos;lctl --device 38 create 1&apos;&lt;br/&gt;
+++ uname -n&lt;br/&gt;
++ &apos;&lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;#39; localhost = localhost -o localhost = fat-amd-4.lab.whamcloud.com &amp;#39;&amp;#93;&lt;/span&gt;&apos;&lt;br/&gt;
++ eval &apos;lctl --device 38 create 1&apos;&lt;br/&gt;
+++ lctl --device 38 create 1&lt;br/&gt;
create: 1 objects&lt;br/&gt;
create: #1 is object id 0x2&lt;/p&gt;
</comment>
                            <comment id="57733" author="mdiep" created="Mon, 6 May 2013 15:46:40 +0000"  >&lt;p&gt;The fix is dup of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3267&quot; title=&quot;cl_echo_object_find()) ASSERTION( ostid_seq(&amp;amp;lsm-&amp;gt;lsm_wire.lw_object_oi) == FID_SEQ_ECHO ) failed:&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3267&quot;&gt;&lt;del&gt;LU-3267&lt;/del&gt;&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="62434" author="sarah" created="Tue, 16 Jul 2013 22:29:36 +0000"  >&lt;p&gt;Hit this issue again in zfs testing&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/14e07cdc-ed44-11e2-99b4-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/14e07cdc-ed44-11e2-99b4-52540035b04c&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;OST console:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;00:47:41:LustreError: 19583:0:(qsd_reint.c:54:qsd_reint_completion()) Skipped 1 previous similar message
00:48:42:INFO: task umount:19559 blocked for more than 120 seconds.
00:48:42:&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
00:48:42:umount        D 0000000000000001     0 19559  19558 0x00000080
00:48:42: ffff8802d22c7aa8 0000000000000082 ffffffff00000010 ffff8802d22c7a58
&#65533;
00:48:42: ffff8802d22c7a18 0000000000000286 ffffffffa07db450 ffff8802fcc06f2a
00:48:42: ffff8803003725f8 ffff8802d22c7fd8 000000000000fb88 ffff8803003725f8
00:48:42:Call Trace:
00:48:42: [&amp;lt;ffffffff8150ee42&amp;gt;] schedule_timeout+0x192/0x2e0
00:48:42: [&amp;lt;ffffffff810810e0&amp;gt;] ? process_timeout+0x0/0x10
00:48:42: [&amp;lt;ffffffffa05f462d&amp;gt;] cfs_schedule_timeout_and_set_state+0x1d/0x20 [libcfs]
00:48:42: [&amp;lt;ffffffffa07175f8&amp;gt;] obd_exports_barrier+0x98/0x170 [obdclass]
00:48:42: [&amp;lt;ffffffffa0e5a962&amp;gt;] ofd_device_fini+0x42/0x230 [ofd]
00:48:42: [&amp;lt;ffffffffa0742f17&amp;gt;] class_cleanup+0x577/0xda0 [obdclass]
00:48:42: [&amp;lt;ffffffffa07197a6&amp;gt;] ? class_name2dev+0x56/0xe0 [obdclass]
00:48:42: [&amp;lt;ffffffffa07447fc&amp;gt;] class_process_config+0x10bc/0x1c80 [obdclass]
00:48:42: [&amp;lt;ffffffffa073e1e3&amp;gt;] ? lustre_cfg_new+0x2d3/0x6e0 [obdclass]
00:48:42: [&amp;lt;ffffffffa0745539&amp;gt;] class_manual_cleanup+0x179/0x6f0 [obdclass]
00:48:42: [&amp;lt;ffffffffa07197a6&amp;gt;] ? class_name2dev+0x56/0xe0 [obdclass]
00:48:42: [&amp;lt;ffffffffa07809ec&amp;gt;] server_put_super+0x5ec/0xf60 [obdclass]
00:48:42: [&amp;lt;ffffffff811833ab&amp;gt;] generic_shutdown_super+0x5b/0xe0
00:48:42: [&amp;lt;ffffffff81183496&amp;gt;] kill_anon_super+0x16/0x60
00:48:42: [&amp;lt;ffffffffa07473e6&amp;gt;] lustre_kill_super+0x36/0x60 [obdclass]
00:48:42: [&amp;lt;ffffffff81183c37&amp;gt;] deactivate_super+0x57/0x80
00:48:42: [&amp;lt;ffffffff811a1c8f&amp;gt;] mntput_no_expire+0xbf/0x110
00:48:42: [&amp;lt;ffffffff811a26fb&amp;gt;] sys_umount+0x7b/0x3a0
00:48:42: [&amp;lt;ffffffff8100b072&amp;gt;] system_call_fastpath+0x16/0x1b
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="62994" author="mdiep" created="Thu, 25 Jul 2013 17:24:19 +0000"  >&lt;p&gt;Hi Sarah,&lt;/p&gt;

&lt;p&gt;This seems to be a different issue. Could you file a new bug?&lt;/p&gt;</comment>
                            <comment id="63212" author="sarah" created="Tue, 30 Jul 2013 06:15:09 +0000"  >&lt;p&gt;Thanks Minh, &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3665&quot; title=&quot;obdfilter-survey test_3a: unmount stuck in obd_exports_barrier()&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3665&quot;&gt;&lt;del&gt;LU-3665&lt;/del&gt;&lt;/a&gt; is for the new issue.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                                                <inwardlinks description="is duplicated by">
                                        <issuelink>
            <issuekey id="17814">LU-2939</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="18543">LU-3230</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzv3nz:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>4116</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>