<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:47:15 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-4950] sanity-benchmark test fsx hung: txg_sync was stuck on OSS</title>
                <link>https://jira.whamcloud.com/browse/LU-4950</link>
                <project id="10000" key="LU">Lustre</project>
<description>&lt;p&gt;While running sanity-benchmark test fsx with the following configuration, the test timed out after 3600s.&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;MDSCOUNT=1
MDSSIZE=2097152
OSTCOUNT=2
OSTSIZE=8388608
FSTYPE=zfs
SLOW=yes
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;txg_sync was stuck on the OSS node:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;txg_sync      D 0000000000000001     0  2665      2 0x00000080
 ffff88006f4cbba0 0000000000000046 00000000ffffffff 000037007418a66d
 ffff88006f4cbb10 ffff88007cba1a60 0000000000bba084 ffffffffaca9e91e
 ffff880070bc05f8 ffff88006f4cbfd8 000000000000fbc8 ffff880070bc05f8
Call Trace:
 [&amp;lt;ffffffff810a7091&amp;gt;] ? ktime_get_ts+0xb1/0xf0
 [&amp;lt;ffffffff81528823&amp;gt;] io_schedule+0x73/0xc0
 [&amp;lt;ffffffffa0142e7c&amp;gt;] cv_wait_common+0x8c/0x100 [spl]
 [&amp;lt;ffffffff8109b290&amp;gt;] ? autoremove_wake_function+0x0/0x40
 [&amp;lt;ffffffffa0142f08&amp;gt;] __cv_wait_io+0x18/0x20 [spl]
 [&amp;lt;ffffffffa02864ab&amp;gt;] zio_wait+0xfb/0x1b0 [zfs]
 [&amp;lt;ffffffffa021e5a5&amp;gt;] dsl_pool_sync+0x2f5/0x540 [zfs]
 [&amp;lt;ffffffffa023725e&amp;gt;] spa_sync+0x40e/0xa00 [zfs]
 [&amp;lt;ffffffff8103ea79&amp;gt;] ? kvm_clock_get_cycles+0x9/0x10
 [&amp;lt;ffffffffa024253a&amp;gt;] txg_sync_thread+0x27a/0x4b0 [zfs]
 [&amp;lt;ffffffff81059329&amp;gt;] ? set_user_nice+0xc9/0x130
 [&amp;lt;ffffffffa02422c0&amp;gt;] ? txg_sync_thread+0x0/0x4b0 [zfs]
 [&amp;lt;ffffffffa013e9bf&amp;gt;] thread_generic_wrapper+0x5f/0x70 [spl]
 [&amp;lt;ffffffffa013e960&amp;gt;] ? thread_generic_wrapper+0x0/0x70 [spl]
 [&amp;lt;ffffffff8109aee6&amp;gt;] kthread+0x96/0xa0
 [&amp;lt;ffffffff8100c20a&amp;gt;] child_rip+0xa/0x20
 [&amp;lt;ffffffff8109ae50&amp;gt;] ? kthread+0x0/0xa0
 [&amp;lt;ffffffff8100c200&amp;gt;] ? child_rip+0x0/0x20
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Maloo report: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/7ec85f26-cb6e-11e3-95c9-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/7ec85f26-cb6e-11e3-95c9-52540035b04c&lt;/a&gt;&lt;/p&gt;</description>
                <environment>&lt;br/&gt;
Lustre build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_5/47/&quot;&gt;http://build.whamcloud.com/job/lustre-b2_5/47/&lt;/a&gt;&lt;br/&gt;
FSTYPE=zfs&lt;br/&gt;
</environment>
        <key id="24369">LU-4950</key>
            <summary>sanity-benchmark test fsx hung: txg_sync was stuck on OSS</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="2" iconUrl="https://jira.whamcloud.com/images/icons/priorities/critical.svg">Critical</priority>
                        <status id="6" iconUrl="https://jira.whamcloud.com/images/icons/statuses/closed.png" description="The issue is considered finished, the resolution is correct. Issues which are closed can be reopened.">Closed</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="3">Duplicate</resolution>
                                        <assignee username="bzzz">Alex Zhuravlev</assignee>
                                    <reporter username="yujian">Jian Yu</reporter>
                        <labels>
                            <label>zfs</label>
                    </labels>
                <created>Thu, 24 Apr 2014 08:48:49 +0000</created>
                <updated>Thu, 16 Apr 2020 07:29:31 +0000</updated>
                            <resolved>Thu, 16 Apr 2020 07:29:31 +0000</resolved>
                                    <version>Lustre 2.6.0</version>
                    <version>Lustre 2.5.1</version>
                    <version>Lustre 2.7.0</version>
                    <version>Lustre 2.4.3</version>
                    <version>Lustre 2.5.3</version>
                    <version>Lustre 2.8.0</version>
                    <version>Lustre 2.10.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>15</watches>
                                                                            <comments>
                            <comment id="82365" author="yujian" created="Thu, 24 Apr 2014 09:25:53 +0000"  >&lt;p&gt;Lustre build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_4/74/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-b2_4/74/&lt;/a&gt;&lt;br/&gt;
FSTYPE=zfs&lt;/p&gt;

&lt;p&gt;sanity-benchmark test iozone also hit the same issue:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/41f6d9ec-bfa7-11e3-8176-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/41f6d9ec-bfa7-11e3-8176-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="82366" author="yujian" created="Thu, 24 Apr 2014 09:34:56 +0000"  >&lt;p&gt;Hello Alex,&lt;/p&gt;

&lt;p&gt;Could you please take a look at whether this is a duplicate of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2476&quot; title=&quot;poor OST file creation rate performance with zfs backend&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2476&quot;&gt;&lt;del&gt;LU-2476&lt;/del&gt;&lt;/a&gt;/&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2600&quot; title=&quot;lustre metadata performance is very slow on zfs&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2600&quot;&gt;&lt;del&gt;LU-2600&lt;/del&gt;&lt;/a&gt;? Thanks.&lt;/p&gt;</comment>
                            <comment id="82496" author="isaac" created="Fri, 25 Apr 2014 16:01:52 +0000"  >&lt;p&gt;It looks more likely a duplicate of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4716&quot; title=&quot;replay-ost-single test_5: stuck in dbuf_read-&amp;gt;zio_wait&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4716&quot;&gt;&lt;del&gt;LU-4716&lt;/del&gt;&lt;/a&gt; - stuck in zio_wait(), i.e. IO couldn&apos;t complete.&lt;/p&gt;

&lt;p&gt;What were the storage devices for the OST storage pools? At least we should make sure not to use certain IO schedulers known to cause ZFS IO starvation for those devices.&lt;/p&gt;</comment>
                            <comment id="82570" author="yujian" created="Sun, 27 Apr 2014 14:37:00 +0000"  >&lt;p&gt;Hi Isaac,&lt;/p&gt;

&lt;p&gt;The tests were performed on virtual machines by autotest. The storage devices were LVs on a /dev/vda partition. And the I/O scheduler was:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# cat /sys/block/vda/queue/scheduler 
noop anticipatory [deadline] cfq 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;FYI, the same failure was reported in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2887&quot; title=&quot;sanity-quota test_12a: slow due to ZFS VMs sharing single disk&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2887&quot;&gt;&lt;del&gt;LU-2887&lt;/del&gt;&lt;/a&gt; before. There were some discussions and experiment data in that ticket.&lt;/p&gt;</comment>
                            <comment id="82607" author="bzzz" created="Mon, 28 Apr 2014 14:36:46 +0000"  >&lt;p&gt;Jian, no, this is definitely not 2476/2600. I hit this locally few times as well - basically everything got stuck. we discussed this a bit with Brian B., but the root cause isn&apos;t clear yet.&lt;/p&gt;</comment>
                            <comment id="83817" author="isaac" created="Mon, 12 May 2014 09:42:37 +0000"  >&lt;p&gt;Hi Jian, I&apos;d suggest to set IO scheduler of /dev/vda to noop. In fact, ZFS automatically sets IO schedulers of storage pool devices to noop, as other schedulers could cause ZFS IO starvation. But since the pool uses LVs, ZFS can&apos;t see /dev/vda at all.&lt;/p&gt;</comment>
                            <comment id="85795" author="yujian" created="Thu, 5 Jun 2014 13:07:17 +0000"  >&lt;p&gt;Hi Isaac,&lt;/p&gt;

&lt;p&gt;I tried noop before, please refer to: &lt;a href=&quot;https://jira.hpdd.intel.com/browse/LU-2887?focusedCommentId=67639&amp;amp;page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-67639&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://jira.hpdd.intel.com/browse/LU-2887?focusedCommentId=67639&amp;amp;page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-67639&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="85810" author="utopiabound" created="Thu, 5 Jun 2014 14:19:55 +0000"  >&lt;p&gt;This seems to be similar to the txg_sync issue described in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2887&quot; title=&quot;sanity-quota test_12a: slow due to ZFS VMs sharing single disk&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2887&quot;&gt;&lt;del&gt;LU-2887&lt;/del&gt;&lt;/a&gt;.  Possibly related to how autotest sets up disks.&lt;/p&gt;</comment>
                            <comment id="85827" author="yujian" created="Thu, 5 Jun 2014 14:50:21 +0000"  >&lt;p&gt;In fact, in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2887&quot; title=&quot;sanity-quota test_12a: slow due to ZFS VMs sharing single disk&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2887&quot;&gt;&lt;del&gt;LU-2887&lt;/del&gt;&lt;/a&gt;, even with OSTCOUNT=2, the txg_sync issue still existed. Please refer to the last several comments.&lt;br/&gt;
In the Maloo reports in this ticket, the OSTCOUNT was also 2.&lt;/p&gt;</comment>
                            <comment id="86001" author="utopiabound" created="Fri, 6 Jun 2014 13:39:48 +0000"  >&lt;p&gt;This may actually be a &lt;a href=&quot;https://github.com/zfsonlinux/zfs/issues/1640&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;ZFS issue&lt;/a&gt; and should be retested with zfs 0.6.3 when it arrives.&lt;/p&gt;</comment>
                            <comment id="86967" author="adilger" created="Wed, 18 Jun 2014 19:45:14 +0000"  >&lt;p&gt;There was also a failure of this test with ldiskfs in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5214&quot; title=&quot;Failure on test suite replay-ost-single test_5&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5214&quot;&gt;&lt;del&gt;LU-5214&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;</comment>
                            <comment id="89680" author="sarah" created="Mon, 21 Jul 2014 21:49:29 +0000"  >&lt;p&gt;Hit several timeout failures in lustre-b2_6-rc2 zfs testing, cannot find error messages&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/254eb710-0ecb-11e4-9f57-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/254eb710-0ecb-11e4-9f57-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/f69a0dd2-0ec7-11e4-9f57-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/f69a0dd2-0ec7-11e4-9f57-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/75580c7e-0ec2-11e4-9f57-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/75580c7e-0ec2-11e4-9f57-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="90346" author="pjones" created="Tue, 29 Jul 2014 18:11:31 +0000"  >&lt;p&gt;That rules out this being fixed in zfs 0.6.3&lt;/p&gt;</comment>
                            <comment id="92279" author="yujian" created="Sun, 24 Aug 2014 05:21:15 +0000"  >&lt;p&gt;Lustre Build: &lt;a href=&quot;https://build.hpdd.intel.com/job/lustre-b2_5/84/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://build.hpdd.intel.com/job/lustre-b2_5/84/&lt;/a&gt;&lt;br/&gt;
Distro/Arch: RHEL6.5/x86_64&lt;br/&gt;
FSTYPE=zfs&lt;/p&gt;

&lt;p&gt;Patch &lt;a href=&quot;http://review.whamcloud.com/11298&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/11298&lt;/a&gt; was already in Lustre b2_5 build #84. However, the failure still occurred:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/eee5bd1a-2b24-11e4-bb80-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/eee5bd1a-2b24-11e4-bb80-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/88e0db3a-2b24-11e4-bb80-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/88e0db3a-2b24-11e4-bb80-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/294bd846-2b24-11e4-bb80-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/294bd846-2b24-11e4-bb80-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/deb57516-2b24-11e4-bb80-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/deb57516-2b24-11e4-bb80-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="92904" author="yujian" created="Sun, 31 Aug 2014 21:24:22 +0000"  >&lt;p&gt;Lustre Build: &lt;a href=&quot;https://build.hpdd.intel.com/job/lustre-b2_5/86/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://build.hpdd.intel.com/job/lustre-b2_5/86/&lt;/a&gt; (2.5.3 RC1)&lt;br/&gt;
FSTYPE=zfs&lt;/p&gt;

&lt;p&gt;The same failure occurred:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/f444ed7c-311d-11e4-b503-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/f444ed7c-311d-11e4-b503-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/9210d3d4-311b-11e4-b503-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/9210d3d4-311b-11e4-b503-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/c618d298-30ae-11e4-9e60-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/c618d298-30ae-11e4-9e60-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="93133" author="isaac" created="Wed, 3 Sep 2014 18:07:38 +0000"  >&lt;p&gt;I assume the latest reports were using ZFS 0.6.3 already.&lt;/p&gt;

&lt;p&gt;Can you elaborate on how the OSS storage is configured:&lt;/p&gt;
&lt;ul class=&quot;alternate&quot; type=&quot;square&quot;&gt;
	&lt;li&gt;Two OSTs, I assume one zpool for each OST&lt;/li&gt;
	&lt;li&gt;For each OST, what are the disks in the zpool? Any RAIDZ configuration? How are the disks configured on the host of the OSS VM? Does each disk correspond to a dedicated physical disk or do they share physical disks? Anything else on the host competing for IO on those disks? What are the IO schedulers on the host disks?&lt;/li&gt;
&lt;/ul&gt;


&lt;p&gt;It appeared that the OSS had 2G of memory. Any ZFS tunings? Can you include the output of &apos;cat /sys/module/zfs/parameters/*&apos;, i.e. the run-time values of the tunings?&lt;/p&gt;

&lt;p&gt;The write throttle and IO scheduling have been completely rewritten in 0.6.3. The defaults may not be proper for a VM test system. So first I need to understand exactly how the OSS is configured.&lt;/p&gt;</comment>
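<!--
A minimal sketch of how the information requested above could be gathered on the OSS, assuming the
zfs module is loaded; the output file names are illustrative only:

# run-time values of all ZFS module tunings, one "name value" pair per line
for f in /sys/module/zfs/parameters/*; do
    printf '%s %s\n' "$(basename "$f")" "$(cat "$f")"
done > /tmp/zfs_tunings.txt
# pool layout for each OST pool: vdevs, any RAIDZ level, and backing device paths
zpool status > /tmp/zpool_status.txt
zpool list -v >> /tmp/zpool_status.txt
-->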
                            <comment id="93135" author="yujian" created="Wed, 3 Sep 2014 18:26:55 +0000"  >&lt;p&gt;Hi Isaac,&lt;/p&gt;

&lt;p&gt;I just added Minh and Joshua to watch this ticket. The OSS and OSTs were configured by the autotest system. To answer the above questions, I need to look into the test nodes while a ZFS test session is running. If you would like, I can trigger a ZFS test session and tell you the test nodes, so that you can log into the nodes and investigate the parameters directly. &lt;img class=&quot;emoticon&quot; src=&quot;https://jira.whamcloud.com/images/icons/emoticons/smile.png&quot; height=&quot;16&quot; width=&quot;16&quot; align=&quot;absmiddle&quot; alt=&quot;&quot; border=&quot;0&quot;/&gt;&lt;/p&gt;</comment>
                            <comment id="93173" author="isaac" created="Thu, 4 Sep 2014 04:03:01 +0000"  >&lt;p&gt;I checked stack dumps from the most recent three failures reported by Jian, and found the OSS z_ioctl_iss thread were all stuck in a same place:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;z_ioctl_iss/0 S 0000000000000001     0  3996      2 0x00000080
 ffff880079dc9e20 0000000000000046 ffff880079dc9da0 ffffffffa024aa46
 ffff880079dc9de0 ffffc900192865f8 ffff880079a76000 0000000000000200
 ffff8800730c8638 ffff880079dc9fd8 000000000000fbc8 ffff8800730c8638
Call Trace:
 [&amp;lt;ffffffffa024aa46&amp;gt;] ? vdev_disk_io_start+0x156/0x1b0 [zfs]
 [&amp;lt;ffffffffa013f686&amp;gt;] taskq_thread+0x3c6/0x3f0 [spl]
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Likely it&apos;s really stuck in vdev_disk_io_flush(), which is probably inlined. It seems the syncing txg is about to complete and a WRITE_FLUSH_FUA is to be issued after the uberblocks have been updated. There are also other threads waiting for the txg to be synced.&lt;/p&gt;

&lt;p&gt;The only place it could get stuck in vdev_disk_io_flush() seemed to be in bio_alloc(), which indicated a deadlock under memory pressure.&lt;/p&gt;

&lt;p&gt;There are two things that would help troubleshoot:&lt;br/&gt;
1. Set zfs_nocacheflush:&lt;br/&gt;
options zfs zfs_nocacheflush=1&lt;br/&gt;
This makes DKIOCFLUSHWRITECACHE a noop. Then the deadlock may move elsewhere or simply disappear.&lt;br/&gt;
2. Whether zfs_nocacheflush is set or not, once the timeout happens, it&apos;d be useful to gather &quot;zpool events -v&quot; outputs on the OSS, which&apos;d give more details on the state of the stuck zio.&lt;/p&gt;</comment>
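<!--
A minimal sketch of the two troubleshooting steps above, assuming the option is applied before the zfs
module is loaded; the modprobe.d file name and output path are illustrative only:

# 1. make DKIOCFLUSHWRITECACHE a no-op on the OSS (takes effect on the next module load)
echo "options zfs zfs_nocacheflush=1" > /etc/modprobe.d/zfs.conf
# verify the run-time value after the module is reloaded
cat /sys/module/zfs/parameters/zfs_nocacheflush
# 2. once the timeout happens, capture the state of the stuck zio
zpool events -v > /tmp/zpool_events_$(hostname).log
-->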
                            <comment id="93896" author="sarah" created="Fri, 12 Sep 2014 21:34:17 +0000"  >&lt;p&gt;seen this in hard failover test for master branch zfs&lt;br/&gt;
server and client: lustre-master build #2642&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/63b3a69c-37c4-11e4-a2a6-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/63b3a69c-37c4-11e4-a2a6-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="93905" author="yujian" created="Fri, 12 Sep 2014 23:08:59 +0000"  >&lt;blockquote&gt;
&lt;p&gt;There are two things that would help troubleshoot:&lt;br/&gt;
1. Set zfs_nocacheflush:&lt;br/&gt;
options zfs zfs_nocacheflush=1&lt;br/&gt;
This makes DKIOCFLUSHWRITECACHE a noop. Then the deadlock may move elsewhere or simply disappear.&lt;br/&gt;
2. Whether zfs_nocacheflush is set or not, once the timeout happens, it&apos;d be useful to gather &quot;zpool events -v&quot; outputs on the OSS, which&apos;d give more details on the state of the stuck zio.&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;I pushed a for-test-only patch on Lustre b2_5 branch to run the previously timed out tests on ZFS with &quot;options zfs zfs_nocacheflush=1&quot;:  &lt;a href=&quot;http://review.whamcloud.com/11906&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/11906&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;If the deadlock still occurs, I&apos;ll ask the TEI team for help with implementing item 2.&lt;/p&gt;</comment>
                            <comment id="94211" author="isaac" created="Wed, 17 Sep 2014 04:42:50 +0000"  >&lt;p&gt;My previous analysis might be inaccurate as I neglected the &apos;?&apos; in the stack entry:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[&amp;lt;ffffffffa024aa46&amp;gt;] ? vdev_disk_io_start+0x156/0x1b0 [zfs]
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;This meant that the vdev_disk_io_start() function had likely returned and the WRITE_FLUSH_FUA bio had already been submitted - the z_ioctl_iss thread was simply waiting for more work to do. I checked the OSS stack dumps, and the z_* threads were all idle, so there was no more IO left to do. This led me to believe that:&lt;br/&gt;
1. The IO for syncing the txg was all done, and WRITE_FLUSH_FUA was submitted as an IO barrier, i.e. to make sure all previous writes had hit persistent storage.&lt;br/&gt;
2. But the WRITE_FLUSH_FUA somehow didn&apos;t complete, causing txg_sync to wait and hang.&lt;/p&gt;

&lt;p&gt;Suggestions to troubleshoot stay the same: on the OSS, set zfs_nocacheflush, and gather &quot;zpool events -v&quot; outputs.&lt;/p&gt;</comment>
                            <comment id="94940" author="yujian" created="Thu, 25 Sep 2014 06:12:47 +0000"  >&lt;p&gt;I set up a test cluster on onyx-23vm[5-8] with the following configuration:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;NAME=ncli
RCLIENTS=onyx-23vm6

mds_HOST=onyx-23vm7
MDSCOUNT=1
MDSSIZE=3145728
MDSDEV1=/dev/vda5

ost_HOST=onyx-23vm8
OSTCOUNT=2
OSTSIZE=8388608
OSTDEV1=/dev/vda5
OSTDEV2=/dev/vda6

SLOW=yes
ENABLE_QUOTA=yes
FSTYPE=zfs
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# pdsh -l root -S -w onyx-23vm[5-8] &quot;export PATH=\$PATH:/sbin:/usr/sbin; cat /sys/block/vda/queue/scheduler&quot;
onyx-23vm6: [noop] anticipatory deadline cfq
onyx-23vm5: [noop] anticipatory deadline cfq
onyx-23vm7: [noop] anticipatory deadline cfq
onyx-23vm8: [noop] anticipatory deadline cfq
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Lustre build: &lt;a href=&quot;https://build.hpdd.intel.com/job/lustre-b2_5/88/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://build.hpdd.intel.com/job/lustre-b2_5/88/&lt;/a&gt;&lt;br/&gt;
sanity-benchmark passed in 16878s: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/6cb0f36a-4473-11e4-8e4d-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/6cb0f36a-4473-11e4-8e4d-5254006e85c2&lt;/a&gt;&lt;br/&gt;
performance-sanity passed in 7864s: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/6d589494-4473-11e4-8e4d-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/6d589494-4473-11e4-8e4d-5254006e85c2&lt;/a&gt;&lt;br/&gt;
parallel-scale passed in 6336s: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/6e4fdc22-4473-11e4-8e4d-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/6e4fdc22-4473-11e4-8e4d-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;I&apos;m re-running the tests with &quot;deadline&quot; scheduler, which is the default value after the test nodes are provisioned.&lt;/p&gt;</comment>
                            <comment id="94990" author="yujian" created="Thu, 25 Sep 2014 19:07:25 +0000"  >&lt;blockquote&gt;&lt;p&gt;I&apos;m re-running the tests with &quot;deadline&quot; scheduler, which is the default value after the test nodes are provisioned.&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;sanity-benchmark passed in 15688s. However, performance-sanity test 8 hung. The output of &quot;zpool events -v&quot; on the OSS is attached. Here is the test report: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/810760ae-45c4-11e4-aeca-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/810760ae-45c4-11e4-aeca-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Hi Isaac, if you can access the onyx cluster, please feel free to use onyx-23vm[5-8]. The testing environment has been kept in place.&lt;/p&gt;</comment>
                            <comment id="95104" author="isaac" created="Fri, 26 Sep 2014 23:54:29 +0000"  >&lt;p&gt;Looks like the combination of LVM elimination and noop scheduler made a difference. Can you please repeat the same test again (i.e. noop, and no LVM)? Considering the low pass rate, we&apos;d be able to isolate LVM+deadline as the cause of IO timeout if it passes again.&lt;/p&gt;</comment>
                            <comment id="95113" author="yujian" created="Sat, 27 Sep 2014 01:06:19 +0000"  >&lt;blockquote&gt;&lt;p&gt;Can you please repeat the same test again (i.e. noop, and no LVM)? &lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;Sure. Testing was started.&lt;/p&gt;</comment>
                            <comment id="95241" author="yujian" created="Mon, 29 Sep 2014 20:34:08 +0000"  >&lt;p&gt;Hi Isaac,&lt;/p&gt;

&lt;p&gt;I ran the same tests twice on the same test nodes with the same noop-scheduler configuration as in the previous comments. Both runs hung at performance-sanity. Here is the test report of the latest run: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/b9ebf592-4803-11e4-a4e8-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/b9ebf592-4803-11e4-a4e8-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;So the noop scheduler does not resolve the issue.&lt;/p&gt;</comment>
                            <comment id="95593" author="isaac" created="Thu, 2 Oct 2014 22:49:01 +0000"  >&lt;p&gt;The test failure above appeared to be a different issue &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3786&quot; title=&quot;performance-sanity test_6: mkdir hung (sys_mkdir)&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3786&quot;&gt;&lt;del&gt;LU-3786&lt;/del&gt;&lt;/a&gt;:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Lustre: DEBUG MARKER: ===== mdsrate-lookup-10dirs.sh Test preparation: creating 10 dirs with 7840 files.
INFO: task mkdir:23576 blocked for more than 120 seconds.
      Not tainted 2.6.32-431.29.2.el6.x86_64 #1
&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
mkdir         D 0000000000000000     0 23576  23568 0x00000080
 ffff88003ca05db8 0000000000000086 ffff880067cb2040 ffff880067cb2040
 ffff880067cb2040 ffff880067cb2040 000000103ca05e38 ffff88005a5f9000
 ffff880067cb25f8 ffff88003ca05fd8 000000000000fbc8 ffff880067cb25f8
Call Trace:
 [&amp;lt;ffffffff811ab390&amp;gt;] ? mntput_no_expire+0x30/0x110
 [&amp;lt;ffffffff8152a5be&amp;gt;] __mutex_lock_slowpath+0x13e/0x180
 [&amp;lt;ffffffff8152a45b&amp;gt;] mutex_lock+0x2b/0x50
 [&amp;lt;ffffffff81196c00&amp;gt;] lookup_create+0x30/0xd0
 [&amp;lt;ffffffff8119ae63&amp;gt;] sys_mkdirat+0x83/0x1b0
 [&amp;lt;ffffffff811ab390&amp;gt;] ? mntput_no_expire+0x30/0x110
 [&amp;lt;ffffffff810e1e07&amp;gt;] ? audit_syscall_entry+0x1d7/0x200
 [&amp;lt;ffffffff8119afa8&amp;gt;] sys_mkdir+0x18/0x20
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;So it still looks positive that the combination of LVM elimination and noop scheduler made a difference.&lt;/p&gt;</comment>
                            <comment id="96730" author="utopiabound" created="Mon, 20 Oct 2014 17:53:18 +0000"  >&lt;p&gt;sanity-hsm/24a on review-zfs on master:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/a14816a6-5753-11e4-86e0-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/a14816a6-5753-11e4-86e0-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="97911" author="isaac" created="Thu, 30 Oct 2014 04:35:00 +0000"  >&lt;p&gt;Likely it can be fixed by this ZFS patch:&lt;br/&gt;
&lt;a href=&quot;https://github.com/zfsonlinux/zfs/pull/2828&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://github.com/zfsonlinux/zfs/pull/2828&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;I&apos;ll test it soon.&lt;/p&gt;</comment>
                            <comment id="98996" author="adilger" created="Wed, 12 Nov 2014 18:48:49 +0000"  >&lt;p&gt;I know that we prefer to use tagged upstream ZFS releases, but if this allows us to pass tests I&apos;d prefer that it be landed into our repository to use it.  In that case, it is worthwhile to post in the upstream GitHub ticket that this is needed for Lustre to pass and hopefully Brian will land it more quickly.&lt;/p&gt;</comment>
                            <comment id="99769" author="gerrit" created="Fri, 21 Nov 2014 17:10:54 +0000"  >&lt;p&gt;Nathaniel Clark (nathaniel.l.clark@intel.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/12810&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/12810&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4950&quot; title=&quot;sanity-benchmark test fsx hung: txg_sync was stuck on OSS&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4950&quot;&gt;&lt;del&gt;LU-4950&lt;/del&gt;&lt;/a&gt; build: Use new version of ZFS/SPL&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 89bdb0a802a5c6701b4e7c76f5caa5cdd2276568&lt;/p&gt;</comment>
                            <comment id="99770" author="utopiabound" created="Fri, 21 Nov 2014 17:11:13 +0000"  >&lt;p&gt;Build ZFS with this change:&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;http://review.whamcloud.com/12810&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/12810&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="101615" author="adilger" created="Mon, 15 Dec 2014 18:35:49 +0000"  >&lt;p&gt;Isaac, can you look at this issue again?  It seems the upstream Illumos patch that you were looking at didn&apos;t fix this problem in ZFS.  We are still trying to land that patch for fixing &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5242&quot; title=&quot;Test hang sanity test_132, test_133: umount ost&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5242&quot;&gt;&lt;del&gt;LU-5242&lt;/del&gt;&lt;/a&gt; via &lt;a href=&quot;http://review.whamcloud.com/13050&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/13050&lt;/a&gt; but it doesn&apos;t help this problem.&lt;/p&gt;</comment>
                            <comment id="103064" author="isaac" created="Fri, 9 Jan 2015 20:02:02 +0000"  >&lt;p&gt;Nathaniel is working on updating our ZFS/SPL version to 0.6.3-1.2 in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6105&quot; title=&quot;Update ZFS/SPL version to 0.6.3-1.2&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6105&quot;&gt;&lt;del&gt;LU-6105&lt;/del&gt;&lt;/a&gt;, which includes the following SPL fix:&lt;br/&gt;
&lt;a href=&quot;https://github.com/zfsonlinux/spl/commit/a3c1eb77721a0d511b4fe7111bb2314686570c4b&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://github.com/zfsonlinux/spl/commit/a3c1eb77721a0d511b4fe7111bb2314686570c4b&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;It looks positive that the SPL fix would resolve many of the ZIO timeouts we&apos;ve been seeing, including this one and &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5242&quot; title=&quot;Test hang sanity test_132, test_133: umount ost&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5242&quot;&gt;&lt;del&gt;LU-5242&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;</comment>
                            <comment id="109436" author="pjones" created="Wed, 11 Mar 2015 12:51:41 +0000"  >&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6105&quot; title=&quot;Update ZFS/SPL version to 0.6.3-1.2&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6105&quot;&gt;&lt;del&gt;LU-6105&lt;/del&gt;&lt;/a&gt; landed to master some time back. Are we now able to close this as a duplicate?&lt;/p&gt;</comment>
                            <comment id="109505" author="yujian" created="Wed, 11 Mar 2015 21:05:36 +0000"  >&lt;p&gt;Patch &lt;a href=&quot;http://review.whamcloud.com/13322&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/13322&lt;/a&gt; for &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6105&quot; title=&quot;Update ZFS/SPL version to 0.6.3-1.2&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6105&quot;&gt;&lt;del&gt;LU-6105&lt;/del&gt;&lt;/a&gt; landed on master branch, however, the issue in this ticket was not resolved:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/07180fae-c7cf-11e4-92dc-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/07180fae-c7cf-11e4-92dc-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/d41fb90c-c7cf-11e4-92dc-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/d41fb90c-c7cf-11e4-92dc-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Console log on OSS:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;10:51:22:txg_sync      D 0000000000000001     0  2753      2 0x00000080
10:51:22: ffff8800652c9b90 0000000000000046 00000000ffffffff 00001708d56216b3
10:51:22: 0000000000000000 ffff88006c6eeb40 000000000038fc98 ffffffffab01512e
10:51:22: ffff880063d04638 ffff8800652c9fd8 000000000000fbc8 ffff880063d04638
10:51:22:Call Trace:
10:51:22: [&amp;lt;ffffffff810a6d31&amp;gt;] ? ktime_get_ts+0xb1/0xf0
10:51:22: [&amp;lt;ffffffff81529e83&amp;gt;] io_schedule+0x73/0xc0
10:51:22: [&amp;lt;ffffffffa0143596&amp;gt;] cv_wait_common+0xa6/0x120 [spl]
10:51:22: [&amp;lt;ffffffff8109afa0&amp;gt;] ? autoremove_wake_function+0x0/0x40
10:51:22: [&amp;lt;ffffffffa0143628&amp;gt;] __cv_wait_io+0x18/0x20 [spl]
10:51:22: [&amp;lt;ffffffffa028d81b&amp;gt;] zio_wait+0xfb/0x1c0 [zfs]
10:51:22: [&amp;lt;ffffffffa0220993&amp;gt;] dsl_pool_sync+0xb3/0x3f0 [zfs]
10:51:22: [&amp;lt;ffffffffa02398bf&amp;gt;] spa_sync+0x40f/0xa70 [zfs]
10:51:22: [&amp;lt;ffffffffa0243771&amp;gt;] ? spa_txg_history_set+0xc1/0xf0 [zfs]
10:51:22: [&amp;lt;ffffffffa0246c7d&amp;gt;] txg_sync_thread+0x30d/0x520 [zfs]
10:51:22: [&amp;lt;ffffffff810591a9&amp;gt;] ? set_user_nice+0xc9/0x130
10:51:22: [&amp;lt;ffffffffa0246970&amp;gt;] ? txg_sync_thread+0x0/0x520 [zfs]
10:51:22: [&amp;lt;ffffffffa013ec3f&amp;gt;] thread_generic_wrapper+0x5f/0x70 [spl]
10:51:22: [&amp;lt;ffffffffa013ebe0&amp;gt;] ? thread_generic_wrapper+0x0/0x70 [spl]
10:51:22: [&amp;lt;ffffffff8109abf6&amp;gt;] kthread+0x96/0xa0
10:51:22: [&amp;lt;ffffffff8100c20a&amp;gt;] child_rip+0xa/0x20
10:51:22: [&amp;lt;ffffffff8109ab60&amp;gt;] ? kthread+0x0/0xa0
10:51:22: [&amp;lt;ffffffff8100c200&amp;gt;] ? child_rip+0x0/0x20
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
</comment>
                            <comment id="121753" author="sarah" created="Mon, 20 Jul 2015 21:26:42 +0000"  >&lt;p&gt;server and client:lustre-master build # 3094 RHEL6.6 zfs&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/a8e1a402-2865-11e5-8329-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/a8e1a402-2865-11e5-8329-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="137084" author="standan" created="Mon, 21 Dec 2015 20:20:51 +0000"  >&lt;p&gt;Another instance for EL6.7 Server/EL6.7 Client - ZFS&lt;br/&gt;
Master, build# 3270&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/92039c10-a275-11e5-bdef-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/92039c10-a275-11e5-bdef-5254006e85c2&lt;/a&gt;&lt;br/&gt;
Another instance for EL7.1 Server/EL7.1 Client - ZFS&lt;br/&gt;
Master, build# 3264&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/4296dba6-a135-11e5-83b8-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/4296dba6-a135-11e5-83b8-5254006e85c2&lt;/a&gt;&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;txg_sync      D 0000000000000000     0 23282      2 0x00000080
 ffff880039e73b60 0000000000000046 0000000000000000 0000a3cd3e7c281e
 ffff88007cd68ae8 ffff880037d9e600 00003b391acd9c96 ffffffffa9941bdc
 0000000006f6b1a7 0000000103dd04a2 ffff88007cd69068 ffff880039e73fd8
Call Trace:
 [&amp;lt;ffffffff815395d3&amp;gt;] io_schedule+0x73/0xc0
 [&amp;lt;ffffffffa01a6eaf&amp;gt;] cv_wait_common+0xaf/0x130 [spl]
 [&amp;lt;ffffffff810a1460&amp;gt;] ? autoremove_wake_function+0x0/0x40
 [&amp;lt;ffffffff8106ee1a&amp;gt;] ? __cond_resched+0x2a/0x40
 [&amp;lt;ffffffffa01a6f48&amp;gt;] __cv_wait_io+0x18/0x20 [spl]
 [&amp;lt;ffffffffa02b129b&amp;gt;] zio_wait+0x10b/0x1e0 [zfs]
 [&amp;lt;ffffffffa0238b1b&amp;gt;] dsl_pool_sync+0x2bb/0x440 [zfs]
 [&amp;lt;ffffffffa024fb3e&amp;gt;] spa_sync+0x35e/0xb10 [zfs]
 [&amp;lt;ffffffffa02661f8&amp;gt;] txg_sync_thread+0x3d8/0x670 [zfs]
 [&amp;lt;ffffffffa0265e20&amp;gt;] ? txg_sync_thread+0x0/0x670 [zfs]
 [&amp;lt;ffffffffa0265e20&amp;gt;] ? txg_sync_thread+0x0/0x670 [zfs]
 [&amp;lt;ffffffffa01a2258&amp;gt;] thread_generic_wrapper+0x68/0x80 [spl]
 [&amp;lt;ffffffffa01a21f0&amp;gt;] ? thread_generic_wrapper+0x0/0x80 [spl]
 [&amp;lt;ffffffff810a0fce&amp;gt;] kthread+0x9e/0xc0
 [&amp;lt;ffffffff8100c28a&amp;gt;] child_rip+0xa/0x20
 [&amp;lt;ffffffff810a0f30&amp;gt;] ? kthread+0x0/0xc0
 [&amp;lt;ffffffff8100c280&amp;gt;] ? child_rip+0x0/0x20
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="139404" author="sarah" created="Wed, 20 Jan 2016 04:05:22 +0000"  >&lt;p&gt;another instance on master build#3305 RHEL6.7 &lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/6472141a-bc84-11e5-b3b7-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/6472141a-bc84-11e5-b3b7-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="141457" author="standan" created="Fri, 5 Feb 2016 23:11:25 +0000"  >&lt;p&gt;Another instance on master for FULL - EL7.1 Server/EL7.1 Client - ZFS, build# 3314&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/b5ff5e4c-cb88-11e5-b49e-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/b5ff5e4c-cb88-11e5-b49e-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="141700" author="standan" created="Tue, 9 Feb 2016 23:53:40 +0000"  >&lt;p&gt;Another instance found for hardfailover : EL6.7 Server/Client - ZFS, tag 2.7.66, master build 3314&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/766ea3ec-cb55-11e5-b49e-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/766ea3ec-cb55-11e5-b49e-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Another instance found for hardfailover : EL7 Server/Client - ZFS, tag 2.7.66, master build 3314&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/f0dd9616-ca6e-11e5-9609-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/f0dd9616-ca6e-11e5-9609-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Another instance found for Full tag 2.7.66 - EL6.7 Server/EL6.7 Client - ZFS, build# 3314&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/98eb99ce-cb47-11e5-a59a-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/98eb99ce-cb47-11e5-a59a-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Another instance found for Full tag 2.7.66 -EL7.1 Server/EL7.1 Client - ZFS, build# 3314&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/ddc75dc6-cb88-11e5-b49e-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/ddc75dc6-cb88-11e5-b49e-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/b5ff5e4c-cb88-11e5-b49e-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/b5ff5e4c-cb88-11e5-b49e-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="141789" author="bzzz" created="Wed, 10 Feb 2016 17:52:16 +0000"  >&lt;p&gt;it would be interesting to try with &lt;a href=&quot;http://review.whamcloud.com/#/c/18341/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/18341/&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="143565" author="standan" created="Wed, 24 Feb 2016 16:53:01 +0000"  >&lt;p&gt;Another instance found on b2_8 for failover testing , build# 6.&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/54ec62da-d99d-11e5-9ebe-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/54ec62da-d99d-11e5-9ebe-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/c5a8e44c-d9c7-11e5-85dd-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/c5a8e44c-d9c7-11e5-85dd-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="154052" author="bzzz" created="Tue, 31 May 2016 11:20:58 +0000"  >&lt;p&gt;dup of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5242&quot; title=&quot;Test hang sanity test_132, test_133: umount ost&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5242&quot;&gt;&lt;del&gt;LU-5242&lt;/del&gt;&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="187846" author="sarah" created="Fri, 10 Mar 2017 17:30:20 +0000"  >&lt;p&gt;In master branch, tag-2.9.53 el7 zfs is still experiencing the same error, reopen it and see if anything else is needed&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/19e62a78-f8f9-11e6-aac4-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/19e62a78-f8f9-11e6-aac4-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;OSS console:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;07:34:00:[ 3545.939658] Lustre: DEBUG MARKER: == obdfilter-survey test 1a: Object Storage Targets survey =========================================== 07:23:02 (1487748182)
07:34:00:[ 3546.225025] Lustre: DEBUG MARKER: lctl dl | grep obdfilter
07:34:00:[ 3546.764980] Lustre: DEBUG MARKER: /usr/sbin/lctl list_nids | grep tcp | cut -f 1 -d &apos;@&apos;
07:34:00:[ 3547.846310] Lustre: Echo OBD driver; http://www.lustre.org/
07:34:00:[ 4200.108290] INFO: task txg_sync:22316 blocked for more than 120 seconds.
07:34:00:[ 4200.110327] &quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
07:34:00:
07:34:00:[ 4200.112402] txg_sync        D 0000000000000001     0 22316      2 0x00000080
07:34:00:[ 4200.116024]  ffff88003b79fac0 0000000000000046 ffff88003a438fb0 ffff88003b79ffd8
07:34:00:[ 4200.118769]  ffff88003b79ffd8 ffff88003b79ffd8 ffff88003a438fb0 ffff88007fd16c40
07:34:00:[ 4200.120870]  0000000000000000 7fffffffffffffff ffff88003bd09908 0000000000000001
07:34:00:[ 4200.123100] Call Trace:
07:34:00:[ 4200.124786]  [&amp;lt;ffffffff8168bac9&amp;gt;] schedule+0x29/0x70
07:34:00:[ 4200.126894]  [&amp;lt;ffffffff81689519&amp;gt;] schedule_timeout+0x239/0x2d0
07:34:00:[ 4200.128916]  [&amp;lt;ffffffff810c4fe2&amp;gt;] ? default_wake_function+0x12/0x20
07:34:00:[ 4200.130962]  [&amp;lt;ffffffff810ba238&amp;gt;] ? __wake_up_common+0x58/0x90
07:34:00:[ 4200.133032]  [&amp;lt;ffffffff81060c1f&amp;gt;] ? kvm_clock_get_cycles+0x1f/0x30
07:34:00:[ 4200.135083]  [&amp;lt;ffffffff8168b06e&amp;gt;] io_schedule_timeout+0xae/0x130
07:34:00:[ 4200.137129]  [&amp;lt;ffffffff810b1416&amp;gt;] ? prepare_to_wait_exclusive+0x56/0x90
07:34:00:[ 4200.139259]  [&amp;lt;ffffffff8168b108&amp;gt;] io_schedule+0x18/0x20
07:34:00:[ 4200.141213]  [&amp;lt;ffffffffa0679617&amp;gt;] cv_wait_common+0xa7/0x130 [spl]
07:34:00:[ 4200.143303]  [&amp;lt;ffffffff810b1720&amp;gt;] ? wake_up_atomic_t+0x30/0x30
07:34:00:[ 4200.145316]  [&amp;lt;ffffffffa06796f8&amp;gt;] __cv_wait_io+0x18/0x20 [spl]
07:34:00:[ 4200.147464]  [&amp;lt;ffffffffa07d351b&amp;gt;] zio_wait+0x10b/0x1f0 [zfs]
07:34:00:[ 4200.149508]  [&amp;lt;ffffffffa075ccdf&amp;gt;] dsl_pool_sync+0xbf/0x440 [zfs]
07:34:00:[ 4200.151553]  [&amp;lt;ffffffffa0777868&amp;gt;] spa_sync+0x388/0xb50 [zfs]
07:34:00:[ 4200.153602]  [&amp;lt;ffffffff810b174b&amp;gt;] ? autoremove_wake_function+0x2b/0x40
07:34:00:[ 4200.155664]  [&amp;lt;ffffffff81689c72&amp;gt;] ? mutex_lock+0x12/0x2f
07:34:00:[ 4200.157616]  [&amp;lt;ffffffffa07894e5&amp;gt;] txg_sync_thread+0x3c5/0x620 [zfs]
07:34:00:[ 4200.159653]  [&amp;lt;ffffffffa0789120&amp;gt;] ? txg_init+0x280/0x280 [zfs]
07:34:00:[ 4200.161605]  [&amp;lt;ffffffffa0674851&amp;gt;] thread_generic_wrapper+0x71/0x80 [spl]
07:34:00:[ 4200.163625]  [&amp;lt;ffffffffa06747e0&amp;gt;] ? __thread_exit+0x20/0x20 [spl]
07:34:00:[ 4200.165586]  [&amp;lt;ffffffff810b064f&amp;gt;] kthread+0xcf/0xe0
07:34:00:[ 4200.167428]  [&amp;lt;ffffffff810b0580&amp;gt;] ? kthread_create_on_node+0x140/0x140
07:34:00:[ 4200.169377]  [&amp;lt;ffffffff81696958&amp;gt;] ret_from_fork+0x58/0x90
07:34:00:[ 4200.171244]  [&amp;lt;ffffffff810b0580&amp;gt;] ? kthread_create_on_node+0x140/0x140
07:34:00:[ 4200.173334] INFO: task lctl:3231 blocked for more than 120 seconds.
07:34:00:[ 4200.175136] &quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
07:34:00:[ 4200.177040] lctl            D 0000000000000000     0  3231   3197 0x00000080
07:34:00:[ 4200.178923]  ffff88003d3db948 0000000000000082 ffff88001d2f9f60 ffff88003d3dbfd8
07:34:00:[ 4200.181132]  ffff88003d3dbfd8 ffff88003d3dbfd8 ffff88001d2f9f60 ffff88003b9c5b50
07:34:00:[ 4200.183052]  ffff88003b9c5a10 ffff88003b9c5b58 ffff88003b9c5a38 0000000000000000
07:34:00:[ 4200.185136] Call Trace:
07:34:00:[ 4200.186747]  [&amp;lt;ffffffff8168bac9&amp;gt;] schedule+0x29/0x70
07:34:00:[ 4200.188372]  [&amp;lt;ffffffffa067967d&amp;gt;] cv_wait_common+0x10d/0x130 [spl]
07:34:00:[ 4200.190210]  [&amp;lt;ffffffff810b1720&amp;gt;] ? wake_up_atomic_t+0x30/0x30
07:34:00:[ 4200.191823]  [&amp;lt;ffffffffa06796b5&amp;gt;] __cv_wait+0x15/0x20 [spl]
07:34:00:[ 4200.193657]  [&amp;lt;ffffffffa07886a3&amp;gt;] txg_wait_open+0xb3/0xf0 [zfs]
07:34:00:[ 4200.195240]  [&amp;lt;ffffffffa0743b0d&amp;gt;] dmu_tx_wait+0x34d/0x360 [zfs]
07:34:00:[ 4200.196968]  [&amp;lt;ffffffffa0743bb1&amp;gt;] dmu_tx_assign+0x91/0x4b0 [zfs]
07:34:00:[ 4200.198521]  [&amp;lt;ffffffffa0f8c05a&amp;gt;] osd_trans_start+0xaa/0x3c0 [osd_zfs]
07:34:00:[ 4200.200290]  [&amp;lt;ffffffffa108e2db&amp;gt;] ofd_trans_start+0x6b/0xe0 [ofd]
07:34:00:[ 4200.202029]  [&amp;lt;ffffffffa109460b&amp;gt;] ofd_commitrw_write+0x94b/0x1c30 [ofd]
07:34:00:[ 4200.203800]  [&amp;lt;ffffffffa109850d&amp;gt;] ofd_commitrw+0x56d/0xa30 [ofd]
07:34:00:[ 4200.205564]  [&amp;lt;ffffffffa0b30679&amp;gt;] ? lprocfs_counter_add+0xf9/0x160 [obdclass]
07:34:00:[ 4200.207167]  [&amp;lt;ffffffffa11895ea&amp;gt;] echo_client_prep_commit.isra.49+0x69a/0xc30 [obdecho]
07:34:00:[ 4200.209038]  [&amp;lt;ffffffffa11930cf&amp;gt;] echo_client_iocontrol+0x9cf/0x1c50 [obdecho]
07:34:00:[ 4200.210635]  [&amp;lt;ffffffffa0b30679&amp;gt;] ? lprocfs_counter_add+0xf9/0x160 [obdclass]
07:34:00:[ 4200.212403]  [&amp;lt;ffffffffa0b1bb1e&amp;gt;] class_handle_ioctl+0x19de/0x2150 [obdclass]
07:34:00:[ 4200.213966]  [&amp;lt;ffffffff812a8558&amp;gt;] ? security_capable+0x18/0x20
07:34:00:[ 4200.215613]  [&amp;lt;ffffffffa0b005d2&amp;gt;] obd_class_ioctl+0xd2/0x170 [obdclass]
07:34:00:[ 4200.217176]  [&amp;lt;ffffffff81212025&amp;gt;] do_vfs_ioctl+0x2d5/0x4b0
07:34:00:[ 4200.218778]  [&amp;lt;ffffffff81691ef1&amp;gt;] ? __do_page_fault+0x171/0x450
07:34:00:[ 4200.220285]  [&amp;lt;ffffffffa0b1b258&amp;gt;] ? class_handle_ioctl+0x1118/0x2150 [obdclass]
07:34:00:[ 4200.222049]  [&amp;lt;ffffffff812122a1&amp;gt;] SyS_ioctl+0xa1/0xc0
07:34:00:[ 4200.223471]  [&amp;lt;ffffffff81696a09&amp;gt;] system_call_fastpath+0x16/0x1b
07:34:00:[ 4200.225197] INFO: task lctl:3232 blocked for more than 120 seconds.
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="188057" author="bzzz" created="Mon, 13 Mar 2017 12:00:36 +0000"  >&lt;p&gt;Sarah, I can&apos;t find ofd_destroy() in the traces, I doubt this is &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4950&quot; title=&quot;sanity-benchmark test fsx hung: txg_sync was stuck on OSS&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4950&quot;&gt;&lt;del&gt;LU-4950&lt;/del&gt;&lt;/a&gt;..&lt;/p&gt;</comment>
                            <comment id="267789" author="bzzz" created="Thu, 16 Apr 2020 07:29:31 +0000"  >&lt;p&gt;a duplicate of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5242&quot; title=&quot;Test hang sanity test_132, test_133: umount ost&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5242&quot;&gt;&lt;del&gt;LU-5242&lt;/del&gt;&lt;/a&gt;&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                            <outwardlinks description="duplicates">
                                        <issuelink>
            <issuekey id="26283">LU-5575</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is duplicated by">
                                        <issuelink>
            <issuekey id="30995">LU-6812</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="32968">LU-7374</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="25269">LU-5242</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="17731">LU-2887</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="25190">LU-5214</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="28117">LU-6105</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="55515">LU-12234</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="44983">LU-9247</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="26283">LU-5575</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                            <attachment id="15845" name="zpool_events_onyx-23vm8.log" size="28587" author="yujian" created="Thu, 25 Sep 2014 19:07:25 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwkwn:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>13698</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>