<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:57:30 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
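As an illustrative sketch (assuming the standard JIRA issue-XML view URL pattern, which is not shown in this export), a request for this issue restricted to those two fields might look like:
https://jira.whamcloud.com/si/jira.issueviews:issue-xml/LU-6133/LU-6133.xml?field=key&field=summary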
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
<language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-6133] sanity test_56a: timeout</title>
                <link>https://jira.whamcloud.com/browse/LU-6133</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for Nathaniel Clark &amp;lt;nathaniel.l.clark@intel.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite runs:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/febe4ef4-9ef2-11e4-a23e-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/febe4ef4-9ef2-11e4-a23e-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/e817c76c-9dca-11e4-858d-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/e817c76c-9dca-11e4-858d-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;The sub-test test_56a failed with the following error:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;test failed to respond and timed out
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Please provide additional information about the failure here.&lt;/p&gt;

&lt;p&gt;Info required for matching: sanity 56a&lt;/p&gt;</description>
                <environment></environment>
        <key id="28244">LU-6133</key>
            <summary>sanity test_56a: timeout</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="3">Duplicate</resolution>
                                        <assignee username="utopiabound">Nathaniel Clark</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                            <label>zfs</label>
                    </labels>
                <created>Mon, 19 Jan 2015 16:09:51 +0000</created>
                <updated>Fri, 7 Apr 2017 15:50:37 +0000</updated>
                            <resolved>Fri, 7 Apr 2017 15:50:37 +0000</resolved>
                                    <version>Lustre 2.7.0</version>
                    <version>Lustre 2.8.0</version>
                                    <fixVersion>Lustre 2.8.0</fixVersion>
                    <fixVersion>Lustre 2.5.5</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>11</watches>
                                                                            <comments>
                            <comment id="103883" author="adilger" created="Mon, 19 Jan 2015 18:17:15 +0000"  >&lt;p&gt;It looks like the MDS rebooted some time after test_54e and test_56a according to the MDS syslog.  No indication why that happened without more console logs from the MDS.&lt;/p&gt;</comment>
                            <comment id="104329" author="jhammond" created="Thu, 22 Jan 2015 15:04:33 +0000"  >&lt;p&gt;There&apos;s a crash dump at shadow:/export/scratch/dumps/shadow-16vm4.shadow.whamcloud.com/10.1.4.188-2015-01-17-21:27:55/vmcore&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;crash&amp;gt; bt -l 16053
PID: 16053  TASK: ffff88007d051540  CPU: 1   COMMAND: &quot;mdt00_000&quot;
 #0 [ffff880002307e90] crash_nmi_callback at ffffffff8102fee6
    /usr/src/debug/kernel-2.6.32-431.29.2.el6/linux-2.6.32-431.29.2.el6_lustre.gffd1fc2.x86_64/arch/x86/include/asm/paravirt.h: 115
 #1 [ffff880002307ea0] notifier_call_chain at ffffffff8152f2f5
    /usr/src/debug/kernel-2.6.32-431.29.2.el6/linux-2.6.32-431.29.2.el6_lustre.gffd1fc2.x86_64/kernel/notifier.c: 95
 #2 [ffff880002307ee0] atomic_notifier_call_chain at ffffffff8152f35a
    /usr/src/debug/kernel-2.6.32-431.29.2.el6/linux-2.6.32-431.29.2.el6_lustre.gffd1fc2.x86_64/kernel/notifier.c: 192
 #3 [ffff880002307ef0] notify_die at ffffffff810a11de
    /usr/src/debug/kernel-2.6.32-431.29.2.el6/linux-2.6.32-431.29.2.el6_lustre.gffd1fc2.x86_64/kernel/notifier.c: 573
 #4 [ffff880002307f20] do_nmi at ffffffff8152cfbb
    /usr/src/debug/kernel-2.6.32-431.29.2.el6/linux-2.6.32-431.29.2.el6_lustre.gffd1fc2.x86_64/arch/x86/kernel/traps.c: 503
 #5 [ffff880002307f50] nmi at ffffffff8152c880
    /usr/src/debug/kernel-2.6.32-431.29.2.el6/linux-2.6.32-431.29.2.el6_lustre.gffd1fc2.x86_64/arch/x86_64/kernel/entry.S
    [exception RIP: _spin_lock_irq+37]
    RIP: ffffffff8152c015  RSP: ffff88006f9636e8  RFLAGS: 00000002
    RAX: 0000000000000000  RBX: ffff8800731c3600  RCX: 0000000000000000
    RDX: 00000000000000a4  RSI: ffff88006f963758  RDI: ffff8800731c3608
    RBP: ffff88006f9636e8   R8: 00ff7b4f2f8f252e   R9: 0000000000000000
    R10: ffff88006b973800  R11: 0000000000000800  R12: ffff88007d051540
    R13: ffff88006f963758  R14: ffff8800731c3608  R15: ffff88006f9638f8
    ORIG_RAX: ffffffffffffffff  CS: 0010  SS: 0018
--- &amp;lt;NMI exception stack&amp;gt; ---
 #6 [ffff88006f9636e8] _spin_lock_irq at ffffffff8152c015
    /usr/src/debug/kernel-2.6.32-431.29.2.el6/linux-2.6.32-431.29.2.el6_lustre.gffd1fc2.x86_64/arch/x86/include/asm/spinlock.h: 127
 #7 [ffff88006f9636f0] rwsem_down_failed_common at ffffffff8152bd0c
    /usr/src/debug/kernel-2.6.32-431.29.2.el6/linux-2.6.32-431.29.2.el6_lustre.gffd1fc2.x86_64/lib/rwsem.c: 161
 #8 [ffff88006f963750] rwsem_down_read_failed at ffffffff8152bef6
    /usr/src/debug/kernel-2.6.32-431.29.2.el6/linux-2.6.32-431.29.2.el6_lustre.gffd1fc2.x86_64/lib/rwsem.c: 200
 #9 [ffff88006f963790] call_rwsem_down_read_failed at ffffffff8128fbb4
    /usr/src/debug/kernel-2.6.32-431.29.2.el6/linux-2.6.32-431.29.2.el6_lustre.gffd1fc2.x86_64/arch/x86/lib/rwsem_64.S: 49
#10 [ffff88006f9637f8] zap_get_leaf_byblk at ffffffffa0256b18 [zfs]
#11 [ffff88006f963878] zap_deref_leaf at ffffffffa0256dca [zfs]
#12 [ffff88006f9638b8] fzap_lookup at ffffffffa0257430 [zfs]
#13 [ffff88006f963938] zap_lookup_norm at ffffffffa025ca21 [zfs]
#14 [ffff88006f9639a8] zap_lookup at ffffffffa025cb63 [zfs]
#15 [ffff88006f9639d8] osd_dir_lookup at ffffffffa0e9acdd [osd_zfs]
#16 [ffff88006f963a38] lod_index_lookup at ffffffffa10fb755 [lod]
#17 [ffff88006f963a48] __mdd_lookup at ffffffffa1165450 [mdd]
#18 [ffff88006f963aa8] mdd_lookup at ffffffffa1165e91 [mdd]
#19 [ffff88006f963af8] mdt_getattr_name_lock at ffffffffa10206df [mdt]
#20 [ffff88006f963b98] mdt_intent_getattr at ffffffffa1021b72 [mdt]
#21 [ffff88006f963be8] mdt_intent_policy at ffffffffa100ccf4 [mdt]
#22 [ffff88006f963c58] ldlm_lock_enqueue at ffffffffa093e4f9 [ptlrpc]
#23 [ffff88006f963cb8] ldlm_handle_enqueue0 at ffffffffa096a4bb [ptlrpc]
#24 [ffff88006f963d28] tgt_enqueue at ffffffffa09eb1c2 [ptlrpc]
#25 [ffff88006f963d48] tgt_request_handle at ffffffffa09ebdae [ptlrpc]
#26 [ffff88006f963da8] ptlrpc_main at ffffffffa099b8a1 [ptlrpc]
#27 [ffff88006f963ee8] kthread at ffffffff8109abf6
    /usr/src/debug/kernel-2.6.32-431.29.2.el6/linux-2.6.32-431.29.2.el6_lustre.gffd1fc2.x86_64/kernel/kthread.c: 78
#28 [ffff88006f963f48] kernel_thread at ffffffff8100c20a
    /usr/src/debug/kernel-2.6.32-431.29.2.el6/linux-2.6.32-431.29.2.el6_lustre.gffd1fc2.x86_64/arch/x86/kernel/entry_64.S: 1235
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="107081" author="pjones" created="Mon, 16 Feb 2015 15:41:39 +0000"  >&lt;p&gt;Nathaniel&lt;/p&gt;

&lt;p&gt;Has this issue only appeared since the upgrade to the latest ZFS maintenance release in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6105&quot; title=&quot;Update ZFS/SPL version to 0.6.3-1.2&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6105&quot;&gt;&lt;del&gt;LU-6105&lt;/del&gt;&lt;/a&gt;?&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="107120" author="utopiabound" created="Tue, 17 Feb 2015 16:27:55 +0000"  >&lt;p&gt;This has not been fixed by &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6105&quot; title=&quot;Update ZFS/SPL version to 0.6.3-1.2&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6105&quot;&gt;&lt;del&gt;LU-6105&lt;/del&gt;&lt;/a&gt; (moving to 0.6.3-1.2):&lt;/p&gt;

&lt;p&gt;recent master instance:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/cc90d598-b3a4-11e4-94dd-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/cc90d598-b3a4-11e4-94dd-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="107240" author="pjones" created="Wed, 18 Feb 2015 15:55:30 +0000"  >&lt;p&gt;My question was the &lt;em&gt;opposite&lt;/em&gt; - I wondered if the &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6105&quot; title=&quot;Update ZFS/SPL version to 0.6.3-1.2&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6105&quot;&gt;&lt;del&gt;LU-6105&lt;/del&gt;&lt;/a&gt; upgrade had introduced this problem....&lt;/p&gt;</comment>
                            <comment id="107313" author="isaac" created="Thu, 19 Feb 2015 00:19:21 +0000"  >&lt;p&gt;There was a report of the same problem for zfs-0.6.3-1.1:&lt;br/&gt;
&lt;a href=&quot;http://git.net/zfs-discuss/msg19682.html&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://git.net/zfs-discuss/msg19682.html&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;So it&apos;s unlikely to have been caused by the recent upgrade to 0.6.3-1.2.&lt;/p&gt;</comment>
                            <comment id="107324" author="pjones" created="Thu, 19 Feb 2015 04:27:41 +0000"  >&lt;p&gt;Ok thanks So it is probably just something quite rare and it was a coincidence on the timing.&lt;/p&gt;</comment>
                            <comment id="107653" author="utopiabound" created="Mon, 23 Feb 2015 15:04:31 +0000"  >&lt;p&gt;This issue does not appear with spl/zfs 0.6.3-1.1 or before.&lt;/p&gt;

&lt;p&gt;It also appears that there is a slow/stuck txg_sync thread on MDS:&lt;br/&gt;
e.g.&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;21:26:43:txg_sync      D 0000000000000000     0 15966      2 0x00000080
21:26:43: ffff88006e225b90 0000000000000046 00000000ffffffff 0000094536c0ce7a
21:26:43: 0000000000000000 ffff8800379f81a0 0000000000113874 ffffffffab454110
21:26:43: ffff8800721885f8 ffff88006e225fd8 000000000000fbc8 ffff8800721885f8
21:26:43:Call Trace:
21:26:43: [&amp;lt;ffffffff810a6d31&amp;gt;] ? ktime_get_ts+0xb1/0xf0
21:26:43: [&amp;lt;ffffffff81529e83&amp;gt;] io_schedule+0x73/0xc0
21:26:43: [&amp;lt;ffffffffa0143596&amp;gt;] cv_wait_common+0xa6/0x120 [spl]
21:26:43: [&amp;lt;ffffffff8109afa0&amp;gt;] ? autoremove_wake_function+0x0/0x40
21:26:43: [&amp;lt;ffffffffa0143628&amp;gt;] __cv_wait_io+0x18/0x20 [spl]
21:26:43: [&amp;lt;ffffffffa028d81b&amp;gt;] zio_wait+0xfb/0x1c0 [zfs]
21:26:43: [&amp;lt;ffffffffa0220993&amp;gt;] dsl_pool_sync+0xb3/0x3f0 [zfs]
21:26:43: [&amp;lt;ffffffffa02398bf&amp;gt;] spa_sync+0x40f/0xa70 [zfs]
21:26:43: [&amp;lt;ffffffffa0243771&amp;gt;] ? spa_txg_history_set+0xc1/0xf0 [zfs]
21:26:43: [&amp;lt;ffffffffa0246c7d&amp;gt;] txg_sync_thread+0x30d/0x520 [zfs]
21:26:43: [&amp;lt;ffffffff810591a9&amp;gt;] ? set_user_nice+0xc9/0x130
21:26:43: [&amp;lt;ffffffffa0246970&amp;gt;] ? txg_sync_thread+0x0/0x520 [zfs]
21:26:43: [&amp;lt;ffffffffa013ec3f&amp;gt;] thread_generic_wrapper+0x5f/0x70 [spl]
21:26:43: [&amp;lt;ffffffffa013ebe0&amp;gt;] ? thread_generic_wrapper+0x0/0x70 [spl]
21:26:43: [&amp;lt;ffffffff8109abf6&amp;gt;] kthread+0x96/0xa0
21:26:43: [&amp;lt;ffffffff8100c20a&amp;gt;] child_rip+0xa/0x20
21:26:43: [&amp;lt;ffffffff8109ab60&amp;gt;] ? kthread+0x0/0xa0
21:26:43: [&amp;lt;ffffffff8100c200&amp;gt;] ? child_rip+0x0/0x20
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="107659" author="utopiabound" created="Mon, 23 Feb 2015 16:57:27 +0000"  >&lt;p&gt;Reverting to spl/zfs 0.6.3-1.1 may be the right answer since it doesn&apos;t seem to have solved any problems (ie &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5242&quot; title=&quot;Test hang sanity test_132, test_133: umount ost&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5242&quot;&gt;&lt;del&gt;LU-5242&lt;/del&gt;&lt;/a&gt;) and has induced this one.&lt;/p&gt;</comment>
                            <comment id="107668" author="isaac" created="Mon, 23 Feb 2015 18:23:42 +0000"  >&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/secure/ViewProfile.jspa?name=utopiabound&quot; class=&quot;user-hover&quot; rel=&quot;utopiabound&quot;&gt;utopiabound&lt;/a&gt; 0.6.3-1.2 did fix a txg_sync hang that was one of the causes for &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5242&quot; title=&quot;Test hang sanity test_132, test_133: umount ost&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5242&quot;&gt;&lt;del&gt;LU-5242&lt;/del&gt;&lt;/a&gt;, I was able to reproduce it a few times with 0.6.3-1.1. In addition, here&apos;s a report of a very similar issue with spl/zfs 0.6.3-1.1: &lt;a href=&quot;http://git.net/zfs-discuss/msg19682.html&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://git.net/zfs-discuss/msg19682.html&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="107675" author="adilger" created="Mon, 23 Feb 2015 18:54:10 +0000"  >&lt;p&gt;Brian, any thoughts on this? It seems that moving to 0.6.3-1.2 has introduced a regression vs 0.6.3-1.1, but it isn&apos;t clear whether it is better to revert to -1.1 (and get back the issues resolved by that update) or stuck with -1.2?&lt;/p&gt;

&lt;p&gt;Nathaniel, do you have a solid reproducer for this locally (e.g. run sanity 10x and it will always fail once)?  Is it possible to Git bisect the -1.2 update and isolate this to a specific patch within that update?&lt;/p&gt;</comment>
                            <comment id="107683" author="behlendorf" created="Mon, 23 Feb 2015 20:07:30 +0000"  >&lt;p&gt;Since we&apos;re treating 0.6.3-x as a stable release there really wasn&apos;t much change between the tags.  Just a few few build fixes for newer kernels, changes to a few default values we&apos;ve been running forever, and a two critical bug fixes.  Nothing which should have caused a regression, here&apos;s the full commit log between 0.6.3-1.1 and 0.6.3-1.2.&lt;/p&gt;

&lt;p&gt;$ git log --oneline spl-0.6.3-1.1..spl-0.6.3-1.2&lt;br/&gt;
099c670 Tag spl-0.6.3-1.2&lt;br/&gt;
8cd930b mutex: force serialization on mutex_exit() to fix races&lt;br/&gt;
17e5cf3 Linux 3.12 compat: shrinker semantics&lt;br/&gt;
23fcd1e Linux 3.16 compat: smp_mb__after_clear_bit()&lt;br/&gt;
f322c6a Linux 3.17 compat: remove wait_on_bit action function&lt;br/&gt;
5c8a0ad Set spl_kmem_cache_slab_limit=16384 to default&lt;br/&gt;
bf56579 Set spl_kmem_cache_reclaim=0 to default&lt;/p&gt;


&lt;p&gt;$ git log --oneline zfs-0.6.3-1.1..zfs-0.6.3-1.2&lt;br/&gt;
7b13354 Tag zfs-0.6.3-1.2&lt;br/&gt;
c3db49a Fix typo in %post scriptlet lines&lt;br/&gt;
73fd1f4 Don&apos;t perform ACL-to-mode translation on empty ACL&lt;br/&gt;
a85804e Undirty freed spill blocks.&lt;br/&gt;
5b29265 Export symbols for ZIL interface&lt;br/&gt;
107077a Remove checks for mandatory locks&lt;br/&gt;
c69e727 Linux 3.12 compat: shrinker semantics&lt;/p&gt;</comment>
                            <comment id="107731" author="isaac" created="Tue, 24 Feb 2015 00:09:50 +0000"  >&lt;p&gt;There was one report of a very similar panic for 0.6.3-1.1, without running Lustre:&lt;br/&gt;
&lt;a href=&quot;https://groups.google.com/a/zfsonlinux.org/forum/#!msg/zfs-discuss/6YaxCnuVfzM/3icF7asNL5AJ&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://groups.google.com/a/zfsonlinux.org/forum/#!msg/zfs-discuss/6YaxCnuVfzM/3icF7asNL5AJ&lt;/a&gt;&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;#12 [ffff880028207f50] nmi at ffffffff81534ee0
    [exception RIP: _spin_lock_irq+40]
    RIP: ffffffff81534678  RSP: ffff88021a9a75d0  RFLAGS: 00000002
    RAX: 0000000000000000  RBX: ffff880193353b00  RCX: 0000000000000014
    RDX: 00000000000073e6  RSI: ffff88021a9a7640  RDI: ffff880193353b08
    RBP: ffff88021a9a75d0   R8: 00d67e80287a31ef   R9: ffff88020a91a000
    R10: ffff88021a9a7660  R11: ffff88020b2a7550  R12: ffff8801c68b4ae0
    R13: ffff88021a9a7640  R14: ffff880193353b08  R15: ffff88021a9a7968
    ORIG_RAX: ffffffffffffffff  CS: 0010  SS: 0018
--- &amp;lt;NMI exception stack&amp;gt; ---
#13 [ffff88021a9a75d0] _spin_lock_irq at ffffffff81534678
#14 [ffff88021a9a75d8] rwsem_down_failed_common at ffffffff8153436c
#15 [ffff88021a9a7638] rwsem_down_read_failed at ffffffff81534556
#16 [ffff88021a9a7678] call_rwsem_down_read_failed at ffffffff8129fb44
#17 [ffff88021a9a76e0] zap_get_leaf_byblk at ffffffffa0374b70 [zfs]
#18 [ffff88021a9a7750] zap_deref_leaf at ffffffffa0374dba [zfs]
#19 [ffff88021a9a7790] fzap_cursor_retrieve at ffffffffa0375cbc [zfs]
#20 [ffff88021a9a7820] zap_cursor_retrieve at ffffffffa037aceb [zfs]
#21 [ffff88021a9a78c0] zfs_purgedir at ffffffffa038302b [zfs]
#22 [ffff88021a9a7af0] zfs_unlinked_drain at ffffffffa03832de [zfs]
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="110096" author="bogl" created="Thu, 19 Mar 2015 14:52:50 +0000"  >&lt;p&gt;another seen on master:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/4233a110-ce15-11e4-8b81-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/4233a110-ce15-11e4-8b81-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="111178" author="yujian" created="Tue, 31 Mar 2015 21:54:00 +0000"  >&lt;p&gt;More failure instances on master branch:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/4233a110-ce15-11e4-8b81-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/4233a110-ce15-11e4-8b81-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/53903488-cd47-11e4-8795-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/53903488-cd47-11e4-8795-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/f437f480-cb5c-11e4-8352-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/f437f480-cb5c-11e4-8352-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/0ac8da4a-c7ff-11e4-be50-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/0ac8da4a-c7ff-11e4-be50-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/ee95d0fc-c80e-11e4-92dc-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/ee95d0fc-c80e-11e4-92dc-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                            <outwardlinks description="duplicates">
                                        <issuelink>
            <issuekey id="29562">LU-6477</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="28117">LU-6105</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzx47b:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>17092</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                </customfields>
    </item>
</channel>
</rss>