<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:08:36 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-7404] ZFS OSS - Numerous timeouts - SWL</title>
                <link>https://jira.whamcloud.com/browse/LU-7404</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;Running SWL, OSS has repeated timeouts&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;Nov  5 15:23:57 iws9 kernel: LNet: Service thread pid 23042 was inactive &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; 200.00s. The thread might be hung, or it 
might only be slow and will resume later. Dumping the stack trace &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; debugging purposes:
Nov  5 15:23:57 iws9 kernel: Pid: 23042, comm: ll_ost00_004
Nov  5 15:23:57 iws9 kernel:
Nov  5 15:23:57 iws9 kernel: Call Trace:
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffffa067c380&amp;gt;] ? vdev_mirror_child_done+0x0/0x30 [zfs]
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffff815395c3&amp;gt;] io_schedule+0x73/0xc0
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffffa05b2f8f&amp;gt;] cv_wait_common+0xaf/0x130 [spl]
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffff810a1460&amp;gt;] ? autoremove_wake_function+0x0/0x40
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffffa05b3028&amp;gt;] __cv_wait_io+0x18/0x20 [spl]
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffffa06bd2eb&amp;gt;] zio_wait+0x10b/0x1e0 [zfs]
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffffa0614939&amp;gt;] dbuf_read+0x439/0x850 [zfs]
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffffa0614ef1&amp;gt;] __dbuf_hold_impl+0x1a1/0x4f0 [zfs]
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffffa06152bd&amp;gt;] dbuf_hold_impl+0x7d/0xb0 [zfs]
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffffa0616790&amp;gt;] dbuf_hold+0x20/0x30 [zfs]
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffffa061d0d7&amp;gt;] dmu_buf_hold_noread+0x87/0x140 [zfs]
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffffa061d1cb&amp;gt;] dmu_buf_hold+0x3b/0x90 [zfs]
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffffa0612fb8&amp;gt;] ? dbuf_rele_and_unlock+0x268/0x400 [zfs]
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffffa0686e5a&amp;gt;] zap_lockdir+0x5a/0x770 [zfs]
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffff81178fcd&amp;gt;] ? kmem_cache_alloc_node_trace+0x1cd/0x200
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffffa06889ca&amp;gt;] zap_lookup_norm+0x4a/0x190 [zfs]
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffffa0688ba3&amp;gt;] zap_lookup+0x33/0x40 [zfs]
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffffa062cc76&amp;gt;] dmu_tx_hold_zap+0x146/0x210 [zfs]
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffffa1034255&amp;gt;] osd_declare_object_create+0x2a5/0x440 [osd_zfs]
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffffa11738e4&amp;gt;] ofd_precreate_objects+0x4e4/0x19d0 [ofd]
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffffa04b4b61&amp;gt;] ? libcfs_debug_msg+0x41/0x50 [libcfs]
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffffa1180a9b&amp;gt;] ? ofd_grant_create+0x23b/0x3e0 [ofd]
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffffa116384e&amp;gt;] ofd_create_hdl+0x56e/0x2640 [ofd]
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffffa0c28e80&amp;gt;] ? lustre_pack_reply_v2+0x220/0x280 [ptlrpc]
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffffa0c930ec&amp;gt;] tgt_request_handle+0x8bc/0x12e0 [ptlrpc]
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffffa0c3a9e1&amp;gt;] ptlrpc_main+0xe41/0x1910 [ptlrpc]
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffffa0c39ba0&amp;gt;] ? ptlrpc_main+0x0/0x1910 [ptlrpc]
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffff810a0fce&amp;gt;] kthread+0x9e/0xc0
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffff8100c28a&amp;gt;] child_rip+0xa/0x20
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffff810a0f30&amp;gt;] ? kthread+0x0/0xc0
Nov  5 15:23:57 iws9 kernel: [&amp;lt;ffffffff8100c280&amp;gt;] ? child_rip+0x0/0x20
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;Lustre-log dump attached&lt;/p&gt;</description>
                <environment>Hyperion /SWL 2.7.61 review build 35536 (patch &lt;a href=&quot;http://review.whamcloud.com/17053&quot;&gt;http://review.whamcloud.com/17053&lt;/a&gt; - Revert &amp;quot;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4865&quot; title=&quot;osd-zfs: increase object block size dynamically as object grows&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4865&quot;&gt;&lt;strike&gt;LU-4865&lt;/strike&gt;&lt;/a&gt; zfs: grow block size by write pattern&amp;quot;)</environment>
        <key id="33039">LU-7404</key>
            <summary>ZFS OSS - Numerous timeouts - SWL</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="jay">Jinshan Xiong</assignee>
                                    <reporter username="cliffw">Cliff White</reporter>
                        <labels>
                            <label>RZ_LS</label>
                            <label>zfs</label>
                    </labels>
                <created>Fri, 6 Nov 2015 16:46:22 +0000</created>
                <updated>Wed, 20 Mar 2019 18:43:46 +0000</updated>
                            <resolved>Thu, 3 Nov 2016 19:10:20 +0000</resolved>
                                    <version>Lustre 2.8.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>17</watches>
                                                                            <comments>
                            <comment id="132875" author="jgmitter" created="Fri, 6 Nov 2015 18:46:41 +0000"  >&lt;p&gt;Issues seen after reverting patch for &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4865&quot; title=&quot;osd-zfs: increase object block size dynamically as object grows&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4865&quot;&gt;&lt;del&gt;LU-4865&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;</comment>
                            <comment id="133176" author="adilger" created="Tue, 10 Nov 2015 19:48:11 +0000"  >&lt;p&gt;Current testing with DNE+ZFS on Hyperion has shown that 2.7.56 does &lt;b&gt;not&lt;/b&gt; have this timeout problem, while 2.7.62 &lt;b&gt;does&lt;/b&gt; have the timeouts.  Testing is underway with the 2.7.59 tag to see if the timeout problem is present there as well.    The &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6750&quot; title=&quot;missing stop callback in osd-zfs&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6750&quot;&gt;&lt;del&gt;LU-6750&lt;/del&gt;&lt;/a&gt; patch was landed as &lt;tt&gt;v2_7_56_0-5-g27929cc&lt;/tt&gt; (i.e. 5 patches past 2.7.56) so it wasn&apos;t present in the 2.7.56 testing that passed.  The &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4865&quot; title=&quot;osd-zfs: increase object block size dynamically as object grows&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4865&quot;&gt;&lt;del&gt;LU-4865&lt;/del&gt;&lt;/a&gt; patch was landed as &lt;tt&gt;v2_7_59_0-20-g3e43691&lt;/tt&gt; (i.e. 20 patches past 2.7.59) so the 2.7.59 testing will give us a good half-way mark without being affected by &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4865&quot; title=&quot;osd-zfs: increase object block size dynamically as object grows&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4865&quot;&gt;&lt;del&gt;LU-4865&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;I&apos;ve pushed &lt;a href=&quot;http://review.whamcloud.com/17112&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/17112&lt;/a&gt; to revert the &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6750&quot; title=&quot;missing stop callback in osd-zfs&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6750&quot;&gt;&lt;del&gt;LU-6750&lt;/del&gt;&lt;/a&gt; patch in addition to the &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4865&quot; title=&quot;osd-zfs: increase object block size dynamically as object grows&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4865&quot;&gt;&lt;del&gt;LU-4865&lt;/del&gt;&lt;/a&gt; patch reversion, based on the current tip of master, for the next stage of testing after 2.7.59, depending on those results.&lt;/p&gt;</comment>
                            <comment id="137339" author="adilger" created="Wed, 23 Dec 2015 21:25:20 +0000"  >&lt;p&gt;This problem has been isolated to the update from ZFS 0.6.4.2 to 0.6.5.2, commit v2_7_61_0-39-ge94d375d8a, patch &lt;a href=&quot;http://review.whamcloud.com/16399&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/16399&lt;/a&gt; &quot;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7153&quot; title=&quot;Update ZFS/SPL version to 0.6.5.2&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7153&quot;&gt;&lt;del&gt;LU-7153&lt;/del&gt;&lt;/a&gt; build: Update SPL/ZFS to 0.6.5.2&quot;.&lt;/p&gt;

&lt;p&gt;One option for debugging would be to bisect the ZFS code upstream to see which ZFS patch has introduced this.&lt;/p&gt;</comment>
                            <comment id="137356" author="yujian" created="Wed, 23 Dec 2015 21:54:42 +0000"  >&lt;p&gt;Here is the patch to reset ZFS baseline to version 0.6.4.2: &lt;a href=&quot;http://review.whamcloud.com/17712&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/17712&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="137479" author="utopiabound" created="Mon, 28 Dec 2015 13:35:02 +0000"  >&lt;p&gt;This hang isn&apos;t the same as deadlock as &lt;a href=&quot;https://github.com/zfsonlinux/spl/pull/484&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;zfsonlinux/spl#484&lt;/a&gt; &quot;Disable dynamic taskqs by default to avoid deadlock&quot; that was fixed in 0.6.5.3 is it?  If it isn&apos;t has someone opened a bug upstream for this?&lt;/p&gt;</comment>
                            <comment id="137552" author="jay" created="Tue, 29 Dec 2015 06:38:04 +0000"  >&lt;p&gt;Hi Nathaniel,&lt;/p&gt;

&lt;p&gt;Cliff has verified that the same issue can be seen on current master, where the baseline of ZFS is 0.6.5.3. I will investigate if this is a problem of OSD-ZFS, or a problem of ZFS baseline. We will file an issue upstream if it turns out to be a problem of ZFS.&lt;/p&gt;</comment>
                            <comment id="137983" author="gerrit" created="Tue, 5 Jan 2016 18:57:11 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/17712/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/17712/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7404&quot; title=&quot;ZFS OSS - Numerous timeouts - SWL&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7404&quot;&gt;&lt;del&gt;LU-7404&lt;/del&gt;&lt;/a&gt; zfs: reset ZFS baseline to 0.6.4.2&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 182b30b7699858c73a990c36c51b70c40858a1fe&lt;/p&gt;</comment>
                            <comment id="138147" author="jay" created="Wed, 6 Jan 2016 23:04:38 +0000"  >&lt;p&gt;I&apos;m dropping the priority of this issue because it&apos;s not blocking 2.8 release any more. I will keep this ticket open till I find the root cause.&lt;/p&gt;</comment>
                            <comment id="138775" author="jay" created="Wed, 13 Jan 2016 07:48:08 +0000"  >&lt;p&gt;I filed a ticket on upstream ZFS at: &lt;a href=&quot;https://github.com/zfsonlinux/zfs/issues/4210&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://github.com/zfsonlinux/zfs/issues/4210&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="139578" author="utopiabound" created="Thu, 21 Jan 2016 16:09:57 +0000"  >&lt;p&gt;Given the discussion on &lt;a href=&quot;https://github.com/zfsonlinux/zfs/issues/4210&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;zfs#4210&lt;/a&gt; should I push a patch to move to 0.6.5.4?&lt;/p&gt;</comment>
                            <comment id="139580" author="jay" created="Thu, 21 Jan 2016 16:15:06 +0000"  >&lt;p&gt;Hi Nathaniel,&lt;/p&gt;

&lt;p&gt;We&apos;ve tried 0.6.5.4 before and it didn&apos;t help.&lt;/p&gt;

&lt;p&gt;Only ZFS Master includes the patches the upstream ZFS developer mentioned and we tried that on Hyperion yesterday, unfortunately it didn&apos;t help either. &lt;/p&gt;</comment>
                            <comment id="143281" author="jay" created="Tue, 23 Feb 2016 00:38:43 +0000"  >&lt;p&gt;it turned out the root cause of this issue may be due to cache hit ratio dropped significantly introduced by the commit of multilisted ARC implementation.&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;ARC Total accesses:                                     863.95k
        Cache Hit Ratio:                99.68%  861.22k
        Cache Miss Ratio:               0.32%   2.73k
        Actual Hit Ratio:               99.68%  861.22k

        Data Demand Efficiency:         100.00% 369.92k

        CACHE HITS BY CACHE LIST:
          Most Recently Used:           30.96%  266.66k
          Most Frequently Used:         69.04%  594.56k
          Most Recently Used Ghost:     0.00%   0
          Most Frequently Used Ghost:   0.00%   0

        CACHE HITS BY DATA TYPE:
          Demand Data:                  42.95%  369.92k
          Prefetch Data:                0.00%   0
          Demand Metadata:              57.05%  491.30k
          Prefetch Metadata:            0.00%   0

        CACHE MISSES BY DATA TYPE:
          Demand Data:                  0.00%   0
          Prefetch Data:                0.00%   0
          Demand Metadata:              100.00% 2.73k
          Prefetch Metadata:            0.00%   0
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;This is the ARC stats before the patch is introduced, the cache hit ratio is close to 100%.&lt;/p&gt;

&lt;p&gt;While this is the ARC stats after the commit is landed:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;ARC Total accesses:                                     967.83k
        Cache Hit Ratio:                18.54%  179.46k
        Cache Miss Ratio:               81.46%  788.37k
        Actual Hit Ratio:               18.54%  179.46k

        Data Demand Efficiency:         10.47%  868.25k

        CACHE HITS BY CACHE LIST:
          Most Recently Used:           35.70%  64.07k
          Most Frequently Used:         64.30%  115.39k
          Most Recently Used Ghost:     3.77%   6.77k
          Most Frequently Used Ghost:   6.30%   11.30k

        CACHE HITS BY DATA TYPE:
          Demand Data:                  50.64%  90.89k
          Prefetch Data:                0.00%   0
          Demand Metadata:              49.36%  88.58k
          Prefetch Metadata:            0.00%   0

        CACHE MISSES BY DATA TYPE:
          Demand Data:                  98.60%  777.36k
          Prefetch Data:                0.00%   0
          Demand Metadata:              1.40%   11.01k
          Prefetch Metadata:            0.00%   0&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="147777" author="cliffw" created="Mon, 4 Apr 2016 22:07:44 +0000"  >&lt;p&gt;Ran latest patch from Jinshan - furnished him with debug information from SWL run.&lt;/p&gt;</comment>
                            <comment id="148672" author="jay" created="Tue, 12 Apr 2016 22:52:01 +0000"  >&lt;p&gt;From Brian @ LLNL - it looks like the underlying storage on Hyperion doesn&apos;t match TXG dirty throttle in ZFS. OSTs can only do ~50MB/s but zfs_dirty_data_max was set to 6G by default. After Brian reduced zfs_dirty_data_max to 1G, he didn&apos;t see I/O timeouts any more. This has been verified by Cliff.&lt;/p&gt;

&lt;p&gt;The reason we can&apos;t see this problem elsewhere is because it&apos;s unusual to have such a slow storage in production.&lt;/p&gt;</comment>
                            <comment id="148673" author="jay" created="Tue, 12 Apr 2016 22:55:34 +0000"  >&lt;p&gt;My theory to explain this problem is that the ARC lock contention in old ZFS releases may throttle I/O by itself.&lt;/p&gt;</comment>
                            <comment id="148676" author="adilger" created="Tue, 12 Apr 2016 23:07:40 +0000"  >&lt;p&gt;Shouldn&apos;t ZFS limit the TXG size based on the speed of the underlying storage?  I&apos;d thought that was the main feature of the dynamic TXG sizing - record how quickly the data could be flushed to disk in the previous TXG and then use it to limit it the size of the next TXG based on the desired TXG commit interval.&lt;/p&gt;</comment>
                            <comment id="148680" author="jay" created="Tue, 12 Apr 2016 23:23:55 +0000"  >&lt;p&gt;From what I have seen in the code, ZFS starts to throttle I/O when dirty data in pool reaches 60% of zfs_dirty_data_max (6GB by default on Hyperion); at the same time it will wake up quiescing thread to close the current open txg. But there is no mechanism to adjust TXG size from the time how long previous TXG was complete. This pushed the txg sync time to be about 100s on average.&lt;/p&gt;</comment>
                            <comment id="148687" author="behlendorf" created="Tue, 12 Apr 2016 23:48:31 +0000"  >&lt;p&gt;Andreas the ZFS IO throttled used to work exactly as you&apos;ve described it.  That implementation had some significant drawbacks so it was reworked in 2013.  Specifically, it often resulted in a nasty saw tooth behavior when processes hit the calculated limit (which was often wrong).  See &lt;a href=&quot;https://github.com/zfsonlinux/zfs/commit/e8b96c6007bf97cdf34869c1ffbd0ce753873a3d&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://github.com/zfsonlinux/zfs/commit/e8b96c6007bf97cdf34869c1ffbd0ce753873a3d&lt;/a&gt; for all the gory details.  Bottom line the code was updated so that `dmu_tx_assign()` now starts injecting tiny delays which gradually increase as you approach the dirty limit.  This back pressure is supposed to keep the TXG from getting too large and allow the system to settle into a steady equilibrium.&lt;/p&gt;

&lt;p&gt;Clearly that doesn&apos;t seem to be working properly for a Lustre workload even though `dmu_tx_assign()` was injecting large delays.  I suspect it may be because of the large number of Lustre IO threads all attempting to dirty ARC buffers, that wasn&apos;t exactly the use case this was designed for.  The code also makes sure to allow any caller which was delayed into the next TXG to prevent starving them.  How quickly these delays ramp up is controlled by the `zfs_delay_scale` module parameter which we should be able to tune to control this.  There&apos;s a great comment describing this in the zfs-module-parameters.5 man page which is worth reading.  See &lt;a href=&quot;https://github.com/zfsonlinux/zfs/blob/master/man/man5/zfs-module-parameters.5#L1768&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://github.com/zfsonlinux/zfs/blob/master/man/man5/zfs-module-parameters.5#L1768&lt;/a&gt;.&lt;/p&gt;</comment>
                            <comment id="149297" author="gerrit" created="Mon, 18 Apr 2016 16:36:22 +0000"  >&lt;p&gt;Jinshan Xiong (jinshan.xiong@intel.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/19632&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/19632&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7404&quot; title=&quot;ZFS OSS - Numerous timeouts - SWL&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7404&quot;&gt;&lt;del&gt;LU-7404&lt;/del&gt;&lt;/a&gt; zfs: Reset default zfs version to 0.6.5.5&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_8&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 947f37272b35ede3791616261e6a6929aebcac86&lt;/p&gt;</comment>
                            <comment id="159692" author="pjones" created="Mon, 25 Jul 2016 05:05:31 +0000"  >&lt;p&gt;AFAIK no action is required for 2.9 and master is now running against a more current 0.6.5.x ZFS release&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                                                <inwardlinks description="is duplicated by">
                                        <issuelink>
            <issuekey id="33838">LU-7602</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="32101">LU-7153</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="24075">LU-4865</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="30756">LU-6750</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="35818">LU-7987</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                            <attachment id="19548" name="iws9.t1.txt.gz" size="5101719" author="cliffw" created="Fri, 6 Nov 2015 16:46:22 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10490" key="com.atlassian.jira.plugin.system.customfieldtypes:datepicker">
                        <customfieldname>End date</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>Mon, 28 Dec 2015 16:46:22 +0000</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                            <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzxshj:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                        <customfield id="customfield_10493" key="com.atlassian.jira.plugin.system.customfieldtypes:datepicker">
                        <customfieldname>Start date</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>Fri, 6 Nov 2015 16:46:22 +0000</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                    </customfields>
    </item>
</channel>
</rss>