<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:31:03 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-3109] ZFS - very slow reads, OST watchdogs</title>
                <link>https://jira.whamcloud.com/browse/LU-3109</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;Running ior file-per-process. we observe one or two of the OSTs have a very excessive load factor compared to the others (load average of 112 vs LA of 0.1)&lt;br/&gt;
System log shows large number of watchdogs. IO is not failing but rates are very, very slow. &lt;br/&gt;
First watchdog (log attached) &lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;2013-04-04 11:26:05 LNet: Service thread pid 8074 was inactive &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; 200.00s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; debugging purposes:
2013-04-04 11:26:05 Pid: 8074, comm: ll_ost_io00_018
2013-04-04 11:26:05
2013-04-04 11:26:05 Call Trace:
2013-04-04 11:26:05  [&amp;lt;ffffffffa056cd40&amp;gt;] ? arc_read_nolock+0x530/0x810 [zfs]
2013-04-04 11:26:05  [&amp;lt;ffffffffa04e45ac&amp;gt;] cv_wait_common+0x9c/0x1a0 [spl]
2013-04-04 11:26:05  [&amp;lt;ffffffff81090990&amp;gt;] ? autoremove_wake_function+0x0/0x40
2013-04-04 11:26:05  [&amp;lt;ffffffffa04e46e3&amp;gt;] __cv_wait+0x13/0x20 [spl]
2013-04-04 11:26:05  [&amp;lt;ffffffffa060633b&amp;gt;] zio_wait+0xeb/0x160 [zfs]
2013-04-04 11:26:05  [&amp;lt;ffffffffa057106d&amp;gt;] dbuf_read+0x3fd/0x720 [zfs]
2013-04-04 11:26:06  [&amp;lt;ffffffffa0572c1b&amp;gt;] dbuf_prefetch+0x10b/0x2b0 [zfs]
2013-04-04 11:26:06  [&amp;lt;ffffffffa0586381&amp;gt;] dmu_zfetch_dofetch+0xf1/0x160 [zfs]
2013-04-04 11:26:06  [&amp;lt;ffffffffa0570280&amp;gt;] ? dbuf_read_done+0x0/0x110 [zfs]
2013-04-04 11:26:06  [&amp;lt;ffffffffa0587211&amp;gt;] dmu_zfetch+0xaa1/0xe40 [zfs]
2013-04-04 11:26:06  [&amp;lt;ffffffffa05710fa&amp;gt;] dbuf_read+0x48a/0x720 [zfs]
2013-04-04 11:26:06  [&amp;lt;ffffffffa0578bc9&amp;gt;] dmu_buf_hold_array_by_dnode+0x179/0x570 [zfs]
2013-04-04 11:26:06  [&amp;lt;ffffffffa0579b28&amp;gt;] dmu_buf_hold_array_by_bonus+0x68/0x90 [zfs]
2013-04-04 11:26:06  [&amp;lt;ffffffffa0d4c95d&amp;gt;] osd_bufs_get+0x49d/0x9a0 [osd_zfs]
2013-04-04 11:26:06  [&amp;lt;ffffffff81270f7c&amp;gt;] ? put_dec+0x10c/0x110
2013-04-04 11:26:06  [&amp;lt;ffffffffa0723736&amp;gt;] ? lu_object_find+0x16/0x20 [obdclass]
2013-04-04 11:26:06  [&amp;lt;ffffffffa0ded49f&amp;gt;] ofd_preprw_read+0x13f/0x7e0 [ofd]
2013-04-04 11:26:06  [&amp;lt;ffffffffa0dedec5&amp;gt;] ofd_preprw+0x385/0x1190 [ofd]
2013-04-04 11:26:06  [&amp;lt;ffffffffa0da739c&amp;gt;] obd_preprw+0x12c/0x3d0 [ost]
2013-04-04 11:26:06  [&amp;lt;ffffffffa0dace80&amp;gt;] ost_brw_read+0xd00/0x12e0 [ost]
2013-04-04 11:26:06  [&amp;lt;ffffffff812739b6&amp;gt;] ? vsnprintf+0x2b6/0x5f0
2013-04-04 11:26:06  [&amp;lt;ffffffffa035127b&amp;gt;] ? cfs_set_ptldebug_header+0x2b/0xc0 [libcfs]
2013-04-04 11:26:06  [&amp;lt;ffffffffa0361bdb&amp;gt;] ? libcfs_debug_vmsg2+0x50b/0xbb0 [libcfs]
2013-04-04 11:26:06  [&amp;lt;ffffffffa08a2f4c&amp;gt;] ? lustre_msg_get_version+0x8c/0x100 [ptlrpc]
2013-04-04 11:26:06  [&amp;lt;ffffffffa08a30a8&amp;gt;] ? lustre_msg_check_version+0xe8/0x100 [ptlrpc]
2013-04-04 11:26:06  [&amp;lt;ffffffffa0db3a63&amp;gt;] ost_handle+0x2b53/0x46f0 [ost]
2013-04-04 11:26:06  [&amp;lt;ffffffffa035e0e4&amp;gt;] ? libcfs_id2str+0x74/0xb0 [libcfs]
2013-04-04 11:26:06  [&amp;lt;ffffffffa08b21ac&amp;gt;] ptlrpc_server_handle_request+0x41c/0xdf0 [ptlrpc]
2013-04-04 11:26:06  [&amp;lt;ffffffffa03525de&amp;gt;] ? cfs_timer_arm+0xe/0x10 [libcfs]
2013-04-04 11:26:06  [&amp;lt;ffffffffa08a97e9&amp;gt;] ? ptlrpc_wait_event+0xa9/0x290 [ptlrpc]
2013-04-04 11:26:06  [&amp;lt;ffffffff81052223&amp;gt;] ? __wake_up+0x53/0x70
2013-04-04 11:26:06  [&amp;lt;ffffffffa08b36f5&amp;gt;] ptlrpc_main+0xb75/0x1870 [ptlrpc]
2013-04-04 11:26:06  [&amp;lt;ffffffffa08b2b80&amp;gt;] ? ptlrpc_main+0x0/0x1870 [ptlrpc]
2013-04-04 11:26:06  [&amp;lt;ffffffff8100c0ca&amp;gt;] child_rip+0xa/0x20
2013-04-04 11:26:06  [&amp;lt;ffffffffa08b2b80&amp;gt;] ? ptlrpc_main+0x0/0x1870 [ptlrpc]
2013-04-04 11:26:06  [&amp;lt;ffffffffa08b2b80&amp;gt;] ? ptlrpc_main+0x0/0x1870 [ptlrpc]
2013-04-04 11:26:06  [&amp;lt;ffffffff8100c0c0&amp;gt;] ? child_rip+0x0/0x20
2013-04-04 11:26:06
2013-04-04 11:26:06 LustreError: dumping log to /tmp/lustre-log.1365099965.8074
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment>Hyperion/LLNL</environment>
        <key id="18249">LU-3109</key>
            <summary>ZFS - very slow reads, OST watchdogs</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="3">Duplicate</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="cliffw">Cliff White</reporter>
                        <labels>
                            <label>performance</label>
                            <label>prz</label>
                            <label>zfs</label>
                    </labels>
                <created>Thu, 4 Apr 2013 18:45:24 +0000</created>
                <updated>Sat, 14 Feb 2015 01:11:31 +0000</updated>
                            <resolved>Sat, 14 Feb 2015 01:11:31 +0000</resolved>
                                    <version>Lustre 2.4.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>7</watches>
                                                                            <comments>
                            <comment id="55535" author="cliffw" created="Thu, 4 Apr 2013 18:46:10 +0000"  >&lt;p&gt;lustre-log from watchdog&lt;/p&gt;</comment>
                            <comment id="55537" author="keith" created="Thu, 4 Apr 2013 18:56:02 +0000"  >&lt;p&gt;A little system state info. &lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;avg-cpu:  %user   %nice %system %iowait  %steal   %idle
           0.00    0.00    1.17    0.00    0.00   98.83

Device:         rrqm/s   wrqm/s     r/s     w/s   rsec/s   wsec/s avgrq-sz avgqu-sz   await  svctm  %util
sdg               0.00     0.00  505.50    0.00 187472.50     0.00   370.87     6.35   12.53   1.98 100.00
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;The await is comparable to other systems that are not blocking. &lt;/p&gt;

</comment>
                            <comment id="55539" author="bzzz" created="Thu, 4 Apr 2013 19:09:35 +0000"  >&lt;p&gt;I saw this on a local setup a few times (over a year period).&lt;/p&gt;</comment>
                            <comment id="55541" author="keith" created="Thu, 4 Apr 2013 19:49:27 +0000"  >&lt;p&gt;With this zfs setup it is happening a bit.  There is just a ton of waiting on zfs code. &lt;/p&gt;

&lt;p&gt;There are a few servers with the same general workload; one or two of them will be in this very high load avg state.&lt;/p&gt;

&lt;p&gt;Is there some zfs data we can dump from sysfs or proc to give a better insight as to what is going on? &lt;/p&gt;</comment>
                            <comment id="55549" author="keith" created="Thu, 4 Apr 2013 21:44:46 +0000"  >&lt;p&gt;Maybe this is a storage thing:&lt;/p&gt;

&lt;p&gt;iostat -x 10 output. &lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;avg-cpu:  %user   %nice %system %iowait  %steal   %idle
           0.00    0.00    3.20    0.00    0.00   96.80

Device:         rrqm/s   wrqm/s     r/s     w/s   rsec/s   wsec/s avgrq-sz avgqu-sz   await  svctm  %util
sda               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
sdf               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
sdb               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
sdc               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
sde               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
sdd               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
sdh               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
sdi               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
sdj               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
sdk               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
sdl               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
sdg               0.00     0.00    5.30 1263.20    51.70 638661.00   503.52     3.47    2.72   0.76  96.39
dm-0              0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
dm-1              0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
dm-2              0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
dm-3              0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
dm-4              0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
dm-5              0.00  1299.00    5.30 1263.20    51.70 638661.00   503.52     3.83    3.00   0.78  98.87

&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Next &quot;normal&quot; server&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;
avg-cpu:  %user   %nice %system %iowait  %steal   %idle
           0.01    0.00    4.15    0.00    0.00   95.84

Device:         rrqm/s   wrqm/s     r/s     w/s   rsec/s   wsec/s avgrq-sz avgqu-sz   await  svctm  %util
sda               0.00     0.00    2.50 1548.10     6.50 746937.90   481.71     1.09    0.71   0.18  28.48
sdb               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
sdc               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
sdd               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
sde               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
sdf               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
sdh               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
sdg               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
sdi               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
sdj               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
sdk               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
sdl               0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
dm-0              0.00  1445.40    2.50 1548.10     6.50 746937.90   481.71     1.34    0.87   0.19  29.90
dm-1              0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
dm-2              0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
dm-3              0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
dm-4              0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
dm-5              0.00     0.00    0.00    0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;


&lt;p&gt;We seem to be going faster with 1/3 of the utility of the slower server. &lt;/p&gt;</comment>
                            <comment id="55566" author="keith" created="Fri, 5 Apr 2013 00:55:18 +0000"  >&lt;p&gt;Also there is a junk load of threads on both good and bad performing servers. &lt;/p&gt;

&lt;p&gt;hyperion-agb14@mannthey:ps aux | grep ll_ost_ | wc -l&lt;br/&gt;
202&lt;br/&gt;
hyperion-agb18@mannthey:ps aux | grep z_ | wc -l&lt;br/&gt;
90&lt;/p&gt;

&lt;p&gt;Just about 300 threads for one block device and one filesystem? &lt;/p&gt;

&lt;p&gt;There is only 16 cpus and 1 device. &lt;/p&gt;</comment>
                            <comment id="55567" author="keith" created="Fri, 5 Apr 2013 01:02:03 +0000"  >&lt;p&gt;The hotspot (high load avg) seems to move servers depending on config. &lt;/p&gt;

&lt;p&gt;&quot;normal&quot;&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;/proc/fs/lustre/ost/OSS/ost/stats @ 1365123595.929143
Name                      Cur.Count  Cur.Rate   #Events   Unit           last        min          avg        max    stddev
req_waittime              13         0          20685     [usec]          929          9       214.98     712727   8332.40 
req_qdepth                13         0          20685     [reqs]            0          0         0.00          1      0.03 
req_active                13         0          20685     [reqs]           13          1         1.28         11      1.39 
req_timeout               13         0          20685     [sec]           585         45        45.00         45      0.00 
reqbuf_avail              25         1          41400     [bufs]         1575         49        62.70         64      1.43 
ldlm_extent_enqueue       0          0          768       [reqs]            0          1         1.00          1      0.00 
ost_create                0          0          11        [usec]            0        170    958523.00    8438749 2519109.30 
ost_destroy               0          0          512       [usec]            0     356414   1530694.28    7608546 775048.51 
ost_get_info              0          0          1         [usec]            0      19398     19398.00      19398      0.00 
ost_connect               0          0          125       [usec]            0        415       498.31       1069     94.21 
obd_ping                  13         0          19268     [usec]          579          7        42.13        207     15.58 

&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;bad&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;/proc/fs/lustre/ost/OSS/ost/stats @ 1365123593.682607
Name                      Cur.Count  Cur.Rate   #Events   Unit           last        min          avg        max    stddev
req_waittime              13         0          20687     [usec]         1862         24       617.66    3054848  34127.66 
req_qdepth                13         0          20687     [reqs]            0          0         0.00          1      0.04 
req_active                13         0          20687     [reqs]           13          1         1.29         12      1.38 
req_timeout               13         0          20687     [sec]           585         45        45.00         45      0.00 
reqbuf_avail              28         1          45215     [bufs]         1764         51        62.67         64      1.45 
ldlm_extent_enqueue       0          0          768       [reqs]            0          1         1.00          1      0.00 
ost_create                0          0          11        [usec]            0         65    150352.36     575236 201593.16 
ost_destroy               0          0          512       [usec]            0     272486   1592186.24    4242310 735618.42 
ost_get_info              0          0          1         [usec]            0      17611     17611.00      17611      0.00 
ost_connect               0          0          125       [usec]            0        412       484.32       1126     73.94 
obd_ping                  13         0          19270     [usec]         2143         50       158.49       2066     26.96 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="55568" author="cliffw" created="Fri, 5 Apr 2013 01:11:06 +0000"  >&lt;p&gt;Alex, this is consistently reproducible on Hyperion - what data should we be looking at?&lt;/p&gt;</comment>
                            <comment id="55569" author="cliffw" created="Fri, 5 Apr 2013 01:26:43 +0000"  >&lt;p&gt;We are using zfs-0.6.0-rc10&lt;/p&gt;</comment>
                            <comment id="55570" author="keith" created="Fri, 5 Apr 2013 01:33:43 +0000"  >&lt;p&gt;&quot;normal&quot;&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;/proc/fs/lustre/obdfilter/lustre-OST0001/stats @ 1365124587.752293
Name                      Cur.Count  Cur.Rate   #Events   Unit           last        min          avg        max
read_bytes                0          0          1625295   [bytes]           0       4096    393794.95    1048576
write_bytes               6568       437        652523    [bytes]  6880755712       4096   1047962.14    1048576
get_info                  73         4          1538      
set_info_async            0          0          3         
connect                   0          0          125       
statfs                    3          0          1226      
create                    0          0          12        
destroy                   34         2          768       
sync                      39         2          768       
preprw                    6568       437        2277818   
commitrw                  6568       437        2277818   
ping                      108        7          23612 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;&quot;bad&quot;&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;/proc/fs/lustre/obdfilter/lustre-OST0003/stats @ 1365124582.616412
Name                      Cur.Count  Cur.Rate   #Events   Unit           last        min          avg        max
read_bytes                0          0          1226305   [bytes]           0       4096    521965.14    1048576
write_bytes               5602       373        653713    [bytes]  5861138432       4096   1042516.96    1048576
get_info                  127        8          1426      
set_info_async            0          0          2         
connect                   0          0          125       
statfs                    3          0          1223      
create                    0          0          12        
destroy                   63         4          712       
sync                      64         4          712       
preprw                    5602       373        1880018   
commitrw                  5602       373        1880018   
ping                      95         6          23601   
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Looks like a &quot;normal&quot; server is doing more work. &lt;/p&gt;</comment>
                            <comment id="55571" author="keith" created="Fri, 5 Apr 2013 02:12:09 +0000"  >&lt;p&gt;It seems our zfs is 0.6.0-rc10 and there is a 0.6.1 . &lt;/p&gt;</comment>
                            <comment id="55577" author="keith" created="Fri, 5 Apr 2013 04:01:58 +0000"  >&lt;p&gt;oprofile didn&apos;t just work. &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3111&quot; title=&quot;Build:  Debug symbols needed for the whole stack&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3111&quot;&gt;&lt;del&gt;LU-3111&lt;/del&gt;&lt;/a&gt; filed. &lt;/p&gt;</comment>
                            <comment id="55587" author="morrone" created="Fri, 5 Apr 2013 08:04:03 +0000"  >&lt;p&gt;rc10 is old, you should definitely upgrade to the latest rc of 0.6.0.  0.6.1 is a little TOO new, because packaging has changed there, and lustre will need a little tweaking to find the new paths and things automatically.  You can build by hand by giving spl and zfs paths, but the latest 0.6.0 rc will just be easier.&lt;/p&gt;</comment>
                            <comment id="107032" author="adilger" created="Sat, 14 Feb 2015 01:11:19 +0000"  >&lt;p&gt;This is probably the same as &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5278&quot; title=&quot;ZFS - many OST watchdogs with IOR&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5278&quot;&gt;&lt;del&gt;LU-5278&lt;/del&gt;&lt;/a&gt;, which has patches that improve ZFS read performance significantly.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                            <outwardlinks description="duplicates">
                                        <issuelink>
            <issuekey id="25389">LU-5278</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="17731">LU-2887</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                            <attachment id="12480" name="agb13.lustre.log.gz" size="3363807" author="cliffw" created="Thu, 4 Apr 2013 18:46:10 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvn4n:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>7557</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>