<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 03:06:27 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-14055] Write performance regression caused by a commit from LU-13344</title>
                <link>https://jira.whamcloud.com/browse/LU-14055</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;&lt;a href=&quot;https://review.whamcloud.com/37834&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;commit 76626d6c52&lt;/a&gt; &quot;&lt;tt&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-13344&quot; title=&quot;Support for linux 5.6 clients&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-13344&quot;&gt;&lt;del&gt;LU-13344&lt;/del&gt;&lt;/a&gt; all: Separate debugfs and procfs handling&lt;/tt&gt;&quot; caused write performance regression.  Here is a reproducer and tested workload.&lt;/p&gt;

&lt;p&gt;Single Client(Ubuntu 18.04, 5.4.0-47-generic), 16MB O_DIRECT, FPP (128 processes)&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# mpirun --allow-run-as-root -np 128 --oversubscribe --mca btl_openib_warn_default_gid_prefix 0 --bind-to none ior -u -w -r
 -k -e -F -t 16384k -b 16384k -s 1000 -u -o /mnt/ai400x/ior.out/file --posix.odirect 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;&quot;git bisect&quot; identified a commit where the regression started.&lt;/p&gt;

&lt;p&gt;Here are the test results.&lt;br/&gt;
76626d6c52 &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-13344&quot; title=&quot;Support for linux 5.6 clients&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-13344&quot;&gt;&lt;del&gt;LU-13344&lt;/del&gt;&lt;/a&gt; all: Separate debugfs and procfs handling&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;access    bw(MiB/s)  IOPS       Latency(s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ----       ----------  ---------- ---------  --------   --------   --------   --------   ----
write     21861      1366.33    60.78       16384      16384      0.091573   93.68      40.38      93.68      0   
read      38547      2409.18    46.14       16384      16384      0.005706   53.13      8.26       53.13      0   
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;5bc1fe092c &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-13196&quot; title=&quot;Remove inode mutex for DIO reads&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-13196&quot;&gt;&lt;del&gt;LU-13196&lt;/del&gt;&lt;/a&gt; llite: Remove mutex on dio read&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;access    bw(MiB/s)  IOPS       Latency(s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ----       ----------  ---------- ---------  --------   --------   --------   --------   ----
write     32678      2042.40    58.96       16384      16384      0.105843   62.67      4.98       62.67      0   
read      38588      2411.78    45.89       16384      16384      0.004074   53.07      8.11       53.07      0   
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;master (commit 56526a90ae)&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;access    bw(MiB/s)  IOPS       Latency(s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ----       ----------  ---------- ---------  --------   --------   --------   --------   ----
write     17046      1065.37    119.02      16384      16384      0.084449   120.15     67.76      120.15     0   
read      38512      2407.00    45.04       16384      16384      0.006462   53.18      9.07       53.18      0   
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;master still has this regression, and when commit 76626d6c52 is reverted from master, the performance is back.&lt;/p&gt;

&lt;p&gt;master (commit 56526a90ae)+ revert commit 76626d6c52&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;access    bw(MiB/s)  IOPS       Latency(s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ----       ----------  ---------- ---------  --------   --------   --------   --------   ----
write     32425      2026.59    59.88       16384      16384      0.095842   63.16      4.79       63.16      0   
read      39601      2475.09    47.22       16384      16384      0.003637   51.72      5.73       51.72      0  
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment>master (commit: 56526a90ae)</environment>
        <key id="61287">LU-14055</key>
            <summary>Write performance regression caused by a commit from LU-13344</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="paf0186">Patrick Farrell</assignee>
                                    <reporter username="sihara">Shuichi Ihara</reporter>
                        <labels>
                    </labels>
                <created>Wed, 21 Oct 2020 04:12:56 +0000</created>
                <updated>Wed, 5 Jan 2022 19:51:08 +0000</updated>
                            <resolved>Wed, 5 Jan 2022 19:51:08 +0000</resolved>
                                    <version>Lustre 2.14.0</version>
                                    <fixVersion>Lustre 2.15.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>20</watches>
                                                                            <comments>
                            <comment id="282916" author="adilger" created="Wed, 21 Oct 2020 20:54:25 +0000"  >&lt;p&gt;I looked through that patch and didn&apos;t see anything obvious that would introduce a performance regression, since none of those proc/seq files should be anywhere close to the IO path.  I think the likely source of the problem is that the change from &lt;tt&gt;/proc&lt;/tt&gt; to &lt;tt&gt;/sys/kernel/debug&lt;/tt&gt; has resulted in some tunable parameter to be lost completely (random example &lt;tt&gt;readcache_max_io_mb&lt;/tt&gt;), or is somehow &quot;disconnected&quot; from the actual parameter (e.g. the parameter can be set, but it doesn&apos;t actually change the behavior of the kernel).&lt;/p&gt;

&lt;p&gt;There weren&apos;t any parameters that I could easily see, beyond the &lt;tt&gt;readcache_max_io_mb&lt;/tt&gt; and related &lt;tt&gt;osd-ldiskfs&lt;/tt&gt; parameters that could be the culprit.&lt;/p&gt;</comment>
                            <comment id="282936" author="sihara" created="Thu, 22 Oct 2020 02:34:57 +0000"  >&lt;p&gt;it might be related to the cpu being quite busy rather than IO? The test case was 128 IOR processes against 128 CPU cores; the client&apos;s cpu was quite busy and I saw some ior threads were much slower than others in the bad case. Let me dig into this in detail.&lt;/p&gt;</comment>
                            <comment id="282939" author="adilger" created="Thu, 22 Oct 2020 03:38:51 +0000"  >&lt;p&gt;I don&apos;t think &lt;em&gt;any&lt;/em&gt; of the code changed by the patch would actually be used during operation.  Are there any of tunable parameters set on the client or server that were modified by this patch?  I think the most likely cause is that one of the modified parameter settings is not working properly.  It might be possible to revert part (ideally half) of the parameter changes to see if that avoids or reintroduces the problem, to isolate it down to a specific part of the patch?&lt;/p&gt;</comment>
                            <comment id="282941" author="sihara" created="Thu, 22 Oct 2020 04:16:42 +0000"  >&lt;p&gt;I only reverted patch on client and performance was back.&lt;br/&gt;
Also, the parameters applied are listed below. No other parameters were applied on either the server or the client.&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;obdfilter.*.brw_size: 16
osd-ldiskfs.*.writethrough_cache_enable: 0
osd-ldiskfs.*.read_cache_enable: 0
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="285700" author="pjones" created="Fri, 20 Nov 2020 18:42:20 +0000"  >&lt;p&gt;Any thoughts on this one? Should we just revert this patch while we are working out the details?&lt;/p&gt;</comment>
                            <comment id="285897" author="stancheff" created="Tue, 24 Nov 2020 15:53:43 +0000"  >&lt;p&gt;I looked through the patch again and I do not see anything obvious.&lt;br/&gt;
It would be fine to revert this for now.&lt;/p&gt;</comment>
                            <comment id="285899" author="simmonsja" created="Tue, 24 Nov 2020 16:12:30 +0000"  >&lt;p&gt;I can&apos;t reproduce this problem. Is it only an Ubuntu issue? Can you reproduce it Shaun? Does it break mpirun or mdtest / IOR? Lustre is only used by mdtest to call&#160;llapi_dir_set_default_lmv_stripe() for the -g option. What setup do you have? Everything on one node or all VMs?&lt;/p&gt;</comment>
                            <comment id="285903" author="stancheff" created="Tue, 24 Nov 2020 16:35:21 +0000"  >&lt;p&gt;No I don&apos;t have a reproducer here. My ubuntu testing is just a couple of VMs.&lt;br/&gt;
If it is 5.3/4 kernel related and not specific to Ubuntu then I expect I will have something available soonish but not immediately.&lt;/p&gt;</comment>
                            <comment id="285954" author="adilger" created="Wed, 25 Nov 2020 04:55:42 +0000"  >&lt;p&gt;Shaun, would you be able to post a handful of different patches that revert different parts of patch &lt;a href=&quot;https://review.whamcloud.com/37834&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/37834&lt;/a&gt; to see if Ihara can isolate which part is slowing things down?&lt;/p&gt;</comment>
                            <comment id="285961" author="stancheff" created="Wed, 25 Nov 2020 08:24:15 +0000"  >&lt;p&gt;Sounds good. I will post a series of reverts.&lt;/p&gt;</comment>
                            <comment id="286466" author="gerrit" created="Wed, 2 Dec 2020 10:42:05 +0000"  >&lt;p&gt;Shaun Tancheff (shaun.tancheff@hpe.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/40833&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/40833&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14055&quot; title=&quot;Write performance regression caused by an commit from LU-13344&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14055&quot;&gt;&lt;del&gt;LU-14055&lt;/del&gt;&lt;/a&gt; all: Enable debugfs macros fallback to PDE_DATA&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 6ef6b0b9b9ee670930a7a1863f4903f783f58055&lt;/p&gt;</comment>
                            <comment id="286484" author="gerrit" created="Wed, 2 Dec 2020 15:22:08 +0000"  >&lt;p&gt;Shaun Tancheff (shaun.tancheff@hpe.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/40835&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/40835&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14055&quot; title=&quot;Write performance regression caused by an commit from LU-13344&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14055&quot;&gt;&lt;del&gt;LU-14055&lt;/del&gt;&lt;/a&gt; all: Revert &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-13344&quot; title=&quot;Support for linux 5.6 clients&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-13344&quot;&gt;&lt;del&gt;LU-13344&lt;/del&gt;&lt;/a&gt; 1 of 7&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 3a42232a0a536758121cd1ebcb4c73334d1d898f&lt;/p&gt;</comment>
                            <comment id="286485" author="gerrit" created="Wed, 2 Dec 2020 15:22:09 +0000"  >&lt;p&gt;Shaun Tancheff (shaun.tancheff@hpe.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/40836&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/40836&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14055&quot; title=&quot;Write performance regression caused by an commit from LU-13344&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14055&quot;&gt;&lt;del&gt;LU-14055&lt;/del&gt;&lt;/a&gt; all: Revert &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-13344&quot; title=&quot;Support for linux 5.6 clients&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-13344&quot;&gt;&lt;del&gt;LU-13344&lt;/del&gt;&lt;/a&gt; 2 of 7&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: e2c1be578c090153aaaa752ac8bad2f6668923d2&lt;/p&gt;</comment>
                            <comment id="286486" author="gerrit" created="Wed, 2 Dec 2020 15:22:09 +0000"  >&lt;p&gt;Shaun Tancheff (shaun.tancheff@hpe.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/40837&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/40837&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14055&quot; title=&quot;Write performance regression caused by an commit from LU-13344&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14055&quot;&gt;&lt;del&gt;LU-14055&lt;/del&gt;&lt;/a&gt; all: Revert &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-13344&quot; title=&quot;Support for linux 5.6 clients&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-13344&quot;&gt;&lt;del&gt;LU-13344&lt;/del&gt;&lt;/a&gt; 3 of 7&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: a650c8a62227589adc940b53a0432d28a4276274&lt;/p&gt;</comment>
                            <comment id="286487" author="gerrit" created="Wed, 2 Dec 2020 15:22:10 +0000"  >&lt;p&gt;Shaun Tancheff (shaun.tancheff@hpe.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/40838&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/40838&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14055&quot; title=&quot;Write performance regression caused by an commit from LU-13344&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14055&quot;&gt;&lt;del&gt;LU-14055&lt;/del&gt;&lt;/a&gt; all: Revert &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-13344&quot; title=&quot;Support for linux 5.6 clients&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-13344&quot;&gt;&lt;del&gt;LU-13344&lt;/del&gt;&lt;/a&gt; 4 of 7&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: e6b994fbaf5e0b7a6eef7266c6f9b0836115cbad&lt;/p&gt;</comment>
                            <comment id="286488" author="gerrit" created="Wed, 2 Dec 2020 15:22:11 +0000"  >&lt;p&gt;Shaun Tancheff (shaun.tancheff@hpe.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/40839&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/40839&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14055&quot; title=&quot;Write performance regression caused by an commit from LU-13344&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14055&quot;&gt;&lt;del&gt;LU-14055&lt;/del&gt;&lt;/a&gt; all: Revert &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-13344&quot; title=&quot;Support for linux 5.6 clients&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-13344&quot;&gt;&lt;del&gt;LU-13344&lt;/del&gt;&lt;/a&gt; 5 of 7&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: a8b7b7877beea237164312f5581fe21919df87b9&lt;/p&gt;</comment>
                            <comment id="286489" author="gerrit" created="Wed, 2 Dec 2020 15:22:11 +0000"  >&lt;p&gt;Shaun Tancheff (shaun.tancheff@hpe.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/40840&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/40840&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14055&quot; title=&quot;Write performance regression caused by an commit from LU-13344&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14055&quot;&gt;&lt;del&gt;LU-14055&lt;/del&gt;&lt;/a&gt; all: Revert &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-13344&quot; title=&quot;Support for linux 5.6 clients&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-13344&quot;&gt;&lt;del&gt;LU-13344&lt;/del&gt;&lt;/a&gt; 6 of 7&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 43fdb7aa30367545452d242e41b2b95fbd6832f4&lt;/p&gt;</comment>
                            <comment id="286490" author="gerrit" created="Wed, 2 Dec 2020 15:22:12 +0000"  >&lt;p&gt;Shaun Tancheff (shaun.tancheff@hpe.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/40841&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/40841&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14055&quot; title=&quot;Write performance regression caused by an commit from LU-13344&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14055&quot;&gt;&lt;del&gt;LU-14055&lt;/del&gt;&lt;/a&gt; all: Revert &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-13344&quot; title=&quot;Support for linux 5.6 clients&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-13344&quot;&gt;&lt;del&gt;LU-13344&lt;/del&gt;&lt;/a&gt; 7 of 7&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: ca7e834e5472c520b8a4443fd13d44fdacd230a6&lt;/p&gt;</comment>
                            <comment id="286494" author="adilger" created="Wed, 2 Dec 2020 16:08:38 +0000"  >&lt;p&gt;Shuichi, could you please give these patches a try to help isolate the source of the regression. &lt;/p&gt;</comment>
                            <comment id="288063" author="simmonsja" created="Fri, 18 Dec 2020 20:50:23 +0000"  >&lt;p&gt;Shuichi I pushed a fixed patch set that Shaun started. Please give it a try. Some of the RHEL8.2 builds failed due to an unrelated bug. Those patches do build as you can see in later patches building properly.&lt;/p&gt;</comment>
                            <comment id="288065" author="sihara" created="Fri, 18 Dec 2020 21:51:55 +0000"  >&lt;p&gt;Thank you James! I will test new patches on my test node and let you know.&lt;/p&gt;</comment>
                            <comment id="288413" author="sihara" created="Fri, 25 Dec 2020 00:46:21 +0000"  >&lt;p&gt;Here are the test results after the reverted patches. (Only &quot;write&quot; performance was measured.) So, it was still necessary to revert all patches to get the performance back.&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;7 of 7 patch 29,406 MiB/s
6 of 7 patch 18,791 MiB/s
5 of 7 patch 18,924 MiB/s
4 of 7 patch 18,715 MiB/s
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt; 
&lt;p&gt;I didn&apos;t test 3,2,1 of 7 patch since regression already started when 6 of 7 patch reverted. should we split a bit 7 of 7 patch to narrow down?&lt;/p&gt;</comment>
                            <comment id="288415" author="adilger" created="Fri, 25 Dec 2020 03:30:24 +0000"  >&lt;p&gt;Shuichi, just to confirm, the &quot;good&quot; number beside &quot;7 of 7&quot; is with &lt;b&gt;all 7&lt;/b&gt; of the patches applied (ie. the entire original patch reverted), and if the &quot;7 of 7&quot; patch is removed then the performance is bad?&lt;/p&gt;

&lt;p&gt;What is strange is that the &quot;7 of 7&quot; patch is not really doing much except renaming a few structs and adding &lt;b&gt;one&lt;/b&gt; variable near the end of &lt;tt&gt;struct obd_device&lt;/tt&gt;. I had previously wondered if the increase in &lt;tt&gt;struct obd_device&lt;/tt&gt; size was causing something bad with cache lines on SMP systems, or the struct became too large for some reason (it is just over 4KB now)?&lt;/p&gt;

&lt;p&gt;You could test this by reverting all of the patches, the only adding a &quot;&lt;tt&gt;void *dummy;&lt;/tt&gt;&quot; pointer where &quot;&lt;tt&gt;struct ldebugfs_vars *obd_debugfs_vars&lt;/tt&gt;&quot; is located in the &quot;7 of 7&quot; patch in &lt;a href=&quot;https://review.whamcloud.com/#/c/40841/2/lustre/include/obd.h&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/#/c/40841/2/lustre/include/obd.h&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Other than that, I can&apos;t see anything in the &quot;7 of 7&quot; patch that would cause any problems. The /proc files changed are for &quot;fid&quot; and &quot;fld&quot; services, which I don&apos;t think are being tuned at all, so it shouldn&apos;t matter even if the tunables were broken. &lt;/p&gt;</comment>
                            <comment id="288432" author="sihara" created="Sat, 26 Dec 2020 13:30:41 +0000"  >&lt;blockquote&gt;
&lt;p&gt;Shuichi, just to confirm, the &quot;good&quot; number beside &quot;7 of 7&quot; is with all 7 of the patches applied (ie. the entire original patch reverted), and if the &quot;7 of 7&quot; patch is removed then the performance is bad?&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;That&apos;s correct. Once patch &quot;7 of 7&quot; applied (reverted entire &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-13344&quot; title=&quot;Support for linux 5.6 clients&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-13344&quot;&gt;&lt;del&gt;LU-13344&lt;/del&gt;&lt;/a&gt; patch), the performance was good.&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;You could test this by reverting all of the patches, the only adding a &quot;void *dummy;&quot; pointer where &quot;struct ldebugfs_vars *obd_debugfs_vars&quot; is located in the &quot;7 of 7&quot; patch in &lt;a href=&quot;https://review.whamcloud.com/#/c/40841/2/lustre/include/obd.h&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/#/c/40841/2/lustre/include/obd.h&lt;/a&gt;&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;Okay, I will test this idea.&lt;/p&gt;
</comment>
                            <comment id="288433" author="adilger" created="Sat, 26 Dec 2020 17:17:10 +0000"  >&lt;p&gt;I noticed that even before the problematic patch is applied, there are already some places in the code using &quot;&lt;tt&gt;ldebugfs&lt;/tt&gt;&quot; that are affected by the changes in the patch. Most places are &lt;tt&gt;fid&lt;/tt&gt;, &lt;tt&gt;fld&lt;/tt&gt;, &lt;tt&gt;seq&lt;/tt&gt;, which I don&apos;t think can be harmful. There is a bit of usage in &lt;tt&gt;ldlm&lt;/tt&gt;, but it doesn&apos;t look harmful either. &lt;/p&gt;

&lt;p&gt;One important parameter that may be affected is &lt;tt&gt;llite.&amp;#42;.max_cached_mb&lt;/tt&gt;&quot;. &lt;/p&gt;

&lt;p&gt;Shuichi, the other thing you could try with and without the patch applied is &quot;&lt;tt&gt;lctl get_param -R &apos;*&apos;&lt;/tt&gt;&quot; to see if the parameters available changes between the two versions?&lt;/p&gt;</comment>
                            <comment id="288596" author="jhammond" created="Mon, 4 Jan 2021 20:26:42 +0000"  >&lt;p&gt;For the test runs, was there a default striping set on the filesystem?&lt;/p&gt;</comment>
                            <comment id="288822" author="sihara" created="Wed, 6 Jan 2021 12:59:45 +0000"  >&lt;blockquote&gt;
&lt;p&gt;For the test runs, was there a default striping set on the filesystem?&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;yes, stripe count was 1 for all tests.&lt;/p&gt;</comment>
                            <comment id="288893" author="sihara" created="Thu, 7 Jan 2021 03:12:26 +0000"  >&lt;blockquote&gt;&lt;p&gt;You could test this by reverting all of the patches, the only adding a &quot;void *dummy;&quot; pointer where &quot;struct ldebugfs_vars *obd_debugfs_vars&quot; is located in the &quot;7 of 7&quot; patch in &lt;a href=&quot;https://review.whamcloud.com/#/c/40841/2/lustre/include/obd.h&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/#/c/40841/2/lustre/include/obd.h&lt;/a&gt;&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;Andreas, here are the test results of your suggestion. And it seems that the results tell us the root cause is exactly what you pointed out. It was tested 3 times to verify.&lt;/p&gt;
&lt;div class=&apos;table-wrap&apos;&gt;
&lt;table class=&apos;confluenceTable&apos;&gt;&lt;tbody&gt;
&lt;tr&gt;
&lt;th class=&apos;confluenceTh&apos;&gt;Write(MB/s)&lt;/th&gt;
&lt;th class=&apos;confluenceTh&apos;&gt;&quot;7 of 7&quot; revert patch&lt;/th&gt;
&lt;th class=&apos;confluenceTh&apos;&gt;&quot;7 of 7&quot; revert patch + adding &quot;struct ldebugfs_vars *obd_debugfs_vars&quot;&lt;/th&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;iteration1&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;36.225&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;19.280&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;iteration2&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;35.410&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;18.781&lt;/td&gt;
&lt;/tr&gt;
&lt;tr&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;iteration3&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;33.871&lt;/td&gt;
&lt;td class=&apos;confluenceTd&apos;&gt;19.033&lt;/td&gt;
&lt;/tr&gt;
&lt;/tbody&gt;&lt;/table&gt;
&lt;/div&gt;
</comment>
                            <comment id="288897" author="neilb" created="Thu, 7 Jan 2021 04:00:39 +0000"  >&lt;p&gt;If it isn&apos;t too much trouble (and if you didn&apos;t already) can you try adding that field at the &lt;b&gt;end&lt;/b&gt; of the structure, rather than where it currently is?&lt;/p&gt;

&lt;p&gt;If that makes a difference, then my guess is that there is a hot cache-line in that struct which is being split.&lt;/p&gt;

&lt;p&gt;If it doesn&apos;t make a difference, then my guess is that the struct size crosses a boundary, resulting in obd_device_cache being shared with a different cache, which maybe does something strange.&#160; I doubt that though.&lt;/p&gt;</comment>
                            <comment id="288898" author="sihara" created="Thu, 7 Jan 2021 04:21:02 +0000"  >&lt;p&gt;I should mention that adding a &quot;void *dummy;&quot; at the top of the &quot;7 of 7&quot; revert patch also caused the same performance problem; I then tested adding &quot;struct ldebugfs_vars *obd_debugfs_vars&quot; after that. &lt;/p&gt;</comment>
                            <comment id="288903" author="adilger" created="Thu, 7 Jan 2021 07:59:12 +0000"  >&lt;p&gt;Very strange that adding an 8-byte pointer to the already-large &lt;tt&gt;struct obd_device&lt;/tt&gt; causes such a huge performance problem. According to &lt;tt&gt;pahole&lt;/tt&gt;:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
        &lt;span class=&quot;code-comment&quot;&gt;/* size: 6808, cachelines: 107, members: 98 */&lt;/span&gt;
        &lt;span class=&quot;code-comment&quot;&gt;/* sum members: 6792, holes: 2, sum holes: 8 */&lt;/span&gt;
        &lt;span class=&quot;code-comment&quot;&gt;/* sum bitfield members: 20 bits, bit holes: 1, sum bit holes: 44 bits */&lt;/span&gt;
        &lt;span class=&quot;code-comment&quot;&gt;/* paddings: 2, sum paddings: 8 */&lt;/span&gt;
        &lt;span class=&quot;code-comment&quot;&gt;/* forced alignments: 1 */&lt;/span&gt;
        &lt;span class=&quot;code-comment&quot;&gt;/* last cacheline: 24 bytes */&lt;/span&gt;
} __attribute__((__aligned__(8)));
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;Maybe it pushes the struct from 106 cachelines to 107, but I wouldn&apos;t think that would make a &lt;em&gt;huge&lt;/em&gt; difference in performance. We &lt;em&gt;could&lt;/em&gt; pack the two 4-byte holes in the struct to get back 8 bytes to solve this problem quickly before 2.14. That means the &lt;em&gt;next&lt;/em&gt; patch that touches this struct is going to cause problems again, but would also give us some breathing room to resolve this issue more completely in 2.15. I will make a patch for this.&lt;/p&gt;

&lt;p&gt;pahole shows a number of other major offenders in the struct that could be cleaned up:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
        struct rhashtable          obd_uuid_hash;        &lt;span class=&quot;code-comment&quot;&gt;/*   200   176 */&lt;/span&gt;
        struct rhltable            obd_nid_hash;         &lt;span class=&quot;code-comment&quot;&gt;/*   376   176 */&lt;/span&gt;
 
        struct obd_llog_group      obd_olg;              &lt;span class=&quot;code-comment&quot;&gt;/*   936   176 */&lt;/span&gt;

        struct hrtimer             obd_recovery_timer __attribute__((__aligned__
(8))); &lt;span class=&quot;code-comment&quot;&gt;/*  1272    80 */&lt;/span&gt;

        union {
                struct obd_device_target obt;            &lt;span class=&quot;code-comment&quot;&gt;/*  1520    96 */&lt;/span&gt;
                struct filter_obd  filter;               &lt;span class=&quot;code-comment&quot;&gt;/*  1520    96 */&lt;/span&gt;
                struct ost_obd     ost;                  &lt;span class=&quot;code-comment&quot;&gt;/*  1520    72 */&lt;/span&gt;
                struct echo_obd    echo;                 &lt;span class=&quot;code-comment&quot;&gt;/*  1520   336 */&lt;/span&gt;
                struct client_obd  cli;                  &lt;span class=&quot;code-comment&quot;&gt;/*  1520  2664 */&lt;/span&gt;
                struct echo_client_obd echo_client;      &lt;span class=&quot;code-comment&quot;&gt;/*  1520    56 */&lt;/span&gt;
                struct lov_obd     lov;                  &lt;span class=&quot;code-comment&quot;&gt;/*  1520   680 */&lt;/span&gt;
                struct lmv_obd     lmv;                  &lt;span class=&quot;code-comment&quot;&gt;/*  1520  4872 */&lt;/span&gt;
        } u;                                             &lt;span class=&quot;code-comment&quot;&gt;/*  1520  4872 */&lt;/span&gt;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;There seems to have been a &lt;b&gt;lot&lt;/b&gt; of cruft added to &lt;tt&gt;obd_device&lt;/tt&gt; over the years that is server-specific and could be moved into &lt;tt&gt;struct obd_device_target&lt;/tt&gt; instead of being kept in the common struct:&lt;/p&gt;
&lt;ul class=&quot;alternate&quot; type=&quot;square&quot;&gt;
	&lt;li&gt;&lt;tt&gt;obd_nid_hash&lt;/tt&gt; &lt;b&gt;usage&lt;/b&gt; is already under &lt;tt&gt;HAVE_SERVER_SUPPORT&lt;/tt&gt; and could be declared that way also.&#160; I have a patch for this.&#160; Moving it into &lt;tt&gt;obd_device_target&lt;/tt&gt; would be better since it would also reduce memory usage on clients that are built with server support.&lt;/li&gt;
	&lt;li&gt;&lt;tt&gt;obd_uuid_hash&lt;/tt&gt; entry is also only needed on the server. While it is referenced by &lt;tt&gt;__class_new_export()&lt;/tt&gt; (also used on the client for &quot;self&quot; exports), it should only really be needed for targets that add remote connections. The whole block that checks &lt;tt&gt;obd_uuid_equals(cluuid, obd_uuid)&lt;/tt&gt; could be under &lt;tt&gt;HAVE_SERVER_SUPPORT&lt;/tt&gt; to allow &lt;tt&gt;obd_uuid_hash&lt;/tt&gt; to also be moved into &lt;tt&gt;obd_device_target&lt;/tt&gt;, along with &lt;tt&gt;obd_uuid_add()&lt;/tt&gt; and &lt;tt&gt;obd_uuid_del()&lt;/tt&gt;. I have a patch for this.&lt;/li&gt;
	&lt;li&gt;&lt;tt&gt;obd_olg&lt;/tt&gt; is larger than it needs to be, since many of the &lt;tt&gt;LLOG&amp;#95;&amp;#42;&amp;#95;CTXT&lt;/tt&gt; contexts are no longer used (at least &lt;tt&gt;LLOG_MDS_OST_REPL_CTXT&lt;/tt&gt;, &lt;tt&gt;LLOG_SIZE_ORIG,REPL_CTXT&lt;/tt&gt;, and &lt;tt&gt;LLOG_TEST_REPL_CTXT&lt;/tt&gt;). &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5218&quot; title=&quot;Interop 2.5.1&amp;lt;-&amp;gt;2.6 failure on test suite lustre-rsync-test test_1: ASSERTION( index &amp;gt;= 0 &amp;amp;&amp;amp; index &amp;lt; LLOG_MAX_CTXTS ) failed&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5218&quot;&gt;&lt;del&gt;LU-5218&lt;/del&gt;&lt;/a&gt; explains some of the details. &lt;tt&gt;OST_SZ_REC&lt;/tt&gt;, which &lt;em&gt;seems&lt;/em&gt; to imply that &lt;tt&gt;LLOG_SIZE_ORIG_CTXT&lt;/tt&gt; is used, is itself not actually &lt;b&gt;set&lt;/b&gt; anywhere but test code, and was never used in production (it was for an old Size-on-MDS implementation that was never finished). Removing the unused &lt;tt&gt;LLOG_&amp;#42;_CTXT&lt;/tt&gt; constants will not directly shrink &lt;tt&gt;LLOG_MAX_CTXTS&lt;/tt&gt;, but may allow a more efficient mapping to be used (e.g. a simple mapping to an in-memory dense enum).&lt;/li&gt;
	&lt;li&gt;&lt;tt&gt;obd_recovery_timer&lt;/tt&gt; is only needed on the server and can move into &lt;tt&gt;obd_device_target&lt;/tt&gt;, along with all of the other &lt;tt&gt;obd&amp;#95;recovery&amp;#95;&amp;#42;&lt;/tt&gt;, &lt;tt&gt;obd&amp;#95;replay&amp;#95;&amp;#42;&lt;/tt&gt;, &lt;tt&gt;obd_lwp_export&lt;/tt&gt;, &lt;tt&gt;obd_exports_timed&lt;/tt&gt;, &lt;tt&gt;obd_eviction_timer&lt;/tt&gt;, &lt;tt&gt;obd&amp;#95;&amp;#42;&amp;#95;clients&lt;/tt&gt;, and at least some of the &lt;tt&gt;obd&amp;#95;&amp;#42;transno&amp;#42;&lt;/tt&gt; fields (though clients may use some of them). The &lt;tt&gt;target&amp;#95;&amp;#42;()&lt;/tt&gt; functions under &lt;tt&gt;HAVE_SERVER_SUPPORT&lt;/tt&gt; in &lt;tt&gt;ldlm_lib.c&lt;/tt&gt; should all be moved into &lt;tt&gt;lustre/target/tgt_recovery.c&lt;/tt&gt; (or similar) since they don&apos;t really have anything to do with LDLM.&lt;/li&gt;
&lt;/ul&gt;


&lt;p&gt;The worst offender is the device-specific union &lt;tt&gt;u&lt;/tt&gt;, with &lt;tt&gt;client_obd&lt;/tt&gt; and &lt;tt&gt;lmv_obd&lt;/tt&gt; being the largest members (though &lt;tt&gt;obd_device_target&lt;/tt&gt; may increase in size in the future). One option is to dynamically allocate this member depending on the type used, since there is typically only one &lt;tt&gt;lmv_obd&lt;/tt&gt; on a client, though &lt;tt&gt;client_obd&lt;/tt&gt; is used for most of the devices on the client so will not help much.&lt;/p&gt;

&lt;p&gt;The (almost only) offender in &lt;tt&gt;lmv_obd&lt;/tt&gt; is &lt;tt&gt;struct lu_tgt_descs lmv_mdt_descs&lt;/tt&gt;, which is a static array for all of the potential MDT devices the LMV may have (&lt;tt&gt;struct lu_tgt_desc_idx *ltd_tgt_idx&lt;span class=&quot;error&quot;&gt;&amp;#91;TGT_PTRS&amp;#93;&lt;/span&gt;&lt;/tt&gt; being the major contributor). This is really unnecessary, and could be dynamically allocated for the maximum current MDT count. I have included that into patch &lt;a href=&quot;https://review.whamcloud.com/40901&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/40901&lt;/a&gt; &quot;&lt;tt&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-13601&quot; title=&quot;page allocation failure  during mount &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-13601&quot;&gt;&lt;del&gt;LU-13601&lt;/del&gt;&lt;/a&gt; llite: avoid needless large allocations&lt;/tt&gt;&quot; but it deserves to be split into its own patch, since making &lt;tt&gt;ltd_tgt_index&lt;/tt&gt; dynamically sized is relatively easy compared to the llite changes also in that patch. I have a patch for this already.&lt;/p&gt;

&lt;p&gt;The worst offenders in &lt;tt&gt;client_obd&lt;/tt&gt; are the seven &lt;tt&gt;obd_histogram&lt;/tt&gt; fields, consuming 1848 of 2664 bytes, with 34 bytes of holes. I had a patch to dynamically allocate these structures on an as-needed basis, but it was complex and never landed. Maybe I need to revive that patch again.&lt;/p&gt;</comment>
                            <comment id="288905" author="adilger" created="Thu, 7 Jan 2021 08:41:20 +0000"  >&lt;p&gt;As for fields that might be affected by the addition of &lt;tt&gt;obd_debugfs_vars&lt;/tt&gt; there are only a few that are after this new field, and of those only a subset are used on the client:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
        &lt;span class=&quot;code-comment&quot;&gt;/* --- cacheline 101 boundary (6464 bytes) --- */&lt;/span&gt;                       
        struct ldebugfs_vars *     obd_debugfs_vars;     &lt;span class=&quot;code-comment&quot;&gt;/*  6464     8 */&lt;/span&gt;      
        atomic_t                   obd_evict_inprogress; &lt;span class=&quot;code-comment&quot;&gt;/*  6472     4 */&lt;/span&gt;      
                                                                                
        &lt;span class=&quot;code-comment&quot;&gt;/* XXX 4 bytes hole, &lt;span class=&quot;code-keyword&quot;&gt;try&lt;/span&gt; to pack */&lt;/span&gt;                                     
                                                                                
        wait_queue_head_t          obd_evict_inprogress_waitq; &lt;span class=&quot;code-comment&quot;&gt;/*  6480    24 */&lt;/span&gt;
        struct list_head           obd_evict_list;       &lt;span class=&quot;code-comment&quot;&gt;/*  6504    16 */&lt;/span&gt;      
        rwlock_t                   obd_pool_lock;        &lt;span class=&quot;code-comment&quot;&gt;/*  6520     8 */&lt;/span&gt;      
        &lt;span class=&quot;code-comment&quot;&gt;/* --- cacheline 102 boundary (6528 bytes) --- */&lt;/span&gt;                       
        __u64                      obd_pool_slv;         &lt;span class=&quot;code-comment&quot;&gt;/*  6528     8 */&lt;/span&gt;      
        &lt;span class=&quot;code-object&quot;&gt;int&lt;/span&gt;                        obd_pool_limit;       &lt;span class=&quot;code-comment&quot;&gt;/*  6536     4 */&lt;/span&gt;      
        &lt;span class=&quot;code-object&quot;&gt;int&lt;/span&gt;                        obd_conn_inprogress;  &lt;span class=&quot;code-comment&quot;&gt;/*  6540     4 */&lt;/span&gt;      
        struct lu_ref              obd_reference;        &lt;span class=&quot;code-comment&quot;&gt;/*  6544     0 */&lt;/span&gt;      
        struct kset                obd_kset;             &lt;span class=&quot;code-comment&quot;&gt;/*  6544   160 */&lt;/span&gt;      
        &lt;span class=&quot;code-comment&quot;&gt;/* --- cacheline 104 boundary (6656 bytes) was 48 bytes ago --- */&lt;/span&gt;      
        struct kobj_type           obd_ktype;            &lt;span class=&quot;code-comment&quot;&gt;/*  6704    72 */&lt;/span&gt;      
        &lt;span class=&quot;code-comment&quot;&gt;/* --- cacheline 105 boundary (6720 bytes) was 56 bytes ago --- */&lt;/span&gt;      
        struct completion          obd_kobj_unregister;  &lt;span class=&quot;code-comment&quot;&gt;/*  6776    32 */&lt;/span&gt;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;Likely candidates might be &lt;tt&gt;obd_pool_slv&lt;/tt&gt; being split from &lt;tt&gt;obd_pool_lock&lt;/tt&gt;, or something in the recently-added &lt;tt&gt;obd_kset&lt;/tt&gt; or &lt;tt&gt;obd_ktype&lt;/tt&gt; fields.  I&apos;m hoping that shrinking &lt;tt&gt;obd_device&lt;/tt&gt; will resolve the problem, since random field alignment issues causing such huge performance swings is a nightmare.&lt;/p&gt;</comment>
                            <comment id="288922" author="gerrit" created="Thu, 7 Jan 2021 13:23:17 +0000"  >&lt;p&gt;Andreas Dilger (adilger@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/41161&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/41161&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14055&quot; title=&quot;Write performance regression caused by an commit from LU-13344&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14055&quot;&gt;&lt;del&gt;LU-14055&lt;/del&gt;&lt;/a&gt; obdclass: fill hole in struct obd_device&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 29898c50da924ad146b71046ee066371058cbb6a&lt;/p&gt;</comment>
                            <comment id="288923" author="gerrit" created="Thu, 7 Jan 2021 13:23:18 +0000"  >&lt;p&gt;Andreas Dilger (adilger@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/41162&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/41162&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14055&quot; title=&quot;Write performance regression caused by an commit from LU-13344&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14055&quot;&gt;&lt;del&gt;LU-14055&lt;/del&gt;&lt;/a&gt; lmv: reduce struct lmv_obd size&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 4f6159a3f2175cdcb8eee4017ba8e0a3d70268f2&lt;/p&gt;</comment>
                            <comment id="288933" author="adilger" created="Thu, 7 Jan 2021 14:33:00 +0000"  >&lt;p&gt;Shuichi, could you please try with these two patches. The first one just decreases the struct size by 4 bytes, but I&apos;m not sure if that will be enough. The second reduces it by over 2KB, which hopefully is enough. Otherwise, it may be that there is an alignment issue with some struct (likely the new &quot;&lt;tt&gt;obd_k&amp;#42;&lt;/tt&gt;&quot; ones at the end) that will need some specific alignment requests. &lt;/p&gt;</comment>
                            <comment id="288956" author="jhammond" created="Thu, 7 Jan 2021 16:46:13 +0000"  >&lt;p&gt;I cannot tell if this is already understood but it would be useful to know if the change in performance is due to the debugfs changes being applied to the client, to the server, or both.&lt;/p&gt;</comment>
                            <comment id="288975" author="adilger" created="Thu, 7 Jan 2021 20:13:49 +0000"  >&lt;p&gt;John, it is on the client only, AFAIK. &lt;/p&gt;

&lt;p&gt;Shuichi, could you please run &quot;&lt;tt&gt;pahole&lt;/tt&gt;&quot; on the obdclass.ko module with and without the problem and attach it here. That program is part of the &quot;&lt;tt&gt;dwarves&lt;/tt&gt;&quot; RPM. &lt;/p&gt;</comment>
                            <comment id="289004" author="sihara" created="Fri, 8 Jan 2021 05:33:30 +0000"  >&lt;p&gt;Andreas, &lt;br/&gt;
 I just tested two patches, but the performance was even worse below.&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;access    bw(MiB/s)  IOPS       Latency(s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ----       ----------  ---------- ---------  --------   --------   --------   --------   ----
write     14218      888.67     161.01      16384      16384      0.159725   172.84     27.90      172.85     0   
write     14168      885.51     159.56      16384      16384      0.218456   173.46     23.85      173.46     0   
write     14093      880.82     161.54      16384      16384      0.191401   174.38     25.50      174.38     0   
&#160;&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="289081" author="gerrit" created="Fri, 8 Jan 2021 18:38:03 +0000"  >&lt;p&gt;Andreas Dilger (adilger@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/41178&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/41178&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14055&quot; title=&quot;Write performance regression caused by an commit from LU-13344&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14055&quot;&gt;&lt;del&gt;LU-14055&lt;/del&gt;&lt;/a&gt; obdclass: move obd_debugfs_vars to end obd_device&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: dd0a0df383387e6455bbad565503883433516454&lt;/p&gt;</comment>
                            <comment id="289085" author="adilger" created="Fri, 8 Jan 2021 19:09:55 +0000"  >&lt;p&gt;Shuichi, if shrinking &lt;tt&gt;struct obd_device&lt;/tt&gt; does not solve the problem, then it seems the problem is caused by a misalignment of some data structure that  follows the added &lt;tt&gt;obd_debugfs_vars&lt;/tt&gt; field.&lt;/p&gt;

&lt;p&gt;Can you please try another set of tests that move the &quot;&lt;tt&gt;obd_debugfs_vars&lt;/tt&gt;&quot; line until we isolate the problematic field.  The first test would be to move &lt;tt&gt;obd_debugfs_vars&lt;/tt&gt; to the end of the struct:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
 &lt;span class=&quot;code-comment&quot;&gt;/* --- cacheline 101 boundary (6464 bytes) --- */&lt;/span&gt;
-       struct ldebugfs_vars *     obd_debugfs_vars;     &lt;span class=&quot;code-comment&quot;&gt;/*  6464     8 */&lt;/span&gt;
        atomic_t                   obd_evict_inprogress; &lt;span class=&quot;code-comment&quot;&gt;/*  6472     4 */&lt;/span&gt;
        wait_queue_head_t          obd_evict_inprogress_waitq; &lt;span class=&quot;code-comment&quot;&gt;/*  6480    24 */&lt;/span&gt;
        struct list_head           obd_evict_list;       &lt;span class=&quot;code-comment&quot;&gt;/*  6504    16 */&lt;/span&gt;
        rwlock_t                   obd_pool_lock;        &lt;span class=&quot;code-comment&quot;&gt;/*  6520     8 */&lt;/span&gt;
        &lt;span class=&quot;code-comment&quot;&gt;/* --- cacheline 102 boundary (6528 bytes) --- */&lt;/span&gt;                
        __u64                      obd_pool_slv;         &lt;span class=&quot;code-comment&quot;&gt;/*  6528     8 */&lt;/span&gt;
        &lt;span class=&quot;code-object&quot;&gt;int&lt;/span&gt;                        obd_pool_limit;       &lt;span class=&quot;code-comment&quot;&gt;/*  6536     4 */&lt;/span&gt;
        &lt;span class=&quot;code-object&quot;&gt;int&lt;/span&gt;                        obd_conn_inprogress;  &lt;span class=&quot;code-comment&quot;&gt;/*  6540     4 */&lt;/span&gt;
        struct lu_ref              obd_reference;        &lt;span class=&quot;code-comment&quot;&gt;/*  6544     0 */&lt;/span&gt;
        struct kset                obd_kset;             &lt;span class=&quot;code-comment&quot;&gt;/*  6544   160 */&lt;/span&gt;
        &lt;span class=&quot;code-comment&quot;&gt;/* --- cacheline 104 boundary (6656 bytes) was 48 bytes ago --- */&lt;/span&gt;
        struct kobj_type           obd_ktype;            &lt;span class=&quot;code-comment&quot;&gt;/*  6704    72 */&lt;/span&gt;
        &lt;span class=&quot;code-comment&quot;&gt;/* --- cacheline 105 boundary (6720 bytes) was 56 bytes ago --- */&lt;/span&gt;
        struct completion          obd_kobj_unregister;  &lt;span class=&quot;code-comment&quot;&gt;/*  6776    32 */&lt;/span&gt;
+       struct ldebugfs_vars *     obd_debugfs_vars;     &lt;span class=&quot;code-comment&quot;&gt;/*  6464     8 */&lt;/span&gt;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;to see if this solves the problem (without my other patches).  I&apos;ve pushed a patch to do this.  If it fixes the problem, then this confirms that the problem is caused by the alignment or cacheline contention on one of the fields between &lt;tt&gt;obd_evict_inprogress&lt;/tt&gt; and &lt;tt&gt;obd_kobj_unregister&lt;/tt&gt;.  This would be enough to land for 2.14.0 to solve the problem, but I don&apos;t want to leave the reason for the problem unsolved, since it is likely to be accidentally returned again in the future (e.g. by landing my patches to shrink &lt;tt&gt;lu_tgt_desc&lt;/tt&gt; or anything else).&lt;/p&gt;

&lt;p&gt;To isolate the reason for the problem you would need to &quot;bisect&quot; the 11 fields/366 bytes to see which one is causing the slowdown.&lt;/p&gt;

&lt;p&gt;First try moving &lt;tt&gt;obd_debugfs_vars&lt;/tt&gt; after &lt;tt&gt;obd_kset&lt;/tt&gt; to see if this causes the slowdown again.  If not, then the problem is &lt;tt&gt;obd_kset&lt;/tt&gt; or earlier, so try moving it immediately before &lt;tt&gt;obd_kset&lt;/tt&gt; (this is the largest field so makes it difficult to &quot;bisect&quot; exactly).  If the problem is still not seen, move it after &lt;tt&gt;obd_evict_list&lt;/tt&gt;, etc.  Essentially, when &lt;tt&gt;obd_debugfs_vars&lt;/tt&gt; is immediately &lt;em&gt;before&lt;/em&gt; the offending struct the performance will be bad, and when it is immediately &lt;em&gt;after&lt;/em&gt; the struct then the performance problem should go away.  Once you find out what the structure is, try moving that field to be at the start of &lt;tt&gt;struct obd_device&lt;/tt&gt; so that there is no chance of it being misaligned, after &lt;tt&gt;obd_lu_dev&lt;/tt&gt; and after &lt;tt&gt;obd_recovery_expired&lt;/tt&gt;.  If these also show good performance, then this can be a permanent solution (I would prefer after &lt;tt&gt;obd_recovery_expired&lt;/tt&gt; since these bitfields are very commonly used).&lt;/p&gt;

&lt;p&gt;Please run the &quot;&lt;tt&gt;pahole&lt;/tt&gt;&quot; command on the &lt;tt&gt;obdclass.ko&lt;/tt&gt; module to show the &quot;good&quot; and &quot;bad&quot; structures to see what the problem is, and attach the results here.&lt;/p&gt;

&lt;p&gt;Neil, James, since the &lt;tt&gt;obd_kobj&lt;/tt&gt; and &lt;tt&gt;obd_ktype&lt;/tt&gt; fields are recent additions and the largest fields in this area, it seems likely that they are the culprit here.  Is there anything &quot;special&quot; about them that would require their alignment, or to avoid cacheline contention?  Are they &quot;hot&quot; and referenced/refcounted continuously during object access?&lt;/p&gt;</comment>
                            <comment id="289136" author="sihara" created="Sun, 10 Jan 2021 04:36:25 +0000"  >&lt;p&gt;patch: &lt;a href=&quot;https://review.whamcloud.com/41178&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/41178&lt;/a&gt; didn&apos;t help and it was still same slow down. let me try your &quot;bisect&quot; ideas to find if it shows good (or still bad) performance.&lt;/p&gt;</comment>
                            <comment id="289137" author="adilger" created="Sun, 10 Jan 2021 05:57:11 +0000"  >&lt;p&gt;Strange that the patch to reduce the structure to the previous size (-8 bytes) &lt;b&gt;and&lt;/b&gt; the patch to move the new 8-byte field to the end didn&apos;t help performance &lt;b&gt;and&lt;/b&gt; the patch to reduce the struct size -2000 bytes also didn&apos;t help. It is possible my &quot;-8 byte&quot; patch was too complex, and I moved too many fields around?  I had tried to keep the related fields together.&lt;/p&gt;

&lt;p&gt;Do you have any idea where the performance is being lost?  Is it high CPU usage or lock contention or something else?&lt;/p&gt;</comment>
                            <comment id="289239" author="sihara" created="Tue, 12 Jan 2021 03:46:22 +0000"  >&lt;p&gt;Attached are two flame graphs on good (reverted whole &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-13344&quot; title=&quot;Support for linux 5.6 clients&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-13344&quot;&gt;&lt;del&gt;LU-13344&lt;/del&gt;&lt;/a&gt; patches) and bad (master) cases. &lt;br/&gt;
I see huge amount of spin_locks in osc_queue_sync_pages() under ll_direct_IO() in bad case. There are also several spin_lock impacts in the functions under brw_interpret().&lt;/p&gt;</comment>
                            <comment id="289247" author="adilger" created="Tue, 12 Jan 2021 07:17:28 +0000"  >&lt;p&gt;It looks like the contended lock in &lt;span class=&quot;nobr&quot;&gt;&lt;a href=&quot;https://jira.whamcloud.com/secure/attachment/37143/37143_master.svg&quot; title=&quot;master.svg attached to LU-14055&quot;&gt;master.svg&lt;sup&gt;&lt;img class=&quot;rendericon&quot; src=&quot;https://jira.whamcloud.com/images/icons/link_attachment_7.gif&quot; height=&quot;7&quot; width=&quot;7&quot; align=&quot;absmiddle&quot; alt=&quot;&quot; border=&quot;0&quot;/&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/span&gt; is &lt;tt&gt;cl_loi_list_lock&lt;/tt&gt; in both &lt;tt&gt;osc_queue_sync_pages()&lt;/tt&gt;  and &lt;tt&gt;brw_interpret()&lt;/tt&gt;.  The &lt;tt&gt;cl_loi_list_lock&lt;/tt&gt; is in &lt;tt&gt;struct client_obd&lt;/tt&gt;.  What is strange is that this field is &lt;b&gt;before&lt;/b&gt; the added structure, so I can&apos;t see how that would be affected by the new field.  I&apos;ll have to check if there is something later in &lt;tt&gt;obd_device&lt;/tt&gt; that is being accessed under that lock.&lt;/p&gt;</comment>
                            <comment id="289483" author="jhammond" created="Thu, 14 Jan 2021 15:32:55 +0000"  >&lt;p&gt;&amp;gt; Attached are two frame graphes on good (reverted whole &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-13344&quot; title=&quot;Support for linux 5.6 clients&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-13344&quot;&gt;&lt;del&gt;LU-13344&lt;/del&gt;&lt;/a&gt; patches) and bad (master) cases. &lt;/p&gt;

&lt;p&gt;How long did perf run for the two flame graphs? One has 5x the samples as the other. master-revert.svg shows 20% in swapper and 5% in open. Had ior completed startup when perf was run?&lt;/p&gt;</comment>
                            <comment id="298472" author="gerrit" created="Sat, 10 Apr 2021 17:39:43 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/41162/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/41162/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14055&quot; title=&quot;Write performance regression caused by an commit from LU-13344&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14055&quot;&gt;&lt;del&gt;LU-14055&lt;/del&gt;&lt;/a&gt; lmv: reduce struct lmv_obd size&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: e11deeb1e6d114608eac4ee998d4cea22e30b0f5&lt;/p&gt;</comment>
                            <comment id="304492" author="paf0186" created="Mon, 14 Jun 2021 20:40:51 +0000"  >&lt;p&gt;Ihara,&lt;/p&gt;

&lt;p&gt;Ignoring entirely the apparent memory layout issue, I have two suggestions that may reduce the pain on cl_loi_list_lock.&lt;/p&gt;

&lt;p&gt;This patch just landed to master:&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://review.whamcloud.com/38214&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/38214&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;And this one which has not yet landed:&lt;br/&gt;
&lt;a href=&quot;https://review.whamcloud.com/#/c/39482/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/#/c/39482/&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;If you&apos;re able to give these a try, that would be very interesting.&#160; Both should cut the contention on cl_loi_list_lock when doing dio.&#160; I&apos;m not sure precisely what effect they&apos;ll have here, but it seems worth trying.&lt;/p&gt;</comment>
                            <comment id="304498" author="paf0186" created="Mon, 14 Jun 2021 21:00:52 +0000"  >&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/secure/ViewProfile.jspa?name=tappro&quot; class=&quot;user-hover&quot; rel=&quot;tappro&quot;&gt;tappro&lt;/a&gt; made this comment over on &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14580&quot; title=&quot;Lustre 2.12.6 performance regression&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14580&quot;&gt;LU-14580&lt;/a&gt;, and I wanted to bring it here:&lt;/p&gt;

&lt;p&gt;&#160;&lt;br/&gt;
 &quot;I don&apos;t see problems with patch itself. Increment in&#160;&lt;tt&gt;osc_consume_write_grant()&lt;/tt&gt;&#160;was removed because it is done by&#160;&lt;tt&gt;atomic_long_add_return()&lt;/tt&gt;&#160;now outside that call and it is done in both places where it is called. But maybe the patch &quot;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12687&quot; title=&quot;Fast ENOSPC on direct I/O&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12687&quot;&gt;&lt;del&gt;LU-12687&lt;/del&gt;&lt;/a&gt;&#160;osc: consume grants for direct I/O&quot; itself causes slowdown? Now grants are taken for Direct IO as well, so maybe that is related to not enough grants problem or similar. Are there any complains about grants on client during IOR run?&quot;&lt;/p&gt;

&lt;p&gt;That patch definitely has performance implications.&#160; Direct i/o will keep sending even when there are no grants - since it is already synchronous - but it significantly increases the load on the cl_loi_list_lock in some cases.&#160; The patches I noted above are aimed at that.&lt;/p&gt;

&lt;p&gt;There&apos;s still very likely a memory layout issue here, but perhaps these will help...&lt;/p&gt;</comment>
                            <comment id="304574" author="paf0186" created="Tue, 15 Jun 2021 14:26:01 +0000"  >&lt;p&gt;Ihara,&lt;/p&gt;

&lt;p&gt;FYI I had to rebase that second patch above:&lt;br/&gt;
&lt;a href=&quot;https://review.whamcloud.com/#/c/39482/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/#/c/39482/&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;It was not compatible with current master, should be fixed now.&lt;/p&gt;</comment>
                            <comment id="319257" author="pjones" created="Fri, 26 Nov 2021 21:30:12 +0000"  >&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/secure/ViewProfile.jspa?name=sihara&quot; class=&quot;user-hover&quot; rel=&quot;sihara&quot;&gt;sihara&lt;/a&gt; can this issue now be considered fixed?&lt;/p&gt;</comment>
                            <comment id="321847" author="paf0186" created="Wed, 5 Jan 2022 19:51:08 +0000"  >&lt;p&gt;My understanding is this issue is resolved, though we never fully understood it.&#160; Let me know if that&apos;s wrong and we can reopen or open a new ticket.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="41575">LU-8837</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="58314">LU-13344</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="63640">LU-14580</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="59320">LU-13601</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                            <attachment id="37142" name="master-revert.svg" size="423738" author="sihara" created="Tue, 12 Jan 2021 03:45:33 +0000"/>
                            <attachment id="37143" name="master.svg" size="251282" author="sihara" created="Tue, 12 Jan 2021 03:45:33 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i01cun:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>