<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 03:07:36 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
<language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-14187] LU-14134 causes many racer timeouts</title>
                <link>https://jira.whamcloud.com/browse/LU-14187</link>
                <project id="10000" key="LU">Lustre</project>
                <description>&lt;p&gt;w/o the patch only 3 from 36 racer runs timed out. w/ the patch - 35 from 36 did.&lt;br/&gt;
&lt;/p&gt;</description>
                <environment>local testing</environment>
        <key id="61879">LU-14187</key>
            <summary>LU-14134 causes many racer timeouts</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="wshilong">Wang Shilong</assignee>
                                    <reporter username="bzzz">Alex Zhuravlev</reporter>
                        <labels>
                    </labels>
                <created>Mon, 7 Dec 2020 07:35:52 +0000</created>
                <updated>Mon, 1 Feb 2021 04:48:08 +0000</updated>
                            <resolved>Tue, 5 Jan 2021 13:00:27 +0000</resolved>
                                                    <fixVersion>Lustre 2.14.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>5</watches>
                                                                            <comments>
                            <comment id="286870" author="bzzz" created="Mon, 7 Dec 2020 14:01:14 +0000"  >&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
PID: 1677   TASK: ffff88012f27e000  CPU: 1   COMMAND: &lt;span class=&quot;code-quote&quot;&gt;&quot;ll_ost_io00_030&quot;&lt;/span&gt;
 #0 [ffff880105c5ba80] __schedule at ffffffff816af9ad
    /home/lustre/linux-4.18.0-32.el8/kernel/sched/core.c: 2853
 #1 [ffff880105c5bb18] schedule at ffffffff816b0234
    /home/lustre/linux-4.18.0-32.el8/./arch/x86/include/asm/preempt.h: 81
 #2 [ffff880105c5bb28] rwsem_down_write_failed at ffffffff816b479b
    /home/lustre/linux-4.18.0-32.el8/./arch/x86/include/asm/current.h: 15
 #3 [ffff880105c5bbb8] call_rwsem_down_write_failed at ffffffff816abb13
    /home/lustre/linux-4.18.0-32.el8/arch/x86/lib/rwsem.S: 118
 #4 [ffff880105c5bbf8] down_write at ffffffff816b3a34
    /home/lustre/linux-4.18.0-32.el8/./arch/x86/include/asm/rwsem.h: 142
 #5 [ffff880105c5bc18] osd_trunc_lock at ffffffffa0b2da21 [osd_ldiskfs]
    /home/lustre/master-mine/lustre/osd-ldiskfs/osd_io.c: 2530
 #6 [ffff880105c5bc48] osd_declare_punch at ffffffffa0b2e0fe [osd_ldiskfs]
    /home/lustre/master-mine/libcfs/include/libcfs/libcfs_debug.h: 155
 #7 [ffff880105c5bc70] ofd_object_punch at ffffffffa0e824f9 [ofd]
    /home/lustre/master-mine/lustre/include/dt_object.h: 2570
 #8 [ffff880105c5bd48] ofd_punch_hdl at ffffffffa0e6c54c [ofd]
    /home/lustre/master-mine/lustre/ofd/ofd_dev.c: 2125

PID: 6515   TASK: ffff88012c996000  CPU: 1   COMMAND: &lt;span class=&quot;code-quote&quot;&gt;&quot;ll_ost_io00_006&quot;&lt;/span&gt;
 #0 [ffff88010d40b9a8] __schedule at ffffffff816af9ad
    /home/lustre/linux-4.18.0-32.el8/kernel/sched/core.c: 2853
 #1 [ffff88010d40ba40] schedule at ffffffff816b0234
    /home/lustre/linux-4.18.0-32.el8/./arch/x86/include/asm/preempt.h: 81
 #2 [ffff88010d40ba50] wait_transaction_locked at ffffffff812ef568
    /home/lustre/linux-4.18.0-32.el8/fs/jbd2/transaction.c: 160
 #3 [ffff88010d40baa8] add_transaction_credits at ffffffff812ef700
    /home/lustre/linux-4.18.0-32.el8/fs/jbd2/transaction.c: 188
 #4 [ffff88010d40bb00] start_this_handle at ffffffff812efa1c
    /home/lustre/linux-4.18.0-32.el8/fs/jbd2/transaction.c: 357
 #5 [ffff88010d40bb80] osd_write_commit at ffffffffa0b2ccc0 [osd_ldiskfs]
    /home/lustre/master-mine/lustre/osd-ldiskfs/osd_io.c: 1476
 #6 [ffff88010d40bbf8] ofd_commitrw at ffffffffa0e892df [ofd]
    /home/lustre/master-mine/lustre/include/dt_object.h: 2530
 #7 [ffff88010d40bcb0] tgt_brw_write at ffffffffa05c756c [ptlrpc]
    /home/lustre/master-mine/libcfs/include/libcfs/libcfs_debug.h: 155
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;basically &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14134&quot; title=&quot;reduce credits for new writing potentially&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14134&quot;&gt;&lt;del&gt;LU-14134&lt;/del&gt;&lt;/a&gt; breaks transaction-locks ordering as restart (which is just start in essense) is done with the lock held&lt;/p&gt;</comment>
                            <comment id="286946" author="wshilong" created="Tue, 8 Dec 2020 01:54:45 +0000"  >&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/secure/ViewProfile.jspa?name=bzzz&quot; class=&quot;user-hover&quot; rel=&quot;bzzz&quot;&gt;bzzz&lt;/a&gt; Thanks for helping track it down.&lt;br/&gt;
Would you be specific a bit more? It looks now we hold trunc lock before start transaction, and for restart case, trunc lock is still held?&lt;/p&gt;</comment>
                            <comment id="287515" author="bzzz" created="Mon, 14 Dec 2020 20:39:42 +0000"  >&lt;p&gt;while trunc lock was involved in this specific example, I think the issue is not directly related to that one - regular dt locks (e.g. ofd_write_lock() and ofd_read_lock()) are taken within a running transaction and transaction restart breaks this. actually that&apos;s the reason why truncate was implemented via osd_trans_stop().&lt;/p&gt;</comment>
                            <comment id="287541" author="wshilong" created="Tue, 15 Dec 2020 02:32:27 +0000"  >&lt;p&gt;What do you think we move IO restart upper layer(in OFD layer), or we should implement similar way like truncate?&lt;/p&gt;</comment>
                            <comment id="287544" author="wshilong" created="Tue, 15 Dec 2020 02:51:17 +0000"  >&lt;p&gt;It looks currently, some ofd_write_lock is held outside transaction, some inside, so maybe that is reason why it could timeout even without patch&lt;/p&gt;</comment>
                            <comment id="287552" author="bzzz" created="Tue, 15 Dec 2020 05:31:47 +0000"  >&lt;blockquote&gt;
&lt;p&gt;It looks currently, some ofd_write_lock is held outside transaction, some inside, so maybe that is reason why it could timeout even without patch&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;I found this only in fallocate path which was landed just recently and this is clearly a bug.&lt;/p&gt;</comment>
                            <comment id="287559" author="wshilong" created="Tue, 15 Dec 2020 08:54:58 +0000"  >&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/secure/ViewProfile.jspa?name=bzzz&quot; class=&quot;user-hover&quot; rel=&quot;bzzz&quot;&gt;bzzz&lt;/a&gt;yup, i saw your pushed patch, i&apos;ll try to fix by moving restart to ofd layer.&lt;/p&gt;</comment>
                            <comment id="287674" author="gerrit" created="Wed, 16 Dec 2020 07:18:25 +0000"  >&lt;p&gt;Wang Shilong (wshilong@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/40991&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/40991&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14187&quot; title=&quot;LU-14134 causes many racer timeouts&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14187&quot;&gt;&lt;del&gt;LU-14187&lt;/del&gt;&lt;/a&gt; osd-ldiskfs: fix locking in write commit&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: be8337f36c01227e8a959041146577134a402977&lt;/p&gt;</comment>
                            <comment id="288274" author="bzzz" created="Wed, 23 Dec 2020 08:14:35 +0000"  >&lt;p&gt;with the latest version I hit sanity-quota/3b very often:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
== sanity-quota test 3b: Quota pools: Block soft limit (start timer, expires, stop timer) ============ 13:06:59 (1608710819)
limit 4 glbl_limit 8
grace 20 glbl_grace 40
User quota in qpool1(soft limit:4 MB  grace:20 seconds)
Creating &lt;span class=&quot;code-keyword&quot;&gt;new&lt;/span&gt; pool
Pool lustre.qpool1 created
Adding targets to pool
OST lustre-OST0000_UUID added to pool lustre.qpool1
OST lustre-OST0001_UUID added to pool lustre.qpool1
Trying to set grace &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; pool qpool1
Trying to set quota &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; pool qpool1
Waiting &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; local destroys to complete
Creating test directory
fail_val=0
fail_loc=0
Write up to soft limit
running as uid/gid/euid/egid 60000/60000/60000/60000, groups:
 [dd] [&lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt;=/dev/zero] [bs=1M] [of=/mnt/lustre/d3b.sanity-quota/f3b.sanity-quota-0] [count=4]
4+0 records in
4+0 records out
4194304 bytes (4.2 MB, 4.0 MiB) copied, 0.205771 s, 20.4 MB/s
Write to exceed soft limit
running as uid/gid/euid/egid 60000/60000/60000/60000, groups:
 [dd] [&lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt;=/dev/zero] [of=/mnt/lustre/d3b.sanity-quota/f3b.sanity-quota-0] [bs=1K] [count=10] [seek=4096]
10+0 records in
10+0 records out
10240 bytes (10 kB, 10 KiB) copied, 0.00531433 s, 1.9 MB/s
mmap write when over soft limit
running as uid/gid/euid/egid 60000/60000/60000/60000, groups:
 [multiop] [/mnt/lustre/d3b.sanity-quota/f3b.sanity-quota-0.mmap] [OT40960SMW]
Disk quotas &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; usr quota_usr (uid 60000):
     Filesystem  kbytes   quota   limit   grace   files   quota   limit   grace
    /mnt/lustre    4148    8192       0       -       2       0       0       -
lustre-MDT0000_UUID
                      0       -       0       -       2       -       0       -
lustre-OST0000_UUID
                   4108       -    4144       -       -       -       -       -
lustre-OST0001_UUID
                     40*      -      40       -       -       -       -       -
Total allocated inode limit: 0, total allocated block limit: 4184
Disk quotas &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; grp quota_usr (gid 60000):
     Filesystem  kbytes   quota   limit   grace   files   quota   limit   grace
    /mnt/lustre    4148       0       0       -       2       0       0       -
lustre-MDT0000_UUID
                      0       -       0       -       2       -       0       -
lustre-OST0000_UUID
                   4108       -       0       -       -       -       -       -
lustre-OST0001_UUID
                     40       -       0       -       -       -       -       -
Total allocated inode limit: 0, total allocated block limit: 0
Disk quotas &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; prj 1000 (pid 1000):
     Filesystem  kbytes   quota   limit   grace   files   quota   limit   grace
    /mnt/lustre       0       0       0       -       0       0       0       -
lustre-MDT0000_UUID
                      0       -       0       -       0       -       0       -
lustre-OST0000_UUID
                      0       -       0       -       -       -       -       -
lustre-OST0001_UUID
                      0       -       0       -       -       -       -       -
Total allocated inode limit: 0, total allocated block limit: 0
Block grace time: 40s; Inode grace time: 1w
Block grace time: 1w; Inode grace time: 1w
Block grace time: 1w; Inode grace time: 1w
Write before timer goes off
running as uid/gid/euid/egid 60000/60000/60000/60000, groups:
 [dd] [&lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt;=/dev/zero] [of=/mnt/lustre/d3b.sanity-quota/f3b.sanity-quota-0] [bs=1K] [count=10] [seek=5120]
10+0 records in
10+0 records out
10240 bytes (10 kB, 10 KiB) copied, 0.011448 s, 894 kB/s
Quota info &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; qpool1:
Disk quotas &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; usr quota_usr (uid 60000):
     Filesystem  kbytes   quota   limit   grace   files   quota   limit   grace
    /mnt/lustre    4160*   4096       0     20s       2       0       0       -
Sleep through grace ...
...sleep 25 seconds
Disk quotas &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; usr quota_usr (uid 60000):
     Filesystem  kbytes   quota   limit   grace   files   quota   limit   grace
    /mnt/lustre    4160    8192       0       -       2       0       0       -
lustre-MDT0000_UUID
                      0       -       0       -       2       -       0       -
lustre-OST0000_UUID
                   4120       -    4144       -       -       -       -       -
lustre-OST0001_UUID
                     40*      -      40       -       -       -       -       -
Total allocated inode limit: 0, total allocated block limit: 4184
Disk quotas &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; grp quota_usr (gid 60000):
     Filesystem  kbytes   quota   limit   grace   files   quota   limit   grace
    /mnt/lustre    4160       0       0       -       2       0       0       -
lustre-MDT0000_UUID
                      0       -       0       -       2       -       0       -
lustre-OST0000_UUID
                   4120       -       0       -       -       -       -       -
lustre-OST0001_UUID
                     40       -       0       -       -       -       -       -
Total allocated inode limit: 0, total allocated block limit: 0
Disk quotas &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; prj 1000 (pid 1000):
     Filesystem  kbytes   quota   limit   grace   files   quota   limit   grace
    /mnt/lustre       0       0       0       -       0       0       0       -
lustre-MDT0000_UUID
                      0       -       0       -       0       -       0       -
lustre-OST0000_UUID
                      0       -       0       -       -       -       -       -
lustre-OST0001_UUID
                      0       -       0       -       -       -       -       -
Total allocated inode limit: 0, total allocated block limit: 0
Block grace time: 40s; Inode grace time: 1w
Block grace time: 1w; Inode grace time: 1w
Block grace time: 1w; Inode grace time: 1w
Write after timer goes off
running as uid/gid/euid/egid 60000/60000/60000/60000, groups:
 [dd] [&lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt;=/dev/zero] [of=/mnt/lustre/d3b.sanity-quota/f3b.sanity-quota-0] [bs=1K] [count=10] [seek=6144]
10+0 records in
10+0 records out
10240 bytes (10 kB, 10 KiB) copied, 0.00177872 s, 5.8 MB/s
Write after cancel lru locks
running as uid/gid/euid/egid 60000/60000/60000/60000, groups:
 [dd] [&lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt;=/dev/zero] [of=/mnt/lustre/d3b.sanity-quota/f3b.sanity-quota-0] [bs=1K] [count=10] [seek=7168]
10+0 records in
10+0 records out
10240 bytes (10 kB, 10 KiB) copied, 0.00480989 s, 2.1 MB/s
Disk quotas &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; usr quota_usr (uid 60000):
     Filesystem  kbytes   quota   limit   grace   files   quota   limit   grace
    /mnt/lustre    4172    8192       0       -       2       0       0       -
lustre-MDT0000_UUID
                      0       -       0       -       2       -       0       -
lustre-OST0000_UUID
                   4132       -    4144       -       -       -       -       -
lustre-OST0001_UUID
                     40*      -      40       -       -       -       -       -
Total allocated inode limit: 0, total allocated block limit: 4184
Files &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; user (quota_usr):
  File: /mnt/lustre/d3b.sanity-quota/f3b.sanity-quota-0.mmap
  Size: 40960     	Blocks: 80         IO Block: 4194304 regular file
Device: 2c54f966h/743766374d	Inode: 144115238826934298  Links: 1
Access: (0644/-rw-r--r--)  Uid: (60000/quota_usr)   Gid: (60000/quota_usr)
Access: 2020-12-23 13:07:15.000000000 +0500
Modify: 2020-12-23 13:07:15.000000000 +0500
Change: 2020-12-23 13:07:15.000000000 +0500
 Birth: 2020-12-23 13:07:15.000000000 +0500
  File: /mnt/lustre/d3b.sanity-quota/f3b.sanity-quota-0
  Size: 7350272   	Blocks: 8264       IO Block: 4194304 regular file
Device: 2c54f966h/743766374d	Inode: 144115238826934296  Links: 1
Access: (0644/-rw-r--r--)  Uid: (60000/quota_usr)   Gid: (60000/quota_usr)
Access: 2020-12-23 13:07:14.000000000 +0500
Modify: 2020-12-23 13:07:41.000000000 +0500
Change: 2020-12-23 13:07:41.000000000 +0500
 Birth: 2020-12-23 13:07:14.000000000 +0500
Disk quotas &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; grp quota_usr (gid 60000):
     Filesystem  kbytes   quota   limit   grace   files   quota   limit   grace
    /mnt/lustre    4172       0       0       -       2       0       0       -
lustre-MDT0000_UUID
                      0       -       0       -       2       -       0       -
lustre-OST0000_UUID
                   4132       -       0       -       -       -       -       -
lustre-OST0001_UUID
                     40       -       0       -       -       -       -       -
Total allocated inode limit: 0, total allocated block limit: 0
Files &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; group (quota_usr):
  File: /mnt/lustre/d3b.sanity-quota/f3b.sanity-quota-0.mmap
  Size: 40960     	Blocks: 80         IO Block: 4194304 regular file
Device: 2c54f966h/743766374d	Inode: 144115238826934298  Links: 1
Access: (0644/-rw-r--r--)  Uid: (60000/quota_usr)   Gid: (60000/quota_usr)
Access: 2020-12-23 13:07:15.000000000 +0500
Modify: 2020-12-23 13:07:15.000000000 +0500
Change: 2020-12-23 13:07:15.000000000 +0500
 Birth: 2020-12-23 13:07:15.000000000 +0500
  File: /mnt/lustre/d3b.sanity-quota/f3b.sanity-quota-0
  Size: 7350272   	Blocks: 8264       IO Block: 4194304 regular file
Device: 2c54f966h/743766374d	Inode: 144115238826934296  Links: 1
Access: (0644/-rw-r--r--)  Uid: (60000/quota_usr)   Gid: (60000/quota_usr)
Access: 2020-12-23 13:07:14.000000000 +0500
Modify: 2020-12-23 13:07:41.000000000 +0500
Change: 2020-12-23 13:07:41.000000000 +0500
 Birth: 2020-12-23 13:07:14.000000000 +0500
Disk quotas &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; prj 1000 (pid 1000):
     Filesystem  kbytes   quota   limit   grace   files   quota   limit   grace
    /mnt/lustre       0       0       0       -       0       0       0       -
lustre-MDT0000_UUID
                      0       -       0       -       0       -       0       -
lustre-OST0000_UUID
                      0       -       0       -       -       -       -       -
lustre-OST0001_UUID
                      0       -       0       -       -       -       -       -
Total allocated inode limit: 0, total allocated block limit: 0
Files &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; project (1000):
 sanity-quota test_3b: @@@@@@ FAIL: write success, but expect EDQUOT 
  Trace dump:
  = ./../tests/test-framework.sh:6273:error()
  = sanity-quota.sh:159:quota_error()
  = sanity-quota.sh:1297:test_block_soft()
  = sanity-quota.sh:1435:test_3b()
  = ./../tests/test-framework.sh:6576:run_one()
  = ./../tests/test-framework.sh:6623:run_one_logged()
  = ./../tests/test-framework.sh:6450:run_test()
  = sanity-quota.sh:1494:main()
Dumping lctl log to /tmp/ltest-logs/sanity-quota.test_3b.*.1608710861.log
Dumping logs only on local client.
Resetting fail_loc on all nodes...done.
Delete files...
Wait &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; unlink objects finished...
Waiting &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; local destroys to complete
Destroy the created pools: qpool1
lustre.qpool1
OST lustre-OST0000_UUID removed from pool lustre.qpool1
OST lustre-OST0001_UUID removed from pool lustre.qpool1
Pool lustre.qpool1 destroyed
FAIL 3b (85s)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="288275" author="wshilong" created="Wed, 23 Dec 2020 08:34:51 +0000"  >&lt;p&gt;Alex, the first guess is this might be similar problem like &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12702&quot; title=&quot;sanity-quota test_4a: create after timer expired, but expect EDQUOT&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12702&quot;&gt;&lt;del&gt;LU-12702&lt;/del&gt;&lt;/a&gt;, anyway, would you get debug logs for it.&lt;/p&gt;</comment>
                            <comment id="288276" author="bzzz" created="Wed, 23 Dec 2020 08:36:26 +0000"  >&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/secure/ViewProfile.jspa?name=wshilong&quot; class=&quot;user-hover&quot; rel=&quot;wshilong&quot;&gt;wshilong&lt;/a&gt; I can&apos;t reproduce that with clean master (local setup), but it can&apos;t be that some unluck conditions/timings get exposed better with the patch.&lt;/p&gt;</comment>
                            <comment id="288277" author="wshilong" created="Wed, 23 Dec 2020 08:41:17 +0000"  >&lt;p&gt;Sure, would you help attach debug for me? i could try to reproduce locally, but that is not fast than yours.&lt;img class=&quot;emoticon&quot; src=&quot;https://jira.whamcloud.com/images/icons/emoticons/smile.png&quot; height=&quot;16&quot; width=&quot;16&quot; align=&quot;absmiddle&quot; alt=&quot;&quot; border=&quot;0&quot;/&gt;&lt;/p&gt;</comment>
                            <comment id="288644" author="gerrit" created="Tue, 5 Jan 2021 08:29:10 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/40991/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/40991/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14187&quot; title=&quot;LU-14134 causes many racer timeouts&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14187&quot;&gt;&lt;del&gt;LU-14187&lt;/del&gt;&lt;/a&gt; osd-ldiskfs: fix locking in write commit&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: f0f92773ee18a9da71cd27e3b5c32e5d318ed0d2&lt;/p&gt;</comment>
                            <comment id="288664" author="pjones" created="Tue, 5 Jan 2021 13:00:27 +0000"  >&lt;p&gt;Landed for 2.14&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="61650">LU-14134</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="62253">LU-14304</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                            <attachment id="36869" name="bt-all.txt" size="570881" author="bzzz" created="Mon, 7 Dec 2020 07:36:49 +0000"/>
                            <attachment id="37034" name="sanity-quota.test_3b.debug_log.tmp.1608711135.log" size="3537531" author="bzzz" created="Wed, 23 Dec 2020 08:43:23 +0000"/>
                            <attachment id="36870" name="serial.txt" size="67630" author="bzzz" created="Mon, 7 Dec 2020 07:36:20 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i01ggv:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>