<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 03:05:37 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-13955] OST become readonly when test using fio with file size larger than 4G</title>
                <link>https://jira.whamcloud.com/browse/LU-13955</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;When do a test with latest mater branch by using fio, I found that when file size is larger than 4G, it will cause the OST into readonly state on CentOS7.&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
cat /etc/redhat-release 
CentOS Linux release 7.6.1810 (Core) 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
# mkfs.lustre --fsname=lustre --mdt --mgs --index=0 --reformat /dev/sdb1
# mkfs.lustre --fsname=lustre --ost --mgsnode=192.168.150.128@tcp --index=0 --reformat /dev/sdb2
# mount.lustre /dev/sdb1 /mnt/lustre-mds1
# mount.lustre /dev/sdb2 /mnt/lustre-ost1
# mount.lustre 192.168.150.128@tcp:/lustre /mnt/lustre
# df
Filesystem&#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; 1K-blocks&#160; &#160; &#160; Used Available Use% Mounted on
/dev/sda1&#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; 52507040&#160; 21610852&#160; 28205936&#160; 44% /
devtmpfs&#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; 1917568 &#160; &#160; &#160; &#160; 0 &#160; 1917568 &#160; 0% /dev
tmpfs &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; 1930752 &#160; &#160; &#160; &#160; 0 &#160; 1930752 &#160; 0% /dev/shm
tmpfs &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; 1930752 &#160; &#160; 11732 &#160; 1919020 &#160; 1% /run
tmpfs &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; 1930752 &#160; &#160; &#160; &#160; 0 &#160; 1930752 &#160; 0% /sys/fs/cgroup
.host:/ &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; 488245288 283447468 204797820&#160; 59% /mnt/hgfs
tmpfs&#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; 386152 &#160; &#160; &#160; &#160; 0&#160; &#160; 386152 &#160; 0% /run/user/0
/dev/sdb1&#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; 159688&#160; &#160; &#160; 1908&#160; &#160; 143972 &#160; 2% /mnt/lustre-mds1
/dev/sdb2&#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; &#160; 17839688 &#160; &#160; 46168&#160; 16833420 &#160; 1% /mnt/lustre-ost1
192.168.150.128@tcp:/lustre&#160; 17839688 &#160; &#160; 46168&#160; 16833420 &#160; 1% /mnt/lustre

# lctl get_param version
version=2.13.55_84_g03e6db5
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
mkdir /mnt/lustre/qian
fio --name=seqread --directory=/mnt/lustre/qian --filesize=5G --bs=128K --create_only=1 --numjobs=1 --create_serialize=0
seqread: (g=0): rw=read, bs=(R) 128KiB-128KiB, (W) 128KiB-128KiB, (T) 128KiB-128KiB, ioengine=psync, iodepth=1
fio-3.1
Starting 1 process
seqread: Laying out IO file (1 file / 5120MiB)
fio: native_fallocate call failed: No space left on device
fio: pid=10054, err=30/file:filesetup.c:184, func=ftruncate, error=Read-only file system




Run status group 0 (all jobs):
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;The server dump messages:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
[&#160; 150.093475] WARNING: CPU: 0 PID: 9940 at /tmp/rpmbuild-lustre-root-t8NmDyeO/BUILD/lustre-2.13.55_84_g03e6db5/ldiskfs/ext4_jbd2.c:266 __ldiskfs_handle_dirty_metadata+0x1c2/0x220 [ldiskfs]
[&#160; 150.093476] Modules linked in: lustre(OE) lmv(OE) mdc(OE) lov(OE) osc(OE) ofd(OE) ost(OE) osp(OE) mdd(OE) lod(OE) mdt(OE) lfsck(OE) mgs(OE) mgc(OE) osd_ldiskfs(OE) lquota(OE) fid(OE) fld(OE) ksocklnd(OE) ptlrpc(OE) obdclass(OE) lnet(OE) libcfs(OE) ldiskfs(OE) ipmi_devintf ipmi_msghandler vmhgfs(OE) vmw_vsock_vmci_transport vsock ppdev iosf_mbi crc32_pclmul ghash_clmulni_intel vmw_balloon aesni_intel lrw gf128mul glue_helper ablk_helper cryptd joydev pcspkr sg vmw_vmci i2c_piix4 parport_pc parport ip_tables ext4 mbcache jbd2 sd_mod crc_t10dif crct10dif_generic sr_mod cdrom crct10dif_pclmul crct10dif_common crc32c_intel serio_raw vmwgfx drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops ttm e1000 nfit drm libnvdimm drm_panel_orientation_quirks mptspi scsi_transport_spi mptscsih mptbase ata_generic
[&#160; 150.093516]&#160; pata_acpi ata_piix libata
[&#160; 150.093521] CPU: 0 PID: 9940 Comm: ll_ost00_002 Kdump: loaded Tainted: G &#160; &#160; &#160; &#160; &#160; OE&#160; ------------ &#160; 3.10.0-957.12.2.el7_lustre.2.12.55_47_gf6497eb.x86_64 #1
[&#160; 150.093523] Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 04/13/2018
[&#160; 150.093524] Call Trace:
[&#160; 150.093532]&#160; [&amp;lt;ffffffff90963041&amp;gt;] dump_stack+0x19/0x1b
[&#160; 150.093536]&#160; [&amp;lt;ffffffff902976e8&amp;gt;] __warn+0xd8/0x100
[&#160; 150.093539]&#160; [&amp;lt;ffffffff9029782d&amp;gt;] warn_slowpath_null+0x1d/0x20
[&#160; 150.093551]&#160; [&amp;lt;ffffffffc050e862&amp;gt;] __ldiskfs_handle_dirty_metadata+0x1c2/0x220 [ldiskfs]
[&#160; 150.093561]&#160; [&amp;lt;ffffffffc04ec67b&amp;gt;] ldiskfs_mb_mark_diskspace_used+0x2bb/0x510 [ldiskfs]
[&#160; 150.093570]&#160; [&amp;lt;ffffffffc04f0800&amp;gt;] ldiskfs_mb_new_blocks+0x350/0xb20 [ldiskfs]
[&#160; 150.093581]&#160; [&amp;lt;ffffffffc05186c5&amp;gt;] ? __read_extent_tree_block+0x55/0x1e0 [ldiskfs]
[&#160; 150.093585]&#160; [&amp;lt;ffffffff9041d9bb&amp;gt;] ? __kmalloc+0x1eb/0x230
[&#160; 150.093596]&#160; [&amp;lt;ffffffffc0519764&amp;gt;] ? ldiskfs_ext_find_extent+0x134/0x340 [ldiskfs]
[&#160; 150.093606]&#160; [&amp;lt;ffffffffc051dbf6&amp;gt;] ldiskfs_ext_map_blocks+0x4a6/0xf60 [ldiskfs]
[&#160; 150.093610]&#160; [&amp;lt;ffffffff90477fff&amp;gt;] ? has_bh_in_lru+0xf/0x50
[&#160; 150.093620]&#160; [&amp;lt;ffffffffc052286c&amp;gt;] ldiskfs_map_blocks+0x12c/0x6a0 [ldiskfs]
[&#160; 150.093630]&#160; [&amp;lt;ffffffffc0518c0e&amp;gt;] ? ldiskfs_alloc_file_blocks.isra.36+0xbe/0x2f0 [ldiskfs]
[&#160; 150.093639]&#160; [&amp;lt;ffffffffc0518c31&amp;gt;] ldiskfs_alloc_file_blocks.isra.36+0xe1/0x2f0 [ldiskfs]
[&#160; 150.093648]&#160; [&amp;lt;ffffffffc051fff9&amp;gt;] ldiskfs_fallocate+0x809/0x8a0 [ldiskfs]
[&#160; 150.093651]&#160; [&amp;lt;ffffffff904af45a&amp;gt;] ? __dquot_initialize+0x3a/0x240
[&#160; 150.093656]&#160; [&amp;lt;ffffffffc0321a93&amp;gt;] ? jbd2__journal_start+0xf3/0x1f0 [jbd2]
[&#160; 150.093671]&#160; [&amp;lt;ffffffffc0c4da23&amp;gt;] osd_fallocate+0x243/0x530 [osd_ldiskfs]
[&#160; 150.093679]&#160; [&amp;lt;ffffffffc0c2ff65&amp;gt;] ? osd_trans_start+0x235/0x4e0 [osd_ldiskfs]
[&#160; 150.093688]&#160; [&amp;lt;ffffffffc106ce28&amp;gt;] ofd_object_fallocate+0x538/0x780 [ofd]
[&#160; 150.093693]&#160; [&amp;lt;ffffffffc10565b1&amp;gt;] ofd_fallocate_hdl+0x231/0x970 [ofd]
[&#160; 150.093742]&#160; [&amp;lt;ffffffffc09d6dbf&amp;gt;] ? lustre_pack_reply_flags+0x6f/0x1e0 [ptlrpc]
[&#160; 150.093789]&#160; [&amp;lt;ffffffffc0a3fd0a&amp;gt;] tgt_request_handle+0x96a/0x1700 [ptlrpc]
[&#160; 150.093829]&#160; [&amp;lt;ffffffffc0a1a301&amp;gt;] ? ptlrpc_nrs_req_get_nolock0+0xd1/0x170 [ptlrpc]
[&#160; 150.093838]&#160; [&amp;lt;ffffffffc059402e&amp;gt;] ? ktime_get_real_seconds+0xe/0x10 [libcfs]
[&#160; 150.093873]&#160; [&amp;lt;ffffffffc09e33f6&amp;gt;] ptlrpc_server_handle_request+0x256/0xb10 [ptlrpc]
[&#160; 150.093908]&#160; [&amp;lt;ffffffffc09e29ca&amp;gt;] ? ptlrpc_server_handle_req_in+0x92a/0x1100 [ptlrpc]
[&#160; 150.093912]&#160; [&amp;lt;ffffffff902c2df0&amp;gt;] ? wake_up_atomic_t+0x30/0x30
[&#160; 150.093946]&#160; [&amp;lt;ffffffffc09e7f4c&amp;gt;] ptlrpc_main+0xb3c/0x14d0 [ptlrpc]
[&#160; 150.093980]&#160; [&amp;lt;ffffffffc09e7410&amp;gt;] ? ptlrpc_register_service+0xf90/0xf90 [ptlrpc]
[&#160; 150.093983]&#160; [&amp;lt;ffffffff902c1d21&amp;gt;] kthread+0xd1/0xe0
[&#160; 150.093985]&#160; [&amp;lt;ffffffff902c1c50&amp;gt;] ? insert_kthread_work+0x40/0x40
[&#160; 150.093988]&#160; [&amp;lt;ffffffff90975c1d&amp;gt;] ret_from_fork_nospec_begin+0x7/0x21
[&#160; 150.093991]&#160; [&amp;lt;ffffffff902c1c50&amp;gt;] ? insert_kthread_work+0x40/0x40
[&#160; 150.093992] ---[ end trace 92c47b4354741217 ]---
[&#160; 150.093995] LDISKFS-fs: ldiskfs_mb_mark_diskspace_used:3450: aborting transaction: error 28 in __ldiskfs_handle_dirty_metadata
[&#160; 150.094045] LDISKFS: jbd2_journal_dirty_metadata failed: handle type 0 started at line 1919, credits 41/0, errcode -28
[&#160; 150.094087] LDISKFS-fs warning (device sdb2): ldiskfs_mb_new_blocks:5077: Updating bitmap error: [err -28] [pa ffff8d59008f6068] [phy 1441792] [logic 1146880] [len 32768] [free 32768] [error 1] [inode 233]
[&#160; 150.094526] Quota error (device sdb2): qtree_write_dquot: dquota write failed
[&#160; 150.094552] LDISKFS-fs error (device sdb2) in ldiskfs_write_dquot:5495: error 28
[&#160; 150.094886] Aborting journal on device sdb2-8.
[&#160; 150.095175] LDISKFS-fs (sdb2): Remounting filesystem read-only
[&#160; 150.095200] LDISKFS-fs error (device sdb2) in ldiskfs_reserve_inode_write:5313: Journal has aborted
[&#160; 150.095515] LDISKFS-fs error (device sdb2) in ldiskfs_alloc_file_blocks:4760: error 28
[&#160; 150.095852] LDISKFS-fs error (device sdb2) in osd_trans_stop:2029: error 28
[&#160; 150.095958] LustreError: 9933:0:(osd_handler.c:1728:osd_trans_commit_cb()) transaction @0xffff8d590a96a200 commit error: 2
[&#160; 150.096084] LustreError: 9940:0:(osd_handler.c:2032:osd_trans_stop()) lustre-OST0000: failed to stop transaction: rc = -28
[&#160; 152.806430] LustreError: 9940:0:(ofd_dev.c:1818:ofd_destroy_hdl()) lustre-OST0000: error destroying object [0x100000000:0x2:0x0]: -30
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;&#160;&lt;/p&gt;</description>
                <environment></environment>
        <key id="60750">LU-13955</key>
            <summary>OST become readonly when test using fio with file size larger than 4G</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="3">Duplicate</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="qian_wc">Qian Yingjin</reporter>
                        <labels>
                    </labels>
                <created>Fri, 11 Sep 2020 06:59:30 +0000</created>
                <updated>Fri, 11 Sep 2020 12:00:10 +0000</updated>
                            <resolved>Fri, 11 Sep 2020 12:00:10 +0000</resolved>
                                                                        <due></due>
                            <votes>0</votes>
                                    <watches>4</watches>
                                                                            <comments>
                            <comment id="279319" author="qian_wc" created="Fri, 11 Sep 2020 09:33:55 +0000"  >&lt;p&gt;This bug can be easily reproduced via the fallocate:&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
fallocate -l 5G /mnt/lustre/tfile
fallocate: fallocate failed: No space left on device
[root@qian ~]# fallocate -l 4G /mnt/lustre/tfile
fallocate: fallocate failed: Read-only file system
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
[ 8421.788181] WARNING: CPU: 0 PID: 90292 at /tmp/rpmbuild-lustre-root-t8NmDyeO/BUILD/lustre-2.13.55_84_g03e6db5/ldiskfs/ext4_jbd2.c:266 __ldiskfs_handle_dirty_metadata+0x1c2/0x220 [ldiskfs]
[ 8421.788182] Modules linked in: lustre(OE) lmv(OE) mdc(OE) lov(OE) osc(OE) ofd(OE) ost(OE) osp(OE) mdd(OE) lod(OE) mdt(OE) lfsck(OE) mgs(OE) mgc(OE) osd_ldiskfs(OE) lquota(OE) fid(OE) fld(OE) ksocklnd(OE) ptlrpc(OE) obdclass(OE) lnet(OE) libcfs(OE) ldiskfs(OE) ipmi_devintf ipmi_msghandler vmhgfs(OE) ppdev iosf_mbi crc32_pclmul ghash_clmulni_intel vmw_vsock_vmci_transport vsock vmw_balloon aesni_intel lrw gf128mul glue_helper ablk_helper cryptd joydev pcspkr sg parport_pc parport vmw_vmci i2c_piix4 ip_tables ext4 mbcache jbd2 sd_mod crc_t10dif crct10dif_generic sr_mod cdrom crct10dif_pclmul crct10dif_common crc32c_intel ata_generic serio_raw pata_acpi mptspi scsi_transport_spi e1000 mptscsih mptbase vmwgfx drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops ttm drm ata_piix drm_panel_orientation_quirks
[ 8421.788212]&#160; libata nfit libnvdimm
[ 8421.788216] CPU: 0 PID: 90292 Comm: ll_ost00_000 Kdump: loaded Tainted: G &#160; &#160; &#160; &#160; &#160; OE&#160; ------------ &#160; 3.10.0-957.12.2.el7_lustre.2.12.55_47_gf6497eb.x86_64 #1
[ 8421.788217] Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 04/13/2018
[ 8421.788218] Call Trace:
[ 8421.788286]&#160; [&amp;lt;ffffffffaaf63041&amp;gt;] dump_stack+0x19/0x1b
[ 8421.788291]&#160; [&amp;lt;ffffffffaa8976e8&amp;gt;] __warn+0xd8/0x100
[ 8421.788295]&#160; [&amp;lt;ffffffffaa89782d&amp;gt;] warn_slowpath_null+0x1d/0x20
[ 8421.788304]&#160; [&amp;lt;ffffffffc045e862&amp;gt;] __ldiskfs_handle_dirty_metadata+0x1c2/0x220 [ldiskfs]
[ 8421.788311]&#160; [&amp;lt;ffffffffc043c67b&amp;gt;] ldiskfs_mb_mark_diskspace_used+0x2bb/0x510 [ldiskfs]
[ 8421.788317]&#160; [&amp;lt;ffffffffc0440800&amp;gt;] ldiskfs_mb_new_blocks+0x350/0xb20 [ldiskfs]
[ 8421.788324]&#160; [&amp;lt;ffffffffc04686c5&amp;gt;] ? __read_extent_tree_block+0x55/0x1e0 [ldiskfs]
[ 8421.788327]&#160; [&amp;lt;ffffffffaaa1d9bb&amp;gt;] ? __kmalloc+0x1eb/0x230
[ 8421.788335]&#160; [&amp;lt;ffffffffc0469764&amp;gt;] ? ldiskfs_ext_find_extent+0x134/0x340 [ldiskfs]
[ 8421.788341]&#160; [&amp;lt;ffffffffc046dbf6&amp;gt;] ldiskfs_ext_map_blocks+0x4a6/0xf60 [ldiskfs]
[ 8421.788344]&#160; [&amp;lt;ffffffffaaa77fff&amp;gt;] ? has_bh_in_lru+0xf/0x50
[ 8421.788351]&#160; [&amp;lt;ffffffffc047286c&amp;gt;] ldiskfs_map_blocks+0x12c/0x6a0 [ldiskfs]
[ 8421.788358]&#160; [&amp;lt;ffffffffc0468c0e&amp;gt;] ? ldiskfs_alloc_file_blocks.isra.36+0xbe/0x2f0 [ldiskfs]
[ 8421.788363]&#160; [&amp;lt;ffffffffc0468c31&amp;gt;] ldiskfs_alloc_file_blocks.isra.36+0xe1/0x2f0 [ldiskfs]
[ 8421.788369]&#160; [&amp;lt;ffffffffc046fff9&amp;gt;] ldiskfs_fallocate+0x809/0x8a0 [ldiskfs]
[ 8421.788372]&#160; [&amp;lt;ffffffffaaaaf45a&amp;gt;] ? __dquot_initialize+0x3a/0x240
[ 8421.788377]&#160; [&amp;lt;ffffffffc025ba93&amp;gt;] ? jbd2__journal_start+0xf3/0x1f0 [jbd2]
[ 8421.788391]&#160; [&amp;lt;ffffffffc0b9da23&amp;gt;] osd_fallocate+0x243/0x530 [osd_ldiskfs]
[ 8421.788434]&#160; [&amp;lt;ffffffffc0b7ff65&amp;gt;] ? osd_trans_start+0x235/0x4e0 [osd_ldiskfs]
[ 8421.788441]&#160; [&amp;lt;ffffffffc0fbce28&amp;gt;] ofd_object_fallocate+0x538/0x780 [ofd]
[ 8421.788445]&#160; [&amp;lt;ffffffffc0fa65b1&amp;gt;] ofd_fallocate_hdl+0x231/0x970 [ofd]
[ 8421.788478]&#160; [&amp;lt;ffffffffc0926dbf&amp;gt;] ? lustre_pack_reply_flags+0x6f/0x1e0 [ptlrpc]
[ 8421.788511]&#160; [&amp;lt;ffffffffc098fd0a&amp;gt;] tgt_request_handle+0x96a/0x1700 [ptlrpc]
[ 8421.788539]&#160; [&amp;lt;ffffffffc096a301&amp;gt;] ? ptlrpc_nrs_req_get_nolock0+0xd1/0x170 [ptlrpc]
[ 8421.788546]&#160; [&amp;lt;ffffffffc04e402e&amp;gt;] ? ktime_get_real_seconds+0xe/0x10 [libcfs]
[ 8421.788570]&#160; [&amp;lt;ffffffffc09333f6&amp;gt;] ptlrpc_server_handle_request+0x256/0xb10 [ptlrpc]
[ 8421.788594]&#160; [&amp;lt;ffffffffc09329ca&amp;gt;] ? ptlrpc_server_handle_req_in+0x92a/0x1100 [ptlrpc]
[ 8421.788654]&#160; [&amp;lt;ffffffffaa8c2df0&amp;gt;] ? wake_up_atomic_t+0x30/0x30
[ 8421.788682]&#160; [&amp;lt;ffffffffc0937f4c&amp;gt;] ptlrpc_main+0xb3c/0x14d0 [ptlrpc]
[ 8421.788706]&#160; [&amp;lt;ffffffffc0937410&amp;gt;] ? ptlrpc_register_service+0xf90/0xf90 [ptlrpc]
[ 8421.788708]&#160; [&amp;lt;ffffffffaa8c1d21&amp;gt;] kthread+0xd1/0xe0
[ 8421.788711]&#160; [&amp;lt;ffffffffaa8c1c50&amp;gt;] ? insert_kthread_work+0x40/0x40
[ 8421.788714]&#160; [&amp;lt;ffffffffaaf75c1d&amp;gt;] ret_from_fork_nospec_begin+0x7/0x21
[ 8421.788715]&#160; [&amp;lt;ffffffffaa8c1c50&amp;gt;] ? insert_kthread_work+0x40/0x40
[ 8421.788716] ---[ end trace ed0258569624c37e ]---
[ 8421.788719] LDISKFS-fs: ldiskfs_mb_mark_diskspace_used:3450: aborting transaction: error 28 in __ldiskfs_handle_dirty_metadata
[ 8421.788821] LDISKFS: jbd2_journal_dirty_metadata failed: handle type 0 started at line 1919, credits 41/0, errcode -28
[ 8421.788893] LDISKFS-fs warning (device sdb2): ldiskfs_mb_new_blocks:5077: Updating bitmap error: [err -28] [pa ffff954669f92000] [phy 1441792] [logic 1146880] [len 32768] [free 32768] [error 1] [inode 233]
[ 8421.789150] Quota error (device sdb2): qtree_write_dquot: dquota write failed
[ 8421.789172] LDISKFS-fs error (device sdb2) in ldiskfs_write_dquot:5495: error 28
[ 8421.789453] Aborting journal on device sdb2-8.
[ 8421.789758] LDISKFS-fs (sdb2): Remounting filesystem read-only
[ 8421.789780] LDISKFS-fs error (device sdb2) in ldiskfs_reserve_inode_write:5313: Journal has aborted
[ 8421.791221] LDISKFS-fs error (device sdb2) in ldiskfs_alloc_file_blocks:4760: error 28
[ 8421.791447] LDISKFS-fs error (device sdb2) in osd_trans_stop:2029: error 28
[ 8421.791519] LustreError: 90286:0:(osd_handler.c:1728:osd_trans_commit_cb()) transaction @0xffff954774c39d00 commit error: 2
[ 8421.791724] LustreError: 90292:0:(osd_handler.c:2032:osd_trans_stop()) lustre-OST0000: failed to stop transaction: rc = -28
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;&#160;&lt;/p&gt;</comment>
                            <comment id="279323" author="wshilong" created="Fri, 11 Sep 2020 11:23:53 +0000"  >&lt;p&gt;This looks duplicated as &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-13765&quot; title=&quot;ldiskfs_mb_mark_diskspace_used:3472: aborting transaction: error 28 in __ldiskfs_handle_dirty_metadata&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-13765&quot;&gt;&lt;del&gt;LU-13765&lt;/del&gt;&lt;/a&gt;, let&apos;s fix it there.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="59894">LU-13765</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i019lb:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>