<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:28:49 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-9740] Most of OSTs remounted read-only due to abort transaction in __ldiskfs_handle_dirty_metadata</title>
                <link>https://jira.whamcloud.com/browse/LU-9740</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;Most of OST (36 out of 40) was remounted read-only at the same time due to abort transaction in __ldiskfs_handle_dirty_metadata as follows. &lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Jun 15 18:54:54 oss01-mg kernel: ------------[ cut here ]------------
Jun 15 18:54:54 oss01-mg kernel: WARNING: at /tmp/rpmbuild-lustre-root-FtLAmY5x/BUILD/lustre-2.7.18.4.ddn0.g557254f/ldiskfs/ext4_jbd2.c:266 __ldiskfs_handle_dirty_metadata+0x1c2/0x220 [ldiskfs]()
Jun 15 18:54:54 oss01-mg kernel: Modules linked in: osp(OE) ofd(OE) lfsck(OE) ost(OE) mgc(OE) osd_ldiskfs(OE) ldiskfs(OE) lquota(OE) fid(OE) fld(OE) ko2iblnd(OE) ptlrpc(OE) obdclass(OE) lnet(OE) sha512_generic crypto_null libcfs(OE) rdma_ucm(OE) ib_ucm(OE) rdma_cm(OE) iw_cm(OE) ib_ipoib(OE) ib_cm(OE) ib_uverbs(OE) ib_umad(OE) mlx4_en(OE) mlx4_ib(OE) ib_sa(OE) ib_mad(OE) mlx4_core(OE) crc32_pclmul ghash_clmulni_intel aesni_intel lrw gf128mul ppdev glue_helper ablk_helper cryptd sg pcspkr i6300esb parport_pc parport i2c_piix4 nfsd auth_rpcgss nfs_acl lockd grace sunrpc ip_tables ext4 mbcache jbd2 sd_mod crc_t10dif sr_mod crct10dif_generic cdrom ata_generic mlx5_ib(OE) ib_core(OE) ib_addr(OE) ib_netlink(OE) pata_acpi cirrus syscopyarea sysfillrect sysimgblt drm_kms_helper mlx5_core(OE) ttm vxlan ip6_udp_tunnel udp_tunnel
Jun 15 18:54:54 oss01-mg kernel: ata_piix crct10dif_pclmul ptp crct10dif_common e1000 drm libata pps_core serio_raw igbvf crc32c_intel i2c_core mlx_compat(OE) sfablkdriver(OE) floppy dm_mirror dm_region_hash dm_log dm_mod
Jun 15 18:54:54 oss01-mg kernel: CPU: 9 PID: 4980 Comm: ll_ost03_019 Tainted: G           OE  ------------   3.10.0-327.36.1.el7_lustre.2.7.18.4.ddn0.g557254f.x86_64 #1
Jun 15 18:54:54 oss01-mg kernel: Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.7.5-0-ge51488c-20140602_164612-nilsson.home.kraxel.org 04/01/2014
Jun 15 18:54:54 oss01-mg kernel: 0000000000000000 00000000e3d423b3 ffff8809cea03820 ffffffff816366a1
Jun 15 18:54:54 oss01-mg kernel: ffff8809cea03858 ffffffff8107b260 ffff88083ec95d68 ffff880a0070d450
Jun 15 18:54:54 oss01-mg kernel: ffff8809897a4548 ffffffffa0c66a1c 0000000000000327 ffff8809cea03868
Jun 15 18:54:54 oss01-mg kernel: Call Trace:
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffff816366a1&amp;gt;] dump_stack+0x19/0x1b
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffff8107b260&amp;gt;] warn_slowpath_common+0x70/0xb0
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffff8107b3aa&amp;gt;] warn_slowpath_null+0x1a/0x20
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffffa0c09622&amp;gt;] __ldiskfs_handle_dirty_metadata+0x1c2/0x220 [ldiskfs]
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffffa0c2cf21&amp;gt;] ldiskfs_getblk+0x131/0x200 [ldiskfs]
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffffa0c2d01a&amp;gt;] ldiskfs_bread+0x2a/0x1e0 [ldiskfs]
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffffa0cf33c9&amp;gt;] osd_ldiskfs_write_record+0x169/0x360 [osd_ldiskfs]
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffffa0cf36b8&amp;gt;] osd_write+0xf8/0x230 [osd_ldiskfs]
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffffa0790325&amp;gt;] dt_record_write+0x45/0x130 [obdclass]
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffffa0a4ceac&amp;gt;] tgt_client_data_write.isra.19+0x12c/0x140 [ptlrpc]
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffffa0a5112b&amp;gt;] tgt_client_data_update+0x36b/0x510 [ptlrpc]
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffffa0a51a0b&amp;gt;] tgt_client_new+0x3fb/0x5f0 [ptlrpc]
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffffa0e29358&amp;gt;] ofd_obd_connect+0x2e8/0x3f0 [ofd]
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffffa09b6c6f&amp;gt;] target_handle_connect+0x11ef/0x2bf0 [ptlrpc]
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffff810c5618&amp;gt;] ? load_balance+0x218/0x890
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffff810be46e&amp;gt;] ? account_entity_dequeue+0xae/0xd0
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffff810c1a96&amp;gt;] ? dequeue_entity+0x106/0x520
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffffa0a35400&amp;gt;] ? nrs_request_removed+0x80/0x120 [ptlrpc]
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffffa0a5c8ba&amp;gt;] tgt_request_handle+0x55a/0x11f0 [ptlrpc]
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffffa09ffa0b&amp;gt;] ptlrpc_server_handle_request+0x21b/0xa90 [ptlrpc]
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffffa0634d08&amp;gt;] ? lc_watchdog_touch+0x68/0x180 [libcfs]
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffffa09fcad8&amp;gt;] ? ptlrpc_wait_event+0x98/0x330 [ptlrpc]
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffffa0a03330&amp;gt;] ptlrpc_main+0xc00/0x1f60 [ptlrpc]
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffffa0a02730&amp;gt;] ? ptlrpc_register_service+0x1070/0x1070 [ptlrpc]
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffff810a5b8f&amp;gt;] kthread+0xcf/0xe0
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffff810a5ac0&amp;gt;] ? kthread_create_on_node+0x140/0x140
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffff81646cd8&amp;gt;] ret_from_fork+0x58/0x90
Jun 15 18:54:54 oss01-mg kernel: [&amp;lt;ffffffff810a5ac0&amp;gt;] ? kthread_create_on_node+0x140/0x140
Jun 15 18:54:54 oss01-mg kernel: ---[ end trace 120678ee9d6e4000 ]---
Jun 15 18:54:54 oss01-mg kernel: LDISKFS-fs: ldiskfs_getblk:807: aborting transaction: error 28 in __ldiskfs_handle_dirty_metadata
Jun 15 18:54:54 oss01-mg kernel: LDISKFS-fs error (device sfa0007): ldiskfs_getblk:807: inode #81: block 805347324: comm ll_ost03_019: journal_dirty_metadata failed: handle type 0 started at line 1156, credits 8/0, errcode -28
Jun 15 18:54:54 oss01-mg kernel: LDISKFS-fs: ldiskfs_getblk:807: aborting transaction: error 28 in __ldiskfs_handle_dirty_metadata
Jun 15 18:54:54 oss01-mg kernel: LDISKFS-fs error (device sfa0000): ldiskfs_getblk:807: inode #81: block 110813181: comm ll_ost03_045: journal_dirty_metadata failed: handle type 0 started at line 1156, credits 8/0, errcode -28
Jun 15 18:54:54 oss01-mg kernel: Aborting journal on device sfa0000-8.
Jun 15 18:54:54 oss01-mg kernel: LDISKFS-fs (sfa0000): Remounting filesystem read-only
Jun 15 18:54:54 oss01-mg kernel: LustreError: 5006:0:(osd_io.c:1694:osd_ldiskfs_write_record()) sfa0000: error reading offset 20480 (block 5): rc = -28
Jun 15 18:54:54 oss01-mg kernel: LDISKFS-fs error (device sfa0000) in osd_trans_stop:1240: error 28
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;The customer was trying to mount lustre from new clients which are Intel Xeon Phi KNL server. They were installing lustre 2.7.19.8.ddn3. They have 16 machines and mounting lustre worked as expected on 14 clients, but they found a problem on 2 clients.  When we checked messages files from the servers, OST had been remounted read-only.  &lt;/p&gt;

&lt;p&gt;Similar issue is reported in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6722&quot; title=&quot;sanity-lfsck test_1a: FAIL: (3) Fail to start LFSCK for namespace!&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6722&quot;&gt;&lt;del&gt;LU-6722&lt;/del&gt;&lt;/a&gt; and the fix is included from 2.7.19.12. Can you check if this synopsis is due to the issue in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6722&quot; title=&quot;sanity-lfsck test_1a: FAIL: (3) Fail to start LFSCK for namespace!&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6722&quot;&gt;&lt;del&gt;LU-6722&lt;/del&gt;&lt;/a&gt;?&lt;/p&gt;</description>
                <environment></environment>
        <key id="47130">LU-9740</key>
            <summary>Most of OSTs remounted read-only due to abort transaction in __ldiskfs_handle_dirty_metadata</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="yong.fan">nasf</assignee>
                                    <reporter username="yong.fan">nasf</reporter>
                        <labels>
                    </labels>
                <created>Thu, 6 Jul 2017 06:59:07 +0000</created>
                <updated>Thu, 6 Sep 2018 10:17:17 +0000</updated>
                            <resolved>Wed, 19 Jul 2017 03:44:30 +0000</resolved>
                                                    <fixVersion>Lustre 2.10.1</fixVersion>
                    <fixVersion>Lustre 2.11.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>2</watches>
                                                                            <comments>
                            <comment id="201120" author="gerrit" created="Thu, 6 Jul 2017 07:16:53 +0000"  >&lt;p&gt;Fan Yong (fan.yong@intel.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/27947&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/27947&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-9740&quot; title=&quot;Most of OSTs remounted read-only due to abort transaction in __ldiskfs_handle_dirty_metadata&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-9740&quot;&gt;&lt;del&gt;LU-9740&lt;/del&gt;&lt;/a&gt; ldiskfs: more credits for non-append write&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 4ae97971f3ed0156ecbe81bf088b5e6a23ce10ec&lt;/p&gt;</comment>
                            <comment id="202583" author="gerrit" created="Wed, 19 Jul 2017 03:29:10 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/27947/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/27947/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-9740&quot; title=&quot;Most of OSTs remounted read-only due to abort transaction in __ldiskfs_handle_dirty_metadata&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-9740&quot;&gt;&lt;del&gt;LU-9740&lt;/del&gt;&lt;/a&gt; ldiskfs: more credits for non-append write&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: c668a8d405a9d8819bf9b96e0c610ccc5353d77d&lt;/p&gt;</comment>
                            <comment id="202640" author="pjones" created="Wed, 19 Jul 2017 03:44:30 +0000"  >&lt;p&gt;Landed for 2.11&lt;/p&gt;</comment>
                            <comment id="203621" author="gerrit" created="Wed, 26 Jul 2017 18:25:55 +0000"  >&lt;p&gt;Minh Diep (minh.diep@intel.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/28229&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/28229&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-9740&quot; title=&quot;Most of OSTs remounted read-only due to abort transaction in __ldiskfs_handle_dirty_metadata&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-9740&quot;&gt;&lt;del&gt;LU-9740&lt;/del&gt;&lt;/a&gt; ldiskfs: more credits for non-append write&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_10&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 98f1b59aaf81ac9d57b8091a0517fc89faf5a1d3&lt;/p&gt;</comment>
                            <comment id="204692" author="gerrit" created="Mon, 7 Aug 2017 18:49:44 +0000"  >&lt;p&gt;John L. Hammond (john.hammond@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/28229/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/28229/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-9740&quot; title=&quot;Most of OSTs remounted read-only due to abort transaction in __ldiskfs_handle_dirty_metadata&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-9740&quot;&gt;&lt;del&gt;LU-9740&lt;/del&gt;&lt;/a&gt; ldiskfs: more credits for non-append write&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_10&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 138c9a3bae52a2d6abeb5af07fc2076bcd9526b1&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                                                <inwardlinks description="is duplicated by">
                                                        </inwardlinks>
                                    </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                                        </outwardlinks>
                                                                <inwardlinks description="is related to">
                                                        </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzzg5j:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>