<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:00:12 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-6435] ldiskfs bug in __ldiskfs_handle_dirty_metadata</title>
                <link>https://jira.whamcloud.com/browse/LU-6435</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;New bug, seen so far only in autotest for the el7.1 server code; not seen in previous test runs on el7.0. After the failure the Lustre filesystem becomes read-only, leading to many more failures.&lt;/p&gt;

&lt;p&gt;Yang Sheng reports:&lt;/p&gt;

&lt;p&gt;&lt;span class=&quot;error&quot;&gt;&amp;#91;4/4/15, 8:46:51 PM&amp;#93;&lt;/span&gt; yang sheng: I also encountered this error in my test environment. I found it is caused by the journal space not being enough to handle the dirty data. So modifying MDS_FS_MKFS_OPTS=&apos;-J size=xxx&apos; works well.&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;4/4/15, 8:57:22 PM&amp;#93;&lt;/span&gt; yang sheng: I&apos;ll do more investigation to reveal the root cause.&lt;/p&gt;

&lt;p&gt;Seen in sanity test_102i: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/b849c698-da5b-11e4-8289-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/b849c698-da5b-11e4-8289-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;[ 2666.355166] ------------[ cut here ]------------&lt;br/&gt;
[ 2666.356994] WARNING: at /var/lib/jenkins/tmp/lustre_el7_topdir/BUILD/BUILD/lustre-2.7.51/ldiskfs/ext4_jbd2.c:260 __ldiskfs_handle_dirty_metadata+0x1c2/0x220 &lt;span class=&quot;error&quot;&gt;&amp;#91;ldiskfs&amp;#93;&lt;/span&gt;()&lt;br/&gt;
[ 2666.361168] Modules linked in: osp(OF) mdd(OF) lod(OF) mdt(OF) lfsck(OF) mgs(OF) mgc(OF) osd_ldiskfs(OF) lquota(OF) fid(OF) fld(OF) ksocklnd(OF) ptlrpc(OF) obdclass(OF) lnet(OF) sha512_generic libcfs(OF) ldiskfs(OF) dm_mod rpcsec_gss_krb5 auth_rpcgss nfsv4 dns_resolver nfs lockd fscache xprtrdma sunrpc ib_isert iscsi_target_mod ib_iser libiscsi scsi_transport_iscsi ib_srpt target_core_mod ib_srp scsi_transport_srp scsi_tgt ib_ipoib rdma_ucm ib_ucm ib_uverbs ib_umad rdma_cm ppdev ib_cm parport_pc iw_cm parport ib_sa ib_mad virtio_balloon pcspkr i2c_piix4 ib_core serio_raw ib_addr ext4 mbcache jbd2 ata_generic pata_acpi virtio_blk cirrus syscopyarea sysfillrect sysimgblt drm_kms_helper ttm 8139too virtio_pci virtio_ring virtio ata_piix floppy drm i2c_core libata 8139cp mii &lt;span class=&quot;error&quot;&gt;&amp;#91;last unloaded: llog_test&amp;#93;&lt;/span&gt;&lt;/p&gt;

&lt;p&gt;[ 2666.378809] CPU: 0 PID: 11066 Comm: mdt00_002 Tainted: GF       W  O--------------   3.10.0-229.1.2.el7_lustre.g5f2eb1d.x86_64 #1&lt;br/&gt;
[ 2666.382889] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2007&lt;br/&gt;
[ 2666.384948]  0000000000000000 00000000e532c5a7 ffff880077c677f8 ffffffff81604d2a&lt;br/&gt;
[ 2666.387191]  ffff880077c67830 ffffffff8106e34b ffff880035b8bdd0 ffff88007cc414b0&lt;br/&gt;
[ 2666.389386]  0000000000000000 ffffffffa05ba540 00000000000013f2 ffff880077c67840&lt;br/&gt;
[ 2666.391661] Call Trace:&lt;br/&gt;
[ 2666.393487]  [] dump_stack+0x19/0x1b&lt;br/&gt;
[ 2666.395480]  [] warn_slowpath_common+0x6b/0xb0&lt;br/&gt;
[ 2666.397550]  [] warn_slowpath_null+0x1a/0x20&lt;br/&gt;
[ 2666.399640]  [] __ldiskfs_handle_dirty_metadata+0x1c2/0x220 &lt;span class=&quot;error&quot;&gt;&amp;#91;ldiskfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
[ 2666.401911]  [] ? ldiskfs_dirty_inode+0x54/0x60 &lt;span class=&quot;error&quot;&gt;&amp;#91;ldiskfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
[ 2666.404128]  [] ldiskfs_free_blocks+0x5e6/0xb90 &lt;span class=&quot;error&quot;&gt;&amp;#91;ldiskfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
[ 2666.406246]  [] ldiskfs_xattr_release_block+0x275/0x330 &lt;span class=&quot;error&quot;&gt;&amp;#91;ldiskfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
[ 2666.408443]  [] ldiskfs_xattr_delete_inode+0x2bb/0x300 &lt;span class=&quot;error&quot;&gt;&amp;#91;ldiskfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
[ 2666.410567]  [] ldiskfs_evict_inode+0x1b5/0x610 &lt;span class=&quot;error&quot;&gt;&amp;#91;ldiskfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
[ 2666.412594]  [] evict+0xa7/0x170&lt;br/&gt;
[ 2666.414443]  [] iput+0xf5/0x180&lt;br/&gt;
[ 2666.416275]  [] osd_object_delete+0x1d3/0x300 &lt;span class=&quot;error&quot;&gt;&amp;#91;osd_ldiskfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
[ 2666.418308]  [] lu_object_free.isra.30+0x9d/0x1a0 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
[ 2666.420350]  [] lu_object_put+0xc2/0x320 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
[ 2666.422389]  [] mdt_reint_unlink+0x796/0x1150 &lt;span class=&quot;error&quot;&gt;&amp;#91;mdt&amp;#93;&lt;/span&gt;&lt;br/&gt;
[ 2666.424396]  [] mdt_reint_rec+0x80/0x210 &lt;span class=&quot;error&quot;&gt;&amp;#91;mdt&amp;#93;&lt;/span&gt;&lt;br/&gt;
[ 2666.426508]  [] mdt_reint_internal+0x58c/0x780 &lt;span class=&quot;error&quot;&gt;&amp;#91;mdt&amp;#93;&lt;/span&gt;&lt;br/&gt;
[ 2666.428542]  [] mdt_reint+0x67/0x140 &lt;span class=&quot;error&quot;&gt;&amp;#91;mdt&amp;#93;&lt;/span&gt;&lt;br/&gt;
[ 2666.430616]  [] tgt_request_handle+0x635/0xfd0 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
[ 2666.432746]  [] ptlrpc_server_handle_request+0x21b/0xa90 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
[ 2666.434929]  [] ? ptlrpc_wait_event+0x98/0x330 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
[ 2666.436900]  [] ? libcfs_debug_msg+0x57/0x80 &lt;span class=&quot;error&quot;&gt;&amp;#91;libcfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
[ 2666.438920]  [] ptlrpc_main+0xaf8/0x1ea0 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
[ 2666.440866]  [] ? __dequeue_entity+0x26/0x40&lt;br/&gt;
[ 2666.442788]  [] ? ptlrpc_register_service+0xf00/0xf00 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
[ 2666.444755]  [] kthread+0xcf/0xe0&lt;br/&gt;
[ 2666.446590]  [] ? kthread_create_on_node+0x140/0x140&lt;br/&gt;
[ 2666.448452]  [] ret_from_fork+0x7c/0xb0&lt;br/&gt;
[ 2666.450308]  [] ? kthread_create_on_node+0x140/0x140&lt;br/&gt;
[ 2666.452177] ---[ end trace 53ab1a0dad30f568 ]---&lt;br/&gt;
[ 2666.453923] LDISKFS-fs: ldiskfs_free_blocks:5106: aborting transaction: error 28 in __ldiskfs_handle_dirty_metadata&lt;br/&gt;
[ 2666.456082] LDISKFS: jbd2_journal_dirty_metadata failed: handle type 5 started at line 240, credits 3/0, errcode -28&lt;br/&gt;
[ 2666.456945] LDISKFS-fs error (device dm-0) in ldiskfs_free_blocks:5118: error 28&lt;br/&gt;
[ 2666.469566] Aborting journal on device dm-0-8.&lt;br/&gt;
[ 2666.516889] LDISKFS-fs (dm-0): Remounting filesystem read-only&lt;/p&gt;</description>
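                <!--
                  A minimal sketch of the workaround quoted in the description above: enlarge the
                  ldiskfs journal at format time so that transactions such as the one aborted in
                  ldiskfs_evict_inode() have enough credits. The 4096 MB journal size below is an
                  assumed, illustrative value; the original report left the size as 'xxx'.

                      # Lustre test framework: set before (re)formatting the MDT so the option
                      # is passed through to mke2fs when the backing ldiskfs filesystem is made.
                      export MDS_FS_MKFS_OPTS='-J size=4096'
                -->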
                <environment>el7.1</environment>
        <key id="29407">LU-6435</key>
            <summary>ldiskfs bug in __ldiskfs_handle_dirty_metadata</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="2" iconUrl="https://jira.whamcloud.com/images/icons/priorities/critical.svg">Critical</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="3">Duplicate</resolution>
                                        <assignee username="ys">Yang Sheng</assignee>
                                    <reporter username="bogl">Bob Glossman</reporter>
                        <labels>
                    </labels>
                <created>Mon, 6 Apr 2015 14:58:20 +0000</created>
                <updated>Thu, 18 Jun 2015 08:11:13 +0000</updated>
                            <resolved>Thu, 18 Jun 2015 08:11:13 +0000</resolved>
                                    <version>Lustre 2.8.0</version>
                                    <fixVersion>Lustre 2.8.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>8</watches>
                                                                            <comments>
                            <comment id="111718" author="ys" created="Wed, 8 Apr 2015 11:36:55 +0000"  >&lt;p&gt;This is a journal credits insufficient problem. I have ever seen it on my local box. Will trying to reproduce it again.&lt;/p&gt;</comment>
                            <comment id="118918" author="sarah" created="Wed, 17 Jun 2015 23:39:01 +0000"  >&lt;p&gt;Is this one a dup of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6722&quot; title=&quot;sanity-lfsck test_1a: FAIL: (3) Fail to start LFSCK for namespace!&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6722&quot;&gt;&lt;del&gt;LU-6722&lt;/del&gt;&lt;/a&gt;?&lt;/p&gt;</comment>
                            <comment id="118920" author="yong.fan" created="Thu, 18 Jun 2015 00:11:20 +0000"  >&lt;p&gt;I think so. It is another failure instance of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6722&quot; title=&quot;sanity-lfsck test_1a: FAIL: (3) Fail to start LFSCK for namespace!&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6722&quot;&gt;&lt;del&gt;LU-6722&lt;/del&gt;&lt;/a&gt; because of not enough credits during ldiskfs_evict_inode().&lt;/p&gt;</comment>
                            <comment id="118940" author="yong.fan" created="Thu, 18 Jun 2015 08:11:13 +0000"  >&lt;p&gt;It is the duplication of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6722&quot; title=&quot;sanity-lfsck test_1a: FAIL: (3) Fail to start LFSCK for namespace!&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6722&quot;&gt;&lt;del&gt;LU-6722&lt;/del&gt;&lt;/a&gt;&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                            <outwardlinks description="duplicates">
                                        <issuelink>
            <issuekey id="30655">LU-6722</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzxa87:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10021"><![CDATA[2]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>