<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:02:43 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
<language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-6726] ost-pools test 18 hung</title>
                <link>https://jira.whamcloud.com/browse/LU-6726</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;Maloo report: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/e8527890-135d-11e5-b4b0-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/e8527890-135d-11e5-b4b0-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Console log on MDS (onyx-42vm3):&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[47400.288764] Lustre: DEBUG MARKER: /usr/sbin/lctl mark == ost-pools test 18: File create in a directory which references a deleted pool ===================== 01:16:28 \(1434356188\)
[47400.681119] Lustre: DEBUG MARKER: == ost-pools test 18: File create in a directory which references a deleted pool ===================== 01:16:28 (1434356188)
[47401.792963] Lustre: DEBUG MARKER: /usr/sbin/lctl get_param -n osc.*MDT*.sync_*
[47402.021937] Lustre: DEBUG MARKER: /usr/sbin/lctl set_param -n osd*.*MD*.force_sync 1
[47402.355179] Lustre: DEBUG MARKER: /usr/sbin/lctl get_param -n osc.*MDT*.sync_*
[47403.631867] Lustre: DEBUG MARKER: /usr/sbin/lctl get_param -n osc.*MDT*.sync_*
[47404.910225] Lustre: DEBUG MARKER: /usr/sbin/lctl get_param -n osc.*MDT*.sync_*
[47406.173498] Lustre: DEBUG MARKER: /usr/sbin/lctl get_param -n osc.*MDT*.sync_*
[47407.433753] Lustre: DEBUG MARKER: /usr/sbin/lctl get_param -n osc.*MDT*.sync_*
[47408.754109] Lustre: DEBUG MARKER: /usr/sbin/lctl get_param -n osc.*MDT*.sync_*
[47409.944647] Lustre: DEBUG MARKER: /usr/sbin/lctl get_param -n osc.*MDT*.sync_*
[47411.194761] Lustre: DEBUG MARKER: /usr/sbin/lctl get_param -n osc.*MDT*.sync_*
[47412.424538] Lustre: DEBUG MARKER: /usr/sbin/lctl get_param -n osc.*MDT*.sync_*
[47442.232084] Lustre: DEBUG MARKER: lctl pool_new lustre.testpool
[47452.524195] Lustre: DEBUG MARKER: lctl pool_add lustre.testpool lustre-OST[0000-0006/1]
[47461.462995] Lustre: DEBUG MARKER: /usr/sbin/lctl get_param -n osc.*MDT*.sync_*
[47481.660971] ------------[ cut here ]------------
[47481.662751] WARNING: at /var/lib/jenkins/workspace/lustre-master/arch/x86_64/build_type/server/distro/el7/ib_stack/inkernel/BUILD/BUILD/lustre-2.7.55/ldiskfs/ext4_jbd2.c:260 __ldiskfs_handle_dirty_metadata+0x1c2/0x220 [ldiskfs]()
[47481.665841] Modules linked in: loop osp(OF) mdd(OF) lod(OF) mdt(OF) lfsck(OF) mgs(OF) mgc(OF) osd_ldiskfs(OF) lquota(OF) fid(OF) fld(OF) ksocklnd(OF) ptlrpc(OF) obdclass(OF) lnet(OF) sha512_generic libcfs(OF) ldiskfs(OF) dm_mod rpcsec_gss_krb5 auth_rpcgss nfsv4 dns_resolver nfs lockd fscache xprtrdma sunrpc ib_isert iscsi_target_mod ib_iser libiscsi scsi_transport_iscsi ib_srpt target_core_mod ib_srp scsi_transport_srp scsi_tgt ib_ipoib rdma_ucm ib_ucm ib_uverbs ib_umad rdma_cm ib_cm iw_cm ib_sa ib_mad ib_core ppdev serio_raw pcspkr parport_pc parport virtio_balloon i2c_piix4 ib_addr ext4 mbcache jbd2 ata_generic pata_acpi 8139too virtio_blk cirrus syscopyarea sysfillrect sysimgblt drm_kms_helper ttm drm virtio_pci 8139cp virtio_ring mii virtio ata_piix libata i2c_core floppy
[47481.678942] CPU: 0 PID: 565 Comm: mdt00_002 Tainted: GF       W  O--------------   3.10.0-229.4.2.el7_lustre.x86_64 #1
[47481.680890] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2007
[47481.682532]  0000000000000000 00000000b7c54144 ffff88006963b7f8 ffffffff816050da
[47481.684337]  ffff88006963b830 ffffffff8106e34b ffff880057224680 ffff88007b994ae0
[47481.686152]  0000000000000000 ffffffffa05b49c0 00000000000013f7 ffff88006963b840
[47481.687984] Call Trace:
[47481.689419]  [&amp;lt;ffffffff816050da&amp;gt;] dump_stack+0x19/0x1b
[47481.691178]  [&amp;lt;ffffffff8106e34b&amp;gt;] warn_slowpath_common+0x6b/0xb0
[47481.692969]  [&amp;lt;ffffffff8106e49a&amp;gt;] warn_slowpath_null+0x1a/0x20
[47481.694722]  [&amp;lt;ffffffffa055f6b2&amp;gt;] __ldiskfs_handle_dirty_metadata+0x1c2/0x220 [ldiskfs]
[47481.696632]  [&amp;lt;ffffffffa0575fa4&amp;gt;] ? ldiskfs_dirty_inode+0x54/0x60 [ldiskfs]
[47481.698767]  [&amp;lt;ffffffffa0582676&amp;gt;] ldiskfs_free_blocks+0x5e6/0xb90 [ldiskfs]
[47481.700655]  [&amp;lt;ffffffff812e0ca9&amp;gt;] ? pointer.isra.17+0x1b9/0x490
[47481.702479]  [&amp;lt;ffffffffa0576f75&amp;gt;] ldiskfs_xattr_release_block+0x275/0x330 [ldiskfs]
[47481.704345]  [&amp;lt;ffffffffa057a1ab&amp;gt;] ldiskfs_xattr_delete_inode+0x2bb/0x300 [ldiskfs]
[47481.706278]  [&amp;lt;ffffffffa0574ad5&amp;gt;] ldiskfs_evict_inode+0x1b5/0x610 [ldiskfs]
[47481.708108]  [&amp;lt;ffffffff811e23d7&amp;gt;] evict+0xa7/0x170
[47481.709854]  [&amp;lt;ffffffff811e2c15&amp;gt;] iput+0xf5/0x180
[47481.711543]  [&amp;lt;ffffffffa0ba1e73&amp;gt;] osd_object_delete+0x1d3/0x300 [osd_ldiskfs]
[47481.713520]  [&amp;lt;ffffffffa07566ad&amp;gt;] lu_object_free.isra.30+0x9d/0x1a0 [obdclass]
[47481.715393]  [&amp;lt;ffffffffa0756872&amp;gt;] lu_object_put+0xc2/0x320 [obdclass]
[47481.717283]  [&amp;lt;ffffffffa0d63b56&amp;gt;] mdt_reint_unlink+0x796/0x11b0 [mdt]
[47481.719287]  [&amp;lt;ffffffffa09ac070&amp;gt;] ? lustre_msg_buf_v2+0x1b0/0x1b0 [ptlrpc]
[47481.721162]  [&amp;lt;ffffffffa077880e&amp;gt;] ? lu_ucred+0x1e/0x30 [obdclass]
[47481.722845]  [&amp;lt;ffffffffa0d671d0&amp;gt;] mdt_reint_rec+0x80/0x210 [mdt]
[47481.724579]  [&amp;lt;ffffffffa0d4ae6c&amp;gt;] mdt_reint_internal+0x58c/0x780 [mdt]
[47481.726287]  [&amp;lt;ffffffffa0d54167&amp;gt;] mdt_reint+0x67/0x140 [mdt]
[47481.728052]  [&amp;lt;ffffffffa0a097e5&amp;gt;] tgt_request_handle+0x6d5/0x1060 [ptlrpc]
[47481.729866]  [&amp;lt;ffffffffa09b972b&amp;gt;] ptlrpc_server_handle_request+0x21b/0xa90 [ptlrpc]
[47481.731783]  [&amp;lt;ffffffffa09b72a8&amp;gt;] ? ptlrpc_wait_event+0x98/0x340 [ptlrpc]
[47481.733583]  [&amp;lt;ffffffff810a9662&amp;gt;] ? default_wake_function+0x12/0x20
[47481.735408]  [&amp;lt;ffffffff810a0898&amp;gt;] ? __wake_up_common+0x58/0x90
[47481.737177]  [&amp;lt;ffffffffa09bd918&amp;gt;] ptlrpc_main+0xaf8/0x1ea0 [ptlrpc]
[47481.741378]  [&amp;lt;ffffffffa09bce20&amp;gt;] ? ptlrpc_register_service+0xf00/0xf00 [ptlrpc]
[47481.744096]  [&amp;lt;ffffffff8109739f&amp;gt;] kthread+0xcf/0xe0
[47481.745924]  [&amp;lt;ffffffff810972d0&amp;gt;] ? kthread_create_on_node+0x140/0x140
[47481.747826]  [&amp;lt;ffffffff81614f7c&amp;gt;] ret_from_fork+0x7c/0xb0
[47481.749626]  [&amp;lt;ffffffff810972d0&amp;gt;] ? kthread_create_on_node+0x140/0x140
[47481.751541] ---[ end trace d2044b06032067c6 ]---
[47481.753203] LDISKFS-fs: ldiskfs_free_blocks:5111: aborting transaction: error 28 in __ldiskfs_handle_dirty_metadata
[47481.755276] LDISKFS: jbd2_journal_dirty_metadata failed: handle type 5 started at line 240, credits 3/0, errcode -28
[47481.756083] LDISKFS-fs error (device dm-0) in ldiskfs_free_blocks:5123: error 28
[47481.767617] Aborting journal on device dm-0-8.
[47481.774915] LDISKFS-fs (dm-0): Remounting filesystem read-only
[47481.777496] LDISKFS-fs warning (device dm-0): ldiskfs_evict_inode:274: couldn&apos;t extend journal (err -30)
[47481.779967] LDISKFS-fs error (device dm-0) in ldiskfs_evict_inode:303: error 28
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;More console logs are in the attached file.&lt;/p&gt;</description>
                <environment>Lustre Build: &lt;a href=&quot;https://build.hpdd.intel.com/job/lustre-master/3071&quot;&gt;https://build.hpdd.intel.com/job/lustre-master/3071&lt;/a&gt;&lt;br/&gt;
Distro/Arch: RHEL7.1/x86_64</environment>
        <key id="30677">LU-6726</key>
            <summary>ost-pools test 18 hung</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="3">Duplicate</resolution>
                                        <assignee username="yong.fan">nasf</assignee>
                                    <reporter username="yujian">Jian Yu</reporter>
                        <labels>
                    </labels>
                <created>Mon, 15 Jun 2015 19:38:19 +0000</created>
                <updated>Thu, 12 May 2016 18:10:10 +0000</updated>
                            <resolved>Wed, 17 Jun 2015 03:46:31 +0000</resolved>
                                                                        <due></due>
                            <votes>0</votes>
                                    <watches>5</watches>
                                                                            <comments>
                            <comment id="118750" author="yong.fan" created="Wed, 17 Jun 2015 03:46:31 +0000"  >&lt;p&gt;It is another failure instance of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6722&quot; title=&quot;sanity-lfsck test_1a: FAIL: (3) Fail to start LFSCK for namespace!&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6722&quot;&gt;&lt;del&gt;LU-6722&lt;/del&gt;&lt;/a&gt;&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                            <outwardlinks description="duplicates">
                                        <issuelink>
            <issuekey id="30655">LU-6722</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                            <attachment id="18179" name="onyx-42vm3.log.txt" size="16936" author="yujian" created="Mon, 15 Jun 2015 19:38:19 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzxft3:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>