<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:26:28 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-9469] conf-sanity test_61: test failed to respond and timed out</title>
                <link>https://jira.whamcloud.com/browse/LU-9469</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;&lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/b7669e10-d1f4-42ce-a573-31dd57859317&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/b7669e10-d1f4-42ce-a573-31dd57859317&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;From MDS console:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;BUG: unable to handle kernel NULL pointer dereference at           (null)
Oops: 0000 [#1] SMP
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Followed by a trace that is very similar to the one seen in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6821&quot; title=&quot;conf-sanity test_62: tune2fs failed &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6821&quot;&gt;LU-6821&lt;/a&gt;:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[&amp;lt;ffffffffa0764214&amp;gt;] ldiskfs_map_blocks+0x554/0x600 [ldiskfs]
21:19:41:[23773.866330]  [&amp;lt;ffffffffa074aa81&amp;gt;] ldiskfs_xattr_set_entry+0x4a1/0x790 [ldiskfs]
21:19:41:[23773.866330]  [&amp;lt;ffffffffa074b970&amp;gt;] ldiskfs_xattr_ibody_set+0x30/0x80 [ldiskfs]
21:19:41:[23773.866330]  [&amp;lt;ffffffffa074bd55&amp;gt;] ldiskfs_xattr_set_handle+0x1c5/0x530 [ldiskfs]
21:19:41:[23773.866330]  [&amp;lt;ffffffffa074c1fd&amp;gt;] ldiskfs_xattr_set+0x13d/0x1a0 [ldiskfs]
21:19:41:[23773.866330]  [&amp;lt;ffffffff8121e4bb&amp;gt;] generic_setxattr+0x5b/0x70
21:19:41:[23773.866330]  [&amp;lt;ffffffffa0e6bfd4&amp;gt;] osd_xattr_set+0x294/0xaf0 [osd_ldiskfs]
21:19:41:[23773.866330]  [&amp;lt;ffffffffa109bf2f&amp;gt;] lod_sub_object_xattr_set+0x1df/0x430 [lod]
21:19:41:[23773.866330]  [&amp;lt;ffffffffa108372f&amp;gt;] lod_xattr_set_internal+0xaf/0x2a0 [lod]
21:19:41:[23773.866330]  [&amp;lt;ffffffffa1091574&amp;gt;] lod_xattr_set+0xe4/0x530 [lod]
21:19:41:[23773.866330]  [&amp;lt;ffffffffa110277d&amp;gt;] mdd_xattr_set+0x6cd/0xe50 [mdd]
21:19:41:[23773.866330]  [&amp;lt;ffffffffa0fbac30&amp;gt;] mdt_reint_setxattr+0x410/0xf60 [mdt]
21:19:41:[23773.866330]  [&amp;lt;ffffffffa0fb967e&amp;gt;] mdt_reint_rec+0x6e/0x1e0 [mdt]
21:19:41:[23773.866330]  [&amp;lt;ffffffffa0f9d087&amp;gt;] mdt_reint_internal+0x557/0x950 [mdt]
21:19:41:[23773.866330]  [&amp;lt;ffffffffa0fa8051&amp;gt;] mdt_reint+0x61/0x120 [mdt]
21:19:41:[23773.866330]  [&amp;lt;ffffffffa0bd1cb8&amp;gt;] tgt_request_handle+0x8b8/0x1290 [ptlrpc]
21:19:41:[23773.866330]  [&amp;lt;ffffffffa0b7e573&amp;gt;] ptlrpc_server_handle_request+0x203/0xa00 [ptlrpc]
21:19:41:[23773.866330]  [&amp;lt;ffffffffa0b82398&amp;gt;] ptlrpc_main+0xa38/0x1d80 [ptlrpc]
21:19:41:[23773.866330]  [&amp;lt;ffffffff810999dd&amp;gt;] kthread+0xbd/0xe0
21:19:41:[23773.866330]  [&amp;lt;ffffffff815e6f3f&amp;gt;] ret_from_fork+0x3f/0x70
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment>onyx-37, full&lt;br/&gt;
&amp;nbsp;&amp;nbsp;servers: SLES12.2, master branch, v2.9.56.11, b3565, ldiskfs&lt;br/&gt;
&amp;nbsp;&amp;nbsp;clients: SLES12.2, master branch, v2.9.56.11, b3565&lt;br/&gt;
</environment>
        <key id="45935">LU-9469</key>
            <summary>conf-sanity test_61: test failed to respond and timed out</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="bogl">Bob Glossman</assignee>
                                    <reporter username="jcasper">James Casper</reporter>
                        <labels>
                    </labels>
                <created>Mon, 8 May 2017 22:30:52 +0000</created>
                <updated>Tue, 24 Oct 2017 21:41:18 +0000</updated>
                            <resolved>Mon, 16 Oct 2017 13:48:48 +0000</resolved>
                                    <version>Lustre 2.10.0</version>
                                    <fixVersion>Lustre 2.11.0</fixVersion>
                    <fixVersion>Lustre 2.10.2</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>6</watches>
                                                                            <comments>
                            <comment id="195387" author="pjones" created="Wed, 10 May 2017 23:47:11 +0000"  >&lt;p&gt;Bob&lt;/p&gt;

&lt;p&gt;Can you please look into this one?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="195543" author="adilger" created="Thu, 11 May 2017 18:14:46 +0000"  >&lt;p&gt;It is very likely that this is related to patch &lt;a href=&quot;https://review.whamcloud.com/25595&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/25595&lt;/a&gt; &quot;&lt;tt&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-9146&quot; title=&quot;Backport patches from upstream to resolve deadlock in xattr&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-9146&quot;&gt;&lt;del&gt;LU-9146&lt;/del&gt;&lt;/a&gt; ldiskfs: backport a few patches to resolve deadlock&lt;/tt&gt;&quot;, which was landed for RHEL7, but not SLES12.&lt;/p&gt;

&lt;p&gt;At the same time, it makes sense to port patch &lt;a href=&quot;https://review.whamcloud.com/27045&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/27045&lt;/a&gt;&lt;br/&gt;
&quot;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-9384&quot; title=&quot;conf-sanity test 32b fails with &amp;#39;list verification failed &amp;#39;&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-9384&quot;&gt;&lt;del&gt;LU-9384&lt;/del&gt;&lt;/a&gt; ldiskfs: port upstream patches for project quota&quot; to SLES 12 as well.&lt;/p&gt;</comment>
                            <comment id="195547" author="bogl" created="Thu, 11 May 2017 18:32:34 +0000"  >&lt;p&gt;I have asked Yang Sheng to take a look.  He was the author of &lt;a href=&quot;https://review.whamcloud.com/25595&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/25595&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="195603" author="ys" created="Fri, 12 May 2017 03:23:23 +0000"  >&lt;p&gt;From MDS log:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;21:19:41:[23773.807317] Lustre: DEBUG MARKER: /usr/sbin/lctl mark save large xattr trusted.big on \/mnt\/lustre\/f61.conf-sanity
21:19:41:[23773.846525] Lustre: DEBUG MARKER: save large xattr trusted.big on /mnt/lustre/f61.conf-sanity
21:19:41:[23773.862509] BUG: unable to handle kernel NULL pointer dereference at           (null)
21:19:41:[23773.864655] IP: [&amp;lt;ffffffffa0288a10&amp;gt;] jbd2_journal_file_inode+0x30/0xf0 [jbd2]
21:19:41:[23773.866330] PGD 0 
21:19:41:[23773.866330] Oops: 0000 [#1] SMP 
21:19:41:[23773.866330] Modules linked in: osp(OEN) mdd(OEN) lod(OEN) mdt(OEN) lfsck(OEN) mgs(OEN) mgc(OEN) osd_ldiskfs(OEN) lquota(OEN) fid(OEN) fld(OEN) ksocklnd(OEN) ptlrpc(OEN) obdclass(OEN) lnet(OEN) ldiskfs(OEN) libcfs(OEN) loop(E) rpcsec_gss_krb5(E) auth_rpcgss(E) nfsv4(E) dns_resolver(E) nfs(E) lockd(E) grace(E) sunrpc(E) fscache(E) af_packet(E) iscsi_boot_sysfs(E) ib_ipoib(E) rdma_ucm(E) ib_ucm(E) ib_uverbs(E) ib_umad(E) rdma_cm(E) 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;It is a jinode issue. We lost control since some codes have changed. For &lt;a href=&quot;https://review.whamcloud.com/25595&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/25595&lt;/a&gt;, looks like sles12sp2 already has included it. For &lt;a href=&quot;https://review.whamcloud.com/27045&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/27045&lt;/a&gt;, It can be ported if we will port project quota to sles12sp2.&lt;/p&gt;

&lt;p&gt;Thanks,&lt;br/&gt;
YangSheng&lt;/p&gt;</comment>
                            <comment id="195912" author="pjones" created="Mon, 15 May 2017 22:50:48 +0000"  >&lt;p&gt;Yes. Let&apos;s bring the SLES12 SP2 patches up to date with the RHEL7.3 patches&lt;/p&gt;</comment>
                            <comment id="206174" author="pjones" created="Wed, 23 Aug 2017 17:49:22 +0000"  >&lt;p&gt;Any progress on this?&lt;/p&gt;</comment>
                            <comment id="206185" author="bogl" created="Wed, 23 Aug 2017 18:37:49 +0000"  >&lt;p&gt;attempting a patch suggested by Yang Sheng that addressed a similar panic seen on el7.4&lt;/p&gt;</comment>
                            <comment id="206188" author="ys" created="Wed, 23 Aug 2017 18:40:53 +0000"  >&lt;p&gt;It looks same as &lt;a href=&quot;https://testing.hpdd.intel.com/sub_tests/3157a7c8-7814-11e7-a12c-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/sub_tests/3157a7c8-7814-11e7-a12c-5254006e85c2&lt;/a&gt;.&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;03:59:12:[20259.344620] BUG: unable to handle kernel NULL pointer dereference at           (null)
03:59:12:[20259.345439] IP: [&amp;lt;ffffffffc0195c84&amp;gt;] jbd2_journal_file_inode+0x34/0xf0 [jbd2]
03:59:12:[20259.345439] PGD 0 
03:59:12:[20259.345439] Oops: 0000 [#1] SMP 
03:59:12:[20259.345439] Modules linked in: osp(OE) mdd(OE) lod(OE) mdt(OE) lfsck(OE) mgs(OE) mgc(OE) osd_ldiskfs(OE) lquota(OE) fid(OE) fld(OE) ksocklnd(OE) ptlrpc(OE) obdclass(OE) lnet(OE) ldiskfs(OE) libcfs(OE) loop dm_mod rpcsec_gss_krb5 nfsv4 dns_resolver nfs fscache rpcrdma ib_isert iscsi_target_mod ib_iser libiscsi scsi_transport_iscsi ib_srpt target_core_mod crc_t10dif crct10dif_generic ib_srp scsi_transport_srp scsi_tgt ib_ipoib rdma_ucm ib_ucm ib_uverbs ib_umad rdma_cm ib_cm iw_cm ib_core iosf_mbi crc32_pclmul ghash_clmulni_intel ppdev aesni_intel lrw gf128mul glue_helper ablk_helper cryptd pcspkr joydev virtio_balloon i2c_piix4 parport_pc parport nfsd nfs_acl lockd auth_rpcgss grace sunrpc ip_tables ext4 mbcache jbd2 ata_generic pata_acpi cirrus drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops ttm virtio_blk drm ata_piix 8139too libata crct10dif_pclmul crct10dif_common crc32c_intel serio_raw 8139cp virtio_pci virtio_ring mii virtio i2c_core floppy [last unloaded: libcfs]
03:59:12:[20259.345439] CPU: 1 PID: 31105 Comm: mdt00_002 Tainted: G        W  OE  ------------   3.10.0-693.el7_lustre.x86_64 #1
03:59:12:[20259.345439] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2007
03:59:12:[20259.345439] task: ffff880064ab2f70 ti: ffff880064b58000 task.ti: ffff880064b58000
03:59:12:[20259.345439] RIP: 0010:[&amp;lt;ffffffffc0195c84&amp;gt;]  [&amp;lt;ffffffffc0195c84&amp;gt;] jbd2_journal_file_inode+0x34/0xf0 [jbd2]
03:59:12:[20259.345439] RSP: 0018:ffff880064b5b6b0  EFLAGS: 00010246
03:59:12:[20259.345439] RAX: 0000000000000800 RBX: ffff880054ba9800 RCX: 0000080000200000
03:59:12:[20259.345439] RDX: 000000008002c823 RSI: 0000000000000000 RDI: ffff88007bd3d2a0
03:59:12:[20259.345439] RBP: ffff880064b5b6d8 R08: 000000000000c350 R09: ffffffffc07b75c3
03:59:12:[20259.345439] R10: 0000000000006b03 R11: 0000000000000000 R12: 0000000000000000
03:59:12:[20259.345439] R13: 0000000000000001 R14: 0000000000000000 R15: ffff8800796f2000
03:59:12:[20259.345439] FS:  0000000000000000(0000) GS:ffff88007fd00000(0000) knlGS:0000000000000000
03:59:12:[20259.345439] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
03:59:12:[20259.345439] CR2: 0000000000000000 CR3: 0000000079809000 CR4: 00000000000406e0
03:59:12:[20259.345439] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
03:59:12:[20259.345439] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
03:59:12:[20259.345439] Stack:
03:59:12:[20259.345439]  ffff880000c28958 0000000000000000 0000000000000001 ffff880064b5b7c8
03:59:12:[20259.345439]  0000000000000001 ffff880064b5b760 ffffffffc07c3b4c 0000000000000000
03:59:12:[20259.345439]  0000000000c28958 000000007bd3d2a0 ffff88007bd3d2a0 0000000000000003
03:59:12:[20259.345439] Call Trace:
03:59:12:[20259.345439]  [&amp;lt;ffffffffc07c3b4c&amp;gt;] ldiskfs_map_blocks+0x64c/0x6f0 [ldiskfs]
03:59:12:[20259.345439]  [&amp;lt;ffffffffc077824c&amp;gt;] ldiskfs_xattr_set_entry+0x2cc/0x7c0 [ldiskfs]
03:59:12:[20259.345439]  [&amp;lt;ffffffffc07791c5&amp;gt;] ldiskfs_xattr_ibody_set+0x35/0x80 [ldiskfs]
03:59:12:[20259.345439]  [&amp;lt;ffffffffc07795a4&amp;gt;] ldiskfs_xattr_set_handle+0x1a4/0x500 [ldiskfs]
03:59:12:[20259.345439]  [&amp;lt;ffffffffc0779a46&amp;gt;] ldiskfs_xattr_set+0x146/0x1c0 [ldiskfs]
03:59:12:[20259.345439]  [&amp;lt;ffffffffc07b89cd&amp;gt;] ldiskfs_xattr_trusted_set+0x2d/0x30 [ldiskfs]
03:59:12:[20259.345439]  [&amp;lt;ffffffff812273d2&amp;gt;] generic_setxattr+0x62/0x80
03:59:12:[20259.345439]  [&amp;lt;ffffffffc0d2f43c&amp;gt;] osd_xattr_set+0x2cc/0xbf0 [osd_ldiskfs]
03:59:12:[20259.345439]  [&amp;lt;ffffffffc06933e4&amp;gt;] ? libcfs_log_return+0x24/0x30 [libcfs]
03:59:12:[20259.345439]  [&amp;lt;ffffffffc0faec00&amp;gt;] lod_sub_xattr_set+0x1f0/0x480 [lod]
03:59:12:[20259.345439]  [&amp;lt;ffffffffc0f94ce9&amp;gt;] lod_xattr_set_internal+0xb9/0x2d0 [lod]
03:59:12:[20259.345439]  [&amp;lt;ffffffffc0fa3ce1&amp;gt;] lod_xattr_set+0xf1/0x560 [lod]
03:59:12:[20259.345439]  [&amp;lt;ffffffffc1010b05&amp;gt;] mdd_xattr_set+0x705/0xe80 [mdd]
03:59:12:[20259.345439]  [&amp;lt;ffffffffc0ec2607&amp;gt;] ? mdt_version_save+0x67/0x120 [mdt]
03:59:12:[20259.345439]  [&amp;lt;ffffffffc0ecdde0&amp;gt;] mdt_reint_setxattr+0x410/0xf80 [mdt]
03:59:12:[20259.345439]  [&amp;lt;ffffffffc0696bc7&amp;gt;] ? libcfs_debug_msg+0x57/0x80 [libcfs]
03:59:12:[20259.345439]  [&amp;lt;ffffffffc0ecc790&amp;gt;] mdt_reint_rec+0x80/0x210 [mdt]
03:59:12:[20259.345439]  [&amp;lt;ffffffffc0eae31b&amp;gt;] mdt_reint_internal+0x5fb/0x9c0 [mdt]
03:59:12:[20259.345439]  [&amp;lt;ffffffffc0eb9da7&amp;gt;] mdt_reint+0x67/0x140 [mdt]
03:59:12:[20259.345439]  [&amp;lt;ffffffffc0b5d195&amp;gt;] tgt_request_handle+0x925/0x1370 [ptlrpc]
03:59:12:[20259.345439]  [&amp;lt;ffffffffc0b06036&amp;gt;] ptlrpc_server_handle_request+0x236/0xa90 [ptlrpc]
03:59:12:[20259.345439]  [&amp;lt;ffffffffc0696bc7&amp;gt;] ? libcfs_debug_msg+0x57/0x80 [libcfs]
03:59:12:[20259.345439]  [&amp;lt;ffffffffc0b097d2&amp;gt;] ptlrpc_main+0xa92/0x1e40 [ptlrpc]
03:59:12:[20259.345439]  [&amp;lt;ffffffff81029557&amp;gt;] ? __switch_to+0xd7/0x510
03:59:12:[20259.345439]  [&amp;lt;ffffffff816a8f00&amp;gt;] ? __schedule+0x310/0x8b0
03:59:12:[20259.345439]  [&amp;lt;ffffffffc0b08d40&amp;gt;] ? ptlrpc_register_service+0xe80/0xe80 [ptlrpc]
03:59:12:[20259.345439]  [&amp;lt;ffffffff810b098f&amp;gt;] kthread+0xcf/0xe0
03:59:12:[20259.345439]  [&amp;lt;ffffffff810b08c0&amp;gt;] ? insert_kthread_work+0x40/0x40
03:59:12:[20259.345439]  [&amp;lt;ffffffff816b4f18&amp;gt;] ret_from_fork+0x58/0x90
03:59:12:[20259.345439]  [&amp;lt;ffffffff810b08c0&amp;gt;] ? insert_kthread_work+0x40/0x40
03:59:12:[20259.345439] Code: 41 57 41 56 41 55 41 54 53 f6 47 1c 08 48 8b 1f 0f 85 91 00 00 00 48 85 db 0f 84 88 00 00 00 4c 8b 3b 45 8b 37 41 83 e6 02 75 7c &amp;lt;48&amp;gt; 39 1e 49 89 f4 74 7a 48 39 5e 08 74 74 4d 8d af a0 03 00 00 
03:59:12:[20259.345439] RIP  [&amp;lt;ffffffffc0195c84&amp;gt;] jbd2_journal_file_inode+0x34/0xf0 [jbd2]
03:59:12:[20259.345439]  RSP &amp;lt;ffff880064b5b6b0&amp;gt;
03:59:12:[20259.345439] CR2: 0000000000000000
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;I have proposed a fix for that. Bob will port it to sles12.&lt;/p&gt;

&lt;p&gt;Thanks,&lt;br/&gt;
YangSheng&lt;/p&gt;</comment>
                            <comment id="206193" author="gerrit" created="Wed, 23 Aug 2017 19:01:31 +0000"  >&lt;p&gt;Bob Glossman (bob.glossman@intel.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/28665&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/28665&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-9469&quot; title=&quot;conf-sanity test_61: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-9469&quot;&gt;&lt;del&gt;LU-9469&lt;/del&gt;&lt;/a&gt; ldiskfs: add additional attach_jinode call&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: f8c12ff7c6b57e16e741be7e4acaf381b5b293d3&lt;/p&gt;</comment>
                            <comment id="206201" author="gerrit" created="Wed, 23 Aug 2017 19:41:19 +0000"  >&lt;p&gt;Yang Sheng (yang.sheng@intel.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/28666&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/28666&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-9469&quot; title=&quot;conf-sanity test_61: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-9469&quot;&gt;&lt;del&gt;LU-9469&lt;/del&gt;&lt;/a&gt; ldiskfs: just test&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 354bcd64b3119666f84718aa9d56c47458f321c4&lt;/p&gt;</comment>
                            <comment id="206400" author="ys" created="Fri, 25 Aug 2017 10:18:31 +0000"  >&lt;p&gt;So looks like it is very easy to reproduce on sles12sp2. &lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/cc1c2380-7d1d-4672-a3ba-0c0f4275f105&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/cc1c2380-7d1d-4672-a3ba-0c0f4275f105&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;We need run fullset tests while kernel update. Else may lost chance to catch issue timely.&lt;/p&gt;

&lt;p&gt;Thanks,&lt;br/&gt;
YangSheng&lt;/p&gt;</comment>
                            <comment id="206403" author="bogl" created="Fri, 25 Aug 2017 12:24:14 +0000"  >&lt;p&gt;YangSheng,&lt;br/&gt;
I see that your test patch reproduced the problem.&lt;br/&gt;
I also see the proposed patch &lt;a href=&quot;https://review.whamcloud.com/28665&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/28665&lt;/a&gt; did not show it.  Tests on that patch are running into other problems though.  possibly &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-9908&quot; title=&quot;conf-sanity test_41b: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-9908&quot;&gt;&lt;del&gt;LU-9908&lt;/del&gt;&lt;/a&gt;, maybe other things too.&lt;/p&gt;

&lt;p&gt;You could help in a couple of ways:&lt;/p&gt;
&lt;ol&gt;
	&lt;li&gt;rebase your test on top of &lt;a href=&quot;https://review.whamcloud.com/28665&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/28665&lt;/a&gt; and see if the problem still happens with repeated test of conf-sanity.&lt;/li&gt;
	&lt;li&gt;help with finding root cause(s) of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-9908&quot; title=&quot;conf-sanity test_41b: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-9908&quot;&gt;&lt;del&gt;LU-9908&lt;/del&gt;&lt;/a&gt;&lt;/li&gt;
&lt;/ol&gt;


&lt;p&gt;Thanks,&lt;br/&gt;
Bob&lt;/p&gt;</comment>
                            <comment id="206405" author="ys" created="Fri, 25 Aug 2017 12:40:55 +0000"  >&lt;p&gt;Hi, Bob,&lt;/p&gt;

&lt;p&gt;Do you have a way to run all of tests group for sles12 sp2 instead of conf-sanity only? I afraid testing of sles12 is not enough. Maybe more issues will be revealed while full tests is running. I think just change Test-parameter  of &lt;a href=&quot;https://review.whamcloud.com/28665&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/28665&lt;/a&gt; is enough. My patch is just for test, it change nothing.&lt;/p&gt;

&lt;p&gt;Thanks,&lt;br/&gt;
YangSheng&lt;/p&gt;</comment>
                            <comment id="206406" author="bogl" created="Fri, 25 Aug 2017 12:52:43 +0000"  >&lt;p&gt;I know your patch is just for test.  want to just test with #28665 to see if original problem will occur with just conf-sanity if repeated enough times before trying anything else.&lt;/p&gt;

&lt;p&gt;The only way I can think of to run all normal test groups in #28665 is to have 5 separate Test-Parameters lines, 1 for each of the 5 default test runs.   I can do that, but want to have more confidence about conf-sanity first.  Doing that seems likely to run into other, unrelated, pre-existing fails on sles12.&lt;/p&gt;</comment>
                            <comment id="208235" author="gerrit" created="Wed, 13 Sep 2017 13:41:42 +0000"  >&lt;p&gt;Bob Glossman (bob.glossman@intel.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/28968&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/28968&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-9469&quot; title=&quot;conf-sanity test_61: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-9469&quot;&gt;&lt;del&gt;LU-9469&lt;/del&gt;&lt;/a&gt; ldiskfs: add additional attach_jinode call&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_10&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 415773036850bdd5562caa7f0cf9ccc18c1e7212&lt;/p&gt;</comment>
                            <comment id="211149" author="gerrit" created="Mon, 16 Oct 2017 03:23:39 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/28665/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/28665/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-9469&quot; title=&quot;conf-sanity test_61: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-9469&quot;&gt;&lt;del&gt;LU-9469&lt;/del&gt;&lt;/a&gt; ldiskfs: add additional attach_jinode call&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 84f690eee28f3b9123b89249c568dd6255a4a348&lt;/p&gt;</comment>
                            <comment id="211171" author="pjones" created="Mon, 16 Oct 2017 13:48:48 +0000"  >&lt;p&gt;Landed for 2.11&lt;/p&gt;</comment>
                            <comment id="211869" author="gerrit" created="Tue, 24 Oct 2017 21:20:08 +0000"  >&lt;p&gt;John L. Hammond (john.hammond@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/28968/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/28968/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-9469&quot; title=&quot;conf-sanity test_61: test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-9469&quot;&gt;&lt;del&gt;LU-9469&lt;/del&gt;&lt;/a&gt; ldiskfs: add additional attach_jinode call&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_10&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 6ece1f512e9e0ef8dfc8293738716808c70a5a42&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10120">
                    <name>Blocker</name>
                                                                <inwardlinks description="is blocked by">
                                        <issuelink>
            <issuekey id="47939">LU-9908</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="44046">LU-9146</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzzc5b:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>