<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:04:46 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
<language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-6958] conf-sanity conf-sanity: (osd_internal.h:1088:osd_trans_exec_check()) LBUG</title>
                <link>https://jira.whamcloud.com/browse/LU-6958</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for sarah_lw &amp;lt;wei3.liu@intel.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/d747913c-36eb-11e5-8aa3-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/d747913c-36eb-11e5-8aa3-5254006e85c2&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test conf-sanity failed with the following error:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;test failed to respond and timed out
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;MDS console&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;============----- Wed Jul 29 14:28:52 PDT 2015
21:29:48:[ 6939.261454] Lustre: DEBUG MARKER: -----============= acceptance-small: conf-sanity ============----- Wed Jul 29 14:28:52 PDT 2015
21:29:48:[ 6939.725706] Lustre: DEBUG MARKER: /usr/sbin/lctl mark excepting tests: 32newtarball
21:29:48:[ 6939.843346] Lustre: DEBUG MARKER: excepting tests: 32newtarball
21:29:48:[ 6941.017504] Lustre: DEBUG MARKER: grep -c /mnt/mds1&apos; &apos; /proc/mounts
21:29:48:[ 6941.263857] Lustre: DEBUG MARKER: umount -d -f /mnt/mds1
21:29:48:[ 6944.448137] LustreError: 2957:0:(client.c:1144:ptlrpc_import_delay_req()) @@@ IMP_CLOSED   req@ffff88007853aa00 x1508060907555292/t0(0) o13-&amp;gt;lustre-OST0004-osc-MDT0000@10.2.4.186@tcp:7/4 lens 224/368 e 0 to 0 dl 0 ref 1 fl Rpc:/0/ffffffff rc 0/-1
21:29:48:[ 6944.454124] LustreError: 2957:0:(client.c:1144:ptlrpc_import_delay_req()) Skipped 4 previous similar messages
21:29:48:[ 6945.325038] Lustre: lustre-MDT0000: Not available for connect from 10.2.4.186@tcp (stopping)
21:29:48:[ 6945.325039] Lustre: lustre-MDT0000: Not available for connect from 10.2.4.186@tcp (stopping)
21:29:48:[ 6945.334071] Lustre: Skipped 5 previous similar messages
21:29:48:[ 6953.424097] Lustre: 15445:0:(client.c:2020:ptlrpc_expire_one_request()) @@@ Request sent has timed out for slow reply: [sent 1438205342/real 1438205342]  req@ffff88006bb46400 x1508060907555340/t0(0) o251-&amp;gt;MGC10.2.4.185@tcp@0@lo:26/25 lens 224/224 e 0 to 1 dl 1438205348 ref 2 fl Rpc:XN/0/ffffffff rc 0/-1
21:29:48:[ 6953.940790] Lustre: server umount lustre-MDT0000 complete
21:29:48:[ 6954.052294] Lustre: DEBUG MARKER: lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
21:29:48:[ 6967.843149] Lustre: DEBUG MARKER: grep -c /mnt/mds1&apos; &apos; /proc/mounts
21:29:48:[ 6968.122930] Lustre: DEBUG MARKER: lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
21:29:48:[ 6968.433002] Lustre: DEBUG MARKER: mkfs.lustre --mgs --fsname=lustre --mdt --index=0 --param=sys.timeout=20 --param=lov.stripesize=1048576 --param=lov.stripecount=0 --param=mdt.identity_upcall=/usr/sbin/l_getidentity --backfstype=ldiskfs --device-size=200000 --mkfsoptions=&quot;-E lazy_itable_i
21:29:48:[ 6969.075173] LDISKFS-fs (dm-0): mounted filesystem with ordered data mode. Opts: user_xattr,errors=remount-ro
21:29:48:[ 6985.644418] Lustre: DEBUG MARKER: mkdir -p /mnt/mds1
21:29:48:[ 6985.873774] Lustre: DEBUG MARKER: test -b /dev/lvm-Role_MDS/P1
21:29:48:[ 6986.104783] Lustre: DEBUG MARKER: mkdir -p /mnt/mds1; mount -t lustre   		                   /dev/lvm-Role_MDS/P1 /mnt/mds1
21:29:48:[ 6986.315220] LDISKFS-fs (dm-0): mounted filesystem with ordered data mode. Opts: user_xattr,errors=remount-ro
21:29:48:[ 6986.561765] LDISKFS-fs (dm-0): mounted filesystem with ordered data mode. Opts: user_xattr,errors=remount-ro,no_mbcache
21:29:48:[ 6986.607695] Lustre: Setting parameter lustre-MDT0000-mdtlov.lov.stripesize in log lustre-MDT0000
21:29:48:[ 6986.775915] Lustre: 15982:0:(osd_internal.h:1085:osd_trans_exec_check()) op 3: used 8, used now 8, reserved 4
21:29:48:[ 6986.781997] Lustre: 15982:0:(osd_internal.h:1085:osd_trans_exec_check()) Skipped 2 previous similar messages
21:29:48:[ 6986.784728] Lustre: 15982:0:(osd_handler.c:902:osd_trans_dump_creds())   create: 1/4/8, destroy: 0/0/0
21:29:48:[ 6986.786932] Lustre: 15982:0:(osd_handler.c:902:osd_trans_dump_creds()) Skipped 7 previous similar messages
21:29:48:[ 6986.789329] Lustre: 15982:0:(osd_handler.c:909:osd_trans_dump_creds())   attr_set: 0/0/0, xattr_set: 0/0/0
21:29:48:[ 6986.791595] Lustre: 15982:0:(osd_handler.c:909:osd_trans_dump_creds()) Skipped 7 previous similar messages
21:29:48:[ 6986.793997] Lustre: 15982:0:(osd_handler.c:919:osd_trans_dump_creds())   write: 0/0/0, punch: 0/0/0, quota 2/2/0
21:29:48:[ 6986.796392] Lustre: 15982:0:(osd_handler.c:919:osd_trans_dump_creds()) Skipped 7 previous similar messages
21:29:48:[ 6986.798858] Lustre: 15982:0:(osd_handler.c:926:osd_trans_dump_creds())   insert: 1/17/0, delete: 0/0/0
21:29:48:[ 6986.801203] Lustre: 15982:0:(osd_handler.c:926:osd_trans_dump_creds()) Skipped 7 previous similar messages
21:29:48:[ 6986.803701] Lustre: 15982:0:(osd_handler.c:933:osd_trans_dump_creds())   ref_add: 0/0/0, ref_del: 0/0/0
21:29:48:[ 6986.805925] Lustre: 15982:0:(osd_handler.c:933:osd_trans_dump_creds()) Skipped 7 previous similar messages
21:29:48:[ 6986.808305] LustreError: 15982:0:(osd_internal.h:1088:osd_trans_exec_check()) LBUG
21:29:48:[ 6986.810317] Pid: 15982, comm: llog_process_th
21:29:48:[ 6986.812333] 
21:29:48:[ 6986.812333] Call Trace:
21:29:48:[ 6986.815553]  [&amp;lt;ffffffffa06147d3&amp;gt;] libcfs_debug_dumpstack+0x53/0x80 [libcfs]
21:29:48:[ 6986.817690]  [&amp;lt;ffffffffa0614d75&amp;gt;] lbug_with_loc+0x45/0xc0 [libcfs]
21:29:48:[ 6986.819616]  [&amp;lt;ffffffffa0bf481e&amp;gt;] osd_it_ea_rec.part.94+0x0/0x36 [osd_ldiskfs]
21:29:48:[ 6986.821735]  [&amp;lt;ffffffffa0bcd8c7&amp;gt;] osd_object_ea_create+0x927/0xb80 [osd_ldiskfs]
21:29:48:[ 6986.823728]  [&amp;lt;ffffffffa075afa8&amp;gt;] dt_find_or_create+0x598/0x8d0 [obdclass]
21:29:48:[ 6986.825719]  [&amp;lt;ffffffffa05f4d40&amp;gt;] fld_index_init+0x1a0/0xbb0 [fld]
21:29:48:[ 6986.827708]  [&amp;lt;ffffffffa05f2485&amp;gt;] fld_server_init+0xa5/0x3e0 [fld]
21:29:48:[ 6986.829555]  [&amp;lt;ffffffffa0d8dcf4&amp;gt;] mdt_init0+0x4f4/0x12b0 [mdt]
21:29:48:[ 6986.831523]  [&amp;lt;ffffffffa0d8eb29&amp;gt;] mdt_device_alloc+0x79/0x110 [mdt]
21:29:48:[ 6986.833346]  [&amp;lt;ffffffffa073ba84&amp;gt;] obd_setup+0x114/0x2a0 [obdclass]
21:29:48:[ 6986.835297]  [&amp;lt;ffffffffa073cb77&amp;gt;] class_setup+0x2f7/0x8d0 [obdclass]
21:29:48:[ 6986.837065]  [&amp;lt;ffffffffa0744c96&amp;gt;] class_process_config+0x1c36/0x2db0 [obdclass]
21:29:48:[ 6986.839037]  [&amp;lt;ffffffff812de5d9&amp;gt;] ? simple_strtoul+0x9/0x10
21:29:48:[ 6986.840735]  [&amp;lt;ffffffffa0748fb0&amp;gt;] ? target_name2index+0x90/0xc0 [obdclass]
21:29:48:[ 6986.842695]  [&amp;lt;ffffffff811acbe3&amp;gt;] ? __kmalloc+0x1f3/0x230
21:29:48:[ 6986.844363]  [&amp;lt;ffffffffa073e6bb&amp;gt;] ? lustre_cfg_new+0x8b/0x400 [obdclass]
21:29:48:[ 6986.846252]  [&amp;lt;ffffffffa07466bd&amp;gt;] class_config_llog_handler+0x8ad/0x1d20 [obdclass]
21:29:48:[ 6986.848028]  [&amp;lt;ffffffffa0707345&amp;gt;] llog_process_thread+0x5f5/0x1020 [obdclass]
21:29:48:[ 6986.849909]  [&amp;lt;ffffffffa07087c0&amp;gt;] ? llog_process_thread_daemonize+0x0/0x80 [obdclass]
21:29:48:[ 6986.851690]  [&amp;lt;ffffffffa070880c&amp;gt;] llog_process_thread_daemonize+0x4c/0x80 [obdclass]
21:29:48:[ 6986.853606]  [&amp;lt;ffffffff8109739f&amp;gt;] kthread+0xcf/0xe0
21:29:48:[ 6986.855145]  [&amp;lt;ffffffff810972d0&amp;gt;] ? kthread+0x0/0xe0
21:29:48:[ 6986.856831]  [&amp;lt;ffffffff81615018&amp;gt;] ret_from_fork+0x58/0x90
21:29:48:[ 6986.858357]  [&amp;lt;ffffffff810972d0&amp;gt;] ? kthread+0x0/0xe0
21:29:48:[ 6986.859975] 
21:29:48:[ 6986.861579] Kernel panic - not syncing: LBUG
21:29:48:[ 6986.862575] CPU: 1 PID: 15982 Comm: llog_process_th Tainted: GF          O--------------   3.10.0-229.7.2.el7_lustre.g8bca546.x86_64 #1
21:29:48:[ 6986.862575] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2007
21:29:48:[ 6986.862575]  ffffffffa0631faf 000000004569312b ffff880067503870 ffffffff816051aa
21:29:48:[ 6986.862575]  ffff8800675038f0 ffffffff815fea1e ffffffff00000008 ffff880067503900
21:29:48:[ 6986.862575]  ffff8800675038a0 000000004569312b ffffffffa0bf67d0 0000000000000246
21:29:48:[ 6986.862575] Call Trace:
21:29:48:[ 6986.862575]  [&amp;lt;ffffffff816051aa&amp;gt;] dump_stack+0x19/0x1b
21:29:48:[ 6986.862575]  [&amp;lt;ffffffff815fea1e&amp;gt;] panic+0xd8/0x1e7
21:29:48:[ 6986.862575]  [&amp;lt;ffffffffa0614ddb&amp;gt;] lbug_with_loc+0xab/0xc0 [libcfs]
21:29:48:[ 6986.862575]  [&amp;lt;ffffffffa0bf481e&amp;gt;] osd_trans_exec_check.part.91+0x1a/0x1a [osd_ldiskfs]
21:29:48:[ 6986.862575]  [&amp;lt;ffffffffa0bcd8c7&amp;gt;] osd_object_ea_create+0x927/0xb80 [osd_ldiskfs]
21:29:48:[ 6986.862575]  [&amp;lt;ffffffffa075afa8&amp;gt;] dt_find_or_create+0x598/0x8d0 [obdclass]
21:29:48:[ 6986.862575]  [&amp;lt;ffffffffa05f4d40&amp;gt;] fld_index_init+0x1a0/0xbb0 [fld]
21:29:48:[ 6986.862575]  [&amp;lt;ffffffffa05f2485&amp;gt;] fld_server_init+0xa5/0x3e0 [fld]
21:29:48:[ 6986.862575]  [&amp;lt;ffffffffa0d8dcf4&amp;gt;] mdt_init0+0x4f4/0x12b0 [mdt]
21:29:48:[ 6986.862575]  [&amp;lt;ffffffffa0d8eb29&amp;gt;] mdt_device_alloc+0x79/0x110 [mdt]
21:29:48:[ 6986.862575]  [&amp;lt;ffffffffa073ba84&amp;gt;] obd_setup+0x114/0x2a0 [obdclass]
21:29:48:[ 6986.862575]  [&amp;lt;ffffffffa073cb77&amp;gt;] class_setup+0x2f7/0x8d0 [obdclass]
21:29:48:[ 6986.862575]  [&amp;lt;ffffffffa0744c96&amp;gt;] class_process_config+0x1c36/0x2db0 [obdclass]
21:29:48:[ 6986.862575]  [&amp;lt;ffffffff812de5d9&amp;gt;] ? simple_strtoul+0x9/0x10
21:29:48:[ 6986.862575]  [&amp;lt;ffffffffa0748fb0&amp;gt;] ? target_name2index+0x90/0xc0 [obdclass]
21:29:48:[ 6986.862575]  [&amp;lt;ffffffff811acbe3&amp;gt;] ? __kmalloc+0x1f3/0x230
21:29:48:[ 6986.862575]  [&amp;lt;ffffffffa073e6bb&amp;gt;] ? lustre_cfg_new+0x8b/0x400 [obdclass]
21:29:48:[ 6986.862575]  [&amp;lt;ffffffffa07466bd&amp;gt;] class_config_llog_handler+0x8ad/0x1d20 [obdclass]
21:29:48:[ 6986.862575]  [&amp;lt;ffffffffa0707345&amp;gt;] llog_process_thread+0x5f5/0x1020 [obdclass]
21:29:48:[ 6986.862575]  [&amp;lt;ffffffffa07087c0&amp;gt;] ? llog_backup+0x500/0x500 [obdclass]
21:29:48:[ 6986.862575]  [&amp;lt;ffffffffa070880c&amp;gt;] llog_process_thread_daemonize+0x4c/0x80 [obdclass]
21:29:48:[ 6986.862575]  [&amp;lt;ffffffff8109739f&amp;gt;] kthread+0xcf/0xe0
21:29:48:[ 6986.862575]  [&amp;lt;ffffffff810972d0&amp;gt;] ? kthread_create_on_node+0x140/0x140
21:29:48:[ 6986.862575]  [&amp;lt;ffffffff81615018&amp;gt;] ret_from_fork+0x58/0x90
21:29:48:[ 6986.862575]  [&amp;lt;ffffffff810972d0&amp;gt;] ? kthread_create_on_node+0x140/0x140
21:29:48:[ 6986.862575] drm_kms_helper: panic occurred, switching back to text console
21:29:48:[ 6986.862575] ------------[ cut here ]------------
21:29:48:[ 6986.862575] kernel BUG at arch/x86/mm/pageattr.c:216!
21:29:48:[ 6986.862575] invalid opcode: 0000 [#1] SMP 
21:29:48:[ 6986.862575] Modules linked in: osp(OF) mdd(OF) lod(OF) mdt(OF) lfsck(OF) mgs(OF) mgc(OF) osd_ldiskfs(OF) lquota(OF) fid(OF) fld(OF) ksocklnd(OF) ptlrpc(OF) obdclass(OF) lnet(OF) sha512_generic libcfs(OF) ldiskfs(OF) dm_mod rpcsec_gss_krb5 auth_rpcgss nfsv4 dns_resolver nfs lockd fscache xprtrdma sunrpc ib_isert iscsi_target_mod ib_iser libiscsi scsi_transport_iscsi ib_srpt target_core_mod ib_srp scsi_transport_srp scsi_tgt ib_ipoib rdma_ucm ib_ucm ib_uverbs ib_umad rdma_cm ib_cm iw_cm ppdev ib_sa parport_pc virtio_balloon pcspkr serio_raw parport i2c_piix4 ib_mad ib_core ib_addr ext4 mbcache jbd2 ata_generic pata_acpi cirrus syscopyarea sysfillrect virtio_blk sysimgblt 8139too drm_kms_helper ttm virtio_pci virtio_ring virtio 8139cp mii drm i2c_core ata_piix libata floppy
21:29:48:[ 6986.862575] CPU: 1 PID: 15982 Comm: llog_process_th Tainted: GF          O--------------   3.10.0-229.7.2.el7_lustre.g8bca546.x86_64 #1
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment>server: lustre-master build # 3120 RHEL7.1&lt;br/&gt;
client: SLES11 SP3</environment>
        <key id="31356">LU-6958</key>
            <summary>conf-sanity conf-sanity: (osd_internal.h:1088:osd_trans_exec_check()) LBUG</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="3">Duplicate</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Wed, 5 Aug 2015 03:01:00 +0000</created>
                <updated>Thu, 6 Aug 2015 04:53:35 +0000</updated>
                            <resolved>Thu, 6 Aug 2015 04:53:35 +0000</resolved>
                                    <version>Lustre 2.8.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>5</watches>
                                                                            <comments>
                            <comment id="123284" author="sarah" created="Wed, 5 Aug 2015 03:21:33 +0000"  >&lt;p&gt;it also make sanity-lfsck failed&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/d3fdfbfe-3b1b-11e5-bdff-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/d3fdfbfe-3b1b-11e5-bdff-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;07:21:23:[ 3378.397120] Lustre: DEBUG MARKER: -----============= acceptance-small: sanity-lfsck ============----- Tue Aug 4 00:21:01 PDT 2015
07:21:23:[ 3378.912074] Lustre: DEBUG MARKER: /usr/sbin/lctl mark == sanity-lfsck test complete, duration -o sec == 00:21:02 \(1438672862\)
07:21:23:[ 3379.044652] Lustre: DEBUG MARKER: == sanity-lfsck test complete, duration -o sec == 00:21:02 (1438672862)
07:21:23:[ 3381.002348] Lustre: DEBUG MARKER: grep -c /mnt/mds1&apos; &apos; /proc/mounts
07:21:23:[ 3381.243257] Lustre: DEBUG MARKER: umount -d -f /mnt/mds1
07:21:23:[ 3382.032208] LustreError: 3025:0:(client.c:1144:ptlrpc_import_delay_req()) @@@ IMP_CLOSED   req@ffff880066782d00 x1508554900711260/t0(0) o13-&amp;gt;lustre-OST0006-osc-MDT0000@10.2.4.204@tcp:7/4 lens 224/368 e 0 to 0 dl 0 ref 1 fl Rpc:/0/ffffffff rc 0/-1
07:21:23:[ 3383.424333] LustreError: 3026:0:(client.c:1144:ptlrpc_import_delay_req()) @@@ IMP_CLOSED   req@ffff880064b48c00 x1508554900711264/t0(0) o13-&amp;gt;lustre-OST0005-osc-MDT0000@10.2.4.204@tcp:7/4 lens 224/368 e 0 to 0 dl 0 ref 1 fl Rpc:/0/ffffffff rc 0/-1
07:21:23:[ 3384.576293] LustreError: 3025:0:(client.c:1144:ptlrpc_import_delay_req()) @@@ IMP_CLOSED   req@ffff880064b48f00 x1508554900711272/t0(0) o13-&amp;gt;lustre-OST0002-osc-MDT0000@10.2.4.204@tcp:7/4 lens 224/368 e 0 to 0 dl 0 ref 1 fl Rpc:/0/ffffffff rc 0/-1
07:21:23:[ 3384.583993] LustreError: 3025:0:(client.c:1144:ptlrpc_import_delay_req()) Skipped 1 previous similar message
07:21:23:[ 3385.342311] Lustre: lustre-MDT0000: Not available for connect from 10.2.4.204@tcp (stopping)
07:21:23:[ 3385.348123] Lustre: Skipped 6 previous similar messages
07:21:34:[ 3393.399148] Lustre: 21243:0:(client.c:2020:ptlrpc_expire_one_request()) @@@ Request sent has timed out for slow reply: [sent 1438672871/real 1438672871]  req@ffff880066782d00 x1508554900711308/t0(0) o251-&amp;gt;MGC10.2.4.203@tcp@0@lo:26/25 lens 224/224 e 0 to 1 dl 1438672877 ref 2 fl Rpc:XN/0/ffffffff rc 0/-1
07:21:34:[ 3393.649300] Lustre: server umount lustre-MDT0000 complete
07:21:35:[ 3393.772660] Lustre: DEBUG MARKER: lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
07:21:35:[ 3399.028308] Lustre: DEBUG MARKER: grep -c /mnt/mds1&apos; &apos; /proc/mounts
07:21:35:[ 3399.250588] Lustre: DEBUG MARKER: lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
07:21:35:[ 3399.540714] Lustre: DEBUG MARKER: mkfs.lustre --mgs --fsname=lustre --mdt --index=0 --param=sys.timeout=20 --param=lov.stripesize=1048576 --param=lov.stripecount=0 --param=mdt.identity_upcall=/usr/sbin/l_getidentity --backfstype=ldiskfs --device-size=100000 --reformat /dev/lvm-Role_MDS/P1
07:21:35:[ 3399.843372] LDISKFS-fs (dm-0): mounted filesystem with ordered data mode. Opts: user_xattr,errors=remount-ro
07:21:35:[ 3407.552264] Lustre: DEBUG MARKER: running=$(grep -c /mnt/mds1&apos; &apos; /proc/mounts);
07:21:35:[ 3407.552264] mpts=$(mount | grep -c /mnt/mds1&apos; &apos;);
07:21:35:[ 3407.552264] if [ $running -ne $mpts ]; then
07:21:35:[ 3407.552264]     echo $(hostname) env are INSANE!;
07:21:35:[ 3407.552264]     exit 1;
07:21:35:[ 3407.552264] fi
07:21:35:[ 3407.842656] Lustre: DEBUG MARKER: running=$(grep -c /mnt/mds1&apos; &apos; /proc/mounts);
07:21:35:[ 3407.842656] mpts=$(mount | grep -c /mnt/mds1&apos; &apos;);
07:21:35:[ 3407.842656] if [ $running -ne $mpts ]; then
07:21:35:[ 3407.842656]     echo $(hostname) env are INSANE!;
07:21:35:[ 3407.842656]     exit 1;
07:21:35:[ 3407.842656] fi
07:21:35:[ 3408.804243] Lustre: DEBUG MARKER: mkdir -p /mnt/mds1
07:21:35:[ 3409.023783] Lustre: DEBUG MARKER: test -b /dev/lvm-Role_MDS/P1
07:21:35:[ 3409.250145] Lustre: DEBUG MARKER: mkdir -p /mnt/mds1; mount -t lustre   		                   /dev/lvm-Role_MDS/P1 /mnt/mds1
07:21:35:[ 3409.447160] LDISKFS-fs (dm-0): mounted filesystem with ordered data mode. Opts: user_xattr,errors=remount-ro
07:21:35:[ 3409.554144] LDISKFS-fs (dm-0): mounted filesystem with ordered data mode. Opts: user_xattr,errors=remount-ro,no_mbcache
07:21:35:[ 3409.593824] Lustre: Setting parameter lustre-MDT0000-mdtlov.lov.stripesize in log lustre-MDT0000
07:21:35:[ 3409.600230] Lustre: Skipped 4 previous similar messages
07:21:35:[ 3409.681883] Lustre: 21900:0:(osd_internal.h:1085:osd_trans_exec_check()) op 3: used 8, used now 8, reserved 4
07:21:35:[ 3409.687942] Lustre: 21900:0:(osd_handler.c:902:osd_trans_dump_creds())   create: 1/4/8, destroy: 0/0/0
07:21:35:[ 3409.691974] Lustre: 21900:0:(osd_handler.c:909:osd_trans_dump_creds())   attr_set: 0/0/0, xattr_set: 0/0/0
07:21:35:[ 3409.694351] Lustre: 21900:0:(osd_handler.c:919:osd_trans_dump_creds())   write: 0/0/0, punch: 0/0/0, quota 2/2/0
07:21:35:[ 3409.696715] Lustre: 21900:0:(osd_handler.c:926:osd_trans_dump_creds())   insert: 1/17/0, delete: 0/0/0
07:21:35:[ 3409.698964] Lustre: 21900:0:(osd_handler.c:933:osd_trans_dump_creds())   ref_add: 0/0/0, ref_del: 0/0/0
07:21:35:[ 3409.701213] LustreError: 21900:0:(osd_internal.h:1088:osd_trans_exec_check()) LBUG
07:21:35:[ 3409.703301] Pid: 21900, comm: llog_process_th
07:21:35:[ 3409.705196] 
07:21:35:[ 3409.705196] Call Trace:
07:21:35:[ 3409.708410]  [&amp;lt;ffffffffa06107d3&amp;gt;] libcfs_debug_dumpstack+0x53/0x80 [libcfs]
07:21:35:[ 3409.710441]  [&amp;lt;ffffffffa0610d75&amp;gt;] lbug_with_loc+0x45/0xc0 [libcfs]
07:21:35:[ 3409.712347]  [&amp;lt;ffffffffa0bf081e&amp;gt;] osd_it_ea_rec.part.94+0x0/0x36 [osd_ldiskfs]
07:21:35:[ 3409.714286]  [&amp;lt;ffffffffa0bc98c7&amp;gt;] osd_object_ea_create+0x927/0xb80 [osd_ldiskfs]
07:21:35:[ 3409.716229]  [&amp;lt;ffffffffa0756fa8&amp;gt;] dt_find_or_create+0x598/0x8d0 [obdclass]
07:21:35:[ 3409.718053]  [&amp;lt;ffffffffa05f0e20&amp;gt;] fld_index_init+0x1a0/0xbb0 [fld]
07:21:35:[ 3409.719899]  [&amp;lt;ffffffffa05ee565&amp;gt;] fld_server_init+0xa5/0x3e0 [fld]
07:21:35:[ 3409.721660]  [&amp;lt;ffffffffa0d89cf4&amp;gt;] mdt_init0+0x4f4/0x12b0 [mdt]
07:21:35:[ 3409.723354]  [&amp;lt;ffffffffa0d8ab29&amp;gt;] mdt_device_alloc+0x79/0x110 [mdt]
07:21:35:[ 3409.725093]  [&amp;lt;ffffffffa0737a84&amp;gt;] obd_setup+0x114/0x2a0 [obdclass]
07:21:35:[ 3409.726784]  [&amp;lt;ffffffffa0738b77&amp;gt;] class_setup+0x2f7/0x8d0 [obdclass]
07:21:35:[ 3409.728474]  [&amp;lt;ffffffffa0740c96&amp;gt;] class_process_config+0x1c36/0x2db0 [obdclass]
07:21:35:[ 3409.730230]  [&amp;lt;ffffffff812de5d9&amp;gt;] ? simple_strtoul+0x9/0x10
07:21:35:[ 3409.731866]  [&amp;lt;ffffffffa0744fb0&amp;gt;] ? target_name2index+0x90/0xc0 [obdclass]
07:21:35:[ 3409.733550]  [&amp;lt;ffffffff811acbe3&amp;gt;] ? __kmalloc+0x1f3/0x230
07:21:35:[ 3409.735128]  [&amp;lt;ffffffffa073a6bb&amp;gt;] ? lustre_cfg_new+0x8b/0x400 [obdclass]
07:21:35:[ 3409.736805]  [&amp;lt;ffffffffa07426bd&amp;gt;] class_config_llog_handler+0x8ad/0x1d20 [obdclass]
07:21:35:[ 3409.738491]  [&amp;lt;ffffffffa0703345&amp;gt;] llog_process_thread+0x5f5/0x1020 [obdclass]
07:21:35:[ 3409.740146]  [&amp;lt;ffffffffa07047c0&amp;gt;] ? llog_process_thread_daemonize+0x0/0x80 [obdclass]
07:21:35:[ 3409.741831]  [&amp;lt;ffffffffa070480c&amp;gt;] llog_process_thread_daemonize+0x4c/0x80 [obdclass]
07:21:35:[ 3409.743461]  [&amp;lt;ffffffff8109739f&amp;gt;] kthread+0xcf/0xe0
07:21:35:[ 3409.744898]  [&amp;lt;ffffffff810972d0&amp;gt;] ? kthread+0x0/0xe0
07:21:35:[ 3409.746331]  [&amp;lt;ffffffff81615018&amp;gt;] ret_from_fork+0x58/0x90
07:21:35:[ 3409.747825]  [&amp;lt;ffffffff810972d0&amp;gt;] ? kthread+0x0/0xe0
07:21:35:[ 3409.749277] 
07:21:35:[ 3409.750649] Kernel panic - not syncing: LBUG
07:21:35:[ 3409.751641] CPU: 0 PID: 21900 Comm: llog_process_th Tainted: GF          O--------------   3.10.0-229.7.2.el7_lustre.gfd6f11c.x86_64 #1
07:21:35:[ 3409.751641] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2007
07:21:36:[ 3409.751641]  ffffffffa062dfaf 000000008a979486 ffff8800608af870 ffffffff816051aa
07:21:36:[ 3409.751641]  ffff8800608af8f0 ffffffff815fea1e ffffffff00000008 ffff8800608af900
07:21:36:[ 3409.751641]  ffff8800608af8a0 000000008a979486 ffffffffa0bf27d0 0000000000000246
07:21:36:[ 3409.751641] Call Trace:
07:21:36:[ 3409.751641]  [&amp;lt;ffffffff816051aa&amp;gt;] dump_stack+0x19/0x1b
07:21:36:[ 3409.751641]  [&amp;lt;ffffffff815fea1e&amp;gt;] panic+0xd8/0x1e7
07:21:36:[ 3409.751641]  [&amp;lt;ffffffffa0610ddb&amp;gt;] lbug_with_loc+0xab/0xc0 [libcfs]
07:21:36:[ 3409.751641]  [&amp;lt;ffffffffa0bf081e&amp;gt;] osd_trans_exec_check.part.91+0x1a/0x1a [osd_ldiskfs]
07:21:36:[ 3409.751641]  [&amp;lt;ffffffffa0bc98c7&amp;gt;] osd_object_ea_create+0x927/0xb80 [osd_ldiskfs]
07:21:36:[ 3409.751641]  [&amp;lt;ffffffffa0756fa8&amp;gt;] dt_find_or_create+0x598/0x8d0 [obdclass]
07:21:36:[ 3409.751641]  [&amp;lt;ffffffffa05f0e20&amp;gt;] fld_index_init+0x1a0/0xbb0 [fld]
07:21:36:[ 3409.751641]  [&amp;lt;ffffffffa05ee565&amp;gt;] fld_server_init+0xa5/0x3e0 [fld]
07:21:36:[ 3409.751641]  [&amp;lt;ffffffffa0d89cf4&amp;gt;] mdt_init0+0x4f4/0x12b0 [mdt]
07:21:36:[ 3409.751641]  [&amp;lt;ffffffffa0d8ab29&amp;gt;] mdt_device_alloc+0x79/0x110 [mdt]
07:21:36:[ 3409.751641]  [&amp;lt;ffffffffa0737a84&amp;gt;] obd_setup+0x114/0x2a0 [obdclass]
07:21:36:[ 3409.751641]  [&amp;lt;ffffffffa0738b77&amp;gt;] class_setup+0x2f7/0x8d0 [obdclass]
07:21:36:[ 3409.751641]  [&amp;lt;ffffffffa0740c96&amp;gt;] class_process_config+0x1c36/0x2db0 [obdclass]
07:21:36:[ 3409.751641]  [&amp;lt;ffffffff812de5d9&amp;gt;] ? simple_strtoul+0x9/0x10
07:21:36:[ 3409.751641]  [&amp;lt;ffffffffa0744fb0&amp;gt;] ? target_name2index+0x90/0xc0 [obdclass]
07:21:36:[ 3409.751641]  [&amp;lt;ffffffff811acbe3&amp;gt;] ? __kmalloc+0x1f3/0x230
07:21:36:[ 3409.751641]  [&amp;lt;ffffffffa073a6bb&amp;gt;] ? lustre_cfg_new+0x8b/0x400 [obdclass]
07:21:36:[ 3409.751641]  [&amp;lt;ffffffffa07426bd&amp;gt;] class_config_llog_handler+0x8ad/0x1d20 [obdclass]
07:21:36:[ 3409.751641]  [&amp;lt;ffffffffa0703345&amp;gt;] llog_process_thread+0x5f5/0x1020 [obdclass]
07:21:36:[ 3409.751641]  [&amp;lt;ffffffffa07047c0&amp;gt;] ? llog_backup+0x500/0x500 [obdclass]
07:21:36:[ 3409.751641]  [&amp;lt;ffffffffa070480c&amp;gt;] llog_process_thread_daemonize+0x4c/0x80 [obdclass]
07:21:36:[ 3409.751641]  [&amp;lt;ffffffff8109739f&amp;gt;] kthread+0xcf/0xe0
07:21:36:[ 3409.751641]  [&amp;lt;ffffffff810972d0&amp;gt;] ? kthread_create_on_node+0x140/0x140
07:21:36:[ 3409.751641]  [&amp;lt;ffffffff81615018&amp;gt;] ret_from_fork+0x58/0x90
07:21:36:[ 3409.751641]  [&amp;lt;ffffffff810972d0&amp;gt;] ? kthread_create_on_node+0x140/0x140
07:21:36:[ 3409.751641] drm_kms_helper: panic occurred, switching back to text console
07:21:36:[ 3409.751641] ------------[ cut here ]------------
07:21:36:[ 3409.751641] kernel BUG at arch/x86/mm/pageattr.c:216!
07:21:36:[ 3409.751641] invalid opcode: 0000 [#1] SMP 
07:21:36:[ 3409.751641] Modules linked in: osp(OF) mdd(OF) lod(OF) mdt(OF) lfsck(OF) mgs(OF) mgc(OF) osd_ldiskfs(OF) lquota(OF) fid(OF) fld(OF) ksocklnd(OF) ptlrpc(OF) obdclass(OF) lnet(OF) sha512_generic libcfs(OF) ldiskfs(OF) dm_mod rpcsec_gss_krb5 auth_rpcgss nfsv4 dns_resolver nfs lockd fscache xprtrdma sunrpc ib_isert iscsi_target_mod ib_iser libiscsi scsi_transport_iscsi ib_srpt target_core_mod ib_srp scsi_transport_srp scsi_tgt ib_ipoib rdma_ucm ib_ucm ib_uverbs ib_umad rdma_cm ppdev ib_cm iw_cm virtio_balloon pcspkr serio_raw ib_sa i2c_piix4 parport_pc parport ib_mad ib_core ib_addr ext4 mbcache jbd2 ata_generic pata_acpi cirrus syscopyarea sysfillrect sysimgblt 8139too drm_kms_helper ttm virtio_blk ata_piix libata floppy drm i2c_core virtio_pci virtio_ring virtio 8139cp mii
07:21:36:[ 3409.751641] CPU: 0 PID: 21900 Comm: llog_process_th Tainted: GF          O--------------   3.10.0-229.7.2.el7_lustre.gfd6f11c.x86_64 #1
07:21:36:[ 3409.751641] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2007
07:21:36:[ 3409.751641] task: ffff88003661b8e0 ti: ffff8800608ac000 task.ti: ffff8800608ac000
07:21:36:[ 3409.751641] RIP: 0010:[&amp;lt;ffffffff8105c2ef&amp;gt;]  [&amp;lt;ffffffff8105c2ef&amp;gt;] change_page_attr_set_clr+0x4ef/0x500
07:21:36:[ 3409.751641] RSP: 0018:ffff8800608af070  EFLAGS: 00010046
07:21:36:[ 3409.751641] RAX: 0000000000000046 RBX: 0000000000000000 RCX: 0000000000000010
07:21:36:[ 3409.751641] RDX: 0000000000002000 RSI: 0000000000000000 RDI: 0000000080000000
07:21:36:[ 3409.751641] RBP: ffff8800608af108 R08: 0000000000000004 R09: 000000000005bfad
07:21:36:[ 3409.751641] R10: 0000000000003689 R11: ffffffff8118ff6f R12: 0000000000000010
07:21:36:[ 3409.751641] R13: 0000000000000000 R14: 0000000000000200 R15: 0000000000000005
07:21:36:[ 3409.751641] FS:  0000000000000000(0000) GS:ffff88007fc00000(0000) knlGS:0000000000000000
07:21:36:[ 3409.751641] CS:  0010 DS: 0000 ES: 0000 CR0: 000000008005003b
07:21:36:[ 3409.751641] CR2: 00007fd717d5c018 CR3: 000000000190e000 CR4: 00000000000006f0
07:21:36:[ 3409.751641] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
07:21:36:[ 3409.751641] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
07:21:36:[ 3409.751641] Stack:
07:21:36:[ 3409.751641]  000000045a5a5a5a ffff880000000000 0000000000000000 ffff880069f20000
07:21:36:[ 3409.751641]  ffff88003661b8e0 0000000000000000 0000000000000000 0000000000000010
07:21:36:[ 3409.751641]  0000000000000000 0000000500000001 000000000005bfad 0000020000000000
07:21:36:[ 3409.751641] Call Trace:
07:21:36:[ 3409.751641]  [&amp;lt;ffffffff8105c646&amp;gt;] _set_pages_array+0xe6/0x130
07:21:36:[ 3409.751641]  [&amp;lt;ffffffff8105c6c3&amp;gt;] set_pages_array_wc+0x13/0x20
07:21:36:[ 3409.751641]  [&amp;lt;ffffffffa00a73af&amp;gt;] ttm_set_pages_caching+0x2f/0x70 [ttm]
07:21:36:[ 3409.751641]  [&amp;lt;ffffffffa00a74f4&amp;gt;] ttm_alloc_new_pages.isra.7+0xb4/0x180 [ttm]
07:21:36:[ 3409.751641]  [&amp;lt;ffffffffa00a7e50&amp;gt;] ttm_pool_populate+0x3e0/0x500 [ttm]
07:21:36:[ 3409.751641]  [&amp;lt;ffffffffa013732e&amp;gt;] cirrus_ttm_tt_populate+0xe/0x10 [cirrus]
07:21:36:[ 3409.751641]  [&amp;lt;ffffffffa00a46dd&amp;gt;] ttm_bo_move_memcpy+0x65d/0x6e0 [ttm]
07:21:36:[ 3409.751641]  [&amp;lt;ffffffff8118f73e&amp;gt;] ? map_vm_area+0x2e/0x40
07:21:36:[ 3409.751641]  [&amp;lt;ffffffffa00a02c9&amp;gt;] ? ttm_tt_init+0x69/0xb0 [ttm]
07:21:36:[ 3409.751641]  [&amp;lt;ffffffffa01372d8&amp;gt;] cirrus_bo_move+0x18/0x20 [cirrus]
07:21:37:[ 3409.751641]  [&amp;lt;ffffffffa00a1de5&amp;gt;] ttm_bo_handle_move_mem+0x265/0x5b0 [ttm]
07:21:37:[ 3409.751641]  [&amp;lt;ffffffff81601a64&amp;gt;] ? __slab_free+0x10e/0x277
07:21:37:[ 3409.751641]  [&amp;lt;ffffffff8118ef33&amp;gt;] ? __free_vmap_area+0xb3/0xf0
07:21:37:[ 3409.751641]  [&amp;lt;ffffffffa00a274a&amp;gt;] ? ttm_bo_mem_space+0x10a/0x310 [ttm]
07:21:37:[ 3409.751641]  [&amp;lt;ffffffffa00a2e17&amp;gt;] ttm_bo_validate+0x247/0x260 [ttm]
07:21:37:[ 3409.751641]  [&amp;lt;ffffffff81059e69&amp;gt;] ? iounmap+0x79/0xa0
07:21:37:[ 3409.751641]  [&amp;lt;ffffffff81050000&amp;gt;] ? kgdb_arch_late+0x80/0x180
07:21:37:[ 3409.751641]  [&amp;lt;ffffffffa0137ac2&amp;gt;] cirrus_bo_push_sysram+0x82/0xe0 [cirrus]
07:21:37:[ 3409.751641]  [&amp;lt;ffffffffa0135c84&amp;gt;] cirrus_crtc_do_set_base.isra.8.constprop.10+0x84/0x430 [cirrus]
07:21:37:[ 3409.751641]  [&amp;lt;ffffffffa0136479&amp;gt;] cirrus_crtc_mode_set+0x449/0x4d0 [cirrus]
07:21:37:[ 3409.751641]  [&amp;lt;ffffffffa0112939&amp;gt;] drm_crtc_helper_set_mode+0x2e9/0x520 [drm_kms_helper]
07:21:37:[ 3409.751641]  [&amp;lt;ffffffffa01136bf&amp;gt;] drm_crtc_helper_set_config+0x87f/0xaa0 [drm_kms_helper]
07:21:37:[ 3409.751641]  [&amp;lt;ffffffffa005b711&amp;gt;] drm_mode_set_config_internal+0x61/0xe0 [drm]
07:21:37:[ 3409.751641]  [&amp;lt;ffffffffa011ae83&amp;gt;] restore_fbdev_mode+0xb3/0xe0 [drm_kms_helper]
07:21:37:[ 3409.751641]  [&amp;lt;ffffffffa011b045&amp;gt;] drm_fb_helper_force_kernel_mode+0x75/0xb0 [drm_kms_helper]
07:21:37:[ 3409.751641]  [&amp;lt;ffffffffa011bd59&amp;gt;] drm_fb_helper_panic+0x29/0x30 [drm_kms_helper]
07:21:37:[ 3409.751641]  [&amp;lt;ffffffff81610a6c&amp;gt;] notifier_call_chain+0x4c/0x70
07:21:37:[ 3409.751641]  [&amp;lt;ffffffff81610aca&amp;gt;] atomic_notifier_call_chain+0x1a/0x20
07:21:37:[ 3409.751641]  [&amp;lt;ffffffff815fea4c&amp;gt;] panic+0x106/0x1e7
07:21:37:[ 3409.751641]  [&amp;lt;ffffffffa0610ddb&amp;gt;] lbug_with_loc+0xab/0xc0 [libcfs]
07:21:37:[ 3409.751641]  [&amp;lt;ffffffffa0bf081e&amp;gt;] osd_trans_exec_check.part.91+0x1a/0x1a [osd_ldiskfs]
07:21:37:[ 3409.751641]  [&amp;lt;ffffffffa0bc98c7&amp;gt;] osd_object_ea_create+0x927/0xb80 [osd_ldiskfs]
07:21:37:[ 3409.751641]  [&amp;lt;ffffffffa0756fa8&amp;gt;] dt_find_or_create+0x598/0x8d0 [obdclass]
07:21:37:[ 3409.751641]  [&amp;lt;ffffffffa05f0e20&amp;gt;] fld_index_init+0x1a0/0xbb0 [fld]
08:22:00:********** Timeout by autotest system **********
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="123288" author="bobijam" created="Wed, 5 Aug 2015 04:07:44 +0000"  >&lt;p&gt;I think it&apos;s a dup of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6827&quot; title=&quot;sanity-scrub: Failed mount OST on ldiskfs&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6827&quot;&gt;&lt;del&gt;LU-6827&lt;/del&gt;&lt;/a&gt; (transaction credits insufficient for EA object create), and the patch has been landed for 2.8&lt;/p&gt;</comment>
                            <comment id="123313" author="simmonsja" created="Wed, 5 Aug 2015 13:25:56 +0000"  >&lt;p&gt;Its only landed for RHEL platform. We are missing a patch for SLES12 still.&lt;/p&gt;</comment>
                            <comment id="123351" author="bobijam" created="Wed, 5 Aug 2015 16:06:58 +0000"  >&lt;p&gt;The patch of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6827&quot; title=&quot;sanity-scrub: Failed mount OST on ldiskfs&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6827&quot;&gt;&lt;del&gt;LU-6827&lt;/del&gt;&lt;/a&gt; changes osd-ldiskfs code, it&apos;s distro unrelated but affects ldiskfs as back file type.&lt;/p&gt;</comment>
                            <comment id="123359" author="simmonsja" created="Wed, 5 Aug 2015 16:40:19 +0000"  >&lt;p&gt;Sorry. Thinking of another patch.&lt;/p&gt;</comment>
                            <comment id="123420" author="bobijam" created="Thu, 6 Aug 2015 02:50:37 +0000"  >&lt;p&gt;You may mean &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6911&quot; title=&quot;sanity 102ha test failed: mds crash kernel BUG at fs/inode.c:1358! RIP [&amp;lt;ffffffff811a53c9&amp;gt;] iput+0x69/0x70&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6911&quot;&gt;&lt;del&gt;LU-6911&lt;/del&gt;&lt;/a&gt;, the patch has been updated to include SLES patch.&lt;/p&gt;</comment>
                            <comment id="123427" author="pjones" created="Thu, 6 Aug 2015 04:53:35 +0000"  >&lt;p&gt;It looks like the &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6827&quot; title=&quot;sanity-scrub: Failed mount OST on ldiskfs&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6827&quot;&gt;&lt;del&gt;LU-6827&lt;/del&gt;&lt;/a&gt; patch did land after that test run so we can close this as a duplicate and reopen if that is proven not to be the case.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                            <outwardlinks description="duplicates">
                                        <issuelink>
            <issuekey id="31028">LU-6827</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzxjsf:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>