<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:03:35 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-6827] sanity-scrub: Failed mount OST on ldiskfs</title>
                <link>https://jira.whamcloud.com/browse/LU-6827</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for sarah_lw &amp;lt;wei3.liu@intel.com&amp;gt;&lt;/p&gt;

&lt;p&gt;Please provide additional information about the failure here.&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/f7c4952e-25eb-11e5-866a-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/f7c4952e-25eb-11e5-866a-5254006e85c2&lt;/a&gt;.&lt;/p&gt;


&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;21:50:25:CMD: shadow-43vm3 PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/utils/gss:/usr/lib64/lustre/utils:/usr/lib64/openmpi/bin:/usr/bin:/bin:/usr/sbin:/sbin::/sbin:/bin:/usr/sbin: NAME=autotest_config sh rpc.sh set_default_debug \&quot;vfstrace rpctrace dlmtrace neterror ha config ioctl super lfsck\&quot; \&quot;all -lnet -lnd -pinger\&quot; 4 
21:50:27:CMD: shadow-43vm3 e2label /dev/lvm-Role_MDS/P1 2&amp;gt;/dev/null
21:50:27:Started lustre-MDT0000
21:50:27:CMD: shadow-43vm4 mkdir -p /mnt/ost1
21:50:27:CMD: shadow-43vm4 test -b /dev/lvm-Role_OSS/P1
21:50:28:Starting ost1:   /dev/lvm-Role_OSS/P1 /mnt/ost1
21:50:28:CMD: shadow-43vm4 mkdir -p /mnt/ost1; mount -t lustre   		                   /dev/lvm-Role_OSS/P1 /mnt/ost1
21:50:28:shadow-43vm4: mount.lustre: mount /dev/mapper/lvm--Role_OSS-P1 at /mnt/ost1 failed: Cannot send after transport endpoint shutdown
21:50:28:sanity-scrub returned 0
21:50:28:running: sanity-benchmark 
21:50:28:run_suite sanity-benchmark /usr/lib64/lustre/tests/sanity-benchmark.sh

&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment>server and client: lustre-master build # 3094 RHEL7</environment>
        <key id="31028">LU-6827</key>
            <summary>sanity-scrub: Failed mount OST on ldiskfs</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="2" iconUrl="https://jira.whamcloud.com/images/icons/priorities/critical.svg">Critical</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="bobijam">Zhenyu Xu</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Thu, 9 Jul 2015 17:59:17 +0000</created>
                <updated>Thu, 20 Oct 2016 05:43:25 +0000</updated>
                            <resolved>Mon, 3 Aug 2015 15:12:25 +0000</resolved>
                                    <version>Lustre 2.8.0</version>
                                    <fixVersion>Lustre 2.8.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>5</watches>
                                                                            <comments>
                            <comment id="120876" author="pjones" created="Thu, 9 Jul 2015 20:13:34 +0000"  >&lt;p&gt;Bobijam&lt;/p&gt;

&lt;p&gt;Could you please look into this as a high priority? It is disrupting RHEL7 server testing.&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="120907" author="bobijam" created="Fri, 10 Jul 2015 00:26:02 +0000"  >&lt;p&gt;it does not look like Lustre issue, more like test network environment issue. When mounting OST, the OSS node cannot reach the MGS node at that moment.&lt;/p&gt;</comment>
                            <comment id="120942" author="pjones" created="Fri, 10 Jul 2015 12:00:15 +0000"  >&lt;p&gt;Thanks Bobi. Sarah, does this issue 100% reproduce?&lt;/p&gt;</comment>
                            <comment id="121193" author="sarah" created="Mon, 13 Jul 2015 20:21:53 +0000"  >&lt;p&gt;No, this one is not 100% reproduced, while it is not a one-time failure neither, here are other instances:&lt;/p&gt;

&lt;p&gt;RHEL7.1 server/6.6 client:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/f47111c4-282d-11e5-b595-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/f47111c4-282d-11e5-b595-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;RHEL7.1 server/client DNE:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/c465a3ec-26fb-11e5-b3d7-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/c465a3ec-26fb-11e5-b3d7-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="121218" author="bobijam" created="Tue, 14 Jul 2015 02:19:23 +0000"  >&lt;p&gt;can you grab a debug log from the node when it happens next time, or let me log into it to check what the problem might be?&lt;/p&gt;</comment>
                            <comment id="121573" author="sarah" created="Fri, 17 Jul 2015 18:55:57 +0000"  >&lt;p&gt;Hello Bobijam,&lt;/p&gt;

&lt;p&gt;could you please check the link here and see if there is any useful information, if still not enough, I will try to reproduce it manually:&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://testing.hpdd.intel.com/sub_tests/1517745c-25ec-11e5-866a-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/sub_tests/1517745c-25ec-11e5-866a-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="121599" author="bobijam" created="Sat, 18 Jul 2015 03:13:45 +0000"  >&lt;p&gt;From the link I saw the issue of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6186&quot; title=&quot;EL7 client sanity-hsm test_70: Failed to start copytool monitor on&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6186&quot;&gt;&lt;del&gt;LU-6186&lt;/del&gt;&lt;/a&gt; &quot;sanity-hsm test_70: Failed to start copytool monitor&quot; (from the OST stack_trace log), not anything relates to this issue.&lt;/p&gt;</comment>
                            <comment id="121878" author="sarah" created="Tue, 21 Jul 2015 21:18:38 +0000"  >&lt;p&gt;ok, then I will try to reproduce this problem manually.&lt;/p&gt;

&lt;p&gt;Hit TEI-3732 when try to provision RHEL7.1 server. will push a for test only patch and see how it goes&lt;/p&gt;</comment>
                            <comment id="121980" author="sarah" created="Thu, 23 Jul 2015 04:58:31 +0000"  >&lt;p&gt;Hello Bobijam,&lt;/p&gt;

&lt;p&gt;I can reproduce this bug and here is what I got from the MDS&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;=========----- Thu Jul 23 04:51:48 UTC 2015
[10502.926564] Lustre: DEBUG MARKER: -----============= acceptance-small: sanity-scrub ============----- Thu Jul 23 04:51:48 UTC 2015
[10504.076263] Lustre: DEBUG MARKER: /usr/sbin/lctl mark == sanity-scrub test complete, duration -o sec ======================================================= 04:51:49 \(1437627109\)
[10504.254124] Lustre: DEBUG MARKER: == sanity-scrub test complete, duration -o sec ======================================================= 04:51:49 (1437627109)
[10506.618798] Lustre: DEBUG MARKER: grep -c /mnt/mds1&apos; &apos; /proc/mounts
[10506.935315] Lustre: DEBUG MARKER: umount -d -f /mnt/mds1
[10509.247280] LustreError: 2904:0:(client.c:1144:ptlrpc_import_delay_req()) @@@ IMP_CLOSED   req@ffff880066f1cf00 x1507451119138540/t0(0) o13-&amp;gt;lustre-OST0005-osc-MDT0000@10.1.5.50@tcp:7/4 lens 224/368 e 0 to 0 dl 0 ref 1 fl Rpc:/0/ffffffff rc 0/-1
[10509.247286] LustreError: 2905:0:(client.c:1144:ptlrpc_import_delay_req()) @@@ IMP_CLOSED   req@ffff88003d415800 x1507451119138536/t0(0) o13-&amp;gt;lustre-OST0004-osc-MDT0000@10.1.5.50@tcp:7/4 lens 224/368 e 0 to 0 dl 0 ref 1 fl Rpc:/0/ffffffff rc 0/-1
[10510.975199] LustreError: 2905:0:(client.c:1144:ptlrpc_import_delay_req()) @@@ IMP_CLOSED   req@ffff88003d415800 x1507451119138556/t0(0) o13-&amp;gt;lustre-OST0000-osc-MDT0000@10.1.5.50@tcp:7/4 lens 224/368 e 0 to 0 dl 0 ref 1 fl Rpc:/0/ffffffff rc 0/-1
[10510.979720] LustreError: 2905:0:(client.c:1144:ptlrpc_import_delay_req()) Skipped 4 previous similar messages
[10511.211582] Lustre: lustre-MDT0000: Not available for connect from 10.1.5.50@tcp (stopping)
[10511.211584] Lustre: lustre-MDT0000: Not available for connect from 10.1.5.50@tcp (stopping)
[10511.215851] Lustre: Skipped 1 previous similar message
[10516.205426] LustreError: 137-5: lustre-MDT0000_UUID: not available for connect from 10.1.5.50@tcp (no target). If you are running an HA pair check that the target is mounted on the other server.
[10516.209156] LustreError: Skipped 4 previous similar messages
[10519.155134] Lustre: 31667:0:(client.c:2020:ptlrpc_expire_one_request()) @@@ Request sent has timed out for slow reply: [sent 1437627119/real 1437627119]  req@ffff88006a10f000 x1507451119138576/t0(0) o251-&amp;gt;MGC10.1.5.59@tcp@0@lo:26/25 lens 224/224 e 0 to 1 dl 1437627125 ref 2 fl Rpc:XN/0/ffffffff rc 0/-1
[10519.160540] Lustre: 31667:0:(client.c:2020:ptlrpc_expire_one_request()) Skipped 7 previous similar messages
[10519.252293] Lustre: server umount lustre-MDT0000 complete
[10519.415565] Lustre: DEBUG MARKER: lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
[10524.606842] Lustre: DEBUG MARKER: grep -c /mnt/mds1&apos; &apos; /proc/mounts
[10524.850875] Lustre: DEBUG MARKER: lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
[10525.212232] Lustre: DEBUG MARKER: mkfs.lustre --mgs --fsname=lustre --mdt --index=0 --param=sys.timeout=20 --param=lov.stripesize=1048576 --param=lov.stripecount=0 --param=mdt.identity_upcall=/usr/sbin/l_getidentity --backfstype=ldiskfs --device-size=200000 --reformat /dev/lvm-Role_MDS/P1
[10525.598654] LDISKFS-fs (dm-0): mounted filesystem with ordered data mode. Opts: user_xattr,errors=remount-ro
[10533.175997] Lustre: DEBUG MARKER: running=$(grep -c /mnt/mds1&apos; &apos; /proc/mounts);
[10533.175997] mpts=$(mount | grep -c /mnt/mds1&apos; &apos;);
[10533.175997] if [ $running -ne $mpts ]; then
[10533.175997]     echo $(hostname) env are INSANE!;
[10533.175997]     exit 1;
[10533.175997] fi
[10533.586791] Lustre: DEBUG MARKER: running=$(grep -c /mnt/mds1&apos; &apos; /proc/mounts);
[10533.586791] mpts=$(mount | grep -c /mnt/mds1&apos; &apos;);
[10533.586791] if [ $running -ne $mpts ]; then
[10533.586791]     echo $(hostname) env are INSANE!;
[10533.586791]     exit 1;
[10533.586791] fi
[10534.926722] Lustre: DEBUG MARKER: mkdir -p /mnt/mds1
[10535.227879] Lustre: DEBUG MARKER: test -b /dev/lvm-Role_MDS/P1
[10535.549478] Lustre: DEBUG MARKER: mkdir -p /mnt/mds1; mount -t lustre   		                   /dev/lvm-Role_MDS/P1 /mnt/mds1
[10535.790731] LDISKFS-fs (dm-0): mounted filesystem with ordered data mode. Opts: user_xattr,errors=remount-ro
[10535.917518] LDISKFS-fs (dm-0): mounted filesystem with ordered data mode. Opts: user_xattr,errors=remount-ro,no_mbcache
[10535.963581] Lustre: Setting parameter lustre-MDT0000-mdtlov.lov.stripesize in log lustre-MDT0000
[10535.965404] Lustre: Skipped 5 previous similar messages
[10536.128614] Lustre: 32326:0:(osd_internal.h:1085:osd_trans_exec_check()) op 3: used 8, used now 8, reserved 4
[10536.130569] Lustre: 32326:0:(osd_internal.h:1085:osd_trans_exec_check()) Skipped 2 previous similar messages
[10536.132675] Luse7
[10536.164238]  [&amp;lt;ffffffffa0634ddb&amp;gt;] lbug_with_loc+0xab/0xc0 [libcfs]
[10536.164238]  [&amp;lt;ffffffffa0c0f81e&amp;gt;] osd_trans_exec_check.part.91+0x1a/0x1a [osd_ldiskfs]
[10536.164238]  [&amp;lt;ffffffffa0be88c7&amp;gt;] osd_object_ea_create+0x927/0xb80 [osd_ldiskfs]
[10536.164238]  [&amp;lt;ffffffffa077afa8&amp;gt;] dt_find_or_create+0x598/0x8d0 [obdclass]
[10536.164238]  [&amp;lt;ffffffffa0614e20&amp;gt;] fld_index_init+0x1a0/0xbb0 [fld]
[10536.164238]  [&amp;lt;ffffffffa0612565&amp;gt;] fld_server_init+0xa5/0x3e0 [fld]
[10536.164238]  [&amp;lt;ffffffffa0da7cf4&amp;gt;] mdt_init0+0x4f4/0x12b0 [mdt]
[10536.164238]  [&amp;lt;ffffffffa0da8b29&amp;gt;] mdt_device_alloc+0x79/0x110 [mdt]
[10536.164238]  [&amp;lt;ffffffffa075ba84&amp;gt;] obd_setup+0x114/0x2a0 [obdclass]
[10536.164238]  [&amp;lt;ffffffffa075cb77&amp;gt;] class_setup+0x2f7/0x8d0 [obdclass]
[10536.164238]  [&amp;lt;ffffffffa0764c96&amp;gt;] class_process_config+0x1c36/0x2db0 [obdclass]
[10536.164238]  [&amp;lt;ffffffff812de5d9&amp;gt;] ? simple_strtoul+0x9/0x10
[10536.164238]  [&amp;lt;ffffffffa0768fb0&amp;gt;] ? target_name2index+0x90/0xc0 [obdclass]
[10536.164238]  [&amp;lt;ffffffff811acbe3&amp;gt;] ? __kmalloc+0x1f3/0x230
[10536.164238]  [&amp;lt;ffffffffa075e6bb&amp;gt;] ? lustre_cfg_new+0x8b/0x400 [obdclass]
[10536.164238]  [&amp;lt;ffffffffa07666bd&amp;gt;] class_config_llog_handler+0x8ad/0x1d20 [obdclass]
[10536.164238]  [&amp;lt;ffffffffa0727345&amp;gt;] llog_process_thread+0x5f5/0x1020 [obdclass]
[10536.164238]  [&amp;lt;ffffffffa07287c0&amp;gt;] ? llog_backup+0x500/0x500 [obdclass]
[10536.164238]  [&amp;lt;ffffffffa072880c&amp;gt;] llog_process_thread_daemonize+0x4c/0x80 [obdclass]
[10536.164238]  [&amp;lt;ffffffff8109739f&amp;gt;] kthread+0xcf/0xe0
[10536.164238]  [&amp;lt;ffffffff810972d0&amp;gt;] ? kthread_create_on_node+0x140/0x140
[10536.164238]  [&amp;lt;ffffffff81615018&amp;gt;] ret_from_fork+0x58/0x90
[10536.164238]  [&amp;lt;ffffffff810972d0&amp;gt;] ? kthread_create_on_node+0x140/0x140
[10536.164238] drm_kms_helper: panic occurred, switching back to text console
[10536.164238] ------------[ cut here ]------------
[10536.164238] kernel BUG at arch/x86/mm/pageattr.c:216!
[10536.164238] invalid opcode: 0000 [#1] SMP 
[10536.164238] Modules linked in: osp(OF) mdd(OF) lod(OF) mdt(OF) lfsck(OF) mgs(OF) mgc(OF) osd_ldiskfs(OF) lquota(OF) fid(OF) fld(OF) ksocklnd(OF) ptlrpc(OF) obdclass(OF) lnet(OF) sha512_generic libcfs(OF) ldiskfs(OF) dm_mod nfsv3 nfs_acl rpcsec_gss_krb5 auth_rpcgss nfsv4 dns_resolver nfs lockd fscache xprtrdma sunrpc ib_isert iscsi_target_mod ib_iser libiscsi scsi_transport_iscsi ib_srpt target_core_mod ib_srp scsi_transport_srp scsi_tgt ib_ipoib rdma_ucm ib_ucm ib_uverbs ib_umad rdma_cm ib_cm iw_cm ib_sa ppdev ib_mad parport_pc virtio_balloon serio_raw pcspkr i2c_piix4 parport ib_core ib_addr ext4 mbcache jbd2 ata_generic pata_acpi cirrus syscopyarea sysfillrect virtio_blk sysimgblt drm_kms_helper ttm ata_piix 8139too virtio_pci virtio_ring virtio drm 8139cp i2c_core mii libata floppy [last unloaded: obdecho]
[10536.164238] CPU: 1 PID: 32326 Comm: llog_process_th Tainted: GF       W  O--------------   3.10.0-229.7.2.el7_lustre.gfd6f11c.x86_64 #1
[10536.164238] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2007
[10536.164238] task: ffff8800366416c0 ti: ffff88007870c000 task.ti: ffff88007870c000
[10536.164238] RIP: 0010:[&amp;lt;ffffffff8105c2ef&amp;gt;]  [&amp;lt;ffffffff8105c2ef&amp;gt;] change_page_attr_set_clr+0x4ef/0x500
[10536.164238] RSP: 0018:ffff88007870f070  EFLAGS: 00010046
[10536.164238] RAX: 0000000000000046 RBX: 0000000000000000 RCX: 0000000000000010
[10536.164238] RDX: 0000000000002000 RSI: 0000000000000000 RDI: 0000000080000000
[10536.164238] RBP: ffff88007870f108 R08: 0000000000000004 R09: 000000000006b390
[10536.164238] R10: 0000000000003689 R11: 0000000000000004 R12: 0000000000000010
[10536.164238] R13: 0000000000000000 R14: 0000000000000200 R15: 0000000000000005
[10536.164238] FS:  0000000000000000(0000) GS:ffff88007fd00000(0000) knlGS:0000000000000000
[10536.164238] CS:  0010 DS: 0000 ES: 0000 CR0: 000000008005003b
[10536.164238] CR2: 00007feffbdcf000 CR3: 000000000190e000 CR4: 00000000000006e0
[10536.164238] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[10536.164238] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
[10536.164238] Stack:
[10536.164238]  000000045a5a5a5a ffff880000000000 0000000000000000 ffff88006bf4a000
[10536.164238]  ffff8800366416c0 0000000000000000 0000000000000000 0000000000000010
[10536.164238]  0000000000000000 0000000500000001 000000000006b390 0000020000000000
[10536.164238] Call Trace:
[10536.164238]  [&amp;lt;ffffffff8105c646&amp;gt;] _set_pages_array+0xe6/0x130
[10536.164238]  [&amp;lt;ffffffff8105c6c3&amp;gt;] set_pages_array_wc+0x13/0x20
[10536.164238]  [&amp;lt;ffffffffa01253af&amp;gt;] ttm_set_pages_caching+0x2f/0x70 [ttm]
[10536.164238]  [&amp;lt;ffffffffa01254f4&amp;gt;] ttm_alloc_new_pages.isra.7+0xb4/0x180 [ttm]
[10536.164238]  [&amp;lt;ffffffffa0125e50&amp;gt;] ttm_pool_populate+0x3e0/0x500 [ttm]
[10536.164238]  [&amp;lt;ffffffffa013c32e&amp;gt;] cirrus_ttm_tt_populate+0xe/0x10 [cirrus]
[10536.164238]  [&amp;lt;ffffffffa01226dd&amp;gt;] ttm_bo_move_memcpy+0x65d/0x6e0 [ttm]
[10536.164238]  [&amp;lt;ffffffff8118f73e&amp;gt;] ? map_vm_area+0x2e/0x40
[10536.164238]  [&amp;lt;ffffffffa011e2c9&amp;gt;] ? ttm_tt_init+0x69/0xb0 [ttm]
[10536.164238]  [&amp;lt;ffffffffa013c2d8&amp;gt;] cirrus_bo_move+0x18/0x20 [cirrus]
[10536.164238]  [&amp;lt;ffffffffa011fde5&amp;gt;] ttm_bo_handle_move_mem+0x265/0x5b0 [ttm]
[10536.164238]  [&amp;lt;ffffffff81601a64&amp;gt;] ? __slab_free+0x10e/0x277
[10536.164238]  [&amp;lt;ffffffffa012074a&amp;gt;] ? ttm_bo_mem_space+0x10a/0x310 [ttm]
[10536.164238]  [&amp;lt;ffffffffa0120e17&amp;gt;] ttm_bo_validate+0x247/0x260 [ttm]
[10536.164238]  [&amp;lt;ffffffff81059e69&amp;gt;] ? iounmap+0x79/0xa0
[10536.164238]  [&amp;lt;ffffffff81050000&amp;gt;] ? kgdb_arch_late+0x80/0x180
[10536.164238]  [&amp;lt;ffffffffa013cac2&amp;gt;] cirrus_bo_push_sysram+0x82/0xe0 [cirrus]
[10536.164238]  [&amp;lt;ffffffffa013ac84&amp;gt;] cirrus_crtc_do_set_base.isra.8.constprop.10+0x84/0x430 [cirrus]
[10536.164238]  [&amp;lt;ffffffffa013b479&amp;gt;] cirrus_crtc_mode_set+0x449/0x4d0 [cirrus]
[10536.164238]  [&amp;lt;ffffffffa00db939&amp;gt;] drm_crtc_helper_set_mode+0x2e9/0x520 [drm_kms_helper]
[10536.164238]  [&amp;lt;ffffffffa00dc6bf&amp;gt;] drm_crtc_helper_set_config+0x87f/0xaa0 [drm_kms_helper]
[10536.164238]  [&amp;lt;ffffffffa009c711&amp;gt;] drm_mode_set_config_internal+0x61/0xe0 [drm]
[10536.164238]  [&amp;lt;ffffffffa00e3e83&amp;gt;] restore_fbdev_mode+0xb3/0xe0 [drm_kms_helper]
[10536.164238]  [&amp;lt;ffffffffa00e4045&amp;gt;] drm_fb_helper_force_kernel_mode+0x75/0xb0 [drm_kms_helper]
[10536.164238]  [&amp;lt;ffffffffa00e4d59&amp;gt;] drm_fb_helper_panic+0x29/0x30 [drm_kms_helper]
[10536.164238]  [&amp;lt;ffffffff81610a6c&amp;gt;] notifier_call_chain+0x4c/0x70
[10536.164238]  [&amp;lt;ffffffff81610aca&amp;gt;] atomic_notifier_call_chain+0x1a/0x20
[10536.164238]  [&amp;lt;ffffffff815fea4c&amp;gt;] panic+0x106/0x1e7
[10536.164238]  [&amp;lt;ffffffffa0634ddb&amp;gt;] lbug_with_loc+0xab/0xc0 [libcfs]
[10536.164238]  [&amp;lt;ffffffffa0c0f81e&amp;gt;] osd_trans_exec_check.part.91+0x1a/0x1a [osd_ldiskfs]
[10536.164238]  [&amp;lt;ffffffffa0be88c7&amp;gt;] osd_object_ea_create+0x927/0xb80 [osd_ldiskfs]
[10536.164238]  [&amp;lt;ffffffffa077afa8&amp;gt;] dt_find_or_create+0x598/0x8d0 [obdclass]
[10536.164238]  [&amp;lt;ffffffffa0614e20&amp;gt;] fld_index_init+0x1a0/0xbb0 [fld]
[10536.164238]  [&amp;lt;ffffffffa0612565&amp;gt;] fld_server_init+0xa5/0x3e0 [fld]
[10536.164238]  [&amp;lt;ffffffffa0da7cf4&amp;gt;] mdt_init0+0x4f4/0x12b0 [mdt]
[10536.164238]  [&amp;lt;ffffffffa0da8b29&amp;gt;] mdt_device_alloc+0x79/0x110 [mdt]
[10536.164238]  [&amp;lt;ffffffffa075ba84&amp;gt;] obd_setup+0x114/0x2a0 [obdclass]
[10536.164238]  [&amp;lt;ffffffffa075cb77&amp;gt;] class_setup+0x2f7/0x8d0 [obdclass]
[10536.164238]  [&amp;lt;ffffffffa0764c96&amp;gt;] class_process_config+0x1c36/0x2db0 [obdclass]
[10536.164238]  [&amp;lt;ffffffff812de5d9&amp;gt;] ? simple_strtoul+0x9/0x10
[10536.164238]  [&amp;lt;ffffffffa0768fb0&amp;gt;] ? target_name2index+0x90/0xc0 [obdclass]
[10536.164238]  [&amp;lt;ffffffff811acbe3&amp;gt;] ? __kmalloc+0x1f3/0x230
[10536.164238]  [&amp;lt;ffffffffa075e6bb&amp;gt;] ? lustre_cfg_new+0x8b/0x400 [obdclass]
[10536.164238]  [&amp;lt;ffffffffa07666bd&amp;gt;] class_config_llog_handler+0x8ad/0x1d20 [obdclass]
[10536.164238]  [&amp;lt;ffffffffa0727345&amp;gt;] llog_process_thread+0x5f5/0x1020 [obdclass]
[10536.164238]  [&amp;lt;ffffffffa07287c0&amp;gt;] ? llog_backup+0x500/0x500 [obdclass]
[10536.164238]  [&amp;lt;ffffffffa072880c&amp;gt;] llog_process_thread_daemonize+0x4c/0x80 [obdclass]
[10536.164238]  [&amp;lt;ffffffff8109739f&amp;gt;] kthread+0xcf/0xe0
[10536.164238]  [&amp;lt;ffffffff810972d0&amp;gt;] ? kthread_create_on_node+0x140/0x140
[10536.164238]  [&amp;lt;ffffffff81615018&amp;gt;] ret_from_fork+0x58/0x90
[10536.164238]  [&amp;lt;ffffffff810972d0&amp;gt;] ? kthread_create_on_node+0x140/0x140
[10536.164238] Code: ba 00 00 00 48 c7 c7 78 9a 81 81 89 8d 78 ff ff ff 44 89 45 80 e8 a2 21 01 00 8b 8d 78 ff ff ff 44 8b 45 80 e9 99 fd ff ff 0f 0b &amp;lt;0f&amp;gt; 0b 0f 0b 66 66 66 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 
[10536.164238] RIP  [&amp;lt;ffffffff8105c2ef&amp;gt;] change_page_attr_set_clr+0x4ef/0x500
[10536.164238]  RSP &amp;lt;ffff88007870f070&amp;gt;

&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="121983" author="gerrit" created="Thu, 23 Jul 2015 07:16:37 +0000"  >&lt;p&gt;Bobi Jam (bobijam@hotmail.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/15694&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/15694&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6827&quot; title=&quot;sanity-scrub: Failed mount OST on ldiskfs&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6827&quot;&gt;&lt;del&gt;LU-6827&lt;/del&gt;&lt;/a&gt; osd: trans credit insufficient for EA object create&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: dbbc1abe98a28ae1bc84deca3a8d4d747e78e8c9&lt;/p&gt;</comment>
                            <comment id="121984" author="bobijam" created="Thu, 23 Jul 2015 07:17:08 +0000"  >&lt;p&gt;would you mind trying this patch?&lt;/p&gt;</comment>
                            <comment id="122019" author="sarah" created="Thu, 23 Jul 2015 16:48:26 +0000"  >&lt;p&gt;sure, will update the ticket when I have result&lt;/p&gt;</comment>
                            <comment id="122091" author="sarah" created="Fri, 24 Jul 2015 02:01:34 +0000"  >&lt;p&gt;Rerun the tests for 2 times, it passed with the patch. &lt;/p&gt;</comment>
                            <comment id="122966" author="gerrit" created="Mon, 3 Aug 2015 01:55:29 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/15694/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/15694/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6827&quot; title=&quot;sanity-scrub: Failed mount OST on ldiskfs&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6827&quot;&gt;&lt;del&gt;LU-6827&lt;/del&gt;&lt;/a&gt; osd: trans credit insufficient for EA object create&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 2aa5f604fba4f01a2c433f1d4cd77ae688e35e61&lt;/p&gt;</comment>
                            <comment id="123013" author="pjones" created="Mon, 3 Aug 2015 15:12:25 +0000"  >&lt;p&gt;Landed for 2.8&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                                                <inwardlinks description="is duplicated by">
                                        <issuelink>
            <issuekey id="31356">LU-6958</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="31995">LU-7118</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="26670">LU-5645</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzxhun:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>