<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:28:28 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-2818] Failure on test suite parallel-scale-nfsv4 test_compilebench: (mdt_lvb.c:126:mdt_lvbo_fill()) ASSERTION( rc == 0 ) failed</title>
                <link>https://jira.whamcloud.com/browse/LU-2818</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for sarah &amp;lt;sarah@whamcloud.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/25d39028-755e-11e2-bf59-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/25d39028-755e-11e2-bf59-52540035b04c&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_compilebench failed with the following error:&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;compilebench failed: 1&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;MDS console:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;12:44:26:Lustre: DEBUG MARKER: == parallel-scale-nfsv4 test compilebench: compilebench ============================================== 12:44:25 (1360701865)
12:44:27:Lustre: DEBUG MARKER: /usr/sbin/lctl mark .\/compilebench -D \/mnt\/lustre\/d0.compilebench -i 2         -r 2 --makej
12:44:27:Lustre: DEBUG MARKER: ./compilebench -D /mnt/lustre/d0.compilebench -i 2 -r 2 --makej
13:06:42:mdt00_003: page allocation failure. order:5, mode:0x50
13:06:42:Pid: 22716, comm: mdt00_003 Not tainted 2.6.32-279.19.1.el6_lustre.x86_64 #1
13:06:42:Call Trace:
13:06:42: [&amp;lt;ffffffff811231ff&amp;gt;] ? __alloc_pages_nodemask+0x77f/0x940
13:06:42: [&amp;lt;ffffffff8115d1a2&amp;gt;] ? kmem_getpages+0x62/0x170
13:06:42: [&amp;lt;ffffffff8115ddba&amp;gt;] ? fallback_alloc+0x1ba/0x270
13:06:42: [&amp;lt;ffffffff8115d80f&amp;gt;] ? cache_grow+0x2cf/0x320
13:06:42: [&amp;lt;ffffffff8115db39&amp;gt;] ? ____cache_alloc_node+0x99/0x160
13:06:42: [&amp;lt;ffffffffa04d8b60&amp;gt;] ? cfs_alloc+0x30/0x60 [libcfs]
13:06:42: [&amp;lt;ffffffff8115e909&amp;gt;] ? __kmalloc+0x189/0x220
13:06:42: [&amp;lt;ffffffffa04d8b60&amp;gt;] ? cfs_alloc+0x30/0x60 [libcfs]
13:06:42: [&amp;lt;ffffffffa0d3b69e&amp;gt;] ? osd_key_init+0x1e/0x670 [osd_ldiskfs]
13:06:42: [&amp;lt;ffffffffa066622f&amp;gt;] ? keys_fill+0x6f/0x190 [obdclass]
13:06:42: [&amp;lt;ffffffffa0669e6b&amp;gt;] ? lu_context_init+0xab/0x260 [obdclass]
13:06:42: [&amp;lt;ffffffffa066a03e&amp;gt;] ? lu_env_init+0x1e/0x30 [obdclass]
13:06:42: [&amp;lt;ffffffffa0e775ff&amp;gt;] ? mdt_lvbo_fill+0x1af/0x800 [mdt]
13:06:42: [&amp;lt;ffffffffa0e77450&amp;gt;] ? mdt_lvbo_fill+0x0/0x800 [mdt]
13:06:42: [&amp;lt;ffffffffa07c6591&amp;gt;] ? ldlm_handle_enqueue0+0x641/0x1080 [ptlrpc]
13:06:42: [&amp;lt;ffffffffa0e4e376&amp;gt;] ? mdt_enqueue+0x46/0x110 [mdt]
13:06:42: [&amp;lt;ffffffffa0e42fb8&amp;gt;] ? mdt_handle_common+0x628/0x1620 [mdt]
13:06:42: [&amp;lt;ffffffffa0e7a5c5&amp;gt;] ? mds_regular_handle+0x15/0x20 [mdt]
13:06:42: [&amp;lt;ffffffffa07f800c&amp;gt;] ? ptlrpc_server_handle_request+0x41c/0xdf0 [ptlrpc]
13:06:42: [&amp;lt;ffffffffa04d85de&amp;gt;] ? cfs_timer_arm+0xe/0x10 [libcfs]
13:06:42: [&amp;lt;ffffffffa07ef739&amp;gt;] ? ptlrpc_wait_event+0xa9/0x290 [ptlrpc]
13:06:42: [&amp;lt;ffffffff81052223&amp;gt;] ? __wake_up+0x53/0x70
13:06:42: [&amp;lt;ffffffffa07f9556&amp;gt;] ? ptlrpc_main+0xb76/0x1870 [ptlrpc]
13:06:42: [&amp;lt;ffffffffa07f89e0&amp;gt;] ? ptlrpc_main+0x0/0x1870 [ptlrpc]
13:06:42: [&amp;lt;ffffffff8100c0ca&amp;gt;] ? child_rip+0xa/0x20
13:06:42: [&amp;lt;ffffffffa07f89e0&amp;gt;] ? ptlrpc_main+0x0/0x1870 [ptlrpc]
13:06:42: [&amp;lt;ffffffffa07f89e0&amp;gt;] ? ptlrpc_main+0x0/0x1870 [ptlrpc]
13:06:42: [&amp;lt;ffffffff8100c0c0&amp;gt;] ? child_rip+0x0/0x20
13:06:42:Mem-Info:
13:06:42:Node 0 DMA per-cpu:
13:06:42:CPU    0: hi:    0, btch:   1 usd:   0
13:06:42:Node 0 DMA32 per-cpu:
13:06:42:CPU    0: hi:  186, btch:  31 usd: 152
13:06:42:active_anon:4078 inactive_anon:1377 isolated_anon:0
13:06:42: active_file:109040 inactive_file:162532 isolated_file:0
13:06:42: unevictable:0 dirty:14064 writeback:512 unstable:0
13:06:42: free:14243 slab_reclaimable:32585 slab_unreclaimable:116609
13:06:42: mapped:3106 shmem:41 pagetables:752 bounce:0
13:06:42:Node 0 DMA free:8348kB min:332kB low:412kB high:496kB active_anon:0kB inactive_anon:0kB active_file:184kB inactive_file:3376kB unevictable:0kB isolated(anon):0kB isolated(file):0kB present:15324kB mlocked:0kB dirty:0kB writeback:0kB mapped:0kB shmem:0kB slab_reclaimable:556kB slab_unreclaimable:3268kB kernel_stack:0kB pagetables:0kB unstable:0kB bounce:0kB writeback_tmp:0kB pages_scanned:0 all_unreclaimable? no
13:06:42:lowmem_reserve[]: 0 2003 2003 2003
13:06:42:Node 0 DMA32 free:48624kB min:44720kB low:55900kB high:67080kB active_anon:16312kB inactive_anon:5508kB active_file:435976kB inactive_file:646752kB unevictable:0kB isolated(anon):0kB isolated(file):0kB present:2052064kB mlocked:0kB dirty:56256kB writeback:2048kB mapped:12424kB shmem:164kB slab_reclaimable:129784kB slab_unreclaimable:463168kB kernel_stack:1960kB pagetables:3008kB unstable:0kB bounce:0kB writeback_tmp:0kB pages_scanned:0 all_unreclaimable? no
13:06:42:lowmem_reserve[]: 0 0 0 0
13:06:42:Node 0 DMA: 33*4kB 5*8kB 3*16kB 0*32kB 1*64kB 1*128kB 1*256kB 1*512kB 1*1024kB 3*2048kB 0*4096kB = 8348kB
13:06:42:Node 0 DMA32: 5780*4kB 2228*8kB 4*16kB 28*32kB 3*64kB 3*128kB 2*256kB 1*512kB 1*1024kB 0*2048kB 1*4096kB = 48624kB
13:06:42:195649 total pagecache pages
13:06:42:0 pages in swap cache
13:06:42:Swap cache stats: add 0, delete 0, find 0/0
13:06:42:Free swap  = 4128760kB
13:06:42:Total swap = 4128760kB
13:06:42:524284 pages RAM
13:06:42:43608 pages reserved
13:06:42:207968 pages shared
13:06:42:267301 pages non-shared
13:06:42:LustreError: 22716:0:(mdt_lvb.c:126:mdt_lvbo_fill()) ASSERTION( rc == 0 ) failed: 
13:06:42:LustreError: 22716:0:(mdt_lvb.c:126:mdt_lvbo_fill()) LBUG
13:06:42:Pid: 22716, comm: mdt00_003
13:06:42:
13:06:42:Call Trace:
13:06:42: [&amp;lt;ffffffffa04d7895&amp;gt;] libcfs_debug_dumpstack+0x55/0x80 [libcfs]
13:06:42: [&amp;lt;ffffffffa04d7e97&amp;gt;] lbug_with_loc+0x47/0xb0 [libcfs]
13:06:42: [&amp;lt;ffffffffa0e77bd5&amp;gt;] mdt_lvbo_fill+0x785/0x800 [mdt]
13:06:42: [&amp;lt;ffffffffa0e77450&amp;gt;] ? mdt_lvbo_fill+0x0/0x800 [mdt]
13:06:42: [&amp;lt;ffffffffa07c6591&amp;gt;] ldlm_handle_enqueue0+0x641/0x1080 [ptlrpc]
13:06:42: [&amp;lt;ffffffffa0e4e376&amp;gt;] mdt_enqueue+0x46/0x110 [mdt]
13:06:43: [&amp;lt;ffffffffa0e42fb8&amp;gt;] mdt_handle_common+0x628/0x1620 [mdt]
13:06:43: [&amp;lt;ffffffffa0e7a5c5&amp;gt;] mds_regular_handle+0x15/0x20 [mdt]
13:06:43: [&amp;lt;ffffffffa07f800c&amp;gt;] ptlrpc_server_handle_request+0x41c/0xdf0 [ptlrpc]
13:06:43: [&amp;lt;ffffffffa04d85de&amp;gt;] ? cfs_timer_arm+0xe/0x10 [libcfs]
13:06:43: [&amp;lt;ffffffffa07ef739&amp;gt;] ? ptlrpc_wait_event+0xa9/0x290 [ptlrpc]
13:06:43: [&amp;lt;ffffffff81052223&amp;gt;] ? __wake_up+0x53/0x70
13:06:43: [&amp;lt;ffffffffa07f9556&amp;gt;] ptlrpc_main+0xb76/0x1870 [ptlrpc]
13:06:43: [&amp;lt;ffffffffa07f89e0&amp;gt;] ? ptlrpc_main+0x0/0x1870 [ptlrpc]
13:06:43: [&amp;lt;ffffffff8100c0ca&amp;gt;] child_rip+0xa/0x20
13:06:43: [&amp;lt;ffffffffa07f89e0&amp;gt;] ? ptlrpc_main+0x0/0x1870 [ptlrpc]
13:06:43: [&amp;lt;ffffffffa07f89e0&amp;gt;] ? ptlrpc_main+0x0/0x1870 [ptlrpc]
13:06:43: [&amp;lt;ffffffff8100c0c0&amp;gt;] ? child_rip+0x0/0x20
13:06:43:
13:06:43:Kernel panic - not syncing: LBUG
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment></environment>
        <key id="17584">LU-2818</key>
            <summary>Failure on test suite parallel-scale-nfsv4 test_compilebench: (mdt_lvb.c:126:mdt_lvbo_fill()) ASSERTION( rc == 0 ) failed</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="hongchao.zhang">Hongchao Zhang</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Fri, 15 Feb 2013 03:22:53 +0000</created>
                <updated>Wed, 17 Mar 2021 03:48:16 +0000</updated>
                            <resolved>Tue, 11 Feb 2014 20:46:56 +0000</resolved>
                                    <version>Lustre 2.4.0</version>
                    <version>Lustre 2.5.0</version>
                                    <fixVersion>Lustre 2.6.0</fixVersion>
                    <fixVersion>Lustre 2.5.1</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>9</watches>
                                                                            <comments>
                            <comment id="52624" author="hongchao.zhang" created="Mon, 18 Feb 2013 09:36:03 +0000"  >&lt;p&gt;this bug can be mitigated greatly by the patches(&lt;a href=&quot;http://review.whamcloud.com/#change,5323&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#change,5323&lt;/a&gt;, &lt;a href=&quot;http://review.whamcloud.com/#change,5444&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#change,5444&lt;/a&gt;) in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2748&quot; title=&quot;OSD uses kmalloc with high order to allocate a keys&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2748&quot;&gt;&lt;del&gt;LU-2748&lt;/del&gt;&lt;/a&gt;.&lt;br/&gt;
and I am also creating a patch to let &quot;mdt_lvbo_fill&quot; to find the corresponding &quot;lu_env&quot; in the current ptlrpc_thread to eliminate the need to&lt;br/&gt;
allocate a new &quot;lu_env&quot;.&lt;/p&gt;</comment>
                            <comment id="52795" author="hongchao.zhang" created="Thu, 21 Feb 2013 06:21:39 +0000"  >&lt;p&gt;the additional patch is tracked at &lt;a href=&quot;http://review.whamcloud.com/#change,5497&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#change,5497&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="54248" author="hongchao.zhang" created="Mon, 18 Mar 2013 10:35:40 +0000"  >&lt;p&gt;status update:&lt;/p&gt;

&lt;p&gt;the patch which adding &quot;lu_env&quot; to &quot;LDLM&quot; modules is not ready yet for its complexity, and it affects lots of funtions.&lt;br/&gt;
it&apos;s better to test&amp;amp;merge the patches in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2748&quot; title=&quot;OSD uses kmalloc with high order to allocate a keys&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2748&quot;&gt;&lt;del&gt;LU-2748&lt;/del&gt;&lt;/a&gt;, then this ticket can be dropped as a blocker.&lt;/p&gt;</comment>
                            <comment id="54253" author="bzzz" created="Mon, 18 Mar 2013 13:27:25 +0000"  >&lt;p&gt;we&apos;re not allocating large chunks for osd_thread_info anymore, so this isn&apos;t a blocker already.&lt;/p&gt;</comment>
                            <comment id="54904" author="hongchao.zhang" created="Wed, 27 Mar 2013 11:04:18 +0000"  >&lt;p&gt;the patch passing the &quot;env&quot; into ldlm module has been pushed to Gerrit.&lt;/p&gt;</comment>
                            <comment id="62773" author="green" created="Tue, 23 Jul 2013 05:09:30 +0000"  >&lt;p&gt;I am now frequently hitting this again while running sanity in a loop, this didi not happen bfore in this sort of a test on same config node, so we probably need to take another look at this?&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;&amp;lt;4&amp;gt;[13497.371686] mdt00_004: page allocation failure. order:1, mode:0x40
&amp;lt;4&amp;gt;[13497.372009] Pid: 11831, comm: mdt00_004 Not tainted 2.6.32-rhe6.4-debug #2
&amp;lt;4&amp;gt;[13497.372016] Call Trace:
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffff8112a666&amp;gt;] ? __alloc_pages_nodemask+0x7c6/0x980
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffff814fe0ee&amp;gt;] ? _spin_unlock+0xe/0x10
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffff811658f2&amp;gt;] ? kmem_getpages+0x62/0x170
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffff8116834a&amp;gt;] ? fallback_alloc+0x1ba/0x270
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffff81167bf7&amp;gt;] ? cache_grow+0x4d7/0x520
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffff81168038&amp;gt;] ? ____cache_alloc_node+0xa8/0x200
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffff81168943&amp;gt;] ? kmem_cache_alloc_trace+0x1c3/0x250
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffffa0b4f2a5&amp;gt;] ? osd_key_init+0x25/0x4e0 [osd_ldiskfs]
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffffa0b4f2a5&amp;gt;] ? osd_key_init+0x25/0x4e0 [osd_ldiskfs]
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffffa04f8f8f&amp;gt;] ? keys_fill+0x6f/0x190 [obdclass]
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffffa04fd00e&amp;gt;] ? lu_context_init+0x4e/0x240 [obdclass]
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffffa04fd063&amp;gt;] ? lu_context_init+0xa3/0x240 [obdclass]
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffffa04fd21e&amp;gt;] ? lu_env_init+0x1e/0x30 [obdclass]
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffffa0bf131b&amp;gt;] ? mdt_lvbo_fill+0x1ab/0x840 [mdt]
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffffa0bf1170&amp;gt;] ? mdt_lvbo_fill+0x0/0x840 [mdt]
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffffa0664033&amp;gt;] ? ldlm_handle_enqueue0+0x623/0x10c0 [ptlrpc]
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffffa0bb6a96&amp;gt;] ? mdt_enqueue+0x46/0xe0 [mdt]
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffffa0bbcc57&amp;gt;] ? mdt_handle_common+0x647/0x16d0 [mdt]
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffffa0bf54a5&amp;gt;] ? mds_regular_handle+0x15/0x20 [mdt]
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffffa0693898&amp;gt;] ? ptlrpc_server_handle_request+0x3a8/0xc70 [ptlrpc]
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffffa0a2155e&amp;gt;] ? cfs_timer_arm+0xe/0x10 [libcfs]
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffffa0a32b9f&amp;gt;] ? lc_watchdog_touch+0x6f/0x170 [libcfs]
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffffa068afb1&amp;gt;] ? ptlrpc_wait_event+0xb1/0x2a0 [ptlrpc]
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffff81054613&amp;gt;] ? __wake_up+0x53/0x70
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffffa0694b82&amp;gt;] ? ptlrpc_main+0xa22/0x1650 [ptlrpc]
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffffa0694160&amp;gt;] ? ptlrpc_main+0x0/0x1650 [ptlrpc]
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffff81094606&amp;gt;] ? kthread+0x96/0xa0
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffff8100c10a&amp;gt;] ? child_rip+0xa/0x20
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffff81094570&amp;gt;] ? kthread+0x0/0xa0
&amp;lt;4&amp;gt;[13497.372016]  [&amp;lt;ffffffff8100c100&amp;gt;] ? child_rip+0x0/0x20
&amp;lt;6&amp;gt;[13497.372016] Mem-Info:
&amp;lt;4&amp;gt;[13497.372016] Node 0 DMA per-cpu:
&amp;lt;4&amp;gt;[13497.372016] CPU    0: hi:    0, btch:   1 usd:   0
&amp;lt;4&amp;gt;[13497.385289] CPU    1: hi:    0, btch:   1 usd:   0
&amp;lt;4&amp;gt;[13497.385289] CPU    2: hi:    0, btch:   1 usd:   0
&amp;lt;4&amp;gt;[13497.385289] CPU    3: hi:    0, btch:   1 usd:   0
&amp;lt;4&amp;gt;[13497.385289] Node 0 DMA32 per-cpu:
&amp;lt;4&amp;gt;[13497.385289] CPU    0: hi:  186, btch:  31 usd:  26
&amp;lt;4&amp;gt;[13497.385289] CPU    1: hi:  186, btch:  31 usd:  75
&amp;lt;4&amp;gt;[13497.385289] CPU    2: hi:  186, btch:  31 usd: 177
&amp;lt;4&amp;gt;[13497.385289] CPU    3: hi:  186, btch:  31 usd: 170
&amp;lt;4&amp;gt;[13497.385289] active_anon:83027 inactive_anon:83006 isolated_anon:0
&amp;lt;4&amp;gt;[13497.385289]  active_file:31104 inactive_file:29174 isolated_file:0
&amp;lt;4&amp;gt;[13497.385289]  unevictable:0 dirty:59 writeback:55 unstable:0
&amp;lt;4&amp;gt;[13497.385289]  free:13805 slab_reclaimable:79792 slab_unreclaimable:363470
&amp;lt;4&amp;gt;[13497.385289]  mapped:669 shmem:163871 pagetables:341 bounce:0
&amp;lt;4&amp;gt;[13497.388538] Node 0 DMA free:9700kB min:136kB low:168kB high:204kB active_anon:0kB inactive_anon:0kB active_file:0kB inactive_file:0kB unevictable:0kB isolated(anon):0kB isolated(file):0kB present:9312kB mlocked:0kB dirty:0kB writeback:0kB mapped:0kB shmem:0kB slab_reclaimable:0kB slab_unreclaimable:0kB kernel_stack:0kB pagetables:0kB unstable:0kB bounce:0kB writeback_tmp:0kB pages_scanned:0 all_unreclaimable? yes
&amp;lt;4&amp;gt;[13497.389526] lowmem_reserve[]: 0 2967 2967 2967
&amp;lt;4&amp;gt;[13497.389526] Node 0 DMA32 free:45520kB min:44916kB low:56144kB high:67372kB active_anon:332000kB inactive_anon:332388kB active_file:124252kB inactive_file:116696kB unevictable:0kB isolated(anon):0kB isolated(file):0kB present:3039080kB mlocked:0kB dirty:236kB writeback:220kB mapped:2676kB shmem:655484kB slab_reclaimable:319168kB slab_unreclaimable:1453880kB kernel_stack:2640kB pagetables:1364kB unstable:0kB bounce:0kB writeback_tmp:0kB pages_scanned:861 all_unreclaimable? no
&amp;lt;4&amp;gt;[13497.389526] lowmem_reserve[]: 0 0 0 0
&amp;lt;4&amp;gt;[13497.389526] Node 0 DMA: 3*4kB 1*8kB 3*16kB 1*32kB 2*64kB 0*128kB 1*256kB 0*512kB 1*1024kB 0*2048kB 2*4096kB = 9700kB
&amp;lt;4&amp;gt;[13497.389526] Node 0 DMA32: 10258*4kB 18*8kB 0*16kB 0*32kB 0*64kB 0*128kB 0*256kB 0*512kB 0*1024kB 0*2048kB 1*4096kB = 45272kB
&amp;lt;4&amp;gt;[13497.389526] 194553 total pagecache pages
&amp;lt;4&amp;gt;[13497.389526] 0 pages in swap cache
&amp;lt;4&amp;gt;[13497.389526] Swap cache stats: add 1550, delete 1550, find 434/448
&amp;lt;4&amp;gt;[13497.389526] Free swap  = 2091392kB
&amp;lt;4&amp;gt;[13497.389526] Total swap = 2097144kB
&amp;lt;6&amp;gt;[13497.389526] 774397 pages RAM
&amp;lt;6&amp;gt;[13497.389526] 38457 pages reserved
&amp;lt;6&amp;gt;[13497.389526] 19663 pages shared
&amp;lt;6&amp;gt;[13497.389526] 697781 pages non-shared
&amp;lt;0&amp;gt;[13497.402539] LustreError: 11831:0:(mdt_lvb.c:125:mdt_lvbo_fill()) ASSERTION( rc == 0 ) failed: 
&amp;lt;0&amp;gt;[13497.403365] LustreError: 11831:0:(mdt_lvb.c:125:mdt_lvbo_fill()) LBUG
&amp;lt;4&amp;gt;[13497.403820] Pid: 11831, comm: mdt00_004
&amp;lt;4&amp;gt;[13497.404356] 
&amp;lt;4&amp;gt;[13497.404356] Call Trace:
&amp;lt;4&amp;gt;[13497.405032]  [&amp;lt;ffffffffa0a208a5&amp;gt;] libcfs_debug_dumpstack+0x55/0x80 [libcfs]
&amp;lt;4&amp;gt;[13497.405510]  [&amp;lt;ffffffffa0a20ea7&amp;gt;] lbug_with_loc+0x47/0xb0 [libcfs]
&amp;lt;4&amp;gt;[13497.405979]  [&amp;lt;ffffffffa0bf18d4&amp;gt;] mdt_lvbo_fill+0x764/0x840 [mdt]
&amp;lt;4&amp;gt;[13497.406426]  [&amp;lt;ffffffffa0bf1170&amp;gt;] ? mdt_lvbo_fill+0x0/0x840 [mdt]
&amp;lt;4&amp;gt;[13497.406735]  [&amp;lt;ffffffffa0664033&amp;gt;] ldlm_handle_enqueue0+0x623/0x10c0 [ptlrpc]
&amp;lt;4&amp;gt;[13497.407043]  [&amp;lt;ffffffffa0bb6a96&amp;gt;] mdt_enqueue+0x46/0xe0 [mdt]
&amp;lt;4&amp;gt;[13497.407317]  [&amp;lt;ffffffffa0bbcc57&amp;gt;] mdt_handle_common+0x647/0x16d0 [mdt]
&amp;lt;4&amp;gt;[13497.407604]  [&amp;lt;ffffffffa0bf54a5&amp;gt;] mds_regular_handle+0x15/0x20 [mdt]
&amp;lt;4&amp;gt;[13497.407908]  [&amp;lt;ffffffffa0693898&amp;gt;] ptlrpc_server_handle_request+0x3a8/0xc70 [ptlrpc]
&amp;lt;4&amp;gt;[13497.408502]  [&amp;lt;ffffffffa0a2155e&amp;gt;] ? cfs_timer_arm+0xe/0x10 [libcfs]
&amp;lt;4&amp;gt;[13497.408814]  [&amp;lt;ffffffffa0a32b9f&amp;gt;] ? lc_watchdog_touch+0x6f/0x170 [libcfs]
&amp;lt;4&amp;gt;[13497.409138]  [&amp;lt;ffffffffa068afb1&amp;gt;] ? ptlrpc_wait_event+0xb1/0x2a0 [ptlrpc]
&amp;lt;4&amp;gt;[13497.409425]  [&amp;lt;ffffffff81054613&amp;gt;] ? __wake_up+0x53/0x70
&amp;lt;4&amp;gt;[13497.409706]  [&amp;lt;ffffffffa0694b82&amp;gt;] ptlrpc_main+0xa22/0x1650 [ptlrpc]
&amp;lt;4&amp;gt;[13497.410010]  [&amp;lt;ffffffffa0694160&amp;gt;] ? ptlrpc_main+0x0/0x1650 [ptlrpc]
&amp;lt;4&amp;gt;[13497.410288]  [&amp;lt;ffffffff81094606&amp;gt;] kthread+0x96/0xa0
&amp;lt;4&amp;gt;[13497.410537]  [&amp;lt;ffffffff8100c10a&amp;gt;] child_rip+0xa/0x20
&amp;lt;4&amp;gt;[13497.410790]  [&amp;lt;ffffffff81094570&amp;gt;] ? kthread+0x0/0xa0
&amp;lt;4&amp;gt;[13497.411045]  [&amp;lt;ffffffff8100c100&amp;gt;] ? child_rip+0x0/0x20
&amp;lt;4&amp;gt;[13497.411299] 
&amp;lt;0&amp;gt;[13497.681303] Kernel panic - not syncing: LBUG
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="62777" author="bzzz" created="Tue, 23 Jul 2013 07:19:55 +0000"  >&lt;p&gt;I think this can stop to happen once we start to fetch the layouts with explicit getxattr..&lt;/p&gt;</comment>
                            <comment id="75368" author="green" created="Tue, 21 Jan 2014 18:55:28 +0000"  >&lt;p&gt;Because I am really tired of hittign this issue, here&apos;s a patch to at least making mdt not crash when it happens:&lt;br/&gt;
&lt;a href=&quot;http://review.whamcloud.com/8947&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/8947&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="76777" author="pjones" created="Tue, 11 Feb 2014 20:46:56 +0000"  >&lt;p&gt;Landed for 2.6&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                                                <inwardlinks description="is duplicated by">
                                        <issuelink>
            <issuekey id="29542">LU-6472</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="17439">LU-2748</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvj6n:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>6822</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>