<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:31:12 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-3126] conf-sanity test_41b: fld_server_lookup()) ASSERTION( fld-&gt;lsf_control_exp ) failed</title>
                <link>https://jira.whamcloud.com/browse/LU-3126</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for bfaccini &amp;lt;bruno.faccini@intel.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/5963ea2c-9e4a-11e2-9d68-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/5963ea2c-9e4a-11e2-9d68-52540035b04c&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_41b failed with the following error:&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;test failed to respond and timed out&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;Info required for matching: conf-sanity 41b&lt;/p&gt;

&lt;p&gt;An LBUG occurred on the OSS side:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;09:23:10:LustreError: 19905:0:(fld_handler.c:173:fld_server_lookup()) ASSERTION( fld-&amp;gt;lsf_control_exp ) failed: 
09:23:10:LustreError: 19905:0:(fld_handler.c:173:fld_server_lookup()) LBUG
09:23:10:Pid: 19905, comm: ll_ost00_001
09:23:10:
09:23:10:Call Trace:
09:23:13: [&amp;lt;ffffffffa04d7895&amp;gt;] libcfs_debug_dumpstack+0x55/0x80 [libcfs]
09:23:13: [&amp;lt;ffffffffa04d7e97&amp;gt;] lbug_with_loc+0x47/0xb0 [libcfs]
09:23:13: [&amp;lt;ffffffffa09bfe0f&amp;gt;] fld_server_lookup+0x2df/0x3b0 [fld]
09:23:13: [&amp;lt;ffffffffa0d3b53e&amp;gt;] osd_fld_lookup+0xae/0x1e0 [osd_ldiskfs]
09:23:13: [&amp;lt;ffffffffa0d4e902&amp;gt;] fid_is_on_ost+0x102/0x3b0 [osd_ldiskfs]
09:23:13: [&amp;lt;ffffffffa0d5083a&amp;gt;] osd_oi_lookup+0xca/0x150 [osd_ldiskfs]
09:23:13: [&amp;lt;ffffffffa0d4c810&amp;gt;] osd_object_init+0x4c0/0xa40 [osd_ldiskfs]
09:23:13: [&amp;lt;ffffffffa066557d&amp;gt;] lu_object_alloc+0xcd/0x300 [obdclass]
09:23:13: [&amp;lt;ffffffffa06658f9&amp;gt;] ? htable_lookup+0x119/0x1c0 [obdclass]
09:23:13: [&amp;lt;ffffffffa06660e5&amp;gt;] lu_object_find_at+0x205/0x360 [obdclass]
09:23:13: [&amp;lt;ffffffffa0666256&amp;gt;] lu_object_find+0x16/0x20 [obdclass]
09:23:13: [&amp;lt;ffffffffa0e25f15&amp;gt;] ofd_object_find+0x35/0xf0 [ofd]
09:23:13: [&amp;lt;ffffffffa0e27423&amp;gt;] ofd_precreate_objects+0x1d3/0x1360 [ofd]
09:23:13: [&amp;lt;ffffffffa04e2d88&amp;gt;] ? libcfs_log_return+0x28/0x40 [libcfs]
09:23:14: [&amp;lt;ffffffffa0e1cf72&amp;gt;] ofd_create+0x322/0x1470 [ofd]
09:23:14: [&amp;lt;ffffffffa07e7ff5&amp;gt;] ? lustre_msg_buf+0x55/0x60 [ptlrpc]
09:23:14: [&amp;lt;ffffffffa0df247c&amp;gt;] ost_handle+0x356c/0x46f0 [ost]
09:23:14: [&amp;lt;ffffffffa04e40e4&amp;gt;] ? libcfs_id2str+0x74/0xb0 [libcfs]
09:23:14: [&amp;lt;ffffffffa07f91dc&amp;gt;] ptlrpc_server_handle_request+0x41c/0xdf0 [ptlrpc]
09:23:14: [&amp;lt;ffffffffa04d85de&amp;gt;] ? cfs_timer_arm+0xe/0x10 [libcfs]
09:23:14: [&amp;lt;ffffffffa07f0819&amp;gt;] ? ptlrpc_wait_event+0xa9/0x290 [ptlrpc]
09:23:15: [&amp;lt;ffffffffa04e82c1&amp;gt;] ? libcfs_debug_msg+0x41/0x50 [libcfs]
09:23:15: [&amp;lt;ffffffff81052223&amp;gt;] ? __wake_up+0x53/0x70
09:23:16: [&amp;lt;ffffffffa07fa725&amp;gt;] ptlrpc_main+0xb75/0x1870 [ptlrpc]
09:23:16: [&amp;lt;ffffffffa07f9bb0&amp;gt;] ? ptlrpc_main+0x0/0x1870 [ptlrpc]
09:23:16: [&amp;lt;ffffffff8100c0ca&amp;gt;] child_rip+0xa/0x20
09:23:16: [&amp;lt;ffffffffa07f9bb0&amp;gt;] ? ptlrpc_main+0x0/0x1870 [ptlrpc]
09:23:16: [&amp;lt;ffffffffa07f9bb0&amp;gt;] ? ptlrpc_main+0x0/0x1870 [ptlrpc]
09:23:17: [&amp;lt;ffffffff8100c0c0&amp;gt;] ? child_rip+0x0/0x20
09:23:18:
09:23:18:Kernel panic - not syncing: LBUG
09:23:18:Pid: 19905, comm: ll_ost00_001 Not tainted 2.6.32-279.19.1.el6_lustre.gc4681d8.x86_64 #1
09:23:18:Call Trace:
09:23:18: [&amp;lt;ffffffff814e9811&amp;gt;] ? panic+0xa0/0x168
09:23:18: [&amp;lt;ffffffffa04d7eeb&amp;gt;] ? lbug_with_loc+0x9b/0xb0 [libcfs]
09:23:18: [&amp;lt;ffffffffa09bfe0f&amp;gt;] ? fld_server_lookup+0x2df/0x3b0 [fld]
09:23:18: [&amp;lt;ffffffffa0d3b53e&amp;gt;] ? osd_fld_lookup+0xae/0x1e0 [osd_ldiskfs]
09:23:18: [&amp;lt;ffffffffa0d4e902&amp;gt;] ? fid_is_on_ost+0x102/0x3b0 [osd_ldiskfs]
09:23:18: [&amp;lt;ffffffffa0d5083a&amp;gt;] ? osd_oi_lookup+0xca/0x150 [osd_ldiskfs]
09:23:18: [&amp;lt;ffffffffa0d4c810&amp;gt;] ? osd_object_init+0x4c0/0xa40 [osd_ldiskfs]
09:23:18: [&amp;lt;ffffffffa066557d&amp;gt;] ? lu_object_alloc+0xcd/0x300 [obdclass]
09:23:18: [&amp;lt;ffffffffa06658f9&amp;gt;] ? htable_lookup+0x119/0x1c0 [obdclass]
09:23:19: [&amp;lt;ffffffffa06660e5&amp;gt;] ? lu_object_find_at+0x205/0x360 [obdclass]
09:23:19: [&amp;lt;ffffffffa0666256&amp;gt;] ? lu_object_find+0x16/0x20 [obdclass]
09:23:19: [&amp;lt;ffffffffa0e25f15&amp;gt;] ? ofd_object_find+0x35/0xf0 [ofd]
09:23:20: [&amp;lt;ffffffffa0e27423&amp;gt;] ? ofd_precreate_objects+0x1d3/0x1360 [ofd]
09:23:20: [&amp;lt;ffffffffa04e2d88&amp;gt;] ? libcfs_log_return+0x28/0x40 [libcfs]
09:23:20: [&amp;lt;ffffffffa0e1cf72&amp;gt;] ? ofd_create+0x322/0x1470 [ofd]
09:23:21: [&amp;lt;ffffffffa07e7ff5&amp;gt;] ? lustre_msg_buf+0x55/0x60 [ptlrpc]
09:23:21: [&amp;lt;ffffffffa0df247c&amp;gt;] ? ost_handle+0x356c/0x46f0 [ost]
09:23:21: [&amp;lt;ffffffffa04e40e4&amp;gt;] ? libcfs_id2str+0x74/0xb0 [libcfs]
09:23:22: [&amp;lt;ffffffffa07f91dc&amp;gt;] ? ptlrpc_server_handle_request+0x41c/0xdf0 [ptlrpc]
09:23:23: [&amp;lt;ffffffffa04d85de&amp;gt;] ? cfs_timer_arm+0xe/0x10 [libcfs]
09:23:23: [&amp;lt;ffffffffa07f0819&amp;gt;] ? ptlrpc_wait_event+0xa9/0x290 [ptlrpc]
09:23:23: [&amp;lt;ffffffffa04e82c1&amp;gt;] ? libcfs_debug_msg+0x41/0x50 [libcfs]
09:23:23: [&amp;lt;ffffffff81052223&amp;gt;] ? __wake_up+0x53/0x70
09:23:23: [&amp;lt;ffffffffa07fa725&amp;gt;] ? ptlrpc_main+0xb75/0x1870 [ptlrpc]
09:23:23: [&amp;lt;ffffffffa07f9bb0&amp;gt;] ? ptlrpc_main+0x0/0x1870 [ptlrpc]
09:23:23: [&amp;lt;ffffffff8100c0ca&amp;gt;] ? child_rip+0xa/0x20
09:23:23: [&amp;lt;ffffffffa07f9bb0&amp;gt;] ? ptlrpc_main+0x0/0x1870 [ptlrpc]
09:23:23: [&amp;lt;ffffffffa07f9bb0&amp;gt;] ? ptlrpc_main+0x0/0x1870 [ptlrpc]
09:23:23: [&amp;lt;ffffffff8100c0c0&amp;gt;] ? child_rip+0x0/0x20
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment></environment>
        <key id="18285">LU-3126</key>
            <summary>conf-sanity test_41b: fld_server_lookup()) ASSERTION( fld-&gt;lsf_control_exp ) failed</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="di.wang">Di Wang</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                            <label>mn4</label>
                            <label>yuc2</label>
                    </labels>
                <created>Mon, 8 Apr 2013 17:30:36 +0000</created>
                <updated>Tue, 15 Apr 2014 13:15:41 +0000</updated>
                            <resolved>Mon, 26 Aug 2013 18:58:57 +0000</resolved>
                                    <version>Lustre 2.5.0</version>
                                    <fixVersion>Lustre 2.5.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>9</watches>
                                                                            <comments>
                            <comment id="56287" author="liwei" created="Mon, 15 Apr 2013 03:14:01 +0000"  >&lt;p&gt;&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/d29c0fe6-a18d-11e2-8fc0-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/d29c0fe6-a18d-11e2-8fc0-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="62610" author="jhammond" created="Fri, 19 Jul 2013 14:20:57 +0000"  >&lt;p&gt;&lt;a href=&quot;https://maloo.whamcloud.com/test_logs/e7567198-f024-11e2-b957-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_logs/e7567198-f024-11e2-b957-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="63394" author="sarah" created="Wed, 31 Jul 2013 18:04:10 +0000"  >&lt;p&gt;Hit this issue when running conf-sanity test_32a with 2.4-ldiskfs image:&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/8feeb8cc-f9c3-11e2-aee1-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/8feeb8cc-f9c3-11e2-aee1-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="63511" author="sarah" created="Thu, 1 Aug 2013 22:08:52 +0000"  >&lt;p&gt;Hit this error when upgrading 2.4.0 to 2.5&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Lustre: DEBUG MARKER: == upgrade-downgrade End == 15:07:57 (1375394877)
LDISKFS-fs (sdc1): mounted filesystem with ordered data mode. quota=on. Opts: 
LustreError: 7916:0:(fld_handler.c:147:fld_server_lookup()) ASSERTION( fld-&amp;gt;lsf_control_exp ) failed: 
LustreError: 7916:0:(fld_handler.c:147:fld_server_lookup()) LBUG
Pid: 7916, comm: mount.lustre

Call Trace:
 [&amp;lt;ffffffffa0375895&amp;gt;] libcfs_debug_dumpstack+0x55/0x80 [libcfs]
 [&amp;lt;ffffffffa0375e97&amp;gt;] lbug_with_loc+0x47/0xb0 [libcfs]
 [&amp;lt;ffffffffa083fe0f&amp;gt;] fld_server_lookup+0x2ef/0x3d0 [fld]
 [&amp;lt;ffffffff8119c88e&amp;gt;] ? generic_detach_inode+0x18e/0x1f0
 [&amp;lt;ffffffffa0c693d1&amp;gt;] osd_fld_lookup+0x71/0x1d0 [osd_ldiskfs]
 [&amp;lt;ffffffff8119c6f2&amp;gt;] ? iput+0x62/0x70
 [&amp;lt;ffffffffa0c695ca&amp;gt;] osd_remote_fid+0x9a/0x280 [osd_ldiskfs]
 [&amp;lt;ffffffffa0c75621&amp;gt;] osd_index_ea_lookup+0x521/0x850 [osd_ldiskfs]
 [&amp;lt;ffffffffa04d282f&amp;gt;] dt_lookup_dir+0x6f/0x130 [obdclass]
 [&amp;lt;ffffffffa04b0fb5&amp;gt;] llog_osd_open+0x475/0xbb0 [obdclass]
 [&amp;lt;ffffffffa047d31a&amp;gt;] llog_open+0xba/0x2c0 [obdclass]
 [&amp;lt;ffffffffa0480f71&amp;gt;] llog_backup+0x61/0x500 [obdclass]
 [&amp;lt;ffffffff81281860&amp;gt;] ? sprintf+0x40/0x50
 [&amp;lt;ffffffffa0cf9702&amp;gt;] mgc_process_log+0x1192/0x18e0 [mgc]
 [&amp;lt;ffffffffa0cf3370&amp;gt;] ? mgc_blocking_ast+0x0/0x800 [mgc]
 [&amp;lt;ffffffffa0633c40&amp;gt;] ? ldlm_completion_ast+0x0/0x960 [ptlrpc]
 [&amp;lt;ffffffffa0cfb2e4&amp;gt;] mgc_process_config+0x594/0xed0 [mgc]
 [&amp;lt;ffffffffa04c6776&amp;gt;] lustre_process_log+0x256/0xaa0 [obdclass]
 [&amp;lt;ffffffffa0495972&amp;gt;] ? class_name2dev+0x42/0xe0 [obdclass]
 [&amp;lt;ffffffff81167d83&amp;gt;] ? kmem_cache_alloc_trace+0x1a3/0x1b0
 [&amp;lt;ffffffffa0495a1e&amp;gt;] ? class_name2obd+0xe/0x30 [obdclass]
 [&amp;lt;ffffffffa04fa641&amp;gt;] server_start_targets+0x1821/0x1a40 [obdclass]
 [&amp;lt;ffffffffa04c9db3&amp;gt;] ? lustre_start_mgc+0x493/0x1e90 [obdclass]
 [&amp;lt;ffffffffa04c1ca0&amp;gt;] ? class_config_llog_handler+0x0/0x1880 [obdclass]
 [&amp;lt;ffffffffa04fe1fc&amp;gt;] server_fill_super+0xbbc/0x1a24 [obdclass]
 [&amp;lt;ffffffffa04cb988&amp;gt;] lustre_fill_super+0x1d8/0x530 [obdclass]
 [&amp;lt;ffffffffa04cb7b0&amp;gt;] ? lustre_fill_super+0x0/0x530 [obdclass]
 [&amp;lt;ffffffff8118431f&amp;gt;] get_sb_nodev+0x5f/0xa0
 [&amp;lt;ffffffffa04c3625&amp;gt;] lustre_get_sb+0x25/0x30 [obdclass]
 [&amp;lt;ffffffff8118395b&amp;gt;] vfs_kern_mount+0x7b/0x1b0
 [&amp;lt;ffffffff81183b02&amp;gt;] do_kern_mount+0x52/0x130
 [&amp;lt;ffffffff811a3d32&amp;gt;] do_mount+0x2d2/0x8d0
 [&amp;lt;ffffffff811a43c0&amp;gt;] sys_mount+0x90/0xe0
 [&amp;lt;ffffffff8100b072&amp;gt;] system_call_fastpath+0x16/0x1b

Kernel panic - not syncing: LBUG
Pid: 7916, comm: mount.lustre Not tainted 2.6.32-358.11.1.el6_lustre.g55605c6.x86_64 #1
Call Trace:
 [&amp;lt;ffffffff8150d938&amp;gt;] ? panic+0xa7/0x16f
 [&amp;lt;ffffffffa0375eeb&amp;gt;] ? lbug_with_loc+0x9b/0xb0 [libcfs]
 [&amp;lt;ffffffffa083fe0f&amp;gt;] ? fld_server_lookup+0x2ef/0x3d0 [fld]
 [&amp;lt;ffffffff8119c88e&amp;gt;] ? generic_detach_inode+0x18e/0x1f0
 [&amp;lt;ffffffffa0c693d1&amp;gt;] ? osd_fld_lookup+0x71/0x1d0 [osd_ldiskfs]
 [&amp;lt;ffffffff8119c6f2&amp;gt;] ? iput+0x62/0x70
 [&amp;lt;ffffffffa0c695ca&amp;gt;] ? osd_remote_fid+0x9a/0x280 [osd_ldiskfs]
 [&amp;lt;ffffffffa0c75621&amp;gt;] ? osd_index_ea_lookup+0x521/0x850 [osd_ldiskfs]
 [&amp;lt;ffffffffa04d282f&amp;gt;] ? dt_lookup_dir+0x6f/0x130 [obdclass]
 [&amp;lt;ffffffffa04b0fb5&amp;gt;] ? llog_osd_open+0x475/0xbb0 [obdclass]
 [&amp;lt;ffffffffa047d31a&amp;gt;] ? llog_open+0xba/0x2c0 [obdclass]
 [&amp;lt;ffffffffa0480f71&amp;gt;] ? llog_backup+0x61/0x500 [obdclass]
 [&amp;lt;ffffffff81281860&amp;gt;] ? sprintf+0x40/0x50
 [&amp;lt;ffffffffa0cf9702&amp;gt;] ? mgc_process_log+0x1192/0x18e0 [mgc]
 [&amp;lt;ffffffffa0cf3370&amp;gt;] ? mgc_blocking_ast+0x0/0x800 [mgc]
 [&amp;lt;ffffffffa0633c40&amp;gt;] ? ldlm_completion_ast+0x0/0x960 [ptlrpc]
 [&amp;lt;ffffffffa0cfb2e4&amp;gt;] ? mgc_process_config+0x594/0xed0 [mgc]
 [&amp;lt;ffffffffa04c6776&amp;gt;] ? lustre_process_log+0x256/0xaa0 [obdclass]
 [&amp;lt;ffffffffa0495972&amp;gt;] ? class_name2dev+0x42/0xe0 [obdclass]
 [&amp;lt;ffffffff81167d83&amp;gt;] ? kmem_cache_alloc_trace+0x1a3/0x1b0
 [&amp;lt;ffffffffa0495a1e&amp;gt;] ? class_name2obd+0xe/0x30 [obdclass]
 [&amp;lt;ffffffffa04fa641&amp;gt;] ? server_start_targets+0x1821/0x1a40 [obdclass]
 [&amp;lt;ffffffffa04c9db3&amp;gt;] ? lustre_start_mgc+0x493/0x1e90 [obdclass]
 [&amp;lt;ffffffffa04c1ca0&amp;gt;] ? class_config_llog_handler+0x0/0x1880 [obdclass]
 [&amp;lt;ffffffffa04fe1fc&amp;gt;] ? server_fill_super+0xbbc/0x1a24 [obdclass]
 [&amp;lt;ffffffffa04cb988&amp;gt;] ? lustre_fill_super+0x1d8/0x530 [obdclass]
 [&amp;lt;ffffffffa04cb7b0&amp;gt;] ? lustre_fill_super+0x0/0x530 [obdclass]
 [&amp;lt;ffffffff8118431f&amp;gt;] ? get_sb_nodev+0x5f/0xa0
 [&amp;lt;ffffffffa04c3625&amp;gt;] ? lustre_get_sb+0x25/0x30 [obdclass]
 [&amp;lt;ffffffff8118395b&amp;gt;] ? vfs_kern_mount+0x7b/0x1b0
 [&amp;lt;ffffffff81183b02&amp;gt;] ? do_kern_mount+0x52/0x130
 [&amp;lt;ffffffff811a3d32&amp;gt;] ? do_mount+0x2d2/0x8d0
 [&amp;lt;ffffffff811a43c0&amp;gt;] ? sys_mount+0x90/0xe0
 [&amp;lt;ffffffff8100b072&amp;gt;] ? system_call_fastpath+0x16/0x1b
Initializing cgroup subsys cpuset
Initializing cgroup subsys cpu
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="63801" author="cliffw" created="Wed, 7 Aug 2013 19:39:39 +0000"  >&lt;p&gt;Hit this error when attempting to mount a new filesystem on OSS - Hyperion&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;2013-08-06 13:50:42 Lustre: Lustre: Build Version: jenkins-arch=x86_64,build_type=server,distro=el6,ib_stack=inkernel-1594-gbdf591f-PRISTINE-2.6.32-358.11.1.el6_lustre.gbdf591f.x86_64
2013-08-06 13:50:43 LDISKFS-fs warning (device sdc): ldiskfs_multi_mount_protect: MMP interval 42 higher than expected, please wait.
2013-08-06 13:50:43
2013-08-06 13:51:40 LDISKFS-fs (sdc): recovery complete
2013-08-06 13:51:40 LDISKFS-fs (sdc): mounted filesystem with ordered data mode. quota=on. Opts:
2013-08-06 13:51:41 LustreError: 4864:0:(fld_handler.c:147:fld_server_lookup()) ASSERTION( fld-&amp;gt;lsf_control_exp ) failed:
2013-08-06 13:51:41 LustreError: 4864:0:(fld_handler.c:147:fld_server_lookup()) LBUG
2013-08-06 13:51:41 Pid: 4864, comm: mount.lustre
2013-08-06 13:51:41
2013-08-06 13:51:41 Call Trace:
2013-08-06 13:51:41  [&amp;lt;ffffffffa04d0895&amp;gt;] libcfs_debug_dumpstack+0x55/0x80 [libcfs]
2013-08-06 13:51:41  [&amp;lt;ffffffffa04d0e97&amp;gt;] lbug_with_loc+0x47/0xb0 [libcfs]
2013-08-06 13:51:41  [&amp;lt;ffffffffa0b4ee0f&amp;gt;] fld_server_lookup+0x2ef/0x3d0 [fld]
2013-08-06 13:51:41  [&amp;lt;ffffffff8119c88e&amp;gt;] ? generic_detach_inode+0x18e/0x1f0
2013-08-06 13:51:41  [&amp;lt;ffffffffa0f7a3d1&amp;gt;] osd_fld_lookup+0x71/0x1d0 [osd_ldiskfs]
2013-08-06 13:51:41  [&amp;lt;ffffffff8119c6f2&amp;gt;] ? iput+0x62/0x70
2013-08-06 13:51:41  [&amp;lt;ffffffffa0f7a5ca&amp;gt;] osd_remote_fid+0x9a/0x280 [osd_ldiskfs]
2013-08-06 13:51:41  [&amp;lt;ffffffffa0f86621&amp;gt;] osd_index_ea_lookup+0x521/0x850 [osd_ldiskfs]
2013-08-06 13:51:41  [&amp;lt;ffffffffa081982f&amp;gt;] dt_lookup_dir+0x6f/0x130 [obdclass]
2013-08-06 13:51:41  [&amp;lt;ffffffffa07f7fb5&amp;gt;] llog_osd_open+0x475/0xbb0 [obdclass]
2013-08-06 13:51:41  [&amp;lt;ffffffffa07c431a&amp;gt;] llog_open+0xba/0x2c0 [obdclass]
2013-08-06 13:51:41  [&amp;lt;ffffffffa07c7f71&amp;gt;] llog_backup+0x61/0x500 [obdclass]
2013-08-06 13:51:41  [&amp;lt;ffffffff81281860&amp;gt;] ? sprintf+0x40/0x50
2013-08-06 13:51:41  [&amp;lt;ffffffffa1005702&amp;gt;] mgc_process_log+0x1192/0x18e0 [mgc]
2013-08-06 13:51:41  [&amp;lt;ffffffffa0fff370&amp;gt;] ? mgc_blocking_ast+0x0/0x800 [mgc]
2013-08-06 13:51:41  [&amp;lt;ffffffffa097ac40&amp;gt;] ? ldlm_completion_ast+0x0/0x960 [ptlrpc]
2013-08-06 13:51:41  [&amp;lt;ffffffffa10072e4&amp;gt;] mgc_process_config+0x594/0xed0 [mgc]
2013-08-06 13:51:41  [&amp;lt;ffffffffa080d776&amp;gt;] lustre_process_log+0x256/0xaa0 [obdclass]
2013-08-06 13:51:41  [&amp;lt;ffffffffa07dc972&amp;gt;] ? class_name2dev+0x42/0xe0 [obdclass]
2013-08-06 13:51:41  [&amp;lt;ffffffff81167d83&amp;gt;] ? kmem_cache_alloc_trace+0x1a3/0x1b0
2013-08-06 13:51:41  [&amp;lt;ffffffffa07dca1e&amp;gt;] ? class_name2obd+0xe/0x30 [obdclass]
2013-08-06 13:51:41  [&amp;lt;ffffffffa0841641&amp;gt;] server_start_targets+0x1821/0x1a40 [obdclass]
2013-08-06 13:51:41  [&amp;lt;ffffffffa0810db3&amp;gt;] ? lustre_start_mgc+0x493/0x1e90 [obdclass]
2013-08-06 13:51:41  [&amp;lt;ffffffffa0808ca0&amp;gt;] ? class_config_llog_handler+0x0/0x1880 [obdclass]
2013-08-06 13:51:41  [&amp;lt;ffffffffa08451fc&amp;gt;] server_fill_super+0xbbc/0x1a24 [obdclass]
2013-08-06 13:51:41  [&amp;lt;ffffffffa0812988&amp;gt;] lustre_fill_super+0x1d8/0x530 [obdclass]
2013-08-06 13:51:41  [&amp;lt;ffffffffa08127b0&amp;gt;] ? lustre_fill_super+0x0/0x530 [obdclass]
2013-08-06 13:51:41  [&amp;lt;ffffffff8118431f&amp;gt;] get_sb_nodev+0x5f/0xa0
2013-08-06 13:51:41  [&amp;lt;ffffffffa080a625&amp;gt;] lustre_get_sb+0x25/0x30 [obdclass]
2013-08-06 13:51:41  [&amp;lt;ffffffff8118395b&amp;gt;] vfs_kern_mount+0x7b/0x1b0
2013-08-06 13:51:41  [&amp;lt;ffffffff81183b02&amp;gt;] do_kern_mount+0x52/0x130
2013-08-06 13:51:41  [&amp;lt;ffffffff811a3d32&amp;gt;] do_mount+0x2d2/0x8d0
2013-08-06 13:51:41  [&amp;lt;ffffffff811a43c0&amp;gt;] sys_mount+0x90/0xe0
2013-08-06 13:51:41  [&amp;lt;ffffffff8100b072&amp;gt;] system_call_fastpath+0x16/0x1b
2013-08-06 13:51:41
2013-08-06 13:51:41 Aug  6 13:51:41 Kernel panic - not syncing: LBUG
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;Version is 2.4.53&lt;/p&gt;</comment>
                            <comment id="63819" author="di.wang" created="Wed, 7 Aug 2013 21:08:51 +0000"  >&lt;p&gt;&lt;a href=&quot;http://review.whamcloud.com/7266&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/7266&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="64975" author="adilger" created="Fri, 23 Aug 2013 17:12:14 +0000"  >&lt;p&gt;We probably also need to land this patch for 2.4.1.&lt;/p&gt;</comment>
                            <comment id="65083" author="pjones" created="Mon, 26 Aug 2013 18:58:57 +0000"  >&lt;p&gt;Landed for 2.5. Will track landing on b2_4 separately&lt;/p&gt;</comment>
                            <comment id="66994" author="pichong" created="Thu, 19 Sep 2013 12:43:41 +0000"  >&lt;p&gt;Peter,&lt;br/&gt;
Has this been landed in 2.4 ? What is the gerrit patch ?&lt;/p&gt;

&lt;p&gt;thanks.&lt;/p&gt;</comment>
                            <comment id="67003" author="pjones" created="Thu, 19 Sep 2013 13:32:26 +0000"  >&lt;p&gt;Hi Gregoire&lt;/p&gt;

&lt;p&gt;There is no gerrit patch at present. Presently I would expect this to be taken care of when we start working on 2.4.2 in the coming weeks, but this could always be expedited if need be.&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="24149">LU-4878</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="19803">LU-3582</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvndb:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>7596</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>