<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:05:55 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-7092] Interop 2.7.0&lt;-&gt;master sanity test_133g 133f failed: mds1 find  /proc/fs/lustre/ /proc/sys/lnet/ /proc/sys/lustre/ /sys/fs/lustre/ /sys/kernel/debug/lustre/ failed</title>
                <link>https://jira.whamcloud.com/browse/LU-7092</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;MDS console&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Lustre: DEBUG MARKER: == sanity test 133g: Check for Oopses on bad io area writes/reads in /proc == 12:10:42 (1441134642)
Lustre: 71929:0:(mdt_coordinator.c:1883:mdt_hsm_policy_seq_write()) lustre-MDT0000: &apos;&apos; is unknown, supported policies are:
LustreError: 71936:0:(mdt_coordinator.c:2035:mdt_hsm_cdt_control_seq_write()) lustre-MDT0000: Valid coordinator control commands are: enabled shutdown disabled purge help
Lustre: 71942:0:(lprocfs_status.c:1979:lprocfs_wr_nosquash_nids()) lustre-MDT0000: failed to set nosquash_nids to &quot;&quot;, bad address rc = -14
LNet: 71942:0:(nidstrings.c:271:parse_nidrange()) can&apos;t parse nidrange: &quot;&quot;
Lustre: 71942:0:(lprocfs_status.c:1983:lprocfs_wr_nosquash_nids()) lustre-MDT0000: failed to set nosquash_nids due to string too long rc = -22
Lustre: 71943:0:(lprocfs_status.c:1913:lprocfs_wr_root_squash()) lustre-MDT0000: failed to set root_squash due to bad address, rc = -14
Lustre: 71943:0:(lprocfs_status.c:1909:lprocfs_wr_root_squash()) lustre-MDT0000: failed to set root_squash to &quot;&quot;, needs uid:gid format, rc = -22
LustreError: 71947:0:(genops.c:1488:obd_export_evict_by_uuid()) lustre-MDT0000: can&apos;t disconnect : no exports found
LustreError: 71949:0:(mdt_lproc.c:357:lprocfs_identity_info_seq_write()) lustre-MDT0000: invalid data count = 5, size = 1048
LustreError: 71949:0:(mdt_lproc.c:374:lprocfs_identity_info_seq_write()) lustre-MDT0000: MDS identity downcall bad params
LustreError: 71951:0:(mdt_lproc.c:292:mdt_identity_upcall_seq_write()) lustre-MDT0000: identity upcall too long
Lustre: 72124:0:(mdt_coordinator.c:1883:mdt_hsm_policy_seq_write()) lustre-MDT0001: &apos;&apos; is unknown, supported policies are:
Lustre: 72124:0:(mdt_coordinator.c:1883:mdt_hsm_policy_seq_write()) Skipped 2 previous similar messages
LustreError: 72131:0:(mdt_coordinator.c:2035:mdt_hsm_cdt_control_seq_write()) lustre-MDT0001: Valid coordinator control commands are: enabled shutdown disabled purge help
LustreError: 72131:0:(mdt_coordinator.c:2035:mdt_hsm_cdt_control_seq_write()) Skipped 2 previous similar messages
Lustre: 72137:0:(lprocfs_status.c:1979:lprocfs_wr_nosquash_nids()) lustre-MDT0001: failed to set nosquash_nids to &quot;&quot;, bad address rc = -14
Lustre: 72137:0:(lprocfs_status.c:1979:lprocfs_wr_nosquash_nids()) Skipped 8 previous similar messages
LNet: 72137:0:(nidstrings.c:271:parse_nidrange()) can&apos;t parse nidrange: &quot;&quot;
LNet: 72137:0:(nidstrings.c:271:parse_nidrange()) Skipped 2 previous similar messages
Lustre: 72137:0:(lprocfs_status.c:1983:lprocfs_wr_nosquash_nids()) lustre-MDT0001: failed to set nosquash_nids due to string too long rc = -22
Lustre: 72137:0:(lprocfs_status.c:1983:lprocfs_wr_nosquash_nids()) Skipped 5 previous similar messages
Lustre: 72138:0:(lprocfs_status.c:1913:lprocfs_wr_root_squash()) lustre-MDT0001: failed to set root_squash due to bad address, rc = -14
Lustre: 72138:0:(lprocfs_status.c:1913:lprocfs_wr_root_squash()) Skipped 8 previous similar messages
Lustre: 72138:0:(lprocfs_status.c:1909:lprocfs_wr_root_squash()) lustre-MDT0001: failed to set root_squash to &quot;&quot;, needs uid:gid format, rc = -22
Lustre: 72138:0:(lprocfs_status.c:1909:lprocfs_wr_root_squash()) Skipped 5 previous similar messages
LustreError: 72142:0:(genops.c:1488:obd_export_evict_by_uuid()) lustre-MDT0001: can&apos;t disconnect : no exports found
LustreError: 72142:0:(genops.c:1488:obd_export_evict_by_uuid()) Skipped 11 previous similar messages
LustreError: 72144:0:(mdt_lproc.c:357:lprocfs_identity_info_seq_write()) lustre-MDT0001: invalid data count = 5, size = 1048
LustreError: 72144:0:(mdt_lproc.c:357:lprocfs_identity_info_seq_write()) Skipped 8 previous similar messages
LustreError: 72144:0:(mdt_lproc.c:374:lprocfs_identity_info_seq_write()) lustre-MDT0001: MDS identity downcall bad params
LustreError: 72144:0:(mdt_lproc.c:374:lprocfs_identity_info_seq_write()) Skipped 4 previous similar messages
LustreError: 72146:0:(mdt_lproc.c:292:mdt_identity_upcall_seq_write()) lustre-MDT0001: identity upcall too long
LustreError: 72146:0:(mdt_lproc.c:292:mdt_identity_upcall_seq_write()) Skipped 5 previous similar messages
LustreError: 72237:0:(ldlm_resource.c:106:seq_watermark_write()) Failed to set LDLM watermark, rc = -14.
LustreError: 72269:0:(ldlm_pool.c:723:lprocfs_wr_recalc_period()) Can&apos;t parse user input, rc = -14
LustreError: 72279:0:(ldlm_resource.c:293:lprocfs_lru_size_seq_write()) invalid value written
LustreError: 72753:0:(ldlm_pool.c:723:lprocfs_wr_recalc_period()) Can&apos;t parse user input, rc = -14
LustreError: 72753:0:(ldlm_pool.c:723:lprocfs_wr_recalc_period()) Skipped 137 previous similar messages
LustreError: 72763:0:(ldlm_resource.c:293:lprocfs_lru_size_seq_write()) invalid value written
LustreError: 72763:0:(ldlm_resource.c:293:lprocfs_lru_size_seq_write()) Skipped 93 previous similar messages
LustreError: 72958:0:(mgs_nids.c:793:lprocfs_wr_ir_state()) Unable to process command: (-22)
LustreError: 72963:0:(genops.c:1488:obd_export_evict_by_uuid()) MGS: can&apos;t disconnect : no exports found
LustreError: 72963:0:(genops.c:1488:obd_export_evict_by_uuid()) Skipped 3 previous similar messages
LustreError: 73141:0:(lproc_fid.c:182:lprocfs_server_fid_width_seq_write()) srv-lustre-MDT0000: invalid width.
LustreError: 4108:0:(qsd_reint.c:618:qsd_pending_updates()) lustre-MDT0002: Delaying reintegration for qtype:1 until pending updates are flushed.
LustreError: 4108:0:(qsd_reint.c:618:qsd_pending_updates()) Skipped 2 previous similar messages
LustreError: 73306:0:(nodemap_handler.c:833:nodemap_create()) cannot add nodemap: &apos;&apos;: rc = -22
Lustre: 73314:0:(libcfs_string.c:131:cfs_str2mask()) unknown mask &apos;&apos;.
mask usage: [+|-]&amp;lt;all|type&amp;gt; ...
Lustre: 0 MB is too small for debug buffer size, setting it to 128 MB.
LustreError: 73349:0:(linux-sysctl.c:144:proc_max_dirty_pages_in_mb()) Refusing to set max dirty pages to 18446612167650554432, which is more than 90% of available RAM; setting to 7377381
LustreError: 73412:0:(client.c:1138:ptlrpc_import_delay_req()) @@@ IMP_CLOSED   req@ffff880814e446c0 x1511132758974664/t0(0) o1000-&amp;gt;lustre-MDT0001-osp-MDT0000@0@lo:24/4 lens 248/16608 e 0 to 0 dl 0 ref 2 fl Rpc:/0/ffffffff rc 0/-1
LustreError: 73412:0:(client.c:1138:ptlrpc_import_delay_req()) Skipped 2 previous similar messages
LustreError: 73412:0:(osp_object.c:586:osp_attr_get()) lustre-MDT0001-osp-MDT0000:osp_attr_get update error [0x240000402:0x1:0x0]: rc = -5
LustreError: 73412:0:(osp_object.c:586:osp_attr_get()) Skipped 2 previous similar messages
LustreError: 73412:0:(llog.c:180:llog_cancel_rec()) lustre-MDT0001-osp-MDT0000: fail to write header for llog #0x1:1073742850#00000000: rc = -5
LustreError: 73412:0:(llog.c:180:llog_cancel_rec()) Skipped 2 previous similar messages
LustreError: 4053:0:(client.c:1138:ptlrpc_import_delay_req()) @@@ IMP_CLOSED   req@ffff8803f5662680 x1511132758974684/t0(0) o13-&amp;gt;lustre-OST0002-osc-MDT0000@10.2.4.56@tcp:7/4 lens 224/368 e 0 to 0 dl 0 ref 1 fl Rpc:/0/ffffffff rc 0/-1
LustreError: 4053:0:(client.c:1138:ptlrpc_import_delay_req()) Skipped 2 previous similar messages
LustreError: 4046:0:(client.c:1138:ptlrpc_import_delay_req()) @@@ IMP_CLOSED   req@ffff8803f1052980 x1511132758974700/t0(0) o13-&amp;gt;lustre-OST0001-osc-MDT0000@10.2.4.56@tcp:7/4 lens 224/368 e 0 to 0 dl 0 ref 1 fl Rpc:/0/ffffffff rc 0/-1
LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) header@ffff8804347b0c80[0x0, 1, [0x1:0x0:0x0] hash exist]{

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....local_storage@ffff8804347b0cd0

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....osd-ldiskfs@ffff880435f7be00osd-ldiskfs-object@ffff880435f7be00(i:ffff8804342196a0:81/3187989141)[plain]

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) } header@ffff8804347b0c80

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) header@ffff880412e4ae40[0x0, 1, [0x200000003:0x0:0x0] hash exist]{

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....local_storage@ffff880412e4ae90

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....osd-ldiskfs@ffff880415458900osd-ldiskfs-object@ffff880415458900(i:ffff8803f8298a70:79/3187989106)[plain]

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) } header@ffff880412e4ae40

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) header@ffff880421f5ce40[0x0, 1, [0x200000003:0x2:0x0] hash exist]{

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....local_storage@ffff880421f5ce90

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....osd-ldiskfs@ffff880421f5cd80osd-ldiskfs-object@ffff880421f5cd80(i:ffff8804343c9bf0:80/3187989107)[plain]

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) } header@ffff880421f5ce40

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) header@ffff880435f7bd40[0x0, 1, [0xa:0x0:0x0] hash exist]{

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....local_storage@ffff880435f7bd90

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....osd-ldiskfs@ffff8804284368c0osd-ldiskfs-object@ffff8804284368c0(i:ffff8804342b52d0:82/3187989175)[plain]

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) } header@ffff880435f7bd40

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) header@ffff8803fa022380[0x1, 1, [0x200000001:0x1017:0x0] hash exist]{

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....local_storage@ffff8803fa0223d0

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....osd-ldiskfs@ffff8803f9b51180osd-ldiskfs-object@ffff8803f9b51180(i:ffff88043434b7a0:12/1262305354)[plain]

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) } header@ffff8803fa022380

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) header@ffff8803f97a4a40[0x0, 1, [0x2c00032e1:0x353:0x0] hash exist]{

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....mdt@ffff8803f97a4a90mdt-object@ffff8803f97a4a40(flags=0, writecount=0)

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....mdd@ffff8803fa481a00mdd-object@ffff8803fa481a00(open_count=0, valid=0, cltime=0, flags=0)

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....lod@ffff880437d4c128lod-object@ffff880437d4c128

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....osp@ffff8803f561b8f0osp-object@ffff8803f561b8a0

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) } header@ffff8803f97a4a40

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) header@ffff8803f9699cb0[0x0, 1, [0x2400032e0:0x60:0x0] hash exist]{

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....mdt@ffff8803f9699d00mdt-object@ffff8803f9699cb0(flags=0, writecount=0)

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....mdd@ffff8803f972ab40mdd-object@ffff8803f972ab40(open_count=0, valid=0, cltime=0, flags=0)

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....lod@ffff88042af15ec0lod-object@ffff88042af15ec0

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....osp@ffff88042c5664e0osp-object@ffff88042c566490

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) } header@ffff8803f9699cb0

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) header@ffff8803e8c7d970[0x0, 1, [0x2400032e0:0x67:0x0] hash exist]{

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....mdt@ffff8803e8c7d9c0mdt-object@ffff8803e8c7d970(flags=0, writecount=0)

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....mdd@ffff8803e8d65320mdd-object@ffff8803e8d65320(open_count=0, valid=0, cltime=0, flags=0)

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....lod@ffff8803e8d54380lod-object@ffff8803e8d54380

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....osp@ffff8803e995f5b0osp-object@ffff8803e995f560

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) } header@ffff8803e8c7d970

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) header@ffff8803e8c7da40[0x0, 1, [0x2400032e0:0x68:0x0] hash exist]{

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....mdt@ffff8803e8c7da90mdt-object@ffff8803e8c7da40(flags=0, writecount=0)

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....mdd@ffff8803e8d65370mdd-object@ffff8803e8d65370(open_count=0, valid=0, cltime=0, flags=0)

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....lod@ffff8803e8d543f8lod-object@ffff8803e8d543f8

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....osp@ffff8803e995f4e0osp-object@ffff8803e995f490

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) } header@ffff8803e8c7da40

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) header@ffff8803e8fa1970[0x0, 1, [0x2400032e0:0x6f:0x0] hash exist]{

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....mdt@ffff8803e8fa19c0mdt-object@ffff8803e8fa1970(flags=0, writecount=0)

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....mdd@ffff8803e99a2b40mdd-object@ffff8803e99a2b40(open_count=0, valid=0, cltime=0, flags=0)

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....lod@ffff8803e8cdcf38lod-object@ffff8803e8cdcf38

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) ....osp@ffff880426e6a5b0osp-object@ffff880426e6a560

LustreError: 4003:0:(osp_dev.c:1255:osp_device_free()) } header@ffff8803e8fa1970

LustreError:Initializing cgroup subsys cpuset
Initializing cgroup subsys cpu
Linux version 2.6.32-504.30.3.el6_lustre.x86_64 (jenkins@onyx-6-sde1-el6-x8664.onyx.hpdd.intel.com) (gcc version 4.4.7 20120313 (Red Hat 4.4.7-11) (GCC) ) #1 SMP Mon Aug 17 16:53:02 PDT 2015
Command line: ro root=UUID=745144ea-2119-43ad-82d9-dd19182b6c8a rd_NO_LUKS rd_NO_LVM LANG=en_US.UTF-8 rd_NO_MD console=tty0 SYSFONT=latarcyrheb-sun16 KEYBOARDTYPE=pc KEYTABLE=us rd_NO_DM console=ttyS0,115200 irqpoll nr_cpus=1 reset_devices cgroup_disable=memory mce=off disable_cpu_apicid=0 memmap=exactmap memmap=574K@4K memmap=133550K@49726K elfcorehdr=183276K memmap=4K$0K memmap=62K$578K memmap=128K$896K memmap=42200K$3067876K memmap=992K#3110076K memmap=488K#3111068K memmap=568K#3111556K memmap=516K#3112124K memmap=294912K$3112960K memmap=4K$4173824K memmap=4K$4174948K memmap=16K$4174960K memmap=4K$4175872K memmap=6016K$4188288K
KERNEL supported cpus:
  Intel GenuineIntel
  AMD AuthenticAMD
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;client console&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Lustre: DEBUG MARKER: == sanity test 133g: Check for Oopses on bad io area writes/reads in /proc == 12:10:42 (1441134642)
LustreError: 3016:0:(ldlm_pool.c:724:lprocfs_wr_recalc_period()) Can&apos;t parse user input, rc = -14
LustreError: 3026:0:(ldlm_resource.c:237:lprocfs_lru_size_seq_write()) invalid value written
Lustre: 3170:0:(lprocfs_status.c:1972:lprocfs_wr_nosquash_nids()) lustre: failed to set nosquash_nids to &quot;&quot;, bad address rc = -14
LNet: 3170:0:(nidstrings.c:834:parse_nidrange()) can&apos;t parse nidrange: &quot;&quot;
Lustre: 3170:0:(lprocfs_status.c:1976:lprocfs_wr_nosquash_nids()) lustre: failed to set nosquash_nids due to string too long rc = -22
Lustre: 3171:0:(lprocfs_status.c:1906:lprocfs_wr_root_squash()) lustre: failed to set root_squash due to bad address, rc = -14
Lustre: 3171:0:(lprocfs_status.c:1902:lprocfs_wr_root_squash()) lustre: failed to set root_squash to &quot;&quot;, needs uid:gid format, rc = -22
LustreError: 3467:0:(lproc_lmv.c:126:lmv_placement_seq_write()) Invalid placement policy &quot;&quot;!
LustreError: 3489:0:(gss_cli_upcall.c:245:gss_do_ctx_init_rpc()) ioctl size 5, expect 80, please check lgss_keyring version
Lustre: 3497:0:(libcfs_string.c:131:cfs_str2mask()) unknown mask &apos;&apos;.
mask usage: [+|-]&amp;lt;all|type&amp;gt; ...
Lustre: 0 MB is too small for debug buffer size, setting it to 128 MB.
LustreError: 3535:0:(linux-sysctl.c:194:proc_max_dirty_pages_in_mb()) Refusing to set max dirty pages to 18446612150466593728, which is more than 90% of available RAM; setting to 7377381
Lustre: Unmounted lustre-client
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Info required for matching: sanity 133f&lt;br/&gt;
Info required for matching: sanity 133g&lt;/p&gt;</description>
                <environment>server: lustre-master build #3142 RHEL6.6 DNE mode&lt;br/&gt;
client: 2.7.0</environment>
        <key id="31876">LU-7092</key>
            <summary>Interop 2.7.0&lt;-&gt;master sanity test_133g 133f failed: mds1 find  /proc/fs/lustre/ /proc/sys/lnet/ /proc/sys/lustre/ /sys/fs/lustre/ /sys/kernel/debug/lustre/ failed</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="adilger">Andreas Dilger</assignee>
                                    <reporter username="sarah">Sarah Liu</reporter>
                        <labels>
                            <label>interop</label>
                    </labels>
                <created>Wed, 2 Sep 2015 20:13:26 +0000</created>
                <updated>Thu, 23 Mar 2017 05:05:12 +0000</updated>
                            <resolved>Thu, 23 Mar 2017 03:38:12 +0000</resolved>
                                    <version>Lustre 2.8.0</version>
                                    <fixVersion>Lustre 2.10.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>3</watches>
                                                                            <comments>
                            <comment id="126212" author="adilger" created="Thu, 3 Sep 2015 17:39:05 +0000"  >&lt;p&gt;Sarah, are the test logs available in Maloo for this failure?  We need to see more of the MDS console logs with the actual stack trace.  There may also be a crashdump for the MDS.&lt;/p&gt;</comment>
                            <comment id="126267" author="sarah" created="Thu, 3 Sep 2015 18:54:19 +0000"  >&lt;p&gt;Unfortunately I don&apos;t have other logs, will try to reproduce it and get more information &lt;/p&gt;</comment>
                            <comment id="184265" author="gerrit" created="Fri, 10 Feb 2017 01:14:43 +0000"  >&lt;p&gt;Andreas Dilger (andreas.dilger@intel.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/25363&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/25363&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7092&quot; title=&quot;Interop 2.7.0&amp;lt;-&amp;gt;master sanity test_133g 133f failed: mds1 find  /proc/fs/lustre/ /proc/sys/lnet/ /proc/sys/lustre/ /sys/fs/lustre/ /sys/kernel/debug/lustre/ failed&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7092&quot;&gt;&lt;del&gt;LU-7092&lt;/del&gt;&lt;/a&gt; tests: generate sanity proc_dirs on facet&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: e4ba3b06c96127ccf93ffb021ba206dabffa25f9&lt;/p&gt;</comment>
                            <comment id="189347" author="gerrit" created="Thu, 23 Mar 2017 01:41:34 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/25363/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/25363/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7092&quot; title=&quot;Interop 2.7.0&amp;lt;-&amp;gt;master sanity test_133g 133f failed: mds1 find  /proc/fs/lustre/ /proc/sys/lnet/ /proc/sys/lustre/ /sys/fs/lustre/ /sys/kernel/debug/lustre/ failed&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7092&quot;&gt;&lt;del&gt;LU-7092&lt;/del&gt;&lt;/a&gt; tests: generate sanity proc_dirs on facet&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 4b9de88f99066adf4365c6b747aad4f6d52d0ae2&lt;/p&gt;</comment>
                            <comment id="189366" author="pjones" created="Thu, 23 Mar 2017 03:38:12 +0000"  >&lt;p&gt;Landed for 2.10&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                                        </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="34515">LU-7746</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="44175">LU-9166</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzxm7r:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>