<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:24:14 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
<language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-2323] mds crash</title>
                <link>https://jira.whamcloud.com/browse/LU-2323</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;We recently experienced two MDS crashes on our Lustre installation.&lt;/p&gt;

&lt;p&gt;I&apos;ve attached the netconsole output of both crashes (that&apos;s all i got: there is nothing in the syslog and i wasn&apos;t able to create a screenshot of the console output as the crashed mds was already powercycled by its failover partner).&lt;/p&gt;</description>
                <environment>[&lt;a href=&apos;mailto:root@n-mds1&apos;&gt;root@n-mds1&lt;/a&gt; ~]# cat /proc/fs/lustre/version &lt;br/&gt;
lustre: 2.2.0&lt;br/&gt;
kernel: patchless_client&lt;br/&gt;
build:  2.2.0-RC2--PRISTINE-2.6.32-220.4.2.el6_lustre.x86_64&lt;br/&gt;
&lt;br/&gt;
[&lt;a href=&apos;mailto:root@n-mds1&apos;&gt;root@n-mds1&lt;/a&gt; ~]# uname -r&lt;br/&gt;
2.6.32-220.4.2.el6_lustre.x86_64&lt;br/&gt;
&lt;br/&gt;
[&lt;a href=&apos;mailto:root@n-mds1&apos;&gt;root@n-mds1&lt;/a&gt; ~]# rpm -qa|grep lustre&lt;br/&gt;
lustre-ldiskfs-3.3.0-2.6.32_220.4.2.el6_lustre.x86_64.x86_64&lt;br/&gt;
lustre-2.2.0-2.6.32_220.4.2.el6_lustre.x86_64.x86_64&lt;br/&gt;
kernel-firmware-2.6.32-220.4.2.el6_lustre.x86_64&lt;br/&gt;
lustre-modules-2.2.0-2.6.32_220.4.2.el6_lustre.x86_64.x86_64&lt;br/&gt;
kernel-headers-2.6.32-220.4.2.el6_lustre.x86_64&lt;br/&gt;
kernel-2.6.32-220.4.2.el6_lustre.x86_64&lt;br/&gt;
kernel-devel-2.6.32-220.4.2.el6_lustre.x86_64</environment>
        <key id="16673">LU-2323</key>
            <summary>mds crash</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="2" iconUrl="https://jira.whamcloud.com/images/icons/priorities/critical.svg">Critical</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="niu">Niu Yawei</assignee>
                                    <reporter username="ethz.support">ETHz Support</reporter>
                        <labels>
                            <label>server</label>
                    </labels>
                <created>Wed, 14 Nov 2012 04:46:04 +0000</created>
                <updated>Thu, 6 Dec 2012 09:24:09 +0000</updated>
                            <resolved>Thu, 6 Dec 2012 09:24:09 +0000</resolved>
                                    <version>Lustre 2.2.0</version>
                                                        <due/>
                            <votes>0</votes>
                                    <watches>10</watches>
                                                                            <comments>
                            <comment id="47778" author="ethz.support" created="Wed, 14 Nov 2012 04:47:24 +0000"  >&lt;p&gt;netconsole output&lt;/p&gt;</comment>
                            <comment id="47779" author="ethz.support" created="Wed, 14 Nov 2012 04:52:17 +0000"  >&lt;p&gt;In terms of activity, during the crashes, the mds server has memory available and the IB network have low traffic.&lt;/p&gt;

&lt;p&gt;In both cases there was free memory available: The crash yesterday didn&apos;t even have a full read-cache and the MDS was never swapping (it has a 50GB Swap partition).&lt;/p&gt;

&lt;p&gt;I also checked the system load + infiniband traffic: The MDS was doing almost nothing during the crash (load less than 0.5 / IB traffic ~100KB/s).&lt;/p&gt;



</comment>
                            <comment id="47780" author="niu" created="Wed, 14 Nov 2012 05:20:00 +0000"  >&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;2012-11-08T20:14:29+01:00 n-mds1 [&amp;lt;ffffffffa0dd4dbb&amp;gt;] osd_trans_stop+0xeb/0x390 [osd_ldiskfs]
2012-11-08T20:14:29+01:00 n-mds1 RSP &amp;lt;ffff880bd116da70&amp;gt;
2012-11-08T20:14:29+01:00 n-mds1 ---[ end trace 69a06040c21c938c ]---
2012-11-08T20:14:29+01:00 n-mds1 Kernel panic - not syncing: Fatal exception
2012-11-08T20:14:29+01:00 n-mds1 Pid: 3845, comm: mdt_52 Tainted: G      D    ----------------   2.6.32-220.4.2.el6_lustre.x86_64 #1
2012-11-08T20:14:29+01:00 n-mds1 Call Trace:
2012-11-08T20:14:29+01:00 n-mds1 [&amp;lt;ffffffff814ec61a&amp;gt;] ? panic+0x78/0x143
2012-11-08T20:14:29+01:00 n-mds1 [&amp;lt;ffffffff814f07a4&amp;gt;] ? oops_end+0xe4/0x100
2012-11-08T20:14:29+01:00 n-mds1 [&amp;lt;ffffffff8100f26b&amp;gt;] ? die+0x5b/0x90
2012-11-08T20:14:29+01:00 n-mds1 [&amp;lt;ffffffff814f0312&amp;gt;] ? do_general_protection+0x152/0x160
2012-11-08T20:14:29+01:00 n-mds1 [&amp;lt;ffffffffa0ddaada&amp;gt;] ? osd_xattr_set+0x14a/0x1d0 [osd_ldiskfs]
2012-11-08T20:14:29+01:00 n-mds1 [&amp;lt;ffffffff814efae5&amp;gt;] ? general_protection+0x25/0x30
2012-11-08T20:14:30+01:00 n-mds1 [&amp;lt;ffffffffa0dd4dbb&amp;gt;] ? osd_trans_stop+0xeb/0x390 [osd_ldiskfs]
2012-11-08T20:14:30+01:00 n-mds1 [&amp;lt;ffffffffa0cde12a&amp;gt;] ? mdd_trans_stop+0x1a/0x20 [mdd]
2012-11-08T20:14:30+01:00 n-mds1 [&amp;lt;ffffffffa0cc1036&amp;gt;] ? mdd_attr_set+0xbf6/0x2030 [mdd]
2012-11-08T20:14:30+01:00 n-mds1 [&amp;lt;ffffffffa0677820&amp;gt;] ? ldlm_completion_ast+0x0/0x6d0 [ptlrpc]
2012-11-08T20:14:30+01:00 n-mds1 [&amp;lt;ffffffffa069ae3c&amp;gt;] ? lustre_msg_get_versions+0x6c/0xb0 [ptlrpc]
2012-11-08T20:14:30+01:00 n-mds1 [&amp;lt;ffffffffa0e12a1c&amp;gt;] ? cml_attr_set+0x6c/0x160 [cmm]
2012-11-08T20:14:30+01:00 n-mds1 [&amp;lt;ffffffffa0d4e578&amp;gt;] ? mdt_attr_set+0x268/0x4b0 [mdt]
2012-11-08T20:14:30+01:00 n-mds1 [&amp;lt;ffffffffa0d4eb0d&amp;gt;] ? mdt_reint_setattr+0x34d/0x1060 [mdt]
2012-11-08T20:14:30+01:00 n-mds1 [&amp;lt;ffffffffa0d48e7b&amp;gt;] ? mdt_reint_rec+0x4b/0xa0 [mdt]
2012-11-08T20:14:30+01:00 n-mds1 [&amp;lt;ffffffffa0d41069&amp;gt;] ? mdt_reint_internal+0x479/0x7b0 [mdt]
2012-11-08T20:14:30+01:00 n-mds1 [&amp;lt;ffffffffa0d413ee&amp;gt;] ? mdt_reint+0x4e/0xb0 [mdt]
2012-11-08T20:14:30+01:00 n-mds1 [&amp;lt;ffffffffa0d37b9d&amp;gt;] ? mdt_handle_common+0x74d/0x1400 [mdt]
2012-11-08T20:14:30+01:00 n-mds1 [&amp;lt;ffffffffa0d38925&amp;gt;] ? mdt_regular_handle+0x15/0x20 [mdt]
2012-11-08T20:14:30+01:00 n-mds1 [&amp;lt;ffffffffa06a6011&amp;gt;] ? ptlrpc_server_handle_request+0x3c1/0xcb0 [ptlrpc]
2012-11-08T20:14:30+01:00 n-mds1 [&amp;lt;ffffffffa04373ee&amp;gt;] ? cfs_timer_arm+0xe/0x10 [libcfs]
2012-11-08T20:14:30+01:00 n-mds1 [&amp;lt;ffffffffa0441e19&amp;gt;] ? lc_watchdog_touch+0x79/0x110 [libcfs]
2012-11-08T20:14:31+01:00 n-mds1 [&amp;lt;ffffffffa06a00e2&amp;gt;] ? ptlrpc_wait_event+0xb2/0x2c0 [ptlrpc]
2012-11-08T20:14:31+01:00 n-mds1 [&amp;lt;ffffffff810519c3&amp;gt;] ? __wake_up+0x53/0x70
2012-11-08T20:14:31+01:00 n-mds1 [&amp;lt;ffffffffa06a701f&amp;gt;] ? ptlrpc_main+0x71f/0x1210 [ptlrpc]
2012-11-08T20:14:31+01:00 n-mds1 [&amp;lt;ffffffffa06a6900&amp;gt;] ? ptlrpc_main+0x0/0x1210 [ptlrpc]
2012-11-08T20:14:31+01:00 n-mds1 [&amp;lt;ffffffff8100c14a&amp;gt;] ? child_rip+0xa/0x20
2012-11-08T20:14:31+01:00 n-mds1 [&amp;lt;ffffffffa06a6900&amp;gt;] ? ptlrpc_main+0x0/0x1210 [ptlrpc]
2012-11-08T20:14:31+01:00 n-mds1 [&amp;lt;ffffffffa06a6900&amp;gt;] ? ptlrpc_main+0x0/0x1210 [ptlrpc]
2012-11-08T20:14:31+01:00 n-mds1 [&amp;lt;ffffffff8100c140&amp;gt;] ? child_rip+0x0/0x20
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;In the 2.2 code, we added following code in osd_xattr_set():&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;        &lt;span class=&quot;code-comment&quot;&gt;/* version set is not real XATTR */&lt;/span&gt;
        &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (strcmp(name, XATTR_NAME_VERSION) == 0) {
                /* &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; version we are just using xattr API but change inode
                 * field instead */
                LASSERT(buf-&amp;gt;lb_len == sizeof(dt_obj_version_t));
                osd_object_version_set(env, dt, buf-&amp;gt;lb_buf);
                &lt;span class=&quot;code-keyword&quot;&gt;return&lt;/span&gt; sizeof(dt_obj_version_t);
        }
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;we should probably check if the &quot;name&quot; is NULL first. Alex, any thought?&lt;/p&gt;</comment>
                            <comment id="47784" author="bzzz" created="Wed, 14 Nov 2012 09:02:13 +0000"  >&lt;p&gt;hmm, I&apos;d say we should assert on name=NULL.. and the caller must be fixed.&lt;/p&gt;</comment>
                            <comment id="47786" author="adrian" created="Wed, 14 Nov 2012 09:46:58 +0000"  >&lt;p&gt;This has become quite serious for us right now: Our filesystem is currently down: We entered a reboot -&amp;gt; panic -&amp;gt; reboot -&amp;gt; panic cycle:&lt;/p&gt;

&lt;p&gt;Our MDS crashes with the same message as soon as we reboot it:&lt;br/&gt;
2012-11-14T15:45:07+01:00 n-mds2 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0cde12a&amp;gt;&amp;#93;&lt;/span&gt; mdd_trans_stop+0x1a/0x20 &lt;span class=&quot;error&quot;&gt;&amp;#91;mdd&amp;#93;&lt;/span&gt;&lt;br/&gt;
2012-11-14T15:45:07+01:00 n-mds2 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0cc1036&amp;gt;&amp;#93;&lt;/span&gt; mdd_attr_set+0xbf6/0x2030 &lt;span class=&quot;error&quot;&gt;&amp;#91;mdd&amp;#93;&lt;/span&gt;&lt;br/&gt;
2012-11-14T15:45:07+01:00 n-mds2 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0677820&amp;gt;&amp;#93;&lt;/span&gt; ? ldlm_completion_ast+0x0/0x6d0 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
2012-11-14T15:45:08+01:00 n-mds2 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa069ae3c&amp;gt;&amp;#93;&lt;/span&gt; ? lustre_msg_get_versions+0x6c/0xb0 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
2012-11-14T15:45:08+01:00 n-mds2 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0e12a1c&amp;gt;&amp;#93;&lt;/span&gt; cml_attr_set+0x6c/0x160 &lt;span class=&quot;error&quot;&gt;&amp;#91;cmm&amp;#93;&lt;/span&gt;&lt;br/&gt;
2012-11-14T15:45:08+01:00 n-mds2 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0d4e578&amp;gt;&amp;#93;&lt;/span&gt; mdt_attr_set+0x268/0x4b0 &lt;span class=&quot;error&quot;&gt;&amp;#91;mdt&amp;#93;&lt;/span&gt;&lt;br/&gt;
2012-11-14T15:45:08+01:00 n-mds2 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0d4eb0d&amp;gt;&amp;#93;&lt;/span&gt; mdt_reint_setattr+0x34d/0x1060 &lt;span class=&quot;error&quot;&gt;&amp;#91;mdt&amp;#93;&lt;/span&gt;&lt;br/&gt;
2012-11-14T15:45:08+01:00 n-mds2 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0d48e7b&amp;gt;&amp;#93;&lt;/span&gt; mdt_reint_rec+0x4b/0xa0 &lt;span class=&quot;error&quot;&gt;&amp;#91;mdt&amp;#93;&lt;/span&gt;&lt;br/&gt;
2012-11-14T15:45:08+01:00 n-mds2 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0d41069&amp;gt;&amp;#93;&lt;/span&gt; mdt_reint_internal+0x479/0x7b0 &lt;span class=&quot;error&quot;&gt;&amp;#91;mdt&amp;#93;&lt;/span&gt;&lt;br/&gt;
2012-11-14T15:45:08+01:00 n-mds2 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0d413ee&amp;gt;&amp;#93;&lt;/span&gt; mdt_reint+0x4e/0xb0 &lt;span class=&quot;error&quot;&gt;&amp;#91;mdt&amp;#93;&lt;/span&gt;&lt;br/&gt;
2012-11-14T15:45:08+01:00 n-mds2 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0d37b9d&amp;gt;&amp;#93;&lt;/span&gt; mdt_handle_common+0x74d/0x1400 &lt;span class=&quot;error&quot;&gt;&amp;#91;mdt&amp;#93;&lt;/span&gt;&lt;br/&gt;
2012-11-14T15:45:08+01:00 n-mds2 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0d38925&amp;gt;&amp;#93;&lt;/span&gt; mdt_regular_handle+0x15/0x20 &lt;span class=&quot;error&quot;&gt;&amp;#91;mdt&amp;#93;&lt;/span&gt;&lt;br/&gt;
2012-11-14T15:45:08+01:00 n-mds2 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa06a6011&amp;gt;&amp;#93;&lt;/span&gt; ptlrpc_server_handle_request+0x3c1/0xcb0 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
2012-11-14T15:45:08+01:00 n-mds2 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa04373ee&amp;gt;&amp;#93;&lt;/span&gt; ? cfs_timer_arm+0xe/0x10 &lt;span class=&quot;error&quot;&gt;&amp;#91;libcfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
2012-11-14T15:45:08+01:00 n-mds2 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0441e19&amp;gt;&amp;#93;&lt;/span&gt; ? lc_watchdog_touch+0x79/0x110 &lt;span class=&quot;error&quot;&gt;&amp;#91;libcfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
2012-11-14T15:45:08+01:00 n-mds2 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa06a00e2&amp;gt;&amp;#93;&lt;/span&gt; ? ptlrpc_wait_event+0xb2/0x2c0 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
2012-11-14T15:45:08+01:00 n-mds2 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff810519c3&amp;gt;&amp;#93;&lt;/span&gt; ? __wake_up+0x53/0x70&lt;br/&gt;
2012-11-14T15:45:08+01:00 n-mds2 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa06a701f&amp;gt;&amp;#93;&lt;/span&gt; ptlrpc_main+0x71f/0x1210 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
2012-11-14T15:45:08+01:00 n-mds2 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa06a6900&amp;gt;&amp;#93;&lt;/span&gt; ? ptlrpc_main+0x0/0x1210 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
2012-11-14T15:45:09+01:00 n-mds2 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8100c14a&amp;gt;&amp;#93;&lt;/span&gt; child_rip+0xa/0x20&lt;br/&gt;
2012-11-14T15:45:09+01:00 n-mds2 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa06a6900&amp;gt;&amp;#93;&lt;/span&gt; ? ptlrpc_main+0x0/0x1210 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
2012-11-14T15:45:09+01:00 n-mds2 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa06a6900&amp;gt;&amp;#93;&lt;/span&gt; ? ptlrpc_main+0x0/0x1210 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
2012-11-14T15:45:09+01:00 n-mds2 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8100c140&amp;gt;&amp;#93;&lt;/span&gt; ? child_rip+0x0/0x20&lt;br/&gt;
2012-11-14T15:45:09+01:00 n-mds2 Code: &lt;/p&gt;</comment>
                            <comment id="47788" author="johann" created="Wed, 14 Nov 2012 10:27:41 +0000"  >&lt;p&gt;Adrian, have you tried to remount the MDS with &quot;-o abort_recov&quot;?&lt;/p&gt;</comment>
                            <comment id="47789" author="adrian" created="Wed, 14 Nov 2012 10:32:35 +0000"  >&lt;p&gt;No, but it seems to be &apos;stable&apos; again.&lt;/p&gt;

&lt;p&gt;After the 4th crash i started a &apos;fsck -n /dev/mapper/...&apos;, got impatient and aborted it after ~10 minutes.&lt;/p&gt;

&lt;p&gt;After this i was able to start the MDS without any new crash: Looks like the &apos;10 minute downtime&apos; was enough to time out the &apos;evil&apos; client/operation !?&lt;/p&gt;


&lt;p&gt;Is there any way to see which client causes the crash?&lt;/p&gt;</comment>
                            <comment id="47791" author="ethz.support" created="Wed, 14 Nov 2012 10:34:47 +0000"  >&lt;p&gt;Adrian,&lt;br/&gt;
I&apos;m suggest to disable the cluster (to avoid pingpong) and mount manually the mds : mount -t lustre -L &amp;lt;MDT name&amp;gt; -o abort_recov &amp;lt;mount point&amp;gt;&lt;/p&gt;
</comment>
                            <comment id="47792" author="adrian" created="Wed, 14 Nov 2012 10:41:57 +0000"  >&lt;p&gt;Johann:&lt;/p&gt;

&lt;p&gt;Does the crash actually get triggered due to a client calling setaddr?&lt;/p&gt;

&lt;p&gt;One of our most frequent setattr callers is an 1.8.4 client (10.201.32.32) - could this client be the cause of the crash?&lt;/p&gt;

&lt;p&gt;&lt;span class=&quot;error&quot;&gt;&amp;#91;root@n-mds1 exports&amp;#93;&lt;/span&gt;# grep setattr */stats | awk &apos;&lt;/p&gt;
{print $2 &quot; &quot; $1}
&lt;p&gt;&apos;|sort -n | tail -5&lt;br/&gt;
1694 10.201.38.39@o2ib/stats:setattr&lt;br/&gt;
1841 10.201.38.21@o2ib/stats:setattr&lt;br/&gt;
2062 10.201.38.23@o2ib/stats:setattr   &amp;lt;-- 2.2.93 client&lt;br/&gt;
8931 10.201.32.32@o2ib/stats:setattr   &amp;lt;-- 1.8.4 client&lt;br/&gt;
17278 10.201.32.31@o2ib/stats:setattr  &amp;lt;-- 2.3.0 client&lt;/p&gt;

</comment>
                            <comment id="47857" author="yong.fan" created="Thu, 15 Nov 2012 11:52:41 +0000"  >&lt;p&gt;I do not think it is NULL &quot;name&quot; for osd_xattr_set() caused the failure. In this case, the &quot;name&quot; is from MDS internal, not from client. I do not find any internal callers passing NULL &quot;name&quot;. On the other hand, the other failure instances have different call traces.&lt;/p&gt;

&lt;p&gt;Adrian, have you made any system upgrading recently when you hit the failure? There is no evidence to indicate that it is interoperability issue caused the failure, but we can try to locate the issue step by step. The first step, please migrate the system load from above non-2.2 clients to other standard-2.2 clients. Especially the 1.8.4 client, because it is too old. I do not think we have tested the interoperation between Lustre-2.2 server and Lustre-1.8.4 client when we released Lustre-2.2. So it is the most suspicious.&lt;/p&gt;</comment>
                            <comment id="47861" author="bzzz" created="Thu, 15 Nov 2012 12:04:29 +0000"  >&lt;p&gt;in the both cases the very first messages were about inability to add llog record:&lt;/p&gt;

&lt;p&gt;LustreError: 3809:0:(llog_cat.c:298:llog_cat_add_rec()) llog_write_rec -28: lh=ffff880482b99800&lt;/p&gt;

&lt;p&gt;probably it&apos;s a problem in the code handling this error. I think we should reproduce this locally.&lt;/p&gt;

&lt;p&gt;it makes sense to ls CONFIGS/ directory using ldiskfs or debugfs to see how much space can be free after orphan cleanup.&lt;/p&gt;</comment>
                            <comment id="47862" author="yong.fan" created="Thu, 15 Nov 2012 12:06:16 +0000"  >&lt;p&gt;Adrian, if you cannot abandon 1.8.4 clients, then please umount them temporarily, and try to reproduce the failure with other clients. If cannot reproduce, then it is quite possible related with the interoperability issues.&lt;/p&gt;</comment>
                            <comment id="47863" author="adrian" created="Thu, 15 Nov 2012 12:13:40 +0000"  >&lt;p&gt;Yong Fan:&lt;/p&gt;

&lt;ul&gt;
	&lt;li&gt;We need the 1.8.x client to copy data from our old Lustre 1.8 installation to the 2.2 installation.&lt;br/&gt;
I will temporarily disable it on the next crash (there are still users moving data around).&lt;/li&gt;
&lt;/ul&gt;


&lt;ul&gt;
	&lt;li&gt;We didn&apos;t do any upgrades recently: We didn&apos;t touch the servers since months.&lt;/li&gt;
&lt;/ul&gt;


&lt;ul&gt;
	&lt;li&gt;We have 2.3 clients because the 2.2-client is too unstable for us and i don&apos;t have enough time to backport all crash fixes from 2.3 to our 2.2-client&lt;/li&gt;
&lt;/ul&gt;



&lt;p&gt;Alex: We get this llog_write_rec errors since months (Jun 2012) while our MDS only startet to crash recently (also note that there are ~5 hours between the error and the crash).&lt;/p&gt;

&lt;p&gt;What does the llog_write_rec error mean anyway?&lt;/p&gt;

&lt;p&gt;I&apos;ll post the output of &apos;ls CONFIGS/&apos; as soon as i have a chance to remount the volume via ldiskfs (= on next crash or on monday evening)&lt;/p&gt;</comment>
                            <comment id="47864" author="bzzz" created="Thu, 15 Nov 2012 12:22:55 +0000"  >&lt;p&gt;ok, good to know.. llog_write_rec() getting -28 mean MDS was not able to write (transactionally) a record to make some updates to OST (remove OST object, update OST object attributes).&lt;/p&gt;

&lt;p&gt;then could you attach osd_ldiskfs.ko please ?&lt;/p&gt;</comment>
                            <comment id="47865" author="adrian" created="Thu, 15 Nov 2012 12:30:48 +0000"  >&lt;p&gt;md5sum = 6a07cbbb49f63ea0f6a5e6bc067bc7c9&lt;/p&gt;

&lt;p&gt;requested kernel module&lt;/p&gt;</comment>
                            <comment id="47866" author="adrian" created="Thu, 15 Nov 2012 12:32:09 +0000"  >&lt;p&gt;Alex: I attached the requested kernel module (or did you mean with &apos;attach&apos; that i should insmod it? &lt;img class=&quot;emoticon&quot; src=&quot;https://jira.whamcloud.com/images/icons/emoticons/wink.png&quot; height=&quot;16&quot; width=&quot;16&quot; align=&quot;absmiddle&quot; alt=&quot;&quot; border=&quot;0&quot;/&gt; )&lt;/p&gt;
</comment>
                            <comment id="47870" author="bzzz" created="Thu, 15 Nov 2012 13:58:27 +0000"  >&lt;p&gt;no, i&apos;ve got what I need, thanks &lt;img class=&quot;emoticon&quot; src=&quot;https://jira.whamcloud.com/images/icons/emoticons/smile.png&quot; height=&quot;16&quot; width=&quot;16&quot; align=&quot;absmiddle&quot; alt=&quot;&quot; border=&quot;0&quot;/&gt;&lt;/p&gt;

&lt;p&gt;a bit of analysis here:&lt;/p&gt;

&lt;p&gt;0000000000004d26 &amp;lt;osd_trans_stop+0x56&amp;gt; mov    0x50(%rbx),%r12&lt;br/&gt;
0000000000004d2a &amp;lt;osd_trans_stop+0x5a&amp;gt; test   %r12,%r12&lt;br/&gt;
0000000000004d2d &amp;lt;osd_trans_stop+0x5d&amp;gt; je     0000000000004e82 &amp;lt;osd_trans_stop+0x1b2&amp;gt;&lt;br/&gt;
0000000000004d33 &amp;lt;osd_trans_stop+0x63&amp;gt; movzbl 0x28(%r12),%eax&lt;br/&gt;
0000000000004d39 &amp;lt;osd_trans_stop+0x69&amp;gt; movzbl 0x4c(%rbx),%edx&lt;br/&gt;
0000000000004d3d &amp;lt;osd_trans_stop+0x6d&amp;gt; and    $0xfffffffe,%eax&lt;br/&gt;
0000000000004d40 &amp;lt;osd_trans_stop+0x70&amp;gt; and    $0x1,%edx&lt;br/&gt;
0000000000004d43 &amp;lt;osd_trans_stop+0x73&amp;gt; or     %edx,%eax&lt;br/&gt;
0000000000004d45 &amp;lt;osd_trans_stop+0x75&amp;gt; mov    %al,0x28(%r12)&lt;br/&gt;
0000000000004d4a &amp;lt;osd_trans_stop+0x7a&amp;gt; mov    (%r12),%rax&lt;/p&gt;


&lt;p&gt;so rbx contains pointer to oh:&lt;/p&gt;

&lt;p&gt;(gdb) p/x sizeof(struct thandle)&lt;br/&gt;
$2 = 0x50&lt;br/&gt;
struct osd_thandle {&lt;br/&gt;
        struct thandle          ot_super;&lt;br/&gt;
        handle_t               *ot_handle;&lt;/p&gt;


&lt;p&gt;0000000000004db3 &amp;lt;osd_trans_stop+0xe3&amp;gt; mov    (%rbx),%rax&lt;br/&gt;
0000000000004db6 &amp;lt;osd_trans_stop+0xe6&amp;gt; test   %rax,%rax&lt;br/&gt;
0000000000004db9 &amp;lt;osd_trans_stop+0xe9&amp;gt; je     0000000000004dc4 &amp;lt;osd_trans_stop+0xf4&amp;gt;&lt;br/&gt;
0000000000004dbb &amp;lt;osd_trans_stop+0xeb&amp;gt; mov    0x8(%rax),%rax&lt;br/&gt;
0000000000004dbf &amp;lt;osd_trans_stop+0xef&amp;gt; testb  $0x1,(%rax)&lt;/p&gt;

&lt;p&gt;these lines implement:&lt;br/&gt;
if (lu_device_is_md(&amp;amp;th-&amp;gt;th_dev-&amp;gt;dd_lu_dev)) {&lt;/p&gt;

&lt;p&gt;RAX: 0006000100000002 is supposed to be ld_type (and 0x8(%rax) is ld_type-&amp;gt;ldt_tags)&lt;/p&gt;

&lt;p&gt;IOW, thandle was broken and pointing to garbage instead of a device.&lt;/p&gt;

&lt;p&gt;now the question what broke that..&lt;/p&gt;</comment>
                            <comment id="47883" author="ethz.support" created="Thu, 15 Nov 2012 15:45:48 +0000"  >&lt;p&gt;Adrian,&lt;br/&gt;
if I remember well you can use debufs with the device mounted. Try:&lt;/p&gt;

&lt;p&gt;debugfs -c -R &apos;dump CONFIGS/ /tmp/config&apos; /dev/&amp;lt;device&amp;gt;&lt;br/&gt;
              llog_reader /tmp/config&lt;/p&gt;</comment>
                            <comment id="47921" author="adrian" created="Fri, 16 Nov 2012 07:37:17 +0000"  >&lt;p&gt;I did this using a snapshot from the MDS (taken at 5. November).&lt;br/&gt;
The output of llog_reader is attached to the case (llog.txt)&lt;/p&gt;

&lt;p&gt;Output of CONFIGS/ via debugfs:&lt;/p&gt;


&lt;p&gt;$ debugfs mds.dump &lt;br/&gt;
debugfs 1.41.12 (17-May-2010)&lt;br/&gt;
debugfs:  ls -l CONFIGS&lt;br/&gt;
 467550721   40777 (2)      0      0    4096  9-Oct-2012 07:05 .&lt;br/&gt;
      2   40755 (2)      0      0    4096  3-May-2012 14:58 ..&lt;br/&gt;
 467550722  100644 (1)      0      0   12288  9-May-2012 09:21 mountdata&lt;br/&gt;
 467550723  100644 (1)      0      0       0  3-May-2012 14:58 _mgs-sptlrpc&lt;br/&gt;
 467550724  100644 (1)      0      0   89128  3-May-2012 14:58 nero-client&lt;br/&gt;
 467550725  100644 (1)      0      0       0  3-May-2012 14:58 nero-sptlrpc&lt;br/&gt;
 467550726  100644 (1)      0      0   89000  3-May-2012 14:58 nero-MDT0000&lt;br/&gt;
 467550727  100644 (1)      0      0       0  3-May-2012 14:58 changelog_catalog&lt;br/&gt;
 467550728  100644 (1)      0      0       0  3-May-2012 14:58 changelog_users&lt;br/&gt;
 467550730  100644 (1)      0      0    9432  3-May-2012 15:05 nero-OST0000&lt;br/&gt;
 467550729  100644 (1)      0      0       0  9-Oct-2012 07:05 sptlrpc&lt;br/&gt;
 467550731  100644 (1)      0      0    9432  3-May-2012 15:08 nero-OST0008&lt;br/&gt;
 467550732  100644 (1)      0      0    9432  3-May-2012 15:09 nero-OST0010&lt;br/&gt;
 467550733  100644 (1)      0      0    9432  3-May-2012 15:09 nero-OST0018&lt;br/&gt;
 467550734  100644 (1)      0      0    9432  3-May-2012 15:58 nero-OST0001&lt;br/&gt;
 467550735  100644 (1)      0      0    9432  3-May-2012 15:58 nero-OST0009&lt;br/&gt;
 467550736  100644 (1)      0      0    9432  3-May-2012 15:59 nero-OST0011&lt;br/&gt;
 467550737  100644 (1)      0      0    9432  3-May-2012 15:59 nero-OST0019&lt;br/&gt;
 467550738  100644 (1)      0      0    9432  3-May-2012 16:00 nero-OST0002&lt;br/&gt;
 467550739  100644 (1)      0      0    9432  3-May-2012 16:01 nero-OST000a&lt;br/&gt;
 467550740  100644 (1)      0      0    9432  3-May-2012 16:04 nero-OST0012&lt;br/&gt;
 467550741  100644 (1)      0      0    9432  3-May-2012 16:05 nero-OST001a&lt;br/&gt;
 467550742  100644 (1)      0      0    9432  3-May-2012 16:06 nero-OST0003&lt;br/&gt;
 467550743  100644 (1)      0      0    9432  3-May-2012 16:06 nero-OST000b&lt;br/&gt;
 467550744  100644 (1)      0      0    9432  3-May-2012 16:06 nero-OST0013&lt;br/&gt;
 467550745  100644 (1)      0      0    9432  3-May-2012 16:07 nero-OST001b&lt;br/&gt;
 467550746  100644 (1)      0      0    9432  3-May-2012 16:11 nero-OST0004&lt;br/&gt;
 467550747  100644 (1)      0      0    9432  3-May-2012 16:11 nero-OST000c&lt;br/&gt;
 467550748  100644 (1)      0      0    9432  3-May-2012 16:12 nero-OST0014&lt;br/&gt;
 467550749  100644 (1)      0      0    9432  3-May-2012 16:12 nero-OST001c&lt;br/&gt;
 467550750  100644 (1)      0      0    9432  3-May-2012 16:14 nero-OST0005&lt;br/&gt;
 467550751  100644 (1)      0      0    9432  3-May-2012 16:14 nero-OST000d&lt;br/&gt;
 467550752  100644 (1)      0      0    9432  3-May-2012 16:14 nero-OST0015&lt;br/&gt;
 467550753  100644 (1)      0      0    9432  3-May-2012 16:15 nero-OST001d&lt;br/&gt;
 467550754  100644 (1)      0      0    9432  3-May-2012 16:18 nero-OST0006&lt;br/&gt;
 467550755  100644 (1)      0      0    9432  3-May-2012 16:18 nero-OST000e&lt;br/&gt;
 467550756  100644 (1)      0      0    9432  3-May-2012 16:18 nero-OST0016&lt;br/&gt;
 467550757  100644 (1)      0      0    9432  3-May-2012 16:18 nero-OST001e&lt;br/&gt;
 467550758  100644 (1)      0      0    9432  3-May-2012 16:21 nero-OST0007&lt;br/&gt;
 467550759  100644 (1)      0      0    9432  3-May-2012 16:21 nero-OST000f&lt;br/&gt;
 467550760  100644 (1)      0      0    9432  3-May-2012 16:22 nero-OST0017&lt;br/&gt;
 467550761  100644 (1)      0      0    9432  3-May-2012 16:22 nero-OST001f&lt;/p&gt;
</comment>
                            <comment id="48271" author="bobijam" created="Thu, 22 Nov 2012 02:33:28 +0000"  >&lt;p&gt;Adrian,&lt;/p&gt;

&lt;p&gt;Did you have opportunity to try excluding 1.8.x clients to check whether the MDS still crashes with only 2.x clients accessing it?&lt;/p&gt;</comment>
                            <comment id="48273" author="adrian" created="Thu, 22 Nov 2012 04:19:59 +0000"  >&lt;p&gt;Well, the problem is that i can not reproduce the crash and i did not see any new crashes since 14. November.&lt;/p&gt;

&lt;p&gt;(The crash was probably caused by an user job: There are about ~800 users on our cluster and i have no way to figure out what job crashed it).&lt;/p&gt;


&lt;p&gt;But in any case: Even if the crash was triggered by an 1.8.x client: It should get fixed, shouldn&apos;t it?&lt;/p&gt;

&lt;p&gt;And do we have any news about the llog_write_rec error? (did the debugfs output help?)&lt;/p&gt;
</comment>
                            <comment id="48274" author="bobijam" created="Thu, 22 Nov 2012 04:35:33 +0000"  >&lt;p&gt;Yes, even it&apos;s 1.8.x client problem we should fix it. The purpose of the question is trying to help to make out which area to find the root cause.&lt;/p&gt;

&lt;p&gt;I&apos;m still investigating the llog part issue.&lt;/p&gt;</comment>
                            <comment id="48302" author="bobijam" created="Fri, 23 Nov 2012 04:47:16 +0000"  >&lt;p&gt;I think the &quot;LustreError: 31980:0:(llog_cat.c:298:llog_cat_add_rec()) llog_write_rec -28: lh=ffff88042d450240&quot; is a misleading message, the message only means the current log does not has enough space for the log record, it will create a new log for it later.&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;&lt;span class=&quot;code-object&quot;&gt;int&lt;/span&gt; llog_cat_add_rec(struct llog_handle *cathandle, struct llog_rec_hdr *rec,
                     struct llog_cookie *reccookie, void *buf)
{
        struct llog_handle *loghandle;
        &lt;span class=&quot;code-object&quot;&gt;int&lt;/span&gt; rc;
        ENTRY;

        LASSERT(rec-&amp;gt;lrh_len &amp;lt;= LLOG_CHUNK_SIZE);
        loghandle = llog_cat_current_log(cathandle, 1);
        &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (IS_ERR(loghandle))
                RETURN(PTR_ERR(loghandle));
        &lt;span class=&quot;code-comment&quot;&gt;/* loghandle is already locked by llog_cat_current_log() &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; us */&lt;/span&gt;
        rc = llog_write_rec(loghandle, rec, reccookie, 1, buf, -1);
        &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (rc &amp;lt; 0)
                CERROR(&lt;span class=&quot;code-quote&quot;&gt;&quot;llog_write_rec %d: lh=%p\n&quot;&lt;/span&gt;, rc, loghandle);
        cfs_up_write(&amp;amp;loghandle-&amp;gt;lgh_lock);
        &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (rc == -ENOSPC) {
                &lt;span class=&quot;code-comment&quot;&gt;/* to create a &lt;span class=&quot;code-keyword&quot;&gt;new&lt;/span&gt; plain log */&lt;/span&gt;
                loghandle = llog_cat_current_log(cathandle, 1);
                &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (IS_ERR(loghandle))
                        RETURN(PTR_ERR(loghandle));
                rc = llog_write_rec(loghandle, rec, reccookie, 1, buf, -1);
                cfs_up_write(&amp;amp;loghandle-&amp;gt;lgh_lock);
        }

        RETURN(rc);
}
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="48522" author="niu" created="Thu, 29 Nov 2012 02:22:55 +0000"  >&lt;p&gt;After checking the 2.2 code carefully, I found a culprit which can cause such memory corruption:&lt;/p&gt;

&lt;p&gt;in mdd_declare_attr_set():&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;#ifdef CONFIG_FS_POSIX_ACL
        &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (ma-&amp;gt;ma_attr.la_valid &amp;amp; LA_MODE) {
                mdd_read_lock(env, obj, MOR_TGT_CHILD);
                rc = mdo_xattr_get(env, obj, buf, XATTR_NAME_ACL_ACCESS,
                                   BYPASS_CAPA);
                mdd_read_unlock(env, obj);
                &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (rc == -EOPNOTSUPP || rc == -ENODATA)
                        rc = 0;
                &lt;span class=&quot;code-keyword&quot;&gt;else&lt;/span&gt; &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (rc &amp;lt; 0)
                        &lt;span class=&quot;code-keyword&quot;&gt;return&lt;/span&gt; rc;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Our intention here is to retrieve the xattr length, but we passed an uninitialized buffer to mdo_xattr_get() (we should pass NULL here)...&lt;br/&gt;
Actually this bug has been fixed for 2.3 &amp;amp; 2.4 (see &lt;a href=&quot;http://review.whamcloud.com/#change,3928&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#change,3928&lt;/a&gt; &amp;amp; &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1823&quot; title=&quot;sanity/103: slab corruption&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1823&quot;&gt;&lt;del&gt;LU-1823&lt;/del&gt;&lt;/a&gt;), I think we need to backport it to 2.2.&lt;/p&gt;</comment>
                            <comment id="48525" author="niu" created="Thu, 29 Nov 2012 02:47:01 +0000"  >&lt;p&gt;backport the memory corruption fix in mdd_declare_attr_set() to b2_2: &lt;a href=&quot;http://review.whamcloud.com/4703&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/4703&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="48578" author="adrian" created="Fri, 30 Nov 2012 07:51:37 +0000"  >&lt;p&gt;Thanks for fixing this issue: We will upgrade our MDS as soon as a new build becomes available &amp;#8211; or should we just upgrade to 2.3?&lt;/p&gt;</comment>
                            <comment id="48580" author="pjones" created="Fri, 30 Nov 2012 08:45:26 +0000"  >&lt;p&gt;Adrian, a build of the change backported to 2.2 already exists - &lt;a href=&quot;http://build.whamcloud.com/job/lustre-reviews/10853/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-reviews/10853/&lt;/a&gt; - but is still in the automated test queue at the moment. Lustre 2.3 is available now and has been thoroughly tested. It will of course include other content beyond just this one fix (both additional features and many other fixes)&lt;/p&gt;</comment>
                            <comment id="48856" author="pjones" created="Thu, 6 Dec 2012 08:54:17 +0000"  >&lt;p&gt;Adrian&lt;/p&gt;

&lt;p&gt;Have you decided which approach you will take - to patch 2.2 or upgrade to 2.3?&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="48859" author="adrian" created="Thu, 6 Dec 2012 09:22:16 +0000"  >&lt;p&gt;Hello Peter,&lt;/p&gt;

&lt;p&gt;We will upgrade to 2.3 as soon as the next opportunity arises, you can therefore close this issue.&lt;/p&gt;

&lt;p&gt;Thanks and best regards,&lt;br/&gt;
 Adrian&lt;/p&gt;</comment>
                            <comment id="48860" author="pjones" created="Thu, 6 Dec 2012 09:24:09 +0000"  >&lt;p&gt;ok thanks Adrian!&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                            <attachment id="12057" name="llog.txt" size="48565" author="adrian" created="Fri, 16 Nov 2012 07:32:03 +0000"/>
                            <attachment id="12044" name="mds08.txt" size="15540" author="ethz.support" created="Wed, 14 Nov 2012 04:47:24 +0000"/>
                            <attachment id="12045" name="mds14.txt" size="15425" author="ethz.support" created="Wed, 14 Nov 2012 04:47:24 +0000"/>
                            <attachment id="12054" name="osd_ldiskfs.ko" size="4381825" author="adrian" created="Thu, 15 Nov 2012 12:30:48 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10040" key="com.atlassian.jira.plugin.system.customfieldtypes:labels">
                        <customfieldname>Epic</customfieldname>
                        <customfieldvalues>
                                        <label>metadata</label>
            <label>server</label>
    
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvc7r:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>5550</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10020"><![CDATA[1]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>