<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:01:40 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-6607] MDS (2-node DNE) running out of memory and crashing</title>
                <link>https://jira.whamcloud.com/browse/LU-6607</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;2 node DNE MDS&lt;br/&gt;
16 OSS&lt;br/&gt;
2K clients&lt;/p&gt;

&lt;p&gt;An MDS node randomly runs out of memory and hangs.&lt;br/&gt;
We watch the MDS drain its memory in a matter of a few minutes, many times right after recovery from a previous hang.&lt;/p&gt;

&lt;p&gt;Clients are generating a large number of Lustre errors containing the string &quot;ptlrpc_expire_one_request&quot;, ranging from several hundred thousand to several million such errors per node. Here are the error counts from some nodes:&lt;/p&gt;

&lt;p&gt;comet-12-31 662616&lt;br/&gt;
comet-10-06 690764&lt;br/&gt;
comet-12-24 720396&lt;br/&gt;
comet-12-25 735659&lt;br/&gt;
comet-12-14 778073&lt;br/&gt;
comet-12-33 840302&lt;br/&gt;
comet-10-10 928322&lt;br/&gt;
comet-12-33 945614&lt;br/&gt;
comet-12-25 992288&lt;br/&gt;
comet-10-15 1131711&lt;br/&gt;
comet-12-25 1147043&lt;br/&gt;
comet-10-07 1160876&lt;br/&gt;
comet-12-30 1180270&lt;br/&gt;
comet-10-03 1387072&lt;br/&gt;
comet-10-02 2515764&lt;br/&gt;
comet-10-02 3371128&lt;/p&gt;

&lt;p&gt;I am attaching logs from both the client and the server for one such incident.&lt;/p&gt;</description>
                <environment>Linux panda-mds-19-6.sdsc.edu 3.10.73-1.el6.elrepo.x86_64 #1 SMP Thu Mar 26 16:28:30 EDT 2015 x86_64 x86_64 x86_64 GNU/Linux&lt;br/&gt;
&lt;br/&gt;
lustre-2.7.51-3.10.73_1.el6.elrepo.x86_64_gb019b03.x86_64&lt;br/&gt;
lustre-osd-zfs-mount-2.7.51-3.10.73_1.el6.elrepo.x86_64_gb019b03.x86_64&lt;br/&gt;
lustre-iokit-2.7.51-3.10.73_1.el6.elrepo.x86_64_gb019b03.x86_64&lt;br/&gt;
lustre-source-2.7.51-3.10.73_1.el6.elrepo.x86_64_gb019b03.x86_64&lt;br/&gt;
lustre-osd-zfs-2.7.51-3.10.73_1.el6.elrepo.x86_64_gb019b03.x86_64&lt;br/&gt;
lustre-modules-2.7.51-3.10.73_1.el6.elrepo.x86_64_gb019b03.x86_64&lt;br/&gt;
lustre-tests-2.7.51-3.10.73_1.el6.elrepo.x86_64_gb019b03.x86_64&lt;br/&gt;
</environment>
        <key id="30188">LU-6607</key>
            <summary>MDS (2-node DNE) running out of memory and crashing</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="2">Won&apos;t Fix</resolution>
                                        <assignee username="laisiyao">Lai Siyao</assignee>
                                    <reporter username="haisong">Haisong Cai</reporter>
                        <labels>
                            <label>sdsc</label>
                    </labels>
                <created>Fri, 15 May 2015 16:06:25 +0000</created>
                <updated>Sat, 24 Mar 2018 14:03:17 +0000</updated>
                            <resolved>Sat, 24 Mar 2018 14:03:17 +0000</resolved>
                                    <version>Lustre 2.7.0</version>
                                                        <due></due>
                            <votes>1</votes>
                                    <watches>6</watches>
                                                                            <comments>
                            <comment id="115508" author="pjones" created="Fri, 15 May 2015 17:32:29 +0000"  >&lt;p&gt;Lai&lt;/p&gt;

&lt;p&gt;Could you please advise on this issue?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="115514" author="haisong" created="Fri, 15 May 2015 17:51:09 +0000"  >&lt;p&gt;Just like to highlight these messages on server (should also be in messages-19-6.gz file)&lt;/p&gt;

&lt;p&gt;May 15 06:35:19 panda-mds-19-6 kernel: Lustre: ldlm_canceld: This server is not able to keep up with request traffic (cpu-bound).  &lt;br/&gt;
May 15 06:45:05 panda-mds-19-6 kernel: Lustre: ldlm_canceld: This server is not able to keep up with request traffic (cpu-bound).  &lt;br/&gt;
May 15 07:17:59 panda-mds-19-6 kernel: Lustre: ldlm_canceld: This server is not able to keep up with request traffic (cpu-bound).  &lt;br/&gt;
May 15 07:18:53 panda-mds-19-6 kernel: Lustre: ldlm_canceld: This server is not able to keep up with request traffic (cpu-bound).  &lt;br/&gt;
May 15 07:18:54 panda-mds-19-6 kernel: Lustre: ldlm_canceld: This server is not able to keep up with request traffic (cpu-bound).  &lt;br/&gt;
May 15 07:18:56 panda-mds-19-6 kernel: Lustre: ldlm_canceld: This server is not able to keep up with request traffic (cpu-bound).  &lt;br/&gt;
May 15 07:19:00 panda-mds-19-6 kernel: Lustre: ldlm_canceld: This server is not able to keep up with request traffic (cpu-bound).  &lt;br/&gt;
May 15 07:19:08 panda-mds-19-6 kernel: Lustre: ldlm_canceld: This server is not able to keep up with request traffic (cpu-bound).  &lt;br/&gt;
May 15 07:19:37 panda-mds-19-6 kernel: Lustre: ldlm_canceld: This server is not able to keep up with request traffic (cpu-bound).  &lt;br/&gt;
May 15 07:20:09 panda-mds-19-6 kernel: Lustre: ldlm_canceld: This server is not able to keep up with request traffic (cpu-bound).  &lt;br/&gt;
May 15 07:21:13 panda-mds-19-6 kernel: Lustre: ldlm_canceld: This server is not able to keep up with request traffic (cpu-bound).  &lt;br/&gt;
May 15 07:23:25 panda-mds-19-6 kernel: Lustre: ldlm_canceld: This server is not able to keep up with request traffic (cpu-bound).  &lt;br/&gt;
May 15 07:27:44 panda-mds-19-6 kernel: Lustre: ldlm_canceld: This server is not able to keep up with request traffic (cpu-bound).  &lt;br/&gt;
May 15 07:55:17 panda-mds-19-6 kernel: Lustre: ldlm_canceld: This server is not able to keep up with request traffic (cpu-bound).  &lt;br/&gt;
May 15 08:08:07 panda-mds-19-6 kernel: Lustre: ldlm_canceld: This server is not able to keep up with request traffic (cpu-bound).  &lt;br/&gt;
May 15 08:08:07 panda-mds-19-6 kernel: Lustre: ldlm_canceld: This server is not able to keep up with request traffic (cpu-bound).  &lt;br/&gt;
May 15 08:08:08 panda-mds-19-6 kernel: Lustre: ldlm_canceld: This server is not able to keep up with request traffic (cpu-bound).  &lt;br/&gt;
May 15 08:08:10 panda-mds-19-6 kernel: Lustre: ldlm_canceld: This server is not able to keep up with request traffic (cpu-bound).  &lt;br/&gt;
May 15 08:11:04 panda-mds-19-6 kernel: Lustre: ldlm_canceld: This server is not able to keep up with request traffic (cpu-bound).  &lt;br/&gt;
May 15 08:11:12 panda-mds-19-6 kernel: Lustre: ldlm_canceld: This server is not able to keep up with request traffic (cpu-bound).  &lt;br/&gt;
May 15 08:11:28 panda-mds-19-6 kernel: Lustre: ldlm_canceld: This server is not able to keep up with request traffic (cpu-bound).  &lt;/p&gt;</comment>
                            <comment id="115788" author="haisong" created="Tue, 19 May 2015 04:17:59 +0000"  >
&lt;p&gt;Hi Lai,&lt;/p&gt;

&lt;p&gt;Any update?&lt;/p&gt;

&lt;p&gt;thanks,&lt;br/&gt;
Haisong&lt;/p&gt;
</comment>
                            <comment id="115792" author="di.wang" created="Tue, 19 May 2015 06:51:03 +0000"  >&lt;p&gt;Hello, Cai&lt;/p&gt;

&lt;p&gt;I checked the debug log and dmesg, and I can see that MDT0001 seems very slow at that moment, though I cannot figure out why from these messages. So:&lt;/p&gt;

&lt;p&gt;1. Could you please post the stack trace of MDT0001 (panda-mds-19-6) here? It will help us understand what MDT0001 was busy with. Something like:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;echo t &amp;gt; /proc/sysrq-trigger
dmesg &amp;gt; /tmp/dmesg.out
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;2. Could you please post the output of &quot;cat /proc/slabinfo&quot; here when the OOM happens? &lt;/p&gt;

&lt;p&gt;Thanks&lt;br/&gt;
WangDi &lt;/p&gt;</comment>
                            <comment id="115896" author="haisong" created="Tue, 19 May 2015 17:23:50 +0000"  >&lt;p&gt;Hi WangDi,&lt;/p&gt;

&lt;p&gt;I understand when to run 2). &lt;br/&gt;
Do you want the output of 1) now, or at the same time as I run 2)?&lt;/p&gt;

&lt;p&gt;Haisong&lt;/p&gt;</comment>
                            <comment id="115938" author="di.wang" created="Tue, 19 May 2015 20:56:58 +0000"  >&lt;p&gt;Hello, Cai&lt;/p&gt;

&lt;p&gt;Oh, I only need the output of 1) when MDT1 is busy. But if you can get both at the same time, that would be great.&lt;/p&gt;

&lt;p&gt;Thanks&lt;br/&gt;
WangDi&lt;/p&gt;</comment>
                            <comment id="121183" author="haisong" created="Mon, 13 Jul 2015 18:22:45 +0000"  >&lt;p&gt;WangDi,&lt;/p&gt;

&lt;p&gt;We had 2 incidents recently, and both times I failed to collect the needed info.&lt;br/&gt;
One time I simply forgot, and the other time we had no chance since the MDS node was hung.&lt;/p&gt;

&lt;p&gt;Haisong&lt;/p&gt;</comment>
                            <comment id="125901" author="haisong" created="Tue, 1 Sep 2015 18:14:44 +0000"  >
&lt;p&gt;WangDi,&lt;/p&gt;

&lt;p&gt;We ran into this problem on one of the MDS nodes (mdt0, the master) again today.&lt;br/&gt;
I have collected the information you asked for by issuing the following commands:&lt;/p&gt;

&lt;p&gt;echo t &amp;gt; /proc/sysrq-trigger&lt;br/&gt;
dmesg &amp;gt; /state/partition1/tmp/dmesg.out&lt;br/&gt;
cat /proc/slabinfo &amp;gt; /state/partition1/tmp/slabinfo.txt&lt;/p&gt;

&lt;p&gt;dmesg.out &amp;amp; slabinfo.txt will be uploaded separately.&lt;/p&gt;

&lt;p&gt;Haisong&lt;/p&gt;</comment>
                            <comment id="125911" author="haisong" created="Tue, 1 Sep 2015 18:18:07 +0000"  >
&lt;p&gt;Files collected between the 2 times the MDS crashed.&lt;/p&gt;</comment>
                            <comment id="125950" author="di.wang" created="Tue, 1 Sep 2015 21:45:02 +0000"  >&lt;p&gt;Ah, it is a ZFS environment (ZFS + DNE)?  A few questions here&lt;/p&gt;

&lt;p&gt;1. I saw this in your MDS console messages (dmesg_mds.gz); the kernel version is definitely not EL6. Is it EL7? We do not support an EL7 server on the MDS yet. Could you please confirm which kernel you used on the MDS?&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Linux version 3.10.73-1.el6.elrepo.x86_64 (mockbuild@Build64R6) (gcc version 4.4.7 20120313 (Red Hat 4.4.7-11) (GCC) ) #1 SMP Thu Mar 26 16:28:30 EDT 2015
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;2. In the slab info&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;kmalloc-8192      9033431 9033431   8192    1    2 : tunables    8    4    0 : slabdata 9033431 9033431      0
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;The 8192-size slab costs too much memory, 941G! That is too much.  Btw: how many OSTs for each OSS?&lt;/p&gt;</comment>
                            <comment id="125953" author="haisong" created="Tue, 1 Sep 2015 21:51:24 +0000"  >
&lt;p&gt;Hi WangDi,&lt;/p&gt;

&lt;p&gt;We are running CentOS 6.6 with Linux kernel 3.10.73 from elrepo.&lt;br/&gt;
Lustre and ZFS are built as DKMS modules.&lt;/p&gt;

&lt;p&gt;Filesystem has 16 OSS and each has 6 OSTs.&lt;/p&gt;

&lt;p&gt;Haisong&lt;/p&gt;</comment>
                            <comment id="125955" author="haisong" created="Tue, 1 Sep 2015 21:53:45 +0000"  >
&lt;p&gt;On one of the 2 MDS servers:&lt;/p&gt;

&lt;p&gt;[root@panda-mds-19-6 panda-mds-19-6]# sysctl -a | grep slab&lt;br/&gt;
kernel.spl.kmem.slab_kmem_alloc = 92736&lt;br/&gt;
kernel.spl.kmem.slab_kmem_max = 92736&lt;br/&gt;
kernel.spl.kmem.slab_kmem_total = 172032&lt;br/&gt;
kernel.spl.kmem.slab_vmem_alloc = 407675904&lt;br/&gt;
kernel.spl.kmem.slab_vmem_max = 490480640&lt;br/&gt;
kernel.spl.kmem.slab_vmem_total = 485459072&lt;br/&gt;
vm.min_slab_ratio = 5&lt;/p&gt;</comment>
                            <comment id="125960" author="di.wang" created="Tue, 1 Sep 2015 22:20:19 +0000"  >&lt;p&gt;Is that possible you can upgrade MDS to 2.7.58 ? there are quite a few fix on these area since 2.7.51.&lt;/p&gt;

&lt;p&gt;Btw: we are currently testing DNE on ZFS in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7009&quot; title=&quot;Testing DNE on ZFS&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7009&quot;&gt;&lt;del&gt;LU-7009&lt;/del&gt;&lt;/a&gt;; please follow along there.&lt;/p&gt;</comment>
                            <comment id="125964" author="haisong" created="Tue, 1 Sep 2015 22:41:21 +0000"  >
&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6584&quot; title=&quot;OSS hit LBUG and crash&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6584&quot;&gt;&lt;del&gt;LU-6584&lt;/del&gt;&lt;/a&gt; is about an OSS crashing problem.  The OSS servers are part of the same file system as these MDS servers; it is all one file system.&lt;/p&gt;

&lt;p&gt;We are about to apply a new patch related to &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6584&quot; title=&quot;OSS hit LBUG and crash&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6584&quot;&gt;&lt;del&gt;LU-6584&lt;/del&gt;&lt;/a&gt;. It is built from &lt;a href=&quot;http://review.whamcloud.com/#/c/14926/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/14926/&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Will it satisfy your recommendation?&lt;/p&gt;

&lt;p&gt;Haisong&lt;/p&gt;</comment>
                            <comment id="125965" author="di.wang" created="Tue, 1 Sep 2015 22:56:46 +0000"  >&lt;p&gt;Hmm, I think &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6584&quot; title=&quot;OSS hit LBUG and crash&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6584&quot;&gt;&lt;del&gt;LU-6584&lt;/del&gt;&lt;/a&gt; is different issue. This ticket is about MDS OOM during failover?  Do you happen to know any easy way to reproduce this problem?&lt;br/&gt;
Hmm, btw: is that possible you can add &quot;log_buf_len=10M&quot; in your boot command? since the dmesg you post here only have half stack trace. Thanks.&lt;/p&gt;</comment>
                            <comment id="125967" author="haisong" created="Tue, 1 Sep 2015 23:13:19 +0000"  >
&lt;p&gt;Hi Wang Di,&lt;/p&gt;

&lt;p&gt;I understand &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6584&quot; title=&quot;OSS hit LBUG and crash&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6584&quot;&gt;&lt;del&gt;LU-6584&lt;/del&gt;&lt;/a&gt; is a different problem, about the OSS and not the MDS memory issue.&lt;/p&gt;

&lt;p&gt;What I said earlier was that, to work on the &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6584&quot; title=&quot;OSS hit LBUG and crash&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6584&quot;&gt;&lt;del&gt;LU-6584&lt;/del&gt;&lt;/a&gt; problem, we have to apply a patch soon, because they are the same&lt;br/&gt;
file system. That patch is built with &lt;a href=&quot;http://review.whamcloud.com/#/c/14926/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/14926/&lt;/a&gt; &lt;/p&gt;

&lt;p&gt;Is that equivalent to 2.7.58?&lt;/p&gt;

&lt;p&gt;Haisong&lt;/p&gt;</comment>
                            <comment id="125976" author="di.wang" created="Wed, 2 Sep 2015 02:09:42 +0000"  >&lt;p&gt;Ah, it is. you can use that build. Thanks&lt;/p&gt;</comment>
                            <comment id="126052" author="haisong" created="Wed, 2 Sep 2015 16:42:15 +0000"  >
&lt;p&gt;Hi WangDi, &lt;/p&gt;

&lt;p&gt;You stated that 2.7.58 has a lot of fixes, but it may still not fix our problem, correct? &lt;br/&gt;
Can you elaborate on the slab situation? You indicated that 941G (or 94G) was too big; why is that? Is it because of a default setting or some configuration mistake?&lt;/p&gt;

&lt;p&gt;thanks,&lt;br/&gt;
Haisong&lt;/p&gt;</comment>
                            <comment id="126184" author="di.wang" created="Thu, 3 Sep 2015 16:00:47 +0000"  >&lt;p&gt;Hello, Haisong&lt;/p&gt;

&lt;p&gt;Yes, I do not know the exact reason why this 8192-size slab consumed so much memory here. No, I do not think this is related to any default setting. Did you do a lot of cross-MDT operations here, like creating remote directories or striped directories? (Unfortunately, there is not enough stack trace information here.)  Btw: was this stack trace collected when the OOM happened, before it, or when it was about to happen? Right now, I would suggest:&lt;/p&gt;

&lt;p&gt;1. Use 2.7.58 plus the patch you need (&lt;a href=&quot;http://review.whamcloud.com/#/c/14926/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/14926/&lt;/a&gt;), and maybe also include &lt;a href=&quot;http://review.whamcloud.com/#/c/16161/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/16161/&lt;/a&gt;. &lt;br/&gt;
2. Please add &quot;log_buf_len=10M&quot; to your boot command line, so we can see more of the stack trace when the error happens.&lt;br/&gt;
3. Please help me find an easy way to reproduce the problem. Thanks!&lt;/p&gt;

&lt;p&gt;Even though 2.7.58 might not help you with this issue, it is much better than 2.7.51 for DNE.  &lt;/p&gt;</comment>
                            <comment id="224470" author="pjones" created="Sat, 24 Mar 2018 14:03:17 +0000"  >&lt;p&gt;SDSC have moved onto more current releases so I do not think any further work is needed here&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                            <attachment id="17803" name="clients_log.gz" size="637068" author="haisong" created="Fri, 15 May 2015 16:06:26 +0000"/>
                            <attachment id="18831" name="dmesg.out" size="405677" author="haisong" created="Tue, 1 Sep 2015 18:18:07 +0000"/>
                            <attachment id="17802" name="dmesg_mds.gz" size="21827" author="haisong" created="Fri, 15 May 2015 16:06:25 +0000"/>
                            <attachment id="17801" name="lustre-log.tgz" size="9802577" author="haisong" created="Fri, 15 May 2015 16:06:25 +0000"/>
                            <attachment id="17800" name="messages-19-6.gz" size="94091" author="haisong" created="Fri, 15 May 2015 16:06:25 +0000"/>
                            <attachment id="18832" name="slabinfo.txt" size="27605" author="haisong" created="Tue, 1 Sep 2015 18:18:07 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10490" key="com.atlassian.jira.plugin.system.customfieldtypes:datepicker">
                        <customfieldname>End date</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>Fri, 4 Sep 2015 16:06:25 +0000</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                            <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzxdfb:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10023"><![CDATA[4]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                        <customfield id="customfield_10493" key="com.atlassian.jira.plugin.system.customfieldtypes:datepicker">
                        <customfieldname>Start date</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>Fri, 15 May 2015 16:06:25 +0000</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                    </customfields>
    </item>
</channel>
</rss>