<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:52:52 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
<language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-5599] Lustre Error: Impossible state: 4</title>
                <link>https://jira.whamcloud.com/browse/LU-5599</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;Lustre Version: 2.4.52&lt;br/&gt;
Kernel: patchless_client&lt;br/&gt;
Build: v2_4_52 0-gfdd4844-CHANGED-3.9.9-302.fc19.x86_64&lt;/p&gt;

&lt;p&gt;2nd Instance:&lt;br/&gt;
-----------------&lt;br/&gt;
From: Cledat, Romain E &lt;br/&gt;
Sent: Monday, September 08, 2014 4:34 PM&lt;br/&gt;
To: Bernel, BrianX D&lt;br/&gt;
Subject: Error &lt;/p&gt;

&lt;p&gt;Message from syslogd@bar1 at Sep  8 16:08:51 ...                                                                                                       &lt;br/&gt;
 kernel:&lt;span class=&quot;error&quot;&gt;&amp;#91;1235195.162972&amp;#93;&lt;/span&gt; LustreError: 85031:0:(osc_lock.c:1129:osc_lock_enqueue()) ASSERTION( ols-&amp;gt;ols_state == OLS_NEW ) failed: Impossible state: 4                                r                                                                                                               &lt;br/&gt;
Message from syslogd@bar1 at Sep  8 16:08:51 ...                                                                    &lt;br/&gt;
 kernel:&lt;span class=&quot;error&quot;&gt;&amp;#91;1235195.193211&amp;#93;&lt;/span&gt; LustreError: 85031:0:(osc_lock.c:1129:osc_lock_enqueue()) LBUG&lt;/p&gt;

&lt;p&gt;1st Instance:&lt;br/&gt;
-----------------&lt;br/&gt;
From: Nickerson, Brian R &lt;br/&gt;
Sent: Thursday, August 14, 2014 3:44 PM&lt;br/&gt;
To: Bernel, BrianX D; Cledat, Romain E&lt;br/&gt;
Subject: Kernel crash details&lt;/p&gt;

&lt;p&gt;Message from syslogd@bar4 at Aug 14 15:34:57 ...&lt;br/&gt;
kernel:&lt;span class=&quot;error&quot;&gt;&amp;#91;1216856.270451&amp;#93;&lt;/span&gt; LustreError: 42598:0:(osc_lock.c:1129:osc_lock_enqueue()) ASSERTION( ols-&amp;gt;ols_state == OLS_NEW ) failed: Impossible state: 4&lt;/p&gt;

&lt;p&gt;Message from syslogd@bar4 at Aug 14 15:34:57 ...&lt;br/&gt;
kernel:&lt;span class=&quot;error&quot;&gt;&amp;#91;1216856.271008&amp;#93;&lt;/span&gt; LustreError: 42598:0:(osc_lock.c:1129:osc_lock_enqueue()) LBUG&lt;/p&gt;

&lt;p&gt;Message from syslogd@bar4 at Aug 14 15:34:57 ...&lt;br/&gt;
kernel:&lt;span class=&quot;error&quot;&gt;&amp;#91;1216856.271830&amp;#93;&lt;/span&gt; Kernel panic - not syncing: LBUG&lt;/p&gt;</description>
                <environment>Fedora 19 x86_64 on Washington Pass nodes, 1GbE &amp;amp; FDR IB</environment>
        <key id="26452">LU-5599</key>
            <summary>Lustre Error: Impossible state: 4</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="5">Cannot Reproduce</resolution>
                                        <assignee username="bogl">Bob Glossman</assignee>
                                    <reporter username="bdbernex">Brian Bernel</reporter>
                        <labels>
                    </labels>
                <created>Tue, 9 Sep 2014 17:01:12 +0000</created>
                <updated>Tue, 24 Jul 2018 13:53:28 +0000</updated>
                            <resolved>Tue, 24 Jul 2018 13:53:28 +0000</resolved>
                                    <version>Lustre 2.4.3</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>7</watches>
                                                                            <comments>
                            <comment id="93578" author="cliffw" created="Tue, 9 Sep 2014 18:44:58 +0000"  >&lt;p&gt;There should be a stack dump to go along with the ASSERTION - can you please acquire and attach to the ticket? In addition, it would be useful to have logs for some time before the actual assertion - are there any LustreErrors previous to this? &lt;/p&gt;</comment>
                            <comment id="93679" author="pjones" created="Wed, 10 Sep 2014 12:58:36 +0000"  >&lt;p&gt;Bob&lt;/p&gt;

&lt;p&gt;Could you please help with this issue?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="93692" author="bogl" created="Wed, 10 Sep 2014 14:44:02 +0000"  >&lt;p&gt;the reported lustre version is &quot;Lustre Version: 2.4.52&quot;. this suggests it was built from source or derived from a review build in between our standard release versions with names like 2.4.2 or 2.4.3.  Could we get detail about how this lustre was generated or obtained?  knowing the exact origin is very important to help us understand the problem.&lt;/p&gt;</comment>
                            <comment id="93696" author="bogl" created="Wed, 10 Sep 2014 15:03:47 +0000"  >&lt;p&gt;this may not be known but I wonder if there was any particular applications or load that was running near the time of the 2 reported panic instances.&lt;/p&gt;</comment>
                            <comment id="93711" author="bdbernex" created="Wed, 10 Sep 2014 18:48:29 +0000"  >&lt;p&gt;Hi Bob,&lt;/p&gt;

&lt;p&gt;Thanks so much for your help on this. The Lustre kernel for X-Stack was put together by Gabrielle Paciucci. &#8230;As for what might have been going on at the time of the problem, the only thing I have to go on at this point is that git was involved. No kdump available, I&#8217;m afraid. A cursory look at the logs doesn&#8217;t show anything glaringly amiss, but it does corroborate that garret/git was in play in the minute leading up to the Lustre error.&lt;/p&gt;

&lt;p&gt;(Thanks Josh, for directing me to reply by comment vs email)&lt;/p&gt;

&lt;p&gt;Regards,&lt;/p&gt;

&lt;p&gt;Brian&lt;/p&gt;</comment>
                            <comment id="93713" author="bogl" created="Wed, 10 Sep 2014 18:55:54 +0000"  >&lt;p&gt;Brian,&lt;br/&gt;
  The stack dumps Cliff asked about a few comments ago is still needed.  Any chance of getting those?&lt;/p&gt;</comment>
                            <comment id="95568" author="keith" created="Thu, 2 Oct 2014 18:47:32 +0000"  >&lt;p&gt;We do not need a kernel dump to get this information. &lt;/p&gt;

&lt;p&gt;Can you upload /var/log/messages or &quot;dmesg&quot; right after this issue is hit? &lt;/p&gt;</comment>
                            <comment id="98786" author="bdbernex" created="Mon, 10 Nov 2014 18:36:38 +0000"  >&lt;p&gt;Recent Kernel Dump information from bar1 on X-Stack:&lt;/p&gt;

&lt;p&gt;[    3.911516] hpgmg-fv&lt;span class=&quot;error&quot;&gt;&amp;#91;96927&amp;#93;&lt;/span&gt;: segfault at d ip 00007fd22c315d3e sp 00007fd20796bd80 error 6 in libocr.so&lt;span class=&quot;error&quot;&gt;&amp;#91;7fd22c300000+2e000&amp;#93;&lt;/span&gt;&lt;br/&gt;
[    3.252141] LustreError: 11-0: lustrefs-MDT0000-mdc-ffff881fdadfe000: Communicating with 192.168.2.201@o2ib, operation ldlm_enqueue failed with -11.&lt;br/&gt;
[    3.828093] LustreError: 11-0: lustrefs-MDT0000-mdc-ffff881fdadfe000: Communicating with 192.168.2.201@o2ib, operation ldlm_enqueue failed with -11.&lt;br/&gt;
[    4.189668] LustreError: 11-0: lustrefs-MDT0000-mdc-ffff881fdadfe000: Communicating with 192.168.2.201@o2ib, operation ldlm_enqueue failed with -11.&lt;br/&gt;
[    4.197074] LustreError: Skipped 1 previous similar message&lt;br/&gt;
[    0.136898] LustreError: 11-0: lustrefs-MDT0000-mdc-ffff881fdadfe000: Communicating with 192.168.2.201@o2ib, operation ldlm_enqueue failed with -11.&lt;br/&gt;
[    3.395164] hpgmg-fv&lt;span class=&quot;error&quot;&gt;&amp;#91;5886&amp;#93;&lt;/span&gt;: segfault at 7f91ecb51d19 ip 00007f91ecb48d3e sp 00007f91450c0d10 error 7 in libocr.so&lt;span class=&quot;error&quot;&gt;&amp;#91;7f91ecb33000+2e000&amp;#93;&lt;/span&gt;&lt;br/&gt;
[    1.541619] LustreError: 11-0: lustrefs-MDT0000-mdc-ffff881fdadfe000: Communicating with 192.168.2.201@o2ib, operation ldlm_enqueue failed with -11.&lt;br/&gt;
[    1.549642] LustreError: Skipped 1 previous similar message&lt;br/&gt;
[    0.870999] LustreError: 11-0: lustrefs-MDT0000-mdc-ffff881fdadfe000: Communicating with 192.168.2.201@o2ib, operation ldlm_enqueue failed with -11.&lt;br/&gt;
[    1.648428] LustreError: 11-0: lustrefs-MDT0000-mdc-ffff881fdadfe000: Communicating with 192.168.2.201@o2ib, operation ldlm_enqueue failed with -11.&lt;br/&gt;
[    0.486506] hpgmg-fv&lt;span class=&quot;error&quot;&gt;&amp;#91;7547&amp;#93;&lt;/span&gt;: segfault at 7f5a27153d19 ip 00007f5a2714ad3e sp 00007f5978fb3d10 error 7 in libocr.so&lt;span class=&quot;error&quot;&gt;&amp;#91;7f5a27135000+2e000&amp;#93;&lt;/span&gt;&lt;br/&gt;
[    0.824354] hpgmg-fv&lt;span class=&quot;error&quot;&gt;&amp;#91;11206&amp;#93;&lt;/span&gt;: segfault at d ip 00007f4f43557d3e sp 00007f4e9bacfd10 error 6 in libocr.so&lt;span class=&quot;error&quot;&gt;&amp;#91;7f4f43542000+2e000&amp;#93;&lt;/span&gt;&lt;br/&gt;
[    3.258284] hpgmg-fv&lt;span class=&quot;error&quot;&gt;&amp;#91;11745&amp;#93;&lt;/span&gt;: segfault at d ip 00007fb77412ad3e sp 00007fb6cd6a4d10 error 6 in libocr.so&lt;span class=&quot;error&quot;&gt;&amp;#91;7fb774115000+2e000&amp;#93;&lt;/span&gt;&lt;br/&gt;
[    2.546239] LustreError: 1599:0:(file.c:163:ll_close_inode_openhandle()) inode 144118057617399716 mdc close failed: rc = -13&lt;br/&gt;
[    2.559492] LustreError: 1599:0:(file.c:163:ll_close_inode_openhandle()) inode 144117957591658911 mdc close failed: rc = -13&lt;br/&gt;
[    1.467034] LustreError: 1599:0:(file.c:163:ll_close_inode_openhandle()) inode 144118025052861374 mdc close failed: rc = -13&lt;br/&gt;
[    1.476398] LustreError: 1599:0:(file.c:163:ll_close_inode_openhandle()) Skipped 40 previous similar messages&lt;br/&gt;
[    2.468531] LustreError: 1599:0:(file.c:163:ll_close_inode_openhandle()) inode 144118025052858204 mdc close failed: rc = -13&lt;br/&gt;
[    2.478511] LustreError: 1599:0:(file.c:163:ll_close_inode_openhandle()) Skipped 246 previous similar messages&lt;br/&gt;
[    0.176572] LustreError: 1599:0:(file.c:163:ll_close_inode_openhandle()) inode 144118025052850460 mdc close failed: rc = -13&lt;br/&gt;
[    0.186522] LustreError: 1599:0:(file.c:163:ll_close_inode_openhandle()) Skipped 473 previous similar messages&lt;br/&gt;
[    0.030234] LustreError: 1599:0:(file.c:163:ll_close_inode_openhandle()) inode 144118024767594400 mdc close failed: rc = -13&lt;br/&gt;
[    0.039727] LustreError: 1599:0:(file.c:163:ll_close_inode_openhandle()) Skipped 39 previous similar messages&lt;br/&gt;
[    2.904305] hpgmg-fv invoked oom-killer: gfp_mask=0x201da, order=0, oom_score_adj=0&lt;br/&gt;
[    2.904309] hpgmg-fv cpuset=/ mems_allowed=0-1&lt;br/&gt;
[    2.904311] Pid: 11573, comm: hpgmg-fv Tainted: GF          O 3.9.9-302.fc19.x86_64 #1&lt;/p&gt;</comment>
                            <comment id="98788" author="bdbernex" created="Mon, 10 Nov 2014 18:37:49 +0000"  >&lt;p&gt;Related correspondence and screen errors:&lt;/p&gt;

&lt;p&gt;Yep, Vincent confirms that he was doing a checkout of the repo at the time&#8230; Definitely GIT + Lustre. And now they have a full kernel dump &lt;img class=&quot;emoticon&quot; src=&quot;https://jira.whamcloud.com/images/icons/emoticons/smile.png&quot; height=&quot;16&quot; width=&quot;16&quot; align=&quot;absmiddle&quot; alt=&quot;&quot; border=&quot;0&quot;/&gt;.&lt;/p&gt;

&lt;p&gt;Romain&lt;/p&gt;


&lt;p&gt;From: Romain Cledat &amp;lt;romain.e.cledat@intel.com&amp;gt;&lt;br/&gt;
Date: Saturday, November 8, 2014 at 12:19 PM&lt;br/&gt;
To: &quot;Bernel, BrianX D&quot; &amp;lt;brianx.d.bernel@intel.com&amp;gt;&lt;br/&gt;
Subject: Your first kdump &lt;img class=&quot;emoticon&quot; src=&quot;https://jira.whamcloud.com/images/icons/emoticons/smile.png&quot; height=&quot;16&quot; width=&quot;16&quot; align=&quot;absmiddle&quot; alt=&quot;&quot; border=&quot;0&quot;/&gt;&lt;/p&gt;

&lt;p&gt;Hello,&lt;/p&gt;

&lt;p&gt;It seems bar1 rebooted by itself some 7h ago. After some investigation, I think it crashed and was rebooted due to kdump (yeah)&lt;/p&gt;

&lt;p&gt;Nov  8 04:51:12 bar1 systemd-logind&lt;span class=&quot;error&quot;&gt;&amp;#91;748&amp;#93;&lt;/span&gt;: New session 17884 of user vincentc.&lt;br/&gt;
Nov  7 22:01:56 bar1 rsyslogd: &lt;span class=&quot;error&quot;&gt;&amp;#91;origin software=&amp;quot;rsyslogd&amp;quot; swVersion=&amp;quot;7.2.6&amp;quot; x-pid=&amp;quot;737&amp;quot; x-info=&amp;quot;http://www.rsyslog.com&amp;quot;&amp;#93;&lt;/span&gt; start&lt;/p&gt;

&lt;p&gt;I think the Nov 7 date is because the clock get reset to a bad value when rebooting. At the end of the reboot, you have:&lt;/p&gt;

&lt;p&gt;Nov  7 22:03:11 bar1 systemd&lt;span class=&quot;error&quot;&gt;&amp;#91;1&amp;#93;&lt;/span&gt;: Startup finished in 2.269s (kernel) + 3.678s (initrd) + 1min 19.515s (userspace) = 1min 25.463s.&lt;br/&gt;
Nov  7 22:03:11 bar1 abrt-server&lt;span class=&quot;error&quot;&gt;&amp;#91;2784&amp;#93;&lt;/span&gt;: No actions are found for event &apos;notify&apos;&lt;br/&gt;
Nov  8 05:03:23 bar1 chronyd&lt;span class=&quot;error&quot;&gt;&amp;#91;717&amp;#93;&lt;/span&gt;: Selected source 149.20.68.17&lt;br/&gt;
Nov  8 05:03:23 bar1 chronyd&lt;span class=&quot;error&quot;&gt;&amp;#91;717&amp;#93;&lt;/span&gt;: System clock wrong by 25198.383400 seconds, adjustment started&lt;br/&gt;
Nov  8 05:03:23 bar1 chronyd&lt;span class=&quot;error&quot;&gt;&amp;#91;717&amp;#93;&lt;/span&gt;: System clock was stepped by 25198.383 seconds&lt;br/&gt;
Nov  8 05:03:23 bar1 systemd&lt;span class=&quot;error&quot;&gt;&amp;#91;1&amp;#93;&lt;/span&gt;: Time has been changed&lt;/p&gt;

&lt;p&gt;So the machine seems to have been down between 4h51 and 5h03. And sure enough there is something in /var/crash. Relevant lines:&lt;br/&gt;
[    2.431054] LustreError: 29800:0:(osc_lock.c:1129:osc_lock_enqueue()) ASSERTION( ols-&amp;gt;ols_state == OLS_NEW ) failed: Impossible state: 4&lt;br/&gt;
[    2.441581] LustreError: 29800:0:(osc_lock.c:1129:osc_lock_enqueue()) LBUG&lt;br/&gt;
[    2.452016] Pid: 29800, comm: git&lt;br/&gt;
[    2.452017] \x0aCall Trace:&lt;/p&gt;

&lt;p&gt;What do you know, it happens to be Lustre again &lt;img class=&quot;emoticon&quot; src=&quot;https://jira.whamcloud.com/images/icons/emoticons/smile.png&quot; height=&quot;16&quot; width=&quot;16&quot; align=&quot;absmiddle&quot; alt=&quot;&quot; border=&quot;0&quot;/&gt;.&lt;/p&gt;

&lt;p&gt;Romain&lt;/p&gt;

&lt;p&gt;PS: I am asking Vincent to confirm it was him and it was git. 5 in the morning though&#8230; &lt;/p&gt;</comment>
                            <comment id="98828" author="jaylan" created="Mon, 10 Nov 2014 23:19:23 +0000"  >&lt;p&gt;NASA Ames hit this bug this morning on its 2.4.3 -7nasC client running sles11sp3 kernel.&lt;/p&gt;</comment>
                            <comment id="98846" author="bobijam" created="Tue, 11 Nov 2014 02:01:21 +0000"  >&lt;p&gt;It looks like &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3433&quot; title=&quot;Encountered a assertion for the ols_state being set to a impossible state&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3433&quot;&gt;&lt;del&gt;LU-3433&lt;/del&gt;&lt;/a&gt;, and patch for 2.4 is here &lt;a href=&quot;http://review.whamcloud.com/#/c/9194/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/9194/&lt;/a&gt;, we also better has &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4558&quot; title=&quot;Crash in cl_lock_put on racer&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4558&quot;&gt;&lt;del&gt;LU-4558&lt;/del&gt;&lt;/a&gt; patch land for 2.4 &lt;a href=&quot;http://review.whamcloud.com/#/c/9876/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/9876/&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;2.5 already has them.&lt;/p&gt;</comment>
                            <comment id="98851" author="jaylan" created="Tue, 11 Nov 2014 02:35:23 +0000"  >&lt;p&gt;We had both of them in our 2.4.3-7nasC.&lt;/p&gt;</comment>
                            <comment id="98853" author="bobijam" created="Tue, 11 Nov 2014 02:42:25 +0000"  >&lt;p&gt;Can you try apply these patches?&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;http://review.whamcloud.com/#/c/8530/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/8530/&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;http://review.whamcloud.com/#/c/9783/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/9783/&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;http://review.whamcloud.com/#/c/9194/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/9194/&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;http://review.whamcloud.com/#/c/9876/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/9876/&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="98884" author="pjones" created="Tue, 11 Nov 2014 14:51:01 +0000"  >&lt;p&gt;Jay&lt;/p&gt;

&lt;p&gt;I think that it makes sense to open a separate ticket for the NASA issue because the environments are really quite different and the root cause could be different&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="98935" author="jaylan" created="Tue, 11 Nov 2014 23:11:34 +0000"  >&lt;p&gt;Peter,&lt;/p&gt;

&lt;p&gt;Will do.&lt;/p&gt;

&lt;p&gt;Jay&lt;/p&gt;</comment>
                            <comment id="152755" author="bdbernex" created="Wed, 18 May 2016 20:49:26 +0000"  >&lt;p&gt;Is there set of instructions somewhere on how to apply these patches?&lt;/p&gt;</comment>
                            <comment id="230814" author="pjones" created="Tue, 24 Jul 2018 13:53:28 +0000"  >&lt;p&gt;Out of date ticket&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="26036">LU-5492</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="27557">LU-5910</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10490" key="com.atlassian.jira.plugin.system.customfieldtypes:datepicker">
                        <customfieldname>End date</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>Wed, 18 May 2016 17:01:12 +0000</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                        <customfield id="customfield_10030" key="com.atlassian.jira.plugin.system.customfieldtypes:labels">
                        <customfieldname>Epic/Theme</customfieldname>
                        <customfieldvalues>
                                        <label>Kernel</label>
            <label>Lustre-2.4</label>
            <label>Panic</label>
    
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                            <customfield id="customfield_10070" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Project</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10043"><![CDATA[Fast Forward]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwvtb:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>15656</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                        <customfield id="customfield_10493" key="com.atlassian.jira.plugin.system.customfieldtypes:datepicker">
                        <customfieldname>Start date</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>Tue, 9 Sep 2014 17:01:12 +0000</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                    </customfields>
    </item>
</channel>
</rss>