<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:12:36 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-7866] BUG: unable to handle kernel NULL pointer dereference at (null)</title>
                <link>https://jira.whamcloud.com/browse/LU-7866</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;Error occurred during soak testing of build &apos;20160309&apos; (b2_8 RC5) (see:  &lt;a href=&quot;https://wiki.hpdd.intel.com/display/Releases/Soak+Testing+on+Lola#SoakTestingonLola-20160309&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://wiki.hpdd.intel.com/display/Releases/Soak+Testing+on+Lola#SoakTestingonLola-20160309&lt;/a&gt; also). DNE is enabled. MDTs had been formatted using ldiskfs, OSTs using zfs. MDS nodes are configured in active - active HA failover configuration. (For test set-up configuration see &lt;a href=&quot;https://wiki.hpdd.intel.com/display/Releases/Soak+Testing+on+Lola#SoakTestingonLola-Configuration&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://wiki.hpdd.intel.com/display/Releases/Soak+Testing+on+Lola#SoakTestingonLola-Configuration&lt;/a&gt;)&lt;/p&gt;

&lt;p&gt;Sequence of events:&lt;/p&gt;
&lt;ul&gt;
	&lt;li&gt;mds_restart      : 2016-03-11 03:41:05,597 - 2016-03-11 03:54:49,109    lola-8&lt;/li&gt;
	&lt;li&gt;2016-03-11 03:56   Lustre client &lt;tt&gt;lola-32&lt;/tt&gt; crashed with the following error:
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;&amp;lt;1&amp;gt;BUG: unable to handle kernel NULL pointer dereference at (null)
&amp;lt;1&amp;gt;IP: [&amp;lt;ffffffffa0a0241f&amp;gt;] ll_open_cleanup+0xaf/0x600 [lustre]
&amp;lt;4&amp;gt;PGD 38a867067 PUD 775372067 PMD 0
&amp;lt;4&amp;gt;Oops: 0000 [#1] SMP
&amp;lt;4&amp;gt;last sysfs file: /sys/devices/pci0000:00/0000:00:02.0/0000:06:00.0/infiniband_mad/umad0/port
&amp;lt;4&amp;gt;CPU 1
&amp;lt;4&amp;gt;Modules linked in: osc(U) mgc(U) lustre(U) lov(U) mdc(U) fid(U) lmv(U) fld(U) ko2iblnd(U) ptlrpc(U) obdclass(U) lnet(U) sha512_generic crc32c_intel libcfs(U) nfsi]
&amp;lt;4&amp;gt;
&amp;lt;4&amp;gt;Pid: 201682, comm: simul Not tainted 2.6.32-504.30.3.el6.x86_64 #1 Intel Corporation S2600GZ/S2600GZ
&amp;lt;4&amp;gt;RIP: 0010:[&amp;lt;ffffffffa0a0241f&amp;gt;]  [&amp;lt;ffffffffa0a0241f&amp;gt;] ll_open_cleanup+0xaf/0x600 [lustre]
&amp;lt;4&amp;gt;RSP: 0018:ffff8801d3f298e8  EFLAGS: 00010286
&amp;lt;4&amp;gt;RAX: 0000000000000000 RBX: ffff8807f7271a00 RCX: ffff88102dd13ca0
&amp;lt;4&amp;gt;RDX: 0000000000000002 RSI: 0000000000000000 RDI: ffff88102f395000
&amp;lt;4&amp;gt;RBP: ffff8801d3f29928 R08: ffff88034f47e9c0 R09: 0000000000000000
&amp;lt;4&amp;gt;R10: 0000000000000010 R11: 0000000000000000 R12: 0000000000000000
&amp;lt;4&amp;gt;R13: ffff88082dd02c00 R14: ffff88081a5fc800 R15: ffff8801d3f29988
&amp;lt;4&amp;gt;FS:  00007f72f89d3700(0000) GS:ffff880045e20000(0000) knlGS:0000000000000000
&amp;lt;4&amp;gt;CS:  0010 DS: 0000 ES: 0000 CR0: 000000008005003b
&amp;lt;4&amp;gt;CR2: 0000000000000000 CR3: 00000003b89f2000 CR4: 00000000001407e0
&amp;lt;4&amp;gt;DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
&amp;lt;4&amp;gt;DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
&amp;lt;4&amp;gt;Process simul (pid: 201682, threadinfo ffff8801d3f28000, task ffff8804c6d73520)
&amp;lt;4&amp;gt;Stack:
&amp;lt;4&amp;gt; ffff8801d3f298f8 0000000000000000 ffff8801d3f29928 0000000000000001
&amp;lt;4&amp;gt;&amp;lt;d&amp;gt; ffff88082dd02c00 ffff88081a5fc800 ffff8809648898c0 ffff8801d3f29988
&amp;lt;4&amp;gt;&amp;lt;d&amp;gt; ffff8801d3f299f8 ffffffffa0a09c4a fffffffffffffffb 00ff880a531788c0
&amp;lt;4&amp;gt;Call Trace:
&amp;lt;4&amp;gt; [&amp;lt;ffffffffa0a09c4a&amp;gt;] ll_prep_inode+0x20a/0xc40 [lustre]
&amp;lt;4&amp;gt; [&amp;lt;ffffffffa07804b2&amp;gt;] ? __req_capsule_get+0x162/0x6e0 [ptlrpc]
&amp;lt;4&amp;gt; [&amp;lt;ffffffffa0a214f0&amp;gt;] ? ll_md_blocking_ast+0x0/0x7d0 [lustre]
&amp;lt;4&amp;gt; [&amp;lt;ffffffffa0a21fe1&amp;gt;] ll_lookup_it_finish+0x321/0x12e0 [lustre]
&amp;lt;4&amp;gt; [&amp;lt;ffffffffa0723dd0&amp;gt;] ? ldlm_lock_decref_internal+0x2e0/0xa80 [ptlrpc]
&amp;lt;4&amp;gt; [&amp;lt;ffffffffa0569925&amp;gt;] ? class_handle2object+0x95/0x190 [obdclass]
&amp;lt;4&amp;gt; [&amp;lt;ffffffff81174ab3&amp;gt;] ? kmem_cache_alloc_trace+0x1b3/0x1c0
&amp;lt;4&amp;gt; [&amp;lt;ffffffffa0a1e439&amp;gt;] ? ll_i2suppgid+0x19/0x30 [lustre]
&amp;lt;4&amp;gt; [&amp;lt;ffffffffa0a1e47e&amp;gt;] ? ll_i2gids+0x2e/0xd0 [lustre]
&amp;lt;4&amp;gt; [&amp;lt;ffffffffa0a214f0&amp;gt;] ? ll_md_blocking_ast+0x0/0x7d0 [lustre]
&amp;lt;4&amp;gt; [&amp;lt;ffffffffa0a23226&amp;gt;] ll_lookup_it+0x286/0xda0 [lustre]
&amp;lt;4&amp;gt; [&amp;lt;ffffffffa0a23dc9&amp;gt;] ll_lookup_nd+0x89/0x4f0 [lustre]
&amp;lt;4&amp;gt; [&amp;lt;ffffffff8119e055&amp;gt;] do_lookup+0x1a5/0x230
&amp;lt;4&amp;gt; [&amp;lt;ffffffff8119ece4&amp;gt;] __link_path_walk+0x7a4/0x1000
&amp;lt;4&amp;gt; [&amp;lt;ffffffff8119f7fa&amp;gt;] path_walk+0x6a/0xe0
&amp;lt;4&amp;gt; [&amp;lt;ffffffff8119fa0b&amp;gt;] filename_lookup+0x6b/0xc0
&amp;lt;4&amp;gt; [&amp;lt;ffffffff8122daa6&amp;gt;] ? security_file_alloc+0x16/0x20
&amp;lt;4&amp;gt; [&amp;lt;ffffffff811a0ee4&amp;gt;] do_filp_open+0x104/0xd20
&amp;lt;4&amp;gt; [&amp;lt;ffffffff81063c63&amp;gt;] ? perf_event_task_sched_out+0x33/0x70
&amp;lt;4&amp;gt; [&amp;lt;ffffffff8129943a&amp;gt;] ? strncpy_from_user+0x4a/0x90
&amp;lt;4&amp;gt; [&amp;lt;ffffffff811ae392&amp;gt;] ? alloc_fd+0x92/0x160
&amp;lt;4&amp;gt; [&amp;lt;ffffffff8118b157&amp;gt;] do_sys_open+0x67/0x130
&amp;lt;4&amp;gt; [&amp;lt;ffffffff8118b260&amp;gt;] sys_open+0x20/0x30
&amp;lt;4&amp;gt; [&amp;lt;ffffffff8100b0d2&amp;gt;] system_call_fastpath+0x16/0x1b
&amp;lt;4&amp;gt;Code: ba 38 01 00 00 31 f6 e8 30 62 b6 ff f6 05 fd 6b a9 ff 10 74 0d 80 3d f0 6b a9 ff 00 0f 88 ba 01 00 00 48 85 db 0f 84 16 02 00 00 &amp;lt;49&amp;gt; 8b 04 24 48 89 03 49
&amp;lt;1&amp;gt;RIP  [&amp;lt;ffffffffa0a0241f&amp;gt;] ll_open_cleanup+0xaf/0x600 [lustre]
&amp;lt;4&amp;gt; RSP &amp;lt;ffff8801d3f298e8&amp;gt;
&amp;lt;4&amp;gt;CR2: 0000000000000000
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;&lt;/li&gt;
&lt;/ul&gt;


&lt;p&gt;Attached client (&lt;tt&gt;lola-32&lt;/tt&gt;) message, console and vmcore-dmesg.txt file.&lt;/p&gt;</description>
                <environment>lola&lt;br/&gt;
build: &lt;a href=&quot;https://build.hpdd.intel.com/job/lustre-b2_8/12/&quot;&gt;https://build.hpdd.intel.com/job/lustre-b2_8/12/&lt;/a&gt;</environment>
        <key id="35274">LU-7866</key>
            <summary>BUG: unable to handle kernel NULL pointer dereference at (null)</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="1" iconUrl="https://jira.whamcloud.com/images/icons/statuses/open.png" description="The issue is open and ready for the assignee to start work on it.">Open</status>
                    <statusCategory id="2" key="new" colorName="default"/>
                                    <resolution id="-1">Unresolved</resolution>
                                        <assignee username="hongchao.zhang">Hongchao Zhang</assignee>
                                    <reporter username="heckes">Frank Heckes</reporter>
                        <labels>
                            <label>patch</label>
                            <label>soak</label>
                    </labels>
                <created>Fri, 11 Mar 2016 19:13:55 +0000</created>
                <updated>Sun, 30 Jan 2022 10:08:36 +0000</updated>
                                            <version>Lustre 2.8.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>6</watches>
                                                                            <comments>
                            <comment id="145269" author="heckes" created="Fri, 11 Mar 2016 19:25:57 +0000"  >&lt;p&gt;Crash file has been saved to &lt;tt&gt;lhn.lola.hpdd.intel.com:/scratch/crashdumps/lu-7866/lola-32/127.0.0.1-2016-03-11-03\:55\:54&lt;/tt&gt;&lt;/p&gt;</comment>
                            <comment id="145285" author="green" created="Fri, 11 Mar 2016 21:38:06 +0000"  >&lt;p&gt;The actual problem here is two lines above the crash:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;&amp;lt;3&amp;gt;LustreError: 201682:0:(layout.c:2025:__req_capsule_get()) @@@ Wrong buffer for field `mdt_body&apos; (2 of 1) in format `LDLM_INTENT_OPEN&apos;: 0 vs. 216 (server)
&amp;lt;3&amp;gt;  req@ffff880e244a90c0 x1528368254991312/t489629526959(489629526959) o101-&amp;gt;soaked-MDT0001-mdc-ffff88082dd02c00@192.168.1.108@o2ib10:12/10 lens 840/192 e 1 to 0 dl 1457697328 ref 2 fl Complete:R/4/0 rc -107/-107
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;


&lt;p&gt;This then causes the crash because:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;void ll_open_cleanup(struct super_block *sb, struct ptlrpc_request *open_req)
{
        struct mdt_body                 *body;
        struct md_op_data               *op_data;
        struct ptlrpc_request           *close_req = NULL;
        struct obd_export               *exp       = ll_s2sbi(sb)-&amp;gt;ll_md_exp;
        ENTRY;

        body = req_capsule_server_get(&amp;amp;open_req-&amp;gt;rq_pill, &amp;amp;RMF_MDT_BODY);  &amp;lt;=== Returns NULL due to message above
        OBD_ALLOC_PTR(op_data);
        if (op_data == NULL) {
                CWARN(&quot;%s: cannot allocate op_data to release open handle for &quot;
                      DFID&quot;\n&quot;,
                      ll_get_fsname(sb, NULL, 0), PFID(&amp;amp;body-&amp;gt;mbo_fid1));

                RETURN_EXIT;
        }

        op_data-&amp;gt;op_fid1 = body-&amp;gt;mbo_fid1;       &amp;lt;==== Whoops!
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="145288" author="pjones" created="Fri, 11 Mar 2016 21:42:57 +0000"  >&lt;p&gt;Hongchao&lt;/p&gt;

&lt;p&gt;Can you please look into this? Oleg has suggested that you should&lt;/p&gt;

&lt;p&gt;1) add error handling&lt;br/&gt;
2) figure out why the request reply does not have what we think it should have inside&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="145289" author="green" created="Fri, 11 Mar 2016 21:44:01 +0000"  >&lt;p&gt;Frank - are these builds using the RC5 RPM or do you self-build them from tip of whatever branch? Because it says &quot;build &apos;20160302&apos;&quot;, but if you self build it does not help us and you need to put debuginfo  vmlinux and debuginfo modules alongside the crashdumps&lt;/p&gt;</comment>
                            <comment id="145392" author="heckes" created="Mon, 14 Mar 2016 09:05:09 +0000"  >&lt;p&gt;My apologies there&apos;s a typo in the description field above  (I corrected it). The build under test was b2_8 rc5 and had been downloaded from Jenkins job &lt;em&gt;lustre-2_8&lt;/em&gt; . The debuginfo RPMs can be found at &lt;tt&gt;lhn.lola.hpdd.intel.com:/scratch/rpms/20160309/notinstalled/server/x86_64&lt;/tt&gt;. &lt;/p&gt;</comment>
                            <comment id="145401" author="hongchao.zhang" created="Mon, 14 Mar 2016 12:46:51 +0000"  >&lt;p&gt;the related request has actually failed with -ENOTCONN(-107), then there are no reply fields (Reply&apos;s bufcount is 1, only contains&lt;br/&gt;
PTLRPC_BODY) in the request, still looking why the failed request can go to the &quot;ll_prep_inode/ll_open_cleanup&quot;.&lt;/p&gt;</comment>
                            <comment id="145547" author="hongchao.zhang" created="Tue, 15 Mar 2016 12:40:59 +0000"  >&lt;p&gt;status update: &lt;br/&gt;
1, the request is just changed between ll_prep_inode and ll_open_cleanup, for the MDT_BODY is also retrieved in ll_prep_inode&lt;br/&gt;
2, in ll_prep_inode, ll_iget failed with -EIO, then the related FID could be a directory, and the GET_ATTR request to other MDTs failed&lt;/p&gt;

&lt;p&gt;It&apos;s not clear where the request is modified, and it could be changed by the replay, but there is no &quot;P&quot; flags in the request,&lt;br/&gt;
and I also try to reproduce the problem by replaying the request before ll_open_cleanup use it, but failed to reproduce.&lt;/p&gt;</comment>
                            <comment id="145680" author="hongchao.zhang" created="Tue, 15 Mar 2016 22:25:54 +0000"  >&lt;p&gt;Hi Frank,&lt;br/&gt;
Could you please extract the debug logs from the crash dump? and I can&apos;t connect to the Lola.&lt;br/&gt;
and is the logs at MDT0000 &amp;amp; MDT0001 available? it could help to trace the problem.&lt;br/&gt;
 Thanks!!&lt;/p&gt;</comment>
                            <comment id="145753" author="hongchao.zhang" created="Wed, 16 Mar 2016 13:30:50 +0000"  >&lt;p&gt;the issue can be reproduced if the request is replayed but failed between ll_prep_inode and ll_open_cleanup.&lt;br/&gt;
I have managed to simulate this case in my local VMs and it occurred, but I&apos;m not sure whether it is the same case with this one&lt;/p&gt;</comment>
                            <comment id="145765" author="heckes" created="Wed, 16 Mar 2016 14:31:25 +0000"  >&lt;p&gt;Yes, to extract the debug log from the crash file. For log information on MDS side (MDT000{0,1}) I&apos;m going to check, too and will attach the files here.&lt;/p&gt;</comment>
                            <comment id="145833" author="heckes" created="Wed, 16 Mar 2016 18:39:25 +0000"  >&lt;p&gt;Odd, I decompressed the Lustre client &apos;normal&apos; kernel and the debuginfo kernel, both returned the &lt;br/&gt;
following messages (besides different names):&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@lola-16 crash_lustre]# crash /scratch/crashdumps/lu-7866/lola-32/127.0.0.1-2016-03-11-03\:55\:54/vmcore /tmp/vmlinux-2.6.32-504.30.3.el6.x86_64

crash 6.1.0-6.el6_6
Copyright (C) 2002-2012  Red Hat, Inc.
Copyright (C) 2004, 2005, 2006, 2010  IBM Corporation
Copyright (C) 1999-2006  Hewlett-Packard Co
Copyright (C) 2005, 2006, 2011, 2012  Fujitsu Limited
Copyright (C) 2006, 2007  VA Linux Systems Japan K.K.
Copyright (C) 2005, 2011  NEC Corporation
Copyright (C) 1999, 2002, 2007  Silicon Graphics, Inc.
Copyright (C) 1999, 2000, 2001, 2002  Mission Critical Linux, Inc.
This program is free software, covered by the GNU General Public License,
and you are welcome to change it and/or distribute copies of it under
certain conditions.  Enter &quot;help copying&quot; to see the conditions.
This program has absolutely no warranty.  Enter &quot;help warranty&quot; for details.
 
crash: /tmp/vmlinux-2.6.32-504.30.3.el6.x86_64: no .gnu_debuglink section

crash: /tmp/vmlinux-2.6.32-504.30.3.el6.x86_64: no debugging data available
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Path to Oleg&apos;s &lt;tt&gt;lustre.so&lt;/tt&gt; is configured via &lt;tt&gt;~/.crashrc&lt;/tt&gt;. Any ideas?&lt;/p&gt;</comment>
                            <comment id="146592" author="hongchao.zhang" created="Wed, 23 Mar 2016 11:55:37 +0000"  >&lt;p&gt;the issue should be caused by the race between the replay and the normal process of the open request.&lt;/p&gt;

&lt;p&gt;Thread 1:&lt;br/&gt;
send the LDLM_ENQUEUE request to MDT1 to open the directory, and send requests to other MDTs to get the attributes&lt;br/&gt;
after get the reply from the MDT1, but it was stuck by one of the requests to other MDTs.&lt;/p&gt;

&lt;p&gt;Thread 2:&lt;br/&gt;
closing the file, and the &quot;rq_replay&quot; of the open request is cleared in &quot;mdc_close&quot;.&lt;/p&gt;

&lt;p&gt;Then, the MDT1 got failover and the recovery is initiated, then the LDLM_ENQUEUE request in Thread 1 is replayed but failed&lt;br/&gt;
with -ENOTCONN during replaying the request to MDT1 and its &quot;rq_repmsg&quot; will contain only the &quot;PTLRPC_BODY&quot;  field.&lt;br/&gt;
then the issue is triggered in &quot;ll_open_cleanup&quot;.&lt;/p&gt;</comment>
                            <comment id="147421" author="gerrit" created="Thu, 31 Mar 2016 08:52:03 +0000"  >&lt;p&gt;Hongchao Zhang (hongchao.zhang@intel.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/19256&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/19256&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7866&quot; title=&quot;BUG: unable to handle kernel NULL pointer dereference at (null)&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7866&quot;&gt;LU-7866&lt;/a&gt; ptlrpc: save reply msg for replay&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: d48a27d6e1a2b63dd9d2f2195dfed2e2d0595b78&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                            <attachment id="20729" name="console-loa-32.log.bz2" size="98741" author="heckes" created="Fri, 11 Mar 2016 19:30:42 +0000"/>
                            <attachment id="20730" name="lola-32-vmcore-dmesg.txt.bz2" size="30716" author="heckes" created="Fri, 11 Mar 2016 19:30:42 +0000"/>
                            <attachment id="20731" name="messages-lola-32.log.bz2" size="362747" author="heckes" created="Fri, 11 Mar 2016 19:32:39 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzy46f:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>