<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:16:57 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-1475] lov_update_create_set() error creating fid 0xf99b sub-object on OST idx 0/1: rc = -5</title>
                <link>https://jira.whamcloud.com/browse/LU-1475</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;While using mdsrate to create a large number of files to use up the free inodes on a 48TB OST on the Juelich test cluster, the following EIO error occurred:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;+ su mpiuser sh -c &quot;/usr/lib64/openmpi/bin/mpirun -mca orte_rsh_agent rsh:ssh -np 25 -machinefile /tmp/large-lun.machines /usr/lib64/lustre/tests/mdsrate --create --verbose --ndirs 25 --dirfmt &apos;/mnt/lustre/mdsrate/dir%d&apos; --nfiles 1993129 --filefmt &apos;file%%d&apos; &quot;
&amp;lt;~snip~&amp;gt;
Rank 2: 71.24 creates/sec 140.38 secs (total: 260000 creates 3334.84 secs)
Rank 21: 71.24 creates/sec 140.38 secs (total: 260000 creates 3334.84 secs)
rank 4: open(file264701) error: Input/output error
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Dmesg on the MDS node showed that:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;LustreError: 4988:0:(lov_request.c:572:lov_update_create_set()) error creating fid 0xf99b sub-object on OST idx 0/1: rc = -5
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Maloo report:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/379a475c-aee9-11e1-b0f7-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/379a475c-aee9-11e1-b0f7-52540035b04c&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;The same issue also occurred on a 1TB OST: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/f900e0aa-aee8-11e1-b0f7-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/f900e0aa-aee8-11e1-b0f7-52540035b04c&lt;/a&gt;&lt;/p&gt;</description>
                <environment>&lt;br/&gt;
Lustre Tag: v2_1_2_RC2&lt;br/&gt;
Lustre Build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_1/86/&quot;&gt;http://build.whamcloud.com/job/lustre-b2_1/86/&lt;/a&gt;&lt;br/&gt;
Distro/Arch: RHEL6.2/x86_64&lt;br/&gt;
Network: IB (in-kernel OFED)&lt;br/&gt;
</environment>
        <key id="14727">LU-1475</key>
            <summary>lov_update_create_set() error creating fid 0xf99b sub-object on OST idx 0/1: rc = -5</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="5">Cannot Reproduce</resolution>
                                        <assignee username="green">Oleg Drokin</assignee>
                                    <reporter username="yujian">Jian Yu</reporter>
                        <labels>
                    </labels>
                <created>Tue, 5 Jun 2012 05:41:42 +0000</created>
                <updated>Mon, 29 May 2017 03:56:41 +0000</updated>
                            <resolved>Mon, 29 May 2017 03:56:41 +0000</resolved>
                                    <version>Lustre 2.3.0</version>
                    <version>Lustre 2.1.2</version>
                    <version>Lustre 2.1.3</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>8</watches>
                                                                            <comments>
                            <comment id="40008" author="pjones" created="Tue, 5 Jun 2012 05:44:43 +0000"  >&lt;p&gt;Oleg could you please look into this one? Thanks!&lt;/p&gt;</comment>
                            <comment id="40010" author="yujian" created="Tue, 5 Jun 2012 06:09:59 +0000"  >&lt;p&gt;From &lt;a href=&quot;http://wiki.whamcloud.com/display/ENG/Lustre+2.1.1+release+testing+tracker&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://wiki.whamcloud.com/display/ENG/Lustre+2.1.1+release+testing+tracker&lt;/a&gt;, I found that the large-number-of-inodes testing passed on Lustre 2.1.1 RC1 against a 128TB OST on a DDN SFA10KE storage appliance:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/22872264-aef7-11e1-b0f7-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/22872264-aef7-11e1-b0f7-52540035b04c&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;The test was run on a single node at that time.&lt;/p&gt;

&lt;p&gt;Now, on the Juelich cluster with separate Lustre client, MDS and OSS nodes, I performed the same test on Lustre 2.1.1 RC4; the EIO error also occurred while testing against a 1TB OST:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/bbce36f6-aee8-11e1-b0f7-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/bbce36f6-aee8-11e1-b0f7-52540035b04c&lt;/a&gt;  &lt;/p&gt;</comment>
                            <comment id="40017" author="green" created="Tue, 5 Jun 2012 10:34:45 +0000"  >&lt;p&gt;From the logs, the MDS has a strong belief it got disconnected from the OSTs, even though there are no messages supporting this state (evictions or whatever).&lt;/p&gt;</comment>
                            <comment id="40046" author="green" created="Tue, 5 Jun 2012 18:06:09 +0000"  >&lt;p&gt;I imagine a higher-level debug log might shed some light on the situation, though I am not sure how high you can reasonably go.&lt;/p&gt;

&lt;p&gt;Ideally if you increase debug_mb to 1024 and debug level to -1 that probably would be best.&lt;/p&gt;
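
&lt;p&gt;On the MDS that would be something like the following (a sketch only; exact parameter syntax may differ between Lustre versions):&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# grow the kernel debug buffer to 1024 MB and enable all debug flags
lctl set_param debug_mb=1024
lctl set_param debug=-1
# after reproducing the failure, dump the debug buffer to a file
lctl dk /tmp/lustre-debug-mds.log
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>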
                            <comment id="40077" author="yujian" created="Wed, 6 Jun 2012 05:49:58 +0000"  >&lt;blockquote&gt;&lt;p&gt;Ideally if you increase debug_mb to 1024 and debug level to -1 that probably would be best.&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;OK.&lt;br/&gt;
It seems there is some hardware issue on the MDS node n002. It&apos;s disconnected. I&apos;ve been trying to power-cycle it and even re-provision it, but it still does not start up. &lt;img class=&quot;emoticon&quot; src=&quot;https://jira.whamcloud.com/images/icons/emoticons/sad.png&quot; height=&quot;16&quot; width=&quot;16&quot; align=&quot;absmiddle&quot; alt=&quot;&quot; border=&quot;0&quot;/&gt;&lt;/p&gt;

&lt;p&gt;I&apos;m provisioning another MDS node n001... &lt;/p&gt;</comment>
                            <comment id="40108" author="yujian" created="Wed, 6 Jun 2012 08:59:28 +0000"  >&lt;p&gt;Neither n001 nor n002 worked. Finally, I provisioned n003 and booted it up. The testing is ongoing now.&lt;/p&gt;</comment>
                            <comment id="40171" author="yujian" created="Wed, 6 Jun 2012 23:10:36 +0000"  >&lt;p&gt;Hi Oleg,&lt;br/&gt;
I reproduced the failure and gathered the debug logs with -1 level. Please find /scratch/logs/2.1.2/large-lun-145447.tar.bz2 on brent node. Thanks!&lt;/p&gt;</comment>
                            <comment id="40179" author="green" created="Thu, 7 Jun 2012 03:44:46 +0000"  >&lt;p&gt;Thanks!&lt;br/&gt;
This log is pretty useful.&lt;/p&gt;

&lt;p&gt;I think I see the problem now, introduced by WangDi&apos;s patch for bug 21379, change eb7c28ff977f4e0a280558aa74e23f2a9ab0ea0c&lt;/p&gt;

&lt;p&gt;In lov_check_and_create_object() we start with rc = -EIO for the early-exit case of no available OSCs,&lt;br/&gt;
then we try to see if there are some objects available anywhere:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;+        /* check if objects has been created on this ost */
+        for (stripe = 0; stripe &amp;lt; lsm-&amp;gt;lsm_stripe_count; stripe++) {
+                /* already have object at this stripe */
+                if (ost_idx == lsm-&amp;gt;lsm_oinfo[stripe]-&amp;gt;loi_ost_idx)
+                        break;
+        }
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;If we did find something (result of a racing create completion that we can clearly see in the logs), then the next if statement is not triggered:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;+        /* check if objects has been created on this ost */
+        for (stripe = 0; stripe &amp;lt; lsm-&amp;gt;lsm_stripe_count; stripe++) {
+                /* already have object at this stripe */
+                if (ost_idx == lsm-&amp;gt;lsm_oinfo[stripe]-&amp;gt;loi_ost_idx)
+                        break;
+        }
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;And as such we exit via the RETURN(rc) that follows, with rc still being the initial -EIO.&lt;/p&gt;

&lt;p&gt;I think the fix should just be to either add &quot;else rc = 0&quot; to the last if statement, or to add rc = 0 before the last if statement.&lt;/p&gt;
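
&lt;p&gt;Roughly like this (just a sketch to illustrate the second option; the shape of the last if statement and the create call are paraphrased here, not quoted from lov_request.c):&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;        /* ... the probing loop quoted above ... */

        /* proposed: a racing create already provided an object somewhere,
         * so do not return the initial -EIO */
        rc = 0;
        if (stripe == lsm-&amp;gt;lsm_stripe_count) {
                /* assumed shape of the last if statement: nothing was found
                 * on this OST, so actually create the sub-object here
                 * (helper name below is hypothetical) */
                rc = lov_create_sub_object(set, ost_idx);
        }
        RETURN(rc);
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;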

&lt;p&gt;Wangdi, what do you think?&lt;/p&gt;</comment>
                            <comment id="40186" author="yujian" created="Thu, 7 Jun 2012 09:37:48 +0000"  >&lt;p&gt;Lustre Tag: v2_1_2_RC2&lt;br/&gt;
Lustre Build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_1/87/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-b2_1/87/&lt;/a&gt;&lt;br/&gt;
Distro/Arch: RHEL6.2/x86_64(server), RHEL6.2/i686(client)&lt;br/&gt;
Network: TCP (1GigE)&lt;br/&gt;
ENABLE_QUOTA=yes&lt;/p&gt;

&lt;p&gt;sanity test 220 failed with the same issue: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/b0aeb296-b07d-11e1-99c6-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/b0aeb296-b07d-11e1-99c6-52540035b04c&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;I found this failure occurred before and was marked as &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-782&quot; title=&quot;sanity.sh subtest test_220 failed with 3, error -107 -ENOTCONN&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-782&quot;&gt;&lt;del&gt;LU-782&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;</comment>
                            <comment id="40196" author="di.wang" created="Thu, 7 Jun 2012 12:07:56 +0000"  >&lt;blockquote&gt;
&lt;p&gt;Wangdi, what do you think?&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;Yes, it should reset EIO to 0 if it found some objects on the stripe.&lt;/p&gt;</comment>
                            <comment id="40209" author="green" created="Thu, 7 Jun 2012 14:07:06 +0000"  >&lt;p&gt;patch at &lt;a href=&quot;http://review.whamcloud.com/3058&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/3058&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="41573" author="spiechurski" created="Fri, 6 Jul 2012 18:50:29 +0000"  >&lt;p&gt;I uploaded debug logs in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-782&quot; title=&quot;sanity.sh subtest test_220 failed with 3, error -107 -ENOTCONN&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-782&quot;&gt;&lt;del&gt;LU-782&lt;/del&gt;&lt;/a&gt; with a similar issue, except that the return code is ENOTCONN; the real current issue, however, is a mix of ENOTCONN and EIO.&lt;/p&gt;

&lt;p&gt;What is the status of the patch? I see there was a regression introduced with the current version, but can you see why?&lt;/p&gt;
</comment>
                            <comment id="42288" author="green" created="Wed, 25 Jul 2012 23:08:06 +0000"  >&lt;p&gt;Hm, I just dug some more into the logs as part of trying to improve my original patch and came to the conclusion that my initial analysis was wrong (and the &quot;fix&quot; is wrong too).&lt;/p&gt;

&lt;p&gt;What really happens (in the log file I have at hand):&lt;br/&gt;
The OST starts to feel really bad (the logs have multiple messages that I interpret as I/O errors, like&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;sd 6:0:15:0: attempting task abort! scmd(ffff88060e198d80)
sd 6:0:15:0: [sdo] CDB: Write(10): 2a 00 01 cb 62 c0 00 00 08 00
scsi target6:0:15: handle(0x001d), sas_address(0x5000c50040cf2bc1), phy(18)
scsi target6:0:15: enclosure_logical_id(0x500304800137caff), slot(6)
sd 6:0:15:0: task abort: SUCCESS scmd(ffff88060e198d80)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;)&lt;br/&gt;
A precreate request is sent as we start to deplete our precreated objects.&lt;br/&gt;
This precreate request gets stuck on the OST somewhere. The OST knows it&apos;s slow, so it sends periodic early replies to let us know it&apos;s still working on the request.&lt;br/&gt;
Eventually we exhaust all the precreated objects. A fake request is created for the purposes of waiting for some more objects to appear, and it is placed in ptlrpcd queue.&lt;/p&gt;

&lt;p&gt;No more objects get created (the OST being in trouble).&lt;br/&gt;
10 seconds later ptlrpc decides that the fake request has been sitting in the queue for way too long and should be timed out,&lt;br/&gt;
and this sets off the entire error path that is eventually propagated to userspace (because we don&apos;t have any other OSTs to allocate from).&lt;/p&gt;

&lt;p&gt;Sebastien: the LU-782 log excerpt you posted there shows the same sign of the fake request timing out (o-1), so likely something very similar is going on in that case.&lt;/p&gt;

&lt;p&gt;Anyway, for this bug we need to figure out how come the OST is unhappy.&lt;br/&gt;
YuJian, is this still repeatable? On other nodes too? Every time with SCSI complaining about errors?&lt;/p&gt;</comment>
                            <comment id="42301" author="spiechurski" created="Thu, 26 Jul 2012 03:58:41 +0000"  >&lt;p&gt;Oleg,&lt;/p&gt;

&lt;p&gt;Looking back at the logs on the OSS when the problem first appeared, I don&apos;t see any obvious problems. However, the problem was raised after a maintenance where a writeconf was done on all the targets, which were then all remounted in parallel (around 200 targets at once).&lt;br/&gt;
One of our experts suspected that something had gone wrong at re-registration time and that the configs were somehow corrupted.&lt;br/&gt;
Reapplying the writeconf and re-registering each target one by one made the problem disappear. It is still difficult to understand why...&lt;/p&gt;</comment>
                            <comment id="42320" author="jlevi" created="Thu, 26 Jul 2012 09:47:33 +0000"  >&lt;p&gt;Yu Jian,&lt;br/&gt;
Can you let us know if this is still happening often? We are trying to determine if this should still be a blocker for 2.3 Release.&lt;/p&gt;</comment>
                            <comment id="42540" author="yujian" created="Wed, 1 Aug 2012 05:14:18 +0000"  >&lt;p&gt;Hi Oleg and Jodi,&lt;/p&gt;

&lt;blockquote&gt;&lt;p&gt;YuJian, is this still repeatable? On other nodes too? Every time with scsi complaining about errors?&lt;/p&gt;&lt;/blockquote&gt;
&lt;blockquote&gt;&lt;p&gt;Can you let us know if this is still happening often? We are trying to determine if this should still be a blocker for 2.3 Release.&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;I&apos;ve not run mdsrate on the b2_1 branch since Lustre 2.1.2 was released. I&apos;ll find some Toro test nodes to see whether I can reproduce it on the latest b2_1 and master branches.&lt;/p&gt;

&lt;p&gt;However, I found &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1454&quot; title=&quot;1.8.8-wc1&amp;lt;-&amp;gt;2.1.2 interop: Test failure on test suite conf-sanity, subtest test_22&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1454&quot;&gt;&lt;del&gt;LU-1454&lt;/del&gt;&lt;/a&gt; (which is a duplicate of this ticket) consistently occurred in autotest runs:&lt;br/&gt;
&lt;a href=&quot;http://tinyurl.com/crz74xd&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://tinyurl.com/crz74xd&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="42787" author="green" created="Mon, 6 Aug 2012 21:55:59 +0000"  >&lt;p&gt;So LU-1454 was caused by the same problem, but it&apos;s a faulty test and default behavior.&lt;/p&gt;

&lt;p&gt;What happens there is we have an MDT that has just started, and then we restart the (only) OST.&lt;br/&gt;
Next we connect the client and try to touch a file.&lt;br/&gt;
The MDT does not yet know that the OST restarted, and it happens to not have any precreated objects for this OST. An attempt to get some results in finding out that the OST was restarted and we no longer have a connection to it. No other OSTs to try, so we return an IO error.&lt;/p&gt;

&lt;p&gt;For the Bull-reported failures the situation is less clear; on the other hand, we have had multiple complaints from other sites that registering many targets at the same time on the MGS (many = more than 30 or so at a time) results in strange problems, MGS crashes and such. This warrants its own bug, I guess.&lt;/p&gt;</comment>
                            <comment id="43083" author="yujian" created="Mon, 13 Aug 2012 03:53:45 +0000"  >&lt;p&gt;RHEL6.3/x86_64 (2.1.3 Server + 1.8.8-wc1 Client):&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/bc2dffb8-e427-11e1-b6d3-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/bc2dffb8-e427-11e1-b6d3-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="43542" author="yujian" created="Tue, 21 Aug 2012 06:49:22 +0000"  >&lt;p&gt;Another instance:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/d98970d4-eabb-11e1-b137-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/d98970d4-eabb-11e1-b137-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="46559" author="yujian" created="Mon, 15 Oct 2012 07:06:41 +0000"  >&lt;p&gt;RHEL6.3/x86_64 (2.3.0 RC3 Server + 1.8.8-wc1 Client):&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/e309f73e-169c-11e2-962d-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/e309f73e-169c-11e2-962d-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="197379" author="adilger" created="Mon, 29 May 2017 03:56:41 +0000"  >&lt;p&gt;Close old ticket.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                                                <inwardlinks description="is duplicated by">
                                        <issuelink>
            <issuekey id="14653">LU-1454</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                    <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                    <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvskn:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>8533</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                </customfields>
    </item>
</channel>
</rss>