<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:41:57 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
<language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-4351] sanity test_54c: can&apos;t find an ext2 filesystem on dev loop3</title>
                <link>https://jira.whamcloud.com/browse/LU-4351</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for sarah &amp;lt;sarah@whamcloud.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;http://maloo.whamcloud.com/test_sets/0ac2cba4-5de4-11e3-8f3c-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://maloo.whamcloud.com/test_sets/0ac2cba4-5de4-11e3-8f3c-52540035b04c&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_54c failed with the following error:&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;error mounting /mnt/lustre/loop54c on /mnt/lustre/d54c&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;client dmesg shows:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[ 2564.211169] LustreError: 9888:0:(rw.c:128:ll_cl_init()) lustre: [0x200001b70:0xeff1:0x0] no active IO, please file a ticket.
[ 2564.211172] Pid: 9888, comm: loop3
[ 2564.211173] 
[ 2564.211174] Call Trace:
[ 2564.211186]  [&amp;lt;ffffffff81004935&amp;gt;] dump_trace+0x75/0x310
[ 2564.211210]  [&amp;lt;ffffffffa05c282a&amp;gt;] libcfs_debug_dumpstack+0x4a/0x70 [libcfs]
[ 2564.211250]  [&amp;lt;ffffffffa0becb8e&amp;gt;] ll_cl_init+0x22e/0x330 [lustre]
[ 2564.211280]  [&amp;lt;ffffffffa0c0901a&amp;gt;] ll_write_begin+0x8a/0x5d0 [lustre]
[ 2564.211305]  [&amp;lt;ffffffffa02cb50d&amp;gt;] do_lo_send_aops+0xad/0x1b0 [loop]
[ 2564.211310]  [&amp;lt;ffffffffa02cb7a0&amp;gt;] do_bio_filebacked+0x190/0x280 [loop]
[ 2564.211314]  [&amp;lt;ffffffffa02cb952&amp;gt;] loop_thread+0xc2/0x250 [loop]
[ 2564.211319]  [&amp;lt;ffffffff81082306&amp;gt;] kthread+0x96/0xa0
[ 2564.211325]  [&amp;lt;ffffffff81467864&amp;gt;] kernel_thread_helper+0x4/0x10
[ 2564.211328] 
[ 2564.211335] Buffer I/O error on device loop3, logical block 0
[ 2564.211336] lost page write due to I/O error on loop3
[ 2564.211345] Pid: 9888, comm: loop3
[ 2564.211346] 
[ 2564.211346] Call Trace:
[ 2564.211350]  [&amp;lt;ffffffff81004935&amp;gt;] dump_trace+0x75/0x310
[ 2564.211360]  [&amp;lt;ffffffffa05c282a&amp;gt;] libcfs_debug_dumpstack+0x4a/0x70 [libcfs]
[ 2564.211377]  [&amp;lt;ffffffffa0becb8e&amp;gt;] ll_cl_init+0x22e/0x330 [lustre]
[ 2564.211403]  [&amp;lt;ffffffffa0c0901a&amp;gt;] ll_write_begin+0x8a/0x5d0 [lustre]
[ 2564.211421]  [&amp;lt;ffffffffa02cb50d&amp;gt;] do_lo_send_aops+0xad/0x1b0 [loop]
[ 2564.211426]  [&amp;lt;ffffffffa02cb7a0&amp;gt;] do_bio_filebacked+0x190/0x280 [loop]
[ 2564.211430]  [&amp;lt;ffffffffa02cb952&amp;gt;] loop_thread+0xc2/0x250 [loop]
[ 2564.211434]  [&amp;lt;ffffffff81082306&amp;gt;] kthread+0x96/0xa0
[ 2564.211445]  [&amp;lt;ffffffff81467864&amp;gt;] kernel_thread_helper+0x4/0x10
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[ 2564.237136] EXT2-fs (loop3): error: can&apos;t find an ext2 filesystem on dev loop3.
[ 2568.722796] Lustre: DEBUG MARKER: /usr/sbin/lctl mark  sanity test_54c: @@@@@@ FAIL: error mounting \/mnt\/lustre\/loop54c on \/mnt\/lustre\/d54c 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment>lustre-master build #1791 ldiskfs &lt;br/&gt;
client is running SLES11 SP3</environment>
        <key id="22353">LU-4351</key>
            <summary>sanity test_54c: can&apos;t find an ext2 filesystem on dev loop3</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="bogl">Bob Glossman</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Thu, 5 Dec 2013 22:02:41 +0000</created>
                <updated>Mon, 21 Jul 2014 15:43:27 +0000</updated>
                            <resolved>Tue, 3 Jun 2014 14:48:32 +0000</resolved>
                                    <version>Lustre 2.6.0</version>
                                    <fixVersion>Lustre 2.6.0</fixVersion>
                    <fixVersion>Lustre 2.5.2</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>11</watches>
                                                                            <comments>
                            <comment id="72958" author="jay" created="Fri, 6 Dec 2013 02:10:53 +0000"  >&lt;p&gt;too bad, this kernel is too old which still calls write_begin() and write_end() to write pages.&lt;/p&gt;</comment>
                            <comment id="73154" author="green" created="Mon, 9 Dec 2013 23:48:24 +0000"  >&lt;p&gt;what do you mean the kernel is OLD? It&apos;s 3.4 or something like that, definitely very new compared to rhel6.x we run.&lt;/p&gt;</comment>
                            <comment id="74726" author="yujian" created="Fri, 10 Jan 2014 09:06:17 +0000"  >&lt;p&gt;Lustre build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-reviews/20841/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-reviews/20841/&lt;/a&gt;&lt;br/&gt;
Distro/Arch: SLES11SP3/x86_64 (both server and client, kernel version: 3.0.101-0.8)&lt;/p&gt;

&lt;p&gt;The same failure occurred:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/7e296c20-7986-11e3-a27b-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/7e296c20-7986-11e3-a27b-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="81569" author="bogl" created="Mon, 14 Apr 2014 21:54:59 +0000"  >&lt;p&gt;This is actually a distinct SLES11 specific bug and not a dup.  The underlying cause is the quite variant implementation of kernel loop devices in SLES11.  It differs from the implementation of similar devices in both earlier and later kernel versions in all other distros yet examined, including much later kernel.org versions.&lt;/p&gt;

&lt;p&gt;The basic problem is that the SLES implementation uses address space ops while all the other linux&apos;es use only regular file I/O ops in their write code for kernel loop devices.  This doesn&apos;t play nicely with embedded assumptions in lustre code.&lt;/p&gt;

&lt;p&gt;Since the problem is expected to go away in later kernel versions, even later SLES kernels, the plan is to just skip the test in current SLES clients as it will always fail there.&lt;/p&gt;</comment>
                            <comment id="81574" author="bogl" created="Mon, 14 Apr 2014 22:16:59 +0000"  >&lt;p&gt;&lt;a href=&quot;http://review.whamcloud.com/9955&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/9955&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="82389" author="bogl" created="Thu, 24 Apr 2014 14:48:37 +0000"  >&lt;p&gt;in b2_5&lt;br/&gt;
&lt;a href=&quot;http://review.whamcloud.com/10085&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/10085&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="83511" author="paf" created="Thu, 8 May 2014 14:55:34 +0000"  >&lt;p&gt;I&apos;m a bit puzzled:&lt;br/&gt;
Cray has been running sanity on 2.x Lustre+SLES11SP3 for some time now.  We only see this test failure when testing master, not 2.5 or 2.4.&lt;br/&gt;
So it seem strange to say the fault lies completely with SLES.&lt;/p&gt;

&lt;p&gt;I&apos;m not saying SLES doesn&apos;t have a bug in it, but wasn&apos;t there a change made on the Lustre side that caused this failure to start happening?  Have you actually seen this test failure with b2_5?&lt;/p&gt;</comment>
                            <comment id="83514" author="bogl" created="Thu, 8 May 2014 15:02:11 +0000"  >&lt;p&gt;yes, this problem has been reproduced in various branches.  not just seen in master.  It&apos;s possible that the SLES11 flaw in their kernel device implementation only cut in with some specific kernel version over the long life of SLES11.  I only looked at recent 3.0.x versions, didn&apos;t check back through all of SLES11 history.&lt;/p&gt;

&lt;p&gt;It&apos;s also possible that the lustre sensitivity to the use of address space ops called directly came about due to recent changes in lustre, like clio or unified target.  Even if that&apos;s true we aren&apos;t likely to back out such new features due to a very SLES11 specific incompatibility.&lt;/p&gt;

&lt;p&gt;Just as an aside have already confirmed that the current SLES12 beta doesn&apos;t have this flaw in its loop devices.  Nor does any other linux kernel we&apos;ve been able to examine.&lt;/p&gt;</comment>
                            <comment id="83516" author="paf" created="Thu, 8 May 2014 15:17:56 +0000"  >&lt;p&gt;Thanks, Bob.  That clarifies.  &lt;/p&gt;

&lt;p&gt;Well, if this use case turns out to be something we/our customers truly care about - I doubt it will, but... - Cray will continue looking in to it to try to figure out a fix and pass on whatever we find.&lt;/p&gt;</comment>
                            <comment id="83884" author="dmiter" created="Mon, 12 May 2014 17:56:47 +0000"  >&lt;p&gt;I still have the error on Xeon Phi:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[12215.497838] LustreError: 33755:0:(rw.c:128:ll_cl_init()) lustre25: [0x200000bd1:0xbfda:0x0] no active IO, please file a ticket.
[12215.497865] Pid: 33755, comm: loop3
[12215.497871]
[12215.497874] Call Trace:
[12215.497934]  [&amp;lt;ffffffffa01615f9&amp;gt;] libcfs_debug_dumpstack+0x5b/0x6d [libcfs]
[12215.497991]  [&amp;lt;ffffffffa066e177&amp;gt;] ll_cl_init+0x1e1/0x30a [lustre]
[12215.498043]  [&amp;lt;ffffffffa0684745&amp;gt;] ll_write_begin+0x120/0x609 [lustre]
[12215.498068]  [&amp;lt;ffffffff8109f2de&amp;gt;] pagecache_write_begin+0x1c/0x1e
[12215.498091]  [&amp;lt;ffffffff81204c8f&amp;gt;] do_lo_send_aops+0x9c/0x17a
[12215.498110]  [&amp;lt;ffffffff81022c8c&amp;gt;] ? set_next_entity+0x45/0x99
[12215.498127]  [&amp;lt;ffffffff81028ae7&amp;gt;] ? pick_next_task_fair+0xaf/0xbf
[12215.498145]  [&amp;lt;ffffffff81204600&amp;gt;] do_bio_filebacked+0xfd/0x27f
[12215.498161]  [&amp;lt;ffffffff81204bf3&amp;gt;] ? do_lo_send_aops+0x0/0x17a
[12215.498178]  [&amp;lt;ffffffff81204bbd&amp;gt;] loop_thread+0x1db/0x211
[12215.498194]  [&amp;lt;ffffffff8104dba0&amp;gt;] ? autoremove_wake_function+0x0/0x38
[12215.498211]  [&amp;lt;ffffffff812049e2&amp;gt;] ? loop_thread+0x0/0x211
[12215.498229]  [&amp;lt;ffffffff8104d611&amp;gt;] kthread+0x84/0x8c
[12215.498247]  [&amp;lt;ffffffff81003814&amp;gt;] kernel_thread_helper+0x4/0x10
[12215.498264]  [&amp;lt;ffffffff8104d58d&amp;gt;] ? kthread+0x0/0x8c
[12215.498278]  [&amp;lt;ffffffff81003810&amp;gt;] ? kernel_thread_helper+0x0/0x10
[12215.498286]
[12215.498321] Buffer I/O error on device loop3, logical block 0
[12215.498333] lost page write due to I/O error on loop3
[12215.498416] Pid: 33755, comm: loop3
[12215.498423]
[12215.498426] Call Trace:
[12215.498459]  [&amp;lt;ffffffffa01615f9&amp;gt;] libcfs_debug_dumpstack+0x5b/0x6d [libcfs]
[12215.498508]  [&amp;lt;ffffffffa066e177&amp;gt;] ll_cl_init+0x1e1/0x30a [lustre]
[12215.498560]  [&amp;lt;ffffffffa0684745&amp;gt;] ll_write_begin+0x120/0x609 [lustre]
[12215.498577]  [&amp;lt;ffffffff8109f2de&amp;gt;] pagecache_write_begin+0x1c/0x1e
[12215.498594]  [&amp;lt;ffffffff81204c8f&amp;gt;] do_lo_send_aops+0x9c/0x17a
[12215.498615]  [&amp;lt;ffffffff81351ed8&amp;gt;] ? __slab_free+0x78/0xd8
[12215.498630]  [&amp;lt;ffffffff810a323b&amp;gt;] ? mempool_free_slab+0x17/0x19
[12215.498647]  [&amp;lt;ffffffff81204600&amp;gt;] do_bio_filebacked+0xfd/0x27f
[12215.498663]  [&amp;lt;ffffffff81204bf3&amp;gt;] ? do_lo_send_aops+0x0/0x17a
[12215.498685]  [&amp;lt;ffffffff8110605f&amp;gt;] ? bio_free+0x3d/0x54
[12215.498700]  [&amp;lt;ffffffff8110608b&amp;gt;] ? bio_fs_destructor+0x15/0x17
[12215.498716]  [&amp;lt;ffffffff81105cc3&amp;gt;] ? bio_put+0x2b/0x2d
[12215.498731]  [&amp;lt;ffffffff81204bbd&amp;gt;] loop_thread+0x1db/0x211
[12215.498745]  [&amp;lt;ffffffff8104dba0&amp;gt;] ? autoremove_wake_function+0x0/0x38
[12215.498762]  [&amp;lt;ffffffff812049e2&amp;gt;] ? loop_thread+0x0/0x211
[12215.498778]  [&amp;lt;ffffffff8104d611&amp;gt;] kthread+0x84/0x8c
[12215.498794]  [&amp;lt;ffffffff81003814&amp;gt;] kernel_thread_helper+0x4/0x10
[12215.498811]  [&amp;lt;ffffffff8104d58d&amp;gt;] ? kthread+0x0/0x8c
[12215.498825]  [&amp;lt;ffffffff81003810&amp;gt;] ? kernel_thread_helper+0x0/0x10
[12215.498833]
[12215.498862] Buffer I/O error on device loop3, logical block 1
[12215.498874] lost page write due to I/O error on loop3
[12215.498950] Pid: 33755, comm: loop3
...
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="83888" author="bogl" created="Mon, 12 May 2014 18:01:47 +0000"  >&lt;p&gt;Dmitry,&lt;br/&gt;
  Are you using a SLES kernel on the Phi?  if so then this is expected behavior.  If not then it may be an entirely different problem.&lt;/p&gt;</comment>
                            <comment id="83891" author="dmiter" created="Mon, 12 May 2014 18:08:14 +0000"  >&lt;p&gt;No, it&apos;s upstream kernel 2.6.38 with patches for Xeon Phi.&lt;/p&gt;</comment>
                            <comment id="83895" author="bogl" created="Mon, 12 May 2014 18:20:15 +0000"  >&lt;p&gt;I see your call stack has do_lo_send_aops in it.  That says this is likely the same bug that&apos;s in the SLES11 kernel.    Does any of your Phi specific patches apply in drivers/block/loop.c?  If not then this bug probably exists in 2.6.38 upstream kernel source you are using.  Previously had only seen this internal routine in SLES 11 (3.0.x) kernels.  Not seen in el6 (2.6.32-xxx) kernels or fc20, rhel7, sles12.&lt;/p&gt;

&lt;p&gt;As far as I know there is no workaround.&lt;/p&gt;</comment>
                            <comment id="83987" author="bogl" created="Tue, 13 May 2014 14:49:26 +0000"  >&lt;p&gt;I have been researching the use of address space ops in the kernel loop device code a bit.  As far as I can tell it&apos;s been part of the mainline upstream linux kernel for a very long time until the following commit in 2011:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;author	Christoph Hellwig &amp;lt;hch@infradead.org&amp;gt;	2011-10-17 10:57:20 (GMT)
committer	Jens Axboe &amp;lt;axboe@kernel.dk&amp;gt;	2011-10-17 10:57:20 (GMT)
commit	456be1484ffc72a24bdb4200b5847c4fa90139d9 (patch)
tree	570f0818bd6cfa245ab23d0121853b7b1e5a649b /drivers/block/loop.c
parent	8bc03e8f3a334e09e89a7dffb486ee97a5ce84ae (diff)
loop: remove the incorrect write_begin/write_end shortcut
Currently the loop device tries to call directly into write_begin/write_end
instead of going through -&amp;gt;write if it can.  This is a fairly nasty shortcut
as write_begin and write_end are only callbacks for the generic write code
and expect to be called with filesystem specific locks held.

This code currently causes various issues for clustered filesystems as it
doesn&apos;t take the required cluster locks, and it also causes issues for XFS
as it doesn&apos;t properly lock against the swapext ioctl as called by the
defragmentation tools.  This in case causes data corruption if
defragmentation hits a busy loop device in the wrong time window, as
reported by RH QA.

The reason why we have this shortcut is that it saves a data copy when
doing a transformation on the loop device, which is the technical term
for using cryptoloop (or an XOR transformation).  Given that cryptoloop
has been deprecated in favour of dm-crypt my opinion is that we should
simply drop this shortcut instead of finding complicated ways to to
introduce a formal interface for this shortcut.

Signed-off-by: Christoph Hellwig &amp;lt;hch@lst.de&amp;gt;
Signed-off-by: Jens Axboe &amp;lt;axboe@kernel.dk&amp;gt;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;I suspect the reason we don&apos;t see it in el6 is that yanking it out was one of the RH customizations of their distro kernel.&lt;br/&gt;
I suspect the reason it&apos;s in SLES11 is the branch their kernel is derived from is too old to include the upstream kernel commit.&lt;/p&gt;</comment>
                            <comment id="84705" author="bogl" created="Thu, 22 May 2014 15:30:24 +0000"  >&lt;p&gt;Recent bug report submitted upstream to SuSE and marked fixed suggests this bug will very likely disappear after the next sles11sp3 kernel update.&lt;/p&gt;</comment>
                            <comment id="84707" author="paf" created="Thu, 22 May 2014 15:48:30 +0000"  >&lt;p&gt;Bob: Are you able to provide a link or other info about that bug report?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;</comment>
                            <comment id="84709" author="bogl" created="Thu, 22 May 2014 15:56:35 +0000"  >&lt;p&gt;Patrick,&lt;br/&gt;
It&apos;s &lt;a href=&quot;https://bugzilla.novell.com/show_bug.cgi?id=878123&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://bugzilla.novell.com/show_bug.cgi?id=878123&lt;/a&gt;.  Don&apos;t know if that&apos;s generally accessible.&lt;/p&gt;</comment>
                            <comment id="84711" author="bogl" created="Thu, 22 May 2014 15:59:30 +0000"  >&lt;p&gt;btw, the fix isn&apos;t in the latest update to 3.0.101-0.29.  I expect it in the next one.&lt;/p&gt;</comment>
                            <comment id="84712" author="paf" created="Thu, 22 May 2014 16:09:06 +0000"  >&lt;p&gt;Thanks, Bob - It doesn&apos;t seem to be, but Cray has support contracts with SUSE, so someone around here should be able to get me in. &lt;img class=&quot;emoticon&quot; src=&quot;https://jira.whamcloud.com/images/icons/emoticons/smile.png&quot; height=&quot;16&quot; width=&quot;16&quot; align=&quot;absmiddle&quot; alt=&quot;&quot; border=&quot;0&quot;/&gt;&lt;/p&gt;

&lt;p&gt;Much appreciated.&lt;/p&gt;</comment>
                            <comment id="85590" author="jlevi" created="Tue, 3 Jun 2014 14:48:33 +0000"  >&lt;p&gt;Patches have landed.&lt;/p&gt;</comment>
                            <comment id="89629" author="bogl" created="Mon, 21 Jul 2014 15:43:27 +0000"  >&lt;p&gt;fixed in sles11sp3 as well in the most recent upstream kernel update from SuSE to version 3.0.101-0.35&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                            <outwardlinks description="duplicates">
                                        <issuelink>
            <issuekey id="24621">LU-5032</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwarj:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>11919</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>