<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:33:01 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-3333] lustre_msg_get_opc()) incorrect message magic: a0b03b5 LBUG</title>
                <link>https://jira.whamcloud.com/browse/LU-3333</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;I was suspicious when I hit this for the first time, but I hit it once more, so I am filing a bug.&lt;/p&gt;

&lt;p&gt;First time was in sanity test 182&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[78415.059119] LustreError: 17692:0:(mdt_recovery.c:390:mdt_last_rcvd_update()) Trying to overwrite bigger transno:on-disk: 25769825268, new: 25769825267 replay: 0. see LU-617.
[78415.061562] LustreError: 17692:0:(mdt_recovery.c:390:mdt_last_rcvd_update()) Skipped 8 previous similar messages
[78417.103412] LustreError: 31938:0:(pack_generic.c:1032:lustre_msg_get_opc()) incorrect message magic: a0b03b5e(msg:ffff8800aba18df0)
[78417.103916] LustreError: 31938:0:(pack_generic.c:1033:lustre_msg_get_opc()) LBUG
[78417.104332] Pid: 31938, comm: createmany
[78417.104542] 
[78417.104543] Call Trace:
[78417.104925]  [&amp;lt;ffffffffa0b028a5&amp;gt;] libcfs_debug_dumpstack+0x55/0x80 [libcfs]
[78417.105213]  [&amp;lt;ffffffffa0b02ea7&amp;gt;] lbug_with_loc+0x47/0xb0 [libcfs]
[78417.105490]  [&amp;lt;ffffffffa0d5a4ba&amp;gt;] lustre_msg_get_opc+0x8a/0x110 [ptlrpc]
[78417.105778]  [&amp;lt;ffffffffa0d5d97b&amp;gt;] _debug_req+0x57b/0x680 [ptlrpc]
[78417.106087]  [&amp;lt;ffffffff81047fde&amp;gt;] ? kernel_map_pages+0xfe/0x110
[78417.106345]  [&amp;lt;ffffffffa0d4965e&amp;gt;] ptlrpc_free_committed+0x20e/0x620 [ptlrpc]
[78417.106619]  [&amp;lt;ffffffffa0d4b313&amp;gt;] after_reply+0x7a3/0xd90 [ptlrpc]
[78417.106878]  [&amp;lt;ffffffffa0d502c3&amp;gt;] ptlrpc_check_set+0x1093/0x1da0 [ptlrpc]
[78417.107137]  [&amp;lt;ffffffff8107eca0&amp;gt;] ? process_timeout+0x0/0x10
[78417.107389]  [&amp;lt;ffffffffa0d512fa&amp;gt;] ptlrpc_set_wait+0x32a/0x880 [ptlrpc]
[78417.107639]  [&amp;lt;ffffffff8105ad10&amp;gt;] ? default_wake_function+0x0/0x20
[78417.107907]  [&amp;lt;ffffffffa0d5b4c6&amp;gt;] ? lustre_msg_set_jobid+0xb6/0x140 [ptlrpc]
[78417.108187]  [&amp;lt;ffffffffa0d518cf&amp;gt;] ptlrpc_queue_wait+0x7f/0x220 [ptlrpc]
[78417.108453]  [&amp;lt;ffffffffa0d2d9c5&amp;gt;] ldlm_cli_enqueue+0x365/0x770 [ptlrpc]
[78417.119499]  [&amp;lt;ffffffff814fc8bc&amp;gt;] ? __mutex_lock_slowpath+0x21c/0x2c0
[78417.119779]  [&amp;lt;ffffffffa0d326e0&amp;gt;] ? ldlm_completion_ast+0x0/0x950 [ptlrpc]
[78417.120071]  [&amp;lt;ffffffffa0f389e0&amp;gt;] ? ll_md_blocking_ast+0x0/0x750 [lustre]
[78417.120334]  [&amp;lt;ffffffffa0415f55&amp;gt;] mdc_enqueue+0x795/0x18c0 [mdc]
[78417.120583]  [&amp;lt;ffffffffa041727d&amp;gt;] mdc_intent_lock+0x1fd/0x5b4 [mdc]
[78417.120847]  [&amp;lt;ffffffffa0f389e0&amp;gt;] ? ll_md_blocking_ast+0x0/0x750 [lustre]
[78417.121427]  [&amp;lt;ffffffffa0d326e0&amp;gt;] ? ldlm_completion_ast+0x0/0x950 [ptlrpc]
[78417.121688]  [&amp;lt;ffffffffa03c8667&amp;gt;] ? lmv_fid_alloc+0x117/0x4b0 [lmv]
[78417.121952]  [&amp;lt;ffffffffa03e02d6&amp;gt;] lmv_intent_open+0x1f6/0x8c0 [lmv]
[78417.122206]  [&amp;lt;ffffffffa0f389e0&amp;gt;] ? ll_md_blocking_ast+0x0/0x750 [lustre]
[78417.122464]  [&amp;lt;ffffffff811661ca&amp;gt;] ? cache_alloc_debugcheck_after+0x14a/0x210
[78417.122720]  [&amp;lt;ffffffff81168671&amp;gt;] ? __kmalloc+0x191/0x2a0
[78417.122958]  [&amp;lt;ffffffffa03e0c4b&amp;gt;] lmv_intent_lock+0x2ab/0x370 [lmv]
[78417.123211]  [&amp;lt;ffffffffa0f389e0&amp;gt;] ? ll_md_blocking_ast+0x0/0x750 [lustre]
[78417.123471]  [&amp;lt;ffffffffa0f37a9e&amp;gt;] ? ll_i2gids+0x2e/0xd0 [lustre]
[78417.123718]  [&amp;lt;ffffffffa0f1e4ba&amp;gt;] ? ll_prep_md_op_data+0xfa/0x3a0 [lustre]
[78417.123983]  [&amp;lt;ffffffffa0f3d014&amp;gt;] ll_lookup_it+0x3a4/0xbf0 [lustre]
[78417.124273]  [&amp;lt;ffffffffa0f389e0&amp;gt;] ? ll_md_blocking_ast+0x0/0x750 [lustre]
[78417.124539]  [&amp;lt;ffffffffa0f3d8e9&amp;gt;] ll_lookup_nd+0x89/0x3b0 [lustre]
[78417.124784]  [&amp;lt;ffffffff8119b456&amp;gt;] ? d_alloc+0x166/0x1d0
[78417.125087]  [&amp;lt;ffffffff8118e8e2&amp;gt;] __lookup_hash+0x102/0x160
[78417.125322]  [&amp;lt;ffffffff8118ef8a&amp;gt;] lookup_hash+0x3a/0x50
[78417.125550]  [&amp;lt;ffffffff81192edb&amp;gt;] do_filp_open+0x2eb/0xe00
[78417.125790]  [&amp;lt;ffffffff814fae88&amp;gt;] ? thread_return+0x4e/0x776
[78417.126034]  [&amp;lt;ffffffff8118f37b&amp;gt;] ? getname+0x3b/0x250
[78417.126259]  [&amp;lt;ffffffff8119fd7b&amp;gt;] ? alloc_fd+0xab/0x160
[78417.126490]  [&amp;lt;ffffffff8117e399&amp;gt;] do_sys_open+0x69/0x140
[78417.126717]  [&amp;lt;ffffffff8100c575&amp;gt;] ? math_state_restore+0x45/0x60
[78417.126965]  [&amp;lt;ffffffff8117e4b0&amp;gt;] sys_open+0x20/0x30
[78417.127190]  [&amp;lt;ffffffff8100b0b2&amp;gt;] system_call_fastpath+0x16/0x1b
[78417.127428] 
[78417.169930] Kernel panic - not syncing: LBUG
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;Crashdumps and modules for this one are in:&lt;br/&gt;
/exports/crashdumps/192.168.10.224-2013-05-09-20\:43\:05&lt;/p&gt;

&lt;p&gt;The other crash was like this:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[79955.565718] Lustre: DEBUG MARKER: == sanity test 71: Running dbench on lustre (don&apos;t segment fault) ====== 21:01:15 (1368493275)
[80393.197212] LustreError: 25536:0:(pack_generic.c:1032:lustre_msg_get_opc()) incorrect message magic: a0b03b5e(msg:ffff8800464ccdf0)
[80393.197711] LustreError: 25536:0:(pack_generic.c:1033:lustre_msg_get_opc()) LBUG
[80393.209355] Pid: 25536, comm: dbench
[80393.209559] 
[80393.209559] Call Trace:
[80393.209964]  [&amp;lt;ffffffffa0b028a5&amp;gt;] libcfs_debug_dumpstack+0x55/0x80 [libcfs]
[80393.210228]  [&amp;lt;ffffffffa0b02ea7&amp;gt;] lbug_with_loc+0x47/0xb0 [libcfs]
[80393.210502]  [&amp;lt;ffffffffa0d5a4ba&amp;gt;] lustre_msg_get_opc+0x8a/0x110 [ptlrpc]
[80393.212853]  [&amp;lt;ffffffffa0d5d97b&amp;gt;] _debug_req+0x57b/0x680 [ptlrpc]
[80393.213149]  [&amp;lt;ffffffff81047fde&amp;gt;] ? kernel_map_pages+0xfe/0x110
[80393.213412]  [&amp;lt;ffffffffa0d4965e&amp;gt;] ptlrpc_free_committed+0x20e/0x620 [ptlrpc]
[80393.213689]  [&amp;lt;ffffffffa0d4b313&amp;gt;] after_reply+0x7a3/0xd90 [ptlrpc]
[80393.213967]  [&amp;lt;ffffffffa0d502c3&amp;gt;] ptlrpc_check_set+0x1093/0x1da0 [ptlrpc]
[80393.214225]  [&amp;lt;ffffffff8107eca0&amp;gt;] ? process_timeout+0x0/0x10
[80393.214477]  [&amp;lt;ffffffffa0d512fa&amp;gt;] ptlrpc_set_wait+0x32a/0x880 [ptlrpc]
[80393.214733]  [&amp;lt;ffffffff8105ad10&amp;gt;] ? default_wake_function+0x0/0x20
[80393.214995]  [&amp;lt;ffffffffa0d5b4c6&amp;gt;] ? lustre_msg_set_jobid+0xb6/0x140 [ptlrpc]
[80393.215271]  [&amp;lt;ffffffffa0d518cf&amp;gt;] ptlrpc_queue_wait+0x7f/0x220 [ptlrpc]
[80393.215526]  [&amp;lt;ffffffffa0401f59&amp;gt;] mdc_sync+0xb9/0x230 [mdc]
[80393.215773]  [&amp;lt;ffffffffa03d576e&amp;gt;] lmv_sync+0x2ae/0x550 [lmv]
[80393.216022]  [&amp;lt;ffffffffa0f0a6ab&amp;gt;] ll_fsync+0x1ab/0x860 [lustre]
[80393.216290]  [&amp;lt;ffffffff811b2401&amp;gt;] vfs_fsync_range+0xa1/0xe0
[80393.216525]  [&amp;lt;ffffffff811b24ad&amp;gt;] vfs_fsync+0x1d/0x20
[80393.216777]  [&amp;lt;ffffffff811b24ee&amp;gt;] do_fsync+0x3e/0x60
[80393.217003]  [&amp;lt;ffffffff811b2540&amp;gt;] sys_fsync+0x10/0x20
[80393.217230]  [&amp;lt;ffffffff8100b0b2&amp;gt;] system_call_fastpath+0x16/0x1b
[80393.217471] 
[80393.228801] Kernel panic - not syncing: LBUG
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;/exports/crashdumps/192.168.10.224-2013-05-13-21\:08\:35/vmcore, same modules as before.&lt;/p&gt;</description>
                <environment></environment>
        <key id="19000">LU-3333</key>
            <summary>lustre_msg_get_opc()) incorrect message magic: a0b03b5 LBUG</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="green">Oleg Drokin</assignee>
                                    <reporter username="green">Oleg Drokin</reporter>
                        <labels>
                    </labels>
                <created>Tue, 14 May 2013 14:41:52 +0000</created>
                <updated>Fri, 6 Mar 2015 00:04:36 +0000</updated>
                            <resolved>Thu, 1 May 2014 17:17:21 +0000</resolved>
                                    <version>Lustre 2.3.0</version>
                    <version>Lustre 2.4.0</version>
                    <version>Lustre 2.1.5</version>
                    <version>Lustre 1.8.9</version>
                                    <fixVersion>Lustre 2.6.0</fixVersion>
                    <fixVersion>Lustre 2.5.2</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>6</watches>
                                                                            <comments>
                            <comment id="58477" author="adilger" created="Tue, 14 May 2013 18:06:11 +0000"  >&lt;p&gt;Separate from fixing the root cause of this patch, it doesn&apos;t make sense to LBUG() on anything coming off of the network.  At worst this should be turned into an error.&lt;/p&gt;</comment>
                            <comment id="59147" author="green" created="Thu, 23 May 2013 06:22:28 +0000"  >&lt;p&gt;I still hit this fairly frequently and caught one instance live today.&lt;/p&gt;

&lt;p&gt;I found the request that caused the LBUG, but in the request the rq_reqmsg does NOT match the lustre_msg pointer printed in the assertion.&lt;br/&gt;
lustre_msg pointer seems to be pointing at some sort of structure that has an mfd handle embedded in it later on (rep buffer content? certainly sounds plausible), how did that happen I am totally not clear.&lt;br/&gt;
the &quot;lustre_msg&quot; magic obviously does not match a proper one, but also does not match what&apos;s in assertion message, instead of the long number (that varies from recompile to recompile, but is the same in the same module version crashes) printed in assertion message, the in-memory value is always 0x2341&lt;/p&gt;

&lt;p&gt;I wonder if it&apos;s an artifact of rq_msg replacement when enlarging.&lt;br/&gt;
Added some debug and going to rerun.&lt;/p&gt;</comment>
                            <comment id="59172" author="green" created="Thu, 23 May 2013 16:16:15 +0000"  >&lt;p&gt;My hunch was right, it seems.&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;00000002:00100000:0.0:1369322005.401613:0:7254:0:(mdc_locks.c:635:mdc_finish_enqueue()) Attempting to enlarge req ffff880010aa67f0 with body ffff8800647f79f8
02000000:00100000:0.0:1369322005.401615:0:7254:0:(sec_null.c:266:null_enlarge_reqbuf()) Moved message ffff88002415ddf0 to ffff88004c1efbf0
...
00000100:00020000:4.0:1369322005.402571:0:7256:0:(pack_generic.c:1032:lustre_msg_get_opc()) incorrect message magic: a0b04b5e(msg:ffff88002415ddf0)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Unfortunately such replacement happens with no locking whatsoever and the open msg is already on the replay list. It&apos;s only a matter of (bad) luck for the race to happen where while trying to print req-&amp;gt;rq_reqmsg the message gets moved to another place and the old one first overwritten and then freed leading to the assertion like what we&apos;ve got here.&lt;/p&gt;

&lt;p&gt;It seems that we need to do this reallocate BEFORE we add the request to the replay list, though the comment in the addition to the list (that I remember I added some time ago before there was such an enlargement code) is advising of urgency of such adding for recovery reason.&lt;br/&gt;
I think it&apos;s still much easier to move the list adding past request extending and increase the race a tiny bit (that would probably be still very unrealistic to hit) than add locking all around req-&amp;gt;rq_reqmsg access.&lt;/p&gt;</comment>
                            <comment id="59392" author="green" created="Mon, 27 May 2013 18:12:22 +0000"  >&lt;p&gt;Tentative patch in &lt;a href=&quot;http://review.whamcloud.com/6467&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/6467&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="82090" author="cliffw" created="Mon, 21 Apr 2014 20:04:37 +0000"  >&lt;p&gt;I seem to have hit this on Hyperion:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;LustreError: 9374:0:(pack_generic.c:487:lustre_msg_buf()) ASSERTION( 0 ) failed: incorrect message magic: 0000000d(msg:ffff881054aab0f8)
LustreError: 9374:0:(pack_generic.c:487:lustre_msg_buf()) LBUG
Pid: 9374, comm: ior

Call Trace:
 [&amp;lt;ffffffffa043d895&amp;gt;] libcfs_debug_dumpstack+0x55/0x80 [libcfs]
 [&amp;lt;ffffffffa043de97&amp;gt;] lbug_with_loc+0x47/0xb0 [libcfs]
 [&amp;lt;ffffffffa07cc5aa&amp;gt;] lustre_msg_buf+0x4a/0x60 [ptlrpc]
 [&amp;lt;ffffffffa07f894d&amp;gt;] _sptlrpc_enlarge_msg_inplace+0x5d/0x1b0 [ptlrpc]
 [&amp;lt;ffffffffa08086d5&amp;gt;] null_enlarge_reqbuf+0xd5/0x200 [ptlrpc]
 [&amp;lt;ffffffffa07f607c&amp;gt;] sptlrpc_cli_enlarge_reqbuf+0x5c/0x160 [ptlrpc]
 [&amp;lt;ffffffffa098ece6&amp;gt;] mdc_finish_enqueue+0xb26/0x1190 [mdc]
 [&amp;lt;ffffffffa07d18a0&amp;gt;] ? lustre_swab_ldlm_reply+0x0/0x40 [ptlrpc]
 [&amp;lt;ffffffffa0990860&amp;gt;] mdc_enqueue+0xfc0/0x1b00 [mdc]
 [&amp;lt;ffffffffa099159e&amp;gt;] mdc_intent_lock+0x1fe/0x63f [mdc]
 [&amp;lt;ffffffffa0a76020&amp;gt;] ? ll_md_blocking_ast+0x0/0x7f0 [lustre]
 [&amp;lt;ffffffffa07aaac0&amp;gt;] ? ldlm_completion_ast+0x0/0x930 [ptlrpc]
 [&amp;lt;ffffffffa093b6aa&amp;gt;] ? lmv_fid_alloc+0x24a/0x3c0 [lmv]
 [&amp;lt;ffffffffa0956165&amp;gt;] lmv_intent_open+0x325/0x9c0 [lmv]
 [&amp;lt;ffffffffa0a76020&amp;gt;] ? ll_md_blocking_ast+0x0/0x7f0 [lustre]
 [&amp;lt;ffffffffa0956abb&amp;gt;] lmv_intent_lock+0x2bb/0x380 [lmv]
 [&amp;lt;ffffffffa0a76020&amp;gt;] ? ll_md_blocking_ast+0x0/0x7f0 [lustre]
 [&amp;lt;ffffffffa0a57b18&amp;gt;] ? ll_prep_md_op_data+0x1a8/0x490 [lustre]
 [&amp;lt;ffffffffa0a7789a&amp;gt;] ll_lookup_it+0x26a/0xad0 [lustre]
 [&amp;lt;ffffffffa0a76020&amp;gt;] ? ll_md_blocking_ast+0x0/0x7f0 [lustre]
 [&amp;lt;ffffffffa0a7818e&amp;gt;] ll_lookup_nd+0x8e/0x5d0 [lustre]
 [&amp;lt;ffffffff811965c2&amp;gt;] __lookup_hash+0x102/0x160
 [&amp;lt;ffffffff81196cea&amp;gt;] lookup_hash+0x3a/0x50
 [&amp;lt;ffffffff8119b7fe&amp;gt;] do_filp_open+0x2de/0xd20
 [&amp;lt;ffffffff81196a76&amp;gt;] ? final_putname+0x26/0x50
 [&amp;lt;ffffffff8114fd1b&amp;gt;] ? __vm_enough_memory+0x3b/0x190
 [&amp;lt;ffffffff811a82e2&amp;gt;] ? alloc_fd+0x92/0x160
 [&amp;lt;ffffffff81185df9&amp;gt;] do_sys_open+0x69/0x140
 [&amp;lt;ffffffff81185f10&amp;gt;] sys_open+0x20/0x30
 [&amp;lt;ffffffff8100b072&amp;gt;] system_call_fastpath+0x16/0x1b
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="82091" author="cliffw" created="Mon, 21 Apr 2014 20:08:12 +0000"  >&lt;p&gt;Log from the LBUG&lt;/p&gt;</comment>
                            <comment id="82350" author="green" created="Thu, 24 Apr 2014 03:34:06 +0000"  >&lt;p&gt;The landed patch from change 6467 has been reverted due to a bug it introduced with sec_null types.&lt;br/&gt;
Instead I resurrected the patchset #1 from there at &lt;a href=&quot;http://review.whamcloud.com/10074&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/10074&lt;/a&gt; that does not have the bug.&lt;/p&gt;</comment>
                            <comment id="82732" author="jlevi" created="Tue, 29 Apr 2014 13:32:06 +0000"  >&lt;p&gt;Cliff,&lt;br/&gt;
Can you confirm this fix and close this ticket if appropriate?&lt;br/&gt;
Thank you!&lt;/p&gt;</comment>
                            <comment id="83022" author="jlevi" created="Thu, 1 May 2014 17:17:21 +0000"  >&lt;p&gt;Reversal of patch completed.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                            <outwardlinks description="duplicates">
                                        <issuelink>
            <issuekey id="24360">LU-4949</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="24387">LU-4956</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is duplicated by">
                                        <issuelink>
            <issuekey id="18788">LU-3309</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="28436">LU-6172</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                            <attachment id="14751" name="lustre-log.1398105923.9374.txt.gz" size="4413522" author="cliffw" created="Mon, 21 Apr 2014 20:08:12 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvqvr:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>8241</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>