<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:13:34 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-1107] OSS dumps Call traces occasionally</title>
                <link>https://jira.whamcloud.com/browse/LU-1107</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;Our customer is occasionally seeing the OSS dump the following call traces.&lt;/p&gt;

&lt;p&gt;This is similar to &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-745&quot; title=&quot;ost-pools test 23 hung&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-745&quot;&gt;&lt;del&gt;LU-745&lt;/del&gt;&lt;/a&gt;, but this customer installed lustre-1.8.7, in which &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-745&quot; title=&quot;ost-pools test 23 hung&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-745&quot;&gt;&lt;del&gt;LU-745&lt;/del&gt;&lt;/a&gt; should already be fixed.&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Feb 14 15:03:45 oss02 kernel:  [&amp;lt;ffffffff8002e024&amp;gt;] __wake_up+0x38/0x4f
Feb 14 15:03:45 oss02 kernel:  [&amp;lt;ffffffff88aba7f3&amp;gt;] jbd2_log_wait_commit+0xa3/0xf5 [jbd2]
Feb 14 15:03:45 oss02 kernel:  [&amp;lt;ffffffff800a2dff&amp;gt;] autoremove_wake_function+0x0/0x2e
Feb 14 15:03:45 oss02 kernel:  [&amp;lt;ffffffff88b6590b&amp;gt;] fsfilt_ldiskfs_commit_wait+0xab/0xd0 [fsfilt_ldiskfs]
Feb 14 15:03:45 oss02 kernel:  [&amp;lt;ffffffff88ba6194&amp;gt;] filter_commitrw_write+0x1e14/0x2dd0 [obdfilter]
Feb 14 15:03:45 oss02 kernel:  [&amp;lt;ffffffff88b47d09&amp;gt;] ost_brw_write+0x1c99/0x2480 [ost]
Feb 14 15:03:45 oss02 kernel:  [&amp;lt;ffffffff88881ac8&amp;gt;] ptlrpc_send_reply+0x5e8/0x600 [ptlrpc]
Feb 14 15:03:45 oss02 kernel:  [&amp;lt;ffffffff8884c8b0&amp;gt;] target_committed_to_req+0x40/0x120 [ptlrpc]
Feb 14 15:03:45 oss02 kernel:  [&amp;lt;ffffffff8008e7f9&amp;gt;] default_wake_function+0x0/0xe
Feb 14 15:03:45 oss02 kernel:  [&amp;lt;ffffffff888860a8&amp;gt;] lustre_msg_check_version_v2+0x8/0x20 [ptlrpc]
Feb 14 15:03:45 oss02 kernel:  [&amp;lt;ffffffff88b4b09e&amp;gt;] ost_handle+0x2bae/0x55b0 [ost]
Feb 14 15:03:45 oss02 kernel:  [&amp;lt;ffffffff888956d9&amp;gt;] ptlrpc_server_handle_request+0x989/0xe00 [ptlrpc]
Feb 14 15:03:45 oss02 kernel:  [&amp;lt;ffffffff88895e35&amp;gt;] ptlrpc_wait_event+0x2e5/0x310 [ptlrpc]
Feb 14 15:03:45 oss02 kernel:  [&amp;lt;ffffffff8008e7f9&amp;gt;] default_wake_function+0x0/0xe
Feb 14 15:03:45 oss02 kernel:  [&amp;lt;ffffffff88896dc6&amp;gt;] ptlrpc_main+0xf66/0x1120 [ptlrpc]
Feb 14 15:03:45 oss02 kernel:  [&amp;lt;ffffffff8005dfb1&amp;gt;] child_rip+0xa/0x11
Feb 14 15:03:45 oss02 kernel:  [&amp;lt;ffffffff88895e60&amp;gt;] ptlrpc_main+0x0/0x1120 [ptlrpc]
Feb 14 15:03:45 oss02 kernel:  [&amp;lt;ffffffff8005dfa7&amp;gt;] child_rip+0x0/0x11
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;The other call trace is here:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff80046823&amp;gt;] try_to_wake_up+0x27/0x484
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff8008cc1e&amp;gt;] __wake_up_common+0x3e/0x68
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff8028b1ca&amp;gt;] __down_trylock+0x39/0x4e
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff8006473d&amp;gt;] __down_failed_trylock+0x35/0x3a
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff8886c0c1&amp;gt;] ldlm_pool_shrink+0x31/0xf0 [ptlrpc]
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff8884a1e6&amp;gt;] .text.lock.ldlm_resource+0x7d/0x87 [ptlrpc]
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff8886d24c&amp;gt;] ldlm_pools_shrink+0x15c/0x2f0 [ptlrpc]
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff80064614&amp;gt;] __down_read+0x12/0x92
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff8003f285&amp;gt;] shrink_slab+0xdc/0x153
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff800ce4ce&amp;gt;] zone_reclaim+0x235/0x2cd
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff800ca81e&amp;gt;] __rmqueue+0x44/0xc6
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff8000a939&amp;gt;] get_page_from_freelist+0xbf/0x442
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff8000f46f&amp;gt;] __alloc_pages+0x78/0x308
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff80025e20&amp;gt;] find_or_create_page+0x32/0x72
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff88b98445&amp;gt;] filter_get_page+0x35/0x70 [obdfilter]
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff88b9a68a&amp;gt;] filter_preprw+0x14da/0x1e00 [obdfilter]
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff8876d121&amp;gt;] LNetMDBind+0x301/0x450 [lnet]
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff887d5d30&amp;gt;] class_handle2object+0xe0/0x170 [obdclass]
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff88b4500c&amp;gt;] ost_brw_write+0xf9c/0x2480 [ost]
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff8876d121&amp;gt;] LNetMDBind+0x301/0x450 [lnet]
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff88889c65&amp;gt;] lustre_msg_set_limit+0x35/0xf0 [ptlrpc]
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff88883fe5&amp;gt;] lustre_msg_get_version+0x35/0xf0 [ptlrpc]
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff88883ef5&amp;gt;] lustre_msg_get_opc+0x35/0xf0 [ptlrpc]
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff8008e7f9&amp;gt;] default_wake_function+0x0/0xe
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff888840a8&amp;gt;] lustre_msg_check_version_v2+0x8/0x20 [ptlrpc]
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff88b4909e&amp;gt;] ost_handle+0x2bae/0x55b0 [ost]
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff887d5d30&amp;gt;] class_handle2object+0xe0/0x170 [obdclass]
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff8883e19a&amp;gt;] lock_res_and_lock+0xba/0xd0 [ptlrpc]
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff88843168&amp;gt;] __ldlm_handle2lock+0x2f8/0x360 [ptlrpc]
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff888936d9&amp;gt;] ptlrpc_server_handle_request+0x989/0xe00 [ptlrpc]
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff88893e35&amp;gt;] ptlrpc_wait_event+0x2e5/0x310 [ptlrpc]
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff8008cc1e&amp;gt;] __wake_up_common+0x3e/0x68
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff88894dc6&amp;gt;] ptlrpc_main+0xf66/0x1120 [ptlrpc]
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff8005dfb1&amp;gt;] child_rip+0xa/0x11
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff88893e60&amp;gt;] ptlrpc_main+0x0/0x1120 [ptlrpc]
Feb 15 11:10:47 oss03 kernel:  [&amp;lt;ffffffff8005dfa7&amp;gt;] child_rip+0x0/0x11
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Is this related to any known bugs?&lt;/p&gt;</description>
                <environment></environment>
        <key id="13191">LU-1107</key>
            <summary>OSS dumps Call traces occasionally</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="5">Cannot Reproduce</resolution>
                                        <assignee username="green">Oleg Drokin</assignee>
                                    <reporter username="ihara">Shuichi Ihara</reporter>
                        <labels>
                    </labels>
                <created>Wed, 15 Feb 2012 13:00:19 +0000</created>
                <updated>Mon, 29 May 2017 03:19:42 +0000</updated>
                            <resolved>Mon, 29 May 2017 03:19:42 +0000</resolved>
                                    <version>Lustre 1.8.7</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>5</watches>
                                                                            <comments>
                            <comment id="28790" author="green" created="Wed, 15 Feb 2012 15:55:40 +0000"  >&lt;p&gt;The two call traces are quite of different nature I would say.&lt;br/&gt;
The first sample looks like some sort of journal deadlock. In that case it would have been very useful to have backtraces for all processes in the system. I checked the logs and the samples there are very limited.&lt;/p&gt;

&lt;p&gt;The other stack trace looks like an out-of-memory-induced ldlm shrink problem; I think Johann battled quite a few of those not so long ago.&lt;br/&gt;
I propose you separate these two reports into two different bugs.&lt;/p&gt;</comment>
                            <comment id="28856" author="ihara" created="Wed, 15 Feb 2012 23:43:19 +0000"  >&lt;p&gt;got backtrace when the problem happens.&lt;/p&gt;

&lt;p&gt;In addition, there are some updates. While the OSS is dumping the call traces, the clients can&apos;t access the servers, but after 30-60 minutes the servers recovered without a reboot or any other action.&lt;/p&gt;</comment>
                            <comment id="28889" author="ihara" created="Thu, 16 Feb 2012 09:45:10 +0000"  >&lt;p&gt;The customer hit same situation again today. They did sysrq-t to get backtrace. pleaes have a look at attached all oss log files.&lt;/p&gt;</comment>
                            <comment id="28896" author="cliffw" created="Thu, 16 Feb 2012 10:52:06 +0000"  >&lt;p&gt;What is in the logs prior to the stack dumps? There should be a related error, indicating why the stack was dumped.&lt;/p&gt;</comment>
                            <comment id="28900" author="ihara" created="Thu, 16 Feb 2012 11:10:24 +0000"  >&lt;p&gt;hmm.. they hit same situation that client can&apos;t access to the servers, but not many call traces today. &lt;br/&gt;
see messages_oss03 at Feb 16 13:12:15 and Feb 16 22:30:30, these are when the problme happened.&lt;/p&gt;</comment>
                            <comment id="29143" author="ihara" created="Thu, 16 Feb 2012 20:47:14 +0000"  >&lt;p&gt;this is client log. it was hanging when the problem happens. Is it related to &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-25&quot; title=&quot;Blocking network request in ldlm shrinker&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-25&quot;&gt;&lt;del&gt;LU-25&lt;/del&gt;&lt;/a&gt;?&lt;/p&gt;</comment>
                            <comment id="29505" author="ihara" created="Tue, 21 Feb 2012 11:17:56 +0000"  >&lt;p&gt;OK, I want to focus jbd2 related problem on this ticket. Please change the title to &quot;jbd2 deadlock&quot; whatever reasonable title name.&lt;/p&gt;

&lt;p&gt;Today, we saw the same jbd2-related call trace on an OSS, and got sysrq-t output before umount/mount of the OSTs.&lt;/p&gt;

&lt;p&gt;After umount/mount of the OSTs without a reboot, the problem was gone. But, in fact, the same problem happened again.&lt;/p&gt;</comment>
                            <comment id="29506" author="ihara" created="Tue, 21 Feb 2012 11:19:48 +0000"  >&lt;p&gt;the log files when the problem happened again at 02/22&lt;/p&gt;</comment>
                            <comment id="29615" author="green" created="Thu, 23 Feb 2012 00:16:56 +0000"  >&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Feb 22 06:02:18 oss01 kernel: Lustre: Service thread pid 18979 was inactive for 200.00s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
Feb 22 06:02:18 oss01 kernel: Pid: 18979, comm: ll_ost_io_78
Feb 22 06:02:18 oss01 kernel: 
Feb 22 06:02:18 oss01 kernel: Call Trace:
Feb 22 06:02:18 oss01 kernel:  [&amp;lt;ffffffff889f47f3&amp;gt;] jbd2_log_wait_commit+0xa3/0xf5 [jbd2]
Feb 22 06:02:18 oss01 kernel:  [&amp;lt;ffffffff800a2dff&amp;gt;] autoremove_wake_function+0x0/0x2e
Feb 22 06:02:18 oss01 kernel:  [&amp;lt;ffffffff88a9f90b&amp;gt;] fsfilt_ldiskfs_commit_wait+0xab/0xd0 [fsfilt_ldiskfs]
Feb 22 06:02:18 oss01 kernel:  [&amp;lt;ffffffff88ae0194&amp;gt;] filter_commitrw_write+0x1e14/0x2dd0 [obdfilter]
Feb 22 06:02:18 oss01 kernel:  [&amp;lt;ffffffff88a81d09&amp;gt;] ost_brw_write+0x1c99/0x2480 [ost]
Feb 22 06:02:18 oss01 kernel:  [&amp;lt;ffffffff887bbac8&amp;gt;] ptlrpc_send_reply+0x5e8/0x600 [ptlrpc]
Feb 22 06:02:18 oss01 kernel:  [&amp;lt;ffffffff887868b0&amp;gt;] target_committed_to_req+0x40/0x120 [ptlrpc]
Feb 22 06:02:18 oss01 kernel:  [&amp;lt;ffffffff8008e7f9&amp;gt;] default_wake_function+0x0/0xe
Feb 22 06:02:18 oss01 kernel:  [&amp;lt;ffffffff887c00a8&amp;gt;] lustre_msg_check_version_v2+0x8/0x20 [ptlrpc]
Feb 22 06:02:18 oss01 kernel:  [&amp;lt;ffffffff88a8509e&amp;gt;] ost_handle+0x2bae/0x55b0 [ost]
Feb 22 06:02:18 oss01 kernel:  [&amp;lt;ffffffff88711d30&amp;gt;] class_handle2object+0xe0/0x170 [obdclass]
Feb 22 06:02:18 oss01 kernel:  [&amp;lt;ffffffff8877a19a&amp;gt;] lock_res_and_lock+0xba/0xd0 [ptlrpc]
Feb 22 06:02:18 oss01 kernel:  [&amp;lt;ffffffff8877f168&amp;gt;] __ldlm_handle2lock+0x2f8/0x360 [ptlrpc]
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff887cf6d9&amp;gt;] ptlrpc_server_handle_request+0x989/0xe00 [ptlrpc]
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff887cfe35&amp;gt;] ptlrpc_wait_event+0x2e5/0x310 [ptlrpc]
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff8008cc1e&amp;gt;] __wake_up_common+0x3e/0x68
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff887d0dc6&amp;gt;] ptlrpc_main+0xf66/0x1120 [ptlrpc]
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff8005dfb1&amp;gt;] child_rip+0xa/0x11
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff887cfe60&amp;gt;] ptlrpc_main+0x0/0x1120 [ptlrpc]
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff8005dfa7&amp;gt;] child_rip+0x0/0x11
Feb 22 06:02:19 oss01 kernel: 
Feb 22 06:02:19 oss01 kernel: LustreError: dumping log to /tmp/lustre-log.1329858138.18979
Feb 22 06:02:19 oss01 kernel: Lustre: Service thread pid 18919 was inactive for 200.00s. The thread might be hung, or it might only be slow and will resume later. Dumping the stack trace for debugging purposes:
Feb 22 06:02:19 oss01 kernel: Pid: 18919, comm: ll_ost_io_18
Feb 22 06:02:19 oss01 kernel: 
Feb 22 06:02:19 oss01 kernel: Call Trace:
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff80062ff2&amp;gt;] thread_return+0x62/0xfe
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff80046c6e&amp;gt;] try_to_wake_up+0x472/0x484
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff80064a0b&amp;gt;] __down+0xc3/0xd8
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff8008e7f9&amp;gt;] default_wake_function+0x0/0xe
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff800646c9&amp;gt;] __down_failed+0x35/0x3a
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff88a316a0&amp;gt;] ldiskfs_destroy_inode+0x0/0x90 [ldiskfs]
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff887861e6&amp;gt;] .text.lock.ldlm_resource+0x7d/0x87 [ptlrpc]
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff887a9337&amp;gt;] ldlm_pools_shrink+0x247/0x2f0 [ptlrpc]
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff80064614&amp;gt;] __down_read+0x12/0x92
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff800223e9&amp;gt;] __up_read+0x19/0x7f
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff8003f279&amp;gt;] shrink_slab+0xd0/0x153
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff800ce4ce&amp;gt;] zone_reclaim+0x235/0x2cd
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff80017ce1&amp;gt;] cache_grow+0x360/0x3c7
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff8000a939&amp;gt;] get_page_from_freelist+0xbf/0x442
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff8000f46f&amp;gt;] __alloc_pages+0x78/0x308
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff80025e20&amp;gt;] find_or_create_page+0x32/0x72
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff88ad4445&amp;gt;] filter_get_page+0x35/0x70 [obdfilter]
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff88ad668a&amp;gt;] filter_preprw+0x14da/0x1e00 [obdfilter]
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff886a9121&amp;gt;] LNetMDBind+0x301/0x450 [lnet]
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff88711d30&amp;gt;] class_handle2object+0xe0/0x170 [obdclass]
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff88a8100c&amp;gt;] ost_brw_write+0xf9c/0x2480 [ost]
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff887bbac8&amp;gt;] ptlrpc_send_reply+0x5e8/0x600 [ptlrpc]
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff887868b0&amp;gt;] target_committed_to_req+0x40/0x120 [ptlrpc]
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff887bffe5&amp;gt;] lustre_msg_get_version+0x35/0xf0 [ptlrpc]
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff887bfef5&amp;gt;] lustre_msg_get_opc+0x35/0xf0 [ptlrpc]
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff8008e7f9&amp;gt;] default_wake_function+0x0/0xe
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff887c00a8&amp;gt;] lustre_msg_check_version_v2+0x8/0x20 [ptlrpc]
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff88a8509e&amp;gt;] ost_handle+0x2bae/0x55b0 [ost]
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff88711d30&amp;gt;] class_handle2object+0xe0/0x170 [obdclass]
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff8877a19a&amp;gt;] lock_res_and_lock+0xba/0xd0 [ptlrpc]
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff8877f168&amp;gt;] __ldlm_handle2lock+0x2f8/0x360 [ptlrpc]
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff887cf6d9&amp;gt;] ptlrpc_server_handle_request+0x989/0xe00 [ptlrpc]
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff887cfe35&amp;gt;] ptlrpc_wait_event+0x2e5/0x310 [ptlrpc]
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff8008cc1e&amp;gt;] __wake_up_common+0x3e/0x68
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff887d0dc6&amp;gt;] ptlrpc_main+0xf66/0x1120 [ptlrpc]
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff8005dfb1&amp;gt;] child_rip+0xa/0x11
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff887cfe60&amp;gt;] ptlrpc_main+0x0/0x1120 [ptlrpc]
Feb 22 06:02:19 oss01 kernel:  [&amp;lt;ffffffff8005dfa7&amp;gt;] child_rip+0x0/0x11
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;This second stack trace is the culprit, I guess. Attempting to allocate some memory got into the ldlm shrinker, which tried to drop an ldiskfs inode that might hold a journal handle or something, upsetting the other thread as a result.&lt;/p&gt;

&lt;p&gt;I think we had some patches about it that I am now trying to find.&lt;/p&gt;
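
&lt;p&gt;(For context: this is the classic shrinker re-entrancy hazard. Below is a minimal sketch of the RHEL5-era (2.6.18) shrinker contract; the function names are hypothetical, but the convention is the kernel&apos;s: a shrinker invoked from an allocation that may not re-enter filesystem code has to refuse to do any fs work, which is what the __GFP_FS check discussed later is about.)&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;/* Sketch of a 2.6.18-style shrinker callback, registered via
 * set_shrinker(DEFAULT_SEEKS, example_cache_shrink).  shrink_slab() calls
 * it during memory reclaim with the original allocation&apos;s gfp_mask. */
static int example_cache_shrink(int nr, gfp_t gfp_mask)
{
        /* nr == 0 is only a query for the current cache size. */
        if (nr == 0)
                return example_cache_count();

        /* If the allocation that triggered reclaim may not re-enter the
         * filesystem, bail out: dropping e.g. an ldiskfs inode from here
         * can deadlock against a journal the caller already holds. */
        if (!(gfp_mask &amp;amp; __GFP_FS))
                return -1;

        example_cache_free(nr);         /* free up to nr entries */
        return example_cache_count();   /* report what is left */
}
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>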
                            <comment id="29722" author="johann" created="Thu, 23 Feb 2012 17:58:09 +0000"  >&lt;p&gt;hm, on the server side, the ldlm pool shrinker is &quot;just&quot; supposed to decrease the SLV (see ldlm_srv_pool_shrink()), so i&apos;m not sure to understand how it can end up in ldiskfs_destroy_inode() ... really odd.&lt;/p&gt;</comment>
                            <comment id="29789" author="ihara" created="Fri, 24 Feb 2012 04:13:44 +0000"  >&lt;p&gt;sorry, they did only apply patches to OSSs. we should apply the patches of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-25&quot; title=&quot;Blocking network request in ldlm shrinker&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-25&quot;&gt;&lt;del&gt;LU-25&lt;/del&gt;&lt;/a&gt; to both OSSs and clients, right?&lt;/p&gt;
</comment>
                            <comment id="29792" author="johann" created="Fri, 24 Feb 2012 04:27:44 +0000"  >&lt;p&gt;Ihara, the patch from &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-25&quot; title=&quot;Blocking network request in ldlm shrinker&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-25&quot;&gt;&lt;del&gt;LU-25&lt;/del&gt;&lt;/a&gt; only helps on the client side, so i don&apos;t think it will make any difference with this bug which is an OSS issue.&lt;br/&gt;
Could you please tell us what patches you applied on the OSS side?&lt;/p&gt;</comment>
                            <comment id="29793" author="ihara" created="Fri, 24 Feb 2012 04:43:52 +0000"  >&lt;p&gt;Johann,&lt;br/&gt;
OK... they applied the &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-25&quot; title=&quot;Blocking network request in ldlm shrinker&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-25&quot;&gt;&lt;del&gt;LU-25&lt;/del&gt;&lt;/a&gt; patches only to the OSSs, but not to the clients. This was our miscommunication, sorry. let us try the same patch on the clients this time, just in case....&lt;/p&gt;</comment>
                            <comment id="29848" author="ihara" created="Sun, 26 Feb 2012 18:31:14 +0000"  >&lt;p&gt;The customer tested patch for &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-25&quot; title=&quot;Blocking network request in ldlm shrinker&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-25&quot;&gt;&lt;del&gt;LU-25&lt;/del&gt;&lt;/a&gt; on the client, but it didn&apos;t help. The problem seems to be easy replicate. Just copy the data the local filesystem to lustre with rsync command. (e.g. rsync -avHS --exclude=/lustre --exclude=/dev --exclude=/proc --exclude=/sys /* /lustre/)&lt;/p&gt;

&lt;p&gt;Please investigate.. The production system does not work well if even one client runs rsync... &lt;/p&gt;</comment>
                            <comment id="29849" author="pjones" created="Sun, 26 Feb 2012 18:43:43 +0000"  >&lt;p&gt;Ihara&lt;/p&gt;

&lt;p&gt;I am sorry if we have not been clear enough - we did not expect the patch from &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-25&quot; title=&quot;Blocking network request in ldlm shrinker&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-25&quot;&gt;&lt;del&gt;LU-25&lt;/del&gt;&lt;/a&gt; to help with this issue, as it is a different kind of issue. What is blocking us from moving forward is getting details from you about whether any patches have been applied to the vanilla release.&lt;/p&gt;

&lt;p&gt;Regards&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="29850" author="ihara" created="Sun, 26 Feb 2012 18:57:07 +0000"  >&lt;p&gt;Peter,&lt;/p&gt;

&lt;p&gt;Yes, agreed. Johann suggested the patch wouldn&apos;t help; the customer tested it just in case..&lt;br/&gt;
Anyway, we only applied a patch (&lt;a href=&quot;http://review.whamcloud.com/#change,1919&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#change,1919&lt;/a&gt;) to lustre-1.8.7-wc1 on this site.&lt;br/&gt;
The reason we applied this patch is that we hit the issues in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-967&quot; title=&quot;OSS hangs due to heavey IO loads&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-967&quot;&gt;&lt;del&gt;LU-967&lt;/del&gt;&lt;/a&gt; and &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-952&quot; title=&quot;Hung thread with HIGH OSS load&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-952&quot;&gt;&lt;del&gt;LU-952&lt;/del&gt;&lt;/a&gt; at different sites. (Just in order to prevent the same issue at this site.)&lt;/p&gt;</comment>
                            <comment id="29973" author="ihara" created="Wed, 29 Feb 2012 13:15:36 +0000"  >&lt;p&gt;Oleg suggested the following changes and we tested on it.&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;--- a/lustre/ldlm/ldlm_pool.c
+++ b/lustre/ldlm/ldlm_pool.c
@@ -1062,8 +1062,7 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
         int total = 0, cached = 0, nr_ns;
         struct ldlm_namespace *ns;
 
-        if (client == LDLM_NAMESPACE_CLIENT &amp;amp;&amp;amp; nr != 0 &amp;amp;&amp;amp;
-            !(gfp_mask &amp;amp; __GFP_FS))
+        if (nr != 0 &amp;amp;&amp;amp; !(gfp_mask &amp;amp; __GFP_FS))
                 return -1;
 
         if (nr != 0)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
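
&lt;p&gt;(For clarity, the effect of the change above: the early-exit guard in ldlm_pools_shrink() now applies to the server-side pools as well, not only the client side. A sketch of the patched entry check, with the rest of the function elided:)&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;static int ldlm_pools_shrink(ldlm_side_t client, int nr,
                             unsigned int gfp_mask)
{
        /* After the patch: any real shrink request (nr != 0) coming from an
         * allocation context without __GFP_FS is refused outright, on the
         * server side as well as the client side, so reclaim can no longer
         * re-enter ldlm/ldiskfs from an fs-unsafe context. */
        if (nr != 0 &amp;amp;&amp;amp; !(gfp_mask &amp;amp; __GFP_FS))
                return -1;

        /* ... normal shrink path continues here ... */
}
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;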

&lt;p&gt;We didn&apos;t see the same issue or the same jbd2 call trace on the OSS, and copying the files with rsync worked well.&lt;br/&gt;
However, we got another call trace during the same testing.&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Feb 29 16:41:17 oss03 kernel: Call Trace:
Feb 29 16:41:17 oss03 kernel:  [&amp;lt;ffffffff80046c1e&amp;gt;] try_to_wake_up+0x422/0x484
Feb 29 16:41:17 oss03 kernel:  [&amp;lt;ffffffff8008cc1e&amp;gt;] __wake_up_common+0x3e/0x68
Feb 29 16:41:17 oss03 kernel:  [&amp;lt;ffffffff8028b1ca&amp;gt;] __down_trylock+0x39/0x4e
Feb 29 16:41:17 oss03 kernel:  [&amp;lt;ffffffff8006473d&amp;gt;] __down_failed_trylock+0x35/0x3a
Feb 29 16:41:17 oss03 kernel:  [&amp;lt;ffffffff887c20f4&amp;gt;] ldlm_pool_shrink+0x64/0xf0 [ptlrpc]
Feb 29 16:41:17 oss03 kernel:  [&amp;lt;ffffffff887a01dc&amp;gt;] .text.lock.ldlm_resource+0x73/0x87 [ptlrpc]
Feb 29 16:41:17 oss03 kernel:  [&amp;lt;ffffffff887c332f&amp;gt;] ldlm_pools_shrink+0x23f/0x2d0 [ptlrpc]
Feb 29 16:41:17 oss03 kernel:  [&amp;lt;ffffffff80064614&amp;gt;] __down_read+0x12/0x92
Feb 29 16:41:17 oss03 kernel:  [&amp;lt;ffffffff800223e9&amp;gt;] __up_read+0x19/0x7f
Feb 29 16:41:17 oss03 kernel:  [&amp;lt;ffffffff8003f285&amp;gt;] shrink_slab+0xdc/0x153
Feb 29 16:41:17 oss03 kernel:  [&amp;lt;ffffffff800ce4ce&amp;gt;] zone_reclaim+0x235/0x2cd
Feb 29 16:41:17 oss03 kernel:  [&amp;lt;ffffffff8000985a&amp;gt;] __d_lookup+0xb0/0xff
Feb 29 16:41:17 oss03 kernel:  [&amp;lt;ffffffff800ca81e&amp;gt;] __rmqueue+0x44/0xc6
Feb 29 16:41:17 oss03 kernel:  [&amp;lt;ffffffff8000a939&amp;gt;] get_page_from_freelist+0xbf/0x442
Feb 29 16:41:17 oss03 kernel:  [&amp;lt;ffffffff8000f46f&amp;gt;] __alloc_pages+0x78/0x308
Feb 29 16:41:17 oss03 kernel:  [&amp;lt;ffffffff80025e20&amp;gt;] find_or_create_page+0x32/0x72
Feb 29 16:41:17 oss03 kernel:  [&amp;lt;ffffffff88aee445&amp;gt;] filter_get_page+0x35/0x70 [obdfilter]
Feb 29 16:41:17 oss03 kernel:  [&amp;lt;ffffffff88aeea81&amp;gt;] filter_preprw_read+0x601/0xd30 [obdfilter]
Feb 29 16:41:17 oss03 kernel:  [&amp;lt;ffffffff886ca230&amp;gt;] LNetPut+0x730/0x840 [lnet]
Feb 29 16:41:17 oss03 kernel:  [&amp;lt;ffffffff887d5303&amp;gt;] ptl_send_buf+0x3f3/0x5b0 [ptlrpc]
Feb 29 16:41:17 oss03 kernel:  [&amp;lt;ffffffff887dfc45&amp;gt;] lustre_msg_set_limit+0x35/0xf0 [ptlrpc]
Feb 29 16:41:17 oss03 kernel:  [&amp;lt;ffffffff88af0f47&amp;gt;] filter_preprw+0x1d97/0x1e00 [obdfilter]
Feb 29 16:41:17 oss03 kernel:  [&amp;lt;ffffffff887d5aa8&amp;gt;] ptlrpc_send_reply+0x5e8/0x600 [ptlrpc]
Feb 29 16:41:17 oss03 kernel:  [&amp;lt;ffffffff887ddc3a&amp;gt;] lustre_pack_reply_flags+0x86a/0x950 [ptlrpc]
Feb 29 16:41:17 oss03 kernel:  [&amp;lt;ffffffff887d5aa8&amp;gt;] ptlrpc_send_reply+0x5e8/0x600 [ptlrpc]
Feb 29 16:41:18 oss03 kernel:  [&amp;lt;ffffffff88a96ac3&amp;gt;] ost_brw_read+0xb33/0x1a70 [ost]
Feb 29 16:41:18 oss03 kernel:  [&amp;lt;ffffffff887d9ed5&amp;gt;] lustre_msg_get_opc+0x35/0xf0 [ptlrpc]
Feb 29 16:41:18 oss03 kernel:  [&amp;lt;ffffffff8008e7f9&amp;gt;] default_wake_function+0x0/0xe
Feb 29 16:41:18 oss03 kernel:  [&amp;lt;ffffffff887da088&amp;gt;] lustre_msg_check_version_v2+0x8/0x20 [ptlrpc]
Feb 29 16:41:18 oss03 kernel:  [&amp;lt;ffffffff88a9f363&amp;gt;] ost_handle+0x2e73/0x55b0 [ost]
Feb 29 16:41:18 oss03 kernel:  [&amp;lt;ffffffff8872bd30&amp;gt;] class_handle2object+0xe0/0x170 [obdclass]
Feb 29 16:41:18 oss03 kernel:  [&amp;lt;ffffffff8879419a&amp;gt;] lock_res_and_lock+0xba/0xd0 [ptlrpc]
Feb 29 16:41:18 oss03 kernel:  [&amp;lt;ffffffff88799168&amp;gt;] __ldlm_handle2lock+0x2f8/0x360 [ptlrpc]
Feb 29 16:41:18 oss03 kernel:  [&amp;lt;ffffffff887e96b9&amp;gt;] ptlrpc_server_handle_request+0x989/0xe00 [ptlrpc]
Feb 29 16:41:18 oss03 kernel:  [&amp;lt;ffffffff887e9e15&amp;gt;] ptlrpc_wait_event+0x2e5/0x310 [ptlrpc]
Feb 29 16:41:18 oss03 kernel:  [&amp;lt;ffffffff8008cc1e&amp;gt;] __wake_up_common+0x3e/0x68
Feb 29 16:41:18 oss03 kernel:  [&amp;lt;ffffffff887eada6&amp;gt;] ptlrpc_main+0xf66/0x1120 [ptlrpc]
Feb 29 16:41:18 oss03 kernel:  [&amp;lt;ffffffff8005dfb1&amp;gt;] child_rip+0xa/0x11
Feb 29 16:41:18 oss03 kernel:  [&amp;lt;ffffffff887e9e40&amp;gt;] ptlrpc_main+0x0/0x1120 [ptlrpc]
Feb 29 16:41:18 oss03 kernel:  [&amp;lt;ffffffff8005dfa7&amp;gt;] child_rip+0x0/0x11
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
</comment>
                            <comment id="29974" author="ihara" created="Wed, 29 Feb 2012 13:17:19 +0000"  >&lt;p&gt;log files on OSS afer remove &quot;client == LDLM_NAMESPACE_CLIENT&quot;&lt;/p&gt;</comment>
                            <comment id="30073" author="ihara" created="Thu, 1 Mar 2012 01:41:23 +0000"  >&lt;p&gt;The same problem happened again.&lt;/p&gt;

&lt;p&gt;At this site, the client memory size is 96GB, each OSS has 24GB of memory, and there are 4 OSSs.&lt;br/&gt;
We have enabled lru_resize on the clients, and it seems the OSS is coming under memory pressure since the OSS&apos;s memory is not reclaimed quickly.&lt;/p&gt;

&lt;p&gt;We just disabled lru_resize and used a fixed lru_size, and we haven&apos;t seen the same issue so far.&lt;/p&gt;

&lt;p&gt;We are going to do more testing with this configuration to confirm whether the problem is gone completely.&lt;/p&gt;</comment>
                            <comment id="30096" author="green" created="Thu, 1 Mar 2012 10:53:29 +0000"  >&lt;p&gt;Hm, I guess there&apos;s some balancing to do on the server side to better reclaim locks preemptively before OOM situation arises.&lt;/p&gt;</comment>
                            <comment id="41013" author="ihara" created="Thu, 21 Jun 2012 20:31:07 +0000"  >&lt;p&gt;Hello, Oleg, &lt;/p&gt;

&lt;p&gt;The problem seems not to be fixed yet.. we are still getting many call traces on the OSSs even with dynamic lru_size disabled..&lt;br/&gt;
I just uploaded the recent OSS log files to the ftp site: uploads/&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1107&quot; title=&quot;OSS dumps Call traces occasionally&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1107&quot;&gt;&lt;del&gt;LU-1107&lt;/del&gt;&lt;/a&gt;/2012-06-20.tar.gz&lt;/p&gt;

&lt;p&gt;Please have a look at them and investigate.&lt;/p&gt;

&lt;p&gt;Thanks! &lt;/p&gt;</comment>
                            <comment id="43923" author="kitwestneat" created="Wed, 29 Aug 2012 02:57:35 +0000"  >&lt;p&gt;We hit this bug again today, but it caused 80+ clients to be unable to reconnect for a while. The customer restarted the machine, so it didn&apos;t finish trying to do the shrink. There was, however, an earlier event on the 23rd of August and that took over 3 hours &lt;img class=&quot;emoticon&quot; src=&quot;https://jira.whamcloud.com/images/icons/emoticons/warning.png&quot; height=&quot;16&quot; width=&quot;16&quot; align=&quot;absmiddle&quot; alt=&quot;&quot; border=&quot;0&quot;/&gt; to resolve. This is Lustre 1.8.6 (gce5e033)&lt;/p&gt;

&lt;p&gt;I&apos;ll try to get the lustre-logs that were dumped. In the meantime, I&apos;ll upload the oss log. It&apos;s worth looking at just to see this:&lt;br/&gt;
Aug 23 15:53:37 oss2 kernel: Lustre: Service thread pid 7842 completed after 11067.64s. This indicates the system was overloaded (too many service threads, or there were not enough hardware resources).&lt;/p&gt;

&lt;p&gt;:)&lt;/p&gt;</comment>
                            <comment id="43924" author="kitwestneat" created="Wed, 29 Aug 2012 02:58:21 +0000"  >&lt;p&gt;Kernel log from OSS2, the one that hung&lt;/p&gt;</comment>
                            <comment id="43953" author="kitwestneat" created="Wed, 29 Aug 2012 12:12:06 +0000"  >&lt;p&gt;This looks very similar to &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1128&quot; title=&quot;Complete investigation of the LDLM pool shrinker and SLV handling&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1128&quot;&gt;&lt;del&gt;LU-1128&lt;/del&gt;&lt;/a&gt;, and therefore &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1535&quot; title=&quot;LustreError: 1843:0:(mds_open.c:1645:mds_close()) &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1535&quot;&gt;&lt;del&gt;LU-1535&lt;/del&gt;&lt;/a&gt;. Can someone take a look at whether or not it&apos;s the same issue?&lt;/p&gt;</comment>
                            <comment id="43957" author="pjones" created="Wed, 29 Aug 2012 13:18:28 +0000"  >&lt;p&gt;Lai could you please comment?&lt;/p&gt;</comment>
                            <comment id="43978" author="laisiyao" created="Thu, 30 Aug 2012 05:18:50 +0000"  >&lt;p&gt;Hmm, IMO it&apos;s the same issue as &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1128&quot; title=&quot;Complete investigation of the LDLM pool shrinker and SLV handling&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1128&quot;&gt;&lt;del&gt;LU-1128&lt;/del&gt;&lt;/a&gt; and &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1535&quot; title=&quot;LustreError: 1843:0:(mds_open.c:1645:mds_close()) &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1535&quot;&gt;&lt;del&gt;LU-1535&lt;/del&gt;&lt;/a&gt;, Kit, could you verify that patch is not included in your code?&lt;/p&gt;</comment>
                            <comment id="43984" author="kitwestneat" created="Thu, 30 Aug 2012 09:46:25 +0000"  >&lt;p&gt;Hi Lai, we are not carrying the patch in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1535&quot; title=&quot;LustreError: 1843:0:(mds_open.c:1645:mds_close()) &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1535&quot;&gt;&lt;del&gt;LU-1535&lt;/del&gt;&lt;/a&gt;, it looks like it&apos;s targeted for a 1.8.9 release. I&apos;ll get a build together for the customer to test on. &lt;/p&gt;</comment>
                            <comment id="197360" author="adilger" created="Mon, 29 May 2017 03:19:42 +0000"  >&lt;p&gt;Close old ticket.&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                            <attachment id="11808" name="UA-oss2-kern.log" size="704270" author="kitwestneat" created="Wed, 29 Aug 2012 02:58:21 +0000"/>
                            <attachment id="10867" name="client_messages" size="45734" author="ihara" created="Tue, 21 Feb 2012 11:19:48 +0000"/>
                            <attachment id="10907" name="messages-2-29" size="632191" author="ihara" created="Wed, 29 Feb 2012 13:17:19 +0000"/>
                            <attachment id="10848" name="messages.tar.gz" size="319513" author="ihara" created="Thu, 16 Feb 2012 09:45:10 +0000"/>
                            <attachment id="10852" name="messages_cluster1" size="275465" author="ihara" created="Thu, 16 Feb 2012 20:47:14 +0000"/>
                            <attachment id="10866" name="oss01_messages" size="1477646" author="ihara" created="Tue, 21 Feb 2012 11:19:48 +0000"/>
                            <attachment id="10842" name="oss02_messages" size="2948691" author="ihara" created="Wed, 15 Feb 2012 13:00:19 +0000"/>
                            <attachment id="10843" name="oss03_messages" size="3879227" author="ihara" created="Wed, 15 Feb 2012 13:00:19 +0000"/>
                            <attachment id="10847" name="sysrq-t" size="796302" author="ihara" created="Wed, 15 Feb 2012 23:43:19 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10490" key="com.atlassian.jira.plugin.system.customfieldtypes:datepicker">
                        <customfieldname>End date</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>Fri, 25 Jan 2013 13:00:19 +0000</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                            <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzw1gf:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>10317</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                        <customfield id="customfield_10493" key="com.atlassian.jira.plugin.system.customfieldtypes:datepicker">
                        <customfieldname>Start date</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>Wed, 15 Feb 2012 13:00:19 +0000</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                    </customfields>
    </item>
</channel>
</rss>