<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:08:07 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-547] 1.8&lt;-&gt;2.1 / 2.3&lt;-&gt;2.4 interop: dbench kept doing cleanup</title>
                <link>https://jira.whamcloud.com/browse/LU-547</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;While running sanity-benchmark test suite, the dbench test kept doing cleanup and could not stop:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;&amp;lt;~snip~&amp;gt;
   1    737819     5.51 MB/sec  execute 598 sec  latency 38819.862 ms
   1    737819     5.50 MB/sec  execute 599 sec  latency 39820.034 ms
   1  cleanup 600 sec
   1  cleanup 601 sec
   ......
   1  cleanup 2897 sec
   1  cleanup 2898 sec
   ......
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;On client node fat-amd-3-ib:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# ps auxww
&amp;lt;~snip~&amp;gt;
500      18628  0.0  0.0 107264  2552 pts/0    S+   00:50   0:00 bash rundbench -D /mnt/lustre/d0.fat-amd-3-ib 1
500      18636  0.0  0.0   6092   596 pts/0    S+   00:50   0:00 dbench -c client.txt 1
500      18637  2.2  0.0   8140  2540 pts/0    S+   00:50   2:00 dbench -c client.txt 1

# echo t &amp;gt; /proc/sysrq-trigger
&amp;lt;~snip~&amp;gt;
dbench        S 0000000000000009     0 18636  18628 0x00000080
 ffff880219239e78 0000000000000086 0000000000000000 ffffffff812688c5
 ffff880323c50f98 ffff880218872068 0000000000000000 0000000100b91979
 ffff880218af3a78 ffff880219239fd8 000000000000f598 ffff880218af3a78
Call Trace:
 [&amp;lt;ffffffff812688c5&amp;gt;] ? rb_insert_color+0x125/0x160
 [&amp;lt;ffffffff8106b325&amp;gt;] do_wait+0x1c5/0x250
 [&amp;lt;ffffffff8106b453&amp;gt;] sys_wait4+0xa3/0x100
 [&amp;lt;ffffffff810699b0&amp;gt;] ? child_wait_callback+0x0/0x70
 [&amp;lt;ffffffff8100b425&amp;gt;] ? int_check_syscall_exit_work+0x34/0x3d
 [&amp;lt;ffffffff8100b172&amp;gt;] system_call_fastpath+0x16/0x1b
dbench        S 000000000000000c     0 18637  18636 0x00000080
 ffff880419933a98 0000000000000082 0000000000000000 000000004e31176c
 ffff880419933a78 ffffffffa04a2cac ffff880419933a78 ffff8803191cb000
 ffff8804197b05f8 ffff880419933fd8 000000000000f598 ffff8804197b05f8
Call Trace:
 [&amp;lt;ffffffffa04a2cac&amp;gt;] ? ptlrpc_at_adj_net_latency+0x7c/0x230 [ptlrpc]
 [&amp;lt;ffffffffa049c00d&amp;gt;] ldlm_flock_completion_ast+0x61d/0x9f0 [ptlrpc]
 [&amp;lt;ffffffffa046c15f&amp;gt;] ? lock_res_and_lock+0x5f/0xe0 [ptlrpc]
 [&amp;lt;ffffffff8105dc20&amp;gt;] ? default_wake_function+0x0/0x20
 [&amp;lt;ffffffffa048a565&amp;gt;] ldlm_cli_enqueue_fini+0x6c5/0xba0 [ptlrpc]
 [&amp;lt;ffffffff8105dc20&amp;gt;] ? default_wake_function+0x0/0x20
 [&amp;lt;ffffffffa048e074&amp;gt;] ldlm_cli_enqueue+0x344/0x7a0 [ptlrpc]
 [&amp;lt;ffffffffa0677edd&amp;gt;] ll_file_flock+0x47d/0x6b0 [lustre]
 [&amp;lt;ffffffffa049b9f0&amp;gt;] ? ldlm_flock_completion_ast+0x0/0x9f0 [ptlrpc]
 [&amp;lt;ffffffff811bc243&amp;gt;] vfs_lock_file+0x23/0x40
 [&amp;lt;ffffffff811bc497&amp;gt;] fcntl_setlk+0x177/0x320
 [&amp;lt;ffffffff8107ff06&amp;gt;] ? group_send_sig_info+0x26/0x70
 [&amp;lt;ffffffff81080212&amp;gt;] ? sys_kill+0x92/0x190
 [&amp;lt;ffffffff811845f7&amp;gt;] sys_fcntl+0x197/0x530
 [&amp;lt;ffffffff8100b172&amp;gt;] system_call_fastpath+0x16/0x1b
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Maloo report: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/4f04bb24-b8f5-11e0-8bdf-52540025f9af&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/4f04bb24-b8f5-11e0-8bdf-52540025f9af&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Please find the attached dbench-debug_log.tar.bz2 for debug logs.&lt;/p&gt;</description>
                <environment>&lt;br/&gt;
Lustre Clients:&lt;br/&gt;
Tag: 1.8.6-wc1&lt;br/&gt;
Distro/Arch: RHEL6/x86_64 (kernel version: 2.6.32_131.2.1.el6)&lt;br/&gt;
Build: &lt;a href=&quot;http://newbuild.whamcloud.com/job/lustre-b1_8/100/arch=x86_64,build_type=client,distro=el6,ib_stack=inkernel/&quot;&gt;http://newbuild.whamcloud.com/job/lustre-b1_8/100/arch=x86_64,build_type=client,distro=el6,ib_stack=inkernel/&lt;/a&gt;&lt;br/&gt;
Network: IB (inkernel OFED)&lt;br/&gt;
ENABLE_QUOTA=yes&lt;br/&gt;
&lt;br/&gt;
Lustre Servers:&lt;br/&gt;
Tag: v2_0_66_0&lt;br/&gt;
Distro/Arch: RHEL6/x86_64 (kernel version: 2.6.32-131.2.1.el6_lustre)&lt;br/&gt;
Build: &lt;a href=&quot;http://newbuild.whamcloud.com/job/lustre-master/228/arch=x86_64,build_type=server,distro=el6,ib_stack=inkernel/&quot;&gt;http://newbuild.whamcloud.com/job/lustre-master/228/arch=x86_64,build_type=server,distro=el6,ib_stack=inkernel/&lt;/a&gt;&lt;br/&gt;
Network: IB (inkernel OFED)&lt;br/&gt;
</environment>
        <key id="11411">LU-547</key>
            <summary>1.8&lt;-&gt;2.1 / 2.3&lt;-&gt;2.4 interop: dbench kept doing cleanup</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="6" iconUrl="https://jira.whamcloud.com/images/icons/statuses/closed.png" description="The issue is considered finished, the resolution is correct. Issues which are closed can be reopened.">Closed</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="2">Won&apos;t Fix</resolution>
                                        <assignee username="green">Oleg Drokin</assignee>
                                    <reporter username="yujian">Jian Yu</reporter>
                        <labels>
                    </labels>
                <created>Thu, 28 Jul 2011 05:33:41 +0000</created>
                <updated>Tue, 16 Aug 2016 16:29:31 +0000</updated>
                            <resolved>Tue, 16 Aug 2016 16:29:31 +0000</resolved>
                                    <version>Lustre 2.1.0</version>
                    <version>Lustre 2.4.0</version>
                    <version>Lustre 1.8.6</version>
                                    <fixVersion>Lustre 2.1.0</fixVersion>
                    <fixVersion>Lustre 1.8.7</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>4</watches>
                                                                            <comments>
                            <comment id="18367" author="yujian" created="Thu, 28 Jul 2011 05:38:16 +0000"  >&lt;p&gt;sanity-quota test 8 also hit the same issue:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/700c103e-b8cc-11e0-8bdf-52540025f9af&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/700c103e-b8cc-11e0-8bdf-52540025f9af&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="18519" author="pjones" created="Fri, 29 Jul 2011 11:35:39 +0000"  >&lt;p&gt;Oleg will look into this one. He suspects the same root cause as LU451&lt;/p&gt;</comment>
                            <comment id="18621" author="green" created="Mon, 1 Aug 2011 23:39:52 +0000"  >&lt;p&gt;I now think this is in fact different than &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-451&quot; title=&quot;Test failure on test suite parallel-scale&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-451&quot;&gt;&lt;del&gt;LU-451&lt;/del&gt;&lt;/a&gt; and probably related to &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-104&quot; title=&quot;Lustre grants flock exclusive locks on two file descriptors for the same  file&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-104&quot;&gt;&lt;del&gt;LU-104&lt;/del&gt;&lt;/a&gt; interop, even though that seemed to work well for me at the time.&lt;/p&gt;

&lt;p&gt;Unfortunately debug logs don&apos;t shed enough light on the situation. client log is empty.&lt;br/&gt;
MDS log only lists current lock that we are blocking due to conflicts, but I see no other lock activity so I have noway to know what was it conflicting with.&lt;/p&gt;

&lt;p&gt;I wonder if this dbench test works between 1.8 client and 1.8 server if you mount client with -o flock? (I don&apos;t think we mount with flock by default in 1.8 nor 2.1).&lt;/p&gt;</comment>
                            <comment id="18622" author="yujian" created="Tue, 2 Aug 2011 00:18:52 +0000"  >&lt;blockquote&gt;
&lt;p&gt;Unfortunately debug logs don&apos;t shed enough light on the situation. client log is empty.&lt;br/&gt;
MDS log only lists current lock that we are blocking due to conflicts, but I see no other lock activity so I have noway to know what was it conflicting with.&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;The issue could be reproduced consistently under the 1.8.6-wc1 client and 2.0.66.0 server configuration. After the Toro cluster is back, I&apos;d reproduce the issue and gather more debug logs.&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;I wonder if this dbench test works between 1.8 client and 1.8 server if you mount client with -o flock? (I don&apos;t think we mount with flock by default in 1.8 nor 2.1).&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;&quot;flock&quot; option is used by default in our test suite on both b1_8 and master branches:&lt;br/&gt;
MOUNTOPT=${MOUNTOPT:-&quot;-o user_xattr,acl,flock&quot;}&lt;/p&gt;

&lt;p&gt;and dbench test passed on 1.8.6-wc1 with flock option:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/7deb786c-a269-11e0-aee5-52540025f9af&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/7deb786c-a269-11e0-aee5-52540025f9af&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="18693" author="yujian" created="Thu, 4 Aug 2011 01:04:53 +0000"  >&lt;p&gt;Hi Oleg,&lt;br/&gt;
I reproduced the issue and gathered more debug logs while running the dbench test with &quot;PTLDEBUG=-1&quot;:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/94f6dce0-be57-11e0-8bdf-52540025f9af&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/94f6dce0-be57-11e0-8bdf-52540025f9af&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="19056" author="green" created="Wed, 10 Aug 2011 21:26:25 +0000"  >&lt;p&gt;It seems since we don&apos;t enable flock support by default, let&apos;s not include mounting with -o flock for interop testing for now.&lt;br/&gt;
I don&apos;t think this is such an important use case anyway. We can add some extra interop code in 1.8.7 if this poses a problem for real customers in the future.&lt;/p&gt;</comment>
                            <comment id="48524" author="sarah" created="Thu, 29 Nov 2012 02:43:36 +0000"  >&lt;p&gt;Looks like the same issue seen in 2.1.3&amp;lt;-&amp;gt;2.4 interop testing:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/07ffc460-397f-11e2-9fda-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/07ffc460-397f-11e2-9fda-52540035b04c&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;server: 2.3 RHEL6 &lt;br/&gt;
client: lustre master build #1065&lt;/p&gt;

&lt;p&gt;test log shows:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;   1  cleanup 7149 sec
   1  cleanup 7150 sec
   1  cleanup 7151 sec
   1  cleanup 7152 sec
   1  cleanup 7153 sec
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;client console shows:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;17:38:39:Lustre: DEBUG MARKER: == sanity-benchmark test dbench: dbench == 17:38:24 (1353980304)
17:39:21:Lustre: 3001:0:(client.c:1826:ptlrpc_expire_one_request()) @@@ Request sent has timed out for slow reply: [sent 1353980345/real 1353980345]  req@ffff880074163400 x1419751045755358/t0(0) o2-&amp;gt;lustre-OST0002-osc-ffff8800749b5c00@10.10.4.155@tcp:28/4 lens 440/432 e 0 to 1 dl 1353980352 ref 1 fl Rpc:X/0/ffffffff rc 0/-1
17:39:21:Lustre: lustre-OST0002-osc-ffff8800749b5c00: Connection to lustre-OST0002 (at 10.10.4.155@tcp) was lost; in progress operations using this service will wait for recovery to complete
17:39:21:Lustre: 3002:0:(client.c:1826:ptlrpc_expire_one_request()) @@@ Request sent has timed out for slow reply: [sent 1353980345/real 1353980345]  req@ffff880075a1d000 x1419751045755356/t0(0) o4-&amp;gt;lustre-OST0005-osc-ffff8800749b5c00@10.10.4.155@tcp:6/4 lens 488/448 e 0 to 1 dl 1353980352 ref 2 fl Rpc:X/0/ffffffff rc 0/-1
17:39:21:Lustre: lustre-OST0005-osc-ffff8800749b5c00: Connection to lustre-OST0005 (at 10.10.4.155@tcp) was lost; in progress operations using this service will wait for recovery to complete
17:39:22:Lustre: 3002:0:(client.c:1826:ptlrpc_expire_one_request()) @@@ Request sent has timed out for sent delay: [sent 1353980350/real 0]  req@ffff880075a1d800 x1419751045755403/t0(0) o4-&amp;gt;lustre-OST0005-osc-ffff8800749b5c00@10.10.4.155@tcp:6/4 lens 488/448 e 0 to 1 dl 1353980357 ref 3 fl Rpc:X/0/ffffffff rc 0/-1
17:39:22:Lustre: lustre-OST0003-osc-ffff8800749b5c00: Connection to lustre-OST0003 (at 10.10.4.155@tcp) was lost; in progress operations using this service will wait for recovery to complete
17:39:22:Lustre: 3000:0:(client.c:1826:ptlrpc_expire_one_request()) @@@ Request sent has timed out for sent delay: [sent 1353980352/real 0]  req@ffff880075de8000 x1419751045755406/t0(0) o8-&amp;gt;lustre-OST0002-osc-ffff8800749b5c00@10.10.4.155@tcp:28/4 lens 400/544 e 0 to 1 dl 1353980358 ref 2 fl Rpc:XN/0/ffffffff rc 0/-1
17:39:22:Lustre: 3000:0:(client.c:1826:ptlrpc_expire_one_request()) Skipped 19 previous similar messages
17:39:22:Lustre: lustre-OST0004-osc-ffff8800749b5c00: Connection to lustre-OST0004 (at 10.10.4.155@tcp) was lost; in progress operations using this service will wait for recovery to complete
17:39:22:Lustre: Skipped 3 previous similar messages
17:39:33:Lustre: 3000:0:(client.c:1826:ptlrpc_expire_one_request()) @@@ Request sent has timed out for sent delay: [sent 1353980358/real 0]  req@ffff880037692000 x1419751045755416/t0(0) o8-&amp;gt;lustre-OST0004-osc-ffff8800749b5c00@10.10.4.155@tcp:28/4 lens 400/544 e 0 to 1 dl 1353980364 ref 2 fl Rpc:XN/0/ffffffff rc 0/-1
17:39:33:Lustre: 3000:0:(client.c:1826:ptlrpc_expire_one_request()) Skipped 25 previous similar messages
17:39:33:Lustre: 3000:0:(client.c:1826:ptlrpc_expire_one_request()) @@@ Request sent has timed out for sent delay: [sent 1353980358/real 0]  req@ffff880037730000 x1419751045755415/t0(0) o8-&amp;gt;lustre-OST0002-osc-ffff8800749b5c00@10.10.4.155@tcp:28/4 lens 400/544 e 0 to 1 dl 1353980369 ref 2 fl Rpc:XN/0/ffffffff rc 0/-1
17:39:33:Lustre: 3000:0:(client.c:1826:ptlrpc_expire_one_request()) Skipped 5 previous similar messages
17:39:44:Lustre: 3000:0:(client.c:1826:ptlrpc_expire_one_request()) @@@ Request sent has timed out for sent delay: [sent 1353980368/real 0]  req@ffff880037739000 x1419751045755422/t0(0) o8-&amp;gt;lustre-OST0000-osc-ffff8800749b5c00@10.10.4.155@tcp:28/4 lens 400/544 e 0 to 1 dl 1353980379 ref 2 fl Rpc:XN/0/ffffffff rc 0/-1
17:39:44:Lustre: 3000:0:(client.c:1826:ptlrpc_expire_one_request()) Skipped 1 previous similar message
17:40:26:Lustre: 3000:0:(client.c:1826:ptlrpc_expire_one_request()) @@@ Request sent has timed out for slow reply: [sent 1353980393/real 1353980394]  req@ffff8800376fd000 x1419751045755451/t0(0) o8-&amp;gt;lustre-OST0000-osc-ffff8800749b5c00@10.10.4.155@tcp:28/4 lens 400/544 e 0 to 1 dl 1353980418 ref 1 fl Rpc:XN/0/ffffffff rc 0/-1
17:40:26:Lustre: 3000:0:(client.c:1826:ptlrpc_expire_one_request()) Skipped 18 previous similar messages
17:41:07:INFO: task dbench:8110 blocked for more than 120 seconds.
17:41:07:&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
17:41:07:dbench        D 0000000000000000     0  8110   8109 0x00000080
17:41:07: ffff880075d8ba58 0000000000000082 ffff880075d8ba08 ffff8800746ac0d0
17:41:07: 0000000000000286 0000000000000003 0000000000000001 0000000000000286
17:41:07: ffff880076192638 ffff880075d8bfd8 000000000000fb88 ffff880076192638
17:41:07:Call Trace:
17:41:07: [&amp;lt;ffffffffa04367da&amp;gt;] ? cfs_waitq_signal+0x1a/0x20 [libcfs]
17:41:07: [&amp;lt;ffffffff814fe7d5&amp;gt;] schedule_timeout+0x215/0x2e0
17:41:08: [&amp;lt;ffffffffa077bfbc&amp;gt;] ? ptlrpc_request_bufs_pack+0x5c/0x80 [ptlrpc]
17:41:08: [&amp;lt;ffffffffa0791c30&amp;gt;] ? lustre_swab_ost_body+0x0/0x10 [ptlrpc]
17:41:08: [&amp;lt;ffffffff814fe453&amp;gt;] wait_for_common+0x123/0x180
17:41:08: [&amp;lt;ffffffff81060250&amp;gt;] ? default_wake_function+0x0/0x20
17:41:08: [&amp;lt;ffffffff814fe56d&amp;gt;] wait_for_completion+0x1d/0x20
17:41:08: [&amp;lt;ffffffffa099b1d4&amp;gt;] osc_io_setattr_end+0xc4/0x1a0 [osc]
17:41:08: [&amp;lt;ffffffffa0a1dc50&amp;gt;] ? lov_io_end_wrapper+0x0/0x100 [lov]
17:41:08: [&amp;lt;ffffffffa0628a80&amp;gt;] cl_io_end+0x60/0x150 [obdclass]
17:41:08: [&amp;lt;ffffffffa0629350&amp;gt;] ? cl_io_start+0x0/0x140 [obdclass]
17:41:08: [&amp;lt;ffffffffa0a1dd41&amp;gt;] lov_io_end_wrapper+0xf1/0x100 [lov]
17:41:08: [&amp;lt;ffffffffa0a1d7ce&amp;gt;] lov_io_call+0x8e/0x130 [lov]
17:41:08: [&amp;lt;ffffffffa0a1f42c&amp;gt;] lov_io_end+0x4c/0x110 [lov]
17:41:08: [&amp;lt;ffffffffa0628a80&amp;gt;] cl_io_end+0x60/0x150 [obdclass]
17:41:08: [&amp;lt;ffffffffa062dce2&amp;gt;] cl_io_loop+0xc2/0x1b0 [obdclass]
17:41:08: [&amp;lt;ffffffffa0ad3148&amp;gt;] cl_setattr_ost+0x208/0x2d0 [lustre]
17:41:08: [&amp;lt;ffffffffa0aa2472&amp;gt;] ll_setattr_raw+0x752/0xfd0 [lustre]
17:41:08: [&amp;lt;ffffffffa0aa2d4b&amp;gt;] ll_setattr+0x5b/0xf0 [lustre]
17:41:08: [&amp;lt;ffffffff81197368&amp;gt;] notify_change+0x168/0x340
17:41:08: [&amp;lt;ffffffff81187e25&amp;gt;] ? putname+0x35/0x50
17:41:08: [&amp;lt;ffffffff811aacdc&amp;gt;] utimes_common+0xdc/0x1b0
17:41:08: [&amp;lt;ffffffff811aae8e&amp;gt;] do_utimes+0xde/0xf0
17:41:08: [&amp;lt;ffffffff811ab023&amp;gt;] sys_utime+0x23/0x90
17:41:08: [&amp;lt;ffffffff8100b0f2&amp;gt;] system_call_fastpath+0x16/0x1b
17:41:20:Lustre: 3000:0:(client.c:1826:ptlrpc_expire_one_request()) @@@ Request sent has timed out for slow reply: [sent 1353980453/real 1353980454]  req@ffff8800376fd000 x1419751045755487/t0(0) o8-&amp;gt;lustre-OST0000-osc-ffff8800749b5c00@10.10.4.155@tcp:28/4 lens 400/544 e 0 to 1 dl 1353980478 ref 1 fl Rpc:XN/0/ffffffff rc 0/-1
17:41:20:Lustre: 3000:0:(client.c:1826:ptlrpc_expire_one_request()) Skipped 13 previous similar messages
17:42:25:Lustre: 3000:0:(client.c:1826:ptlrpc_expire_one_request()) @@@ Request sent has failed due to network error: [sent 1353980543/real 1353980543]  req@ffff8800756cc400 x1419751045755544/t0(0) o8-&amp;gt;lustre-OST0000-osc-ffff8800749b5c00@10.10.4.155@tcp:28/4 lens 400/544 e 0 to 1 dl 1353980568 ref 1 fl Rpc:XN/0/ffffffff rc 0/-1
17:42:25:Lustre: 3000:0:(client.c:1826:ptlrpc_expire_one_request()) Skipped 20 previous similar messages
17:43:07:INFO: task dbench:8110 blocked for more than 120 seconds.
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="162034" author="simmonsja" created="Tue, 16 Aug 2016 16:29:31 +0000"  >&lt;p&gt;Old blocker for unsupported version&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                            <attachment id="10331" name="dbench-debug_log.tar.bz2" size="2125219" author="yujian" created="Thu, 28 Jul 2011 05:33:41 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvex3:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>6058</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>