<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:40:20 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
<language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-4174] Failure on test suite recovery-mds-scale test_failover_ost: import is not in FULL state</title>
                <link>https://jira.whamcloud.com/browse/LU-4174</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for sarah &amp;lt;sarah@whamcloud.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;http://maloo.whamcloud.com/test_sets/de5e20d0-399a-11e3-8e4c-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://maloo.whamcloud.com/test_sets/de5e20d0-399a-11e3-8e4c-52540035b04c&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_failover_ost failed with the following error:&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;import is not in FULL state&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;client 3 console shows:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;13:45:10:Lustre: DEBUG MARKER: ==== Checking the clients loads BEFORE failover -- failure NOT OK ELAPSED=6700 DURATION=86400 PERIOD=900
13:45:10:Lustre: DEBUG MARKER: rc=$([ -f /proc/sys/lnet/catastrophe ] &amp;amp;&amp;amp;
13:45:10:		echo $(&amp;lt; /proc/sys/lnet/catastrophe) || echo 0);
13:45:11:		if [ $rc -ne 0 ]; then echo $(hostname): $rc; fi
13:45:11:		exit $rc
13:45:11:Lustre: DEBUG MARKER: ps auxwww | grep -v grep | grep -q run_tar.sh
13:45:11:Lustre: DEBUG MARKER: /usr/sbin/lctl mark Wait ost7 recovery complete before doing next failover...
13:45:11:Lustre: DEBUG MARKER: Wait ost7 recovery complete before doing next failover...
13:45:52:LustreError: 16584:0:(osc_cache.c:897:osc_extent_wait()) extent ffff88005197cd08@{[0 -&amp;gt; 2/255], [2|0|+|cache|wihuY|ffff8800507e21b8], [12288|3|+|-|ffff88005046e3d8|256|(null)]} lustre-OST0002-osc-ffff880037e99000: wait ext to 0 timedout, recovery in progress?
13:46:23:INFO: task tar:28315 blocked for more than 120 seconds.
13:46:23:&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
13:46:23:tar           D 0000000000000000     0 28315  28313 0x00000080
13:46:23: ffff88004fa89a18 0000000000000086 ffff88004fa899c8 ffffffff810097cc
13:46:23: ffff88007d4b4b18 0000000000000000 0000000000a899d8 ffff880002214280
13:46:24: ffff880066aab058 ffff88004fa89fd8 000000000000fb88 ffff880066aab058
13:46:24:Call Trace:
13:46:24: [&amp;lt;ffffffff810097cc&amp;gt;] ? __switch_to+0x1ac/0x320
13:46:24: [&amp;lt;ffffffff8150e130&amp;gt;] ? thread_return+0x4e/0x76e
13:46:24: [&amp;lt;ffffffff8150efa5&amp;gt;] schedule_timeout+0x215/0x2e0
13:46:24: [&amp;lt;ffffffffa0696bf0&amp;gt;] ? lustre_swab_ost_body+0x0/0x10 [ptlrpc]
13:46:24: [&amp;lt;ffffffff8150ec23&amp;gt;] wait_for_common+0x123/0x180
13:46:24: [&amp;lt;ffffffff81063410&amp;gt;] ? default_wake_function+0x0/0x20
13:46:24: [&amp;lt;ffffffff8150ed3d&amp;gt;] wait_for_completion+0x1d/0x20
13:46:24: [&amp;lt;ffffffffa089d5ec&amp;gt;] osc_io_setattr_end+0xbc/0x190 [osc]
13:46:24: [&amp;lt;ffffffffa092cdf0&amp;gt;] ? lov_io_end_wrapper+0x0/0x100 [lov]
13:46:24: [&amp;lt;ffffffffa055aed0&amp;gt;] cl_io_end+0x60/0x150 [obdclass]
13:46:24: [&amp;lt;ffffffffa055b780&amp;gt;] ? cl_io_start+0x0/0x140 [obdclass]
13:46:24: [&amp;lt;ffffffffa092cee1&amp;gt;] lov_io_end_wrapper+0xf1/0x100 [lov]
13:46:24: [&amp;lt;ffffffffa092c86e&amp;gt;] lov_io_call+0x8e/0x130 [lov]
13:46:24: [&amp;lt;ffffffffa092e5dc&amp;gt;] lov_io_end+0x4c/0xf0 [lov]
13:46:24: [&amp;lt;ffffffffa055aed0&amp;gt;] cl_io_end+0x60/0x150 [obdclass]
13:46:24: [&amp;lt;ffffffffa055ff02&amp;gt;] cl_io_loop+0xc2/0x1b0 [obdclass]
13:46:25: [&amp;lt;ffffffffa09fcfc8&amp;gt;] cl_setattr_ost+0x218/0x2f0 [lustre]
13:46:25: [&amp;lt;ffffffffa09c9145&amp;gt;] ll_setattr_raw+0xa45/0x1070 [lustre]
13:46:25: [&amp;lt;ffffffffa09c97cd&amp;gt;] ll_setattr+0x5d/0xf0 [lustre]
13:46:25: [&amp;lt;ffffffff8119ea48&amp;gt;] notify_change+0x168/0x340
13:46:25: [&amp;lt;ffffffff811b2aec&amp;gt;] utimes_common+0xdc/0x1b0
13:46:25: [&amp;lt;ffffffff81182bc1&amp;gt;] ? __fput+0x1a1/0x210
13:46:25: [&amp;lt;ffffffff811b2c9e&amp;gt;] do_utimes+0xde/0xf0
13:46:25: [&amp;lt;ffffffff811b2db2&amp;gt;] sys_utimensat+0x32/0x90
13:46:25: [&amp;lt;ffffffff8100b072&amp;gt;] system_call_fastpath+0x16/0x1b
13:48:28:INFO: task tar:28315 blocked for more than 120 seconds.
13:48:29:&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
13:48:29:tar           D 0000000000000000     0 28315  28313 0x00000080
13:48:30: ffff88004fa89a18 0000000000000086 ffff88004fa899c8 ffffffff810097cc
13:48:31: ffff88007d4b4b18 0000000000000000 0000000000a899d8 ffff880002214280
13:48:31: ffff880066aab058 ffff88004fa89fd8 000000000000fb88 ffff880066aab058
13:48:32:Call Trace:
13:48:33: [&amp;lt;ffffffff810097cc&amp;gt;] ? __switch_to+0x1ac/0x320
13:48:33: [&amp;lt;ffffffff8150e130&amp;gt;] ? thread_return+0x4e/0x76e
13:48:34: [&amp;lt;ffffffff8150efa5&amp;gt;] schedule_timeout+0x215/0x2e0
13:48:35: [&amp;lt;ffffffffa0696bf0&amp;gt;] ? lustre_swab_ost_body+0x0/0x10 [ptlrpc]
13:48:36: [&amp;lt;ffffffff8150ec23&amp;gt;] wait_for_common+0x123/0x180
13:48:36: [&amp;lt;ffffffff81063410&amp;gt;] ? default_wake_function+0x0/0x20
13:48:36: [&amp;lt;ffffffff8150ed3d&amp;gt;] wait_for_completion+0x1d/0x20
13:48:37: [&amp;lt;ffffffffa089d5ec&amp;gt;] osc_io_setattr_end+0xbc/0x190 [osc]
13:48:37: [&amp;lt;ffffffffa092cdf0&amp;gt;] ? lov_io_end_wrapper+0x0/0x100 [lov]
13:48:38: [&amp;lt;ffffffffa055aed0&amp;gt;] cl_io_end+0x60/0x150 [obdclass]
13:48:38: [&amp;lt;ffffffffa055b780&amp;gt;] ? cl_io_start+0x0/0x140 [obdclass]
13:48:39: [&amp;lt;ffffffffa092cee1&amp;gt;] lov_io_end_wrapper+0xf1/0x100 [lov]
13:48:40: [&amp;lt;ffffffffa092c86e&amp;gt;] lov_io_call+0x8e/0x130 [lov]
13:48:41: [&amp;lt;ffffffffa092e5dc&amp;gt;] lov_io_end+0x4c/0xf0 [lov]
13:48:41: [&amp;lt;ffffffffa055aed0&amp;gt;] cl_io_end+0x60/0x150 [obdclass]
13:48:42: [&amp;lt;ffffffffa055ff02&amp;gt;] cl_io_loop+0xc2/0x1b0 [obdclass]
13:48:42: [&amp;lt;ffffffffa09fcfc8&amp;gt;] cl_setattr_ost+0x218/0x2f0 [lustre]
13:48:42: [&amp;lt;ffffffffa09c9145&amp;gt;] ll_setattr_raw+0xa45/0x1070 [lustre]
13:48:43: [&amp;lt;ffffffffa09c97cd&amp;gt;] ll_setattr+0x5d/0xf0 [lustre]
13:48:43: [&amp;lt;ffffffff8119ea48&amp;gt;] notify_change+0x168/0x340
13:48:43: [&amp;lt;ffffffff811b2aec&amp;gt;] utimes_common+0xdc/0x1b0
13:48:43: [&amp;lt;ffffffff81182bc1&amp;gt;] ? __fput+0x1a1/0x210
13:48:44: [&amp;lt;ffffffff811b2c9e&amp;gt;] do_utimes+0xde/0xf0
13:48:44: [&amp;lt;ffffffff811b2db2&amp;gt;] sys_utimensat+0x32/0x90
13:48:44: [&amp;lt;ffffffff8100b072&amp;gt;] system_call_fastpath+0x16/0x1b
13:50:30:INFO: task tar:28315 blocked for more than 120 seconds.
13:50:32:&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
13:50:32:tar           D 0000000000000000     0 28315  28313 0x00000080
13:50:33: ffff88004fa89a18 0000000000000086 ffff88004fa899c8 ffffffff810097cc
13:50:33: ffff88007d4b4b18 0000000000000000 0000000000a899d8 ffff880002214280
13:50:34: ffff880066aab058 ffff88004fa89fd8 000000000000fb88 ffff880066aab058
13:50:34:Call Trace:
13:50:34: [&amp;lt;ffffffff810097cc&amp;gt;] ? __switch_to+0x1ac/0x320
13:50:35: [&amp;lt;ffffffff8150e130&amp;gt;] ? thread_return+0x4e/0x76e
13:50:35: [&amp;lt;ffffffff8150efa5&amp;gt;] schedule_timeout+0x215/0x2e0
13:50:36: [&amp;lt;ffffffffa0696bf0&amp;gt;] ? lustre_swab_ost_body+0x0/0x10 [ptlrpc]
13:50:38: [&amp;lt;ffffffff8150ec23&amp;gt;] wait_for_common+0x123/0x180
13:50:39: [&amp;lt;ffffffff81063410&amp;gt;] ? default_wake_function+0x0/0x20
13:50:39: [&amp;lt;ffffffff8150ed3d&amp;gt;] wait_for_completion+0x1d/0x20
13:50:40: [&amp;lt;ffffffffa089d5ec&amp;gt;] osc_io_setattr_end+0xbc/0x190 [osc]
13:50:40: [&amp;lt;ffffffffa092cdf0&amp;gt;] ? lov_io_end_wrapper+0x0/0x100 [lov]
13:50:41: [&amp;lt;ffffffffa055aed0&amp;gt;] cl_io_end+0x60/0x150 [obdclass]
13:50:41: [&amp;lt;ffffffffa055b780&amp;gt;] ? cl_io_start+0x0/0x140 [obdclass]
13:50:42: [&amp;lt;ffffffffa092cee1&amp;gt;] lov_io_end_wrapper+0xf1/0x100 [lov]
13:50:42: [&amp;lt;ffffffffa092c86e&amp;gt;] lov_io_call+0x8e/0x130 [lov]
13:50:42: [&amp;lt;ffffffffa092e5dc&amp;gt;] lov_io_end+0x4c/0xf0 [lov]
13:50:43: [&amp;lt;ffffffffa055aed0&amp;gt;] cl_io_end+0x60/0x150 [obdclass]
13:50:43: [&amp;lt;ffffffffa055ff02&amp;gt;] cl_io_loop+0xc2/0x1b0 [obdclass]
13:50:43: [&amp;lt;ffffffffa09fcfc8&amp;gt;] cl_setattr_ost+0x218/0x2f0 [lustre]
13:50:44: [&amp;lt;ffffffffa09c9145&amp;gt;] ll_setattr_raw+0xa45/0x1070 [lustre]
13:50:44: [&amp;lt;ffffffffa09c97cd&amp;gt;] ll_setattr+0x5d/0xf0 [lustre]
13:50:45: [&amp;lt;ffffffff8119ea48&amp;gt;] notify_change+0x168/0x340
13:50:45: [&amp;lt;ffffffff811b2aec&amp;gt;] utimes_common+0xdc/0x1b0
13:50:45: [&amp;lt;ffffffff81182bc1&amp;gt;] ? __fput+0x1a1/0x210
13:50:45: [&amp;lt;ffffffff811b2c9e&amp;gt;] do_utimes+0xde/0xf0
13:50:45: [&amp;lt;ffffffff811b2db2&amp;gt;] sys_utimensat+0x32/0x90
13:50:46: [&amp;lt;ffffffff8100b072&amp;gt;] system_call_fastpath+0x16/0x1b
13:52:09:LustreError: 11-0: lustre-OST0002-osc-ffff880037e99000: Communicating with 10.10.4.183@tcp, operation ldlm_enqueue failed with -12.
13:52:09:LustreError: Skipped 391089 previous similar messages
13:52:30:INFO: task tar:28315 blocked for more than 120 seconds.
13:52:30:&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
13:52:30:tar           D 0000000000000000     0 28315  28313 0x00000080
13:52:30: ffff88004fa89a18 0000000000000086 ffff88004fa899c8 ffffffff810097cc
13:52:30: ffff88007d4b4b18 0000000000000000 0000000000a899d8 ffff880002214280
13:52:30: ffff880066aab058 ffff88004fa89fd8 000000000000fb88 ffff880066aab058
13:52:30:Call Trace:
13:52:30: [&amp;lt;ffffffff810097cc&amp;gt;] ? __switch_to+0x1ac/0x320
13:52:30: [&amp;lt;ffffffff8150e130&amp;gt;] ? thread_return+0x4e/0x76e
13:52:30: [&amp;lt;ffffffff8150efa5&amp;gt;] schedule_timeout+0x215/0x2e0
13:52:30: [&amp;lt;ffffffffa0696bf0&amp;gt;] ? lustre_swab_ost_body+0x0/0x10 [ptlrpc]
13:52:30: [&amp;lt;ffffffff8150ec23&amp;gt;] wait_for_common+0x123/0x180
13:52:31: [&amp;lt;ffffffff81063410&amp;gt;] ? default_wake_function+0x0/0x20
13:52:31: [&amp;lt;ffffffff8150ed3d&amp;gt;] wait_for_completion+0x1d/0x20
13:52:31: [&amp;lt;ffffffffa089d5ec&amp;gt;] osc_io_setattr_end+0xbc/0x190 [osc]
13:52:31: [&amp;lt;ffffffffa092cdf0&amp;gt;] ? lov_io_end_wrapper+0x0/0x100 [lov]
13:52:31: [&amp;lt;ffffffffa055aed0&amp;gt;] cl_io_end+0x60/0x150 [obdclass]
13:52:31: [&amp;lt;ffffffffa055b780&amp;gt;] ? cl_io_start+0x0/0x140 [obdclass]
13:52:31: [&amp;lt;ffffffffa092cee1&amp;gt;] lov_io_end_wrapper+0xf1/0x100 [lov]
13:52:31: [&amp;lt;ffffffffa092c86e&amp;gt;] lov_io_call+0x8e/0x130 [lov]
13:52:31: [&amp;lt;ffffffffa092e5dc&amp;gt;] lov_io_end+0x4c/0xf0 [lov]
13:52:31: [&amp;lt;ffffffffa055aed0&amp;gt;] cl_io_end+0x60/0x150 [obdclass]
13:52:31: [&amp;lt;ffffffffa055ff02&amp;gt;] cl_io_loop+0xc2/0x1b0 [obdclass]
13:52:31: [&amp;lt;ffffffffa09fcfc8&amp;gt;] cl_setattr_ost+0x218/0x2f0 [lustre]
13:52:31: [&amp;lt;ffffffffa09c9145&amp;gt;] ll_setattr_raw+0xa45/0x1070 [lustre]
13:52:31: [&amp;lt;ffffffffa09c97cd&amp;gt;] ll_setattr+0x5d/0xf0 [lustre]
13:52:31: [&amp;lt;ffffffff8119ea48&amp;gt;] notify_change+0x168/0x340
13:52:31: [&amp;lt;ffffffff811b2aec&amp;gt;] utimes_common+0xdc/0x1b0
13:52:31: [&amp;lt;ffffffff81182bc1&amp;gt;] ? __fput+0x1a1/0x210
13:52:32: [&amp;lt;ffffffff811b2c9e&amp;gt;] do_utimes+0xde/0xf0
13:52:32: [&amp;lt;ffffffff811b2db2&amp;gt;] sys_utimensat+0x32/0x90
13:52:32: [&amp;lt;ffffffff8100b072&amp;gt;] system_call_fastpath+0x16/0x1b
13:56:27:Lustre: DEBUG MARKER: /usr/sbin/lctl mark Checking clients are in FULL state before doing next failover...
13:56:30:Lustre: DEBUG MARKER: Checking clients are in FULL state before doing next failover...
13:56:30:Lustre: DEBUG MARKER: PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/u
13:56:30:Lustre: DEBUG MARKER: /usr/sbin/lctl mark osc.lustre-OST0000-osc-*.ost_server_uuid in FULL state after 0 sec
13:56:30:Lustre: DEBUG MARKER: lctl get_param -n at_max
13:56:30:Lustre: DEBUG MARKER: osc.lustre-OST0000-osc-*.ost_server_uuid in FULL state after 0 sec
13:56:31:Lustre: DEBUG MARKER: /usr/sbin/lctl mark osc.lustre-OST0000-osc-*.ost_server_uuid in FULL state after 0 sec
13:56:31:Lustre: DEBUG MARKER: /usr/sbin/lctl mark osc.lustre-OST0000-osc-*.ost_server_uuid in FULL state after 0 sec
13:56:31:Lustre: DEBUG MARKER: /usr/sbin/lctl mark osc.lustre-OST0001-osc-*.ost_server_uuid in FULL state after 0 sec
13:56:31:Lustre: DEBUG MARKER: osc.lustre-OST0001-osc-*.ost_server_uuid in FULL state after 0 sec
13:56:31:Lustre: DEBUG MARKER: osc.lustre-OST0000-osc-*.ost_server_uuid in FULL state after 0 sec
13:56:31:Lustre: DEBUG MARKER: osc.lustre-OST0000-osc-*.ost_server_uuid in FULL state after 0 sec
13:56:31:Lustre: DEBUG MARKER: /usr/sbin/lctl mark osc.lustre-OST0001-osc-*.ost_server_uuid in FULL state after 0 sec
13:56:31:Lustre: DEBUG MARKER: /usr/sbin/lctl mark osc.lustre-OST0001-osc-*.ost_server_uuid in FULL state after 0 sec
13:56:31:Lustre: DEBUG MARKER: osc.lustre-OST0001-osc-*.ost_server_uuid in FULL state after 0 sec
13:56:31:Lustre: DEBUG MARKER: osc.lustre-OST0001-osc-*.ost_server_uuid in FULL state after 0 sec
14:02:10:LustreError: 11-0: lustre-OST0002-osc-ffff880037e99000: Communicating with 10.10.4.183@tcp, operation ldlm_enqueue failed with -12.
14:02:10:LustreError: Skipped 464241 previous similar messages
14:07:35:Lustre: DEBUG MARKER: /usr/sbin/lctl mark  rpc : @@@@@@ FAIL: can\&apos;t put import for osc.lustre-OST0002-osc-*.ost_server_uuid into FULL state after 662 sec, have REPLAY_WAIT 
14:07:37:Lustre: DEBUG MARKER: rpc : @@@@@@ FAIL: can&apos;t put import for osc.lustre-OST0002-osc-*.ost_server_uuid into FULL state after 662 sec, have REPLAY_WAIT
14:07:37:Lustre: DEBUG MARKER: PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:./../utils:/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests
14:07:37:Lustre: DEBUG MARKER: /usr/sbin/lctl dk &amp;gt; /tmp/test_logs/1382129779/rpc..debug_log.$(hostname -s).1382130447.log;
14:07:37:         dmesg &amp;gt; /tmp/test_logs/1382129779/rpc..dmesg.$(hostname -s).1382130447.log
14:07:37:Lustre: DEBUG MARKER: rsync -az /tmp/test_logs/1382129779/rpc..*.1382130447.log client-30vm5.lab.whamcloud.com:/tmp/test_logs/1382129779
14:07:37:Lustre: DEBUG MARKER: /usr/sbin/lctl mark  rpc : @@@@@@ FAIL: can\&apos;t put import for osc.lustre-OST0002-osc-*.ost_server_uuid into FULL state after 662 sec, have CONNECTING 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment>client and server: lustre-b2_5 build #2 RHEL6 ldiskfs</environment>
        <key id="21695">LU-4174</key>
            <summary>Failure on test suite recovery-mds-scale test_failover_ost: import is not in FULL state</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="5">Cannot Reproduce</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Tue, 29 Oct 2013 07:25:02 +0000</created>
                <updated>Tue, 14 Dec 2021 22:51:53 +0000</updated>
                            <resolved>Tue, 14 Dec 2021 22:51:53 +0000</resolved>
                                    <version>Lustre 2.5.0</version>
                    <version>Lustre 2.5.3</version>
                                                        <due></due>
                            <votes>1</votes>
                                    <watches>5</watches>
                                                                            <comments>
                            <comment id="92763" author="yujian" created="Thu, 28 Aug 2014 23:29:46 +0000"  >&lt;p&gt;Lustre Build: &lt;a href=&quot;https://build.hpdd.intel.com/job/lustre-b2_5/85/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://build.hpdd.intel.com/job/lustre-b2_5/85/&lt;/a&gt;&lt;br/&gt;
Distro/Arch: RHEL6.5/x86_64&lt;br/&gt;
FSTYPE=ldiskfs&lt;br/&gt;
TEST_GROUP=failover&lt;/p&gt;

&lt;p&gt;The same failure occurred:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/fe82fdc0-2f05-11e4-b34e-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/fe82fdc0-2f05-11e4-b34e-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="101543" author="yujian" created="Sun, 14 Dec 2014 08:04:14 +0000"  >&lt;p&gt;More instance on Lustre b2_5 branch:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/b78efc68-82aa-11e4-a4ae-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/b78efc68-82aa-11e4-a4ae-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzw75j:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>11303</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>