<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:39:54 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-4125] sanity-hsm test_228 failure: &apos;request on 0x20000040b:0x61:0x0 is not SUCCEED&apos; </title>
                <link>https://jira.whamcloud.com/browse/LU-4125</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;Test results at: &lt;a href=&quot;https://maloo.whamcloud.com/test_sessions/af9dfc14-3834-11e3-8bc4-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sessions/af9dfc14-3834-11e3-8bc4-52540035b04c&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;From the test_log:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;== sanity-hsm test 228: On released file, return extend to FIEMAP. For [cp,tar] --sparse == 08:09:43 (1382108983)
pdsh@c15: c08: ssh exited with exit code 1
Purging archive on c08
Starting copytool agt1 on c08
1+0 records in
1+0 records out
1048576 bytes (1.0 MB) copied, 0.145494 s, 7.2 MB/s
Changed after 0s: from &apos;&apos; to &apos;STARTED&apos;
Waiting 100 secs for update
Changed after 1s: from &apos;STARTED&apos; to &apos;FAILED&apos;
Waiting 90 secs for update
Waiting 80 secs for update
Waiting 70 secs for update
Waiting 60 secs for update
Waiting 50 secs for update
Changed after 60s: from &apos;FAILED&apos; to &apos;&apos;
Waiting 40 secs for update
Waiting 30 secs for update
Waiting 20 secs for update
Waiting 10 secs for update
Update not seen after 100s: wanted &apos;SUCCEED&apos; got &apos;&apos;
 sanity-hsm test_228: @@@@@@ FAIL: request on 0x20000040b:0x61:0x0 is not SUCCEED 
  Trace dump:
  = /usr/lib64/lustre/tests/test-framework.sh:4264:error_noexit()
  = /usr/lib64/lustre/tests/test-framework.sh:4291:error()
  = /usr/lib64/lustre/tests/sanity-hsm.sh:474:wait_request_state()
  = /usr/lib64/lustre/tests/sanity-hsm.sh:3085:test_228()
  = /usr/lib64/lustre/tests/test-framework.sh:4530:run_one()
  = /usr/lib64/lustre/tests/test-framework.sh:4563:run_one_logged()
  = /usr/lib64/lustre/tests/test-framework.sh:4433:run_test()
  = /usr/lib64/lustre/tests/sanity-hsm.sh:3111:main()
Dumping lctl log to /tmp/test_logs/2013-10-18/074316/sanity-hsm.test_228.*.1382109102.log
Copytool is stopped on c08
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;From the copytool_log on c08:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;lhsmtool_posix[26046]: action=0 src=(null) dst=(null) mount_point=/lustre/scratch
lhsmtool_posix[26047]: waiting for message from kernel
lhsmtool_posix[26047]: copytool fs=scratch archive#=2 item_count=1
lhsmtool_posix[26047]: waiting for message from kernel
lhsmtool_posix[26048]: &apos;[0x20000040b:0x61:0x0]&apos; action ARCHIVE reclen 72, cookie=0x52614df7
lhsmtool_posix[26048]: processing file &apos;f.sanity-hsm.228&apos;
lhsmtool_posix[26048]: archiving &apos;/lustre/scratch/.lustre/fid/0x20000040b:0x61:0x0&apos; to &apos;/lustre/archive/0061/0000/040b/0000/0002/0000/0x20000040b:0x61:0x0_tmp&apos;
lhsmtool_posix[26048]: saving stripe info of &apos;/lustre/scratch/.lustre/fid/0x20000040b:0x61:0x0&apos; in /lustre/archive/0061/0000/040b/0000/0002/0000/0x20000040b:0x61:0x0_tmp.lov
lhsmtool_posix[26048]: going to copy data from &apos;/lustre/scratch/.lustre/fid/0x20000040b:0x61:0x0&apos; to &apos;/lustre/archive/0061/0000/040b/0000/0002/0000/0x20000040b:0x61:0x0_tmp&apos;
lhsmtool_posix[26048]: progress ioctl for copy &apos;/lustre/scratch/.lustre/fid/0x20000040b:0x61:0x0&apos;-&amp;gt;&apos;/lustre/archive/0061/0000/040b/0000/0002/0000/0x20000040b:0x61:0x0_tmp&apos; failed: No such file or directory (2)
lhsmtool_posix[26048]: data copy failed from &apos;/lustre/scratch/.lustre/fid/0x20000040b:0x61:0x0&apos; to &apos;/lustre/archive/0061/0000/040b/0000/0002/0000/0x20000040b:0x61:0x0_tmp&apos;: No such file or directory (2)
lhsmtool_posix[26048]: Action completed, notifying coordinator cookie=0x52614df7, FID=[0x20000040b:0x61:0x0], hp_flags=0 err=2
lhsmtool_posix[26048]: llapi_hsm_action_end() on &apos;/lustre/scratch/.lustre/fid/0x20000040b:0x61:0x0&apos; failed: No such file or directory (2)
exiting: Interrupt
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;From dmesg on the MDS (c03):&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Lustre: DEBUG MARKER: == sanity-hsm test 228: On released file, return extend to FIEMAP. For [cp,tar] --sparse == 08:09:43 (1382108983)
LustreError: 7622:0:(mdt_coordinator.c:1448:mdt_hsm_update_request_state()) scratch-MDT0000: Cannot find running request for cookie 0x52614df7 on fid=[0x20000040b:0x61:0x0]
LustreError: 7622:0:(mdt_coordinator.c:1448:mdt_hsm_update_request_state()) scratch-MDT0000: Cannot find running request for cookie 0x52614df7 on fid=[0x20000040b:0x61:0x0]
Lustre: DEBUG MARKER: sanity-hsm test_228: @@@@@@ FAIL: request on 0x20000040b:0x61:0x0 is not SUCCEED
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment>Lustre 2.5.0-RC1, el6&lt;br/&gt;
&lt;br/&gt;
OpenSFS cluster with combined MGS/MDS (c03), single OSS (c04) with two OSTs, archive MGS/MDS (c05), archive OST (c06) with two OSTs, archive OST2 (c07) with two OSTs, eight clients; one agent + client(c08), one robinhood/db + client(c09) and others just running as Lustre clients (c10, c11, c12, c13,c14, c15) </environment>
        <key id="21519">LU-4125</key>
            <summary>sanity-hsm test_228 failure: &apos;request on 0x20000040b:0x61:0x0 is not SUCCEED&apos; </summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="3">Duplicate</resolution>
                                        <assignee username="bfaccini">Bruno Faccini</assignee>
                                    <reporter username="jamesanunez">James Nunez</reporter>
                        <labels>
                            <label>HSM</label>
                    </labels>
                <created>Fri, 18 Oct 2013 23:23:55 +0000</created>
                <updated>Tue, 3 Jun 2014 12:52:15 +0000</updated>
                            <resolved>Mon, 31 Mar 2014 22:16:36 +0000</resolved>
                                    <version>Lustre 2.5.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>7</watches>
                                                                            <comments>
                            <comment id="69933" author="bfaccini" created="Fri, 25 Oct 2013 17:00:10 +0000"  >&lt;p&gt;James, did you encounter the same kind of error multiple times during your testing on OpenSFS cluster ?&lt;br/&gt;
BTW, I am asking this because it seems that multiple sanity-hsm sub-tests failed within the same suite which has been prolific in term of errors/pbs and of created/generated tickets !! This may come from a new configuration exposure or may be some external factor ??&#8230;&lt;/p&gt;

&lt;p&gt;These particular lines/errors seen by copytool indicate a real problem during ARCHIVE from sanity-hsm/test_228 :&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;lhsmtool_posix[26048]: progress ioctl for copy &apos;/lustre/scratch/.lustre/fid/0x20000040b:0x61:0x0&apos;-&amp;gt;&apos;/lustre/archive/0061/0000/040b/0000/0002/0000/0x20000040b:0x61:0x0_tmp&apos; failed: No such file or directory (2)
lhsmtool_posix[26048]: data copy failed from &apos;/lustre/scratch/.lustre/fid/0x20000040b:0x61:0x0&apos; to &apos;/lustre/archive/0061/0000/040b/0000/0002/0000/0x20000040b:0x61:0x0_tmp&apos;: No such file or directory (2)
lhsmtool_posix[26048]: Action completed, notifying coordinator cookie=0x52614df7, FID=[0x20000040b:0x61:0x0], hp_flags=0 err=2
lhsmtool_posix[26048]: llapi_hsm_action_end() on &apos;/lustre/scratch/.lustre/fid/0x20000040b:0x61:0x0&apos; failed: No such file or directory (2)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;And it seems to correspond to the following error in the agent/node dmesg :&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Lustre: DEBUG MARKER: == sanity-hsm test 228: On released file, return extend to FIEMAP. For [cp,tar] --sparse == 08:09:43 (1382108983)
LustreError: 11-0: scratch-OST0000-osc-ffff8806309c0400: Communicating with 192.168.2.104@o2ib, operation ost_getattr failed with -12.
Lustre: DEBUG MARKER: sanity-hsm test_228: @@@@@@ FAIL: request on 0x20000040b:0x61:0x0 is not SUCCEED
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;And also to the corresponding error in OSS dmesg :&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Lustre: DEBUG MARKER: == sanity-hsm test 228: On released file, return extend to FIEMAP. For [cp,tar] --sparse == 08:09:43 (1382108983)
LustreError: 11216:0:(ldlm_resource.c:1188:ldlm_resource_get()) scratch-OST0000: lvbo_init failed for resource 0x18c:0x0: rc = -2
Lustre: DEBUG MARKER: sanity-hsm test_228: @@@@@@ FAIL: request on 0x20000040b:0x61:0x0 is not SUCCEED
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
</comment>
                            <comment id="70620" author="bfaccini" created="Mon, 4 Nov 2013 16:04:08 +0000"  >&lt;p&gt;According to Maloo/auto-tests stats, this problem has not re-occurred yet. What about on your side James ??&lt;/p&gt;

</comment>
                            <comment id="70668" author="jamesanunez" created="Mon, 4 Nov 2013 23:03:22 +0000"  >&lt;p&gt;I just installed the latest master build and ran sanity-hsm and only tests 90 and 300 failed. I then ran sanity-hsm test 228 alone and it failed with the same error message as above. So, I can get this test to fail, but not consistently.&lt;/p&gt;</comment>
                            <comment id="72606" author="bfaccini" created="Mon, 2 Dec 2013 16:28:17 +0000"  >&lt;p&gt;James,&lt;br/&gt;
I am sorry but I am unable to reproduce this actually. I ran test_228 in a loop on different local configs but no failure at all.&lt;br/&gt;
Do you still see this ? And then can you better describe the platform/configuration you use when you trigger the problem?? In the &quot;Environment&quot; section of this ticket, you indicate &quot;OpenSFS cluster&quot;, did you only see the problem there ?&lt;/p&gt;</comment>
                            <comment id="72755" author="jamesanunez" created="Tue, 3 Dec 2013 22:25:35 +0000"  >&lt;p&gt;Bruno, &lt;/p&gt;

&lt;p&gt;I just ran sanity-hsm on the OpenSFS cluster and was able to reproduce the test 228 sanity-hsm failure. Here&apos;s the set up:&lt;/p&gt;

&lt;p&gt;Lustre file system with combined MDS/MGS, single OSS with two OSTs&lt;br/&gt;
archive Lustre file system with combined MDS/MGS, single OSS with two OSTs&lt;br/&gt;
One HSM agent + Lustre client (c12)  - mounts both file systems&lt;br/&gt;
One Robinhood DB node running Robinhood 2.5.0 + Lustre client(c14) - does not mount archive file system&lt;br/&gt;
One additional Lustre client (c15) - this is where I ran the tests, does not mount archive file system&lt;/p&gt;

&lt;p&gt;Using Lustre:&lt;br/&gt;
lustre: 2.5.51&lt;br/&gt;
kernel: patchless_client&lt;br/&gt;
build:  jenkins-arch=x86_64,build_type=client,distro=el6,ib_stack=inkernel-1790-g645d1e9-PRISTINE-2.6.32-358.23.2.el6.x86_64&lt;/p&gt;

&lt;p&gt;First I ran the whole sanity-hsm suite of tests and got one failure:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;./auster -v -k -f ncli sanity-hsm
...
== sanity-hsm test complete, duration 1970 sec == 10:47:42 (1386096462)
sanity-hsm: FAIL: test_300 hsm_control state is not &apos;stopped&apos; on mds1
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;I then ran sanity-hsm again and got several failures. I did nothing to the nodes in between these two runs of sanity-hsm:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;./auster -v -k -f ncli sanity-hsm
...
== sanity-hsm test complete, duration 2117 sec == 11:32:11 (1386099131)
sanity-hsm: FAIL: test_58 truncate 3158 does not trig restore, state = 
sanity-hsm: FAIL: test_228 request on 0x200000406:0x63:0x0 is not SUCCEED
sanity-hsm: FAIL: test_300 hsm_control state is not &apos;stopped&apos; on mds1
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;There may be an issue with some file left behind in the file system or archive that is impacting test 228. I say this because, in my limited testing, test 228 doesn&apos;t fail on the first run of sanity-hsm, only after a full run of sanity-hsm completed. Maybe something with test 300 failing influences test 228? Does test 300 fail for you?&lt;/p&gt;

&lt;p&gt;Keeping everything the same, I just changed to using c12, c13 and c15 as clients because I didn&apos;t want to run anything on the DB node. When I made that change, the failures were the same; I still get test 300 failing on the first pass of sanity-hsm and test 228 and 300 failing the second run of sanity-hsm. Test 58 did not fail this time. Running test 228 alone after these two sanity-hsm runs fails with the same error message.*&lt;/p&gt;

&lt;p&gt;I have not tried to reproduce this error on any other cluster. I&apos;ll leave the nodes alone for a while and can provide more configuration information if you tell me what you need.&lt;/p&gt;

&lt;p&gt;I&apos;m happy to try some other configuration, try some other tests/test sequence or let you get on the nodes to look around. &lt;/p&gt;

&lt;p&gt;I&apos;ve added the following to cfg/local.sh:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;AGTCOUNT=1
AGTDEV1=&lt;span class=&quot;code-quote&quot;&gt;&quot;/lustre/archive&quot;&lt;/span&gt;
agt1_HOST=&lt;span class=&quot;code-quote&quot;&gt;&quot;c12&quot;&lt;/span&gt;
HSMTOOL_VERBOSE=&lt;span class=&quot;code-quote&quot;&gt;&quot;-vvvvvv&quot;&lt;/span&gt;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;ul&gt;
	&lt;li&gt;Note: Until &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2524&quot; title=&quot;Tests regressions: tests interrelation introduced.&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2524&quot;&gt;&lt;del&gt;LU-2524&lt;/del&gt;&lt;/a&gt; lands, the mkdir in test 228 needs a -p when run alone. I&apos;ll open a ticket for this.&lt;/li&gt;
&lt;/ul&gt;
</comment>
                            <comment id="72787" author="bfaccini" created="Wed, 4 Dec 2013 08:19:38 +0000"  >&lt;p&gt;Hello James,&lt;br/&gt;
Concerning the test_300 failures for &quot;hsm_control state is not &apos;stopped&apos; on mds1&quot;, I see the tag of the version you run is 2.5.51 and it may not integrate my patch for &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4065&quot; title=&quot;sanity-hsm test_300 failure: &amp;#39;cdt state is not stopped&amp;#39; &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4065&quot;&gt;&lt;del&gt;LU-4065&lt;/del&gt;&lt;/a&gt; which should fix this problem and has landed recently ?? Can you check/test with latest master ??&lt;/p&gt;

&lt;p&gt;Also it is true that when I run sanity-hsm full/sub-tests in a loop, I always clear Lustre and archive file-systems between loops since I also found before that left-over files/archives can impact next runs/loops &#8230;&lt;/p&gt;
</comment>
                            <comment id="72808" author="jamesanunez" created="Wed, 4 Dec 2013 15:28:40 +0000"  >&lt;p&gt;The testing reported above was run with the latest master at the time of setting up the nodes; build #1790. The patch for &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4065&quot; title=&quot;sanity-hsm test_300 failure: &amp;#39;cdt state is not stopped&amp;#39; &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4065&quot;&gt;&lt;del&gt;LU-4065&lt;/del&gt;&lt;/a&gt; was included in build #1765. So, I was testing with this patch.&lt;/p&gt;</comment>
                            <comment id="72872" author="jamesanunez" created="Thu, 5 Dec 2013 03:52:26 +0000"  >&lt;p&gt;I just uploaded some results for sanity-hsm test 228 failing on the OpenSFS cluster at &lt;a href=&quot;https://maloo.whamcloud.com/test_sessions/643460bc-5d4e-11e3-956b-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sessions/643460bc-5d4e-11e3-956b-52540035b04c&lt;/a&gt; . I don&apos;t know if there is any new information here; a quick review of the logs look the same as what was posted previously. &lt;/p&gt;

&lt;p&gt;The node setup is the same as described in my comment above from Dec 03 and these sanity-hsm results were from the second run of sanity-hsm; one run where only test 300 failed and then the second run, the results above, where 300 and 228 failed. I then ran test 228 alone and it failed. I then cleaned out the archive and test 228 passed. Even when it passes, test 228 doesn&apos;t clean up after itself. So, I just ran test 228 again, with something in the archive and it failed. Clean out the archive again and test 228 passes. &lt;/p&gt;

&lt;p&gt;With some more digging, it looks like the test fails when the shadow directory is in the archive when test 228 starts. The key here is what is in the shadow directory. By running test 228 with the /lustre/archive/shadow/&amp;lt;file name that test 228 is writing to&amp;gt;, the test fails. If there is no shadow directory or if file in the shadow directory is not the same name as the one test 228 is trying to write to, test 228 passes.&lt;/p&gt;</comment>
                            <comment id="73109" author="bfaccini" created="Mon, 9 Dec 2013 18:21:12 +0000"  >&lt;p&gt;James, thanks for your searches and comment!&lt;br/&gt;
Hummm, the orphan entry in shadow directory is a good start-point in order to find the cause of the failed HSM Archive op. I will investigate in this direction.&lt;/p&gt;</comment>
                            <comment id="73360" author="bfaccini" created="Thu, 12 Dec 2013 14:30:31 +0000"  >&lt;p&gt;Also I wonder if the patch you pushed for &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4343&quot; title=&quot;sanity-hsm test_228 failure: FAIL: tar failed&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4343&quot;&gt;&lt;del&gt;LU-4343&lt;/del&gt;&lt;/a&gt; (an other issue with test_228!), and where you clean files being left actually by test_228, could also be a good work-around for this ticket?&lt;/p&gt;</comment>
                            <comment id="79059" author="bfaccini" created="Tue, 11 Mar 2014 23:06:31 +0000"  >&lt;p&gt;James, &lt;/p&gt;

&lt;p&gt;I did some analysis of the 24 sanity-hsm/test_228 failures in auto-tests since mid-december and I did not find any that could be linked to the situation described in this ticket. In fact these auto-tests failures occurred after almost all the preceding subtests (starting with test_8 !!) in sanity-hsm already failed for the same reason, like if any ARCHIVE request failed.&lt;/p&gt;

&lt;p&gt;So again, I still strongly suspect that the cleanup you introduced as part of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4343&quot; title=&quot;sanity-hsm test_228 failure: FAIL: tar failed&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4343&quot;&gt;&lt;del&gt;LU-4343&lt;/del&gt;&lt;/a&gt; should have fixed the issue for this ticket.&lt;/p&gt;

&lt;p&gt;What do you think ??&lt;/p&gt;</comment>
                            <comment id="79148" author="jamesanunez" created="Wed, 12 Mar 2014 17:07:13 +0000"  >&lt;p&gt;Bruno, &lt;/p&gt;

&lt;p&gt;I&apos;m testing with 2.5.1-RC3 running sanity-hsm on the OpenSFS cluster. I am not able to recreate this problem. Test 228 succeeds running the full sanity-hsm suite and running just test 228 alone. &lt;/p&gt;

&lt;p&gt;James&lt;/p&gt;</comment>
                            <comment id="79983" author="bfaccini" created="Fri, 21 Mar 2014 12:25:06 +0000"  >&lt;p&gt;Ok so, do you agree if we close this ticket as a dup of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4343&quot; title=&quot;sanity-hsm test_228 failure: FAIL: tar failed&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4343&quot;&gt;&lt;del&gt;LU-4343&lt;/del&gt;&lt;/a&gt; ?&lt;/p&gt;</comment>
                            <comment id="79999" author="jamesanunez" created="Fri, 21 Mar 2014 15:17:16 +0000"  >&lt;p&gt;Since I can&apos;t reproduce anymore, I&apos;m fine with closing this ticket.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                                                <inwardlinks description="is duplicated by">
                                        <issuelink>
            <issuekey id="22318">LU-4343</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzw687:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>11134</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>