<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:54:43 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-5810] sanity: rm: cannot remove `/mnt/lustre/d0.tar-shadow-23vm5/etc/init.d/rc3.d&apos;: Directory not empty</title>
                <link>https://jira.whamcloud.com/browse/LU-5810</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for John Hammond &amp;lt;john.hammond@intel.com&amp;gt;&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;-----============= acceptance-small: sanity ============----- Sat Oct 25 17:04:33 UTC 2014
Running: bash /usr/lib64/lustre/tests/sanity.sh
== sanity test complete, duration -o sec == 17:04:34 (1414256674)
CMD: shadow-23vm10.shadow.whamcloud.com,shadow-23vm9 PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/utils/gss:/usr/lib64/lustre/utils:/usr/lib64/openmpi/bin:/usr/bin:/bin:/usr/sbin:/sbin::/sbin:/sbin:/bin:/usr/sbin: NAME=autotest_config sh rpc.sh check_config_client /mnt/lustre 
shadow-23vm9: Checking config lustre mounted on /mnt/lustre
shadow-23vm10: Checking config lustre mounted on /mnt/lustre
Checking servers environments
CMD: shadow-23vm11 running=\$(grep -c /mnt/ost1&apos; &apos; /proc/mounts);
mpts=\$(mount | grep -c /mnt/ost1&apos; &apos;);
if [ \$running -ne \$mpts ]; then
    echo \$(hostname) env are INSANE!;
    exit 1;
fi
...
CMD: shadow-23vm12 lctl get_param -n timeout
Using TIMEOUT=20
CMD: shadow-23vm12 lctl dl | grep &apos; IN osc &apos; 2&amp;gt;/dev/null | wc -l
CMD: shadow-23vm10.shadow.whamcloud.com lctl dl | grep &apos; IN osc &apos; 2&amp;gt;/dev/null | wc -l
disable quota as required
CMD: shadow-23vm11,shadow-23vm12,shadow-23vm8,shadow-23vm9 PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/utils/gss:/usr/lib64/lustre/utils:/usr/lib64/openmpi/bin:/usr/bin:/bin:/usr/sbin:/sbin::/sbin:/sbin:/bin:/usr/sbin: NAME=autotest_config sh rpc.sh set_default_debug \&quot;vfstrace rpctrace dlmtrace neterror ha config ioctl super lfsck\&quot; \&quot;all -lnet -lnd -pinger\&quot; 4 
CMD: shadow-23vm11,shadow-23vm12,shadow-23vm8 /usr/sbin/lctl set_param 				 osd-ldiskfs.track_declares_assert=1 || true
osd-ldiskfs.track_declares_assert=1
osd-ldiskfs.track_declares_assert=1
osd-ldiskfs.track_declares_assert=1
rm: cannot remove `/mnt/lustre/d0.tar-shadow-23vm5/etc/init.d/rc3.d&apos;: Directory not empty
status        script            Total(sec) E(xcluded) S(low) 
------------------------------------------------------------------------------------
test-framework exiting on error
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/6ad49578-5c8e-11e4-b08a-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/6ad49578-5c8e-11e4-b08a-5254006e85c2&lt;/a&gt;.&lt;/p&gt;</description>
                <environment></environment>
        <key id="27324">LU-5810</key>
            <summary>sanity: rm: cannot remove `/mnt/lustre/d0.tar-shadow-23vm5/etc/init.d/rc3.d&apos;: Directory not empty</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="5">Cannot Reproduce</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Mon, 27 Oct 2014 17:18:03 +0000</created>
                <updated>Fri, 11 Sep 2020 23:54:44 +0000</updated>
                            <resolved>Fri, 11 Sep 2020 23:54:44 +0000</resolved>
                                                                        <due></due>
                            <votes>0</votes>
                                    <watches>5</watches>
                                                                            <comments>
                            <comment id="97752" author="adilger" created="Tue, 28 Oct 2014 20:27:24 +0000"  >&lt;p&gt;It is strange that there is a &lt;tt&gt;shadow-23vm5&lt;/tt&gt; directory that is not empty, yet according to the config for the test session, the nodes listed are &lt;tt&gt;shadow-23vm&lt;span class=&quot;error&quot;&gt;&amp;#91;8-12&amp;#93;&lt;/span&gt;&lt;/tt&gt;.  This implies that the shadow-23vm5 node mounted the wrong filesystem for some reason and proceeded to write there.&lt;/p&gt;</comment>
                            <comment id="97753" author="adilger" created="Tue, 28 Oct 2014 20:31:52 +0000"  >&lt;p&gt;A similar configuration problem appeared in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5064&quot; title=&quot;sanity-scrub test_13: ls should fail&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5064&quot;&gt;&lt;del&gt;LU-5064&lt;/del&gt;&lt;/a&gt; and &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5076&quot; title=&quot;Test failure on test suite conf-sanity, subtest test_46a test failed to respond and timed out&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5076&quot;&gt;&lt;del&gt;LU-5076&lt;/del&gt;&lt;/a&gt;, and possibly others, where nodes that were not part of the test configuration were accessing the filesystem.&lt;/p&gt;

&lt;p&gt;They have been marked duplicates of TEI-1993.&lt;/p&gt;</comment>
                            <comment id="97763" author="mdiep" created="Tue, 28 Oct 2014 21:25:20 +0000"  >&lt;p&gt;After researched, I doubt this is a problem where we cross mount.&lt;br/&gt;
The file /mnt/lustre/d0.tar&amp;#45;$hostname comes from a script run_tar.sh, this is only called from recovery&amp;#45;[mds|double|random]-scale.sh. I have checked report around the time of this report (up to a few days) and can not find any shadow&amp;#45;23vm5 report running recovery&amp;#45;[mds|double|random]&amp;#45;scale tests. It&apos;s possible that someone/somescript that accidently login to shadow&amp;#45;23vm5 and run the test and left it behind.&lt;/p&gt;</comment>
                            <comment id="97776" author="adilger" created="Tue, 28 Oct 2014 22:41:11 +0000"  >&lt;p&gt;Can you please check if shadow-23vm5 is reserved for some user job, or if it is some stuck or forgotten process that is still running there?  That also happened with &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5064&quot; title=&quot;sanity-scrub test_13: ls should fail&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5064&quot;&gt;&lt;del&gt;LU-5064&lt;/del&gt;&lt;/a&gt;. &lt;/p&gt;</comment>
                            <comment id="97873" author="mdiep" created="Wed, 29 Oct 2014 18:56:36 +0000"  >&lt;p&gt;shadow-23vm5 has always been in autotest. at the time around the failure shadow-23vm5 wasn&apos;t running recover-mds-scale.&lt;/p&gt;</comment>
                            <comment id="102019" author="jamesanunez" created="Fri, 19 Dec 2014 00:23:25 +0000"  >&lt;p&gt;I&apos;ve experienced a similar problem on the OpenSFS cluster; the test framework can&apos;t remove a directory from a previous test, not another node/VM. If you think this is a different problem, I can open a new ticket for this. &lt;/p&gt;

&lt;p&gt;Results are at &lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/f13ba544-8618-11e4-ac52-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/f13ba544-8618-11e4-ac52-5254006e85c2&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;replay-dual had several tests fail, including 22a and 22c. When replay-vbr starts up, no tests run because of the remove at the top of the script. This remove fails with:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;rm: cannot remove `/lustre/scratch/d22a.replay-dual&apos;: Directory not empty
rm: cannot remove `/lustre/scratch/d22c.replay-dual&apos;: Directory not empty
status        script            Total(sec) E(xcluded) S(low) 
------------------------------------------------------------------------------------
test-framework exiting on error
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;replay-vbr is marked as FAIL with 0/0 subtests passed.&lt;/p&gt;

&lt;p&gt;Then insanity starts and runs 16 tests. The test suite is marked as FAIL with no subtest actually failing, but the remove during the test cleanup must have triggered the failure. In the test logs, we see:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;== insanity test complete, duration 2157 sec == 14:50:03 (1418770203)
rm: cannot remove `/lustre/scratch/d22a.replay-dual&apos;: Directory not empty
rm: cannot remove `/lustre/scratch/d22c.replay-dual&apos;: Directory not empty
 insanity : @@@@@@ FAIL: remove sub-test dirs failed 
  Trace dump:
  = /usr/lib64/lustre/tests/test-framework.sh:4665:error_noexit()
  = /usr/lib64/lustre/tests/test-framework.sh:4696:error()
  = /usr/lib64/lustre/tests/test-framework.sh:4210:check_and_cleanup_lustre()
  = /usr/lib64/lustre/tests/insanity.sh:781:main()
Dumping lctl log to /tmp/test_logs/2014-12-15/220919/insanity..*.1418770204.log
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="152897" author="adilger" created="Thu, 19 May 2016 20:29:45 +0000"  >&lt;p&gt;Debug patch for this:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;LU-5810 tests: add client hostname to lctl mark

Improve debug messages to include the originating hostname.

Signed-off-by: Andreas Dilger &amp;lt;andreas.dilger@intel.com&amp;gt;
Change-Id: I441bf8294c38135276a5a0f0853dbebf4358c563
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="153770" author="gerrit" created="Fri, 27 May 2016 00:57:39 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/13113/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/13113/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5810&quot; title=&quot;sanity: rm: cannot remove `/mnt/lustre/d0.tar-shadow-23vm5/etc/init.d/rc3.d&amp;#39;: Directory not empty&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5810&quot;&gt;&lt;del&gt;LU-5810&lt;/del&gt;&lt;/a&gt; tests: add client hostname to lctl mark&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 9c4156e6fc146a198bb342e28eb246f1076889bd&lt;/p&gt;</comment>
                            <comment id="156341" author="gerrit" created="Tue, 21 Jun 2016 14:56:19 +0000"  >&lt;p&gt;James Nunez (james.a.nunez@intel.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/20894&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/20894&lt;/a&gt;&lt;br/&gt;
Subject: Revert &quot;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5810&quot; title=&quot;sanity: rm: cannot remove `/mnt/lustre/d0.tar-shadow-23vm5/etc/init.d/rc3.d&amp;#39;: Directory not empty&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5810&quot;&gt;&lt;del&gt;LU-5810&lt;/del&gt;&lt;/a&gt; tests: add client hostname to lctl mark&quot;&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: dc25382d26a409c12b79d0c9a82ba5a0fa7c521c&lt;/p&gt;</comment>
                            <comment id="156577" author="gerrit" created="Wed, 22 Jun 2016 21:20:26 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/20894/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/20894/&lt;/a&gt;&lt;br/&gt;
Subject: Revert &quot;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5810&quot; title=&quot;sanity: rm: cannot remove `/mnt/lustre/d0.tar-shadow-23vm5/etc/init.d/rc3.d&amp;#39;: Directory not empty&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5810&quot;&gt;&lt;del&gt;LU-5810&lt;/del&gt;&lt;/a&gt; tests: add client hostname to lctl mark&quot;&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: d700bd76aadb7b3ae8fda27dec1d58723b9b95fe&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                            <outwardlinks description="duplicates">
                                        <issuelink>
            <issuekey id="24725">LU-5064</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwzjr:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>16296</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>