<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:52:24 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-5544] Interop 2.5.1&lt;-&gt;2.7 failure on test suite sanity-scrub test_11: error on LL_IOC_LMV_SETSTRIPE &apos;/mnt/lustre/1408415864/mds1&apos; (3): Unknown error 524</title>
                <link>https://jira.whamcloud.com/browse/LU-5544</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for sarah &amp;lt;sarah@whamcloud.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/d5923d3a-2821-11e4-8135-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/d5923d3a-2821-11e4-8135-5254006e85c2&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_11 failed with the following error:&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;(1) Fail to mkdir /mnt/lustre/1408415864/mds1&lt;/p&gt;&lt;/blockquote&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;CMD: onyx-58vm5,onyx-58vm6.onyx.hpdd.intel.com mount | grep /mnt/lustre&apos; &apos;
CMD: onyx-58vm5 PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/utils/gss:/usr/lib64/lustre/utils:/usr/lib64/openmpi/bin:/usr/bin:/bin:/sbin:/usr/sbin::/sbin:/bin:/usr/sbin: NAME=autotest_config sh rpc.sh set_default_debug \&quot;vfstrace rpctrace dlmtrace neterror ha config ioctl super\&quot; \&quot;all -lnet -lnd -pinger\&quot; 4 
CMD: onyx-58vm3 lctl get_param -n timeout
CMD: onyx-58vm3 lctl dl | grep &apos; IN osc &apos; 2&amp;gt;/dev/null | wc -l
CMD: onyx-58vm6.onyx.hpdd.intel.com lctl dl | grep &apos; IN osc &apos; 2&amp;gt;/dev/null | wc -l
CMD: onyx-58vm3 /usr/sbin/lctl get_param -n version
CMD: onyx-58vm3 /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-MDT0000.quota_slave.enabled
CMD: onyx-58vm4 /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-OST0000.quota_slave.enabled
CMD: onyx-58vm3 /usr/sbin/lctl conf_param lustre.quota.mdt=ug3
CMD: onyx-58vm3 /usr/sbin/lctl conf_param lustre.quota.ost=ug3
error on LL_IOC_LMV_SETSTRIPE &apos;/mnt/lustre/1408415864/mds1&apos; (3): Unknown error 524
error: mkdir: create stripe dir &apos;/mnt/lustre/1408415864/mds1&apos; failed
 sanity-scrub test_11: @@@@@@ FAIL: (1) Fail to mkdir /mnt/lustre/1408415864/mds1 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment></environment>
        <key id="26165">LU-5544</key>
            <summary>Interop 2.5.1&lt;-&gt;2.7 failure on test suite sanity-scrub test_11: error on LL_IOC_LMV_SETSTRIPE &apos;/mnt/lustre/1408415864/mds1&apos; (3): Unknown error 524</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="5">Cannot Reproduce</resolution>
                                        <assignee username="yong.fan">nasf</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Tue, 26 Aug 2014 01:15:39 +0000</created>
                <updated>Mon, 20 Apr 2015 09:41:49 +0000</updated>
                            <resolved>Mon, 20 Apr 2015 09:41:49 +0000</resolved>
                                    <version>Lustre 2.5.1</version>
                    <version>Lustre 2.7.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>5</watches>
                                                                            <comments>
                            <comment id="92836" author="jlevi" created="Fri, 29 Aug 2014 17:49:21 +0000"  >&lt;p&gt;Fan Yong,&lt;br/&gt;
Can you please comment on this one?&lt;br/&gt;
Thank you!&lt;/p&gt;</comment>
                            <comment id="92906" author="yong.fan" created="Mon, 1 Sep 2014 00:07:40 +0000"  >&lt;p&gt;The bash environment $tdir for sanity-test 11 should be &quot;d11.sanity-scrub&quot;, but according to the log, its name became &quot;1408415864&quot;. That is totally unexpected; it seems the dcache on the client side is broken. In fact, before test 11, there were already some abnormal filenames in the logs that indicated the confused dcache:&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;00000080:00200000:0.0:1408415641.266775:0:16918:0:(file.c:3091:__ll_inode_revalidate_it()) VFS Op:inode=144115188193296385/33554432(ffff88007a12bb38),name=/
00000002:00010000:0.0:1408415641.266781:0:16918:0:(mdc_locks.c:1173:mdc_intent_lock()) (name: ,[0x200000007:0x1:0x0]) in obj [0x200000007:0x1:0x0], intent: lookup flags 00
00010000:00010000:0.0:1408415641.266786:0:16918:0:(ldlm_lock.c:795:ldlm_lock_addref_internal_nolock()) ### ldlm_lock_addref(PR) ns: lustre-MDT0000-mdc-ffff88007a58d800 lock: ffff88007a9dbd40/0xf204e34aa6520700 lrc: 2/1,0 mode: PR/PR res: [0x200000007:0x1:0x0].0 bits 0x11 rrc: 2 type: IBT flags: 0x0 nid: local remote: 0x61a08be236e96d6d expref: -99 pid: 16916 timeout: 0 lvb_type: 0
00010000:00010000:0.0:1408415641.266791:0:16918:0:(ldlm_lock.c:1417:ldlm_lock_match()) ### matched (0 0) ns: lustre-MDT0000-mdc-ffff88007a58d800 lock: ffff88007a9dbd40/0xf204e34aa6520700 lrc: 2/1,0 mode: PR/PR res: [0x200000007:0x1:0x0].0 bits 0x11 rrc: 1 type: IBT flags: 0x0 nid: local remote: 0x61a08be236e96d6d expref: -99 pid: 16916 timeout: 0 lvb_type: 0
00000080:00010000:0.0:1408415641.266798:0:16918:0:(dcache.c:351:ll_lookup_finish_locks()) setting l_data to inode ffff88007a12bb38 (144115188193296385/33554432)
00000080:00010000:0.0:1408415641.266800:0:16918:0:(llite_internal.h:1573:ll_set_lock_data()) setting l_data to inode ffff88007a12bb38 (144115188193296385/33554432) &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; lock 0xf204e34aa6520700
00000080:00010000:0.0:1408415641.266803:0:16918:0:(dcache.c:252:ll_intent_drop_lock()) releasing lock with cookie 0xf204e34aa6520700 from it ffff88007b0bbb88
00010000:00010000:0.0:1408415641.266805:0:16918:0:(ldlm_lock.c:848:ldlm_lock_decref_internal_nolock()) ### ldlm_lock_decref(PR) ns: lustre-MDT0000-mdc-ffff88007a58d800 lock: ffff88007a9dbd40/0xf204e34aa6520700 lrc: 3/1,0 mode: PR/PR res: [0x200000007:0x1:0x0].0 bits 0x11 rrc: 1 type: IBT flags: 0x10000000000000 nid: local remote: 0x61a08be236e96d6d expref: -99 pid: 16916 timeout: 0 lvb_type: 0
00010000:00010000:0.0:1408415641.266810:0:16918:0:(ldlm_lock.c:916:ldlm_lock_decref_internal()) ### add lock into lru list ns: lustre-MDT0000-mdc-ffff88007a58d800 lock: ffff88007a9dbd40/0xf204e34aa6520700 lrc: 2/0,0 mode: PR/PR res: [0x200000007:0x1:0x0].0 bits 0x11 rrc: 1 type: IBT flags: 0x10000000000000 nid: local remote: 0x61a08be236e96d6d expref: -99 pid: 16916 timeout: 0 lvb_type: 0
00000080:00200000:0.0:1408415641.266816:0:16918:0:(file.c:3364:ll_inode_permission()) VFS Op:inode=144115188193296385/33554432(ffff88007a12bb38), inode mode 41ed mask 1
00000080:00200000:0.0:1408415641.266819:0:16918:0:(dcache.c:385:ll_revalidate_it()) VFS Op:name=d9.sanity-scrub,intent=0
00000080:00200000:0.0:1408415641.266821:0:16918:0:(file.c:3364:ll_inode_permission()) VFS Op:inode=144115205255725057/33554436(ffff88007b7f3b78), inode mode 41ed mask 1
00000080:00200000:0.0:1408415641.266823:0:16918:0:(dcache.c:385:ll_revalidate_it()) VFS Op:name=mds1,intent=0
00000080:00200000:0.0:1408415641.266824:0:16918:0:(file.c:3364:ll_inode_permission()) VFS Op:inode=144115205255725058/33554436(ffff88007a12b638), inode mode 41ed mask 1
00000080:00200000:0.0:1408415641.266828:0:16918:0:(file.c:3364:ll_inode_permission()) VFS Op:inode=144115205255725058/33554436(ffff88007a12b638), inode mode 41ed mask 1
00000080:00200000:0.0:1408415641.266832:0:16918:0:(namei.c:527:ll_lookup_it()) VFS Op:name=f9.sanity-scrub7933,dir=144115205255725058/33554436(ffff88007a12b638),intent=open|creat
00000002:00010000:0.0:1408415641.266837:0:16918:0:(mdc_locks.c:1173:mdc_intent_lock()) (name: f9.sanity-scrub7933,[0x200000400:0x1f4a:0x0]) in obj [0x200000400:0x2:0x0], intent: open|creat flags 0100103
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;In sanity-test 9, the filename &quot;name: f9.sanity-scrub7933&quot; is abnormal.&lt;/p&gt;

&lt;p&gt;Currently, I am not sure what caused the bad filename, maybe related with some special client-side patch(es) on Lustre-2.5.2. I will investigate more.&lt;/p&gt;</comment>
                            <comment id="102292" author="yong.fan" created="Wed, 24 Dec 2014 10:45:41 +0000"  >&lt;p&gt;Sarah, have you ever reproduced the same failure with more debug logs recently? Thanks!&lt;/p&gt;</comment>
                            <comment id="112370" author="yong.fan" created="Mon, 20 Apr 2015 09:41:49 +0000"  >&lt;p&gt;Will reopen it when hit it again.&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwukf:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>15446</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>