<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:07:48 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-7313] sanity-hsm test_404 test failed: LustreError: 11377:0:(fld_request.c:489:fld_client_lookup()) ASSERTION( env != ((void *)0) ) failed</title>
                <link>https://jira.whamcloud.com/browse/LU-7313</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;Server 2.7.61&lt;br/&gt;
Client 2.7.61&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;
404.console.fre0304.log
LustreError: 11377:0:(fld_request.c:489:fld_client_lookup()) ASSERTION( env != ((void *)0) ) failed: 
LustreError: 11377:0:(fld_request.c:489:fld_client_lookup()) LBUG

stdout.log
 sanity-hsm test_404: @@@@@@ FAIL: request on 0x200000405:0x1:0x0 is not SUCCEED on mds1 
  Trace dump:
  = /usr/lib64/lustre/tests/test-framework.sh:4784:error_noexit()
  = /usr/lib64/lustre/tests/test-framework.sh:4815:error()
  = /usr/lib64/lustre/tests/sanity-hsm.sh:719:wait_request_state()
  = /usr/lib64/lustre/tests/sanity-hsm.sh:4495:test_404()
  = /usr/lib64/lustre/tests/test-framework.sh:5062:run_one()
  = /usr/lib64/lustre/tests/test-framework.sh:5099:run_one_logged()
  = /usr/lib64/lustre/tests/test-framework.sh:4916:run_test()
  = /usr/lib64/lustre/tests/sanity-hsm.sh:4505:main()
Dumping lctl log to /tmp/test_logs/1445245656/sanity-hsm.test_404.*.1445245912.log
fre0304: open /proc/sys/lnet/dump_kernel failed: No such file or directory
fre0304: open(dump_kernel) failed: No such file or directory
fre0304: Warning: Permanently added &apos;fre0303,192.168.103.3&apos; (RSA) to the list of known hosts.
fre0301: Warning: Permanently added &apos;fre0303,192.168.103.3&apos; (RSA) to the list of known hosts.
fre0302: Warning: Permanently added &apos;fre0303,192.168.103.3&apos; (RSA) to the list of known hosts.
FAIL 404 (227s)
sanity-hsm: FAIL: test_404 request on 0x200000405:0x1:0x0 is not SUCCEED on mds1
Stopping clients: fre0303,fre0304 /mnt/lustre2 (opts:)
Stopping client fre0303 /mnt/lustre2 opts:


stderr.log
Using TIMEOUT=20
running as uid/gid/euid/egid 500/500/500/500, groups:
 [touch] [/mnt/lustre/d0_runas_test/f22269]
excepting tests: 34 35 36
pdsh@fre0303: fre0304: ssh exited with exit code 1
== sanity-hsm test complete, duration 257 sec == 09:11:53 (1445245913)



&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment>4 node setup ( MDS / OSS / 2 Clients), DNE, Single MDS&lt;br/&gt;
MDSCOUNT=2 OSTCOUNT=2&lt;br/&gt;
</environment>
        <key id="32695">LU-7313</key>
            <summary>sanity-hsm test_404 test failed: LustreError: 11377:0:(fld_request.c:489:fld_client_lookup()) ASSERTION( env != ((void *)0) ) failed</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="5">Cannot Reproduce</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="parinay">parinay v kondekar</reporter>
                        <labels>
                    </labels>
                <created>Mon, 19 Oct 2015 11:49:59 +0000</created>
                <updated>Mon, 7 Dec 2015 13:45:52 +0000</updated>
                            <resolved>Mon, 7 Dec 2015 13:45:52 +0000</resolved>
                                    <version>Lustre 2.8.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>2</watches>
                                                                            <comments>
                            <comment id="131041" author="adilger" created="Wed, 21 Oct 2015 19:16:29 +0000"  >&lt;p&gt;Could you please provide the stack trace for the failing thread.  I don&apos;t think we can use the vmcore unless we have the exact kernel build and modules available, but it isn&apos;t mentioned if you are using our build, or for which kernel/distro/arch it is.  &lt;/p&gt;

&lt;p&gt;Are you doing anything different in your testing or configuration to trigger this?  We haven&apos;t hit anything similar in our testing.&lt;/p&gt;</comment>
                            <comment id="131507" author="parinay" created="Mon, 26 Oct 2015 10:32:29 +0000"  >&lt;p&gt;My bad, apologies for incomplete info&lt;/p&gt;

&lt;p&gt;Here are the details&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;491 &amp;lt;4&amp;gt;Lustre: DEBUG MARKER: == sanity-hsm test 404: Inactive MDT does not block requests for active MDTs == 09:08:05 (1445245685)
492 &amp;lt;4&amp;gt;Lustre: setting import lustre-MDT0001_UUID INACTIVE by administrator request
493 &amp;lt;4&amp;gt;Lustre: Skipped 1 previous similar message
494 &amp;lt;0&amp;gt;LustreError: 11377:0:(fld_request.c:489:fld_client_lookup()) ASSERTION( env != ((void *)0) ) failed:
495 &amp;lt;0&amp;gt;LustreError: 11377:0:(fld_request.c:489:fld_client_lookup()) LBUG
496 &amp;lt;4&amp;gt;Pid: 11377, comm: lhsmtool_posix
497 &amp;lt;4&amp;gt;
498 &amp;lt;4&amp;gt;Call Trace:
499 &amp;lt;4&amp;gt; [&amp;lt;ffffffffa02f3875&amp;gt;] libcfs_debug_dumpstack+0x55/0x80 [libcfs]
500 &amp;lt;4&amp;gt; [&amp;lt;ffffffffa02f3e77&amp;gt;] lbug_with_loc+0x47/0xb0 [libcfs]
501 &amp;lt;4&amp;gt; [&amp;lt;ffffffffa08abd8b&amp;gt;] fld_client_lookup+0x47b/0x4e0 [fld]
502 &amp;lt;4&amp;gt; [&amp;lt;ffffffffa08df2e1&amp;gt;] lmv_fld_lookup+0xf1/0x440 [lmv]
503 &amp;lt;4&amp;gt; [&amp;lt;ffffffffa08d9eda&amp;gt;] lmv_iocontrol+0x11fa/0x3230 [lmv]
504 &amp;lt;4&amp;gt; [&amp;lt;ffffffffa02f327b&amp;gt;] ? cfs_set_ptldebug_header+0x2b/0xc0 [libcfs]
505 &amp;lt;4&amp;gt; [&amp;lt;ffffffffa02ff523&amp;gt;] ? libcfs_debug_vmsg2+0x5e3/0xbe0 [libcfs]
506 &amp;lt;4&amp;gt; [&amp;lt;ffffffff8116fe9c&amp;gt;] ? __kmalloc+0x20c/0x220
507 &amp;lt;4&amp;gt; [&amp;lt;ffffffffa09ce9bb&amp;gt;] ll_fid2path+0x3fb/0x870 [lustre]
508 &amp;lt;4&amp;gt; [&amp;lt;ffffffffa09b40fc&amp;gt;] ll_dir_ioctl+0x135c/0x7440 [lustre]
509 &amp;lt;4&amp;gt; [&amp;lt;ffffffffa0a0666c&amp;gt;] ? ll_authorize_statahead+0x2c/0xc0 [lustre]
510 &amp;lt;4&amp;gt; [&amp;lt;ffffffffa09cb823&amp;gt;] ? ll_file_open+0x5b3/0xca0 [lustre]
511 &amp;lt;4&amp;gt; [&amp;lt;ffffffffa063d740&amp;gt;] ? ptlrpc_req_finished+0x10/0x20 [ptlrpc]
512 &amp;lt;4&amp;gt; [&amp;lt;ffffffffa09c48bd&amp;gt;] ? __ll_inode_revalidate+0x1bd/0xc60 [lustre]
513 &amp;lt;4&amp;gt; [&amp;lt;ffffffff81196643&amp;gt;] ? generic_permission+0x23/0xb0
514 &amp;lt;4&amp;gt; [&amp;lt;ffffffffa09aeb40&amp;gt;] ? ll_dir_open+0x0/0xf0 [lustre]
515 &amp;lt;4&amp;gt; [&amp;lt;ffffffffa09aeb40&amp;gt;] ? ll_dir_open+0x0/0xf0 [lustre]
516 &amp;lt;4&amp;gt; [&amp;lt;ffffffff8118639f&amp;gt;] ? __dentry_open+0x23f/0x360
517 &amp;lt;4&amp;gt; [&amp;lt;ffffffff812284cf&amp;gt;] ? security_inode_permission+0x1f/0x30
518 &amp;lt;4&amp;gt; [&amp;lt;ffffffff811865d4&amp;gt;] ? nameidata_to_filp+0x54/0x70
519 &amp;lt;4&amp;gt; [&amp;lt;ffffffff8119c31a&amp;gt;] ? do_filp_open+0x6ea/0xd20
520 &amp;lt;4&amp;gt; [&amp;lt;ffffffff8104fa68&amp;gt;] ? flush_tlb_others_ipi+0x128/0x130
521 &amp;lt;4&amp;gt; [&amp;lt;ffffffff8119e972&amp;gt;] vfs_ioctl+0x22/0xa0
522 &amp;lt;4&amp;gt; [&amp;lt;ffffffff8119eb14&amp;gt;] do_vfs_ioctl+0x84/0x580
523 &amp;lt;4&amp;gt; [&amp;lt;ffffffff81196dd6&amp;gt;] ? final_putname+0x26/0x50
524 &amp;lt;4&amp;gt; [&amp;lt;ffffffff8119f091&amp;gt;] sys_ioctl+0x81/0xa0
525 &amp;lt;4&amp;gt; [&amp;lt;ffffffff810e202e&amp;gt;] ? __audit_syscall_exit+0x25e/0x290
526 &amp;lt;4&amp;gt; [&amp;lt;ffffffff8100b072&amp;gt;] system_call_fastpath+0x16/0x1b
527 &amp;lt;4&amp;gt;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;ul class=&quot;alternate&quot; type=&quot;square&quot;&gt;
	&lt;li&gt;&lt;/li&gt;
&lt;/ul&gt;


&lt;p&gt;Lustre: Build Version: 2.7.61-gef63c03-PRISTINE-2.6.32-431.29.2.el6&lt;/p&gt;
</comment>
                            <comment id="131510" author="parinay" created="Mon, 26 Oct 2015 10:42:42 +0000"  >&lt;ul class=&quot;alternate&quot; type=&quot;square&quot;&gt;
	&lt;li&gt;&amp;gt; Are you doing anything different in your testing or configuration to trigger this? We haven&apos;t hit anything similar in our testing.&lt;br/&gt;
Its 4 node, DNE setup.&lt;/li&gt;
&lt;/ul&gt;



&lt;ul class=&quot;alternate&quot; type=&quot;square&quot;&gt;
	&lt;li&gt;cmd line&lt;/li&gt;
&lt;/ul&gt;



&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;SLOW=YES NAME=ncli  NETTYPE=tcp mds1_HOST=fre0301 MDSDEV1=/dev/vdb mds_HOST=fre0301 MDSDEV=/dev/vdb mds2_HOST=fre0301 MDSDEV2=/dev/vdc MDSCOUNT=2 ost1_HOST=fre0302 OSTDEV1=/dev/vdb ost2_HOST=fre0302 OSTDEV2=/dev/vdc OSTCOUNT=2 CLIENTS=fre0303 RCLIENTS=&lt;span class=&quot;code-quote&quot;&gt;&quot;fre0304&quot;&lt;/span&gt;  DIR=/mnt/lustre PDSH=&lt;span class=&quot;code-quote&quot;&gt;&quot;/usr/bin/pdsh -R ssh -S -w &quot;&lt;/span&gt; ONLY=404 MDS_MOUNT_OPTS=&lt;span class=&quot;code-quote&quot;&gt;&quot;-o rw,user_xattr&quot;&lt;/span&gt; OST_MOUNT_OPTS=&lt;span class=&quot;code-quote&quot;&gt;&quot;-o user_xattr&quot;&lt;/span&gt; MDSSIZE=0 OSTSIZE=0 ENABLE_QUOTA=&lt;span class=&quot;code-quote&quot;&gt;&quot;yes&quot;&lt;/span&gt; MDSJOURNALSIZE=&lt;span class=&quot;code-quote&quot;&gt;&quot;22&quot;&lt;/span&gt; MAXFREE=&lt;span class=&quot;code-quote&quot;&gt;&quot;1400000&quot;&lt;/span&gt; mdtest_nFiles=&lt;span class=&quot;code-quote&quot;&gt;&quot;50000&quot;&lt;/span&gt; mdtest_iteration=&lt;span class=&quot;code-quote&quot;&gt;&quot;5&quot;&lt;/span&gt;  SHARED_DIRECTORY=&lt;span class=&quot;code-quote&quot;&gt;&quot;/shared/fremont/test-results/xperior-custom/914&lt;span class=&quot;code-comment&quot;&gt;//quad3-quartet-1/shared-dir//sanity-hsm&quot;&lt;/span&gt;  /usr/lib64/lustre/tests/sanity-hsm.sh 2&amp;gt;     /&lt;span class=&quot;code-keyword&quot;&gt;var&lt;/span&gt;/log/xperior/test_stderr.166789.log 1&amp;gt;  /&lt;span class=&quot;code-keyword&quot;&gt;var&lt;/span&gt;/log/xperior/test_stdout.166789.log&lt;/span&gt;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Hope this helps. Let me know, if anything more is needed.&lt;/p&gt;</comment>
                            <comment id="135369" author="parinay" created="Mon, 7 Dec 2015 08:33:07 +0000"  >&lt;p&gt;Just realized that the client was not running &quot;patchless&quot; client RPMs. Re-test (MULTIRUN=10) with patchless clients RPMS on client, passed the test. The issue can be closed.&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;</comment>
                            <comment id="135371" author="parinay" created="Mon, 7 Dec 2015 09:06:55 +0000"  >&lt;p&gt;sorry ran a wrong test. sanity-hsm/test_404 rerun in progress. Ignore earlier comment. Thanks&lt;/p&gt;</comment>
                            <comment id="135377" author="parinay" created="Mon, 7 Dec 2015 11:23:52 +0000"  >&lt;p&gt;It&apos;s observed during re-run that, as the clients are running with patchless client RPMs, the ASSERT &quot;ASSERTION( env != ((void *)0) ) failed&quot; is not reproducible. The issue can be closed.&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                            <attachment id="19325" name="404.lctl.tgz" size="715700" author="parinay" created="Mon, 19 Oct 2015 11:49:59 +0000"/>
                            <attachment id="19442" name="dmesg.txt" size="27850" author="parinay" created="Mon, 26 Oct 2015 10:34:00 +0000"/>
                            <attachment id="19441" name="lustre.log" size="1741884" author="parinay" created="Mon, 26 Oct 2015 10:34:00 +0000"/>
                            <attachment id="19326" name="vmcore" size="199" author="parinay" created="Mon, 19 Oct 2015 12:04:43 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzxqrb:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>