<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:38:20 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-10804] obdfilter-survey test_3a: lprocfs_alloc_obd_stats()) ASSERTION( obd-&gt;obd_proc_entry != ((void *)0) ) failed; LBUG</title>
                <link>https://jira.whamcloud.com/browse/LU-10804</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for elena &amp;lt;c17455@cray.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/5d126b54-2233-11e8-9ec4-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/5d126b54-2233-11e8-9ec4-52540065bddc&lt;/a&gt;&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;[ 3072.347530] LustreError: 30713:0:(lprocfs_status_server.c:647:lprocfs_alloc_obd_stats()) ASSERTION( obd-&amp;gt;obd_proc_entry != ((void *)0) ) failed: 
[ 3072.351782] LustreError: 30713:0:(lprocfs_status_server.c:647:lprocfs_alloc_obd_stats()) LBUG
[ 3072.354022] Pid: 30713, comm: lctl
[ 3072.355821] 
[ 3072.355821] Call Trace:
[ 3072.359137]  [&amp;lt;ffffffffc06917ae&amp;gt;] libcfs_call_trace+0x4e/0x60 [libcfs]
[ 3072.361177]  [&amp;lt;ffffffffc069183c&amp;gt;] lbug_with_loc+0x4c/0xb0 [libcfs]
[ 3072.363336]  [&amp;lt;ffffffffc0cbe06f&amp;gt;] lprocfs_alloc_obd_stats+0x18f/0x200 [obdclass]
[ 3072.365454]  [&amp;lt;ffffffffc0c81730&amp;gt;] ? lprocfs_obd_setup+0x130/0x320 [obdclass]
[ 3072.367602]  [&amp;lt;ffffffffc12b13ad&amp;gt;] echo_srv_device_alloc+0x48d/0x540 [obdecho]
[ 3072.369738]  [&amp;lt;ffffffffc0c91cb4&amp;gt;] obd_setup+0x114/0x2a0 [obdclass]
[ 3072.371892]  [&amp;lt;ffffffffc0c920e8&amp;gt;] class_setup+0x2a8/0x840 [obdclass]
[ 3072.373960]  [&amp;lt;ffffffffc0c9651f&amp;gt;] class_process_config+0x1b3f/0x27e0 [obdclass]
[ 3072.376145]  [&amp;lt;ffffffffc0c86ff9&amp;gt;] ? lprocfs_counter_add+0xf9/0x160 [obdclass]
[ 3072.378240]  [&amp;lt;ffffffffc0c7053a&amp;gt;] class_handle_ioctl+0x56a/0x1df0 [obdclass]
[ 3072.380342]  [&amp;lt;ffffffff8121f7b8&amp;gt;] ? destroy_inode+0x38/0x60
[ 3072.382250]  [&amp;lt;ffffffff812b3ea8&amp;gt;] ? security_capable+0x18/0x20
[ 3072.384178]  [&amp;lt;ffffffffc0c567f2&amp;gt;] obd_class_ioctl+0xd2/0x170 [obdclass]
[ 3072.386089]  [&amp;lt;ffffffff8121730d&amp;gt;] do_vfs_ioctl+0x33d/0x540
[ 3072.387929]  [&amp;lt;ffffffff81224a04&amp;gt;] ? mntput+0x24/0x40
[ 3072.389676]  [&amp;lt;ffffffff81205013&amp;gt;] ? __fput+0x183/0x260
[ 3072.391419]  [&amp;lt;ffffffff812175b1&amp;gt;] SyS_ioctl+0xa1/0xc0
[ 3072.393128]  [&amp;lt;ffffffff816b8930&amp;gt;] ? system_call_after_swapgs+0x15d/0x214
[ 3072.394862]  [&amp;lt;ffffffff816b89fd&amp;gt;] system_call_fastpath+0x16/0x1b
[ 3072.396554]  [&amp;lt;ffffffff816b889d&amp;gt;] ? system_call_after_swapgs+0xca/0x214
[ 3072.398245] 
[ 3072.399550] Kernel panic - not syncing: LBUG
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment></environment>
        <key id="51285">LU-10804</key>
            <summary>obdfilter-survey test_3a: lprocfs_alloc_obd_stats()) ASSERTION( obd-&gt;obd_proc_entry != ((void *)0) ) failed; LBUG</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="tappro">Mikhail Pershin</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Mon, 12 Mar 2018 13:55:26 +0000</created>
                <updated>Sat, 17 Mar 2018 13:20:02 +0000</updated>
                            <resolved>Sat, 17 Mar 2018 13:20:02 +0000</resolved>
                                    <version>Lustre 2.11.0</version>
                                    <fixVersion>Lustre 2.11.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>12</watches>
                                                                            <comments>
                            <comment id="223312" author="spitzcor" created="Mon, 12 Mar 2018 14:14:25 +0000"  >&lt;p&gt;Obdfilter-survey ought to work very reliably. &#160;We should consider this a major issue (or greater).&lt;/p&gt;</comment>
                            <comment id="223365" author="green" created="Mon, 12 Mar 2018 21:07:04 +0000"  >&lt;p&gt;The report referenced is for a gerrit patch that is based on another patch that seems to change relevant functionality. We looked and this sort of failure was not observed in the latest master test results, so the primary suspicion is it&apos;s the patch in question that broke things, esp. since if you look, the baseline patch is &quot;move procfs stuff to debugfs&quot; and the assertion you hit is in procfs-related code.&lt;/p&gt;</comment>
                            <comment id="223468" author="jamesanunez" created="Mon, 12 Mar 2018 23:29:54 +0000"  >&lt;p&gt;I&apos;ve reviewed all obdfilter-survey test 3a hangs over all branches starting from January 1, 2018 to today and I&apos;ve found the following test sessions that have the same assertion as in this ticket:&lt;/p&gt;

&lt;p&gt;2018-03-09 22:59:29 UTC full-patchless - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/67d21086-240d-11e8-b74b-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/67d21086-240d-11e8-b74b-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;2018-03-09 23:53:56 UTC full-patchless - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/6d7aa550-240f-11e8-b3c6-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/6d7aa550-240f-11e8-b3c6-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;2018-03-10 02:13:04 UTC full-patchless - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/8d7b15dc-242e-11e8-b74b-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/8d7b15dc-242e-11e8-b74b-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;2018-03-09 00:54:41 UTC full-patchless - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/7c8f22cc-2348-11e8-9852-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/7c8f22cc-2348-11e8-9852-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;2018-03-08 18:53:42 UTC full-patchless - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/965a62b4-231d-11e8-9852-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/965a62b4-231d-11e8-9852-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;2018-03-08 21:41:26 UTC full-patchless - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/15d7653a-233e-11e8-8d2f-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/15d7653a-233e-11e8-8d2f-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;2018-03-08 22:23:08 UTC full-patchless &#8211; &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/0f8acdfa-2340-11e8-88de-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/0f8acdfa-2340-11e8-88de-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;All are from full test sessions for our patchless Lustre builds/testing.&lt;/p&gt;</comment>
                            <comment id="223474" author="green" created="Tue, 13 Mar 2018 03:44:44 +0000"  >&lt;p&gt;hm, so that&apos;s all after Mar 6th round of landings, I guess.&lt;/p&gt;</comment>
                            <comment id="223500" author="egryaznova" created="Tue, 13 Mar 2018 12:40:36 +0000"  >&lt;p&gt;This was not observed in the latest master test results because of test_3a was not functional, see &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7420&quot; title=&quot; obdfilter-survey.sh test_3a:  tgt_request_handle()) @@@ echo_srv: No target for connected export&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7420&quot;&gt;&lt;del&gt;LU-7420&lt;/del&gt;&lt;/a&gt; :&lt;/p&gt;

&lt;p&gt;&amp;gt; this test passes at Maloo, seems it does just nothing.&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7420&quot; title=&quot; obdfilter-survey.sh test_3a:  tgt_request_handle()) @@@ echo_srv: No target for connected export&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7420&quot;&gt;&lt;del&gt;LU-7420&lt;/del&gt;&lt;/a&gt; patch &lt;a href=&quot;https://review.whamcloud.com/#/c/18443/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/#/c/18443/&lt;/a&gt;&#160;fixes the test to be executed. The reported issue hit with this patch.&lt;/p&gt;</comment>
                            <comment id="223534" author="pjones" created="Tue, 13 Mar 2018 17:11:38 +0000"  >&lt;p&gt;Mike&lt;/p&gt;

&lt;p&gt;Can you please investigate?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="223536" author="adilger" created="Tue, 13 Mar 2018 17:14:20 +0000"  >&lt;p&gt;Looking at the assertion, this appears like it is related to changes in /proc, possibly related to /sysfs changes?  Possibly James already has a patch to fix this part of the code?&lt;/p&gt;</comment>
                            <comment id="223559" author="sarah" created="Tue, 13 Mar 2018 23:23:56 +0000"  >&lt;p&gt;Hit this in interop testing&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/baa94546-2709-11e8-b3c6-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/baa94546-2709-11e8-b3c6-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;

&lt;p&gt;client: 2.10.3&lt;/p&gt;

&lt;p&gt;server: lustre-master tag-2.10.59&lt;/p&gt;

&lt;p&gt;OSS console&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;
[75396.029755] Lustre: DEBUG MARKER: == obdfilter-survey test 3a: Network survey ========================================================== 18:48:29 (1520966909)
[75401.745879] Lustre: lustre-MDT0000-lwp-OST0001: Connection to lustre-MDT0000 (at 10.9.4.73@tcp) was lost; in progress operations using this service will wait for recovery to complete
[75401.749951] Lustre: Skipped 9 previous similar messages
[75410.250117] Lustre: DEBUG MARKER: grep -c /mnt/lustre-ost1&apos; &apos; /proc/mounts
[75410.567342] Lustre: DEBUG MARKER: umount -d -f /mnt/lustre-ost1
[75410.784095] Lustre: server umount lustre-OST0000 complete
[75410.950964] Lustre: DEBUG MARKER: lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
[75411.276952] Lustre: DEBUG MARKER: grep -c /mnt/lustre-ost2&apos; &apos; /proc/mounts
[75411.590521] Lustre: DEBUG MARKER: umount -d -f /mnt/lustre-ost2
[75411.965347] Lustre: DEBUG MARKER: lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
[75412.289128] Lustre: DEBUG MARKER: grep -c /mnt/lustre-ost3&apos; &apos; /proc/mounts
[75412.601354] Lustre: DEBUG MARKER: umount -d -f /mnt/lustre-ost3
[75412.988541] Lustre: DEBUG MARKER: lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
[75413.310862] Lustre: DEBUG MARKER: grep -c /mnt/lustre-ost4&apos; &apos; /proc/mounts
[75413.620037] Lustre: DEBUG MARKER: umount -d -f /mnt/lustre-ost4
[75413.745052] Lustre: 26996:0:(client.c:2100:ptlrpc_expire_one_request()) @@@ Request sent has timed out for slow reply: [sent 1520966920/real 1520966920] req@ffff880037402400 x1594796080200464/t0(0) o400-&amp;gt;MGC10.9.4.73@tcp@10.9.4.73@tcp:26/25 lens 224/224 e 0 to 1 dl 1520966927 ref 1 fl Rpc:XN/0/ffffffff rc 0/-1
[75413.751282] Lustre: 26996:0:(client.c:2100:ptlrpc_expire_one_request()) Skipped 1 previous similar message
[75413.753466] LustreError: 166-1: MGC10.9.4.73@tcp: Connection to MGS (at 10.9.4.73@tcp) was lost; in progress operations using this service will fail
[75413.977797] Lustre: DEBUG MARKER: lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
[75414.299315] Lustre: DEBUG MARKER: grep -c /mnt/lustre-ost5&apos; &apos; /proc/mounts
[75414.606635] Lustre: DEBUG MARKER: umount -d -f /mnt/lustre-ost5
[75414.967344] Lustre: DEBUG MARKER: lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
[75415.289338] Lustre: DEBUG MARKER: grep -c /mnt/lustre-ost6&apos; &apos; /proc/mounts
[75415.598284] Lustre: DEBUG MARKER: umount -d -f /mnt/lustre-ost6
[75415.793961] Lustre: server umount lustre-OST0005 complete
[75415.795939] Lustre: Skipped 4 previous similar messages
[75415.954904] Lustre: DEBUG MARKER: lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
[75416.274434] Lustre: DEBUG MARKER: grep -c /mnt/lustre-ost7&apos; &apos; /proc/mounts
[75416.583237] Lustre: DEBUG MARKER: umount -d -f /mnt/lustre-ost7
[75416.965393] Lustre: DEBUG MARKER: lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
[75419.722863] Lustre: DEBUG MARKER: /usr/sbin/lctl list_nids | grep tcp | cut -f 1 -d @
[75421.172060] LustreError: 14767:0:(lprocfs_status_server.c:647:lprocfs_alloc_obd_stats()) ASSERTION( obd-&amp;gt;obd_proc_entry != ((void *)0) ) failed: 
[75421.176024] LustreError: 14767:0:(lprocfs_status_server.c:647:lprocfs_alloc_obd_stats()) LBUG
[75421.178166] Pid: 14767, comm: lctl
[75421.179941] 
[75421.179941] Call Trace:
[75421.207941] [&amp;lt;ffffffffc06ae7ae&amp;gt;] libcfs_call_trace+0x4e/0x60 [libcfs]
[75421.210086] [&amp;lt;ffffffffc06ae83c&amp;gt;] lbug_with_loc+0x4c/0xb0 [libcfs]
[75421.212244] [&amp;lt;ffffffffc08c867f&amp;gt;] lprocfs_alloc_obd_stats+0x18f/0x200 [obdclass]
[75421.214387] [&amp;lt;ffffffffc088b920&amp;gt;] ? lprocfs_obd_setup+0x130/0x320 [obdclass]
[75421.237295] [&amp;lt;ffffffffc10a8ced&amp;gt;] echo_setup+0x1bd/0x2c0 [obdecho]
[75421.273090] [&amp;lt;ffffffffc0ab6810&amp;gt;] ? ldlm_completion_ast+0x0/0x920 [ptlrpc]
[75421.275072] [&amp;lt;ffffffffc089bfda&amp;gt;] obd_setup+0x18a/0x2b0 [obdclass]
[75421.276950] [&amp;lt;ffffffffc06c0a66&amp;gt;] ? cfs_hash_create+0x366/0xa20 [libcfs]
[75421.278899] [&amp;lt;ffffffffc089c3a8&amp;gt;] class_setup+0x2a8/0x840 [obdclass]
[75421.280725] [&amp;lt;ffffffffc08a081c&amp;gt;] class_process_config+0x1b5c/0x2810 [obdclass]
[75421.282654] [&amp;lt;ffffffffc0891219&amp;gt;] ? lprocfs_counter_add+0xf9/0x160 [obdclass]
[75421.284527] [&amp;lt;ffffffffc087a71a&amp;gt;] class_handle_ioctl+0x56a/0x1de0 [obdclass]
[75421.286438] [&amp;lt;ffffffff8122239b&amp;gt;] ? destroy_inode+0x3b/0x60
[75421.288202] [&amp;lt;ffffffff812b72be&amp;gt;] ? security_capable+0x1e/0x20
[75421.290020] [&amp;lt;ffffffffc08607f2&amp;gt;] obd_class_ioctl+0xd2/0x170 [obdclass]
[75421.291824] [&amp;lt;ffffffff81219e90&amp;gt;] do_vfs_ioctl+0x350/0x560
[75421.293554] [&amp;lt;ffffffff812275e4&amp;gt;] ? mntput+0x24/0x40
[75421.295289] [&amp;lt;ffffffff812079b6&amp;gt;] ? __fput+0x186/0x260
[75421.296973] [&amp;lt;ffffffff8121a141&amp;gt;] SyS_ioctl+0xa1/0xc0
[75421.298680] [&amp;lt;ffffffff816c0655&amp;gt;] ? system_call_after_swapgs+0xa2/0x146
[75421.300454] [&amp;lt;ffffffff816c0715&amp;gt;] system_call_fastpath+0x1c/0x21
[75421.302229] [&amp;lt;ffffffff816c0661&amp;gt;] ? system_call_after_swapgs+0xae/0x146
[75421.304033] 
[75421.305435] Kernel panic - not syncing: LBUG
[75421.306428] CPU: 1 PID: 14767 Comm: lctl Tainted: G OE ------------ 3.10.0-693.21.1.el7_lustre.x86_64 #1
[75421.306428] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2007
[75421.306428] Call Trace:
[75421.306428] [&amp;lt;ffffffff816ae7c8&amp;gt;] dump_stack+0x19/0x1b
[75421.306428] [&amp;lt;ffffffff816a8634&amp;gt;] panic+0xe8/0x21f
[75421.306428] [&amp;lt;ffffffffc06ae854&amp;gt;] lbug_with_loc+0x64/0xb0 [libcfs]
[75421.306428] [&amp;lt;ffffffffc08c867f&amp;gt;] lprocfs_alloc_obd_stats+0x18f/0x200 [obdclass]
[75421.306428] [&amp;lt;ffffffffc088b920&amp;gt;] ? lprocfs_obd_setup+0x130/0x320 [obdclass]
[75421.306428] [&amp;lt;ffffffffc10a8ced&amp;gt;] echo_setup+0x1bd/0x2c0 [obdecho]
[75421.306428] [&amp;lt;ffffffffc0ab6810&amp;gt;] ? ldlm_expired_completion_wait+0x220/0x220 [ptlrpc]
[75421.306428] [&amp;lt;ffffffffc089bfda&amp;gt;] obd_setup+0x18a/0x2b0 [obdclass]
[75421.306428] [&amp;lt;ffffffffc06c0a66&amp;gt;] ? cfs_hash_create+0x366/0xa20 [libcfs]
[75421.306428] [&amp;lt;ffffffffc089c3a8&amp;gt;] class_setup+0x2a8/0x840 [obdclass]
[75421.306428] [&amp;lt;ffffffffc08a081c&amp;gt;] class_process_config+0x1b5c/0x2810 [obdclass]
[75421.306428] [&amp;lt;ffffffffc0891219&amp;gt;] ? lprocfs_counter_add+0xf9/0x160 [obdclass]
[75421.306428] [&amp;lt;ffffffffc087a71a&amp;gt;] class_handle_ioctl+0x56a/0x1de0 [obdclass]
[75421.306428] [&amp;lt;ffffffff8122239b&amp;gt;] ? destroy_inode+0x3b/0x60
[75421.306428] [&amp;lt;ffffffff812b72be&amp;gt;] ? security_capable+0x1e/0x20
[75421.306428] [&amp;lt;ffffffffc08607f2&amp;gt;] obd_class_ioctl+0xd2/0x170 [obdclass]
[75421.306428] [&amp;lt;ffffffff81219e90&amp;gt;] do_vfs_ioctl+0x350/0x560
[75421.306428] [&amp;lt;ffffffff812275e4&amp;gt;] ? mntput+0x24/0x40
[75421.306428] [&amp;lt;ffffffff812079b6&amp;gt;] ? __fput+0x186/0x260
[75421.306428] [&amp;lt;ffffffff8121a141&amp;gt;] SyS_ioctl+0xa1/0xc0
[75421.306428] [&amp;lt;ffffffff816c0655&amp;gt;] ? system_call_after_swapgs+0xa2/0x146
[75421.306428] [&amp;lt;ffffffff816c0715&amp;gt;] system_call_fastpath+0x1c/0x21
[75421.306428] [&amp;lt;ffffffff816c0661&amp;gt;] ? system_call_after_swapgs+0xae/0x146
[ 0.000000] Initializing cgroup subsys cpuset

&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="223785" author="tappro" created="Thu, 15 Mar 2018 19:07:51 +0000"  >&lt;p&gt;This is &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8066&quot; title=&quot;Move lustre procfs handling to sysfs and debugfs.&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8066&quot;&gt;LU-8066&lt;/a&gt;, commit 0100ab268c and this particular change:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;diff --git a/lustre/obdecho/echo_client.c b/lustre/obdecho/echo_client.c
index 081a339..6e25a02 100644
--- a/lustre/obdecho/echo_client.c
+++ b/lustre/obdecho/echo_client.c
@@ -3071,7 +3071,7 @@ &lt;span class=&quot;code-keyword&quot;&gt;static&lt;/span&gt; &lt;span class=&quot;code-object&quot;&gt;int&lt;/span&gt; __init obdecho_init(void)
         &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (rc != 0)
                 &lt;span class=&quot;code-keyword&quot;&gt;goto&lt;/span&gt; failed_0;
 
-       rc = class_register_type(&amp;amp;echo_obd_ops, NULL, &lt;span class=&quot;code-keyword&quot;&gt;true&lt;/span&gt;, NULL,
+       rc = class_register_type(&amp;amp;echo_obd_ops, NULL, &lt;span class=&quot;code-keyword&quot;&gt;false&lt;/span&gt;, NULL,
                                 LUSTRE_ECHO_NAME, NULL);
        &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (rc != 0)
                &lt;span class=&quot;code-keyword&quot;&gt;goto&lt;/span&gt; failed_1;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;that change causes empty lprocfs root for echo server device and later it asserts on it in lprocfs_alloc_obd_stats() as I can see.&lt;/p&gt;</comment>
                            <comment id="223791" author="gerrit" created="Thu, 15 Mar 2018 19:38:40 +0000"  >&lt;p&gt;Mike Pershin (mike.pershin@intel.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/31664&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/31664&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10804&quot; title=&quot;obdfilter-survey test_3a: lprocfs_alloc_obd_stats()) ASSERTION( obd-&amp;gt;obd_proc_entry != ((void *)0) ) failed; LBUG&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10804&quot;&gt;&lt;del&gt;LU-10804&lt;/del&gt;&lt;/a&gt; echo: allow echo server to setup procfs&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 8685e8db0b55651fab21170ed75bc9df454c0e58&lt;/p&gt;</comment>
                            <comment id="223903" author="gerrit" created="Sat, 17 Mar 2018 05:13:38 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/31664/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/31664/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10804&quot; title=&quot;obdfilter-survey test_3a: lprocfs_alloc_obd_stats()) ASSERTION( obd-&amp;gt;obd_proc_entry != ((void *)0) ) failed; LBUG&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10804&quot;&gt;&lt;del&gt;LU-10804&lt;/del&gt;&lt;/a&gt; echo: allow echo server to setup procfs&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: af94e4b7afd3fa724d161a5127dc9a89d898e73c&lt;/p&gt;</comment>
                            <comment id="223908" author="pjones" created="Sat, 17 Mar 2018 13:20:02 +0000"  >&lt;p&gt;Landed for 2.11&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="36381">LU-8066</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzzu9z:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>