<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:35:08 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-3580] Panic in ptlrpc when rerunning lustre-rsync-test/8 without remount</title>
                <link>https://jira.whamcloud.com/browse/LU-3580</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;I was running lustre-rsync-test test_8 repeatedly without umount/remount to reproduce &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3573&quot; title=&quot;lustre-rsync-test test_8: @@@@@@ FAIL: Failure in replication; differences found. &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3573&quot;&gt;&lt;del&gt;LU-3573&lt;/del&gt;&lt;/a&gt;, when my MDS hit an LBUG:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;LustreError: 48946:0:(sec_null.c:318:null_alloc_rs()) ASSERTION( rs-&amp;gt;rs_size &amp;gt;= rs_size ) failed: 
LustreError: 48946:0:(sec_null.c:318:null_alloc_rs()) LBUG
Kernel panic - not syncing: LBUG
Pid: 48946, comm: mdt00_002 Tainted: P           ---------------    2.6.32-358.11.1.el6_lustre.g3b657b6.x86_64 #1
Call Trace:
 [&amp;lt;ffffffff8150d8f8&amp;gt;] ? panic+0xa7/0x16f
 [&amp;lt;ffffffffa0629eeb&amp;gt;] ? lbug_with_loc+0x9b/0xb0 [libcfs]
 [&amp;lt;ffffffffa0979632&amp;gt;] ? null_alloc_rs+0x272/0x390 [ptlrpc]
 [&amp;lt;ffffffffa0967dd9&amp;gt;] ? sptlrpc_svc_alloc_rs+0x1d9/0x2a0 [ptlrpc]
 [&amp;lt;ffffffffa093d533&amp;gt;] ? lustre_pack_reply_v2+0x93/0x280 [ptlrpc]
 [&amp;lt;ffffffffa093d7ce&amp;gt;] ? lustre_pack_reply_flags+0xae/0x1f0 [ptlrpc]
 [&amp;lt;ffffffffa093d921&amp;gt;] ? lustre_pack_reply+0x11/0x20 [ptlrpc]
 [&amp;lt;ffffffffa09654e3&amp;gt;] ? req_capsule_server_pack+0x53/0x100 [ptlrpc]
 [&amp;lt;ffffffffa0d37f1e&amp;gt;] ? mdt_get_info+0xae/0x19b0 [mdt]
 [&amp;lt;ffffffffa0d29fbd&amp;gt;] ? mdt_unpack_req_pack_rep+0x4d/0x4d0 [mdt]
 [&amp;lt;ffffffffa093e52c&amp;gt;] ? lustre_msg_get_version+0x8c/0x100 [ptlrpc]
 [&amp;lt;ffffffffa0d33cf7&amp;gt;] ? mdt_handle_common+0x647/0x16d0 [mdt]
 [&amp;lt;ffffffffa0d6d155&amp;gt;] ? mds_regular_handle+0x15/0x20 [mdt]
 [&amp;lt;ffffffffa094d978&amp;gt;] ? ptlrpc_server_handle_request+0x398/0xc60 [ptlrpc]
 [&amp;lt;ffffffffa062a54e&amp;gt;] ? cfs_timer_arm+0xe/0x10 [libcfs]
 [&amp;lt;ffffffffa063ba9f&amp;gt;] ? lc_watchdog_touch+0x6f/0x170 [libcfs]
 [&amp;lt;ffffffffa0944d99&amp;gt;] ? ptlrpc_wait_event+0xa9/0x290 [ptlrpc]
 [&amp;lt;ffffffff81063310&amp;gt;] ? default_wake_function+0x0/0x20
 [&amp;lt;ffffffffa094ecfd&amp;gt;] ? ptlrpc_main+0xabd/0x1700 [ptlrpc]
 [&amp;lt;ffffffffa094e240&amp;gt;] ? ptlrpc_main+0x0/0x1700 [ptlrpc]
 [&amp;lt;ffffffff81096936&amp;gt;] ? kthread+0x96/0xa0
 [&amp;lt;ffffffff8100c0ca&amp;gt;] ? child_rip+0xa/0x20
 [&amp;lt;ffffffff810968a0&amp;gt;] ? kthread+0x0/0xa0
 [&amp;lt;ffffffff8100c0c0&amp;gt;] ? child_rip+0x0/0x20
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment>1 OSS (2 osts), 1 MDS, 1 Client (all running lustre-master build 1546), MDS and OSS using ZFS</environment>
        <key id="19791">LU-3580</key>
            <summary>Panic in ptlrpc when rerunning lustre-rsync-test/8 without remount</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="10100">Low Priority</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="utopiabound">Nathaniel Clark</reporter>
                        <labels>
                            <label>zfs</label>
                    </labels>
                <created>Fri, 12 Jul 2013 15:55:36 +0000</created>
                <updated>Thu, 9 Jan 2020 07:06:26 +0000</updated>
                            <resolved>Thu, 9 Jan 2020 07:06:26 +0000</resolved>
                                    <version>Lustre 2.5.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>4</watches>
                                                                            <comments>
                            <comment id="62298" author="green" created="Mon, 15 Jul 2013 15:44:41 +0000"  >&lt;p&gt;Shadow, I wonder if you have an opinion on this?&lt;/p&gt;

&lt;p&gt;Thee was a class of bugs in the past that you worked on where a missing OST led to some smaller allocations and then everything came down once we realized we had more OSTs in the system.&lt;/p&gt;

&lt;p&gt;Nathaniel, why is the OST down?&lt;/p&gt;</comment>
                            <comment id="62317" author="utopiabound" created="Mon, 15 Jul 2013 19:15:46 +0000"  >&lt;p&gt;Oleg, The OST wasn&apos;t down.  lustre-rsync-test/8 builds a directory tree with createmany and some nested Gfor loops for directories, and then does a lustre_rsync to a local directory (on the client).  I had been running that in a loop to try to recreate the bug I was looking for when the MDT went down, it&apos;s pretty reproducable, you just have to keep the filesystem mounted between runs.  I can reproduce if you want cleaner logs.&lt;/p&gt;</comment>
                            <comment id="62349" author="shadow" created="Tue, 16 Jul 2013 06:11:01 +0000"  >&lt;p&gt;Oleg,&lt;/p&gt;

&lt;p&gt;it&apos;s looks new bug in sptlrpc code, and don&apos;t related to the MDC&amp;lt;&amp;gt;MDT exchange.&lt;br/&gt;
OSC have an own pool for requests - where it&apos;s preallocted with messages - but looks some reply`s need more size then set in preallocate time or it&apos;s related to the early reply.&lt;br/&gt;
as i see lustre_pack_reply may called more then one time - first for early reply second for the real reply - in that case we will have different request format and size for &lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;rc = sptlrpc_svc_alloc_rs(req, msg_len);
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;did we have a crashdump ?&lt;/p&gt;</comment>
                            <comment id="260850" author="adilger" created="Thu, 9 Jan 2020 07:06:26 +0000"  >&lt;p&gt;Close old bug&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="19751">LU-3573</issuekey>
        </issuelink>
                            </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                            <attachment id="13154" name="serial-manager.txt" size="294784" author="utopiabound" created="Fri, 12 Jul 2013 15:55:36 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvv9b:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9068</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>