<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:42:41 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary, append 'field=key&field=summary' to the URL of your request.
-->
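<!--
For illustration only; the exact URL below is an assumption based on JIRA's standard
XML issue view and is not taken from this export. A request of the form
https://jira.whamcloud.com/si/jira.issueviews:issue-xml/LU-4432/LU-4432.xml?field=key&field=summary
would return only the <key> and <summary> elements of this issue.
-->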
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-4432] recovery-mds-scale test_failover_ost: tar: Cannot write: Cannot allocate memory</title>
                <link>https://jira.whamcloud.com/browse/LU-4432</link>
                <project id="10000" key="LU">Lustre</project>
                <description>&lt;p&gt;While running recovery-mds-scale test failover_ost, the tar operation on one of the client nodes failed as follows:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;tar: etc/libreport/plugins/rhtsupport.conf: Cannot write: Cannot allocate memory
tar: Exiting with failure status due to previous errors
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Maloo report: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/e8a2857a-7529-11e3-936d-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/e8a2857a-7529-11e3-936d-52540035b04c&lt;/a&gt;&lt;/p&gt;</description>
                <environment>&lt;br/&gt;
Lustre Build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_5/5/&quot;&gt;http://build.whamcloud.com/job/lustre-b2_5/5/&lt;/a&gt;&lt;br/&gt;
Distro/Arch: RHEL6.4/x86_64&lt;br/&gt;
FSTYPE=zfs&lt;br/&gt;
TEST_GROUP=failover&lt;br/&gt;
</environment>
        <key id="22628">LU-4432</key>
        <summary>recovery-mds-scale test_failover_ost: tar: Cannot write: Cannot allocate memory</summary>
        <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
        <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
        <statusCategory id="3" key="done" colorName="success"/>
        <resolution id="3">Duplicate</resolution>
        <assignee username="wc-triage">WC Triage</assignee>
        <reporter username="yujian">Jian Yu</reporter>
        <labels></labels>
        <created>Sat, 4 Jan 2014 12:33:49 +0000</created>
        <updated>Thu, 13 Feb 2014 22:09:18 +0000</updated>
        <resolved>Thu, 13 Feb 2014 22:09:18 +0000</resolved>
        <version>Lustre 2.5.1</version>
        <due></due>
        <votes>0</votes>
        <watches>5</watches>
        <comments>
                            <comment id="74651" author="yujian" created="Thu, 9 Jan 2014 15:57:28 +0000"  >&lt;p&gt;Lustre client build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_4/70/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-b2_4/70/&lt;/a&gt; (2.4.2)&lt;br/&gt;
Lustre server build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_5/8/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-b2_5/8/&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;performance-sanity test 8 failed as follows:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;rank 0: stat(f173313) error: Cannot allocate memory
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Console log on OSS:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;08:33:51:Lustre: DEBUG MARKER: ===== mdsrate-stat-large.sh
08:33:51:ldlm_cn00_006: page allocation failure. order:1, mode:0x40
08:33:51:Pid: 640, comm: ldlm_cn00_006 Not tainted 2.6.32-358.18.1.el6_lustre.g6093be6.x86_64 #1
08:33:52:Call Trace:
08:33:52: [&amp;lt;ffffffff8112c257&amp;gt;] ? __alloc_pages_nodemask+0x757/0x8d0
08:33:52: [&amp;lt;ffffffff8127f72c&amp;gt;] ? put_dec+0x10c/0x110
08:33:53: [&amp;lt;ffffffff81166d92&amp;gt;] ? kmem_getpages+0x62/0x170
08:33:53: [&amp;lt;ffffffff811679aa&amp;gt;] ? fallback_alloc+0x1ba/0x270
08:33:53: [&amp;lt;ffffffff811673ff&amp;gt;] ? cache_grow+0x2cf/0x320
08:33:54: [&amp;lt;ffffffff81167729&amp;gt;] ? ____cache_alloc_node+0x99/0x160
08:33:54: [&amp;lt;ffffffff811688f0&amp;gt;] ? kmem_cache_alloc_node_trace+0x90/0x200
08:33:54: [&amp;lt;ffffffff81168b0d&amp;gt;] ? __kmalloc_node+0x4d/0x60
08:33:54: [&amp;lt;ffffffffa0457651&amp;gt;] ? cfs_cpt_malloc+0x31/0x60 [libcfs]
08:33:54: [&amp;lt;ffffffffa0a42b48&amp;gt;] ? ptlrpc_alloc_rqbd+0x1e8/0x670 [ptlrpc]
08:33:54: [&amp;lt;ffffffffa0a430b5&amp;gt;] ? ptlrpc_grow_req_bufs+0xe5/0x2a0 [ptlrpc]
08:33:54: [&amp;lt;ffffffff81063410&amp;gt;] ? default_wake_function+0x0/0x20
08:33:55: [&amp;lt;ffffffffa0a474bd&amp;gt;] ? ptlrpc_main+0xb5d/0x1740 [ptlrpc]
08:33:55: [&amp;lt;ffffffffa0a46960&amp;gt;] ? ptlrpc_main+0x0/0x1740 [ptlrpc]
08:33:55: [&amp;lt;ffffffff81096a36&amp;gt;] ? kthread+0x96/0xa0
08:33:55: [&amp;lt;ffffffff8100c0ca&amp;gt;] ? child_rip+0xa/0x20
08:33:56: [&amp;lt;ffffffff810969a0&amp;gt;] ? kthread+0x0/0xa0
08:33:56: [&amp;lt;ffffffff8100c0c0&amp;gt;] ? child_rip+0x0/0x20
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Maloo report: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/51d6d872-78ed-11e3-a27b-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/51d6d872-78ed-11e3-a27b-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="74787" author="yujian" created="Sun, 12 Jan 2014 14:21:37 +0000"  >&lt;p&gt;More instance on Lustre b2_5 branch:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/2df52e18-7ab4-11e3-8b19-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/2df52e18-7ab4-11e3-8b19-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="74994" author="m.magrys" created="Wed, 15 Jan 2014 09:56:13 +0000"  >&lt;p&gt;I think I did report a similar issue some time ago: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4034&quot; title=&quot;Cannot allocate memory on clients with 2.4.X&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4034&quot;&gt;&lt;del&gt;LU-4034&lt;/del&gt;&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="75167" author="yujian" created="Fri, 17 Jan 2014 09:23:44 +0000"  >&lt;p&gt;Lustre Build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_5/13/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-b2_5/13/&lt;/a&gt;&lt;br/&gt;
Distro/Arch: RHEL6.4/x86_64&lt;br/&gt;
TEST_GROUP=failover&lt;/p&gt;

&lt;p&gt;A similar issue occurred on the client while running the recovery-double-scale test:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;10:54:02:Lustre: DEBUG MARKER: mount -t lustre -o user_xattr,flock client-27vm3:client-27vm7:/lustre /mnt/lustre
10:54:02:mount.lustre: page allocation failure. order:2, mode:0x40
10:54:02:Pid: 10206, comm: mount.lustre Not tainted 2.6.32-358.18.1.el6.x86_64 #1
10:54:02:Call Trace:
10:54:02: [&amp;lt;ffffffff8112c257&amp;gt;] ? __alloc_pages_nodemask+0x757/0x8d0
10:54:03: [&amp;lt;ffffffff81166d92&amp;gt;] ? kmem_getpages+0x62/0x170
10:54:03: [&amp;lt;ffffffff811679aa&amp;gt;] ? fallback_alloc+0x1ba/0x270
10:54:03: [&amp;lt;ffffffff811673ff&amp;gt;] ? cache_grow+0x2cf/0x320
10:54:03: [&amp;lt;ffffffff81167729&amp;gt;] ? ____cache_alloc_node+0x99/0x160
10:54:03: [&amp;lt;ffffffffa0706bc6&amp;gt;] ? null_alloc_repbuf+0x66/0x3b0 [ptlrpc]
10:54:03: [&amp;lt;ffffffff811684f9&amp;gt;] ? __kmalloc+0x189/0x220
10:54:03: [&amp;lt;ffffffffa0706bc6&amp;gt;] ? null_alloc_repbuf+0x66/0x3b0 [ptlrpc]
10:54:03: [&amp;lt;ffffffffa06f4f25&amp;gt;] ? sptlrpc_cli_alloc_repbuf+0x175/0x220 [ptlrpc]
10:54:03: [&amp;lt;ffffffffa06c88ec&amp;gt;] ? ptl_send_rpc+0x93c/0xc40 [ptlrpc]
10:54:03: [&amp;lt;ffffffff81281734&amp;gt;] ? snprintf+0x34/0x40
10:54:04: [&amp;lt;ffffffffa03e77b1&amp;gt;] ? libcfs_debug_msg+0x41/0x50 [libcfs]
10:54:04: [&amp;lt;ffffffffa06bd894&amp;gt;] ? ptlrpc_send_new_req+0x454/0x790 [ptlrpc]
10:54:04: [&amp;lt;ffffffffa06c2e7e&amp;gt;] ? ptlrpc_set_wait+0x5be/0x860 [ptlrpc]
10:54:04: [&amp;lt;ffffffffa053d8ec&amp;gt;] ? lustre_get_jobid+0xcc/0x380 [obdclass]
10:54:04: [&amp;lt;ffffffffa06cc316&amp;gt;] ? lustre_msg_set_jobid+0xb6/0x140 [ptlrpc]
10:54:04: [&amp;lt;ffffffffa06c31a7&amp;gt;] ? ptlrpc_queue_wait+0x87/0x220 [ptlrpc]
10:54:04: [&amp;lt;ffffffffa06e12d8&amp;gt;] ? llog_client_read_header+0xd8/0x5e0 [ptlrpc]
10:54:04: [&amp;lt;ffffffffa0533d2c&amp;gt;] ? llog_init_handle+0xcc/0x960 [obdclass]
10:54:04: [&amp;lt;ffffffffa0565683&amp;gt;] ? class_config_parse_llog+0x1a3/0x330 [obdclass]
10:54:04: [&amp;lt;ffffffffa09f0302&amp;gt;] ? mgc_process_log+0xd22/0x18e0 [mgc]
10:54:04: [&amp;lt;ffffffffa09f1630&amp;gt;] ? config_recover_log_add+0x150/0x280 [mgc]
10:54:04: [&amp;lt;ffffffffa09ea360&amp;gt;] ? mgc_blocking_ast+0x0/0x810 [mgc]
10:54:04: [&amp;lt;ffffffffa06aa530&amp;gt;] ? ldlm_completion_ast+0x0/0x920 [ptlrpc]
10:54:05: [&amp;lt;ffffffffa09f24a5&amp;gt;] ? mgc_process_config+0x645/0x11d0 [mgc]
10:54:05: [&amp;lt;ffffffffa0574626&amp;gt;] ? lustre_process_log+0x256/0xa60 [obdclass]
10:54:05: [&amp;lt;ffffffff8128ca66&amp;gt;] ? __percpu_counter_init+0x56/0x70
10:54:05: [&amp;lt;ffffffffa0a732d8&amp;gt;] ? ll_fill_super+0xaa8/0x14d0 [lustre]
10:54:05: [&amp;lt;ffffffffa057993d&amp;gt;] ? lustre_fill_super+0x34d/0x510 [obdclass]
10:54:05: [&amp;lt;ffffffffa05795f0&amp;gt;] ? lustre_fill_super+0x0/0x510 [obdclass]
10:54:05: [&amp;lt;ffffffff811845cf&amp;gt;] ? get_sb_nodev+0x5f/0xa0
10:54:05: [&amp;lt;ffffffffa0571545&amp;gt;] ? lustre_get_sb+0x25/0x30 [obdclass]
10:54:05: [&amp;lt;ffffffff81183beb&amp;gt;] ? vfs_kern_mount+0x7b/0x1b0
10:54:05: [&amp;lt;ffffffff81183d92&amp;gt;] ? do_kern_mount+0x52/0x130
10:54:05: [&amp;lt;ffffffff811a3ef2&amp;gt;] ? do_mount+0x2d2/0x8d0
10:54:05: [&amp;lt;ffffffff811a4580&amp;gt;] ? sys_mount+0x90/0xe0
10:54:05: [&amp;lt;ffffffff8100b072&amp;gt;] ? system_call_fastpath+0x16/0x1b
10:54:05:Mem-Info:
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Maloo report: &lt;a href=&quot;https://maloo.whamcloud.com/sub_tests/d75a7160-7f3d-11e3-94f3-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/sub_tests/d75a7160-7f3d-11e3-94f3-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="77031" author="adilger" created="Thu, 13 Feb 2014 22:09:05 +0000"  >&lt;p&gt;Shows mode:0x40 == __GFP_IO, but missing __GFP_WAIT from &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4357&quot; title=&quot;page allocation failure. mode:0x40 caused by missing __GFP_WAIT flag&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4357&quot;&gt;&lt;del&gt;LU-4357&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;</comment>
        </comments>
        <issuelinks>
                <issuelinktype id="10010">
                        <name>Duplicate</name>
                        <outwardlinks description="duplicates">
                                <issuelink>
                                        <issuekey id="22373">LU-4357</issuekey>
                                </issuelink>
                        </outwardlinks>
                </issuelinktype>
        </issuelinks>
        <attachments></attachments>
        <subtasks></subtasks>
        <customfields>
                <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                        </customfieldvalues>
                </customfield>
                <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue>1|hzwc6n:</customfieldvalue>
                        </customfieldvalues>
                </customfield>
                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue>12176</customfieldvalue>
                        </customfieldvalues>
                </customfield>
                <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>
                        </customfieldvalues>
                </customfield>
        </customfields>
    </item>
</channel>
</rss>