<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:10:31 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
<language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-798] Test failure on test suite performance-sanity,subtest test_5</title>
                <link>https://jira.whamcloud.com/browse/LU-798</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for sarah &amp;lt;sarah@whamcloud.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/6924c914-00ed-11e1-bb4f-52540025f9af&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/6924c914-00ed-11e1-bb4f-52540025f9af&lt;/a&gt;.&lt;br/&gt;
I am not sure if this is duplicate with &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-758&quot; title=&quot;Test failure on test suite performance-sanity, subtest test_8&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-758&quot;&gt;&lt;del&gt;LU-758&lt;/del&gt;&lt;/a&gt;&lt;/p&gt;</description>
                <environment></environment>
        <key id="12272">LU-798</key>
            <summary>Test failure on test suite performance-sanity,subtest test_5</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="5">Cannot Reproduce</resolution>
                                        <assignee username="bobijam">Zhenyu Xu</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Thu, 27 Oct 2011 19:47:59 +0000</created>
                <updated>Wed, 13 Jul 2016 20:57:27 +0000</updated>
                            <resolved>Wed, 13 Jul 2016 20:57:27 +0000</resolved>
                                    <version>Lustre 2.1.2</version>
                    <version>Lustre 2.1.3</version>
                    <version>Lustre 2.1.4</version>
                    <version>Lustre 1.8.8</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>8</watches>
                                                                            <comments>
                            <comment id="22108" author="pjones" created="Thu, 27 Oct 2011 22:04:10 +0000"  >&lt;p&gt;Bobijam will look into this one&lt;/p&gt;</comment>
                            <comment id="22110" author="bobijam" created="Thu, 27 Oct 2011 22:17:42 +0000"  >&lt;p&gt;from the dmesg log, I don&apos;t see any error happens during the test except test_5 took almost an hour, I suspect its 3600 seconds test limit interrupt the test.&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedHeader panelHeader&quot; style=&quot;border-bottom-width: 1px;&quot;&gt;&lt;b&gt;client dmesg&lt;/b&gt;&lt;/div&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Lustre: DEBUG MARKER: == performance-sanity test 5: lookup rate 10M file dir =============================================== 09:45:59 (1319733959)
Lustre: DEBUG MARKER: ===== mdsrate-lookup-1dir.sh ======
Lustre: DEBUG MARKER: Using TIMEOUT=20
Lustre: DEBUG MARKER: ===== mdsrate-lookup-1dir.sh Test preparation: creating 1000000 files.
Lustre: DEBUG MARKER: ===== mdsrate-lookup-1dir.sh
Lustre: DEBUG MARKER: ===== mdsrate-lookup-1dir.sh
Lustre: DEBUG MARKER: == mdsrate-lookup-1dir mdsrate-lookup-1dir.sh: test complete, cleaning up ============================ 10:33:31 (1319736811)
SysRq : Show State

                                                       sibling
  task                 PC          pid father child younger older
init          S ffff81007e3567e0     0     1      0     2               (NOTLB)
 ffff81007ff95a28 0000000000000082 00000000000280d2 ffff810000017c10
 ffff81007ff857a0 000000000000000a ffff81007ff857a0 ffff81007e3567e0
 00000aabaa90b8db 0000000000006e20 ffff81007ff85988 0000000088031cb7
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="22111" author="bobijam" created="Fri, 28 Oct 2011 03:26:28 +0000"  >&lt;p&gt;I checked maloo for some passed performance-sanity test_5 to get a rough understanding how much time was needed for a successful test.&lt;/p&gt;

&lt;p&gt;This test case &lt;a href=&quot;https://maloo.whamcloud.com/sub_tests/9e94a664-f710-11e0-a451-52540025f9af&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/sub_tests/9e94a664-f710-11e0-a451-52540025f9af&lt;/a&gt; takes 2638s, while it only test with 433075 files (by checking test script, I think it was set by NUM_FILES environment variable)&lt;/p&gt;

&lt;p&gt;This test case &lt;a href=&quot;https://maloo.whamcloud.com/sub_tests/116bdd9a-e467-11e0-9909-52540025f9af&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/sub_tests/116bdd9a-e467-11e0-9909-52540025f9af&lt;/a&gt; takes 2493s with 452393 files.&lt;/p&gt;

&lt;p&gt;1M files test cases all timed out on cleaning up phase. &lt;/p&gt;</comment>
                            <comment id="22262" author="yujian" created="Tue, 1 Nov 2011 11:47:03 +0000"  >&lt;p&gt;performance-sanity test 5 passed on v1_8_7_WC1_RC1 in 2692s with 438805 files:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/eb31d5ac-f4cf-11e0-908b-52540025f9af&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/eb31d5ac-f4cf-11e0-908b-52540025f9af&lt;/a&gt;&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;UUID                      Inodes       IUsed       IFree IUse% Mounted on
lustre-MDT0000_UUID      5000040          52     4999988   0% /mnt/lustre[MDT:0]
lustre-OST0000_UUID       445440        2202      443238   0% /mnt/lustre[OST:0]
lustre-OST0001_UUID       445440          88      445352   0% /mnt/lustre[OST:1]
lustre-OST0002_UUID       445440          89      445351   0% /mnt/lustre[OST:2]
lustre-OST0003_UUID       445440          89      445351   0% /mnt/lustre[OST:3]
lustre-OST0004_UUID       445440          89      445351   0% /mnt/lustre[OST:4]
lustre-OST0005_UUID       445440          89      445351   0% /mnt/lustre[OST:5]
lustre-OST0006_UUID       445440          89      445351   0% /mnt/lustre[OST:6]

filesystem summary:      5000040          52     4999988   0% /mnt/lustre

===== mdsrate-lookup-1dir.sh Test preparation: creating 438805 files.
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;The number of files was calculated as follows:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;mdsrate-lookup-1dir.sh:

NUM_FILES=${NUM_FILES:-1000000}

IFree=$(mdsrate_inodes_available)
if [ $IFree -lt $NUM_FILES ]; then
    NUM_FILES=$IFree
fi

test-framework.sh:

inodes_available () {
    local IFree=$($LFS df -i $MOUNT | grep ^$FSNAME | awk &apos;{print $4}&apos; | sort -un | head -1) || return 1
    echo $IFree
}

mdsrate_inodes_available () {
    local min_inodes=$(inodes_available)
    echo $((min_inodes * 99 / 100))
}
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;So, it depends on the minimum number of free inodes on MDT and OST. For RC1, the OSTSIZE was about 30G and the min IFree was about 443238, however, for RC2, the OSTSIZE on the vm cluster was about 150G and the min IFree was about 2246430:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;UUID                      Inodes       IUsed       IFree IUse% Mounted on
lustre-MDT0000_UUID      5000040          52     4999988   0% /mnt/lustre[MDT:0]
lustre-OST0000_UUID      2257920       11490     2246430   1% /mnt/lustre[OST:0]
lustre-OST0001_UUID      2257920          89     2257831   0% /mnt/lustre[OST:1]
lustre-OST0002_UUID      2257920          89     2257831   0% /mnt/lustre[OST:2]
lustre-OST0003_UUID      2257920          88     2257832   0% /mnt/lustre[OST:3]
lustre-OST0004_UUID      2257920          89     2257831   0% /mnt/lustre[OST:4]
lustre-OST0005_UUID      2257920          89     2257831   0% /mnt/lustre[OST:5]
lustre-OST0006_UUID      2257920          89     2257831   0% /mnt/lustre[OST:6]

filesystem summary:      5000040          52     4999988   0% /mnt/lustre

===== mdsrate-lookup-1dir.sh Test preparation: creating 1000000 files.
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;So, this is the similar issue as &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-797&quot; title=&quot;Test failure on test suite ost-pools, subtest test_14, test_18, test_23&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-797&quot;&gt;&lt;del&gt;LU-797&lt;/del&gt;&lt;/a&gt;, which is caused by large OSTSIZE and limited timeout value. &lt;/p&gt;

&lt;p&gt;Chris, could you please take a look at this issue and make some adjustment on the OSTSIZE or timeout value? Thanks!&lt;/p&gt;</comment>
                            <comment id="22389" author="pjones" created="Thu, 3 Nov 2011 08:58:59 +0000"  >&lt;p&gt;&amp;gt; Chris, could you please take a look at this issue and make some adjustment on the OSTSIZE or timeout value? Thanks!&lt;/p&gt;

&lt;p&gt;Bobijam, it would be better to make such requests via a TT JIRA ticket.&lt;/p&gt;</comment>
                            <comment id="22457" author="sarah" created="Thu, 3 Nov 2011 18:10:34 +0000"  >&lt;p&gt;the remaining tests of performance-sanity passed: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/7d4cb962-0668-11e1-9433-52540025f9af&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/7d4cb962-0668-11e1-9433-52540025f9af&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="38624" author="yujian" created="Fri, 11 May 2012 09:22:41 +0000"  >&lt;p&gt;Lustre Build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b1_8/194/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-b1_8/194/&lt;/a&gt;&lt;br/&gt;
Distro/Arch: RHEL5.8/x86_64 (kernel 2.6.18-308.4.1.el5)&lt;br/&gt;
Network: TCP (1GigE)&lt;br/&gt;
ENABLE_QUOTA=yes&lt;/p&gt;

&lt;p&gt;The same issue occurred: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/b8d82eb2-9b50-11e1-a0a0-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/b8d82eb2-9b50-11e1-a0a0-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="39708" author="yujian" created="Thu, 31 May 2012 04:49:39 +0000"  >&lt;p&gt;Lustre Tag: v2_1_2_RC2&lt;br/&gt;
Lustre Build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_1/86/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-b2_1/86/&lt;/a&gt;&lt;br/&gt;
Distro/Arch: RHEL6.2/x86_64(server), RHEL6.2/i686(client)&lt;br/&gt;
Network: TCP (1GigE)&lt;br/&gt;
ENABLE_QUOTA=yes&lt;/p&gt;

&lt;p&gt;performance-sanity test 6 timed out: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/9b7b720a-aa34-11e1-971d-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/9b7b720a-aa34-11e1-971d-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="43544" author="yujian" created="Tue, 21 Aug 2012 06:58:01 +0000"  >&lt;p&gt;More instances occurred while testing Lustre 2.1.3 RC2:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/b5722254-eb33-11e1-ba73-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/b5722254-eb33-11e1-ba73-52540035b04c&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/9c1e8436-eb3e-11e1-ba73-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/9c1e8436-eb3e-11e1-ba73-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="48981" author="yujian" created="Mon, 10 Dec 2012 10:24:16 +0000"  >&lt;p&gt;Lustre Branch: b2_1&lt;br/&gt;
Lustre Build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_1/148&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-b2_1/148&lt;/a&gt;&lt;br/&gt;
Distro/Arch: RHEL6.3/x86_64 (kernel version: 2.6.32-279.14.1.el6)&lt;br/&gt;
Network: TCP (1GigE)&lt;/p&gt;

&lt;p&gt;The same issue occurred:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/eba241ba-41dc-11e2-adcf-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/eba241ba-41dc-11e2-adcf-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="49232" author="bobijam" created="Thu, 13 Dec 2012 23:18:29 +0000"  >&lt;p&gt;b2_1 patch tracking at &lt;a href=&quot;http://review.whamcloud.com/4830&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/4830&lt;/a&gt;&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedHeader panelHeader&quot; style=&quot;border-bottom-width: 1px;&quot;&gt;&lt;b&gt;commit message&lt;/b&gt;&lt;/div&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;    LU-798 test: reduce performance test file number
    
    The performance test takes too much time deleting all the test files
    which timedout the test suite.
    
    Test-Parameters: envdefinitions=SLOW=yes,ENABLE_QUOTA=yes testlist=performance-sanity

&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="49349" author="yujian" created="Mon, 17 Dec 2012 22:34:52 +0000"  >&lt;p&gt;Lustre Tag: v2_1_4_RC1&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/07afddb8-4831-11e2-8cdc-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/07afddb8-4831-11e2-8cdc-52540035b04c&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/af0b031e-482a-11e2-a406-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/af0b031e-482a-11e2-a406-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="49675" author="yujian" created="Wed, 26 Dec 2012 06:00:44 +0000"  >&lt;p&gt;From the historical Maloo reports, I found that all of the passed performance-sanity test runs were performed with about 450000 files, while all of the timed-out performance-sanity test runs were performed with 1000000 files due to large OSTSIZE.&lt;/p&gt;</comment>
                            <comment id="49676" author="yujian" created="Wed, 26 Dec 2012 06:01:37 +0000"  >&lt;p&gt;Hello Chris,&lt;/p&gt;

&lt;p&gt;Oleg suggested to adjust the length of time that is allocated to the performance-sanity test so that it has time to complete. Could you please increase the autotest timeout value for the performance-sanity test? Thanks.&lt;/p&gt;</comment>
                            <comment id="49829" author="chris" created="Wed, 2 Jan 2013 10:21:25 +0000"  >&lt;p&gt;performance-sanity is set to 2 hours for test_4, test_5 and test_7 so I fear the code might be broken in autotest. I need to figure out how to investigate.&lt;/p&gt;</comment>
                            <comment id="49870" author="chris" created="Thu, 3 Jan 2013 10:00:25 +0000"  >&lt;p&gt;OK.&lt;/p&gt;

&lt;p&gt;What confused me here is that this bug dates back to 2011 and so many of the examples have been fixed a long time ago.&lt;/p&gt;

&lt;p&gt;I&apos;ve increased the timeout for test_7 to 7200 which makes it like 5,6 and 8.&lt;/p&gt;</comment>
                            <comment id="49871" author="chris" created="Thu, 3 Jan 2013 10:09:26 +0000"  >&lt;p&gt;Looking at the recent examples this issue should now be fixed.&lt;/p&gt;</comment>
                            <comment id="50392" author="yujian" created="Sun, 13 Jan 2013 06:29:57 +0000"  >&lt;p&gt;Hi Chris,&lt;/p&gt;

&lt;p&gt;The performance-sanity test timed out on the latest b1_8 build #241. This time it&apos;s sub-test 8:&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/79fc558c-5ce9-11e2-afaf-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/79fc558c-5ce9-11e2-afaf-52540035b04c&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;I did not find any Lustre error messages in the logs. So, it&apos;s still due to low performance and needs more time.&lt;/p&gt;</comment>
                            <comment id="158723" author="adilger" created="Wed, 13 Jul 2016 20:57:27 +0000"  >&lt;p&gt;Closing this old bug.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                                        </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvdi7:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>5794</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>