<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:41:50 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
<language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-4340] conf-sanity test_69: error: File too large</title>
                <link>https://jira.whamcloud.com/browse/LU-4340</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for sarah &amp;lt;sarah@whamcloud.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;http://maloo.whamcloud.com/test_sets/77c48f3e-59f3-11e3-98fc-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://maloo.whamcloud.com/test_sets/77c48f3e-59f3-11e3-98fc-52540035b04c&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_69 failed with the following error:&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;create file after reformat&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;test log shows:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;CMD: client-16vm3 lctl get_param -n osc.lustre-OST0000-osc-MDT0000.prealloc_last_id
 - created 10000 (time 1385749407.25 total 47.32 last 47.32)
 - created 20000 (time 1385749454.94 total 95.01 last 47.69)
 - created 30000 (time 1385749503.05 total 143.12 last 48.11)
 - created 40000 (time 1385749551.55 total 191.62 last 48.50)
open(/mnt/lustre/d0.conf-sanity/d69/f.conf-sanity.69-49787) error: File too large
total: 49787 creates in 240.39 seconds: 207.11 creates/second
stop ost1 service on client-16vm4
CMD: client-16vm4 grep -c /mnt/ost1&apos; &apos; /proc/mounts
Stopping /mnt/ost1 (opts:-f) on client-16vm4
CMD: client-16vm4 umount -d -f /mnt/ost1
CMD: client-16vm4 lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
CMD: client-16vm4 grep -c /mnt/ost1&apos; &apos; /proc/mounts
CMD: client-16vm4 lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp; lctl dl | grep &apos; ST &apos;
CMD: client-16vm4 mkfs.lustre --mgsnode=client-16vm3@tcp --fsname=lustre --ost --index=0 --param=sys.timeout=20 --backfstype=ldiskfs --device-size=200000 --mkfsoptions=\&quot;-E lazy_itable_init\&quot; --reformat --replace /dev/lvm-Role_OSS/P1

   Permanent disk data:
Target:     lustre-OST0000
Index:      0
Lustre FS:  lustre
Mount type: ldiskfs
Flags:      0x42
              (OST update )
Persistent mount opts: errors=remount-ro
Parameters: mgsnode=10.10.4.122@tcp sys.timeout=20

device size = 2048MB
formatting backing filesystem ldiskfs on /dev/lvm-Role_OSS/P1
	target name  lustre-OST0000
	4k blocks     50000
	options        -I 256 -q -O extents,uninit_bg,dir_nlink,quota,huge_file,flex_bg -G 256 -E lazy_itable_init,resize=4290772992,lazy_journal_init -F
mkfs_cmd = mke2fs -j -b 4096 -L lustre-OST0000  -I 256 -q -O extents,uninit_bg,dir_nlink,quota,huge_file,flex_bg -G 256 -E lazy_itable_init,resize=4290772992,lazy_journal_init -F /dev/lvm-Role_OSS/P1 50000
Writing CONFIGS/mountdata
start ost1 service on client-16vm4
CMD: client-16vm4 mkdir -p /mnt/ost1
CMD: client-16vm4 test -b /dev/lvm-Role_OSS/P1
Starting ost1:   /dev/lvm-Role_OSS/P1 /mnt/ost1
CMD: client-16vm4 mkdir -p /mnt/ost1; mount -t lustre   		                   /dev/lvm-Role_OSS/P1 /mnt/ost1
CMD: client-16vm4 PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/usr/lib64/lustre/tests//usr/lib64/lustre/tests:/usr/lib64/lustre/tests:/usr/lib64/lustre/tests/../utils:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/utils/gss:/usr/lib64/lustre/utils:/usr/lib64/openmpi/bin:/usr/bin:/bin:/sbin:/usr/sbin::/sbin:/bin:/usr/sbin: NAME=autotest_config sh rpc.sh set_default_debug \&quot;-1\&quot; \&quot;all -lnet -lnd -pinger\&quot; 4 
CMD: client-16vm4 e2label /dev/lvm-Role_OSS/P1 2&amp;gt;/dev/null
Started lustre-OST0000
CMD: client-16vm3 /usr/sbin/lctl get_param -n version
CMD: client-16vm3 /usr/sbin/lctl get_param -n version
CMD: client-16vm3 lctl list_param osc.lustre-OST*-osc             &amp;gt; /dev/null 2&amp;gt;&amp;amp;1
CMD: client-16vm3 lctl get_param -n at_min
can&apos;t get osc.lustre-OST0000-osc-MDT0000.ost_server_uuid by list_param in 40 secs
Go with osc.lustre-OST0000-osc-MDT0000.ost_server_uuid directly
CMD: client-16vm3 PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/usr/lib64/lustre/tests//usr/lib64/lustre/tests:/usr/lib64/lustre/tests:/usr/lib64/lustre/tests/../utils:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/utils/gss:/usr/lib64/lustre/utils:/usr/lib64/openmpi/bin:/usr/bin:/bin:/sbin:/usr/sbin::/sbin:/bin:/usr/sbin: NAME=autotest_config sh rpc.sh wait_import_state FULL osc.lustre-OST0000-osc-MDT0000.ost_server_uuid 40 
client-16vm3: osc.lustre-OST0000-osc-MDT0000.ost_server_uuid in FULL state after 0 sec
touch: cannot touch `/mnt/lustre/d0.conf-sanity/d69/f.conf-sanity.69-last&apos;: File too large
 conf-sanity test_69: @@@@@@ FAIL: create file after reformat 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment>client and server: lustre-master build #1783 RHEL6.4 ldiskfs</environment>
        <key id="22303">LU-4340</key>
            <summary>conf-sanity test_69: error: File too large</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="2" iconUrl="https://jira.whamcloud.com/images/icons/priorities/critical.svg">Critical</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="jamesanunez">James Nunez</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                            <label>patch</label>
                    </labels>
                <created>Tue, 3 Dec 2013 18:22:24 +0000</created>
                <updated>Tue, 28 Mar 2017 19:01:36 +0000</updated>
                            <resolved>Fri, 28 Aug 2015 07:15:38 +0000</resolved>
                                    <version>Lustre 2.6.0</version>
                    <version>Lustre 2.4.2</version>
                    <version>Lustre 2.5.1</version>
                    <version>Lustre 2.7.0</version>
                    <version>Lustre 2.8.0</version>
                                    <fixVersion>Lustre 2.8.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>11</watches>
                                                                            <comments>
                            <comment id="73151" author="green" created="Mon, 9 Dec 2013 23:41:00 +0000"  >&lt;p&gt;This is probably like &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2113&quot; title=&quot;ENOSPC sometimes incorrectly reported as file too bigin lfs setstripe&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2113&quot;&gt;&lt;del&gt;LU-2113&lt;/del&gt;&lt;/a&gt; - when we are out of space, we sometimes get file too large in error instead.&lt;/p&gt;

&lt;p&gt;Is the test running out of space somehow?&lt;/p&gt;</comment>
                            <comment id="73304" author="sarah" created="Wed, 11 Dec 2013 19:09:01 +0000"  >&lt;p&gt;Found following error in the MDS dmesg, rc=-28 is no space:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;LustreError: 17127:0:(osp_precreate.c:481:osp_precreate_send()) lustre-OST0000-osc-MDT0000: can&apos;t precreate: rc = -28
LustreError: 17109:0:(lod_qos.c:946:lod_alloc_specific()) can&apos;t lstripe objid [0x240000bd0:0xc27f:0x0]: have 0 want 1
LustreError: 11-0: lustre-OST0000-osc-MDT0000: Communicating with 10.10.4.123@tcp, operation ost_destroy failed with -107.
LustreError: Skipped 3 previous similar messages
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="73987" author="yujian" created="Sat, 21 Dec 2013 14:25:36 +0000"  >&lt;p&gt;Lustre Tag: 2.4.2 RC1&lt;br/&gt;
Lustre Client: CentOS 6.5/x86_64 (kernel version: 2.6.32-431.1.2.0.1.el6.x86_64)&lt;br/&gt;
Lustre Server: CentOS 6.4/x86_64 (kernel version: 2.6.32-358.23.2.el6_lustre.x86_64) &lt;/p&gt;

&lt;p&gt;The same failure occurred:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/5f44a050-6a2b-11e3-81c0-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/5f44a050-6a2b-11e3-81c0-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="74015" author="yujian" created="Mon, 23 Dec 2013 09:01:09 +0000"  >&lt;p&gt;Lustre Tag: 2.4.2 RC2&lt;br/&gt;
Lustre Client: CentOS 6.5/x86_64 (kernel version: 2.6.32-431.1.2.0.1.el6.x86_64)&lt;br/&gt;
Lustre Server: CentOS 6.4/x86_64 (kernel version: 2.6.32-358.23.2.el6_lustre.x86_64)&lt;/p&gt;

&lt;p&gt;The same failure occurred:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/c897cc3c-6b82-11e3-91a4-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/c897cc3c-6b82-11e3-91a4-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="74392" author="yujian" created="Mon, 6 Jan 2014 13:06:00 +0000"  >&lt;p&gt;Lustre Build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_5/5/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-b2_5/5/&lt;/a&gt;&lt;br/&gt;
Lustre Client: CentOS 6.5/x86_64 (kernel version: 2.6.32-431.1.2.0.1.el6)&lt;br/&gt;
Lustre Server: CentOS 6.4/x86_64 (kernel version: 2.6.32-358.18.1.el6)&lt;/p&gt;

&lt;p&gt;The same failure occurred:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/7daa132e-768b-11e3-a081-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/7daa132e-768b-11e3-a081-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="75703" author="sarah" created="Mon, 27 Jan 2014 18:32:47 +0000"  >&lt;p&gt;also seen in interop test between 2.5 client and 2.6 server:&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/fa9b2594-8333-11e3-a5fa-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/fa9b2594-8333-11e3-a5fa-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="76930" author="yujian" created="Thu, 13 Feb 2014 03:10:23 +0000"  >&lt;p&gt;More instance on master branch:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/b9ee0148-9430-11e3-854a-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/b9ee0148-9430-11e3-854a-52540035b04c&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/86ec6c20-9666-11e3-a009-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/86ec6c20-9666-11e3-a009-52540035b04c&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/ce6c0350-98f8-11e3-968c-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/ce6c0350-98f8-11e3-968c-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="77340" author="yujian" created="Wed, 19 Feb 2014 11:20:47 +0000"  >&lt;p&gt;This failure kept occurring on master branch in full group test sessions and in review group test sessions (with SLOW=yes).&lt;/p&gt;</comment>
                            <comment id="84796" author="jamesanunez" created="Fri, 23 May 2014 17:48:02 +0000"  >&lt;p&gt;I&apos;m hitting this &quot;File too large&quot; error and there is enough space on OST0 and the MDS, but I&apos;ve used up all my inodes:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# lfs df -i
UUID                      Inodes       IUsed       IFree IUse% Mounted on
lscratch-MDT0000_UUID      100000       49642       50358  50% /lustre/scratch[MDT:0]
lscratch-MDT0001_UUID      100000         201       99799   0% /lustre/scratch[MDT:1]
lscratch-MDT0002_UUID      100000         201       99799   0% /lustre/scratch[MDT:2]
lscratch-MDT0003_UUID      100000         201       99799   0% /lustre/scratch[MDT:3]
lscratch-OST0000_UUID       50016       50016           0 100% /lustre/scratch[OST:0]

filesystem summary:       400000       50245      349755  13% /lustre/scratch
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;We use the small OST and MDS size for all tests in conf-sanity. So, why doesn&apos;t this error always occur? From conf-sanity:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# use small MDS + OST size to speed formatting time
# do not use too small MDSSIZE/OSTSIZE, which affect the default jouranl size
# STORED_MDSSIZE is used in test_18
STORED_MDSSIZE=$MDSSIZE
STORED_OSTSIZE=$OSTSIZE
MDSSIZE=200000
OSTSIZE=200000
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="89168" author="sarah" created="Tue, 15 Jul 2014 23:06:46 +0000"  >&lt;p&gt;Hit the error again in b2_6 build # 2 (2.6.0-RC2)&lt;br/&gt;
server and client: RHEL6 ldiskfs&lt;/p&gt;

&lt;p&gt;MDSCOUNT=1&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/181e5ee6-0c47-11e4-b749-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/181e5ee6-0c47-11e4-b749-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="99421" author="jamesanunez" created="Mon, 17 Nov 2014 21:50:52 +0000"  >&lt;p&gt;Hit this bug on master, tag 2.6.90. Test results at: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/7f927c9a-6ccf-11e4-a452-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/7f927c9a-6ccf-11e4-a452-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="101889" author="jamesanunez" created="Wed, 17 Dec 2014 23:58:00 +0000"  >&lt;p&gt;I experienced this bug with lustre-master tag 2.6.91 build # 2771. Results are at &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/f326bd9e-8618-11e4-ac52-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/f326bd9e-8618-11e4-ac52-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="103523" author="yujian" created="Wed, 14 Jan 2015 20:49:26 +0000"  >&lt;p&gt;More failure instances on master branch:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/1261560a-9bdc-11e4-a352-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/1261560a-9bdc-11e4-a352-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/3e12c382-9bec-11e4-afb8-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/3e12c382-9bec-11e4-afb8-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/3e12c382-9bec-11e4-afb8-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/3e12c382-9bec-11e4-afb8-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/79089440-9bbe-11e4-b679-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/79089440-9bbe-11e4-b679-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/e379ea16-9b9d-11e4-9d4a-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/e379ea16-9b9d-11e4-9d4a-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="109112" author="jamesanunez" created="Fri, 6 Mar 2015 19:23:32 +0000"  >&lt;p&gt;Hit this issue with 2.7.0-RC4. Results are at: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/193dce6a-c42f-11e4-a0ef-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/193dce6a-c42f-11e4-a0ef-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="120270" author="gerrit" created="Fri, 3 Jul 2015 12:28:33 +0000"  >&lt;p&gt;Ashish Purkar (ashish.purkar@seagate.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/15487&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/15487&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4340&quot; title=&quot;conf-sanity test_69: error: File too large&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4340&quot;&gt;&lt;del&gt;LU-4340&lt;/del&gt;&lt;/a&gt; tests: Fix test_69 of conf-sanity test&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: abc7765b941fcebe7c489137af0e27d2212c71de&lt;/p&gt;</comment>
                            <comment id="121616" author="gerrit" created="Sun, 19 Jul 2015 04:01:57 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/15487/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/15487/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4340&quot; title=&quot;conf-sanity test_69: error: File too large&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4340&quot;&gt;&lt;del&gt;LU-4340&lt;/del&gt;&lt;/a&gt; tests: Fix test_69 of conf-sanity test&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 34f94efa4847ebd84b2fa42b7a0fc85bd7f6f8e3&lt;/p&gt;</comment>
                            <comment id="121629" author="pjones" created="Sun, 19 Jul 2015 15:00:16 +0000"  >&lt;p&gt;Landed for 2.8&lt;/p&gt;</comment>
                            <comment id="122480" author="yujian" created="Tue, 28 Jul 2015 21:22:11 +0000"  >&lt;p&gt;The failure occurred consistently on master branch:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/1f034d6c-34ea-11e5-be21-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/1f034d6c-34ea-11e5-be21-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/7a5d164a-34ed-11e5-b875-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/7a5d164a-34ed-11e5-b875-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/ba0dd0aa-34a6-11e5-a9b3-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/ba0dd0aa-34a6-11e5-a9b3-5254006e85c2&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/3aaaa0b4-3437-11e5-be70-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/3aaaa0b4-3437-11e5-be70-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="123970" author="gerrit" created="Wed, 12 Aug 2015 17:45:45 +0000"  >&lt;p&gt;James Nunez (james.a.nunez@intel.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/15966&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/15966&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4340&quot; title=&quot;conf-sanity test_69: error: File too large&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4340&quot;&gt;&lt;del&gt;LU-4340&lt;/del&gt;&lt;/a&gt; tests: Adding debug to conf-sanity test 69&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: cca19d41a6b39ae2020655923290939cbe21bade&lt;/p&gt;</comment>
                            <comment id="124156" author="jamesanunez" created="Fri, 14 Aug 2015 15:43:05 +0000"  >&lt;p&gt;We ran conf-sanity five times in a row to see if we could reproduce this error on master (pre-2.8). The logs are at &lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/b3556da0-41e3-11e5-9e18-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/b3556da0-41e3-11e5-9e18-5254006e85c2&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;Looking back at previous failures of conf-sanity test 69, the createmany routine fails with two different errors; &#8216;File too large&#8217; and &#8216;No space left on device&#8217;, failure codes 27 and 28.&lt;/p&gt;

&lt;p&gt;From createmany, this is the part of the code that is failing:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;                     &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (do_open) {
                        &lt;span class=&quot;code-object&quot;&gt;int&lt;/span&gt; fd = open(filename, O_CREAT|O_RDWR, 0644);
                        &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (fd &amp;lt; 0) {
                                printf(&lt;span class=&quot;code-quote&quot;&gt;&quot;open(%s) error: %s\n&quot;&lt;/span&gt;, filename,
                                       strerror(errno));
                                rc = errno;
                               &lt;span class=&quot;code-keyword&quot;&gt;break&lt;/span&gt;;
                        }
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;In the recent debug tests I ran, I only hit the &#8220;No space left on device&#8221; error:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;- created 10000 (time 1439485515.71 total 61.79 last 61.79)
- created 20000 (time 1439485577.47 total 123.55 last 61.76)
- created 30000 (time 1439485640.13 total 186.21 last 62.66)
- created 40000 (time 1439485702.88 total 248.96 last 62.76)
open(/mnt/lustre/d69.conf-sanity/f69.conf-sanity-49585) error: No space left on device
total: 49585 creates in 310.77 seconds: 159.56 creates/second
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;In test 69, before we start creating files, the file system capacity looks like (&#8216;lfs df&#8217; and &#8216;lfs df -i&#8217; output):&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;UUID                   1K-blocks        Used   Available Use% Mounted on
lustre-MDT0000_UUID       133560        1752      122084   1% /mnt/lustre[MDT:0]
lustre-MDT0001_UUID       133560        1604      122232   1% /mnt/lustre[MDT:1]
lustre-MDT0002_UUID       133560        1608      122228   1% /mnt/lustre[MDT:2]
lustre-MDT0003_UUID       133560        1608      122228   1% /mnt/lustre[MDT:3]
lustre-OST0000_UUID       171080        9812      150848   6% /mnt/lustre[OST:0]

filesystem summary:       171080        9812      150848   6% /mnt/lustre

UUID                      Inodes       IUsed       IFree IUse% Mounted on
lustre-MDT0000_UUID       100000         250       99750   0% /mnt/lustre[MDT:0]
lustre-MDT0001_UUID       100000         230       99770   0% /mnt/lustre[MDT:1]
lustre-MDT0002_UUID       100000         230       99770   0% /mnt/lustre[MDT:2]
lustre-MDT0003_UUID       100000         230       99770   0% /mnt/lustre[MDT:3]
lustre-OST0000_UUID        50016         463       49553   1% /mnt/lustre[OST:0]

filesystem summary:        50493         940       49553   2% /mnt/lustre
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;So, there are no space issues before the test is run. &lt;/p&gt;

&lt;p&gt;NOTE: As previously pointed out, for conf-sanity, the size of the MDT and OSTs are reduced to 200000.&lt;/p&gt;

&lt;p&gt;After creating files, the file system capacity looks like (&#8216;lfs df&#8217; and &#8216;lfs df -i&#8217; output):&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;UUID                   1K-blocks        Used   Available Use% Mounted on
lustre-MDT0000_UUID       133560        7536      116312   6% /mnt/lustre[MDT:0]
lustre-MDT0001_UUID       133560        1604      122232   1% /mnt/lustre[MDT:1]
lustre-MDT0002_UUID       133560        1608      122228   1% /mnt/lustre[MDT:2]
lustre-MDT0003_UUID       133560        1608      122228   1% /mnt/lustre[MDT:3]
lustre-OST0000_UUID       171080       10964      149696   7% /mnt/lustre[OST:0]

filesystem summary:       171080       10964      149696   7% /mnt/lustre

UUID                      Inodes       IUsed       IFree IUse% Mounted on
lustre-MDT0000_UUID       100000       49836       50164  50% /mnt/lustre[MDT:0]
lustre-MDT0001_UUID       100000         230       99770   0% /mnt/lustre[MDT:1]
lustre-MDT0002_UUID       100000         230       99770   0% /mnt/lustre[MDT:2]
lustre-MDT0003_UUID       100000         230       99770   0% /mnt/lustre[MDT:3]
lustre-OST0000_UUID        50016       50016           0 100% /mnt/lustre[OST:0]

filesystem summary:        50526       50526           0 100% /mnt/lustre
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Thus, we are running out of inodes on OST0. &lt;/p&gt;

&lt;p&gt;The test currently tries to write 3* OST_MAX_PRECREATE, where OST_MAX_PRECREATE = 20000, with the stated goal of making LAST_ID over 1.5 * OST_MAX_PRECREATE. From the error messages of the failed tests, we know that the test fails a little over 49,000 files. So, wouldn&#8217;t changing the number of files to create to 2 * OST_MAX_PRECREATE solve this issues and still achieve the goal of making LAST_ID over 1.5 * OST_MAX_PRECREATE?&lt;/p&gt;

&lt;p&gt;If we limit the number of files to be created to 2*OST_MAX_PRECREATE, the test should pass and does pass. Logs are at: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sessions/d8222034-425a-11e5-af45-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sessions/d8222034-425a-11e5-af45-5254006e85c2&lt;/a&gt; &lt;/p&gt;</comment>
                            <comment id="124501" author="adilger" created="Tue, 18 Aug 2015 18:37:42 +0000"  >&lt;p&gt;I also figured out where the &lt;tt&gt;-EFBIG&lt;/tt&gt; (-27) vs &lt;tt&gt;-ENOSPC&lt;/tt&gt; difference was coming from.  That is returned by &lt;tt&gt;lod_alloc_specific()&lt;/tt&gt; returning an error when a specific layout is requested but there are no OST objects available.  Normally that makes sense because a specific layout specifies the stripe count, and if that cannot be satisfied then the file may grow too large for the available number of stripes.  In the case of this test, there is only one stripe but the directory specifies it must be on OST0000, so it triggers this condition:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;        /* If we were passed specific striping params, then a failure to
         * meet those requirements is an error, since we can&apos;t reallocate
         * that memory (it might be part of a larger array or something).
         */
        CERROR(&lt;span class=&quot;code-quote&quot;&gt;&quot;can&apos;t lstripe objid &quot;&lt;/span&gt;DFID&lt;span class=&quot;code-quote&quot;&gt;&quot;: have %d want %u\n&quot;&lt;/span&gt;,
               PFID(lu_object_fid(lod2lu_obj(lo))), stripe_num,
               lo-&amp;gt;ldo_stripenr);
        rc = stripe_num == 0 ? -ENOSPC : -EFBIG;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;Previously it always returned -EFBIG, but I changed it in &lt;a href=&quot;http://review.whamcloud.com/12937&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/12937&lt;/a&gt; &quot;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5246&quot; title=&quot;Failure on test suite sanity test_220: error: File too large&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5246&quot;&gt;&lt;del&gt;LU-5246&lt;/del&gt;&lt;/a&gt; tests: create OST objects on correct MDT&quot; to return -ENOSPC in case no objects could be created at all.  That patch was only landed on Aug 9, so I suspect the cases of -EFBIG being returned will decline and -ENOSPC will be returned instead (as it should be).&lt;/p&gt;</comment>
                            <comment id="125490" author="gerrit" created="Fri, 28 Aug 2015 07:02:27 +0000"  >&lt;p&gt;Andreas Dilger (andreas.dilger@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/15966/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/15966/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4340&quot; title=&quot;conf-sanity test_69: error: File too large&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4340&quot;&gt;&lt;del&gt;LU-4340&lt;/del&gt;&lt;/a&gt; tests: Correct conf-sanity test 69&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 22d0d85f5405859fb31104485ccfd55e4484d365&lt;/p&gt;</comment>
                            <comment id="125491" author="adilger" created="Fri, 28 Aug 2015 07:15:38 +0000"  >&lt;p&gt;Patch landed to master for 2.8.0. &lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                                                <inwardlinks description="is duplicated by">
                                        <issuelink>
            <issuekey id="22748">LU-4490</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="22301">LU-4338</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="22302">LU-4339</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="21832">LU-4204</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="25275">LU-5246</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="36992">LU-8158</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="28198">LU-6123</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwagv:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>11869</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>