<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:49:47 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-5245] sanity-quota test_1: user write success, but expect EDQUOT </title>
                <link>https://jira.whamcloud.com/browse/LU-5245</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for wangdi &amp;lt;di.wang@intel.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;http://maloo.whamcloud.com/test_sets/540fb674-fad7-11e3-b499-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://maloo.whamcloud.com/test_sets/540fb674-fad7-11e3-b499-52540035b04c&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_1 failed with the following error:&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;Total allocated inode limit: 0, total allocated block limit: 0&lt;br/&gt;
Files for user (quota_usr):&lt;br/&gt;
  File: `/mnt/lustre/d1.sanity-quota/f1.sanity-quota-0&apos;&lt;br/&gt;
  Size: 11534336  	Blocks: 22528      IO Block: 4194304 regular file&lt;br/&gt;
Device: 2c54f966h/743766374d	Inode: 288230930219270255  Links: 1&lt;br/&gt;
Access: (0644/&lt;del&gt;rw-r&lt;/del&gt;&lt;del&gt;r&lt;/del&gt;-)  Uid: (60000/quota_usr)   Gid: (60000/quota_usr)&lt;br/&gt;
Access: 2014-06-23 00:18:35.000000000 -0700&lt;br/&gt;
Modify: 2014-06-23 00:18:36.000000000 -0700&lt;br/&gt;
Change: 2014-06-23 00:18:36.000000000 -0700&lt;br/&gt;
 sanity-quota test_1: @@@@@@ FAIL: user write success, but expect EDQUOT &lt;br/&gt;
  Trace dump:&lt;br/&gt;
  = /usr/lib64/lustre/tests/test-framework.sh:4528:error_noexit()&lt;br/&gt;
  = /usr/lib64/lustre/tests/test-framework.sh:4559:error()&lt;br/&gt;
  = /usr/lib64/lustre/tests/sanity-quota.sh:154:quota_error()&lt;br/&gt;
  = /usr/lib64/lustre/tests/sanity-quota.sh:440:test_1()&lt;br/&gt;
  = /usr/lib64/lustre/tests/test-framework.sh:4820:run_one()&lt;br/&gt;
  = /usr/lib64/lustre/tests/test-framework.sh:4855:run_one_logged()&lt;br/&gt;
  = /usr/lib64/lustre/tests/test-framework.sh:4708:run_test()&lt;br/&gt;
  = /usr/lib64/lustre/tests/sanity-quota.sh:483:main()&lt;br/&gt;
user write success, but expect EDQUOT&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;Info required for matching: sanity-quota 1&lt;/p&gt;</description>
                <environment></environment>
        <key id="25274">LU-5245</key>
            <summary>sanity-quota test_1: user write success, but expect EDQUOT </summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="4">Incomplete</resolution>
                                        <assignee username="niu">Niu Yawei</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Mon, 23 Jun 2014 17:16:31 +0000</created>
                <updated>Tue, 20 Nov 2018 21:02:51 +0000</updated>
                            <resolved>Tue, 20 Nov 2018 21:02:51 +0000</resolved>
                                    <version>Lustre 2.6.0</version>
                    <version>Lustre 2.7.0</version>
                    <version>Lustre 2.8.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>12</watches>
                                                                            <comments>
                            <comment id="87297" author="jlevi" created="Mon, 23 Jun 2014 17:41:45 +0000"  >&lt;p&gt;Niu,&lt;br/&gt;
Can you please comment on this one?&lt;br/&gt;
Thank you!&lt;/p&gt;</comment>
                            <comment id="87334" author="niu" created="Tue, 24 Jun 2014 01:24:39 +0000"  >&lt;blockquote&gt;
&lt;p&gt;01:18:26:== sanity-quota test 1: Block hard limit (normal use and out of quota) == 00:18:26 (1403507906)&lt;br/&gt;
01:18:28:Waiting for local destroys to complete&lt;br/&gt;
01:18:29:Creating test directory&lt;br/&gt;
01:18:29:CMD: shadow-47vm3,shadow-47vm4,shadow-47vm7 lctl set_param fail_val=0 fail_loc=0&lt;br/&gt;
01:18:29:fail_val=0&lt;br/&gt;
01:18:29:fail_loc=0&lt;br/&gt;
01:18:29:fail_val=0&lt;br/&gt;
01:18:29:fail_loc=0&lt;br/&gt;
01:18:29:fail_val=0&lt;br/&gt;
01:18:29:fail_loc=0&lt;br/&gt;
01:18:29:CMD: shadow-47vm3 /usr/sbin/lctl conf_param lustre.quota.ost=ug&lt;br/&gt;
01:18:29:CMD: shadow-47vm4 /usr/sbin/lctl dl&lt;br/&gt;
01:18:29:CMD: shadow-47vm4 /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-OST0000.quota_slave.enabled&lt;br/&gt;
01:18:29:CMD: shadow-47vm4 /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-OST0000.quota_slave.enabled&lt;br/&gt;
01:18:29:CMD: shadow-47vm4 /usr/sbin/lctl dl&lt;br/&gt;
01:18:30:CMD: shadow-47vm4 /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-OST0001.quota_slave.enabled&lt;br/&gt;
01:18:30:CMD: shadow-47vm4 /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-OST0001.quota_slave.enabled&lt;br/&gt;
01:18:30:CMD: shadow-47vm4 /usr/sbin/lctl dl&lt;br/&gt;
01:18:30:CMD: shadow-47vm4 /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-OST0002.quota_slave.enabled&lt;br/&gt;
01:18:31:CMD: shadow-47vm4 /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-OST0002.quota_slave.enabled&lt;br/&gt;
01:18:32:CMD: shadow-47vm4 /usr/sbin/lctl dl&lt;br/&gt;
01:18:32:CMD: shadow-47vm4 /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-OST0003.quota_slave.enabled&lt;br/&gt;
01:18:32:CMD: shadow-47vm4 /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-OST0003.quota_slave.enabled&lt;br/&gt;
01:18:32:CMD: shadow-47vm4 /usr/sbin/lctl dl&lt;br/&gt;
01:18:32:CMD: shadow-47vm4 /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-OST0004.quota_slave.enabled&lt;br/&gt;
01:18:33:CMD: shadow-47vm4 /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-OST0004.quota_slave.enabled&lt;br/&gt;
01:18:33:CMD: shadow-47vm4 /usr/sbin/lctl dl&lt;br/&gt;
01:18:33:CMD: shadow-47vm4 /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-OST0005.quota_slave.enabled&lt;br/&gt;
01:18:33:CMD: shadow-47vm4 /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-OST0005.quota_slave.enabled&lt;br/&gt;
01:18:34:CMD: shadow-47vm4 /usr/sbin/lctl dl&lt;br/&gt;
01:18:34:CMD: shadow-47vm4 /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-OST0006.quota_slave.enabled&lt;br/&gt;
01:18:34:CMD: shadow-47vm4 /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-OST0006.quota_slave.enabled&lt;br/&gt;
01:18:34:CMD: shadow-47vm4 /usr/sbin/lctl dl&lt;br/&gt;
01:18:34:CMD: shadow-47vm4 /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-OST0007.quota_slave.enabled&lt;br/&gt;
01:18:35:CMD: shadow-47vm4 /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-OST0007.quota_slave.enabled&lt;br/&gt;
01:18:35:User quota (block hardlimit:10 MB)&lt;br/&gt;
01:18:35:Write...&lt;br/&gt;
01:18:36:running as uid/gid/euid/egid 60000/60000/60000/60000, groups:&lt;br/&gt;
01:18:36: &lt;span class=&quot;error&quot;&gt;&amp;#91;dd&amp;#93;&lt;/span&gt; &lt;span class=&quot;error&quot;&gt;&amp;#91;if=/dev/zero&amp;#93;&lt;/span&gt; &lt;span class=&quot;error&quot;&gt;&amp;#91;bs=1M&amp;#93;&lt;/span&gt; &lt;span class=&quot;error&quot;&gt;&amp;#91;of=/mnt/lustre/d1.sanity-quota/f1.sanity-quota-0&amp;#93;&lt;/span&gt; &lt;span class=&quot;error&quot;&gt;&amp;#91;count=5&amp;#93;&lt;/span&gt;&lt;br/&gt;
01:18:36:5+0 records in&lt;br/&gt;
01:18:36:5+0 records out&lt;br/&gt;
01:18:36:5242880 bytes (5.2 MB) copied, 0.117616 s, 44.6 MB/s&lt;br/&gt;
01:18:36:Write out of block quota ...&lt;br/&gt;
01:18:36:running as uid/gid/euid/egid 60000/60000/60000/60000, groups:&lt;br/&gt;
01:18:36: &lt;span class=&quot;error&quot;&gt;&amp;#91;dd&amp;#93;&lt;/span&gt; &lt;span class=&quot;error&quot;&gt;&amp;#91;if=/dev/zero&amp;#93;&lt;/span&gt; &lt;span class=&quot;error&quot;&gt;&amp;#91;bs=1M&amp;#93;&lt;/span&gt; &lt;span class=&quot;error&quot;&gt;&amp;#91;of=/mnt/lustre/d1.sanity-quota/f1.sanity-quota-0&amp;#93;&lt;/span&gt; &lt;span class=&quot;error&quot;&gt;&amp;#91;count=5&amp;#93;&lt;/span&gt; &lt;span class=&quot;error&quot;&gt;&amp;#91;seek=5&amp;#93;&lt;/span&gt;&lt;br/&gt;
01:18:36:5+0 records in&lt;br/&gt;
01:18:36:5+0 records out&lt;br/&gt;
01:18:36:5242880 bytes (5.2 MB) copied, 0.0293611 s, 179 MB/s&lt;br/&gt;
01:18:36:running as uid/gid/euid/egid 60000/60000/60000/60000, groups:&lt;br/&gt;
01:18:36: &lt;span class=&quot;error&quot;&gt;&amp;#91;dd&amp;#93;&lt;/span&gt; &lt;span class=&quot;error&quot;&gt;&amp;#91;if=/dev/zero&amp;#93;&lt;/span&gt; &lt;span class=&quot;error&quot;&gt;&amp;#91;bs=1M&amp;#93;&lt;/span&gt; &lt;span class=&quot;error&quot;&gt;&amp;#91;of=/mnt/lustre/d1.sanity-quota/f1.sanity-quota-0&amp;#93;&lt;/span&gt; &lt;span class=&quot;error&quot;&gt;&amp;#91;count=1&amp;#93;&lt;/span&gt; &lt;span class=&quot;error&quot;&gt;&amp;#91;seek=10&amp;#93;&lt;/span&gt;&lt;br/&gt;
01:18:36:1+0 records in&lt;br/&gt;
01:18:36:1+0 records out&lt;br/&gt;
01:18:36:1048576 bytes (1.0 MB) copied, 0.0343534 s, 30.5 MB/s&lt;br/&gt;
01:18:36:Disk quotas for user quota_usr (uid 60000):&lt;br/&gt;
01:18:36:     Filesystem  kbytes   quota   limit   grace   files   quota   limit   grace&lt;br/&gt;
01:18:36:    /mnt/lustre   11264*      0   10240       -       1       0       0       -&lt;br/&gt;
01:18:36:lustre-MDT0000_UUID&lt;br/&gt;
01:18:36:                      0       -       0       -       1       -       0       -&lt;br/&gt;
01:18:36:lustre-MDT0001_UUID&lt;br/&gt;
01:18:36:                      0       -       0       -       0       -       0       -&lt;br/&gt;
01:18:36:lustre-MDT0002_UUID&lt;br/&gt;
01:18:36:                      0       -       0       -       0       -       0       -&lt;br/&gt;
01:18:36:lustre-MDT0003_UUID&lt;br/&gt;
01:18:36:                      0       -       0       -       0       -       0       -&lt;br/&gt;
01:18:36:lustre-OST0000_UUID&lt;br/&gt;
01:18:36:                      0       -       0       -       -       -       -       -&lt;br/&gt;
01:18:36:lustre-OST0001_UUID&lt;br/&gt;
01:18:36:                  11264       -       0       -       -       -       -       -&lt;br/&gt;
01:18:36:lustre-OST0002_UUID&lt;br/&gt;
01:18:36:                      0       -       0       -       -       -       -       -&lt;br/&gt;
01:18:36:lustre-OST0003_UUID&lt;br/&gt;
01:18:36:                      0       -       0       -       -       -       -       -&lt;br/&gt;
01:18:36:lustre-OST0004_UUID&lt;br/&gt;
01:18:36:                      0       -       0       -       -       -       -       -&lt;br/&gt;
01:18:36:lustre-OST0005_UUID&lt;br/&gt;
01:18:36:                      0       -       0       -       -       -       -       -&lt;br/&gt;
01:18:36:lustre-OST0006_UUID&lt;br/&gt;
01:18:36:                      0       -       0       -       -       -       -       -&lt;br/&gt;
01:18:36:lustre-OST0007_UUID&lt;br/&gt;
01:18:36:                      0       -       0       -       -       -       -       -&lt;br/&gt;
01:18:36:Total allocated inode limit: 0, total allocated block limit: 0&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;quota wasn&apos;t enforced on OSTs, looks wait_update() was broken somehow? I didn&apos;t see the message like &quot;Updated after...&quot; or &quot;Update not seen after...&quot; on console message.&lt;br/&gt;
The OSTs logs are missing in maloo too.&lt;/p&gt;</comment>
                            <comment id="87382" author="di.wang" created="Tue, 24 Jun 2014 16:18:19 +0000"  >&lt;p&gt;I retriggered the test, unfortunately, this can not be reproduced. But it seems the failure rate of this test is pretty high&lt;/p&gt;

&lt;p&gt;Error: &apos;user write success, but expect EDQUOT&apos; &lt;br/&gt;
Failure Rate: 43.00% of last 100 executions &lt;span class=&quot;error&quot;&gt;&amp;#91;all branches&amp;#93;&lt;/span&gt;&lt;/p&gt;

&lt;p&gt;So could you please search the maloo test result? Thanks. &lt;/p&gt;</comment>
                            <comment id="90943" author="lixi" created="Wed, 6 Aug 2014 00:23:48 +0000"  >&lt;p&gt;We saw similar problem on a production system. A couple of users found their files are truncated when they copied new files to lustre. And finially, they reproduced the problem using following script:&lt;/p&gt;

&lt;p&gt;echo a &amp;gt; testfile1 &amp;amp;&amp;amp; echo b &amp;gt;&amp;gt; testfile1 &amp;amp;&amp;amp; cat testfile1&lt;/p&gt;

&lt;p&gt;And the output of the script is always &apos;a\n&apos; for these users. And the output of &apos;ls -l&apos; shows that the file size is 2. However, we logged in another node, and run &apos;ls -l&apos;. We found that the size of the &apos;testfile1&apos; is actually 4 and the content of it is &apos;a\nb\n&apos;, which means the data has been written correctly. And please note root users do not have such kind of problem.&lt;/p&gt;

&lt;p&gt;We traced the operation, and found following logs.&lt;/p&gt;

&lt;p&gt;00000008:00000001:0.0:1407228580.717510:0:29229:0:(osc_cache.c:2274:osc_queue_async_io()) Process leaving (rc=18446744073709551494 : -122 : ffffffffffffff86)&lt;br/&gt;
00000008:00000001:0.0:1407228580.717514:0:29229:0:(osc_page.c:224:osc_page_cache_add()) Process leaving (rc=18446744073709551494 : -122 : ffffffffffffff86)&lt;br/&gt;
00000008:00000001:0.0:1407228580.717515:0:29229:0:(osc_io.c:313:osc_io_commit_async()) Process leaving (rc=18446744073709551494 : -122 : ffffffffffffff86)&lt;br/&gt;
00000020:00000001:0.0:1407228580.717516:0:29229:0:(cl_io.c:801:cl_io_commit_async()) Process leaving (rc=18446744073709551494 : -122 : ffffffffffffff86)&lt;br/&gt;
00020000:00000001:0.0:1407228580.717516:0:29229:0:(lov_io.c:665:lov_io_commit_async()) Process leaving (rc=18446744073709551494 : -122 : ffffffffffffff86)&lt;br/&gt;
00000020:00000001:0.0:1407228580.717517:0:29229:0:(cl_io.c:801:cl_io_commit_async()) Process leaving (rc=18446744073709551494 : -122 : ffffffffffffff86)&lt;br/&gt;
00000080:00000001:0.0:1407228580.742691:0:29229:0:(xattr.c:321:ll_getxattr_common()) Process leaving (rc=18446744073709551555 : -61 : ffffffffffffffc3)&lt;br/&gt;
00000008:00000001:0.0:1407228580.742728:0:29229:0:(osc_cache.c:2274:osc_queue_async_io()) Process leaving (rc=18446744073709551494 : -122 : ffffffffffffff86)&lt;br/&gt;
00000008:00000001:0.0:1407228580.742731:0:29229:0:(osc_page.c:224:osc_page_cache_add()) Process leaving (rc=18446744073709551494 : -122 : ffffffffffffff86)&lt;br/&gt;
00000008:00000001:0.0:1407228580.742732:0:29229:0:(osc_io.c:313:osc_io_commit_async()) Process leaving (rc=18446744073709551494 : -122 : ffffffffffffff86)&lt;br/&gt;
00000020:00000001:0.0:1407228580.742733:0:29229:0:(cl_io.c:801:cl_io_commit_async()) Process leaving (rc=18446744073709551494 : -122 : ffffffffffffff86)&lt;br/&gt;
00020000:00000001:0.0:1407228580.742734:0:29229:0:(lov_io.c:665:lov_io_commit_async()) Process leaving (rc=18446744073709551494 : -122 : ffffffffffffff86)&lt;br/&gt;
00000020:00000001:0.0:1407228580.742734:0:29229:0:(cl_io.c:801:cl_io_commit_async()) Process leaving (rc=18446744073709551494 : -122 : ffffffffffffff86)&lt;br/&gt;
00000008:00000001:0.0:1407228580.757374:0:29229:0:(osc_cache.c:2274:osc_queue_async_io()) Process leaving (rc=18446744073709551494 : -122 : ffffffffffffff86)&lt;br/&gt;
00000008:00000001:0.0:1407228580.757377:0:29229:0:(osc_page.c:224:osc_page_cache_add()) Process leaving (rc=18446744073709551494 : -122 : ffffffffffffff86)&lt;br/&gt;
00000008:00000001:0.0:1407228580.757378:0:29229:0:(osc_io.c:313:osc_io_commit_async()) Process leaving (rc=18446744073709551494 : -122 : ffffffffffffff86)&lt;br/&gt;
00000020:00000001:0.0:1407228580.757379:0:29229:0:(cl_io.c:801:cl_io_commit_async()) Process leaving (rc=18446744073709551494 : -122 : ffffffffffffff86)&lt;br/&gt;
00020000:00000001:0.0:1407228580.757379:0:29229:0:(lov_io.c:665:lov_io_commit_async()) Process leaving (rc=18446744073709551494 : -122 : ffffffffffffff86)&lt;br/&gt;
00000020:00000001:0.0:1407228580.757380:0:29229:0:(cl_io.c:801:cl_io_commit_async()) Process leaving (rc=18446744073709551494 : -122 : ffffffffffffff86)&lt;/p&gt;

&lt;p&gt;It seems osc_quota_chkdq() returns NO_QUOTA. And +quota log is:&lt;/p&gt;

&lt;p&gt;00000001:04000000:0.0F:1407230285.413534:0:29229:0:(osc_quota.c:64:osc_quota_chkdq()) chkdq found noquota for user 5800&lt;br/&gt;
00000008:04000000:9.0F:1407230285.435331:0:1869:0:(osc_request.c:1528:osc_brw_fini_request()) setdq for &lt;span class=&quot;error&quot;&gt;&amp;#91;5800 1090&amp;#93;&lt;/span&gt; with valid 0x6f184fb9, flags 2100&lt;br/&gt;
00000001:04000000:0.0:1407230285.435884:0:29229:0:(osc_quota.c:64:osc_quota_chkdq()) chkdq found noquota for user 5800&lt;br/&gt;
00000008:04000000:14.0F:1407230285.452187:0:1871:0:(osc_request.c:1528:osc_brw_fini_request()) setdq for &lt;span class=&quot;error&quot;&gt;&amp;#91;5800 1090&amp;#93;&lt;/span&gt; with valid 0x6f184fb9, flags 2100&lt;br/&gt;
00000001:04000000:0.0:1407230285.455988:0:29229:0:(osc_quota.c:64:osc_quota_chkdq()) chkdq found noquota for user 5800&lt;br/&gt;
00000008:04000000:3.0F:1407230285.519352:0:1875:0:(osc_request.c:1528:osc_brw_fini_request()) setdq for &lt;span class=&quot;error&quot;&gt;&amp;#91;5800 1090&amp;#93;&lt;/span&gt; with valid 0x6f184fb9, flags 2100&lt;/p&gt;

&lt;p&gt;It seems that Lustre thinks this user has no quota. However, first, the users definitely had not reached their space limits, and second &apos;echo &amp;gt;&amp;gt;&apos; should return failure if the user&apos;s quota is exceeded.&lt;/p&gt;

&lt;p&gt;After we disable quota enforcement on OSTs, this problem is gone.&lt;/p&gt;</comment>
                            <comment id="90944" author="lixi" created="Wed, 6 Aug 2014 00:26:44 +0000"  >&lt;p&gt;Here is the trace log.&lt;/p&gt;</comment>
                            <comment id="90950" author="niu" created="Wed, 6 Aug 2014 02:05:18 +0000"  >&lt;blockquote&gt;
&lt;p&gt;It seems that Lustre thinks this user has no quota. However, first, the users definitely had not reached their space limits, and second &apos;echo &amp;gt;&amp;gt;&apos; should return failure if the user&apos;s quota is exceeded.&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;osc_quota_chkdq() returns NO_QUOTA doesn&apos;t necessarily mean the user is already running out of quota, when the used space approaching quota limit, this function will return NO_QUOTA to inform client to turn the async write to sync write. Could you provide the output of &apos;lfs quota -v&apos;? Thanks.&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;And the output of the script is always &apos;a\n&apos; for these users. And the output of &apos;ls -l&apos; shows that the file size is 2. However, we loged in another node, and run &apos;ls -l&apos;. We found that the size of the &apos;testfile1&apos; is actually 4 and the content of it is &apos;a\nb\n&apos;, which means the data has been written correctly. And please note root users do not have such kind of problem.&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;I guess that&apos;s not a quota problem, we&apos;d collect logs (on both clients and OST, better with D_TRACE enabled and some debug marker recording where the &apos;ls&apos; start/end) of &apos;ls&apos; to see why the file size is inconsistent on different client.&lt;/p&gt;</comment>
                            <comment id="90951" author="lixi" created="Wed, 6 Aug 2014 02:29:01 +0000"  >&lt;p&gt;Here it goes. Please note that we are using Lustre-2.4.x for servers and Lustre-2.5.x for client. &lt;img class=&quot;emoticon&quot; src=&quot;https://jira.whamcloud.com/images/icons/emoticons/smile.png&quot; height=&quot;16&quot; width=&quot;16&quot; align=&quot;absmiddle&quot; alt=&quot;&quot; border=&quot;0&quot;/&gt;&lt;/p&gt;

&lt;p&gt;&lt;span class=&quot;error&quot;&gt;&amp;#91;12:26:53 root@r7:~&amp;#93;&lt;/span&gt; # lfs quota -v -u bjm900 /home&lt;br/&gt;
Disk quotas for user bjm900 (uid 5800):&lt;br/&gt;
     Filesystem  kbytes   quota   limit   grace   files   quota   limit   grace&lt;br/&gt;
          /home 1599228  104857600 104857600       -   59224  1000000 1000000       -&lt;br/&gt;
homsys-MDT0000_UUID&lt;br/&gt;
                  13428       -       0       -   59224       -   64206       -&lt;br/&gt;
homsys-OST0000_UUID&lt;br/&gt;
                  21352       -   22376       -       -       -       -       -&lt;br/&gt;
homsys-OST0001_UUID&lt;br/&gt;
                  16488       -   17512       -       -       -       -       -&lt;br/&gt;
homsys-OST0002_UUID&lt;br/&gt;
                  12920       -   13576       -       -       -       -       -&lt;br/&gt;
homsys-OST0003_UUID&lt;br/&gt;
                  23704       -   24220       -       -       -       -       -&lt;br/&gt;
homsys-OST0004_UUID&lt;br/&gt;
                  17864       -   18888       -       -       -       -       -&lt;br/&gt;
homsys-OST0005_UUID&lt;br/&gt;
                  27436       -   28160       -       -       -       -       -&lt;br/&gt;
homsys-OST0006_UUID&lt;br/&gt;
                  12508       -   13532       -       -       -       -       -&lt;br/&gt;
homsys-OST0007_UUID&lt;br/&gt;
                  20476       -   21496       -       -       -       -       -&lt;br/&gt;
homsys-OST0008_UUID&lt;br/&gt;
                  11136       -   12156       -       -       -       -       -&lt;br/&gt;
homsys-OST0009_UUID&lt;br/&gt;
                  21872       -   22896       -       -       -       -       -&lt;br/&gt;
homsys-OST000a_UUID&lt;br/&gt;
                  13408       -   14432       -       -       -       -       -&lt;br/&gt;
homsys-OST000b_UUID&lt;br/&gt;
                  15312       -   16336       -       -       -       -       -&lt;br/&gt;
homsys-OST000c_UUID&lt;br/&gt;
                  39516       -   40536       -       -       -       -       -&lt;br/&gt;
homsys-OST000d_UUID&lt;br/&gt;
                  21108       -   22132       -       -       -       -       -&lt;br/&gt;
homsys-OST000e_UUID&lt;br/&gt;
                  17880       -   18904       -       -       -       -       -&lt;br/&gt;
homsys-OST000f_UUID&lt;br/&gt;
                  24440       -   25464       -       -       -       -       -&lt;br/&gt;
homsys-OST0010_UUID&lt;br/&gt;
                  18652       -   19676       -       -       -       -       -&lt;br/&gt;
homsys-OST0011_UUID&lt;br/&gt;
                  36456       -   37476       -       -       -       -       -&lt;br/&gt;
homsys-OST0012_UUID&lt;br/&gt;
                  17332       -   17864       -       -       -       -       -&lt;br/&gt;
homsys-OST0013_UUID&lt;br/&gt;
                  28272       -   29296       -       -       -       -       -&lt;br/&gt;
homsys-OST0014_UUID&lt;br/&gt;
                  32920       -   33944       -       -       -       -       -&lt;br/&gt;
homsys-OST0015_UUID&lt;br/&gt;
                  21708       -   22728       -       -       -       -       -&lt;br/&gt;
homsys-OST0016_UUID&lt;br/&gt;
                  21928       -   22952       -       -       -       -       -&lt;br/&gt;
homsys-OST0017_UUID&lt;br/&gt;
                  15104       -   15872       -       -       -       -       -&lt;br/&gt;
homsys-OST0018_UUID&lt;br/&gt;
                  18360       -   19384       -       -       -       -       -&lt;br/&gt;
homsys-OST0019_UUID&lt;br/&gt;
                  22288       -   23304       -       -       -       -       -&lt;br/&gt;
homsys-OST001a_UUID&lt;br/&gt;
                  11524       -   12548       -       -       -       -       -&lt;br/&gt;
homsys-OST001b_UUID&lt;br/&gt;
                  23016       -   24040       -       -       -       -       -&lt;br/&gt;
homsys-OST001c_UUID&lt;br/&gt;
                  14044       -   15068       -       -       -       -       -&lt;br/&gt;
homsys-OST001d_UUID&lt;br/&gt;
                  16692       -   17716       -       -       -       -       -&lt;br/&gt;
homsys-OST001e_UUID&lt;br/&gt;
                  39124       -   40148       -       -       -       -       -&lt;br/&gt;
homsys-OST001f_UUID&lt;br/&gt;
                  13484       -   14012       -       -       -       -       -&lt;br/&gt;
homsys-OST0020_UUID&lt;br/&gt;
                  11500       -   12524       -       -       -       -       -&lt;br/&gt;
homsys-OST0021_UUID&lt;br/&gt;
                  12004       -   13028       -       -       -       -       -&lt;br/&gt;
homsys-OST0022_UUID&lt;br/&gt;
                  26332       -   27356       -       -       -       -       -&lt;br/&gt;
homsys-OST0023_UUID&lt;br/&gt;
                  13896       -   14920       -       -       -       -       -&lt;br/&gt;
homsys-OST0024_UUID&lt;br/&gt;
                  17100       -   18120       -       -       -       -       -&lt;br/&gt;
homsys-OST0025_UUID&lt;br/&gt;
                  27388       -   28412       -       -       -       -       -&lt;br/&gt;
homsys-OST0026_UUID&lt;br/&gt;
                  10800       -   11824       -       -       -       -       -&lt;br/&gt;
homsys-OST0027_UUID&lt;br/&gt;
                  25572       -   26596       -       -       -       -       -&lt;br/&gt;
homsys-OST0028_UUID&lt;br/&gt;
                  23144       -   24064       -       -       -       -       -&lt;br/&gt;
homsys-OST0029_UUID&lt;br/&gt;
                  13700       -   14552       -       -       -       -       -&lt;br/&gt;
homsys-OST002a_UUID&lt;br/&gt;
                  21748       -   22772       -       -       -       -       -&lt;br/&gt;
homsys-OST002b_UUID&lt;br/&gt;
                  21800       -   22824       -       -       -       -       -&lt;br/&gt;
homsys-OST002c_UUID&lt;br/&gt;
                  16600       -   17624       -       -       -       -       -&lt;br/&gt;
homsys-OST002d_UUID&lt;br/&gt;
                  12224       -   13248       -       -       -       -       -&lt;br/&gt;
homsys-OST002e_UUID&lt;br/&gt;
                  12796       -   13820       -       -       -       -       -&lt;br/&gt;
homsys-OST002f_UUID&lt;br/&gt;
                  10436       -   11460       -       -       -       -       -&lt;br/&gt;
homsys-OST0030_UUID&lt;br/&gt;
                  24940       -   25960       -       -       -       -       -&lt;br/&gt;
homsys-OST0031_UUID&lt;br/&gt;
                  13820       -   14844       -       -       -       -       -&lt;br/&gt;
homsys-OST0032_UUID&lt;br/&gt;
                  10276       -   11296       -       -       -       -       -&lt;br/&gt;
homsys-OST0033_UUID&lt;br/&gt;
                  14324       -   14856       -       -       -       -       -&lt;br/&gt;
homsys-OST0034_UUID&lt;br/&gt;
                  11168       -   11776       -       -       -       -       -&lt;br/&gt;
homsys-OST0035_UUID&lt;br/&gt;
                  17876       -   18900       -       -       -       -       -&lt;br/&gt;
homsys-OST0036_UUID&lt;br/&gt;
                  14740       -   15764       -       -       -       -       -&lt;br/&gt;
homsys-OST0037_UUID&lt;br/&gt;
                  24764       -   25788       -       -       -       -       -&lt;br/&gt;
homsys-OST0038_UUID&lt;br/&gt;
                  17848       -   18868       -       -       -       -       -&lt;br/&gt;
homsys-OST0039_UUID&lt;br/&gt;
                  15164       -   15720       -       -       -       -       -&lt;br/&gt;
homsys-OST003a_UUID&lt;br/&gt;
                  18736       -   19760       -       -       -       -       -&lt;br/&gt;
homsys-OST003b_UUID&lt;br/&gt;
                  14476       -   15500       -       -       -       -       -&lt;br/&gt;
homsys-OST003c_UUID&lt;br/&gt;
                   4024       -    5048       -       -       -       -       -&lt;br/&gt;
homsys-OST003d_UUID&lt;br/&gt;
                  13588       -   14612       -       -       -       -       -&lt;br/&gt;
homsys-OST003e_UUID&lt;br/&gt;
                  13576       -   14600       -       -       -       -       -&lt;br/&gt;
homsys-OST003f_UUID&lt;br/&gt;
                  26372       -   27396       -       -       -       -       -&lt;br/&gt;
homsys-OST0040_UUID&lt;br/&gt;
                  50380       -   51404       -       -       -       -       -&lt;br/&gt;
homsys-OST0041_UUID&lt;br/&gt;
                  24796       -   25816       -       -       -       -       -&lt;br/&gt;
homsys-OST0042_UUID&lt;br/&gt;
                  24176       -   25196       -       -       -       -       -&lt;br/&gt;
homsys-OST0043_UUID&lt;br/&gt;
                  12776       -   13800       -       -       -       -       -&lt;br/&gt;
homsys-OST0044_UUID&lt;br/&gt;
                  13444       -   14468       -       -       -       -       -&lt;br/&gt;
homsys-OST0045_UUID&lt;br/&gt;
                  23492       -   24476       -       -       -       -       -&lt;br/&gt;
homsys-OST0046_UUID&lt;br/&gt;
                  11412       -   12436       -       -       -       -       -&lt;br/&gt;
homsys-OST0047_UUID&lt;br/&gt;
                  14552       -   15576       -       -       -       -       -&lt;br/&gt;
homsys-OST0048_UUID&lt;br/&gt;
                  19140       -   19664       -       -       -       -       -&lt;br/&gt;
homsys-OST0049_UUID&lt;br/&gt;
                  12384       -   13408       -       -       -       -       -&lt;br/&gt;
homsys-OST004a_UUID&lt;br/&gt;
                  29392       -   30416       -       -       -       -       -&lt;br/&gt;
homsys-OST004b_UUID&lt;br/&gt;
                  40412       -   41436       -       -       -       -       -&lt;br/&gt;
homsys-OST004c_UUID&lt;br/&gt;
                  52872       -   53896       -       -       -       -       -&lt;br/&gt;
homsys-OST004d_UUID&lt;br/&gt;
                  29372       -   30396       -       -       -       -       -&lt;br/&gt;
homsys-OST004e_UUID&lt;br/&gt;
                  13144       -   14164       -       -       -       -       -&lt;br/&gt;
homsys-OST004f_UUID&lt;br/&gt;
                  13000       -   14024       -       -       -       -       -&lt;br/&gt;
Total allocated inode limit: 64206, total allocated block limit: 1663052&lt;/p&gt;</comment>
                            <comment id="90953" author="niu" created="Wed, 6 Aug 2014 05:41:16 +0000"  >&lt;p&gt;Looks the used space is far below the quota limit, so we&apos;d figure out if it really write to OST first:&lt;/p&gt;
&lt;ul class=&quot;alternate&quot; type=&quot;square&quot;&gt;
	&lt;li&gt;If write on OST failed with EDQUOT either, this could be &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4505&quot; title=&quot;invalid &amp;quot;Disk quota exceed&amp;quot; error&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4505&quot;&gt;&lt;del&gt;LU-4505&lt;/del&gt;&lt;/a&gt;, collecting log on OST could be helpful. (for file write, with D_QUOTA enabled)&lt;/li&gt;
	&lt;li&gt;If write on OST succeeded, we&apos;d collect log to see why this client can&apos;t read it (as I mentioned in previous comment), and try the same test on another clean client to see if it can be reproduced.&lt;/li&gt;
&lt;/ul&gt;


&lt;p&gt;Anyway, I don&apos;t think this is related to current ticket, could you create a new ticket for this problem? Thanks.&lt;/p&gt;</comment>
                            <comment id="120522" author="kit.westneat" created="Tue, 7 Jul 2015 01:42:18 +0000"  >&lt;p&gt;This test result looks somewhat similar:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/08918008-2437-11e5-b6b4-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/08918008-2437-11e5-b6b4-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="120922" author="emoly.liu" created="Fri, 10 Jul 2015 03:09:02 +0000"  >&lt;p&gt;I hit a similar issue: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/f878df9a-2661-11e5-8b33-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/f878df9a-2661-11e5-8b33-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="120933" author="niu" created="Fri, 10 Jul 2015 04:40:32 +0000"  >&lt;p&gt;All the failures were because the quota slave didn&apos;t connect to the master, so quota wasn&apos;t enforced on the slave.&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;00000100:00100000:0.0:1436439856.246154:0:23692:0:(nrs_fifo.c:179:nrs_fifo_req_get()) NRS start fifo request from 12345-10.1.4.48@tcp, seq: 852
00000100:00100000:0.0:1436439856.246163:0:23692:0:(service.c:2076:ptlrpc_server_handle_request()) Handling RPC pname:cluuid+ref:pid:xid:nid:opc mdt00_004:0+-99:3137:x1505240840316480:12345-10.1.4.48@tcp:38
00010000:02000400:0.0:1436439856.246191:0:23692:0:(ldlm_lib.c:1026:target_handle_connect()) lustre-MDT0000: Received LWP connection from 10.1.4.48@tcp, removing former export from 10.1.4.52@tcp
00000020:00080000:0.0:1436439856.246195:0:23692:0:(genops.c:1382:class_fail_export()) disconnecting export ffff8800798e9400/lustre-MDT0000-lwp-OST0003_UUID
00000020:00000080:0.0:1436439856.246204:0:23692:0:(genops.c:1215:class_disconnect()) disconnect: cookie 0x515596b0134eb7f7
00000100:00080000:0.0:1436439856.246211:0:23692:0:(&lt;span class=&quot;code-keyword&quot;&gt;import&lt;/span&gt;.c:1601:ptlrpc_cleanup_imp()) ffff880063495000 PA#}: changing &lt;span class=&quot;code-keyword&quot;&gt;import&lt;/span&gt; state from FULL to CLOSED
00000020:00080000:0.0:1436439856.246261:0:23692:0:(genops.c:1399:class_fail_export()) disconnected export ffff8800798e9400/lustre-MDT0000-lwp-OST0003_UUID
00000020:00000080:0.0:1436439856.246263:0:23692:0:(genops.c:814:class_export_put()) &lt;span class=&quot;code-keyword&quot;&gt;final&lt;/span&gt; put ffff8800798e9400/lustre-MDT0000-lwp-OST0003_UUID
00010000:00080000:0.0:1436439856.246290:0:23692:0:(ldlm_lib.c:1092:target_handle_connect()) lustre-MDT0000: connection from lustre-MDT0000-lwp-OST0003_UUID@10.1.4.48@tcp t0 exp (&lt;span class=&quot;code-keyword&quot;&gt;null&lt;/span&gt;) cur 1436439856 last 0
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;I see from the log that the OST connected to the master with a different IP address during testing; do we fail over OSTs while testing?&lt;/p&gt;</comment>
                            <comment id="125873" author="jamesanunez" created="Tue, 1 Sep 2015 16:36:52 +0000"  >&lt;p&gt;I think we hit this issue again on PPC builds. Logs at:&lt;br/&gt;
2015-09-01 03:24:28 - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/b1d82b30-508d-11e5-95a9-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/b1d82b30-508d-11e5-95a9-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="133936" author="jamesanunez" created="Thu, 19 Nov 2015 14:49:23 +0000"  >&lt;p&gt;Another instance on master:&lt;br/&gt;
2015-11-15 14:01:26 - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/9e1632c2-8bbb-11e5-9933-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/9e1632c2-8bbb-11e5-9933-5254006e85c2&lt;/a&gt;&lt;br/&gt;
2015-11-18 16:20:31 - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/cf90e830-8e25-11e5-8da8-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/cf90e830-8e25-11e5-8da8-5254006e85c2&lt;/a&gt;&lt;br/&gt;
2015-11-18 21:25:31 - &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/1ce03240-8e52-11e5-8da8-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/1ce03240-8e52-11e5-8da8-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="197174" author="yong.fan" created="Fri, 26 May 2017 16:11:43 +0000"  >&lt;p&gt;+1 on master:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/fedc1a0c-4219-11e7-b558-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/fedc1a0c-4219-11e7-b558-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="201064" author="bogl" created="Wed, 5 Jul 2017 21:40:19 +0000"  >&lt;p&gt;another on master:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/a4bae786-61c9-11e7-9230-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/a4bae786-61c9-11e7-9230-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="229461" author="bogl" created="Tue, 12 Jun 2018 16:38:10 +0000"  >&lt;p&gt;another on master:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/772bbf04-6dec-11e8-a522-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/772bbf04-6dec-11e8-a522-52540065bddc&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="237272" author="adilger" created="Tue, 20 Nov 2018 21:02:51 +0000"  >&lt;p&gt;Closing this old issue, use &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-11678&quot; title=&quot;sanity-quota test 1 fails with &amp;#39;user write success, but expect EDQUOT&amp;#39;&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-11678&quot;&gt;&lt;del&gt;LU-11678&lt;/del&gt;&lt;/a&gt; for new&lt;img class=&quot;emoticon&quot; src=&quot;https://jira.whamcloud.com/images/icons/emoticons/help_16.png&quot; height=&quot;16&quot; width=&quot;16&quot; align=&quot;absmiddle&quot; alt=&quot;&quot; border=&quot;0&quot;/&gt; issue with same symptom.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="45447">LU-9326</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="54056">LU-11678</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                            <attachment id="15484" name="bjm900_lustre_2.zip" size="1013125" author="lixi" created="Wed, 6 Aug 2014 00:26:44 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwppr:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>14627</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>