<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:37:21 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-3839] Incorrect file system usage on Lustre Quota</title>
                <link>https://jira.whamcloud.com/browse/LU-3839</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;Lustre quota does not show correct file system usage for an user.&lt;br/&gt;
When the customer counted up file size, the usage of that user is about 62GB, but &quot;lfs quota&quot; is showing 7TB.&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;$ COUNT=0; for i in `cat file_size_20130821.txt | awk &apos;{ print $5 }&apos;`

    do
    COUNT=`expr ${COUNT} + ${i}`
    done; echo &quot;SUM ${COUNT}&quot;

SUM 65272511
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Lustre quota shows as follows.&lt;/p&gt;


&lt;p&gt;&lt;span class=&quot;error&quot;&gt;&amp;#91;root@se1 ~&amp;#93;&lt;/span&gt;# date&lt;br/&gt;
Fri Aug 23 10:00:35 JST 2013&lt;/p&gt;

&lt;p&gt;&lt;span class=&quot;error&quot;&gt;&amp;#91;root@se1 ~&amp;#93;&lt;/span&gt;# lfs quota -u kawashin /nshare2&lt;br/&gt;
Disk quotas for user kawashin (uid 14520):&lt;br/&gt;
Filesystem kbytes quota limit grace files quota limit grace&lt;br/&gt;
/nshare2 7076986516 0 0 - 14157 0 0 -&lt;/p&gt;

&lt;p&gt;quotacheck did not work.&lt;/p&gt;

&lt;p&gt;&lt;span class=&quot;error&quot;&gt;&amp;#91;root@se1 ~&amp;#93;&lt;/span&gt;# lfs quotacheck -ug /nshare2 ; date&lt;br/&gt;
Fri Aug 23 10:01:50 JST 2013&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;root@se1 ~&amp;#93;&lt;/span&gt;# lfs quota -u kawashin /nshare2&lt;br/&gt;
Disk quotas for user kawashin (uid 14520):&lt;br/&gt;
Filesystem kbytes quota limit grace files quota limit grace&lt;br/&gt;
/nshare2 7076986516 0 0 - 14157 0 0 -&lt;/p&gt;</description>
                <environment>applied patch following patches against 1.8.8.&lt;br/&gt;
{noformat}&lt;br/&gt;
bd649b1 &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1754&quot; title=&quot;Kernel update [RHEL 6.3 2.6.32-279.5.1.el6]&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1754&quot;&gt;&lt;strike&gt;LU-1754&lt;/strike&gt;&lt;/a&gt; kernel: Kernel update [RHEL6.3 2.6.32-279.5.1.el6]&lt;br/&gt;
1513306 &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-919&quot; title=&quot;Multiple wrong LBUGs checking cfs_atomic_t vars/fields with inacurate poison value of 0x5a5a5a&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-919&quot;&gt;&lt;strike&gt;LU-919&lt;/strike&gt;&lt;/a&gt; obdclass: remove hard coded 0x5a5a5a&lt;br/&gt;
7a9fc09 &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1720&quot; title=&quot;Quota doesn&amp;#39;t work over 4TB on single OST&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1720&quot;&gt;&lt;strike&gt;LU-1720&lt;/strike&gt;&lt;/a&gt; kernel: Quota doesn&amp;#39;t work over 4TB on single OST&lt;br/&gt;
df3a540 &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1782&quot; title=&quot;Ignore sb_has_quota_active() in OFED&amp;#39;s header&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1782&quot;&gt;&lt;strike&gt;LU-1782&lt;/strike&gt;&lt;/a&gt; quota: ignore sb_has_quota_active() in OFED&amp;#39;s header&lt;br/&gt;
8c3084c &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1496&quot; title=&quot;Client evicted frequently due to lock callback timer expiration&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1496&quot;&gt;&lt;strike&gt;LU-1496&lt;/strike&gt;&lt;/a&gt; ptlrpc: prolong rw locks even IO RPCs are finished&lt;br/&gt;
747d905 &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1115&quot; title=&quot;software raid6 related BUG in fs/bio.c:222 when raid chunk &amp;gt; 64k&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1115&quot;&gt;&lt;strike&gt;LU-1115&lt;/strike&gt;&lt;/a&gt; kernel: software raid6 related BUG&lt;br/&gt;
01811d4 &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-359&quot; title=&quot;Confused error message after write failure&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-359&quot;&gt;&lt;strike&gt;LU-359&lt;/strike&gt;&lt;/a&gt; llite: no close error if application has known failure&lt;br/&gt;
944d1c1 &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1488&quot; title=&quot;2.1.2 servers, 1.8.8 clients _mdc_blocking_ast()) ### data mismatch with ino&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1488&quot;&gt;&lt;strike&gt;LU-1488&lt;/strike&gt;&lt;/a&gt; mdc: fix fid_res_name_eq() issue.&lt;br/&gt;
8254103 &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1511&quot; title=&quot;Kernel update [RHEL5.8 2.6.18-308.11.1.el5]&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1511&quot;&gt;&lt;strike&gt;LU-1511&lt;/strike&gt;&lt;/a&gt; kernel: kernel update [RHEL5.8 2.6.18-308.11.1.el5]&lt;br/&gt;
048c04b &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1563&quot; title=&quot;lustre_quota.h:326:lqs_putref() LBUG&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1563&quot;&gt;&lt;strike&gt;LU-1563&lt;/strike&gt;&lt;/a&gt; quota: Put lqs properly in quota_pending_commit()&lt;br/&gt;
e53e756 &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1535&quot; title=&quot;LustreError: 1843:0:(mds_open.c:1645:mds_close()) &quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1535&quot;&gt;&lt;strike&gt;LU-1535&lt;/strike&gt;&lt;/a&gt; ldlm: backport fix for &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1128&quot; title=&quot;Complete investigation of the LDLM pool shrinker and SLV handling&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1128&quot;&gt;&lt;strike&gt;LU-1128&lt;/strike&gt;&lt;/a&gt;&lt;br/&gt;
3a4f224 &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1459&quot; title=&quot;Disabling OSC in file system causes multiple issues&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1459&quot;&gt;&lt;strike&gt;LU-1459&lt;/strike&gt;&lt;/a&gt; llite: Don&amp;#39;t LBUG when import has LUSTRE_IMP_NEW state&lt;br/&gt;
0152977 &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1459&quot; title=&quot;Disabling OSC in file system causes multiple issues&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1459&quot;&gt;&lt;strike&gt;LU-1459&lt;/strike&gt;&lt;/a&gt; llite: Don&amp;#39;t use unitialized variable&lt;br/&gt;
65b7e5a &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1448&quot; title=&quot;Disabled OSC can cause NULL pointer dereference when reading import&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1448&quot;&gt;&lt;strike&gt;LU-1448&lt;/strike&gt;&lt;/a&gt; llite: Prevent NULL pointer dereference on disabled OSC&lt;br/&gt;
bd671c0 &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1438&quot; title=&quot;quota_chk_acq_common() still haven&amp;#39;t managed to acquire quota&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1438&quot;&gt;&lt;strike&gt;LU-1438&lt;/strike&gt;&lt;/a&gt; quota: quota active checking is missed on slave&lt;br/&gt;
423bfd1 &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1438&quot; title=&quot;quota_chk_acq_common() still haven&amp;#39;t managed to acquire quota&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1438&quot;&gt;&lt;strike&gt;LU-1438&lt;/strike&gt;&lt;/a&gt; quota: fix race in quota_chk_acq_common()&lt;br/&gt;
e92a9dd &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-814&quot; title=&quot;automate NFSv3/v4 over Lustre Testing&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-814&quot;&gt;&lt;strike&gt;LU-814&lt;/strike&gt;&lt;/a&gt; tests: remove leading spaces from $WRITE_DISJOINT&lt;br/&gt;
bc88c4c &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-121&quot; title=&quot;yaml.sh and test-framework sometimes attaches the full qualified node name to its output files and sometimes just the first element.&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-121&quot;&gt;&lt;strike&gt;LU-121&lt;/strike&gt;&lt;/a&gt; test: Change framework to only use the short hostname.&lt;br/&gt;
294b409 &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-458&quot; title=&quot;silence excess 1.8 error messages&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-458&quot;&gt;&lt;strike&gt;LU-458&lt;/strike&gt;&lt;/a&gt; debug: print client profile name correctly&lt;br/&gt;
7ef90f4 &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1424&quot; title=&quot;Kernel update [RHEL6.2 2.6.32-220.17.1.el6]&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1424&quot;&gt;&lt;strike&gt;LU-1424&lt;/strike&gt;&lt;/a&gt; kernel: Kernel update [RHEL6.2 2.6.32-220.17.1.el6]&lt;br/&gt;
48c2f66 &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-458&quot; title=&quot;silence excess 1.8 error messages&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-458&quot;&gt;&lt;strike&gt;LU-458&lt;/strike&gt;&lt;/a&gt; debug: use profilenm before running class_del_profile()&lt;br/&gt;
fe92ca6 &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-425&quot; title=&quot;SANITYN sanity_mount_check_clients fails if $MOUNT is a substring of $MOUNT2&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-425&quot;&gt;&lt;strike&gt;LU-425&lt;/strike&gt;&lt;/a&gt; tests: fix the issue of using &amp;quot;grep -w&amp;quot; &lt;br/&gt;
dd8037d &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1340&quot; title=&quot;1.8.8-wc1&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1340&quot;&gt;&lt;strike&gt;LU-1340&lt;/strike&gt;&lt;/a&gt; release: get ready for 1.8.8-wc1 RC1&lt;br/&gt;
{noformat}&lt;br/&gt;
</environment>
        <key id="20625">LU-3839</key>
            <summary>Incorrect file system usage on Lustre Quota</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="6">Not a Bug</resolution>
                                        <assignee username="niu">Niu Yawei</assignee>
                                    <reporter username="ihara">Shuichi Ihara</reporter>
                        <labels>
                    </labels>
                <created>Tue, 27 Aug 2013 08:03:29 +0000</created>
                <updated>Thu, 6 Feb 2014 19:27:36 +0000</updated>
                            <resolved>Thu, 6 Feb 2014 19:27:36 +0000</resolved>
                                    <version>Lustre 1.8.8</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>6</watches>
                                                                            <comments>
                            <comment id="65139" author="ihara" created="Tue, 27 Aug 2013 08:04:57 +0000"  >&lt;p&gt;All log files of OSS/MDS&lt;/p&gt;</comment>
                            <comment id="65140" author="ihara" created="Tue, 27 Aug 2013 08:07:07 +0000"  >&lt;p&gt;There are a lot of the following error messages on MDS.&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Aug 23 10:01:32 nmd031i kernel: LustreError: 10005:0:(fsfilt-ldiskfs.c:2243:fsfilt_ldiskfs_dquot()) operate dquot before it&apos;s enabled!
Aug 23 10:01:32 nmd031i kernel: LustreError: 10005:0:(quota_master.c:219:lustre_dqget()) can&apos;t read dquot from admin quotafile! (rc:-5)
Aug 23 10:01:32 nmd031i kernel: LustreError: 10005:0:(ldlm_lib.c:2123:target_handle_dqacq_callback()) dqacq failed! (rc:-5)
Aug 23 10:01:32 nmd031i kernel: LustreError: 10009:0:(fsfilt-ldiskfs.c:2243:fsfilt_ldiskfs_dquot()) operate dquot before it&apos;s enabled!
Aug 23 10:01:32 nmd031i kernel: LustreError: 10009:0:(quota_master.c:219:lustre_dqget()) can&apos;t read dquot from admin quotafile! (rc:-5)
Aug 23 10:01:32 nmd031i kernel: LustreError: 10009:0:(ldlm_lib.c:2123:target_handle_dqacq_callback()) dqacq failed! (rc:-5)
Aug 23 10:01:32 nmd031i kernel: LustreError: 10029:0:(ldlm_lib.c:2123:target_handle_dqacq_callback()) dqacq failed! (rc:-5)
Aug 23 10:01:32 nmd031i kernel: LustreError: 9949:0:(ldlm_lib.c:2123:target_handle_dqacq_callback()) dqacq failed! (rc:-5)
Aug 23 10:01:32 nmd031i kernel: LustreError: 10003:0:(ldlm_lib.c:2123:target_handle_dqacq_callback()) dqacq failed! (rc:-5)
Aug 23 10:01:32 nmd031i kernel: LustreError: 9967:0:(ldlm_lib.c:2123:target_handle_dqacq_callback()) dqacq failed! (rc:-5)
Aug 23 10:01:32 nmd031i kernel: LustreError: 10042:0:(ldlm_lib.c:2123:target_handle_dqacq_callback()) dqacq failed! (rc:-5)
Aug 23 10:01:32 nmd031i kernel: LustreError: 9936:0:(ldlm_lib.c:2123:target_handle_dqacq_callback()) dqacq failed! (rc:-5)
Aug 23 10:01:32 nmd031i kernel: LustreError: 6081:0:(ldlm_lib.c:2123:target_handle_dqacq_callback()) dqacq failed! (rc:-5)
Aug 23 10:01:32 nmd031i kernel: LustreError: 9960:0:(ldlm_lib.c:2123:target_handle_dqacq_callback()) dqacq failed! (rc:-5)
Aug 23 10:01:32 nmd031i kernel: LustreError: 9924:0:(ldlm_lib.c:2123:target_handle_dqacq_callback()) dqacq failed! (rc:-5)
Aug 23 10:01:32 nmd031i kernel: LustreError: 9994:0:(ldlm_lib.c:2123:target_handle_dqacq_callback()) dqacq failed! (rc:-5)
Aug 23 10:01:36 nmd031i kernel: LustreError: 9981:0:(fsfilt-ldiskfs.c:2243:fsfilt_ldiskfs_dquot()) operate dquot before it&apos;s enabled!
Aug 23 10:01:36 nmd031i kernel: LustreError: 9981:0:(fsfilt-ldiskfs.c:2243:fsfilt_ldiskfs_dquot()) Skipped 10 previous similar messages
Aug 23 10:01:36 nmd031i kernel: LustreError: 9981:0:(quota_master.c:219:lustre_dqget()) can&apos;t read dquot from admin quotafile! (rc:-5)
Aug 23 10:01:36 nmd031i kernel: LustreError: 9981:0:(quota_master.c:219:lustre_dqget()) Skipped 10 previous similar messages
Aug 23 10:01:36 nmd031i kernel: LustreError: 9981:0:(ldlm_lib.c:2123:target_handle_dqacq_callback()) dqacq failed! (rc:-5)
Aug 23 10:01:36 nmd031i kernel: LustreError: 10030:0:(ldlm_lib.c:2123:target_handle_dqacq_callback()) dqacq failed! (rc:-5)
Aug 23 10:01:36 nmd031i kernel: LustreError: 9929:0:(ldlm_lib.c:2123:target_handle_dqacq_callback()) dqacq failed! (rc:-5)
Aug 23 10:01:36 nmd031i kernel: LustreError: 9939:0:(ldlm_lib.c:2123:target_handle_dqacq_callback()) dqacq failed! (rc:-5)
Aug 23 10:01:36 nmd031i kernel: LustreError: 10014:0:(ldlm_lib.c:2123:target_handle_dqacq_callback()) dqacq failed! (rc:-5)
Aug 23 10:01:36 nmd031i kernel: LustreError: 10000:0:(ldlm_lib.c:2123:target_handle_dqacq_callback()) dqacq failed! (rc:-5)
Aug 23 10:01:36 nmd031i kernel: LustreError: 9985:0:(ldlm_lib.c:2123:target_handle_dqacq_callback()) dqacq failed! (rc:-5)
Aug 23 10:01:36 nmd031i kernel: LustreError: 9923:0:(ldlm_lib.c:2123:target_handle_dqacq_callback()) dqacq failed! (rc:-5)
....
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="65141" author="niu" created="Tue, 27 Aug 2013 08:55:52 +0000"  >&lt;p&gt;I have few questions:&lt;/p&gt;

&lt;p&gt;1. What&apos;s the source of these error messages? quotacheck? It looks like the quota wasn&apos;t enabled.&lt;br/&gt;
2. Are the numbers in file_size_20130821.txt in byte?&lt;/p&gt;

&lt;p&gt;If there is really such a huge gap between quota usage &amp;amp; file size, I suspect there could be some orphan objects belong to the user, could you try &quot;lfs quota -v&quot; to see the detailed quota usage?&lt;/p&gt;</comment>
                            <comment id="65142" author="ihara" created="Tue, 27 Aug 2013 09:52:38 +0000"  >&lt;blockquote&gt;
&lt;p&gt;1. What&apos;s the source of these error messages? quotacheck? It looks like the quota wasn&apos;t enabled.&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;Always, messages are showing up during normal operation, even right now. The quota has been enabled; we didn&apos;t disable it.&lt;br/&gt;
we collected debug information while running &apos;lfs quotacheck&apos;. I will post it soon.&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;2. Are the numbers in file_size_20130821.txt in byte?&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;Yes, this is a summary of &apos;ls&apos; output.&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;If there is really such a huge gap between quota usage &amp;amp; file size, I suspect there could be some orphan objects belong to the user, could you try &quot;lfs quota -v&quot; to see the detailed quota usage?&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;sure. will get them and post here.&lt;/p&gt;</comment>
                            <comment id="65143" author="ihara" created="Tue, 27 Aug 2013 09:57:18 +0000"  >&lt;p&gt;this is debug output from running the &apos;lfs quotacheck&apos; and &apos;lfs quota -u xxx&apos; commands.&lt;/p&gt;</comment>
                            <comment id="65220" author="mnishizawa" created="Wed, 28 Aug 2013 00:18:37 +0000"  >&lt;p&gt;Hi, here is output of &quot;lfs quota -u kawashin -v /nshare2&quot;.  &lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@wk2 ~]# lfs quota -u kawashin -v /nshare2
Disk quotas for user kawashin (uid 14520):
     Filesystem  kbytes   quota   limit   grace   files   quota   limit   grace
       /nshare2 10904878092       0       0       -   14157       0       0       -
nshare2-MDT0000_UUID
                   2736       -       0       -   14157       -       0       -
nshare2-OST0000_UUID
                889661136       -       0       -       -       -       -       -
nshare2-OST0001_UUID
                1598348       -       0       -       -       -       -       -
nshare2-OST0002_UUID
                 939816       -       0       -       -       -       -       -
nshare2-OST0003_UUID
                2554020       -       0       -       -       -       -       -
nshare2-OST0004_UUID
                3709408       -       0       -       -       -       -       -
nshare2-OST0005_UUID
                907519088       -       0       -       -       -       -       -
nshare2-OST0006_UUID
                908373712       -       0       -       -       -       -       -
nshare2-OST0007_UUID
                905286696       -       0       -       -       -       -       -
nshare2-OST0008_UUID
                1015128       -       0       -       -       -       -       -
nshare2-OST0009_UUID
                 578096       -       0       -       -       -       -       -
nshare2-OST000a_UUID
                1666080       -       0       -       -       -       -       -
nshare2-OST000b_UUID
                1009400       -       0       -       -       -       -       -
nshare2-OST000c_UUID
                917450860       -       0       -       -       -       -       -
nshare2-OST000d_UUID
                902701472       -       0       -       -       -       -       -
nshare2-OST000e_UUID
                2146448       -       0       -       -       -       -       -
nshare2-OST000f_UUID
                4541320       -       0       -       -       -       -       -
nshare2-OST0010_UUID
                 594752       -       0       -       -       -       -       -
nshare2-OST0011_UUID
                1957092       -       0       -       -       -       -       -
nshare2-OST0012_UUID
                911761044       -       0       -       -       -       -       -
nshare2-OST0013_UUID
                1382416       -       0       -       -       -       -       -
nshare2-OST0014_UUID
                898266076       -       0       -       -       -       -       -
nshare2-OST0015_UUID
                1392196       -       0       -       -       -       -       -
nshare2-OST0016_UUID
                1201028       -       0       -       -       -       -       -
nshare2-OST0017_UUID
                 690192       -       0       -       -       -       -       -
nshare2-OST0018_UUID
                1400884       -       0       -       -       -       -       -
nshare2-OST0019_UUID
                 909708       -       0       -       -       -       -       -
nshare2-OST001a_UUID
                897309256       -       0       -       -       -       -       -
nshare2-OST001b_UUID
                890611760       -       0       -       -       -       -       -
nshare2-OST001c_UUID
                1325556       -       0       -       -       -       -       -
nshare2-OST001d_UUID
                 808944       -       0       -       -       -       -       -
nshare2-OST001e_UUID
                 667016       -       0       -       -       -       -       -
nshare2-OST001f_UUID
                 558356       -       0       -       -       -       -       -
nshare2-OST0020_UUID
                1013192       -       0       -       -       -       -       -
nshare2-OST0021_UUID
                1164864       -       0       -       -       -       -       -
nshare2-OST0022_UUID
                4285100       -       0       -       -       -       -       -
nshare2-OST0023_UUID
                 945804       -       0       -       -       -       -       -
nshare2-OST0024_UUID
                1489132       -       0       -       -       -       -       -
nshare2-OST0025_UUID
                 991472       -       0       -       -       -       -       -
nshare2-OST0026_UUID
                 946508       -       0       -       -       -       -       -
nshare2-OST0027_UUID
                917889588       -       0       -       -       -       -       -
nshare2-OST0028_UUID
                1156456       -       0       -       -       -       -       -
nshare2-OST0029_UUID
                913405936       -       0       -       -       -       -       -
[root@wk2 ~]# 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="65224" author="niu" created="Wed, 28 Aug 2013 03:19:03 +0000"  >&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;00002000:00000001:11:1377565151.521295:0:9987:0:(fsfilt-ldiskfs.c:2240:fsfilt_ldiskfs_dquot()) &lt;span class=&quot;code-object&quot;&gt;Process&lt;/span&gt; entered
00002000:00020000:11:1377565151.521295:0:9987:0:(fsfilt-ldiskfs.c:2243:fsfilt_ldiskfs_dquot()) operate dquot before it&apos;s enabled!
00002000:00000001:11:1377565151.521296:0:9987:0:(fsfilt-ldiskfs.c:2244:fsfilt_ldiskfs_dquot()) &lt;span class=&quot;code-object&quot;&gt;Process&lt;/span&gt; leaving (rc=18446744073709551611 : -5 : fffffffffffffffb)
00040000:00020000:11:1377565151.521297:0:9987:0:(quota_master.c:219:lustre_dqget()) can&apos;t read dquot from admin quotafile! (rc:-5)
00040000:00000001:11:1377565151.521297:0:9987:0:(quota_master.c:180:lustre_dqput()) &lt;span class=&quot;code-object&quot;&gt;Process&lt;/span&gt; entered
00040000:00000001:11:1377565151.521298:0:9987:0:(quota_master.c:189:lustre_dqput()) &lt;span class=&quot;code-object&quot;&gt;Process&lt;/span&gt; leaving
00040000:00000001:11:1377565151.521299:0:9987:0:(quota_master.c:221:lustre_dqget()) &lt;span class=&quot;code-object&quot;&gt;Process&lt;/span&gt; leaving (rc=18446744073709551611 : -5 : fffffffffffffffb)
00040000:00000001:11:1377565151.521300:0:9987:0:(quota_master.c:356:dqacq_handler()) &lt;span class=&quot;code-object&quot;&gt;Process&lt;/span&gt; leaving (rc=18446744073709551611 : -5 : fffffffffffffffb)
00010000:00020000:11:1377565151.521301:0:9987:0:(ldlm_lib.c:2123:target_handle_dqacq_callback()) dqacq failed! (rc:-5)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;The log shows that quota wasn&apos;t enabled on MDS, so I suppose that quotacheck didn&apos;t actually do anything. I didn&apos;t see anything related to quotacheck in the log either, could you re-run quotacheck to see if it can enable quota properly? If it can enable quota properly, please check if the quota usage is fixed after quotacheck; If it can&apos;t, please enable D_QUOTA &amp;amp; D_TRACE and collect the log while running &apos;lfs quotacheck&apos;(hope we can find out why quotacheck failed from the log). Thanks a lot.&lt;/p&gt;</comment>
                            <comment id="65580" author="mnishizawa" created="Tue, 3 Sep 2013 08:33:27 +0000"  >&lt;p&gt;debug log with D_QUOTA &amp;amp; D_TRACE.  &lt;/p&gt;</comment>
                            <comment id="65581" author="mnishizawa" created="Tue, 3 Sep 2013 08:34:56 +0000"  >&lt;p&gt;Hi, we have received a debug log with D_QUOTA &amp;amp; D_TRACE enabled and captured after quotacheck.  Thanks, &lt;/p&gt;</comment>
                            <comment id="65597" author="niu" created="Tue, 3 Sep 2013 13:43:27 +0000"  >&lt;p&gt;Thank you, Mitsuhiro. I didn&apos;t see anything abnormal in the log, did the quotacheck fix the problem (incorrect usage), and did the quotacheck enable quota successfully? If it didn&apos;t fix the problem, could you try the following commands to capture the log (I didn&apos;t see anything related to quotacheck in this log)?&lt;/p&gt;

&lt;ul class=&quot;alternate&quot; type=&quot;square&quot;&gt;
	&lt;li&gt;Enable D_QUOTA &amp;amp; D_TRACE on both MDS and all OSS by &quot;lctl set_param debug=&lt;ins&gt;quota; lctl set_param debug&lt;/ins&gt;=trace&quot;;&lt;/li&gt;
	&lt;li&gt;Clear debug log buffer on both MDS &amp;amp; OSS by &quot;lctl clear&quot;;&lt;/li&gt;
	&lt;li&gt;Start debug_daemon on both MDS &amp;amp; OSS by &quot;lctl debug_daemon start $tmp_filename 500&quot;;&lt;/li&gt;
	&lt;li&gt;Run quotacheck on client &quot;lfs quotacheck -ug&quot;;&lt;/li&gt;
	&lt;li&gt;After quotacheck done, stop debug daemon on MDS &amp;amp; OSS by &quot;lctl debug_daemon stop&quot;;&lt;/li&gt;
	&lt;li&gt;Convert debug file into text file by &quot;lctl debug_file $tmp_filename $log_filename&quot;;&lt;/li&gt;
	&lt;li&gt;Collect all the log files ($log_filename) and attach them here;&lt;/li&gt;
&lt;/ul&gt;
</comment>
                            <comment id="65690" author="mnishizawa" created="Wed, 4 Sep 2013 05:47:22 +0000"  >&lt;p&gt;Hi Niu, we have captured the log again, but the log this time may not contain full quotacheck output as well since it only contains logs in 19 seconds.  &lt;br/&gt;
Please kindly check the log and let us know if anything to be noticed.  &lt;/p&gt;

&lt;p&gt;We put the log in the following location as the size is 43MB.  &lt;br/&gt;
&lt;a href=&quot;https://woscloud.corp.ddn.com/v2/files/ZjAxZTk1YjlkZWI0ZDE0ZGEzZTExNDI1MWM5NGRiZTZi/content/inline/lctl_debug_20130904.out.gz&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://woscloud.corp.ddn.com/v2/files/ZjAxZTk1YjlkZWI0ZDE0ZGEzZTExNDI1MWM5NGRiZTZi/content/inline/lctl_debug_20130904.out.gz&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="66319" author="niu" created="Wed, 11 Sep 2013 07:30:32 +0000"  >&lt;p&gt;Hi, Mitsuhiro&lt;/p&gt;

&lt;p&gt;Seems there isn&apos;t any error message in the log, did the quotacheck turn quota on? (when you run &apos;lfs quota -u xxx&apos;, is there any error message on MDS/OSS?) Was the incorrect usage problem fixed?&lt;/p&gt;</comment>
                            <comment id="66323" author="mnishizawa" created="Wed, 11 Sep 2013 09:56:28 +0000"  >&lt;p&gt;Hi Niu, &lt;br/&gt;
Quota should be on, but we do not see any error either on MDS/OSS when issuing &apos;lfs quota&apos; (occasionally, &quot;still haven&apos;t managed to acquire quota space...&quot; is output).  &lt;br/&gt;
Incorrect usage has not been fixed.  Here is the current output.  &lt;/p&gt;

&lt;p&gt;&lt;span class=&quot;error&quot;&gt;&amp;#91;root@wk2 ~&amp;#93;&lt;/span&gt;# lfs quota -u kawashin /nshare2&lt;br/&gt;
Disk quotas for user kawashin (uid 14520):&lt;br/&gt;
     Filesystem  kbytes   quota   limit   grace   files   quota   limit   grace&lt;br/&gt;
       /nshare2 21433982796       0       0       -   14157       0       0       -&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;root@wk2 ~&amp;#93;&lt;/span&gt;# &lt;/p&gt;

&lt;p&gt;Were you able to find the log when quotacheck was issued?  Should we capture it again using bigger size of file?  &lt;/p&gt;</comment>
                            <comment id="66460" author="ihara" created="Thu, 12 Sep 2013 07:24:33 +0000"  >&lt;p&gt;Hi Niu,&lt;/p&gt;

&lt;p&gt;As Mitsuhiro mentioned, the quota is enabled and this problem is not fixed yet. An incorrect quota size is still visible in the &quot;lfs quota&quot; output.&lt;br/&gt;
As one possibility, this might be a similar issue to &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-860&quot; title=&quot;Lustre quota inconsistencies after multiple usages of LU-601 work-around&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-860&quot;&gt;&lt;del&gt;LU-860&lt;/del&gt;&lt;/a&gt;? Please advise.&lt;/p&gt;</comment>
                            <comment id="66461" author="niu" created="Thu, 12 Sep 2013 07:39:17 +0000"  >&lt;blockquote&gt;
&lt;p&gt;As Mitsuhiro mentioned, the quota is enabled and this problem is not fixed yet. An incorrect quota size is still visible in the &quot;lfs quota&quot; output.&lt;br/&gt;
As one possibility, this might be a similar issue to &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-860&quot; title=&quot;Lustre quota inconsistencies after multiple usages of LU-601 work-around&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-860&quot;&gt;&lt;del&gt;LU-860&lt;/del&gt;&lt;/a&gt;? Please advise.&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;Yes, it&apos;s possible that there are orphan objects leaked if quotacheck doesn&apos;t help. Could you follow the instructions in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-860&quot; title=&quot;Lustre quota inconsistencies after multiple usages of LU-601 work-around&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-860&quot;&gt;&lt;del&gt;LU-860&lt;/del&gt;&lt;/a&gt; to remove those orphans? Thanks.&lt;/p&gt;</comment>
                            <comment id="66463" author="ihara" created="Thu, 12 Sep 2013 07:49:15 +0000"  >&lt;p&gt;OK, but it seems we need to stop Lustre.&lt;br/&gt;
we want to check whether the PENDING directory exists without stopping Lustre. Can we do that? Any ideas?&lt;br/&gt;
Then, if we confirm this is the same issue as &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-860&quot; title=&quot;Lustre quota inconsistencies after multiple usages of LU-601 work-around&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-860&quot;&gt;&lt;del&gt;LU-860&lt;/del&gt;&lt;/a&gt;, we will stop Lustre to fix this.&lt;/p&gt;</comment>
                            <comment id="66473" author="niu" created="Thu, 12 Sep 2013 09:25:28 +0000"  >&lt;p&gt;PENDING directory is always created by lustre once the OST/MDT mount, I&apos;m not sure if there is a good way to check PENDING directory online. (lfsck can check orphan object, but it will be very slow).&lt;/p&gt;

&lt;p&gt;I think you can choose one OST which has most significant quota usage (seen from lfs quota -v) to stop, and see if there are orphans objects belong to the user in PENDING.&lt;/p&gt;</comment>
                            <comment id="66581" author="mnishizawa" created="Fri, 13 Sep 2013 12:52:37 +0000"  >&lt;p&gt;Hi Niu,  &lt;br/&gt;
So the only way to check if there are orphaned objects is to mount MDT/OST as ldiskfs and see if there is a PENDING* directory.  &lt;br/&gt;
When we found the directory, what we should do?  &lt;br/&gt;
&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-860&quot; title=&quot;Lustre quota inconsistencies after multiple usages of LU-601 work-around&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-860&quot;&gt;&lt;del&gt;LU-860&lt;/del&gt;&lt;/a&gt; states &apos;&quot;back to namespace&quot;+unlink&apos; worked, but what &quot;back to namespace&quot; means specifically? unlink is just remove the files by &apos;rm&apos; or &apos;unlink&apos; command?  &lt;/p&gt;</comment>
                            <comment id="66697" author="niu" created="Mon, 16 Sep 2013 02:23:26 +0000"  >&lt;p&gt;PENDING directory is created on MDS to save the open-unlinked files, if you never renamed the PENDING directory (mentioned in &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-860&quot; title=&quot;Lustre quota inconsistencies after multiple usages of LU-601 work-around&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-860&quot;&gt;&lt;del&gt;LU-860&lt;/del&gt;&lt;/a&gt;), there should be only one PENDING directory on MDS. (sorry, my previous comment is not quite right and misleading)&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-860&quot; title=&quot;Lustre quota inconsistencies after multiple usages of LU-601 work-around&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-860&quot;&gt;&lt;del&gt;LU-860&lt;/del&gt;&lt;/a&gt; states &apos;&quot;back to namespace&quot;+unlink&apos; worked, but what &quot;back to namespace&quot; means specifically? unlink is just remove the files by &apos;rm&apos; or &apos;unlink&apos; command?&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;You need to mount MDT as ldiskfs, and check if there are lots of files under PENDING directory, then check if the file owner is the uid which have incorrect quota usage, if there are such files, you need to do &quot;back to namespace&quot; + &quot;unlink&quot;:&lt;/p&gt;
&lt;ul class=&quot;alternate&quot; type=&quot;square&quot;&gt;
	&lt;li&gt;Move the files from PENDING to ROOT directory (back to namespace);&lt;/li&gt;
	&lt;li&gt;Mount as Lustre, and unlink those files from Lustre client (unlink);&lt;/li&gt;
&lt;/ul&gt;
</comment>
                            <comment id="66909" author="mnishizawa" created="Wed, 18 Sep 2013 11:59:18 +0000"  >&lt;p&gt;Many thanks, Niu! Can you clarify on the following?  We want to be doubly sure as there is limited chance / time to do a maintenance.  &lt;/p&gt;

&lt;p&gt;&amp;gt; Move the files from PENDING to ROOT directory (back to namespace);&lt;br/&gt;
to ROOT directory of ldiskfs, correct?  &lt;/p&gt;

&lt;p&gt;&amp;gt; Mount as Lustre, and unlink those files from Lustre client (unlink);&lt;br/&gt;
Can we know the path and file name from what we can see under PENDING directory?  &lt;br/&gt;
(we are not sure how files exist under PENDING directory though...file/FID?) &lt;/p&gt;</comment>
                            <comment id="66927" author="niu" created="Wed, 18 Sep 2013 16:15:07 +0000"  >&lt;p&gt;Yes, move to the ROOT directory of ldiskfs. You can see that all filename under PENDING directory are composed by i_no:i_generation, once you move those files into ROOT, you can see them on lustre client. &lt;/p&gt;</comment>
                            <comment id="66983" author="mnishizawa" created="Thu, 19 Sep 2013 01:52:38 +0000"  >&lt;p&gt;Niu, how can a file look like after we put it back to the namespace?  &lt;br/&gt;
If I am understanding correctly, that file should be somewhere in lustre file system and we need to search it from i_no/i_generation or file name.  &lt;br/&gt;
Can we distinguish it easily from other files which should not (never) be unlinked?  i_no and file name is the only hint?  &lt;br/&gt;
e.g. &apos;ls -l&apos; show &apos;?&apos; like a case where inode entry exist in MDT, but no object on OST... Many thanks, &lt;/p&gt;</comment>
                            <comment id="66985" author="niu" created="Thu, 19 Sep 2013 02:22:19 +0000"  >&lt;p&gt;You could create a directory &quot;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3839&quot; title=&quot;Incorrect file system usage on Lustre Quota&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3839&quot;&gt;&lt;del&gt;LU-3839&lt;/del&gt;&lt;/a&gt;&quot; in lustre root (then you&apos;ll see this directory in ROOT once you mount it as ldiskfs), and move all files in PENDING into &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3839&quot; title=&quot;Incorrect file system usage on Lustre Quota&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3839&quot;&gt;&lt;del&gt;LU-3839&lt;/del&gt;&lt;/a&gt; directory. When you mount lustre back, you can find all files under the &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3839&quot; title=&quot;Incorrect file system usage on Lustre Quota&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3839&quot;&gt;&lt;del&gt;LU-3839&lt;/del&gt;&lt;/a&gt; directory easily.&lt;/p&gt;</comment>
                            <comment id="66986" author="mnishizawa" created="Thu, 19 Sep 2013 02:27:33 +0000"  >&lt;p&gt;Understood well.  Many thanks!&lt;/p&gt;</comment>
                            <comment id="67099" author="mnishizawa" created="Fri, 20 Sep 2013 11:31:17 +0000"  >&lt;p&gt; 1. What can be a cause (or trigger) of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-860&quot; title=&quot;Lustre quota inconsistencies after multiple usages of LU-601 work-around&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-860&quot;&gt;&lt;del&gt;LU-860&lt;/del&gt;&lt;/a&gt; issue?  It looks like no patch is available for this issue.  Will this issue be fixed in lustre code?  &lt;br/&gt;
 2. The customer needs to stop their service to confirm if they are really affected by the issue of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-860&quot; title=&quot;Lustre quota inconsistencies after multiple usages of LU-601 work-around&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-860&quot;&gt;&lt;del&gt;LU-860&lt;/del&gt;&lt;/a&gt; and, if so, to fix the issue.  They are concerned about a chance that there is no file under PENDING directory and our investigation does not proceed even if they stop their service.  Their expectation is that we should at least identify the cause of incorrect quota report and confirm required action to fix it.  Is there anything we can do to fix the issue when files under PENDING are not the culprit?  &lt;/p&gt;</comment>
                            <comment id="67198" author="niu" created="Sun, 22 Sep 2013 01:52:32 +0000"  >&lt;blockquote&gt;
&lt;p&gt;1. What can be a cause (or trigger) of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-860&quot; title=&quot;Lustre quota inconsistencies after multiple usages of LU-601 work-around&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-860&quot;&gt;&lt;del&gt;LU-860&lt;/del&gt;&lt;/a&gt; issue? It looks like no patch is available for this issue. Will this issue be fixed in lustre code? &lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;When client failure happens while unlinking a file, orphan could probably be generated on MDS or OST, these orphan should be cleared automatically when restart MDS/OST, but such orphan cleanup could fail as well. For &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-860&quot; title=&quot;Lustre quota inconsistencies after multiple usages of LU-601 work-around&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-860&quot;&gt;&lt;del&gt;LU-860&lt;/del&gt;&lt;/a&gt;, I think it caused by user renamed PENDING directory manually (to workaround other problems, see &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-601&quot; title=&quot;kernel BUG at fs/jbd2/transaction.c:1030&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-601&quot;&gt;&lt;del&gt;LU-601&lt;/del&gt;&lt;/a&gt;).&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;2. The customer need to stop their service to confirm if they are really affected by the issue of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-860&quot; title=&quot;Lustre quota inconsistencies after multiple usages of LU-601 work-around&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-860&quot;&gt;&lt;del&gt;LU-860&lt;/del&gt;&lt;/a&gt; and, if so, to fix the issue. They are concerned about a chance that there is no file under PENDING directory and our investigation does not proceed even if they stop their service. Their expectation is that we should at lease identify the cause of incorrect quota report and confirm required action to fix it. Is there anything we can do to fix the issue when files under PENDING are not culprit?&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;Is the inode usage for the user correct? If the inode usage isn&apos;t correct as well, I highly suspect that there are orphans in PENDING dir. If the inode usage is correct, probably there are orphan on OSTs. (Given that quotacheck has been done successfully). &lt;/p&gt;</comment>
                            <comment id="67332" author="mnishizawa" created="Tue, 24 Sep 2013 10:56:18 +0000"  >&lt;p&gt;inode usage is a bit incorrect.  From the list of user files provided when we created this ticket, number of files are 14145 and quota was showing 14157.  &lt;br/&gt;
$ wc file_size_20130821.txt &lt;br/&gt;
   14145  127380 1875155 file_size_20130821.txt&lt;/p&gt;

&lt;p&gt;On the other hand, disk usage had a big difference... &lt;br/&gt;
BTW, I found at this late date that &apos;lfs quota&apos; in the description showed 7TB usage, but &apos;lfs quota -v&apos; output above showed 10TB usage while inode count had not changed.  &lt;br/&gt;
...I checked the current &apos;lfs quota&apos; output and noticed the disk usage shows correct value.. &lt;br/&gt;
It is not clear when this occurred.  Do you have any idea why this occurred?  We will attach recent messages files.  &lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@wk2 ~]# lfs quota -u kawashin /nshare2
Disk quotas for user kawashin (uid 14520):
     Filesystem  kbytes   quota   limit   grace   files   quota   limit   grace
       /nshare2 65116548       0       0       -   14145       0       0       -

[root@wk2 ~]# lfs quota -v -u kawashin /nshare2
Disk quotas for user kawashin (uid 14520):
     Filesystem  kbytes   quota   limit   grace   files   quota   limit   grace
       /nshare2 65116548       0       0       -   14145       0       0       -
nshare2-MDT0000_UUID
                   2736       -       0       -   14145       -       0       -
nshare2-OST0000_UUID
                 933072       -       0       -       -       -       -       -
nshare2-OST0001_UUID
                1598348       -       0       -       -       -       -       -
nshare2-OST0002_UUID
                 939816       -       0       -       -       -       -       -
nshare2-OST0003_UUID
                2554020       -       0       -       -       -       -       -
nshare2-OST0004_UUID
                3709408       -       0       -       -       -       -       -
nshare2-OST0005_UUID
                1626504       -       0       -       -       -       -       -
nshare2-OST0006_UUID
                1552952       -       0       -       -       -       -       -
nshare2-OST0007_UUID
                 938512       -       0       -       -       -       -       -
nshare2-OST0008_UUID
                1015128       -       0       -       -       -       -       -
nshare2-OST0009_UUID
                 578096       -       0       -       -       -       -       -
nshare2-OST000a_UUID
                1666080       -       0       -       -       -       -       -
nshare2-OST000b_UUID
                1009400       -       0       -       -       -       -       -
nshare2-OST000c_UUID
                1113732       -       0       -       -       -       -       -
nshare2-OST000d_UUID
                1607584       -       0       -       -       -       -       -
nshare2-OST000e_UUID
                2146448       -       0       -       -       -       -       -
nshare2-OST000f_UUID
                4541320       -       0       -       -       -       -       -
nshare2-OST0010_UUID
                 594752       -       0       -       -       -       -       -
nshare2-OST0011_UUID
                1957092       -       0       -       -       -       -       -
nshare2-OST0012_UUID
                1275508       -       0       -       -       -       -       -
nshare2-OST0013_UUID
                1382416       -       0       -       -       -       -       -
nshare2-OST0014_UUID
                3053048       -       0       -       -       -       -       -
nshare2-OST0015_UUID
                1392196       -       0       -       -       -       -       -
nshare2-OST0016_UUID
                1201028       -       0       -       -       -       -       -
nshare2-OST0017_UUID
                 690192       -       0       -       -       -       -       -
nshare2-OST0018_UUID
                1400884       -       0       -       -       -       -       -
nshare2-OST0019_UUID
                 909708       -       0       -       -       -       -       -
nshare2-OST001a_UUID
                2505724       -       0       -       -       -       -       -
nshare2-OST001b_UUID
                1033740       -       0       -       -       -       -       -
nshare2-OST001c_UUID
                1325556       -       0       -       -       -       -       -
nshare2-OST001d_UUID
                 808944       -       0       -       -       -       -       -
nshare2-OST001e_UUID
                 667016       -       0       -       -       -       -       -
nshare2-OST001f_UUID
                 558356       -       0       -       -       -       -       -
nshare2-OST0020_UUID
                1013192       -       0       -       -       -       -       -
nshare2-OST0021_UUID
                1164864       -       0       -       -       -       -       -
nshare2-OST0022_UUID
                4285100       -       0       -       -       -       -       -
nshare2-OST0023_UUID
                 945804       -       0       -       -       -       -       -
nshare2-OST0024_UUID
                1489132       -       0       -       -       -       -       -
nshare2-OST0025_UUID
                 991472       -       0       -       -       -       -       -
nshare2-OST0026_UUID
                 946508       -       0       -       -       -       -       -
nshare2-OST0027_UUID
                1310688       -       0       -       -       -       -       -
nshare2-OST0028_UUID
                1156456       -       0       -       -       -       -       -
nshare2-OST0029_UUID
                3524016       -       0       -       -       -       -       -
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="67333" author="mnishizawa" created="Tue, 24 Sep 2013 10:57:30 +0000"  >&lt;p&gt;messages files from MDS/OSS on 20130924&lt;/p&gt;</comment>
                            <comment id="67424" author="johann" created="Tue, 24 Sep 2013 18:46:42 +0000"  >&lt;p&gt;I agree with Niu, you likely have open-unlinked files in the PENDING directory that haven&apos;t been cleaned up yet.&lt;br/&gt;
As for the &quot;inconsistence&quot; between lfs quota and lfs quota -v, i am not sure how you ended up with 10TB:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;$ bc -l
bc 1.06
Copyright 1991-1994, 1997, 1998, 2000 Free Software Foundation, Inc.
This is free software with ABSOLUTELY NO WARRANTY.
For details type `warranty&apos;. 
2736+933072+1598348+939816+2554020+3709408+1626504+1552952+938512+1015128+578096+1666080+1009400+1113732+1607584+2146448+4541320+594752+1957092+1275508+1382416+3053048+1392196+1201028+690192+1400884+909708+2505724+1033740+1325556+808944+667016+558356+1013192+1164864+4285100+945804+1489132+991472+946508+1310688+1156456+3524016
65116548
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;For me, the two results perfectly match.&lt;/p&gt;</comment>
                            <comment id="67493" author="mnishizawa" created="Wed, 25 Sep 2013 00:22:58 +0000"  >&lt;p&gt;Why had the PENDING directory not been cleaned up for such a long time?  The customer was seeing incorrect quota usage (as much as 7TB) for at least two weeks.  What triggered the cleanup?  &lt;br/&gt;
They are using quota usage to know how much space each user is using.  If this occurs in normal operation, they cannot trust quota output at all.  &lt;/p&gt;</comment>
                            <comment id="67599" author="johann" created="Wed, 25 Sep 2013 19:36:08 +0000"  >&lt;p&gt;As mentioned earlier by Niu, if the PENDING directory was renamed manually, then files present in this directory have never been cleaned up.&lt;br/&gt;
Could you please check with debugfs that:&lt;br/&gt;
1. the PENDING directory has indeed been renamed &amp;amp; recreated&lt;br/&gt;
2. there are indeed files under this directory.&lt;/p&gt;

&lt;p&gt;If so and if the fix for &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-601&quot; title=&quot;kernel BUG at fs/jbd2/transaction.c:1030&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-601&quot;&gt;&lt;del&gt;LU-601&lt;/del&gt;&lt;/a&gt; is applied, i would advise to shut down the MDT, move the open-unlinked files of the renamed PENDING dir to the real PENDING directory and restart the MDT.&lt;/p&gt;</comment>
                            <comment id="67659" author="mnishizawa" created="Thu, 26 Sep 2013 10:08:10 +0000"  >&lt;p&gt;1. the PENDING directory has indeed been renamed &amp;amp; recreated&lt;br/&gt;
The customer would never do this, we think.  How can we check this with debugfs?  &lt;/p&gt;

&lt;p&gt;2. there are indeed files under this directory.&lt;br/&gt;
Can this be checked with debugfs while MDT is servicing as lustre?  Niu said we cannot.  &lt;/p&gt;

&lt;p&gt;What the customer is concerned mostly is if the current quota output is really correct.  &lt;br/&gt;
Can we say quota output is correct when there is no file under PENDING directory (currently, quota usage for the user looks correct apparently)?  &lt;br/&gt;
We see many log like below.  What does this mean?  Is this not related to incorrect usage problem?  (if we should create a new ticket, please let us know)&lt;br/&gt;
Sep 23 22:18:20 nos071i kernel: Lustre: 13850:0:(quota_interface.c:491:quota_chk_acq_common()) still haven&apos;t managed to acquire quota space from the quota master after 10 retries (err=0, rc=0)&lt;/p&gt;</comment>
                            <comment id="67737" author="johann" created="Thu, 26 Sep 2013 17:50:02 +0000"  >&lt;blockquote&gt;
&lt;p&gt;The customer would never do this, we think.&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;Niu thought that such an action might have been done to address &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-860&quot; title=&quot;Lustre quota inconsistencies after multiple usages of LU-601 work-around&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-860&quot;&gt;&lt;del&gt;LU-860&lt;/del&gt;&lt;/a&gt;. Is it plausible?&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;How can we check this with debugfs?&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;1. run &quot;debugfs $device_path&quot;&lt;br/&gt;
2. &quot;ls&quot; to check how many PENDING directories we have&lt;br/&gt;
3. &quot;ls&quot; against all PENDING* directories to check if we have any files in there&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;Can this be checked with debugfs while MDT is servicing as lustre? Niu said we cannot.&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;Yes, you can run debugfs in read-only mode.&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;What the customer is concerned mostly is if the current quota output is really correct. &lt;br/&gt;
Can we say quota output is correct when there is no file under PENDING directory (currently, quota usage for the user looks correct apparently)? &lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;Since you successfully ran quotacheck, accounting is very likely correct.&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;We see many log like below. What does this mean? Is this not related to incorrect usage problem? (if we should create a new ticket, please let us know)&lt;br/&gt;
Sep 23 22:18:20 nos071i kernel: Lustre: 13850:0:(quota_interface.c:491:quota_chk_acq_common()) still haven&apos;t managed to acquire quota space from the quota master after 10 retries (err=0, rc=0)&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;I don&apos;t think it is related to a potential incorrect accounting. It means that it takes several iterations before acquiring space from the master, probably because of contention due to many threads trying to get quota space for the same ID. The &quot;cycle&quot; value seems to be always 10, so it means that the thread finally got space and those messages should be harmless.&lt;/p&gt;</comment>
                            <comment id="68121" author="mnishizawa" created="Wed, 2 Oct 2013 03:18:02 +0000"  >&lt;p&gt;Hi, we checked the PENDING directory and many files under it.  The timestamps of the files are quite old; the oldest is Feb and we can see many files modified in Aug.  &lt;br/&gt;
Currently, we do not see files owned by &quot;kawashin&quot; user (ID: 14520).  &lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;debugfs:  ls -l
 975747585   40777 (2)      0      0   77824  2-Oct-2013 10:13 .
      2   40755 (2)      0      0    4096  7-Aug-2012 23:30 ..
 957411756  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1ac:4430f8c7
 986228362  100644 (1)  14457  10693       0 30-Sep-2013 09:36 3ac8a68a:4624ac43
 957412285  100640 (1)  14148   1000    1592 18-Sep-2013 14:20 3910f3bd:45f57947
 957411948  100640 (1)  14148   1000       0  5-Aug-2013 16:09 3910f26c:456d16c4
 957411955  100640 (1)  14148   1000       0  6-Aug-2013 15:00 3910f273:456ed599
 966319255  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dc97:455a5a09
 966319282  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dcb2:455a5a5b
 957411960  100640 (1)  14148   1000    1432  6-Aug-2013 15:00 3910f278:456ed5a9
 957412018  100640 (1)  14148   1000   34128  6-Aug-2013 15:00 3910f2b2:456ed5ca
 957412276  100640 (1)  14148   1000    1808 18-Sep-2013 14:20 3910f3b4:45f57935
 957412293  100640 (1)  14148   1000    5240 18-Sep-2013 14:20 3910f3c5:45f57957
 957411772  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1bc:4430f887
 957410622  100640 (1)  14148   1000       0  9-May-2013 09:58 3910ed3e:4430f89b
 957410623  100640 (1)  14148   1000       0  9-May-2013 09:58 3910ed3f:4430f8a1
 957411779  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1c3:4430f8a9
 957411789  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1cd:4430f8ab
 957411790  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1ce:4430f8ad
 957411791  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1cf:4430f8af
 957411876  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f224:4430f8b3
 957411761  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1b1:4430f8c3
 957411751  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1a7:4430f8c5
 957412004  100640 (1)  14148   1000    1808  6-Aug-2013 15:00 3910f2a4:456ed58d
 957412284  100640 (1)  14148   1000    1544 18-Sep-2013 14:20 3910f3bc:45f57945
 957411962  100640 (1)  14148   1000    2784  6-Aug-2013 15:00 3910f27a:456ed5af
 957412019  100640 (1)  14148   1000   15504  6-Aug-2013 15:00 3910f2b3:456ed5cc
 957412312  100640 (1)  14148   1000    1992 18-Sep-2013 14:20 3910f3d8:45f57923
 966319285  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dcb5:455a5a33
 957410600  100640 (1)  14148   1000       0  1-Aug-2013 09:54 3910ed28:455a5ae6
 957412005  100640 (1)  14148   1000       0  6-Aug-2013 15:00 3910f2a5:456ed58f
 957411959  100640 (1)  14148   1000       0  6-Aug-2013 15:00 3910f277:456ed5a7
 957412299  100640 (1)  14148   1000   15504 18-Sep-2013 14:20 3910f3cb:45f57963
 957411744  100640 (1)  14148   1000       0 24-Sep-2013 13:32 3910f1a0:46136e2d
 957411747  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1a3:4430f8b7
 957411759  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1af:4430f8bf
 966319287  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dcb7:455a5a25
 957410603  100640 (1)  14148   1000    4328  6-Aug-2013 15:00 3910ed2b:456ed589
 957412272  100640 (1)  14148   1000   27280 18-Sep-2013 14:20 3910f3b0:45f5792b
 957410606  100640 (1)  14148   1000       0  1-Aug-2013 09:54 3910ed2e:455a5afa
 965848243  100640 (1)  14148   1000     280  5-Aug-2013 16:10 3991acb3:456d1746
 957412002  100640 (1)  14148   1000       0  6-Aug-2013 15:00 3910f2a2:456ed57b
 966319275  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dcab:455a5a4d
 957411961  100640 (1)  14148   1000    3560  6-Aug-2013 15:00 3910f279:456ed5ac
 957412015  100640 (1)  14148   1000    9136  6-Aug-2013 15:00 3910f2af:456ed5bd
 957412269  100640 (1)  14148   1000   16496 18-Sep-2013 14:20 3910f3ad:45f57925
 957412292  100640 (1)  14148   1000    7000 18-Sep-2013 14:20 3910f3c4:45f57955
 957412295  100640 (1)  14148   1000    9136 18-Sep-2013 14:20 3910f3c7:45f5795b
 957410611  100640 (1)  14148   1000       0 24-Sep-2013 13:32 3910ed33:46136e1d
 957411878  100640 (1)  14148   1000       0 24-Sep-2013 13:32 3910f226:46136e29
 957412000  100640 (1)  14148   1000   16496  6-Aug-2013 15:00 3910f2a0:456ed569
 974177487  100600 (1)  14126  10756       0 29-Jul-2013 17:04 3a10c4cf:4552f7b9
 957412266  100640 (1)  14148   1000    3080 18-Sep-2013 14:20 3910f3aa:45f57931
 957412275  100640 (1)  14148   1000    4328 18-Sep-2013 14:20 3910f3b3:45f57933
 966319269  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dca5:455a5a45
 966319281  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dcb1:455a5a59
 966319283  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dcb3:455a5a5d
 957411946  100640 (1)  14148   1000       0  5-Aug-2013 16:09 3910f26a:456d16c0
 957411947  100640 (1)  14148   1000       0  5-Aug-2013 16:09 3910f26b:456d16c2
 957411762  100640 (1)  14148   1000       0  1-Aug-2013 09:54 3910f1b2:455a5b06
 957411910  100640 (1)  14148   1000       0  1-Aug-2013 09:54 3910f246:455a5b18
 957411938  100640 (1)  14148   1000       0  1-Aug-2013 09:54 3910f262:455a5b2a
 986228498  100644 (1)  14457  10693       0 30-Sep-2013 09:41 3ac8a712:4624ae85
 957412006  100640 (1)  14148   1000    1432  6-Aug-2013 15:00 3910f2a6:456ed591
 957411758  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1ae:4430f8c1
 986228399  100644 (1)  14457  10693       0 30-Sep-2013 09:36 3ac8a6af:4624ac44
 957412289  100640 (1)  14148   1000    3560 18-Sep-2013 14:20 3910f3c1:45f5794f
 957412014  100640 (1)  14148   1000    2920  6-Aug-2013 15:00 3910f2ae:456ed5b9
 966319257  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dc99:455a5a29
 966319278  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dcae:455a5a53
 957412296  100640 (1)  14148   1000    5936 18-Sep-2013 14:20 3910f3c8:45f5795d
 957411914  100640 (1)  14148   1000       0  5-Aug-2013 16:09 3910f24a:456d16b3
 957411899  100640 (1)  14148   1000       0  1-Aug-2013 09:54 3910f23b:455a5b14
 957411934  100640 (1)  14148   1000       0  1-Aug-2013 09:54 3910f25e:455a5b22
 957411935  100640 (1)  14148   1000       0  1-Aug-2013 09:54 3910f25f:455a5b24
      0       0 (1)      0      0       0                   386b27d7:462c8b08
 957412011  100640 (1)  14148   1000    1456  6-Aug-2013 15:00 3910f2ab:456ed5b1
 957412283  100640 (1)  14148   1000    1592 18-Sep-2013 14:20 3910f3bb:45f57943
 966319266  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dca2:455a5a3b
 957412274  100640 (1)  14148   1000   34728 18-Sep-2013 14:20 3910f3b2:45f5792f
 957411944  100640 (1)  14148   1000       0  5-Aug-2013 16:09 3910f268:456d16bc
 957411748  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1a4:4430f8b9
 957411912  100640 (1)  14148   1000       0  1-Aug-2013 09:54 3910f248:455a5b1a
 957411936  100640 (1)  14148   1000       0  1-Aug-2013 09:54 3910f260:455a5b26
 996731325  100644 (1)  14527  10771   11899 21-Apr-2013 14:30 3b68e9bd:4410aa43
 957411777  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1c1:4430f8a3
 996713954  100644 (1)  14527  10771       0 22-Apr-2013 21:44 3b68a5e2:441201ad
 957412290  100640 (1)  14148   1000    2784 18-Sep-2013 14:20 3910f3c2:45f57951
 957411783  100640 (1)  14148   1000       0 24-Sep-2013 13:32 3910f1c7:46136e23
 957411746  100640 (1)  14148   1000       0 24-Sep-2013 13:32 3910f1a2:46136e31
 957411952  100640 (1)  14148   1000    3080  6-Aug-2013 15:00 3910f270:456ed587
 957411793  100640 (1)  14148   1000       0 24-Sep-2013 13:32 3910f1d1:46136e25
 946549666  100600 (1)     27     27       0 19-Feb-2013 21:14 386b33a2:4383ad98
 957411909  100640 (1)  14148   1000       0  1-Aug-2013 09:54 3910f245:455a5b16
 957411937  100640 (1)  14148   1000       0  1-Aug-2013 09:54 3910f261:455a5b28
 957411792  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1d0:4430f8b1
 957411956  100640 (1)  14148   1000       0  6-Aug-2013 15:00 3910f274:456ed59b
 957411957  100640 (1)  14148   1000       0  6-Aug-2013 15:00 3910f275:456ed5a1
 957412277  100640 (1)  14148   1000   15392 18-Sep-2013 14:20 3910f3b5:45f57937
 957412288  100640 (1)  14148   1000    1432 18-Sep-2013 14:20 3910f3c0:45f5794d
 957411755  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1ab:4430f885
 957411774  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1be:4430f88d
 957411786  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1ca:4430f89f
 957411760  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1b0:4430f897
 946562084  100600 (1)     27     27       0 19-Feb-2013 21:14 386b6424:4383ad9a
 957412281  100640 (1)  14148   1000    1528 18-Sep-2013 14:20 3910f3b9:45f5793f
 974177590  100600 (1)  14126  10756       0 29-Jul-2013 17:04 3a10c536:4552f7ba
 966319254  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dc96:455a5a15
 966319267  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dca3:455a5a41
 957412017  100640 (1)  14148   1000    9160  6-Aug-2013 15:00 3910f2b1:456ed5c8
 957411949  100640 (1)  14148   1000       0  5-Aug-2013 16:09 3910f26d:456d16c6
 957410609  100640 (1)  14148   1000       0  1-Aug-2013 09:54 3910ed31:455a5b00
 957411771  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1bb:4430f895
 957410630  100640 (1)  14148   1000       0 24-Sep-2013 13:32 3910ed46:46136e2b
 957411785  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1c9:4430f893
 957411951  100640 (1)  14148   1000   34728  6-Aug-2013 15:00 3910f26f:456ed583
 957412282  100640 (1)  14148   1000    1552 18-Sep-2013 14:20 3910f3ba:45f57941
 947502954  100644 (1)  14147   1000    2583  1-Aug-2013 13:56 3879bf6a:455a9f0c
 957412270  100640 (1)  14148   1000   11512 18-Sep-2013 14:20 3910f3ae:45f57927
 966319264  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dca0:455a5a3d
 957411945  100640 (1)  14148   1000       0  5-Aug-2013 16:09 3910f269:456d16be
 957411750  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1a6:4430f8bd
 957410605  100640 (1)  14148   1000       0  1-Aug-2013 09:54 3910ed2d:455a5af8
 957412010  100640 (1)  14148   1000       0  6-Aug-2013 15:00 3910f2aa:456ed59e
 966319274  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dcaa:455a5a4b
 957412291  100640 (1)  14148   1000    1456 18-Sep-2013 14:20 3910f3c3:45f57953
 957412294  100640 (1)  14148   1000    2920 18-Sep-2013 14:20 3910f3c6:45f57959
 957412298  100640 (1)  14148   1000   34128 18-Sep-2013 14:20 3910f3ca:45f57961
 957410626  100640 (1)  14148   1000       0 24-Sep-2013 13:32 3910ed42:46136e27
 986228494  100644 (1)  14457  10693       0 30-Sep-2013 09:41 3ac8a70e:4624ae84
 957411787  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1cb:4430f8a7
 957411752  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1a8:4430f8b5
 957410619  100640 (1)  14148   1000       0  9-May-2013 09:58 3910ed3b:4430f899
 957412271  100640 (1)  14148   1000   27408 18-Sep-2013 14:20 3910f3af:45f57929
 966319263  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dc9f:455a5a35
 946549314  100600 (1)     27     27       0 19-Feb-2013 21:14 386b3242:4383ad97
 966319268  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dca4:455a5a43
 966319270  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dca6:455a5a47
 966319276  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dcac:455a5a4f
 966319277  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dcad:455a5a51
 957412280  100640 (1)  14148   1000    1512 18-Sep-2013 14:20 3910f3b8:45f5793d
 1007223734  100755 (1)  14581  10888   35201  5-Sep-2013 19:58 3c0903b6:45cbc972
 957411763  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1b3:4430f88f
 957411883  100640 (1)  14148   1000   11512  6-Aug-2013 15:00 3910f22b:456ed56b
 957410599  100640 (1)  14148   1000       0  1-Aug-2013 09:54 3910ed27:455a5ae4
 957411931  100640 (1)  14148   1000       0  1-Aug-2013 09:54 3910f25b:455a5b20
 957412001  100640 (1)  14148   1000       0  6-Aug-2013 15:00 3910f2a1:456ed572
 966319258  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dc9a:455a5a2b
 966319280  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dcb0:455a5a57
 1004121320  100644 (1)  14581  10888    7999  6-Sep-2013 13:16 3bd9ace8:45ce741f
 957410625  100640 (1)  14148   1000       0  9-May-2013 09:58 3910ed41:4430f883
 957411757  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1ad:4430f8bb
 957411788  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1cc:4430f8a5
 957412286  100640 (1)  14148   1000    1544 18-Sep-2013 14:20 3910f3be:45f57949
 957412012  100640 (1)  14148   1000    7000  6-Aug-2013 15:00 3910f2ac:456ed5b5
 957412016  100640 (1)  14148   1000    5936  6-Aug-2013 15:00 3910f2b0:456ed5c1
 957412287  100640 (1)  14148   1000    1544 18-Sep-2013 14:20 3910f3bf:45f5794b
 957411941  100640 (1)  14148   1000       0  5-Aug-2013 16:09 3910f265:456d16b5
 957410604  100640 (1)  14148   1000       0  1-Aug-2013 09:54 3910ed2c:455a5af6
 957411928  100640 (1)  14148   1000       0  1-Aug-2013 09:54 3910f258:455a5b1c
 957411930  100640 (1)  14148   1000       0  1-Aug-2013 09:54 3910f25a:455a5b1e
 957412008  100640 (1)  14148   1000       0  6-Aug-2013 15:00 3910f2a8:456ed593
 957412279  100640 (1)  14148   1000    1512 18-Sep-2013 14:20 3910f3b7:45f5793b
 957410612  100640 (1)  14148   1000       0 24-Sep-2013 13:32 3910ed34:46136e21
 957411745  100640 (1)  14148   1000       0 24-Sep-2013 13:32 3910f1a1:46136e2f
 966319256  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dc98:455a5a37
 966319262  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dc9e:455a5a39
 957411942  100640 (1)  14148   1000       0  5-Aug-2013 16:09 3910f266:456d16b8
 957411943  100640 (1)  14148   1000       0  5-Aug-2013 16:09 3910f267:456d16ba
 957410607  100640 (1)  14148   1000       0  1-Aug-2013 09:54 3910ed2f:455a5afc
 966319279  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dcaf:455a5a55
 986228475  100644 (1)  14457  10693       0 30-Sep-2013 09:40 3ac8a6fb:4624ae40
 1007224421  100755 (1)  14581  10888   10597  5-Sep-2013 20:09 3c090665:45cbccfc
 957411784  100640 (1)  14148   1000       0  9-May-2013 09:58 3910f1c8:4430f89d
 957412022  100640 (1)  14148   1000    1992  6-Aug-2013 15:00 3910f2b6:456ed567
 986228464  100644 (1)  14457  10693       0 30-Sep-2013 09:40 3ac8a6f0:4624ae3f
 996730516  100755 (1)  14527  10771   1082706  6-Mar-2013 01:57 3b68e694:4410a146
 966319261  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dc9d:455a5a31
 966319265  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dca1:455a5a3f
 966319273  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dca9:455a5a49
 957410598  100640 (1)  14148   1000       0  1-Aug-2013 09:54 3910ed26:455a5ae2
 957411953  100640 (1)  14148   1000       0  6-Aug-2013 15:00 3910f271:456ed595
 957411958  100640 (1)  14148   1000       0  6-Aug-2013 15:00 3910f276:456ed5a3
 946562078  100600 (1)     27     27       0 19-Feb-2013 21:14 386b641e:4383ad99
 957412278  100640 (1)  14148   1000    1432 18-Sep-2013 14:20 3910f3b6:45f57939
 957411950  100640 (1)  14148   1000       0  5-Aug-2013 16:09 3910f26e:456d16c8
 966319252  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dc94:455a5a0b
 966319260  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dc9c:455a5a2f
 946562086  100600 (1)     27     27       0 19-Feb-2013 21:14 386b6426:4383ad9b
      0       0 (1)      0      0       0                   386b27d7:462c58b9
 966319253  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dc95:455a5a0d
 966319259  100644 (1)  14147   1000       0  1-Aug-2013 09:54 3998dc9b:455a5a2d
 957412009  100640 (1)  14148   1000    1528  6-Aug-2013 15:00 3910f2a9:456ed597
 957412013  100640 (1)  14148   1000    5240  6-Aug-2013 15:00 3910f2ad:456ed5b7
 957410618  100640 (1)  14148   1000       0 24-Sep-2013 13:32 3910ed3a:46136e1f
 957412297  100640 (1)  14148   1000    9160 18-Sep-2013 14:20 3910f3c9:45f5795f
 946996450  100640 (1)  14276  10361       0  2-Oct-2013 09:54 387204e2:462ca88a

debugfs:   
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Can we retrieve file name from this FID?  &lt;br/&gt;
Some files have sizes, but many are &quot;0&quot;.  Why are many files showing &quot;0&quot; size?  &lt;br/&gt;
Some files are not even showing user/group IDs.  Why did this occur?  &lt;br/&gt;
Does this output mean these files are all open unlinked files?  i.e. there should be a process which is opening the file.  &lt;br/&gt;
The customer said it is unlikely a file is opened for months.  Also, they said usage for kawashin user reported by lfs quota increased gradually while there was no creation or modification.  &lt;br/&gt;
As far as we understand this issue, PENDING directory does not explain this behavior.  &lt;/p&gt;</comment>
                            <comment id="68145" author="johann" created="Wed, 2 Oct 2013 13:43:27 +0000"  >&lt;blockquote&gt;
&lt;p&gt;Currently, we do not see files owned by &quot;kawashin&quot; user (ID: 14520).&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;Shall i understand that there was only one PENDING* directory just called PENDING?&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;Can we retrieve file name from this FID? &lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;I am afraid that the extended attribute storing the name &amp;amp; parent FID has been updated already, so it is not possible.&lt;br/&gt;
Anyway, you run 1.8, so the attribute does not even exist.&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;Some files have sizes, but many are &quot;0&quot;. Why are many files showing &quot;0&quot; size? &lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;The size reported on the MDS is just a hint updated at close time. There is no data on the MDS. The actual content of those files are stored in OST objects. The LOV EA attribute should still be valid.&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;Some files are not even showing user/group IDs. Why did this occur? &lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;Are the same files still present if you rerun &quot;ls&quot; through debugfs a second time?&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;Does this output mean these files are all open unlinked files? i.e. there should be a process which is opening the file.&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;Right, there should be.&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;The customer said it is unlikely a file is opened for months.&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;Then you should move those files back to the namespace and unlink them. I think this has been advised &lt;b&gt;multiple&lt;/b&gt; times (by Niu and myself) and it hasn&apos;t been performed yet.&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;Also, they said usage for kawashin user reported by lfs quota increased gradually while there was no creation or modification.&lt;br/&gt;
As far as we understand this issue, PENDING directory does not explain this behavior.&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;Could you please tell me in details how many inodes are reported by lfs quota and how many the user thinks it has?&lt;br/&gt;
Previously, you said:&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;inode usage is a bit incorrect. From the list of user files provided when we created this ticket, number of files are 14145 and quota was showing 14157. &lt;br/&gt;
$ wc file_size_20130821.txt &lt;br/&gt;
14145 127380 1875155 file_size_20130821.txt&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;However, the latest output of lfs quota you provided correctly showed 14145 files:&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;&lt;span class=&quot;error&quot;&gt;&amp;#91;root@wk2 ~&amp;#93;&lt;/span&gt;# lfs quota -v -u kawashin /nshare2&lt;br/&gt;
Disk quotas for user kawashin (uid 14520):&lt;br/&gt;
     Filesystem  kbytes   quota   limit   grace   files   quota   limit   grace&lt;br/&gt;
       /nshare2 65116548       0       0       -   14145       0       0       -&lt;br/&gt;
...&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;Could you please elaborate?&lt;/p&gt;</comment>
                            <comment id="76376" author="ihara" created="Thu, 6 Feb 2014 19:26:16 +0000"  >&lt;p&gt;This was not a bug; the situation was finally resolved once the files in the PENDING directory were cleaned up. Thanks for the investigation; please close the ticket.&lt;/p&gt;</comment>
                            <comment id="76377" author="pjones" created="Thu, 6 Feb 2014 19:27:36 +0000"  >&lt;p&gt;ok. Thanks Ihara&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                            <attachment id="13405" name="2013-08-23.tar.gz" size="76405" author="ihara" created="Tue, 27 Aug 2013 08:04:57 +0000"/>
                            <attachment id="13406" name="lctl_dk.out.2.gz" size="4223147" author="ihara" created="Tue, 27 Aug 2013 09:57:18 +0000"/>
                            <attachment id="13421" name="lctl_dk_wQUOTA_TRACE.out.2.gz" size="4648764" author="mnishizawa" created="Tue, 3 Sep 2013 08:33:27 +0000"/>
                            <attachment id="13534" name="messages_20130924.tar.gz" size="127894" author="mnishizawa" created="Tue, 24 Sep 2013 10:57:30 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvzbr:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9936</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>