<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:49:19 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-5192] upgrade 2.1 -&gt; 2.4.3 quota errors</title>
                <link>https://jira.whamcloud.com/browse/LU-5192</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;We upgrade our 2.1 server to 2.4.3&lt;/p&gt;

&lt;p&gt;ran &lt;br/&gt;
lctl --quota on all OST and MDT&lt;/p&gt;

&lt;p&gt;we are getting the following errors&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;pfe21 /nobackupp8/mhanafi # lfs  quota -v -u mhanafi /nobackupp8
Disk quotas &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; user mhanafi (uid 11312):
     Filesystem  kbytes   quota   limit   grace   files   quota   limit   grace
    /nobackupp8 [60908]  1275423612 75000000000       -  108850  100000  200000       -
nbp8-MDT0000_UUID
                  60908       -       0       -  108850       -       0       -
nbp8-OST0000_UUID
                  83168       -       0       -       -       -       -       -
nbp8-OST0001_UUID
                  14892       -       0       -       -       -       -       -
nbp8-OST0002_UUID
                  41212       -       0       -       -       -       -       -
nbp8-OST0003_UUID
                  70332       -       0       -       -       -       -       -
nbp8-OST0004_UUID
                  60488       -       0       -       -       -       -       -
nbp8-OST0005_UUID
                  39652       -       0       -       -       -       -       -
nbp8-OST0006_UUID
                  60868       -       0       -       -       -       -       -
Total allocated inode limit: 0, total allocated block limit: 0
Some errors happened when getting quota info. Some devices may be not working or deactivated. The data in &lt;span class=&quot;code-quote&quot;&gt;&quot;[]&quot;&lt;/span&gt; is inaccurate.
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;MDS&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;nbp8-mds1 ~ # lctl get_param osd-*.*.quota_slave.info
osd-ldiskfs.nbp8-MDT0000.quota_slave.info=
target name:    nbp8-MDT0000
pool ID:        0
type:           md
quota enabled:  none
conn to master: not setup yet
space acct:     ug
user uptodate:  glb[0],slv[0],reint[1]
group uptodate: glb[0],slv[0],reint[1]
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;OSS&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;nbp8-oss2 ~ # lctl get_param osd-*.*.quota_slave.info
osd-ldiskfs.nbp8-OST0001.quota_slave.info=
target name:    nbp8-OST0001
pool ID:        0
type:           dt
quota enabled:  none
conn to master: not setup yet
space acct:     ug
user uptodate:  glb[0],slv[0],reint[1]
group uptodate: glb[0],slv[0],reint[1]
osd-ldiskfs.nbp8-OST001b.quota_slave.info=
target name:    nbp8-OST001b
pool ID:        0
type:           dt
quota enabled:  none
conn to master: not setup yet
space acct:     ug
user uptodate:  glb[0],slv[0],reint[1]
group uptodate: glb[0],slv[0],reint[1]
osd-ldiskfs.nbp8-OST0035.quota_slave.info=
target name:    nbp8-OST0035
pool ID:        0
type:           dt
quota enabled:  none
conn to master: not setup yet
space acct:     ug
user uptodate:  glb[0],slv[0],reint[1]
group uptodate: glb[0],slv[0],reint[1]
osd-ldiskfs.nbp8-OST004f.quota_slave.info=
target name:    nbp8-OST004f
pool ID:        0
type:           dt
quota enabled:  none
conn to master: not setup yet
space acct:     ug
user uptodate:  glb[0],slv[0],reint[1]
group uptodate: glb[0],slv[0],reint[1]
osd-ldiskfs.nbp8-OST0069.quota_slave.info=
target name:    nbp8-OST0069
pool ID:        0
type:           dt
quota enabled:  none
conn to master: not setup yet
space acct:     ug
user uptodate:  glb[0],slv[0],reint[1]
group uptodate: glb[0],slv[0],reint[1]
osd-ldiskfs.nbp8-OST0083.quota_slave.info=
target name:    nbp8-OST0083
pool ID:        0
type:           dt
quota enabled:  none
conn to master: not setup yet
space acct:     ug
user uptodate:  glb[0],slv[0],reint[1]
group uptodate: glb[0],slv[0],reint[1]
osd-ldiskfs.nbp8-OST009d.quota_slave.info=
target name:    nbp8-OST009d
pool ID:        0
type:           dt
quota enabled:  none
conn to master: not setup yet
space acct:     ug
user uptodate:  glb[0],slv[0],reint[1]
group uptodate: glb[0],slv[0],reint[1]
osd-ldiskfs.nbp8-OST00b7.quota_slave.info=
target name:    nbp8-OST00b7
pool ID:        0
type:           dt
quota enabled:  none
conn to master: not setup yet
space acct:     ug
user uptodate:  glb[0],slv[0],reint[1]
group uptodate: glb[0],slv[0],reint[1]
osd-ldiskfs.nbp8-OST00d1.quota_slave.info=
target name:    nbp8-OST00d1
pool ID:        0
type:           dt
quota enabled:  none
conn to master: not setup yet
space acct:     ug
user uptodate:  glb[0],slv[0],reint[1]
group uptodate: glb[0],slv[0],reint[1]
osd-ldiskfs.nbp8-OST00eb.quota_slave.info=
target name:    nbp8-OST00eb
pool ID:        0
type:           dt
quota enabled:  none
conn to master: not setup yet
space acct:     ug
user uptodate:  glb[0],slv[0],reint[1]
group uptodate: glb[0],slv[0],reint[1]
osd-ldiskfs.nbp8-OST0105.quota_slave.info=
target name:    nbp8-OST0105
pool ID:        0
type:           dt
quota enabled:  none
conn to master: not setup yet
space acct:     ug
user uptodate:  glb[0],slv[0],reint[1]
group uptodate: glb[0],slv[0],reint[1]
osd-ldiskfs.nbp8-OST011f.quota_slave.info=
target name:    nbp8-OST011f
pool ID:        0
type:           dt
quota enabled:  none
conn to master: not setup yet
space acct:     ug
user uptodate:  glb[0],slv[0],reint[1]
group uptodate: glb[0],slv[0],reint[1]
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;nbp8-mds1 ~ # lctl dl
  0 UP osd-ldiskfs MGS-osd MGS-osd_UUID 5
  1 UP mgs MGS MGS 23455
  2 UP mgc MGC10.151.27.60@o2ib 3b2ba8a8-1b82-764e-a3ef-c10d5df8bf04 5
  3 UP osd-ldiskfs nbp8-MDT0000-osd nbp8-MDT0000-osd_UUID 319
  4 UP mds MDS MDS_uuid 3
  5 UP lod nbp8-MDT0000-mdtlov nbp8-MDT0000-mdtlov_UUID 4
  6 UP mdt nbp8-MDT0000 nbp8-MDT0000_UUID 23401
  7 UP mdd nbp8-MDD0000 nbp8-MDD0000_UUID 4
  8 UP qmt nbp8-QMT0000 nbp8-QMT0000_UUID 4
  9 UP osp nbp8-OST0063-osc-MDT0000 nbp8-MDT0000-mdtlov_UUID 5
 10 UP osp nbp8-OST003d-osc-MDT0000 nbp8-MDT0000-mdtlov_UUID 5
 11 UP osp nbp8-OST001c-osc-MDT0000 nbp8-MDT0000-mdtlov_UUID 5
 12 UP osp nbp8-OST012c-osc-MDT0000 nbp8-MDT0000-mdtlov_UUID 5
.
.
.
.
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;nbp8-mds1 ~ # tune2fs -l /dev/mapper/nbp8--vg-mdt8 
tune2fs 1.42.7.wc2 (07-Nov-2013)
Filesystem volume name:   nbp8-MDT0000
Last mounted on:          /
Filesystem UUID:          04d0b84c-180c-4230-9fa6-fcbede07f1bc
Filesystem magic number:  0xEF53
Filesystem revision #:    1 (dynamic)
Filesystem features:      has_journal ext_attr resize_inode dir_index filetype needs_recovery flex_bg dirdata sparse_super large_file huge_file uninit_bg dir_nlink extra_isize quota
Filesystem flags:         signed_directory_hash 
Default mount options:    user_xattr acl
Filesystem state:         clean
Errors behavior:          Continue
Filesystem OS type:       Linux
Inode count:              966380512
Block count:              483184640
Reserved block count:     0
Free blocks:              325181297
Free inodes:              827897945
First block:              0
Block size:               4096
Fragment size:            4096
Reserved GDT blocks:      1024
Blocks per group:         16376
Fragments per group:      16376
Inodes per group:         32752
Inode blocks per group:   4094
Flex block group size:    16
Filesystem created:       Wed Jun  5 17:40:07 2013
Last mount time:          Wed Jun 11 18:15:54 2014
Last write time:          Wed Jun 11 18:15:54 2014
Mount count:              99
Maximum mount count:      -1
Last checked:             Wed Jun  5 17:40:07 2013
Check interval:           0 (&amp;lt;none&amp;gt;)
Lifetime writes:          48 TB
Reserved blocks uid:      0 (user root)
Reserved blocks gid:      0 (group root)
First inode:              11
Inode size:	          512
Required extra isize:     28
Desired extra isize:      28
Journal UUID:             4c0a58b3-e261-47cc-80dc-6b45346e8db6
Journal device:	          0xfd01
Default directory hash:   half_md4
Directory Hash Seed:      6ee52b70-b975-477f-9136-9b5bd0eb10b4
Journal backup:           inode blocks
User quota inode:         3
Group quota inode:        4
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;tune2fs 1.42.7.wc2 (07-Nov-2013)
Filesystem volume name:   nbp8-OST0001
Last mounted on:          /
Filesystem UUID:          819a930e-2e30-48c8-b666-4d1db350bcb7
Filesystem magic number:  0xEF53
Filesystem revision #:    1 (dynamic)
Filesystem features:      has_journal ext_attr resize_inode dir_index filetype needs_recovery extent 64bit flex_bg sparse_super large_file huge_file uninit_bg dir_nlink extra_isize quota
Filesystem flags:         signed_directory_hash 
Default mount options:    user_xattr acl
Filesystem state:         clean
Errors behavior:          Continue
Filesystem OS type:       Linux
Inode count:              22888704
Block count:              5859483648
Reserved block count:     0
Free blocks:              1795574486
Free inodes:              21669767
First block:              0
Block size:               4096
Fragment size:            4096
Reserved GDT blocks:      1024
Blocks per group:         32768
Fragments per group:      32768
Inodes per group:         128
Inode blocks per group:   8
Flex block group size:    256
Filesystem created:       Wed Jun  5 19:08:44 2013
Last mount time:          Wed Jun 11 18:16:40 2014
Last write time:          Wed Jun 11 18:16:40 2014
Mount count:              25
Maximum mount count:      -1
Last checked:             Wed Jun  5 19:08:44 2013
Check interval:           0 (&amp;lt;none&amp;gt;)
Lifetime writes:          42 TB
Reserved blocks uid:      0 (user root)
Reserved blocks gid:      0 (group root)
First inode:              11
Inode size:	          256
Required extra isize:     28
Desired extra isize:      28
Journal UUID:             fe5db948-55c4-4b70-9b01-2eecf994bb91
Journal device:	          0xfd00
Default directory hash:   half_md4
Directory Hash Seed:      78f2ecbc-31f7-4764-9391-12de7c25a94a
User quota inode:         3
Group quota inode:        4
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment></environment>
        <key id="25151">LU-5192</key>
            <summary>upgrade 2.1 -&gt; 2.4.3 quota errors</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="6">Not a Bug</resolution>
                                        <assignee username="niu">Niu Yawei</assignee>
                                    <reporter username="mhanafi">Mahmoud Hanafi</reporter>
                        <labels>
                    </labels>
                <created>Fri, 13 Jun 2014 16:48:48 +0000</created>
                <updated>Wed, 16 Jul 2014 18:11:05 +0000</updated>
                            <resolved>Wed, 16 Jul 2014 18:11:04 +0000</resolved>
                                    <version>Lustre 2.4.3</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>5</watches>
                                                                            <comments>
                            <comment id="86607" author="pjones" created="Fri, 13 Jun 2014 19:34:11 +0000"  >&lt;p&gt;Niu&lt;/p&gt;

&lt;p&gt;Could you please assist with this one?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="86648" author="niu" created="Mon, 16 Jun 2014 01:21:32 +0000"  >&lt;p&gt;What&apos;s your client version? Client needs be upgraded to 2.1.4 or later.&lt;/p&gt;</comment>
                            <comment id="86737" author="mhanafi" created="Mon, 16 Jun 2014 20:07:39 +0000"  >&lt;p&gt;the client is 2.4.3. Please note that &lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;conn to master: not setup yet
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;I think this is the main issue.&lt;/p&gt;</comment>
                            <comment id="86771" author="niu" created="Tue, 17 Jun 2014 02:06:13 +0000"  >&lt;p&gt;Could you enable D_QUOTA and collect debug logs on both clients/MDS/OSTs when issuing &apos;lfs quota&apos; command? Thanks.&lt;/p&gt;</comment>
                            <comment id="86997" author="mhanafi" created="Wed, 18 Jun 2014 23:16:58 +0000"  >&lt;p&gt;uploaded logs to ftp site. uploads/lu-5192/quotadebug.&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-5192&quot; title=&quot;upgrade 2.1 -&amp;gt; 2.4.3 quota errors&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-5192&quot;&gt;&lt;del&gt;LU-5192&lt;/del&gt;&lt;/a&gt;.tgz&lt;/p&gt;</comment>
                            <comment id="87000" author="niu" created="Thu, 19 Jun 2014 02:37:36 +0000"  >&lt;p&gt;All quotactl failed for -ENOTSUPP.&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;00000001:00020000:20.0F:1403015706.581986:0:53237:0:(osc_quota.c:274:osc_quotactl()) ptlrpc_queue_wait failed, rc: -95
...
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;It probably because there was something wrong with looking up quota accounting file, but the proc file showed that quota accounting files are fine:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;osd-ldiskfs.nbp8-OST0001.quota_slave.info=
target name:    nbp8-OST0001
pool ID:        0
type:           dt
quota enabled:  none
conn to master: not setup yet
space acct:     ug
user uptodate:  glb[0],slv[0],reint[1]
group uptodate: glb[0],slv[0],reint[1]
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;And the super block showed that quota feature is enabled and the ino of quota file are correct:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;Filesystem features:      has_journal ext_attr resize_inode dir_index filetype needs_recovery extent 64bit flex_bg sparse_super large_file huge_file uninit_bg dir_nlink extra_isize quota
User quota inode:         3
Group quota inode:        4
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Could you verify the accounting files via procfs? (cat  /proc/fs/lustre/osd-ldiskfs/$OST/quota_slave/acct_user (or acct_group)). And could you re-capture the log again? I think we&apos;d enable D_TRACE this time, so that we can figure out where exactly the -ENOTSUPP returned from. Thanks a lot.&lt;/p&gt;

&lt;p&gt;BTW: the &quot;conn to master: not setup yet&quot; means quota slave is not connected to master yet, I don&apos;t know why it can&apos;t connect to master, but that only affect quota enforcement, quota usage collection should be fine.&lt;/p&gt;</comment>
                            <comment id="87094" author="mhanafi" created="Thu, 19 Jun 2014 20:27:47 +0000"  >&lt;p&gt;I found that  a number of OSTs  quota feature is not turned on and their acct_user file is empty. &lt;/p&gt;

&lt;p&gt;to recover from this should we just disable and re-enable quota options on all OSTs and remount.&lt;/p&gt;
</comment>
                            <comment id="87119" author="niu" created="Fri, 20 Jun 2014 01:47:45 +0000"  >&lt;blockquote&gt;
&lt;p&gt;I found that a number of OSTs quota feature is not turned on and their acct_user file is empty.&lt;br/&gt;
to recover from this should we just disable and re-enable quota options on all OSTs and remount.&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;Yes, we should enable quota feature and remount those OSTs.&lt;/p&gt;</comment>
                            <comment id="89255" author="mhanafi" created="Wed, 16 Jul 2014 18:09:03 +0000"  >&lt;p&gt;This can be closed.&lt;/p&gt;</comment>
                            <comment id="89257" author="pjones" created="Wed, 16 Jul 2014 18:11:05 +0000"  >&lt;p&gt;Thanks Mahmoud&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwp1b:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>14510</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>