<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:44:37 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-11524] sanity-sec test 31 fails with  &apos;/usr/bin/lfs setquota -u quota_usr -b 1024 -B 1075 -i -I 0 /mnt/lustre FAILED!&apos;</title>
                <link>https://jira.whamcloud.com/browse/LU-11524</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;sanity-sec test_31 is failing when ENABLE_QUOTA=yes which is done when running the full test group. This test has failed full test group since October 13, 2018 lustre-master build #3805; the first full test session run since test 31 landed.&lt;/p&gt;

&lt;p&gt;An example of this failure is at &lt;a href=&quot;https://testing.whamcloud.com/test_sets/9b92b186-cf12-11e8-9238-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/9b92b186-cf12-11e8-9238-52540065bddc&lt;/a&gt; . In the client test_log, we see some issues setting osc.*.idle_timeout=debug and getting mdc.*.connect_flags, but I think those can be ignored because the real issue is that we don&#8217;t have any value for inode-softlimit (lfs setquota -i value) and the inode-hardlimit is 0 (lfs setquota -I value)&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;CMD: onyx-30vm4 lctl get_param -n timeout
Using TIMEOUT=20
CMD: onyx-30vm4 lctl dl | grep &apos; IN osc &apos; 2&amp;gt;/dev/null | wc -l
CMD: onyx-30vm1.onyx.whamcloud.com lctl dl | grep &apos; IN osc &apos; 2&amp;gt;/dev/null | wc -l
error: set_param: param_path &apos;osc/*/idle_timeout&apos;: No such file or directory
error: get_param: param_path &apos;mdc/*/connect_flags&apos;: No such file or directory
jobstats not supported by server
enable quota as required
CMD: onyx-30vm4 /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-MDT0000.quota_slave.enabled
CMD: onyx-30vm3 /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-OST0000.quota_slave.enabled
[HOST:onyx-30vm1.onyx.whamcloud.com] [old_mdt_qtype:none] [old_ost_qtype:none] [new_qtype:ug3]
CMD: onyx-30vm4 /usr/sbin/lctl conf_param lustre.quota.mdt=ug3
CMD: onyx-30vm4 /usr/sbin/lctl conf_param lustre.quota.ost=ug3
Total disk size:   block-softlimit: 1024 block-hardlimit: 1075 inode-softlimit:  inode-hardlimit: 0
Setting up quota on onyx-30vm1.onyx.whamcloud.com:/mnt/lustre for quota_usr...
+ /usr/bin/lfs setquota -u quota_usr -b 1024 -B 1075 -i  -I 0 /mnt/lustre
lfs setquota: warning: block softlimit &apos;1024&apos; smaller than minimum qunit size
See &apos;lfs help setquota&apos; or Lustre manual for details
lfs: invalid limit &apos;-I&apos;
Set filesystem quotas.
usage: setquota &amp;lt;-u|-g|-p&amp;gt; &amp;lt;uname&amp;gt;|&amp;lt;uid&amp;gt;|&amp;lt;gname&amp;gt;|&amp;lt;gid&amp;gt;|&amp;lt;projid&amp;gt;
                -b &amp;lt;block-softlimit&amp;gt; -B &amp;lt;block-hardlimit&amp;gt;
                -i &amp;lt;inode-softlimit&amp;gt; -I &amp;lt;inode-hardlimit&amp;gt; &amp;lt;filesystem&amp;gt;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;In setup_quota(), we calculate the inode-softlimit from&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
2129         # get_filesystem_size
2130         local disksz=$(lfs_df $mntpt | grep &lt;span class=&quot;code-quote&quot;&gt;&quot;summary&quot;&lt;/span&gt; | awk &lt;span class=&quot;code-quote&quot;&gt;&apos;{print $2}&apos;&lt;/span&gt;)
2131         local blk_soft=$((disksz + 1024))
2132         local blk_hard=$((blk_soft + blk_soft / 20)) # Go 5% over
2133 
2134         local inodes=$(lfs_df -i $mntpt | grep &lt;span class=&quot;code-quote&quot;&gt;&quot;summary&quot;&lt;/span&gt; | awk &lt;span class=&quot;code-quote&quot;&gt;&apos;{print $2}&apos;&lt;/span&gt;)
2135         local i_soft=$inodes
2136         local i_hard=$((i_soft + i_soft / 20))
2137 
2138         echo &lt;span class=&quot;code-quote&quot;&gt;&quot;Total disk size: $disksz  block-softlimit: $blk_soft&quot;&lt;/span&gt; \
2139                 &lt;span class=&quot;code-quote&quot;&gt;&quot;block-hardlimit: $blk_hard inode-softlimit: $i_soft&quot;&lt;/span&gt; \
2140                 &lt;span class=&quot;code-quote&quot;&gt;&quot;inode-hardlimit: $i_hard&quot;&lt;/span&gt;
2141 
2142         local cmd
2143         &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; usr in $quota_usrs; &lt;span class=&quot;code-keyword&quot;&gt;do&lt;/span&gt;
2144                 echo &lt;span class=&quot;code-quote&quot;&gt;&quot;Setting up quota on $HOSTNAME:$mntpt &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; $usr...&quot;&lt;/span&gt;
2145                 &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; type in u g; &lt;span class=&quot;code-keyword&quot;&gt;do&lt;/span&gt;
2146                         cmd=&lt;span class=&quot;code-quote&quot;&gt;&quot;$LFS setquota -$type $usr -b $blk_soft&quot;&lt;/span&gt;
2147                         cmd=&lt;span class=&quot;code-quote&quot;&gt;&quot;$cmd -B $blk_hard -i $i_soft -I $i_hard $mntpt&quot;&lt;/span&gt;
2148                         echo &lt;span class=&quot;code-quote&quot;&gt;&quot;+ $cmd&quot;&lt;/span&gt;
2149                         eval $cmd || error &lt;span class=&quot;code-quote&quot;&gt;&quot;$cmd FAILED!&quot;&lt;/span&gt;
2150                 done
2151                 # display the quota status
2152                 echo &lt;span class=&quot;code-quote&quot;&gt;&quot;Quota settings &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; $usr : &quot;&lt;/span&gt;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;The strange thing is, sanity-sec test 25 enables quota just as test 31 does and it succeeds. From the suite_log, we see&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;osc.lustre-OST0005-osc-ffff9c13bc588000.idle_timeout=debug
osc.lustre-OST0006-osc-ffff9c13bc588000.idle_timeout=debug
enable quota as required
CMD: onyx-30vm4 /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-MDT0000.quota_slave.enabled
CMD: onyx-30vm3 /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-OST0000.quota_slave.enabled
[HOST:onyx-30vm1.onyx.whamcloud.com] [old_mdt_qtype:ug] [old_ost_qtype:ug] [new_qtype:ug3]
CMD: onyx-30vm4 /usr/sbin/lctl conf_param lustre.quota.mdt=ug3
CMD: onyx-30vm4 /usr/sbin/lctl conf_param lustre.quota.ost=ug3
Total disk size: 13532932  block-softlimit: 13533956 block-hardlimit: 14210653 inode-softlimit: 838864 inode-hardlimit: 880807
Setting up quota on onyx-30vm1.onyx.whamcloud.com:/mnt/lustre for quota_usr...
+ /usr/bin/lfs setquota -u quota_usr -b 13533956 -B 14210653 -i 838864 -I 880807 /mnt/lustre
+ /usr/bin/lfs setquota -g quota_usr -b 13533956 -B 14210653 -i 838864 -I 880807 /mnt/lustre
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Unfortunately, almost every other test suite fails to run any tests after sanity-sec test 31 fails with various issues. Looking in the suite_log for each test suite,&lt;br/&gt;
In sanity-pfl, the client fails with&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;CMD: onyx-30vm4 lctl dl | grep &apos; IN osc &apos; 2&amp;gt;/dev/null | wc -l
CMD: onyx-30vm1.onyx.whamcloud.com lctl dl | grep &apos; IN osc &apos; 2&amp;gt;/dev/null | wc -l
error: set_param: param_path &apos;osc/*/idle_timeout&apos;: No such file or directory
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;lustre-rsync-test, metadata-updates, ost-pools, &#8230; fails with&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Starting mds1:   /dev/mapper/mds1_flakey /mnt/lustre-mds1
CMD: onyx-30vm4 mkdir -p /mnt/lustre-mds1; mount -t lustre   /dev/mapper/mds1_flakey /mnt/lustre-mds1
onyx-30vm4: mount.lustre: according to /etc/mtab /dev/mapper/mds1_flakey is already mounted on /mnt/lustre-mds1
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment></environment>
        <key id="53606">LU-11524</key>
            <summary>sanity-sec test 31 fails with  &apos;/usr/bin/lfs setquota -u quota_usr -b 1024 -B 1075 -i -I 0 /mnt/lustre FAILED!&apos;</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="sebastien">Sebastien Buisson</assignee>
                                    <reporter username="jamesanunez">James Nunez</reporter>
                        <labels>
                    </labels>
                <created>Mon, 15 Oct 2018 22:47:04 +0000</created>
                <updated>Mon, 29 Oct 2018 15:13:33 +0000</updated>
                            <resolved>Mon, 29 Oct 2018 15:13:33 +0000</resolved>
                                    <version>Lustre 2.12.0</version>
                                    <fixVersion>Lustre 2.12.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>3</watches>
                                                                            <comments>
                            <comment id="234982" author="sebastien" created="Tue, 16 Oct 2018 14:00:03 +0000"  >&lt;p&gt;The problem stems from the fact that init_param_vars() tries to do Lustre client specific tunings (including quota settings), even when Lustre clients are not mounted.&lt;br/&gt;
I will push a patch to address this problem.&lt;/p&gt;</comment>
                            <comment id="234983" author="gerrit" created="Tue, 16 Oct 2018 14:00:14 +0000"  >&lt;p&gt;Sebastien Buisson (sbuisson@ddn.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/33380&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/33380&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-11524&quot; title=&quot;sanity-sec test 31 fails with  &amp;#39;/usr/bin/lfs setquota -u quota_usr -b 1024 -B 1075 -i -I 0 /mnt/lustre FAILED!&amp;#39;&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-11524&quot;&gt;&lt;del&gt;LU-11524&lt;/del&gt;&lt;/a&gt; tests: make init_param_vars() aware of server_only&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: fbd14dd0a08a80892be29ef73e6a8a478df9c919&lt;/p&gt;</comment>
                            <comment id="235174" author="gerrit" created="Fri, 19 Oct 2018 23:12:21 +0000"  >&lt;p&gt;Andreas Dilger (adilger@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/33380/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/33380/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-11524&quot; title=&quot;sanity-sec test 31 fails with  &amp;#39;/usr/bin/lfs setquota -u quota_usr -b 1024 -B 1075 -i -I 0 /mnt/lustre FAILED!&amp;#39;&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-11524&quot;&gt;&lt;del&gt;LU-11524&lt;/del&gt;&lt;/a&gt; tests: fix sanity-sec test_31 for all situations&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: fde0e290a8cc370f6eb7986e9ada8b5bcc41fef7&lt;/p&gt;</comment>
                            <comment id="235766" author="pjones" created="Mon, 29 Oct 2018 15:13:33 +0000"  >&lt;p&gt;Seems to have landed for 2.12&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i0048n:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>