<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:39:32 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-10940] sanity test_802: set mdt quota type failed</title>
                <link>https://jira.whamcloud.com/browse/LU-10940</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for sarah_lw &amp;lt;wei3.liu@intel.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/d3a87ae4-471b-11e8-95c0-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/d3a87ae4-471b-11e8-95c0-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;test_802 failed with the following error:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;set mdt quota type failed
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;This failure seems to start showing on 2.11.50.51, b3738 on April 9, 2018&lt;/p&gt;

&lt;p&gt;test log&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;
CMD: trevis-4vm4 lctl get_param -n timeout
Using TIMEOUT=20
CMD: trevis-4vm4 lctl dl | grep &apos; IN osc &apos; 2&amp;gt;/dev/null | wc -l
CMD: trevis-4vm1.trevis.hpdd.intel.com lctl dl | grep &apos; IN osc &apos; 2&amp;gt;/dev/null | wc -l
error: get_param: param_path &apos;mdc/*/connect_flags&apos;: No such file or directory
jobstats not supported by server
enable quota as required
CMD: trevis-4vm4 /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-MDT0000.quota_slave.enabled
CMD: trevis-4vm3 /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-OST0000.quota_slave.enabled
[HOST:trevis-4vm1.trevis.hpdd.intel.com] [old_mdt_qtype:ug] [old_ost_qtype:ug] [new_qtype:ug3]
CMD: trevis-4vm4 /usr/sbin/lctl conf_param lustre.quota.mdt=ug3
trevis-4vm4: error: conf_param: Read-only file system
 sanity test_802: @@@@@@ FAIL: set mdt quota type failed 
 Trace dump:

&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;MDS dmesg&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;
[ 7400.522030] Lustre: DEBUG MARKER: SKIP: sanity test_801c
[ 7400.803247] Lustre: DEBUG MARKER: /usr/sbin/lctl mark == sanity test 802: simulate readonly device ========================================================= 23:28:21 \(1524439701\)
[ 7400.993579] Lustre: DEBUG MARKER: == sanity test 802: simulate readonly device ========================================================= 23:28:21 (1524439701)
[ 7401.164206] Lustre: DEBUG MARKER: /usr/sbin/lctl get_param -n version 2&amp;gt;/dev/null ||
 /usr/sbin/lctl lustre_build_version 2&amp;gt;/dev/null ||
 /usr/sbin/lctl --version 2&amp;gt;/dev/null | cut -d&apos; &apos; -f2
[ 7401.912727] Lustre: DEBUG MARKER: lctl set_param -n os[cd]*.*MDT*.force_sync=1
[ 7405.691947] Lustre: DEBUG MARKER: lctl set_param -n os[cd]*.*MDT*.force_sync=1
[ 7407.328094] Lustre: DEBUG MARKER: grep -c /mnt/lustre-mds1&apos; &apos; /proc/mounts || true
[ 7407.639629] Lustre: DEBUG MARKER: umount -d -f /mnt/lustre-mds1
[ 7412.735986] Lustre: lustre-MDT0000: Not available for connect from 10.9.4.31@tcp (stopping)
[ 7412.738161] Lustre: Skipped 3 previous similar messages
[ 7417.729349] LustreError: 137-5: lustre-MDT0000_UUID: not available for connect from 10.9.4.31@tcp (no target). If you are running an HA pair check that the target is mounted on the other server.
[ 7417.733806] LustreError: Skipped 15 previous similar messages
[ 7419.993052] Lustre: 7085:0:(client.c:2099:ptlrpc_expire_one_request()) @@@ Request sent has timed out for slow reply: [sent 1524439714/real 1524439714] req@ffff880061347900 x1598483208664528/t0(0) o251-&amp;gt;MGC10.9.4.32@tcp@0@lo:26/25 lens 224/224 e 0 to 1 dl 1524439720 ref 2 fl Rpc:XN/0/ffffffff rc 0/-1
[ 7420.027093] Lustre: server umount lustre-MDT0000 complete
[ 7420.199653] Lustre: DEBUG MARKER: lsmod | grep lnet &amp;gt; /dev/null &amp;amp;&amp;amp;
lctl dl | grep &apos; ST &apos; || true
[ 7420.520293] Lustre: DEBUG MARKER: modprobe dm-flakey;
 dmsetup targets | grep -q flakey
[ 7432.809742] Lustre: DEBUG MARKER: running=$(grep -c /mnt/lustre-mds1&apos; &apos; /proc/mounts);
mpts=$(mount | grep -c /mnt/lustre-mds1&apos; &apos;);
if [ $running -ne $mpts ]; then
 echo $(hostname) env are INSANE!;
 exit 1;
fi
[ 7433.175430] Lustre: DEBUG MARKER: running=$(grep -c /mnt/lustre-mds1&apos; &apos; /proc/mounts);
mpts=$(mount | grep -c /mnt/lustre-mds1&apos; &apos;);
if [ $running -ne $mpts ]; then
 echo $(hostname) env are INSANE!;
 exit 1;
fi
[ 7434.341465] Lustre: DEBUG MARKER: mkdir -p /mnt/lustre-mds1
[ 7434.650345] Lustre: DEBUG MARKER: modprobe dm-flakey;
 dmsetup targets | grep -q flakey
[ 7434.951416] Lustre: DEBUG MARKER: dmsetup status /dev/mapper/mds1_flakey &amp;gt;/dev/null 2&amp;gt;&amp;amp;1
[ 7435.251720] Lustre: DEBUG MARKER: dmsetup status /dev/mapper/mds1_flakey 2&amp;gt;&amp;amp;1
[ 7435.550827] Lustre: DEBUG MARKER: test -b /dev/mapper/mds1_flakey
[ 7435.846295] Lustre: DEBUG MARKER: e2label /dev/mapper/mds1_flakey
[ 7436.141605] Lustre: DEBUG MARKER: mkdir -p /mnt/lustre-mds1; mount -t lustre -o rdonly_dev /dev/mapper/mds1_flakey /mnt/lustre-mds1
[ 7436.313923] LDISKFS-fs (dm-3): mounted filesystem with ordered data mode. Opts: user_xattr,errors=remount-ro,no_mbcache,nodelalloc
[ 7436.317692] Turning device dm-3 (0xfc00003) read-only
[ 7436.319501] Lustre: lustre-MDT0000-osd: set dev_rdonly on this device
[ 7436.395144] Lustre: lustre-MDT0000: Imperative Recovery not enabled, recovery window 60-180
[ 7436.566211] Lustre: DEBUG MARKER: /usr/sbin/lctl get_param -n health_check
[ 7436.878021] Lustre: DEBUG MARKER: PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/u
[ 7437.467981] Lustre: DEBUG MARKER: /usr/sbin/lctl mark trevis-4vm4.trevis.hpdd.intel.com: executing set_default_debug vfstrace rpctrace dlmtrace neterror ha config ioctl super lfsck all 4
[ 7437.468228] Lustre: DEBUG MARKER: /usr/sbin/lctl mark trevis-4vm4.trevis.hpdd.intel.com: executing set_default_debug vfstrace rpctrace dlmtrace neterror ha config ioctl super lfsck all 4
[ 7437.655837] Lustre: DEBUG MARKER: trevis-4vm4.trevis.hpdd.intel.com: executing set_default_debug vfstrace rpctrace dlmtrace neterror ha config ioctl super lfsck all 4
[ 7437.655862] Lustre: DEBUG MARKER: trevis-4vm4.trevis.hpdd.intel.com: executing set_default_debug vfstrace rpctrace dlmtrace neterror ha config ioctl super lfsck all 4
[ 7437.827912] Lustre: DEBUG MARKER: lctl set_param -n mdt.lustre*.enable_remote_dir=1
[ 7438.131105] Lustre: DEBUG MARKER: e2label /dev/mapper/mds1_flakey 2&amp;gt;/dev/null | grep -E &apos;:[a-zA-Z]\{3}[0-9]\{4}&apos;
[ 7438.434939] Lustre: DEBUG MARKER: e2label /dev/mapper/mds1_flakey 2&amp;gt;/dev/null | grep -E &apos;:[a-zA-Z]\{3}[0-9]\{4}&apos;
[ 7438.762419] Lustre: DEBUG MARKER: e2label /dev/mapper/mds1_flakey 2&amp;gt;/dev/null
[ 7439.087312] Lustre: DEBUG MARKER: lctl set_param -n mdt.lustre*.enable_remote_dir=1
[ 7442.556447] Lustre: DEBUG MARKER: /usr/sbin/lctl mark trevis-4vm3.trevis.hpdd.intel.com: executing set_default_debug vfstrace rpctrace dlmtrace neterror ha config ioctl super lfsck all 4
[ 7442.755098] Lustre: DEBUG MARKER: trevis-4vm3.trevis.hpdd.intel.com: executing set_default_debug vfstrace rpctrace dlmtrace neterror ha config ioctl super lfsck all 4
[ 7447.098744] Lustre: DEBUG MARKER: /usr/sbin/lctl mark trevis-4vm3.trevis.hpdd.intel.com: executing set_default_debug vfstrace rpctrace dlmtrace neterror ha config ioctl super lfsck all 4
[ 7447.288273] Lustre: DEBUG MARKER: trevis-4vm3.trevis.hpdd.intel.com: executing set_default_debug vfstrace rpctrace dlmtrace neterror ha config ioctl super lfsck all 4
[ 7451.644719] Lustre: DEBUG MARKER: /usr/sbin/lctl mark trevis-4vm3.trevis.hpdd.intel.com: executing set_default_debug vfstrace rpctrace dlmtrace neterror ha config ioctl super lfsck all 4
[ 7451.830105] Lustre: DEBUG MARKER: trevis-4vm3.trevis.hpdd.intel.com: executing set_default_debug vfstrace rpctrace dlmtrace neterror ha config ioctl super lfsck all 4
[ 7456.196503] Lustre: DEBUG MARKER: /usr/sbin/lctl mark trevis-4vm3.trevis.hpdd.intel.com: executing set_default_debug vfstrace rpctrace dlmtrace neterror ha config ioctl super lfsck all 4
[ 7456.388152] Lustre: DEBUG MARKER: trevis-4vm3.trevis.hpdd.intel.com: executing set_default_debug vfstrace rpctrace dlmtrace neterror ha config ioctl super lfsck all 4
[ 7460.757075] Lustre: DEBUG MARKER: /usr/sbin/lctl mark trevis-4vm3.trevis.hpdd.intel.com: executing set_default_debug vfstrace rpctrace dlmtrace neterror ha config ioctl super lfsck all 4
[ 7460.957519] Lustre: DEBUG MARKER: trevis-4vm3.trevis.hpdd.intel.com: executing set_default_debug vfstrace rpctrace dlmtrace neterror ha config ioctl super lfsck all 4
[ 7465.345849] Lustre: DEBUG MARKER: /usr/sbin/lctl mark trevis-4vm3.trevis.hpdd.intel.com: executing set_default_debug vfstrace rpctrace dlmtrace neterror ha config ioctl super lfsck all 4
[ 7465.538568] Lustre: DEBUG MARKER: trevis-4vm3.trevis.hpdd.intel.com: executing set_default_debug vfstrace rpctrace dlmtrace neterror ha config ioctl super lfsck all 4
[ 7469.992735] Lustre: DEBUG MARKER: /usr/sbin/lctl mark trevis-4vm3.trevis.hpdd.intel.com: executing set_default_debug vfstrace rpctrace dlmtrace neterror ha config ioctl super lfsck all 4
[ 7470.193712] Lustre: DEBUG MARKER: trevis-4vm3.trevis.hpdd.intel.com: executing set_default_debug vfstrace rpctrace dlmtrace neterror ha config ioctl super lfsck all 4
[ 7471.395975] Lustre: DEBUG MARKER: lctl get_param -n timeout
[ 7471.796323] Lustre: DEBUG MARKER: /usr/sbin/lctl mark Using TIMEOUT=20
[ 7471.989513] Lustre: DEBUG MARKER: Using TIMEOUT=20
[ 7472.150495] Lustre: DEBUG MARKER: lctl dl | grep &apos; IN osc &apos; 2&amp;gt;/dev/null | wc -l
[ 7472.495736] Lustre: DEBUG MARKER: /usr/sbin/lctl get_param -n osd-ldiskfs.lustre-MDT0000.quota_slave.enabled
[ 7473.143673] Lustre: DEBUG MARKER: /usr/sbin/lctl conf_param lustre.quota.mdt=ug3
[ 7473.296428] LustreError: 9740:0:(osd_handler.c:1689:osd_trans_create()) lustre-MDT0000: someone try to start transaction under readonly mode, should be disabled.
[ 7473.302031] CPU: 0 PID: 9740 Comm: llog_process_th Tainted: G OE ------------ 3.10.0-693.21.1.el7_lustre.x86_64 #1
[ 7473.307279] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2007
[ 7473.309907] Call Trace:
[ 7473.312287] [&amp;lt;ffffffff816ae7c8&amp;gt;] dump_stack+0x19/0x1b
[ 7473.314849] [&amp;lt;ffffffffc0d2ea9c&amp;gt;] osd_trans_create+0x5cc/0x610 [osd_ldiskfs]
[ 7473.317607] [&amp;lt;ffffffffc0877c71&amp;gt;] llog_write+0x91/0x3d0 [obdclass]
[ 7473.320207] [&amp;lt;ffffffffc0db012a&amp;gt;] mgs_modify_handler+0x36a/0x440 [mgs]
[ 7473.322805] [&amp;lt;ffffffffc08759c9&amp;gt;] llog_process_thread+0x839/0x1560 [obdclass]
[ 7473.325492] [&amp;lt;ffffffffc089fc19&amp;gt;] ? lprocfs_counter_add+0xf9/0x160 [obdclass]
[ 7473.328172] [&amp;lt;ffffffffc08770ff&amp;gt;] llog_process_thread_daemonize+0x9f/0xe0 [obdclass]
[ 7473.330884] [&amp;lt;ffffffffc0877060&amp;gt;] ? llog_backup+0x500/0x500 [obdclass]
[ 7473.333483] [&amp;lt;ffffffff810b4031&amp;gt;] kthread+0xd1/0xe0
[ 7473.335897] [&amp;lt;ffffffff810b3f60&amp;gt;] ? insert_kthread_work+0x40/0x40
[ 7473.338389] [&amp;lt;ffffffff816c0577&amp;gt;] ret_from_fork+0x77/0xb0
[ 7473.340792] [&amp;lt;ffffffff810b3f60&amp;gt;] ? insert_kthread_work+0x40/0x40
[ 7473.343238] LustreError: 9739:0:(mgs_llog.c:954:mgs_modify()) MGS: modify lustre/quota.mdt failed: rc = -30
[ 7473.345910] LustreError: 9739:0:(mgs_llog.c:1940:mgs_write_log_direct_all()) MGS: Can&apos;t modify llog lustre-MDT0000: rc = -30
[ 7473.348694] CPU: 1 PID: 9739 Comm: lctl Tainted: G OE ------------ 3.10.0-693.21.1.el7_lustre.x86_64 #1
[ 7473.351406] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2007
[ 7473.353790] Call Trace:
[ 7473.355798] [&amp;lt;ffffffff816ae7c8&amp;gt;] dump_stack+0x19/0x1b
[ 7473.358052] [&amp;lt;ffffffffc0d2ea9c&amp;gt;] osd_trans_create+0x5cc/0x610 [osd_ldiskfs]
[ 7473.360387] [&amp;lt;ffffffffc0877c71&amp;gt;] llog_write+0x91/0x3d0 [obdclass]
[ 7473.362665] [&amp;lt;ffffffffc0dad80e&amp;gt;] record_marker+0x15e/0x2b0 [mgs]
[ 7473.364843] [&amp;lt;ffffffffc0dae9f2&amp;gt;] mgs_write_log_direct+0xe2/0x2d0 [mgs]
[ 7473.367092] [&amp;lt;ffffffffc0dbd6cb&amp;gt;] mgs_write_log_direct_all+0x38b/0x640 [mgs]
[ 7473.369279] [&amp;lt;ffffffffc0dd06ea&amp;gt;] mgs_write_log_quota+0x2d7/0x31d [mgs]
[ 7473.371448] [&amp;lt;ffffffffc0dbe4bb&amp;gt;] mgs_write_log_param+0x5ab/0x1e30 [mgs]
[ 7473.373529] [&amp;lt;ffffffffc0dbfd87&amp;gt;] ? mgs_find_fsdb+0x47/0x70 [mgs]
[ 7473.375591] [&amp;lt;ffffffffc0dc2677&amp;gt;] ? mgs_find_or_make_fsdb+0x67/0x1c0 [mgs]
[ 7473.377614] [&amp;lt;ffffffffc0dc6d6c&amp;gt;] mgs_set_param+0xabc/0xd40 [mgs]
[ 7473.379604] [&amp;lt;ffffffffc0dac23a&amp;gt;] mgs_iocontrol+0xd2a/0xde0 [mgs]
[ 7473.381507] [&amp;lt;ffffffffc088aae3&amp;gt;] class_handle_ioctl+0x18d3/0x1de0 [obdclass]
[ 7473.383517] [&amp;lt;ffffffff811b1f16&amp;gt;] ? do_read_fault.isra.44+0xe6/0x130
[ 7473.385376] [&amp;lt;ffffffff812b72be&amp;gt;] ? security_capable+0x1e/0x20
[ 7473.387227] [&amp;lt;ffffffffc086f802&amp;gt;] obd_class_ioctl+0xd2/0x170 [obdclass]
[ 7473.389074] [&amp;lt;ffffffff81219e90&amp;gt;] do_vfs_ioctl+0x350/0x560
[ 7473.390832] [&amp;lt;ffffffff816bb521&amp;gt;] ? __do_page_fault+0x171/0x450
[ 7473.392525] [&amp;lt;ffffffff8121a141&amp;gt;] SyS_ioctl+0xa1/0xc0
[ 7473.394199] [&amp;lt;ffffffff816c0655&amp;gt;] ? system_call_after_swapgs+0xa2/0x146
[ 7473.395942] [&amp;lt;ffffffff816c0715&amp;gt;] system_call_fastpath+0x1c/0x21
[ 7473.397679] [&amp;lt;ffffffff816c0661&amp;gt;] ? system_call_after_swapgs+0xae/0x146
[ 7473.399422] LustreError: 9739:0:(mgs_llog.c:1948:mgs_write_log_direct_all()) MGS: writing log lustre-MDT0000: rc = -30
[ 7473.401661] CPU: 0 PID: 9741 Comm: llog_process_th Tainted: G OE ------------ 3.10.0-693.21.1.el7_lustre.x86_64 #1

&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;VVVVVVV DO NOT REMOVE LINES BELOW, Added by Maloo for auto-association VVVVVVV&lt;br/&gt;
 sanity test_802 - set mdt quota type failed&lt;/p&gt;</description>
                <environment></environment>
        <key id="51945">LU-10940</key>
            <summary>sanity test_802: set mdt quota type failed</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="6" iconUrl="https://jira.whamcloud.com/images/icons/statuses/closed.png" description="The issue is considered finished, the resolution is correct. Issues which are closed can be reopened.">Closed</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="hongchao.zhang">Hongchao Zhang</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Mon, 23 Apr 2018 20:33:44 +0000</created>
                <updated>Fri, 10 Aug 2018 19:33:18 +0000</updated>
                            <resolved>Fri, 10 Aug 2018 19:33:18 +0000</resolved>
                                    <version>Lustre 2.12.0</version>
                                    <fixVersion>Lustre 2.12.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>6</watches>
                                                                            <comments>
                            <comment id="227134" author="pjones" created="Wed, 2 May 2018 18:36:36 +0000"  >&lt;p&gt;Hongchao&lt;/p&gt;

&lt;p&gt;Could you please investigate?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="227135" author="adilger" created="Wed, 2 May 2018 18:38:45 +0000"  >&lt;blockquote&gt;
&lt;p&gt;This failure seems start showing on 2.11.50.51, b3738 on April 9, 2018&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;It probably makes sense to see which quota-related patches landed just before then. &lt;/p&gt;</comment>
                            <comment id="231030" author="jamesanunez" created="Sat, 28 Jul 2018 03:07:04 +0000"  >&lt;p&gt;sanity test 802 fails when we run the &#8216;full&#8217; test group, but passes for all other testing; review-ldiskfs, review-dne, etc. One difference between full and all other testing is, for &#8216;full&#8217; testing, we enable quotas for all test suites and for all other testing, we don&#8217;t enable quotas. &lt;/p&gt;

&lt;p&gt;For sanity test 802, we stop all servers and then mount the servers as read only. When we bring up the server, in read only mode, we try to reset quotas in setup_quota() and the following call to conf_param on the mgs fails.&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
2119         do_facet mgs $LCTL conf_param $FSNAME.quota.mdt=$QUOTA_TYPE ||
2120                 error &lt;span class=&quot;code-quote&quot;&gt;&quot;set mdt quota type failed&quot;&lt;/span&gt;
2121         do_facet mgs $LCTL conf_param $FSNAME.quota.ost=$QUOTA_TYPE ||
2122                 error &lt;span class=&quot;code-quote&quot;&gt;&quot;set ost quota type failed&quot;&lt;/span&gt;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;One question is, is Lustre behaving properly and not allowing calls to conf_param when a server is read-only or, more specifically, should we be able to set quotas by calling conf_param on a read-only server? &lt;/p&gt;</comment>
                            <comment id="231031" author="adilger" created="Sat, 28 Jul 2018 06:19:34 +0000"  >&lt;p&gt;Since enabling quota requires changes to the filesystem on the targets, it doesn&apos;t make sense to enable it on a read-only filesystem. &lt;/p&gt;</comment>
                            <comment id="231073" author="gerrit" created="Mon, 30 Jul 2018 16:21:53 +0000"  >&lt;p&gt;James Nunez (jnunez@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/32900&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/32900&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10940&quot; title=&quot;sanity test_802: set mdt quota type failed&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10940&quot;&gt;&lt;del&gt;LU-10940&lt;/del&gt;&lt;/a&gt; tests: skip sanity test 802 when quota enabled&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: acf177a62c538cf8517a697fe57a20f340de5538&lt;/p&gt;</comment>
                            <comment id="231742" author="gerrit" created="Thu, 9 Aug 2018 18:20:51 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/32900/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/32900/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10940&quot; title=&quot;sanity test_802: set mdt quota type failed&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10940&quot;&gt;&lt;del&gt;LU-10940&lt;/del&gt;&lt;/a&gt; tests: skip sanity test 802 when quota enabled&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: ddb3d0b61ded0b9507baa25de08a2d51af17b284&lt;/p&gt;</comment>
                            <comment id="231822" author="jamesanunez" created="Fri, 10 Aug 2018 19:33:18 +0000"  >&lt;p&gt;Patch landed to master&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzzw8f:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>