<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:25:56 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-9408] Client fails to mount with ZFS master (0.7.0) and Lustre master (2.9.56)</title>
                <link>https://jira.whamcloud.com/browse/LU-9408</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;ZFS: zfs-0.7.0-rc3-225-g7a25f08&lt;br/&gt;
SPL: spl-0.7.0-rc3-8-g481762f&lt;br/&gt;
Lustre: v2_9_56_0-11-gbfa524f&lt;/p&gt;

&lt;p&gt;This is a straightforward single MDS with MDT/MGT&#160;and an OSS with a single OST.&lt;br/&gt;
(also reproduced with split MDT and MGT on a slightly earlier ZFS).&lt;/p&gt;

&lt;p&gt;This setup works without problems with ldiskfs backing.&lt;/p&gt;

&lt;p&gt;Snippet from MDS log on initial mount:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Apr 26 14:01:18 ieel-mds04 kernel: SPL: Loaded module v0.7.0-rc3_8_g481762f
Apr 26 14:01:19 ieel-mds04 kernel: ZFS: Loaded module v0.7.0-rc3_225_g7a25f08, ZFS pool version 5000, ZFS filesystem version 5
...
Apr 26 14:20:05 ieel-mds04 kernel: SPL: using hostid 0x7e3a4ec9
Apr 26 14:20:52 ieel-mds04 kernel: Lustre: MGS: Connection restored to ec5ab9aa-e46a-19dd-47d3-1aa7d07fdc3f (at 0@lo)
Apr 26 14:20:52 ieel-mds04 kernel: Lustre: srv-scratch-MDT0001: No data found on store. Initialize space
Apr 26 14:20:52 ieel-mds04 kernel: Lustre: scratch-MDT0001: new disk, initializing
Apr 26 14:20:52 ieel-mds04 kernel: Lustre: scratch-MDT0001: Imperative Recovery not enabled, recovery window 300-900
Apr 26 14:20:52 ieel-mds04 kernel: LustreError: 21965:0:(osd_oi.c:497:osd_oid()) unsupported quota oid: 0x16
Apr 26 14:20:52 ieel-mds04 kernel: LustreError: 22330:0:(fid_handler.c:329:__seq_server_alloc_meta()) srv-scratch-MDT0001: Allocated super-sequence failed: rc = -115
Apr 26 14:20:52 ieel-mds04 kernel: LustreError: 22330:0:(fid_request.c:227:seq_client_alloc_seq()) cli-scratch-MDT0001: Can&apos;t allocate new meta-sequence,rc -115
Apr 26 14:20:52 ieel-mds04 kernel: LustreError: 22330:0:(fid_request.c:383:seq_client_alloc_fid()) cli-scratch-MDT0001: Can&apos;t allocate new sequence: rc = -115
Apr 26 14:20:52 ieel-mds04 kernel: LustreError: 22330:0:(lod_dev.c:419:lod_sub_recovery_thread()) scratch-MDT0001-osd getting update log failed: rc = -115
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;OST Log on initial mount:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Apr 26 14:20:56 ieel-oss03 kernel: SPL: Loaded module v0.7.0-rc3_8_g481762f
Apr 26 14:20:58 ieel-oss03 kernel: ZFS: Loaded module v0.7.0-rc3_225_g7a25f08, ZFS pool version 5000, ZFS filesystem version 5
Apr 26 14:21:08 ieel-oss03 kernel: SPL: using hostid 0x5d9bdb4b
Apr 26 14:25:01 ieel-oss03 kernel: LNet: HW nodes: 1, HW CPU cores: 2, npartitions: 1
Apr 26 14:25:01 ieel-oss03 kernel: alg: No test for adler32 (adler32-zlib)
Apr 26 14:25:01 ieel-oss03 kernel: alg: No test for crc32 (crc32-table)
Apr 26 14:25:01 ieel-oss03 kernel: Lustre: Lustre: Build Version: 2.9.56_11_gbfa524f
Apr 26 14:25:01 ieel-oss03 kernel: LNet: Added LNI 192.168.56.22@tcp [8/256/0/180]
Apr 26 14:25:01 ieel-oss03 kernel: LNet: Accept secure, port 988
Apr 26 14:25:02 ieel-oss03 kernel: Lustre: scratch-OST0000: new disk, initializing
Apr 26 14:25:02 ieel-oss03 kernel: Lustre: srv-scratch-OST0000: No data found on store. Initialize space
Apr 26 14:25:02 ieel-oss03 kernel: Lustre: scratch-OST0000: Imperative Recovery not enabled, recovery window 300-900
Apr 26 14:25:02 ieel-oss03 kernel: LustreError: 13214:0:(osd_oi.c:497:osd_oid()) unsupported quota oid: 0x16
Apr 26 14:25:07 ieel-oss03 kernel: Lustre: scratch-OST0000: Connection restored to scratch-MDT0001-mdtlov_UUID (at 192.168.56.13@tcp)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Client attempting to mount (messages):&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Apr 26 14:30:44 ieel-c03 kernel: LNet: HW CPU cores: 2, npartitions: 1
Apr 26 14:30:44 ieel-c03 kernel: alg: No test for adler32 (adler32-zlib)
Apr 26 14:30:44 ieel-c03 kernel: alg: No test for crc32 (crc32-table)
Apr 26 14:30:49 ieel-c03 kernel: sha512_ssse3: Using AVX optimized SHA-512 implementation
Apr 26 14:30:52 ieel-c03 kernel: Lustre: Lustre: Build Version: 2.8.0.51-1-PRISTINE-3.10.0-514.6.1.el7.x86_64
Apr 26 14:30:52 ieel-c03 kernel: LNet: Added LNI 192.168.56.32@tcp [8/256/0/180]
Apr 26 14:30:52 ieel-c03 kernel: LNet: Accept secure, port 988
Apr 26 14:30:52 ieel-c03 kernel: LustreError: 2336:0:(lmv_obd.c:553:lmv_check_connect()) scratch-clilmv-ffff88003b98d800: no target configured for index 0.
Apr 26 14:30:52 ieel-c03 kernel: LustreError: 2336:0:(llite_lib.c:265:client_common_fill_super()) cannot connect to scratch-clilmv-ffff88003b98d800: rc = -22
Apr 26 14:30:52 ieel-c03 kernel: LustreError: 2363:0:(lov_obd.c:922:lov_cleanup()) scratch-clilov-ffff88003b98d800: lov tgt 0 not cleaned! deathrow=0, lovrc=1
Apr 26 14:30:52 ieel-c03 kernel: Lustre: Unmounted scratch-client
Apr 26 14:30:52 ieel-c03 kernel: LustreError: 2336:0:(obd_mount.c:1426:lustre_fill_super()) Unable to mount  (-22)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Attached &lt;span class=&quot;nobr&quot;&gt;&lt;a href=&quot;https://jira.whamcloud.com/secure/attachment/26487/26487_logs.tbz2&quot; title=&quot;logs.tbz2 attached to LU-9408&quot;&gt;logs.tbz2&lt;sup&gt;&lt;img class=&quot;rendericon&quot; src=&quot;https://jira.whamcloud.com/images/icons/link_attachment_7.gif&quot; height=&quot;7&quot; width=&quot;7&quot; align=&quot;absmiddle&quot; alt=&quot;&quot; border=&quot;0&quot;/&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/span&gt; has debug_kernel dumps.&lt;/p&gt;</description>
                <environment></environment>
        <key id="45742">LU-9408</key>
            <summary>Client fails to mount with ZFS master (0.7.0) and Lustre master (2.9.56)</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="2" iconUrl="https://jira.whamcloud.com/images/icons/priorities/critical.svg">Critical</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="6">Not a Bug</resolution>
                                        <assignee username="wangshilong">Wang Shilong</assignee>
                                    <reporter username="utopiabound">Nathaniel Clark</reporter>
                        <labels>
                            <label>zfs</label>
                    </labels>
                <created>Wed, 26 Apr 2017 18:34:49 +0000</created>
                <updated>Wed, 17 May 2017 02:31:23 +0000</updated>
                            <resolved>Fri, 5 May 2017 17:44:52 +0000</resolved>
                                    <version>Lustre 2.10.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>7</watches>
                                                                            <comments>
                            <comment id="193666" author="adilger" created="Wed, 26 Apr 2017 19:21:24 +0000"  >&lt;p&gt;As Bob pointed out in chat, this is related to the recent project quota feature landing in patch &lt;a href=&quot;https://review.whamcloud.com/23947&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/23947&lt;/a&gt; &quot;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4017&quot; title=&quot;Add project quota support feature&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4017&quot;&gt;&lt;del&gt;LU-4017&lt;/del&gt;&lt;/a&gt; quota: add project quota support for Lustre&quot;.  It isn&apos;t clear why this is not failing our current ZFS tests with 0.6.5.9, but it definitely needs to be fixed.&lt;/p&gt;</comment>
                            <comment id="193729" author="pjones" created="Thu, 27 Apr 2017 04:20:27 +0000"  >&lt;p&gt;Wang Shilong&lt;/p&gt;

&lt;p&gt;Do you have any suggestions here?&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="193754" author="utopiabound" created="Thu, 27 Apr 2017 12:34:23 +0000"  >&lt;p&gt;Actually, I&apos;ve just tested this with ZFS/SPL 0.6.5.7 and I get the same error.  It must be something in my setup, but I&apos;m at a loss as to what.  ldiskfs works fine. The quota error, I think, is a red herring.  I think the issue is related to these lines:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Apr 27 08:32:26 ieel-mds03 kernel: LustreError: 5565:0:(fid_handler.c:329:__seq_server_alloc_meta()) srv-scratch-MDT0001: Allocated super-sequence failed: rc = -115
Apr 27 08:32:26 ieel-mds03 kernel: LustreError: 5565:0:(fid_request.c:227:seq_client_alloc_seq()) cli-scratch-MDT0001: Can&apos;t allocate new meta-sequence,rc -115
Apr 27 08:32:26 ieel-mds03 kernel: LustreError: 5565:0:(fid_request.c:383:seq_client_alloc_fid()) cli-scratch-MDT0001: Can&apos;t allocate new sequence: rc = -115
Apr 27 08:32:26 ieel-mds03 kernel: LustreError: 5565:0:(lod_dev.c:419:lod_sub_recovery_thread()) scratch-MDT0001-osd getting update log failed: rc = -115
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="193831" author="adilger" created="Thu, 27 Apr 2017 21:03:25 +0000"  >&lt;p&gt;The &lt;tt&gt;-115&lt;/tt&gt; = &lt;tt&gt;-EINPROGRESS&lt;/tt&gt; error means that the server can&apos;t perform the operation for some reason, but that the client should retry until it succeeds.  It is possible that the server code doesn&apos;t expect to see this, so it isn&apos;t retrying, which is something that we should fix.&lt;/p&gt;

&lt;p&gt;That said, it also isn&apos;t clear why the server would be return &lt;tt&gt;-EINPROGRESS&lt;/tt&gt; for something like sequence allocation, unless e.g. LFSCK is running and it can&apos;t look up the object(s) where the last-used sequence number is stored.&lt;/p&gt;

&lt;p&gt;It probably makes sense to run this with -1 debugging and find out where the &lt;tt&gt;-115&lt;/tt&gt; error is coming from, and then we can see what needs to be fixed.&lt;/p&gt;</comment>
                            <comment id="193857" author="wangshilong" created="Fri, 28 Apr 2017 01:57:44 +0000"  >&lt;p&gt;Actually, I could not reproduce the problem in my local setup.&lt;/p&gt;</comment>
                            <comment id="193952" author="utopiabound" created="Fri, 28 Apr 2017 20:59:32 +0000"  >&lt;p&gt;I turned on tracing and got this back:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;40000000:00000001:1.0:1493296626.206119:0:5772:0:(fid_request.c:350:seq_client_alloc_fid()) Process entered
40000000:00000001:1.0:1493296626.206120:0:5772:0:(fid_request.c:219:seq_client_alloc_seq()) Process entered
40000000:00000001:1.0:1493296626.206122:0:5772:0:(fid_request.c:180:seq_client_alloc_meta()) Process entered
40000000:00000001:1.0:1493296626.206123:0:5772:0:(fid_handler.c:351:seq_server_alloc_meta()) Process entered
40000000:00000001:1.0:1493296626.206124:0:5772:0:(fid_handler.c:322:__seq_server_alloc_meta()) Process entered
40000000:00000001:1.0:1493296626.206126:0:5772:0:(fid_handler.c:277:seq_server_check_and_alloc_super()) Process entered
40000000:00000001:1.0:1493296626.206127:0:5772:0:(fid_request.c:148:seq_client_alloc_super()) Process entered
40000000:00000001:1.0:1493296626.206129:0:5772:0:(fid_request.c:165:seq_client_alloc_super()) Process leaving (rc=18446744073709551501 : -115 : ffffffffffffff8d)
40000000:00080000:1.0:1493296626.206131:0:5772:0:(fid_handler.c:290:seq_server_check_and_alloc_super()) srv-scratch-MDT0001: Can&apos;t allocate super-sequence: rc -115
40000000:00000001:1.0:1493296626.206132:0:5772:0:(fid_handler.c:291:seq_server_check_and_alloc_super()) Process leaving (rc=18446744073709551501 : -115 : ffffffffffffff8d)
40000000:00020000:1.0:1493296626.206134:0:5772:0:(fid_handler.c:329:__seq_server_alloc_meta()) srv-scratch-MDT0001: Allocated super-sequence failed: rc = -115
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Which means in &lt;tt&gt;lustre/fid/fid_request.c::seq_client_alloc_super()&lt;/tt&gt;:&lt;br/&gt;
In &lt;tt&gt;struct lu_client_seq *seq&lt;/tt&gt; the sequence server &lt;tt&gt;seq-&amp;gt;lcs_srv&lt;/tt&gt; and sequence export &lt;tt&gt;seq-&amp;gt;lcs_exp&lt;/tt&gt; are both &lt;tt&gt;NULL&lt;/tt&gt;.&lt;/p&gt;</comment>
                            <comment id="193992" author="ihara" created="Sun, 30 Apr 2017 02:13:58 +0000"  >&lt;p&gt;Nathaniel,&lt;/p&gt;

&lt;p&gt;Just want to confirm. This is the exact Lustre version you are testing below, correct?&lt;/p&gt;

&lt;p&gt;OSS/MDS&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Apr 26 14:25:01 ieel-oss03 kernel: Lustre: Lustre: Build Version: 2.9.56_11_gbfa524f
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Client&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Apr 26 14:30:52 ieel-c03 kernel: Lustre: Lustre: Build Version: 2.8.0.51-1-PRISTINE-3.10.0-514.6.1.el7.x86_64
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;We have tested latest master (both server and client) with zfs-0.6.5 and didn&apos;t reproduce your problem. But, our tested client version might be different from your setup.&lt;br/&gt;
I&apos;m wondering whether there is some version interoperability issue here. We will investigate this again.&lt;/p&gt;</comment>
                            <comment id="194077" author="utopiabound" created="Mon, 1 May 2017 20:41:53 +0000"  >&lt;p&gt;Yes that&apos;s the exact version for OSS/MDS.&lt;/p&gt;

&lt;p&gt;The client version is &lt;tt&gt;lustre-client-2.9.56_11_gbfa524f-1.el7.centos.x86_64&lt;/tt&gt;.&lt;/p&gt;

&lt;p&gt;I&apos;m going to retest with latest master.&lt;/p&gt;</comment>
                            <comment id="194098" author="ihara" created="Mon, 1 May 2017 23:34:24 +0000"  >&lt;p&gt;This confused me. As far as I read your original description, client version is &quot;2.8.0.51-1&quot;, not same version of OSS/MDS (2.9.56_11_gbfa524f).&lt;/p&gt;</comment>
                            <comment id="194128" author="utopiabound" created="Tue, 2 May 2017 10:16:12 +0000"  >&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/secure/ViewProfile.jspa?name=ihara&quot; class=&quot;user-hover&quot; rel=&quot;ihara&quot;&gt;ihara&lt;/a&gt;,&lt;/p&gt;

&lt;p&gt;Oh, you are correct, I&apos;ve updated the client code to match the OSS/MDS but I didn&apos;t unload the old modules.&lt;/p&gt;

&lt;p&gt;Using the quoted version above, I get the same result.  Works with ldiskfs, does not work with zfs.&lt;/p&gt;

&lt;p&gt;I&apos;ve got two setups running the same version of Lustre: 2.9.56_11_gbfa524f&lt;br/&gt;
ZFS:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@ieel-mds03 ~]# lctl dl
  0 UP osd-zfs scratch-MDT0001-osd scratch-MDT0001-osd_UUID 8
  1 UP mgs MGS MGS 7
  2 UP mgc MGC192.168.56.12@tcp 3e0eccdf-f828-338f-d3fc-2e717a638014 5
  3 UP mds MDS MDS_uuid 3
  4 UP lod scratch-MDT0001-mdtlov scratch-MDT0001-mdtlov_UUID 4
  5 UP mdt scratch-MDT0001 scratch-MDT0001_UUID 5
  6 UP mdd scratch-MDD0001 scratch-MDD0001_UUID 4
  7 UP osp scratch-OST0000-osc-MDT0001 scratch-MDT0001-mdtlov_UUID 5
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;ldiskfs:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@ieel-mds04 ~]# lctl dl
  0 UP osd-ldiskfs scratch-MDT0000-osd scratch-MDT0000-osd_UUID 10
  1 UP mgs MGS MGS 7
  2 UP mgc MGC192.168.56.13@tcp 0e5f0018-97cf-c2a4-4817-f51b7410ec7b 5
  3 UP mds MDS MDS_uuid 3
  4 UP lod scratch-MDT0000-mdtlov scratch-MDT0000-mdtlov_UUID 4
  5 UP mdt scratch-MDT0000 scratch-MDT0000_UUID 13
  6 UP mdd scratch-MDD0000 scratch-MDD0000_UUID 4
  7 UP qmt scratch-QMT0000 scratch-QMT0000_UUID 4
  8 UP osp scratch-OST0000-osc-MDT0000 scratch-MDT0000-mdtlov_UUID 5
  9 UP osp scratch-OST0001-osc-MDT0000 scratch-MDT0000-mdtlov_UUID 5
 10 UP lwp scratch-MDT0000-lwp-MDT0000 scratch-MDT0000-lwp-MDT0000_UUID 5
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Should qmt be missing from ZFS?&lt;/p&gt;</comment>
                            <comment id="194226" author="wangshilong" created="Wed, 3 May 2017 01:22:34 +0000"  >&lt;p&gt;Hi Nathaniel Clark,&lt;/p&gt;

&lt;p&gt;   could you show me your exact mkfs options, so that i could reproduce here.&lt;/p&gt;

&lt;p&gt;Thanks,&lt;br/&gt;
Shilong&lt;/p&gt;</comment>
                            <comment id="194294" author="utopiabound" created="Wed, 3 May 2017 15:33:22 +0000"  >&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;zpool create -f -o ashift=12 -o cachefile=none mdt00 /dev/sdc /dev/sdd
mkfs.lustre --reformat --backfstype=zfs --mgs --mdt --index=1 --fsname=scratch mdt00/mdt
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="194305" author="ihara" created="Wed, 3 May 2017 16:17:52 +0000"  >&lt;p&gt;Ah, are you sure does &quot;--index=1&quot; work without --index=0?&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;Apr 26 14:20:52 ieel-mds04 kernel: Lustre: scratch-MDT0001: new disk, initializing&lt;br/&gt;
Apr 26 14:20:52 ieel-mds04 kernel: Lustre: scratch-MDT0001: Imperative Recovery not enabled, recovery window 300-900&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;it seems you&apos;ve also set up --index=1 without --index=0 in your original description.&lt;/p&gt;

&lt;p&gt;However, you have --index=0 for MDT in your ldiskfs setup.&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;  8 UP osp scratch-OST0000-osc-MDT0000 scratch-MDT0000-mdtlov_UUID 5&lt;br/&gt;
  9 UP osp scratch-OST0001-osc-MDT0000 scratch-MDT0000-mdtlov_UUID 5&lt;/p&gt;&lt;/blockquote&gt;</comment>
                            <comment id="194656" author="utopiabound" created="Fri, 5 May 2017 11:58:02 +0000"  >&lt;p&gt;If I format mdt00 with --index=0, everything works just fine.  This could be closed as &quot;not a bug&quot; i guess, though it&apos;s kind of a strange one to debug.&lt;/p&gt;</comment>
                            <comment id="194717" author="pjones" created="Fri, 5 May 2017 17:44:52 +0000"  >&lt;p&gt;Thanks Nathaniel&lt;/p&gt;</comment>
                            <comment id="196097" author="adilger" created="Tue, 16 May 2017 22:58:31 +0000"  >&lt;p&gt;It would be good to get a patch to quiet the spurious &quot;&lt;tt&gt;osd_oid()) unsupported quota oid: 0x16&lt;/tt&gt;&quot; message at startup, since even I find that confusing and wonder whether there is something wrong.  We know this is for project quota, which isn&apos;t supported in ZFS yet.&lt;/p&gt;</comment>
                            <comment id="196112" author="wangshilong" created="Wed, 17 May 2017 02:20:34 +0000"  >&lt;p&gt;Andreas, Fan Yong is working on Project quota for ZFS, i think that messages will be removed too with zfs project quota supported.&lt;/p&gt;</comment>
                            <comment id="196113" author="yong.fan" created="Wed, 17 May 2017 02:31:23 +0000"  >&lt;p&gt;The osd_oid() has been totally removed via the patch:&lt;br/&gt;
&lt;a href=&quot;https://review.whamcloud.com/#/c/27093/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/#/c/27093/&lt;/a&gt;&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="35896">LU-7991</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                            <attachment id="26487" name="logs.tbz2" size="15080" author="utopiabound" created="Wed, 26 Apr 2017 18:32:41 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzzbb3:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>