<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:41:45 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-11193] lsnapshot mount fails with DNE</title>
                <link>https://jira.whamcloud.com/browse/LU-11193</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for sarah &amp;lt;sarah@whamcloud.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://testing.whamcloud.com/test_sets/44e5d976-9097-11e8-a9f7-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/44e5d976-9097-11e8-a9f7-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;test_1a failed with the following error:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;(5) Fail to mount lss_1a_0
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;server/client: lustre-master tag-2.11.53 ZFS DNE&lt;/p&gt;

&lt;p&gt;VVVVVVV DO NOT REMOVE LINES BELOW, Added by Maloo for auto-association VVVVVVV&lt;br/&gt;
 sanity-lsnapshot test_1a - (5) Fail to mount lss_1a_0&lt;/p&gt;</description>
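<!--
The failing sequence from the test log (quoted in the comments below) can be
reproduced by hand. A minimal sketch, assuming a ZFS-backed filesystem named
"lustre" with the snapshot name used by this run; the commands are taken
verbatim from the log and are run on the MGS node:

    # create a snapshot of every target in the filesystem
    /usr/sbin/lctl snapshot_create -F lustre -n lss_1a_0

    # confirm the snapshot exists
    /usr/sbin/lctl snapshot_list -F lustre -n lss_1a_0 -d

    # mount it read-only; with DNE (multiple MDTs) this is the step
    # that fails with "Read-only file system"
    /usr/sbin/lctl snapshot_mount -F lustre -n lss_1a_0
-->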
                <environment></environment>
        <key id="52858">LU-11193</key>
            <summary>lsnapshot mount fails with DNE</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="utopiabound">Nathaniel Clark</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                            <label>DNE</label>
                            <label>zfs</label>
                    </labels>
                <created>Mon, 30 Jul 2018 17:15:12 +0000</created>
                <updated>Sat, 15 Dec 2018 18:06:18 +0000</updated>
                            <resolved>Tue, 23 Oct 2018 05:36:19 +0000</resolved>
                                    <version>Lustre 2.12.0</version>
                                    <fixVersion>Lustre 2.12.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>5</watches>
                    <comments>
                            <comment id="231077" author="sarah" created="Mon, 30 Jul 2018 17:22:00 +0000"  >&lt;p&gt;test log&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;== sanity-lsnapshot test 1a: mount/umount lustre snapshot ============================================ 01:59:42 (1532483982)
Create lss_1a_0
CMD: trevis-3vm4 /usr/sbin/lctl snapshot_create -F lustre -n lss_1a_0
Check whether mounted (1)
CMD: trevis-3vm4 /usr/sbin/lctl snapshot_list -F lustre -n lss_1a_0 -d
Mount lss_1a_0
CMD: trevis-3vm4 /usr/sbin/lctl snapshot_mount -F lustre -n lss_1a_0
trevis-3vm4: mount.lustre: mount lustre-mdt2/mdt2@lss_1a_0 at /mnt/lss_1a_0_MDT0001 failed: Read-only file system
trevis-3vm4: mount.lustre: mount lustre-mdt3/mdt3@lss_1a_0 at /mnt/lss_1a_0_MDT0002 failed: Read-only file system
trevis-3vm4: mount.lustre: mount lustre-mdt4/mdt4@lss_1a_0 at /mnt/lss_1a_0_MDT0003 failed: Read-only file system
trevis-3vm4: 3 of 12 pieces of the snapshot lss_1a_0 can&apos;t be mounted: Read-only file system
CMD: trevis-3vm4 cat /var/log/lsnapshot.log
Wed Jul 25 01:59:15 2018 (32588:jt_snapshot_create:1475:lustre:ssh): Create snapshot lss_0_0 successfully with comment &amp;lt;(null)&amp;gt;, barrier &amp;lt;enable&amp;gt;, timeout &amp;lt;30&amp;gt;
Wed Jul 25 01:59:24 2018 (1080:jt_snapshot_create:1475:lustre:ssh): Create snapshot lss_0_1 successfully with comment &amp;lt;(null)&amp;gt;, barrier &amp;lt;disable&amp;gt;, timeout &amp;lt;-1&amp;gt;
Wed Jul 25 01:59:30 2018 (2042:jt_snapshot_create:1475:lustre:ssh): Create snapshot lss_0_2 successfully with comment &amp;lt;This is test_0&amp;gt;, barrier &amp;lt;enable&amp;gt;, timeout &amp;lt;30&amp;gt;
Wed Jul 25 01:59:38 2018 (2792:jt_snapshot_create:1475:lustre:ssh): Create snapshot lss_0_3 successfully with comment &amp;lt;Another one&amp;gt;, barrier &amp;lt;enable&amp;gt;, timeout &amp;lt;30&amp;gt;
Wed Jul 25 01:59:40 2018 (3484:jt_snapshot_create:1468:lustre:ssh): Can&apos;t create snapshot lss_0_0 with comment &amp;lt;(null)&amp;gt; barrier &amp;lt;enable&amp;gt;, timeout &amp;lt;30&amp;gt;: -17
Wed Jul 25 01:59:49 2018 (3927:jt_snapshot_create:1475:lustre:ssh): Create snapshot lss_1a_0 successfully with comment &amp;lt;(null)&amp;gt;, barrier &amp;lt;enable&amp;gt;, timeout &amp;lt;30&amp;gt;
Wed Jul 25 02:00:00 2018 (5079:snapshot_mount_target:2225:lustre:ssh): Can&apos;t execute &quot;ssh trevis-3vm5 &apos;PATH=&apos;/usr/lib64/qt-3.3/bin:/usr/lib64/compat-openmpi16/bin:/usr/bin:/bin:/usr/lib64/lustre/utils:/usr/lib64/lustre/tests:/sbin:/usr/sbin&apos; zpool import -d /dev/lvm-Role_MDS lustre-mdt2 &amp;gt; /dev/null 2&amp;gt;&amp;amp;1; mkdir -p /mnt/lss_1a_0_MDT0001 &amp;amp;&amp;amp; mount -t lustre -o rdonly_dev lustre-mdt2/mdt2@lss_1a_0 /mnt/lss_1a_0_MDT0001&apos;&quot; on the target (trevis-3vm5:2:1): rc = -30
Wed Jul 25 02:00:00 2018 (5080:snapshot_mount_target:2225:lustre:ssh): Can&apos;t execute &quot;ssh trevis-3vm4 &apos;PATH=&apos;/usr/lib64/qt-3.3/bin:/usr/lib64/compat-openmpi16/bin:/usr/bin:/bin:/usr/lib64/lustre/utils:/usr/lib64/lustre/tests:/sbin:/usr/sbin&apos; zpool import -d /dev/lvm-Role_MDS lustre-mdt3 &amp;gt; /dev/null 2&amp;gt;&amp;amp;1; mkdir -p /mnt/lss_1a_0_MDT0002 &amp;amp;&amp;amp; mount -t lustre -o rdonly_dev lustre-mdt3/mdt3@lss_1a_0 /mnt/lss_1a_0_MDT0002&apos;&quot; on the target (trevis-3vm4:2:2): rc = -30
Wed Jul 25 02:00:00 2018 (5081:snapshot_mount_target:2225:lustre:ssh): Can&apos;t execute &quot;ssh trevis-3vm5 &apos;PATH=&apos;/usr/lib64/qt-3.3/bin:/usr/lib64/compat-openmpi16/bin:/usr/bin:/bin:/usr/lib64/lustre/utils:/usr/lib64/lustre/tests:/sbin:/usr/sbin&apos; zpool import -d /dev/lvm-Role_MDS lustre-mdt4 &amp;gt; /dev/null 2&amp;gt;&amp;amp;1; mkdir -p /mnt/lss_1a_0_MDT0003 &amp;amp;&amp;amp; mount -t lustre -o rdonly_dev lustre-mdt4/mdt4@lss_1a_0 /mnt/lss_1a_0_MDT0003&apos;&quot; on the target (trevis-3vm5:2:3): rc = -30
Wed Jul 25 02:00:08 2018 (4843:jt_snapshot_mount:2400:lustre:ssh): Can&apos;t mount snapshot lss_1a_0: -30
 sanity-lsnapshot test_1a: @@@@@@ FAIL: (5) Fail to mount lss_1a_0
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;MDS dmesg&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[43846.721497] Lustre: DEBUG MARKER: dmesg
[43847.506346] Lustre: DEBUG MARKER: /usr/sbin/lctl mark == sanity-lsnapshot test 1a: mount\/umount lustre snapshot ============================================ 01:59:42 \(1532483982\)
[43847.729856] Lustre: DEBUG MARKER: == sanity-lsnapshot test 1a: mount/umount lustre snapshot ============================================ 01:59:42 (1532483982)
[43848.191021] Lustre: DEBUG MARKER: /usr/sbin/lctl snapshot_create -F lustre -n lss_1a_0
[43853.437578] Lustre: DEBUG MARKER: /usr/sbin/lctl snapshot_list -F lustre -n lss_1a_0 -d
[43861.501650] Lustre: DEBUG MARKER: /usr/sbin/lctl snapshot_mount -F lustre -n lss_1a_0
[43863.279517] Lustre: 30b6b646-MDT0000: Imperative Recovery enabled, recovery window shrunk from 60-180 down to 60-180
[43864.230307] CPU: 1 PID: 5175 Comm: mount.lustre Kdump: loaded Tainted: P        W  OE  ------------   3.10.0-862.3.2.el7_lustre.x86_64 #1
[43864.232025] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
[43864.232822] Call Trace:
[43864.233331]  [&amp;lt;ffffffffac90e78e&amp;gt;] dump_stack+0x19/0x1b
[43864.234075]  [&amp;lt;ffffffffc133a63a&amp;gt;] osd_trans_create+0x3ca/0x410 [osd_zfs]
[43864.235198]  [&amp;lt;ffffffffc0e823a4&amp;gt;] llog_destroy+0x1f4/0x3f0 [obdclass]
[43864.235937]  [&amp;lt;ffffffffc0e8ab76&amp;gt;] llog_cat_process_common+0x396/0x420 [obdclass]
[43864.236698]  [&amp;lt;ffffffffc0e8bd67&amp;gt;] llog_cat_reverse_process_cb+0x57/0x200 [obdclass]
[43864.237722]  [&amp;lt;ffffffffc0e8710c&amp;gt;] llog_reverse_process+0x38c/0xaa0 [obdclass]
[43864.238542]  [&amp;lt;ffffffffc0e8bd10&amp;gt;] ? llog_cat_size_cb+0x210/0x210 [obdclass]
[43864.239292]  [&amp;lt;ffffffffc0e87e29&amp;gt;] llog_cat_reverse_process+0x179/0x270 [obdclass]
[43864.240077]  [&amp;lt;ffffffffc0e82675&amp;gt;] ? llog_init_handle+0xd5/0x9b0 [obdclass]
[43864.241018]  [&amp;lt;ffffffffc0e83b88&amp;gt;] ? llog_open_create+0x78/0x320 [obdclass]
[43864.241961]  [&amp;lt;ffffffffc1647800&amp;gt;] ? mdd_key_init+0xd0/0xd0 [mdd]
[43864.242612]  [&amp;lt;ffffffffc164ccdc&amp;gt;] mdd_changelog_llog_init+0x75c/0xbc0 [mdd]
[43864.243495]  [&amp;lt;ffffffffac26814e&amp;gt;] ? kvm_clock_get_cycles+0x1e/0x20
[43864.244276]  [&amp;lt;ffffffffc164db08&amp;gt;] mdd_prepare+0x2b8/0x13b0 [mdd]
[43864.245190]  [&amp;lt;ffffffffc14e0177&amp;gt;] mdt_prepare+0x57/0x3b0 [mdt]
[43864.246099]  [&amp;lt;ffffffffc0ef4138&amp;gt;] server_start_targets+0x2298/0x2a30 [obdclass]
[43864.246996]  [&amp;lt;ffffffffc0ec1ca0&amp;gt;] ? class_config_dump_handler+0x7e0/0x7e0 [obdclass]
[43864.248055]  [&amp;lt;ffffffffc0ef59cc&amp;gt;] server_fill_super+0x10fc/0x18c0 [obdclass]
[43864.249055]  [&amp;lt;ffffffffc0d79eb7&amp;gt;] ? libcfs_debug_msg+0x57/0x80 [libcfs]
[43864.249810]  [&amp;lt;ffffffffc0ecb578&amp;gt;] lustre_fill_super+0x328/0x950 [obdclass]
[43864.250620]  [&amp;lt;ffffffffc0ecb250&amp;gt;] ? lustre_common_put_super+0x270/0x270 [obdclass]
[43864.251465]  [&amp;lt;ffffffffac41ef3f&amp;gt;] mount_nodev+0x4f/0xb0
[43864.252040]  [&amp;lt;ffffffffc0ec36c8&amp;gt;] lustre_mount+0x38/0x60 [obdclass]
[43864.252695]  [&amp;lt;ffffffffac41fabe&amp;gt;] mount_fs+0x3e/0x1b0
[43864.253277]  [&amp;lt;ffffffffac43d097&amp;gt;] vfs_kern_mount+0x67/0x110
[43864.253945]  [&amp;lt;ffffffffac43f6bf&amp;gt;] do_mount+0x1ef/0xce0
[43864.254571]  [&amp;lt;ffffffffac3f77ac&amp;gt;] ? kmem_cache_alloc_trace+0x3c/0x200
[43864.255251]  [&amp;lt;ffffffffac4404f3&amp;gt;] SyS_mount+0x83/0xd0
[43864.255810]  [&amp;lt;ffffffffac92082f&amp;gt;] system_call_fastpath+0x1c/0x21
[43864.256455]  [&amp;lt;ffffffffac92077b&amp;gt;] ? system_call_after_swapgs+0xc8/0x160
[43864.257216] Lustre: 5175:0:(llog_cat.c:855:llog_cat_process_common()) 30b6b646-MDD0002: can&apos;t destroy empty log [0x26:0x1:0x0]: rc = -30
[43864.262501] CPU: 1 PID: 5175 Comm: mount.lustre Kdump: loaded Tainted: P        W  OE  ------------   3.10.0-862.3.2.el7_lustre.x86_64 #1
[43864.263901] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
[43864.264634] Call Trace:
[43864.265050]  [&amp;lt;ffffffffac90e78e&amp;gt;] dump_stack+0x19/0x1b
[43864.265782]  [&amp;lt;ffffffffc133a63a&amp;gt;] osd_trans_create+0x3ca/0x410 [osd_zfs]
[43864.266617]  [&amp;lt;ffffffffc0e8472c&amp;gt;] llog_cancel_rec+0x1bc/0x850 [obdclass]
[43864.267378]  [&amp;lt;ffffffffc0e8b02b&amp;gt;] llog_cat_cleanup+0xdb/0x380 [obdclass]
[43864.268247]  [&amp;lt;ffffffffc0e8beb5&amp;gt;] llog_cat_reverse_process_cb+0x1a5/0x200 [obdclass]
[43864.269074]  [&amp;lt;ffffffffc0e8710c&amp;gt;] llog_reverse_process+0x38c/0xaa0 [obdclass]
[43864.269811]  [&amp;lt;ffffffffc0e8bd10&amp;gt;] ? llog_cat_size_cb+0x210/0x210 [obdclass]
[43864.270616]  [&amp;lt;ffffffffc0e87e29&amp;gt;] llog_cat_reverse_process+0x179/0x270 [obdclass]
[43864.271403]  [&amp;lt;ffffffffc0e82675&amp;gt;] ? llog_init_handle+0xd5/0x9b0 [obdclass]
[43864.272149]  [&amp;lt;ffffffffc0e83b88&amp;gt;] ? llog_open_create+0x78/0x320 [obdclass]
[43864.272900]  [&amp;lt;ffffffffc1647800&amp;gt;] ? mdd_key_init+0xd0/0xd0 [mdd]
[43864.273523]  [&amp;lt;ffffffffc164ccdc&amp;gt;] mdd_changelog_llog_init+0x75c/0xbc0 [mdd]
[43864.274262]  [&amp;lt;ffffffffac26814e&amp;gt;] ? kvm_clock_get_cycles+0x1e/0x20
[43864.275026]  [&amp;lt;ffffffffc164db08&amp;gt;] mdd_prepare+0x2b8/0x13b0 [mdd]
[43864.275667]  [&amp;lt;ffffffffc14e0177&amp;gt;] mdt_prepare+0x57/0x3b0 [mdt]
[43864.276303]  [&amp;lt;ffffffffc0ef4138&amp;gt;] server_start_targets+0x2298/0x2a30 [obdclass]
[43864.277073]  [&amp;lt;ffffffffc0ec1ca0&amp;gt;] ? class_config_dump_handler+0x7e0/0x7e0 [obdclass]
[43864.277930]  [&amp;lt;ffffffffc0ef59cc&amp;gt;] server_fill_super+0x10fc/0x18c0 [obdclass]
[43864.278770]  [&amp;lt;ffffffffc0d79eb7&amp;gt;] ? libcfs_debug_msg+0x57/0x80 [libcfs]
[43864.279487]  [&amp;lt;ffffffffc0ecb578&amp;gt;] lustre_fill_super+0x328/0x950 [obdclass]
[43864.280216]  [&amp;lt;ffffffffc0ecb250&amp;gt;] ? lustre_common_put_super+0x270/0x270 [obdclass]
[43864.281009]  [&amp;lt;ffffffffac41ef3f&amp;gt;] mount_nodev+0x4f/0xb0
[43864.281560]  [&amp;lt;ffffffffc0ec36c8&amp;gt;] lustre_mount+0x38/0x60 [obdclass]
[43864.282417]  [&amp;lt;ffffffffac41fabe&amp;gt;] mount_fs+0x3e/0x1b0
[43864.283041]  [&amp;lt;ffffffffac43d097&amp;gt;] vfs_kern_mount+0x67/0x110
[43864.283743]  [&amp;lt;ffffffffac43f6bf&amp;gt;] do_mount+0x1ef/0xce0
[43864.284386]  [&amp;lt;ffffffffac3f77ac&amp;gt;] ? kmem_cache_alloc_trace+0x3c/0x200
[43864.285062]  [&amp;lt;ffffffffac4404f3&amp;gt;] SyS_mount+0x83/0xd0
[43864.285590]  [&amp;lt;ffffffffac92082f&amp;gt;] system_call_fastpath+0x1c/0x21
[43864.286217]  [&amp;lt;ffffffffac92077b&amp;gt;] ? system_call_after_swapgs+0xc8/0x160
[43864.287052] LustreError: 5175:0:(mdd_device.c:501:mdd_changelog_llog_init()) 30b6b646-MDD0002: changelog user init failed: rc = -30
[43864.289607] LustreError: 5175:0:(mdd_device.c:588:mdd_changelog_init()) 30b6b646-MDD0002: changelog setup during init failed: rc = -30
[43864.290911] LustreError: 5175:0:(mdd_device.c:1256:mdd_prepare()) 30b6b646-MDD0002: failed to initialize changelog: rc = -30
[43864.292143] LustreError: 5175:0:(obd_mount_server.c:1936:server_fill_super()) Unable to start targets: -30
[43864.297888] LustreError: 5248:0:(llog_osd.c:262:llog_osd_read_header()) 30b6b646-MDT0000-osp-MDT0002: bad log  [0x200000402:0x1:0x0] header magic: 0x0 (expected 0x10645539)
[43864.299573] LustreError: 5248:0:(lod_dev.c:427:lod_sub_recovery_thread()) 30b6b646-MDT0000-osp-MDT0002 get update log failed: rc = -5
[43864.312054] LustreError: 5249:0:(lod_sub_object.c:934:lod_sub_prep_llog()) 30b6b646-MDT0002-mdtlov: can&apos;t get id from catalogs: rc = -5
[43864.313397] LustreError: 5249:0:(lod_dev.c:427:lod_sub_recovery_thread()) 30b6b646-MDT0001-osp-MDT0002 get update log failed: rc = -5
[43864.319552] LustreError: 5250:0:(lod_sub_object.c:934:lod_sub_prep_llog()) 30b6b646-MDT0002-mdtlov: can&apos;t get id from catalogs: rc = -5
[43864.320962] LustreError: 5250:0:(lod_dev.c:427:lod_sub_recovery_thread()) 30b6b646-MDT0003-osp-MDT0002 get update log failed: rc = -5
[43864.427856] LustreError: 5175:0:(obd_mount.c:1599:lustre_fill_super()) Unable to mount  (-30)
[43870.310896] LustreError: 32242:0:(ldlm_lib.c:3235:target_bulk_io()) @@@ timeout on bulk WRITE after 6+0s  req@ffff91d455464c50 x1606880080163600/t0(0) o1000-&amp;gt;30b6b646-MDT0002-mdtlov_UUID@0@lo:106/0 lens 336/33016 e 0 to 0 dl 1532484006 ref 1 fl Interpret:/0/0 rc 0/0
[43873.074762] Lustre: DEBUG MARKER: cat /var/log/lsnapshot.log
[43873.482785] Lustre: DEBUG MARKER: /usr/sbin/lctl mark  sanity-lsnapshot test_1a: @@@@@@ FAIL: \(5\) Fail to mount lss_1a_0 
[43873.706267] Lustre: DEBUG MARKER: sanity-lsnapshot test_1a: @@@@@@ FAIL: (5) Fail to mount lss_1a_0
[43873.972000] Lustre: DEBUG MARKER: /usr/sbin/lctl dk &amp;gt; /autotest/trevis/2018-07-24/lustre-master-el7-x86_64--full--1_4_1__3766___00d2c6de-3162-4c33-8dca-239543c570b0/sanity-lsnapshot.test_1a.debug_log.$(hostname -s).1532484009.log;
         dmesg &amp;gt; /autotest/trevis/2018-07-24/lustre-master
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;The first failure seems to be on 2018-05-30, with 2.11.52.40&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/9b618996-6485-11e8-abc3-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/9b618996-6485-11e8-abc3-52540065bddc&lt;/a&gt;&lt;/p&gt;</comment>
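<!--
Per the lsnapshot.log excerpt in the comment above, lctl snapshot_mount imports
each target's zpool over ssh and mounts the snapshot dataset read-only. A
minimal sketch of the per-target step, using the pool/dataset names from this
run (rc = -30 in the log is -EROFS, "Read-only file system"):

    # import the pool backing MDT0001, then mount its snapshot read-only
    zpool import -d /dev/lvm-Role_MDS lustre-mdt2
    mkdir -p /mnt/lss_1a_0_MDT0001
    mount -t lustre -o rdonly_dev lustre-mdt2/mdt2@lss_1a_0 /mnt/lss_1a_0_MDT0001

The MDS stack traces show where the EROFS comes from: during mount, changelog
setup (mdd_changelog_llog_init) tries to destroy and cancel llog records, i.e.
it writes to a device that was mounted with rdonly_dev.
-->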
                            <comment id="232870" author="pjones" created="Fri, 31 Aug 2018 22:04:35 +0000"  >&lt;p&gt;Nathaniel&lt;/p&gt;

&lt;p&gt;Can you please investigate?&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="233172" author="utopiabound" created="Fri, 7 Sep 2018 13:05:07 +0000"  >&lt;p&gt;First failure seems to be &lt;a href=&quot;https://testing.whamcloud.com/test_sets/2f0b61ae-6be7-11e8-a522-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/2f0b61ae-6be7-11e8-a522-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;2.11.52.55 rev d66bba81fc641f8b3f7f8d01fe5da0d6406b7ed0&lt;/p&gt;

&lt;p&gt;Though it looks like all DNE-on-ZFS runs of sanity-lsnapshot fail with this error.&lt;/p&gt;</comment>
                            <comment id="233470" author="gerrit" created="Thu, 13 Sep 2018 17:45:05 +0000"  >&lt;p&gt;Nathaniel Clark (nclark@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/33157&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/33157&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-11193&quot; title=&quot;lsnapshot mount fails with DNE&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-11193&quot;&gt;&lt;del&gt;LU-11193&lt;/del&gt;&lt;/a&gt; llog: Do not write to read-only devices&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: de1e6bda2b4f04c74fe895ab88dfc9f6c329833a&lt;/p&gt;</comment>
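<!--
A sketch for inspecting the patch locally, assuming the standard Gerrit
refs/changes layout (change 33157, patch set 1, as listed above; the repository
URL is the usual fs/lustre-release tree served by review.whamcloud.com):

    git clone git://git.whamcloud.com/fs/lustre-release.git
    cd lustre-release
    git fetch https://review.whamcloud.com/fs/lustre-release refs/changes/57/33157/1
    git show FETCH_HEAD    # should be commit de1e6bda2b4f04c74fe895ab88dfc9f6c329833a
-->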
                            <comment id="235308" author="gerrit" created="Tue, 23 Oct 2018 05:16:03 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/33157/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/33157/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-11193&quot; title=&quot;lsnapshot mount fails with DNE&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-11193&quot;&gt;&lt;del&gt;LU-11193&lt;/del&gt;&lt;/a&gt; llog: Do not write to read-only devices&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 12a695d5b2fea73ee5edb7a0a79d557c94cafa35&lt;/p&gt;</comment>
                            <comment id="235320" author="pjones" created="Tue, 23 Oct 2018 05:36:19 +0000"  >&lt;p&gt;Landed for 2.12&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                    <issuelinktype id="10010">
                        <name>Duplicate</name>
                        <inwardlinks description="is duplicated by">
                            <issuelink>
                                <issuekey id="53376">LU-11411</issuekey>
                            </issuelink>
                        </inwardlinks>
                    </issuelinktype>
                </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                    <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                        </customfieldvalues>
                    </customfield>
                    <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i00013:</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                    <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                    <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                </customfields>
    </item>
</channel>
</rss>