<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:36:14 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-10566] parallel-scale-nfsv4 test_metabench: mkdir: cannot create directory on Read-only file system</title>
                <link>https://jira.whamcloud.com/browse/LU-10566</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;parallel-scale-nfsv4 test_metabench - metabench failed! 1&lt;br/&gt;
^^^^^^^^^^^^^ DO NOT REMOVE LINE ABOVE ^^^^^^^^^^^^^&lt;/p&gt;

&lt;p&gt;This issue was created by maloo for sarah_lw &amp;lt;wei3.liu@intel.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/6e890fe6-fd53-11e7-a6ad-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/6e890fe6-fd53-11e7-a6ad-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;test_metabench failed with the following error:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;metabench failed! 1
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;server: 2.10.57 RHEL7 ldiskfs&lt;br/&gt;
client: SLES12SP3&lt;/p&gt;

&lt;p&gt;test log&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;== parallel-scale-nfsv4 test metabench: metabench ==================================================== 11:23:59 (1516389839)
OPTIONS:
METABENCH=/usr/bin/metabench
clients=onyx-33vm1,onyx-33vm2
mbench_NFILES=10000
mbench_THREADS=4
onyx-33vm1
onyx-33vm2
mkdir: cannot create directory &#8216;/mnt/lustre/d0.parallel-scale-nfs&#8217;: Read-only file system
chmod: cannot access &apos;/mnt/lustre/d0.parallel-scale-nfs/d0.metabench&apos;: No such file or directory
+ /usr/bin/metabench -w /mnt/lustre/d0.parallel-scale-nfs/d0.metabench -c 10000 -C -S 
+ chmod 0777 /mnt/lustre
chmod: changing permissions of &apos;/mnt/lustre&apos;: Read-only file system
dr-xr-xr-x 23 root root 4096 Jan 19 00:29 /mnt/lustre
+ su mpiuser sh -c &quot;/usr/lib64/mpi/gcc/openmpi/bin/mpirun --mca btl tcp,self --mca btl_tcp_if_include eth0 -mca boot ssh -machinefile /tmp/parallel-scale-nfs.machines -np 8 /usr/bin/metabench -w /mnt/lustre/d0.parallel-scale-nfs/d0.metabench -c 10000 -C -S &quot;
[onyx-33vm2:14600] mca: base: component_find: unable to open /usr/lib64/mpi/gcc/openmpi/lib64/openmpi/mca_mtl_ofi: libpsm_infinipath.so.1: cannot open shared object file: No such file or directory (ignored)
[onyx-33vm2:14600] mca: base: component_find: unable to open /usr/lib64/mpi/gcc/openmpi/lib64/openmpi/mca_mtl_psm: libpsm_infinipath.so.1: cannot open shared object file: No such file or directory (ignored)
[onyx-33vm2:14601] mca: base: component_find: unable to open /usr/lib64/mpi/gcc/openmpi/lib64/openmpi/mca_mtl_ofi: libpsm_infinipath.so.1: cannot open shared object file: No such file or directory (ignored)
[onyx-33vm2:14601] mca: base: component_find: unable to open /usr/lib64/mpi/gcc/openmpi/lib64/openmpi/mca_mtl_psm: libpsm_infinipath.so.1: cannot open shared object file: No such file or directory (ignored)
[onyx-33vm1:09898] mca: base: component_find: unable to open /usr/lib64/mpi/gcc/openmpi/lib64/openmpi/mca_mtl_ofi: libpsm_infinipath.so.1: cannot open shared object file: No such file or directory (ignored)
[onyx-33vm1:09898] mca: base: component_find: unable to open /usr/lib64/mpi/gcc/openmpi/lib64/openmpi/mca_mtl_psm: libpsm_infinipath.so.1: cannot open shared object file: No such file or directory (ignored)
[onyx-33vm1:09900] mca: base: component_find: unable to open /usr/lib64/mpi/gcc/openmpi/lib64/openmpi/mca_mtl_ofi: libpsm_infinipath.so.1: cannot open shared object file: No such file or directory (ignored)
[onyx-33vm1:09900] mca: base: component_find: unable to open /usr/lib64/mpi/gcc/openmpi/lib64/openmpi/mca_mtl_psm: libpsm_infinipath.so.1: cannot open shared object file: No such file or directory (ignored)
[onyx-33vm1:09899] mca: base: component_find: unable to open /usr/lib64/mpi/gcc/openmpi/lib64/openmpi/mca_mtl_ofi: libpsm_infinipath.so.1: cannot open shared object file: No such file or directory (ignored)
[onyx-33vm1:09899] mca: base: component_find: unable to open /usr/lib64/mpi/gcc/openmpi/lib64/openmpi/mca_mtl_psm: libpsm_infinipath.so.1: cannot open shared object file: No such file or directory (ignored)
[onyx-33vm2:14602] mca: base: component_find: unable to open /usr/lib64/mpi/gcc/openmpi/lib64/openmpi/mca_mtl_ofi: libpsm_infinipath.so.1: cannot open shared object file: No such file or directory (ignored)
[onyx-33vm2:14602] mca: base: component_find: unable to open /usr/lib64/mpi/gcc/openmpi/lib64/openmpi/mca_mtl_psm: libpsm_infinipath.so.1: cannot open shared object file: No such file or directory (ignored)
[onyx-33vm2:14604] mca: base: component_find: unable to open /usr/lib64/mpi/gcc/openmpi/lib64/openmpi/mca_mtl_ofi: libpsm_infinipath.so.1: cannot open shared object file: No such file or directory (ignored)
[onyx-33vm2:14604] mca: base: component_find: unable to open /usr/lib64/mpi/gcc/openmpi/lib64/openmpi/mca_mtl_psm: libpsm_infinipath.so.1: cannot open shared object file: No such file or directory (ignored)
[onyx-33vm1:09902] mca: base: component_find: unable to open /usr/lib64/mpi/gcc/openmpi/lib64/openmpi/mca_mtl_ofi: libpsm_infinipath.so.1: cannot open shared object file: No such file or directory (ignored)
[onyx-33vm1:09902] mca: base: component_find: unable to open /usr/lib64/mpi/gcc/openmpi/lib64/openmpi/mca_mtl_psm: libpsm_infinipath.so.1: cannot open shared object file: No such file or directory (ignored)
Metadata Test &amp;lt;no-name&amp;gt; on 01/19/2018 at 11:23:59

Rank   0 process on node onyx-33vm1
Rank   1 process on node onyx-33vm1
Rank   2 process on node onyx-33vm1
Rank   3 process on node onyx-33vm1
Rank   4 process on node onyx-33vm2
Rank   5 process on node onyx-33vm2
Rank   6 process on node onyx-33vm2
Rank   7 process on node onyx-33vm2

[01/19/2018 11:23:59] FATAL error on process 0
Proc 0: cannot create component d0.parallel-scale-nfs in /mnt/lustre/d0.parallel-scale-nfs/d0.metabench: Read-only file system
[onyx-33vm1][[7407,1],1][btl_tcp_frag.c:238:mca_btl_tcp_frag_recv] mca_btl_tcp_frag_recv: readv failed: Connection reset by peer (104)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment></environment>
        <key id="50398">LU-10566</key>
            <summary>parallel-scale-nfsv4 test_metabench: mkdir: cannot create directory on Read-only file system</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="4" iconUrl="https://jira.whamcloud.com/images/icons/statuses/reopened.png" description="This issue was once resolved, but the resolution was deemed incorrect. From here issues are either marked assigned or resolved.">Reopened</status>
                    <statusCategory id="2" key="new" colorName="default"/>
                                    <resolution id="-1">Unresolved</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="sarah">Sarah Liu</reporter>
                        <labels>
                    </labels>
                <created>Thu, 25 Jan 2018 18:12:01 +0000</created>
                <updated>Wed, 14 Apr 2021 16:14:13 +0000</updated>
                                            <version>Lustre 2.11.0</version>
                    <version>Lustre 2.10.4</version>
                    <version>Lustre 2.12.5</version>
                    <version>Lustre 2.12.6</version>
                                    <fixVersion>Lustre 2.12.0</fixVersion>
                    <fixVersion>Lustre 2.10.4</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>5</watches>
                                                                            <comments>
                            <comment id="219283" author="jamesanunez" created="Fri, 26 Jan 2018 18:36:54 +0000"  >&lt;p&gt;From the top of the suite_log, we see that the file system is 99% full:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;UUID                   1K-blocks        Used   Available Use% Mounted on
lustre-MDT0000_UUID      1165900       85376      977328   8% /mnt/lustre[MDT:0]
lustre-OST0000_UUID      1933276     1801184       10852  99% /mnt/lustre[OST:0]
lustre-OST0001_UUID      1933276     1795316       16720  99% /mnt/lustre[OST:1]
lustre-OST0002_UUID      1933276     1795232       16776  99% /mnt/lustre[OST:2]
lustre-OST0003_UUID      1933276     1795292       16660  99% /mnt/lustre[OST:3]
lustre-OST0004_UUID      1933276     1795180       16828  99% /mnt/lustre[OST:4]
lustre-OST0005_UUID      1933276     1795300       16596  99% /mnt/lustre[OST:5]
lustre-OST0006_UUID      1933276     1803440        8596 100% /mnt/lustre[OST:6]

filesystem_summary:     13532932    12580944      103028  99% /mnt/lustre
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Does this explain why the file system changes to &apos;read-only&apos; ? I suspect that the NFS file system is read-only, but we should confirm that the Lustre file system is not read-only.&lt;/p&gt;</comment>
                            <comment id="220139" author="jamesanunez" created="Tue, 6 Feb 2018 16:00:37 +0000"  >&lt;p&gt;We are seeing this issue on failover test sessions with DNE configured and ZFS servers with servers and clients el7:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/ed44110c-fd83-11e7-a7cd-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/ed44110c-fd83-11e7-a7cd-52540065bddc&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="220434" author="mdiep" created="Thu, 8 Feb 2018 16:35:04 +0000"  >&lt;p&gt;+1 on b2_10&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/ba76657c-0b5c-11e8-a6ad-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/ba76657c-0b5c-11e8-a6ad-52540065bddc&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="220435" author="gerrit" created="Thu, 8 Feb 2018 17:06:20 +0000"  >&lt;p&gt;Minh Diep (minh.diep@intel.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/31231&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/31231&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10566&quot; title=&quot;parallel-scale-nfsv4 test_metabench: mkdir: cannot create directory on Read-only file system&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10566&quot;&gt;LU-10566&lt;/a&gt; test: don&apos;t direct lfs df to /dev/null&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: b9bc92bd01d9a769ddb4d8669b27fe6db8e7cf54&lt;/p&gt;</comment>
                            <comment id="220646" author="mdiep" created="Fri, 9 Feb 2018 21:11:10 +0000"  >&lt;p&gt;I found that obdfilter-survey test_1c did not clean up properly&lt;/p&gt;

&lt;p&gt;=============&amp;gt; Destroy 1 on 10.9.6.12:lustre-OST0000_ecc&lt;br/&gt;
error: destroy: invalid objid &apos;3&apos;&lt;br/&gt;
destroy OST object &amp;lt;objid&amp;gt; [num &lt;span class=&quot;error&quot;&gt;&amp;#91;verbose&amp;#93;&lt;/span&gt;]&lt;br/&gt;
usage: destroy &amp;lt;num&amp;gt; objects, starting at objid &amp;lt;objid&amp;gt;&lt;br/&gt;
run &amp;lt;command&amp;gt; after connecting to device &amp;lt;devno&amp;gt;&lt;br/&gt;
--device &amp;lt;devno&amp;gt; &amp;lt;command &lt;span class=&quot;error&quot;&gt;&amp;#91;args ...&amp;#93;&lt;/span&gt;&amp;gt;&lt;/p&gt;</comment>
                            <comment id="223647" author="jamesanunez" created="Wed, 14 Mar 2018 20:27:29 +0000"  >&lt;p&gt;It looks like we are hitting this again with 2.10.59 RHEL 7 ldiskfs servers and RHEL 7 clients.&lt;/p&gt;

&lt;p&gt;This time, the file system is not almost full. From the suite_log, before we run any parallel-scale-nfsv4 tests,&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;UUID&#160;&#160;&#160;&#160;&#160;&#160;&#160;&#160;&#160;&#160;&#160;&#160;&#160;&#160;&#160;&#160;&#160;&#160; 1K-blocks&#160;&#160;&#160;&#160;&#160;&#160;&#160; Used&#160;&#160; Available Use% Mounted on

lustre-MDT0000_UUID&#160;&#160;&#160;&#160;&#160; 1165900&#160;&#160;&#160;&#160;&#160;&#160; 17368&#160;&#160;&#160;&#160; 1045336&#160;&#160; 2% /mnt/lustre[MDT:0]
lustre-OST0000_UUID&#160;&#160;&#160;&#160;&#160; 1933276&#160;&#160;&#160;&#160;&#160;&#160; 26956&#160;&#160;&#160;&#160; 1781868&#160;&#160; 1% /mnt/lustre[OST:0]
lustre-OST0001_UUID&#160;&#160;&#160;&#160;&#160; 1933276&#160;&#160;&#160;&#160;&#160;&#160; 26944&#160;&#160;&#160;&#160; 1785064&#160;&#160; 1% /mnt/lustre[OST:1]
lustre-OST0002_UUID&#160;&#160;&#160;&#160;&#160; 1933276&#160;&#160;&#160;&#160;&#160;&#160; 31044&#160;&#160;&#160;&#160; 1780088&#160;&#160; 2% /mnt/lustre[OST:2]
lustre-OST0003_UUID&#160;&#160;&#160;&#160;&#160; 1933276&#160;&#160;&#160;&#160;&#160; &#160;26956&#160;&#160;&#160;&#160; 1784908&#160;&#160; 1% /mnt/lustre[OST:3]
lustre-OST0004_UUID&#160;&#160;&#160;&#160;&#160; 1933276&#160;&#160;&#160;&#160;&#160;&#160; 26948&#160;&#160;&#160;&#160; 1784972&#160;&#160; 1% /mnt/lustre[OST:4]
lustre-OST0005_UUID&#160;&#160;&#160;&#160;&#160; 1933276&#160;&#160;&#160;&#160;&#160;&#160; 26988&#160;&#160;&#160;&#160; 1784812&#160;&#160; 1% /mnt/lustre[OST:5]
lustre-OST0006_UUID&#160;&#160;&#160;&#160;&#160; 1933276&#160;&#160;&#160;&#160;&#160;&#160; 26960&#160;&#160;&#160;&#160; 1784960&#160;&#160; 1% /mnt/lustre[OST:6]

filesystem_summary:&#160;&#160;&#160;&#160; 13532932&#160;&#160;&#160;&#160;&#160; 192796&#160;&#160;&#160; 12486672&#160;&#160; 2% /mnt/lustre&#160;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;We see the same output from metabench as in the description&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;== parallel-scale-nfsv4 test metabench: metabench ==================================================== 08:43:41 (1521017021)
OPTIONS:
METABENCH=/usr/bin/metabench
clients=onyx-30vm5.onyx.hpdd.intel.com,onyx-30vm6
mbench_NFILES=10000
mbench_THREADS=4
onyx-30vm5.onyx.hpdd.intel.com
onyx-30vm6
mkdir: cannot create directory &apos;/mnt/lustre/d0.parallel-scale-nfs&apos;: Read-only file system
chmod: cannot access &apos;/mnt/lustre/d0.parallel-scale-nfs/d0.metabench&apos;: No such file or directory
+ /usr/bin/metabench -w /mnt/lustre/d0.parallel-scale-nfs/d0.metabench -c 10000 -C -S
+ chmod 0777 /mnt/lustre
chmod: changing permissions of &apos;/mnt/lustre&apos;: Read-only file system
dr-xr-xr-x 23 root root 4096 Mar 14 07:59 /mnt/lustre&#160;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/734ea438-2773-11e8-9e0e-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/734ea438-2773-11e8-9e0e-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/3b702578-2769-11e8-9e0e-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/3b702578-2769-11e8-9e0e-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;</comment>
                            <comment id="223872" author="gerrit" created="Fri, 16 Mar 2018 18:37:27 +0000"  >&lt;p&gt;Minh Diep (minh.diep@intel.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/31679&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/31679&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10566&quot; title=&quot;parallel-scale-nfsv4 test_metabench: mkdir: cannot create directory on Read-only file system&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10566&quot;&gt;LU-10566&lt;/a&gt; test: debug&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 7550568625d2732afeb36a52df36db0109bda82d&lt;/p&gt;</comment>
                            <comment id="225506" author="gerrit" created="Mon, 9 Apr 2018 19:51:01 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/31679/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/31679/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10566&quot; title=&quot;parallel-scale-nfsv4 test_metabench: mkdir: cannot create directory on Read-only file system&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10566&quot;&gt;LU-10566&lt;/a&gt; test: fix nfs exports clean up&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 2cdc1ad8b86d013fdb8ffc70ee567284537eee47&lt;/p&gt;</comment>
                            <comment id="225550" author="pjones" created="Mon, 9 Apr 2018 20:59:42 +0000"  >&lt;p&gt;&lt;font color=&quot;#000000&quot;&gt;Landed for 2.12&lt;/font&gt;&lt;/p&gt;</comment>
                            <comment id="225751" author="gerrit" created="Wed, 11 Apr 2018 15:26:40 +0000"  >&lt;p&gt;Minh Diep (minh.diep@intel.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/31953&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/31953&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10566&quot; title=&quot;parallel-scale-nfsv4 test_metabench: mkdir: cannot create directory on Read-only file system&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10566&quot;&gt;LU-10566&lt;/a&gt; test: fix nfs exports clean up&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_10&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 5a86bc8daac771e428ad839f27ea1542a4a40f48&lt;/p&gt;</comment>
                            <comment id="226102" author="gerrit" created="Mon, 16 Apr 2018 19:01:41 +0000"  >&lt;p&gt;John L. Hammond (john.hammond@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/31953/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/31953/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-10566&quot; title=&quot;parallel-scale-nfsv4 test_metabench: mkdir: cannot create directory on Read-only file system&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-10566&quot;&gt;LU-10566&lt;/a&gt; test: fix nfs exports clean up&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_10&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 741347aafb8053d02294650add007e1bf050e978&lt;/p&gt;</comment>
                            <comment id="237628" author="sarah" created="Thu, 29 Nov 2018 00:22:25 +0000"  >&lt;p&gt;hit this again on b2_10 2.10.6-rc2 zfs DNE&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://testing.whamcloud.com/test_sets/05a4f148-ef60-11e8-bfe1-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/05a4f148-ef60-11e8-bfe1-52540065bddc&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="272001" author="jamesanunez" created="Thu, 4 Jun 2020 18:30:50 +0000"  >&lt;p&gt;I&apos;m reopening this ticket because we are still seeing the read-only file system problem for 2.12.5 RC1 at &lt;a href=&quot;https://testing.whamcloud.com/test_sets/bd833e99-d557-4ec6-a768-91440b98b55e&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/bd833e99-d557-4ec6-a768-91440b98b55e&lt;/a&gt; .&lt;/p&gt;

&lt;p&gt;Maybe &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12231&quot; title=&quot;parallel-scale-nfsv4 test racer_on_nfs fails with &amp;#39;test_racer_on_nfs failed with 1&amp;#39;&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12231&quot;&gt;&lt;del&gt;LU-12231&lt;/del&gt;&lt;/a&gt; is the same issue and this one can be closed since &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-12231&quot; title=&quot;parallel-scale-nfsv4 test racer_on_nfs fails with &amp;#39;test_racer_on_nfs failed with 1&amp;#39;&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-12231&quot;&gt;&lt;del&gt;LU-12231&lt;/del&gt;&lt;/a&gt; is still open?&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="50801">LU-10663</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="51511">LU-10851</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="47879">LU-9892</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="55507">LU-12231</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzzrqn:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>