<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:21:09 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-1957] Test failure on test suite sanity, subtest test_180b</title>
                <link>https://jira.whamcloud.com/browse/LU-1957</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for Li Wei &amp;lt;liwei@whamcloud.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/f1cbcf24-fe85-11e1-b4cd-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/f1cbcf24-fe85-11e1-b4cd-52540035b04c&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_180b failed with the following error:&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;test_180b failed with 1&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;From the test output:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;== sanity test 180b: test obdecho directly on obdfilter == 03:16:00 (1347617760)
CMD: client-26vm4 lsmod | grep -q obdecho ||  { insmod /usr/lib64/lustre/obdecho/obdecho.ko ||  modprobe obdecho; }
CMD: client-26vm4 /usr/sbin/lctl dl
CMD: client-26vm4 /usr/sbin/lctl attach echo_client ec ec_uuid
CMD: client-26vm4 /usr/sbin/lctl --device ec setup lustre-OST0000
CMD: client-26vm4 /usr/sbin/lctl --device ec create 1
client-26vm4: error: create: #1 - Operation not supported
New object id is 
CMD: client-26vm4 /usr/sbin/lctl --device ec  cleanup
CMD: client-26vm4 /usr/sbin/lctl --device ec  detach
obecho_create_test failed: 3
CMD: client-26vm4 rmmod obdecho
 sanity test_180b: @@@@@@ FAIL: test_180b failed with 1 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;This was b2_3 with OFD and ZFS OSTs.&lt;/p&gt;

&lt;p&gt;Info required for matching: sanity 180b&lt;/p&gt;</description>
                <environment></environment>
        <key id="16000">LU-1957</key>
            <summary>Test failure on test suite sanity, subtest test_180b</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                            <label>ldiskfs</label>
                    </labels>
                <created>Sun, 16 Sep 2012 22:40:36 +0000</created>
                <updated>Fri, 3 Jan 2020 23:53:24 +0000</updated>
                            <resolved>Wed, 18 Sep 2019 21:41:06 +0000</resolved>
                                    <version>Lustre 2.3.0</version>
                    <version>Lustre 2.4.0</version>
                                    <fixVersion>Lustre 2.13.0</fixVersion>
                    <fixVersion>Lustre 2.12.4</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>7</watches>
                                                                            <comments>
                            <comment id="44978" author="liwei" created="Sun, 16 Sep 2012 23:03:58 +0000"  >&lt;p&gt;&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/29df0cfe-fedc-11e1-b4cd-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/29df0cfe-fedc-11e1-b4cd-52540035b04c&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;This was b2_3 with OFD and ZFS OSTs.&lt;/p&gt;</comment>
                            <comment id="45214" author="yujian" created="Wed, 19 Sep 2012 07:35:05 +0000"  >&lt;p&gt;Lustre Build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_3/19&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-b2_3/19&lt;/a&gt;&lt;br/&gt;
USE_OFD=yes&lt;br/&gt;
OSTFSTYPE=zfs&lt;br/&gt;
LOAD_MODULES_REMOTE=true&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/ff130926-0241-11e2-ab94-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/ff130926-0241-11e2-ab94-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="45404" author="liwei" created="Sun, 23 Sep 2012 20:57:02 +0000"  >&lt;p&gt;&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/cbb8e36a-0490-11e2-bfd4-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/cbb8e36a-0490-11e2-bfd4-52540035b04c&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;This was master with OFD and LDiskFS.&lt;/p&gt;</comment>
                            <comment id="52813" author="utopiabound" created="Thu, 21 Feb 2013 11:09:43 +0000"  >&lt;p&gt;The zfs portion of this bug is possibly handled by &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2803&quot; title=&quot;sanity.sh test_180 fails with zfs&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2803&quot;&gt;&lt;del&gt;LU-2803&lt;/del&gt;&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="53068" author="keith" created="Tue, 26 Feb 2013 23:11:12 +0000"  >&lt;p&gt;So from looking at maloo test 180b is still failing. An ldiskfs failure can be seen at &lt;a href=&quot;https://maloo.whamcloud.com/sub_tests/33b45e16-7a59-11e2-b916-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/sub_tests/33b45e16-7a59-11e2-b916-52540035b04c&lt;/a&gt;  (it failed 4 times in the last 4 weeks on the ldiskfs patch review jobs)&lt;/p&gt;


&lt;p&gt;For both zfs and ldiskfs it seems there is some fun error like&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;CMD: wtm-19vm4 lsmod | grep -q obdecho ||  { insmod /usr/lib64/lustre/obdecho/obdecho.ko ||  modprobe obdecho; }
wtm-19vm4: insmod: can&apos;t read &apos;/usr/lib64/lustre/obdecho/obdecho.ko&apos;: No such file or directory
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;There is no module?&lt;/p&gt;

</comment>
                            <comment id="53070" author="keith" created="Tue, 26 Feb 2013 23:15:32 +0000"  >&lt;p&gt;Nope, there is a module; it just seems to not want to load on the OST&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;12:11:05:Lustre: DEBUG MARKER: /usr/sbin/lctl --device ec create 1
12:11:05:LustreError: 21642:0:(ofd_obd.c:1191:ofd_create()) lustre-OST0000: Can&apos;t find FID Sequence 0x2: rc = -22
12:11:05:LustreError: 21642:0:(echo_client.c:2306:echo_create_object()) Cannot create objects: rc = -22
12:11:06:LustreError: 21642:0:(echo_client.c:2330:echo_create_object()) create object failed with: rc = -22
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="53308" author="keith" created="Mon, 4 Mar 2013 23:27:40 +0000"  >&lt;p&gt;Ok current update. The &quot;insmod: can&apos;t read &quot; thing is not part of the issue.  It is just the way the test is written.  It seems obdecho can be in  /usr/lib64/lustre/obdecho/ sometimes and sometimes it is with the kernel.  It is not quite clear to me yet whether the module from /usr/lib64/lustre/obdecho/ would be different than the one that modprobe returns. &lt;/p&gt;

&lt;p&gt;In any case in the main error path is the one seen above where &quot;/usr/sbin/lctl --device ec create 1 &quot; fails to find the FID Sequence. This is the real error. &lt;/p&gt;

&lt;p&gt;I am running endless testing to recreate the FID sequence error.  I will submit an autotest job tomorrow if I am unable to repro overnight.  I would say it is a rare error at this point in time. &lt;/p&gt;


</comment>
                            <comment id="53363" author="keith" created="Tue, 5 Mar 2013 14:29:09 +0000"  >&lt;p&gt;So far 11k Iterations of the test without a repro. &lt;/p&gt;

&lt;p&gt;It seems &lt;a href=&quot;http://review.whamcloud.com/5307&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/5307&lt;/a&gt; was landed on Feb  14th.  The last known ldiskfs error was Feb 18th so I doubt the error has been encountered since the patch landed. &lt;/p&gt;

&lt;p&gt;Patch 5307 is &quot;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2775&quot; title=&quot;Interop 2.1.4&amp;lt;-&amp;gt;2.4 failure on test suite lustre-initialization-1: ASSERTION( fid_seq_is_mdt(loi-&amp;gt;loi_oi.oi_seq) ) failed&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2775&quot;&gt;&lt;del&gt;LU-2775&lt;/del&gt;&lt;/a&gt; osp: enable fid-on-OST only for DNE.&quot;  Basically now we only use use FID for DNE, and autotest runs should not be using FID sequence on the OST. &lt;/p&gt;

&lt;p&gt;I am inclined to say this issue has been fixed.  I have emailed Wang Di.&lt;/p&gt;</comment>
                            <comment id="53399" author="di.wang" created="Tue, 5 Mar 2013 19:10:55 +0000"  >&lt;p&gt;Yes, with patch 5307, Normal FID will be only used when DNE is enabled. But I do not understand why this can fix this problem, echo client should always use seq 2, no matter OST FID is enabled or not. Probably I miss sth here.  The debug log from those failure links are so less. Keith, Do you have debug log you can post here? Thanks.  &lt;/p&gt;</comment>
                            <comment id="53404" author="keith" created="Tue, 5 Mar 2013 21:02:11 +0000"  >&lt;p&gt;It seems the error has not occurred for over two weeks; the last known error was Feb 18th.    I do not have any local debug logs, as I have been using only the autotest logs. &lt;/p&gt;

&lt;p&gt;Below is the OST debug log from the Feb 18th run. &lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_logs/1dcc313a-7a5b-11e2-b916-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_logs/1dcc313a-7a5b-11e2-b916-52540035b04c&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;In general I don&apos;t see anything more interesting than &lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;00000100:00100000:0.0:1360080302.580440:0:13196:0:(service.c:1976:ptlrpc_server_handle_request()) Handling RPC pname:cluuid+ref:pid:xid:nid:opc ll_ost00_002:lustre-MDT0000-mdtlov_UUID+5:3022:x1426144806071293:12345-10.10.16.242@tcp:400
00000100:00100000:0.0:1360080302.580447:0:13196:0:(service.c:2020:ptlrpc_server_handle_request()) Handled RPC pname:cluuid+ref:pid:xid:nid:opc ll_ost00_002:lustre-MDT0000-mdtlov_UUID+5:3022:x1426144806071293:12345-10.10.16.242@tcp:400 Request procesed in 7us (156us total) trans 0 rc 0/0
00000100:00100000:0.0:1360080302.580448:0:13196:0:(nrs_fifo.c:245:nrs_fifo_req_stop()) NRS stop fifo request from 12345-10.10.16.242@tcp, seq: 251
00000100:00100000:0.0:1360080302.580450:0:13196:0:(nrs_fifo.c:223:nrs_fifo_req_start()) NRS start fifo request from 12345-10.10.16.242@tcp, seq: 252
00000100:00100000:0.0:1360080302.580451:0:13196:0:(service.c:1976:ptlrpc_server_handle_request()) Handling RPC pname:cluuid+ref:pid:xid:nid:opc ll_ost00_002:lustre-MDT0000-mdtlov_UUID+5:3022:x1426144806071295:12345-10.10.16.242@tcp:400
00000100:00100000:0.0:1360080302.580456:0:13196:0:(service.c:2020:ptlrpc_server_handle_request()) Handled RPC pname:cluuid+ref:pid:xid:nid:opc ll_ost00_002:lustre-MDT0000-mdtlov_UUID+5:3022:x1426144806071295:12345-10.10.16.242@tcp:400 Request procesed in 5us (161us total) trans 0 rc 0/0
00000100:00100000:0.0:1360080302.580458:0:13196:0:(nrs_fifo.c:245:nrs_fifo_req_stop()) NRS stop fifo request from 12345-10.10.16.242@tcp, seq: 252
00002000:00100000:0.0:1360080302.620394:0:21137:0:(ofd_obd.c:135:ofd_parse_connect_data()) lustre-OST0000: cli ECHO_UUID/ffff88006041b800 ocd_connect_flags: 0x405000000068 ocd_version: 2033c00 ocd_grant: 0 ocd_index: 0 ocd_group 2
00002000:00100000:0.0:1360080302.620403:0:21137:0:(ofd_obd.c:234:ofd_parse_connect_data()) lustre-OST0000: cli (no nid) does not support OBD_CONNECT_CKSUM, CRC32 will be used
00002000:00080000:0.0:1360080302.620456:0:21137:0:(ofd_obd.c:317:ofd_obd_connect()) lustre-OST0000: get connection from MDS 2
00000001:02000400:0.0:1360080302.715088:0:21160:0:(debug.c:445:libcfs_debug_mark_buffer()) DEBUG MARKER: /usr/sbin/lctl --device ec create 1
00002000:00020000:0.0:1360080302.803633:0:21184:0:(ofd_obd.c:1178:ofd_create()) lustre-OST0000: Can&apos;t find oseq 0x2: -22
00008000:00020000:0.0:1360080302.803636:0:21184:0:(echo_client.c:2300:echo_create_object()) Cannot create objects: rc = -22
00008000:00020000:0.0:1360080302.804450:0:21184:0:(echo_client.c:2324:echo_create_object()) create object failed with: rc = -22
00000001:02000400:0.0:1360080302.902789:0:21207:0:(debug.c:445:libcfs_debug_mark_buffer()) DEBUG MARKER: /usr/sbin/lctl --device ec  cleanup
00000100:00100000:0.0:1360080302.936335:0:5561:0:(client.c:1418:ptlrpc_send_new_req()) Sending RPC pname:cluuid:pid:xid:nid:opc ptlrpcd_0:29659cc7-7815-f2e4-6cf2-2103848e55b6:5561:1426144820745791:10.10.16.242@tcp:400
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;in the logs. &lt;/p&gt;</comment>
                            <comment id="53468" author="adilger" created="Wed, 6 Mar 2013 14:01:39 +0000"  >&lt;p&gt;In my maloo search, it does appear that sanity.sh test_180b is failing several times a day:&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://maloo.whamcloud.com/sub_tests/query?utf8=%E2%9C%93&amp;amp;test_set[test_set_script_id]=f9516376-32bc-11e0-aaee-52540025f9ae&amp;amp;sub_test[sub_test_script_id]=14b0513a-32be-11e0-b685-52540025f9ae&amp;amp;sub_test[status]=FAIL&amp;amp;sub_test[query_bugs]=&amp;amp;test_session[test_host]=&amp;amp;test_session[test_group]=&amp;amp;test_session[user_id]=&amp;amp;test_session[query_date]=&amp;amp;test_session[query_recent_period]=2419200&amp;amp;test_node[os_type_id]=&amp;amp;test_node[distribution_type_id]=&amp;amp;test_node[architecture_type_id]=&amp;amp;test_node[file_system_type_id]=&amp;amp;test_node[lustre_branch_id]=&amp;amp;test_node_network[network_type_id]=&amp;amp;commit=Update+results&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/sub_tests/query?utf8=%E2%9C%93&amp;amp;test_set[test_set_script_id]=f9516376-32bc-11e0-aaee-52540025f9ae&amp;amp;sub_test[sub_test_script_id]=14b0513a-32be-11e0-b685-52540025f9ae&amp;amp;sub_test[status]=FAIL&amp;amp;sub_test[query_bugs]=&amp;amp;test_session[test_host]=&amp;amp;test_session[test_group]=&amp;amp;test_session[user_id]=&amp;amp;test_session[query_date]=&amp;amp;test_session[query_recent_period]=2419200&amp;amp;test_node[os_type_id]=&amp;amp;test_node[distribution_type_id]=&amp;amp;test_node[architecture_type_id]=&amp;amp;test_node[file_system_type_id]=&amp;amp;test_node[lustre_branch_id]=&amp;amp;test_node_network[network_type_id]=&amp;amp;commit=Update+results&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;The most recent failures are at:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/sub_tests/0b93da12-833a-11e2-85c9-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/sub_tests/0b93da12-833a-11e2-85c9-52540035b04c&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/sub_tests/e26a1e3c-8242-11e2-ba47-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/sub_tests/e26a1e3c-8242-11e2-ba47-52540035b04c&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Looks like all of the failures on ZFS.&lt;/p&gt;</comment>
                            <comment id="53471" author="keith" created="Wed, 6 Mar 2013 14:28:00 +0000"  >&lt;p&gt;Correct, ZFS fails a lot.  There has not been an Ldiskfs Failure since Feb 18th.   &lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2803&quot; title=&quot;sanity.sh test_180 fails with zfs&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2803&quot;&gt;&lt;del&gt;LU-2803&lt;/del&gt;&lt;/a&gt; sanity/180 fail with zfs :  is a separate LU that tracks the zfs issue.  Alex has a Patch out for the issue.   I have taken this LU to mean Ldiskfs. &lt;/p&gt;</comment>
                            <comment id="53496" author="keith" created="Wed, 6 Mar 2013 23:45:45 +0000"  >&lt;p&gt;Are we all ok to bring this down out of blocker state? &lt;/p&gt;

&lt;p&gt;Perhaps a close as unreproducible for ldiskfs? &lt;/p&gt;</comment>
                            <comment id="53513" author="adilger" created="Thu, 7 Mar 2013 02:27:11 +0000"  >&lt;p&gt;Fixed for ldiskfs, use &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2803&quot; title=&quot;sanity.sh test_180 fails with zfs&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2803&quot;&gt;&lt;del&gt;LU-2803&lt;/del&gt;&lt;/a&gt; for the current ZFS failures. &lt;/p&gt;</comment>
                            <comment id="95395" author="adilger" created="Wed, 1 Oct 2014 07:14:58 +0000"  >&lt;p&gt;sanity.sh test_180 is still being skipped on ZFS filesystems due to this issue.  If it was fixed by &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2803&quot; title=&quot;sanity.sh test_180 fails with zfs&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2803&quot;&gt;&lt;del&gt;LU-2803&lt;/del&gt;&lt;/a&gt; then a patch should be submitted to re-enable the test.&lt;/p&gt;</comment>
                            <comment id="161839" author="simmonsja" created="Sun, 14 Aug 2016 17:19:06 +0000"  >&lt;p&gt;Really old blocker for unsupported version&lt;/p&gt;</comment>
                            <comment id="197447" author="adilger" created="Mon, 29 May 2017 06:04:53 +0000"  >&lt;p&gt;Reopen to clear flags.&lt;/p&gt;</comment>
                            <comment id="253623" author="adilger" created="Mon, 26 Aug 2019 22:18:40 +0000"  >&lt;p&gt;We still always skip test_180 for ZFS targets due to &lt;tt&gt;ALWAYS_EXCEPT&lt;/tt&gt;.  A patch should be submitted to remove the subtests from &lt;tt&gt;ALWAYS_EXCEPT&lt;/tt&gt; on the assumption that &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-2803&quot; title=&quot;sanity.sh test_180 fails with zfs&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-2803&quot;&gt;&lt;del&gt;LU-2803&lt;/del&gt;&lt;/a&gt; fixed that problem.&lt;/p&gt;</comment>
                            <comment id="253627" author="gerrit" created="Mon, 26 Aug 2019 23:23:58 +0000"  >&lt;p&gt;Andreas Dilger (adilger@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/35930&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/35930&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1957&quot; title=&quot;Test failure on test suite sanity, subtest test_180b&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1957&quot;&gt;&lt;del&gt;LU-1957&lt;/del&gt;&lt;/a&gt; tests: remove sanity test 180 from ALWAYS_EXCEPT&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 2b0119932f5abfd85f01ffac5614f41d5b9fe559&lt;/p&gt;</comment>
                            <comment id="254814" author="gerrit" created="Mon, 16 Sep 2019 23:05:01 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/35930/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/35930/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1957&quot; title=&quot;Test failure on test suite sanity, subtest test_180b&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1957&quot;&gt;&lt;del&gt;LU-1957&lt;/del&gt;&lt;/a&gt; tests: remove sanity test 180 from ALWAYS_EXCEPT&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 72b59b85a253e508ec1b192fbf8cad840ca6ff2c&lt;/p&gt;</comment>
                            <comment id="255014" author="adilger" created="Wed, 18 Sep 2019 21:41:06 +0000"  >&lt;p&gt;Bug was fixed in 2.4.0, test enabled in 2.13.0.&lt;/p&gt;</comment>
                            <comment id="259221" author="gerrit" created="Thu, 5 Dec 2019 16:49:05 +0000"  >&lt;p&gt;James Nunez (jnunez@whamcloud.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/36930&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/36930&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1957&quot; title=&quot;Test failure on test suite sanity, subtest test_180b&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1957&quot;&gt;&lt;del&gt;LU-1957&lt;/del&gt;&lt;/a&gt; tests: remove sanity test 180 from ALWAYS_EXCEPT&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_12&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 3e47ed64ed481d59140fd74ff92d9f774d0e39da&lt;/p&gt;</comment>
                            <comment id="260582" author="gerrit" created="Fri, 3 Jan 2020 23:41:48 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/36930/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/36930/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-1957&quot; title=&quot;Test failure on test suite sanity, subtest test_180b&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-1957&quot;&gt;&lt;del&gt;LU-1957&lt;/del&gt;&lt;/a&gt; tests: remove sanity test 180 from ALWAYS_EXCEPT&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_12&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: c3d53269c5133e938b90f0f0488cddf29c35701b&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="17542">LU-2803</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzv3in:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>4069</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>