<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 03:01:32 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
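<!--
A concrete example of the field restriction described above. The issue-xml view path used
here is an assumption based on JIRA's standard URL pattern; only the field parameters come
from the note above:

    https://jira.whamcloud.com/si/jira.issueviews:issue-xml/LU-13469/LU-13469.xml?field=key&field=summary
-->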
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-13469] MDS hung during mount</title>
                <link>https://jira.whamcloud.com/browse/LU-13469</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;One MDS hung during mount during the failover process.&lt;/p&gt;

&lt;p&gt;soak-9 console&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[ 3961.086008] mount.lustre    D ffff8f5730291070     0  5206   5205 0x00000082
[ 3961.093940] Call Trace:
[ 3961.096752]  [&amp;lt;ffffffffc1333360&amp;gt;] ? class_config_dump_handler+0x7e0/0x7e0 [obdclass]
[ 3961.105419]  [&amp;lt;ffffffff99380a09&amp;gt;] schedule+0x29/0x70
[ 3961.110980]  [&amp;lt;ffffffff9937e511&amp;gt;] schedule_timeout+0x221/0x2d0
[ 3961.117509]  [&amp;lt;ffffffff98ce10f6&amp;gt;] ? select_task_rq_fair+0x5a6/0x760
[ 3961.124565]  [&amp;lt;ffffffffc1333360&amp;gt;] ? class_config_dump_handler+0x7e0/0x7e0 [obdclass]
[ 3961.133226]  [&amp;lt;ffffffff99380dbd&amp;gt;] wait_for_completion+0xfd/0x140
[ 3961.139955]  [&amp;lt;ffffffff98cdb4c0&amp;gt;] ? wake_up_state+0x20/0x20
[ 3961.146222]  [&amp;lt;ffffffffc12f8b84&amp;gt;] llog_process_or_fork+0x254/0x520 [obdclass]
[ 3961.154226]  [&amp;lt;ffffffffc12f8e64&amp;gt;] llog_process+0x14/0x20 [obdclass]
[ 3961.161271]  [&amp;lt;ffffffffc132b055&amp;gt;] class_config_parse_llog+0x125/0x350 [obdclass]
[ 3961.169552]  [&amp;lt;ffffffffc15beaf8&amp;gt;] mgc_process_cfg_log+0x788/0xc40 [mgc]
[ 3961.176961]  [&amp;lt;ffffffffc15c223f&amp;gt;] mgc_process_log+0x3bf/0x920 [mgc]
[ 3961.184004]  [&amp;lt;ffffffffc1333360&amp;gt;] ? class_config_dump_handler+0x7e0/0x7e0 [obdclass]
[ 3961.192673]  [&amp;lt;ffffffffc15c3cc3&amp;gt;] mgc_process_config+0xc63/0x1870 [mgc]
[ 3961.200110]  [&amp;lt;ffffffffc1336f27&amp;gt;] lustre_process_log+0x2d7/0xad0 [obdclass]
[ 3961.207925]  [&amp;lt;ffffffffc136a064&amp;gt;] server_start_targets+0x12d4/0x2970 [obdclass]
[ 3961.216133]  [&amp;lt;ffffffffc1339fe7&amp;gt;] ? lustre_start_mgc+0x257/0x2420 [obdclass]
[ 3961.224020]  [&amp;lt;ffffffff98e23db6&amp;gt;] ? kfree+0x106/0x140
[ 3961.229698]  [&amp;lt;ffffffffc1333360&amp;gt;] ? class_config_dump_handler+0x7e0/0x7e0 [obdclass]
[ 3961.238396]  [&amp;lt;ffffffffc136c7cc&amp;gt;] server_fill_super+0x10cc/0x1890 [obdclass]
[ 3961.246314]  [&amp;lt;ffffffffc133cd88&amp;gt;] lustre_fill_super+0x498/0x990 [obdclass]
[ 3961.254033]  [&amp;lt;ffffffffc133c8f0&amp;gt;] ? lustre_common_put_super+0x270/0x270 [obdclass]
[ 3961.262511]  [&amp;lt;ffffffff98e4e7df&amp;gt;] mount_nodev+0x4f/0xb0
[ 3961.268390]  [&amp;lt;ffffffffc1334d98&amp;gt;] lustre_mount+0x18/0x20 [obdclass]
[ 3961.275401]  [&amp;lt;ffffffff98e4f35e&amp;gt;] mount_fs+0x3e/0x1b0
[ 3961.281064]  [&amp;lt;ffffffff98e6d507&amp;gt;] vfs_kern_mount+0x67/0x110
[ 3961.287299]  [&amp;lt;ffffffff98e6fc5f&amp;gt;] do_mount+0x1ef/0xce0
[ 3961.293070]  [&amp;lt;ffffffff98e4737a&amp;gt;] ? __check_object_size+0x1ca/0x250
[ 3961.300073]  [&amp;lt;ffffffff98e250ec&amp;gt;] ? kmem_cache_alloc_trace+0x3c/0x200
[ 3961.307276]  [&amp;lt;ffffffff98e70a93&amp;gt;] SyS_mount+0x83/0xd0
[ 3961.312939]  [&amp;lt;ffffffff9938dede&amp;gt;] system_call_fastpath+0x25/0x2a
[ 3961.319665]  [&amp;lt;ffffffff9938de21&amp;gt;] ? system_call_after_swapgs+0xae/0x146
[ 4024.321554] Lustre: soaked-MDT0001: Imperative Recovery enabled, recovery window shrunk from 300-900 down to 150-900
[ 4024.360505] Lustre: soaked-MDT0001: in recovery but waiting for the first client to connect
[ 4025.087731] Lustre: soaked-MDT0001: Will be in recovery for at least 2:30, or until 27 clients reconnect

&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment>lustre-master-ib #404</environment>
        <key id="58852">LU-13469</key>
            <summary>MDS hung during mount</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="5">Cannot Reproduce</resolution>
                                        <assignee username="bzzz">Alex Zhuravlev</assignee>
                                    <reporter username="sarah">Sarah Liu</reporter>
                        <labels>
                            <label>soak</label>
                    </labels>
                <created>Tue, 21 Apr 2020 17:22:16 +0000</created>
                <updated>Wed, 27 Sep 2023 08:33:34 +0000</updated>
                            <resolved>Wed, 27 Sep 2023 08:33:34 +0000</resolved>
                                    <version>Lustre 2.14.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>7</watches>
                                                                            <comments>
                            <comment id="268164" author="bzzz" created="Tue, 21 Apr 2020 18:03:23 +0000"  >&lt;p&gt;it would be helpful to see all available traces&lt;/p&gt;</comment>
                            <comment id="268268" author="sarah" created="Wed, 22 Apr 2020 17:37:51 +0000"  >&lt;p&gt;I restarted the test and the system crashed overnight on 3 MDSs . There are crash dumps available  on spirit for soak-8/9/10, the path is:&lt;br/&gt;
/scratch/dumps/soak-9.spirit.whamcloud.com/10.10.1.109-2020-04-22-01:59:08&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Last login: Tue Apr 21 12:05:20 on ttys001
DDN11368M:~ wliu$ ssh w3liu@ssh-1.spirit.whamcloud.int
Last login: Tue Apr 21 18:50:40 2020 from host-64-47-132-181.masergy.com
[w3liu@ssh-1 ~]$ ssh root@soak-16
The authenticity of host &apos;soak-16 (&amp;lt;no hostip for proxy command&amp;gt;)&apos; can&apos;t be established.
RSA key fingerprint is 37:1f:14:66:49:de:a8:f9:fa:ab:c7:2d:28:0f:26:44.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added &apos;soak-16&apos; (RSA) to the list of known hosts.
Password: 
Last login: Tue Apr 21 17:06:58 2020 from ssh-1.spirit.whamcloud.com
[root@soak-16 ~]# ssh soak-9
Last login: Tue Mar  3 23:55:21 2020 from soak-16.spirit.whamcloud.com
[root@soak-9 ~]# ls /tmp/
krb5ccmachine_SPIRIT.WHAMCLOUD.COM                                                      systemd-private-ba1d46467241463c9bf777e964af7eaa-ntpd.service-yRleX7
openldap-tlsmc-certs--F2A4EB14DB359420C21982FE2480932C55B52B238FF1E9079F379137D27D13CC  systemd-private-e3a3d4c94d1741a68674eb88c50144a6-ntpd.service-5xNWmx
[root@soak-9 ~]# exit
logout
Connection to soak-9 closed.
[root@soak-16 ~]# ssh soak-8
Last login: Mon Apr  6 19:47:28 2020 from soak-16.spirit.whamcloud.com
[root@soak-8 ~]# ls /tmp/
krb5ccmachine_SPIRIT.WHAMCLOUD.COM                                                      systemd-private-c062f40ef20c422c98b9375d108e2d78-ntpd.service-bpcY2l
openldap-tlsmc-certs--283FB28B7CD85BA8C6BD6F2F1837458C8301AFC4036ADCD22B40A473F4F87DAE  systemd-private-d5bd51fa26ff4fc3aab039698e87f37f-ntpd.service-wXZA7q
systemd-private-1faf9075d15b445eab312529a1bc7e2e-ntpd.service-oOzCQD                    systemd-private-f83975b64fc54f1498329d9f871d565d-ntpd.service-tdxy0M
[root@soak-8 ~]# exit
logout
Connection to soak-8 closed.
[root@soak-16 ~]# screen -x
[detached from 12070.pts-0.soak-16]
[root@soak-16 ~]# exit
logout
Connection to soak-16 closed.
[w3liu@ssh-1 ~]$ ls
aeon          ddn-681             HP259  lu-13050.stack  savelog     soak-conf        soak-homedir.tgz  soak-toolbin  tmp          toolbin.tgz
backup-frank  download-build-bin  logs   repo            soak-5.tgz  soak-format.tgz  soak-merge        test-git      tmp-scripts
[w3liu@ssh-1 ~]$ mkdir lu-13469
[w3liu@ssh-1 ~]$ cd lu-13469/
[w3liu@ssh-1 lu-13469]$ scp soak-16://scratch/logs/console/soak-9.log-20200419.tgz .
The authenticity of host &apos;soak-16 (&amp;lt;no hostip for proxy command&amp;gt;)&apos; can&apos;t be established.
RSA key fingerprint is 37:1f:14:66:49:de:a8:f9:fa:ab:c7:2d:28:0f:26:44.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added &apos;soak-16&apos; (RSA) to the list of known hosts.
scp: //scratch/logs/console/soak-9.log-20200419.tgz: No such file or directory
[w3liu@ssh-1 lu-13469]$ ssh soak-16
The authenticity of host &apos;soak-16 (&amp;lt;no hostip for proxy command&amp;gt;)&apos; can&apos;t be established.
RSA key fingerprint is 37:1f:14:66:49:de:a8:f9:fa:ab:c7:2d:28:0f:26:44.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added &apos;soak-16&apos; (RSA) to the list of known hosts.
Last login: Mon Apr 20 21:08:14 2020 from ssh-1.spirit.whamcloud.com
[w3liu@soak-16 ~]$ exit
logout
Connection to soak-16 closed.
[w3liu@ssh-1 lu-13469]$ ssh root@soak-16
The authenticity of host &apos;soak-16 (&amp;lt;no hostip for proxy command&amp;gt;)&apos; can&apos;t be established.
RSA key fingerprint is 37:1f:14:66:49:de:a8:f9:fa:ab:c7:2d:28:0f:26:44.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added &apos;soak-16&apos; (RSA) to the list of known hosts.
Password: 
Last login: Tue Apr 21 20:26:23 2020 from ssh-1.spirit.whamcloud.com
[root@soak-16 ~]# screen -x
[detached from 12070.pts-0.soak-16]
[root@soak-16 ~]# exit
logout
Connection to soak-16 closed.
[w3liu@ssh-1 lu-13469]$ scp root@soak-16:/scratch/logs/console/soak-9.log-20200419.tgz .
The authenticity of host &apos;soak-16 (&amp;lt;no hostip for proxy command&amp;gt;)&apos; can&apos;t be established.
RSA key fingerprint is 37:1f:14:66:49:de:a8:f9:fa:ab:c7:2d:28:0f:26:44.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added &apos;soak-16&apos; (RSA) to the list of known hosts.
Password: 
scp: /scratch/logs/console/soak-9.log-20200419.tgz: No such file or directory
[w3liu@ssh-1 lu-13469]$ scp root@soak-16:/scratch/logs/console/soak-9.log-20200419.gz .
The authenticity of host &apos;soak-16 (&amp;lt;no hostip for proxy command&amp;gt;)&apos; can&apos;t be established.
RSA key fingerprint is 37:1f:14:66:49:de:a8:f9:fa:ab:c7:2d:28:0f:26:44.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added &apos;soak-16&apos; (RSA) to the list of known hosts.
Password: 
soak-9.log-20200419.gz                                                                                                          100%  184KB 183.6KB/s   00:00    
[w3liu@ssh-1 lu-13469]$ ssh root@soak-16
The authenticity of host &apos;soak-16 (&amp;lt;no hostip for proxy command&amp;gt;)&apos; can&apos;t be established.
RSA key fingerprint is 37:1f:14:66:49:de:a8:f9:fa:ab:c7:2d:28:0f:26:44.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added &apos;soak-16&apos; (RSA) to the list of known hosts.
Password: 
Last login: Tue Apr 21 20:29:06 2020 from ssh-1.spirit.whamcloud.com
[root@soak-16 ~]# screen -x

[338152.399548] Lustre: Skipped 1 previous similar message
[338153.078244] Lustre: soaked-MDT0000: Client 333b7f48-68da-4 (at 192.168.1.120@o2ib) reconnecting
[338153.088076] Lustre: Skipped 6 previous similar messages
[338154.126802] Lustre: soaked-MDT0000: Client b8a5f1df-2f21-4 (at 192.168.1.119@o2ib) reconnecting
[338154.136639] Lustre: Skipped 8 previous similar messages
[338533.965788] LNet: 57040:0:(o2iblnd_cb.c:3394:kiblnd_check_conns()) Timed out tx for 192.168.1.110@o2ib: 1 seconds
[338533.977364] LNet: 57040:0:(o2iblnd_cb.c:3394:kiblnd_check_conns()) Skipped 146 previous similar messages
[338610.961038] LNetError: 57040:0:(lib-move.c:3680:lnet_handle_recovery_reply()) peer NI (192.168.1.110@o2ib) recovery failed with -110
[338610.974469] LNetError: 57040:0:(lib-move.c:3680:lnet_handle_recovery_reply()) Skipped 122 previous similar messages
[338753.412960] Lustre: soaked-MDT0000: Client f59a1caf-b57f-4 (at 192.168.1.130@o2ib) reconnecting
[338753.422834] Lustre: Skipped 2 previous similar messages
[338753.428846] Lustre: soaked-MDT0000: Connection restored to 3a61c982-16a5-4 (at 192.168.1.130@o2ib)
[338753.438969] Lustre: Skipped 20 previous similar messages
[338760.544124] Lustre: soaked-MDT0000: Received new LWP connection from 192.168.1.110@o2ib, removing former export from same NID
[339000.160995] Lustre: 57949:0:(mdd_device.c:1825:mdd_changelog_clear()) soaked-MDD0000: No entry for user 1
[340067.766748] LustreError: 57125:0:(out_handler.c:918:out_tx_end()) soaked-MDT0000-osd: undo for /tmp/rpmbuild-lustre-jenkins-UoS8uXZU/BUILD/lustre-2.13.53_18_g6706bfa/lustre/ptlrpc/../../lustre/target/out_handler.c:453: rc = -524
[340067.789495] LustreError: 57125:0:(out_handler.c:918:out_tx_end()) Skipped 299 previous similar messages
[340131.819910] LustreError: 58494:0:(out_handler.c:918:out_tx_end()) soaked-MDT0000-osd: undo for /tmp/rpmbuild-lustre-jenkins-UoS8uXZU/BUILD/lustre-2.13.53_18_g6706bfa/lustre/ptlrpc/../../lustre/target/out_handler.c:453: rc = -524
[340131.842686] LustreError: 58494:0:(out_handler.c:918:out_tx_end()) Skipped 1855 previous similar messages
[340265.331826] LustreError: 57983:0:(out_handler.c:918:out_tx_end()) soaked-MDT0000-osd: undo for /tmp/rpmbuild-lustre-jenkins-UoS8uXZU/BUILD/lustre-2.13.53_18_g6706bfa/lustre/ptlrpc/../../lustre/target/out_handler.c:453: rc = -524
[340265.354552] LustreError: 57983:0:(out_handler.c:918:out_tx_end()) Skipped 2651 previous similar messages
[340419.033915] LustreError: 57214:0:(llog_cat.c:756:llog_cat_cancel_arr_rec()) soaked-MDT0003-osp-MDT0000: fail to cancel 1 llog-records: rc = -2
[340419.048319] LustreError: 57214:0:(llog_cat.c:793:llog_cat_cancel_records()) soaked-MDT0003-osp-MDT0000: fail to cancel 1 of 1 llog-records: rc = -2
[340424.618904] LustreError: 57214:0:(llog_cat.c:756:llog_cat_cancel_arr_rec()) soaked-MDT0003-osp-MDT0000: fail to cancel 1 llog-records: rc = -2
[340424.633301] LustreError: 57214:0:(llog_cat.c:756:llog_cat_cancel_arr_rec()) Skipped 22 previous similar messages
[340424.644781] LustreError: 57214:0:(llog_cat.c:793:llog_cat_cancel_records()) soaked-MDT0003-osp-MDT0000: fail to cancel 1 of 1 llog-records: rc = -2
[340424.659646] LustreError: 57214:0:(llog_cat.c:793:llog_cat_cancel_records()) Skipped 22 previous similar messages
[340432.347952] LustreError: 57214:0:(llog_cat.c:756:llog_cat_cancel_arr_rec()) soaked-MDT0003-osp-MDT0000: fail to cancel 1 llog-records: rc = -2
...
[340475.463999] LustreError: 57214:0:(llog_cat.c:793:llog_cat_cancel_records()) soaked-MDT0003-osp-MDT0000: fail to cancel 1 of 1 llog-records: rc = -116
[340475.479044] LustreError: 57214:0:(llog_cat.c:793:llog_cat_cancel_records()) Skipped 177 previous similar messages
[340516.956460] LustreError: 57214:0:(llog_cat.c:756:llog_cat_cancel_arr_rec()) soaked-MDT0003-osp-MDT0000: fail to cancel 1 llog-records: rc = -2
[340516.970860] LustreError: 57214:0:(llog_cat.c:756:llog_cat_cancel_arr_rec()) Skipped 294 previous similar messages
[340516.982425] LustreError: 57214:0:(llog_cat.c:793:llog_cat_cancel_records()) soaked-MDT0003-osp-MDT0000: fail to cancel 1 of 1 llog-records: rc = -2
[340516.997295] LustreError: 57214:0:(llog_cat.c:793:llog_cat_cancel_records()) Skipped 294 previous similar messages
[340581.591455] LustreError: 57214:0:(llog_cat.c:756:llog_cat_cancel_arr_rec()) soaked-MDT0003-osp-MDT0000: fail to cancel 1 llog-records: rc = -2
[340581.605835] LustreError: 57214:0:(llog_cat.c:756:llog_cat_cancel_arr_rec()) Skipped 172 previous similar messages
[340581.617394] LustreError: 57214:0:(llog_cat.c:793:llog_cat_cancel_records()) soaked-MDT0003-osp-MDT0000: fail to cancel 1 of 1 llog-records: rc = -2
[340581.632256] LustreError: 57214:0:(llog_cat.c:793:llog_cat_cancel_records()) Skipped 172 previous similar messages
[340681.741732] LustreError: 58187:0:(llog_osd.c:616:llog_osd_write_rec()) soaked-MDT0003-osp-MDT0000: index 36369 already set in log bitmap
[340681.755535] LustreError: 58187:0:(llog_osd.c:618:llog_osd_write_rec()) LBUG
[340681.763417] Pid: 58187, comm: mdt_rdpg00_004 3.10.0-1062.9.1.el7_lustre.x86_64 #1 SMP Tue Feb 11 19:11:56 UTC 2020
[340681.775080] Call Trace:
[340681.777921]  [&amp;lt;ffffffffc1041e6c&amp;gt;] libcfs_call_trace+0x8c/0xc0 [libcfs]
[340681.785341]  [&amp;lt;ffffffffc1041f1c&amp;gt;] lbug_with_loc+0x4c/0xa0 [libcfs]
[340681.792402]  [&amp;lt;ffffffffc10d34e5&amp;gt;] llog_osd_write_rec+0x1395/0x14b0 [obdclass]
[340681.800519]  [&amp;lt;ffffffffc10c22c3&amp;gt;] llog_write_rec+0x273/0x540 [obdclass]
[340681.808044]  [&amp;lt;ffffffffc10c79f0&amp;gt;] llog_cat_add_rec+0x220/0x880 [obdclass]
[340681.815763]  [&amp;lt;ffffffffc10bf422&amp;gt;] llog_add+0x162/0x1d0 [obdclass]
[340681.822701]  [&amp;lt;ffffffffc1490a01&amp;gt;] sub_updates_write+0x2e8/0xdd7 [ptlrpc]
[340681.830372]  [&amp;lt;ffffffffc1472027&amp;gt;] top_trans_stop+0x827/0xbc0 [ptlrpc]
[340681.837748]  [&amp;lt;ffffffffc19ab98c&amp;gt;] lod_trans_stop+0x25c/0x340 [lod]
[340681.844795]  [&amp;lt;ffffffffc1a6623e&amp;gt;] mdd_trans_stop+0x2e/0x174 [mdd]
[340681.851729]  [&amp;lt;ffffffffc1a59a07&amp;gt;] mdd_attr_set+0x6a7/0xdd0 [mdd]
[340681.858558]  [&amp;lt;ffffffffc18e21b9&amp;gt;] mdt_mfd_close+0x6b9/0x860 [mdt]
[340681.865523]  [&amp;lt;ffffffffc18e7a71&amp;gt;] mdt_close_internal+0x121/0x220 [mdt]
[340681.872958]  [&amp;lt;ffffffffc18e7d91&amp;gt;] mdt_close+0x221/0x790 [mdt]
[340681.879515]  [&amp;lt;ffffffffc145db9a&amp;gt;] tgt_request_handle+0x95a/0x1630 [ptlrpc]
[340681.887378]  [&amp;lt;ffffffffc1402f76&amp;gt;] ptlrpc_server_handle_request+0x256/0xb10 [ptlrpc]
[340681.896074]  [&amp;lt;ffffffffc1407424&amp;gt;] ptlrpc_main+0xbb4/0x1550 [ptlrpc]
[340681.903228]  [&amp;lt;ffffffff842c61f1&amp;gt;] kthread+0xd1/0xe0
[340681.908808]  [&amp;lt;ffffffff8498dd37&amp;gt;] ret_from_fork_nospec_end+0x0/0x39
[340681.915929]  [&amp;lt;ffffffffffffffff&amp;gt;] 0xffffffffffffffff
[340681.921610] Kernel panic - not syncing: LBUG
[340681.926473] CPU: 7 PID: 58187 Comm: mdt_rdpg00_004 Kdump: loaded Tainted: P           OE  ------------   3.10.0-1062.9.1.el7_lustre.x86_64 #1
[340681.940752] Hardware name: Intel Corporation S2600GZ ........../S2600GZ, BIOS SE5C600.86B.01.08.0003.022620131521 02/26/2013
[340681.953368] Call Trace:
[340681.956203]  [&amp;lt;ffffffff8497ac23&amp;gt;] dump_stack+0x19/0x1b
[340681.962038]  [&amp;lt;ffffffff84974967&amp;gt;] panic+0xe8/0x21f
[340681.967489]  [&amp;lt;ffffffffc1041f6b&amp;gt;] lbug_with_loc+0x9b/0xa0 [libcfs]
[340681.974504]  [&amp;lt;ffffffffc10d34e5&amp;gt;] llog_osd_write_rec+0x1395/0x14b0 [obdclass]
[340681.982585]  [&amp;lt;ffffffffc10c22c3&amp;gt;] llog_write_rec+0x273/0x540 [obdclass]
[340681.990085]  [&amp;lt;ffffffffc10c79f0&amp;gt;] llog_cat_add_rec+0x220/0x880 [obdclass]
[340681.997775]  [&amp;lt;ffffffffc10bf422&amp;gt;] llog_add+0x162/0x1d0 [obdclass]
[340682.004696]  [&amp;lt;ffffffffc10edf19&amp;gt;] ? lprocfs_counter_add+0xf9/0x160 [obdclass]
[340682.012803]  [&amp;lt;ffffffffc1490a01&amp;gt;] sub_updates_write+0x2e8/0xdd7 [ptlrpc]
[340682.020422]  [&amp;lt;ffffffffc1475b43&amp;gt;] ? update_records_attr_set_pack+0xc3/0xf0 [ptlrpc]
[340682.029124]  [&amp;lt;ffffffffc14906fd&amp;gt;] ? prepare_writing_updates.isra.12+0x139/0x14a [ptlrpc]
[340682.038311]  [&amp;lt;ffffffffc1472027&amp;gt;] top_trans_stop+0x827/0xbc0 [ptlrpc]
[340682.045606]  [&amp;lt;ffffffffc19ab98c&amp;gt;] lod_trans_stop+0x25c/0x340 [lod]
[340682.052620]  [&amp;lt;ffffffffc1a6623e&amp;gt;] mdd_trans_stop+0x2e/0x174 [mdd]
[340682.059528]  [&amp;lt;ffffffffc1a59a07&amp;gt;] mdd_attr_set+0x6a7/0xdd0 [mdd]
[340682.066362]  [&amp;lt;ffffffffc13f5e97&amp;gt;] ? lustre_msg_add_version+0x27/0xa0 [ptlrpc]
[340682.074451]  [&amp;lt;ffffffffc18e21b9&amp;gt;] mdt_mfd_close+0x6b9/0x860 [mdt]
[340682.081377]  [&amp;lt;ffffffffc13f636f&amp;gt;] ? lustre_pack_reply_flags+0x6f/0x1e0 [ptlrpc]
[340682.089646]  [&amp;lt;ffffffffc18e7a71&amp;gt;] mdt_close_internal+0x121/0x220 [mdt]
[340682.097039]  [&amp;lt;ffffffffc18e7d91&amp;gt;] mdt_close+0x221/0x790 [mdt]
[340682.103593]  [&amp;lt;ffffffffc145db9a&amp;gt;] tgt_request_handle+0x95a/0x1630 [ptlrpc]
[340682.111377]  [&amp;lt;ffffffffc102902e&amp;gt;] ? ktime_get_real_seconds+0xe/0x10 [libcfs]
[340682.119391]  [&amp;lt;ffffffffc1402f76&amp;gt;] ptlrpc_server_handle_request+0x256/0xb10 [ptlrpc]
[340682.128070]  [&amp;lt;ffffffffc13fde2e&amp;gt;] ? ptlrpc_wait_event+0x12e/0x5b0 [ptlrpc]
[340682.135852]  [&amp;lt;ffffffff842d3903&amp;gt;] ? __wake_up+0x13/0x20
[340682.141814]  [&amp;lt;ffffffffc140254a&amp;gt;] ? ptlrpc_server_handle_req_in+0x92a/0x1100 [ptlrpc]
[340682.150657]  [&amp;lt;ffffffff842c72e0&amp;gt;] ? wake_up_atomic_t+0x30/0x30
[340682.157298]  [&amp;lt;ffffffffc1407424&amp;gt;] ptlrpc_main+0xbb4/0x1550 [ptlrpc]
[340682.164440]  [&amp;lt;ffffffffc1406870&amp;gt;] ? ptlrpc_register_service+0xf90/0xf90 [ptlrpc]
[340682.172793]  [&amp;lt;ffffffff842c61f1&amp;gt;] kthread+0xd1/0xe0
[340682.178335]  [&amp;lt;ffffffff842c6120&amp;gt;] ? insert_kthread_work+0x40/0x40
[340682.185231]  [&amp;lt;ffffffff8498dd37&amp;gt;] ret_from_fork_nospec_begin+0x21/0x21
[340682.192614]  [&amp;lt;ffffffff842c6120&amp;gt;] ? insert_kthread_work+0x40/0x40
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="268273" author="bzzz" created="Wed, 22 Apr 2020 17:54:54 +0000"  >&lt;p&gt;I think this share root cause with &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-13195&quot; title=&quot;replay-single test_118: dt_declare_record_write() ASSERTION( dt-&amp;gt;do_body_ops ) failed&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-13195&quot;&gt;&lt;del&gt;LU-13195&lt;/del&gt;&lt;/a&gt; - basically llog got corrupted and then lead to different symptoms.&lt;/p&gt;</comment>
                            <comment id="268296" author="sarah" created="Wed, 22 Apr 2020 21:03:00 +0000"  >&lt;p&gt;The LBUG seems can be reproduced easily on soak, I have come cross this 2/3 times so far.&lt;/p&gt;</comment>
                            <comment id="268329" author="bzzz" created="Thu, 23 Apr 2020 07:32:48 +0000"  >&lt;p&gt;would it be possible to grab full Lustre logs with libcfs_panic_on_lbug=0 ?&lt;/p&gt;</comment>
                            <comment id="268366" author="sarah" created="Thu, 23 Apr 2020 16:11:26 +0000"  >&lt;p&gt;Hi Alex, I will restart with the debug on&lt;/p&gt;</comment>
                            <comment id="268459" author="sarah" created="Fri, 24 Apr 2020 04:18:38 +0000"  >&lt;p&gt;Hi Alex,&lt;/p&gt;

&lt;p&gt;I am having a weird issue when setting panic_on_lbug=0 permanently on soak-8 (MGS). Here is what I did:&lt;br/&gt;
1. lctl set_param -P panic_on_lbug=0&lt;br/&gt;
2. umounted and remounted as ldiskfs and checked the config log; the value was set to 0&lt;br/&gt;
3. mounted lustre and checked again: panic_on_lbug=1, it didn&apos;t change.&lt;/p&gt;

&lt;p&gt;I am not sure if this is related to the llog issue here; can you please check? Do you need any logs for this? If it is unrelated, I will create a new ticket, and may need to delete the bad stuff and restart.&lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;</comment>
                            <comment id="268588" author="bzzz" created="Sun, 26 Apr 2020 18:03:58 +0000"  >&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/secure/ViewProfile.jspa?name=sarah&quot; class=&quot;user-hover&quot; rel=&quot;sarah&quot;&gt;sarah&lt;/a&gt; I don&apos;t think there is any relation here. I think you can either modify the source or set panic_on_lbug=0 in the scripts? or in modules conf file&lt;/p&gt;</comment>
                            <comment id="268882" author="sarah" created="Wed, 29 Apr 2020 17:55:09 +0000"  >&lt;p&gt;I just uploaded the lustre log and trace of soak-8, with panic_on_lbug=0. Please let me know if anything else needed.&lt;/p&gt;</comment>
                            <comment id="269390" author="bzzz" created="Wed, 6 May 2020 06:51:45 +0000"  >&lt;p&gt;thanks.. looking at the logs - there were lots of invalidations in OSP which shouldn&apos;t be common - regular failover shouldn&apos;t cause this.&lt;br/&gt;
can you please explain what the test is doing?&lt;/p&gt;</comment>
                            <comment id="269440" author="sarah" created="Wed, 6 May 2020 17:23:07 +0000"  >&lt;p&gt;there are 2 kinds of mds fault injections, I think when the crash happened, it was in the middle of mds_failover&lt;br/&gt;
1. mds1 failover&lt;br/&gt;
reboot mds1&lt;br/&gt;
mount the disks to failover pair mds2&lt;br/&gt;
after mds1 up, fail back the disks to mds1&lt;/p&gt;

&lt;p&gt;2. mds restart&lt;br/&gt;
this is similar to mds failover, just not mounting the disk to the failover pair but wait and mount the disk back when the server is up&lt;/p&gt;</comment>
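<!--
A rough shell sketch of the mds_failover sequence described in the preceding comment;
the device paths, mount points, and host names are hypothetical placeholders, not the
soak framework's actual commands:

    # after mds1 is rebooted, mount its MDT on the failover pair mds2:
    ssh mds2 mount -t lustre /dev/mapper/mdt1 /mnt/soaked-mdt1

    # once mds1 is back up, fail the target back:
    ssh mds2 umount /mnt/soaked-mdt1
    ssh mds1 mount -t lustre /dev/mapper/mdt1 /mnt/soaked-mdt1
-->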
                            <comment id="269451" author="bzzz" created="Wed, 6 May 2020 18:44:38 +0000"  >&lt;p&gt;sorry, it&apos;s not quite enough information..&lt;br/&gt;
it would be very helpful if you can start the test and then grab logs (let&apos;s start with messages and/or consoles) from all the nodes.&lt;br/&gt;
one interesting thing from the log attached:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;
[ 1279.175117] sd 0:0:1:1: task abort: SUCCESS scmd(ffff99512626abc0)
[ 1279.182085] sd 0:0:1:1: attempting task abort! scmd(ffff99512626aa00)
[ 1279.189301] sd 0:0:1:1: [sdi] tag#96 CDB: Write(16) 8a 00 00 00 00 00 02 a8 01 90 00 00 00 08 00 00
[ 1279.199423] scsi target0:0:1: handle(0x0009), sas_address(0x50080e52ff4f0004), phy(0)
[ 1279.208168] scsi target0:0:1: enclosure logical id(0x500605b005d6e9a0), slot(3) 
[ 1279.367751] sd 0:0:1:1: task abort: SUCCESS scmd(ffff99512626aa00)
[ 1279.374697] sd 0:0:1:1: attempting task abort! scmd(ffff99512626a840)
[ 1279.381918] sd 0:0:1:1: [sdi] tag#95 CDB: Write(16) 8a 00 00 00 00 00 02 a8 01 70 00 00 00 08 00 00
[ 1279.392037] scsi target0:0:1: handle(0x0009), sas_address(0x50080e52ff4f0004), phy(0)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;I guess this shouldn&apos;t happen during this test?&lt;/p&gt;</comment>
                            <comment id="269611" author="sarah" created="Thu, 7 May 2020 22:40:55 +0000"  >&lt;p&gt;Ok, I will restart the tests and post logs.&lt;br/&gt;
The quoted log seems hardware related, not expected during the test.&lt;/p&gt;</comment>
                            <comment id="269867" author="sarah" created="Mon, 11 May 2020 22:19:08 +0000"  >&lt;p&gt;restarted the test, not seeing the LBUG, but MDS failover still failed. The secondary MDS didn&apos;t failback the device, please check the 2 attachments ending with 051120 &lt;span class=&quot;nobr&quot;&gt;&lt;a href=&quot;https://jira.whamcloud.com/secure/attachment/34890/34890_soak-11.log-051120&quot; title=&quot;soak-11.log-051120 attached to LU-13469&quot;&gt;soak-11.log-051120&lt;sup&gt;&lt;img class=&quot;rendericon&quot; src=&quot;https://jira.whamcloud.com/images/icons/link_attachment_7.gif&quot; height=&quot;7&quot; width=&quot;7&quot; align=&quot;absmiddle&quot; alt=&quot;&quot; border=&quot;0&quot;/&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/span&gt;  &lt;span class=&quot;nobr&quot;&gt;&lt;a href=&quot;https://jira.whamcloud.com/secure/attachment/34891/34891_trace-s-11-051120&quot; title=&quot;trace-s-11-051120 attached to LU-13469&quot;&gt;trace-s-11-051120&lt;sup&gt;&lt;img class=&quot;rendericon&quot; src=&quot;https://jira.whamcloud.com/images/icons/link_attachment_7.gif&quot; height=&quot;7&quot; width=&quot;7&quot; align=&quot;absmiddle&quot; alt=&quot;&quot; border=&quot;0&quot;/&gt;&lt;/sup&gt;&lt;/a&gt;&lt;/span&gt; &lt;/p&gt;</comment>
                            <comment id="270099" author="bzzz" created="Wed, 13 May 2020 17:53:48 +0000"  >&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/secure/ViewProfile.jspa?name=sarah&quot; class=&quot;user-hover&quot; rel=&quot;sarah&quot;&gt;sarah&lt;/a&gt; I think you should try with the recent master which has &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-13402&quot; title=&quot;sanity test_252: Invalid number of mdtlov clients returned by /usr/sbin/lr_reader&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-13402&quot;&gt;&lt;del&gt;LU-13402&lt;/del&gt;&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="270741" author="bzzz" created="Wed, 20 May 2020 16:54:48 +0000"  >&lt;p&gt;hello, any updates on this issue?&lt;/p&gt;

&lt;p&gt;&#160;&lt;/p&gt;</comment>
                            <comment id="270988" author="pjones" created="Sat, 23 May 2020 13:51:03 +0000"  >&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/secure/ViewProfile.jspa?name=bzzz&quot; class=&quot;user-hover&quot; rel=&quot;bzzz&quot;&gt;bzzz&lt;/a&gt; I believe that the testing has been delayed due to hardware issues&lt;/p&gt;</comment>
                            <comment id="271442" author="sarah" created="Thu, 28 May 2020 17:19:19 +0000"  >&lt;p&gt;Here is the update. 1 MDS hit LBUG again but a different one when running with master build including the fix of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-13402&quot; title=&quot;sanity test_252: Invalid number of mdtlov clients returned by /usr/sbin/lr_reader&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-13402&quot;&gt;&lt;del&gt;LU-13402&lt;/del&gt;&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;soak-8 console&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[11360.312904] Lustre: Failing over soaked-MDT0001
[11360.312929] LustreError: 11-0: soaked-MDT0001-osp-MDT0000: operation out_update to node 0@lo failed: rc = -19
[11360.312937] Lustre: soaked-MDT0001-osp-MDT0000: Connection to soaked-MDT0001 (at 0@lo) was lost; in progress operations using this service will wait for recovery to complete
[11360.349350] LustreError: 4582:0:(import.c:705:ptlrpc_connect_import_locked()) can&apos;t connect to a closed import
[11360.853248] Lustre: server umount soaked-MDT0001 complete
[11361.443391] LustreError: 137-5: soaked-MDT0001_UUID: not available for connect from 192.168.1.102@o2ib (no target). If you are running an HA pair check that the target is mounted on the other server.
[11361.463245] LustreError: Skipped 93 previous similar messages
[11363.753573] Lustre: soaked-MDT0000: Received LWP connection from 192.168.1.109@o2ib, removing former export from 0@lo
[11363.765487] Lustre: Skipped 1 previous similar message
[11393.837251] LustreError: 167-0: soaked-MDT0001-osp-MDT0000: This client was evicted by soaked-MDT0001; in progress operations using this service will fail.
[11393.853109] LustreError: 4733:0:(llog_osd.c:263:llog_osd_read_header()) soaked-MDT0001-osp-MDT0000: bad log  [0x240000401:0x1:0x0] header magic: 0x0 (expected 0x10645539)
[11393.871120] LustreError: 4733:0:(lod_sub_object.c:938:lod_sub_prep_llog()) ASSERTION( ctxt != ((void *)0) ) failed: 
[11393.882886] LustreError: 4733:0:(lod_sub_object.c:938:lod_sub_prep_llog()) LBUG
[11393.891077] Pid: 4733, comm: lod0000_rec0001 3.10.0-1062.9.1.el7_lustre.x86_64 #1 SMP Thu Apr 23 02:14:20 UTC 2020
[11393.902641] Call Trace:
[11393.905394]  [&amp;lt;ffffffffc0ad10cc&amp;gt;] libcfs_call_trace+0x8c/0xc0 [libcfs]
[11393.912739]  [&amp;lt;ffffffffc0ad117c&amp;gt;] lbug_with_loc+0x4c/0xa0 [libcfs]
[11393.919677]  [&amp;lt;ffffffffc14bd168&amp;gt;] lod_sub_prep_llog+0x708/0x783 [lod]
[11393.926941]  [&amp;lt;ffffffffc1479ac3&amp;gt;] lod_sub_recovery_thread+0x263/0xc10 [lod]
[11393.934759]  [&amp;lt;ffffffffa4ac61f1&amp;gt;] kthread+0xd1/0xe0
[11393.940234]  [&amp;lt;ffffffffa518dd37&amp;gt;] ret_from_fork_nospec_end+0x0/0x39
[11393.947259]  [&amp;lt;ffffffffffffffff&amp;gt;] 0xffffffffffffffff
[11393.953057] LustreError: dumping log to /tmp/lustre-log.1590658382.4733
[11520.707452] INFO: task lod0000_rec0001:4733 blocked for more than 120 seconds.
[11520.715960] &quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
[11520.724727] lod0000_rec0001 D ffff9fe7563b5230     0  4733      2 0x00000080
[11520.732665] Call Trace:
[11520.735438]  [&amp;lt;ffffffffa4adb4c0&amp;gt;] ? wake_up_state+0x20/0x20
[11520.741684]  [&amp;lt;ffffffffa5180a09&amp;gt;] schedule+0x29/0x70
[11520.747261]  [&amp;lt;ffffffffc0ad11ad&amp;gt;] lbug_with_loc+0x7d/0xa0 [libcfs]
[11520.754205]  [&amp;lt;ffffffffc14bd168&amp;gt;] lod_sub_prep_llog+0x708/0x783 [lod]
[11520.761429]  [&amp;lt;ffffffffc1479ac3&amp;gt;] lod_sub_recovery_thread+0x263/0xc10 [lod]
[11520.769234]  [&amp;lt;ffffffffc1479860&amp;gt;] ? lod_connect_to_osd+0xc00/0xc00 [lod]
[11520.776734]  [&amp;lt;ffffffffa4ac61f1&amp;gt;] kthread+0xd1/0xe0
[11520.782200]  [&amp;lt;ffffffffa4ac6120&amp;gt;] ? insert_kthread_work+0x40/0x40
[11520.789020]  [&amp;lt;ffffffffa518dd37&amp;gt;] ret_from_fork_nospec_begin+0x21/0x21
[11520.796318]  [&amp;lt;ffffffffa4ac6120&amp;gt;] ? insert_kthread_work+0x40/0x40
[11550.607865] Lustre: 4736:0:(ldlm_lib.c:1863:extend_recovery_timer()) soaked-MDT0000: extended recovery timer reached hard limit: 900, extend: 1
[11550.622272] Lustre: 4736:0:(ldlm_lib.c:1863:extend_recovery_timer()) Skipped 31 previous similar messages
[11640.796481] INFO: task lod0000_rec0001:4733 blocked for more than 120 seconds.
[11640.804564] &quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
[11640.813320] lod0000_rec0001 D ffff9fe7563b5230     0  4733      2 0x00000080
[11640.821289] Call Trace:
[11640.824059]  [&amp;lt;ffffffffa4adb4c0&amp;gt;] ? wake_up_state+0x20/0x20
[11640.830319]  [&amp;lt;ffffffffa5180a09&amp;gt;] schedule+0x29/0x70
[11640.835909]  [&amp;lt;ffffffffc0ad11ad&amp;gt;] lbug_with_loc+0x7d/0xa0 [libcfs]
[11640.842839]  [&amp;lt;ffffffffc14bd168&amp;gt;] lod_sub_prep_llog+0x708/0x783 [lod]
[11640.850079]  [&amp;lt;ffffffffc1479ac3&amp;gt;] lod_sub_recovery_thread+0x263/0xc10 [lod]
[11640.857882]  [&amp;lt;ffffffffc1479860&amp;gt;] ? lod_connect_to_osd+0xc00/0xc00 [lod]
[11640.865394]  [&amp;lt;ffffffffa4ac61f1&amp;gt;] kthread+0xd1/0xe0
[11640.870885]  [&amp;lt;ffffffffa4ac6120&amp;gt;] ? insert_kthread_work+0x40/0x40
[11640.877723]  [&amp;lt;ffffffffa518dd37&amp;gt;] ret_from_fork_nospec_begin+0x21/0x21
[11640.885019]  [&amp;lt;ffffffffa4ac6120&amp;gt;] ? insert_kthread_work+0x40/0x40
[11760.885726] INFO: task lod0000_rec0001:4733 blocked for more than 120 seconds.
[11760.893864] &quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.
[11760.902634] lod0000_rec0001 D ffff9fe7563b5230     0  4733      2 0x00000080
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="271446" author="bzzz" created="Thu, 28 May 2020 18:02:15 +0000"  >&lt;p&gt;how many successful failovers had it done?&lt;/p&gt;</comment>
                            <comment id="271540" author="sarah" created="Fri, 29 May 2020 16:49:15 +0000"  >&lt;p&gt;9 MDS failover completed before the failure&lt;/p&gt;</comment>
                            <comment id="271579" author="bzzz" created="Sat, 30 May 2020 04:26:02 +0000"  >&lt;blockquote&gt;&lt;p&gt;9 MDS failover completed before the failure&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;is it any better than before?&lt;/p&gt;

&lt;p&gt;also, full logs are very appreciated.&lt;br/&gt;
there are two patches which can help to cure the corrupted llog&apos;s problem:&lt;br/&gt;
&lt;a href=&quot;https://review.whamcloud.com/#/c/38385/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/#/c/38385/&lt;/a&gt;&lt;br/&gt;
&lt;a href=&quot;https://review.whamcloud.com/#/c/38387/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/#/c/38387/&lt;/a&gt;&lt;/p&gt;</comment>
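<!--
A hedged sketch of one way to pull the two changes listed above onto a local tree. The
repository path and the trailing patchset number in each ref are assumptions; only the
change numbers 38385 and 38387 come from the links above:

    git clone https://review.whamcloud.com/fs/lustre-release
    cd lustre-release
    git fetch https://review.whamcloud.com/fs/lustre-release refs/changes/85/38385/1 && git cherry-pick FETCH_HEAD
    git fetch https://review.whamcloud.com/fs/lustre-release refs/changes/87/38387/1 && git cherry-pick FETCH_HEAD
-->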
                            <comment id="272266" author="sarah" created="Mon, 8 Jun 2020 16:05:29 +0000"  >&lt;p&gt;Hi Alex, yes, it is better than before.  Right now soak is running with b2_12 testing, I will try the patches when the testing is done. &lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                                                <inwardlinks description="is duplicated by">
                                                        </inwardlinks>
                                    </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="58560">LU-13402</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="57980">LU-13195</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="69090">LU-15644</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                            <attachment id="34785" name="lustre-log.1588133843.6068-soak-8" size="130151888" author="sarah" created="Wed, 29 Apr 2020 17:53:59 +0000"/>
                            <attachment id="34890" name="soak-11.log-051120" size="966465" author="sarah" created="Mon, 11 May 2020 22:19:09 +0000"/>
                            <attachment id="34731" name="soak-9.log-20200419.gz" size="187975" author="sarah" created="Wed, 22 Apr 2020 00:54:32 +0000"/>
                            <attachment id="34737" name="trace-8" size="1025881" author="sarah" created="Wed, 22 Apr 2020 17:25:54 +0000"/>
                            <attachment id="34891" name="trace-s-11-051120" size="1020709" author="sarah" created="Mon, 11 May 2020 22:19:09 +0000"/>
                            <attachment id="34786" name="trace-soak8" size="999717" author="sarah" created="Wed, 29 Apr 2020 17:51:06 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i00yf3:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>