<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:48:24 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
<language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-11956] conf-sanity test_32a failed with 1</title>
                <link>https://jira.whamcloud.com/browse/LU-11956</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for jianyu &amp;lt;yujian@whamcloud.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://testing.whamcloud.com/test_sets/a63d2a9e-2b9c-11e9-b3df-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/a63d2a9e-2b9c-11e9-b3df-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;test_32a failed with the following error:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;CMD: trevis-24vm6 mount -t lustre -o loop,exclude=t32fs-OST0000 /tmp/t32/mdt /tmp/t32/mnt/mdt
CMD: trevis-24vm6 umount -d /tmp/t32/mnt/mdt
CMD: trevis-24vm6 rm -rf /tmp/t32
 conf-sanity test_32a: @@@@@@ FAIL: test_32a failed with 1 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;







&lt;p&gt;VVVVVVV DO NOT REMOVE LINES BELOW, Added by Maloo for auto-association VVVVVVV&lt;br/&gt;
conf-sanity test_32a - test_32a failed with 1&lt;/p&gt;</description>
                <environment></environment>
        <key id="54857">LU-11956</key>
            <summary>conf-sanity test_32a failed with 1</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="dongyang">Dongyang Li</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Mon, 11 Feb 2019 23:00:47 +0000</created>
                <updated>Fri, 29 May 2020 21:55:21 +0000</updated>
                            <resolved>Fri, 29 May 2020 21:55:21 +0000</resolved>
                                    <version>Lustre 2.13.0</version>
                    <version>Lustre 2.12.1</version>
                    <version>Lustre 2.12.4</version>
                                    <fixVersion>Lustre 2.13.0</fixVersion>
                    <fixVersion>Lustre 2.12.4</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>6</watches>
                                                                            <comments>
                            <comment id="241784" author="jamesanunez" created="Tue, 12 Feb 2019 18:27:58 +0000"  >&lt;p&gt;In the logs for this failure, we see the following in MDS1 console logs&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[40671.635023] LNet: 31304:0:(socklnd.c:2550:ksocknal_shutdown()) waiting for 1 peers to disconnect
[40704.635048] LNet: Removed LNI 10.9.5.40@tcp
[40704.708067] LustreError: 31346:0:(class_obd.c:824:obdclass_exit()) obd_memory max: 203787793, leaked: 7294919
[40705.248049] LNet: HW NUMA nodes: 1, HW CPU cores: 2, npartitions: 1
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Could this be related to &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-11920&quot; title=&quot;memory leak in lod_lds_buf_get()&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-11920&quot;&gt;&lt;del&gt;LU-11920&lt;/del&gt;&lt;/a&gt; ?&lt;/p&gt;</comment>
                            <comment id="245537" author="sarah" created="Wed, 10 Apr 2019 18:11:16 +0000"  >&lt;p&gt;another one on b2_12 branch SLES12 SP3 DNE&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sets/d9df000a-5ac9-11e9-a256-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/d9df000a-5ac9-11e9-a256-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;MDS console&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[34732.473160] Lustre: DEBUG MARKER: dmesg
[34733.623267] Lustre: DEBUG MARKER: /usr/sbin/lctl mark == conf-sanity test 32a: Upgrade \(not live\) ========================================================== 10:04:14 \(1554743054\)
[34733.670594] Lustre: DEBUG MARKER: == conf-sanity test 32a: Upgrade (not live) ========================================================== 10:04:14 (1554743054)
[34733.692376] Lustre: DEBUG MARKER: which tunefs.lustre
[34733.730553] Lustre: DEBUG MARKER: find /usr/lib64/lustre/tests -maxdepth 1 -name &apos;disk*-ldiskfs.tar.bz2&apos;
[34735.594154] Lustre: DEBUG MARKER: /usr/sbin/lctl list_nids
[34735.630206] Lustre: DEBUG MARKER: mkdir -p /tmp/t32/mnt/mdt /tmp/t32/mnt/mdt1 /tmp/t32/mnt/ost
[34735.663544] Lustre: DEBUG MARKER: tar xjvf /usr/lib64/lustre/tests/disk2_9-ldiskfs.tar.bz2 -S -C /tmp/t32
[34735.781726] Lustre: DEBUG MARKER: cat /tmp/t32/commit
[34735.815364] Lustre: DEBUG MARKER: cat /tmp/t32/kernel
[34735.848358] Lustre: DEBUG MARKER: cat /tmp/t32/arch
[34735.881322] Lustre: DEBUG MARKER: cat /tmp/t32/bspace
[34735.914886] Lustre: DEBUG MARKER: cat /tmp/t32/ispace
[34735.948408] Lustre: DEBUG MARKER: test -f /tmp/t32/blimit
[34735.982034] Lustre: DEBUG MARKER: cat /tmp/t32/blimit
[34736.015054] Lustre: DEBUG MARKER: test -f /tmp/t32/ilimit
[34736.048105] Lustre: DEBUG MARKER: cat /tmp/t32/ilimit
[34736.174414] Lustre: DEBUG MARKER: test -f /tmp/t32/mdt2
[34736.207377] Lustre: DEBUG MARKER: /usr/sbin/lctl set_param debug=-1
[34736.241592] Lustre: DEBUG MARKER: tunefs.lustre --dryrun /tmp/t32/mdt
[34736.957068] Lustre: DEBUG MARKER: PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/usr/lib64/lustre/tests//usr/lib64/lustre/tests:/usr/lib64/lustre/tests:/usr/lib64/lustre/tests/../utils:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lust
[34737.260424] Lustre: DEBUG MARKER: /usr/sbin/lctl mark trevis-35vm4: executing \/usr\/sbin\/lctl device_list
[34737.308969] Lustre: DEBUG MARKER: trevis-35vm4: executing /usr/sbin/lctl device_list
[34737.337722] Lustre: DEBUG MARKER: PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/usr/lib64/lustre/tests//usr/lib64/lustre/tests:/usr/lib64/lustre/tests:/usr/lib64/lustre/tests/../utils:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lust
[34737.630641] Lustre: DEBUG MARKER: /usr/sbin/lctl mark trevis-35vm4: executing losetup -a
[34737.677227] Lustre: DEBUG MARKER: trevis-35vm4: executing losetup -a
[34737.796388] Lustre: DEBUG MARKER: mount -t lustre -o loop,exclude=t32fs-OST0000 /tmp/t32/mdt /tmp/t32/mnt/mdt
[34738.028666] loop: module loaded
[34738.385785] LDISKFS-fs (loop0): mounted filesystem with ordered data mode. Opts: user_xattr,errors=remount-ro,no_mbcache,nodelalloc
[34738.790192] Lustre: 10306:0:(obd_mount.c:972:lustre_check_exclusion()) Excluding t32fs-OST0000 (on exclusion list)
[34753.912623] Lustre: t32fs-MDT0000: Imperative Recovery not enabled, recovery window 60-180
[34753.912630] Lustre: Skipped 7 previous similar messages
[34754.784409] Lustre: DEBUG MARKER: /usr/sbin/lctl get_param -n mdt.t32fs-MDT0000.uuid
[34754.819328] Lustre: DEBUG MARKER: tunefs.lustre --dryrun /tmp/t32/ost
[34755.614690] Lustre: DEBUG MARKER: mount -t lustre -onomgs -oloop,mgsnode=10.9.5.121@tcp /tmp/t32/ost /tmp/t32/mnt/ost
[34755.924426] LDISKFS-fs (loop1): mounted filesystem with ordered data mode. Opts: errors=remount-ro,no_mbcache,nodelalloc
[34759.777761] Lustre: DEBUG MARKER: /usr/sbin/lctl get_param -n obdfilter.t32fs-OST0000.uuid
[34759.812960] Lustre: DEBUG MARKER: /usr/sbin/lctl conf_param t32fs-OST0000.failover.node=10.9.5.121@tcp
[34759.851786] Lustre: DEBUG MARKER: /usr/sbin/lctl conf_param t32fs-MDT0000.failover.node=10.9.5.121@tcp
[34759.887341] Lustre: DEBUG MARKER: /usr/sbin/lctl conf_param t32fs-OST0000.osc.max_dirty_mb=15
[34759.923865] Lustre: DEBUG MARKER: /usr/sbin/lctl conf_param t32fs-MDT0000.mdc.max_rpcs_in_flight=9
[34759.959701] Lustre: DEBUG MARKER: /usr/sbin/lctl conf_param t32fs-MDT0000.lov.stripesize=4M
[34759.995805] Lustre: DEBUG MARKER: /usr/sbin/lctl conf_param t32fs-MDT0000.mdd.atime_diff=70
[34760.031367] Lustre: DEBUG MARKER: /usr/sbin/lctl pool_new t32fs.interop
[34763.312597] LustreError: 10284:0:(mgc_request.c:1572:mgc_apply_recover_logs()) mgc: cannot find uuid by nid 10.9.5.121@tcp
[34763.312609] Lustre: 10284:0:(mgc_request.c:1792:mgc_process_recover_nodemap_log()) MGC10.9.5.121@tcp: error processing recovery log t32fs-mdtir: rc = -2
[34763.312630] LustreError: 10284:0:(mgc_request.c:2127:mgc_process_log()) MGC10.9.5.121@tcp: recover log t32fs-mdtir failed, not fatal: rc = -2
[34763.320299] Lustre: 10570:0:(obd_mount.c:972:lustre_check_exclusion()) Excluding t32fs-OST0000 (on exclusion list)
[34763.812009] Lustre: t32fs-OST0000: deleting orphan objects from 0x0:33 to 0x0:97
[34764.072473] Lustre: DEBUG MARKER: umount /tmp/t32/mnt/mdt
[34764.093785] Lustre: Failing over t32fs-MDT0000
[34764.311493] LustreError: 10312:0:(osp_precreate.c:656:osp_precreate_send()) t32fs-OST0000-osc-MDT0000: can&apos;t precreate: rc = -5
[34764.311513] LustreError: 10312:0:(osp_precreate.c:1312:osp_precreate_thread()) t32fs-OST0000-osc-MDT0000: cannot precreate objects: rc = -5
[34764.789363] blk_update_request: I/O error, dev loop0, sector 0
[34764.794440] blk_update_request: I/O error, dev loop0, sector 0
[34764.799324] blk_update_request: I/O error, dev loop0, sector 0
[34764.802428] Lustre: DEBUG MARKER: umount /tmp/t32/mnt/ost
[34764.806207] blk_update_request: I/O error, dev loop0, sector 0
[34764.811405] blk_update_request: I/O error, dev loop0, sector 0
[34764.815350] blk_update_request: I/O error, dev loop0, sector 0
[34770.821690] Lustre: 10627:0:(client.c:2134:ptlrpc_expire_one_request()) @@@ Request sent has timed out for slow reply: [sent 1554743086/real 1554743086]  req@ffff880061977c80 x1630230038952912/t0(0) o39-&amp;gt;t32fs-MDT0000-lwp-OST0000@0@lo:12/10 lens 224/224 e 0 to 1 dl 1554743092 ref 2 fl Rpc:XN/0/ffffffff rc 0/-1
[34770.821698] Lustre: 10627:0:(client.c:2134:ptlrpc_expire_one_request()) Skipped 4 previous similar messages
[34778.313094] blk_update_request: I/O error, dev loop1, sector 0
[34778.317682] blk_update_request: I/O error, dev loop1, sector 0
[34778.322686] blk_update_request: I/O error, dev loop1, sector 0
[34778.328470] blk_update_request: I/O error, dev loop1, sector 0
[34778.332926] blk_update_request: I/O error, dev loop1, sector 0
[34778.334269] Lustre: DEBUG MARKER: PATH=/usr/lib64/lustre/tests:/usr/lib/lustre/tests:/usr/lib64/lustre/tests:/opt/iozone/bin:/usr/lib64/lustre/tests//usr/lib64/lustre/tests:/usr/lib64/lustre/tests:/usr/lib64/lustre/tests/../utils:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lust
[34778.343911] blk_update_request: I/O error, dev loop1, sector 0
[34778.621078] Lustre: DEBUG MARKER: /usr/sbin/lctl mark trevis-35vm4: executing \/usr\/sbin\/lustre_rmmod ldiskfs
[34778.661091] Lustre: DEBUG MARKER: trevis-35vm4: executing /usr/sbin/lustre_rmmod ldiskfs
[34780.437293] LNet: 11109:0:(socklnd.c:2550:ksocknal_shutdown()) waiting for 2 peers to disconnect
[34784.437136] LNet: 11109:0:(socklnd.c:2550:ksocknal_shutdown()) waiting for 2 peers to disconnect
[34792.436817] LNet: 11109:0:(socklnd.c:2550:ksocknal_shutdown()) waiting for 2 peers to disconnect
[34808.436197] LNet: 11109:0:(socklnd.c:2550:ksocknal_shutdown()) waiting for 2 peers to disconnect
[34840.434930] LNet: 11109:0:(socklnd.c:2550:ksocknal_shutdown()) waiting for 2 peers to disconnect
[34872.433661] LNet: Removed LNI 10.9.5.121@tcp
[34872.552055] LustreError: 11153:0:(class_obd.c:821:obdclass_exit()) obd_memory max: 201964618, leaked: 216
[34872.924983] LNet: HW NUMA nodes: 1, HW CPU cores: 2, npartitions: 1
[34872.928217] alg: No test for adler32 (adler32-zlib)
[34873.749768] Lustre: DEBUG MARKER: /usr/sbin/lctl mark trevis-35vm4: executing check_mem_leak
[34873.790449] Lustre: DEBUG MARKER: trevis-35vm4: executing check_mem_leak
[34873.922632] Lustre: DEBUG MARKER: /usr/sbin/lctl mark  conf-sanity test_32a: @@@@@@ FAIL: Reloading modules 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="250124" author="gerrit" created="Thu, 27 Jun 2019 03:37:05 +0000"  >&lt;p&gt;Li Dongyang (dongyangli@ddn.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/35333&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/35333&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-11956&quot; title=&quot;conf-sanity test_32a failed with 1&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-11956&quot;&gt;&lt;del&gt;LU-11956&lt;/del&gt;&lt;/a&gt; mdd: do not reset original lu_buf.lb_len&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: edd6b2321fff95feb52b70cbc9c2542bc4f9eccb&lt;/p&gt;</comment>
                            <comment id="255100" author="gerrit" created="Fri, 20 Sep 2019 07:54:52 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/35333/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/35333/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-11956&quot; title=&quot;conf-sanity test_32a failed with 1&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-11956&quot;&gt;&lt;del&gt;LU-11956&lt;/del&gt;&lt;/a&gt; mdd: do not reset original lu_buf.lb_len&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 94a5bc1bcb6c6373ead5b091ff5915dfe452377b&lt;/p&gt;</comment>
                            <comment id="255146" author="pjones" created="Fri, 20 Sep 2019 14:40:56 +0000"  >&lt;p&gt;Landed for 2.13&lt;/p&gt;</comment>
                            <comment id="259969" author="gerrit" created="Mon, 16 Dec 2019 14:16:11 +0000"  >&lt;p&gt;Sebastien Buisson (sbuisson@ddn.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/37029&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/37029&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-11956&quot; title=&quot;conf-sanity test_32a failed with 1&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-11956&quot;&gt;&lt;del&gt;LU-11956&lt;/del&gt;&lt;/a&gt; mdd: do not reset original lu_buf.lb_len&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_12&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 0a01e15afaf12c544d061bd1e14a0bf08c274cc3&lt;/p&gt;</comment>
                            <comment id="260594" author="gerrit" created="Fri, 3 Jan 2020 23:43:52 +0000"  >&lt;p&gt;Oleg Drokin (green@whamcloud.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/37029/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/37029/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-11956&quot; title=&quot;conf-sanity test_32a failed with 1&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-11956&quot;&gt;&lt;del&gt;LU-11956&lt;/del&gt;&lt;/a&gt; mdd: do not reset original lu_buf.lb_len&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: b2_12&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 9ba87725d1395dc729db7c90f72145e3a339f9c0&lt;/p&gt;</comment>
                            <comment id="261905" author="jamesanunez" created="Mon, 27 Jan 2020 17:53:02 +0000"  >&lt;p&gt;I&apos;m still seeing memory leaks for conf-sanity test 32a at &lt;a href=&quot;https://testing.whamcloud.com/test_sets/f4e2fb40-3ead-11ea-ac52-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/f4e2fb40-3ead-11ea-ac52-52540065bddc&lt;/a&gt; for 2.12.3.100. &lt;/p&gt;

&lt;p&gt;In the client test_log, we see&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;...
trevis-63vm8: trevis-63vm8.trevis.whamcloud.com: executing check_mem_leak
trevis-63vm8: [47023.722414] LustreError: 4730:0:(class_obd.c:821:obdclass_exit()) obd_memory max: 226318582, leaked: 33040
trevis-63vm8: 
trevis-63vm8: Memory leaks detected
trevis-63vm8: mv: cannot stat &apos;/tmp/debug&apos;: No such file or directory
 conf-sanity test_32a: @@@@@@ FAIL: Reloading modules 
  Trace dump:
  = /usr/lib64/lustre/tests/test-framework.sh:5888:error_noexit()
  = /usr/lib64/lustre/tests/conf-sanity.sh:2329:t32_test()
&#8230;
CMD: trevis-63vm8 /usr/sbin/lctl list_nids
trevis-63vm8: IOC_LIBCFS_GET_NI error 22: Invalid argument
CMD: trevis-63vm8 mkdir -p /tmp/t32/mnt/mdt /tmp/t32/mnt/mdt1 /tmp/t32/mnt/ost
&#8230;
exiting before disk write.
CMD: trevis-63vm8 mount -t lustre -o loop,exclude=t32fs-OST0000 /tmp/t32/mdt /tmp/t32/mnt/mdt
CMD: trevis-63vm8 umount -d /tmp/t32/mnt/mdt
CMD: trevis-63vm8 rm -rf /tmp/t32
 conf-sanity test_32a: @@@@@@ FAIL: test_32a failed with 1 
  Trace dump:
  = /usr/lib64/lustre/tests/test-framework.sh:5900:error()
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="268741" author="adilger" created="Tue, 28 Apr 2020 07:57:51 +0000"  >&lt;p&gt;Hit this when testing e2fsprogs against SLES12.3, but it was running 2.12.58 on the server, which does not have commit v2_12_58-70-g94a5bc1bcb.  I&apos;ve filed ATM-1728 to have e2fsprogs run against master, so that this issue is not hit.&lt;/p&gt;</comment>
                            <comment id="271566" author="adilger" created="Fri, 29 May 2020 21:55:21 +0000"  >&lt;p&gt;I&apos;m going to mark this issue resolved.  Memory leaks could happen in a number of different ways, so it makes sense to open separate tickets for those issues.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="56184">LU-12469</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                                        </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i00bh3:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>