<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:15:39 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-8218] lfsck not able to recover files lost from MDT</title>
                <link>https://jira.whamcloud.com/browse/LU-8218</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;My understanding is that lfsck in lustre-2.7 should be able to handle lost file information on the MDT, as long as the objects are still on the OSTs.  However, a simple test to simulate this is not recovering the files.  Shouldn&apos;t it at least be able to put them into lost+found?  Or am I misunderstanding the capabilities of lfsck?  Or is the following test case invalid in some way?&lt;/p&gt;

&lt;p&gt;On the client, just create some test files...&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# cd /mnt/lustre/client/lfscktest
# echo foo &amp;gt; foo
# mkdir bar
# echo baz &amp;gt; bar/baz

# lfs getstripe foo bar/baz
foo
lmm_stripe_count:   1
lmm_stripe_size:    1048576
lmm_pattern:        1
lmm_layout_gen:     0
lmm_stripe_offset:  9
    obdidx         objid         objid         group
         9            460962          0x708a2                 0

bar/baz
lmm_stripe_count:   1
lmm_stripe_size:    1048576
lmm_pattern:        1
lmm_layout_gen:     0
lmm_stripe_offset:  12
    obdidx         objid         objid         group
        12            460866          0x70842                 0

# sync
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;On the MDS, simulate the MDT losing the information, such as could happen through restoring from a slightly outdated MDT backup...&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# umount /mnt/lustre/nbptest-mdt
# mount -t ldiskfs /dev/mapper/nbptest--vg-mdttest /mnt/lustre/nbptest-mdt
# cd /mnt/lustre/nbptest-mdt/ROOT

# ls -ld lfscktest lfscktest/*
drwxr-xr-x+ 3 root root 4096 May 30 08:15 lfscktest
drwxr-xr-x+ 2 root root 4096 May 30 08:15 lfscktest/bar
-rw-r--r--  1 root root    0 May 30 08:14 lfscktest/foo

# rm -rf lfscktest/*

# cd
# umount /mnt/lustre/nbptest-mdt
# mount -t lustre /dev/mapper/nbptest--vg-mdttest /mnt/lustre/nbptest-mdt
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Now check the filesystem...&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# lctl clear
# lctl debug_daemon start /var/log/lfsck.debug
# lctl lfsck_start -A -M nbptest-MDT0000 -c on -C on -o
Started LFSCK on the device nbptest-MDT0000: scrub layout namespace

# lctl get_param -n osd-ldiskfs.*.oi_scrub | grep status
status: init
status: completed

# lctl debug_daemon stop
# lctl debug_file /var/log/lfsck.debug | egrep -v &quot; (NRS|RPC) &quot; &amp;gt; /var/log/lfsck.log
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;And look back on the client...&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# cd /mnt/lustre/client/         

# ls -la lfscktest/
total 8
drwxr-xr-x+ 2 root root 4096 May 30 08:22 .
drwxr-xr-x+ 9 root root 4096 May 30 08:14 ..

# ls -la .lustre/lost+found/MDT0000
total 8
drwx------+ 3 root root 4096 May 27 10:44 .
dr-x------+ 3 root root 4096 May 27 09:01 ..
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Notice that there is no sign of the files being restored anywhere.  Nor do I find any mention of the object ID&apos;s in the lfsck.log file.&lt;/p&gt;

&lt;p&gt;Note that running lfsck_start with the &quot;-t layout&quot; option did not change the behaviour either.&lt;/p&gt;</description>
                <environment></environment>
        <key id="37256">LU-8218</key>
            <summary>lfsck not able to recover files lost from MDT</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="yong.fan">nasf</assignee>
                                    <reporter username="ndauchy">Nathan Dauchy</reporter>
                        <labels>
                    </labels>
                <created>Mon, 30 May 2016 15:50:26 +0000</created>
                <updated>Thu, 14 Jun 2018 21:41:19 +0000</updated>
                            <resolved>Thu, 22 Sep 2016 05:53:43 +0000</resolved>
                                    <version>Lustre 2.7.0</version>
                                    <fixVersion>Lustre 2.9.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>7</watches>
                                                                            <comments>
                            <comment id="153989" author="pjones" created="Mon, 30 May 2016 17:15:39 +0000"  >&lt;p&gt;Fan Yong&lt;/p&gt;

&lt;p&gt;Could you please advise&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="154034" author="yong.fan" created="Tue, 31 May 2016 08:40:30 +0000"  >&lt;p&gt;In theory, the layout LFSCK should have the functionality to find out orphan OST-objects, and use the orphan OST-objects&apos; PFID EA to re-generate the MDT-object&apos;s LOV EA. So how to find out orphan OST-objects is important. For layout LFSCK, orphan OST-object means it exists and has ever been modified after pre-created, but nobody reference it. In your case, I am not sure whether the data has been written back to the OST before the layout LFSCK. If the dirty data has not been written back to the OST in time, then related OST-object will be in the pre-created status, not modified, and then, the layout LFSCK will not regard it as orphan OST-object. That can be verified via dump (debugfs) related OST-object on the OST.&lt;/p&gt;

&lt;p&gt;In our sanity-lfsck test, we flush dirty data back to the OST via cancelling OST locks (lctl set_param -n ldlm.namespaces.&lt;b&gt;osc&lt;/b&gt;.lru_size=clear) on the client. Such mechanism has been verified. So please try as following:&lt;/p&gt;
&lt;ol&gt;
	&lt;li&gt;cd /mnt/lustre/client/lfscktest&lt;/li&gt;
	&lt;li&gt;echo foo &amp;gt; foo&lt;/li&gt;
	&lt;li&gt;lctl set_param -n ldlm.namespaces.&amp;#42;osc&amp;#42;.lru_size=clear&lt;br/&gt;
Then other subsequent operations as you did above. Start the layout LFSCK as &quot;lctl start_lfsck -M nbptest-MDT0000 -t layout -o -r&quot;.&lt;/li&gt;
&lt;/ol&gt;


&lt;p&gt;Thanks!&lt;/p&gt;</comment>
                            <comment id="154079" author="ndauchy" created="Tue, 31 May 2016 13:33:15 +0000"  >&lt;p&gt;Adding &quot;lru_size=clear&quot; to the test process did not seem to change anything.  I will try another attempt, this time with unmounting the client and fully restarting the file system targets.&lt;/p&gt;</comment>
                            <comment id="154087" author="ndauchy" created="Tue, 31 May 2016 14:21:33 +0000"  >&lt;p&gt;The following procedure did &lt;b&gt;not&lt;/b&gt; allow the files to be recovered either...&lt;/p&gt;
&lt;ul&gt;
	&lt;li&gt;write files on client&lt;/li&gt;
	&lt;li&gt;set_param lru_size=clear&lt;/li&gt;
	&lt;li&gt;unmount client&lt;/li&gt;
	&lt;li&gt;stop all targets&lt;/li&gt;
	&lt;li&gt;mount MDT as ldiskfs, and remove the files, unmount&lt;/li&gt;
	&lt;li&gt;start all targets&lt;/li&gt;
	&lt;li&gt;run lfsck as &quot;lctl lfsck_start -M nbptest-MDT0000 -t layout -o -r&quot;&lt;/li&gt;
&lt;/ul&gt;


&lt;p&gt;What other debugging information can I gather, to determine where those &quot;lost&quot; objects are ending up and why lfsck can&apos;t recover them?&lt;/p&gt;</comment>
                            <comment id="154207" author="yong.fan" created="Wed, 1 Jun 2016 03:12:24 +0000"  >&lt;p&gt;After &quot;set_param lru_size=clear&quot;, would you please to dump related OST-object&apos;s attr and PFID EA via debugfs on the OST to check whether it is modified properly? Thanks!&lt;/p&gt;</comment>
                            <comment id="154257" author="ndauchy" created="Wed, 1 Jun 2016 15:37:52 +0000"  >&lt;p&gt;Is this the information you are looking for?&lt;/p&gt;

&lt;p&gt;Client:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# cd /mnt/lustre/client/lfscktest
# echo foo &amp;gt; foo
# mkdir bar
# echo baz &amp;gt; bar/baz

# lctl get_param ldlm.namespaces.*osc*.lru_size | grep -v =0
  ldlm.namespaces.nbptest-OST0000-osc-ffff8805daad9800.lru_size=1
  ldlm.namespaces.nbptest-OST0007-osc-ffff8805daad9800.lru_size=2
  ldlm.namespaces.nbptest-OST0008-osc-ffff8805daad9800.lru_size=1
  ldlm.namespaces.nbptest-OST0009-osc-ffff8805daad9800.lru_size=1
  ldlm.namespaces.nbptest-OST000b-osc-ffff8805daad9800.lru_size=1
  ldlm.namespaces.nbptest-OST000c-osc-ffff8805daad9800.lru_size=1
# lctl set_param -n ldlm.namespaces.*osc*.lru_size=clear
# lctl get_param ldlm.namespaces.*osc*.lru_size | grep -v =0
  (nothing returned)

# getfattr -d -m &quot;.*&quot; -e hex foo bar/baz 
# file: foo
lustre.lov=0xd00bd10b010000000100000000000000b03a0000020000000000100001000000e20807000000000000000000000000000000000008000000
trusted.link=0xdff1ea11010000002d00000000000000000000000000000000150000000200002b100000000900000000666f6f
trusted.lma=0x0000000000000000b03a0000020000000100000000000000
trusted.lov=0xd00bd10b010000000100000000000000b03a0000020000000000100001000000e20807000000000000000000000000000000000008000000

# file: bar/baz
lustre.lov=0xd00bd10b010000000300000000000000b03a0000020000000000100001000000e2080700000000000000000000000000000000000b000000
trusted.link=0xdff1ea11010000002d00000000000000000000000000000000150000000200003ab0000000020000000062617a
trusted.lma=0x0000000000000000b03a0000020000000300000000000000
trusted.lov=0xd00bd10b010000000300000000000000b03a0000020000000000100001000000e2080700000000000000000000000000000000000b000000

service320 /mnt/lustre/client/lfscktest # 

# lfs getstripe foo bar/baz
foo
lmm_stripe_count:   1
lmm_stripe_size:    1048576
lmm_pattern:        1
lmm_layout_gen:     0
lmm_stripe_offset:  8
	obdidx		 objid		 objid		 group
	     8	        461026	      0x708e2	             0

bar/baz
lmm_stripe_count:   1
lmm_stripe_size:    1048576
lmm_pattern:        1
lmm_layout_gen:     0
lmm_stripe_offset:  11
	obdidx		 objid		 objid		 group
	    11	        461026	      0x708e2	             0
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# echo $(( 461026 % 32 ))         
2

# debugfs /dev/mapper/nbptest-ost8
debugfs 1.42.13.wc4 (28-Nov-2015)
debugfs:  cd O
debugfs:  cd 0
debugfs:  cd d2
debugfs:  stat 461026
Inode: 487   Type: regular    Mode:  0666   Flags: 0x80000
Generation: 2904170364    Version: 0x0000000c:00000005
User:     0   Group:     0   Size: 4
File ACL: 0    Directory ACL: 0
Links: 1   Blockcount: 8
Fragment:  Address: 0    Number: 0    Size: 0
 ctime: 0x574efbee:00000000 -- Wed Jun  1 08:14:54 2016
 atime: 0x00000000:00000000 -- Wed Dec 31 16:00:00 1969
 mtime: 0x574efbee:00000000 -- Wed Jun  1 08:14:54 2016
crtime: 0x574d9b4f:3f4d531c -- Tue May 31 07:10:23 2016
Size of extra inode fields: 28
Extended attributes stored in inode body: 
invalid EA entry in inode
EXTENTS:
(0):152064
debugfs:  dump 461026 /tmp/obj.461026.foo
debugfs:  quit

# cat /tmp/obj.461026.foo
foo
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# debugfs /dev/mapper/nbptest-ost11
debugfs 1.42.13.wc4 (28-Nov-2015)
debugfs:  cd O
debugfs:  cd 0
debugfs:  cd d2
debugfs:  stat 461026
Inode: 489   Type: regular    Mode:  0666   Flags: 0x80000
Generation: 3312724559    Version: 0x0000000c:00000007
User:     0   Group:     0   Size: 4
File ACL: 0    Directory ACL: 0
Links: 1   Blockcount: 8
Fragment:  Address: 0    Number: 0    Size: 0
 ctime: 0x574efbf6:00000000 -- Wed Jun  1 08:15:02 2016
 atime: 0x00000000:00000000 -- Wed Dec 31 16:00:00 1969
 mtime: 0x574efbf6:00000000 -- Wed Jun  1 08:15:02 2016
crtime: 0x574d9b4f:58007134 -- Tue May 31 07:10:23 2016
Size of extra inode fields: 28
Extended attributes stored in inode body: 
invalid EA entry in inode
EXTENTS:
(0):128768
debugfs:  dump 461026 /tmp/obj.461026.baz
debugfs:  quit

# cat /tmp/obj.461026.baz
baz
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="154355" author="yong.fan" created="Thu, 2 Jun 2016 01:07:58 +0000"  >&lt;p&gt;Yes, that is what I want to know. The OST-object&apos;s size has been updated, that means the dirty data has been flushed back to the OST, although the PFID EA (&quot;trusted.fid&quot;) is not printed properly.&lt;/p&gt;

&lt;p&gt;Please run layout LFSCK just on this system with LFSCK debug enabled, and collect the kernel debug logs on both the MDT and nbptest-ost8 and nbptest-ost11. Thanks!&lt;/p&gt;</comment>
                            <comment id="154471" author="ndauchy" created="Thu, 2 Jun 2016 17:27:51 +0000"  >&lt;p&gt;debug logs from the servers while lfsck was run.&lt;/p&gt;

&lt;p&gt;service320 is client and where ost8 runs&lt;br/&gt;
service322 is MDS&lt;br/&gt;
service323 is where ost11 runs&lt;/p&gt;</comment>
                            <comment id="154545" author="yong.fan" created="Fri, 3 Jun 2016 06:49:04 +0000"  >&lt;p&gt;Because you only removed the files on the MDT under ldiskfs mode directly, but kept the OI files (oi.16.xxx) there which contains stale OI mappings for those removed MDT-objects as to the further LFSCK cannot locate objects properly. So please remove the OI files under ldiskfs mode and run LFSCK after that.&lt;/p&gt;

&lt;p&gt;Thanks!&lt;/p&gt;</comment>
                            <comment id="154563" author="ndauchy" created="Fri, 3 Jun 2016 12:48:06 +0000"  >&lt;p&gt;OK... I can test that, but what if this was a &quot;real&quot; case of MDT corruption where only the files were lost?  Is a new feature or phase in lfsck needed to manage the stale OI mappings?&lt;/p&gt;</comment>
                            <comment id="154564" author="yong.fan" created="Fri, 3 Jun 2016 13:59:00 +0000"  >&lt;p&gt;From the LFSCK view, the case of removing MDT-object directly without destroying the OI mapping is indistinguishable from the case of MDT file-level backup/restore. When the OSD tries to locate the local object/inode via the ino# that is obtained from the stale OI mapping, it does not know whether the real MDT-object exists or not. The possible solution is that the OI scrub should make double scanning: the first phase scanning is inode table based to scan all known objects on the device; the second phase scanning is OI files based to find out all stale OI mappings. Currently, it only does the first phase scanning.&lt;/p&gt;</comment>
                            <comment id="154845" author="ndauchy" created="Tue, 7 Jun 2016 00:19:13 +0000"  >&lt;p&gt;Just to clarify the status of this ticket... we are on hold waiting for a new phase of scanning to be added to lfsck?&lt;/p&gt;

&lt;p&gt;In the meantime, is there a workaround we can use as part of the MDT recovery procedure when getting such stale mappings is expected?  Can we mount as ldiskfs and manually check or clean things up?&lt;/p&gt;</comment>
                            <comment id="154847" author="yong.fan" created="Tue, 7 Jun 2016 01:04:31 +0000"  >&lt;p&gt;The workaround for your special case is that if you want to remove some MDT-object under &quot;ldiskfs&quot; mode directly, then please remove the OI files also.&lt;/p&gt;</comment>
                            <comment id="154863" author="gerrit" created="Tue, 7 Jun 2016 04:05:49 +0000"  >&lt;p&gt;Fan Yong (fan.yong@intel.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/20659&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/20659&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8218&quot; title=&quot;lfsck not able to recover files lost from MDT&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8218&quot;&gt;&lt;del&gt;LU-8218&lt;/del&gt;&lt;/a&gt; osd: handle stale OI mapping for non-restore case&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 31cf77414ad4f88c28d6eb2be54b32a7ec399ab7&lt;/p&gt;</comment>
                            <comment id="154864" author="yong.fan" created="Tue, 7 Jun 2016 05:15:31 +0000"  >&lt;p&gt;Nathan,&lt;/p&gt;

&lt;p&gt;Above patch may be not perfect solution, but it should be enough to resolve your case. &lt;/p&gt;</comment>
                            <comment id="155084" author="ndauchy" created="Wed, 8 Jun 2016 15:36:32 +0000"  >&lt;p&gt;Fan Yong, thank you for the patch!  I haven&apos;t had a chance to test with a new build yet, but did do a quick check of running lfsck after &quot;rm -f oi.16.*&quot; under ldiskfs.  The lfsck then resulted in files like the following in &quot;.lustre/lost+found/MDT0000/&quot;:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;.lustre/lost+found/MDT0000/[0x200003ab0:0x1:0x0]-R-0
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;That is what we should expect, even with the patch, right?  There is no way to determine the object&apos;s path once it is lost from the ROOT tree on the MDT?&lt;/p&gt;</comment>
                            <comment id="155190" author="yong.fan" created="Wed, 8 Jun 2016 23:48:44 +0000"  >&lt;blockquote&gt;
&lt;p&gt;That is what we should expect, even with the patch, right? There is no way to determine the object&apos;s path once it is lost from the ROOT tree on the MDT?&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;Yes, that is what we can do now. The path information is stored as linkEA (&quot;trusted.link&quot;) in the MDT-object. There is no other backup in the system. So if the MDT-object itself is lost, then the LFSCK cannot know its original location, and have to put it under .lustre/lost+found/&lt;/p&gt;</comment>
                            <comment id="155420" author="jaylan" created="Fri, 10 Jun 2016 21:32:20 +0000"  >&lt;p&gt;Hi Fan Yong,&lt;/p&gt;

&lt;p&gt;Do you intend to land &lt;a href=&quot;http://review.whamcloud.com/20659&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/20659&lt;/a&gt; to master and future releases or to provide a workaround for us?&lt;/p&gt;</comment>
                            <comment id="155442" author="yong.fan" created="Sun, 12 Jun 2016 01:26:08 +0000"  >&lt;p&gt;The patch 20659 should be landed to master, and then be ported to the other branches.&lt;/p&gt;</comment>
                            <comment id="166853" author="gerrit" created="Thu, 22 Sep 2016 03:04:30 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/20659/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/20659/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8218&quot; title=&quot;lfsck not able to recover files lost from MDT&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8218&quot;&gt;&lt;del&gt;LU-8218&lt;/del&gt;&lt;/a&gt; osd: handle stale OI mapping for non-restore case&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: cecde8bdb4913fd4405d425b0bf3aead03181e9d&lt;/p&gt;</comment>
                            <comment id="166859" author="pjones" created="Thu, 22 Sep 2016 05:53:43 +0000"  >&lt;p&gt;Landed for 2.9&lt;/p&gt;</comment>
                            <comment id="166929" author="mhanafi" created="Thu, 22 Sep 2016 16:18:50 +0000"  >&lt;p&gt;Can be closed. Add nasa label.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                                        </outwardlinks>
                                                                <inwardlinks description="is related to">
                                                        </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                            <attachment id="21724" name="LU-8218_lfsck_lost_files.tgz" size="1582541" author="ndauchy" created="Thu, 2 Jun 2016 17:27:51 +0000"/>
                            <attachment id="21661" name="lfsck.log" size="469883" author="ndauchy" created="Mon, 30 May 2016 15:50:26 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10490" key="com.atlassian.jira.plugin.system.customfieldtypes:datepicker">
                        <customfieldname>End date</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>Tue, 7 Jun 2016 15:50:26 +0000</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                            <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzyd47:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                        <customfield id="customfield_10493" key="com.atlassian.jira.plugin.system.customfieldtypes:datepicker">
                        <customfieldname>Start date</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>Mon, 30 May 2016 15:50:26 +0000</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                    </customfields>
    </item>
</channel>
</rss>