<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:45:19 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-4727] Lhsmtool_posix process stuck in ll_layout_refresh() when restoring</title>
                <link>https://jira.whamcloud.com/browse/LU-4727</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This is easy to reproduce. I hit this problem every time when I trying to run following commands.&lt;/p&gt;

&lt;p&gt;rm /mnt/lustre/XXXX -f;&lt;br/&gt;
echo XXX &amp;gt; /mnt/lustre/XXXX;&lt;br/&gt;
cat /mnt/lustre/XXXX;&lt;br/&gt;
lfs hsm_archive --archive=5 /mnt/lustre/XXXX;&lt;br/&gt;
cat /mnt/lustre/XXXX;&lt;br/&gt;
lfs hsm_release /mnt/lustre/XXXX;&lt;br/&gt;
cat /mnt/lustre/XXXX; # This will restore automatically&lt;br/&gt;
lfs hsm_release /mnt/lustre/XXXX;&lt;br/&gt;
lfs hsm_restore /mnt/lustre/XXXX; # Lhsmtool_posix actually hang here&lt;br/&gt;
cat /mnt/lustre/XXXX; # this will stuck&lt;/p&gt;

&lt;p&gt;And after some time, the following messages showed up.&lt;/p&gt;

&lt;p&gt;INFO: task flush-lustre-1:4106 blocked for more than 120 seconds.&lt;br/&gt;
&quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.&lt;br/&gt;
flush-lustre- D 0000000000000005     0  4106      2 0x00000080&lt;br/&gt;
 ffff8808165b7830 0000000000000046 0000000000000000 0000000000000000&lt;br/&gt;
 0000000000013180 0000000000000000 ffff880851fc10f8 ffff88082d4e0c00&lt;br/&gt;
 ffff88082cb7fab8 ffff8808165b7fd8 000000000000fb88 ffff88082cb7fab8&lt;br/&gt;
Call Trace:&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff814fc9fe&amp;gt;&amp;#93;&lt;/span&gt; __mutex_lock_slowpath+0x13e/0x180&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff814fc89b&amp;gt;&amp;#93;&lt;/span&gt; mutex_lock+0x2b/0x50&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0c2814c&amp;gt;&amp;#93;&lt;/span&gt; ll_layout_refresh+0x26c/0x1080 &lt;span class=&quot;error&quot;&gt;&amp;#91;lustre&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff813104bb&amp;gt;&amp;#93;&lt;/span&gt; ? mix_pool_bytes_extract+0x16b/0x180&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81135cf9&amp;gt;&amp;#93;&lt;/span&gt; ? zone_statistics+0x99/0xc0&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa059e007&amp;gt;&amp;#93;&lt;/span&gt; ? cfs_hash_bd_lookup_intent+0x37/0x130 &lt;span class=&quot;error&quot;&gt;&amp;#91;libcfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0c51230&amp;gt;&amp;#93;&lt;/span&gt; ? ll_md_blocking_ast+0x0/0x7f0 &lt;span class=&quot;error&quot;&gt;&amp;#91;lustre&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa08b7450&amp;gt;&amp;#93;&lt;/span&gt; ? ldlm_completion_ast+0x0/0x930 &lt;span class=&quot;error&quot;&gt;&amp;#91;ptlrpc&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa06dbba1&amp;gt;&amp;#93;&lt;/span&gt; ? cl_io_slice_add+0xc1/0x190 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0c78410&amp;gt;&amp;#93;&lt;/span&gt; vvp_io_init+0x340/0x490 &lt;span class=&quot;error&quot;&gt;&amp;#91;lustre&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa05a11aa&amp;gt;&amp;#93;&lt;/span&gt; ? cfs_hash_find_or_add+0x9a/0x190 &lt;span class=&quot;error&quot;&gt;&amp;#91;libcfs&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa06daff8&amp;gt;&amp;#93;&lt;/span&gt; cl_io_init0+0x98/0x160 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa06ddc14&amp;gt;&amp;#93;&lt;/span&gt; cl_io_init+0x64/0xe0 &lt;span class=&quot;error&quot;&gt;&amp;#91;obdclass&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0c1894d&amp;gt;&amp;#93;&lt;/span&gt; cl_sync_file_range+0x12d/0x500 &lt;span class=&quot;error&quot;&gt;&amp;#91;lustre&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0c46cac&amp;gt;&amp;#93;&lt;/span&gt; ll_writepages+0x9c/0x220 &lt;span class=&quot;error&quot;&gt;&amp;#91;lustre&amp;#93;&lt;/span&gt;&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81128d81&amp;gt;&amp;#93;&lt;/span&gt; do_writepages+0x21/0x40&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811a43bd&amp;gt;&amp;#93;&lt;/span&gt; writeback_single_inode+0xdd/0x290&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811a47ce&amp;gt;&amp;#93;&lt;/span&gt; writeback_sb_inodes+0xce/0x180&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811a492b&amp;gt;&amp;#93;&lt;/span&gt; writeback_inodes_wb+0xab/0x1b0&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811a4ccb&amp;gt;&amp;#93;&lt;/span&gt; wb_writeback+0x29b/0x3f0&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff814fb3a0&amp;gt;&amp;#93;&lt;/span&gt; ? thread_return+0x4e/0x76e&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8107eb42&amp;gt;&amp;#93;&lt;/span&gt; ? del_timer_sync+0x22/0x30&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811a4fb9&amp;gt;&amp;#93;&lt;/span&gt; wb_do_writeback+0x199/0x240&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811a50c3&amp;gt;&amp;#93;&lt;/span&gt; bdi_writeback_task+0x63/0x1b0&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81091f97&amp;gt;&amp;#93;&lt;/span&gt; ? bit_waitqueue+0x17/0xd0&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811379e0&amp;gt;&amp;#93;&lt;/span&gt; ? bdi_start_fn+0x0/0x100&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81137a66&amp;gt;&amp;#93;&lt;/span&gt; bdi_start_fn+0x86/0x100&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811379e0&amp;gt;&amp;#93;&lt;/span&gt; ? bdi_start_fn+0x0/0x100&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81091d66&amp;gt;&amp;#93;&lt;/span&gt; kthread+0x96/0xa0&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8100c14a&amp;gt;&amp;#93;&lt;/span&gt; child_rip+0xa/0x20&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81091cd0&amp;gt;&amp;#93;&lt;/span&gt; ? kthread+0x0/0xa0&lt;br/&gt;
 &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8100c140&amp;gt;&amp;#93;&lt;/span&gt; ? child_rip+0x0/0x20&lt;/p&gt;

&lt;p&gt;It seems copy tool is waiting for md_enqueue(MDS_INODELOCK_LAYOUT). Other processes who are trying to lock lli-&amp;gt;lli_layout_mutex will be stuck. This problem won&apos;t recover until lock enque times out and client reconnects.&lt;/p&gt;
</description>
                <environment></environment>
        <key id="23513">LU-4727</key>
            <summary>Lhsmtool_posix process stuck in ll_layout_refresh() when restoring</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="jay">Jinshan Xiong</assignee>
                                    <reporter username="lixi">Li Xi</reporter>
                        <labels>
                            <label>HSM</label>
                            <label>cea</label>
                    </labels>
                <created>Fri, 7 Mar 2014 03:32:03 +0000</created>
                <updated>Tue, 25 Jan 2022 20:54:48 +0000</updated>
                            <resolved>Thu, 23 Apr 2015 16:44:10 +0000</resolved>
                                    <version>Lustre 2.6.0</version>
                    <version>Lustre 2.5.1</version>
                                    <fixVersion>Lustre 2.8.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>19</watches>
                                                                            <comments>
                            <comment id="78677" author="rread" created="Fri, 7 Mar 2014 06:48:58 +0000"  >&lt;p&gt;I&apos;m seeing a similar hang here. My copytool stack:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Call Trace:
[&amp;lt;ffffffff81058d53&amp;gt;] ? __wake_up+0x53/0x70
[&amp;lt;ffffffffa0d2ec00&amp;gt;] ? ldlm_expired_completion_wait+0x0/0x360 [ptlrpc]
[&amp;lt;ffffffffa0d334a5&amp;gt;] ldlm_completion_ast+0x545/0x920 [ptlrpc]
[&amp;lt;ffffffff81065df0&amp;gt;] ? default_wake_function+0x0/0x20
[&amp;lt;ffffffffa0d2d9f6&amp;gt;] ldlm_cli_enqueue_fini+0x936/0xe70 [ptlrpc]
[&amp;lt;ffffffffa0d2e2d5&amp;gt;] ldlm_cli_enqueue+0x3a5/0x790 [ptlrpc]
[&amp;lt;ffffffffa0d32f60&amp;gt;] ? ldlm_completion_ast+0x0/0x920 [ptlrpc]
[&amp;lt;ffffffffa0b30240&amp;gt;] ? ll_md_blocking_ast+0x0/0x7d0 [lustre]
[&amp;lt;ffffffffa03bcd3e&amp;gt;] mdc_enqueue+0x2be/0x1a10 [mdc]
[&amp;lt;ffffffffa037da84&amp;gt;] lmv_enqueue+0x2f4/0xfc0 [lmv]
[&amp;lt;ffffffffa0b0b145&amp;gt;] ll_layout_refresh+0x515/0xfe0 [lustre]
[&amp;lt;ffffffffa0b30240&amp;gt;] ? ll_md_blocking_ast+0x0/0x7d0 [lustre]
[&amp;lt;ffffffffa0d32f60&amp;gt;] ? ldlm_completion_ast+0x0/0x920 [ptlrpc]
[&amp;lt;ffffffffa0b57113&amp;gt;] vvp_io_fini+0x173/0x260 [lustre]
[&amp;lt;ffffffffa0c46cc7&amp;gt;] cl_io_fini+0x77/0x280 [obdclass]
[&amp;lt;ffffffffa0afad37&amp;gt;] ll_file_io_generic+0xe7/0x610 [lustre]
[&amp;lt;ffffffffa0afbeb2&amp;gt;] ll_file_aio_write+0x142/0x2c0 [lustre]
[&amp;lt;ffffffffa0afc19c&amp;gt;] ll_file_write+0x16c/0x2a0 [lustre]
[&amp;lt;ffffffff81188f68&amp;gt;] vfs_write+0xb8/0x1a0
[&amp;lt;ffffffff81189861&amp;gt;] sys_write+0x51/0x90
[&amp;lt;ffffffff810e1e4e&amp;gt;] ? __audit_syscall_exit+0x25e/0x290
[&amp;lt;ffffffff8100b072&amp;gt;] system_call_fastpath+0x16/0x1b
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="78680" author="jay" created="Fri, 7 Mar 2014 08:18:42 +0000"  >&lt;p&gt;I believe this is broken due to a recent optimization on the MD stack. Will create a workaround patch tomorrow.&lt;/p&gt;</comment>
                            <comment id="78707" author="jhammond" created="Fri, 7 Mar 2014 15:04:57 +0000"  >&lt;p&gt;Hi Li Xi, I can only reproduce this if I run the copytool on the same mount point as used for the accesses. Can you confirm this? I strongly recommend using a dedicated mount point for the copytool. This should be somewhere in the HSM documentation.&lt;/p&gt;</comment>
                            <comment id="78710" author="lixi" created="Fri, 7 Mar 2014 15:24:41 +0000"  >&lt;p&gt;Hi John,&lt;/p&gt;

&lt;p&gt;Yeah, you are correct! I was running the copytool on the same mount point as used for the accesses. Sorry for misusing it.&lt;/p&gt;

&lt;p&gt;However, I don&apos;t understand why we have to use a dedicated mount point for the copytool. Is there any special reason for it?&lt;/p&gt;</comment>
                            <comment id="78716" author="rread" created="Fri, 7 Mar 2014 15:52:34 +0000"  >&lt;p&gt;I&apos;ve seen this when running my copytool in a dedicated mount point.  &lt;/p&gt;</comment>
                            <comment id="78732" author="jay" created="Fri, 7 Mar 2014 17:34:25 +0000"  >&lt;p&gt;I suspect that running copy tool on dedicated mount point can not eliminate the problem but it can surely reduce the chance of it to occur.&lt;/p&gt;

&lt;p&gt;The root cause of this problem is as follows:&lt;/p&gt;

&lt;p&gt;in mdt_getattr_internal() it checks if the file is being restored. If this is the case, it will mark it as MS_RESTORE so that the client won&apos;t do glimpse.&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;        &lt;span class=&quot;code-comment&quot;&gt;/* &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; file is released, check &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; a restore is running */&lt;/span&gt;
        &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; ((ma-&amp;gt;ma_valid &amp;amp; MA_HSM) &amp;amp;&amp;amp; (ma-&amp;gt;ma_hsm.mh_flags &amp;amp; HS_RELEASED) &amp;amp;&amp;amp;
            mdt_hsm_restore_is_running(info, mdt_object_fid(o))) {
                repbody-&amp;gt;t_state = MS_RESTORE;
                repbody-&amp;gt;valid |= OBD_MD_TSTATE;
        }
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;However, when the restore request reaches the MDT, it doesn&apos;t flush UPDATE lock so if the client has UPDATE lock cached, it won&apos;t see this flag at all.&lt;/p&gt;

&lt;p&gt;The fix would be to adjust the sequence of adding RESTORE request into action list. Right now it does as follow:&lt;/p&gt;

&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;                        mdt_lock_reg_init(&amp;amp;crh-&amp;gt;crh_lh, LCK_EX);
                        obj = mdt_object_find_lock(mti, &amp;amp;crh-&amp;gt;crh_fid,
                                                   &amp;amp;crh-&amp;gt;crh_lh,
                                                   MDS_INODELOCK_LAYOUT);
                        &lt;span class=&quot;code-keyword&quot;&gt;if&lt;/span&gt; (IS_ERR(obj)) {
                                rc = PTR_ERR(obj);
                                CERROR(&lt;span class=&quot;code-quote&quot;&gt;&quot;%s: cannot take layout lock &lt;span class=&quot;code-keyword&quot;&gt;for&lt;/span&gt; &quot;&lt;/span&gt;
                                       DFID&lt;span class=&quot;code-quote&quot;&gt;&quot;: rc = %d\n&quot;&lt;/span&gt;, mdt_obd_name(mdt),
                                       PFID(&amp;amp;crh-&amp;gt;crh_fid), rc);
                                OBD_SLAB_FREE_PTR(crh, mdt_hsm_cdt_kmem);
                                GOTO(out, rc);
                        }

                        /* we choose to not keep a keep a reference
                         * on the object during the restore time which can be
                         * very &lt;span class=&quot;code-object&quot;&gt;long&lt;/span&gt; */
                        mdt_object_put(mti-&amp;gt;mti_env, obj);

                        mutex_lock(&amp;amp;cdt-&amp;gt;cdt_restore_lock);
                        list_add_tail(&amp;amp;crh-&amp;gt;crh_list, &amp;amp;cdt-&amp;gt;cdt_restore_hdl);
                        mutex_unlock(&amp;amp;cdt-&amp;gt;cdt_restore_lock);
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;So it requests LAYOUT lock and then add the request into a global list, we should change it to:&lt;br/&gt;
1. add to global list&lt;br/&gt;
2. flush UPDATE lock&lt;br/&gt;
3. request LAYOUT lock&lt;/p&gt;

&lt;p&gt;In this way, we can make sure that all clients will see the MS_RESTORE flag.&lt;/p&gt;

&lt;p&gt;Another way to fix the problem is to not getattr in copy tool process. In that case, we can&apos;t call ct_stat_by_fid() in llapi_hsm_action_begin(). The stat fetched by ct_stat_by_fid() will be used for setting UID/GID and ATIME/MTIME to volatile file. I think we can do this in other ways.&lt;/p&gt;

&lt;p&gt;Robert, as a workaround fix, I think you can disable the function call ct_stat_by_fid() in llapi_hsm_action_begin() and do some corresponding cleanup - so that you can continue your work while we&apos;re fixing the problem.&lt;/p&gt;

&lt;p&gt;John, do you remember why we should set UID/GID to the volatile file?&lt;/p&gt;</comment>
                            <comment id="78733" author="jhammond" created="Fri, 7 Mar 2014 17:39:39 +0000"  >&lt;p&gt;We do it to maintain quota. Specifically, layout swap will fail it the ownership of the two files are not equal.&lt;/p&gt;</comment>
                            <comment id="78734" author="jhammond" created="Fri, 7 Mar 2014 17:42:15 +0000"  >&lt;p&gt;Yes it would be nice if the MDT sent what was needed to the copytool. I found that it&apos;s hard to fit that extra information into the current protocol. Otherwise a backdoor stat of something would be helpful.&lt;/p&gt;</comment>
                            <comment id="78736" author="jay" created="Fri, 7 Mar 2014 17:51:32 +0000"  >&lt;p&gt;Another way is to save attribute in archive and restore it at restore time(copy tool may have already supported this, IIRC). The only problem is UID/GID setting because it may change when the file is in release state; mtime/atime won&apos;t be a problem at all.&lt;/p&gt;

&lt;p&gt;Or we can use a dedicated ioctl() to get the attribute in kernel but it won&apos;t try to glimpse the file size.&lt;/p&gt;</comment>
                            <comment id="78740" author="jhammond" created="Fri, 7 Mar 2014 18:31:27 +0000"  >&lt;p&gt;If we assume that the copytool has a dedicated mount point (and we&apos;re feeling devious) then it may be enough to do:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;static int ct_stat_by_fid(const struct hsm_copytool_private *ct,
                          const struct lu_fid *fid,
                          struct stat *buf)
{
        char fid_name[FID_NOBRACE_LEN + 1];

        snprintf(fid_name, sizeof(fid_name), DFID_NOBRACE, PFID(fid));
+       unlinkat(ct-&amp;gt;open_by_fid_fd, fid_name, 0);
        return fstatat(ct-&amp;gt;open_by_fid_fd, fid_name, buf, 0);
}
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;But that&apos;s not something to be proud of.&lt;/p&gt;</comment>
                            <comment id="78766" author="rread" created="Fri, 7 Mar 2014 22:40:11 +0000"  >&lt;p&gt;It turns out my issue was self-inflicted. (In my version of the copytool I had neglected to call flush() before calling llapi_hsm_action_end(), and this left data in the file pointer buffers that wasn&apos;t flushed until after the end call when I closed my file handle. So either you need to flush or just close the volatile file handle before calling end. But that is unrelated to this bug.)&lt;/p&gt;</comment>
                            <comment id="79022" author="adilger" created="Tue, 11 Mar 2014 17:24:06 +0000"  >&lt;p&gt;Robert, are there any restrictions on using this HSM API (e.g. capability needed, only on dedicated agent nodes set up by the sysadmin)?  Otherwise, it seems like a potential problem for bad users to be able to lock up the system.  Also, what is the extent of the problem?  Is it only this one process that is hung (an acceptable loss for a self-inflicted problem) or does it affect the whole client, or even the MDS?&lt;/p&gt;</comment>
                            <comment id="79028" author="rread" created="Tue, 11 Mar 2014 19:14:19 +0000"  >&lt;p&gt;I believe the copytool must be run as root, but it can be run on any client.  In my case only the copytool process was hung and unkillable. It also prevents the file in question from being restored, at least until the coordinator times out the action request and sends it to another copytool.&lt;/p&gt;</comment>
                            <comment id="84353" author="lixi" created="Mon, 19 May 2014 02:24:09 +0000"  >&lt;p&gt;Hi all,&lt;/p&gt;

&lt;p&gt;Is there any progress in this issue? This issue is really annoying when I am testing HSM. Is there any easy way to walk around it at least? Using a dedicated mount point for the copytool is not helping....&lt;/p&gt;

&lt;p&gt;Thanks!&lt;/p&gt;</comment>
                            <comment id="91453" author="paf" created="Tue, 12 Aug 2014 19:20:55 +0000"  >&lt;p&gt;Jinshan - Looking at your description of a possible solution...&lt;br/&gt;
&quot;So it requests LAYOUT lock and then add the request into a global list, we should change it to:&lt;br/&gt;
1. add to global list&lt;br/&gt;
2. flush UPDATE lock&lt;br/&gt;
3. request LAYOUT lock&quot;&lt;/p&gt;

&lt;p&gt;When you say &quot;flush UPDATE lock&quot;, how are you suggesting this be done?  Take an update lock on the object, then take the layout lock?  If so, when do we release the update lock?  Before taking the layout lock, after getting the layout lock, or some other time?&lt;/p&gt;


&lt;p&gt;Also, this comment at the top of the function is confusing me:&lt;br/&gt;
&quot; * in case of restore, caller must hold layout lock&quot;&lt;br/&gt;
Is this comment in error?  If not, what layout lock is it referring to/what sort of lock on layout?  I ask because if it&apos;s a restore request, we take a layout lock, which seems to imply the caller did not have a layout lock already.&lt;/p&gt;</comment>
                            <comment id="102018" author="jay" created="Thu, 18 Dec 2014 23:57:16 +0000"  >&lt;p&gt;I&apos;m thinking about a solution for this problem.&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;When you say &quot;flush UPDATE lock&quot;, how are you suggesting this be done? Take an update lock on the object, then take the layout lock? If so, when do we release the update lock? Before taking the layout lock, after getting the layout lock, or some other time?&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;By &quot;flush UPDATE lock&quot;, I meant to acquire the UPDATE lock and release it immediately.&lt;/p&gt;

&lt;blockquote&gt;
&lt;p&gt;Is this comment in error? If not, what layout lock is it referring to/what sort of lock on layout? I ask because if it&apos;s a restore request, we take a layout lock, which seems to imply the caller did not have a layout lock already.&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;that means layout lock to take in the function, i.e., the code&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;                        mdt_lock_reg_init(&amp;amp;crh-&amp;gt;crh_lh, LCK_EX);
                        obj = mdt_object_find_lock(mti, &amp;amp;crh-&amp;gt;crh_fid,
                                                   &amp;amp;crh-&amp;gt;crh_lh,
                                                   MDS_INODELOCK_LAYOUT);
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="102027" author="gerrit" created="Fri, 19 Dec 2014 04:26:18 +0000"  >&lt;p&gt;Jinshan Xiong (jinshan.xiong@intel.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/13138&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/13138&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4727&quot; title=&quot;Lhsmtool_posix process stuck in ll_layout_refresh() when restoring&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4727&quot;&gt;&lt;del&gt;LU-4727&lt;/del&gt;&lt;/a&gt; hsm: flush UPDATE lock for restore&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: bf2e4b958f60cb7eda9303ad0c079fd23ff2d16b&lt;/p&gt;</comment>
                            <comment id="102028" author="jay" created="Fri, 19 Dec 2014 04:26:41 +0000"  >&lt;p&gt;Please try patch 13138 and check if it can fix the problem.&lt;/p&gt;</comment>
                            <comment id="102038" author="vinayak_clogeny" created="Fri, 19 Dec 2014 09:40:02 +0000"  >&lt;p&gt; &lt;a href=&quot;http://review.whamcloud.com/13138&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/13138&lt;/a&gt; solves the problem on single node setup on local vm. Thanks for the patch Jinshan&lt;/p&gt;</comment>
                            <comment id="105868" author="jay" created="Thu, 5 Feb 2015 17:28:05 +0000"  >&lt;p&gt;the patch has been in Gerrit for a long time. Please let me know what I can do to move this forward, sigh.&lt;/p&gt;</comment>
                            <comment id="106849" author="gerrit" created="Thu, 12 Feb 2015 19:59:48 +0000"  >&lt;p&gt;John L. Hammond (john.hammond@intel.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/13750&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/13750&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4727&quot; title=&quot;Lhsmtool_posix process stuck in ll_layout_refresh() when restoring&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4727&quot;&gt;&lt;del&gt;LU-4727&lt;/del&gt;&lt;/a&gt; hsm: use IOC_MDC_GETFILEINFO in restore&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 8ec6354ded37f3e1f39d6e0336c9e17b1a97785b&lt;/p&gt;</comment>
                            <comment id="109165" author="gerrit" created="Sun, 8 Mar 2015 11:40:09 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/13750/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/13750/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4727&quot; title=&quot;Lhsmtool_posix process stuck in ll_layout_refresh() when restoring&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4727&quot;&gt;&lt;del&gt;LU-4727&lt;/del&gt;&lt;/a&gt; hsm: use IOC_MDC_GETFILEINFO in restore&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 96dbac2eaef7a5d1090807bedc9951279c06d037&lt;/p&gt;</comment>
                            <comment id="110412" author="fzago" created="Mon, 23 Mar 2015 18:47:32 +0000"  >&lt;p&gt;Jinshan,&lt;/p&gt;

&lt;p&gt;I got this trace when using stat on a file I was restoring (or was restored). I haven&apos;t been able to reproduce it so far.  Is it the error you mention in your patch? John&apos;s patch is applied on that tree, and works well otherwise.&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Mar 20 15:00:55 tasclient01 kernel: INFO: task stat:951 blocked for more than 120 seconds.
Mar 20 15:00:55 tasclient01 kernel:      Tainted: P           ---------------    2.6.32-431.17.1.el6.x86_64 #1
Mar 20 15:00:55 tasclient01 kernel: &quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables thi
s message.
Mar 20 15:00:55 tasclient01 kernel: stat          D 0000000000000000     0   951  29769 0x00000084
Mar 20 15:00:55 tasclient01 kernel: ffff8801d0471a58 0000000000000082 0000000000000000 000000000000000d
Mar 20 15:00:55 tasclient01 kernel: 0000000000000004 ffff880237fee800 ffff880116db5610 0000000000000630
Mar 20 15:00:55 tasclient01 kernel: ffff8802143f1098 ffff8801d0471fd8 000000000000fbc8 ffff8802143f1098
Mar 20 15:00:55 tasclient01 kernel: Call Trace:
Mar 20 15:00:55 tasclient01 kernel: [&amp;lt;ffffffff8152935e&amp;gt;] __mutex_lock_slowpath+0x13e/0x180
Mar 20 15:00:55 tasclient01 kernel: [&amp;lt;ffffffff815291fb&amp;gt;] mutex_lock+0x2b/0x50
Mar 20 15:00:55 tasclient01 kernel: [&amp;lt;ffffffffa09e009a&amp;gt;] ll_layout_refresh+0x1da/0xc60 [lustre]
Mar 20 15:00:55 tasclient01 kernel: [&amp;lt;ffffffff810f0e46&amp;gt;] ? ftrace_test_stop_func+0x16/0x20
Mar 20 15:00:55 tasclient01 kernel: [&amp;lt;ffffffff8100ad96&amp;gt;] ? ftrace_call+0x5/0x2b
Mar 20 15:00:55 tasclient01 kernel: [&amp;lt;ffffffff810f0e46&amp;gt;] ? ftrace_test_stop_func+0x16/0x20
Mar 20 15:00:55 tasclient01 kernel: [&amp;lt;ffffffffa0a04ab0&amp;gt;] ? ll_md_blocking_ast+0x0/0x7d0 [lustre]
Mar 20 15:00:55 tasclient01 kernel: [&amp;lt;ffffffffa06996a0&amp;gt;] ? ldlm_completion_ast+0x0/0x920 [ptlrpc]
Mar 20 15:00:55 tasclient01 kernel: [&amp;lt;ffffffffa04dfe31&amp;gt;] ? cl_io_slice_add+0xc1/0x190 [obdclass]
Mar 20 15:00:55 tasclient01 kernel: [&amp;lt;ffffffffa0a2d8f0&amp;gt;] vvp_io_init+0x340/0x490 [lustre]
Mar 20 15:00:55 tasclient01 kernel: [&amp;lt;ffffffff810f0e46&amp;gt;] ? ftrace_test_stop_func+0x16/0x20
Mar 20 15:00:55 tasclient01 kernel: [&amp;lt;ffffffff8100ad96&amp;gt;] ? ftrace_call+0x5/0x2b
Mar 20 15:00:55 tasclient01 kernel: [&amp;lt;ffffffffa04df278&amp;gt;] cl_io_init0+0x98/0x160 [obdclass]
Mar 20 15:00:55 tasclient01 kernel: [&amp;lt;ffffffffa04e1ea4&amp;gt;] cl_io_init+0x64/0xe0 [obdclass]
Mar 20 15:00:55 tasclient01 kernel: [&amp;lt;ffffffffa0a23161&amp;gt;] cl_glimpse_size0+0x91/0x1d0 [lustre]
Mar 20 15:00:55 tasclient01 kernel: [&amp;lt;ffffffffa09d4c25&amp;gt;] ll_inode_revalidate_it+0x1a5/0x1d0 [lustre]
Mar 20 15:00:55 tasclient01 kernel: [&amp;lt;ffffffffa09d4c99&amp;gt;] ll_getattr_it+0x49/0x170 [lustre]
Mar 20 15:00:55 tasclient01 kernel: [&amp;lt;ffffffffa09d4df7&amp;gt;] ll_getattr+0x37/0x40 [lustre]
Mar 20 15:00:55 tasclient01 kernel: [&amp;lt;ffffffff81227163&amp;gt;] ? security_inode_getattr+0x23/0x30
Mar 20 15:00:55 tasclient01 kernel: [&amp;lt;ffffffff8118e631&amp;gt;] vfs_getattr+0x51/0x80
Mar 20 15:00:55 tasclient01 kernel: [&amp;lt;ffffffff8118e6c4&amp;gt;] vfs_fstatat+0x64/0xa0
Mar 20 15:00:55 tasclient01 kernel: [&amp;lt;ffffffff810f0e46&amp;gt;] ? ftrace_test_stop_func+0x16/0x20
Mar 20 15:00:55 tasclient01 kernel: [&amp;lt;ffffffff8118e76e&amp;gt;] vfs_lstat+0x1e/0x20
Mar 20 15:00:55 tasclient01 kernel: [&amp;lt;ffffffff8118e794&amp;gt;] sys_newlstat+0x24/0x50
Mar 20 15:00:55 tasclient01 kernel: [&amp;lt;ffffffff810e1cc7&amp;gt;] ? audit_syscall_entry+0x1d7/0x200
Mar 20 15:00:55 tasclient01 kernel: [&amp;lt;ffffffff8100b072&amp;gt;] system_call_fastpath+0x16/0x1b
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="110415" author="jay" created="Mon, 23 Mar 2015 19:16:19 +0000"  >&lt;p&gt;What&apos;s this process? From the name I guess it&apos;s not copy tool. John&apos;s patch can only fix copy tool case.&lt;/p&gt;

&lt;p&gt;You will need patch 13138 too to address this case if the restoring will take longer than 120 seconds.&lt;/p&gt;</comment>
                            <comment id="110416" author="fzago" created="Mon, 23 Mar 2015 19:26:21 +0000"  >&lt;p&gt;The process is stat, which does a stat() on a file.&lt;br/&gt;
Is it the problem your patch should fix, or is it fixing something else?&lt;/p&gt;</comment>
                            <comment id="110483" author="jay" created="Tue, 24 Mar 2015 15:55:47 +0000"  >&lt;p&gt;Yes, the patch can fix this problem&lt;/p&gt;</comment>
                            <comment id="113232" author="pjones" created="Thu, 23 Apr 2015 16:43:46 +0000"  >&lt;p&gt;This patch landed quite some while back. Please speak up (or open a new ticket) if there are still residual issues to address.&lt;/p&gt;</comment>
                            <comment id="114355" author="lixi" created="Wed, 6 May 2015 11:16:41 +0000"  >&lt;p&gt;Hmm, I still got the following error on a recent master branch with the 13750 patch. &lt;/p&gt;

&lt;p&gt;May  6 19:13:44 server1 kernel: INFO: task test.py:32411 blocked for more than 120 seconds.&lt;br/&gt;
May  6 19:13:44 server1 kernel:      Not tainted 2.6.32-431.29.2.el6_lustre.2.5.29.ddnpf4.x86_64 #1&lt;br/&gt;
May  6 19:13:44 server1 kernel: &quot;echo 0 &amp;gt; /proc/sys/kernel/hung_task_timeout_secs&quot; disables this message.&lt;br/&gt;
May  6 19:13:44 server1 kernel: test.py       D 0000000000000000     0 32411  23005 0x00000080&lt;br/&gt;
May  6 19:13:44 server1 kernel: ffff88055238fb08 0000000000000082 0000000000000000 ffff88055351c5c0&lt;br/&gt;
May  6 19:13:44 server1 kernel: ffff88055238fab8 ffffffffa0e0685e ffff88086d4a49d8 ffff880597c9b980&lt;br/&gt;
May  6 19:13:44 server1 kernel: ffff880553ee85f8 ffff88055238ffd8 000000000000fbc8 ffff880553ee85f8&lt;br/&gt;
May  6 19:13:44 server1 kernel: Call Trace:&lt;br/&gt;
May  6 19:13:44 server1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0e0685e&amp;gt;&amp;#93;&lt;/span&gt; ? mdc_set_open_replay_data+0x29e/0x500 &lt;span class=&quot;error&quot;&gt;&amp;#91;mdc&amp;#93;&lt;/span&gt;&lt;br/&gt;
May  6 19:13:44 server1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8152b9de&amp;gt;&amp;#93;&lt;/span&gt; __mutex_lock_slowpath+0x13e/0x180&lt;br/&gt;
May  6 19:13:44 server1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8152b87b&amp;gt;&amp;#93;&lt;/span&gt; mutex_lock+0x2b/0x50&lt;br/&gt;
May  6 19:13:44 server1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0ec7ea1&amp;gt;&amp;#93;&lt;/span&gt; ll_layout_refresh+0x191/0x300 &lt;span class=&quot;error&quot;&gt;&amp;#91;lustre&amp;#93;&lt;/span&gt;&lt;br/&gt;
May  6 19:13:44 server1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0ed78ac&amp;gt;&amp;#93;&lt;/span&gt; ll_setattr_raw+0x5cc/0xbe0 &lt;span class=&quot;error&quot;&gt;&amp;#91;lustre&amp;#93;&lt;/span&gt;&lt;br/&gt;
May  6 19:13:44 server1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffffa0ed7f25&amp;gt;&amp;#93;&lt;/span&gt; ll_setattr+0x65/0xd0 &lt;span class=&quot;error&quot;&gt;&amp;#91;lustre&amp;#93;&lt;/span&gt;&lt;br/&gt;
May  6 19:13:44 server1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811a8158&amp;gt;&amp;#93;&lt;/span&gt; notify_change+0x168/0x340&lt;br/&gt;
May  6 19:13:44 server1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81187374&amp;gt;&amp;#93;&lt;/span&gt; do_truncate+0x64/0xa0&lt;br/&gt;
May  6 19:13:44 server1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8119c4b1&amp;gt;&amp;#93;&lt;/span&gt; do_filp_open+0x861/0xd20&lt;br/&gt;
May  6 19:13:44 server1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81290c5a&amp;gt;&amp;#93;&lt;/span&gt; ? strncpy_from_user+0x4a/0x90&lt;br/&gt;
May  6 19:13:44 server1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff811a9032&amp;gt;&amp;#93;&lt;/span&gt; ? alloc_fd+0x92/0x160&lt;br/&gt;
May  6 19:13:44 server1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81186039&amp;gt;&amp;#93;&lt;/span&gt; do_sys_open+0x69/0x140&lt;br/&gt;
May  6 19:13:44 server1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff81186150&amp;gt;&amp;#93;&lt;/span&gt; sys_open+0x20/0x30&lt;br/&gt;
May  6 19:13:44 server1 kernel: &lt;span class=&quot;error&quot;&gt;&amp;#91;&amp;lt;ffffffff8100b072&amp;gt;&amp;#93;&lt;/span&gt; system_call_fastpath+0x16/0x1b&lt;/p&gt;</comment>
                            <comment id="114372" author="jhammond" created="Wed, 6 May 2015 14:26:55 +0000"  >&lt;p&gt;&amp;gt; Hmm, I still got following error on recent master branch with 13750 patch.&lt;br/&gt;
&amp;gt; ...&lt;/p&gt;

&lt;p&gt;Please post your reproducer.&lt;/p&gt;
</comment>
                            <comment id="114384" author="lixi" created="Wed, 6 May 2015 16:22:10 +0000"  >&lt;p&gt;Hi John,&lt;/p&gt;

&lt;p&gt;Sorry, the problem I got today is a different issue. And I am not sure whether it is a Bug or not. It is confirmed that &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-4727&quot; title=&quot;Lhsmtool_posix process stuck in ll_layout_refresh() when restoring&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-4727&quot;&gt;&lt;del&gt;LU-4727&lt;/del&gt;&lt;/a&gt; is fixed for me too. &lt;/p&gt;

&lt;p&gt;I got the new problem when the HSM tool (which is a private tool, not standard lhsmtool_posix) exits right after &apos;lfs hsm_restore&apos; completes. But the action status in &quot;lctl get_param mdt.server1-MDT0000.hsm.actions&quot; is still STARTED, not SUCCEED. As long as the tool keeps itself alive until the action status becomes SUCCEED, this problem is gone.&lt;/p&gt;

&lt;p&gt;However, this might be an issue too, because the HSM tool can exit at any time. And even if it restarts, the action will never continue. I don&apos;t know any other way to recover HSM status except rebooting the machine. Do you have any advice? Thanks!&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                            <outwardlinks description="duplicates">
                                                        </outwardlinks>
                                                                <inwardlinks description="is duplicated by">
                                        <issuelink>
            <issuekey id="25155">LU-5196</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                                        </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="25873">LUDOC-252</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="23514">LU-4728</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="29495">LU-6460</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="21107">LU-4002</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwh1j:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>13001</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>