<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:45:00 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-4691] HSM operations mostly fail in striped directories</title>
                <link>https://jira.whamcloud.com/browse/LU-4691</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;Most archives fail (depends on MDT index). Successfully released files cannot be restored.&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;# export MOUNT_2=y
# export MDSCOUNT=4
# llmount.sh
# for ((i = 0; i &amp;lt; MDSCOUNT; i++)); do
    mdt_name=$(printf &apos;lustre-MDT%04x&apos; $i)
    lctl conf_param $mdt_name.mdt.hsm_control=enabled
done
# mount $HOSTNAME@tcp:/lustre /mnt/lustre-hsm -t lustre -o user_xattr,flock
# lhsmtool_posix -vvvv --hsm_root=$HSM_ROOT --daemon /mnt/lustre-hsm 2&amp;gt; /tmp/hsm.log
# lfs mkdir -i0 -c4 /mnt/lustre/d0
# cd /mnt/lustre/d0
# echo &amp;gt; f0
# echo &amp;gt; f1
# echo &amp;gt; f2
# echo &amp;gt; f3
# lfs hsm_archive f0
# lfs hsm_state f0
f0: (0x00000001) exists, archive_id:1
# lfs hsm_release f0
Cannot send HSM request (use of f0): Operation not permitted
#  
# lfs hsm_archive f1
# lfs hsm_archive f2
# lfs hsm_archive f3
# lfs hsm_release f1
# lfs hsm_release f2
Cannot send HSM request (use of f2): Operation not permitted
# lfs hsm_release f3
Cannot send HSM request (use of f3): Operation not permitted
# lfs hsm_state *
f0: (0x00000001) exists, archive_id:1
f1: (0x0000000d) released exists archived, archive_id:1
f2: (0x00000001) exists, archive_id:1
f3: (0x00000001) exists, archive_id:1
# cat f1
^C
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment></environment>
        <key id="23407">LU-4691</key>
            <summary>HSM operations mostly fail in striped directories</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="2" iconUrl="https://jira.whamcloud.com/images/icons/priorities/critical.svg">Critical</priority>
                        <status id="6" iconUrl="https://jira.whamcloud.com/images/icons/statuses/closed.png" description="The issue is considered finished, the resolution is correct. Issues which are closed can be reopened.">Closed</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="jhammond">John Hammond</reporter>
                        <labels>
                            <label>HSM</label>
                            <label>dne2</label>
                            <label>mdt</label>
                    </labels>
                <created>Sat, 1 Mar 2014 00:05:39 +0000</created>
                <updated>Thu, 31 Jul 2014 15:19:06 +0000</updated>
                            <resolved>Thu, 31 Jul 2014 15:18:47 +0000</resolved>
                                    <version>Lustre 2.6.0</version>
                                    <fixVersion>Lustre 2.7.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>7</watches>
                                                                            <comments>
                            <comment id="78177" author="adegremont" created="Sun, 2 Mar 2014 19:47:03 +0000"  >&lt;p&gt;&lt;tt&gt;lfs hsm_state&lt;/tt&gt; clearly shows that only &lt;tt&gt;f1&lt;/tt&gt; was successfully archived, that&apos;s the reason why files could not be released/restored.&lt;/p&gt;

&lt;p&gt;Could you display what is in copytool log &lt;tt&gt;/tmp/hsm.log&lt;/tt&gt;, MDTs logs and client logs?&lt;/p&gt;</comment>
                            <comment id="86519" author="di.wang" created="Fri, 13 Jun 2014 06:11:28 +0000"  >&lt;p&gt;Hmm, I tried to reproduce the problem, but seems works for me, though it seems the daemon can not be started correctly after the mount, and I have to start again.&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@mds tests]# sh -vx tests.sh 
umount /mnt/lustre-hsm
+ umount /mnt/lustre-hsm
MOUNT_2=y  MDSCOUNT=4 sh llmount.sh
+ MOUNT_2=y
+ MDSCOUNT=4
+ sh llmount.sh
Stopping clients: mds /mnt/lustre (opts:)
Stopping client mds /mnt/lustre opts:
Stopping clients: mds /mnt/lustre2 (opts:)
Stopping client mds /mnt/lustre2 opts:
Stopping /mnt/mds1 (opts:-f) on mds
Stopping /mnt/mds2 (opts:-f) on mds
Stopping /mnt/mds3 (opts:-f) on mds
Stopping /mnt/mds4 (opts:-f) on mds
Stopping /mnt/ost1 (opts:-f) on mds
Stopping /mnt/ost2 (opts:-f) on mds
waited 0 for 31 ST ost OSS OSS_uuid 0
Loading modules from /work/lustre-release_new/lustre-release/lustre/tests/..
detected 8 online CPUs by sysfs
libcfs will create CPU partition based on online CPUs
debug=vfstrace rpctrace dlmtrace neterror ha config ioctl super
subsystem_debug=all -lnet -lnd -pinger
gss/krb5 is not supported
Formatting mgs, mds, osts
Format mds1: /tmp/lustre-mdt1
Format mds2: /tmp/lustre-mdt2
Format mds3: /tmp/lustre-mdt3
Format mds4: /tmp/lustre-mdt4
Format ost1: /tmp/lustre-ost1
Format ost2: /tmp/lustre-ost2
Checking servers environments
Checking clients mds environments
Loading modules from /work/lustre-release_new/lustre-release/lustre/tests/..
detected 8 online CPUs by sysfs
libcfs will create CPU partition based on online CPUs
debug=vfstrace rpctrace dlmtrace neterror ha config ioctl super
subsystem_debug=all -lnet -lnd -pinger
gss/krb5 is not supported
Setup mgs, mdt, osts
Starting mds1:   -o loop /tmp/lustre-mdt1 /mnt/mds1
Started lustre-MDT0000
Starting mds2:   -o loop /tmp/lustre-mdt2 /mnt/mds2
Started lustre-MDT0001
Starting mds3:   -o loop /tmp/lustre-mdt3 /mnt/mds3
Started lustre-MDT0002
Starting mds4:   -o loop /tmp/lustre-mdt4 /mnt/mds4
Started lustre-MDT0003
Starting ost1:   -o loop /tmp/lustre-ost1 /mnt/ost1
Started lustre-OST0000
Starting ost2:   -o loop /tmp/lustre-ost2 /mnt/ost2
Started lustre-OST0001
Starting client: mds: -o user_xattr,flock mds@tcp:/lustre /mnt/lustre
Starting client: mds: -o user_xattr,flock mds@tcp:/lustre /mnt/lustre2
Using TIMEOUT=20
disable quota as required
/work/lustre-release_new/lustre-release/lustre/utils/lctl conf_param lustre-MDT0000.mdt.hsm_control=enabled
+ /work/lustre-release_new/lustre-release/lustre/utils/lctl conf_param lustre-MDT0000.mdt.hsm_control=enabled
/work/lustre-release_new/lustre-release/lustre/utils/lctl conf_param lustre-MDT0001.mdt.hsm_control=enabled
+ /work/lustre-release_new/lustre-release/lustre/utils/lctl conf_param lustre-MDT0001.mdt.hsm_control=enabled
/work/lustre-release_new/lustre-release/lustre/utils/lctl conf_param lustre-MDT0002.mdt.hsm_control=enabled
+ /work/lustre-release_new/lustre-release/lustre/utils/lctl conf_param lustre-MDT0002.mdt.hsm_control=enabled
/work/lustre-release_new/lustre-release/lustre/utils/lctl conf_param lustre-MDT0003.mdt.hsm_control=enabled
+ /work/lustre-release_new/lustre-release/lustre/utils/lctl conf_param lustre-MDT0003.mdt.hsm_control=enabled
mount -t lustre -o user_xattr,flock mds:/lustre /mnt/lustre-hsm
+ mount -t lustre -o user_xattr,flock mds:/lustre /mnt/lustre-hsm
sleep 5
+ sleep 5
/work/lustre-release_new/lustre-release/lustre/utils/lhsmtool_posix -vvvv --hsm_root=/mnt/lustre --daemon /mnt/lustre-hsm 2&amp;gt; /tmp/hsm.log
+ /work/lustre-release_new/lustre-release/lustre/utils/lhsmtool_posix -vvvv --hsm_root=/mnt/lustre --daemon /mnt/lustre-hsm
/work/lustre-release_new/lustre-release/lustre/utils/lfs mkdir -i0 -c4 /mnt/lustre/d0
+ /work/lustre-release_new/lustre-release/lustre/utils/lfs mkdir -i0 -c4 /mnt/lustre/d0
cd /mnt/lustre/d0
+ cd /mnt/lustre/d0
echo &amp;gt; f0
+ echo
echo &amp;gt; f1
+ echo
echo &amp;gt; f2
+ echo
echo &amp;gt; f3
+ echo

[root@mds tests]# vim /tmp/hsm.log 
[root@mds tests]# sh -vx test_1.sh 
cd /mnt/lustre/d0
+ cd /mnt/lustre/d0
/work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_archive f0
+ /work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_archive f0
sleep 2
+ sleep 2
/work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_state f0
+ /work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_state f0
f0: (0x00000000)
/work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_release f0
+ /work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_release f0
Cannot send HSM request (use of f0): Operation not permitted

/work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_archive f1
+ /work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_archive f1
sleep 2
+ sleep 2
/work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_state f1
+ /work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_state f1
f1: (0x00000000)
/work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_release f1
+ /work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_release f1
Cannot send HSM request (use of f1): Operation not permitted

/work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_archive f2
+ /work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_archive f2
sleep 2
+ sleep 2
/work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_state f2
+ /work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_state f2
f2: (0x00000000)
/work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_release f2
+ /work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_release f2
Cannot send HSM request (use of f2): Operation not permitted

/work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_archive f3
+ /work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_archive f3
sleep 2
+ sleep 2
/work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_state f3
+ /work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_state f3
f3: (0x00000000)
/work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_release f3
+ /work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_release f3
Cannot send HSM request (use of f3): Operation not permitted
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;It seems because the daemon is not being start up correctly, then I start the daemon again, it works&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@mds tests]# /work/lustre-release_new/lustre-release/lustre/utils/lhsmtool_posix -vvvv --hsm_root=/mnt/lustre --daemon /mnt/lustre-hsm 
lhsmtool_posix[25558]: action=0 src=(null) dst=(null) mount_point=/mnt/lustre-hsm
[root@mds tests]# lhsmtool_posix[25559]: waiting for message from kernel
lhsmtool_posix[25559]: copytool fs=lustre archive#=1 item_count=1
lhsmtool_posix[25559]: waiting for message from kernel
lhsmtool_posix[25560]: &apos;[0x300000401:0x1:0x0]&apos; action ARCHIVE reclen 72, cookie=0x5399b0c2
lhsmtool_posix[25560]: processing file &apos;d0/f3&apos;
lhsmtool_posix[25560]: archiving &apos;/mnt/lustre-hsm/.lustre/fid/0x300000401:0x1:0x0&apos; to &apos;/mnt/lustre/0001/0000/0401/0000/0003/0000/0x300000401:0x1:0x0_tmp&apos;
lhsmtool_posix[25560]: saving stripe info of &apos;/mnt/lustre-hsm/.lustre/fid/0x300000401:0x1:0x0&apos; in /mnt/lustre/0001/0000/0401/0000/0003/0000/0x300000401:0x1:0x0_tmp.lov
lhsmtool_posix[25560]: going to copy data from &apos;/mnt/lustre-hsm/.lustre/fid/0x300000401:0x1:0x0&apos; to &apos;/mnt/lustre/0001/0000/0401/0000/0003/0000/0x300000401:0x1:0x0_tmp&apos;
lhsmtool_posix[25560]: Going to copy 1 bytes /mnt/lustre-hsm/.lustre/fid/0x300000401:0x1:0x0 -&amp;gt; /mnt/lustre/0001/0000/0401/0000/0003/0000/0x300000401:0x1:0x0_tmp

lhsmtool_posix[25560]: data archiving for &apos;/mnt/lustre-hsm/.lustre/fid/0x300000401:0x1:0x0&apos; to &apos;/mnt/lustre/0001/0000/0401/0000/0003/0000/0x300000401:0x1:0x0_tmp&apos; done
lhsmtool_posix[25560]: attr file for &apos;/mnt/lustre-hsm/.lustre/fid/0x300000401:0x1:0x0&apos; saved to archive &apos;/mnt/lustre/0001/0000/0401/0000/0003/0000/0x300000401:0x1:0x0_tmp&apos;
lhsmtool_posix[25560]: fsetxattr of &apos;trusted.hsm&apos; on &apos;/mnt/lustre/0001/0000/0401/0000/0003/0000/0x300000401:0x1:0x0_tmp&apos; rc=0 (Success)
lhsmtool_posix[25560]: fsetxattr of &apos;trusted.link&apos; on &apos;/mnt/lustre/0001/0000/0401/0000/0003/0000/0x300000401:0x1:0x0_tmp&apos; rc=0 (Success)
lhsmtool_posix[25560]: fsetxattr of &apos;trusted.lov&apos; on &apos;/mnt/lustre/0001/0000/0401/0000/0003/0000/0x300000401:0x1:0x0_tmp&apos; rc=0 (Success)
lhsmtool_posix[25560]: fsetxattr of &apos;trusted.lma&apos; on &apos;/mnt/lustre/0001/0000/0401/0000/0003/0000/0x300000401:0x1:0x0_tmp&apos; rc=0 (Success)
lhsmtool_posix[25560]: fsetxattr of &apos;lustre.lov&apos; on &apos;/mnt/lustre/0001/0000/0401/0000/0003/0000/0x300000401:0x1:0x0_tmp&apos; rc=0 (Success)
lhsmtool_posix[25560]: xattr file for &apos;/mnt/lustre-hsm/.lustre/fid/0x300000401:0x1:0x0&apos; saved to archive &apos;/mnt/lustre/0001/0000/0401/0000/0003/0000/0x300000401:0x1:0x0_tmp&apos;
lhsmtool_posix[25560]: symlink &apos;/mnt/lustre/shadow/d0/f3&apos; to &apos;../../0001/0000/0401/0000/0003/0000/0x300000401:0x1:0x0&apos; done
lhsmtool_posix[25560]: Action completed, notifying coordinator cookie=0x5399b0c2, FID=[0x300000401:0x1:0x0], hp_flags=0 err=0
lhsmtool_posix[25560]: llapi_hsm_action_end() on &apos;/mnt/lustre-hsm/.lustre/fid/0x300000401:0x1:0x0&apos; ok (rc=0)
lhsmtool_posix[25559]: copytool fs=lustre archive#=1 item_count=1
lhsmtool_posix[25559]: waiting for message from kernel
lhsmtool_posix[25562]: &apos;[0x340000401:0x1:0x0]&apos; action ARCHIVE reclen 72, cookie=0x5399b0c2
lhsmtool_posix[25562]: processing file &apos;d0/f0&apos;
lhsmtool_posix[25562]: archiving &apos;/mnt/lustre-hsm/.lustre/fid/0x340000401:0x1:0x0&apos; to &apos;/mnt/lustre/0001/0000/0401/4000/0003/0000/0x340000401:0x1:0x0_tmp&apos;
lhsmtool_posix[25562]: saving stripe info of &apos;/mnt/lustre-hsm/.lustre/fid/0x340000401:0x1:0x0&apos; in /mnt/lustre/0001/0000/0401/4000/0003/0000/0x340000401:0x1:0x0_tmp.lov
lhsmtool_posix[25562]: going to copy data from &apos;/mnt/lustre-hsm/.lustre/fid/0x340000401:0x1:0x0&apos; to &apos;/mnt/lustre/0001/0000/0401/4000/0003/0000/0x340000401:0x1:0x0_tmp&apos;
lhsmtool_posix[25562]: Going to copy 1 bytes /mnt/lustre-hsm/.lustre/fid/0x340000401:0x1:0x0 -&amp;gt; /mnt/lustre/0001/0000/0401/4000/0003/0000/0x340000401:0x1:0x0_tmp

lhsmtool_posix[25562]: data archiving for &apos;/mnt/lustre-hsm/.lustre/fid/0x340000401:0x1:0x0&apos; to &apos;/mnt/lustre/0001/0000/0401/4000/0003/0000/0x340000401:0x1:0x0_tmp&apos; done
lhsmtool_posix[25562]: attr file for &apos;/mnt/lustre-hsm/.lustre/fid/0x340000401:0x1:0x0&apos; saved to archive &apos;/mnt/lustre/0001/0000/0401/4000/0003/0000/0x340000401:0x1:0x0_tmp&apos;
lhsmtool_posix[25562]: fsetxattr of &apos;trusted.hsm&apos; on &apos;/mnt/lustre/0001/0000/0401/4000/0003/0000/0x340000401:0x1:0x0_tmp&apos; rc=0 (Success)
lhsmtool_posix[25562]: fsetxattr of &apos;trusted.link&apos; on &apos;/mnt/lustre/0001/0000/0401/4000/0003/0000/0x340000401:0x1:0x0_tmp&apos; rc=0 (Success)
lhsmtool_posix[25562]: fsetxattr of &apos;trusted.lov&apos; on &apos;/mnt/lustre/0001/0000/0401/4000/0003/0000/0x340000401:0x1:0x0_tmp&apos; rc=0 (Success)
lhsmtool_posix[25562]: fsetxattr of &apos;trusted.lma&apos; on &apos;/mnt/lustre/0001/0000/0401/4000/0003/0000/0x340000401:0x1:0x0_tmp&apos; rc=0 (Success)
lhsmtool_posix[25562]: fsetxattr of &apos;lustre.lov&apos; on &apos;/mnt/lustre/0001/0000/0401/4000/0003/0000/0x340000401:0x1:0x0_tmp&apos; rc=0 (Success)
lhsmtool_posix[25562]: xattr file for &apos;/mnt/lustre-hsm/.lustre/fid/0x340000401:0x1:0x0&apos; saved to archive &apos;/mnt/lustre/0001/0000/0401/4000/0003/0000/0x340000401:0x1:0x0_tmp&apos;
lhsmtool_posix[25562]: symlink &apos;/mnt/lustre/shadow/d0/f0&apos; to &apos;../../0001/0000/0401/4000/0003/0000/0x340000401:0x1:0x0&apos; done
lhsmtool_posix[25562]: Action completed, notifying coordinator cookie=0x5399b0c2, FID=[0x340000401:0x1:0x0], hp_flags=0 err=0
lhsmtool_posix[25562]: llapi_hsm_action_end() on &apos;/mnt/lustre-hsm/.lustre/fid/0x340000401:0x1:0x0&apos; ok (rc=0)
lhsmtool_posix[25559]: copytool fs=lustre archive#=1 item_count=1
lhsmtool_posix[25559]: waiting for message from kernel
lhsmtool_posix[25563]: &apos;[0x280000400:0x2:0x0]&apos; action ARCHIVE reclen 72, cookie=0x5399b0c2
lhsmtool_posix[25563]: processing file &apos;d0/f1&apos;
lhsmtool_posix[25563]: archiving &apos;/mnt/lustre-hsm/.lustre/fid/0x280000400:0x2:0x0&apos; to &apos;/mnt/lustre/0002/0000/0400/8000/0002/0000/0x280000400:0x2:0x0_tmp&apos;
lhsmtool_posix[25563]: saving stripe info of &apos;/mnt/lustre-hsm/.lustre/fid/0x280000400:0x2:0x0&apos; in /mnt/lustre/0002/0000/0400/8000/0002/0000/0x280000400:0x2:0x0_tmp.lov
lhsmtool_posix[25563]: going to copy data from &apos;/mnt/lustre-hsm/.lustre/fid/0x280000400:0x2:0x0&apos; to &apos;/mnt/lustre/0002/0000/0400/8000/0002/0000/0x280000400:0x2:0x0_tmp&apos;
lhsmtool_posix[25563]: Going to copy 1 bytes /mnt/lustre-hsm/.lustre/fid/0x280000400:0x2:0x0 -&amp;gt; /mnt/lustre/0002/0000/0400/8000/0002/0000/0x280000400:0x2:0x0_tmp

lhsmtool_posix[25563]: data archiving for &apos;/mnt/lustre-hsm/.lustre/fid/0x280000400:0x2:0x0&apos; to &apos;/mnt/lustre/0002/0000/0400/8000/0002/0000/0x280000400:0x2:0x0_tmp&apos; done
lhsmtool_posix[25563]: attr file for &apos;/mnt/lustre-hsm/.lustre/fid/0x280000400:0x2:0x0&apos; saved to archive &apos;/mnt/lustre/0002/0000/0400/8000/0002/0000/0x280000400:0x2:0x0_tmp&apos;
lhsmtool_posix[25563]: fsetxattr of &apos;trusted.hsm&apos; on &apos;/mnt/lustre/0002/0000/0400/8000/0002/0000/0x280000400:0x2:0x0_tmp&apos; rc=0 (Success)
lhsmtool_posix[25563]: fsetxattr of &apos;trusted.link&apos; on &apos;/mnt/lustre/0002/0000/0400/8000/0002/0000/0x280000400:0x2:0x0_tmp&apos; rc=0 (Success)
lhsmtool_posix[25563]: fsetxattr of &apos;trusted.lov&apos; on &apos;/mnt/lustre/0002/0000/0400/8000/0002/0000/0x280000400:0x2:0x0_tmp&apos; rc=0 (Success)
lhsmtool_posix[25563]: fsetxattr of &apos;trusted.lma&apos; on &apos;/mnt/lustre/0002/0000/0400/8000/0002/0000/0x280000400:0x2:0x0_tmp&apos; rc=0 (Success)
lhsmtool_posix[25563]: fsetxattr of &apos;lustre.lov&apos; on &apos;/mnt/lustre/0002/0000/0400/8000/0002/0000/0x280000400:0x2:0x0_tmp&apos; rc=0 (Success)
lhsmtool_posix[25563]: xattr file for &apos;/mnt/lustre-hsm/.lustre/fid/0x280000400:0x2:0x0&apos; saved to archive &apos;/mnt/lustre/0002/0000/0400/8000/0002/0000/0x280000400:0x2:0x0_tmp&apos;
lhsmtool_posix[25563]: symlink &apos;/mnt/lustre/shadow/d0/f1&apos; to &apos;../../0002/0000/0400/8000/0002/0000/0x280000400:0x2:0x0&apos; done
lhsmtool_posix[25563]: Action completed, notifying coordinator cookie=0x5399b0c2, FID=[0x280000400:0x2:0x0], hp_flags=0 err=0
lhsmtool_posix[25563]: llapi_hsm_action_end() on &apos;/mnt/lustre-hsm/.lustre/fid/0x280000400:0x2:0x0&apos; ok (rc=0)
lhsmtool_posix[25559]: copytool fs=lustre archive#=1 item_count=1
lhsmtool_posix[25559]: waiting for message from kernel
lhsmtool_posix[25564]: &apos;[0x2c0000401:0x1:0x0]&apos; action ARCHIVE reclen 72, cookie=0x5399b0c2
lhsmtool_posix[25564]: processing file &apos;d0/f2&apos;
lhsmtool_posix[25564]: archiving &apos;/mnt/lustre-hsm/.lustre/fid/0x2c0000401:0x1:0x0&apos; to &apos;/mnt/lustre/0001/0000/0401/c000/0002/0000/0x2c0000401:0x1:0x0_tmp&apos;
lhsmtool_posix[25564]: saving stripe info of &apos;/mnt/lustre-hsm/.lustre/fid/0x2c0000401:0x1:0x0&apos; in /mnt/lustre/0001/0000/0401/c000/0002/0000/0x2c0000401:0x1:0x0_tmp.lov
lhsmtool_posix[25564]: going to copy data from &apos;/mnt/lustre-hsm/.lustre/fid/0x2c0000401:0x1:0x0&apos; to &apos;/mnt/lustre/0001/0000/0401/c000/0002/0000/0x2c0000401:0x1:0x0_tmp&apos;
lhsmtool_posix[25564]: Going to copy 1 bytes /mnt/lustre-hsm/.lustre/fid/0x2c0000401:0x1:0x0 -&amp;gt; /mnt/lustre/0001/0000/0401/c000/0002/0000/0x2c0000401:0x1:0x0_tmp

lhsmtool_posix[25564]: data archiving for &apos;/mnt/lustre-hsm/.lustre/fid/0x2c0000401:0x1:0x0&apos; to &apos;/mnt/lustre/0001/0000/0401/c000/0002/0000/0x2c0000401:0x1:0x0_tmp&apos; done
lhsmtool_posix[25564]: attr file for &apos;/mnt/lustre-hsm/.lustre/fid/0x2c0000401:0x1:0x0&apos; saved to archive &apos;/mnt/lustre/0001/0000/0401/c000/0002/0000/0x2c0000401:0x1:0x0_tmp&apos;
lhsmtool_posix[25564]: fsetxattr of &apos;trusted.hsm&apos; on &apos;/mnt/lustre/0001/0000/0401/c000/0002/0000/0x2c0000401:0x1:0x0_tmp&apos; rc=0 (Success)
lhsmtool_posix[25564]: fsetxattr of &apos;trusted.link&apos; on &apos;/mnt/lustre/0001/0000/0401/c000/0002/0000/0x2c0000401:0x1:0x0_tmp&apos; rc=0 (Success)
lhsmtool_posix[25564]: fsetxattr of &apos;trusted.lov&apos; on &apos;/mnt/lustre/0001/0000/0401/c000/0002/0000/0x2c0000401:0x1:0x0_tmp&apos; rc=0 (Success)
lhsmtool_posix[25564]: fsetxattr of &apos;trusted.lma&apos; on &apos;/mnt/lustre/0001/0000/0401/c000/0002/0000/0x2c0000401:0x1:0x0_tmp&apos; rc=0 (Success)
lhsmtool_posix[25564]: fsetxattr of &apos;lustre.lov&apos; on &apos;/mnt/lustre/0001/0000/0401/c000/0002/0000/0x2c0000401:0x1:0x0_tmp&apos; rc=0 (Success)
lhsmtool_posix[25564]: xattr file for &apos;/mnt/lustre-hsm/.lustre/fid/0x2c0000401:0x1:0x0&apos; saved to archive &apos;/mnt/lustre/0001/0000/0401/c000/0002/0000/0x2c0000401:0x1:0x0_tmp&apos;
lhsmtool_posix[25564]: symlink &apos;/mnt/lustre/shadow/d0/f2&apos; to &apos;../../0001/0000/0401/c000/0002/0000/0x2c0000401:0x1:0x0&apos; done
lhsmtool_posix[25564]: Action completed, notifying coordinator cookie=0x5399b0c2, FID=[0x2c0000401:0x1:0x0], hp_flags=0 err=0
lhsmtool_posix[25564]: llapi_hsm_action_end() on &apos;/mnt/lustre-hsm/.lustre/fid/0x2c0000401:0x1:0x0&apos; ok (rc=0)

[root@mds tests]# sh -vx test_1.sh 
cd /mnt/lustre/d0
+ cd /mnt/lustre/d0
/work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_archive f0
+ /work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_archive f0
sleep 2
+ sleep 2
/work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_state f0
+ /work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_state f0
f0: (0x00000009) exists archived, archive_id:1
/work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_release f0
+ /work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_release f0

/work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_archive f1
+ /work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_archive f1
sleep 2
+ sleep 2
/work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_state f1
+ /work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_state f1
f1: (0x00000009) exists archived, archive_id:1
/work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_release f1
+ /work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_release f1

/work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_archive f2
+ /work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_archive f2
sleep 2
+ sleep 2
/work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_state f2
+ /work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_state f2
f2: (0x00000009) exists archived, archive_id:1
/work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_release f2
+ /work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_release f2

/work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_archive f3
+ /work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_archive f3
sleep 2
+ sleep 2
/work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_state f3
+ /work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_state f3
f3: (0x00000009) exists archived, archive_id:1
/work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_release f3
+ /work/lustre-release_new/lustre-release/lustre/utils/lfs hsm_release f3

[root@mds tests]# 

&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;So I suspect this is related with daemon, not about striped directory.&lt;/p&gt;</comment>
                            <comment id="87175" author="jhammond" created="Fri, 20 Jun 2014 16:16:38 +0000"  >&lt;p&gt;I don&apos;t see any verification that restore either via lfs hsm_restore or by file access works as expected.&lt;/p&gt;

&lt;p&gt;On current master I see that restore in a striped directory doesn&apos;t usually work.&lt;/p&gt;</comment>
                            <comment id="87660" author="di.wang" created="Fri, 27 Jun 2014 07:21:11 +0000"  >&lt;p&gt;&lt;a href=&quot;http://review.whamcloud.com/10866&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/10866&lt;/a&gt;  add striped dir to sanity-hsm.sh&lt;/p&gt;</comment>
                            <comment id="88231" author="jhammond" created="Sun, 6 Jul 2014 14:21:13 +0000"  >&lt;p&gt;Please note that landing &lt;a href=&quot;http://review.whamcloud.com/#/c/10866/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/10866/&lt;/a&gt; does not mean that this issue has been fixed.&lt;/p&gt;</comment>
                            <comment id="89298" author="jhammond" created="Wed, 16 Jul 2014 21:18:49 +0000"  >&lt;p&gt;Di&apos;s patch is at &lt;a href=&quot;http://review.whamcloud.com/#/c/10866/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/10866/&lt;/a&gt;.&lt;/p&gt;</comment>
                            <comment id="90543" author="jhammond" created="Thu, 31 Jul 2014 15:18:47 +0000"  >&lt;p&gt;Patch landed to master.&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzwggf:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>12900</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>