<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 03:40:43 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LUDOC-161] document backup/restore process for ZFS backing filesystems</title>
                <link>https://jira.whamcloud.com/browse/LUDOC-161</link>
                <project id="10070" key="LUDOC">Lustre Documentation</project>
                    <description>&lt;p&gt;The Lustre backup/restore documentation describes how to do a backup/restore using both &quot;tar&quot; and &quot;dd&quot; for ldiskfs devices.  However, the preferred mechanism for backing up and restoring ZFS filesystems is via the &quot;&lt;tt&gt;zfs send&lt;/tt&gt;&quot; and &quot;&lt;tt&gt;zfs recv&lt;/tt&gt;&quot; mechanism.  This will preserve the ZFS dnode numbers, and all of the FID-&amp;gt;dnode mappings in the OI files.  The zfs dump/restore functionality can also be used to do incremental backups, and keep two physically-separate devices in relatively close synchronization for disaster-recovery and other purposes.&lt;/p&gt;

&lt;p&gt;It is also not currently possible to do a file-level backup from ldiskfs and restore this into a new ZFS filesystem because there is no OI Scrub facility for ZFS OSDs, so that should be documented as unsupported until &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7585&quot; title=&quot;Implement OI Scrub for ZFS&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7585&quot;&gt;&lt;del&gt;LU-7585&lt;/del&gt;&lt;/a&gt; is resolved. &lt;/p&gt;</description>
                <environment></environment>
        <key id="19505">LUDOC-161</key>
            <summary>document backup/restore process for ZFS backing filesystems</summary>
                <type id="4" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11310&amp;avatarType=issuetype">Improvement</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="LM-Triage">Lustre Manual Triage</assignee>
                                    <reporter username="adilger">Andreas Dilger</reporter>
                        <labels>
                            <label>release</label>
                    </labels>
                <created>Thu, 20 Jun 2013 22:24:05 +0000</created>
                <updated>Fri, 13 May 2022 00:09:38 +0000</updated>
                            <resolved>Fri, 13 May 2022 00:09:38 +0000</resolved>
                                                                        <due></due>
                            <votes>0</votes>
                                    <watches>13</watches>
                                                                            <comments>
                            <comment id="61097" author="sknolin" created="Mon, 24 Jun 2013 15:29:17 +0000"  >&lt;p&gt;Here are my notes for a backup and restore test.&lt;/p&gt;

&lt;h1&gt;&lt;a name=&quot;ZFSsnapshotsandsend%2Freceiveforobjectbackups.&quot;&gt;&lt;/a&gt;ZFS snapshots and send/receive for object backups.&lt;/h1&gt;

&lt;p&gt;This example is for combined mgs/mdt object, but the same would apply for an OST device-level backup. This example was run on Redhat Enterprise Linux 6.2 and lustre 2.4.0. &lt;/p&gt;

&lt;h2&gt;&lt;a name=&quot;Serversandfilesystemsintheexample&quot;&gt;&lt;/a&gt;Servers and filesystems in the example&lt;/h2&gt;

&lt;p&gt;&lt;b&gt;lustre2-8-25&lt;/b&gt; - MGS/MDT server&lt;br/&gt;
lustre-meta/meta - Lustre ZFS MGS/MDT volume/filesystem on lustre2-8-25&lt;br/&gt;
&lt;b&gt;lustre2-8-11&lt;/b&gt; - OSS/OST server&lt;br/&gt;
lustre-ost0 - Lustre ZFS OST volume on lustre2-8-11&lt;/p&gt;

&lt;h2&gt;&lt;a name=&quot;Backinguptheobject&quot;&gt;&lt;/a&gt;Backing up the object &lt;/h2&gt;

&lt;h3&gt;&lt;a name=&quot;Takeasnapshot&quot;&gt;&lt;/a&gt;Take a snapshot&lt;/h3&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;zfs snapshot -r lustre-meta@lustre-meta-backup
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;&quot;-r&quot; means do a recursive snapshot, so this will include both the volume and the filesystem.&lt;/p&gt;

&lt;h3&gt;&lt;a name=&quot;listexistingsnapshots&quot;&gt;&lt;/a&gt;list existing snapshots&lt;/h3&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@lustre2-8-25 ~]# zfs list -t snapshot
NAME                                  USED  AVAIL  REFER  MOUNTPOINT
lustre-meta@lustre-meta-backup           0      -    30K  -
lustre-meta/meta@lustre-meta-backup      0      -   287M  -
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt; 

&lt;h3&gt;&lt;a name=&quot;sendandstoreonaremoteZFS%2FLustreserver%3A&quot;&gt;&lt;/a&gt;send and store on a remote ZFS/Lustre server:&lt;/h3&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;zfs send -R lustre-meta@lustre-meta-backup | ssh lustre2-8-11 zfs receive lustre-ost0/lustre-meta
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;&lt;b&gt;note&lt;/b&gt; &quot;-R&quot; recursively sends the volume, filesystem, and preserves all properties. It is critical to preserve filesystem properties. If not using the &quot;-R&quot; flag, be sure to use &quot;-p&quot;, we will show that during recovery.&lt;/p&gt;

&lt;h3&gt;&lt;a name=&quot;Examineonremoteside&quot;&gt;&lt;/a&gt;Examine on remote side&lt;/h3&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@lustre2-8-25 ~]# ssh lustre2-8-11 zfs list -t snapshot
NAME                                              USED  AVAIL  REFER  MOUNTPOINT
lustre-ost0/lustre-meta@lustre-meta-backup           0      -  64.5K  -
lustre-ost0/lustre-meta/meta@lustre-meta-backup      0      -   605M  -
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;h2&gt;&lt;a name=&quot;Recoveryfromfailure.&quot;&gt;&lt;/a&gt;Recovery from failure. &lt;/h2&gt;

&lt;p&gt;In testing, I first corrupted the filesystem with &apos;dd&apos;. You could also simply reformat it for testing.&lt;/p&gt;

&lt;h3&gt;&lt;a name=&quot;CreateanewZFSlustrevolume%2Ffilesystemwiththesamename.&quot;&gt;&lt;/a&gt;Create a new ZFS lustre volume/filesystem with the same name. &lt;/h3&gt;

&lt;p&gt;In my test case we have a raid 10:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;mkfs.lustre --fsname=cove --mgs --mdt --backfstype=zfs lustre-meta/meta mirror ssd0 ssd1 mirror ssd2 ssd3
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;h3&gt;&lt;a name=&quot;Mountwith%22servicestartlustre%22&quot;&gt;&lt;/a&gt;Mount with &quot;service start lustre&quot;&lt;/h3&gt;

&lt;p&gt;This makes a volume called &quot;lustre-meta&quot; and filesystem &quot;meta&quot;&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@lustre2-8-25 ~]# zfs list
NAME               USED  AVAIL  REFER  MOUNTPOINT
lustre-meta        315M   732G    30K  /lustre-meta
lustre-meta/meta   315M   732G   287M  /lustre-meta/meta
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;&quot;mount&quot; command shows:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;lustre-meta/meta on /mnt/lustre/local/cove-MDT0000 type lustre (rw)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;h3&gt;&lt;a name=&quot;Loginintolustre2811%28remotetargetwhereyoustoredthesnapshot%29%2Candsendthefilesystemback.&quot;&gt;&lt;/a&gt;Login into lustre2-8-11 (remote target where you stored the snapshot), and send the filesystem back.  &lt;/h3&gt;

&lt;p&gt;Now I will only send the filesystem back, not the whole volume (why do a whole volume? Convenient if you have multiple datasets?)&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;zfs send -p lustre-ost0/lustre-meta/meta@meta-backup | ssh lustre2-8-25 zfs receive lustre-meta/meta-new@recover
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;-p = preserve attributes, important for the lustre filesystem to mount.&lt;/p&gt;

&lt;h3&gt;&lt;a name=&quot;BackonOnlustre2811%28failedmetadataserver%29%2Crenamethefilesystemtomakethesnapshotactive.&quot;&gt;&lt;/a&gt;Back on lustre2-8-11 (failed metadata server), rename the filesystem to make the snapshot active.&lt;/h3&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;zfs rename lustre-meta/meta lustre-meta/meta-old
cannot rename &apos;lustre-meta/meta&apos;: dataset is busy
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;&lt;b&gt;oops!&lt;/b&gt; That didn&apos;t work. You need to unmount the filesystem so it isn&apos;t busy.&lt;br/&gt;
Note, this doesn&apos;t mean stop the lustre service, if you do you can&apos;t access the zfs volume.&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;umount /mnt/lustre/local/cove-MDT0000
zfs rename lustre-meta/meta lustre-meta/meta-old
zfs rename lustre-meta/meta-new lustre-meta/meta
zfs destroy lustre-meta/meta-old
service lustre stop
service lustre start
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;&lt;b&gt;You should now be recovered.&lt;/b&gt;&lt;/p&gt;</comment>
                            <comment id="84347" author="sknolin" created="Sun, 18 May 2014 01:55:35 +0000"  >&lt;p&gt;After living with this for a while, there are obviously a lot of things to consider and possibly add to any documentation on backup/restore for zfs backed filesystems.&lt;/p&gt;

&lt;p&gt;While snapshots do work, zfs send performance can be very slow, and there is an impact on the file system.&lt;/p&gt;

&lt;p&gt;More than just a zfs snapshot/send type recipe, some best practice recommendations are needed.&lt;/p&gt;</comment>
                            <comment id="174694" author="jgmitter" created="Tue, 22 Nov 2016 18:16:45 +0000"  >&lt;p&gt;Sure Andreas, I will sync up with Nathaniel and Zhiqi.&lt;/p&gt;</comment>
                            <comment id="197705" author="adilger" created="Wed, 31 May 2017 16:25:36 +0000"  >&lt;p&gt;Hi Tom,&lt;br/&gt;
Based on your LUG presentation today about using ZFS send/recv for migration, the process to do a backup of the ZFS MDT (or OST). Would you be interested to take a crack at updating the Lustre user manual to document the procedure to do a device level backup of a ZFS MDT or OST?  It should be noted that rsync is not a workable solution for ZFS until the ZFS OI scrub is implemented. &lt;/p&gt;</comment>
                            <comment id="197717" author="thcrowe" created="Wed, 31 May 2017 18:09:26 +0000"  >&lt;p&gt;Hi Andreas,&lt;/p&gt;

&lt;p&gt;I would be glad to take a crack at updating the Lustre user manual to document the procedure to do a device level backup of a ZFS MDT or OST. &lt;/p&gt;</comment>
                            <comment id="213782" author="yong.fan" created="Wed, 15 Nov 2017 16:08:40 +0000"  >&lt;p&gt;The steps for backup ZFS backend via ZPL by &apos;tar&apos;:&lt;/p&gt;

&lt;p&gt;1) Before umount the target (MDT or OST), please enable index backup on the target. For example:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;lctl set_param osd-zfs.${fsname}-MDT0000.index_backup=1
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;Or you can write the proc interface on the target directly:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;echo 1 &amp;gt; /proc/fs/lustre/osd-zfs/${fsname}-MDT0000/index_backup
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;2) Umount the target.&lt;/p&gt;

&lt;p&gt;3) Import the pool for the target if it is exported during the step 2). For example:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;zpool import lustre-mdt1 [-d ${target_device_dir}]
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;4) Enable &quot;canmount&quot;  property on the target FS. For example:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;zfs set canmount=on ${fsname}-mdt1/mdt1
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;You also can specify the &quot;mountpoint&quot; property. By default, it will be /${fsname}-mdt1/mdt1&lt;/p&gt;

&lt;p&gt;5) Mount the target as &apos;zfs&apos;. For example:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;zfs mount ${fsname}-mdt1/mdt1
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;6) Backup the data. For example:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;tar jcf /tmp/target.tar.bz2 --xattrs --xattrs-include=&quot;trusted.*&quot; -C /${fsname}-mdt1/mdt1/ .
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;7) Umount the target and export the pool if you want.&lt;/p&gt;

&lt;p&gt;Please save the /tmp/target.bz2 and /tmp/target.ea. If you want to migrate system from ZFS to ldiskfs, please backup the system as the same steps.&lt;/p&gt;</comment>
                            <comment id="213784" author="yong.fan" created="Wed, 15 Nov 2017 16:22:44 +0000"  >&lt;p&gt;The steps for restore ZFS backend via ZPL by &apos;tar&apos;:&lt;/p&gt;

&lt;p&gt;1) Create new pool for the target if necessary, then reformat new Lustre FS with &quot;--replace&quot; parameter. For example:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;mkfs.lustre --mgs --fsname=${fsname} --mdt --index=0 --replace --param=sys.timeout=20 --backfstype=zfs --reformat ${fsname}-mdt1/mdt1 ${target_device}
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;2) Enable &quot;canmount&quot; property on the target FS. For example:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;zfs set canmount=on ${fsname}-mdt1/mdt1
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;3) Mount the target as &apos;zfs&apos;. For example:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;zfs mount ${fsname}-mdt1/mdt1
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;4) Restore the data. For example:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;tar jxf /tmp/target.tar.bz2 --xattrs --xattrs-include=&quot;trusted.*&quot; -C /${fsname}-mdt1/mdt1/
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;5) Remove stale OIs and index objects. For example:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;cd /${fsname}-mdt1/mdt1 &amp;amp;&amp;amp; rm -rf oi.* OI_* lfsck_* LFSCK &amp;amp;&amp;amp; sync &amp;amp;&amp;amp; cd -
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;6) Umount the target.&lt;/p&gt;

&lt;p&gt;7) (optional) If the restored system has different NID as the backup system, please change NID. For detail, please refer to Lustre manual 14.5. For example:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;mount -t lustre -o nosvc ${fsname}-mdt1/mdt1 $mount_point
lctl replace_nids ${fsname}-MDTxxxx $new_nids
...
umount $mount_point
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;8) Mount the target as &quot;lustre&quot;. Usually, we will use &quot;-o abort_recov&quot; option to skip unnecessary recovery. For example:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;mount -t lustre -o abort_recov ${fsname}-mdt1/mdt1 $mount_point
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;The osd-zfs can detect the restore automatically when mount the target, then trigger OI scrub to rebuild the OIs and index objects asynchronously at background. You can check the OI scrub status. For example:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;lctl get_param -n osd-zfs.${fsname}-${target}.oi_scrub
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;Or you can read the proc interface on the target directly:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;cat /proc/fs/lustre/osd-zfs/${fsname}-${target}/oi_scrub
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;If you want to restore the system from ldiskfs-based backup, please follow the same steps.&lt;/p&gt;</comment>
                            <comment id="213786" author="yong.fan" created="Wed, 15 Nov 2017 16:39:23 +0000"  >&lt;p&gt;If you want to migrate the system from ldiskfs to ZFS, please backup your system as following:&lt;/p&gt;

&lt;p&gt;1) Before umount the target (MDT or OST), please enable index backup on the target. For example:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;lctl set_param osd-ldiskfs.${fsname}-MDT0000.index_backup=1
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;Or you can write the proc interface on the target directly:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;echo 1 &amp;gt; /proc/fs/lustre/osd-ldiskfs/${fsname}-MDT0000/index_backup
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;2) Umount the target.&lt;/p&gt;

&lt;p&gt;Then you can follow the standard steps to backup the target normally via &quot;tar&quot; and &quot;getfattr&quot; as described in the Lustre manual 18.3.&lt;/p&gt;</comment>
                            <comment id="213787" author="yong.fan" created="Wed, 15 Nov 2017 16:42:03 +0000"  >&lt;p&gt;Please consider to update Lustre manual according to above three comments. Thanks!&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://jira.hpdd.intel.com/browse/LUDOC-161?focusedCommentId=213782&amp;amp;page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-213782&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://jira.hpdd.intel.com/browse/LUDOC-161?focusedCommentId=213782&amp;amp;page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-213782&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://jira.hpdd.intel.com/browse/LUDOC-161?focusedCommentId=213784&amp;amp;page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-213784&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://jira.hpdd.intel.com/browse/LUDOC-161?focusedCommentId=213784&amp;amp;page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-213784&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://jira.hpdd.intel.com/browse/LUDOC-161?focusedCommentId=213786&amp;amp;page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-213786&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://jira.hpdd.intel.com/browse/LUDOC-161?focusedCommentId=213786&amp;amp;page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-213786&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="214147" author="gerrit" created="Mon, 20 Nov 2017 16:15:50 +0000"  >&lt;p&gt;Joseph Gmitter (joseph.gmitter@intel.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/30172&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/30172&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LUDOC-161&quot; title=&quot;document backup/restore process for ZFS backing filesystems&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LUDOC-161&quot;&gt;&lt;del&gt;LUDOC-161&lt;/del&gt;&lt;/a&gt; zfs: Backup/Restore of ZFS backend filesystems&lt;br/&gt;
Project: doc/manual&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 843d957d7553ade81404dd3fdbefc6240b32d11a&lt;/p&gt;</comment>
                            <comment id="224765" author="gerrit" created="Wed, 28 Mar 2018 20:33:02 +0000"  >&lt;p&gt;Joseph Gmitter (joseph.gmitter@intel.com) merged in patch &lt;a href=&quot;https://review.whamcloud.com/30172/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/30172/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LUDOC-161&quot; title=&quot;document backup/restore process for ZFS backing filesystems&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LUDOC-161&quot;&gt;&lt;del&gt;LUDOC-161&lt;/del&gt;&lt;/a&gt; zfs: Backup/Restore of ZFS backend filesystems&lt;br/&gt;
Project: doc/manual&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 3eba9c153757b350882c28161e45b4e5815617b2&lt;/p&gt;</comment>
                            <comment id="334702" author="adilger" created="Fri, 13 May 2022 00:09:38 +0000"  >&lt;p&gt;Included in the 2.11 manual.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="33790">LU-7585</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="14211">LUDOC-56</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="36378">LU-8124</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="43008">LU-9023</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvtq7:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>8773</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                                                                                </customfields>
    </item>
</channel>
</rss>