<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:23:07 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
<language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-2190] failure on conf-sanity.sh test_49: Different LDLM_TIMEOUT:6 20 20</title>
                <link>https://jira.whamcloud.com/browse/LU-2190</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for Li Wei &amp;lt;liwei@whamcloud.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/6f4fd31a-1735-11e2-afe1-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/6f4fd31a-1735-11e2-afe1-52540035b04c&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The sub-test test_49 failed with the following error:&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;Different LDLM_TIMEOUT:6 20 20&lt;/p&gt;&lt;/blockquote&gt;

&lt;p&gt;Info required for matching: conf-sanity 49&lt;/p&gt;</description>
                <environment></environment>
        <key id="16376">LU-2190</key>
            <summary>failure on conf-sanity.sh test_49: Different LDLM_TIMEOUT:6 20 20</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="laisiyao">Lai Siyao</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                            <label>sequoia</label>
                            <label>zfs</label>
                    </labels>
                <created>Tue, 16 Oct 2012 00:49:58 +0000</created>
                <updated>Tue, 7 Jun 2016 03:05:16 +0000</updated>
                            <resolved>Wed, 13 Mar 2013 05:07:53 +0000</resolved>
                                    <version>Lustre 2.4.0</version>
                                    <fixVersion>Lustre 2.4.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>11</watches>
                                                                            <comments>
                            <comment id="47910" author="liwei" created="Fri, 16 Nov 2012 03:33:09 +0000"  >&lt;p&gt;&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/18e3b25c-2f96-11e2-bd52-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/18e3b25c-2f96-11e2-bd52-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="48069" author="pjones" created="Tue, 20 Nov 2012 09:15:48 +0000"  >&lt;p&gt;Lai will work on this one&lt;/p&gt;</comment>
                            <comment id="48474" author="laisiyao" created="Wed, 28 Nov 2012 02:17:14 +0000"  >&lt;p&gt;Hi Alex, zfs_write_ldd() doesn&apos;t write parameters except &quot;PARAM_ID_UPCALL&quot;, is this on purpose, or they are missing?&lt;/p&gt;

&lt;p&gt;If it&apos;s on purpose, then current test framework needs to set these parameters (eg. timeout, at_min...) explicitly via `lctl conf_param ...`, this will be a lot of changes. Besides customer may find their previous script not working properly.&lt;/p&gt;

&lt;p&gt;Any suggestion for this?&lt;/p&gt;</comment>
                            <comment id="48475" author="bzzz" created="Wed, 28 Nov 2012 02:26:26 +0000"  >&lt;p&gt;no, you&apos;re right that zfs_write_ldd() should be putting all the parameters into some variable. I&apos;d suggest to introduce another zfs property to store all the parameters, but let&apos;s clarify with Brian he is OK with that.&lt;/p&gt;</comment>
                            <comment id="48476" author="bzzz" created="Wed, 28 Nov 2012 02:30:07 +0000"  >&lt;p&gt;Brian, would you be OK if we store all the parameters an user specify into a single ZFS property. parse_ldd() prefix all the parameters found in mountdata and obd_mount.c recognizes this prefix properly. I guess one possible issue is compatibility with the existing setups, but I hope we can work this around easily.&lt;/p&gt;</comment>
                            <comment id="48594" author="adilger" created="Fri, 30 Nov 2012 13:27:30 +0000"  >&lt;p&gt;Ideally, these properties would be added to the normal conf_param log, so that we didn&apos;t have to have two different places to look for them, but I agree that this may make things more complex for format-time config parameters.&lt;/p&gt;

&lt;p&gt;I think it is fine to store the mount config parameters as a ZFS property.  Please use a name like &quot;org.lustre.ldd&quot; or something, which is standard for this type of property.&lt;/p&gt;
</comment>
                            <comment id="48659" author="laisiyao" created="Mon, 3 Dec 2012 03:53:38 +0000"  >&lt;p&gt;I made a patch at &lt;a href=&quot;http://review.whamcloud.com/#change,4732&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#change,4732&lt;/a&gt;, but I&apos;m not sure it fits your requirement, please check it.&lt;/p&gt;</comment>
                            <comment id="48824" author="behlendorf" created="Wed, 5 Dec 2012 14:52:56 +0000"  >&lt;p&gt;Alex, no we want to keep them all as separate ZFS user properties.  ZFS supports are arbitrary number of user properties of the form lustre:&amp;lt;key&amp;gt;=&amp;lt;value&amp;gt; which are easily accessible in both kernel space and user space.  From the user perspective we&apos;ve already found this to be far more convenient than how ldiskfs stores this data.  Why would you want to collapse them to a single entry?&lt;/p&gt;</comment>
                            <comment id="49202" author="bzzz" created="Thu, 13 Dec 2012 15:04:31 +0000"  >&lt;p&gt;Brian, because then you need to know literally all possible parameters. and one adding a new parameter would need to modify mount_utils_zfs.c as well.&lt;br/&gt;
I tend to think this is a potential source of different issues in the future.&lt;/p&gt;
</comment>
                            <comment id="50094" author="laisiyao" created="Tue, 8 Jan 2013 00:55:15 +0000"  >&lt;p&gt;Patch is landed on master.&lt;/p&gt;</comment>
                            <comment id="50989" author="morrone" created="Tue, 22 Jan 2013 17:11:00 +0000"  >&lt;p&gt;Maybe we weren&apos;t clear enough.  We do NOT want this!  This change was not at all backwards compatible, and has now broken Lustre on our ZFS systems.  Please revert!&lt;/p&gt;</comment>
                            <comment id="50992" author="prakash" created="Tue, 22 Jan 2013 18:20:58 +0000"  >&lt;p&gt;I pushed a patch to revert that commit here: &lt;a href=&quot;http://review.whamcloud.com/5144&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/5144&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="51182" author="adilger" created="Fri, 25 Jan 2013 00:29:42 +0000"  >&lt;p&gt;Chris or Prakash, can you please comment on which specific parameters you are setting with these dataset properties before the patch landed that broke your mounting?&lt;/p&gt;

&lt;p&gt;Are you looking to be able to store arbitrary configuration parameters on the ZFS dataset as properties, or only a specific limited number of parameters that are needed during mount that cannot be fetched from the MGS config?&lt;/p&gt;</comment>
                            <comment id="51241" author="behlendorf" created="Fri, 25 Jan 2013 14:17:24 +0000"  >&lt;p&gt;Andreas, I think for ZFS OSDs we need to absolutely store all the Lustre parameters of interest to an administrator this way.  Certainly everything the administrator might need to modify, and potentially make the rest visible as read-only properties.   It makes it far easier to 1) know what options are available, 2) verify what they are set too, and 3) change a particular option.  As a nice bonus we can even cleanly register callbacks so Lustre gets notified when one of them is changed so they may be modified online.  This is an ideal interface for Lustre to use.  If it needs to be extended in some way for Lustre we should look in to that.  Frankly, as it was done for ldiskfs was a constant pain anytime something needed to change so I&apos;d like to avoid that entirely with ZFS if we can.&lt;/p&gt;

&lt;p&gt;In fact, doing it this way already has saved us a lot of trouble in one instance where we needed to clear a single bit in the ldd flags on all the servers due to a registration bug.  We were able to trivially clear the single bit we needed  and verify that change across all the servers.  It was soooooooo nice.&lt;/p&gt;</comment>
                            <comment id="51251" author="adilger" created="Fri, 25 Jan 2013 16:07:16 +0000"  >&lt;p&gt;Brian,&lt;br/&gt;
storing the individual fields from &quot;lustre_disk_data&quot; (e.g. ldd_flags) is not in question here.  The original issue is that while the lustre_disk_data struct an &quot;ldd_params&quot; field for storing arbitrary configuration strings, it was split into separate fields for only ZFS.  This patch restored it to the original behaviour of just containing a string of parameters that didn&apos;t need to be handled individually and explicitly in userspace, and keeps the handling uniform between ZFS and ldiskfs.  Without this, there were a number of parameters (e.g. &quot;timeout&quot;, &quot;network&quot;, etc) that were not explicitly handled, and this would add an ongoing extra maintenance effort.&lt;/p&gt;

&lt;p&gt;I have no objection to restore the 4 parameters that were disabled by the &lt;a href=&quot;http://review.whamcloud.com/4732&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/4732&lt;/a&gt; patch:&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;#define LDD_MGSNODE_PROP                &quot;lustre:mgsnode&quot;
#define LDD_FAILNODE_PROP               &quot;lustre:failnode&quot;
#define LDD_FAILMODE_PROP               &quot;lustre:failmode&quot;
#define LDD_IDENTITY_UPCALL_PROP        &quot;lustre:identity_upcall&quot;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;since they are the only ones that could have been broken by this patch (though strictly only the first three are needed, &quot;identity_upcall&quot; could be set from the MGS for all MDTs).&lt;/p&gt;

&lt;p&gt;&quot;ldd_params&quot; is intended for a limited number of parameters (e.g. mgsnode, failnode, etc) that need to be set on a per-target basis when the filesystem is mounted and do not have /proc/fs/lustre equivalents.  The remaining global parameters can be fetched from the MGS config log for all OSTs, or they would have to be updated manually for every MDT or OST in the filesystem, and applied for every new target that is added.&lt;/p&gt;

&lt;p&gt;I now see that the &quot;network&quot; option is not explicitly supported by the &quot;lustre:*&quot; parsing, which would need to be added in addition to explicit &quot;timeout&quot; and potentially other tunables.&lt;/p&gt;

&lt;p&gt;Separately, if there is a desire for setting arbitrary parameters in lprocfs for a particular ZFS target (i.e. anything in /proc/fs/lustre/{type}/{name}/{parameter}) a generic mechanism could be created to find and parse ZFS properties of the form &quot;lustre:param:{parameter}&quot; for that target.&lt;/p&gt;

&lt;p&gt;I agree that the Lustre configuration code could be improved, and there is a desire to do this for Chroma as well, but I don&apos;t think this bug is the right place to do that design.&lt;/p&gt;

&lt;p&gt;Note that for ldiskfs, unlike ZFS, there is no space for arbitrary parameter storage beyond the &quot;ldd_params&quot; field, so it wouldn&apos;t be possible to add/remove individual parameters for ldiskfs without essentially writing helpers to extract them and concatenate them again in mount_utils_ldiskfs.c and in the kernel, and it still wouldn&apos;t be shared between kernel &amp;amp; userspace like the ZFS dataset properties are today.&lt;/p&gt;</comment>
                            <comment id="51478" author="behlendorf" created="Wed, 30 Jan 2013 15:55:05 +0000"  >&lt;p&gt;&amp;gt; storing the individual fields from &quot;lustre_disk_data&quot; (e.g. ldd_flags) is not in question here.&lt;/p&gt;

&lt;p&gt;Ahh good, that was part of my misunderstanding.  That&apos;s my fault for not actually looking at the patch.  Then to be perfectly clear the following lustre_disk_data entries will be left as properties.&lt;/p&gt;

&lt;p&gt;```&lt;br/&gt;
/* Persistent mount data is stored in these user attributes */&lt;br/&gt;
#define LDD_VERSION_PROP          &quot;lustre:version&quot;             /* ldd-&amp;gt;ldd_config_ver */&lt;br/&gt;
#define LDD_FLAGS_PROP            &quot;lustre:flags&quot;               /* ldd-&amp;gt;ldd_flags */&lt;br/&gt;
#define LDD_INDEX_PROP            &quot;lustre:index&quot;               /* ldd-&amp;gt;ldd_svindex /*&lt;br/&gt;
#define LDD_FSNAME_PROP           &quot;lustre:fsname&quot;              /* ldd-&amp;gt;ldd_fsname */&lt;br/&gt;
#define LDD_SVNAME_PROP           &quot;lustre:svname&quot;              /* ldd-&amp;gt;ldd_svname */&lt;br/&gt;
#define LDD_UUID_PROP             &quot;lustre:uuid&quot;                /* ldd-&amp;gt;ldd_uuid */&lt;br/&gt;
#define LDD_USERDATA_PROP         &quot;lustre:userdata&quot;            /* ldd-&amp;gt;ldd_userdata */&lt;br/&gt;
#define LDD_MOUNTOPTS_PROP        &quot;lustre:mountopts&quot;           /* ldd-&amp;gt;ldd_mount_ops */&lt;br/&gt;
```&lt;/p&gt;

&lt;p&gt;&amp;gt; &quot;ldd_params&quot; is intended for a limited number of parameters (e.g. mgsnode, failnode, etc) that need to be set on a per-target basis when the filesystem is mounted...&lt;/p&gt;

&lt;p&gt;Correct me if I&apos;m wrong, but the only reason to make this distinction in the first place (which is subtle and totally non-obvious to an end user) is so these options can be passed as a string to mount(2).  Exactly how the lustre_disk_data gets stored on disk for a particular osd should be handled by that osd.  We wouldn&apos;t want the limitations of any one OSD to dictate to the others how this data must be stored.  That&apos;s why the wrapper osd_write_ldd() and osd_read_ldd() functions were added in the first place.&lt;/p&gt;

&lt;p&gt;It sounds to me like the problem here isn&apos;t that they are stored on disk differently, it&apos;s that several were omitted in the initial implementation.  Let me propose a new patch (today/tomorrow) which instead of collapsing these individual ZFS properties in to ldd_params adds all the missing PARAM_*s in a generic fashion.  The LDD_MSGNODE_PROP, LDD_FAILNODE_PROP, LDD_FAILMODE_PROP, and LDD_INDENTITY_UPCALL_PROP will be removed and the ZFS property names for params can be dynamically generated as &apos;lustre:&amp;lt;param key&amp;gt;&apos;.  This way if you add a new PARAM_XYZ you won&apos;t need to update the zfs_read_ldd() or zfs_write_ldd() functions.  That should address your maintenance concerns.  For LDD_FAILNODE_PROP-&amp;gt;PARAM_FAILNODE and LDD_FAILMODE_PROP-&amp;gt;PARAM_FAILMODE where the names currently differ we can rename the property on our existing file systems to avoid the need for compatibility code. Presumably there aren&apos;t other sites running ZFS just yet (are there?).&lt;/p&gt;

&lt;p&gt;On a related but separate note, it would be awfully nice to someday pass a versioned &apos;struct lustre_disk_data&apos; as the mount data instead of this string.  This would effectively eliminate a lot of the current parsing headaches.  Or better yet, the lustre_disk_data could be read from disk in the context of mount(2).  But that&apos;s of course a much more ambitious bit of development work.&lt;/p&gt;</comment>
                            <comment id="51500" author="prakash" created="Wed, 30 Jan 2013 20:11:48 +0000"  >&lt;p&gt;Please see: &lt;a href=&quot;http://review.whamcloud.com/5220&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/5220&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="53803" author="jlevi" created="Tue, 12 Mar 2013 14:09:10 +0000"  >&lt;p&gt;Patch landed to master. Please reopen or let me know if I need to reopen if more work needs to be done.&lt;/p&gt;</comment>
                            <comment id="53804" author="pjones" created="Tue, 12 Mar 2013 14:14:03 +0000"  >&lt;p&gt;Isn&apos;t &lt;a href=&quot;http://review.whamcloud.com/#change,5671&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#change,5671&lt;/a&gt; needed also?&lt;/p&gt;</comment>
                            <comment id="53805" author="morrone" created="Tue, 12 Mar 2013 14:15:55 +0000"  >&lt;p&gt;Yes, &lt;a href=&quot;http://review.whamcloud.com/5671&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;5671&lt;/a&gt; is the one that we need at LLNL.&lt;/p&gt;</comment>
                            <comment id="53806" author="morrone" created="Tue, 12 Mar 2013 14:18:35 +0000"  >&lt;p&gt;Oh, sorry, that looks like code cleanup in 5671, not critical.&lt;/p&gt;</comment>
                            <comment id="53807" author="pjones" created="Tue, 12 Mar 2013 14:21:06 +0000"  >&lt;p&gt;ok then let&apos;s reopen but lower the priority to track the code cleanup patch landing&lt;/p&gt;</comment>
                            <comment id="53872" author="laisiyao" created="Wed, 13 Mar 2013 05:07:53 +0000"  >&lt;p&gt;all needed patches landed.&lt;/p&gt;</comment>
                            <comment id="53902" author="jlevi" created="Wed, 13 Mar 2013 08:48:30 +0000"  >&lt;p&gt;Now that &lt;a href=&quot;http://review.whamcloud.com/#change,5671&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#change,5671&lt;/a&gt; has landed, can this ticket be closed?&lt;/p&gt;</comment>
                            <comment id="53925" author="prakash" created="Wed, 13 Mar 2013 13:01:54 +0000"  >&lt;p&gt;From my point of view it can be closed. Thanks.&lt;/p&gt;</comment>
                            <comment id="154690" author="yong.fan" created="Mon, 6 Jun 2016 01:14:52 +0000"  >&lt;p&gt;Hit it again on master:&lt;br/&gt;
&lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/ce06c2b0-2b04-11e6-a0ce-5254006e85c2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/ce06c2b0-2b04-11e6-a0ce-5254006e85c2&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="154857" author="adilger" created="Tue, 7 Jun 2016 02:24:25 +0000"  >&lt;p&gt;If a bug hasn&apos;t been hit in 3+ years, but the same test fails again it deserves to have a new bug filed.&lt;/p&gt;</comment>
                            <comment id="154859" author="yong.fan" created="Tue, 7 Jun 2016 03:05:16 +0000"  >&lt;p&gt;New ticket for this failure:&lt;br/&gt;
&lt;a href=&quot;https://jira.hpdd.intel.com/browse/LU-8243&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://jira.hpdd.intel.com/browse/LU-8243&lt;/a&gt;&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvahb:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>5233</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>