<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:23:03 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
<language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-2181] failure conf-sanity test_23a: umount -f client hung in stat() when MDS down</title>
                <link>https://jira.whamcloud.com/browse/LU-2181</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for yujian &amp;lt;yujian@whamcloud.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://maloo.whamcloud.com/test_sets/140674a6-16b2-11e2-962d-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/140674a6-16b2-11e2-962d-52540035b04c&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;Lustre Tag: v2_3_0_RC3&lt;br/&gt;
Lustre Build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_3/36&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-b2_3/36&lt;/a&gt;&lt;br/&gt;
Distro/Arch: RHEL6.3/x86_64(server), FC15/x86_64(client)&lt;br/&gt;
Network: TCP&lt;br/&gt;
ENABLE_QUOTA=yes&lt;/p&gt;

&lt;p&gt;The sub-test test_23a hung at unmounting the client:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;== conf-sanity test 23a: interrupt client during recovery mount delay ================================ 02:41:31 (1350294091)
start mds service on fat-amd-2
Starting mds1:   /dev/sdc5 /mnt/mds1
Started lustre-MDT0000
start ost1 service on fat-amd-3
Starting ost1:   /dev/sdc5 /mnt/ost1
Started lustre-OST0000
mount lustre on /mnt/lustre.....
Starting client: client-5: -o user_xattr,flock fat-amd-2@tcp:/lustre /mnt/lustre
Stopping /mnt/mds1 (opts:) on fat-amd-2
Stopping client /mnt/lustre (opts: -f)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Stack trace on client:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[ 5526.947537] umount          S ffff880316bb3170     0  7395   7009 0x00000080
[ 5526.954596]  ffff8803136e57c8 0000000000000082 00000001004fdeea ffff88030af44560
[ 5526.962037]  ffff8803136e5fd8 ffff8803136e5fd8 0000000000013840 0000000000013840
[ 5526.969479]  ffff880323191720 ffff88030af44560 0000000000000000 0000000000000286
[ 5526.976921] Call Trace:
[ 5526.979396]  [&amp;lt;ffffffffa054a570&amp;gt;] ? ptlrpc_interrupted_set+0x0/0x120 [ptlrpc]
[ 5526.986517]  [&amp;lt;ffffffff8147461a&amp;gt;] schedule_timeout+0xa7/0xde
[ 5526.992168]  [&amp;lt;ffffffff81060b58&amp;gt;] ? process_timeout+0x0/0x10
[ 5526.997829]  [&amp;lt;ffffffffa02ae761&amp;gt;] cfs_waitq_timedwait+0x11/0x20 [libcfs]
[ 5527.004550]  [&amp;lt;ffffffffa0555a9c&amp;gt;] ptlrpc_set_wait+0x2ec/0x8c0 [ptlrpc]
[ 5527.011066]  [&amp;lt;ffffffff8104df76&amp;gt;] ? default_wake_function+0x0/0x14
[ 5527.017270]  [&amp;lt;ffffffffa05560e8&amp;gt;] ptlrpc_queue_wait+0x78/0x230 [ptlrpc]
[ 5527.023900]  [&amp;lt;ffffffffa05386c5&amp;gt;] ldlm_cli_enqueue+0x2f5/0x7b0 [ptlrpc]
[ 5527.030528]  [&amp;lt;ffffffffa0536d90&amp;gt;] ? ldlm_completion_ast+0x0/0x6f0 [ptlrpc]
[ 5527.037408]  [&amp;lt;ffffffffa0905cc0&amp;gt;] ? ll_md_blocking_ast+0x0/0x710 [lustre]
[ 5527.044186]  [&amp;lt;ffffffffa0744e55&amp;gt;] mdc_enqueue+0x505/0x1590 [mdc]
[ 5527.050196]  [&amp;lt;ffffffffa02b9578&amp;gt;] ? libcfs_log_return+0x28/0x40 [libcfs]
[ 5527.056885]  [&amp;lt;ffffffffa074609e&amp;gt;] ? mdc_revalidate_lock+0x1be/0x1d0 [mdc]
[ 5527.063661]  [&amp;lt;ffffffffa0746270&amp;gt;] mdc_intent_lock+0x1c0/0x5c0 [mdc]
[ 5527.069932]  [&amp;lt;ffffffffa0905cc0&amp;gt;] ? ll_md_blocking_ast+0x0/0x710 [lustre]
[ 5527.076734]  [&amp;lt;ffffffffa0536d90&amp;gt;] ? ldlm_completion_ast+0x0/0x6f0 [ptlrpc]
[ 5527.083601]  [&amp;lt;ffffffffa09eed8b&amp;gt;] lmv_intent_lookup+0x3bb/0x11c0 [lmv]
[ 5527.090136]  [&amp;lt;ffffffffa0905cc0&amp;gt;] ? ll_md_blocking_ast+0x0/0x710 [lustre]
[ 5527.096913]  [&amp;lt;ffffffffa09f12f0&amp;gt;] lmv_intent_lock+0x310/0x370 [lmv]
[ 5527.103190]  [&amp;lt;ffffffffa0905cc0&amp;gt;] ? ll_md_blocking_ast+0x0/0x710 [lustre]
[ 5527.109982]  [&amp;lt;ffffffffa08e0944&amp;gt;] __ll_inode_revalidate_it+0x214/0xd90 [lustre]
[ 5527.117295]  [&amp;lt;ffffffffa0905cc0&amp;gt;] ? ll_md_blocking_ast+0x0/0x710 [lustre]
[ 5527.124084]  [&amp;lt;ffffffffa08e1764&amp;gt;] ll_inode_revalidate_it+0x44/0x1a0 [lustre]
[ 5527.131136]  [&amp;lt;ffffffffa08e1903&amp;gt;] ll_getattr_it+0x43/0x170 [lustre]
[ 5527.137408]  [&amp;lt;ffffffffa08e1a64&amp;gt;] ll_getattr+0x34/0x40 [lustre]
[ 5527.143317]  [&amp;lt;ffffffff81125113&amp;gt;] vfs_getattr+0x45/0x63
[ 5527.148535]  [&amp;lt;ffffffff8112517e&amp;gt;] vfs_fstatat+0x4d/0x63
[ 5527.153751]  [&amp;lt;ffffffff811251cf&amp;gt;] vfs_stat+0x1b/0x1d
[ 5527.158709]  [&amp;lt;ffffffff811252ce&amp;gt;] sys_newstat+0x1a/0x33
[ 5527.163927]  [&amp;lt;ffffffff81129f89&amp;gt;] ? path_put+0x1f/0x23
[ 5527.169059]  [&amp;lt;ffffffff8109fa08&amp;gt;] ? audit_syscall_entry+0x145/0x171
[ 5527.175315]  [&amp;lt;ffffffff81009bc2&amp;gt;] system_call_fastpath+0x16/0x1b
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Info required for matching: conf-sanity 23a&lt;/p&gt;
</description>
                <environment></environment>
        <key id="16364">LU-2181</key>
            <summary>failure conf-sanity test_23a: umount -f client hung in stat() when MDS down</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="bogl">Bob Glossman</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Mon, 15 Oct 2012 06:40:47 +0000</created>
                <updated>Thu, 13 Apr 2023 22:24:21 +0000</updated>
                            <resolved>Wed, 1 Oct 2014 18:04:45 +0000</resolved>
                                    <version>Lustre 2.3.0</version>
                    <version>Lustre 2.4.0</version>
                                    <fixVersion>Lustre 2.4.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>10</watches>
                                                                            <comments>
                            <comment id="46578" author="yujian" created="Mon, 15 Oct 2012 11:24:41 +0000"  >&lt;p&gt;Lustre Tag: v2_3_0_RC3&lt;br/&gt;
Lustre Build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_3/36&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-b2_3/36&lt;/a&gt;&lt;br/&gt;
Distro/Arch: RHEL6.3/x86_64(server), FC15/x86_64(client)&lt;br/&gt;
Network: TCP&lt;br/&gt;
ENABLE_QUOTA=yes&lt;/p&gt;

&lt;p&gt;conf-sanity test 34b also hit the same issue:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/b1d732d8-16da-11e2-afe1-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/b1d732d8-16da-11e2-afe1-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="46581" author="yujian" created="Mon, 15 Oct 2012 12:16:36 +0000"  >&lt;p&gt;Lustre Tag: v2_3_0_RC3&lt;br/&gt;
Lustre Build: &lt;a href=&quot;http://build.whamcloud.com/job/lustre-b2_3/36&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://build.whamcloud.com/job/lustre-b2_3/36&lt;/a&gt;&lt;br/&gt;
Distro/Arch: RHEL6.3/x86_64(server), FC15/x86_64(client)&lt;br/&gt;
Network: TCP&lt;br/&gt;
ENABLE_QUOTA=yes&lt;/p&gt;

&lt;p&gt;conf-sanity test 45 also hit the same issue:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/c0834482-16e1-11e2-afe1-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/c0834482-16e1-11e2-afe1-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="46584" author="pjones" created="Mon, 15 Oct 2012 12:50:31 +0000"  >&lt;p&gt;Yangsheng is looking into this one&lt;/p&gt;</comment>
                            <comment id="46592" author="ys" created="Mon, 15 Oct 2012 15:00:39 +0000"  >&lt;p&gt;From first investigate, looks like client loop forever in ptlrpc_set_wait(). maybe relate to commit 7223b4746c71bf450c178ed21ddf99a0e3e26a1a. But i am still not understand why it just impact FC15 client. &lt;/p&gt;</comment>
                            <comment id="47349" author="sarah" created="Sat, 3 Nov 2012 00:06:50 +0000"  >&lt;p&gt;Found another failure in SLES11 SP2 client.&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/a9ac1d4a-2539-11e2-9e7c-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/a9ac1d4a-2539-11e2-9e7c-52540035b04c&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;client trace shows:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[20995.079218] umount &#160; &#160; &#160; &#160; &#160;S 0000000000000001 &#160; &#160; 0 &#160;6319 &#160; 5616 0x00000000
[20995.079218] &#160;ffff880062751878 0000000000000082 0000000000000000 ffff880062750010
[20995.079218] &#160;0000000000011180 0000000000011180 ffff880062751fd8 ffff880062751fd8
[20995.079218] &#160;0000000000011180 ffff88006c426340 ffff880000000028 ffff880026852440
[20995.079218] Call Trace:
[20995.079218] &#160;[&amp;lt;ffffffff81441570&amp;gt;] schedule_timeout+0x1b0/0x2a0
[20995.079218] &#160;[&amp;lt;ffffffffa080bfbb&amp;gt;] ptlrpc_set_wait+0x2eb/0x920 [ptlrpc]
[20995.079218] &#160;[&amp;lt;ffffffffa080c666&amp;gt;] ptlrpc_queue_wait+0x76/0x230 [ptlrpc]
[20995.079218] &#160;[&amp;lt;ffffffffa07ef83b&amp;gt;] ldlm_cli_enqueue+0x23b/0x790 [ptlrpc]
[20995.079218] &#160;[&amp;lt;ffffffffa09928c8&amp;gt;] mdc_enqueue+0x2b8/0xcb0 [mdc]
[20995.079218] &#160;[&amp;lt;ffffffffa0993468&amp;gt;] mdc_intent_lock+0x1a8/0xd40 [mdc]
[20995.079218] &#160;[&amp;lt;ffffffffa09497a7&amp;gt;] lmv_intent_lookup+0x3b7/0x11b0 [lmv]
[20995.079218] &#160;[&amp;lt;ffffffffa094a8e2&amp;gt;] lmv_intent_lock+0x342/0x3a0 [lmv]
[20995.079218] &#160;[&amp;lt;ffffffffa0b17ea0&amp;gt;] __ll_inode_revalidate_it+0x550/0xd70 [lustre]
[20995.079218] &#160;[&amp;lt;ffffffffa0b18959&amp;gt;] ll_inode_revalidate_it+0x49/0x1c0 [lustre]
[20995.079218] &#160;[&amp;lt;ffffffffa0b18b0e&amp;gt;] ll_getattr_it+0x3e/0x160 [lustre]
[20995.079218] &#160;[&amp;lt;ffffffffa0b18c5f&amp;gt;] ll_getattr+0x2f/0x40 [lustre]
[20995.079218] &#160;[&amp;lt;ffffffff81154631&amp;gt;] vfs_fstatat+0x81/0x90
[20995.079218] &#160;[&amp;lt;ffffffff8115478f&amp;gt;] sys_newstat+0x1f/0x50
[20995.079218] &#160;[&amp;lt;ffffffff8144ac92&amp;gt;] system_call_fastpath+0x16/0x1b
[20995.079218] &#160;[&amp;lt;00007f60d1ef9335&amp;gt;] 0x7f60d1ef9334
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="48111" author="bogl" created="Tue, 20 Nov 2012 14:03:37 +0000"  >&lt;p&gt;I have seen this repeatedly in sles11 sp2 clients too, so it&apos;s not just FC15 client.&lt;/p&gt;

&lt;p&gt;I&apos;m very suspicious that this may be due to version skew in the umount command.  In el6 which doesn&apos;t fail umount is found in util-linux-ng 2.17.2, in sles11 sp2 it is in util-linux 2.19.1, don&apos;t know what the version is in FC15.&lt;/p&gt;

&lt;p&gt;Running strace on a &apos;umount -f&apos; with mds down or unreachable, in el6 I see the first significant syscall is umount():&lt;/p&gt;

&lt;p&gt;....&lt;br/&gt;
getuid()                                = 0&lt;br/&gt;
geteuid()                               = 0&lt;br/&gt;
readlink(&quot;/mnt&quot;, 0x7fff08de1ad0, 4096)  = -1 EINVAL (Invalid argument)&lt;br/&gt;
readlink(&quot;/mnt/lustre&quot;, 0x7fff08de1ad0, 4096) = -1 EINVAL (Invalid argument)&lt;br/&gt;
umask(077)                              = 022&lt;br/&gt;
open(&quot;/etc/mtab&quot;, O_RDONLY)             = 3&lt;br/&gt;
umask(022)                              = 077&lt;br/&gt;
fstat(3, &lt;/p&gt;
{st_mode=S_IFREG|0644, st_size=480, ...}
&lt;p&gt;) = 0&lt;br/&gt;
mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x7f4c55c00000&lt;br/&gt;
read(3, &quot;/dev/mapper/vg_centos1-lv_root /&quot;..., 4096) = 480&lt;br/&gt;
read(3, &quot;&quot;, 4096)                       = 0&lt;br/&gt;
close(3)                                = 0&lt;br/&gt;
munmap(0x7f4c55c00000, 4096)            = 0&lt;br/&gt;
stat(&quot;/sbin/umount.lustre&quot;, 0x7fff08de2900) = -1 ENOENT (No such file or directory)&lt;br/&gt;
rt_sigprocmask(SIG_BLOCK, ~&lt;span class=&quot;error&quot;&gt;&amp;#91;TRAP SEGV RTMIN RT_1&amp;#93;&lt;/span&gt;, NULL, 8) = 0&lt;br/&gt;
umount(&quot;/mnt/lustre&quot;, MNT_FORCE)        = 0&lt;br/&gt;
 ....&lt;/p&gt;

&lt;p&gt;In sles11 sp2 the first significant syscall is stat() on the mount point:&lt;/p&gt;

&lt;p&gt; ....&lt;br/&gt;
getuid()                                = 0&lt;br/&gt;
geteuid()                               = 0&lt;br/&gt;
readlink(&quot;/mnt&quot;, 0x7fff0e6455d0, 4096)  = -1 EINVAL (Invalid argument)&lt;br/&gt;
readlink(&quot;/mnt/lustre&quot;, 0x7fff0e6455d0, 4096) = -1 EINVAL (Invalid argument)&lt;br/&gt;
stat(&quot;/mnt/lustre&quot;,&lt;br/&gt;
   &amp;lt; hangs here &amp;gt;&lt;/p&gt;

&lt;p&gt;It appears to be the stat() call of the mount point that gets into the permanent client loop YS has described and the umount command never gets to the umount() syscall.  In earlier versions of the umount command, as seen in el6 clients, there is no stat() call and the umount() call succeeds even with the mds down.&lt;/p&gt;</comment>
                            <comment id="48121" author="adilger" created="Tue, 20 Nov 2012 14:59:07 +0000"  >&lt;p&gt;Bob, the util-linux sources are at &lt;a href=&quot;https://github.com/karelzak/util-linux&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://github.com/karelzak/util-linux&lt;/a&gt; and would be a good place to investigate where/why the &lt;tt&gt;stat()&lt;/tt&gt; call was added to the code before &lt;tt&gt;umount()&lt;/tt&gt; is called.  As I was typing this, Oleg already checked this and found the following patch which fixes the problem:&lt;/p&gt;

&lt;p&gt;   &lt;a href=&quot;https://github.com/karelzak/util-linux/commit/6d5d2b5fd342308bb2db6b82d89ac22ef7374184&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://github.com/karelzak/util-linux/commit/6d5d2b5fd342308bb2db6b82d89ac22ef7374184&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;We need to get this patch into the upstream SLES release if it isn&apos;t already.  Peter will be contacting SuSE about this.  I&apos;ll contact Red Hat about the same.&lt;/p&gt;

&lt;p&gt;In the meantime, can you please add this subtest to the ALWAYS_EXCEPT list for SLES11 SP2.&lt;/p&gt;</comment>
                            <comment id="48123" author="bogl" created="Tue, 20 Nov 2012 15:04:55 +0000"  >&lt;p&gt;Andreas, I don&apos;t think just adding this subtest to ALWAYS_EXCEPT is sufficient.  I know of at least one other test that does umount with mds down, conf-sanity 34b, and there are probably others I don&apos;t know immediately.&lt;/p&gt;</comment>
                            <comment id="48134" author="bogl" created="Tue, 20 Nov 2012 16:04:19 +0000"  >&lt;p&gt;patch to disable the tests I know about for sles11 sp2:&lt;br/&gt;
&lt;a href=&quot;http://review.whamcloud.com/4639&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/4639&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="48425" author="adilger" created="Tue, 27 Nov 2012 13:33:04 +0000"  >&lt;p&gt;This bug needs to stay open for tracking until SLES11 has the fix to umount to remove the stat() call when -f is given.&lt;/p&gt;</comment>
                            <comment id="49408" author="sarah" created="Tue, 18 Dec 2012 16:59:12 +0000"  >&lt;p&gt;conf-sanity test_45 also hit this error on sles11 sp2 client:&lt;br/&gt;
&lt;a href=&quot;https://maloo.whamcloud.com/test_sets/3ed08c6c-46dc-11e2-b16f-52540035b04c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://maloo.whamcloud.com/test_sets/3ed08c6c-46dc-11e2-b16f-52540035b04c&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="49411" author="bogl" created="Tue, 18 Dec 2012 17:46:19 +0000"  >&lt;p&gt;I suspect Sarah hit this problem due to running tests with SLOW=yes.  By default SLOW=no and test 45 gets skipped. Probably needs to be fixed by adding 45 to the sles11 always skip list at the top of conf-sanity.sh&lt;/p&gt;</comment>
                            <comment id="49551" author="bogl" created="Fri, 21 Dec 2012 11:39:35 +0000"  >&lt;p&gt;added subtest 45 to the sles11 sp2 skip list&lt;br/&gt;
&lt;a href=&quot;http://review.whamcloud.com/4884&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/4884&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="50431" author="jaylan" created="Mon, 14 Jan 2013 14:16:29 +0000"  >&lt;p&gt;SUSE pointed me to this patch:&lt;br/&gt;
&lt;a href=&quot;http://download.novell.com/Download?buildid=G4nSHdRyeOI~&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://download.novell.com/Download?buildid=G4nSHdRyeOI~&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;The patch consists of 7 patches:&lt;br/&gt;
libblkid1-2.19.1-6.33.35.1.x86_64.rpm&lt;br/&gt;
libblkid1-32bit-2.19.1-6.33.35.1.x86_64.rpm&lt;br/&gt;
libuuid1-2.19.1-6.33.35.1.x86_64.rpm&lt;br/&gt;
libuuid1-32bit-2.19.1-6.33.35.1.x86_64.rpm&lt;br/&gt;
util-linux-2.19.1-6.33.35.1.x86_64.rpm&lt;br/&gt;
util-linux-lang-2.19.1-6.33.35.1.x86_64.rpm&lt;br/&gt;
uuid-runtime-2.19.1-6.33.35.1.x86_64.rpm&lt;/p&gt;

&lt;p&gt;I am not sure I need all of them, but installed them anyway. With these rpm set installed, the test passed!&lt;/p&gt;</comment>
                            <comment id="50433" author="bogl" created="Mon, 14 Jan 2013 14:28:31 +0000"  >&lt;p&gt;Thanks for the info, Jay.  I suspect the important part of that patch is the util-linux rpm.  When that patch becomes part of a regular sles11 sp2 update and no longer needs to be specially downloaded and applied, we can probably go ahead and take out the test skips added to work around the problem.  We&apos;ve been waiting for that to happen.&lt;/p&gt;
</comment>
                            <comment id="95382" author="adilger" created="Wed, 1 Oct 2014 06:50:55 +0000"  >&lt;p&gt;Bob, any idea if this update has made it into SP2 or SP3 (whatever we are currently testing master on)?  Then we could remove the exception from conf-sanity.sh.&lt;/p&gt;</comment>
                            <comment id="95418" author="bogl" created="Wed, 1 Oct 2014 14:52:34 +0000"  >&lt;p&gt;Andreas, the conf-sanity.sh exception as it is now only skips the test on SP2.  it runs fine on SP3 and always has.  That being the case there is no harm to leaving the exception in there forever.  However it could indeed be removed as we no longer build or test SP2 in master.&lt;/p&gt;</comment>
                            <comment id="95442" author="adilger" created="Wed, 1 Oct 2014 18:04:45 +0000"  >&lt;p&gt;Closing bug, since the test exception is only enforced for SLES11SP2, and will naturally expire. &lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="43298">LU-9042</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                                        </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvae7:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>5219</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>