<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 03:09:47 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-14443] review-dne-ssk test session failed: Error checking ski of cli2mdt</title>
                <link>https://jira.whamcloud.com/browse/LU-14443</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;This issue was created by maloo for jianyu &amp;lt;yujian@whamcloud.com&amp;gt;&lt;/p&gt;

&lt;p&gt;This issue relates to the following test suite run: &lt;a href=&quot;https://testing.whamcloud.com/test_sets/86c32e36-1722-48ae-a3e8-efee318e647c&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/86c32e36-1722-48ae-a3e8-efee318e647c&lt;/a&gt;&lt;/p&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;checking cli2mdt...found 0/8 ski connections
checking cli2mdt...found 0/8 ski connections
checking cli2mdt...found 0/8 ski connections
Error checking ski of cli2mdt: expect 8, actual 0
CMD: trevis-211vm7,trevis-211vm8,trevis-211vm9 keyctl show
Session Keyring
 949457656 --alswrv      0     0  keyring: _ses
 739709307 ----s-rv      0     0   \_ user: invocation_id
Session Keyring
 751217567 --alswrv      0     0  keyring: _ses
 890051499 ----s-rv      0     0   \_ user: invocation_id
Session Keyring
 234519632 --alswrv      0     0  keyring: _ses
 157615560 ----s-rv      0     0   \_ user: invocation_id
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment></environment>
        <key id="62930">LU-14443</key>
            <summary>review-dne-ssk test session failed: Error checking ski of cli2mdt</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="6">Not a Bug</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="maloo">Maloo</reporter>
                        <labels>
                    </labels>
                <created>Thu, 18 Feb 2021 02:44:09 +0000</created>
                <updated>Thu, 25 Feb 2021 16:17:31 +0000</updated>
                            <resolved>Thu, 25 Feb 2021 15:02:33 +0000</resolved>
                                    <version>Lustre 2.14.0</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>10</watches>
                                                                            <comments>
                            <comment id="292250" author="yujian" created="Thu, 18 Feb 2021 02:50:32 +0000"  >&lt;p&gt;The failure occurred consistently today. It&apos;s blocking the patch testing on master branch.&lt;/p&gt;</comment>
                            <comment id="292251" author="yujian" created="Thu, 18 Feb 2021 02:56:26 +0000"  >&lt;p&gt;The review-dne-ssk test session passed on 2021-02-13. There are no Maloo reports between 2021-02-13 and 2021-02-17.&lt;br/&gt;
On master branch, only the following one commit landed since 2021-02-13:&lt;/p&gt;
&lt;ul class=&quot;alternate&quot; type=&quot;square&quot;&gt;
	&lt;li&gt;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14424&quot; title=&quot;write performance regression in Lustre-2.14.0-RC1&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14424&quot;&gt;&lt;del&gt;LU-14424&lt;/del&gt;&lt;/a&gt; Revert &quot;&lt;a href=&quot;https://jira.whamcloud.com/browse/LU-9679&quot; title=&quot;Prepare lustre for adoption into the linux kernel&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-9679&quot;&gt;&lt;del&gt;LU-9679&lt;/del&gt;&lt;/a&gt; osc: simplify osc_extent_find()&quot; (details / gitweb)&lt;/li&gt;
&lt;/ul&gt;
</comment>
                            <comment id="292256" author="adilger" created="Thu, 18 Feb 2021 06:08:34 +0000"  >&lt;p&gt;Is it possible that the clocks are out of sync in the test cluster since the reboot?&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;LustreError: 25556:0:(gss_keyring.c:1445:gss_kt_update()) negotiation: rpc err 0, gss err d0000
LustreError: 25556:0:(gss_keyring.c:1445:gss_kt_update()) Skipped 520 previous similar messages
Lustre: 25556:0:(sec_gss.c:315:cli_ctx_expire()) ctx 000000002d43236a(0-&amp;gt;lustre-OST0004_UUID) get expired: 1613618660(+37s)
Lustre: 25556:0:(sec_gss.c:315:cli_ctx_expire()) Skipped 520 previous similar messages
Lustre: 7722:0:(sec_gss.c:1228:gss_cli_ctx_fini_common()) gss.keyring@00000000e20fbcfe: destroy ctx 000000002d43236a(0-&amp;gt;lustre-OST0004_UUID)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="292257" author="adilger" created="Thu, 18 Feb 2021 06:11:37 +0000"  >&lt;p&gt;And later in the client logs:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;LustreError: 46837:0:(file.c:4747:ll_inode_revalidate_fini()) lustre: revalidate FID [0x200000007:0x1:0x0] error: rc = -108
LustreError: 46837:0:(file.c:4747:ll_inode_revalidate_fini()) Skipped 3 previous similar messages
LustreError: 46851:0:(gss_keyring.c:864:gss_sec_lookup_ctx_kr()) failed request key: -126
LustreError: 46851:0:(gss_keyring.c:864:gss_sec_lookup_ctx_kr()) Skipped 7 previous similar messages
LustreError: 46851:0:(sec.c:452:sptlrpc_req_get_ctx()) req 000000009c749cf6: fail to get context
LustreError: 46851:0:(sec.c:452:sptlrpc_req_get_ctx()) 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="292389" author="gerrit" created="Fri, 19 Feb 2021 10:49:00 +0000"  >&lt;p&gt;Sebastien Buisson (sbuisson@ddn.com) uploaded a new patch: &lt;a href=&quot;https://review.whamcloud.com/41695&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/41695&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-14443&quot; title=&quot;review-dne-ssk test session failed: Error checking ski of cli2mdt&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-14443&quot;&gt;&lt;del&gt;LU-14443&lt;/del&gt;&lt;/a&gt; test: run review-dne-ssk&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 0513a57039e62636dbf9ef29dc15e1d0f33ea294&lt;/p&gt;</comment>
                            <comment id="292390" author="sebastien" created="Fri, 19 Feb 2021 10:52:36 +0000"  >&lt;p&gt;I have pushed a test patch to see if the failures are due to commit&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;b592f75446 LU-14424 Revert &quot;LU-9679 osc: simplify osc_extent_find()&quot;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;But it is quite unlikely. Failures seem to be environmental and have started occurring after the lab power failure. But I checked on trevis, I cannot see out of sync clocks.&lt;/p&gt;</comment>
                            <comment id="292471" author="jamesanunez" created="Fri, 19 Feb 2021 18:35:58 +0000"  >&lt;p&gt;I&#8217;ve run review-dne-ssk issues for patch &lt;a href=&quot;https://review.whamcloud.com/#/c/40884/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/#/c/40884/&lt;/a&gt; over the past few months as part of validating Lustre on RHEL8.3. The testing shows/implies&lt;/p&gt;

&lt;p&gt;1. The review-dne-ssk issue is not caused by the Linux distro because the tests run on Feb 4 didn&#8217;t hit this issue, but the tests from Feb 19 do hit this issue and they were both using the same kernel version of RHEL8.3.&lt;br/&gt;
Yet, there is a lustre-mr test run that passed review-dne-ssk recently. The MR patch is based on 2.14.0-RC3, but runs RHEL7.8; &lt;a href=&quot;https://testing.whamcloud.com/test_sessions/686d5e5a-5798-4403-a6e3-c8445ee8b177&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sessions/686d5e5a-5798-4403-a6e3-c8445ee8b177&lt;/a&gt;. &lt;/p&gt;

&lt;p&gt;2. The review-dne-ssk issue is not a Lustre issue because the 40884 patch uses the same parent/version of Lustre from January 27; I did not rebase between Feb 4 and Feb 19. As noted, review-dne-ssk passed on Feb 4 and failed on Feb 19.&lt;/p&gt;

&lt;p&gt;That&apos;s the data I have and my, possibly faulty, thoughts on this issue.&lt;/p&gt;</comment>
                            <comment id="292482" author="adilger" created="Fri, 19 Feb 2021 21:06:21 +0000"  >&lt;p&gt;The cause of the failure seems fairly clear in the following Maloo search:&lt;br/&gt;
&lt;a href=&quot;https://testing.whamcloud.com/test_sessions?test_groups%5B%5D=review-dne-ssk&amp;amp;start_date=2021-02-12&amp;amp;end_date=2021-02-17&amp;amp;source=test_sessions#redirect&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sessions?test_groups%5B%5D=review-dne-ssk&amp;amp;start_date=2021-02-12&amp;amp;end_date=2021-02-17&amp;amp;source=test_sessions#redirect&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;The test has never passed on RHEL8.3 since 2021-02-17, but passed consistently with RHEL7.8 until that date. &lt;/p&gt;</comment>
                            <comment id="292486" author="adilger" created="Fri, 19 Feb 2021 21:24:08 +0000"  >&lt;p&gt;It looks like there were some el8.3 passes until 2021-02-04, but none since then:&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://testing.whamcloud.com/test_sessions?client_distribution_type_id=309da983-628c-4d7a-bd36-5aeee0b55610&amp;amp;test_groups%5B%5D=review-dne-ssk&amp;amp;start_date=2020-12-01&amp;amp;end_date=2021-02-17&amp;amp;source=test_sessions#redirect&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sessions?client_distribution_type_id=309da983-628c-4d7a-bd36-5aeee0b55610&amp;amp;test_groups%5B%5D=review-dne-ssk&amp;amp;start_date=2020-12-01&amp;amp;end_date=2021-02-17&amp;amp;source=test_sessions#redirect&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="292527" author="jamesanunez" created="Sat, 20 Feb 2021 14:54:25 +0000"  >&lt;p&gt;Update: &lt;br/&gt;
review-dne-ssk with RHEL8.3 server with RHEL7.8 and 7.9 clients both failed; &lt;a href=&quot;https://testing.whamcloud.com/test_sessions/f1fa92d8-bec3-427f-8662-9b37d3880c81&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sessions/f1fa92d8-bec3-427f-8662-9b37d3880c81&lt;/a&gt; and &lt;a href=&quot;https://testing.whamcloud.com/test_sessions/19386d1f-9133-46c8-b47f-a3500c99b168&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sessions/19386d1f-9133-46c8-b47f-a3500c99b168&lt;/a&gt; .&lt;/p&gt;

&lt;p&gt;RHEL7.8 server/client and RHEL7.9 server/client test sessions both passed with pre-RC Lustre code; &lt;a href=&quot;https://testing.whamcloud.com/test_sessions/985a61e8-d552-4a69-a9df-76a04a677d4f&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sessions/985a61e8-d552-4a69-a9df-76a04a677d4f&lt;/a&gt; and &lt;a href=&quot;https://testing.whamcloud.com/test_sessions/3f446025-55f9-4e7b-95c4-e6de9b772905&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sessions/3f446025-55f9-4e7b-95c4-e6de9b772905&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;I haven&apos;t tested 2.14.0 GA, but will submit those tests.&lt;/p&gt;</comment>
                            <comment id="292571" author="adilger" created="Mon, 22 Feb 2021 06:43:56 +0000"  >&lt;p&gt;From James:&lt;/p&gt;
&lt;blockquote&gt;
&lt;p&gt;RHEL8.3 clients with 7.8 and 7.9 servers performed better than the 8.3 clients and 8.3 servers, but all test suites still fail. With 8.3/8.3, all test suites fail all tests (sanity, recovery-small, sanity-sec) in fact, the test suites don&apos;t even run any of the individual tests. All 8.3/7.8 or 7.9 test suites fail, but all test suites run some individual tests and some of those tests pass. I haven&apos;t looked at these results yet, but see &lt;a href=&quot;https://testing.whamcloud.com/test_sessions/c5ee2ab6-8bf2-4a68-a851-745a371beb55&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sessions/c5ee2ab6-8bf2-4a68-a851-745a371beb55&lt;/a&gt; and &lt;a href=&quot;https://testing.whamcloud.com/test_sessions/f36771b6-2e6f-4bdd-a8e2-1d9bb5a0dfdb&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sessions/f36771b6-2e6f-4bdd-a8e2-1d9bb5a0dfdb&lt;/a&gt;&lt;/p&gt;&lt;/blockquote&gt;</comment>
                            <comment id="292574" author="sebastien" created="Mon, 22 Feb 2021 07:29:21 +0000"  >&lt;p&gt;Thank you guys, this is helpful.&lt;/p&gt;

&lt;p&gt;It is like something on the el8 distro used on the test nodes has been &quot;activated&quot; only after the power outage. I still do not know what it is, as at least the kernel version before and after is exactly the same.&lt;/p&gt;

&lt;p&gt;Thanks to the &lt;tt&gt;livedebug&lt;/tt&gt; test parameter I used for patch &lt;a href=&quot;https://review.whamcloud.com/41695&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://review.whamcloud.com/41695&lt;/a&gt;, I managed to have nodes allocated on Trevis for me, so I am going to try to reproduce this issue. Looking at the logs you pointed to, the first concerning message for me is the inability for the client to use the SSK key:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;LustreError: 46851:0:(gss_keyring.c:864:gss_sec_lookup_ctx_kr()) failed request key: -126
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;given that&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;#define ENOKEY          126     /* Required key not available */
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="292628" author="sebastien" created="Mon, 22 Feb 2021 16:45:01 +0000"  >&lt;p&gt;As a quick update, I tried to reproduce manually with nodes allocated on trevis (trevis-209vm&lt;span class=&quot;error&quot;&gt;&amp;#91;1-5&amp;#93;&lt;/span&gt;), but I did not manage to so far. I was able to properly setup a Lustre file system with &lt;tt&gt;SHARED_KEY&lt;/tt&gt; enabled, by using &lt;tt&gt;llmount.sh&lt;/tt&gt; with a cfg file that I have been using regularly on this cluster.&lt;/p&gt;

&lt;p&gt;Is there a way to get the cfg file used by Maloo in the &lt;tt&gt;lustre-initialization&lt;/tt&gt; phase of review-dne-ssk test group?&lt;/p&gt;

&lt;p&gt;It is also interesting to note that although review-dne-ssk has been failing consistently on RHEL8.3 since the power outage in the lab, review-dne-selinux-ssk is passing without any issue. And as far as I know, the only difference between those 2 test groups is that SELinux is enforced in addition to SSK.&lt;/p&gt;</comment>
                            <comment id="292636" author="jamesanunez" created="Mon, 22 Feb 2021 17:33:06 +0000"  >&lt;p&gt;&amp;gt; Is there a way to get the cfg file used by Maloo in the lustre-initialization phase of review-dne-ssk test group?&lt;/p&gt;

&lt;p&gt;I was talking to Charlie about this and ... Yes, you can view the configuration. The configuration can change based on what test group is running. So, it&apos;s best to look at lustre-initialization test session results in the lustre-initialization-1.autotest log. The environment parameters are displayed in that log starting with the line&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;cat /root/autotest_config.sh
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;For example, looking at the lustre-initialization autotest log for a review-dne-ssk test session that failed &lt;a href=&quot;https://testing.whamcloud.com/test_sessions/816df1e1-ea66-4ef9-b388-7e6b41ab67fc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sessions/816df1e1-ea66-4ef9-b388-7e6b41ab67fc&lt;/a&gt;, we see&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;2021-02-19T15:00:45 cat /root/autotest_config.sh
2021-02-19T15:00:45 #!/bin/bash
2021-02-19T15:00:45 #Auto Generated By Whamcloud Autotest
2021-02-19T15:00:45 #Key Exports
2021-02-19T15:00:45 export mgs_HOST=onyx-44vm4
2021-02-19T15:00:45 export mds_HOST=onyx-44vm4
2021-02-19T15:00:45 export MGSDEV=/dev/lvm-Role_MDS/P1
2021-02-19T15:00:45 export MDSDEV=/dev/lvm-Role_MDS/P1
2021-02-19T15:00:45 export mds1_HOST=onyx-44vm4
2021-02-19T15:00:45 export MDSDEV1=/dev/lvm-Role_MDS/P1
2021-02-19T15:00:45 export mds2_HOST=onyx-44vm5
2021-02-19T15:00:45 export MDSDEV2=/dev/lvm-Role_MDS/P2
2021-02-19T15:00:45 export mds3_HOST=onyx-44vm4
2021-02-19T15:00:45 export MDSDEV3=/dev/lvm-Role_MDS/P3
2021-02-19T15:00:45 export mds4_HOST=onyx-44vm5
2021-02-19T15:00:45 export MDSDEV4=/dev/lvm-Role_MDS/P4
2021-02-19T15:00:45 export MDSCOUNT=4
2021-02-19T15:00:45 export MDSSIZE=2097152
2021-02-19T15:00:45 export MGSSIZE=2097152
2021-02-19T15:00:45 export MDSFSTYPE=ldiskfs
2021-02-19T15:00:45 export MGSFSTYPE=ldiskfs
2021-02-19T15:00:45 export ost_HOST=onyx-44vm3
2021-02-19T15:00:45 export ost1_HOST=onyx-44vm3
2021-02-19T15:00:45 export OSTDEV1=/dev/lvm-Role_OSS/P1
2021-02-19T15:00:45 export ost2_HOST=onyx-44vm3
2021-02-19T15:00:45 export OSTDEV2=/dev/lvm-Role_OSS/P2
2021-02-19T15:00:45 export ost3_HOST=onyx-44vm3
2021-02-19T15:00:45 export OSTDEV3=/dev/lvm-Role_OSS/P3
2021-02-19T15:00:45 export ost4_HOST=onyx-44vm3
2021-02-19T15:00:45 export OSTDEV4=/dev/lvm-Role_OSS/P4
2021-02-19T15:00:45 export ost5_HOST=onyx-44vm3
2021-02-19T15:00:45 export OSTDEV5=/dev/lvm-Role_OSS/P5
2021-02-19T15:00:45 export ost6_HOST=onyx-44vm3
2021-02-19T15:00:45 export OSTDEV6=/dev/lvm-Role_OSS/P6
2021-02-19T15:00:45 export ost7_HOST=onyx-44vm3
2021-02-19T15:00:45 export OSTDEV7=/dev/lvm-Role_OSS/P7
2021-02-19T15:00:45 export ost8_HOST=onyx-44vm3
2021-02-19T15:00:45 export OSTDEV8=/dev/lvm-Role_OSS/P8
2021-02-19T15:00:45 # some setup for conf-sanity test 24a, 24b, 33a
2021-02-19T15:00:45 export fs2mds_DEV=/dev/lvm-Role_MDS/S1
2021-02-19T15:00:45 export fs2ost_DEV=/dev/lvm-Role_OSS/S1
2021-02-19T15:00:45 export fs3ost_DEV=/dev/lvm-Role_OSS/S2
2021-02-19T15:00:45 export RCLIENTS=&quot;onyx-44vm2&quot;
2021-02-19T15:00:45 export OSTCOUNT=8
2021-02-19T15:00:45 export NETTYPE=tcp
2021-02-19T15:00:45 export OSTSIZE=10051911
2021-02-19T15:00:45 export OSTFSTYPE=ldiskfs
2021-02-19T15:00:45 export FSTYPE=ldiskfs
2021-02-19T15:00:45 export LOGDIR=/autotest/autotest-1/2021-02-19/lustre-reviews_review-dne-ssk_79237_1_103_816df1e1-ea66-4ef9-b388-7e6b41ab67fc
2021-02-19T15:00:45 export SHARED_DIRECTORY=/autotest/autotest-1/2021-02-19/lustre-reviews_review-dne-ssk_79237_1_103_816df1e1-ea66-4ef9-b388-7e6b41ab67fc/shared_dir
2021-02-19T15:00:45 export SHARED_KEY=true
2021-02-19T15:00:45 export PDSH=&quot;pdsh -t 120 -S -Rmrsh -w&quot;
2021-02-19T15:00:45 # Adding contents of /opt/autotest/releases/02_08_2021/external/mecturk/mecturk-ncli.sh
2021-02-19T15:00:45 # Entries above here are created by configure_cluster.rb
2021-02-19T15:00:45 # Entries below here come from mecturk-ncli.sh
2021-02-19T15:00:45 #
2021-02-19T15:00:45 # This config file should only contain entries for non-default
2021-02-19T15:00:45 # values that override settings in ncli.sh or local.sh.
2021-02-19T15:00:45 
2021-02-19T15:00:45 VERBOSE=true
2021-02-19T15:00:45 
2021-02-19T15:00:45 # override local.sh as it does not point to the powerman host
2021-02-19T15:00:45 POWER_DOWN=${POWER_DOWN:-&quot;powerman -h powerman --off&quot;}
2021-02-19T15:00:45 POWER_UP=${POWER_UP:-&quot;powerman -h powerman --on&quot;}
2021-02-19T15:00:45 
2021-02-19T15:00:45 # non-standard ports for liblustre TCP connections
2021-02-19T15:00:45 export LNET_ACCEPT_PORT=7988
2021-02-19T15:00:45 export ACCEPTOR_PORT=7988
2021-02-19T15:00:45 
2021-02-19T15:00:45 # Check for wide striping.  This was added to local.sh for 2.6+
2021-02-19T15:00:45 [ $OSTCOUNT -gt 160 -a $MDSFSTYPE = &quot;ldiskfs&quot; ] &amp;amp;&amp;amp;
2021-02-19T15:00:45 	MDSOPT=$MDSOPT&quot; --mkfsoptions=&apos;-O large_xattr -J size=4096&apos;&quot;
2021-02-19T15:00:45 
2021-02-19T15:00:45 # TT-430
2021-02-19T15:00:45 SERVER_FAILOVER_PERIOD=$((60 * 20))
2021-02-19T15:00:45 
2021-02-19T15:00:45 export RSYNC_RSH=rsh
2021-02-19T15:00:45 
2021-02-19T15:00:45 cbench_DIR=/usr/bin
2021-02-19T15:00:45 cnt_DIR=/opt/connectathon
2021-02-19T15:00:45 
2021-02-19T15:00:45 # Set-up shell environment for openmpi
2021-02-19T15:00:45 [ -r /etc/profile.d/openmpi.sh ] &amp;amp;&amp;amp; . /etc/profile.d/openmpi.sh
2021-02-19T15:00:45 MPIRUN_OPTIONS=&quot;-mca boot ssh&quot;
2021-02-19T15:00:45 [ &quot;${NETTYPE}&quot; = &apos;tcp&apos; ] &amp;amp;&amp;amp;
2021-02-19T15:00:45     MPIRUN_OPTIONS=&quot;--mca btl tcp,self --mca btl_tcp_if_include eth0 -mca boot ssh&quot;
2021-02-19T15:00:45 
2021-02-19T15:00:45 # the ncli.sh config script includes local.sh in turn.
2021-02-19T15:00:45 . $LUSTRE/tests/cfg/ncli.sh
2021-02-19T15:00:45 (./run_test.sh:53): main
2021-02-19T15:00:45 echo &apos;**************************************************************************************************************&apos;
2021-02-19T15:00:45 **************************************************************************************************************
2021-02-19T15:00:45 (./run_test.sh:54): main
2021-02-19T15:00:45 echo ncli.sh
2021-02-19T15:00:45 ncli.sh
2021-02-19T15:00:45 (./run_test.sh:55): main
2021-02-19T15:00:45 echo &apos;**************************************************************************************************************&apos;
2021-02-19T15:00:45 **************************************************************************************************************
2021-02-19T15:00:45 (./run_test.sh:56): main
2021-02-19T15:00:45 cat /usr/lib64/lustre/tests/cfg/ncli.sh
2021-02-19T15:00:45 . $LUSTRE/tests/cfg/local.sh
2021-02-19T15:00:45 
2021-02-19T15:00:45 # For multiple clients testing, we need use the cfg/ncli.sh config file, and
2021-02-19T15:00:45 # only need specify the &quot;RCLIENTS&quot; variable. The &quot;CLIENTS&quot; and &quot;CLIENTCOUNT&quot;
2021-02-19T15:00:45 # variables are defined in init_clients_lists(), called from cfg/ncli.sh.
2021-02-19T15:00:45 CLIENT1=${CLIENT1:-$(hostname)}
2021-02-19T15:00:45 SINGLECLIENT=$CLIENT1
2021-02-19T15:00:45 RCLIENTS=${RCLIENTS:-&quot;&quot;}
2021-02-19T15:00:45 
2021-02-19T15:00:45 init_clients_lists
2021-02-19T15:00:45 
2021-02-19T15:00:45 [ -n &quot;$RCLIENTS&quot; -a &quot;$PDSH&quot; = &quot;no_dsh&quot; ] &amp;amp;&amp;amp;
2021-02-19T15:00:45 	error &quot;tests for remote clients $RCLIENTS needs pdsh != do_dsh &quot; || true
2021-02-19T15:00:45 
2021-02-19T15:00:45 [ -n &quot;$FUNCTIONS&quot; ] &amp;amp;&amp;amp; . $FUNCTIONS || true
2021-02-19T15:00:45 
2021-02-19T15:00:45 # for recovery scale tests
2021-02-19T15:00:45 # default boulder cluster iozone location
2021-02-19T15:00:45 export PATH=/opt/iozone/bin:$PATH
2021-02-19T15:00:45 
2021-02-19T15:00:45 LOADS=${LOADS:-&quot;dd tar dbench iozone&quot;}
2021-02-19T15:00:45 for i in $LOADS; do
2021-02-19T15:00:45 	[ -f $LUSTRE/tests/run_${i}.sh ] || error &quot;incorrect load: $i&quot;
2021-02-19T15:00:45 done
2021-02-19T15:00:45 CLIENT_LOADS=($LOADS)
2021-02-19T15:00:45 
2021-02-19T15:00:45 # This is used when testing on SLURM environment.
2021-02-19T15:00:45 # Test will use srun when SRUN_PARTITION is set
2021-02-19T15:00:45 SRUN=${SRUN:-$(which srun 2&amp;gt;/dev/null || true)}
2021-02-19T15:00:45 SRUN_PARTITION=${SRUN_PARTITION:-&quot;&quot;}
2021-02-19T15:00:45 SRUN_OPTIONS=${SRUN_OPTIONS:-&quot;-W 1800 -l -O&quot;}
2021-02-19T15:00:45 (./run_test.sh:57): main
2021-02-19T15:00:45 echo &apos;**************************************************************************************************************&apos;
2021-02-19T15:00:45 **************************************************************************************************************
2021-02-19T15:00:45 (./run_test.sh:58): main
2021-02-19T15:00:45 echo local.sh
2021-02-19T15:00:45 local.sh
2021-02-19T15:00:45 (./run_test.sh:59): main
2021-02-19T15:00:45 echo &apos;**************************************************************************************************************&apos;
2021-02-19T15:00:45 **************************************************************************************************************
2021-02-19T15:00:45 (./run_test.sh:60): main
2021-02-19T15:00:45 cat /usr/lib64/lustre/tests/cfg/local.sh
2021-02-19T15:00:45 FSNAME=${FSNAME:-lustre}
2021-02-19T15:00:45 
2021-02-19T15:00:45 # facet hosts
2021-02-19T15:00:45 mds_HOST=${mds_HOST:-$(hostname)}
2021-02-19T15:00:45 mdsfailover_HOST=${mdsfailover_HOST}
2021-02-19T15:00:45 mgs_HOST=${mgs_HOST:-$mds_HOST}
2021-02-19T15:00:45 ost_HOST=${ost_HOST:-$(hostname)}
2021-02-19T15:00:45 ostfailover_HOST=${ostfailover_HOST}
2021-02-19T15:00:45 CLIENTS=&quot;&quot;
2021-02-19T15:00:45 # FILESET variable is used by sanity.sh to verify fileset
2021-02-19T15:00:45 # feature, tests should pass even under subdirectory namespace.
2021-02-19T15:00:45 FILESET=${FILESET:-&quot;&quot;}
2021-02-19T15:00:45 [[ -z &quot;$FILESET&quot; ]] || [[ &quot;${FILESET:0:1}&quot; = &quot;/&quot; ]] || FILESET=&quot;/$FILESET&quot;
2021-02-19T15:00:45 
2021-02-19T15:00:45 TMP=${TMP:-/tmp}
2021-02-19T15:00:45 
2021-02-19T15:00:45 DAEMONSIZE=${DAEMONSIZE:-500}
2021-02-19T15:00:45 MDSCOUNT=${MDSCOUNT:-1}
2021-02-19T15:00:45 MDSDEVBASE=${MDSDEVBASE:-$TMP/${FSNAME}-mdt}
2021-02-19T15:00:45 MDSSIZE=${MDSSIZE:-250000}
2021-02-19T15:00:45 #
2021-02-19T15:00:45 # Format options of facets can be specified with these variables:
2021-02-19T15:00:45 #
2021-02-19T15:00:45 #   - &amp;lt;facet_type&amp;gt;OPT
2021-02-19T15:00:45 #
2021-02-19T15:00:45 # Arguments for &quot;--mkfsoptions&quot; shall be specified with these
2021-02-19T15:00:45 # variables:
2021-02-19T15:00:45 #
2021-02-19T15:00:45 #   - &amp;lt;fstype&amp;gt;_MKFS_OPTS
2021-02-19T15:00:45 #   - &amp;lt;facet_type&amp;gt;_FS_MKFS_OPTS
2021-02-19T15:00:45 #
2021-02-19T15:00:45 # A number of other options have their own specific variables.  See
2021-02-19T15:00:45 # mkfs_opts().
2021-02-19T15:00:45 #
2021-02-19T15:00:45 MDSOPT=${MDSOPT:-}
2021-02-19T15:00:45 MDS_FS_MKFS_OPTS=${MDS_FS_MKFS_OPTS:-}
2021-02-19T15:00:45 MDS_MOUNT_OPTS=${MDS_MOUNT_OPTS:-}
2021-02-19T15:00:45 # &amp;lt;facet_type&amp;gt;_MOUNT_FS_OPTS is the mount options specified when formatting
2021-02-19T15:00:45 # the underlying device by argument &quot;--mountfsoptions&quot;
2021-02-19T15:00:45 MDS_MOUNT_FS_OPTS=${MDS_MOUNT_FS_OPTS:-}
2021-02-19T15:00:45 
2021-02-19T15:00:45 MGSSIZE=${MGSSIZE:-$MDSSIZE}
2021-02-19T15:00:45 MGSOPT=${MGSOPT:-}
2021-02-19T15:00:45 MGS_FS_MKFS_OPTS=${MGS_FS_MKFS_OPTS:-}
2021-02-19T15:00:45 MGS_MOUNT_OPTS=${MGS_MOUNT_OPTS:-}
2021-02-19T15:00:45 MGS_MOUNT_FS_OPTS=${MGS_MOUNT_FS_OPTS:-}
2021-02-19T15:00:45 
2021-02-19T15:00:45 OSTCOUNT=${OSTCOUNT:-2}
2021-02-19T15:00:45 OSTDEVBASE=${OSTDEVBASE:-$TMP/${FSNAME}-ost}
2021-02-19T15:00:45 OSTSIZE=${OSTSIZE:-400000}
2021-02-19T15:00:45 OSTOPT=${OSTOPT:-}
2021-02-19T15:00:45 OST_FS_MKFS_OPTS=${OST_FS_MKFS_OPTS:-}
2021-02-19T15:00:45 OST_MOUNT_OPTS=${OST_MOUNT_OPTS:-}
2021-02-19T15:00:45 OST_MOUNT_FS_OPTS=${OST_MOUNT_FS_OPTS:-}
2021-02-19T15:00:45 OST_INDEX_LIST=${OST_INDEX_LIST:-}
2021-02-19T15:00:45 # Can specify individual ost devs with
2021-02-19T15:00:45 # OSTDEV1=&quot;/dev/sda&quot;
2021-02-19T15:00:45 # on specific hosts with
2021-02-19T15:00:45 # ost1_HOST=&quot;uml2&quot;
2021-02-19T15:00:45 # ost1_JRN=&quot;/dev/sdb1&quot;
2021-02-19T15:00:45 #
2021-02-19T15:00:45 # For ZFS, ost devices can be specified via either or both of the following:
2021-02-19T15:00:45 # OSTZFSDEV1=&quot;${FSNAME}-ost1/ost1&quot;
2021-02-19T15:00:45 # OSTDEV1=&quot;/dev/sdb1&quot;
2021-02-19T15:00:45 #
2021-02-19T15:00:45 # OST indices can be specified as follows:
2021-02-19T15:00:45 # OSTINDEX1=&quot;1&quot;
2021-02-19T15:00:45 # OSTINDEX2=&quot;2&quot;
2021-02-19T15:00:45 # OSTINDEX3=&quot;4&quot;
2021-02-19T15:00:45 # ......
2021-02-19T15:00:45 # or
2021-02-19T15:00:45 # OST_INDEX_LIST=&quot;[1,2,4-6,8]&quot;	# [n-m,l-k,...], where n &amp;lt; m and l &amp;lt; k, etc.
2021-02-19T15:00:45 #
2021-02-19T15:00:45 # The default index value of an individual OST is its facet number minus 1.
2021-02-19T15:00:45 # More specific ones override more general ones. See facet_index().
2021-02-19T15:00:45 
2021-02-19T15:00:45 NETTYPE=${NETTYPE:-tcp}
2021-02-19T15:00:45 MGSNID=${MGSNID:-$(h2nettype $mgs_HOST)}
2021-02-19T15:00:45 
2021-02-19T15:00:45 #
2021-02-19T15:00:45 # Back end file system type(s) of facets can be specified with these
2021-02-19T15:00:45 # variables:
2021-02-19T15:00:45 #
2021-02-19T15:00:45 #   1. &amp;lt;facet&amp;gt;_FSTYPE
2021-02-19T15:00:45 #   2. &amp;lt;facet_type&amp;gt;FSTYPE
2021-02-19T15:00:45 #   3. FSTYPE
2021-02-19T15:00:45 #
2021-02-19T15:00:45 # More specific ones override more general ones.  See facet_fstype().
2021-02-19T15:00:45 #
2021-02-19T15:00:45 FSTYPE=${FSTYPE:-ldiskfs}
2021-02-19T15:00:45 
2021-02-19T15:00:45 LDISKFS_MKFS_OPTS=${LDISKFS_MKFS_OPTS:-}
2021-02-19T15:00:45 ZFS_MKFS_OPTS=${ZFS_MKFS_OPTS:-}
2021-02-19T15:00:45 
2021-02-19T15:00:45 LOAD_MODULES_REMOTE=${LOAD_MODULES_REMOTE:-false}
2021-02-19T15:00:45 
2021-02-19T15:00:45 DEF_STRIPE_SIZE=${DEF_STRIPE_SIZE:-}   # filesystem default stripe size in bytes
2021-02-19T15:00:45 DEF_STRIPE_COUNT=${DEF_STRIPE_COUNT:-} # filesystem default stripe count
2021-02-19T15:00:45 TIMEOUT=${TIMEOUT:-20}
2021-02-19T15:00:45 PTLDEBUG=${PTLDEBUG:-&quot;vfstrace rpctrace dlmtrace neterror ha config \
2021-02-19T15:00:45 		      ioctl super lfsck&quot;}
2021-02-19T15:00:45 SUBSYSTEM=${SUBSYSTEM:-&quot;all&quot;}
2021-02-19T15:00:45 
2021-02-19T15:00:45 # promise 2MB for every cpu
2021-02-19T15:00:45 if [ -f /sys/devices/system/cpu/possible ]; then
2021-02-19T15:00:45     _debug_mb=$((($(cut -d &quot;-&quot; -f 2 /sys/devices/system/cpu/possible)+1)*2))
2021-02-19T15:00:45 else
2021-02-19T15:00:45     _debug_mb=$(($(getconf _NPROCESSORS_CONF)*2))
2021-02-19T15:00:45 fi
2021-02-19T15:00:45 
2021-02-19T15:00:45 DEBUG_SIZE=${DEBUG_SIZE:-$_debug_mb}
2021-02-19T15:00:45 
2021-02-19T15:00:45 ENABLE_QUOTA=${ENABLE_QUOTA:-&quot;&quot;}
2021-02-19T15:00:45 QUOTA_TYPE=${QUOTA_TYPE:-&quot;ug3&quot;}
2021-02-19T15:00:45 QUOTA_USERS=${QUOTA_USERS:-&quot;quota_usr quota_2usr sanityusr sanityusr1&quot;}
2021-02-19T15:00:45 # &quot;error: conf_param: No such device&quot; issue in every test suite logs
2021-02-19T15:00:45 # sanity-quota test_32 hash_lqs_cur_bits is not set properly
2021-02-19T15:00:45 LQUOTAOPTS=${LQUOTAOPTS:-&quot;hash_lqs_cur_bits=3&quot;}
2021-02-19T15:00:45 
2021-02-19T15:00:45 #client
2021-02-19T15:00:45 MOUNT=${MOUNT:-/mnt/${FSNAME}}
2021-02-19T15:00:45 MOUNT1=${MOUNT1:-$MOUNT}
2021-02-19T15:00:45 MOUNT2=${MOUNT2:-${MOUNT}2}
2021-02-19T15:00:45 MOUNT3=${MOUNT3:-${MOUNT}3}
2021-02-19T15:00:45 # Comma-separated option list used as &quot;mount [...] -o $MOUNT_OPTS [...]&quot;
2021-02-19T15:00:45 MOUNT_OPTS=${MOUNT_OPTS:-&quot;user_xattr,flock&quot;}
2021-02-19T15:00:45 # Mount flags (e.g. &quot;-n&quot;) used as &quot;mount [...] $MOUNT_FLAGS [...]&quot;
2021-02-19T15:00:45 MOUNT_FLAGS=${MOUNT_FLAGS:-&quot;&quot;}
2021-02-19T15:00:45 DIR=${DIR:-$MOUNT}
2021-02-19T15:00:45 DIR1=${DIR:-$MOUNT1}
2021-02-19T15:00:45 DIR2=${DIR2:-$MOUNT2}
2021-02-19T15:00:45 DIR3=${DIR3:-$MOUNT3}
2021-02-19T15:00:45 
2021-02-19T15:00:45 if [ $UID -ne 0 ]; then
2021-02-19T15:00:45         log &quot;running as non-root uid $UID&quot;
2021-02-19T15:00:45         RUNAS_ID=&quot;$UID&quot;
2021-02-19T15:00:45         RUNAS_GID=`id -g $USER`
2021-02-19T15:00:45         RUNAS=&quot;&quot;
2021-02-19T15:00:45 else
2021-02-19T15:00:45         RUNAS_ID=${RUNAS_ID:-500}
2021-02-19T15:00:45         RUNAS_GID=${RUNAS_GID:-$RUNAS_ID}
2021-02-19T15:00:45         RUNAS=${RUNAS:-&quot;runas -u $RUNAS_ID -g $RUNAS_GID&quot;}
2021-02-19T15:00:45 fi
2021-02-19T15:00:45 
2021-02-19T15:00:45 PDSH=${PDSH:-no_dsh}
2021-02-19T15:00:45 FAILURE_MODE=${FAILURE_MODE:-SOFT} # or HARD
2021-02-19T15:00:45 POWER_DOWN=${POWER_DOWN:-&quot;powerman --off&quot;}
2021-02-19T15:00:45 POWER_UP=${POWER_UP:-&quot;powerman --on&quot;}
2021-02-19T15:00:45 SLOW=${SLOW:-no}
2021-02-19T15:00:45 FAIL_ON_ERROR=${FAIL_ON_ERROR:-true}
2021-02-19T15:00:45 
2021-02-19T15:00:45 MPIRUN=${MPIRUN:-$(which mpirun 2&amp;gt;/dev/null || true)}
2021-02-19T15:00:45 MPI_USER=${MPI_USER:-mpiuser}
2021-02-19T15:00:45 SHARED_DIR_LOGS=${SHARED_DIR_LOGS:-&quot;&quot;}
2021-02-19T15:00:45 MACHINEFILE_OPTION=${MACHINEFILE_OPTION:-&quot;-machinefile&quot;}
2021-02-19T15:00:45 
2021-02-19T15:00:45 # This is used by a small number of tests to share state between the client
2021-02-19T15:00:45 # running the tests, or in some cases between the servers (e.g. lfsck.sh).
2021-02-19T15:00:45 # It needs to be a non-lustre filesystem that is available on all the nodes.
2021-02-19T15:00:45 SHARED_DIRECTORY=${SHARED_DIRECTORY:-$TMP}	# bug 17839 comment 65
2021-02-19T15:00:45 
2021-02-19T15:00:45 #
2021-02-19T15:00:45 # In order to test multiple remote HSM agents, a new facet type named &quot;AGT&quot; and
2021-02-19T15:00:45 # the following associated variables are added:
2021-02-19T15:00:45 #
2021-02-19T15:00:45 # AGTCOUNT: number of agents
2021-02-19T15:00:45 # AGTDEV{N}: target HSM mount point (root path of the backend)
2021-02-19T15:00:45 # agt{N}_HOST: hostname of the agent agt{N}
2021-02-19T15:00:45 # SINGLEAGT: facet of the single agent
2021-02-19T15:00:45 #
2021-02-19T15:00:45 # Please refer to init_agt_vars() in sanity-hsm.sh for the default values of
2021-02-19T15:00:45 # these variables.
2021-02-19T15:00:45 #
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="292767" author="sebastien" created="Tue, 23 Feb 2021 14:15:49 +0000"  >&lt;p&gt;This tip is really helpful, thanks.&lt;/p&gt;

&lt;p&gt;I looked for differences between the config files used in the 3 following test cases:&lt;/p&gt;
&lt;ul&gt;
	&lt;li&gt;review-dne-ssk on CentOS 8.3: SSK setup fails&lt;/li&gt;
	&lt;li&gt;review-dne-ssk on CentOS 7.9: SSK setup succeeds&lt;/li&gt;
	&lt;li&gt;review-dne-selinux-ssk on CentOS 8.3: SSK setup succeeds&lt;/li&gt;
&lt;/ul&gt;


&lt;p&gt;Config files are almost identical, the only relevant difference being the &lt;tt&gt;PDSH&lt;/tt&gt; variable. For review-dne-ssk on CentOS 7.9 and CentOS 8.3, it is:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;PDSH=&quot;pdsh -t 120 -S -Rmrsh -w&quot;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;For review-dne-selinux-ssk on CentOS 8.3, it is:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;PDSH=&quot;pdsh -t 120 -S -Rssh -w&quot;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;So the combo for SSK failure is CentOS 8.3 + mrsh.&lt;/p&gt;

&lt;p&gt;I tried to push a patch to have review-dne-ssk run on CentOS 8.3 with &lt;tt&gt;env=PDSH=&quot;pdsh -t 120 -S -Rssh -w&quot;&lt;/tt&gt;, but as can be seen in the test logs at &lt;a href=&quot;https://testing.whamcloud.com/test_sessions/e0bc0219-f623-45b4-954e-fe4b03de7b93&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sessions/e0bc0219-f623-45b4-954e-fe4b03de7b93&lt;/a&gt;, this value gets overwritten by the default one, so it is not conclusive:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;2021-02-23T10:53:25 export PDSH=&quot;pdsh -t 120 -S -Rssh -w&quot;
2021-02-23T10:53:25 export PDSH=&quot;pdsh -t 120 -S -Rmrsh -w&quot;
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;However, I managed to reproduce manually the review-dne-ssk failure on trevis, just by using mrsh instead of ssh: if I set &lt;tt&gt;PDSH=&quot;pdsh -t 120 -S -Rssh -w&quot;&lt;/tt&gt; in my cfg file, SSK is setup properly by &lt;tt&gt;llmount.sh&lt;/tt&gt;, but if I set &lt;tt&gt;PDSH=&quot;pdsh -t 120 -S -Rmrsh -w&quot;&lt;/tt&gt; in my cfg file, then it fails.&lt;/p&gt;

&lt;p&gt;I have opened ATM-1962 to request switch from &lt;tt&gt;mrsh&lt;/tt&gt; to &lt;tt&gt;ssh&lt;/tt&gt; for pdsh rcmd module, but I do not know if maybe &lt;a href=&quot;https://jira.whamcloud.com/secure/ViewProfile.jspa?name=colmstea&quot; class=&quot;user-hover&quot; rel=&quot;colmstea&quot;&gt;colmstea&lt;/a&gt; or &lt;a href=&quot;https://jira.whamcloud.com/secure/ViewProfile.jspa?name=leonel8a&quot; class=&quot;user-hover&quot; rel=&quot;leonel8a&quot;&gt;leonel8a&lt;/a&gt; have a way to trigger a review-dne-ssk test with this beforehand, in order to confirm that it fixes the problem.&lt;/p&gt;</comment>
                            <comment id="292999" author="sebastien" created="Thu, 25 Feb 2021 12:12:00 +0000"  >&lt;p&gt;Now that Charlie has landed fix for ATM-1962, review-dne-ssk passes successfully. I think this ticket can be closed.&lt;/p&gt;</comment>
                            <comment id="293042" author="adilger" created="Thu, 25 Feb 2021 16:15:27 +0000"  >&lt;p&gt;Do we need to re-enable the review-dne-ssk series as enforced again?&lt;/p&gt;</comment>
                            <comment id="293044" author="colmstea" created="Thu, 25 Feb 2021 16:17:31 +0000"  >&lt;p&gt;I re-enabled it as enforced yesterday morning.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="62841">LU-14424</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                                        </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i01mxj:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>