<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:37:00 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
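For example, assuming the stock JIRA issue-xml view path (an assumption; it is not shown elsewhere in
this export), the restricted document for this issue would be fetched from a URL of the form:
https://jira.whamcloud.com/si/jira.issueviews:issue-xml/LU-10651/LU-10651.xml?field=key&field=summary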
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-10651] Failed to provision nodes: No such process</title>
                <link>https://jira.whamcloud.com/browse/LU-10651</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;Maloo failed to provision nodes, with error &quot;Failed to provision nodes: No such process&quot;.&#160; See &lt;a href=&quot;https://testing.hpdd.intel.com/test_sets/c15c2304-0c7e-11e8-bd00-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.hpdd.intel.com/test_sets/c15c2304-0c7e-11e8-bd00-52540065bddc&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Node provisioning log tail:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;
2018-02-08T02:53:42 Rebooting trevis-57vm1, trevis-57vm2, trevis-57vm3, trevis-57vm4, trevis-57vm5...
2018-02-08T02:56:09 trevis-57vm2 is reachable and ready
2018-02-08T02:57:49 trevis-57vm4 is reachable and ready
2018-02-08T02:58:20 trevis-57vm5 is reachable and ready
2018-02-08T02:58:24 trevis-57vm3 is reachable and ready
2018-02-08T02:58:40 trevis-57vm1 is reachable and ready
2018-02-08T02:58:40 All nodes rebooted successfully
2018-02-08T02:58:40 Creating partitions for OSSs...
2018-02-08T02:58:40 Creating lvm partitions
2018-02-08T02:58:44 trevis-57vm3 - lvm_size=96.5
2018-02-08T02:58:44 trevis-57vm3 - node.partition_size_gb=11.99
2018-02-08T02:59:23 Creating partitions for MDSs...
2018-02-08T02:59:23 Creating lvm partitions
2018-02-08T02:59:29 trevis-57vm4 - lvm_size=96.5
2018-02-08T02:59:29 trevis-57vm4 - node.partition_size_gb=2
2018-02-08T03:00:50 trevis-57vm5 - lvm_size=96.5
2018-02-08T03:00:51 trevis-57vm5 - node.partition_size_gb=2
2018-02-08T03:02:07 Creating partitions for servers complete!
2018-02-08T03:02:07 Rebooting nodes...
2018-02-08T03:02:07 Rebooting trevis-57vm3, trevis-57vm4, trevis-57vm5...
2018-02-08T03:13:14 trevis-57vm5 is reachable and ready
2018-02-08T03:13:15 Errno::ESRCH
No such process
/home/autotest2/autotest/lib/interruptable_process.rb:69:in `getpgid&apos;
/home/autotest2/autotest/lib/interruptable_process.rb:69:in `cleanup_process&apos;
/home/autotest2/autotest/lib/interruptable_process.rb:50:in `ensure in run3&apos;
/home/autotest2/autotest/lib/interruptable_process.rb:50:in `run3&apos;
/home/autotest2/autotest/lib/system_utils.rb:74:in `block (2 levels) in execute&apos;
/usr/local/rbenv/versions/2.3.1/lib/ruby/2.3.0/timeout.rb:91:in `block in timeout&apos;
/usr/local/rbenv/versions/2.3.1/lib/ruby/2.3.0/timeout.rb:33:in `block in catch&apos;
/usr/local/rbenv/versions/2.3.1/lib/ruby/2.3.0/timeout.rb:33:in `catch&apos;
/usr/local/rbenv/versions/2.3.1/lib/ruby/2.3.0/timeout.rb:33:in `catch&apos;
/usr/local/rbenv/versions/2.3.1/lib/ruby/2.3.0/timeout.rb:106:in `timeout&apos;
/home/autotest2/autotest/lib/system_utils.rb:73:in `block in execute&apos;
/home/autotest2/autotest/lib/retry_loop.rb:28:in `block (2 levels) in retry_loop&apos;
/home/autotest2/autotest/lib/retry_loop.rb:27:in `upto&apos;
/home/autotest2/autotest/lib/retry_loop.rb:27:in `block in retry_loop&apos;
/home/autotest2/autotest/lib/retry_loop.rb:26:in `catch&apos;
/home/autotest2/autotest/lib/retry_loop.rb:26:in `retry_loop&apos;
/home/autotest2/autotest/lib/system_utils.rb:69:in `execute&apos;
/home/autotest2/autotest/lib/system_utils.rb:108:in `block in rexec&apos;
/home/autotest2/autotest/lib/system_utils.rb:297:in `via_nfs&apos;
/home/autotest2/autotest/lib/system_utils.rb:104:in `rexec&apos;
/home/autotest2/autotest/lib/system_utils.rb:116:in `rexec_no_retry&apos;
/home/autotest2/autotest/lib/system_utils.rb:202:in `reachable?&apos;
/home/autotest2/autotest/lib/configure_cluster.rb:280:in `block in reboot_nodes&apos;
/home/autotest2/autotest/vendor/bundle/ruby/2.3.0/gems/parallel-1.11.1/lib/parallel.rb:484:in `call_with_index&apos;
/home/autotest2/autotest/vendor/bundle/ruby/2.3.0/gems/parallel-1.11.1/lib/parallel.rb:342:in `block (2 levels) in work_in_threads&apos;
/home/autotest2/autotest/vendor/bundle/ruby/2.3.0/gems/parallel-1.11.1/lib/parallel.rb:493:in `with_instrumentation&apos;
/home/autotest2/autotest/vendor/bundle/ruby/2.3.0/gems/parallel-1.11.1/lib/parallel.rb:341:in `block in work_in_threads&apos;
/home/autotest2/autotest/vendor/bundle/ruby/2.3.0/gems/parallel-1.11.1/lib/parallel.rb:206:in `block (2 levels) in in_threads&apos;
2018-02-08T03:13:16 Getting console log trevis-57vm1.log
2018-02-08T03:13:17 Getting console log trevis-57vm2.log
2018-02-08T03:13:21 Getting console log trevis-57vm3.log
2018-02-08T03:13:28 Getting console log trevis-57vm4.log
2018-02-08T03:13:44 Getting console log trevis-57vm5.log
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
        <environment></environment>
        <key id="50718">LU-10651</key>
        <summary>Failed to provision nodes: No such process</summary>
        <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
        <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
        <statusCategory id="3" key="done" colorName="success"/>
        <resolution id="10000">Done</resolution>
        <assignee username="colmstea">Charlie Olmstead</assignee>
        <reporter username="ofaaland">Olaf Faaland</reporter>
        <labels>
        </labels>
        <created>Thu, 8 Feb 2018 18:52:14 +0000</created>
        <updated>Fri, 9 Feb 2018 18:40:01 +0000</updated>
        <resolved>Fri, 9 Feb 2018 16:14:40 +0000</resolved>
        <due></due>
        <votes>0</votes>
        <watches>4</watches>
        <comments>
                            <comment id="220566" author="pjones" created="Fri, 9 Feb 2018 13:15:16 +0000"  >&lt;p&gt;Lee&lt;/p&gt;

&lt;p&gt;Do you consider this issue to be related to the test infrastructure rather than the Lustre code?&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="220588" author="colmstea" created="Fri, 9 Feb 2018 16:14:03 +0000"  >&lt;p&gt;From what I can tell, this appears to be an edge case. The library for handling external calls does not handle those that died between timing a call out and cleaning the call up. I&apos;ll create an internal ticket to handle this case properly.&lt;/p&gt;</comment>
                            <comment id="220614" author="leonel8a" created="Fri, 9 Feb 2018 17:56:12 +0000"  >&lt;p&gt;I guess moving a ticket doesn&apos;t add you to the watchers, I&apos;m just now seeing the comments.&lt;/p&gt;

&lt;p&gt;So yes Peter, this was definitely a test infrastructure issue rather than Lustre. For future cases I think the best option would be to create an ATM ticket and link the two so reporters don&apos;t loose visibility and update them accordingly. Sorry about the confusion.&lt;/p&gt;</comment>
                            <comment id="220628" author="pjones" created="Fri, 9 Feb 2018 18:40:01 +0000"  >&lt;p&gt;Thanks Lee/Charlie. Olaf, are you ok to close this ticket out as a duplicate of the ticket opened against our test infrastructure? It seems to not be a bug from a Lustre point of view (though does affect testing)...&lt;/p&gt;</comment>
        </comments>
        <issuelinks>
            <issuelinktype id="10011">
                <name>Related</name>
                <outwardlinks description="is related to ">
                </outwardlinks>
            </issuelinktype>
        </issuelinks>
        <attachments>
        </attachments>
        <subtasks>
        </subtasks>
        <customfields>
            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                <customfieldname>Development</customfieldname>
                <customfieldvalues>
                </customfieldvalues>
            </customfield>
            <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                <customfieldname>Rank</customfieldname>
                <customfieldvalues>
                    <customfieldvalue>1|hzzslb:</customfieldvalue>
                </customfieldvalues>
            </customfield>
            <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                <customfieldname>Rank (Obsolete)</customfieldname>
                <customfieldvalues>
                    <customfieldvalue>9223372036854775807</customfieldvalue>
                </customfieldvalues>
            </customfield>
            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                <customfieldname>Severity</customfieldname>
                <customfieldvalues>
                    <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>
                </customfieldvalues>
            </customfield>
        </customfields>
    </item>
</channel>
</rss>