<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 01:32:55 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-3322] ko2iblnd support for different map_on_demand and peer_credits between systems</title>
                <link>https://jira.whamcloud.com/browse/LU-3322</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;ko2iblnd currently doesn&apos;t support different values of peer_credits or map_on_demand between systems.  &lt;/p&gt;

&lt;p&gt;After I finish some testing I will upload a patch to gerrit in the next couple of days.&lt;/p&gt;</description>
                <environment></environment>
        <key id="18907">LU-3322</key>
            <summary>ko2iblnd support for different map_on_demand and peer_credits between systems</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="3" iconUrl="https://jira.whamcloud.com/images/icons/priorities/major.svg">Major</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="ashehata">Amir Shehata</assignee>
                                    <reporter username="jfilizetti">Jeremy Filizetti</reporter>
                        <labels>
                            <label>patch</label>
                    </labels>
                <created>Mon, 13 May 2013 02:02:57 +0000</created>
                <updated>Fri, 14 Jun 2019 16:40:56 +0000</updated>
                            <resolved>Tue, 24 Nov 2015 14:42:47 +0000</resolved>
                                    <version>Lustre 2.7.0</version>
                    <version>Lustre 2.8.0</version>
                                    <fixVersion>Lustre 2.8.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>31</watches>
                                                                            <comments>
                            <comment id="58251" author="liang" created="Mon, 13 May 2013 02:32:46 +0000"  >&lt;p&gt;Add Isaac and me to watching list.&lt;/p&gt;</comment>
                            <comment id="58270" author="pjones" created="Mon, 13 May 2013 13:04:47 +0000"  >&lt;p&gt;ok Jeremy, I will assign this ticket to myself for now and reassign it to an engineer when you upload the patch&lt;/p&gt;</comment>
                            <comment id="71950" author="jfilizetti" created="Wed, 20 Nov 2013 13:23:26 +0000"  >&lt;p&gt;Sorry, I&apos;ve been sitting on this forever.  Finally uploaded at &lt;a href=&quot;http://review.whamcloud.com/#/c/8342/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/8342/&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="71951" author="pjones" created="Wed, 20 Nov 2013 13:32:37 +0000"  >&lt;p&gt;Thanks Jeremy! &lt;/p&gt;

&lt;p&gt;Amir, &lt;/p&gt;

&lt;p&gt;could you please review Jeremy&apos;s patch? &lt;/p&gt;

&lt;p&gt;Thanks&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="72015" author="ashehata" created="Thu, 21 Nov 2013 00:26:19 +0000"  >&lt;p&gt;I looked at it and as far as I could tell the patch is ok.  I would suggest we add Isaac and/or Liang to take a look at it since they know the o2iblnd driver more thoroughly.&lt;/p&gt;</comment>
                            <comment id="78807" author="jfc" created="Sat, 8 Mar 2014 01:54:35 +0000"  >&lt;p&gt;Amir or Jeremy,&lt;br/&gt;
Did we get this finished and if so can I mark the ticket as resolved?&lt;br/&gt;
Thanks,&lt;br/&gt;
~ jfc.&lt;/p&gt;</comment>
                            <comment id="90336" author="jfc" created="Tue, 29 Jul 2014 17:47:50 +0000"  >&lt;p&gt;Isaac or Liang,&lt;br/&gt;
Do you have comments to add here: &lt;a href=&quot;http://review.whamcloud.com/#/c/8342/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/8342/&lt;/a&gt; as requested by Andreas?&lt;/p&gt;

&lt;p&gt;If this ticket, or the patch itself, is no longer being worked on, can we mark it as resolved please?&lt;/p&gt;

&lt;p&gt;Thanks,&lt;br/&gt;
~ jfc.&lt;/p&gt;</comment>
                            <comment id="90409" author="liang" created="Wed, 30 Jul 2014 01:32:29 +0000"  >&lt;p&gt;sorry for late reply, I actually think we should have this feature, but I need to review it first....&lt;/p&gt;</comment>
                            <comment id="92948" author="liang" created="Tue, 2 Sep 2014 08:23:53 +0000"  >&lt;p&gt;I think we probably should prioritise this patch and have this feature in 2.7, at least for the map_on_demand part, because this is something asked by different people for years. &lt;/p&gt;</comment>
                            <comment id="93397" author="jfilizetti" created="Sun, 7 Sep 2014 03:14:01 +0000"  >&lt;p&gt;New patch for master uploaded at: &lt;a href=&quot;http://review.whamcloud.com/#/c/11794/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/11794/&lt;/a&gt;&lt;/p&gt;</comment>
                            <comment id="115272" author="simmonsja" created="Wed, 13 May 2015 21:22:11 +0000"  >&lt;p&gt;This is most excellent. I have a system that will not accepted the normal 63 peer_credits we use. I&apos;m going to test this out right now and let you know the results. I refreshed the patch.&lt;/p&gt;</comment>
                            <comment id="116388" author="simmonsja" created="Tue, 26 May 2015 15:06:41 +0000"  >&lt;p&gt;I did some testing and found some issues with the following setup. On the client side I&apos;m using the mlx5 driver from the Mellanox 2.4 stack. This driver does not  support FMR but it does support PMR. The clients module parameters are as follows:&lt;/p&gt;

&lt;p&gt;options ko2iblnd timeout=100 credits=2560 ntx=5120 peer_credits=63 concurrent_sends=63 pmr_pool_size=1280 fmr_pool_size=1280 fmr_flush_trigger=1024 map_on_demand=64&lt;/p&gt;

&lt;p&gt;Here are the logs for the client:&lt;/p&gt;

&lt;p&gt;931569.400915] LNet: HW CPU cores: 160, npartitions: 16&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;931611.029095&amp;#93;&lt;/span&gt; fmr_pool: Device mlx5_0 does not support FMRs&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;931611.029212&amp;#93;&lt;/span&gt; LNetError: 26732:0:(o2iblnd.c:1509:kiblnd_create_fmr_pool()) Failed to create FMR pool: -38&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;931611.029340&amp;#93;&lt;/span&gt; LNet: 26732:0:(o2iblnd.c:2301:kiblnd_net_init_pools()) Device does not support FMR, failing back to PMR&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;931611.088611&amp;#93;&lt;/span&gt; LNet: Added LNI 10.37.202.11@o2ib1 &lt;span class=&quot;error&quot;&gt;&amp;#91;63/8064/0/180&amp;#93;&lt;/span&gt;&lt;/p&gt;

&lt;p&gt;---------------------------------------------------------------------------------------------------------------------------------------------------------------------&lt;br/&gt;
On the server side I&apos;m running the default OFED stack that comes with RHEL6.5 which uses the mlx4 driver. This driver supports FMR. The server module parameters are as follows:&lt;/p&gt;

&lt;p&gt;options ko2iblnd timeout=100 credits=2560 ntx=5120 peer_credits=63 concurrent_sends=63 fmr_pool_size=1280 pmr_pool_size=1280 fmr_flush_trigger=1024&lt;/p&gt;

&lt;p&gt;So as you can see the only difference is map_on_demand on the client. Now when I attempt to ping the server node from the client I get&lt;br/&gt;
the following error on the SERVER:&lt;/p&gt;

&lt;p&gt;&lt;span class=&quot;error&quot;&gt;&amp;#91;499128.871773&amp;#93;&lt;/span&gt; LNetError: 1982:0:(o2iblnd_cb.c:2359:kiblnd_passive_connect()) Can&apos;t accept conn from 10.37.202.11@o2ib1 (version 12): max_frags 64 incompatible without FMR/PMR pool (256 wanted)&lt;/p&gt;

&lt;p&gt;In the reverse direction, server node pinging the client node I get :&lt;/p&gt;

&lt;p&gt;932734.599925] LNetError: 20856:0:(o2iblnd_cb.c:2345:kiblnd_passive_connect()) Can&apos;t accept conn from 10.37.248.67@o2ib1 (version 12): max_frags 256 too large (64 wanted)&lt;/p&gt;

&lt;p&gt;Is there any way to avoid having to reconfigure my entire LNet fabric to make this work?&lt;/p&gt;</comment>
                            <comment id="116413" author="jfilizetti" created="Tue, 26 May 2015 16:44:48 +0000"  >&lt;p&gt;You will need to set map_on_demand=256 on the servers as well.  It&apos;s a little odd because even though the default frags is 256 without setting map_on_demand explicitly the kiblnd_fmr_map_tx/kiblnd_pmr_map_tx is never called from kiblnd_map_tx.&lt;/p&gt;

&lt;p&gt;Since I&apos;ve never added any documentation here is some more details from a writeup on it I did quite a while back.  I didn&apos;t verify that things haven&apos;t changed with the module parameters so hopefully it&apos;s still accurate.&lt;/p&gt;

&lt;p&gt;Summary:&lt;br/&gt;
Normally the Lustre ko2iblnd can only operate with identical peer_credits and map_on_demand between systems.  The patch affects the active (initiator) connection in IB for clients/routers and the passive (responder) connections for IB with servers/routers.  Passive connections will automatically negotiate down if the parameters permit and reject if they are too high for the remote request.  Active connections will send their defaults initially and if rejected attempt to use the lower values from the reject message.  If the values are higher the active connection won&apos;t retry because it&apos;s not supported.&lt;/p&gt;

&lt;p&gt;There are 3 parameters in the ko2iblnd of interest here: peer_credits, map_on_demand, and concurrent_sends.  The default settings for ko2iblnd are peer_credits=8, concurrent_sends=8, and map_on_demand=0 (disabled)&lt;/p&gt;

&lt;p&gt;peer_credits determines how many messages you can receive from a single connection (queue pair).  &lt;br/&gt;
map_on_demand determines the number of DMA segments per credit that are sent.  Each segment is usually (always?) a page and is sent as a separate work request so these are typically (256 * 4k pages) going across the wire.&lt;br/&gt;
concurrent_sends determines how many send messages you can queue at a time to a single connection,  concurrent_sends can&apos;t be less than half of peer_credits but concurrent_sends needs to be &amp;lt;= 62 to not exceed the maximum number of work requests per queue pair in the standard Mellanox ConnectX&lt;span class=&quot;error&quot;&gt;&amp;#91;123&amp;#93;&lt;/span&gt; HCAs.  &lt;/p&gt;

&lt;p&gt;The relation between those values is: work_requests_allocated = (map_on_demand + 1) * concurrent_sends&lt;/p&gt;

&lt;p&gt;You can see your max work requests per queue pair with:&lt;/p&gt;
&lt;ol&gt;
	&lt;li&gt;ibv_devinfo -v | grep max_qp_wr&lt;br/&gt;
        max_qp_wr:                      16384&lt;/li&gt;
&lt;/ol&gt;


&lt;p&gt;My recommendation is the following for maximum compatibility.&lt;br/&gt;
Lustre MDS/OSS Severs and Routers:&lt;br/&gt;
Use the patch and run with peer_credits=124 concurrent_sends=62 map_on_demand=256&lt;/p&gt;

&lt;p&gt;Existing Lustre clients:&lt;br/&gt;
Need to apply the patch or lower the current values for peer_credits and concurrent_sends to match the OSS/MDS setting.&lt;/p&gt;</comment>
                            <comment id="116571" author="simmonsja" created="Wed, 27 May 2015 18:02:15 +0000"  >&lt;p&gt;I did some testing and your advice on setting map_on_demand=256 worked!! I&apos;m now running nodes with very different map_on_demand settings with no problems.&lt;/p&gt;</comment>
                            <comment id="116617" author="doug" created="Wed, 27 May 2015 23:10:55 +0000"  >&lt;p&gt;From the code, I&apos;ve always considered map_on_demand = 0 an &quot;off&quot; switch for FMR.  So, if mlx5 does not support FMR anymore, should map_on_demand be set to zero there?  Will that cause connection issues with older mlx versions with FMR turned on?&lt;/p&gt;

&lt;p&gt;I&apos;m happy with the patch and will give it a +1 as well.  I&apos;m also going to open an LUDOC ticket for documenting how all of this works.  Our current documentation is a bit too sparse on all these tunables and how they relate.&lt;/p&gt;

&lt;p&gt;Is there a recommendation for changing the default parameters to something more reasonable?  I know that you have to turn on FMR (map_on_demand &amp;gt; 0) for Truescale to work at a reasonable performance rate.&lt;/p&gt;</comment>
                            <comment id="127036" author="ashehata" created="Fri, 11 Sep 2015 03:24:55 +0000"  >&lt;p&gt;I have pushed a patch which addresses the race conditions that Isaac pointed.&lt;/p&gt;

&lt;p&gt;However, I have some questions, which IMHO seem fundamental to me.&lt;/p&gt;

&lt;p&gt;1. Do we care whether map-on-demand is set on both peers? or do we care if the number of fragments communicated between peers are compatible? IE: the active side has equal or less than number of fragments of the passive peer?&lt;/p&gt;

&lt;p&gt;In the original code, in kiblnd_passive_connect() it checked whether the number of fragments were not equal. On a peer with map-on-demand off IBLND_RDMA_FRAGS returns 256, so a peer with map-on-demand on and == 256 can still connect with the one with map-on-demand off.&lt;/p&gt;

&lt;p&gt;In the &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3322&quot; title=&quot;ko2iblnd support for different map_on_demand and peer_credits between systems&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3322&quot;&gt;&lt;del&gt;LU-3322&lt;/del&gt;&lt;/a&gt; patch that code was changed to add the restriction of checking if FMR is enabled locally even if the number of frags is less than what it is locally.  What was the reason for that?&lt;/p&gt;

&lt;p&gt;2. Why is it that the number of fragments on both sides need to be compatible? &amp;lt;=?  Why do we simply not remove that restriction completely and let each network admin determine the best values - while we provide documentation on ideal values? &lt;/p&gt;

&lt;p&gt;I have attached a compatibility matrix excel sheet, which I hope to have outlined all the different cases, and whether a connection is accepted or rejected.  It&apos;ll be great if some one vets it.  Ideally, we&apos;d agree on the desired behavior, then move on with implementing that behavior.  Currently it seems to me that there is a disagreement on how old/new and new/new versions of the software should behave.&lt;/p&gt;</comment>
                            <comment id="127857" author="jfilizetti" created="Fri, 18 Sep 2015 22:36:39 +0000"  >&lt;p&gt;1.  I don&apos;t think MOD needs to be a requirement for both.  For the FMR explanation see 2.&lt;/p&gt;

&lt;p&gt;2.  For a client with a lower number of fragments I don&apos;t see a way to guarantee that the number of pages/frags passed into kiblnd_setup_rd_{iov,kiov} is less than 256.  o2iblnd receives messages from LNet which has LNET_MAX_IOV=256.  If you didn&apos;t have FMR to create a single fragment from multiple pages/segments you would end up sending a tx with too many fragments for the peer. &lt;/p&gt;</comment>
                            <comment id="129710" author="gerrit" created="Wed, 7 Oct 2015 17:39:27 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/11794/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/11794/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3322&quot; title=&quot;ko2iblnd support for different map_on_demand and peer_credits between systems&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3322&quot;&gt;&lt;del&gt;LU-3322&lt;/del&gt;&lt;/a&gt; ko2iblnd: Support different configs between systems&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 7f5c9753872cfa8ad47821be3fa924c74c4c8b0d&lt;/p&gt;</comment>
                            <comment id="129717" author="jgmitter" created="Wed, 7 Oct 2015 17:50:06 +0000"  >&lt;p&gt;Landed for 2.8&lt;/p&gt;</comment>
                            <comment id="129753" author="mhanafi" created="Wed, 7 Oct 2015 20:18:20 +0000"  >&lt;p&gt;Can we get this back ported to lustre 2.5.3?&lt;/p&gt;</comment>
                            <comment id="132826" author="dmiter" created="Fri, 6 Nov 2015 10:47:10 +0000"  >&lt;p&gt;Unfortunately the setting peer_credits cannot be different.&lt;br/&gt;
I have on one machine:&lt;/p&gt;
&lt;blockquote&gt;&lt;p&gt;options ko2iblnd credits=2560 ntx=5120 concurrent_sends=63 peer_credits=16&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;on other&lt;/p&gt;
&lt;blockquote&gt;&lt;p&gt;options ko2iblnd credits=2560 ntx=5120 concurrent_sends=63&lt;/p&gt;&lt;/blockquote&gt;
&lt;p&gt;and then got the following error:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;LNetError: 2895:0:(o2iblnd_cb.c:2264:kiblnd_passive_connect()) Can&apos;t accept conn from 192.168.3.102@o2ib, queue depth too large:  16 (&amp;lt;=8 wanted)
LNetError: 2895:0:(o2iblnd_cb.c:2264:kiblnd_passive_connect()) Can&apos;t accept conn from 192.168.3.102@o2ib, queue depth too large:  16 (&amp;lt;=8 wanted)
LNetError: 2895:0:(o2iblnd_cb.c:2264:kiblnd_passive_connect()) Skipped 134 previous similar messages
LNetError: 2895:0:(o2iblnd_cb.c:2264:kiblnd_passive_connect()) Can&apos;t accept conn from 192.168.3.102@o2ib, queue depth too large:  16 (&amp;lt;=8 wanted)
LNetError: 2895:0:(o2iblnd_cb.c:2264:kiblnd_passive_connect()) Skipped 265 previous similar messages
LNetError: 2895:0:(o2iblnd_cb.c:2264:kiblnd_passive_connect()) Can&apos;t accept conn from 192.168.3.102@o2ib, queue depth too large:  16 (&amp;lt;=8 wanted)
LNetError: 2895:0:(o2iblnd_cb.c:2264:kiblnd_passive_connect()) Skipped 520 previous similar messages
LNetError: 2895:0:(o2iblnd_cb.c:2264:kiblnd_passive_connect()) Can&apos;t accept conn from 192.168.3.102@o2ib, queue depth too large:  16 (&amp;lt;=8 wanted)
LNetError: 2895:0:(o2iblnd_cb.c:2264:kiblnd_passive_connect()) Skipped 1020 previous similar messages
LNetError: 2895:0:(o2iblnd_cb.c:2264:kiblnd_passive_connect()) Can&apos;t accept conn from 192.168.3.102@o2ib, queue depth too large:  16 (&amp;lt;=8 wanted)
LNetError: 2895:0:(o2iblnd_cb.c:2264:kiblnd_passive_connect()) Skipped 1971 previous similar messages
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="132827" author="dmiter" created="Fri, 6 Nov 2015 10:48:03 +0000"  >&lt;p&gt;This was tested on master v2_7_62_0-25-g8248c89&lt;br/&gt;
one machine:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Lustre: Lustre: Build Version: 2.7.62-g8248c89-CHANGED-2.6.32-573.7.1.el6_lustre.g95557d5.x86_64
LNet: Added LNI 192.168.3.102@o2ib [16/2560/0/180]
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;second machine:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Lustre: Lustre: Build Version: 2.7.62-g8248c89-CHANGED-2.6.32-573.7.1.el6_lustre.g95557d5.x86_64
LNet: Added LNI 192.168.3.104@o2ib [8/2560/0/180]
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="132856" author="jfilizetti" created="Fri, 6 Nov 2015 18:02:36 +0000"  >&lt;p&gt;In patch set 4 and earlier the call from kiblnd_reconnect() was storing the peer&apos;s maximums in the kib_peer_t which was not recreated and would be reused when reconnecting through kiblnd_active_connect.  With patch set 5 and later this was removed so it&apos;s likely the rejected connections won&apos;t work for map_on_demand or peer credits including the patch cherry-picked for master.  However, it should accept connections for remote hosts that don&apos;t exceed the host&apos;s maximums.  So the landed patch is missing half of the functionality.&lt;/p&gt;</comment>
                            <comment id="132892" author="gerrit" created="Fri, 6 Nov 2015 20:41:42 +0000"  >&lt;p&gt;Amir Shehata (amir.shehata@intel.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/17074&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/17074&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3322&quot; title=&quot;ko2iblnd support for different map_on_demand and peer_credits between systems&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3322&quot;&gt;&lt;del&gt;LU-3322&lt;/del&gt;&lt;/a&gt; lnet: make connect parameters persistent&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 1d262cebcdccd8ba6a2f678f131d99df55f1b692&lt;/p&gt;</comment>
                            <comment id="132905" author="simmonsja" created="Fri, 6 Nov 2015 21:57:48 +0000"  >&lt;p&gt;I have to say we haven&apos;t seen any of these problems and I have been running a mlx4 &amp;lt;-&amp;gt; mlx5 setup with different peer_credits on each end. Will try the patch.&lt;/p&gt;</comment>
                            <comment id="133030" author="dmiter" created="Mon, 9 Nov 2015 19:40:08 +0000"  >&lt;p&gt;With patch &lt;a href=&quot;http://review.whamcloud.com/#/c/17074/2&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/17074/2&lt;/a&gt; it works but anyway I see one error message:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;in one machine:
LNet: Added LNI 192.168.3.102@o2ib [16/512/0/180]

in other machine:
LNet: Added LNI 192.168.3.104@o2ib [8/256/0/180]
LNetError: 123936:0:(o2iblnd_cb.c:2264:kiblnd_passive_connect()) Can&apos;t accept conn from 192.168.3.102@o2ib, queue depth too large:  16 (&amp;lt;=8 wanted)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;So, this message a bit confused. Can you omit it?&lt;/p&gt;</comment>
                            <comment id="133043" author="jfilizetti" created="Mon, 9 Nov 2015 20:28:39 +0000"  >&lt;p&gt;We could alter the error message to say something like &quot;Can&apos;t accept conn from 192.168.3.102@o2ib, queue depth too large:  16 (&amp;lt;=8 wanted), returning maximum supported values to peer for reconnect&quot; to be more specific.  The problem with leaving this out is if you had an a client without the patch connecting you would see no error messages for why the client can&apos;t connect and it wouldn&apos;t negotiate to the lower supported value it would just repeatedly fail with no error messages.  Perhaps the better thing to do here is to bump the IBLND_MSG_VERSION and report errors for only clients with IBLND_MSG_VERSION_2.  I&apos;ll have to take a look to see if that&apos;s a possibility.&lt;/p&gt;</comment>
                            <comment id="133045" author="dmiter" created="Mon, 9 Nov 2015 20:36:27 +0000"  >&lt;p&gt;Is it expected that old clients cannot work if they have different settings?&lt;br/&gt;
So, this patch should be everywhere (servers, routers and clients). Is it possible to accommodate to old client and make it workable?&lt;/p&gt;</comment>
                            <comment id="133046" author="jfilizetti" created="Mon, 9 Nov 2015 20:42:36 +0000"  >&lt;p&gt;See comment:&lt;/p&gt;

&lt;p&gt;&lt;a href=&quot;https://jira.hpdd.intel.com/browse/LU-3322?focusedCommentId=116413&amp;amp;page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-116413&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://jira.hpdd.intel.com/browse/LU-3322?focusedCommentId=116413&amp;amp;page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-116413&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;The unpatched clients can connect as long as it&apos;s values are less then or equal but if they are higher the patched system is unable to allow the connection which is why it sends it&apos;s supported values back in the rejection message.  Unpatched clients ignore these parameters sent back and only only support connections if their values match exactly.&lt;/p&gt;</comment>
                            <comment id="133047" author="dmiter" created="Mon, 9 Nov 2015 20:44:28 +0000"  >&lt;p&gt;Thanks Jeremy,&lt;/p&gt;

&lt;p&gt;From my point of view it would be better to say explicitly that we try to make other handshake with different settings. In this case the error message will make sense. Or check the version and report it only for old clients.&lt;/p&gt;

&lt;p&gt;P.S. Also I understand if the server can not operate with high value from old client it&apos;s not possible to support it, We need a clear message about this.&lt;/p&gt;</comment>
                            <comment id="133131" author="dmiter" created="Tue, 10 Nov 2015 16:02:27 +0000"  >&lt;p&gt;Another issue I have:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;Server with mlx5 have:
LNet: Added LNI 192.168.3.100@o2ib [16/2560/0/180]
LNetError: 107216:0:(o2iblnd_cb.c:2264:kiblnd_passive_connect()) Can&apos;t accept conn from 192.168.3.8@o2ib, queue depth too large:  128 (&amp;lt;=16 wanted)
LNet: 107216:0:(o2iblnd_cb.c:2291:kiblnd_passive_connect()) Can&apos;t accept conn from 192.168.3.8@o2ib (version 12): max_frags 32 incompatible without FMR pool (256 wanted)

Router with mlx4 and hfi have:
LNet: Added LNI 192.168.3.8@o2ib [128/4096/0/180]
LNet: Added LNI 192.168.5.8@o2ib1 [128/4096/0/180]
# lctl ping 192.168.3.100@o2ib0
failed to ping 192.168.3.100@o2ib: Input/output error
# lctl ping 192.168.5.200@o2ib1
12345-0@lo
12345-192.168.5.200@o2ib1

old client with hfi have:
LNet: Added LNI 192.168.5.200@o2ib1 [128/4096/0/180]
# lctl ping 192.168.3.100@o2ib0
failed to ping 192.168.3.100@o2ib: Input/output error
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Client and Router has the same tunables. Only Server have different tunables but both server and router have new version of Lustre with &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3322&quot; title=&quot;ko2iblnd support for different map_on_demand and peer_credits between systems&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3322&quot;&gt;&lt;del&gt;LU-3322&lt;/del&gt;&lt;/a&gt; patch. So, again different tunables don&apos;t work.&lt;/p&gt;</comment>
                            <comment id="133135" author="jfilizetti" created="Tue, 10 Nov 2015 16:26:35 +0000"  >&lt;p&gt;This is also explained in the earlier comment.  Without FMR (mlx5 doesn&apos;t support it) or some other way to coalesce the fragments you can&apos;t connect to a client with a lower number of frags because you will end up with RDMA&apos;s too fragmented.  I&apos;m assuming your server is running with map_on_demand=32 and your router is running without it set?&lt;/p&gt;

&lt;p&gt;See &lt;a href=&quot;https://jira.hpdd.intel.com/browse/LU-3322?focusedCommentId=127857&amp;amp;page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-127857&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://jira.hpdd.intel.com/browse/LU-3322?focusedCommentId=127857&amp;amp;page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel#comment-127857&lt;/a&gt;&lt;/p&gt;
</comment>
                            <comment id="133137" author="dmiter" created="Tue, 10 Nov 2015 16:34:32 +0000"  >&lt;p&gt;yes, router have map_on_demand=32 but server no. Why they cannot handshake the workable parameters? Both have latest version.&lt;/p&gt;</comment>
                            <comment id="133140" author="jfilizetti" created="Tue, 10 Nov 2015 16:52:45 +0000"  >&lt;p&gt;The mlx5 doesn&apos;t support FMR pools which is Lustre&apos;s only method of mapping a group of fragments into a single fragment.   o2iblnd is written so that 1 LNet message = 1 o2iblnd message (2 technically: 1 IB RDMA paired with an 1 IB Send) so there is no way to split the LNet message into multiple o2iblnd messages.  This could be added but IIRC it&apos;s not a trivial change due to credit accounting and failure handling.&lt;/p&gt;</comment>
                            <comment id="133142" author="dmiter" created="Tue, 10 Nov 2015 16:58:05 +0000"  >&lt;p&gt;Hmm. There are no ways to connect mlx5 &amp;lt;=&amp;gt; mlx4. With map_on_demand=256 on mlx4 I got:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;LNetError: 4927:0:(o2iblnd.c:866:kiblnd_create_conn()) Can&apos;t create QP: -22, send_wr: 65792, recv_wr: 512
LNetError: 4927:0:(o2iblnd.c:866:kiblnd_create_conn()) Skipped 3 previous similar messages
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;with map_on_demand=0 on mlx4 I got on mlx5 node:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;LNetError: 107218:0:(o2iblnd_cb.c:2655:kiblnd_rejected()) 192.168.3.8@o2ib rejected: o2iblnd no resources
LNetError: 107218:0:(o2iblnd_cb.c:2655:kiblnd_rejected()) 192.168.3.8@o2ib rejected: o2iblnd no resources
LNetError: 107218:0:(o2iblnd_cb.c:2655:kiblnd_rejected()) 192.168.3.8@o2ib rejected: o2iblnd no resources
LNetError: 107218:0:(o2iblnd_cb.c:2655:kiblnd_rejected()) 192.168.3.8@o2ib rejected: o2iblnd no resources
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="133143" author="jfilizetti" created="Tue, 10 Nov 2015 17:07:52 +0000"  >&lt;p&gt;Lower your peer_credits and concurrent_sends on the router and you can connect with map_on_demand=0.  If you lower your concurrent_sends (and possibly your peer/router credts) on the server you won&apos;t get the QP: -22 error.&lt;/p&gt;</comment>
                            <comment id="133148" author="dmiter" created="Tue, 10 Nov 2015 17:46:15 +0000"  >&lt;p&gt;Hmm. It looks we cannot use in optimal way mlx5 and OPA/TS cards anyway.&lt;br/&gt;
if I set optimal OPA/TS settings on mlx4:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;options ko2iblnd-opa peer_credits=128 peer_credits_hiw=64 credits=1024 concurrent_sends=256 ntx=2048 map_on_demand=32 fmr_pool_size=2048 fmr_flush_trigger=512 fmr_cache=1
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;I cannot connect because from the mlx5 side with &lt;b&gt;default&lt;/b&gt; settings I got:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;LNetError: 107215:0:(o2iblnd_cb.c:2264:kiblnd_passive_connect()) Can&apos;t accept conn from 192.168.3.8@o2ib, queue depth too large:  128 (&amp;lt;=8 wanted)
LNet: 107215:0:(o2iblnd_cb.c:2291:kiblnd_passive_connect()) Can&apos;t accept conn from 192.168.3.8@o2ib (version 12): max_frags 32 incompatible without FMR pool (256 wanted)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;If I set &lt;tt&gt;map_on_demand=0&lt;/tt&gt; I got QP: -22 error from mlx4. And only when I decrease &lt;tt&gt;peer_credits=16&lt;/tt&gt; on mlx4 does the communication between mlx4 and mlx5 start to work.&lt;/p&gt;

&lt;p&gt;So, the current solution has very limited usage because I cannot use optimal tunables anyway. We cannot recommend it for hybrid fabrics with mlx5.&lt;/p&gt;</comment>
                            <comment id="133167" author="ashehata" created="Tue, 10 Nov 2015 19:12:28 +0000"  >&lt;p&gt;If I understand the description properly, the problem on mlx5 is due to:&lt;/p&gt;
&lt;div class=&quot;code panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;codeContent panelContent&quot;&gt;
&lt;pre class=&quot;code-java&quot;&gt;LNetError: 107215:0:(o2iblnd_cb.c:2264:kiblnd_passive_connect()) Can&apos;t accept conn from 192.168.3.8@o2ib, queue depth too large:  128 (&amp;lt;=8 wanted)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;That happens because the mlx4 has peer_credits set to 128 and mlx5 has that set to the default. Try setting the mlx5 peer_credits to 128 as well.&lt;/p&gt;

&lt;p&gt;Have you tried that?&lt;/p&gt;</comment>
                            <comment id="133173" author="dmiter" created="Tue, 10 Nov 2015 19:41:44 +0000"  >&lt;p&gt;I cannot set peer_credits more than 16 on mlx5 because of &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-7124&quot; title=&quot;MLX5: Limit hit in cap.max_send_wr&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-7124&quot;&gt;&lt;del&gt;LU-7124&lt;/del&gt;&lt;/a&gt;.&lt;/p&gt;</comment>
                            <comment id="133196" author="ashehata" created="Wed, 11 Nov 2015 01:13:03 +0000"  >&lt;p&gt;I updated the &lt;a href=&quot;http://review.whamcloud.com/#/c/17074/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/17074/&lt;/a&gt;.&lt;/p&gt;</comment>
                            <comment id="133214" author="adilger" created="Wed, 11 Nov 2015 09:02:44 +0000"  >&lt;p&gt;What about something like patch &lt;a href=&quot;http://review.whamcloud.com/16141&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/16141&lt;/a&gt; ?  Having larger-order allocations would reduce the number of fragments that need to be sent.&lt;/p&gt;</comment>
                            <comment id="133628" author="dmiter" created="Mon, 16 Nov 2015 21:11:59 +0000"  >&lt;p&gt;Amir, unfortunately, last patch set #4 doesn&apos;t work with different settings as well. From the mlx4 side it reports:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[643723.574292] LNet: 45941:0:(o2iblnd_cb.c:2278:kiblnd_passive_connect()) Can&apos;t accept conn from 192.168.3.102@o2ib (version 12): max_frags 256 too large (32 wanted)
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;Just recall from the mlx5 side &lt;tt&gt;peer_credits=16&lt;/tt&gt;. From mlx4 side &lt;tt&gt;peer_credits=128&lt;/tt&gt;. Probably Andreas is right and we should reduce the number of allocations to make it workable.&lt;/p&gt;</comment>
                            <comment id="133648" author="ashehata" created="Tue, 17 Nov 2015 00:44:08 +0000"  >&lt;p&gt;Dmitry,&lt;/p&gt;

&lt;p&gt;The message you posted only indicates that the active has sent a connection request to the passive, but the passive can&apos;t handle 256. The reply to the connection attempt should go back to the active side indicating 32, and the active side should be attempting a reconnect.  Is that not happening? If the active side doesn&apos;t have map-on-demand the negotiation will fail. What&apos;s the log on the active side say?&lt;/p&gt;</comment>
                            <comment id="133682" author="dmiter" created="Tue, 17 Nov 2015 09:52:42 +0000"  >&lt;p&gt;Amir,&lt;/p&gt;

&lt;p&gt;From active side nothing in kernel messages, but lst show the error:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;create session RPC failed on 12345-192.168.3.8@o2ib: Unknown error 18446744073709551503
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;P.S. the active side is mlx5. So, it doesn&apos;t have map-on-demand.&lt;/p&gt;</comment>
                            <comment id="133683" author="dmiter" created="Tue, 17 Nov 2015 09:56:16 +0000"  >&lt;p&gt;with &lt;tt&gt;map_on_demand=32&lt;/tt&gt; on mlx5 I got the following:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;LNetError: 208538:0:(o2iblnd.c:2085:kiblnd_net_init_pools()) Can&apos;t set fmr pool size (512) &amp;lt; ntx / 4(1280)
LNetError: 208538:0:(o2iblnd.c:2906:kiblnd_startup()) Failed to initialize NI pools: -22
LNetError: 105-4: Error -100 starting up LNI o2ib
LNetError: 208538:0:(rpc.c:1605:srpc_startup()) LNetNIInit() has failed: -100
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="133696" author="jfilizetti" created="Tue, 17 Nov 2015 15:11:49 +0000"  >&lt;p&gt;If you are still using your previous configuration you could modify things to make everything work.&lt;/p&gt;

&lt;p&gt;1. Server with mlx5 doesn&apos;t support FMR so map_on_demand should remain 0.&lt;br/&gt;
2. Router can support FMR with the mlx4 (not sure about hfi) so you could use map_on_demand=256.  You will need to drop your concurrent_sends to &amp;lt;=62 I believe for the QP to not fail creation for the mlx4.&lt;br/&gt;
3. Client with hfi I don&apos;t know if it supports FMR.  But you could use defaults (map_on_demand=0) if not.&lt;/p&gt;

&lt;p&gt;With all that I think your configuration will be able to connect to everything.  At least from what I quickly glanced over with respect to the map_on_demand settings.&lt;/p&gt;
</comment>
                            <comment id="133720" author="olaf" created="Tue, 17 Nov 2015 17:37:16 +0000"  >&lt;p&gt;FWIW 18446744073709551503 = -113 assuming 2&apos;s complement. On x86 Linux, think that&apos;s&lt;br/&gt;
also -EHOSTUNREACH.&lt;/p&gt;</comment>
                            <comment id="133744" author="dmiter" created="Tue, 17 Nov 2015 19:28:38 +0000"  >&lt;p&gt;The following parameters are compatible with mlx5 and works fine for OPA:&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;options ko2iblnd-opa peer_credits=62 peer_credits_hiw=64 credits=1024 concurrent_sends=62 ntx=2048 map_on_demand=256 fmr_pool_size=2048 fmr_flush_trigger=512 fmr_cache=1
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="133958" author="chunteraa" created="Thu, 19 Nov 2015 17:51:28 +0000"  >&lt;p&gt;There are different bundles of the OFED/infiniband kernel drivers available from different sources eg) openfabrics.org OFED, mellanox OFED (MOFED), Truescale OFED+ and RHEL rdma. All these packages seem to tweak the kernel drivers (eg. different versions, additional code, etc.) To further confuse the list, there a multiple versions of the packages (eg. MOFED 2.x, 3.x).&lt;/p&gt;

&lt;p&gt;Which IB driver packaging are you using for testing the ko2iblnd patches ? stock OFED ? MOFED ?&lt;/p&gt;

&lt;p&gt;thanks,&lt;br/&gt;
chris hunter&lt;/p&gt;
</comment>
                            <comment id="133980" author="jfilizetti" created="Thu, 19 Nov 2015 21:25:12 +0000"  >&lt;p&gt;I only used stock centos 6 kernels for the initial work.  Only after noting that MLX5 memory registration does not support FMR have I even started looking at and testing Mellanox ofed.  There are not any issues with this patch and map_on_demand settings that I&apos;m aware of even though they seem to be getting reported here as such.  The problem is that it requires too much low level understanding of the driver to configure and ko2iblnd does not abstract the differing hardware well enough at this point.  My goal with this patch was to allow interop with systems configured for IB WAN performance and those that may come from a vendor solution with different parameters.  Given the current memory registration upstream changes and lack of flexibility, ko2iblnd really needs some additional work to make things more robust and support multiple configurations.  This patch only serves as a stop-gap for that larger necessary work.  The best that really can be done is to make recommendations to people based on their needs here.&lt;/p&gt;</comment>
                            <comment id="134026" author="chunteraa" created="Thu, 19 Nov 2015 23:43:40 +0000"  >&lt;p&gt;Hi Jeremy,&lt;br/&gt;
Thanks for the explanation at least it shows the challenges involved. From your comments the default ko2iblnd-opa parameters (ie. &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-6735&quot; title=&quot;select appropriate optimization options for ko2iblnd according IB cards installed on the system&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-6735&quot;&gt;&lt;del&gt;LU-6735&lt;/del&gt;&lt;/a&gt;) should work for ConnectX&lt;span class=&quot;error&quot;&gt;&amp;#91;123&amp;#93;&lt;/span&gt; and Truescale adapters. &lt;br/&gt;
Our hardware, we have max work requests per QP (max_qp_wr) values of 16351, 16383 or 16384.&lt;/p&gt;</comment>
                            <comment id="134029" author="jfilizetti" created="Fri, 20 Nov 2015 00:01:05 +0000"  >&lt;p&gt;I don&apos;t know anything about the ko2iblnd-opa, I&apos;ve always used a custom module parameter file for Lustre.  Now that you&apos;ve included the link I see this is now being included with Lustre which I wasn&apos;t aware of before.  From what I can see you should be ok to use those parameters with those adapters but I&apos;m not sure they are the &quot;ideal&quot; settings. &lt;/p&gt;</comment>
                            <comment id="134033" author="doug" created="Fri, 20 Nov 2015 00:39:46 +0000"  >&lt;p&gt;This ticket seems to have expanded to be a catch-all for anything related to map_on_demand/peer_credit settings.  I&apos;d rather see this ticket be used for its original purpose, what Jeremy describes above.  Anything new should become a new ticket (or set of tickets) so we don&apos;t get confused and link a bunch of new tickets to this one believing these patches will solve all issues in this area.&lt;/p&gt;

&lt;p&gt;Once patch &lt;a href=&quot;http://review.whamcloud.com/#/c/17074/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/#/c/17074/&lt;/a&gt; has landed, I&apos;d like this ticket to be closed.  If there are any more problems with optimized settings for specific hardware setups, please open separate tickets so they can be prioritized and addressed accordingly.&lt;/p&gt;</comment>
                            <comment id="134375" author="gerrit" created="Tue, 24 Nov 2015 14:25:04 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/17074/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/17074/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-3322&quot; title=&quot;ko2iblnd support for different map_on_demand and peer_credits between systems&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-3322&quot;&gt;&lt;del&gt;LU-3322&lt;/del&gt;&lt;/a&gt; lnet: make connect parameters persistent&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 4c689a573fafcfa1ca7474a275f958e00b1deddc&lt;/p&gt;</comment>
                            <comment id="134383" author="jgmitter" created="Tue, 24 Nov 2015 14:42:47 +0000"  >&lt;p&gt;&lt;a href=&quot;http://review.whamcloud.com/17074/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/17074/&lt;/a&gt; has landed for 2.8.&lt;br/&gt;
Resolving the ticket as noted in the commentary above.&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10010">
                    <name>Duplicate</name>
                                                                <inwardlinks description="is duplicated by">
                                        <issuelink>
            <issuekey id="26914">LU-5718</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="33033">LU-7401</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                        <issuelink>
            <issuekey id="31920">LU-7101</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="33736">LU-7569</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="27145">LU-5783</issuekey>
        </issuelink>
                            </outwardlinks>
                                                                <inwardlinks description="is related to">
                                        <issuelink>
            <issuekey id="32878">LU-7351</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="34042">LU-7650</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="32697">LU-7314</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="55913">LU-12419</issuekey>
        </issuelink>
            <issuelink>
            <issuekey id="30403">LUDOC-286</issuekey>
        </issuelink>
                            </inwardlinks>
                                    </issuelinktype>
                    </issuelinks>
                <attachments>
                            <attachment id="18893" name="compatibility matrix.xlsx" size="11224" author="ashehata" created="Fri, 11 Sep 2015 03:13:07 +0000"/>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                    <customfield id="customfield_10020" key="com.atlassian.jira.plugin.system.customfieldtypes:float">
                        <customfieldname>Bugzilla ID</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>20543.0</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzvsjr:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>8528</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>