<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:17:44 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-8460] (osc_cache.c:658:osc_extent_find()) ASSERTION( (max_pages &amp; ~chunk_mask) == 0 ) failed: LBUG</title>
                <link>https://jira.whamcloud.com/browse/LU-8460</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;As part of CORAL testing I was trying to use ior to write/read some 16MB RPCs.  As soon as the test started it appeared all the Lustre clients took an LBUG, crashed, and rebooted. &lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[263032.826900] LustreError: 54616:0:(mgc_request.c:1536:mgc_apply_recover_logs()) mgc: cannot find uuid by nid 192.168.1.4@o2ib
[263032.839625] Lustre: 54616:0:(mgc_request.c:1756:mgc_process_recover_nodemap_log()) MGC192.168.1.5@o2ib: error processing recovery log lsdraid-cliir: rc = -2
[263032.940768] Lustre: Mounted lsdraid-client
[270650.452658] LustreError: 64198:0:(osc_cache.c:658:osc_extent_find()) ASSERTION( (max_pages &amp;amp; ~chunk_mask) == 0 ) failed: 
[270650.465098] LustreError: 64198:0:(osc_cache.c:658:osc_extent_find()) LBUG
[270650.472830] Pid: 64198, comm: ior
[270650.476666] 
Call Trace:
[270650.482816]  [&amp;lt;ffffffffa08267d3&amp;gt;] libcfs_debug_dumpstack+0x53/0x80 [libcfs]
[270650.492278]  [&amp;lt;ffffffffa0826d75&amp;gt;] lbug_with_loc+0x45/0xc0 [libcfs]
[270650.502128]  [&amp;lt;ffffffffa0dc066d&amp;gt;] osc_extent_find+0x2ec/0x1a98 [osc]
[270650.510720]  [&amp;lt;ffffffff81189b79&amp;gt;] ? zone_statistics+0x89/0xa0
[270650.518827]  [&amp;lt;ffffffffa0dc2354&amp;gt;] ? osc_enter_cache+0x53b/0xe7a [osc]
[270650.527638]  [&amp;lt;ffffffff810b5f8d&amp;gt;] ? ttwu_do_activate.constprop.84+0x5d/0x70
[270650.537084]  [&amp;lt;ffffffff81173317&amp;gt;] ? __alloc_pages_nodemask+0x197/0xba0
[270650.545977]  [&amp;lt;ffffffff81285e38&amp;gt;] ? security_capable+0x18/0x20
[270650.554180]  [&amp;lt;ffffffffa0db8705&amp;gt;] osc_queue_async_io+0x6a5/0x1870 [osc]
[270650.563226]  [&amp;lt;ffffffffa0d2aa70&amp;gt;] ? write_commit_callback+0x0/0x50 [lustre]
[270650.572635]  [&amp;lt;ffffffffa0da55e3&amp;gt;] osc_page_cache_add+0x43/0x140 [osc]
[270650.581481]  [&amp;lt;ffffffffa0dad903&amp;gt;] osc_io_commit_async+0x173/0x440 [osc]
[270650.590561]  [&amp;lt;ffffffffa0d2aa70&amp;gt;] ? write_commit_callback+0x0/0x50 [lustre]
[270650.600035]  [&amp;lt;ffffffffa0959317&amp;gt;] cl_io_commit_async+0x77/0x140 [obdclass]
[270650.609317]  [&amp;lt;ffffffffa0c8654c&amp;gt;] lov_io_commit_async+0x2dc/0x4a0 [lov]
[270650.618397]  [&amp;lt;ffffffffa0d2aa70&amp;gt;] ? write_commit_callback+0x0/0x50 [lustre]
[270650.627864]  [&amp;lt;ffffffffa0d2aa70&amp;gt;] ? write_commit_callback+0x0/0x50 [lustre]
[270650.637324]  [&amp;lt;ffffffffa0959317&amp;gt;] cl_io_commit_async+0x77/0x140 [obdclass]
[270650.646690]  [&amp;lt;ffffffffa0d2d97a&amp;gt;] vvp_io_write_commit+0x17a/0x8d0 [lustre]
[270650.655374]  [&amp;lt;ffffffffa0d1b807&amp;gt;] ll_write_end+0xc7/0x400 [lustre]
[270650.663281]  [&amp;lt;ffffffff8116a214&amp;gt;] generic_file_buffered_write+0x184/0x290
[270650.671879]  [&amp;lt;ffffffff8116b6d5&amp;gt;] __generic_file_aio_write+0x1d5/0x3e0
[270650.680178]  [&amp;lt;ffffffffa0d2e355&amp;gt;] vvp_io_write_start+0x285/0x650 [lustre]
[270650.688744]  [&amp;lt;ffffffffa0958925&amp;gt;] cl_io_start+0x65/0x130 [obdclass]
[270650.696904]  [&amp;lt;ffffffffa095acc5&amp;gt;] cl_io_loop+0xa5/0x190 [obdclass]
[270650.704945]  [&amp;lt;ffffffffa0cdbbef&amp;gt;] ll_file_io_generic+0x67f/0xaa0 [lustre]
[270650.713656]  [&amp;lt;ffffffff811d4256&amp;gt;] ? mem_cgroup_update_page_stat+0x16/0x50
[270650.722373]  [&amp;lt;ffffffffa0cdc2dd&amp;gt;] ll_file_aio_write+0x12d/0x1f0 [lustre]
[270650.731000]  [&amp;lt;ffffffffa0cdc46e&amp;gt;] ll_file_write+0xce/0x1e0 [lustre]
[270650.739126]  [&amp;lt;ffffffff811dec1d&amp;gt;] vfs_write+0xbd/0x1e0
[270650.745994]  [&amp;lt;ffffffff811df6bf&amp;gt;] SyS_write+0x7f/0xe0
[270650.752766]  [&amp;lt;ffffffff81646889&amp;gt;] system_call_fastpath+0x16/0x1b
[270650.760571] 
[270650.763508] Kernel panic - not syncing: LBUG
[270650.769875] CPU: 17 PID: 64198 Comm: ior Tainted: G          IOE  ------------   3.10.0-327.22.2.el7.x86_64 #1
[270650.782689] Hardware name: Intel Corporation S2600WTT/S2600WTT, BIOS SE5C610.86B.01.01.0008.021120151325 02/11/2015
[270650.796038]  ffffffffa0843def 0000000067048a74 ffff88103fd9f560 ffffffff816360fc
[270650.806102]  ffff88103fd9f5e0 ffffffff8162f977 ffffffff00000008 ffff88103fd9f5f0
[270650.816170]  ffff88103fd9f590 0000000067048a74 ffffffffa0dc63b0 0000000000000000
[270650.826189] Call Trace:
[270650.830566]  [&amp;lt;ffffffff816360fc&amp;gt;] dump_stack+0x19/0x1b
[270650.837897]  [&amp;lt;ffffffff8162f977&amp;gt;] panic+0xd8/0x1e7
[270650.844870]  [&amp;lt;ffffffffa0826ddb&amp;gt;] lbug_with_loc+0xab/0xc0 [libcfs]
[270650.853345]  [&amp;lt;ffffffffa0dc066d&amp;gt;] osc_extent_find+0x2ec/0x1a98 [osc]
[270650.862000]  [&amp;lt;ffffffff81189b79&amp;gt;] ? zone_statistics+0x89/0xa0
[270650.869976]  [&amp;lt;ffffffffa0dc2354&amp;gt;] ? osc_enter_cache+0x53b/0xe7a [osc]
[270650.878736]  [&amp;lt;ffffffff810b5f8d&amp;gt;] ? ttwu_do_activate.constprop.84+0x5d/0x70
[270650.888101]  [&amp;lt;ffffffff81173317&amp;gt;] ? __alloc_pages_nodemask+0x197/0xba0
[270650.896965]  [&amp;lt;ffffffff81285e38&amp;gt;] ? security_capable+0x18/0x20
[270650.905069]  [&amp;lt;ffffffffa0db8705&amp;gt;] osc_queue_async_io+0x6a5/0x1870 [osc]
[270650.914081]  [&amp;lt;ffffffffa0d2aa70&amp;gt;] ? mkwrite_commit_callback+0x20/0x20 [lustre]
[270650.923740]  [&amp;lt;ffffffffa0da55e3&amp;gt;] osc_page_cache_add+0x43/0x140 [osc]
[270650.932528]  [&amp;lt;ffffffffa0dad903&amp;gt;] osc_io_commit_async+0x173/0x440 [osc]
[270650.941515]  [&amp;lt;ffffffffa0d2aa70&amp;gt;] ? mkwrite_commit_callback+0x20/0x20 [lustre]
[270650.951198]  [&amp;lt;ffffffffa0959317&amp;gt;] cl_io_commit_async+0x77/0x140 [obdclass]
[270650.960478]  [&amp;lt;ffffffffa0c8654c&amp;gt;] lov_io_commit_async+0x2dc/0x4a0 [lov]
[270650.969481]  [&amp;lt;ffffffffa0d2aa70&amp;gt;] ? mkwrite_commit_callback+0x20/0x20 [lustre]
[270650.979160]  [&amp;lt;ffffffffa0d2aa70&amp;gt;] ? mkwrite_commit_callback+0x20/0x20 [lustre]
[270650.989192]  [&amp;lt;ffffffffa0959317&amp;gt;] cl_io_commit_async+0x77/0x140 [obdclass]
[270650.998797]  [&amp;lt;ffffffffa0d2d97a&amp;gt;] vvp_io_write_commit+0x17a/0x8d0 [lustre]
[270651.008434]  [&amp;lt;ffffffffa0d1b807&amp;gt;] ll_write_end+0xc7/0x400 [lustre]
[270651.017239]  [&amp;lt;ffffffff8116a214&amp;gt;] generic_file_buffered_write+0x184/0x290
[270651.026725]  [&amp;lt;ffffffff8116b6d5&amp;gt;] __generic_file_aio_write+0x1d5/0x3e0
[270651.035938]  [&amp;lt;ffffffffa0d2e355&amp;gt;] vvp_io_write_start+0x285/0x650 [lustre]
[270651.045484]  [&amp;lt;ffffffffa0958925&amp;gt;] cl_io_start+0x65/0x130 [obdclass]
[270651.054443]  [&amp;lt;ffffffffa095acc5&amp;gt;] cl_io_loop+0xa5/0x190 [obdclass]
[270651.063300]  [&amp;lt;ffffffffa0cdbbef&amp;gt;] ll_file_io_generic+0x67f/0xaa0 [lustre]
[270651.072803]  [&amp;lt;ffffffff811d4256&amp;gt;] ? mem_cgroup_update_page_stat+0x16/0x50
[270651.082339]  [&amp;lt;ffffffffa0cdc2dd&amp;gt;] ll_file_aio_write+0x12d/0x1f0 [lustre]
[270651.091766]  [&amp;lt;ffffffffa0cdc46e&amp;gt;] ll_file_write+0xce/0x1e0 [lustre]
[270651.100727]  [&amp;lt;ffffffff811dec1d&amp;gt;] vfs_write+0xbd/0x1e0
[270651.108378]  [&amp;lt;ffffffff811df6bf&amp;gt;] SyS_write+0x7f/0xe0
[270651.115918]  [&amp;lt;ffffffff81646889&amp;gt;] system_call_fastpath+0x16/0x1b 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;Here is what IOR saw: &lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;IOR-3.0.1: MPI Coordinated Test of Parallel I/O

Began: Mon Aug  1 18:28:46 2016
Command line used: /home/johnsali/wolf-3/ior/src/ior -i 5 -v -b 4096M -t 16m
Machine: Linux wolf-6.wolf.hpdd.intel.com
Start time skew across all tasks: 0.00 sec

Test 0 started: Mon Aug  1 18:28:46 2016
Path: /mnt/lustre
FS: 13.9 TiB   Used FS: 0.0%   Inodes: 4.2 Mi   Used Inodes: 0.0%
Participating tasks: 3
Summary:
	api                = POSIX
	test filename      = testFile
	access             = single-shared-file
	pattern            = segmented (1 segment)
	ordering in a file = sequential offsets
	ordering inter file= no tasks offsets
	clients            = 3 (1 per node)
	repetitions        = 5
	xfersize           = 16 MiB
	blocksize          = 4 GiB
	aggregate filesize = 12 GiB

access    bw(MiB/s)  block(KiB) xfer(KiB)  open(s)    wr/rd(s)   close(s)   total(s)   iter
------    ---------  ---------- ---------  --------   --------   --------   --------   ----
Commencing write performance test: Mon Aug  1 18:28:46 2016

Message from syslogd@wolf-6 at Aug  1 18:28:48 ...
 kernel:LustreError: 64198:0:(osc_cache.c:658:osc_extent_find()) ASSERTION( (max_pages &amp;amp; ~chunk_mask) == 0 ) failed: 

Message from syslogd@wolf-6 at Aug  1 18:28:48 ...
 kernel:LustreError: 64198:0:(osc_cache.c:658:osc_extent_find()) LBUG
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;
&lt;p&gt;Attempts to change the max_pages_per_rpc have not been successful: &lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@wolf-8 ~]#  lctl set_param osc.*.max_pages_per_rpc=1024
error: set_param: setting /proc/fs/lustre/osc/lsdraid-OST0000-osc-ffff88104f0c1000/max_pages_per_rpc=1024: Numerical result out of range 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;System crash files can be found at (all clients hit the LBUG/crash/reboot at the same time): &lt;br/&gt;
/scratch/dumps/wolf-6.wolf.hpdd.intel.com/10.8.1.6-2016-08-01-18:29:12&lt;br/&gt;
/scratch/dumps/wolf-7.wolf.hpdd.intel.com/10.8.1.7-2016-08-01-18:29:10 &lt;br/&gt;
/scratch/dumps/wolf-8.wolf.hpdd.intel.com/10.8.1.8-2016-08-01-18:29:11 &lt;/p&gt;</description>
                <environment></environment>
        <key id="38546">LU-8460</key>
            <summary>(osc_cache.c:658:osc_extent_find()) ASSERTION( (max_pages &amp; ~chunk_mask) == 0 ) failed: LBUG</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="1" iconUrl="https://jira.whamcloud.com/images/icons/priorities/blocker.svg">Blocker</priority>
                        <status id="5" iconUrl="https://jira.whamcloud.com/images/icons/statuses/resolved.png" description="A resolution has been taken, and it is awaiting verification by reporter. From here issues are either reopened, or are closed.">Resolved</status>
                    <statusCategory id="3" key="done" colorName="success"/>
                                    <resolution id="1">Fixed</resolution>
                                        <assignee username="bobijam">Zhenyu Xu</assignee>
                                    <reporter username="jsalians_intel">John Salinas</reporter>
                        <labels>
                            <label>LS_RZ</label>
                    </labels>
                <created>Mon, 1 Aug 2016 19:17:49 +0000</created>
                <updated>Thu, 14 Jun 2018 21:39:15 +0000</updated>
                            <resolved>Wed, 17 Aug 2016 23:35:39 +0000</resolved>
                                                    <fixVersion>Lustre 2.9.0</fixVersion>
                                        <due></due>
                            <votes>0</votes>
                                    <watches>6</watches>
                                                                            <comments>
                            <comment id="160475" author="jsalians_intel" created="Mon, 1 Aug 2016 19:23:23 +0000"  >&lt;p&gt;This is currently blocking Lustre Streaming testing for CORAL&lt;/p&gt;</comment>
                            <comment id="160574" author="gerrit" created="Tue, 2 Aug 2016 17:56:24 +0000"  >&lt;p&gt;Andreas Dilger (andreas.dilger@intel.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/21637&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/21637&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8460&quot; title=&quot;(osc_cache.c:658:osc_extent_find()) ASSERTION( (max_pages &amp;amp; ~chunk_mask) == 0 ) failed: LBUG&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8460&quot;&gt;&lt;del&gt;LU-8460&lt;/del&gt;&lt;/a&gt; osc: improve LASSERT for RPC size check&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: 75bb06909b96e21b6b10e28969ed770da384115b&lt;/p&gt;</comment>
                            <comment id="160575" author="adilger" created="Tue, 2 Aug 2016 17:56:47 +0000"  >&lt;p&gt;What version of Lustre are you running?&lt;/p&gt;

&lt;p&gt;Also, based on your last comments do you have 16MB RPCs enabled at all?  The stock Lustre code should handle up to 4MB RPC size out of the box, so it is strange that this is failing when essentially just doing 16MB sized writes?&lt;/p&gt;

&lt;p&gt;If you are able to repeat this, could you please provide the steps used for setup (e.g. the current value of &lt;tt&gt;obdfilter.&amp;#42;.brw_size&lt;/tt&gt; on the OSS, &lt;tt&gt;osc.&amp;#42;.max_pages_per_rpc&lt;/tt&gt; and other relevant settings on the client) and improve the LASSERT() in &lt;tt&gt;osc_extent_find()&lt;/tt&gt; with the patch &lt;a href=&quot;http://review.whamcloud.com/21637&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/21637&lt;/a&gt; to help debug this problem.&lt;/p&gt;
</comment>
                            <comment id="160576" author="jsalians_intel" created="Tue, 2 Aug 2016 18:01:49 +0000"  >&lt;p&gt;This specific hit was on master, it appears we have the same issue for 2.9 or master though.  We are not using 2.8 because it does not have the large 16MB RPC patches we are looking to use for Lustre streaming. &lt;/p&gt;

&lt;p&gt;It appears that setting this on the clients failed: lctl set_param osc.*.max_pages_per_rpc=1024 because we had not set this on the OSS nodes: lctl set_param obdfilter.lsdraid-OST0000.brw_size=16.   The default was &quot;1&quot; which is what it was when we hit this issue.  Last night we set both of these and were able to do a couple of IOR runs and we saw 16MB pages being used: &lt;/p&gt;

&lt;p&gt;OSS0&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;root@wolf-3 ~&amp;#93;&lt;/span&gt;# cat /proc/fs/lustre/obdfilter/lsdraid-OST0000/brw_stats &lt;br/&gt;
snapshot_time:         1470125909.249936 (secs.usecs)&lt;/p&gt;

&lt;p&gt;                           read      |     write&lt;br/&gt;
pages per bulk r/w     rpcs  % cum % |  rpcs        % cum %&lt;br/&gt;
1:		         5   0   0   | 597115  50  50&lt;br/&gt;
2:		         2   0   0   |    2   0  50&lt;br/&gt;
4:		         0   0   0   |    0   0  50&lt;br/&gt;
8:		         0   0   0   |    0   0  50&lt;br/&gt;
16:		         0   0   0   |    0   0  50&lt;br/&gt;
32:		         0   0   0   |    0   0  50&lt;br/&gt;
64:		         0   0   0   |    0   0  50&lt;br/&gt;
128:		         0   0   0   |    0   0  50&lt;br/&gt;
256:		        43   0   0   |   82   0  50&lt;br/&gt;
512:		         2   0   0   | 153548  12  62&lt;br/&gt;
1K:		         2   0   0   |   49   0  62&lt;br/&gt;
2K:		        39   0   0   |  141   0  62&lt;br/&gt;
4K:		    369260  99 100   | 443071  37 100&lt;/p&gt;

&lt;p&gt;                           read      |     write&lt;br/&gt;
discontiguous pages    rpcs  % cum % |  rpcs        % cum %&lt;br/&gt;
0:		    369353 100 100   | 597117  50  50&lt;br/&gt;
1:		         0   0 100   |    0   0  50&lt;br/&gt;
2:		         0   0 100   |    0   0  50&lt;br/&gt;
3:		         0   0 100   |    0   0  50&lt;br/&gt;
4:		         0   0 100   |    0   0  50&lt;br/&gt;
5:		         0   0 100   |    0   0  50&lt;br/&gt;
6:		         0   0 100   |    0   0  50&lt;br/&gt;
7:		         0   0 100   |    0   0  50&lt;br/&gt;
8:		         0   0 100   |    0   0  50&lt;br/&gt;
9:		         0   0 100   |    0   0  50&lt;br/&gt;
10:		         0   0 100   |    0   0  50&lt;br/&gt;
11:		         0   0 100   |    0   0  50&lt;br/&gt;
12:		         0   0 100   |    0   0  50&lt;br/&gt;
13:		         0   0 100   |    0   0  50&lt;br/&gt;
14:		         0   0 100   |    0   0  50&lt;br/&gt;
15:		         0   0 100   |    0   0  50&lt;br/&gt;
16:		         0   0 100   |    0   0  50&lt;br/&gt;
17:		         0   0 100   |    0   0  50&lt;br/&gt;
18:		         0   0 100   |    0   0  50&lt;br/&gt;
19:		         0   0 100   |    0   0  50&lt;br/&gt;
20:		         0   0 100   |    0   0  50&lt;br/&gt;
21:		         0   0 100   |    0   0  50&lt;br/&gt;
22:		         0   0 100   |    0   0  50&lt;br/&gt;
23:		         0   0 100   |    0   0  50&lt;br/&gt;
24:		         0   0 100   |    0   0  50&lt;br/&gt;
25:		         0   0 100   |    0   0  50&lt;br/&gt;
26:		         0   0 100   |    0   0  50&lt;br/&gt;
27:		         0   0 100   |    0   0  50&lt;br/&gt;
28:		         0   0 100   |    0   0  50&lt;br/&gt;
29:		         0   0 100   |    0   0  50&lt;br/&gt;
30:		         0   0 100   |    0   0  50&lt;br/&gt;
31:		         0   0 100   | 596891  49 100&lt;/p&gt;

&lt;p&gt;                           read      |     write&lt;br/&gt;
disk I/Os in flight    ios   % cum % |  ios         % cum %&lt;br/&gt;
1:		     13706   3   3   | 644001  53  53&lt;br/&gt;
2:		     32744   8  12   | 204951  17  71&lt;br/&gt;
3:		     26076   7  19   | 65415   5  76&lt;br/&gt;
4:		     41077  11  30   | 9928   0  77&lt;br/&gt;
5:		     62304  16  47   | 6889   0  77&lt;br/&gt;
6:		     71872  19  67   | 5011   0  78&lt;br/&gt;
7:		     63965  17  84   | 3906   0  78&lt;br/&gt;
8:		     41954  11  95   | 3087   0  78&lt;br/&gt;
9:		     15655   4 100   | 2566   0  79&lt;br/&gt;
10:		         0   0 100   | 2222   0  79&lt;br/&gt;
11:		         0   0 100   | 2485   0  79&lt;br/&gt;
12:		         0   0 100   | 3936   0  79&lt;br/&gt;
13:		         0   0 100   | 4464   0  80&lt;br/&gt;
14:		         0   0 100   | 4682   0  80&lt;br/&gt;
15:		         0   0 100   | 3413   0  80&lt;br/&gt;
16:		         0   0 100   | 2469   0  81&lt;br/&gt;
17:		         0   0 100   | 2331   0  81&lt;br/&gt;
18:		         0   0 100   | 2830   0  81&lt;br/&gt;
19:		         0   0 100   | 7537   0  82&lt;br/&gt;
20:		         0   0 100   | 57506   4  87&lt;br/&gt;
21:		         0   0 100   | 123813  10  97&lt;br/&gt;
22:		         0   0 100   | 21147   1  99&lt;br/&gt;
23:		         0   0 100   | 4920   0  99&lt;br/&gt;
24:		         0   0 100   | 4496   0  99&lt;br/&gt;
25:		         0   0 100   |    3   0 100&lt;/p&gt;

&lt;p&gt;                           read      |     write&lt;br/&gt;
I/O time (1/1000s)     ios   % cum % |  ios         % cum %&lt;br/&gt;
1:		         4   0   0   |    0   0   0&lt;br/&gt;
2:		         0   0   0   |    0   0   0&lt;br/&gt;
4:		         0   0   0   |    0   0   0&lt;br/&gt;
8:		         0   0   0   |    0   0   0&lt;br/&gt;
16:		         0   0   0   |    0   0   0&lt;br/&gt;
32:		      9997   2   2   |    0   0   0&lt;br/&gt;
64:		    136162  36  39   |    0   0   0&lt;br/&gt;
128:		    172673  46  86   |    0   0   0&lt;br/&gt;
256:		     49096  13  99   |    0   0   0&lt;br/&gt;
512:		      1317   0  99   |    0   0   0&lt;br/&gt;
1K:		        22   0  99   |    0   0   0&lt;br/&gt;
2K:		        21   0 100   |    0   0   0&lt;/p&gt;

&lt;p&gt;                           read      |     write&lt;br/&gt;
disk I/O size          ios   % cum % |  ios         % cum %&lt;br/&gt;
4:		         0   0   0   |   25   0   0&lt;br/&gt;
8:		         0   0   0   |   45   0   0&lt;br/&gt;
16:		         0   0   0   |    0   0   0&lt;br/&gt;
32:		         0   0   0   |    1   0   0&lt;br/&gt;
64:		         0   0   0   |    1   0   0&lt;br/&gt;
128:		         0   0   0   | 597016  50  50&lt;br/&gt;
256:		         0   0   0   |   18   0  50&lt;br/&gt;
512:		         0   0   0   |    9   0  50&lt;br/&gt;
1K:		         1   0   0   |    0   0  50&lt;br/&gt;
2K:		         0   0   0   |    0   0  50&lt;br/&gt;
4K:		         1   0   0   |    0   0  50&lt;br/&gt;
8K:		         2   0   0   |    2   0  50&lt;br/&gt;
16K:		         0   0   0   |    0   0  50&lt;br/&gt;
32K:		         0   0   0   |    0   0  50&lt;br/&gt;
64K:		         0   0   0   |    0   0  50&lt;br/&gt;
128K:		         0   0   0   |    0   0  50&lt;br/&gt;
256K:		         0   0   0   |    0   0  50&lt;br/&gt;
512K:		         0   0   0   |    0   0  50&lt;br/&gt;
1M:		        43   0   0   |   82   0  50&lt;br/&gt;
2M:		         2   0   0   | 153548  12  62&lt;br/&gt;
4M:		         2   0   0   |   49   0  62&lt;br/&gt;
8M:		        39   0   0   |  141   0  62&lt;br/&gt;
16M:		    369260  99 100   | 443071  37 100&lt;/p&gt;

&lt;p&gt;From one client:&lt;br/&gt;
&lt;span class=&quot;error&quot;&gt;&amp;#91;root@wolf-6 mdisolation_step2_draid_testing&amp;#93;&lt;/span&gt;# lctl get_param osc.*.rpc_stats &lt;br/&gt;
osc.lsdraid-OST0000-osc-ffff88104e13d800.rpc_stats=&lt;br/&gt;
snapshot_time:         1470098591.939087 (secs.usecs)&lt;br/&gt;
read RPCs in flight:  0&lt;br/&gt;
write RPCs in flight: 1&lt;br/&gt;
pending write pages:  0&lt;br/&gt;
pending read pages:   0&lt;/p&gt;

&lt;p&gt;			read			write&lt;br/&gt;
pages per rpc         rpcs   % cum % |       rpcs   % cum %&lt;br/&gt;
1:		         0   0   0   |          0   0   0&lt;br/&gt;
2:		         0   0   0   |          0   0   0&lt;br/&gt;
4:		         0   0   0   |          0   0   0&lt;br/&gt;
8:		         0   0   0   |          0   0   0&lt;br/&gt;
16:		         0   0   0   |          0   0   0&lt;br/&gt;
32:		         0   0   0   |          0   0   0&lt;br/&gt;
64:		         0   0   0   |          0   0   0&lt;br/&gt;
128:		         0   0   0   |          0   0   0&lt;br/&gt;
256:		        17   0   0   |         10   0   0&lt;br/&gt;
512:		         2   0   0   |          2   0   0&lt;br/&gt;
1024:		         2   0   0   |          5   0   0&lt;br/&gt;
2048:		        13   0   0   |         53   0   0&lt;br/&gt;
4096:		    116941  99 100   |     146350  99 100&lt;/p&gt;

&lt;p&gt;			read			write&lt;br/&gt;
rpcs in flight        rpcs   % cum % |       rpcs   % cum %&lt;br/&gt;
0:		         0   0   0   |          0   0   0&lt;br/&gt;
1:		     14281  12  12   |      66644  45  45&lt;br/&gt;
2:		     31530  26  39   |      25379  17  62&lt;br/&gt;
3:		     71163  60  99   |       3173   2  65&lt;br/&gt;
4:		         1   0 100   |       4759   3  68&lt;br/&gt;
5:		         0   0 100   |       2206   1  69&lt;br/&gt;
6:		         0   0 100   |       1957   1  71&lt;br/&gt;
7:		         0   0 100   |       3053   2  73&lt;br/&gt;
8:		         0   0 100   |      39249  26 100&lt;/p&gt;

&lt;p&gt;			read			write&lt;br/&gt;
offset                rpcs   % cum % |       rpcs   % cum %&lt;br/&gt;
0:		        20   0   0   |         40   0   0&lt;br/&gt;
1:		         0   0   0   |          0   0   0&lt;br/&gt;
2:		         0   0   0   |          0   0   0&lt;br/&gt;
4:		         0   0   0   |          0   0   0&lt;br/&gt;
8:		         0   0   0   |          0   0   0&lt;br/&gt;
16:		         0   0   0   |          0   0   0&lt;br/&gt;
32:		         0   0   0   |          0   0   0&lt;br/&gt;
64:		         0   0   0   |          0   0   0&lt;br/&gt;
128:		         0   0   0   |          0   0   0&lt;br/&gt;
256:		         5   0   0   |          4   0   0&lt;br/&gt;
512:		         8   0   0   |          0   0   0&lt;br/&gt;
1024:		         4   0   0   |          0   0   0&lt;br/&gt;
2048:		         6   0   0   |          3   0   0&lt;br/&gt;
4096:		        19   0   0   |         41   0   0&lt;br/&gt;
8192:		        40   0   0   |         82   0   0&lt;br/&gt;
16384:		        80   0   0   |        164   0   0&lt;br/&gt;
32768:		       159   0   0   |        328   0   0&lt;br/&gt;
65536:		       320   0   0   |        656   0   0&lt;br/&gt;
131072:		       637   0   1   |       1311   0   1&lt;br/&gt;
262144:		      1274   1   2   |       2621   1   3&lt;br/&gt;
524288:		      2553   2   4   |       5241   3   7&lt;br/&gt;
1048576:		      5107   4   8   |       8413   5  12&lt;br/&gt;
2097152:		     10211   8  17   |      16784  11  24&lt;br/&gt;
4194304:		     18374  15  33   |      22058  15  39&lt;br/&gt;
8388608:		     36778  31  64   |      43061  29  68&lt;br/&gt;
16777216:		     41380  35 100   |      45613  31 100 &lt;/p&gt;

&lt;p&gt;However, now it appears mdtest is hanging &amp;#8211; we will have to find some time to collect data on that. &lt;/p&gt;
</comment>
                            <comment id="160732" author="jsalians_intel" created="Wed, 3 Aug 2016 21:59:20 +0000"  >&lt;p&gt;Today I have hit this several times trying to reproduce a different hang.  In these cases I do NOT have 16MB blocks set.  It is a completely vanilla run and as soon as mdtest starts I hit this: &lt;/p&gt;

&lt;p&gt;]# mpirun -np 4 -machinefile hosts /home/johnsali/wolf-3/mdtest-1.8.4/mdtest -i 2 -I 2 -z 2 -u -w 1 -d /mnt/lustre/&lt;br/&gt;
&amp;#8211; started at 08/03/2016 21:55:07 &amp;#8211;&lt;/p&gt;

&lt;p&gt;mdtest-1.8.3 was launched with 4 total task(s) on 4 nodes&lt;br/&gt;
Command line used: /home/johnsali/wolf-3/mdtest-1.8.4/mdtest -i 2 -I 2 -z 2 -u -w 1 -d /mnt/lustre/&lt;br/&gt;
Path: /mnt/lustre&lt;br/&gt;
FS: 13.9 TiB   Used FS: 0.0%   Inodes: 55.8 Mi   Used Inodes: 89.5%&lt;br/&gt;
4 tasks, 24 files/directories&lt;/p&gt;

&lt;p&gt;Message from syslogd@wolf-6 at Aug  3 21:55:08 ...&lt;br/&gt;
 kernel:LustreError: 6182:0:(osc_cache.c:658:osc_extent_find()) ASSERTION( (max_pages &amp;amp; ~chunk_mask) == 0 ) failed: &lt;br/&gt;
Message from syslogd@wolf-6 at Aug  3 21:55:08 ...&lt;br/&gt;
 kernel:LustreError: 6182:0:(osc_cache.c:658:osc_extent_find()) LBUG&lt;/p&gt;</comment>
                            <comment id="160735" author="adilger" created="Wed, 3 Aug 2016 22:47:42 +0000"  >&lt;p&gt;John, could you please reproduce the problem with the above patch applied, so that we can see why the assertion is failing.&lt;/p&gt;</comment>
                            <comment id="160736" author="pjones" created="Wed, 3 Aug 2016 22:47:43 +0000"  >&lt;p&gt;Bobijam&lt;/p&gt;

&lt;p&gt;Could you please assist with this issue?&lt;/p&gt;

&lt;p&gt;John&lt;/p&gt;

&lt;p&gt;Were you using Andreas&apos;s diagnostic patch when you were running?&lt;/p&gt;

&lt;p&gt;Peter&lt;/p&gt;</comment>
                            <comment id="160742" author="jay" created="Thu, 4 Aug 2016 00:42:03 +0000"  >&lt;p&gt;the issue is that the brw_size is 16M on the server side but the max_pages_per_rpc is set to 1024, which means 4M, on the client side&lt;/p&gt;

&lt;p&gt;there is a defect to handle chunk size on the client side, which should be set to the maximum of &#8220;size reported from OST&#8221; and &#8220;max_pages_per_rpc&#8221;&lt;/p&gt;</comment>
                            <comment id="161142" author="jsalians_intel" created="Mon, 8 Aug 2016 17:26:46 +0000"  >&lt;p&gt;With the patch I see: &lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;kernel:LustreError: 137570:0:(osc_cache.c:661:osc_extent_find()) ASSERTION( (max_pages &amp;amp; ~chunk_mask) == 0 ) failed: max_pages: 0x100 chunk_bits: 24 chunk_mask: 0xfffffffffffff000
kernel:LustreError: 137570:0:(osc_cache.c:661:osc_extent_find()) LBUG
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;This is with default settings (no 16MB RPCs set): &lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;client [root@wolf-6 ~]# cat /proc/fs/lustre/osc/lsdraid-OST0000-osc-ffff88084eccc800/max_pages_per_rpc
256 
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;OSS: &lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[root@wolf-3 kernel]# lctl get_param obdfilter.lsdraid-OST0000.brw_size
obdfilter.lsdraid-OST0000.brw_size=1
 [root@wolf-4 ~]# lctl get_param obdfilter.lsdraid-OST0000.brw_size
obdfilter.lsdraid-OST0000.brw_size=1
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[413569.867436] Lustre: Mounted lsdraid-client
[414679.351734] LustreError: 137570:0:(osc_cache.c:661:osc_extent_find()) ASSERTION( (max_pages &amp;amp; ~chunk_mask) == 0 ) failed: max_pages: 0x100 chunk_bits: 24 chunk_mask: 0xfffffffffffff000
[414679.373266] LustreError: 137570:0:(osc_cache.c:661:osc_extent_find()) LBUG
[414679.382571] Pid: 137570, comm: mdtest
[414679.388269] 
Call Trace:
[414679.395587]  [&amp;lt;ffffffffa071d7d3&amp;gt;] libcfs_debug_dumpstack+0x53/0x80 [libcfs]
[414679.404872]  [&amp;lt;ffffffffa071dd75&amp;gt;] lbug_with_loc+0x45/0xc0 [libcfs]
[414679.413187]  [&amp;lt;ffffffffa0e31677&amp;gt;] osc_extent_find+0x2f6/0x1aa2 [osc]
[414679.421671]  [&amp;lt;ffffffffa0e3335e&amp;gt;] ? osc_enter_cache+0x53b/0xe7a [osc]
[414679.430159]  [&amp;lt;ffffffff81173317&amp;gt;] ? __alloc_pages_nodemask+0x197/0xba0
[414679.438756]  [&amp;lt;ffffffffa0e1780b&amp;gt;] ? osc_lru_alloc+0x3b/0x390 [osc]
[414679.446884]  [&amp;lt;ffffffff81285e38&amp;gt;] ? security_capable+0x18/0x20
[414679.454653]  [&amp;lt;ffffffffa0e29705&amp;gt;] osc_queue_async_io+0x6a5/0x1870 [osc]
[414679.463245]  [&amp;lt;ffffffffa0cf9c8c&amp;gt;] ? lov_merge_lvb_kms+0x12c/0x450 [lov]
[414679.471818]  [&amp;lt;ffffffffa0e17b60&amp;gt;] ? osc_page_init+0x0/0x210 [osc]
[414679.479775]  [&amp;lt;ffffffffa0d9ba70&amp;gt;] ? write_commit_callback+0x0/0x50 [lustre]
[414679.488686]  [&amp;lt;ffffffffa0e165e3&amp;gt;] osc_page_cache_add+0x43/0x140 [osc]
[414679.496969]  [&amp;lt;ffffffffa0e1e903&amp;gt;] osc_io_commit_async+0x173/0x440 [osc]
[414679.505470]  [&amp;lt;ffffffffa0d9ba70&amp;gt;] ? write_commit_callback+0x0/0x50 [lustre]
[414679.514355]  [&amp;lt;ffffffffa0a65557&amp;gt;] cl_io_commit_async+0x77/0x140 [obdclass]
[414679.523097]  [&amp;lt;ffffffffa0cf754c&amp;gt;] lov_io_commit_async+0x2dc/0x4a0 [lov]
[414679.531506]  [&amp;lt;ffffffffa0d9ba70&amp;gt;] ? write_commit_callback+0x0/0x50 [lustre]
[414679.540302]  [&amp;lt;ffffffffa0d9ba70&amp;gt;] ? write_commit_callback+0x0/0x50 [lustre]
[414679.549061]  [&amp;lt;ffffffffa0a65557&amp;gt;] cl_io_commit_async+0x77/0x140 [obdclass]
[414679.557714]  [&amp;lt;ffffffffa0d9e97a&amp;gt;] vvp_io_write_commit+0x17a/0x8d0 [lustre]
[414679.566324]  [&amp;lt;ffffffffa0d9f39f&amp;gt;] vvp_io_write_start+0x2cf/0x650 [lustre]
[414679.574846]  [&amp;lt;ffffffffa0a64b65&amp;gt;] cl_io_start+0x65/0x130 [obdclass]
[414679.582796]  [&amp;lt;ffffffffa0a66f05&amp;gt;] cl_io_loop+0xa5/0x190 [obdclass]
[414679.590648]  [&amp;lt;ffffffffa0d4cbef&amp;gt;] ll_file_io_generic+0x67f/0xaa0 [lustre]
[414679.599179]  [&amp;lt;ffffffffa0d4d2dd&amp;gt;] ll_file_aio_write+0x12d/0x1f0 [lustre]
[414679.607620]  [&amp;lt;ffffffffa0d4d46e&amp;gt;] ll_file_write+0xce/0x1e0 [lustre]
[414679.615561]  [&amp;lt;ffffffff811dec1d&amp;gt;] vfs_write+0xbd/0x1e0
[414679.622248]  [&amp;lt;ffffffff811df6bf&amp;gt;] SyS_write+0x7f/0xe0
[414679.628843]  [&amp;lt;ffffffff81646889&amp;gt;] system_call_fastpath+0x16/0x1b
[414679.636498] 
[414679.639249] Kernel panic - not syncing: LBUG
[414679.644949] CPU: 15 PID: 137570 Comm: mdtest Tainted: G          IOE  ------------   3.10.0-327.22.2.el7.x86_64 #1
[414679.657442] Hardware name: Intel Corporation S2600WTT/S2600WTT, BIOS SE5C610.86B.01.01.0008.021120151325 02/11/2015
[414679.670035]  ffffffffa073adef 00000000a1cd2686 ffff88103ee6f6f8 ffffffff816360fc
[414679.679336]  ffff88103ee6f778 ffffffff8162f977 ffffffff00000008 ffff88103ee6f788
[414679.688629]  ffff88103ee6f728 00000000a1cd2686 ffffffffa0e373b0 0000000000000000
[414679.697931] Call Trace:
[414679.701624]  [&amp;lt;ffffffff816360fc&amp;gt;] dump_stack+0x19/0x1b
[414679.708306]  [&amp;lt;ffffffff8162f977&amp;gt;] panic+0xd8/0x1e7
[414679.714580]  [&amp;lt;ffffffffa071dddb&amp;gt;] lbug_with_loc+0xab/0xc0 [libcfs]
[414679.722387]  [&amp;lt;ffffffffa0e31677&amp;gt;] osc_extent_find+0x2f6/0x1aa2 [osc]
[414679.730387]  [&amp;lt;ffffffffa0e3335e&amp;gt;] ? osc_enter_cache+0x53b/0xe7a [osc]
[414679.738450]  [&amp;lt;ffffffff81173317&amp;gt;] ? __alloc_pages_nodemask+0x197/0xba0
[414679.746598]  [&amp;lt;ffffffffa0e1780b&amp;gt;] ? osc_lru_alloc+0x3b/0x390 [osc]
[414679.754346]  [&amp;lt;ffffffff81285e38&amp;gt;] ? security_capable+0x18/0x20
[414679.761704]  [&amp;lt;ffffffffa0e29705&amp;gt;] osc_queue_async_io+0x6a5/0x1870 [osc]
[414679.769940]  [&amp;lt;ffffffffa0cf9c8c&amp;gt;] ? lov_merge_lvb_kms+0x12c/0x450 [lov]
[414679.778188]  [&amp;lt;ffffffffa0e17b60&amp;gt;] ? osc_lru_alloc+0x390/0x390 [osc]
[414679.786036]  [&amp;lt;ffffffffa0d9ba70&amp;gt;] ? mkwrite_commit_callback+0x20/0x20 [lustre]
[414679.794960]  [&amp;lt;ffffffffa0e165e3&amp;gt;] osc_page_cache_add+0x43/0x140 [osc]
[414679.803203]  [&amp;lt;ffffffffa0e1e903&amp;gt;] osc_io_commit_async+0x173/0x440 [osc]
[414679.811621]  [&amp;lt;ffffffffa0d9ba70&amp;gt;] ? mkwrite_commit_callback+0x20/0x20 [lustre]
[414679.820732]  [&amp;lt;ffffffffa0a65557&amp;gt;] cl_io_commit_async+0x77/0x140 [obdclass]
[414679.829440]  [&amp;lt;ffffffffa0cf754c&amp;gt;] lov_io_commit_async+0x2dc/0x4a0 [lov]
[414679.837867]  [&amp;lt;ffffffffa0d9ba70&amp;gt;] ? mkwrite_commit_callback+0x20/0x20 [lustre]
[414679.846976]  [&amp;lt;ffffffffa0d9ba70&amp;gt;] ? mkwrite_commit_callback+0x20/0x20 [lustre]
[414679.856076]  [&amp;lt;ffffffffa0a65557&amp;gt;] cl_io_commit_async+0x77/0x140 [obdclass]
[414679.864791]  [&amp;lt;ffffffffa0d9e97a&amp;gt;] vvp_io_write_commit+0x17a/0x8d0 [lustre]
[414679.873498]  [&amp;lt;ffffffffa0d9f39f&amp;gt;] vvp_io_write_start+0x2cf/0x650 [lustre]
[414679.882106]  [&amp;lt;ffffffffa0a64b65&amp;gt;] cl_io_start+0x65/0x130 [obdclass]
[414679.890136]  [&amp;lt;ffffffffa0a66f05&amp;gt;] cl_io_loop+0xa5/0x190 [obdclass]
[414679.898055]  [&amp;lt;ffffffffa0d4cbef&amp;gt;] ll_file_io_generic+0x67f/0xaa0 [lustre]
[414679.906650]  [&amp;lt;ffffffffa0d4d2dd&amp;gt;] ll_file_aio_write+0x12d/0x1f0 [lustre]
[414679.915158]  [&amp;lt;ffffffffa0d4d46e&amp;gt;] ll_file_write+0xce/0x1e0 [lustre]
[414679.923158]  [&amp;lt;ffffffff811dec1d&amp;gt;] vfs_write+0xbd/0x1e0
[414679.929946]  [&amp;lt;ffffffff811df6bf&amp;gt;] SyS_write+0x7f/0xe0
[414679.936584]  [&amp;lt;ffffffff81646889&amp;gt;] system_call_fastpath+0x16/0x1b

/scratch/dumps/wolf-6.wolf.hpdd.intel.com/10.8.1.6-2016-08-08-17:12:27/
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                            <comment id="161163" author="jay" created="Mon, 8 Aug 2016 18:08:00 +0000"  >&lt;p&gt;chunk_bits is 24 means 16MB block size on the OST side and max_pages_per_rpc was set to 256 unfortunately.&lt;/p&gt;</comment>
                            <comment id="161231" author="gerrit" created="Tue, 9 Aug 2016 02:57:26 +0000"  >&lt;p&gt;Bobi Jam (bobijam@hotmail.com) uploaded a new patch: &lt;a href=&quot;http://review.whamcloud.com/21825&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/21825&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8460&quot; title=&quot;(osc_cache.c:658:osc_extent_find()) ASSERTION( (max_pages &amp;amp; ~chunk_mask) == 0 ) failed: LBUG&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8460&quot;&gt;&lt;del&gt;LU-8460&lt;/del&gt;&lt;/a&gt; osc: chunk size should not bigger than RPC size&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: 1&lt;br/&gt;
Commit: bde1f813f387f8e15492a864e8c3a78eb0eb3ff4&lt;/p&gt;</comment>
                            <comment id="161651" author="jay" created="Thu, 11 Aug 2016 17:36:34 +0000"  >&lt;p&gt;&lt;a href=&quot;https://jira.whamcloud.com/secure/ViewProfile.jspa?name=jsalians_intel&quot; class=&quot;user-hover&quot; rel=&quot;jsalians_intel&quot;&gt;jsalians_intel&lt;/a&gt; can you please verify that patch 21825 actually fixes the problem?&lt;/p&gt;</comment>
                            <comment id="161664" author="jsalians_intel" created="Thu, 11 Aug 2016 18:30:49 +0000"  >&lt;p&gt;Currently this bug is blocking our progress: &lt;a href=&quot;https://jira.hpdd.intel.com/browse/LU-8498&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://jira.hpdd.intel.com/browse/LU-8498&lt;/a&gt; &lt;/p&gt;</comment>
                            <comment id="161773" author="adilger" created="Fri, 12 Aug 2016 17:58:29 +0000"  >&lt;p&gt;It seems that it would be possible to test this with a ZFS OST with &lt;tt&gt;recordsize=1024k&lt;/tt&gt; and then manually setting &lt;tt&gt;brw_size=512K&lt;/tt&gt; on an unpatched OST, and verifying that the client no longer crashes.  Similarly, testing that it isn&apos;t possible to set &lt;tt&gt;brw_size&lt;/tt&gt; smaller than &lt;tt&gt;recordsize&lt;/tt&gt; on a patched OST would be useful, including changing &lt;tt&gt;recordsize&lt;/tt&gt; after mount.&lt;/p&gt;</comment>
                            <comment id="161947" author="gerrit" created="Mon, 15 Aug 2016 21:12:30 +0000"  >&lt;p&gt;Oleg Drokin (oleg.drokin@intel.com) merged in patch &lt;a href=&quot;http://review.whamcloud.com/21825/&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;http://review.whamcloud.com/21825/&lt;/a&gt;&lt;br/&gt;
Subject: &lt;a href=&quot;https://jira.whamcloud.com/browse/LU-8460&quot; title=&quot;(osc_cache.c:658:osc_extent_find()) ASSERTION( (max_pages &amp;amp; ~chunk_mask) == 0 ) failed: LBUG&quot; class=&quot;issue-link&quot; data-issue-key=&quot;LU-8460&quot;&gt;&lt;del&gt;LU-8460&lt;/del&gt;&lt;/a&gt; osc: max_pages_per_rpc should be chunk size aligned&lt;br/&gt;
Project: fs/lustre-release&lt;br/&gt;
Branch: master&lt;br/&gt;
Current Patch Set: &lt;br/&gt;
Commit: 13834f5aeef42d3c358574ac59475c0758dce300&lt;/p&gt;</comment>
                            <comment id="162297" author="pjones" created="Wed, 17 Aug 2016 23:35:39 +0000"  >&lt;p&gt;Landed for 2.9&lt;/p&gt;</comment>
                    </comments>
                <issuelinks>
                            <issuelinktype id="10011">
                    <name>Related</name>
                                            <outwardlinks description="is related to ">
                                                        </outwardlinks>
                                                        </issuelinktype>
                    </issuelinks>
                <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|hzyj6n:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>