<?xml version="1.0" encoding="UTF-8"?>
<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:51:28 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92" >
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-12311] recovery-double-scale test pairwise_fail crashed with OOM</title>
                <link>https://jira.whamcloud.com/browse/LU-12311</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;We are seeing recovery-double-scale test_pairwise_fail crashing with OOM for SLES 12 SP3 failover testing only ... so far.&lt;/p&gt;

&lt;p&gt;Looking at the kernel-crash log for &lt;a href=&quot;https://testing.whamcloud.com/test_sets/bf0a7c40-7523-11e9-a6f2-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/bf0a7c40-7523-11e9-a6f2-52540065bddc&lt;/a&gt; , we see&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[  752.114008] Lustre: DEBUG MARKER: == recovery-double-scale test pairwise_fail: pairwise combination of clients, MDS, and OST failures == 08:37:36 (1557675456)
[  752.143553] Lustre: DEBUG MARKER: PATH=/opt/iozone/bin:/opt/iozone/bin:/usr/lib64/lustre/tests/mpi:/usr/lib64/lustre/tests/racer:/usr/lib64/lustre/../lustre-iokit/sgpdd-survey:/usr/lib64/lustre/tests:/usr/lib64/lustre/utils/gss:/usr/lib64/lustre/utils:/usr/lib64/mpi/gcc/openmpi/bin:/sbin:
[  752.199829] Lustre: DEBUG MARKER: /usr/sbin/lctl mark Started client load: dd on trevis-42vm3
[  752.240449] Lustre: DEBUG MARKER: Started client load: dd on trevis-42vm3
[  752.998214] Lustre: DEBUG MARKER: /usr/sbin/lctl mark Started client load: tar on trevis-42vm4
[  753.060478] Lustre: DEBUG MARKER: Started client load: tar on trevis-42vm4
[  755.135366] Lustre: DEBUG MARKER: cat /tmp/client-load.pid
[  831.669265] irqbalance invoked oom-killer: gfp_mask=0x14280ca(GFP_HIGHUSER_MOVABLE|__GFP_ZERO), nodemask=0, order=0, oom_score_adj=0
[  831.669300] irqbalance cpuset=/ mems_allowed=0
[  831.669310] CPU: 0 PID: 493 Comm: irqbalance Tainted: G           OE   N  4.4.162-94.69-default #1
[  831.669310] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
[  831.669317]  0000000000000000 ffffffff8132cdc0 ffff88007b2c7ac0 0000000000000000
[  831.669318]  ffffffff8120b20e 0000000000000000 0000000000000000 0000000000000000
[  831.669320]  0000000000000000 ffffffff810a1fb7 ffffffff81e9aa20 0000000000000000
[  831.669320] Call Trace:
[  831.669423]  [&amp;lt;ffffffff81019b09&amp;gt;] dump_trace+0x59/0x340
[  831.669430]  [&amp;lt;ffffffff81019eda&amp;gt;] show_stack_log_lvl+0xea/0x170
[  831.669435]  [&amp;lt;ffffffff8101acb1&amp;gt;] show_stack+0x21/0x40
[  831.669455]  [&amp;lt;ffffffff8132cdc0&amp;gt;] dump_stack+0x5c/0x7c
[  831.669490]  [&amp;lt;ffffffff8120b20e&amp;gt;] dump_header+0x82/0x215
[  831.669519]  [&amp;lt;ffffffff81199d39&amp;gt;] check_panic_on_oom+0x29/0x50
[  831.669530]  [&amp;lt;ffffffff81199eda&amp;gt;] out_of_memory+0x17a/0x4a0
[  831.669537]  [&amp;lt;ffffffff8119e928&amp;gt;] __alloc_pages_nodemask+0xaf8/0xb70
[  831.669555]  [&amp;lt;ffffffff811e8b24&amp;gt;] alloc_pages_vma+0xa4/0x220
[  831.669569]  [&amp;lt;ffffffff811c5063&amp;gt;] handle_pte_fault+0xe63/0x1660
[  831.669577]  [&amp;lt;ffffffff811c741a&amp;gt;] handle_mm_fault+0x2fa/0x640
[  831.669593]  [&amp;lt;ffffffff81068df7&amp;gt;] __do_page_fault+0x217/0x4c0
[  831.669608]  [&amp;lt;ffffffff8106914c&amp;gt;] trace_do_page_fault+0x3c/0x120
[  831.669627]  [&amp;lt;ffffffff81621382&amp;gt;] async_page_fault+0x32/0x60
[  831.672967] DWARF2 unwinder stuck at async_page_fault+0x32/0x60
[  831.672967] 
[  831.672971] Leftover inexact backtrace:
               
[  831.672989]  [&amp;lt;ffffffff8133945c&amp;gt;] ? copy_user_generic_string+0x2c/0x40
[  831.673000]  [&amp;lt;ffffffff8127fb70&amp;gt;] ? int_seq_next+0x20/0x20
[  831.673009]  [&amp;lt;ffffffff81231d84&amp;gt;] ? seq_read+0x2a4/0x3a0
[  831.673015]  [&amp;lt;ffffffff81276e4c&amp;gt;] ? proc_reg_read+0x3c/0x70
[  831.673017]  [&amp;lt;ffffffff8120f676&amp;gt;] ? __vfs_read+0x26/0x140
[  831.673019]  [&amp;lt;ffffffff8120fb09&amp;gt;] ? rw_verify_area+0x49/0xc0
[  831.673021]  [&amp;lt;ffffffff8120fbfa&amp;gt;] ? vfs_read+0x7a/0x120
[  831.673022]  [&amp;lt;ffffffff81210d12&amp;gt;] ? SyS_read+0x42/0xa0
[  831.673029]  [&amp;lt;ffffffff8161de61&amp;gt;] ? entry_SYSCALL_64_fastpath+0x20/0xe9
[  831.673044] Mem-Info:
[  831.673053] active_anon:1289 inactive_anon:1830 isolated_anon:0
                active_file:103761 inactive_file:336285 isolated_file:31
                unevictable:20 dirty:0 writeback:0 unstable:0
                slab_reclaimable:2630 slab_unreclaimable:9715
                mapped:7320 shmem:2179 pagetables:945 bounce:0
                free:12214 free_pcp:31 free_cma:0
[  831.673076] Node 0 DMA free:7724kB min:376kB low:468kB high:560kB active_anon:40kB inactive_anon:60kB active_file:3444kB inactive_file:3492kB unevictable:0kB isolated(anon):0kB isolated(file):124kB present:15992kB managed:15904kB mlocked:0kB dirty:0kB writeback:0kB mapped:308kB shmem:100kB slab_reclaimable:28kB slab_unreclaimable:404kB kernel_stack:16kB pagetables:108kB unstable:0kB bounce:0kB free_pcp:0kB local_pcp:0kB free_cma:0kB writeback_tmp:0kB pages_scanned:281104 all_unreclaimable? yes
[  831.673078] lowmem_reserve[]: 0 1843 1843 1843 1843
[  831.673083] Node 0 DMA32 free:41132kB min:44676kB low:55844kB high:67012kB active_anon:5116kB inactive_anon:7260kB active_file:411600kB inactive_file:1341648kB unevictable:80kB isolated(anon):0kB isolated(file):0kB present:2080744kB managed:1900752kB mlocked:80kB dirty:0kB writeback:0kB mapped:28972kB shmem:8616kB slab_reclaimable:10492kB slab_unreclaimable:38456kB kernel_stack:2560kB pagetables:3672kB unstable:0kB bounce:0kB free_pcp:124kB local_pcp:120kB free_cma:0kB writeback_tmp:0kB pages_scanned:11405892 all_unreclaimable? yes
[  831.673085] lowmem_reserve[]: 0 0 0 0 0
[  831.673093] Node 0 DMA: 15*4kB (UE) 12*8kB (UME) 13*16kB (UE) 6*32kB (U) 2*64kB (U) 1*128kB (E) 1*256kB (E) 1*512kB (E) 2*1024kB (ME) 2*2048kB (ME) 0*4096kB = 7724kB
[  831.673100] Node 0 DMA32: 369*4kB (UME) 171*8kB (ME) 333*16kB (UE) 248*32kB (UME) 155*64kB (UE) 72*128kB (UE) 11*256kB (UM) 4*512kB (UM) 1*1024kB (U) 0*2048kB 0*4096kB = 41132kB
[  831.673117] Node 0 hugepages_total=0 hugepages_free=0 hugepages_surp=0 hugepages_size=1048576kB
[  831.673130] Node 0 hugepages_total=0 hugepages_free=0 hugepages_surp=0 hugepages_size=2048kB
[  831.673131] 14562 total pagecache pages
[  831.673132] 89 pages in swap cache
[  831.673136] Swap cache stats: add 6258, delete 6169, find 134/194
[  831.673137] Free swap  = 14313840kB
[  831.673137] Total swap = 14338044kB
[  831.673137] 524184 pages RAM
[  831.673138] 0 pages HighMem/MovableOnly
[  831.673138] 45020 pages reserved
[  831.673138] 0 pages hwpoisoned
[  831.673139] [ pid ]   uid  tgid total_vm      rss nr_ptes nr_pmds swapents oom_score_adj name
[  831.673373] [  359]     0   359    10933     1055      25       3     1114             0 systemd-journal
[  831.673382] [  402]   495   402    13124      928      30       3      116             0 rpcbind
[  831.673403] [  405]     0   405     9267      848      21       3      100         -1000 systemd-udevd
[  831.673409] [  493]     0   493     4815      630      14       3       38             0 irqbalance
[  831.673411] [  500]     0   500    29707     1804      57       3      204             0 sssd
[  831.673421] [  510]   499   510    13452      892      29       3      150          -900 dbus-daemon
[  831.673423] [  536]     0   536    36531     2054      71       3      289             0 sssd_be
[  831.673435] [  539]     0   539     7448     1061      20       4      265             0 wickedd-dhcp4
[  831.673444] [  540]     0   540     7448     1033      20       3      261             0 wickedd-auto4
[  831.673452] [  541]     0   541     7447     1061      20       3      260             0 wickedd-dhcp6
[  831.673481] [  560]     0   560    84317      916      37       3      264             0 rsyslogd
[  831.673483] [  576]     0   576    31712     1810      67       3      186             0 sssd_nss
[  831.673485] [  577]     0   577    26060     1524      55       3      171             0 sssd_pam
[  831.673487] [  578]     0   578    24978     1424      52       3      189             0 sssd_ssh
[  831.673574] [  767]     0   767     7480     1064      18       3      300             0 wickedd
[  831.673580] [  770]     0   770     7455      990      19       3      276             0 wickedd-nanny
[  831.673589] [ 1429]     0  1429     2141      457      10       3       24             0 xinetd
[  831.673597] [ 1471]    74  1471     8408      993      17       3      128             0 ntpd
[  831.673608] [ 1478]    74  1478     9461      558      18       3      148             0 ntpd
[  831.673629] [ 1491]     0  1491    16586     1569      37       3      181         -1000 sshd
[  831.673634] [ 1503]   493  1503    55352      609      20       3      144             0 munged
[  831.673643] [ 1543]     0  1543     1664      436       9       3       26             0 agetty
[  831.673660] [ 1547]     0  1547     1664      459       9       3       30             0 agetty
[  831.673662] [ 1557]     0  1557   147212     1543      61       4      345             0 automount
[  831.673671] [ 1604]     0  1604     5513      603      16       4       64             0 systemd-logind
[  831.673673] [ 1826]     0  1826     8861      838      21       3       98             0 master
[  831.673675] [ 1846]    51  1846    12439     1043      27       3      106             0 pickup
[  831.673683] [ 1847]    51  1847    12536     1329      27       3      176             0 qmgr
[  831.673691] [ 1893]     0  1893     5198      564      17       3      151             0 cron
[  831.673798] [15007]     0 15007    17465      869      36       3        0             0 in.mrshd
[  831.673809] [15008]     0 15008     2894      661      11       3       12             0 bash
[  831.673817] [15013]     0 15013     2894      494      11       3        2             0 bash
[  831.673831] [15014]     0 15014     3034      751      12       3        0             0 run_dd.sh
[  831.673833] [15047]     0 15047     1062      191       7       3        0             0 dd
[  831.673838] Kernel panic - not syncing: Out of memory: system-wide panic_on_oom is enabled
               
[  831.673840] CPU: 0 PID: 493 Comm: irqbalance Tainted: G           OE   N  4.4.162-94.69-default #1
[  831.673840] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
[  831.673842]  0000000000000000 ffffffff8132cdc0 ffffffff81a28bf0 ffff88007b2c79e8
[  831.673843]  ffffffff81193c21 0000000000000010 ffff88007b2c79f8 ffff88007b2c7998
[  831.673845]  0000000000000426 ffffffff81a2cec3 000000000000004f 0000000000000000
[  831.673845] Call Trace:
[  831.673851]  [&amp;lt;ffffffff81019b09&amp;gt;] dump_trace+0x59/0x340
[  831.673854]  [&amp;lt;ffffffff81019eda&amp;gt;] show_stack_log_lvl+0xea/0x170
[  831.673856]  [&amp;lt;ffffffff8101acb1&amp;gt;] show_stack+0x21/0x40
[  831.673859]  [&amp;lt;ffffffff8132cdc0&amp;gt;] dump_stack+0x5c/0x7c
[  831.673867]  [&amp;lt;ffffffff81193c21&amp;gt;] panic+0xd2/0x232
[  831.673870]  [&amp;lt;ffffffff81199d60&amp;gt;] check_panic_on_oom+0x50/0x50
[  831.673873]  [&amp;lt;ffffffff81199eda&amp;gt;] out_of_memory+0x17a/0x4a0
[  831.673879]  [&amp;lt;ffffffff8119e928&amp;gt;] __alloc_pages_nodemask+0xaf8/0xb70
[  831.673883]  [&amp;lt;ffffffff811e8b24&amp;gt;] alloc_pages_vma+0xa4/0x220
[  831.673886]  [&amp;lt;ffffffff811c5063&amp;gt;] handle_pte_fault+0xe63/0x1660
[  831.673890]  [&amp;lt;ffffffff811c741a&amp;gt;] handle_mm_fault+0x2fa/0x640
[  831.673893]  [&amp;lt;ffffffff81068df7&amp;gt;] __do_page_fault+0x217/0x4c0
[  831.673897]  [&amp;lt;ffffffff8106914c&amp;gt;] trace_do_page_fault+0x3c/0x120
[  831.673900]  [&amp;lt;ffffffff81621382&amp;gt;] async_page_fault+0x32/0x60
[  831.676117] DWARF2 unwinder stuck at async_page_fault+0x32/0x60
[  831.676118] 
[  831.676118] Leftover inexact backtrace:
               
[  831.676121]  [&amp;lt;ffffffff8133945c&amp;gt;] ? copy_user_generic_string+0x2c/0x40
[  831.676122]  [&amp;lt;ffffffff8127fb70&amp;gt;] ? int_seq_next+0x20/0x20
[  831.676124]  [&amp;lt;ffffffff81231d84&amp;gt;] ? seq_read+0x2a4/0x3a0
[  831.676126]  [&amp;lt;ffffffff81276e4c&amp;gt;] ? proc_reg_read+0x3c/0x70
[  831.676128]  [&amp;lt;ffffffff8120f676&amp;gt;] ? __vfs_read+0x26/0x140
[  831.676130]  [&amp;lt;ffffffff8120fb09&amp;gt;] ? rw_verify_area+0x49/0xc0
[  831.676131]  [&amp;lt;ffffffff8120fbfa&amp;gt;] ? vfs_read+0x7a/0x120
[  831.676133]  [&amp;lt;ffffffff81210d12&amp;gt;] ? SyS_read+0x42/0xa0
[  831.676135]  [&amp;lt;ffffffff8161de61&amp;gt;] ? entry_SYSCALL_64_fastpath+0x20/0xe9
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;Here&apos;s another example of this crash: &lt;a href=&quot;https://testing.whamcloud.com/test_sets/e2461cc8-5485-11e9-9646-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/e2461cc8-5485-11e9-9646-52540065bddc&lt;/a&gt; .&lt;/p&gt;

&lt;p&gt;A similar crash for recovery-double-scale test_pairwise_fail; &lt;a href=&quot;https://testing.whamcloud.com/test_sets/e2461cc8-5485-11e9-9646-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/e2461cc8-5485-11e9-9646-52540065bddc&lt;/a&gt;&lt;/p&gt;
</description>
                <environment>SLES12 SP3 server and client failover testing</environment>
        <key id="55664">LU-12311</key>
            <summary>recovery-double-scale test pairwise_fail crashed with OOM</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="1" iconUrl="https://jira.whamcloud.com/images/icons/statuses/open.png" description="The issue is open and ready for the assignee to start work on it.">Open</status>
                    <statusCategory id="2" key="new" colorName="default"/>
                                    <resolution id="-1">Unresolved</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="jamesanunez">James Nunez</reporter>
                        <labels>
                            <label>failover</label>
                    </labels>
                <created>Thu, 16 May 2019 21:10:37 +0000</created>
                <updated>Wed, 20 Sep 2023 16:17:15 +0000</updated>
                                            <version>Lustre 2.10.7</version>
                    <version>Lustre 2.12.1</version>
                    <version>Lustre 2.10.8</version>
                    <version>Lustre 2.12.3</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>2</watches>
                                                                            <comments>
                            <comment id="256386" author="jamesanunez" created="Mon, 14 Oct 2019 23:20:43 +0000"  >&lt;p&gt;Not sure if this is the same issue at &lt;a href=&quot;https://testing.whamcloud.com/test_sets/63530a46-eb2c-11e9-a197-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/63530a46-eb2c-11e9-a197-52540065bddc&lt;/a&gt;, we see&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[ 1607.502634] Lustre: lustre-OST0006-osc-ffff88f3fadea000: Connection restored to 10.9.5.189@tcp (at 10.9.5.189@tcp)
[ 1624.058737] LNetError: 1943:0:(lib-msg.c:820:lnet_is_health_check()) Msg is in inconsistent state, don&apos;t perform health checking (-125, 0)
[ 1743.142312] kthreadd invoked oom-killer: gfp_mask=0x16040c0(GFP_KERNEL|__GFP_COMP|__GFP_NOTRACK), nodemask=(null),  order=2, oom_score_adj=0
[ 1743.144796] kthreadd cpuset=/ mems_allowed=0
[ 1743.145658] CPU: 1 PID: 2 Comm: kthreadd Tainted: G           OE      4.12.14-95.29-default #1 SLE12-SP4
[ 1743.147330] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
[ 1743.148394] Call Trace:
[ 1743.149009]  dump_stack+0x5a/0x75
[ 1743.149702]  dump_header+0x9c/0x238
[ 1743.150420]  ? notifier_call_chain+0x47/0x70
[ 1743.151244]  ? __blocking_notifier_call_chain+0x51/0x60
[ 1743.152226]  out_of_memory+0x44b/0x490
[ 1743.152984]  __alloc_pages_slowpath+0x7e5/0xa0d
[ 1743.153862]  __alloc_pages_nodemask+0x1e9/0x210
[ 1743.154740]  cache_grow_begin+0x85/0x560
[ 1743.155507]  fallback_alloc+0x167/0x1f0
[ 1743.156304]  kmem_cache_alloc_node+0x84/0x1f0
[ 1743.157168]  ? __switch_to_asm+0x40/0x70
[ 1743.157946]  copy_process.part.36+0xf1/0x1cf0
[ 1743.158796]  ? __switch_to_asm+0x40/0x70
[ 1743.159562]  ? __switch_to_asm+0x34/0x70
[ 1743.160331]  ? __switch_to_asm+0x40/0x70
[ 1743.161110]  ? __switch_to_asm+0x34/0x70
[ 1743.161877]  ? __switch_to_asm+0x40/0x70
[ 1743.162642]  ? __switch_to_asm+0x34/0x70
[ 1743.163401]  ? __switch_to_asm+0x40/0x70
[ 1743.164168]  ? __switch_to_asm+0x34/0x70
[ 1743.164934]  ? __switch_to_asm+0x40/0x70
[ 1743.165706]  ? __switch_to_asm+0x34/0x70
[ 1743.166466]  ? __switch_to_asm+0x40/0x70
[ 1743.167228]  ? __switch_to_asm+0x34/0x70
[ 1743.167993]  ? __switch_to_asm+0x34/0x70
[ 1743.168756]  ? __kthread_parkme+0x70/0x70
[ 1743.169540]  ? __switch_to+0x7c/0x4a0
[ 1743.170263]  _do_fork+0xdd/0x360
[ 1743.170921]  kernel_thread+0x25/0x30
[ 1743.171623]  kthreadd+0x2d4/0x320
[ 1743.172298]  ? kthread_create_on_cpu+0x80/0x80
[ 1743.173151]  ret_from_fork+0x35/0x40
[ 1743.173925] Mem-Info:
[ 1743.174428] active_anon:4 inactive_anon:8 isolated_anon:0
[ 1743.174428]  active_file:320689 inactive_file:106977 isolated_file:160
[ 1743.174428]  unevictable:20 dirty:10335 writeback:59935 unstable:0
[ 1743.174428]  slab_reclaimable:3522 slab_unreclaimable:17261
[ 1743.174428]  mapped:1556 shmem:0 pagetables:977 bounce:0
[ 1743.174428]  free:13065 free_pcp:447 free_cma:0
[ 1743.180075] Node 0 active_anon:16kB inactive_anon:32kB active_file:1282756kB inactive_file:427908kB unevictable:80kB isolated(anon):0kB isolated(file):744kB mapped:6224kB dirty:41340kB writeback:239740kB shmem:0kB shmem_thp: 0kB shmem_pmdmapped: 0kB anon_thp: 0kB writeback_tmp:0kB unstable:0kB all_unreclaimable? yes
[ 1743.184821] Node 0 DMA free:7652kB min:380kB low:472kB high:564kB active_anon:0kB inactive_anon:0kB active_file:8120kB inactive_file:100kB unevictable:0kB writepending:68kB present:15992kB managed:15908kB mlocked:0kB slab_reclaimable:4kB slab_unreclaimable:16kB kernel_stack:0kB pagetables:0kB bounce:0kB free_pcp:0kB local_pcp:0kB free_cma:0kB
[ 1743.190065] lowmem_reserve[]: 0 1822 1822 1822 1822
[ 1743.191023] Node 0 DMA32 free:44608kB min:44672kB low:55840kB high:67008kB active_anon:160kB inactive_anon:64kB active_file:1274868kB inactive_file:427792kB unevictable:80kB writepending:281228kB present:2080744kB managed:1885928kB mlocked:80kB slab_reclaimable:14084kB slab_unreclaimable:69028kB kernel_stack:2096kB pagetables:3908kB bounce:0kB free_pcp:1788kB local_pcp:668kB free_cma:0kB
[ 1743.196915] lowmem_reserve[]: 0 0 0 0 0
[ 1743.197703] Node 0 DMA: 3*4kB (UME) 1*8kB (E) 3*16kB (UME) 3*32kB (UE) 5*64kB (UME) 2*128kB (ME) 3*256kB (UME) 2*512kB (ME) 3*1024kB (UME) 1*2048kB (E) 0*4096kB = 7652kB
[ 1743.200311] Node 0 DMA32: 556*4kB (UE) 688*8kB (UE) 561*16kB (UE) 367*32kB (UE) 184*64kB (UM) 34*128kB (UM) 0*256kB 0*512kB 0*1024kB 0*2048kB 0*4096kB = 44576kB
[ 1743.202828] Node 0 hugepages_total=0 hugepages_free=0 hugepages_surp=0 hugepages_size=1048576kB
[ 1743.204409] Node 0 hugepages_total=0 hugepages_free=0 hugepages_surp=0 hugepages_size=2048kB
[ 1743.205917] 71887 total pagecache pages
[ 1743.206700] 0 pages in swap cache
[ 1743.207365] Swap cache stats: add 18669, delete 18669, find 6415/10352
[ 1743.208563] Free swap  = 14298108kB
[ 1743.209264] Total swap = 14338044kB
[ 1743.209958] 524184 pages RAM
[ 1743.210551] 0 pages HighMem/MovableOnly
[ 1743.211300] 48725 pages reserved
[ 1743.211953] 0 pages hwpoisoned
[ 1743.212580] [ pid ]   uid  tgid total_vm      rss nr_ptes nr_pmds swapents oom_score_adj name
[ 1743.214297] [  356]     0   356    10933      365      23       3      782             0 systemd-journal
[ 1743.216156] [  368]     0   368     3008        1      11       3      782             0 haveged
[ 1743.217772] [  379]     0   379    10442      422      24       3      280         -1000 systemd-udevd
[ 1743.219480] [  380]   495   380    13125       19      32       3      125             0 rpcbind
[ 1743.221089] [  422]     0   422    28063       36      59       3      272             0 sssd
[ 1743.222637] [  432]   499   432    10376        0      27       3      149          -900 dbus-daemon
[ 1743.224293] [  490]     0   490    10882        2      25       3      290             0 wickedd-dhcp4
[ 1743.225965] [  491]     0   491    10881        2      27       3      287             0 wickedd-auto4
[ 1743.227625] [  492]     0   492    10882        4      26       3      286             0 wickedd-dhcp6
[ 1743.229278] [  494]     0   494     4814      260      14       4       58             0 irqbalance
[ 1743.230903] [  499]     0   499    83783      239      37       4      281             0 rsyslogd
[ 1743.232490] [  519]     0   519    34904      128      72       3      343             0 sssd_be
[ 1743.234106] [  534]     0   534    25919      399      54       3      234             0 sssd_nss
[ 1743.235743] [  535]     0   535    26473      128      55       3      225             0 sssd_pam
[ 1743.237328] [  536]     0   536    25353      128      54       3      223             0 sssd_ssh
[ 1743.238930] [  564]     0   564    10912        1      26       3      331             0 wickedd
[ 1743.240510] [  569]     0   569    10888        0      26       3      294             0 wickedd-nanny
[ 1743.242183] [ 1212]     0  1212     2141      296      10       3       41             0 xinetd
[ 1743.243747] [ 1236]    74  1236     5883      249      17       3      146             0 ntpd
[ 1743.245280] [ 1237]    74  1237     6936        1      18       3      151             0 ntpd
[ 1743.246827] [ 1254]     0  1254    16606        0      38       5      181         -1000 sshd
[ 1743.248384] [ 1261]   489  1261    55369        0      18       4      259             0 munged
[ 1743.249970] [ 1377]     0  1377     1665        1       9       3       30             0 agetty
[ 1743.251524] [ 1378]     0  1378     1665        1       9       3       28             0 agetty
[ 1743.253112] [ 1421]     0  1421     8863      214      21       3      123             0 master
[ 1743.254690] [ 1423]    51  1423     9900      136      23       3      128             0 pickup
[ 1743.256256] [ 1424]    51  1424     9997       22      25       3      179             0 qmgr
[ 1743.257798] [ 1434]     0  1434     5204      267      15       3      173             0 cron
[ 1743.259322] [ 1436]     0  1436   164426      290      65       3      378             0 automount
[ 1743.260943] [ 1452]     0  1452     5515        0      17       3       78             0 systemd-logind
[ 1743.262782] [ 3538]     0  3538    14929        1      35       3      175             0 in.mrshd
[ 1743.264391] [ 3539]     0  3539     2894        1      11       3       77             0 bash
[ 1743.265940] [ 3544]     0  3544     2894        0      11       3       78             0 bash
[ 1743.267484] [ 3545]     0  3545     3034      383      10       3      215             0 run_dd.sh
[ 1743.269112] [ 3773]     0  3773     1062      164       8       3       25             0 dd
[ 1743.270624] Kernel panic - not syncing: Out of memory: system-wide panic_on_oom is enabled
[ 1743.270624] 
[ 1743.272352] CPU: 1 PID: 2 Comm: kthreadd Tainted: G           OE      4.12.14-95.29-default #1 SLE12-SP4
[ 1743.274016] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
[ 1743.275072] Call Trace:
[ 1743.275592]  dump_stack+0x5a/0x75
[ 1743.276268]  panic+0xdb/0x23e
[ 1743.276890]  out_of_memory+0x472/0x490
[ 1743.277625]  __alloc_pages_slowpath+0x7e5/0xa0d
[ 1743.278484]  __alloc_pages_nodemask+0x1e9/0x210
[ 1743.279346]  cache_grow_begin+0x85/0x560
[ 1743.280109]  fallback_alloc+0x167/0x1f0
[ 1743.280864]  kmem_cache_alloc_node+0x84/0x1f0
[ 1743.281705]  ? __switch_to_asm+0x40/0x70
[ 1743.282477]  copy_process.part.36+0xf1/0x1cf0
[ 1743.283313]  ? __switch_to_asm+0x40/0x70
[ 1743.284080]  ? __switch_to_asm+0x34/0x70
[ 1743.284849]  ? __switch_to_asm+0x40/0x70
[ 1743.285605]  ? __switch_to_asm+0x34/0x70
[ 1743.286369]  ? __switch_to_asm+0x40/0x70
[ 1743.287137]  ? __switch_to_asm+0x34/0x70
[ 1743.287903]  ? __switch_to_asm+0x40/0x70
[ 1743.288666]  ? __switch_to_asm+0x34/0x70
[ 1743.289427]  ? __switch_to_asm+0x40/0x70
[ 1743.290189]  ? __switch_to_asm+0x34/0x70
[ 1743.290958]  ? __switch_to_asm+0x40/0x70
[ 1743.291725]  ? __switch_to_asm+0x34/0x70
[ 1743.292485]  ? __switch_to_asm+0x34/0x70
[ 1743.293253]  ? __kthread_parkme+0x70/0x70
[ 1743.294035]  ? __switch_to+0x7c/0x4a0
[ 1743.294757]  _do_fork+0xdd/0x360
[ 1743.295401]  kernel_thread+0x25/0x30
[ 1743.296107]  kthreadd+0x2d4/0x320
[ 1743.296774]  ? kthread_create_on_cpu+0x80/0x80
[ 1743.297620]  ret_from_fork+0x35/0x40
[    1.625911] Kernel panic - not syncing: Out of memory and no killable processes...
[    1.625911] 
[    1.627661] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.12.14-95.29-default #1 SLE12-SP4
[    1.628007] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
[    1.628007] Call Trace:
[    1.628007]  dump_stack+0x5a/0x75
[    1.628007]  panic+0xdb/0x23e
[    1.628007]  out_of_memory+0x3a1/0x490
[    1.628007]  __alloc_pages_slowpath+0x7e5/0xa0d
[    1.628007]  ? interleave_nodes+0x22/0x40
[    1.628007]  __alloc_pages_nodemask+0x1e9/0x210
[    1.628007]  alloc_page_interleave+0x36/0xa0
[    1.628007]  alloc_pages_current+0xa8/0xf0
[    1.628007]  pagecache_get_page+0x4d/0x250
[    1.628007]  ? xattr_resolve_name+0xa8/0xd0
[    1.628007]  grab_cache_page_write_begin+0x1c/0x40
[    1.628007]  simple_write_begin+0x24/0xe0
[    1.628007]  generic_perform_write+0xb2/0x190
[    1.628007]  __generic_file_write_iter+0x184/0x1c0
[    1.628007]  generic_file_write_iter+0xec/0x1d0
[    1.628007]  __vfs_write+0xdc/0x150
[    1.628007]  vfs_write+0xad/0x1a0
[    1.628007]  SyS_write+0x42/0x90
[    1.628007]  xwrite+0x2a/0x5b
[    1.628007]  do_copy+0x8b/0xb8
[    1.628007]  write_buffer+0x27/0x37
[    1.628007]  flush_buffer+0x2b/0x84
[    1.628007]  ? write_buffer+0x37/0x37
[    1.628007]  unxz+0x195/0x297
[    1.628007]  ? unlzo+0x4c4/0x4c4
[    1.628007]  unpack_to_rootfs+0x15d/0x299
[    1.628007]  ? initrd_load+0x3f/0x3f
[    1.628007]  ? parse_header+0x10e/0x10e
[    1.628007]  populate_rootfs+0x61/0x116
[    1.628007]  ? parse_header+0x10e/0x10e
[    1.628007]  do_one_initcall+0x50/0x1b0
[    1.628007]  kernel_init_freeable+0x19a/0x222
[    1.628007]  ? set_debug_rodata+0x11/0x11
[    1.628007]  ? rest_init+0x80/0x80
[    1.628007]  kernel_init+0xa/0x110
[    1.628007]  ret_from_fork+0x35/0x40
[    1.628007] Rebooting in 1 seconds..
^[[1;256r^[[256;256H^[[6n
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</comment>
                    </comments>
                    <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i00gfr:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>