<!-- 
RSS generated by JIRA (9.4.14#940014-sha1:734e6822bbf0d45eff9af51f82432957f73aa32c) at Sat Feb 10 02:51:11 UTC 2024

It is possible to restrict the fields that are returned in this document by specifying the 'field' parameter in your request.
For example, to request only the issue key and summary append 'field=key&field=summary' to the URL of your request.
-->
<rss version="0.92">
<channel>
    <title>Whamcloud Community JIRA</title>
    <link>https://jira.whamcloud.com</link>
    <description>This file is an XML representation of an issue</description>
    <language>en-us</language>
    <build-info>
        <version>9.4.14</version>
        <build-number>940014</build-number>
        <build-date>05-12-2023</build-date>
    </build-info>


<item>
            <title>[LU-12278] sanity-scrub crashes with &apos;BUG: soft lockup - CPU#0 stuck for 23s! [lfsck:27242]&apos;/ [OI_scrub:27272]</title>
                <link>https://jira.whamcloud.com/browse/LU-12278</link>
                <project id="10000" key="LU">Lustre</project>
                    <description>&lt;p&gt;We&#8217;ve seen several instances of sanity-scrub failing a test and then crashing afterwards for DNE/ZFS testing.&lt;/p&gt;

&lt;p&gt;Looking at the crash log for &lt;a href=&quot;https://testing.whamcloud.com/test_sets/fdbc9c2c-726a-11e9-8bb1-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/fdbc9c2c-726a-11e9-8bb1-52540065bddc&lt;/a&gt;, we see test 12 fail with &apos;(7) Expected &apos;&apos; on ost1&apos; and soon after, the kernel crashes&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[ 8366.570367] Lustre: DEBUG MARKER: zfs get -H -o value lustre:svname 		                           lustre-ost1/ost1 2&amp;gt;/dev/null
[ 8366.811819] Lustre: lustre-OST0000: Denying connection for new client 3832f87b-cc39-39e7-83ea-5dbfd472bd4c(at 10.9.5.163@tcp), waiting for 5 known clients (3 recovered, 0 in progress, and 0 evicted) already passed deadline 3:38
[ 8369.115250] Lustre: lustre-OST0000: Recovery over after 0:04, of 5 clients 5 recovered and 0 were evicted.
[ 8369.121253] Lustre: *** cfs_fail_loc=233, val=0***
[ 8371.828494] LustreError: 26994:0:(osd_object.c:480:osd_check_lma()) lustre-OST0000: FID-in-LMA [0x100000000:0x21c:0x0] does not match the object self-fid [0x100000000:0x21b:0x0]
[ 8372.030872] Lustre: DEBUG MARKER: /usr/sbin/lctl lfsck_start -M lustre-OST0000 -t scrub -r
[ 8372.869557] LustreError: 27272:0:(osd_index.c:218:__osd_xattr_load_by_oid()) lustre-OST0000: can&apos;t get bonus, rc = -2
[ 8399.919060] NMI watchdog: BUG: soft lockup - CPU#0 stuck for 23s! [lfsck:27242]
[ 8399.922057] NMI watchdog: BUG: soft lockup - CPU#1 stuck for 23s! [OI_scrub:27272]
[ 8399.920438] Modules linked in: osp(OE) ofd(OE) lfsck(OE) ost(OE) mgc(OE) osd_zfs(OE) lquota(OE)
[ 8399.922058] Modules linked in:
[ 8399.922059]  osp(OE)
[ 8399.922060]  ofd(OE)
[ 8399.922060]  lfsck(OE)
[ 8399.922061]  ost(OE)
[ 8399.922061]  mgc(OE)
[ 8399.922062]  osd_zfs(OE)
[ 8399.922062]  lquota(OE)
[ 8399.922063]  fid(OE)
[ 8399.922063]  fld(OE)
[ 8399.922064]  ksocklnd(OE)
[ 8399.922065]  ptlrpc(OE)
[ 8399.922065]  obdclass(OE)
[ 8399.922066]  lnet(OE)
[ 8399.922066]  libcfs(OE)
[ 8399.922067]  rpcsec_gss_krb5
[ 8399.922067]  auth_rpcgss
[ 8399.922068]  nfsv4
[ 8399.922068]  dns_resolver
[ 8399.922069]  nfs
[ 8399.922070]  lockd
[ 8399.922070]  grace
[ 8399.922071]  fscache
[ 8399.922071]  rpcrdma
[ 8399.922072]  ib_isert
[ 8399.922072]  iscsi_target_mod
[ 8399.922073]  ib_iser
[ 8399.922073]  libiscsi
[ 8399.922074]  scsi_transport_iscsi
[ 8399.922074]  ib_srpt
[ 8399.922075]  target_core_mod
[ 8399.922075]  crc_t10dif
[ 8399.922076]  crct10dif_generic
[ 8399.922076]  ib_srp
[ 8399.922077]  scsi_transport_srp
[ 8399.922078]  scsi_tgt
[ 8399.922078]  ib_ipoib
[ 8399.922079]  rdma_ucm
[ 8399.922079]  ib_ucm
[ 8399.922080]  ib_uverbs
[ 8399.922080]  ib_umad
[ 8399.922081]  rdma_cm
[ 8399.922081]  ib_cm
[ 8399.922082]  iw_cm
[ 8399.922082]  sunrpc
[ 8399.922083]  ib_core
[ 8399.922083]  dm_mod
[ 8399.922084]  zfs(POE)
[ 8399.922084]  zunicode(POE)
[ 8399.922085]  zavl(POE)
[ 8399.922085]  icp(POE)
[ 8399.922086]  ppdev
[ 8399.922086]  iosf_mbi
[ 8399.922087]  crc32_pclmul
[ 8399.922087]  ghash_clmulni_intel
[ 8399.922088]  zcommon(POE)
[ 8399.922089]  znvpair(POE)
[ 8399.922089]  aesni_intel
[ 8399.922090]  spl(OE)
[ 8399.922090]  lrw
[ 8399.922091]  gf128mul
[ 8399.922091]  glue_helper
[ 8399.922092]  ablk_helper
[ 8399.922093]  cryptd
[ 8399.922093]  joydev
[ 8399.922094]  pcspkr
[ 8399.922094]  virtio_balloon
[ 8399.922095]  parport_pc
[ 8399.922095]  parport
[ 8399.922096]  i2c_piix4
[ 8399.922096]  ip_tables
[ 8399.922097]  ext4
[ 8399.922097]  mbcache
[ 8399.922098]  jbd2
[ 8399.922098]  virtio_blk
[ 8399.922099]  ata_generic
[ 8399.922099]  pata_acpi
[ 8399.922100]  crct10dif_pclmul
[ 8399.922100]  crct10dif_common
[ 8399.922101]  crc32c_intel
[ 8399.922101]  serio_raw
[ 8399.922102]  floppy
[ 8399.922102]  ata_piix
[ 8399.922103]  libata
[ 8399.922103]  8139too
[ 8399.922104]  virtio_pci
[ 8399.922104]  virtio_ring
[ 8399.922105]  virtio
[ 8399.922105]  8139cp
[ 8399.922106]  mii
[ 8399.922106] 
[ 8399.922109] CPU: 1 PID: 27272 Comm: OI_scrub Kdump: loaded Tainted: P           OE  ------------   3.10.0-957.10.1.el7_lustre.x86_64 #1
[ 8399.922109] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
[ 8399.922111] task: ffff9f33ba7d4100 ti: ffff9f339a828000 task.ti: ffff9f339a828000
[ 8399.922112] RIP: 0010:[&amp;lt;ffffffffbbd6aca6&amp;gt;] 
[ 8399.922118]  [&amp;lt;ffffffffbbd6aca6&amp;gt;] native_safe_halt+0x6/0x10
[ 8399.922119] RSP: 0018:ffff9f339a82bd78  EFLAGS: 00000202
[ 8399.922120] RAX: 0000000000000005 RBX: ffff9f339a82bdc0 RCX: 0000000000000000
[ 8399.922121] RDX: 0000000000000005 RSI: 0000000000000005 RDI: ffff9f339db272f8
[ 8399.922122] RBP: ffff9f339a82bd78 R08: 0000000000000000 R09: 00000000000000b8
[ 8399.922122] R10: 0000000000000000 R11: 0000000000000001 R12: 0000000000000020
[ 8399.922123] R13: ffff9f339a82bdf0 R14: ffff9f339db26000 R15: 00000000000003a3
[ 8399.922124] FS:  0000000000000000(0000) GS:ffff9f33bfd00000(0000) knlGS:0000000000000000
[ 8399.922125] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 8399.922126] CR2: 00007ffe24f6eea8 CR3: 0000000023210000 CR4: 00000000000606e0
[ 8399.922130] Call Trace:
[ 8399.922144]  [&amp;lt;ffffffffbb6698f5&amp;gt;] kvm_wait+0x65/0x70
[ 8399.922148]  [&amp;lt;ffffffffbb7120be&amp;gt;] __pv_queued_spin_lock_slowpath+0x29e/0x2e0
[ 8399.922150]  [&amp;lt;ffffffffbbd5cfcb&amp;gt;] queued_spin_lock_slowpath+0xb/0xf
[ 8399.922152]  [&amp;lt;ffffffffbbd6b480&amp;gt;] _raw_spin_lock+0x20/0x30
[ 8399.922224]  [&amp;lt;ffffffffc12e69b0&amp;gt;] osd_scrub_main+0x720/0xf50 [osd_zfs]
[ 8399.922227]  [&amp;lt;ffffffffbb6d67f0&amp;gt;] ? wake_up_state+0x20/0x20
[ 8399.922234]  [&amp;lt;ffffffffc12e6290&amp;gt;] ? osd_scrub_exec+0x970/0x970 [osd_zfs]
[ 8399.922236]  [&amp;lt;ffffffffbb6c1c71&amp;gt;] kthread+0xd1/0xe0
[ 8399.922238]  [&amp;lt;ffffffffbb6c1ba0&amp;gt;] ? insert_kthread_work+0x40/0x40
[ 8399.922241]  [&amp;lt;ffffffffbbd75c37&amp;gt;] ret_from_fork_nospec_begin+0x21/0x21
[ 8399.922242]  [&amp;lt;ffffffffbb6c1ba0&amp;gt;] ? insert_kthread_work+0x40/0x40
[ 8399.922243] Code: 
[ 8399.922244] 65 
[ 8399.922244] 48 
[ 8399.922244] 8b 
[ 8399.922245] 05 
[ 8399.922245] 95 
[ 8399.922245] 23 
[ 8399.922246] 2a 
[ 8399.922246] 44 
[ 8399.922246] 48 
[ 8399.922247] 89 
[ 8399.922247] c2 
[ 8399.922247] b9 
[ 8399.922248] 48 
[ 8399.922248] 00 
[ 8399.922248] 00 
[ 8399.922249] 00 
[ 8399.922249] 48 
[ 8399.922249] c1 
[ 8399.922249] ea 
[ 8399.922250] 20 
[ 8399.922250] 0f 
[ 8399.922250] 30 
[ 8399.922251] eb 
[ 8399.922251] 85 
[ 8399.922251] 66 
[ 8399.922252] 2e 
[ 8399.922252] 0f 
[ 8399.922252] 1f 
[ 8399.922252] 84 
[ 8399.922253] 00 
[ 8399.922253] 00 
[ 8399.922253] 00 
[ 8399.922254] 00 
[ 8399.922254] 00 
[ 8399.922254] 0f 
[ 8399.922255] 1f 
[ 8399.922255] 00 
[ 8399.922255] 55 
[ 8399.922255] 48 
[ 8399.922256] 89 
[ 8399.922256] e5 
[ 8399.922256] fb 
[ 8399.922257] f4 
[ 8399.922257] &amp;lt;5d&amp;gt; 
[ 8399.922257] c3 
[ 8399.922258] 0f 
[ 8399.922258] 1f 
[ 8399.922258] 84 
[ 8399.922259] 00 
[ 8399.922259] 00 
[ 8399.922259] 00 
[ 8399.922260] 00 
[ 8399.922260] 00 
[ 8399.922260] 55 
[ 8399.922260] 48 
[ 8399.922261] 89 
[ 8399.922261] e5 
[ 8399.922261] f4 
[ 8399.922262] 5d 
[ 8399.922262] c3 
[ 8399.922262] 90 
[ 8399.922263] 90 
[ 8399.922263] 90 
[ 8399.922263] 90 
[ 8399.922263] 
[ 8399.922264] Kernel panic - not syncing: softlockup: hung tasks
[ 8399.922266] CPU: 1 PID: 27272 Comm: OI_scrub Kdump: loaded Tainted: P           OEL ------------   3.10.0-957.10.1.el7_lustre.x86_64 #1
[ 8399.922267] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
[ 8399.922267] Call Trace:
[ 8399.922268]  &amp;lt;IRQ&amp;gt; 
[ 8399.922270]  [&amp;lt;ffffffffbbd62e41&amp;gt;] dump_stack+0x19/0x1b
[ 8399.922272]  [&amp;lt;ffffffffbbd5c550&amp;gt;] panic+0xe8/0x21f
[ 8399.922274]  [&amp;lt;ffffffffbb62e8b8&amp;gt;] ? show_regs+0x58/0x210
[ 8399.922277]  [&amp;lt;ffffffffbb749121&amp;gt;] watchdog_timer_fn+0x231/0x240
[ 8399.922278]  [&amp;lt;ffffffffbb748ef0&amp;gt;] ? watchdog+0x40/0x40
[ 8399.922281]  [&amp;lt;ffffffffbb6c5fd3&amp;gt;] __hrtimer_run_queues+0xf3/0x270
[ 8399.922283]  [&amp;lt;ffffffffbb6c655f&amp;gt;] hrtimer_interrupt+0xaf/0x1d0
[ 8399.922286]  [&amp;lt;ffffffffbb65a2cb&amp;gt;] local_apic_timer_interrupt+0x3b/0x60
[ 8399.922288]  [&amp;lt;ffffffffbbd7a6c3&amp;gt;] smp_apic_timer_interrupt+0x43/0x60
[ 8399.922290]  [&amp;lt;ffffffffbbd76df2&amp;gt;] apic_timer_interrupt+0x162/0x170
[ 8399.922290]  &amp;lt;EOI&amp;gt; 
[ 8399.922292]  [&amp;lt;ffffffffbbd6aca6&amp;gt;] ? native_safe_halt+0x6/0x10
[ 8399.922294]  [&amp;lt;ffffffffbb6698f5&amp;gt;] kvm_wait+0x65/0x70
[ 8399.922296]  [&amp;lt;ffffffffbb7120be&amp;gt;] __pv_queued_spin_lock_slowpath+0x29e/0x2e0
[ 8399.922297]  [&amp;lt;ffffffffbbd5cfcb&amp;gt;] queued_spin_lock_slowpath+0xb/0xf
[ 8399.922299]  [&amp;lt;ffffffffbbd6b480&amp;gt;] _raw_spin_lock+0x20/0x30
[ 8399.922306]  [&amp;lt;ffffffffc12e69b0&amp;gt;] osd_scrub_main+0x720/0xf50 [osd_zfs]
[ 8399.922307]  [&amp;lt;ffffffffbb6d67f0&amp;gt;] ? wake_up_state+0x20/0x20
[ 8399.922313]  [&amp;lt;ffffffffc12e6290&amp;gt;] ? osd_scrub_exec+0x970/0x970 [osd_zfs]
[ 8399.922315]  [&amp;lt;ffffffffbb6c1c71&amp;gt;] kthread+0xd1/0xe0
[ 8399.922316]  [&amp;lt;ffffffffbb6c1ba0&amp;gt;] ? insert_kthread_work+0x40/0x40
[ 8399.922318]  [&amp;lt;ffffffffbbd75c37&amp;gt;] ret_from_fork_nospec_begin+0x21/0x21
[ 8399.922319]  [&amp;lt;ffffffffbb6c1ba0&amp;gt;] ? insert_kthread_work+0x40/0x40
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;

&lt;p&gt;For the crash at &lt;a href=&quot;https://testing.whamcloud.com/test_sets/d54b8e0a-6604-11e9-8bb1-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/d54b8e0a-6604-11e9-8bb1-52540065bddc&lt;/a&gt; , we see similar error messages prior to the crash but a different call trace&lt;/p&gt;
&lt;div class=&quot;preformatted panel&quot; style=&quot;border-width: 1px;&quot;&gt;&lt;div class=&quot;preformattedContent panelContent&quot;&gt;
&lt;pre&gt;[ 7550.003517] Lustre: DEBUG MARKER: /usr/sbin/lctl lfsck_start -M lustre-MDT0000 -t scrub 5 --dryrun
[ 7550.247858] LustreError: 25166:0:(osd_index.c:218:__osd_xattr_load_by_oid()) lustre-MDT0000: can&apos;t get bonus, rc = -17
[ 7550.249490] LustreError: 25166:0:(osd_index.c:218:__osd_xattr_load_by_oid()) Skipped 1 previous similar message
[ 7550.746977] Lustre: DEBUG MARKER: /usr/sbin/lctl lfsck_start -M lustre-MDT0002 -t scrub 5 --dryrun
[ 7576.216595] NMI watchdog: BUG: soft lockup - CPU#0 stuck for 22s! [OI_scrub:25242]
[ 7576.219587] NMI watchdog: BUG: soft lockup - CPU#1 stuck for 22s! [lfsck:25241]
[ 7576.217631] Modules linked in: osp(OE) mdd(OE) lod(OE) mdt(OE) lfsck(OE) mgs(OE) mgc(OE) osd_zfs(OE) lquota(OE) fid(OE) fld(OE) ksocklnd(OE) ptlrpc(OE) obdclass(OE) lnet(OE) libcfs(OE) rpcsec_gss_krb5
[ 7576.219588] Modules linked in:
[ 7576.219589]  osp(OE)
[ 7576.219589]  mdd(OE)
[ 7576.219590]  lod(OE)
[ 7576.219590]  mdt(OE)
[ 7576.219591]  lfsck(OE)
[ 7576.219591]  mgs(OE)
[ 7576.219592]  mgc(OE)
[ 7576.219592]  osd_zfs(OE)
[ 7576.219593]  lquota(OE)
[ 7576.219593]  fid(OE)
[ 7576.219594]  fld(OE)
[ 7576.219594]  ksocklnd(OE)
[ 7576.219595]  ptlrpc(OE)
[ 7576.219595]  obdclass(OE)
[ 7576.219595]  lnet(OE)
[ 7576.219596]  libcfs(OE)
[ 7576.219597]  rpcsec_gss_krb5
[ 7576.219604]  auth_rpcgss
[ 7576.219609]  nfsv4
[ 7576.219630]  dns_resolver
[ 7576.219633]  nfs
[ 7576.219636]  lockd
[ 7576.219638]  grace
[ 7576.219641]  fscache
[ 7576.219643]  rpcrdma
[ 7576.219646]  ib_isert
[ 7576.219649]  iscsi_target_mod
[ 7576.219651]  ib_iser
[ 7576.219652]  libiscsi
[ 7576.219655]  scsi_transport_iscsi
[ 7576.219657]  ib_srpt
[ 7576.219660]  target_core_mod
[ 7576.219662]  crc_t10dif
[ 7576.219663]  crct10dif_generic
[ 7576.219666]  ib_srp
[ 7576.219668]  scsi_transport_srp
[ 7576.219671]  scsi_tgt
[ 7576.219674]  ib_ipoib
[ 7576.219676]  rdma_ucm
[ 7576.219679]  ib_ucm
[ 7576.219681]  ib_uverbs
[ 7576.219684]  ib_umad
[ 7576.219686]  rdma_cm
[ 7576.219689]  ib_cm
[ 7576.219691]  iw_cm
[ 7576.219694]  sunrpc
[ 7576.219694]  ib_core
[ 7576.219695]  dm_mod
[ 7576.219695]  zfs(POE)
[ 7576.219698]  zunicode(POE)
[ 7576.219701]  zavl(POE)
[ 7576.219702]  icp(POE)
[ 7576.219704]  ppdev
[ 7576.219707]  iosf_mbi
[ 7576.219707]  crc32_pclmul
[ 7576.219708]  ghash_clmulni_intel
[ 7576.219708]  zcommon(POE)
[ 7576.219709]  znvpair(POE)
[ 7576.219710]  spl(OE)
[ 7576.219710]  aesni_intel
[ 7576.219711]  lrw
[ 7576.219713]  gf128mul
[ 7576.219714]  glue_helper
[ 7576.219717]  ablk_helper
[ 7576.219717]  cryptd
[ 7576.219720]  joydev
[ 7576.219722]  pcspkr
[ 7576.219723]  virtio_balloon
[ 7576.219725]  parport_pc
[ 7576.219728]  parport
[ 7576.219730]  i2c_piix4
[ 7576.219733]  ip_tables
[ 7576.219733]  ext4
[ 7576.219734]  mbcache
[ 7576.219735]  jbd2
[ 7576.219737]  virtio_blk
[ 7576.219738]  ata_generic
[ 7576.219739]  pata_acpi
[ 7576.219739]  crct10dif_pclmul
[ 7576.219741]  crct10dif_common
[ 7576.219742]  crc32c_intel
[ 7576.219743]  serio_raw
[ 7576.219743]  floppy
[ 7576.219744]  ata_piix
[ 7576.219744]  8139too
[ 7576.219745]  libata
[ 7576.219745]  virtio_pci
[ 7576.219746]  virtio_ring
[ 7576.219746]  virtio
[ 7576.219747]  8139cp
[ 7576.219747]  mii
[ 7576.219748] 
[ 7576.219753] CPU: 1 PID: 25241 Comm: lfsck Kdump: loaded Tainted: P           OE  ------------   3.10.0-957.1.3.el7_lustre.x86_64 #1
[ 7576.219753] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
[ 7576.219755] task: ffff92b71b165140 ti: ffff92b70f208000 task.ti: ffff92b70f208000
[ 7576.219756] RIP: 0010:[&amp;lt;ffffffffb1769ca6&amp;gt;] 
[ 7576.219781]  [&amp;lt;ffffffffb1769ca6&amp;gt;] native_safe_halt+0x6/0x10
[ 7576.219782] RSP: 0018:ffff92b70f20bcd0  EFLAGS: 00000202
[ 7576.219783] RAX: 0000000000000001 RBX: ffffffffc0a737a8 RCX: 0000000000000001
[ 7576.219784] RDX: 0000000000000002 RSI: 0000000000000001 RDI: ffff92b73fd1b7c4
[ 7576.219785] RBP: ffff92b70f20bcd0 R08: 00000000be162c32 R09: 0000000000000001
[ 7576.219786] R10: 0000000000000000 R11: ffff92b70f20bb5e R12: ffff92b70f20bd88
[ 7576.219786] R13: ffffffffc117e960 R14: 0000000000000090 R15: ffffffffc0a69395
[ 7576.219788] FS:  0000000000000000(0000) GS:ffff92b73fd00000(0000) knlGS:0000000000000000
[ 7576.219789] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 7576.219789] CR2: 00007fdb6db92000 CR3: 0000000079fb4000 CR4: 00000000000606e0
[ 7576.219794] Call Trace:
[ 7576.219874]  [&amp;lt;ffffffffb10698c5&amp;gt;] kvm_wait+0x65/0x70
[ 7576.219900]  [&amp;lt;ffffffffb1111ff6&amp;gt;] __pv_queued_spin_lock_slowpath+0x216/0x2e0
[ 7576.219907]  [&amp;lt;ffffffffb175bfcb&amp;gt;] queued_spin_lock_slowpath+0xb/0xf
[ 7576.219911]  [&amp;lt;ffffffffb176a480&amp;gt;] _raw_spin_lock+0x20/0x30
[ 7576.220076]  [&amp;lt;ffffffffc0bf3af7&amp;gt;] scrub_stop+0x37/0x170 [obdclass]
[ 7576.220144]  [&amp;lt;ffffffffc1060801&amp;gt;] osd_otable_it_fini+0x31/0x180 [osd_zfs]
[ 7576.220195]  [&amp;lt;ffffffffc1106ed4&amp;gt;] lfsck_master_engine+0x4a4/0x1370 [lfsck]
[ 7576.220204]  [&amp;lt;ffffffffb10d67b0&amp;gt;] ? wake_up_state+0x20/0x20
[ 7576.220211]  [&amp;lt;ffffffffc1106a30&amp;gt;] ? lfsck_master_oit_engine+0x1510/0x1510 [lfsck]
[ 7576.220215]  [&amp;lt;ffffffffb10c1c31&amp;gt;] kthread+0xd1/0xe0
[ 7576.220217]  [&amp;lt;ffffffffb10c1b60&amp;gt;] ? insert_kthread_work+0x40/0x40
[ 7576.220222]  [&amp;lt;ffffffffb1774c37&amp;gt;] ret_from_fork_nospec_begin+0x21/0x21
[ 7576.220224]  [&amp;lt;ffffffffb10c1b60&amp;gt;] ? insert_kthread_work+0x40/0x40
[ 7576.220224] Code: 
[ 7576.220225] 65 
[ 7576.220226] 48 
[ 7576.220226] 8b 
[ 7576.220226] 05 
[ 7576.220227] 95 
[ 7576.220227] 33 
[ 7576.220227] 8a 
[ 7576.220227] 4e 
[ 7576.220228] 48 
[ 7576.220228] 89 
[ 7576.220228] c2 
[ 7576.220229] b9 
[ 7576.220229] 48 
[ 7576.220229] 00 
[ 7576.220230] 00 
[ 7576.220230] 00 
[ 7576.220230] 48 
[ 7576.220230] c1 
[ 7576.220231] ea 
[ 7576.220231] 20 
[ 7576.220231] 0f 
[ 7576.220232] 30 
[ 7576.220232] eb 
[ 7576.220232] 85 
[ 7576.220233] 66 
[ 7576.220233] 2e 
[ 7576.220233] 0f 
[ 7576.220234] 1f 
[ 7576.220234] 84 
[ 7576.220234] 00 
[ 7576.220234] 00 
[ 7576.220235] 00 
[ 7576.220235] 00 
[ 7576.220235] 00 
[ 7576.220236] 0f 
[ 7576.220236] 1f 
[ 7576.220236] 00 
[ 7576.220236] 55 
[ 7576.220237] 48 
[ 7576.220237] 89 
[ 7576.220237] e5 
[ 7576.220238] fb 
[ 7576.220238] f4 
[ 7576.220238] &amp;lt;5d&amp;gt; 
[ 7576.220239] c3 
[ 7576.220239] 0f 
[ 7576.220239] 1f 
[ 7576.220240] 84 
[ 7576.220240] 00 
[ 7576.220240] 00 
[ 7576.220240] 00 
[ 7576.220241] 00 
[ 7576.220241] 00 
[ 7576.220241] 55 
[ 7576.220242] 48 
[ 7576.220242] 89 
[ 7576.220242] e5 
[ 7576.220242] f4 
[ 7576.220243] 5d 
[ 7576.220243] c3 
[ 7576.220243] 90 
[ 7576.220244] 90 
[ 7576.220244] 90 
[ 7576.220244] 90 
[ 7576.220244] 
[ 7576.220246] Kernel panic - not syncing: softlockup: hung tasks
[ 7576.220250] CPU: 1 PID: 25241 Comm: lfsck Kdump: loaded Tainted: P           OEL ------------   3.10.0-957.1.3.el7_lustre.x86_64 #1
[ 7576.220252] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2011
[ 7576.220253] Call Trace:
[ 7576.220254]  &amp;lt;IRQ&amp;gt; 
[ 7576.220257]  [&amp;lt;ffffffffb1761e41&amp;gt;] dump_stack+0x19/0x1b
[ 7576.220258]  [&amp;lt;ffffffffb175b550&amp;gt;] panic+0xe8/0x21f
[ 7576.220265]  [&amp;lt;ffffffffb102e8b8&amp;gt;] ? show_regs+0x58/0x210
[ 7576.220271]  [&amp;lt;ffffffffb1149061&amp;gt;] watchdog_timer_fn+0x231/0x240
[ 7576.220273]  [&amp;lt;ffffffffb1148e30&amp;gt;] ? watchdog+0x40/0x40
[ 7576.220277]  [&amp;lt;ffffffffb10c5f93&amp;gt;] __hrtimer_run_queues+0xf3/0x270
[ 7576.220279]  [&amp;lt;ffffffffb10c651f&amp;gt;] hrtimer_interrupt+0xaf/0x1d0
[ 7576.220286]  [&amp;lt;ffffffffb105a2cb&amp;gt;] local_apic_timer_interrupt+0x3b/0x60
[ 7576.220290]  [&amp;lt;ffffffffb17796c3&amp;gt;] smp_apic_timer_interrupt+0x43/0x60
[ 7576.220292]  [&amp;lt;ffffffffb1775df2&amp;gt;] apic_timer_interrupt+0x162/0x170
[ 7576.220293]  &amp;lt;EOI&amp;gt; 
[ 7576.220326]  [&amp;lt;ffffffffc0a737a8&amp;gt;] ? cfs_hash_bd_from_key+0x38/0xb0 [libcfs]
[ 7576.220328]  [&amp;lt;ffffffffb1769ca6&amp;gt;] ? native_safe_halt+0x6/0x10
[ 7576.220330]  [&amp;lt;ffffffffb10698c5&amp;gt;] kvm_wait+0x65/0x70
[ 7576.220332]  [&amp;lt;ffffffffb1111ff6&amp;gt;] __pv_queued_spin_lock_slowpath+0x216/0x2e0
[ 7576.220334]  [&amp;lt;ffffffffb175bfcb&amp;gt;] queued_spin_lock_slowpath+0xb/0xf
[ 7576.220336]  [&amp;lt;ffffffffb176a480&amp;gt;] _raw_spin_lock+0x20/0x30
[ 7576.220353]  [&amp;lt;ffffffffc0bf3af7&amp;gt;] scrub_stop+0x37/0x170 [obdclass]
[ 7576.220360]  [&amp;lt;ffffffffc1060801&amp;gt;] osd_otable_it_fini+0x31/0x180 [osd_zfs]
[ 7576.220367]  [&amp;lt;ffffffffc1106ed4&amp;gt;] lfsck_master_engine+0x4a4/0x1370 [lfsck]
[ 7576.220369]  [&amp;lt;ffffffffb10d67b0&amp;gt;] ? wake_up_state+0x20/0x20
[ 7576.220376]  [&amp;lt;ffffffffc1106a30&amp;gt;] ? lfsck_master_oit_engine+0x1510/0x1510 [lfsck]
[ 7576.220377]  [&amp;lt;ffffffffb10c1c31&amp;gt;] kthread+0xd1/0xe0
[ 7576.220379]  [&amp;lt;ffffffffb10c1b60&amp;gt;] ? insert_kthread_work+0x40/0x40
[ 7576.220381]  [&amp;lt;ffffffffb1774c37&amp;gt;] ret_from_fork_nospec_begin+0x21/0x21
[ 7576.220382]  [&amp;lt;ffffffffb10c1b60&amp;gt;] ? insert_kthread_work+0x40/0x40
&lt;/pre&gt;
&lt;/div&gt;&lt;/div&gt;</description>
                <environment>DNE/ZFS</environment>
        <key id="55599">LU-12278</key>
            <summary>sanity-scrub crashes with &apos;BUG: soft lockup - CPU#0 stuck for 23s! [lfsck:27242]&apos;/ [OI_scrub:27272]</summary>
                <type id="1" iconUrl="https://jira.whamcloud.com/secure/viewavatar?size=xsmall&amp;avatarId=11303&amp;avatarType=issuetype">Bug</type>
                                            <priority id="4" iconUrl="https://jira.whamcloud.com/images/icons/priorities/minor.svg">Minor</priority>
                        <status id="1" iconUrl="https://jira.whamcloud.com/images/icons/statuses/open.png" description="The issue is open and ready for the assignee to start work on it.">Open</status>
                    <statusCategory id="2" key="new" colorName="default"/>
                                    <resolution id="-1">Unresolved</resolution>
                                        <assignee username="wc-triage">WC Triage</assignee>
                                    <reporter username="jamesanunez">James Nunez</reporter>
                        <labels>
                    </labels>
                <created>Thu, 9 May 2019 16:57:14 +0000</created>
                <updated>Fri, 13 Sep 2019 17:22:29 +0000</updated>
                                            <version>Lustre 2.13.0</version>
                    <version>Lustre 2.12.1</version>
                                                        <due></due>
                            <votes>0</votes>
                                    <watches>3</watches>
                                                                            <comments>
                            <comment id="254673" author="jamesanunez" created="Fri, 13 Sep 2019 17:22:29 +0000"  >&lt;p&gt;Another example where sanity-scrub test 15 crashes at &lt;a href=&quot;https://testing.whamcloud.com/test_sets/0004833a-d5dc-11e9-9fc9-52540065bddc&quot; class=&quot;external-link&quot; target=&quot;_blank&quot; rel=&quot;nofollow noopener&quot;&gt;https://testing.whamcloud.com/test_sets/0004833a-d5dc-11e9-9fc9-52540065bddc&lt;/a&gt;&lt;/p&gt;</comment>
                    </comments>
                    <attachments>
                    </attachments>
                <subtasks>
                    </subtasks>
                <customfields>
                                                                                                                                                                                            <customfield id="customfield_10890" key="com.atlassian.jira.plugins.jira-development-integration-plugin:devsummary">
                        <customfieldname>Development</customfieldname>
                        <customfieldvalues>
                            
                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        <customfield id="customfield_10390" key="com.pyxis.greenhopper.jira:gh-lexo-rank">
                        <customfieldname>Rank</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>1|i00g1b:</customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                <customfield id="customfield_10090" key="com.pyxis.greenhopper.jira:gh-global-rank">
                        <customfieldname>Rank (Obsolete)</customfieldname>
                        <customfieldvalues>
                            <customfieldvalue>9223372036854775807</customfieldvalue>
                        </customfieldvalues>
                    </customfield>
                                                                                            <customfield id="customfield_10060" key="com.atlassian.jira.plugin.system.customfieldtypes:select">
                        <customfieldname>Severity</customfieldname>
                        <customfieldvalues>
                                <customfieldvalue key="10022"><![CDATA[3]]></customfieldvalue>

                        </customfieldvalues>
                    </customfield>
                                                                                                                                                                                                                                                                                                                                                        </customfields>
    </item>
</channel>
</rss>