========================================================
WARNING: possible irq lock inversion dependency detected
6.4.0-rc7-next-20230623 #1 Not tainted
--------------------------------------------------------
syz-executor.5/5623 just changed the state of lock:
ffffffff85815818 (blkg_stat_lock){+.-.}-{2:2}, at: __blkcg_rstat_flush.isra.0+0x11f/0x4e0
but this lock was taken by another, HARDIRQ-safe lock in the past:
 (per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu)){-.-.}-{2:2}

and interrupts could create inverse lock ordering between them.

other info that might help us debug this:
 Possible interrupt unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(blkg_stat_lock);
                               local_irq_disable();
                               lock(per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu));
                               lock(blkg_stat_lock);
  <Interrupt>
    lock(per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu));

 *** DEADLOCK ***

2 locks held by syz-executor.5/5623:
 #0: ffffffff8560b5a0 (rcu_callback){....}-{0:0}, at: rcu_core+0x83e/0x28b0
 #1: ffffffff8560b6c0 (rcu_read_lock){....}-{1:2}, at: __blkcg_rstat_flush.isra.0+0x93/0x4e0

the shortest dependencies between 2nd lock and 1st lock:
 -> (per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu)){-.-.}-{2:2} {
    IN-HARDIRQ-W at:
                      lock_acquire+0x19a/0x4c0
                      _raw_spin_lock_irqsave+0x3a/0x60
                      cgroup_rstat_updated+0xcb/0x2e0
                      __cgroup_account_cputime_field+0xa0/0x120
                      account_system_index_time+0x199/0x2c0
                      update_process_times+0x26/0x150
                      tick_sched_handle+0x8e/0x170
                      tick_sched_timer+0xe6/0x110
                      __hrtimer_run_queues+0x17f/0xb60
                      hrtimer_interrupt+0x2ef/0x750
                      __sysvec_apic_timer_interrupt+0xff/0x380
                      sysvec_apic_timer_interrupt+0x69/0x90
                      asm_sysvec_apic_timer_interrupt+0x1a/0x20
                      number+0x698/0xa90
                      vsnprintf+0x877/0x1650
                      seq_printf+0x19d/0x250
                      cgroup_procs_show+0x2f/0x40
                      cgroup_seqfile_show+0x1ad/0x310
                      seq_read_iter+0xbe2/0x1300
                      kernfs_fop_read_iter+0x4d4/0x690
                      vfs_read+0x4b5/0x8f0
                      ksys_read+0x122/0x250
                      do_syscall_64+0x3f/0x90
                      entry_SYSCALL_64_after_hwframe+0x6e/0xd8
    IN-SOFTIRQ-W at:
                      lock_acquire+0x19a/0x4c0
                      _raw_spin_lock_irqsave+0x3a/0x60
                      cgroup_rstat_updated+0xcb/0x2e0
                      __cgroup_account_cputime+0x75/0xc0
                      update_curr+0x350/0x6d0
                      dequeue_task_fair+0x20e/0x14a0
                      load_balance+0xcb4/0x2790
                      rebalance_domains+0x66c/0xc00
                      __do_softirq+0x1b7/0x7d4
                      irq_exit_rcu+0x93/0xc0
                      sysvec_apic_timer_interrupt+0x6e/0x90
                      asm_sysvec_apic_timer_interrupt+0x1a/0x20
                      stack_access_ok+0x26/0x1d0
                      unwind_next_frame+0xa58/0x2490
                      arch_stack_walk+0x87/0xf0
                      stack_trace_save+0x90/0xd0
                      kasan_save_stack+0x22/0x50
                      __kasan_record_aux_stack+0x8e/0xa0
                      __call_rcu_common.constprop.0+0x6a/0xbd0
                      dentry_free+0xc3/0x160
                      __dentry_kill+0x47d/0x5c0
                      dput+0x6c3/0xe60
                      step_into+0x1084/0x2000
                      path_openat+0x478/0x2710
                      do_filp_open+0x1ba/0x410
                      do_sys_openat2+0x164/0x1d0
                      __x64_sys_openat+0x143/0x200
                      do_syscall_64+0x3f/0x90
                      entry_SYSCALL_64_after_hwframe+0x6e/0xd8
    INITIAL USE at:
                      lock_acquire+0x19a/0x4c0
                      _raw_spin_lock_irqsave+0x3a/0x60
                      cgroup_rstat_flush_locked+0x131/0xd80
                      cgroup_rstat_flush+0x37/0x50
                      do_flush_stats+0x97/0xf0
                      flush_memcg_stats_dwork+0x9/0x50
                      process_one_work+0xabf/0x1770
                      worker_thread+0x64f/0x12a0
                      kthread+0x33f/0x440
                      ret_from_fork+0x2c/0x50
  }
  ... key at: [] __key.0+0x0/0x40
  ... acquired at:
    _raw_spin_lock+0x2b/0x40
    __blkcg_rstat_flush.isra.0+0x11f/0x4e0
    blkcg_rstat_flush+0x87/0xb0
    cgroup_rstat_flush_locked+0x706/0xd80
    cgroup_rstat_flush+0x37/0x50
    do_flush_stats+0x97/0xf0
    mem_cgroup_wb_stats+0x3d8/0x4b0
    wb_over_bg_thresh+0x30e/0x6a0
    wb_workfn+0x486/0xed0
    process_one_work+0xabf/0x1770
    worker_thread+0x64f/0x12a0
    kthread+0x33f/0x440
    ret_from_fork+0x2c/0x50

 -> (blkg_stat_lock){+.-.}-{2:2} {
    HARDIRQ-ON-W at:
                      lock_acquire+0x19a/0x4c0
                      _raw_spin_lock+0x2b/0x40
                      __blkcg_rstat_flush.isra.0+0x11f/0x4e0
                      __blkg_release+0xfa/0x3b0
                      rcu_core+0x8c8/0x28b0
                      __do_softirq+0x1b7/0x7d4
                      irq_exit_rcu+0x93/0xc0
                      sysvec_apic_timer_interrupt+0x38/0x90
                      asm_sysvec_apic_timer_interrupt+0x1a/0x20
    IN-SOFTIRQ-W at:
                      lock_acquire+0x19a/0x4c0
                      _raw_spin_lock+0x2b/0x40
                      __blkcg_rstat_flush.isra.0+0x11f/0x4e0
                      __blkg_release+0xfa/0x3b0
                      rcu_core+0x8c8/0x28b0
                      __do_softirq+0x1b7/0x7d4
                      irq_exit_rcu+0x93/0xc0
                      sysvec_apic_timer_interrupt+0x38/0x90
                      asm_sysvec_apic_timer_interrupt+0x1a/0x20
    INITIAL USE at:
                      lock_acquire+0x19a/0x4c0
                      _raw_spin_lock+0x2b/0x40
                      __blkcg_rstat_flush.isra.0+0x11f/0x4e0
                      blkcg_rstat_flush+0x87/0xb0
                      cgroup_rstat_flush_locked+0x706/0xd80
                      cgroup_rstat_flush+0x37/0x50
                      do_flush_stats+0x97/0xf0
                      mem_cgroup_wb_stats+0x3d8/0x4b0
                      wb_over_bg_thresh+0x30e/0x6a0
                      wb_workfn+0x486/0xed0
                      process_one_work+0xabf/0x1770
                      worker_thread+0x64f/0x12a0
                      kthread+0x33f/0x440
                      ret_from_fork+0x2c/0x50
  }
  ... key at: [] blkg_stat_lock+0x18/0x60
  ... acquired at:
    __lock_acquire+0x8b8/0x6340
    lock_acquire+0x19a/0x4c0
    _raw_spin_lock+0x2b/0x40
    __blkcg_rstat_flush.isra.0+0x11f/0x4e0
    __blkg_release+0xfa/0x3b0
    rcu_core+0x8c8/0x28b0
    __do_softirq+0x1b7/0x7d4
    irq_exit_rcu+0x93/0xc0
    sysvec_apic_timer_interrupt+0x38/0x90
    asm_sysvec_apic_timer_interrupt+0x1a/0x20

stack backtrace:
CPU: 0 PID: 5623 Comm: syz-executor.5 Not tainted 6.4.0-rc7-next-20230623 #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014
Call Trace:
 dump_stack_lvl+0x91/0xf0
 print_irq_inversion_bug.part.0+0x3d5/0x570
 mark_lock.part.0+0x900/0x2f50
 __lock_acquire+0x8b8/0x6340
 lock_acquire+0x19a/0x4c0
 _raw_spin_lock+0x2b/0x40
 __blkcg_rstat_flush.isra.0+0x11f/0x4e0
 __blkg_release+0xfa/0x3b0
 rcu_core+0x8c8/0x28b0
 __do_softirq+0x1b7/0x7d4
 irq_exit_rcu+0x93/0xc0
 sysvec_apic_timer_interrupt+0x38/0x90
 asm_sysvec_apic_timer_interrupt+0x1a/0x20
RIP: 0033:0x7f3fc10299ac
Code: 48 39 d1 72 67 49 89 0e 49 89 7e 08 49 8b 50 f8 48 89 ee 4c 89 e3 4c 89 c0 0f 1f 44 00 00 49 89 dd 48 39 cf 72 2f 48 83 e8 08 <48> 39 d1 73 14 0f 1f 80 00 00 00 00 48 8b 50 f8 48 83 e8 08 48 39
RSP: 002b:00007ffd3625f9f0 EFLAGS: 00000206
RAX: 00007f3fc0c1c6f0 RBX: 00007f3fc0c1c6d0 RCX: ffffffff81994efe
RDX: ffffffff81994ee1 RSI: 00007f3fc0c1c6d8 RDI: ffffffff81994efe
RBP: 00007f3fc0c1c5a0 R08: 00007f3fc0c1c770 R09: 0000001b2d02bc4c
R10: 00000000000000c1 R11: 00000000e95be0c5 R12: 00007f3fc0c1c598
R13: 00007f3fc0c1c6d0 R14: 00007f3fc0c1c590 R15: 0000000000000010
loop6: detected capacity change from 0 to 83
EXT4-fs: Ignoring removed i_version option
ext4: Unknown parameter 'dax!áÕ ´ŸX00000000000000032768'
perf: interrupt took too long (2529 > 2500), lowering kernel.perf_event_max_sample_rate to 79000
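
Note on the splat (analysis, not part of the kernel log): blkg_stat_lock is acquired with a plain spin_lock() in __blkcg_rstat_flush(), i.e. with interrupts still enabled, yet the same lock is also reached from softirq context through the RCU callback path (__blkg_release() -> __blkcg_rstat_flush()), and lockdep has previously seen it taken under the hardirq-safe per-CPU cgroup_rstat_cpu_lock. The fragment below is a minimal, hypothetical kernel-C sketch of that lock shape only; the lock roles mirror the splat, but every function name (tick_update, flush_all, release_flush, release_flush_irqsafe) is invented for illustration, and this is neither the real blk-cgroup code nor the upstream fix.

/* Hypothetical sketch of the reported lock shape, not blk-cgroup code.
 * cpu_lock plays the role of cgroup_rstat_cpu_lock (hardirq-safe),
 * stat_lock plays the role of blkg_stat_lock. */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(cpu_lock);
static DEFINE_SPINLOCK(stat_lock);

/* Hardirq path: cpu_lock is taken from the timer interrupt, so lockdep
 * marks it HARDIRQ-safe (cf. cgroup_rstat_updated() in the splat). */
static void tick_update(void)
{
	unsigned long flags;

	spin_lock_irqsave(&cpu_lock, flags);
	/* ... mark per-CPU stats as updated ... */
	spin_unlock_irqrestore(&cpu_lock, flags);
}

/* Flush path: records the cpu_lock -> stat_lock ordering
 * (cf. cgroup_rstat_flush_locked() -> __blkcg_rstat_flush()). */
static void flush_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&cpu_lock, flags);
	spin_lock(&stat_lock);
	/* ... fold per-CPU counters into the global stats ... */
	spin_unlock(&stat_lock);
	spin_unlock_irqrestore(&cpu_lock, flags);
}

/* Release path: takes stat_lock with a plain spin_lock(), i.e. with
 * interrupts enabled (cf. __blkcg_rstat_flush() reached from the RCU
 * softirq via __blkg_release()).  If a timer interrupt lands while this
 * CPU holds stat_lock and another CPU, holding cpu_lock with IRQs off,
 * is spinning on stat_lock in flush_all(), the interrupt spins on
 * cpu_lock and neither CPU makes progress: the CPU0/CPU1 scenario
 * lockdep prints above. */
static void release_flush(void)
{
	spin_lock(&stat_lock);
	/* ... final flush before the object is freed ... */
	spin_unlock(&stat_lock);
}

/* Generic lockdep-friendly convention, shown for contrast only: a lock
 * that can be taken from (soft)irq context is acquired with interrupts
 * disabled everywhere. */
static void release_flush_irqsafe(void)
{
	unsigned long flags;

	spin_lock_irqsave(&stat_lock, flags);
	/* ... */
	spin_unlock_irqrestore(&stat_lock, flags);
}

Whether the real report was resolved by making the acquisition irq-safe, by moving the flush out of the RCU callback, or by some other restructuring is not something this log alone shows.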