========================================================
WARNING: possible irq lock inversion dependency detected
6.4.0-rc7-next-20230623 #1 Not tainted
--------------------------------------------------------
ksoftirqd/1/19 just changed the state of lock:
ffffffff85815818 (blkg_stat_lock){+.-.}-{2:2}, at: __blkcg_rstat_flush.isra.0+0x11f/0x4e0
but this lock was taken by another, HARDIRQ-safe lock in the past:
 (per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu)){-.-.}-{2:2}

and interrupts could create inverse lock ordering between them.

other info that might help us debug this:
 Possible interrupt unsafe locking scenario:

       CPU0                    CPU1
       ----                    ----
  lock(blkg_stat_lock);
                               local_irq_disable();
                               lock(per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu));
                               lock(blkg_stat_lock);
  <Interrupt>
    lock(per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu));

 *** DEADLOCK ***

2 locks held by ksoftirqd/1/19:
 #0: ffffffff8560b5a0 (rcu_callback){....}-{0:0}, at: rcu_core+0x83e/0x28b0
 #1: ffffffff8560b6c0 (rcu_read_lock){....}-{1:2}, at: __blkcg_rstat_flush.isra.0+0x93/0x4e0

the shortest dependencies between 2nd lock and 1st lock:
 -> (per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu)){-.-.}-{2:2} {
    IN-HARDIRQ-W at:
                      lock_acquire+0x19a/0x4c0
                      _raw_spin_lock_irqsave+0x3a/0x60
                      cgroup_rstat_updated+0xcb/0x2e0
                      __cgroup_account_cputime_field+0xa0/0x120
                      account_system_index_time+0x199/0x2c0
                      update_process_times+0x26/0x150
                      tick_sched_handle+0x8e/0x170
                      tick_sched_timer+0xe6/0x110
                      __hrtimer_run_queues+0x17f/0xb60
                      hrtimer_interrupt+0x2ef/0x750
                      __sysvec_apic_timer_interrupt+0xff/0x380
                      sysvec_apic_timer_interrupt+0x69/0x90
                      asm_sysvec_apic_timer_interrupt+0x1a/0x20
                      lock_acquire+0x1c7/0x4c0
                      __is_insn_slot_addr+0x41/0x290
                      kernel_text_address+0x48/0xc0
                      __kernel_text_address+0xd/0x40
                      unwind_get_return_address+0x59/0xa0
                      arch_stack_walk+0x9d/0xf0
                      stack_trace_save+0x90/0xd0
                      kasan_save_stack+0x22/0x50
                      kasan_set_track+0x25/0x30
                      __kasan_kmalloc+0x7f/0x90
                      cgroup_file_open+0x93/0x370
                      kernfs_fop_open+0x94e/0xe90
                      do_dentry_open+0x69b/0x15c0
                      path_openat+0x1859/0x2710
                      do_filp_open+0x1ba/0x410
                      do_sys_openat2+0x164/0x1d0
                      __x64_sys_openat+0x143/0x200
                      do_syscall_64+0x3f/0x90
                      entry_SYSCALL_64_after_hwframe+0x6e/0xd8
    IN-SOFTIRQ-W at:
                      lock_acquire+0x19a/0x4c0
                      _raw_spin_lock_irqsave+0x3a/0x60
                      cgroup_rstat_updated+0xcb/0x2e0
                      __cgroup_account_cputime+0x75/0xc0
                      update_curr+0x350/0x6d0
                      dequeue_task_fair+0x20e/0x14a0
                      load_balance+0xcb4/0x2790
                      rebalance_domains+0x66c/0xc00
                      __do_softirq+0x1b7/0x7d4
                      irq_exit_rcu+0x93/0xc0
                      sysvec_apic_timer_interrupt+0x6e/0x90
                      asm_sysvec_apic_timer_interrupt+0x1a/0x20
                      lock_acquire+0x1c7/0x4c0
                      __is_insn_slot_addr+0x41/0x290
                      kernel_text_address+0x5b/0xc0
                      __kernel_text_address+0xd/0x40
                      unwind_get_return_address+0x59/0xa0
                      arch_stack_walk+0x9d/0xf0
                      stack_trace_save+0x90/0xd0
                      set_track_prepare+0x74/0xd0
                      __create_object+0x3b2/0xc90
                      kmem_cache_alloc+0x20b/0x370
                      vm_area_alloc+0x113/0x200
                      mmap_region+0x430/0x26c0
                      do_mmap+0x833/0xea0
                      vm_mmap_pgoff+0x1a8/0x3b0
                      ksys_mmap_pgoff+0x3af/0x500
                      do_syscall_64+0x3f/0x90
                      entry_SYSCALL_64_after_hwframe+0x6e/0xd8
    INITIAL USE at:
                      lock_acquire+0x19a/0x4c0
                      _raw_spin_lock_irqsave+0x3a/0x60
                      cgroup_rstat_flush_locked+0x131/0xd80
                      cgroup_rstat_flush+0x37/0x50
                      do_flush_stats+0x97/0xf0
                      flush_memcg_stats_dwork+0x9/0x50
                      process_one_work+0xabf/0x1770
                      worker_thread+0x64f/0x12a0
                      kthread+0x33f/0x440
                      ret_from_fork+0x2c/0x50
  }
  ... key at: [] __key.0+0x0/0x40
  ... acquired at:
    _raw_spin_lock+0x2b/0x40
    __blkcg_rstat_flush.isra.0+0x11f/0x4e0
    blkcg_rstat_flush+0x87/0xb0
    cgroup_rstat_flush_locked+0x706/0xd80
    cgroup_rstat_flush+0x37/0x50
    do_flush_stats+0x97/0xf0
    flush_memcg_stats_dwork+0x9/0x50
    process_one_work+0xabf/0x1770
    worker_thread+0x64f/0x12a0
    kthread+0x33f/0x440
    ret_from_fork+0x2c/0x50

 -> (blkg_stat_lock){+.-.}-{2:2} {
    HARDIRQ-ON-W at:
                      lock_acquire+0x19a/0x4c0
                      _raw_spin_lock+0x2b/0x40
                      __blkcg_rstat_flush.isra.0+0x11f/0x4e0
                      __blkg_release+0xfa/0x3b0
                      rcu_core+0x8c8/0x28b0
                      __do_softirq+0x1b7/0x7d4
                      run_ksoftirqd+0x29/0x50
                      smpboot_thread_fn+0x40b/0x930
                      kthread+0x33f/0x440
                      ret_from_fork+0x2c/0x50
    IN-SOFTIRQ-W at:
                      lock_acquire+0x19a/0x4c0
                      _raw_spin_lock+0x2b/0x40
                      __blkcg_rstat_flush.isra.0+0x11f/0x4e0
                      __blkg_release+0xfa/0x3b0
                      rcu_core+0x8c8/0x28b0
                      __do_softirq+0x1b7/0x7d4
                      run_ksoftirqd+0x29/0x50
                      smpboot_thread_fn+0x40b/0x930
                      kthread+0x33f/0x440
                      ret_from_fork+0x2c/0x50
    INITIAL USE at:
                      lock_acquire+0x19a/0x4c0
                      _raw_spin_lock+0x2b/0x40
                      __blkcg_rstat_flush.isra.0+0x11f/0x4e0
                      blkcg_rstat_flush+0x87/0xb0
                      cgroup_rstat_flush_locked+0x706/0xd80
                      cgroup_rstat_flush+0x37/0x50
                      do_flush_stats+0x97/0xf0
                      flush_memcg_stats_dwork+0x9/0x50
                      process_one_work+0xabf/0x1770
                      worker_thread+0x64f/0x12a0
                      kthread+0x33f/0x440
                      ret_from_fork+0x2c/0x50
  }
  ... key at: [] blkg_stat_lock+0x18/0x60
  ... acquired at:
    __lock_acquire+0x8b8/0x6340
    lock_acquire+0x19a/0x4c0
    _raw_spin_lock+0x2b/0x40
    __blkcg_rstat_flush.isra.0+0x11f/0x4e0
    __blkg_release+0xfa/0x3b0
    rcu_core+0x8c8/0x28b0
    __do_softirq+0x1b7/0x7d4
    run_ksoftirqd+0x29/0x50
    smpboot_thread_fn+0x40b/0x930
    kthread+0x33f/0x440
    ret_from_fork+0x2c/0x50

stack backtrace:
CPU: 1 PID: 19 Comm: ksoftirqd/1 Not tainted 6.4.0-rc7-next-20230623 #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.12.0-1 04/01/2014
Call Trace:
 dump_stack_lvl+0x91/0xf0
 print_irq_inversion_bug.part.0+0x3d5/0x570
 mark_lock.part.0+0x900/0x2f50
 __lock_acquire+0x8b8/0x6340
 lock_acquire+0x19a/0x4c0
 _raw_spin_lock+0x2b/0x40
 __blkcg_rstat_flush.isra.0+0x11f/0x4e0
 __blkg_release+0xfa/0x3b0
 rcu_core+0x8c8/0x28b0
 __do_softirq+0x1b7/0x7d4
 run_ksoftirqd+0x29/0x50
 smpboot_thread_fn+0x40b/0x930
 kthread+0x33f/0x440
 ret_from_fork+0x2c/0x50
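
For context, the sketch below is an illustrative reduction of the pattern lockdep is flagging, not the actual block/blk-cgroup.c or kernel/cgroup/rstat.c code or the eventual upstream fix: a lock taken with plain spin_lock() from an RCU callback in softirq context (compare __blkg_release -> __blkcg_rstat_flush in the trace) nested against the IRQ-safe cgroup_rstat_cpu_lock. The lock name and helper functions here are made up for the example.

/* Minimal sketch of the reported inversion (names are hypothetical). */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(stat_lock);	/* stands in for blkg_stat_lock */

/*
 * Flush path reached from softirq context. Taking stat_lock with plain
 * spin_lock() leaves it HARDIRQ-unsafe, so an interrupt that then grabs
 * an IRQ-safe lock which also nests stat_lock can deadlock on this CPU.
 */
static void flush_stats_unsafe(void)
{
	spin_lock(&stat_lock);
	/* ... fold per-CPU counters into the global stats ... */
	spin_unlock(&stat_lock);
}

/*
 * One conventional way to break this class of inversion: acquire the lock
 * with interrupts disabled, so nothing can interrupt the CPU while the
 * lock is held and re-enter the IRQ-safe outer lock on top of it.
 */
static void flush_stats_irqsafe(void)
{
	unsigned long flags;

	spin_lock_irqsave(&stat_lock, flags);
	/* ... fold per-CPU counters into the global stats ... */
	spin_unlock_irqrestore(&stat_lock, flags);
}

Whether the right remedy is the irqsave variant, disabling IRQs around the caller, or restructuring so the flush never runs from softirq context depends on the surrounding code; the sketch only shows why lockdep considers the plain spin_lock() acquisition unsafe here.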