From 56992309ccbe71f4321ddd50ee2f76f91b412c1a Mon Sep 17 00:00:00 2001
From: "Eric W. Biederman"
Date: Thu, 5 Nov 2009 15:38:40 -0800
Subject: sysctl kernel: Remove binary sysctl logic

Now that sys_sysctl is a generic wrapper around /proc/sys, the .ctl_name
and .strategy members of sysctl tables are dead code.  Remove them.

Cc: Ingo Molnar
Cc: Peter Zijlstra
Cc: David Howells
Signed-off-by: Eric W. Biederman
---
 kernel/slow-work.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

(limited to 'kernel/slow-work.c')

diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index 0d31135efbf..0134b15b38d 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -52,7 +52,6 @@ static const int slow_work_max_vslow = 99;
 
 ctl_table slow_work_sysctls[] = {
 	{
-		.ctl_name = CTL_UNNUMBERED,
 		.procname = "min-threads",
 		.data = &slow_work_min_threads,
 		.maxlen = sizeof(unsigned),
@@ -62,7 +61,6 @@ ctl_table slow_work_sysctls[] = {
 		.extra2 = &slow_work_max_threads,
 	},
 	{
-		.ctl_name = CTL_UNNUMBERED,
 		.procname = "max-threads",
 		.data = &slow_work_max_threads,
 		.maxlen = sizeof(unsigned),
@@ -72,7 +70,6 @@ ctl_table slow_work_sysctls[] = {
 		.extra2 = (void *) &slow_work_max_max_threads,
 	},
 	{
-		.ctl_name = CTL_UNNUMBERED,
 		.procname = "vslow-percentage",
 		.data = &vslow_work_proportion,
 		.maxlen = sizeof(unsigned),
@@ -81,7 +78,7 @@ ctl_table slow_work_sysctls[] = {
 		.extra1 = (void *) &slow_work_min_vslow,
 		.extra2 = (void *) &slow_work_max_vslow,
 	},
-	{ .ctl_name = 0 }
+	{}
 };
 #endif
--
cgit v1.2.3-70-g09d2


From 6d4561110a3e9fa742aeec6717248a491dfb1878 Mon Sep 17 00:00:00 2001
From: "Eric W. Biederman"
Date: Mon, 16 Nov 2009 03:11:48 -0800
Subject: sysctl: Drop & in front of every proc_handler.

For consistency, drop the & in front of every proc_handler.  Explicitly
taking the address is unnecessary, and it prevents optimizations like
stubbing the proc_handlers to NULL.

Cc: Alexey Dobriyan
Cc: Ingo Molnar
Cc: Joe Perches
Signed-off-by: Eric W.
Biederman --- arch/arm/kernel/isa.c | 6 +- arch/arm/mach-bcmring/arch.c | 2 +- arch/frv/kernel/pm.c | 8 +- arch/frv/kernel/sysctl.c | 4 +- arch/ia64/kernel/crash.c | 4 +- arch/ia64/kernel/perfmon.c | 8 +- arch/mips/lasat/sysctl.c | 22 +-- arch/powerpc/kernel/idle.c | 2 +- arch/s390/appldata/appldata_base.c | 4 +- arch/s390/kernel/debug.c | 4 +- arch/s390/mm/cmm.c | 6 +- arch/sh/kernel/traps_64.c | 6 +- crypto/proc.c | 2 +- drivers/cdrom/cdrom.c | 12 +- drivers/char/hpet.c | 2 +- drivers/char/ipmi/ipmi_poweroff.c | 2 +- drivers/char/pty.c | 4 +- drivers/char/random.c | 12 +- drivers/char/rtc.c | 2 +- drivers/macintosh/mac_hid.c | 6 +- drivers/md/md.c | 4 +- drivers/misc/sgi-xp/xpc_main.c | 6 +- drivers/net/wireless/arlan-proc.c | 64 ++++----- drivers/parport/procfs.c | 28 ++-- drivers/scsi/scsi_sysctl.c | 2 +- fs/coda/sysctl.c | 6 +- fs/eventpoll.c | 2 +- fs/lockd/svc.c | 12 +- fs/nfs/sysctl.c | 8 +- fs/notify/inotify/inotify_user.c | 6 +- fs/ntfs/sysctl.c | 2 +- fs/ocfs2/stackglue.c | 2 +- fs/quota/dquot.c | 18 +-- fs/xfs/linux-2.6/xfs_sysctl.c | 30 ++--- kernel/slow-work.c | 2 +- kernel/sysctl.c | 266 ++++++++++++++++++------------------- net/rds/ib_sysctl.c | 12 +- net/rds/iw_sysctl.c | 12 +- net/rds/sysctl.c | 10 +- net/sctp/sysctl.c | 2 +- net/sunrpc/sysctl.c | 10 +- net/sunrpc/xprtrdma/svc_rdma.c | 24 ++-- net/sunrpc/xprtrdma/transport.c | 12 +- net/sunrpc/xprtsock.c | 10 +- security/keys/sysctl.c | 10 +- 45 files changed, 339 insertions(+), 339 deletions(-) (limited to 'kernel/slow-work.c') diff --git a/arch/arm/kernel/isa.c b/arch/arm/kernel/isa.c index 738dfcc658c..34648591073 100644 --- a/arch/arm/kernel/isa.c +++ b/arch/arm/kernel/isa.c @@ -26,19 +26,19 @@ static ctl_table ctl_isa_vars[4] = { .data = &isa_membase, .maxlen = sizeof(isa_membase), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "portbase", .data = &isa_portbase, .maxlen = sizeof(isa_portbase), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "portshift", .data = &isa_portshift, .maxlen = sizeof(isa_portshift), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, {} }; diff --git a/arch/arm/mach-bcmring/arch.c b/arch/arm/mach-bcmring/arch.c index f3c11199707..fbe6fa02c88 100644 --- a/arch/arm/mach-bcmring/arch.c +++ b/arch/arm/mach-bcmring/arch.c @@ -58,7 +58,7 @@ static struct ctl_table bcmring_sysctl_warm_reboot[] = { .data = &bcmring_arch_warm_reboot, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec}, + .proc_handler = proc_dointvec}, {} }; diff --git a/arch/frv/kernel/pm.c b/arch/frv/kernel/pm.c index 940d8bb486f..5fa3889d858 100644 --- a/arch/frv/kernel/pm.c +++ b/arch/frv/kernel/pm.c @@ -303,28 +303,28 @@ static struct ctl_table pm_table[] = .data = NULL, .maxlen = 0, .mode = 0200, - .proc_handler = &sysctl_pm_do_suspend, + .proc_handler = sysctl_pm_do_suspend, }, { .procname = "cmode", .data = &clock_cmode_current, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &cmode_procctl, + .proc_handler = cmode_procctl, }, { .procname = "p0", .data = &clock_p0_current, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &p0_procctl, + .proc_handler = p0_procctl, }, { .procname = "cm", .data = &clock_cm_current, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &cm_procctl, + .proc_handler = cm_procctl, }, { } }; diff --git a/arch/frv/kernel/sysctl.c b/arch/frv/kernel/sysctl.c index b30a4f2cda3..035516cb7a9 100644 --- a/arch/frv/kernel/sysctl.c +++ 
b/arch/frv/kernel/sysctl.c @@ -180,7 +180,7 @@ static struct ctl_table frv_table[] = .data = NULL, .maxlen = 0, .mode = 0644, - .proc_handler = &procctl_frv_cachemode, + .proc_handler = procctl_frv_cachemode, }, #ifdef CONFIG_MMU { @@ -188,7 +188,7 @@ static struct ctl_table frv_table[] = .data = NULL, .maxlen = 0, .mode = 0644, - .proc_handler = &procctl_frv_pin_cxnr + .proc_handler = procctl_frv_pin_cxnr }, #endif {} diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c index 7c7d6a6dc0f..b942f4032d7 100644 --- a/arch/ia64/kernel/crash.c +++ b/arch/ia64/kernel/crash.c @@ -243,14 +243,14 @@ static ctl_table kdump_ctl_table[] = { .data = &kdump_on_init, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "kdump_on_fatal_mca", .data = &kdump_on_fatal_mca, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { } }; diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index ca30b364640..402698b6689 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -526,28 +526,28 @@ static ctl_table pfm_ctl_table[]={ .data = &pfm_sysctl.debug, .maxlen = sizeof(int), .mode = 0666, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "debug_ovfl", .data = &pfm_sysctl.debug_ovfl, .maxlen = sizeof(int), .mode = 0666, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "fastctxsw", .data = &pfm_sysctl.fastctxsw, .maxlen = sizeof(int), .mode = 0600, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "expert_mode", .data = &pfm_sysctl.expert_mode, .maxlen = sizeof(int), .mode = 0600, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, {} }; diff --git a/arch/mips/lasat/sysctl.c b/arch/mips/lasat/sysctl.c index 1dbdd76a8c0..14b9a28a4ae 100644 --- a/arch/mips/lasat/sysctl.c +++ b/arch/mips/lasat/sysctl.c @@ -182,28 +182,28 @@ static ctl_table lasat_table[] = { .data = &lasat_board_info.li_cpu_hz, .maxlen = sizeof(int), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "bus-hz", .data = &lasat_board_info.li_bus_hz, .maxlen = sizeof(int), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "bmid", .data = &lasat_board_info.li_bmid, .maxlen = sizeof(int), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "prid", .data = &lasat_board_info.li_prid, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_lasat_prid, + .proc_handler = proc_lasat_prid, . 
}, #ifdef CONFIG_INET { @@ -211,14 +211,14 @@ static ctl_table lasat_table[] = { .data = &lasat_board_info.li_eeprom_info.ipaddr, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_lasat_ip, + .proc_handler = proc_lasat_ip, }, { .procname = "netmask", .data = &lasat_board_info.li_eeprom_info.netmask, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_lasat_ip, + .proc_handler = proc_lasat_ip, }, #endif { @@ -227,14 +227,14 @@ static ctl_table lasat_table[] = { .maxlen = sizeof(lasat_board_info.li_eeprom_info.passwd_hash), .mode = 0600, - .proc_handler = &proc_dolasatstring, + .proc_handler = proc_dolasatstring, }, { .procname = "boot-service", .data = &lasat_boot_to_service, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #ifdef CONFIG_DS1603 { @@ -242,7 +242,7 @@ static ctl_table lasat_table[] = { .data = &rtctmp, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dolasatrtc, + .proc_handler = proc_dolasatrtc, }, #endif { @@ -250,14 +250,14 @@ static ctl_table lasat_table[] = { .data = &lasat_board_info.li_namestr, .maxlen = sizeof(lasat_board_info.li_namestr), .mode = 0444, - .proc_handler = &proc_dostring, + .proc_handler = proc_dostring, }, { .procname = "typestr", .data = &lasat_board_info.li_typestr, .maxlen = sizeof(lasat_board_info.li_typestr), .mode = 0444, - .proc_handler = &proc_dostring, + .proc_handler = proc_dostring, }, {} }; diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index cece9e2cc5e..049dda60e47 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c @@ -114,7 +114,7 @@ static ctl_table powersave_nap_ctl_table[]={ .data = &powersave_nap, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, {} }; diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index b55fd7ed1c3..495589950dc 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c @@ -61,12 +61,12 @@ static struct ctl_table appldata_table[] = { { .procname = "timer", .mode = S_IRUGO | S_IWUSR, - .proc_handler = &appldata_timer_handler, + .proc_handler = appldata_timer_handler, }, { .procname = "interval", .mode = S_IRUGO | S_IWUSR, - .proc_handler = &appldata_interval_handler, + .proc_handler = appldata_interval_handler, }, { }, }; diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index adf11260260..071c81f179e 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -897,14 +897,14 @@ static struct ctl_table s390dbf_table[] = { .data = &debug_stoppable, .maxlen = sizeof(int), .mode = S_IRUGO | S_IWUSR, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "debug_active", .data = &debug_active, .maxlen = sizeof(int), .mode = S_IRUGO | S_IWUSR, - .proc_handler = &s390dbf_procactive, + .proc_handler = s390dbf_procactive, }, { } }; diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index dab3e4a7582..ff58779bf7e 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c @@ -343,17 +343,17 @@ static struct ctl_table cmm_table[] = { { .procname = "cmm_pages", .mode = 0644, - .proc_handler = &cmm_pages_handler, + .proc_handler = cmm_pages_handler, }, { .procname = "cmm_timed_pages", .mode = 0644, - .proc_handler = &cmm_pages_handler, + .proc_handler = cmm_pages_handler, }, { .procname = "cmm_timeout", .mode = 0644, - .proc_handler = &cmm_timeout_handler, + .proc_handler = cmm_timeout_handler, }, { } }; diff --git 
a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c index 080c8ee2d86..75c0cbe2eda 100644 --- a/arch/sh/kernel/traps_64.c +++ b/arch/sh/kernel/traps_64.c @@ -881,21 +881,21 @@ static ctl_table unaligned_table[] = { .data = &kernel_mode_unaligned_fixup_count, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .procname = "user_reports", .data = &user_mode_unaligned_fixup_count, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .procname = "user_enable", .data = &user_mode_unaligned_fixup_enable, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec}, + .proc_handler = proc_dointvec}, {} }; diff --git a/crypto/proc.c b/crypto/proc.c index fe95975fc53..1c38733c224 100644 --- a/crypto/proc.c +++ b/crypto/proc.c @@ -29,7 +29,7 @@ static struct ctl_table crypto_sysctl_table[] = { .data = &fips_enabled, .maxlen = sizeof(int), .mode = 0444, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, {} }; diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index 1872b6dc168..e3749d0ba68 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -3557,42 +3557,42 @@ static ctl_table cdrom_table[] = { .data = &cdrom_sysctl_settings.info, .maxlen = CDROM_STR_SIZE, .mode = 0444, - .proc_handler = &cdrom_sysctl_info, + .proc_handler = cdrom_sysctl_info, }, { .procname = "autoclose", .data = &cdrom_sysctl_settings.autoclose, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &cdrom_sysctl_handler, + .proc_handler = cdrom_sysctl_handler, }, { .procname = "autoeject", .data = &cdrom_sysctl_settings.autoeject, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &cdrom_sysctl_handler, + .proc_handler = cdrom_sysctl_handler, }, { .procname = "debug", .data = &cdrom_sysctl_settings.debug, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &cdrom_sysctl_handler, + .proc_handler = cdrom_sysctl_handler, }, { .procname = "lock", .data = &cdrom_sysctl_settings.lock, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &cdrom_sysctl_handler, + .proc_handler = cdrom_sysctl_handler, }, { .procname = "check_media", .data = &cdrom_sysctl_settings.check, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &cdrom_sysctl_handler + .proc_handler = cdrom_sysctl_handler }, { } }; diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index a05a6112240..e481c5938ba 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -679,7 +679,7 @@ static ctl_table hpet_table[] = { .data = &hpet_max_freq, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, {} }; diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c index aa39722696d..0dec5da000e 100644 --- a/drivers/char/ipmi/ipmi_poweroff.c +++ b/drivers/char/ipmi/ipmi_poweroff.c @@ -664,7 +664,7 @@ static ctl_table ipmi_table[] = { .data = &poweroff_powercycle, .maxlen = sizeof(poweroff_powercycle), .mode = 0644, - .proc_handler = &proc_dointvec }, + .proc_handler = proc_dointvec }, { } }; diff --git a/drivers/char/pty.c b/drivers/char/pty.c index d516e9ced3c..d86c0bc05c1 100644 --- a/drivers/char/pty.c +++ b/drivers/char/pty.c @@ -435,7 +435,7 @@ static struct ctl_table pty_table[] = { .maxlen = sizeof(int), .mode = 0644, .data = &pty_limit, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &pty_limit_min, .extra2 = &pty_limit_max, }, { @@ -443,7 +443,7 @@ static struct ctl_table 
pty_table[] = { .maxlen = sizeof(int), .mode = 0444, .data = &pty_count, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, {} }; diff --git a/drivers/char/random.c b/drivers/char/random.c index bcf680f9ff5..dcd08635cf1 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1264,13 +1264,13 @@ ctl_table random_table[] = { .data = &sysctl_poolsize, .maxlen = sizeof(int), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "entropy_avail", .maxlen = sizeof(int), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, .data = &input_pool.entropy_count, }, { @@ -1278,7 +1278,7 @@ ctl_table random_table[] = { .data = &random_read_wakeup_thresh, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &min_read_thresh, .extra2 = &max_read_thresh, }, @@ -1287,7 +1287,7 @@ ctl_table random_table[] = { .data = &random_write_wakeup_thresh, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &min_write_thresh, .extra2 = &max_write_thresh, }, @@ -1296,13 +1296,13 @@ ctl_table random_table[] = { .data = &sysctl_bootid, .maxlen = 16, .mode = 0444, - .proc_handler = &proc_do_uuid, + .proc_handler = proc_do_uuid, }, { .procname = "uuid", .maxlen = 16, .mode = 0444, - .proc_handler = &proc_do_uuid, + .proc_handler = proc_do_uuid, }, { } }; diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c index 37bfe23c218..95acb8c880f 100644 --- a/drivers/char/rtc.c +++ b/drivers/char/rtc.c @@ -286,7 +286,7 @@ static ctl_table rtc_table[] = { .data = &rtc_max_user_freq, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { } }; diff --git a/drivers/macintosh/mac_hid.c b/drivers/macintosh/mac_hid.c index 2dd29b42a49..7b4ef5bb556 100644 --- a/drivers/macintosh/mac_hid.c +++ b/drivers/macintosh/mac_hid.c @@ -31,21 +31,21 @@ static ctl_table mac_hid_files[] = { .data = &mouse_emulate_buttons, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "mouse_button2_keycode", .data = &mouse_button2_keycode, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "mouse_button3_keycode", .data = &mouse_button3_keycode, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { } }; diff --git a/drivers/md/md.c b/drivers/md/md.c index 1d529551e94..1d7b9a23c76 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -102,14 +102,14 @@ static ctl_table raid_table[] = { .data = &sysctl_speed_limit_min, .maxlen = sizeof(int), .mode = S_IRUGO|S_IWUSR, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "speed_limit_max", .data = &sysctl_speed_limit_max, .maxlen = sizeof(int), .mode = S_IRUGO|S_IWUSR, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { } }; diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index ce98b9373be..832ed4c88cf 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -93,7 +93,7 @@ static ctl_table xpc_sys_xpc_hb_dir[] = { .data = &xpc_hb_interval, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &xpc_hb_min_interval, .extra2 = &xpc_hb_max_interval}, { @@ 
-101,7 +101,7 @@ static ctl_table xpc_sys_xpc_hb_dir[] = { .data = &xpc_hb_check_interval, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &xpc_hb_check_min_interval, .extra2 = &xpc_hb_check_max_interval}, {} @@ -116,7 +116,7 @@ static ctl_table xpc_sys_xpc_dir[] = { .data = &xpc_disengage_timelimit, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &xpc_disengage_min_timelimit, .extra2 = &xpc_disengage_max_timelimit}, {} diff --git a/drivers/net/wireless/arlan-proc.c b/drivers/net/wireless/arlan-proc.c index 66a0b330b75..b22983e6c0c 100644 --- a/drivers/net/wireless/arlan-proc.c +++ b/drivers/net/wireless/arlan-proc.c @@ -819,15 +819,15 @@ static int arlan_sysctl_reset(ctl_table * ctl, int write, #define CTBLN(card,nam) \ { .procname = #nam,\ .data = &(arlan_conf[card].nam),\ - .maxlen = sizeof(int), .mode = 0600, .proc_handler = &proc_dointvec} + .maxlen = sizeof(int), .mode = 0600, .proc_handler = proc_dointvec} #ifdef ARLAN_DEBUGGING #define ARLAN_PROC_DEBUG_ENTRIES \ { .procname = "entry_exit_debug",\ .data = &arlan_entry_and_exit_debug,\ - .maxlen = sizeof(int), .mode = 0600, .proc_handler = &proc_dointvec},\ + .maxlen = sizeof(int), .mode = 0600, .proc_handler = proc_dointvec},\ { .procname = "debug", .data = &arlan_debug,\ - .maxlen = sizeof(int), .mode = 0600, .proc_handler = &proc_dointvec}, + .maxlen = sizeof(int), .mode = 0600, .proc_handler = proc_dointvec}, #else #define ARLAN_PROC_DEBUG_ENTRIES #endif @@ -864,7 +864,7 @@ static int arlan_sysctl_reset(ctl_table * ctl, int write, CTBLN(cardNo, channelSet), \ { .procname = "name",\ .data = arlan_conf[cardNo].siteName,\ - .maxlen = 16, .mode = 0600, .proc_handler = &proc_dostring},\ + .maxlen = 16, .mode = 0600, .proc_handler = proc_dostring},\ CTBLN(cardNo,waitTime),\ CTBLN(cardNo,lParameter),\ CTBLN(cardNo,_15),\ @@ -906,35 +906,35 @@ static ctl_table arlan_conf_table0[] = .data = &arlan_drive_info, .maxlen = ARLAN_STR_SIZE, .mode = 0400, - .proc_handler = &arlan_sysctl_infotxRing, + .proc_handler = arlan_sysctl_infotxRing, }, { .procname = "arlan0-rxRing", .data = &arlan_drive_info, .maxlen = ARLAN_STR_SIZE, .mode = 0400, - .proc_handler = &arlan_sysctl_inforxRing, + .proc_handler = arlan_sysctl_inforxRing, }, { .procname = "arlan0-18", .data = &arlan_drive_info, .maxlen = ARLAN_STR_SIZE, .mode = 0400, - .proc_handler = &arlan_sysctl_info18, + .proc_handler = arlan_sysctl_info18, }, { .procname = "arlan0-ring", .data = &arlan_drive_info, .maxlen = ARLAN_STR_SIZE, .mode = 0400, - .proc_handler = &arlan_sysctl_info161719, + .proc_handler = arlan_sysctl_info161719, }, { .procname = "arlan0-shm-cpy", .data = &arlan_drive_info, .maxlen = ARLAN_STR_SIZE, .mode = 0400, - .proc_handler = &arlan_sysctl_info, + .proc_handler = arlan_sysctl_info, }, #endif { @@ -942,14 +942,14 @@ static ctl_table arlan_conf_table0[] = .data = &conf_reset_result, .maxlen = 100, .mode = 0400, - .proc_handler = &arlan_configure + .proc_handler = arlan_configure }, { .procname = "reset0", .data = &conf_reset_result, .maxlen = 100, .mode = 0400, - .proc_handler = &arlan_sysctl_reset, + .proc_handler = arlan_sysctl_reset, }, { } }; @@ -965,35 +965,35 @@ static ctl_table arlan_conf_table1[] = .data = &arlan_drive_info, .maxlen = ARLAN_STR_SIZE, .mode = 0400, - .proc_handler = &arlan_sysctl_infotxRing, + .proc_handler = arlan_sysctl_infotxRing, }, { .procname = "arlan1-rxRing", .data = &arlan_drive_info, 
.maxlen = ARLAN_STR_SIZE, .mode = 0400, - .proc_handler = &arlan_sysctl_inforxRing, + .proc_handler = arlan_sysctl_inforxRing, }, { .procname = "arlan1-18", .data = &arlan_drive_info, .maxlen = ARLAN_STR_SIZE, .mode = 0400, - .proc_handler = &arlan_sysctl_info18, + .proc_handler = arlan_sysctl_info18, }, { .procname = "arlan1-ring", .data = &arlan_drive_info, .maxlen = ARLAN_STR_SIZE, .mode = 0400, - .proc_handler = &arlan_sysctl_info161719, + .proc_handler = arlan_sysctl_info161719, }, { .procname = "arlan1-shm-cpy", .data = &arlan_drive_info, .maxlen = ARLAN_STR_SIZE, .mode = 0400, - .proc_handler = &arlan_sysctl_info, + .proc_handler = arlan_sysctl_info, }, #endif { @@ -1001,14 +1001,14 @@ static ctl_table arlan_conf_table1[] = .data = &conf_reset_result, .maxlen = 100, .mode = 0400, - .proc_handler = &arlan_configure, + .proc_handler = arlan_configure, }, { .procname = "reset1", .data = &conf_reset_result, .maxlen = 100, .mode = 0400, - .proc_handler = &arlan_sysctl_reset, + .proc_handler = arlan_sysctl_reset, }, { } }; @@ -1024,35 +1024,35 @@ static ctl_table arlan_conf_table2[] = .data = &arlan_drive_info, .maxlen = ARLAN_STR_SIZE, .mode = 0400, - .proc_handler = &arlan_sysctl_infotxRing, + .proc_handler = arlan_sysctl_infotxRing, }, { .procname = "arlan2-rxRing", .data = &arlan_drive_info, .maxlen = ARLAN_STR_SIZE, .mode = 0400, - .proc_handler = &arlan_sysctl_inforxRing, + .proc_handler = arlan_sysctl_inforxRing, }, { .procname = "arlan2-18", .data = &arlan_drive_info, .maxlen = ARLAN_STR_SIZE, .mode = 0400, - .proc_handler = &arlan_sysctl_info18, + .proc_handler = arlan_sysctl_info18, }, { .procname = "arlan2-ring", .data = &arlan_drive_info, .maxlen = ARLAN_STR_SIZE, .mode = 0400, - .proc_handler = &arlan_sysctl_info161719, + .proc_handler = arlan_sysctl_info161719, }, { .procname = "arlan2-shm-cpy", .data = &arlan_drive_info, .maxlen = ARLAN_STR_SIZE, .mode = 0400, - .proc_handler = &arlan_sysctl_info, + .proc_handler = arlan_sysctl_info, }, #endif { @@ -1060,14 +1060,14 @@ static ctl_table arlan_conf_table2[] = .data = &conf_reset_result, .maxlen = 100, .mode = 0400, - .proc_handler = &arlan_configure, + .proc_handler = arlan_configure, }, { .procname = "reset2", .data = &conf_reset_result, .maxlen = 100, .mode = 0400, - .proc_handler = &arlan_sysctl_reset, + .proc_handler = arlan_sysctl_reset, }, { } }; @@ -1083,35 +1083,35 @@ static ctl_table arlan_conf_table3[] = .data = &arlan_drive_info, .maxlen = ARLAN_STR_SIZE, .mode = 0400, - .proc_handler = &arlan_sysctl_infotxRing, + .proc_handler = arlan_sysctl_infotxRing, }, { .procname = "arlan3-rxRing", .data = &arlan_drive_info, .maxlen = ARLAN_STR_SIZE, .mode = 0400, - .proc_handler = &arlan_sysctl_inforxRing, + .proc_handler = arlan_sysctl_inforxRing, }, { .procname = "arlan3-18", .data = &arlan_drive_info, .maxlen = ARLAN_STR_SIZE, .mode = 0400, - .proc_handler = &arlan_sysctl_info18, + .proc_handler = arlan_sysctl_info18, }, { .procname = "arlan3-ring", .data = &arlan_drive_info, .maxlen = ARLAN_STR_SIZE, .mode = 0400, - .proc_handler = &arlan_sysctl_info161719, + .proc_handler = arlan_sysctl_info161719, }, { .procname = "arlan3-shm-cpy", .data = &arlan_drive_info, .maxlen = ARLAN_STR_SIZE, .mode = 0400, - .proc_handler = &arlan_sysctl_info, + .proc_handler = arlan_sysctl_info, }, #endif { @@ -1119,14 +1119,14 @@ static ctl_table arlan_conf_table3[] = .data = &conf_reset_result, .maxlen = 100, .mode = 0400, - .proc_handler = &arlan_configure, + .proc_handler = arlan_configure, }, { .procname = "reset3", .data = 
&conf_reset_result, .maxlen = 100, .mode = 0400, - .proc_handler = &arlan_sysctl_reset, + .proc_handler = arlan_sysctl_reset, }, { } }; diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c index f808bdbf177..3f56bc086cb 100644 --- a/drivers/parport/procfs.c +++ b/drivers/parport/procfs.c @@ -270,7 +270,7 @@ static const struct parport_sysctl_table parport_sysctl_template = { .data = NULL, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = (void*) &parport_min_spintime_value, .extra2 = (void*) &parport_max_spintime_value }, @@ -279,28 +279,28 @@ static const struct parport_sysctl_table parport_sysctl_template = { .data = NULL, .maxlen = 0, .mode = 0444, - .proc_handler = &do_hardware_base_addr + .proc_handler = do_hardware_base_addr }, { .procname = "irq", .data = NULL, .maxlen = 0, .mode = 0444, - .proc_handler = &do_hardware_irq + .proc_handler = do_hardware_irq }, { .procname = "dma", .data = NULL, .maxlen = 0, .mode = 0444, - .proc_handler = &do_hardware_dma + .proc_handler = do_hardware_dma }, { .procname = "modes", .data = NULL, .maxlen = 0, .mode = 0444, - .proc_handler = &do_hardware_modes + .proc_handler = do_hardware_modes }, PARPORT_DEVICES_ROOT_DIR, #ifdef CONFIG_PARPORT_1284 @@ -309,35 +309,35 @@ static const struct parport_sysctl_table parport_sysctl_template = { .data = NULL, .maxlen = 0, .mode = 0444, - .proc_handler = &do_autoprobe + .proc_handler = do_autoprobe }, { .procname = "autoprobe0", .data = NULL, .maxlen = 0, .mode = 0444, - .proc_handler = &do_autoprobe + .proc_handler = do_autoprobe }, { .procname = "autoprobe1", .data = NULL, .maxlen = 0, .mode = 0444, - .proc_handler = &do_autoprobe + .proc_handler = do_autoprobe }, { .procname = "autoprobe2", .data = NULL, .maxlen = 0, .mode = 0444, - .proc_handler = &do_autoprobe + .proc_handler = do_autoprobe }, { .procname = "autoprobe3", .data = NULL, .maxlen = 0, .mode = 0444, - .proc_handler = &do_autoprobe + .proc_handler = do_autoprobe }, #endif /* IEEE 1284 support */ {} @@ -348,7 +348,7 @@ static const struct parport_sysctl_table parport_sysctl_template = { .data = NULL, .maxlen = 0, .mode = 0444, - .proc_handler = &do_active_device + .proc_handler = do_active_device }, {} }, @@ -386,7 +386,7 @@ parport_device_sysctl_template = { .data = NULL, .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = &proc_doulongvec_ms_jiffies_minmax, + .proc_handler = proc_doulongvec_ms_jiffies_minmax, .extra1 = (void*) &parport_min_timeslice_value, .extra2 = (void*) &parport_max_timeslice_value }, @@ -437,7 +437,7 @@ parport_default_sysctl_table = { .data = &parport_default_timeslice, .maxlen = sizeof(parport_default_timeslice), .mode = 0644, - .proc_handler = &proc_doulongvec_ms_jiffies_minmax, + .proc_handler = proc_doulongvec_ms_jiffies_minmax, .extra1 = (void*) &parport_min_timeslice_value, .extra2 = (void*) &parport_max_timeslice_value }, @@ -446,7 +446,7 @@ parport_default_sysctl_table = { .data = &parport_default_spintime, .maxlen = sizeof(parport_default_spintime), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = (void*) &parport_min_spintime_value, .extra2 = (void*) &parport_max_spintime_value }, diff --git a/drivers/scsi/scsi_sysctl.c b/drivers/scsi/scsi_sysctl.c index 42c31bee711..2b6b93f7d8e 100644 --- a/drivers/scsi/scsi_sysctl.c +++ b/drivers/scsi/scsi_sysctl.c @@ -17,7 +17,7 @@ static ctl_table scsi_table[] = { .data = &scsi_logging_level, .maxlen = 
sizeof(scsi_logging_level), .mode = 0644, - .proc_handler = &proc_dointvec }, + .proc_handler = proc_dointvec }, { } }; diff --git a/fs/coda/sysctl.c b/fs/coda/sysctl.c index 354c050d426..c6405ce3c50 100644 --- a/fs/coda/sysctl.c +++ b/fs/coda/sysctl.c @@ -21,21 +21,21 @@ static ctl_table coda_table[] = { .data = &coda_timeout, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .procname = "hard", .data = &coda_hard, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, { .procname = "fake_statfs", .data = &coda_fake_statfs, .maxlen = sizeof(int), .mode = 0600, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, {} }; diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 70aa66c96c5..366c503f965 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -251,7 +251,7 @@ ctl_table epoll_table[] = { .data = &max_user_watches, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &zero, }, { } diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index 307ed4c3e1f..e50cfa3d965 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -375,7 +375,7 @@ static ctl_table nlm_sysctls[] = { .data = &nlm_grace_period, .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = &proc_doulongvec_minmax, + .proc_handler = proc_doulongvec_minmax, .extra1 = (unsigned long *) &nlm_grace_period_min, .extra2 = (unsigned long *) &nlm_grace_period_max, }, @@ -384,7 +384,7 @@ static ctl_table nlm_sysctls[] = { .data = &nlm_timeout, .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = &proc_doulongvec_minmax, + .proc_handler = proc_doulongvec_minmax, .extra1 = (unsigned long *) &nlm_timeout_min, .extra2 = (unsigned long *) &nlm_timeout_max, }, @@ -393,7 +393,7 @@ static ctl_table nlm_sysctls[] = { .data = &nlm_udpport, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = (int *) &nlm_port_min, .extra2 = (int *) &nlm_port_max, }, @@ -402,7 +402,7 @@ static ctl_table nlm_sysctls[] = { .data = &nlm_tcpport, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = (int *) &nlm_port_min, .extra2 = (int *) &nlm_port_max, }, @@ -411,14 +411,14 @@ static ctl_table nlm_sysctls[] = { .data = &nsm_use_hostnames, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "nsm_local_state", .data = &nsm_local_state, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { } }; diff --git a/fs/nfs/sysctl.c b/fs/nfs/sysctl.c index af51e6af207..70e1fbbaaea 100644 --- a/fs/nfs/sysctl.c +++ b/fs/nfs/sysctl.c @@ -26,7 +26,7 @@ static ctl_table nfs_cb_sysctls[] = { .data = &nfs_callback_set_tcpport, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = (int *)&nfs_set_port_min, .extra2 = (int *)&nfs_set_port_max, }, @@ -35,7 +35,7 @@ static ctl_table nfs_cb_sysctls[] = { .data = &nfs_idmap_cache_timeout, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, + .proc_handler = proc_dointvec_jiffies, }, #endif { @@ -43,14 +43,14 @@ static ctl_table nfs_cb_sysctls[] = { .data = &nfs_mountpoint_expiry_timeout, .maxlen = sizeof(nfs_mountpoint_expiry_timeout), .mode = 0644, - .proc_handler = 
&proc_dointvec_jiffies, + .proc_handler = proc_dointvec_jiffies, }, { .procname = "nfs_congestion_kb", .data = &nfs_congestion_kb, .maxlen = sizeof(nfs_congestion_kb), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { } }; diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index 5275921ed1c..1d1d1a2765d 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -73,7 +73,7 @@ ctl_table inotify_table[] = { .data = &inotify_max_user_instances, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &zero, }, { @@ -81,7 +81,7 @@ ctl_table inotify_table[] = { .data = &inotify_max_user_watches, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &zero, }, { @@ -89,7 +89,7 @@ ctl_table inotify_table[] = { .data = &inotify_max_queued_events, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &zero }, { } diff --git a/fs/ntfs/sysctl.c b/fs/ntfs/sysctl.c index 99612ea690c..79a89184cb5 100644 --- a/fs/ntfs/sysctl.c +++ b/fs/ntfs/sysctl.c @@ -40,7 +40,7 @@ static ctl_table ntfs_sysctls[] = { .data = &debug_msgs, /* Data pointer and size. */ .maxlen = sizeof(debug_msgs), .mode = 0644, /* Mode, proc handler. */ - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, {} }; diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c index ed12c116147..f3df0baa9a4 100644 --- a/fs/ocfs2/stackglue.c +++ b/fs/ocfs2/stackglue.c @@ -624,7 +624,7 @@ static ctl_table ocfs2_nm_table[] = { .data = ocfs2_hb_ctl_path, .maxlen = OCFS2_MAX_HB_CTL_PATH, .mode = 0644, - .proc_handler = &proc_dostring, + .proc_handler = proc_dostring, }, { } }; diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 60940f8709d..f0eb200d8f8 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -2477,56 +2477,56 @@ static ctl_table fs_dqstats_table[] = { .data = &dqstats.lookups, .maxlen = sizeof(int), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "drops", .data = &dqstats.drops, .maxlen = sizeof(int), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "reads", .data = &dqstats.reads, .maxlen = sizeof(int), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "writes", .data = &dqstats.writes, .maxlen = sizeof(int), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "cache_hits", .data = &dqstats.cache_hits, .maxlen = sizeof(int), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "allocated_dquots", .data = &dqstats.allocated_dquots, .maxlen = sizeof(int), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "free_dquots", .data = &dqstats.free_dquots, .maxlen = sizeof(int), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "syncs", .data = &dqstats.syncs, .maxlen = sizeof(int), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #ifdef CONFIG_PRINT_QUOTA_WARNING { @@ -2534,7 +2534,7 @@ static ctl_table fs_dqstats_table[] = { .data = &flag_print_warnings, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, 
}, #endif { }, diff --git a/fs/xfs/linux-2.6/xfs_sysctl.c b/fs/xfs/linux-2.6/xfs_sysctl.c index 6880147cafa..7bb5092d6ae 100644 --- a/fs/xfs/linux-2.6/xfs_sysctl.c +++ b/fs/xfs/linux-2.6/xfs_sysctl.c @@ -59,7 +59,7 @@ static ctl_table xfs_table[] = { .data = &xfs_params.sgid_inherit.val, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &xfs_params.sgid_inherit.min, .extra2 = &xfs_params.sgid_inherit.max }, @@ -68,7 +68,7 @@ static ctl_table xfs_table[] = { .data = &xfs_params.symlink_mode.val, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &xfs_params.symlink_mode.min, .extra2 = &xfs_params.symlink_mode.max }, @@ -77,7 +77,7 @@ static ctl_table xfs_table[] = { .data = &xfs_params.panic_mask.val, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &xfs_params.panic_mask.min, .extra2 = &xfs_params.panic_mask.max }, @@ -87,7 +87,7 @@ static ctl_table xfs_table[] = { .data = &xfs_params.error_level.val, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &xfs_params.error_level.min, .extra2 = &xfs_params.error_level.max }, @@ -96,7 +96,7 @@ static ctl_table xfs_table[] = { .data = &xfs_params.syncd_timer.val, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &xfs_params.syncd_timer.min, .extra2 = &xfs_params.syncd_timer.max }, @@ -105,7 +105,7 @@ static ctl_table xfs_table[] = { .data = &xfs_params.inherit_sync.val, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &xfs_params.inherit_sync.min, .extra2 = &xfs_params.inherit_sync.max }, @@ -114,7 +114,7 @@ static ctl_table xfs_table[] = { .data = &xfs_params.inherit_nodump.val, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &xfs_params.inherit_nodump.min, .extra2 = &xfs_params.inherit_nodump.max }, @@ -123,7 +123,7 @@ static ctl_table xfs_table[] = { .data = &xfs_params.inherit_noatim.val, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &xfs_params.inherit_noatim.min, .extra2 = &xfs_params.inherit_noatim.max }, @@ -132,7 +132,7 @@ static ctl_table xfs_table[] = { .data = &xfs_params.xfs_buf_timer.val, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &xfs_params.xfs_buf_timer.min, .extra2 = &xfs_params.xfs_buf_timer.max }, @@ -141,7 +141,7 @@ static ctl_table xfs_table[] = { .data = &xfs_params.xfs_buf_age.val, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &xfs_params.xfs_buf_age.min, .extra2 = &xfs_params.xfs_buf_age.max }, @@ -150,7 +150,7 @@ static ctl_table xfs_table[] = { .data = &xfs_params.inherit_nosym.val, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &xfs_params.inherit_nosym.min, .extra2 = &xfs_params.inherit_nosym.max }, @@ -159,7 +159,7 @@ static ctl_table xfs_table[] = { .data = &xfs_params.rotorstep.val, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = 
&proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &xfs_params.rotorstep.min, .extra2 = &xfs_params.rotorstep.max }, @@ -168,7 +168,7 @@ static ctl_table xfs_table[] = { .data = &xfs_params.inherit_nodfrg.val, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &xfs_params.inherit_nodfrg.min, .extra2 = &xfs_params.inherit_nodfrg.max }, @@ -177,7 +177,7 @@ static ctl_table xfs_table[] = { .data = &xfs_params.fstrm_timer.val, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &xfs_params.fstrm_timer.min, .extra2 = &xfs_params.fstrm_timer.max, }, @@ -188,7 +188,7 @@ static ctl_table xfs_table[] = { .data = &xfs_params.stats_clear.val, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &xfs_stats_clear_proc_handler, + .proc_handler = xfs_stats_clear_proc_handler, .extra1 = &xfs_params.stats_clear.min, .extra2 = &xfs_params.stats_clear.max }, diff --git a/kernel/slow-work.c b/kernel/slow-work.c index 0134b15b38d..ebd9a8317a7 100644 --- a/kernel/slow-work.c +++ b/kernel/slow-work.c @@ -74,7 +74,7 @@ ctl_table slow_work_sysctls[] = { .data = &vslow_work_proportion, .maxlen = sizeof(unsigned), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = (void *) &slow_work_min_vslow, .extra2 = (void *) &slow_work_max_vslow, }, diff --git a/kernel/sysctl.c b/kernel/sysctl.c index b4a5763d6dc..e2ccc89382c 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -249,7 +249,7 @@ static struct ctl_table kern_table[] = { .data = &sysctl_sched_child_runs_first, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #ifdef CONFIG_SCHED_DEBUG { @@ -257,7 +257,7 @@ static struct ctl_table kern_table[] = { .data = &sysctl_sched_min_granularity, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &sched_nr_latency_handler, + .proc_handler = sched_nr_latency_handler, .extra1 = &min_sched_granularity_ns, .extra2 = &max_sched_granularity_ns, }, @@ -266,7 +266,7 @@ static struct ctl_table kern_table[] = { .data = &sysctl_sched_latency, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &sched_nr_latency_handler, + .proc_handler = sched_nr_latency_handler, .extra1 = &min_sched_granularity_ns, .extra2 = &max_sched_granularity_ns, }, @@ -275,7 +275,7 @@ static struct ctl_table kern_table[] = { .data = &sysctl_sched_wakeup_granularity, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &min_wakeup_granularity_ns, .extra2 = &max_wakeup_granularity_ns, }, @@ -284,14 +284,14 @@ static struct ctl_table kern_table[] = { .data = &sysctl_sched_shares_ratelimit, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "sched_shares_thresh", .data = &sysctl_sched_shares_thresh, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &zero, }, { @@ -299,35 +299,35 @@ static struct ctl_table kern_table[] = { .data = &sysctl_sched_features, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "sched_migration_cost", .data = &sysctl_sched_migration_cost, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = 
&proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "sched_nr_migrate", .data = &sysctl_sched_nr_migrate, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "sched_time_avg", .data = &sysctl_sched_time_avg, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "timer_migration", .data = &sysctl_timer_migration, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, @@ -337,21 +337,21 @@ static struct ctl_table kern_table[] = { .data = &sysctl_sched_rt_period, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &sched_rt_handler, + .proc_handler = sched_rt_handler, }, { .procname = "sched_rt_runtime_us", .data = &sysctl_sched_rt_runtime, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &sched_rt_handler, + .proc_handler = sched_rt_handler, }, { .procname = "sched_compat_yield", .data = &sysctl_sched_compat_yield, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #ifdef CONFIG_PROVE_LOCKING { @@ -359,7 +359,7 @@ static struct ctl_table kern_table[] = { .data = &prove_locking, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_LOCK_STAT @@ -368,7 +368,7 @@ static struct ctl_table kern_table[] = { .data = &lock_stat, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif { @@ -376,35 +376,35 @@ static struct ctl_table kern_table[] = { .data = &panic_timeout, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "core_uses_pid", .data = &core_uses_pid, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "core_pattern", .data = core_pattern, .maxlen = CORENAME_MAX_SIZE, .mode = 0644, - .proc_handler = &proc_dostring, + .proc_handler = proc_dostring, }, { .procname = "core_pipe_limit", .data = &core_pipe_limit, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #ifdef CONFIG_PROC_SYSCTL { .procname = "tainted", .maxlen = sizeof(long), .mode = 0644, - .proc_handler = &proc_taint, + .proc_handler = proc_taint, }, #endif #ifdef CONFIG_LATENCYTOP @@ -413,7 +413,7 @@ static struct ctl_table kern_table[] = { .data = &latencytop_enabled, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_BLK_DEV_INITRD @@ -422,7 +422,7 @@ static struct ctl_table kern_table[] = { .data = &real_root_dev, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif { @@ -430,7 +430,7 @@ static struct ctl_table kern_table[] = { .data = &print_fatal_signals, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #ifdef CONFIG_SPARC { @@ -438,21 +438,21 @@ static struct ctl_table kern_table[] = { .data = reboot_command, .maxlen = 256, .mode = 0644, - .proc_handler = &proc_dostring, + .proc_handler = proc_dostring, }, { .procname = "stop-a", .data = &stop_a_enabled, .maxlen = sizeof (int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = 
proc_dointvec, }, { .procname = "scons-poweroff", .data = &scons_pwroff, .maxlen = sizeof (int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_SPARC64 @@ -461,7 +461,7 @@ static struct ctl_table kern_table[] = { .data = &sysctl_tsb_ratio, .maxlen = sizeof (int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif #ifdef __hppa__ @@ -470,14 +470,14 @@ static struct ctl_table kern_table[] = { .data = &pwrsw_enabled, .maxlen = sizeof (int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "unaligned-trap", .data = &unaligned_enabled, .maxlen = sizeof (int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif { @@ -485,7 +485,7 @@ static struct ctl_table kern_table[] = { .data = &C_A_D, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #ifdef CONFIG_FUNCTION_TRACER { @@ -493,7 +493,7 @@ static struct ctl_table kern_table[] = { .data = &ftrace_enabled, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &ftrace_enable_sysctl, + .proc_handler = ftrace_enable_sysctl, }, #endif #ifdef CONFIG_STACK_TRACER @@ -502,7 +502,7 @@ static struct ctl_table kern_table[] = { .data = &stack_tracer_enabled, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &stack_trace_sysctl, + .proc_handler = stack_trace_sysctl, }, #endif #ifdef CONFIG_TRACING @@ -511,7 +511,7 @@ static struct ctl_table kern_table[] = { .data = &ftrace_dump_on_oops, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_MODULES @@ -520,7 +520,7 @@ static struct ctl_table kern_table[] = { .data = &modprobe_path, .maxlen = KMOD_PATH_LEN, .mode = 0644, - .proc_handler = &proc_dostring, + .proc_handler = proc_dostring, }, { .procname = "modules_disabled", @@ -528,7 +528,7 @@ static struct ctl_table kern_table[] = { .maxlen = sizeof(int), .mode = 0644, /* only handle a transition from default "0" to "1" */ - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &one, .extra2 = &one, }, @@ -539,7 +539,7 @@ static struct ctl_table kern_table[] = { .data = &uevent_helper, .maxlen = UEVENT_HELPER_PATH_LEN, .mode = 0644, - .proc_handler = &proc_dostring, + .proc_handler = proc_dostring, }, #endif #ifdef CONFIG_CHR_DEV_SG @@ -548,7 +548,7 @@ static struct ctl_table kern_table[] = { .data = &sg_big_buff, .maxlen = sizeof (int), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_BSD_PROCESS_ACCT @@ -557,7 +557,7 @@ static struct ctl_table kern_table[] = { .data = &acct_parm, .maxlen = 3*sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_MAGIC_SYSRQ @@ -566,7 +566,7 @@ static struct ctl_table kern_table[] = { .data = &__sysrq_enabled, .maxlen = sizeof (int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_PROC_SYSCTL @@ -575,7 +575,7 @@ static struct ctl_table kern_table[] = { .data = NULL, .maxlen = sizeof (int), .mode = 0600, - .proc_handler = &proc_do_cad_pid, + .proc_handler = proc_do_cad_pid, }, #endif { @@ -583,7 +583,7 @@ static struct ctl_table kern_table[] = { .data = &max_threads, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "random", @@ -595,7 
+595,7 @@ static struct ctl_table kern_table[] = { .data = &overflowuid, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &minolduid, .extra2 = &maxolduid, }, @@ -604,7 +604,7 @@ static struct ctl_table kern_table[] = { .data = &overflowgid, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &minolduid, .extra2 = &maxolduid, }, @@ -615,7 +615,7 @@ static struct ctl_table kern_table[] = { .data = &sysctl_ieee_emulation_warnings, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif { @@ -623,7 +623,7 @@ static struct ctl_table kern_table[] = { .data = &sysctl_userprocess_debug, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif { @@ -631,7 +631,7 @@ static struct ctl_table kern_table[] = { .data = &pid_max, .maxlen = sizeof (int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &pid_max_min, .extra2 = &pid_max_max, }, @@ -640,7 +640,7 @@ static struct ctl_table kern_table[] = { .data = &panic_on_oops, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #if defined CONFIG_PRINTK { @@ -648,28 +648,28 @@ static struct ctl_table kern_table[] = { .data = &console_loglevel, .maxlen = 4*sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "printk_ratelimit", .data = &printk_ratelimit_state.interval, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, + .proc_handler = proc_dointvec_jiffies, }, { .procname = "printk_ratelimit_burst", .data = &printk_ratelimit_state.burst, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "printk_delay", .data = &printk_delay_msec, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &ten_thousand, }, @@ -679,7 +679,7 @@ static struct ctl_table kern_table[] = { .data = &ngroups_max, .maxlen = sizeof (int), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) { @@ -687,14 +687,14 @@ static struct ctl_table kern_table[] = { .data = &unknown_nmi_panic, .maxlen = sizeof (int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "nmi_watchdog", .data = &nmi_watchdog_enabled, .maxlen = sizeof (int), .mode = 0644, - .proc_handler = &proc_nmi_enabled, + .proc_handler = proc_nmi_enabled, }, #endif #if defined(CONFIG_X86) @@ -703,42 +703,42 @@ static struct ctl_table kern_table[] = { .data = &panic_on_unrecovered_nmi, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "panic_on_io_nmi", .data = &panic_on_io_nmi, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "bootloader_type", .data = &bootloader_type, .maxlen = sizeof (int), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "bootloader_version", .data = &bootloader_version, .maxlen = sizeof (int), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, 
}, { .procname = "kstack_depth_to_print", .data = &kstack_depth_to_print, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "io_delay_type", .data = &io_delay_type, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif #if defined(CONFIG_MMU) @@ -747,7 +747,7 @@ static struct ctl_table kern_table[] = { .data = &randomize_va_space, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif #if defined(CONFIG_S390) && defined(CONFIG_SMP) @@ -756,7 +756,7 @@ static struct ctl_table kern_table[] = { .data = &spin_retry, .maxlen = sizeof (int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif #if defined(CONFIG_ACPI_SLEEP) && defined(CONFIG_X86) @@ -765,7 +765,7 @@ static struct ctl_table kern_table[] = { .data = &acpi_realmode_flags, .maxlen = sizeof (unsigned long), .mode = 0644, - .proc_handler = &proc_doulongvec_minmax, + .proc_handler = proc_doulongvec_minmax, }, #endif #ifdef CONFIG_IA64 @@ -774,14 +774,14 @@ static struct ctl_table kern_table[] = { .data = &no_unaligned_warning, .maxlen = sizeof (int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "unaligned-dump-stack", .data = &unaligned_dump_stack, .maxlen = sizeof (int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_DETECT_SOFTLOCKUP @@ -790,7 +790,7 @@ static struct ctl_table kern_table[] = { .data = &softlockup_panic, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, @@ -799,7 +799,7 @@ static struct ctl_table kern_table[] = { .data = &softlockup_thresh, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dosoftlockup_thresh, + .proc_handler = proc_dosoftlockup_thresh, .extra1 = &neg_one, .extra2 = &sixty, }, @@ -810,7 +810,7 @@ static struct ctl_table kern_table[] = { .data = &sysctl_hung_task_panic, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, @@ -819,21 +819,21 @@ static struct ctl_table kern_table[] = { .data = &sysctl_hung_task_check_count, .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = &proc_doulongvec_minmax, + .proc_handler = proc_doulongvec_minmax, }, { .procname = "hung_task_timeout_secs", .data = &sysctl_hung_task_timeout_secs, .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = &proc_dohung_task_timeout_secs, + .proc_handler = proc_dohung_task_timeout_secs, }, { .procname = "hung_task_warnings", .data = &sysctl_hung_task_warnings, .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = &proc_doulongvec_minmax, + .proc_handler = proc_doulongvec_minmax, }, #endif #ifdef CONFIG_COMPAT @@ -842,7 +842,7 @@ static struct ctl_table kern_table[] = { .data = &compat_log, .maxlen = sizeof (int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_RT_MUTEXES @@ -851,7 +851,7 @@ static struct ctl_table kern_table[] = { .data = &max_lock_depth, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif { @@ -859,7 +859,7 @@ static struct ctl_table kern_table[] = { .data = &poweroff_cmd, .maxlen = POWEROFF_CMD_PATH_LEN, .mode = 0644, - 
.proc_handler = &proc_dostring, + .proc_handler = proc_dostring, }, #ifdef CONFIG_KEYS { @@ -874,7 +874,7 @@ static struct ctl_table kern_table[] = { .data = &rcutorture_runnable, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_SLOW_WORK @@ -890,21 +890,21 @@ static struct ctl_table kern_table[] = { .data = &sysctl_perf_event_paranoid, .maxlen = sizeof(sysctl_perf_event_paranoid), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "perf_event_mlock_kb", .data = &sysctl_perf_event_mlock, .maxlen = sizeof(sysctl_perf_event_mlock), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "perf_event_max_sample_rate", .data = &sysctl_perf_event_sample_rate, .maxlen = sizeof(sysctl_perf_event_sample_rate), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_KMEMCHECK @@ -913,7 +913,7 @@ static struct ctl_table kern_table[] = { .data = &kmemcheck_enabled, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_BLOCK @@ -922,7 +922,7 @@ static struct ctl_table kern_table[] = { .data = &blk_iopoll_enabled, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif /* @@ -938,49 +938,49 @@ static struct ctl_table vm_table[] = { .data = &sysctl_overcommit_memory, .maxlen = sizeof(sysctl_overcommit_memory), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "panic_on_oom", .data = &sysctl_panic_on_oom, .maxlen = sizeof(sysctl_panic_on_oom), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "oom_kill_allocating_task", .data = &sysctl_oom_kill_allocating_task, .maxlen = sizeof(sysctl_oom_kill_allocating_task), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "oom_dump_tasks", .data = &sysctl_oom_dump_tasks, .maxlen = sizeof(sysctl_oom_dump_tasks), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "overcommit_ratio", .data = &sysctl_overcommit_ratio, .maxlen = sizeof(sysctl_overcommit_ratio), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "page-cluster", .data = &page_cluster, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "dirty_background_ratio", .data = &dirty_background_ratio, .maxlen = sizeof(dirty_background_ratio), .mode = 0644, - .proc_handler = &dirty_background_ratio_handler, + .proc_handler = dirty_background_ratio_handler, .extra1 = &zero, .extra2 = &one_hundred, }, @@ -989,7 +989,7 @@ static struct ctl_table vm_table[] = { .data = &dirty_background_bytes, .maxlen = sizeof(dirty_background_bytes), .mode = 0644, - .proc_handler = &dirty_background_bytes_handler, + .proc_handler = dirty_background_bytes_handler, .extra1 = &one_ul, }, { @@ -997,7 +997,7 @@ static struct ctl_table vm_table[] = { .data = &vm_dirty_ratio, .maxlen = sizeof(vm_dirty_ratio), .mode = 0644, - .proc_handler = &dirty_ratio_handler, + .proc_handler = dirty_ratio_handler, .extra1 = &zero, .extra2 = &one_hundred, }, @@ -1006,7 +1006,7 @@ static struct ctl_table vm_table[] = { .data = &vm_dirty_bytes, .maxlen = sizeof(vm_dirty_bytes), .mode = 0644, - .proc_handler = 
&dirty_bytes_handler, + .proc_handler = dirty_bytes_handler, .extra1 = &dirty_bytes_min, }, { @@ -1014,28 +1014,28 @@ static struct ctl_table vm_table[] = { .data = &dirty_writeback_interval, .maxlen = sizeof(dirty_writeback_interval), .mode = 0644, - .proc_handler = &dirty_writeback_centisecs_handler, + .proc_handler = dirty_writeback_centisecs_handler, }, { .procname = "dirty_expire_centisecs", .data = &dirty_expire_interval, .maxlen = sizeof(dirty_expire_interval), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "nr_pdflush_threads", .data = &nr_pdflush_threads, .maxlen = sizeof nr_pdflush_threads, .mode = 0444 /* read-only*/, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "swappiness", .data = &vm_swappiness, .maxlen = sizeof(vm_swappiness), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one_hundred, }, @@ -1045,7 +1045,7 @@ static struct ctl_table vm_table[] = { .data = NULL, .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = &hugetlb_sysctl_handler, + .proc_handler = hugetlb_sysctl_handler, .extra1 = (void *)&hugetlb_zero, .extra2 = (void *)&hugetlb_infinity, }, @@ -1054,21 +1054,21 @@ static struct ctl_table vm_table[] = { .data = &sysctl_hugetlb_shm_group, .maxlen = sizeof(gid_t), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "hugepages_treat_as_movable", .data = &hugepages_treat_as_movable, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &hugetlb_treat_movable_handler, + .proc_handler = hugetlb_treat_movable_handler, }, { .procname = "nr_overcommit_hugepages", .data = NULL, .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = &hugetlb_overcommit_handler, + .proc_handler = hugetlb_overcommit_handler, .extra1 = (void *)&hugetlb_zero, .extra2 = (void *)&hugetlb_infinity, }, @@ -1078,7 +1078,7 @@ static struct ctl_table vm_table[] = { .data = &sysctl_lowmem_reserve_ratio, .maxlen = sizeof(sysctl_lowmem_reserve_ratio), .mode = 0644, - .proc_handler = &lowmem_reserve_ratio_sysctl_handler, + .proc_handler = lowmem_reserve_ratio_sysctl_handler, }, { .procname = "drop_caches", @@ -1092,7 +1092,7 @@ static struct ctl_table vm_table[] = { .data = &min_free_kbytes, .maxlen = sizeof(min_free_kbytes), .mode = 0644, - .proc_handler = &min_free_kbytes_sysctl_handler, + .proc_handler = min_free_kbytes_sysctl_handler, .extra1 = &zero, }, { @@ -1100,7 +1100,7 @@ static struct ctl_table vm_table[] = { .data = &percpu_pagelist_fraction, .maxlen = sizeof(percpu_pagelist_fraction), .mode = 0644, - .proc_handler = &percpu_pagelist_fraction_sysctl_handler, + .proc_handler = percpu_pagelist_fraction_sysctl_handler, .extra1 = &min_percpu_pagelist_fract, }, #ifdef CONFIG_MMU @@ -1109,7 +1109,7 @@ static struct ctl_table vm_table[] = { .data = &sysctl_max_map_count, .maxlen = sizeof(sysctl_max_map_count), .mode = 0644, - .proc_handler = &proc_dointvec + .proc_handler = proc_dointvec }, #else { @@ -1117,7 +1117,7 @@ static struct ctl_table vm_table[] = { .data = &sysctl_nr_trim_pages, .maxlen = sizeof(sysctl_nr_trim_pages), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &zero, }, #endif @@ -1126,14 +1126,14 @@ static struct ctl_table vm_table[] = { .data = &laptop_mode, .maxlen = sizeof(laptop_mode), .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, + .proc_handler = proc_dointvec_jiffies, }, { .procname 
= "block_dump", .data = &block_dump, .maxlen = sizeof(block_dump), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, .extra1 = &zero, }, { @@ -1141,7 +1141,7 @@ static struct ctl_table vm_table[] = { .data = &sysctl_vfs_cache_pressure, .maxlen = sizeof(sysctl_vfs_cache_pressure), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, .extra1 = &zero, }, #ifdef HAVE_ARCH_PICK_MMAP_LAYOUT @@ -1150,7 +1150,7 @@ static struct ctl_table vm_table[] = { .data = &sysctl_legacy_va_layout, .maxlen = sizeof(sysctl_legacy_va_layout), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, .extra1 = &zero, }, #endif @@ -1160,7 +1160,7 @@ static struct ctl_table vm_table[] = { .data = &zone_reclaim_mode, .maxlen = sizeof(zone_reclaim_mode), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, .extra1 = &zero, }, { @@ -1168,7 +1168,7 @@ static struct ctl_table vm_table[] = { .data = &sysctl_min_unmapped_ratio, .maxlen = sizeof(sysctl_min_unmapped_ratio), .mode = 0644, - .proc_handler = &sysctl_min_unmapped_ratio_sysctl_handler, + .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler, .extra1 = &zero, .extra2 = &one_hundred, }, @@ -1177,7 +1177,7 @@ static struct ctl_table vm_table[] = { .data = &sysctl_min_slab_ratio, .maxlen = sizeof(sysctl_min_slab_ratio), .mode = 0644, - .proc_handler = &sysctl_min_slab_ratio_sysctl_handler, + .proc_handler = sysctl_min_slab_ratio_sysctl_handler, .extra1 = &zero, .extra2 = &one_hundred, }, @@ -1188,7 +1188,7 @@ static struct ctl_table vm_table[] = { .data = &sysctl_stat_interval, .maxlen = sizeof(sysctl_stat_interval), .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, + .proc_handler = proc_dointvec_jiffies, }, #endif { @@ -1196,7 +1196,7 @@ static struct ctl_table vm_table[] = { .data = &dac_mmap_min_addr, .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = &mmap_min_addr_handler, + .proc_handler = mmap_min_addr_handler, }, #ifdef CONFIG_NUMA { @@ -1204,7 +1204,7 @@ static struct ctl_table vm_table[] = { .data = &numa_zonelist_order, .maxlen = NUMA_ZONELIST_ORDER_LEN, .mode = 0644, - .proc_handler = &numa_zonelist_order_handler, + .proc_handler = numa_zonelist_order_handler, }, #endif #if (defined(CONFIG_X86_32) && !defined(CONFIG_UML))|| \ @@ -1214,7 +1214,7 @@ static struct ctl_table vm_table[] = { .data = &vdso_enabled, .maxlen = sizeof(vdso_enabled), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, .extra1 = &zero, }, #endif @@ -1224,7 +1224,7 @@ static struct ctl_table vm_table[] = { .data = &vm_highmem_is_dirtyable, .maxlen = sizeof(vm_highmem_is_dirtyable), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, @@ -1234,7 +1234,7 @@ static struct ctl_table vm_table[] = { .data = &scan_unevictable_pages, .maxlen = sizeof(scan_unevictable_pages), .mode = 0644, - .proc_handler = &scan_unevictable_handler, + .proc_handler = scan_unevictable_handler, }, #ifdef CONFIG_MEMORY_FAILURE { @@ -1242,7 +1242,7 @@ static struct ctl_table vm_table[] = { .data = &sysctl_memory_failure_early_kill, .maxlen = sizeof(sysctl_memory_failure_early_kill), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, @@ -1251,7 +1251,7 @@ static struct ctl_table vm_table[] = { .data = &sysctl_memory_failure_recovery, .maxlen = sizeof(sysctl_memory_failure_recovery), .mode = 0644, - 
.proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, @@ -1276,35 +1276,35 @@ static struct ctl_table fs_table[] = { .data = &inodes_stat, .maxlen = 2*sizeof(int), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "inode-state", .data = &inodes_stat, .maxlen = 7*sizeof(int), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "file-nr", .data = &files_stat, .maxlen = 3*sizeof(int), .mode = 0444, - .proc_handler = &proc_nr_files, + .proc_handler = proc_nr_files, }, { .procname = "file-max", .data = &files_stat.max_files, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "nr_open", .data = &sysctl_nr_open, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &sysctl_nr_open_min, .extra2 = &sysctl_nr_open_max, }, @@ -1313,14 +1313,14 @@ static struct ctl_table fs_table[] = { .data = &dentry_stat, .maxlen = 6*sizeof(int), .mode = 0444, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "overflowuid", .data = &fs_overflowuid, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &minolduid, .extra2 = &maxolduid, }, @@ -1329,7 +1329,7 @@ static struct ctl_table fs_table[] = { .data = &fs_overflowgid, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &minolduid, .extra2 = &maxolduid, }, @@ -1339,7 +1339,7 @@ static struct ctl_table fs_table[] = { .data = &leases_enable, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_DNOTIFY @@ -1348,7 +1348,7 @@ static struct ctl_table fs_table[] = { .data = &dir_notify_enable, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_MMU @@ -1358,7 +1358,7 @@ static struct ctl_table fs_table[] = { .data = &lease_break_time, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_AIO @@ -1367,14 +1367,14 @@ static struct ctl_table fs_table[] = { .data = &aio_nr, .maxlen = sizeof(aio_nr), .mode = 0444, - .proc_handler = &proc_doulongvec_minmax, + .proc_handler = proc_doulongvec_minmax, }, { .procname = "aio-max-nr", .data = &aio_max_nr, .maxlen = sizeof(aio_max_nr), .mode = 0644, - .proc_handler = &proc_doulongvec_minmax, + .proc_handler = proc_doulongvec_minmax, }, #endif /* CONFIG_AIO */ #ifdef CONFIG_INOTIFY_USER @@ -1397,7 +1397,7 @@ static struct ctl_table fs_table[] = { .data = &suid_dumpable, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &two, }, diff --git a/net/rds/ib_sysctl.c b/net/rds/ib_sysctl.c index 517c6c9987b..03f01cb4e0f 100644 --- a/net/rds/ib_sysctl.c +++ b/net/rds/ib_sysctl.c @@ -71,7 +71,7 @@ ctl_table rds_ib_sysctl_table[] = { .data = &rds_ib_sysctl_max_send_wr, .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = &proc_doulongvec_minmax, + .proc_handler = proc_doulongvec_minmax, .extra1 = &rds_ib_sysctl_max_wr_min, .extra2 = &rds_ib_sysctl_max_wr_max, }, @@ -80,7 +80,7 @@ ctl_table rds_ib_sysctl_table[] = { .data = 
&rds_ib_sysctl_max_recv_wr, .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = &proc_doulongvec_minmax, + .proc_handler = proc_doulongvec_minmax, .extra1 = &rds_ib_sysctl_max_wr_min, .extra2 = &rds_ib_sysctl_max_wr_max, }, @@ -89,7 +89,7 @@ ctl_table rds_ib_sysctl_table[] = { .data = &rds_ib_sysctl_max_unsig_wrs, .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = &proc_doulongvec_minmax, + .proc_handler = proc_doulongvec_minmax, .extra1 = &rds_ib_sysctl_max_unsig_wr_min, .extra2 = &rds_ib_sysctl_max_unsig_wr_max, }, @@ -98,7 +98,7 @@ ctl_table rds_ib_sysctl_table[] = { .data = &rds_ib_sysctl_max_unsig_bytes, .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = &proc_doulongvec_minmax, + .proc_handler = proc_doulongvec_minmax, .extra1 = &rds_ib_sysctl_max_unsig_bytes_min, .extra2 = &rds_ib_sysctl_max_unsig_bytes_max, }, @@ -107,14 +107,14 @@ ctl_table rds_ib_sysctl_table[] = { .data = &rds_ib_sysctl_max_recv_allocation, .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = &proc_doulongvec_minmax, + .proc_handler = proc_doulongvec_minmax, }, { .procname = "flow_control", .data = &rds_ib_sysctl_flow_control, .maxlen = sizeof(rds_ib_sysctl_flow_control), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { } }; diff --git a/net/rds/iw_sysctl.c b/net/rds/iw_sysctl.c index 3e00b01559f..1c4428a61a0 100644 --- a/net/rds/iw_sysctl.c +++ b/net/rds/iw_sysctl.c @@ -61,7 +61,7 @@ ctl_table rds_iw_sysctl_table[] = { .data = &rds_iw_sysctl_max_send_wr, .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = &proc_doulongvec_minmax, + .proc_handler = proc_doulongvec_minmax, .extra1 = &rds_iw_sysctl_max_wr_min, .extra2 = &rds_iw_sysctl_max_wr_max, }, @@ -70,7 +70,7 @@ ctl_table rds_iw_sysctl_table[] = { .data = &rds_iw_sysctl_max_recv_wr, .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = &proc_doulongvec_minmax, + .proc_handler = proc_doulongvec_minmax, .extra1 = &rds_iw_sysctl_max_wr_min, .extra2 = &rds_iw_sysctl_max_wr_max, }, @@ -79,7 +79,7 @@ ctl_table rds_iw_sysctl_table[] = { .data = &rds_iw_sysctl_max_unsig_wrs, .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = &proc_doulongvec_minmax, + .proc_handler = proc_doulongvec_minmax, .extra1 = &rds_iw_sysctl_max_unsig_wr_min, .extra2 = &rds_iw_sysctl_max_unsig_wr_max, }, @@ -88,7 +88,7 @@ ctl_table rds_iw_sysctl_table[] = { .data = &rds_iw_sysctl_max_unsig_bytes, .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = &proc_doulongvec_minmax, + .proc_handler = proc_doulongvec_minmax, .extra1 = &rds_iw_sysctl_max_unsig_bytes_min, .extra2 = &rds_iw_sysctl_max_unsig_bytes_max, }, @@ -97,14 +97,14 @@ ctl_table rds_iw_sysctl_table[] = { .data = &rds_iw_sysctl_max_recv_allocation, .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = &proc_doulongvec_minmax, + .proc_handler = proc_doulongvec_minmax, }, { .procname = "flow_control", .data = &rds_iw_sysctl_flow_control, .maxlen = sizeof(rds_iw_sysctl_flow_control), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { } }; diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c index 8fb499ee368..7829a20325d 100644 --- a/net/rds/sysctl.c +++ b/net/rds/sysctl.c @@ -55,7 +55,7 @@ static ctl_table rds_sysctl_rds_table[] = { .data = &rds_sysctl_reconnect_min_jiffies, .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = &proc_doulongvec_ms_jiffies_minmax, + .proc_handler = proc_doulongvec_ms_jiffies_minmax, .extra1 = 
&rds_sysctl_reconnect_min, .extra2 = &rds_sysctl_reconnect_max_jiffies, }, @@ -64,7 +64,7 @@ static ctl_table rds_sysctl_rds_table[] = { .data = &rds_sysctl_reconnect_max_jiffies, .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = &proc_doulongvec_ms_jiffies_minmax, + .proc_handler = proc_doulongvec_ms_jiffies_minmax, .extra1 = &rds_sysctl_reconnect_min_jiffies, .extra2 = &rds_sysctl_reconnect_max, }, @@ -73,21 +73,21 @@ static ctl_table rds_sysctl_rds_table[] = { .data = &rds_sysctl_max_unacked_packets, .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "max_unacked_bytes", .data = &rds_sysctl_max_unacked_bytes, .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "ping_enable", .data = &rds_sysctl_ping_enable, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { } }; diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index c4ece982954..d50a042f955 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c @@ -237,7 +237,7 @@ static ctl_table sctp_table[] = { .data = &sctp_scope_policy, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &addr_scope_max, }, diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c index f0ce326d017..e65dcc61333 100644 --- a/net/sunrpc/sysctl.c +++ b/net/sunrpc/sysctl.c @@ -139,34 +139,34 @@ static ctl_table debug_table[] = { .data = &rpc_debug, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dodebug + .proc_handler = proc_dodebug }, { .procname = "nfs_debug", .data = &nfs_debug, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dodebug + .proc_handler = proc_dodebug }, { .procname = "nfsd_debug", .data = &nfsd_debug, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dodebug + .proc_handler = proc_dodebug }, { .procname = "nlm_debug", .data = &nlm_debug, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = &proc_dodebug + .proc_handler = proc_dodebug }, { .procname = "transports", .maxlen = 256, .mode = 0444, - .proc_handler = &proc_do_xprt, + .proc_handler = proc_do_xprt, }, { } }; diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c index 678cee22013..5b8a8ff93a2 100644 --- a/net/sunrpc/xprtrdma/svc_rdma.c +++ b/net/sunrpc/xprtrdma/svc_rdma.c @@ -120,7 +120,7 @@ static ctl_table svcrdma_parm_table[] = { .data = &svcrdma_max_requests, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &min_max_requests, .extra2 = &max_max_requests }, @@ -129,7 +129,7 @@ static ctl_table svcrdma_parm_table[] = { .data = &svcrdma_max_req_size, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &min_max_inline, .extra2 = &max_max_inline }, @@ -138,7 +138,7 @@ static ctl_table svcrdma_parm_table[] = { .data = &svcrdma_ord, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &min_ord, .extra2 = &max_ord, }, @@ -148,63 +148,63 @@ static ctl_table svcrdma_parm_table[] = { .data = &rdma_stat_read, .maxlen = sizeof(atomic_t), .mode = 0644, - .proc_handler = &read_reset_stat, + .proc_handler = read_reset_stat, }, { .procname = "rdma_stat_recv", .data = 
&rdma_stat_recv, .maxlen = sizeof(atomic_t), .mode = 0644, - .proc_handler = &read_reset_stat, + .proc_handler = read_reset_stat, }, { .procname = "rdma_stat_write", .data = &rdma_stat_write, .maxlen = sizeof(atomic_t), .mode = 0644, - .proc_handler = &read_reset_stat, + .proc_handler = read_reset_stat, }, { .procname = "rdma_stat_sq_starve", .data = &rdma_stat_sq_starve, .maxlen = sizeof(atomic_t), .mode = 0644, - .proc_handler = &read_reset_stat, + .proc_handler = read_reset_stat, }, { .procname = "rdma_stat_rq_starve", .data = &rdma_stat_rq_starve, .maxlen = sizeof(atomic_t), .mode = 0644, - .proc_handler = &read_reset_stat, + .proc_handler = read_reset_stat, }, { .procname = "rdma_stat_rq_poll", .data = &rdma_stat_rq_poll, .maxlen = sizeof(atomic_t), .mode = 0644, - .proc_handler = &read_reset_stat, + .proc_handler = read_reset_stat, }, { .procname = "rdma_stat_rq_prod", .data = &rdma_stat_rq_prod, .maxlen = sizeof(atomic_t), .mode = 0644, - .proc_handler = &read_reset_stat, + .proc_handler = read_reset_stat, }, { .procname = "rdma_stat_sq_poll", .data = &rdma_stat_sq_poll, .maxlen = sizeof(atomic_t), .mode = 0644, - .proc_handler = &read_reset_stat, + .proc_handler = read_reset_stat, }, { .procname = "rdma_stat_sq_prod", .data = &rdma_stat_sq_prod, .maxlen = sizeof(atomic_t), .mode = 0644, - .proc_handler = &read_reset_stat, + .proc_handler = read_reset_stat, }, { }, }; diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 47681606224..7018eef1dcd 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -90,7 +90,7 @@ static ctl_table xr_tunables_table[] = { .data = &xprt_rdma_slot_table_entries, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &min_slot_table_size, .extra2 = &max_slot_table_size }, @@ -99,21 +99,21 @@ static ctl_table xr_tunables_table[] = { .data = &xprt_rdma_max_inline_read, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "rdma_max_inline_write", .data = &xprt_rdma_max_inline_write, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { .procname = "rdma_inline_write_padding", .data = &xprt_rdma_inline_write_padding, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &max_padding, }, @@ -122,7 +122,7 @@ static ctl_table xr_tunables_table[] = { .data = &xprt_rdma_memreg_strategy, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &min_memreg, .extra2 = &max_memreg, }, @@ -131,7 +131,7 @@ static ctl_table xr_tunables_table[] = { .data = &xprt_rdma_pad_optimize, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec, + .proc_handler = proc_dointvec, }, { }, }; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 8b9a2079f2e..04732d09013 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -85,7 +85,7 @@ static ctl_table xs_tunables_table[] = { .data = &xprt_udp_slot_table_entries, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &min_slot_table_size, .extra2 = &max_slot_table_size }, @@ -94,7 +94,7 @@ static ctl_table xs_tunables_table[] = { .data = 
&xprt_tcp_slot_table_entries, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &min_slot_table_size, .extra2 = &max_slot_table_size }, @@ -103,7 +103,7 @@ static ctl_table xs_tunables_table[] = { .data = &xprt_min_resvport, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &xprt_min_resvport_limit, .extra2 = &xprt_max_resvport_limit }, @@ -112,7 +112,7 @@ static ctl_table xs_tunables_table[] = { .data = &xprt_max_resvport, .maxlen = sizeof(unsigned int), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = &xprt_min_resvport_limit, .extra2 = &xprt_max_resvport_limit }, @@ -121,7 +121,7 @@ static ctl_table xs_tunables_table[] = { .data = &xs_tcp_fin_timeout, .maxlen = sizeof(xs_tcp_fin_timeout), .mode = 0644, - .proc_handler = &proc_dointvec_jiffies, + .proc_handler = proc_dointvec_jiffies, }, { }, }; diff --git a/security/keys/sysctl.c b/security/keys/sysctl.c index 3565e2fc10c..ee32d181764 100644 --- a/security/keys/sysctl.c +++ b/security/keys/sysctl.c @@ -21,7 +21,7 @@ ctl_table key_sysctls[] = { .data = &key_quota_maxkeys, .maxlen = sizeof(unsigned), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = (void *) &one, .extra2 = (void *) &max, }, @@ -30,7 +30,7 @@ ctl_table key_sysctls[] = { .data = &key_quota_maxbytes, .maxlen = sizeof(unsigned), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = (void *) &one, .extra2 = (void *) &max, }, @@ -39,7 +39,7 @@ ctl_table key_sysctls[] = { .data = &key_quota_root_maxkeys, .maxlen = sizeof(unsigned), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = (void *) &one, .extra2 = (void *) &max, }, @@ -48,7 +48,7 @@ ctl_table key_sysctls[] = { .data = &key_quota_root_maxbytes, .maxlen = sizeof(unsigned), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = (void *) &one, .extra2 = (void *) &max, }, @@ -57,7 +57,7 @@ ctl_table key_sysctls[] = { .data = &key_gc_delay, .maxlen = sizeof(unsigned), .mode = 0644, - .proc_handler = &proc_dointvec_minmax, + .proc_handler = proc_dointvec_minmax, .extra1 = (void *) &zero, .extra2 = (void *) &max, }, -- cgit v1.2.3-70-g09d2 From 3d7a641e544e428191667e8b1f83f96fa46dbd65 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 19 Nov 2009 18:10:23 +0000 Subject: SLOW_WORK: Wait for outstanding work items belonging to a module to clear Wait for outstanding slow work items belonging to a module to clear when unregistering that module as a user of the facility. This prevents the put_ref code of a work item from being taken away before it returns. 
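
As a rough sketch of what this per-module registration means for a user of the facility, the fragment below shows a module passing THIS_MODULE at register/unregister time and filling in the new ->owner field of its ops table. The example_* identifiers are invented for illustration and do not appear in the patch; at this point in the series the get_ref/put_ref operations are still required, so trivial stubs are shown.

	#include <linux/module.h>
	#include <linux/slow-work.h>

	static int example_get_ref(struct slow_work *work)
	{
		return 0;		/* nothing to pin in this sketch */
	}

	static void example_put_ref(struct slow_work *work)
	{
	}

	static void example_execute(struct slow_work *work)
	{
		/* the actual slow operation would go here */
	}

	static const struct slow_work_ops example_ops = {
		.owner   = THIS_MODULE,	/* lets the core wait for this module's items */
		.get_ref = example_get_ref,
		.put_ref = example_put_ref,
		.execute = example_execute,
	};

	static struct slow_work example_item;

	static int __init example_init(void)
	{
		int ret;

		ret = slow_work_register_user(THIS_MODULE);
		if (ret < 0)
			return ret;

		slow_work_init(&example_item, &example_ops);
		ret = slow_work_enqueue(&example_item);
		if (ret < 0)
			slow_work_unregister_user(THIS_MODULE);
		return ret;
	}

	static void __exit example_exit(void)
	{
		/* waits for this module's outstanding items before tearing down */
		slow_work_unregister_user(THIS_MODULE);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");
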
Signed-off-by: David Howells --- Documentation/slow-work.txt | 13 ++++- fs/fscache/main.c | 6 +- fs/fscache/object.c | 1 + fs/fscache/operation.c | 1 + fs/gfs2/main.c | 4 +- fs/gfs2/recovery.c | 1 + include/linux/slow-work.h | 8 ++- kernel/slow-work.c | 132 ++++++++++++++++++++++++++++++++++++++++++-- 8 files changed, 150 insertions(+), 16 deletions(-) (limited to 'kernel/slow-work.c') diff --git a/Documentation/slow-work.txt b/Documentation/slow-work.txt index ebc50f808ea..f12fda31dcd 100644 --- a/Documentation/slow-work.txt +++ b/Documentation/slow-work.txt @@ -64,9 +64,11 @@ USING SLOW WORK ITEMS Firstly, a module or subsystem wanting to make use of slow work items must register its interest: - int ret = slow_work_register_user(); + int ret = slow_work_register_user(struct module *module); -This will return 0 if successful, or a -ve error upon failure. +This will return 0 if successful, or a -ve error upon failure. The module +pointer should be the module interested in using this facility (almost +certainly THIS_MODULE). Slow work items may then be set up by: @@ -110,7 +112,12 @@ operation. When all a module's slow work items have been processed, and the module has no further interest in the facility, it should unregister its interest: - slow_work_unregister_user(); + slow_work_unregister_user(struct module *module); + +The module pointer is used to wait for all outstanding work items for that +module before completing the unregistration. This prevents the put_ref() code +from being taken away before it completes. module should almost certainly be +THIS_MODULE. =============== diff --git a/fs/fscache/main.c b/fs/fscache/main.c index 4de41b59749..add6bdb53f0 100644 --- a/fs/fscache/main.c +++ b/fs/fscache/main.c @@ -48,7 +48,7 @@ static int __init fscache_init(void) { int ret; - ret = slow_work_register_user(); + ret = slow_work_register_user(THIS_MODULE); if (ret < 0) goto error_slow_work; @@ -80,7 +80,7 @@ error_kobj: error_cookie_jar: fscache_proc_cleanup(); error_proc: - slow_work_unregister_user(); + slow_work_unregister_user(THIS_MODULE); error_slow_work: return ret; } @@ -97,7 +97,7 @@ static void __exit fscache_exit(void) kobject_put(fscache_root); kmem_cache_destroy(fscache_cookie_jar); fscache_proc_cleanup(); - slow_work_unregister_user(); + slow_work_unregister_user(THIS_MODULE); printk(KERN_NOTICE "FS-Cache: Unloaded\n"); } diff --git a/fs/fscache/object.c b/fs/fscache/object.c index 392a41b1b79..d236eb1d6f3 100644 --- a/fs/fscache/object.c +++ b/fs/fscache/object.c @@ -45,6 +45,7 @@ static void fscache_enqueue_dependents(struct fscache_object *); static void fscache_dequeue_object(struct fscache_object *); const struct slow_work_ops fscache_object_slow_work_ops = { + .owner = THIS_MODULE, .get_ref = fscache_object_slow_work_get_ref, .put_ref = fscache_object_slow_work_put_ref, .execute = fscache_object_slow_work_execute, diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c index e7f8d53b8b6..f1a2857b2ff 100644 --- a/fs/fscache/operation.c +++ b/fs/fscache/operation.c @@ -453,6 +453,7 @@ static void fscache_op_execute(struct slow_work *work) } const struct slow_work_ops fscache_op_slow_work_ops = { + .owner = THIS_MODULE, .get_ref = fscache_op_get_ref, .put_ref = fscache_op_put_ref, .execute = fscache_op_execute, diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index eacd78a5d08..5b31f7741a8 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c @@ -114,7 +114,7 @@ static int __init init_gfs2_fs(void) if (error) goto fail_unregister; - error = slow_work_register_user(); + error 
= slow_work_register_user(THIS_MODULE); if (error) goto fail_slow; @@ -163,7 +163,7 @@ static void __exit exit_gfs2_fs(void) gfs2_unregister_debugfs(); unregister_filesystem(&gfs2_fs_type); unregister_filesystem(&gfs2meta_fs_type); - slow_work_unregister_user(); + slow_work_unregister_user(THIS_MODULE); kmem_cache_destroy(gfs2_quotad_cachep); kmem_cache_destroy(gfs2_rgrpd_cachep); diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c index 59d2695509d..b2bb779f09e 100644 --- a/fs/gfs2/recovery.c +++ b/fs/gfs2/recovery.c @@ -593,6 +593,7 @@ fail: } struct slow_work_ops gfs2_recover_ops = { + .owner = THIS_MODULE, .get_ref = gfs2_recover_get_ref, .put_ref = gfs2_recover_put_ref, .execute = gfs2_recover_work, diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h index b65c8881f07..9adb2b30754 100644 --- a/include/linux/slow-work.h +++ b/include/linux/slow-work.h @@ -24,6 +24,9 @@ struct slow_work; * The operations used to support slow work items */ struct slow_work_ops { + /* owner */ + struct module *owner; + /* get a ref on a work item * - return 0 if successful, -ve if not */ @@ -42,6 +45,7 @@ struct slow_work_ops { * queued */ struct slow_work { + struct module *owner; /* the owning module */ unsigned long flags; #define SLOW_WORK_PENDING 0 /* item pending (further) execution */ #define SLOW_WORK_EXECUTING 1 /* item currently executing */ @@ -84,8 +88,8 @@ static inline void vslow_work_init(struct slow_work *work, } extern int slow_work_enqueue(struct slow_work *work); -extern int slow_work_register_user(void); -extern void slow_work_unregister_user(void); +extern int slow_work_register_user(struct module *owner); +extern void slow_work_unregister_user(struct module *owner); #ifdef CONFIG_SYSCTL extern ctl_table slow_work_sysctls[]; diff --git a/kernel/slow-work.c b/kernel/slow-work.c index 0d31135efbf..dd08f376e40 100644 --- a/kernel/slow-work.c +++ b/kernel/slow-work.c @@ -22,6 +22,8 @@ #define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after * OOM */ +#define SLOW_WORK_THREAD_LIMIT 255 /* abs maximum number of slow-work threads */ + static void slow_work_cull_timeout(unsigned long); static void slow_work_oom_timeout(unsigned long); @@ -46,7 +48,7 @@ static unsigned vslow_work_proportion = 50; /* % of threads that may process #ifdef CONFIG_SYSCTL static const int slow_work_min_min_threads = 2; -static int slow_work_max_max_threads = 255; +static int slow_work_max_max_threads = SLOW_WORK_THREAD_LIMIT; static const int slow_work_min_vslow = 1; static const int slow_work_max_vslow = 99; @@ -97,6 +99,23 @@ static DEFINE_TIMER(slow_work_cull_timer, slow_work_cull_timeout, 0, 0); static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0); static struct slow_work slow_work_new_thread; /* new thread starter */ +/* + * slow work ID allocation (use slow_work_queue_lock) + */ +static DECLARE_BITMAP(slow_work_ids, SLOW_WORK_THREAD_LIMIT); + +/* + * Unregistration tracking to prevent put_ref() from disappearing during module + * unload + */ +#ifdef CONFIG_MODULES +static struct module *slow_work_thread_processing[SLOW_WORK_THREAD_LIMIT]; +static struct module *slow_work_unreg_module; +static struct slow_work *slow_work_unreg_work_item; +static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq); +static DEFINE_MUTEX(slow_work_unreg_sync_lock); +#endif + /* * The queues of work items and the lock governing access to them. These are * shared between all the CPUs. 
It doesn't make sense to have per-CPU queues @@ -149,8 +168,11 @@ static unsigned slow_work_calc_vsmax(void) * Attempt to execute stuff queued on a slow thread. Return true if we managed * it, false if there was nothing to do. */ -static bool slow_work_execute(void) +static bool slow_work_execute(int id) { +#ifdef CONFIG_MODULES + struct module *module; +#endif struct slow_work *work = NULL; unsigned vsmax; bool very_slow; @@ -186,6 +208,12 @@ static bool slow_work_execute(void) } else { very_slow = false; /* avoid the compiler warning */ } + +#ifdef CONFIG_MODULES + if (work) + slow_work_thread_processing[id] = work->owner; +#endif + spin_unlock_irq(&slow_work_queue_lock); if (!work) @@ -219,7 +247,18 @@ static bool slow_work_execute(void) spin_unlock_irq(&slow_work_queue_lock); } + /* sort out the race between module unloading and put_ref() */ work->ops->put_ref(work); + +#ifdef CONFIG_MODULES + module = slow_work_thread_processing[id]; + slow_work_thread_processing[id] = NULL; + smp_mb(); + if (slow_work_unreg_work_item == work || + slow_work_unreg_module == module) + wake_up_all(&slow_work_unreg_wq); +#endif + return true; auto_requeue: @@ -232,6 +271,7 @@ auto_requeue: else list_add_tail(&work->link, &slow_work_queue); spin_unlock_irq(&slow_work_queue_lock); + slow_work_thread_processing[id] = NULL; return true; } @@ -368,13 +408,22 @@ static inline bool slow_work_available(int vsmax) */ static int slow_work_thread(void *_data) { - int vsmax; + int vsmax, id; DEFINE_WAIT(wait); set_freezable(); set_user_nice(current, -5); + /* allocate ourselves an ID */ + spin_lock_irq(&slow_work_queue_lock); + id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT); + BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT); + __set_bit(id, slow_work_ids); + spin_unlock_irq(&slow_work_queue_lock); + + sprintf(current->comm, "kslowd%03u", id); + for (;;) { vsmax = vslow_work_proportion; vsmax *= atomic_read(&slow_work_thread_count); @@ -395,7 +444,7 @@ static int slow_work_thread(void *_data) vsmax *= atomic_read(&slow_work_thread_count); vsmax /= 100; - if (slow_work_available(vsmax) && slow_work_execute()) { + if (slow_work_available(vsmax) && slow_work_execute(id)) { cond_resched(); if (list_empty(&slow_work_queue) && list_empty(&vslow_work_queue) && @@ -412,6 +461,10 @@ static int slow_work_thread(void *_data) break; } + spin_lock_irq(&slow_work_queue_lock); + __clear_bit(id, slow_work_ids); + spin_unlock_irq(&slow_work_queue_lock); + if (atomic_dec_and_test(&slow_work_thread_count)) complete_and_exit(&slow_work_last_thread_exited, 0); return 0; @@ -475,6 +528,7 @@ static void slow_work_new_thread_execute(struct slow_work *work) } static const struct slow_work_ops slow_work_new_thread_ops = { + .owner = THIS_MODULE, .get_ref = slow_work_new_thread_get_ref, .put_ref = slow_work_new_thread_put_ref, .execute = slow_work_new_thread_execute, @@ -546,12 +600,13 @@ static int slow_work_max_threads_sysctl(struct ctl_table *table, int write, /** * slow_work_register_user - Register a user of the facility + * @module: The module about to make use of the facility * * Register a user of the facility, starting up the initial threads if there * aren't any other users at this point. This will return 0 if successful, or * an error if not. 
*/ -int slow_work_register_user(void) +int slow_work_register_user(struct module *module) { struct task_struct *p; int loop; @@ -598,14 +653,79 @@ error: } EXPORT_SYMBOL(slow_work_register_user); +/* + * wait for all outstanding items from the calling module to complete + * - note that more items may be queued whilst we're waiting + */ +static void slow_work_wait_for_items(struct module *module) +{ + DECLARE_WAITQUEUE(myself, current); + struct slow_work *work; + int loop; + + mutex_lock(&slow_work_unreg_sync_lock); + add_wait_queue(&slow_work_unreg_wq, &myself); + + for (;;) { + spin_lock_irq(&slow_work_queue_lock); + + /* first of all, we wait for the last queued item in each list + * to be processed */ + list_for_each_entry_reverse(work, &vslow_work_queue, link) { + if (work->owner == module) { + set_current_state(TASK_UNINTERRUPTIBLE); + slow_work_unreg_work_item = work; + goto do_wait; + } + } + list_for_each_entry_reverse(work, &slow_work_queue, link) { + if (work->owner == module) { + set_current_state(TASK_UNINTERRUPTIBLE); + slow_work_unreg_work_item = work; + goto do_wait; + } + } + + /* then we wait for the items being processed to finish */ + slow_work_unreg_module = module; + smp_mb(); + for (loop = 0; loop < SLOW_WORK_THREAD_LIMIT; loop++) { + if (slow_work_thread_processing[loop] == module) + goto do_wait; + } + spin_unlock_irq(&slow_work_queue_lock); + break; /* okay, we're done */ + + do_wait: + spin_unlock_irq(&slow_work_queue_lock); + schedule(); + slow_work_unreg_work_item = NULL; + slow_work_unreg_module = NULL; + } + + remove_wait_queue(&slow_work_unreg_wq, &myself); + mutex_unlock(&slow_work_unreg_sync_lock); +} + /** * slow_work_unregister_user - Unregister a user of the facility + * @module: The module whose items should be cleared * * Unregister a user of the facility, killing all the threads if this was the * last one. + * + * This waits for all the work items belonging to the nominated module to go + * away before proceeding. */ -void slow_work_unregister_user(void) +void slow_work_unregister_user(struct module *module) { + /* first of all, wait for all outstanding items from the calling module + * to complete */ + if (module) + slow_work_wait_for_items(module); + + /* then we can actually go about shutting down the facility if need + * be */ mutex_lock(&slow_work_user_lock); BUG_ON(slow_work_user_count <= 0); -- cgit v1.2.3-70-g09d2 From 4d8bb2cbccf6dccaada509aafeb01c6205c9d8c4 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 19 Nov 2009 18:10:39 +0000 Subject: SLOW_WORK: Make slow_work_ops ->get_ref/->put_ref optional Make the ability for the slow-work facility to take references on a work item optional as not everyone requires this. Even the internal slow-work stubs them out, so those can be got rid of too. Signed-off-by: Jens Axboe Signed-off-by: David Howells --- Documentation/slow-work.txt | 2 +- kernel/slow-work.c | 36 ++++++++++++++++-------------------- 2 files changed, 17 insertions(+), 21 deletions(-) (limited to 'kernel/slow-work.c') diff --git a/Documentation/slow-work.txt b/Documentation/slow-work.txt index f12fda31dcd..c655c517fc6 100644 --- a/Documentation/slow-work.txt +++ b/Documentation/slow-work.txt @@ -125,7 +125,7 @@ ITEM OPERATIONS =============== Each work item requires a table of operations of type struct slow_work_ops. -All members are required: +Only ->execute() is required, getting and putting of a reference are optional. 
(*) Get a reference on an item: diff --git a/kernel/slow-work.c b/kernel/slow-work.c index dd08f376e40..fccf421eb5c 100644 --- a/kernel/slow-work.c +++ b/kernel/slow-work.c @@ -145,6 +145,20 @@ static DECLARE_COMPLETION(slow_work_last_thread_exited); static int slow_work_user_count; static DEFINE_MUTEX(slow_work_user_lock); +static inline int slow_work_get_ref(struct slow_work *work) +{ + if (work->ops->get_ref) + return work->ops->get_ref(work); + + return 0; +} + +static inline void slow_work_put_ref(struct slow_work *work) +{ + if (work->ops->put_ref) + work->ops->put_ref(work); +} + /* * Calculate the maximum number of active threads in the pool that are * permitted to process very slow work items. @@ -248,7 +262,7 @@ static bool slow_work_execute(int id) } /* sort out the race between module unloading and put_ref() */ - work->ops->put_ref(work); + slow_work_put_ref(work); #ifdef CONFIG_MODULES module = slow_work_thread_processing[id]; @@ -309,7 +323,6 @@ int slow_work_enqueue(struct slow_work *work) BUG_ON(slow_work_user_count <= 0); BUG_ON(!work); BUG_ON(!work->ops); - BUG_ON(!work->ops->get_ref); /* when honouring an enqueue request, we only promise that we will run * the work function in the future; we do not promise to run it once @@ -339,7 +352,7 @@ int slow_work_enqueue(struct slow_work *work) if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) { set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags); } else { - if (work->ops->get_ref(work) < 0) + if (slow_work_get_ref(work) < 0) goto cant_get_ref; if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) list_add_tail(&work->link, &vslow_work_queue); @@ -479,21 +492,6 @@ static void slow_work_cull_timeout(unsigned long data) wake_up(&slow_work_thread_wq); } -/* - * Get a reference on slow work thread starter - */ -static int slow_work_new_thread_get_ref(struct slow_work *work) -{ - return 0; -} - -/* - * Drop a reference on slow work thread starter - */ -static void slow_work_new_thread_put_ref(struct slow_work *work) -{ -} - /* * Start a new slow work thread */ @@ -529,8 +527,6 @@ static void slow_work_new_thread_execute(struct slow_work *work) static const struct slow_work_ops slow_work_new_thread_ops = { .owner = THIS_MODULE, - .get_ref = slow_work_new_thread_get_ref, - .put_ref = slow_work_new_thread_put_ref, .execute = slow_work_new_thread_execute, }; -- cgit v1.2.3-70-g09d2 From 0160950297c08f8233c89b9f9e7dd59cfb080809 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 19 Nov 2009 18:10:43 +0000 Subject: SLOW_WORK: Add support for cancellation of slow work Add support for cancellation of queued slow work and delayed slow work items. The cancellation functions will wait for items that are pending or undergoing execution to be discarded by the slow work facility. Attempting to enqueue work that is in the process of being cancelled will result in ECANCELED. Signed-off-by: Jens Axboe Signed-off-by: David Howells --- Documentation/slow-work.txt | 12 ++++++- include/linux/slow-work.h | 2 ++ kernel/slow-work.c | 81 +++++++++++++++++++++++++++++++++++++++++---- 3 files changed, 88 insertions(+), 7 deletions(-) (limited to 'kernel/slow-work.c') diff --git a/Documentation/slow-work.txt b/Documentation/slow-work.txt index c655c517fc6..2e384bd4dea 100644 --- a/Documentation/slow-work.txt +++ b/Documentation/slow-work.txt @@ -108,7 +108,17 @@ on the item, 0 otherwise. The items are reference counted, so there ought to be no need for a flush -operation. When all a module's slow work items have been processed, and the +operation. 
But as the reference counting is optional, means to cancel +existing work items are also included: + + cancel_slow_work(&myitem); + +can be used to cancel pending work. The above cancel function waits for +existing work to have been executed (or prevent execution of them, depending +on timing). + + +When all a module's slow work items have been processed, and the module has no further interest in the facility, it should unregister its interest: diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h index 9adb2b30754..eef20182d5b 100644 --- a/include/linux/slow-work.h +++ b/include/linux/slow-work.h @@ -51,6 +51,7 @@ struct slow_work { #define SLOW_WORK_EXECUTING 1 /* item currently executing */ #define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */ #define SLOW_WORK_VERY_SLOW 3 /* item is very slow */ +#define SLOW_WORK_CANCELLING 4 /* item is being cancelled, don't enqueue */ const struct slow_work_ops *ops; /* operations table for this item */ struct list_head link; /* link in queue */ }; @@ -88,6 +89,7 @@ static inline void vslow_work_init(struct slow_work *work, } extern int slow_work_enqueue(struct slow_work *work); +extern void slow_work_cancel(struct slow_work *work); extern int slow_work_register_user(struct module *owner); extern void slow_work_unregister_user(struct module *owner); diff --git a/kernel/slow-work.c b/kernel/slow-work.c index fccf421eb5c..671cc434532 100644 --- a/kernel/slow-work.c +++ b/kernel/slow-work.c @@ -236,12 +236,17 @@ static bool slow_work_execute(int id) if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags)) BUG(); - work->ops->execute(work); + /* don't execute if the work is in the process of being cancelled */ + if (!test_bit(SLOW_WORK_CANCELLING, &work->flags)) + work->ops->execute(work); if (very_slow) atomic_dec(&vslow_work_executing_count); clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags); + /* wake up anyone waiting for this work to be complete */ + wake_up_bit(&work->flags, SLOW_WORK_EXECUTING); + /* if someone tried to enqueue the item whilst we were executing it, * then it'll be left unenqueued to avoid multiple threads trying to * execute it simultaneously @@ -314,11 +319,16 @@ auto_requeue: * allowed to pick items to execute. This ensures that very slow items won't * overly block ones that are just ordinarily slow. * - * Returns 0 if successful, -EAGAIN if not. 
+ * Returns 0 if successful, -EAGAIN if not (or -ECANCELED if cancelled work is + * attempted queued) */ int slow_work_enqueue(struct slow_work *work) { unsigned long flags; + int ret; + + if (test_bit(SLOW_WORK_CANCELLING, &work->flags)) + return -ECANCELED; BUG_ON(slow_work_user_count <= 0); BUG_ON(!work); @@ -335,6 +345,9 @@ int slow_work_enqueue(struct slow_work *work) if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) { spin_lock_irqsave(&slow_work_queue_lock, flags); + if (unlikely(test_bit(SLOW_WORK_CANCELLING, &work->flags))) + goto cancelled; + /* we promise that we will not attempt to execute the work * function in more than one thread simultaneously * @@ -352,8 +365,9 @@ int slow_work_enqueue(struct slow_work *work) if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) { set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags); } else { - if (slow_work_get_ref(work) < 0) - goto cant_get_ref; + ret = slow_work_get_ref(work); + if (ret < 0) + goto failed; if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) list_add_tail(&work->link, &vslow_work_queue); else @@ -365,12 +379,67 @@ int slow_work_enqueue(struct slow_work *work) } return 0; -cant_get_ref: +cancelled: + ret = -ECANCELED; +failed: spin_unlock_irqrestore(&slow_work_queue_lock, flags); - return -EAGAIN; + return ret; } EXPORT_SYMBOL(slow_work_enqueue); +static int slow_work_wait(void *word) +{ + schedule(); + return 0; +} + +/** + * slow_work_cancel - Cancel a slow work item + * @work: The work item to cancel + * + * This function will cancel a previously enqueued work item. If we cannot + * cancel the work item, it is guarenteed to have run when this function + * returns. + */ +void slow_work_cancel(struct slow_work *work) +{ + bool wait = true, put = false; + + set_bit(SLOW_WORK_CANCELLING, &work->flags); + + spin_lock_irq(&slow_work_queue_lock); + + if (test_bit(SLOW_WORK_PENDING, &work->flags) && + !list_empty(&work->link)) { + /* the link in the pending queue holds a reference on the item + * that we will need to release */ + list_del_init(&work->link); + wait = false; + put = true; + clear_bit(SLOW_WORK_PENDING, &work->flags); + + } else if (test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags)) { + /* the executor is holding our only reference on the item, so + * we merely need to wait for it to finish executing */ + clear_bit(SLOW_WORK_PENDING, &work->flags); + } + + spin_unlock_irq(&slow_work_queue_lock); + + /* the EXECUTING flag is set by the executor whilst the spinlock is set + * and before the item is dequeued - so assuming the above doesn't + * actually dequeue it, simply waiting for the EXECUTING flag to be + * released here should be sufficient */ + if (wait) + wait_on_bit(&work->flags, SLOW_WORK_EXECUTING, slow_work_wait, + TASK_UNINTERRUPTIBLE); + + clear_bit(SLOW_WORK_CANCELLING, &work->flags); + if (put) + slow_work_put_ref(work); +} +EXPORT_SYMBOL(slow_work_cancel); + /* * Schedule a cull of the thread pool at some time in the near future */ -- cgit v1.2.3-70-g09d2 From 6b8268b17a1ffc942bc72d7d00274e433d6b6719 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 19 Nov 2009 18:10:47 +0000 Subject: SLOW_WORK: Add delayed_slow_work support This adds support for starting slow work with a delay, similar to the functionality we have for workqueues. 
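
A minimal sketch of how a caller might use the delayed variant added here is shown below. The example_* names are hypothetical, and example_ops is assumed to be an ops table like the one sketched earlier; note that the delayed enqueue path above takes a reference through ->get_ref while the timer is pending.

	#include <linux/jiffies.h>
	#include <linux/slow-work.h>

	static struct delayed_slow_work example_dwork;

	static void example_setup(void)
	{
		/* one-time initialisation, before the first enqueue */
		delayed_slow_work_init(&example_dwork, &example_ops);
	}

	static int example_kick_later(void)
	{
		/* run example_ops->execute() roughly one second from now */
		return delayed_slow_work_enqueue(&example_dwork, HZ);
	}

	static void example_stop(void)
	{
		/* drops a pending/delayed item, or waits for a running one to finish */
		delayed_slow_work_cancel(&example_dwork);
	}
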
Signed-off-by: Jens Axboe Signed-off-by: David Howells --- Documentation/slow-work.txt | 16 +++++- include/linux/slow-work.h | 29 ++++++++++ kernel/slow-work.c | 129 +++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 171 insertions(+), 3 deletions(-) (limited to 'kernel/slow-work.c') diff --git a/Documentation/slow-work.txt b/Documentation/slow-work.txt index 2e384bd4dea..a9d1b0ffdde 100644 --- a/Documentation/slow-work.txt +++ b/Documentation/slow-work.txt @@ -41,6 +41,13 @@ expand files, provided the time taken to do so isn't too long. Operations of both types may sleep during execution, thus tying up the thread loaned to it. +A further class of work item is available, based on the slow work item class: + + (*) Delayed slow work items. + +These are slow work items that have a timer to defer queueing of the item for +a while. + THREAD-TO-CLASS ALLOCATION -------------------------- @@ -93,6 +100,10 @@ Slow work items may then be set up by: slow_work_init(&myitem, &myitem_ops); + or: + + delayed_slow_work_init(&myitem, &myitem_ops); + or: vslow_work_init(&myitem, &myitem_ops); @@ -104,7 +115,9 @@ A suitably set up work item can then be enqueued for processing: int ret = slow_work_enqueue(&myitem); This will return a -ve error if the thread pool is unable to gain a reference -on the item, 0 otherwise. +on the item, 0 otherwise, or (for delayed work): + + int ret = delayed_slow_work_enqueue(&myitem, my_jiffy_delay); The items are reference counted, so there ought to be no need for a flush @@ -112,6 +125,7 @@ operation. But as the reference counting is optional, means to cancel existing work items are also included: cancel_slow_work(&myitem); + cancel_delayed_slow_work(&myitem); can be used to cancel pending work. The above cancel function waits for existing work to have been executed (or prevent execution of them, depending diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h index eef20182d5b..b245b9a9cc0 100644 --- a/include/linux/slow-work.h +++ b/include/linux/slow-work.h @@ -17,6 +17,7 @@ #ifdef CONFIG_SLOW_WORK #include +#include struct slow_work; @@ -52,10 +53,16 @@ struct slow_work { #define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */ #define SLOW_WORK_VERY_SLOW 3 /* item is very slow */ #define SLOW_WORK_CANCELLING 4 /* item is being cancelled, don't enqueue */ +#define SLOW_WORK_DELAYED 5 /* item is struct delayed_slow_work with active timer */ const struct slow_work_ops *ops; /* operations table for this item */ struct list_head link; /* link in queue */ }; +struct delayed_slow_work { + struct slow_work work; + struct timer_list timer; +}; + /** * slow_work_init - Initialise a slow work item * @work: The work item to initialise @@ -71,6 +78,20 @@ static inline void slow_work_init(struct slow_work *work, INIT_LIST_HEAD(&work->link); } +/** + * slow_work_init - Initialise a delayed slow work item + * @work: The work item to initialise + * @ops: The operations to use to handle the slow work item + * + * Initialise a delayed slow work item. 
+ */ +static inline void delayed_slow_work_init(struct delayed_slow_work *dwork, + const struct slow_work_ops *ops) +{ + init_timer(&dwork->timer); + slow_work_init(&dwork->work, ops); +} + /** * vslow_work_init - Initialise a very slow work item * @work: The work item to initialise @@ -93,6 +114,14 @@ extern void slow_work_cancel(struct slow_work *work); extern int slow_work_register_user(struct module *owner); extern void slow_work_unregister_user(struct module *owner); +extern int delayed_slow_work_enqueue(struct delayed_slow_work *dwork, + unsigned long delay); + +static inline void delayed_slow_work_cancel(struct delayed_slow_work *dwork) +{ + slow_work_cancel(&dwork->work); +} + #ifdef CONFIG_SYSCTL extern ctl_table slow_work_sysctls[]; #endif diff --git a/kernel/slow-work.c b/kernel/slow-work.c index 671cc434532..f67e1daae93 100644 --- a/kernel/slow-work.c +++ b/kernel/slow-work.c @@ -406,11 +406,40 @@ void slow_work_cancel(struct slow_work *work) bool wait = true, put = false; set_bit(SLOW_WORK_CANCELLING, &work->flags); + smp_mb(); + + /* if the work item is a delayed work item with an active timer, we + * need to wait for the timer to finish _before_ getting the spinlock, + * lest we deadlock against the timer routine + * + * the timer routine will leave DELAYED set if it notices the + * CANCELLING flag in time + */ + if (test_bit(SLOW_WORK_DELAYED, &work->flags)) { + struct delayed_slow_work *dwork = + container_of(work, struct delayed_slow_work, work); + del_timer_sync(&dwork->timer); + } spin_lock_irq(&slow_work_queue_lock); - if (test_bit(SLOW_WORK_PENDING, &work->flags) && - !list_empty(&work->link)) { + if (test_bit(SLOW_WORK_DELAYED, &work->flags)) { + /* the timer routine aborted or never happened, so we are left + * holding the timer's reference on the item and should just + * drop the pending flag and wait for any ongoing execution to + * finish */ + struct delayed_slow_work *dwork = + container_of(work, struct delayed_slow_work, work); + + BUG_ON(timer_pending(&dwork->timer)); + BUG_ON(!list_empty(&work->link)); + + clear_bit(SLOW_WORK_DELAYED, &work->flags); + put = true; + clear_bit(SLOW_WORK_PENDING, &work->flags); + + } else if (test_bit(SLOW_WORK_PENDING, &work->flags) && + !list_empty(&work->link)) { /* the link in the pending queue holds a reference on the item * that we will need to release */ list_del_init(&work->link); @@ -440,6 +469,102 @@ void slow_work_cancel(struct slow_work *work) } EXPORT_SYMBOL(slow_work_cancel); +/* + * Handle expiry of the delay timer, indicating that a delayed slow work item + * should now be queued if not cancelled + */ +static void delayed_slow_work_timer(unsigned long data) +{ + struct slow_work *work = (struct slow_work *) data; + unsigned long flags; + bool queued = false, put = false; + + spin_lock_irqsave(&slow_work_queue_lock, flags); + if (likely(!test_bit(SLOW_WORK_CANCELLING, &work->flags))) { + clear_bit(SLOW_WORK_DELAYED, &work->flags); + + if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) { + /* we discard the reference the timer was holding in + * favour of the one the executor holds */ + set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags); + put = true; + } else { + if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) + list_add_tail(&work->link, &vslow_work_queue); + else + list_add_tail(&work->link, &slow_work_queue); + queued = true; + } + } + + spin_unlock_irqrestore(&slow_work_queue_lock, flags); + if (put) + slow_work_put_ref(work); + if (queued) + wake_up(&slow_work_thread_wq); +} + +/** + * delayed_slow_work_enqueue - 
Schedule a delayed slow work item for processing + * @dwork: The delayed work item to queue + * @delay: When to start executing the work, in jiffies from now + * + * This is similar to slow_work_enqueue(), but it adds a delay before the work + * is actually queued for processing. + * + * The item can have delayed processing requested on it whilst it is being + * executed. The delay will begin immediately, and if it expires before the + * item finishes executing, the item will be placed back on the queue when it + * has done executing. + */ +int delayed_slow_work_enqueue(struct delayed_slow_work *dwork, + unsigned long delay) +{ + struct slow_work *work = &dwork->work; + unsigned long flags; + int ret; + + if (delay == 0) + return slow_work_enqueue(&dwork->work); + + BUG_ON(slow_work_user_count <= 0); + BUG_ON(!work); + BUG_ON(!work->ops); + + if (test_bit(SLOW_WORK_CANCELLING, &work->flags)) + return -ECANCELED; + + if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) { + spin_lock_irqsave(&slow_work_queue_lock, flags); + + if (test_bit(SLOW_WORK_CANCELLING, &work->flags)) + goto cancelled; + + /* the timer holds a reference whilst it is pending */ + ret = work->ops->get_ref(work); + if (ret < 0) + goto cant_get_ref; + + if (test_and_set_bit(SLOW_WORK_DELAYED, &work->flags)) + BUG(); + dwork->timer.expires = jiffies + delay; + dwork->timer.data = (unsigned long) work; + dwork->timer.function = delayed_slow_work_timer; + add_timer(&dwork->timer); + + spin_unlock_irqrestore(&slow_work_queue_lock, flags); + } + + return 0; + +cancelled: + ret = -ECANCELED; +cant_get_ref: + spin_unlock_irqrestore(&slow_work_queue_lock, flags); + return ret; +} +EXPORT_SYMBOL(delayed_slow_work_enqueue); + /* * Schedule a cull of the thread pool at some time in the near future */ -- cgit v1.2.3-70-g09d2 From 8fba10a42d191de612e60e7009c8f0313f90a9b3 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 19 Nov 2009 18:10:51 +0000 Subject: SLOW_WORK: Allow the work items to be viewed through a /proc file Allow the executing and queued work items to be viewed through a /proc file for debugging purposes. 
The contents look something like the following: THR PID ITEM ADDR FL MARK DESC === ===== ================ == ===== ========== 0 3005 ffff880023f52348 a 952ms FSC: OBJ17d3: LOOK 1 3006 ffff880024e33668 2 160ms FSC: OBJ17e5 OP60d3b: Write1/Store fl=2 2 3165 ffff8800296dd180 a 424ms FSC: OBJ17e4: LOOK 3 4089 ffff8800262c8d78 a 212ms FSC: OBJ17ea: CRTN 4 4090 ffff88002792bed8 2 388ms FSC: OBJ17e8 OP60d36: Write1/Store fl=2 5 4092 ffff88002a0ef308 2 388ms FSC: OBJ17e7 OP60d2e: Write1/Store fl=2 6 4094 ffff88002abaf4b8 2 132ms FSC: OBJ17e2 OP60d4e: Write1/Store fl=2 7 4095 ffff88002bb188e0 a 388ms FSC: OBJ17e9: CRTN vsq - ffff880023d99668 1 308ms FSC: OBJ17e0 OP60f91: Write1/EnQ fl=2 vsq - ffff8800295d1740 1 212ms FSC: OBJ16be OP4d4b6: Write1/EnQ fl=2 vsq - ffff880025ba3308 1 160ms FSC: OBJ179a OP58dec: Write1/EnQ fl=2 vsq - ffff880024ec83e0 1 160ms FSC: OBJ17ae OP599f2: Write1/EnQ fl=2 vsq - ffff880026618e00 1 160ms FSC: OBJ17e6 OP60d33: Write1/EnQ fl=2 vsq - ffff880025a2a4b8 1 132ms FSC: OBJ16a2 OP4d583: Write1/EnQ fl=2 vsq - ffff880023cbe6d8 9 212ms FSC: OBJ17eb: LOOK vsq - ffff880024d37590 9 212ms FSC: OBJ17ec: LOOK vsq - ffff880027746cb0 9 212ms FSC: OBJ17ed: LOOK vsq - ffff880024d37ae8 9 212ms FSC: OBJ17ee: LOOK vsq - ffff880024d37cb0 9 212ms FSC: OBJ17ef: LOOK vsq - ffff880025036550 9 212ms FSC: OBJ17f0: LOOK vsq - ffff8800250368e0 9 212ms FSC: OBJ17f1: LOOK vsq - ffff880025036aa8 9 212ms FSC: OBJ17f2: LOOK In the 'THR' column, executing items show the thread they're occupying and queued threads indicate which queue they're on. 'PID' shows the process ID of a slow-work thread that's executing something. 'FL' shows the work item flags. 'MARK' indicates how long since an item was queued or began executing. Lastly, the 'DESC' column permits the owner of an item to give some information. Signed-off-by: David Howells --- Documentation/slow-work.txt | 60 +++++++++++- include/linux/slow-work.h | 11 +++ init/Kconfig | 10 ++ kernel/Makefile | 1 + kernel/slow-work-proc.c | 227 ++++++++++++++++++++++++++++++++++++++++++++ kernel/slow-work.c | 44 ++++++--- kernel/slow-work.h | 72 ++++++++++++++ 7 files changed, 413 insertions(+), 12 deletions(-) create mode 100644 kernel/slow-work-proc.c create mode 100644 kernel/slow-work.h (limited to 'kernel/slow-work.c') diff --git a/Documentation/slow-work.txt b/Documentation/slow-work.txt index a9d1b0ffdde..f120238e70f 100644 --- a/Documentation/slow-work.txt +++ b/Documentation/slow-work.txt @@ -149,7 +149,8 @@ ITEM OPERATIONS =============== Each work item requires a table of operations of type struct slow_work_ops. -Only ->execute() is required, getting and putting of a reference are optional. +Only ->execute() is required; the getting and putting of a reference and the +describing of an item are all optional. (*) Get a reference on an item: @@ -179,6 +180,16 @@ Only ->execute() is required, getting and putting of a reference are optional. This should perform the work required of the item. It may sleep, it may perform disk I/O and it may wait for locks. + (*) View an item through /proc: + + void (*desc)(struct slow_work *work, struct seq_file *m); + + If supplied, this should print to 'm' a small string describing the work + the item is to do. This should be no more than about 40 characters, and + shouldn't include a newline character. + + See the 'Viewing executing and queued items' section below. 
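To make the ->desc() operation above concrete, here is a minimal sketch of how a user of the facility might supply one alongside its ->execute() routine; the myfs_* names and the MYFS prefix are invented for illustration and are not taken from the patch:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slow-work.h>

struct myfs_object {
	unsigned long		id;	/* made-up object identifier */
	struct slow_work	work;	/* embedded slow work item */
};

static void myfs_work_execute(struct slow_work *work)
{
	/* ... the actual slow operation would go here ... */
}

#ifdef CONFIG_SLOW_WORK_PROC
/* keep the description short (about 40 characters) and newline-free */
static void myfs_work_desc(struct slow_work *work, struct seq_file *m)
{
	struct myfs_object *obj = container_of(work, struct myfs_object, work);

	seq_printf(m, "MYFS: OBJ%lx", obj->id);
}
#endif

static const struct slow_work_ops myfs_work_ops = {
	.owner		= THIS_MODULE,
	.execute	= myfs_work_execute,
#ifdef CONFIG_SLOW_WORK_PROC
	.desc		= myfs_work_desc,
#endif
};

This mirrors the way slow_work_new_thread_ops gains a .desc operation later in this patch.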
+ ================== POOL CONFIGURATION @@ -203,3 +214,50 @@ The slow-work thread pool has a number of configurables: is bounded to between 1 and one fewer than the number of active threads. This ensures there is always at least one thread that can process very slow work items, and always at least one thread that won't. + + +================================== +VIEWING EXECUTING AND QUEUED ITEMS +================================== + +If CONFIG_SLOW_WORK_PROC is enabled, a proc file is made available: + + /proc/slow_work_rq + +through which the list of work items being executed and the queues of items to +be executed may be viewed. The owner of a work item is given the chance to +add some information of its own. + +The contents look something like the following: + + THR PID ITEM ADDR FL MARK DESC + === ===== ================ == ===== ========== + 0 3005 ffff880023f52348 a 952ms FSC: OBJ17d3: LOOK + 1 3006 ffff880024e33668 2 160ms FSC: OBJ17e5 OP60d3b: Write1/Store fl=2 + 2 3165 ffff8800296dd180 a 424ms FSC: OBJ17e4: LOOK + 3 4089 ffff8800262c8d78 a 212ms FSC: OBJ17ea: CRTN + 4 4090 ffff88002792bed8 2 388ms FSC: OBJ17e8 OP60d36: Write1/Store fl=2 + 5 4092 ffff88002a0ef308 2 388ms FSC: OBJ17e7 OP60d2e: Write1/Store fl=2 + 6 4094 ffff88002abaf4b8 2 132ms FSC: OBJ17e2 OP60d4e: Write1/Store fl=2 + 7 4095 ffff88002bb188e0 a 388ms FSC: OBJ17e9: CRTN + vsq - ffff880023d99668 1 308ms FSC: OBJ17e0 OP60f91: Write1/EnQ fl=2 + vsq - ffff8800295d1740 1 212ms FSC: OBJ16be OP4d4b6: Write1/EnQ fl=2 + vsq - ffff880025ba3308 1 160ms FSC: OBJ179a OP58dec: Write1/EnQ fl=2 + vsq - ffff880024ec83e0 1 160ms FSC: OBJ17ae OP599f2: Write1/EnQ fl=2 + vsq - ffff880026618e00 1 160ms FSC: OBJ17e6 OP60d33: Write1/EnQ fl=2 + vsq - ffff880025a2a4b8 1 132ms FSC: OBJ16a2 OP4d583: Write1/EnQ fl=2 + vsq - ffff880023cbe6d8 9 212ms FSC: OBJ17eb: LOOK + vsq - ffff880024d37590 9 212ms FSC: OBJ17ec: LOOK + vsq - ffff880027746cb0 9 212ms FSC: OBJ17ed: LOOK + vsq - ffff880024d37ae8 9 212ms FSC: OBJ17ee: LOOK + vsq - ffff880024d37cb0 9 212ms FSC: OBJ17ef: LOOK + vsq - ffff880025036550 9 212ms FSC: OBJ17f0: LOOK + vsq - ffff8800250368e0 9 212ms FSC: OBJ17f1: LOOK + vsq - ffff880025036aa8 9 212ms FSC: OBJ17f2: LOOK + +In the 'THR' column, executing items show the thread they're occupying and +queued threads indicate which queue they're on. 'PID' shows the process ID of +a slow-work thread that's executing something. 'FL' shows the work item flags. +'MARK' indicates how long since an item was queued or began executing. Lastly, +the 'DESC' column permits the owner of an item to give some information. 
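The FL column is just the work item's flags word rendered in hex. As a reading aid, a small illustrative decoder could look like the following; it is not part of the patch and assumes that SLOW_WORK_PENDING and SLOW_WORK_EXECUTING are bits 0 and 1, with the remaining bits as defined in include/linux/slow-work.h:

#include <linux/kernel.h>
#include <linux/slow-work.h>

/* print a human-readable expansion of an FL value from the /proc file */
static void myfs_decode_fl(unsigned long fl)
{
	pr_info("fl=%lx:%s%s%s%s%s%s\n", fl,
		fl & (1UL << SLOW_WORK_PENDING)      ? " PENDING"      : "",
		fl & (1UL << SLOW_WORK_EXECUTING)    ? " EXECUTING"    : "",
		fl & (1UL << SLOW_WORK_ENQ_DEFERRED) ? " ENQ_DEFERRED" : "",
		fl & (1UL << SLOW_WORK_VERY_SLOW)    ? " VERY_SLOW"    : "",
		fl & (1UL << SLOW_WORK_CANCELLING)   ? " CANCELLING"   : "",
		fl & (1UL << SLOW_WORK_DELAYED)      ? " DELAYED"      : "");
}

Read against the sample output above, an executing entry with flags 'a' would decode as EXECUTING|VERY_SLOW, and a queued 'vsq' entry with flags '9' as PENDING|VERY_SLOW.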
+ diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h index b245b9a9cc0..f41485145ed 100644 --- a/include/linux/slow-work.h +++ b/include/linux/slow-work.h @@ -20,6 +20,9 @@ #include struct slow_work; +#ifdef CONFIG_SLOW_WORK_PROC +struct seq_file; +#endif /* * The operations used to support slow work items @@ -38,6 +41,11 @@ struct slow_work_ops { /* execute a work item */ void (*execute)(struct slow_work *work); + +#ifdef CONFIG_SLOW_WORK_PROC + /* describe a work item for /proc */ + void (*desc)(struct slow_work *work, struct seq_file *m); +#endif }; /* @@ -56,6 +64,9 @@ struct slow_work { #define SLOW_WORK_DELAYED 5 /* item is struct delayed_slow_work with active timer */ const struct slow_work_ops *ops; /* operations table for this item */ struct list_head link; /* link in queue */ +#ifdef CONFIG_SLOW_WORK_PROC + struct timespec mark; /* jiffies at which queued or exec begun */ +#endif }; struct delayed_slow_work { diff --git a/init/Kconfig b/init/Kconfig index 9e03ef8b311..ab5c64801fe 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1098,6 +1098,16 @@ config SLOW_WORK See Documentation/slow-work.txt. +config SLOW_WORK_PROC + bool "Slow work debugging through /proc" + default n + depends on SLOW_WORK && PROC_FS + help + Display the contents of the slow work run queue through /proc, + including items currently executing. + + See Documentation/slow-work.txt. + endmenu # General setup config HAVE_GENERIC_DMA_COHERENT diff --git a/kernel/Makefile b/kernel/Makefile index b8d4cd8ac0b..776ffed1556 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -94,6 +94,7 @@ obj-$(CONFIG_X86_DS) += trace/ obj-$(CONFIG_RING_BUFFER) += trace/ obj-$(CONFIG_SMP) += sched_cpupri.o obj-$(CONFIG_SLOW_WORK) += slow-work.o +obj-$(CONFIG_SLOW_WORK_PROC) += slow-work-proc.o obj-$(CONFIG_PERF_EVENTS) += perf_event.o ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) diff --git a/kernel/slow-work-proc.c b/kernel/slow-work-proc.c new file mode 100644 index 00000000000..3988032571f --- /dev/null +++ b/kernel/slow-work-proc.c @@ -0,0 +1,227 @@ +/* Slow work debugging + * + * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include "slow-work.h" + +#define ITERATOR_SHIFT (BITS_PER_LONG - 4) +#define ITERATOR_SELECTOR (0xfUL << ITERATOR_SHIFT) +#define ITERATOR_COUNTER (~ITERATOR_SELECTOR) + +void slow_work_new_thread_desc(struct slow_work *work, struct seq_file *m) +{ + seq_puts(m, "Slow-work: New thread"); +} + +/* + * Render the time mark field on a work item into a 5-char time with units plus + * a space + */ +static void slow_work_print_mark(struct seq_file *m, struct slow_work *work) +{ + struct timespec now, diff; + + now = CURRENT_TIME; + diff = timespec_sub(now, work->mark); + + if (diff.tv_sec < 0) + seq_puts(m, " -ve "); + else if (diff.tv_sec == 0 && diff.tv_nsec < 1000) + seq_printf(m, "%3luns ", diff.tv_nsec); + else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000) + seq_printf(m, "%3luus ", diff.tv_nsec / 1000); + else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000000) + seq_printf(m, "%3lums ", diff.tv_nsec / 1000000); + else if (diff.tv_sec <= 1) + seq_puts(m, " 1s "); + else if (diff.tv_sec < 60) + seq_printf(m, "%4lus ", diff.tv_sec); + else if (diff.tv_sec < 60 * 60) + seq_printf(m, "%4lum ", diff.tv_sec / 60); + else if (diff.tv_sec < 60 * 60 * 24) + seq_printf(m, "%4luh ", diff.tv_sec / 3600); + else + seq_puts(m, "exces "); +} + +/* + * Describe a slow work item for /proc + */ +static int slow_work_runqueue_show(struct seq_file *m, void *v) +{ + struct slow_work *work; + struct list_head *p = v; + unsigned long id; + + switch ((unsigned long) v) { + case 1: + seq_puts(m, "THR PID ITEM ADDR FL MARK DESC\n"); + return 0; + case 2: + seq_puts(m, "=== ===== ================ == ===== ==========\n"); + return 0; + + case 3 ... 3 + SLOW_WORK_THREAD_LIMIT - 1: + id = (unsigned long) v - 3; + + read_lock(&slow_work_execs_lock); + work = slow_work_execs[id]; + if (work) { + smp_read_barrier_depends(); + + seq_printf(m, "%3lu %5d %16p %2lx ", + id, slow_work_pids[id], work, work->flags); + slow_work_print_mark(m, work); + + if (work->ops->desc) + work->ops->desc(work, m); + seq_putc(m, '\n'); + } + read_unlock(&slow_work_execs_lock); + return 0; + + default: + work = list_entry(p, struct slow_work, link); + seq_printf(m, "%3s - %16p %2lx ", + work->flags & SLOW_WORK_VERY_SLOW ? 
"vsq" : "sq", + work, work->flags); + slow_work_print_mark(m, work); + + if (work->ops->desc) + work->ops->desc(work, m); + seq_putc(m, '\n'); + return 0; + } +} + +/* + * map the iterator to a work item + */ +static void *slow_work_runqueue_index(struct seq_file *m, loff_t *_pos) +{ + struct list_head *p; + unsigned long count, id; + + switch (*_pos >> ITERATOR_SHIFT) { + case 0x0: + if (*_pos == 0) + *_pos = 1; + if (*_pos < 3) + return (void *)(unsigned long) *_pos; + if (*_pos < 3 + SLOW_WORK_THREAD_LIMIT) + for (id = *_pos - 3; + id < SLOW_WORK_THREAD_LIMIT; + id++, (*_pos)++) + if (slow_work_execs[id]) + return (void *)(unsigned long) *_pos; + *_pos = 0x1UL << ITERATOR_SHIFT; + + case 0x1: + count = *_pos & ITERATOR_COUNTER; + list_for_each(p, &slow_work_queue) { + if (count == 0) + return p; + count--; + } + *_pos = 0x2UL << ITERATOR_SHIFT; + + case 0x2: + count = *_pos & ITERATOR_COUNTER; + list_for_each(p, &vslow_work_queue) { + if (count == 0) + return p; + count--; + } + *_pos = 0x3UL << ITERATOR_SHIFT; + + default: + return NULL; + } +} + +/* + * set up the iterator to start reading from the first line + */ +static void *slow_work_runqueue_start(struct seq_file *m, loff_t *_pos) +{ + spin_lock_irq(&slow_work_queue_lock); + return slow_work_runqueue_index(m, _pos); +} + +/* + * move to the next line + */ +static void *slow_work_runqueue_next(struct seq_file *m, void *v, loff_t *_pos) +{ + struct list_head *p = v; + unsigned long selector = *_pos >> ITERATOR_SHIFT; + + (*_pos)++; + switch (selector) { + case 0x0: + return slow_work_runqueue_index(m, _pos); + + case 0x1: + if (*_pos >> ITERATOR_SHIFT == 0x1) { + p = p->next; + if (p != &slow_work_queue) + return p; + } + *_pos = 0x2UL << ITERATOR_SHIFT; + p = &vslow_work_queue; + + case 0x2: + if (*_pos >> ITERATOR_SHIFT == 0x2) { + p = p->next; + if (p != &vslow_work_queue) + return p; + } + *_pos = 0x3UL << ITERATOR_SHIFT; + + default: + return NULL; + } +} + +/* + * clean up after reading + */ +static void slow_work_runqueue_stop(struct seq_file *m, void *v) +{ + spin_unlock_irq(&slow_work_queue_lock); +} + +static const struct seq_operations slow_work_runqueue_ops = { + .start = slow_work_runqueue_start, + .stop = slow_work_runqueue_stop, + .next = slow_work_runqueue_next, + .show = slow_work_runqueue_show, +}; + +/* + * open "/proc/slow_work_rq" to list queue contents + */ +static int slow_work_runqueue_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &slow_work_runqueue_ops); +} + +const struct file_operations slow_work_runqueue_fops = { + .owner = THIS_MODULE, + .open = slow_work_runqueue_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; diff --git a/kernel/slow-work.c b/kernel/slow-work.c index f67e1daae93..b763bc2d267 100644 --- a/kernel/slow-work.c +++ b/kernel/slow-work.c @@ -16,13 +16,8 @@ #include #include #include - -#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of - * things to do */ -#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after - * OOM */ - -#define SLOW_WORK_THREAD_LIMIT 255 /* abs maximum number of slow-work threads */ +#include +#include "slow-work.h" static void slow_work_cull_timeout(unsigned long); static void slow_work_oom_timeout(unsigned long); @@ -116,6 +111,15 @@ static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq); static DEFINE_MUTEX(slow_work_unreg_sync_lock); #endif +/* + * Data for tracking currently executing items for indication through /proc + */ +#ifdef CONFIG_SLOW_WORK_PROC +struct 
slow_work *slow_work_execs[SLOW_WORK_THREAD_LIMIT]; +pid_t slow_work_pids[SLOW_WORK_THREAD_LIMIT]; +DEFINE_RWLOCK(slow_work_execs_lock); +#endif + /* * The queues of work items and the lock governing access to them. These are * shared between all the CPUs. It doesn't make sense to have per-CPU queues @@ -124,9 +128,9 @@ static DEFINE_MUTEX(slow_work_unreg_sync_lock); * There are two queues of work items: one for slow work items, and one for * very slow work items. */ -static LIST_HEAD(slow_work_queue); -static LIST_HEAD(vslow_work_queue); -static DEFINE_SPINLOCK(slow_work_queue_lock); +LIST_HEAD(slow_work_queue); +LIST_HEAD(vslow_work_queue); +DEFINE_SPINLOCK(slow_work_queue_lock); /* * The thread controls. A variable used to signal to the threads that they @@ -182,7 +186,7 @@ static unsigned slow_work_calc_vsmax(void) * Attempt to execute stuff queued on a slow thread. Return true if we managed * it, false if there was nothing to do. */ -static bool slow_work_execute(int id) +static noinline bool slow_work_execute(int id) { #ifdef CONFIG_MODULES struct module *module; @@ -227,6 +231,10 @@ static bool slow_work_execute(int id) if (work) slow_work_thread_processing[id] = work->owner; #endif + if (work) { + slow_work_mark_time(work); + slow_work_begin_exec(id, work); + } spin_unlock_irq(&slow_work_queue_lock); @@ -247,6 +255,8 @@ static bool slow_work_execute(int id) /* wake up anyone waiting for this work to be complete */ wake_up_bit(&work->flags, SLOW_WORK_EXECUTING); + slow_work_end_exec(id, work); + /* if someone tried to enqueue the item whilst we were executing it, * then it'll be left unenqueued to avoid multiple threads trying to * execute it simultaneously @@ -285,6 +295,7 @@ auto_requeue: * - we transfer our ref on the item back to the appropriate queue * - don't wake another thread up as we're awake already */ + slow_work_mark_time(work); if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) list_add_tail(&work->link, &vslow_work_queue); else @@ -368,6 +379,7 @@ int slow_work_enqueue(struct slow_work *work) ret = slow_work_get_ref(work); if (ret < 0) goto failed; + slow_work_mark_time(work); if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) list_add_tail(&work->link, &vslow_work_queue); else @@ -489,6 +501,7 @@ static void delayed_slow_work_timer(unsigned long data) set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags); put = true; } else { + slow_work_mark_time(work); if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) list_add_tail(&work->link, &vslow_work_queue); else @@ -627,6 +640,7 @@ static int slow_work_thread(void *_data) id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT); BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT); __set_bit(id, slow_work_ids); + slow_work_set_thread_pid(id, current->pid); spin_unlock_irq(&slow_work_queue_lock); sprintf(current->comm, "kslowd%03u", id); @@ -669,6 +683,7 @@ static int slow_work_thread(void *_data) } spin_lock_irq(&slow_work_queue_lock); + slow_work_set_thread_pid(id, 0); __clear_bit(id, slow_work_ids); spin_unlock_irq(&slow_work_queue_lock); @@ -722,6 +737,9 @@ static void slow_work_new_thread_execute(struct slow_work *work) static const struct slow_work_ops slow_work_new_thread_ops = { .owner = THIS_MODULE, .execute = slow_work_new_thread_execute, +#ifdef CONFIG_SLOW_WORK_PROC + .desc = slow_work_new_thread_desc, +#endif }; /* @@ -948,6 +966,10 @@ static int __init init_slow_work(void) #ifdef CONFIG_SYSCTL if (slow_work_max_max_threads < nr_cpus * 2) slow_work_max_max_threads = nr_cpus * 2; +#endif +#ifdef CONFIG_SLOW_WORK_PROC + 
proc_create("slow_work_rq", S_IFREG | 0400, NULL, + &slow_work_runqueue_fops); #endif return 0; } diff --git a/kernel/slow-work.h b/kernel/slow-work.h new file mode 100644 index 00000000000..3c2f007f3ad --- /dev/null +++ b/kernel/slow-work.h @@ -0,0 +1,72 @@ +/* Slow work private definitions + * + * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of + * things to do */ +#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after + * OOM */ + +#define SLOW_WORK_THREAD_LIMIT 255 /* abs maximum number of slow-work threads */ + +/* + * slow-work.c + */ +#ifdef CONFIG_SLOW_WORK_PROC +extern struct slow_work *slow_work_execs[]; +extern pid_t slow_work_pids[]; +extern rwlock_t slow_work_execs_lock; +#endif + +extern struct list_head slow_work_queue; +extern struct list_head vslow_work_queue; +extern spinlock_t slow_work_queue_lock; + +/* + * slow-work-proc.c + */ +#ifdef CONFIG_SLOW_WORK_PROC +extern const struct file_operations slow_work_runqueue_fops; + +extern void slow_work_new_thread_desc(struct slow_work *, struct seq_file *); +#endif + +/* + * Helper functions + */ +static inline void slow_work_set_thread_pid(int id, pid_t pid) +{ +#ifdef CONFIG_SLOW_WORK_PROC + slow_work_pids[id] = pid; +#endif +} + +static inline void slow_work_mark_time(struct slow_work *work) +{ +#ifdef CONFIG_SLOW_WORK_PROC + work->mark = CURRENT_TIME; +#endif +} + +static inline void slow_work_begin_exec(int id, struct slow_work *work) +{ +#ifdef CONFIG_SLOW_WORK_PROC + slow_work_execs[id] = work; +#endif +} + +static inline void slow_work_end_exec(int id, struct slow_work *work) +{ +#ifdef CONFIG_SLOW_WORK_PROC + write_lock(&slow_work_execs_lock); + slow_work_execs[id] = NULL; + write_unlock(&slow_work_execs_lock); +#endif +} -- cgit v1.2.3-70-g09d2 From 3bde31a4ac225cb5805be02eff6eaaf7e0766ccd Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 19 Nov 2009 18:10:57 +0000 Subject: SLOW_WORK: Allow a requeueable work item to sleep till the thread is needed Add a function to allow a requeueable work item to sleep till the thread processing it is needed by the slow-work facility to perform other work. Sometimes a work item can't progress immediately, but must wait for the completion of another work item that's currently being processed by another slow-work thread. In some circumstances, the waiting item could instead - theoretically - put itself back on the queue and yield its thread back to the slow-work facility, thus waiting till it gets processing time again before attempting to progress. This would allow other work items processing time on that thread. However, this only works if there is something on the queue for it to queue behind - otherwise it will just get a thread again immediately, and will end up cycling between the queue and the thread, eating up valuable CPU time. So, slow_work_sleep_till_thread_needed() is provided such that an item can put itself on a wait queue that will wake it up when the event it is actually interested in occurs, then call this function in lieu of calling schedule(). 
This function will then sleep until either the item's event occurs or another work item appears on the queue. If another work item is queued, but the item's event hasn't occurred, then the work item should requeue itself and yield the thread back to the slow-work facility by returning. This can be used by CacheFiles for an object that is being created on one thread to wait for an object being deleted on another thread where there is nothing on the queue for the creation to go and wait behind. As soon as an item appears on the queue that could be given thread time instead, CacheFiles can stick the creating object back on the queue and return to the slow-work facility - assuming the object deletion didn't also complete. Signed-off-by: David Howells --- Documentation/slow-work.txt | 44 +++++++++++++++++++++ include/linux/slow-work.h | 3 ++ kernel/slow-work.c | 94 ++++++++++++++++++++++++++++++++++++++++----- 3 files changed, 132 insertions(+), 9 deletions(-) (limited to 'kernel/slow-work.c') diff --git a/Documentation/slow-work.txt b/Documentation/slow-work.txt index 0169c9d9dd1..52bc3143372 100644 --- a/Documentation/slow-work.txt +++ b/Documentation/slow-work.txt @@ -158,6 +158,50 @@ with a requeue pending). This can be used to work out whether an item on which another depends is on the queue, thus allowing a dependent item to be queued after it. +If the above shows an item on which another depends not to be queued, then the +owner of the dependent item might need to wait. However, to avoid locking up +the threads unnecessarily by sleeping in them, it can make sense under some +circumstances to return the work item to the queue, thus deferring it until +some other items have had a chance to make use of the yielded thread. + +To yield a thread and defer an item, the work function should simply enqueue +the work item again and return. However, this doesn't work if there's nothing +actually on the queue, as the thread just vacated will jump straight back into +the item's work function, thus busy waiting on a CPU. + +Instead, the item should use the thread to wait for the dependency to go away, +but rather than using schedule() or schedule_timeout() to sleep, it should use +the following function: + + bool requeue = slow_work_sleep_till_thread_needed( + struct slow_work *work, + signed long *_timeout); + +This will add a second wait and then sleep, such that it will be woken up if +either something appears on the queue that could usefully make use of the +thread - and behind which this item can be queued, or if the event the caller +set up to wait for happens. True will be returned if something else appeared +on the queue and this work function should perhaps return, or false if +something else woke it up. The timeout is as for schedule_timeout(). 
+ +For example: + + wq = bit_waitqueue(&my_flags, MY_BIT); + init_wait(&wait); + requeue = false; + do { + prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE); + if (!test_bit(MY_BIT, &my_flags)) + break; + requeue = slow_work_sleep_till_thread_needed(&my_work, + &timeout); + } while (timeout > 0 && !requeue); + finish_wait(wq, &wait); + if (!test_bit(MY_BIT, &my_flags) + goto do_my_thing; + if (requeue) + return; // to slow_work + =============== ITEM OPERATIONS diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h index bfd3ab4c889..5035a269173 100644 --- a/include/linux/slow-work.h +++ b/include/linux/slow-work.h @@ -152,6 +152,9 @@ static inline void delayed_slow_work_cancel(struct delayed_slow_work *dwork) slow_work_cancel(&dwork->work); } +extern bool slow_work_sleep_till_thread_needed(struct slow_work *work, + signed long *_timeout); + #ifdef CONFIG_SYSCTL extern ctl_table slow_work_sysctls[]; #endif diff --git a/kernel/slow-work.c b/kernel/slow-work.c index b763bc2d267..da94f3c101a 100644 --- a/kernel/slow-work.c +++ b/kernel/slow-work.c @@ -132,6 +132,15 @@ LIST_HEAD(slow_work_queue); LIST_HEAD(vslow_work_queue); DEFINE_SPINLOCK(slow_work_queue_lock); +/* + * The following are two wait queues that get pinged when a work item is placed + * on an empty queue. These allow work items that are hogging a thread by + * sleeping in a way that could be deferred to yield their thread and enqueue + * themselves. + */ +static DECLARE_WAIT_QUEUE_HEAD(slow_work_queue_waits_for_occupation); +static DECLARE_WAIT_QUEUE_HEAD(vslow_work_queue_waits_for_occupation); + /* * The thread controls. A variable used to signal to the threads that they * should exit when the queue is empty, a waitqueue used by the threads to wait @@ -305,6 +314,50 @@ auto_requeue: return true; } +/** + * slow_work_sleep_till_thread_needed - Sleep till thread needed by other work + * work: The work item under execution that wants to sleep + * _timeout: Scheduler sleep timeout + * + * Allow a requeueable work item to sleep on a slow-work processor thread until + * that thread is needed to do some other work or the sleep is interrupted by + * some other event. + * + * The caller must set up a wake up event before calling this and must have set + * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own + * condition before calling this function as no test is made here. 
+ * + * False is returned if there is nothing on the queue; true is returned if the + * work item should be requeued + */ +bool slow_work_sleep_till_thread_needed(struct slow_work *work, + signed long *_timeout) +{ + wait_queue_head_t *wfo_wq; + struct list_head *queue; + + DEFINE_WAIT(wait); + + if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) { + wfo_wq = &vslow_work_queue_waits_for_occupation; + queue = &vslow_work_queue; + } else { + wfo_wq = &slow_work_queue_waits_for_occupation; + queue = &slow_work_queue; + } + + if (!list_empty(queue)) + return true; + + add_wait_queue_exclusive(wfo_wq, &wait); + if (list_empty(queue)) + *_timeout = schedule_timeout(*_timeout); + finish_wait(wfo_wq, &wait); + + return !list_empty(queue); +} +EXPORT_SYMBOL(slow_work_sleep_till_thread_needed); + /** * slow_work_enqueue - Schedule a slow work item for processing * @work: The work item to queue @@ -335,6 +388,8 @@ auto_requeue: */ int slow_work_enqueue(struct slow_work *work) { + wait_queue_head_t *wfo_wq; + struct list_head *queue; unsigned long flags; int ret; @@ -354,6 +409,14 @@ int slow_work_enqueue(struct slow_work *work) * maintaining our promise */ if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) { + if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) { + wfo_wq = &vslow_work_queue_waits_for_occupation; + queue = &vslow_work_queue; + } else { + wfo_wq = &slow_work_queue_waits_for_occupation; + queue = &slow_work_queue; + } + spin_lock_irqsave(&slow_work_queue_lock, flags); if (unlikely(test_bit(SLOW_WORK_CANCELLING, &work->flags))) @@ -380,11 +443,13 @@ int slow_work_enqueue(struct slow_work *work) if (ret < 0) goto failed; slow_work_mark_time(work); - if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) - list_add_tail(&work->link, &vslow_work_queue); - else - list_add_tail(&work->link, &slow_work_queue); + list_add_tail(&work->link, queue); wake_up(&slow_work_thread_wq); + + /* if someone who could be requeued is sleeping on a + * thread, then ask them to yield their thread */ + if (work->link.prev == queue) + wake_up(wfo_wq); } spin_unlock_irqrestore(&slow_work_queue_lock, flags); @@ -487,9 +552,19 @@ EXPORT_SYMBOL(slow_work_cancel); */ static void delayed_slow_work_timer(unsigned long data) { + wait_queue_head_t *wfo_wq; + struct list_head *queue; struct slow_work *work = (struct slow_work *) data; unsigned long flags; - bool queued = false, put = false; + bool queued = false, put = false, first = false; + + if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) { + wfo_wq = &vslow_work_queue_waits_for_occupation; + queue = &vslow_work_queue; + } else { + wfo_wq = &slow_work_queue_waits_for_occupation; + queue = &slow_work_queue; + } spin_lock_irqsave(&slow_work_queue_lock, flags); if (likely(!test_bit(SLOW_WORK_CANCELLING, &work->flags))) { @@ -502,17 +577,18 @@ static void delayed_slow_work_timer(unsigned long data) put = true; } else { slow_work_mark_time(work); - if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) - list_add_tail(&work->link, &vslow_work_queue); - else - list_add_tail(&work->link, &slow_work_queue); + list_add_tail(&work->link, queue); queued = true; + if (work->link.prev == queue) + first = true; } } spin_unlock_irqrestore(&slow_work_queue_lock, flags); if (put) slow_work_put_ref(work); + if (first) + wake_up(wfo_wq); if (queued) wake_up(&slow_work_thread_wq); } -- cgit v1.2.3-70-g09d2 From fa1dae4906982b5d896c07613b1fe42456133b1c Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 1 Dec 2009 13:52:08 +0000 Subject: SLOW_WORK: Fix the CONFIG_MODULES=n case Commits 
3d7a641 ("SLOW_WORK: Wait for outstanding work items belonging to a module to clear") introduced some code to make sure that all of a module's slow-work items were complete before that module was removed, and commit 3bde31a ("SLOW_WORK: Allow a requeueable work item to sleep till the thread is needed") further extended that, breaking it in the process if CONFIG_MODULES=n: CC kernel/slow-work.o kernel/slow-work.c: In function 'slow_work_execute': kernel/slow-work.c:313: error: 'slow_work_thread_processing' undeclared (first use in this function) kernel/slow-work.c:313: error: (Each undeclared identifier is reported only once kernel/slow-work.c:313: error: for each function it appears in.) kernel/slow-work.c: In function 'slow_work_wait_for_items': kernel/slow-work.c:950: error: 'slow_work_unreg_sync_lock' undeclared (first use in this function) kernel/slow-work.c:951: error: 'slow_work_unreg_wq' undeclared (first use in this function) kernel/slow-work.c:961: error: 'slow_work_unreg_work_item' undeclared (first use in this function) kernel/slow-work.c:974: error: 'slow_work_unreg_module' undeclared (first use in this function) kernel/slow-work.c:977: error: 'slow_work_thread_processing' undeclared (first use in this function) make[1]: *** [kernel/slow-work.o] Error 1 Fix this by: (1) Extracting the bits of slow_work_execute() that are contingent on CONFIG_MODULES, and the bits that should be, into inline functions and placing them into the #ifdef'd section that defines the relevant variables and adding stubs for moduleless kernels. This allows the removal of some #ifdefs. (2) #ifdef'ing out the contents of slow_work_wait_for_items() in moduleless kernels. The four functions related to handling module unloading synchronisation (and their associated variables) could be offloaded into a separate .c file, but each function is only used once and three of them are tiny, so doing so would prevent them from being inlined. 
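Fix (1) is the familiar pattern of pairing a real helper under the relevant #ifdef with an empty stub otherwise, so that the callers themselves need no conditional compilation. A condensed sketch of what the hunk below does for one of the three helpers:

#ifdef CONFIG_MODULES
static void slow_work_set_thread_processing(int id, struct slow_work *work)
{
	/* record which module's work item this thread is now running */
	if (work)
		slow_work_thread_processing[id] = work->owner;
}
#else
/* moduleless kernels: compiles to nothing, so callers can drop their #ifdefs */
static void slow_work_set_thread_processing(int id, struct slow_work *work) {}
#endif

slow_work_done_thread_processing() and slow_work_clear_thread_processing() are treated the same way.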
Reported-by: Ingo Molnar Signed-off-by: David Howells Signed-off-by: Linus Torvalds --- kernel/slow-work.c | 46 +++++++++++++++++++++++++++++----------------- 1 file changed, 29 insertions(+), 17 deletions(-) (limited to 'kernel/slow-work.c') diff --git a/kernel/slow-work.c b/kernel/slow-work.c index da94f3c101a..b5c17f15f9d 100644 --- a/kernel/slow-work.c +++ b/kernel/slow-work.c @@ -109,6 +109,30 @@ static struct module *slow_work_unreg_module; static struct slow_work *slow_work_unreg_work_item; static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq); static DEFINE_MUTEX(slow_work_unreg_sync_lock); + +static void slow_work_set_thread_processing(int id, struct slow_work *work) +{ + if (work) + slow_work_thread_processing[id] = work->owner; +} +static void slow_work_done_thread_processing(int id, struct slow_work *work) +{ + struct module *module = slow_work_thread_processing[id]; + + slow_work_thread_processing[id] = NULL; + smp_mb(); + if (slow_work_unreg_work_item == work || + slow_work_unreg_module == module) + wake_up_all(&slow_work_unreg_wq); +} +static void slow_work_clear_thread_processing(int id) +{ + slow_work_thread_processing[id] = NULL; +} +#else +static void slow_work_set_thread_processing(int id, struct slow_work *work) {} +static void slow_work_done_thread_processing(int id, struct slow_work *work) {} +static void slow_work_clear_thread_processing(int id) {} #endif /* @@ -197,9 +221,6 @@ static unsigned slow_work_calc_vsmax(void) */ static noinline bool slow_work_execute(int id) { -#ifdef CONFIG_MODULES - struct module *module; -#endif struct slow_work *work = NULL; unsigned vsmax; bool very_slow; @@ -236,10 +257,7 @@ static noinline bool slow_work_execute(int id) very_slow = false; /* avoid the compiler warning */ } -#ifdef CONFIG_MODULES - if (work) - slow_work_thread_processing[id] = work->owner; -#endif + slow_work_set_thread_processing(id, work); if (work) { slow_work_mark_time(work); slow_work_begin_exec(id, work); @@ -287,15 +305,7 @@ static noinline bool slow_work_execute(int id) /* sort out the race between module unloading and put_ref() */ slow_work_put_ref(work); - -#ifdef CONFIG_MODULES - module = slow_work_thread_processing[id]; - slow_work_thread_processing[id] = NULL; - smp_mb(); - if (slow_work_unreg_work_item == work || - slow_work_unreg_module == module) - wake_up_all(&slow_work_unreg_wq); -#endif + slow_work_done_thread_processing(id, work); return true; @@ -310,7 +320,7 @@ auto_requeue: else list_add_tail(&work->link, &slow_work_queue); spin_unlock_irq(&slow_work_queue_lock); - slow_work_thread_processing[id] = NULL; + slow_work_clear_thread_processing(id); return true; } @@ -943,6 +953,7 @@ EXPORT_SYMBOL(slow_work_register_user); */ static void slow_work_wait_for_items(struct module *module) { +#ifdef CONFIG_MODULES DECLARE_WAITQUEUE(myself, current); struct slow_work *work; int loop; @@ -989,6 +1000,7 @@ static void slow_work_wait_for_items(struct module *module) remove_wait_queue(&slow_work_unreg_wq, &myself); mutex_unlock(&slow_work_unreg_sync_lock); +#endif /* CONFIG_MODULES */ } /** -- cgit v1.2.3-70-g09d2 From f13a48bd798a159291ca583b95453171b88b7448 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 1 Dec 2009 15:36:11 +0000 Subject: SLOW_WORK: Move slow_work's proc file to debugfs Move slow_work's debugging proc file to debugfs. 
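For orientation, the registration that replaces the old proc_create() call reduces to the following; this is only a sketch of the init_slow_work() hunk further down, with the wrapper name slow_work_debugfs_init invented here:

#include <linux/debugfs.h>
#include <linux/init.h>
#include "slow-work.h"

static void __init slow_work_debugfs_init(void)
{
	struct dentry *dbdir;

	/* /sys/kernel/debug/slow_work/runqueue, readable by root only */
	dbdir = debugfs_create_dir("slow_work", NULL);
	if (dbdir && !IS_ERR(dbdir))
		debugfs_create_file("runqueue", S_IFREG | 0400, dbdir,
				    NULL, &slow_work_runqueue_fops);
}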
Signed-off-by: David Howells Requested-and-acked-by: Ingo Molnar Signed-off-by: Linus Torvalds --- Documentation/slow-work.txt | 4 +- include/linux/slow-work.h | 8 +- init/Kconfig | 8 +- kernel/Makefile | 2 +- kernel/slow-work-debugfs.c | 227 ++++++++++++++++++++++++++++++++++++++++++++ kernel/slow-work-proc.c | 227 -------------------------------------------- kernel/slow-work.c | 18 ++-- kernel/slow-work.h | 6 +- 8 files changed, 253 insertions(+), 247 deletions(-) create mode 100644 kernel/slow-work-debugfs.c delete mode 100644 kernel/slow-work-proc.c (limited to 'kernel/slow-work.c') diff --git a/Documentation/slow-work.txt b/Documentation/slow-work.txt index 52bc3143372..9dbf4470c7e 100644 --- a/Documentation/slow-work.txt +++ b/Documentation/slow-work.txt @@ -279,9 +279,9 @@ The slow-work thread pool has a number of configurables: VIEWING EXECUTING AND QUEUED ITEMS ================================== -If CONFIG_SLOW_WORK_PROC is enabled, a proc file is made available: +If CONFIG_SLOW_WORK_DEBUG is enabled, a debugfs file is made available: - /proc/slow_work_rq + /sys/kernel/debug/slow_work/runqueue through which the list of work items being executed and the queues of items to be executed may be viewed. The owner of a work item is given the chance to diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h index 5035a269173..13337bf6c3f 100644 --- a/include/linux/slow-work.h +++ b/include/linux/slow-work.h @@ -20,7 +20,7 @@ #include struct slow_work; -#ifdef CONFIG_SLOW_WORK_PROC +#ifdef CONFIG_SLOW_WORK_DEBUG struct seq_file; #endif @@ -42,8 +42,8 @@ struct slow_work_ops { /* execute a work item */ void (*execute)(struct slow_work *work); -#ifdef CONFIG_SLOW_WORK_PROC - /* describe a work item for /proc */ +#ifdef CONFIG_SLOW_WORK_DEBUG + /* describe a work item for debugfs */ void (*desc)(struct slow_work *work, struct seq_file *m); #endif }; @@ -64,7 +64,7 @@ struct slow_work { #define SLOW_WORK_DELAYED 5 /* item is struct delayed_slow_work with active timer */ const struct slow_work_ops *ops; /* operations table for this item */ struct list_head link; /* link in queue */ -#ifdef CONFIG_SLOW_WORK_PROC +#ifdef CONFIG_SLOW_WORK_DEBUG struct timespec mark; /* jiffies at which queued or exec begun */ #endif }; diff --git a/init/Kconfig b/init/Kconfig index ab5c64801fe..39923ccc287 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1098,12 +1098,12 @@ config SLOW_WORK See Documentation/slow-work.txt. -config SLOW_WORK_PROC - bool "Slow work debugging through /proc" +config SLOW_WORK_DEBUG + bool "Slow work debugging through debugfs" default n - depends on SLOW_WORK && PROC_FS + depends on SLOW_WORK && DEBUG_FS help - Display the contents of the slow work run queue through /proc, + Display the contents of the slow work run queue through debugfs, including items currently executing. See Documentation/slow-work.txt. 
diff --git a/kernel/Makefile b/kernel/Makefile index 776ffed1556..d7c13d249b2 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -94,7 +94,7 @@ obj-$(CONFIG_X86_DS) += trace/ obj-$(CONFIG_RING_BUFFER) += trace/ obj-$(CONFIG_SMP) += sched_cpupri.o obj-$(CONFIG_SLOW_WORK) += slow-work.o -obj-$(CONFIG_SLOW_WORK_PROC) += slow-work-proc.o +obj-$(CONFIG_SLOW_WORK_DEBUG) += slow-work-debugfs.o obj-$(CONFIG_PERF_EVENTS) += perf_event.o ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) diff --git a/kernel/slow-work-debugfs.c b/kernel/slow-work-debugfs.c new file mode 100644 index 00000000000..e45c4364529 --- /dev/null +++ b/kernel/slow-work-debugfs.c @@ -0,0 +1,227 @@ +/* Slow work debugging + * + * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include "slow-work.h" + +#define ITERATOR_SHIFT (BITS_PER_LONG - 4) +#define ITERATOR_SELECTOR (0xfUL << ITERATOR_SHIFT) +#define ITERATOR_COUNTER (~ITERATOR_SELECTOR) + +void slow_work_new_thread_desc(struct slow_work *work, struct seq_file *m) +{ + seq_puts(m, "Slow-work: New thread"); +} + +/* + * Render the time mark field on a work item into a 5-char time with units plus + * a space + */ +static void slow_work_print_mark(struct seq_file *m, struct slow_work *work) +{ + struct timespec now, diff; + + now = CURRENT_TIME; + diff = timespec_sub(now, work->mark); + + if (diff.tv_sec < 0) + seq_puts(m, " -ve "); + else if (diff.tv_sec == 0 && diff.tv_nsec < 1000) + seq_printf(m, "%3luns ", diff.tv_nsec); + else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000) + seq_printf(m, "%3luus ", diff.tv_nsec / 1000); + else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000000) + seq_printf(m, "%3lums ", diff.tv_nsec / 1000000); + else if (diff.tv_sec <= 1) + seq_puts(m, " 1s "); + else if (diff.tv_sec < 60) + seq_printf(m, "%4lus ", diff.tv_sec); + else if (diff.tv_sec < 60 * 60) + seq_printf(m, "%4lum ", diff.tv_sec / 60); + else if (diff.tv_sec < 60 * 60 * 24) + seq_printf(m, "%4luh ", diff.tv_sec / 3600); + else + seq_puts(m, "exces "); +} + +/* + * Describe a slow work item for debugfs + */ +static int slow_work_runqueue_show(struct seq_file *m, void *v) +{ + struct slow_work *work; + struct list_head *p = v; + unsigned long id; + + switch ((unsigned long) v) { + case 1: + seq_puts(m, "THR PID ITEM ADDR FL MARK DESC\n"); + return 0; + case 2: + seq_puts(m, "=== ===== ================ == ===== ==========\n"); + return 0; + + case 3 ... 3 + SLOW_WORK_THREAD_LIMIT - 1: + id = (unsigned long) v - 3; + + read_lock(&slow_work_execs_lock); + work = slow_work_execs[id]; + if (work) { + smp_read_barrier_depends(); + + seq_printf(m, "%3lu %5d %16p %2lx ", + id, slow_work_pids[id], work, work->flags); + slow_work_print_mark(m, work); + + if (work->ops->desc) + work->ops->desc(work, m); + seq_putc(m, '\n'); + } + read_unlock(&slow_work_execs_lock); + return 0; + + default: + work = list_entry(p, struct slow_work, link); + seq_printf(m, "%3s - %16p %2lx ", + work->flags & SLOW_WORK_VERY_SLOW ? 
"vsq" : "sq", + work, work->flags); + slow_work_print_mark(m, work); + + if (work->ops->desc) + work->ops->desc(work, m); + seq_putc(m, '\n'); + return 0; + } +} + +/* + * map the iterator to a work item + */ +static void *slow_work_runqueue_index(struct seq_file *m, loff_t *_pos) +{ + struct list_head *p; + unsigned long count, id; + + switch (*_pos >> ITERATOR_SHIFT) { + case 0x0: + if (*_pos == 0) + *_pos = 1; + if (*_pos < 3) + return (void *)(unsigned long) *_pos; + if (*_pos < 3 + SLOW_WORK_THREAD_LIMIT) + for (id = *_pos - 3; + id < SLOW_WORK_THREAD_LIMIT; + id++, (*_pos)++) + if (slow_work_execs[id]) + return (void *)(unsigned long) *_pos; + *_pos = 0x1UL << ITERATOR_SHIFT; + + case 0x1: + count = *_pos & ITERATOR_COUNTER; + list_for_each(p, &slow_work_queue) { + if (count == 0) + return p; + count--; + } + *_pos = 0x2UL << ITERATOR_SHIFT; + + case 0x2: + count = *_pos & ITERATOR_COUNTER; + list_for_each(p, &vslow_work_queue) { + if (count == 0) + return p; + count--; + } + *_pos = 0x3UL << ITERATOR_SHIFT; + + default: + return NULL; + } +} + +/* + * set up the iterator to start reading from the first line + */ +static void *slow_work_runqueue_start(struct seq_file *m, loff_t *_pos) +{ + spin_lock_irq(&slow_work_queue_lock); + return slow_work_runqueue_index(m, _pos); +} + +/* + * move to the next line + */ +static void *slow_work_runqueue_next(struct seq_file *m, void *v, loff_t *_pos) +{ + struct list_head *p = v; + unsigned long selector = *_pos >> ITERATOR_SHIFT; + + (*_pos)++; + switch (selector) { + case 0x0: + return slow_work_runqueue_index(m, _pos); + + case 0x1: + if (*_pos >> ITERATOR_SHIFT == 0x1) { + p = p->next; + if (p != &slow_work_queue) + return p; + } + *_pos = 0x2UL << ITERATOR_SHIFT; + p = &vslow_work_queue; + + case 0x2: + if (*_pos >> ITERATOR_SHIFT == 0x2) { + p = p->next; + if (p != &vslow_work_queue) + return p; + } + *_pos = 0x3UL << ITERATOR_SHIFT; + + default: + return NULL; + } +} + +/* + * clean up after reading + */ +static void slow_work_runqueue_stop(struct seq_file *m, void *v) +{ + spin_unlock_irq(&slow_work_queue_lock); +} + +static const struct seq_operations slow_work_runqueue_ops = { + .start = slow_work_runqueue_start, + .stop = slow_work_runqueue_stop, + .next = slow_work_runqueue_next, + .show = slow_work_runqueue_show, +}; + +/* + * open "/sys/kernel/debug/slow_work/runqueue" to list queue contents + */ +static int slow_work_runqueue_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &slow_work_runqueue_ops); +} + +const struct file_operations slow_work_runqueue_fops = { + .owner = THIS_MODULE, + .open = slow_work_runqueue_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; diff --git a/kernel/slow-work-proc.c b/kernel/slow-work-proc.c deleted file mode 100644 index 3988032571f..00000000000 --- a/kernel/slow-work-proc.c +++ /dev/null @@ -1,227 +0,0 @@ -/* Slow work debugging - * - * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. 
- */ - -#include -#include -#include -#include -#include -#include "slow-work.h" - -#define ITERATOR_SHIFT (BITS_PER_LONG - 4) -#define ITERATOR_SELECTOR (0xfUL << ITERATOR_SHIFT) -#define ITERATOR_COUNTER (~ITERATOR_SELECTOR) - -void slow_work_new_thread_desc(struct slow_work *work, struct seq_file *m) -{ - seq_puts(m, "Slow-work: New thread"); -} - -/* - * Render the time mark field on a work item into a 5-char time with units plus - * a space - */ -static void slow_work_print_mark(struct seq_file *m, struct slow_work *work) -{ - struct timespec now, diff; - - now = CURRENT_TIME; - diff = timespec_sub(now, work->mark); - - if (diff.tv_sec < 0) - seq_puts(m, " -ve "); - else if (diff.tv_sec == 0 && diff.tv_nsec < 1000) - seq_printf(m, "%3luns ", diff.tv_nsec); - else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000) - seq_printf(m, "%3luus ", diff.tv_nsec / 1000); - else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000000) - seq_printf(m, "%3lums ", diff.tv_nsec / 1000000); - else if (diff.tv_sec <= 1) - seq_puts(m, " 1s "); - else if (diff.tv_sec < 60) - seq_printf(m, "%4lus ", diff.tv_sec); - else if (diff.tv_sec < 60 * 60) - seq_printf(m, "%4lum ", diff.tv_sec / 60); - else if (diff.tv_sec < 60 * 60 * 24) - seq_printf(m, "%4luh ", diff.tv_sec / 3600); - else - seq_puts(m, "exces "); -} - -/* - * Describe a slow work item for /proc - */ -static int slow_work_runqueue_show(struct seq_file *m, void *v) -{ - struct slow_work *work; - struct list_head *p = v; - unsigned long id; - - switch ((unsigned long) v) { - case 1: - seq_puts(m, "THR PID ITEM ADDR FL MARK DESC\n"); - return 0; - case 2: - seq_puts(m, "=== ===== ================ == ===== ==========\n"); - return 0; - - case 3 ... 3 + SLOW_WORK_THREAD_LIMIT - 1: - id = (unsigned long) v - 3; - - read_lock(&slow_work_execs_lock); - work = slow_work_execs[id]; - if (work) { - smp_read_barrier_depends(); - - seq_printf(m, "%3lu %5d %16p %2lx ", - id, slow_work_pids[id], work, work->flags); - slow_work_print_mark(m, work); - - if (work->ops->desc) - work->ops->desc(work, m); - seq_putc(m, '\n'); - } - read_unlock(&slow_work_execs_lock); - return 0; - - default: - work = list_entry(p, struct slow_work, link); - seq_printf(m, "%3s - %16p %2lx ", - work->flags & SLOW_WORK_VERY_SLOW ? 
"vsq" : "sq", - work, work->flags); - slow_work_print_mark(m, work); - - if (work->ops->desc) - work->ops->desc(work, m); - seq_putc(m, '\n'); - return 0; - } -} - -/* - * map the iterator to a work item - */ -static void *slow_work_runqueue_index(struct seq_file *m, loff_t *_pos) -{ - struct list_head *p; - unsigned long count, id; - - switch (*_pos >> ITERATOR_SHIFT) { - case 0x0: - if (*_pos == 0) - *_pos = 1; - if (*_pos < 3) - return (void *)(unsigned long) *_pos; - if (*_pos < 3 + SLOW_WORK_THREAD_LIMIT) - for (id = *_pos - 3; - id < SLOW_WORK_THREAD_LIMIT; - id++, (*_pos)++) - if (slow_work_execs[id]) - return (void *)(unsigned long) *_pos; - *_pos = 0x1UL << ITERATOR_SHIFT; - - case 0x1: - count = *_pos & ITERATOR_COUNTER; - list_for_each(p, &slow_work_queue) { - if (count == 0) - return p; - count--; - } - *_pos = 0x2UL << ITERATOR_SHIFT; - - case 0x2: - count = *_pos & ITERATOR_COUNTER; - list_for_each(p, &vslow_work_queue) { - if (count == 0) - return p; - count--; - } - *_pos = 0x3UL << ITERATOR_SHIFT; - - default: - return NULL; - } -} - -/* - * set up the iterator to start reading from the first line - */ -static void *slow_work_runqueue_start(struct seq_file *m, loff_t *_pos) -{ - spin_lock_irq(&slow_work_queue_lock); - return slow_work_runqueue_index(m, _pos); -} - -/* - * move to the next line - */ -static void *slow_work_runqueue_next(struct seq_file *m, void *v, loff_t *_pos) -{ - struct list_head *p = v; - unsigned long selector = *_pos >> ITERATOR_SHIFT; - - (*_pos)++; - switch (selector) { - case 0x0: - return slow_work_runqueue_index(m, _pos); - - case 0x1: - if (*_pos >> ITERATOR_SHIFT == 0x1) { - p = p->next; - if (p != &slow_work_queue) - return p; - } - *_pos = 0x2UL << ITERATOR_SHIFT; - p = &vslow_work_queue; - - case 0x2: - if (*_pos >> ITERATOR_SHIFT == 0x2) { - p = p->next; - if (p != &vslow_work_queue) - return p; - } - *_pos = 0x3UL << ITERATOR_SHIFT; - - default: - return NULL; - } -} - -/* - * clean up after reading - */ -static void slow_work_runqueue_stop(struct seq_file *m, void *v) -{ - spin_unlock_irq(&slow_work_queue_lock); -} - -static const struct seq_operations slow_work_runqueue_ops = { - .start = slow_work_runqueue_start, - .stop = slow_work_runqueue_stop, - .next = slow_work_runqueue_next, - .show = slow_work_runqueue_show, -}; - -/* - * open "/proc/slow_work_rq" to list queue contents - */ -static int slow_work_runqueue_open(struct inode *inode, struct file *file) -{ - return seq_open(file, &slow_work_runqueue_ops); -} - -const struct file_operations slow_work_runqueue_fops = { - .owner = THIS_MODULE, - .open = slow_work_runqueue_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; diff --git a/kernel/slow-work.c b/kernel/slow-work.c index b5c17f15f9d..00889bd3c59 100644 --- a/kernel/slow-work.c +++ b/kernel/slow-work.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include "slow-work.h" static void slow_work_cull_timeout(unsigned long); @@ -138,7 +138,7 @@ static void slow_work_clear_thread_processing(int id) {} /* * Data for tracking currently executing items for indication through /proc */ -#ifdef CONFIG_SLOW_WORK_PROC +#ifdef CONFIG_SLOW_WORK_DEBUG struct slow_work *slow_work_execs[SLOW_WORK_THREAD_LIMIT]; pid_t slow_work_pids[SLOW_WORK_THREAD_LIMIT]; DEFINE_RWLOCK(slow_work_execs_lock); @@ -823,7 +823,7 @@ static void slow_work_new_thread_execute(struct slow_work *work) static const struct slow_work_ops slow_work_new_thread_ops = { .owner = THIS_MODULE, .execute = slow_work_new_thread_execute, 
-#ifdef CONFIG_SLOW_WORK_PROC +#ifdef CONFIG_SLOW_WORK_DEBUG .desc = slow_work_new_thread_desc, #endif }; @@ -1055,9 +1055,15 @@ static int __init init_slow_work(void) if (slow_work_max_max_threads < nr_cpus * 2) slow_work_max_max_threads = nr_cpus * 2; #endif -#ifdef CONFIG_SLOW_WORK_PROC - proc_create("slow_work_rq", S_IFREG | 0400, NULL, - &slow_work_runqueue_fops); +#ifdef CONFIG_SLOW_WORK_DEBUG + { + struct dentry *dbdir; + + dbdir = debugfs_create_dir("slow_work", NULL); + if (dbdir && !IS_ERR(dbdir)) + debugfs_create_file("runqueue", S_IFREG | 0400, dbdir, + NULL, &slow_work_runqueue_fops); + } #endif return 0; } diff --git a/kernel/slow-work.h b/kernel/slow-work.h index 3c2f007f3ad..321f3c59d73 100644 --- a/kernel/slow-work.h +++ b/kernel/slow-work.h @@ -19,7 +19,7 @@ /* * slow-work.c */ -#ifdef CONFIG_SLOW_WORK_PROC +#ifdef CONFIG_SLOW_WORK_DEBUG extern struct slow_work *slow_work_execs[]; extern pid_t slow_work_pids[]; extern rwlock_t slow_work_execs_lock; @@ -30,9 +30,9 @@ extern struct list_head vslow_work_queue; extern spinlock_t slow_work_queue_lock; /* - * slow-work-proc.c + * slow-work-debugfs.c */ -#ifdef CONFIG_SLOW_WORK_PROC +#ifdef CONFIG_SLOW_WORK_DEBUG extern const struct file_operations slow_work_runqueue_fops; extern void slow_work_new_thread_desc(struct slow_work *, struct seq_file *); -- cgit v1.2.3-70-g09d2