Diffstat (limited to 'drivers')
279 files changed, 13362 insertions, 3441 deletions
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c index bd3c937b0ac..7737afb157c 100644 --- a/drivers/acpi/acpica/hwvalid.c +++ b/drivers/acpi/acpica/hwvalid.c @@ -90,7 +90,6 @@ static const struct acpi_port_info acpi_protected_ports[] = { {"PIT2", 0x0048, 0x004B, ACPI_OSI_WIN_XP}, {"RTC", 0x0070, 0x0071, ACPI_OSI_WIN_XP}, {"CMOS", 0x0074, 0x0076, ACPI_OSI_WIN_XP}, - {"DMA1", 0x0081, 0x0083, ACPI_OSI_WIN_XP}, {"DMA1L", 0x0087, 0x0087, ACPI_OSI_WIN_XP}, {"DMA2", 0x0089, 0x008B, ACPI_OSI_WIN_XP}, {"DMA2L", 0x008F, 0x008F, ACPI_OSI_WIN_XP}, diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c index 05dfdc96802..d0d550d22a6 100644 --- a/drivers/acpi/proc.c +++ b/drivers/acpi/proc.c @@ -343,9 +343,6 @@ acpi_system_write_alarm(struct file *file, } #endif /* HAVE_ACPI_LEGACY_ALARM */ -extern struct list_head acpi_wakeup_device_list; -extern spinlock_t acpi_device_lock; - static int acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) { @@ -353,7 +350,7 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) seq_printf(seq, "Device\tS-state\t Status Sysfs node\n"); - spin_lock(&acpi_device_lock); + mutex_lock(&acpi_device_lock); list_for_each_safe(node, next, &acpi_wakeup_device_list) { struct acpi_device *dev = container_of(node, struct acpi_device, wakeup_list); @@ -361,7 +358,6 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) if (!dev->wakeup.flags.valid) continue; - spin_unlock(&acpi_device_lock); ldev = acpi_get_physical_device(dev->handle); seq_printf(seq, "%s\t S%d\t%c%-8s ", @@ -376,9 +372,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) seq_printf(seq, "\n"); put_device(ldev); - spin_lock(&acpi_device_lock); } - spin_unlock(&acpi_device_lock); + mutex_unlock(&acpi_device_lock); return 0; } @@ -409,7 +404,7 @@ acpi_system_write_wakeup_device(struct file *file, strbuf[len] = '\0'; sscanf(strbuf, "%s", str); - spin_lock(&acpi_device_lock); + mutex_lock(&acpi_device_lock); list_for_each_safe(node, next, &acpi_wakeup_device_list) { struct acpi_device *dev = container_of(node, struct acpi_device, wakeup_list); @@ -446,7 +441,7 @@ acpi_system_write_wakeup_device(struct file *file, } } } - spin_unlock(&acpi_device_lock); + mutex_unlock(&acpi_device_lock); return count; } diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 4e6e758bd39..6fe121434ff 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -145,6 +145,9 @@ static void acpi_timer_check_state(int state, struct acpi_processor *pr, struct acpi_processor_power *pwr = &pr->power; u8 type = local_apic_timer_c2_ok ? 
ACPI_STATE_C3 : ACPI_STATE_C2; + if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT)) + return; + /* * Check, if one of the previous states already marked the lapic * unstable diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 20c23c04920..8ff510b91d8 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -24,7 +24,7 @@ extern struct acpi_device *acpi_root; static LIST_HEAD(acpi_device_list); static LIST_HEAD(acpi_bus_id_list); -DEFINE_SPINLOCK(acpi_device_lock); +DEFINE_MUTEX(acpi_device_lock); LIST_HEAD(acpi_wakeup_device_list); struct acpi_device_bus_id{ @@ -491,7 +491,6 @@ static int acpi_device_register(struct acpi_device *device, */ INIT_LIST_HEAD(&device->children); INIT_LIST_HEAD(&device->node); - INIT_LIST_HEAD(&device->g_list); INIT_LIST_HEAD(&device->wakeup_list); new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL); @@ -500,7 +499,7 @@ static int acpi_device_register(struct acpi_device *device, return -ENOMEM; } - spin_lock(&acpi_device_lock); + mutex_lock(&acpi_device_lock); /* * Find suitable bus_id and instance number in acpi_bus_id_list * If failed, create one and link it into acpi_bus_id_list @@ -521,14 +520,12 @@ static int acpi_device_register(struct acpi_device *device, } dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no); - if (device->parent) { + if (device->parent) list_add_tail(&device->node, &device->parent->children); - list_add_tail(&device->g_list, &device->parent->g_list); - } else - list_add_tail(&device->g_list, &acpi_device_list); + if (device->wakeup.flags.valid) list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list); - spin_unlock(&acpi_device_lock); + mutex_unlock(&acpi_device_lock); if (device->parent) device->dev.parent = &parent->dev; @@ -549,28 +546,22 @@ static int acpi_device_register(struct acpi_device *device, device->removal_type = ACPI_BUS_REMOVAL_NORMAL; return 0; end: - spin_lock(&acpi_device_lock); - if (device->parent) { + mutex_lock(&acpi_device_lock); + if (device->parent) list_del(&device->node); - list_del(&device->g_list); - } else - list_del(&device->g_list); list_del(&device->wakeup_list); - spin_unlock(&acpi_device_lock); + mutex_unlock(&acpi_device_lock); return result; } static void acpi_device_unregister(struct acpi_device *device, int type) { - spin_lock(&acpi_device_lock); - if (device->parent) { + mutex_lock(&acpi_device_lock); + if (device->parent) list_del(&device->node); - list_del(&device->g_list); - } else - list_del(&device->g_list); list_del(&device->wakeup_list); - spin_unlock(&acpi_device_lock); + mutex_unlock(&acpi_device_lock); acpi_detach_data(device->handle, acpi_bus_data_handler); diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h index cfaf8f5b0a1..8a8f3b3382a 100644 --- a/drivers/acpi/sleep.h +++ b/drivers/acpi/sleep.h @@ -5,3 +5,6 @@ extern int acpi_suspend (u32 state); extern void acpi_enable_wakeup_device_prep(u8 sleep_state); extern void acpi_enable_wakeup_device(u8 sleep_state); extern void acpi_disable_wakeup_device(u8 sleep_state); + +extern struct list_head acpi_wakeup_device_list; +extern struct mutex acpi_device_lock; diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index e8c143caf0f..9cd15e8c893 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -98,6 +98,7 @@ MODULE_PARM_DESC(psv, "Disable or override all passive trip points."); static int acpi_thermal_add(struct acpi_device *device); static int acpi_thermal_remove(struct acpi_device *device, int type); static int 
acpi_thermal_resume(struct acpi_device *device); +static void acpi_thermal_notify(struct acpi_device *device, u32 event); static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file); static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file); static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file); @@ -123,6 +124,7 @@ static struct acpi_driver acpi_thermal_driver = { .add = acpi_thermal_add, .remove = acpi_thermal_remove, .resume = acpi_thermal_resume, + .notify = acpi_thermal_notify, }, }; @@ -192,6 +194,7 @@ struct acpi_thermal { struct acpi_handle_list devices; struct thermal_zone_device *thermal_zone; int tz_enabled; + int kelvin_offset; struct mutex lock; }; @@ -581,7 +584,7 @@ static void acpi_thermal_check(void *data) } /* sys I/F for generic thermal sysfs support */ -#define KELVIN_TO_MILLICELSIUS(t) (t * 100 - 273200) +#define KELVIN_TO_MILLICELSIUS(t, off) (((t) - (off)) * 100) static int thermal_get_temp(struct thermal_zone_device *thermal, unsigned long *temp) @@ -596,7 +599,7 @@ static int thermal_get_temp(struct thermal_zone_device *thermal, if (result) return result; - *temp = KELVIN_TO_MILLICELSIUS(tz->temperature); + *temp = KELVIN_TO_MILLICELSIUS(tz->temperature, tz->kelvin_offset); return 0; } @@ -702,7 +705,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal, if (tz->trips.critical.flags.valid) { if (!trip) { *temp = KELVIN_TO_MILLICELSIUS( - tz->trips.critical.temperature); + tz->trips.critical.temperature, + tz->kelvin_offset); return 0; } trip--; @@ -711,7 +715,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal, if (tz->trips.hot.flags.valid) { if (!trip) { *temp = KELVIN_TO_MILLICELSIUS( - tz->trips.hot.temperature); + tz->trips.hot.temperature, + tz->kelvin_offset); return 0; } trip--; @@ -720,7 +725,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal, if (tz->trips.passive.flags.valid) { if (!trip) { *temp = KELVIN_TO_MILLICELSIUS( - tz->trips.passive.temperature); + tz->trips.passive.temperature, + tz->kelvin_offset); return 0; } trip--; @@ -730,7 +736,8 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal, tz->trips.active[i].flags.valid; i++) { if (!trip) { *temp = KELVIN_TO_MILLICELSIUS( - tz->trips.active[i].temperature); + tz->trips.active[i].temperature, + tz->kelvin_offset); return 0; } trip--; @@ -745,7 +752,8 @@ static int thermal_get_crit_temp(struct thermal_zone_device *thermal, if (tz->trips.critical.flags.valid) { *temperature = KELVIN_TO_MILLICELSIUS( - tz->trips.critical.temperature); + tz->trips.critical.temperature, + tz->kelvin_offset); return 0; } else return -EINVAL; @@ -1264,17 +1272,14 @@ static int acpi_thermal_remove_fs(struct acpi_device *device) Driver Interface -------------------------------------------------------------------------- */ -static void acpi_thermal_notify(acpi_handle handle, u32 event, void *data) +static void acpi_thermal_notify(struct acpi_device *device, u32 event) { - struct acpi_thermal *tz = data; - struct acpi_device *device = NULL; + struct acpi_thermal *tz = acpi_driver_data(device); if (!tz) return; - device = tz->device; - switch (event) { case ACPI_THERMAL_NOTIFY_TEMPERATURE: acpi_thermal_check(tz); @@ -1298,8 +1303,6 @@ static void acpi_thermal_notify(acpi_handle handle, u32 event, void *data) "Unsupported event [0x%x]\n", event)); break; } - - return; } static int acpi_thermal_get_info(struct acpi_thermal *tz) @@ -1334,10 +1337,28 @@ static int 
acpi_thermal_get_info(struct acpi_thermal *tz) return 0; } +/* + * The exact offset between Kelvin and degree Celsius is 273.15. However ACPI + * handles temperature values with a single decimal place. As a consequence, + * some implementations use an offset of 273.1 and others use an offset of + * 273.2. Try to find out which one is being used, to present the most + * accurate and visually appealing number. + * + * The heuristic below should work for all ACPI thermal zones which have a + * critical trip point with a value being a multiple of 0.5 degree Celsius. + */ +static void acpi_thermal_guess_offset(struct acpi_thermal *tz) +{ + if (tz->trips.critical.flags.valid && + (tz->trips.critical.temperature % 5) == 1) + tz->kelvin_offset = 2731; + else + tz->kelvin_offset = 2732; +} + static int acpi_thermal_add(struct acpi_device *device) { int result = 0; - acpi_status status = AE_OK; struct acpi_thermal *tz = NULL; @@ -1360,6 +1381,8 @@ static int acpi_thermal_add(struct acpi_device *device) if (result) goto free_memory; + acpi_thermal_guess_offset(tz); + result = acpi_thermal_register_thermal_zone(tz); if (result) goto free_memory; @@ -1368,21 +1391,11 @@ static int acpi_thermal_add(struct acpi_device *device) if (result) goto unregister_thermal_zone; - status = acpi_install_notify_handler(device->handle, - ACPI_DEVICE_NOTIFY, - acpi_thermal_notify, tz); - if (ACPI_FAILURE(status)) { - result = -ENODEV; - goto remove_fs; - } - printk(KERN_INFO PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device), acpi_device_bid(device), KELVIN_TO_CELSIUS(tz->temperature)); goto end; -remove_fs: - acpi_thermal_remove_fs(device); unregister_thermal_zone: thermal_zone_device_unregister(tz->thermal_zone); free_memory: @@ -1393,7 +1406,6 @@ end: static int acpi_thermal_remove(struct acpi_device *device, int type) { - acpi_status status = AE_OK; struct acpi_thermal *tz = NULL; if (!device || !acpi_driver_data(device)) @@ -1401,10 +1413,6 @@ static int acpi_thermal_remove(struct acpi_device *device, int type) tz = acpi_driver_data(device); - status = acpi_remove_notify_handler(device->handle, - ACPI_DEVICE_NOTIFY, - acpi_thermal_notify); - acpi_thermal_remove_fs(device); acpi_thermal_unregister_thermal_zone(tz); mutex_destroy(&tz->lock); diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index ab06143672b..cd4fb7543a9 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -79,6 +79,7 @@ module_param(brightness_switch_enabled, bool, 0644); static int acpi_video_bus_add(struct acpi_device *device); static int acpi_video_bus_remove(struct acpi_device *device, int type); static int acpi_video_resume(struct acpi_device *device); +static void acpi_video_bus_notify(struct acpi_device *device, u32 event); static const struct acpi_device_id video_device_ids[] = { {ACPI_VIDEO_HID, 0}, @@ -94,6 +95,7 @@ static struct acpi_driver acpi_video_bus = { .add = acpi_video_bus_add, .remove = acpi_video_bus_remove, .resume = acpi_video_resume, + .notify = acpi_video_bus_notify, }, }; @@ -1986,17 +1988,15 @@ static int acpi_video_bus_stop_devices(struct acpi_video_bus *video) return acpi_video_bus_DOS(video, 0, 1); } -static void acpi_video_bus_notify(acpi_handle handle, u32 event, void *data) +static void acpi_video_bus_notify(struct acpi_device *device, u32 event) { - struct acpi_video_bus *video = data; - struct acpi_device *device = NULL; + struct acpi_video_bus *video = acpi_driver_data(device); struct input_dev *input; int keycode; if (!video) return; - device = video->device; input = video->input; switch 
(event) { @@ -2127,7 +2127,6 @@ static int acpi_video_resume(struct acpi_device *device) static int acpi_video_bus_add(struct acpi_device *device) { - acpi_status status; struct acpi_video_bus *video; struct input_dev *input; int error; @@ -2169,20 +2168,10 @@ static int acpi_video_bus_add(struct acpi_device *device) acpi_video_bus_get_devices(video, device); acpi_video_bus_start_devices(video); - status = acpi_install_notify_handler(device->handle, - ACPI_DEVICE_NOTIFY, - acpi_video_bus_notify, video); - if (ACPI_FAILURE(status)) { - printk(KERN_ERR PREFIX - "Error installing notify handler\n"); - error = -ENODEV; - goto err_stop_video; - } - video->input = input = input_allocate_device(); if (!input) { error = -ENOMEM; - goto err_uninstall_notify; + goto err_stop_video; } snprintf(video->phys, sizeof(video->phys), @@ -2218,9 +2207,6 @@ static int acpi_video_bus_add(struct acpi_device *device) err_free_input_dev: input_free_device(input); - err_uninstall_notify: - acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, - acpi_video_bus_notify); err_stop_video: acpi_video_bus_stop_devices(video); acpi_video_bus_put_devices(video); @@ -2235,7 +2221,6 @@ static int acpi_video_bus_add(struct acpi_device *device) static int acpi_video_bus_remove(struct acpi_device *device, int type) { - acpi_status status = 0; struct acpi_video_bus *video = NULL; @@ -2245,11 +2230,6 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type) video = acpi_driver_data(device); acpi_video_bus_stop_devices(video); - - status = acpi_remove_notify_handler(video->device->handle, - ACPI_DEVICE_NOTIFY, - acpi_video_bus_notify); - acpi_video_bus_put_devices(video); acpi_video_bus_remove_fs(device); diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c index 5aee8c26cc9..88725dcdf8b 100644 --- a/drivers/acpi/wakeup.c +++ b/drivers/acpi/wakeup.c @@ -12,12 +12,14 @@ #include "internal.h" #include "sleep.h" +/* + * We didn't lock acpi_device_lock in the file, because it invokes oops in + * suspend/resume and isn't really required as this is called in S-state. 
At + * that time, there is no device hotplug + **/ #define _COMPONENT ACPI_SYSTEM_COMPONENT ACPI_MODULE_NAME("wakeup_devices") -extern struct list_head acpi_wakeup_device_list; -extern spinlock_t acpi_device_lock; - /** * acpi_enable_wakeup_device_prep - prepare wakeup devices * @sleep_state: ACPI state @@ -29,7 +31,6 @@ void acpi_enable_wakeup_device_prep(u8 sleep_state) { struct list_head *node, *next; - spin_lock(&acpi_device_lock); list_for_each_safe(node, next, &acpi_wakeup_device_list) { struct acpi_device *dev = container_of(node, struct acpi_device, @@ -40,11 +41,8 @@ void acpi_enable_wakeup_device_prep(u8 sleep_state) (sleep_state > (u32) dev->wakeup.sleep_state)) continue; - spin_unlock(&acpi_device_lock); acpi_enable_wakeup_device_power(dev, sleep_state); - spin_lock(&acpi_device_lock); } - spin_unlock(&acpi_device_lock); } /** @@ -60,7 +58,6 @@ void acpi_enable_wakeup_device(u8 sleep_state) * Caution: this routine must be invoked when interrupt is disabled * Refer ACPI2.0: P212 */ - spin_lock(&acpi_device_lock); list_for_each_safe(node, next, &acpi_wakeup_device_list) { struct acpi_device *dev = container_of(node, struct acpi_device, wakeup_list); @@ -74,22 +71,17 @@ void acpi_enable_wakeup_device(u8 sleep_state) if ((!dev->wakeup.state.enabled && !dev->wakeup.flags.prepared) || sleep_state > (u32) dev->wakeup.sleep_state) { if (dev->wakeup.flags.run_wake) { - spin_unlock(&acpi_device_lock); /* set_gpe_type will disable GPE, leave it like that */ acpi_set_gpe_type(dev->wakeup.gpe_device, dev->wakeup.gpe_number, ACPI_GPE_TYPE_RUNTIME); - spin_lock(&acpi_device_lock); } continue; } - spin_unlock(&acpi_device_lock); if (!dev->wakeup.flags.run_wake) acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number); - spin_lock(&acpi_device_lock); } - spin_unlock(&acpi_device_lock); } /** @@ -101,7 +93,6 @@ void acpi_disable_wakeup_device(u8 sleep_state) { struct list_head *node, *next; - spin_lock(&acpi_device_lock); list_for_each_safe(node, next, &acpi_wakeup_device_list) { struct acpi_device *dev = container_of(node, struct acpi_device, wakeup_list); @@ -112,19 +103,16 @@ void acpi_disable_wakeup_device(u8 sleep_state) if ((!dev->wakeup.state.enabled && !dev->wakeup.flags.prepared) || sleep_state > (u32) dev->wakeup.sleep_state) { if (dev->wakeup.flags.run_wake) { - spin_unlock(&acpi_device_lock); acpi_set_gpe_type(dev->wakeup.gpe_device, dev->wakeup.gpe_number, ACPI_GPE_TYPE_WAKE_RUN); /* Re-enable it, since set_gpe_type will disable it */ acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number); - spin_lock(&acpi_device_lock); } continue; } - spin_unlock(&acpi_device_lock); acpi_disable_wakeup_device_power(dev); /* Never disable run-wake GPE */ if (!dev->wakeup.flags.run_wake) { @@ -133,16 +121,14 @@ void acpi_disable_wakeup_device(u8 sleep_state) acpi_clear_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number, ACPI_NOT_ISR); } - spin_lock(&acpi_device_lock); } - spin_unlock(&acpi_device_lock); } int __init acpi_wakeup_device_init(void) { struct list_head *node, *next; - spin_lock(&acpi_device_lock); + mutex_lock(&acpi_device_lock); list_for_each_safe(node, next, &acpi_wakeup_device_list) { struct acpi_device *dev = container_of(node, struct acpi_device, @@ -150,15 +136,13 @@ int __init acpi_wakeup_device_init(void) /* In case user doesn't load button driver */ if (!dev->wakeup.flags.run_wake || dev->wakeup.state.enabled) continue; - spin_unlock(&acpi_device_lock); acpi_set_gpe_type(dev->wakeup.gpe_device, dev->wakeup.gpe_number, ACPI_GPE_TYPE_WAKE_RUN); 
acpi_enable_gpe(dev->wakeup.gpe_device, dev->wakeup.gpe_number); dev->wakeup.state.enabled = 1; - spin_lock(&acpi_device_lock); } - spin_unlock(&acpi_device_lock); + mutex_unlock(&acpi_device_lock); return 0; } diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 57be6bea48e..08186ecbaf8 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -114,6 +114,7 @@ enum { board_ahci_sb700 = 5, /* for SB700 and SB800 */ board_ahci_mcp65 = 6, board_ahci_nopmp = 7, + board_ahci_yesncq = 8, /* global controller registers */ HOST_CAP = 0x00, /* host capabilities */ @@ -469,6 +470,14 @@ static const struct ata_port_info ahci_port_info[] = { .udma_mask = ATA_UDMA6, .port_ops = &ahci_ops, }, + /* board_ahci_yesncq */ + { + AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ), + .flags = AHCI_FLAG_COMMON, + .pio_mask = ATA_PIO4, + .udma_mask = ATA_UDMA6, + .port_ops = &ahci_ops, + }, }; static const struct pci_device_id ahci_pci_tbl[] = { @@ -535,30 +544,30 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */ { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */ { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */ - { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci }, /* MCP67 */ - { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci }, /* MCP67 */ - { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci }, /* MCP67 */ - { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci }, /* MCP67 */ - { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci }, /* MCP67 */ - { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci }, /* MCP67 */ - { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci }, /* MCP67 */ - { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci }, /* MCP67 */ - { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci }, /* MCP67 */ - { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci }, /* MCP67 */ - { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci }, /* MCP67 */ - { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci }, /* MCP67 */ - { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci }, /* MCP73 */ - { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci }, /* MCP73 */ - { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci }, /* MCP73 */ - { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci }, /* MCP73 */ - { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci }, /* MCP73 */ - { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci }, /* MCP73 */ - { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci }, /* MCP73 */ - { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci }, /* MCP73 */ - { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci }, /* MCP73 */ - { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci }, /* MCP73 */ - { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci }, /* MCP73 */ - { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */ + { 
PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq }, /* MCP73 */ + { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq }, /* MCP73 */ { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */ { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */ { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */ diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index e7ea77cf606..17c5d48a75d 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -1231,6 +1231,9 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf) * * We follow the current spec and consider that 0x69/0x96 * identifies a port multiplier and 0x3c/0xc3 a SEMB device. + * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports + * SEMB signature. This is worked around in + * ata_dev_read_id(). */ if ((tf->lbam == 0) && (tf->lbah == 0)) { DPRINTK("found ATA device by sig\n"); @@ -1248,8 +1251,8 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf) } if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) { - printk(KERN_INFO "ata: SEMB device ignored\n"); - return ATA_DEV_SEMB_UNSUP; /* not yet */ + DPRINTK("found SEMB device by sig (could be ATA device)\n"); + return ATA_DEV_SEMB; } DPRINTK("unknown device\n"); @@ -1653,8 +1656,8 @@ unsigned long ata_id_xfermask(const u16 *id) /* * Process compact flash extended modes */ - int pio = id[163] & 0x7; - int dma = (id[163] >> 3) & 7; + int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7; + int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7; if (pio) pio_mask |= (1 << 5); @@ -2080,6 +2083,7 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, struct ata_taskfile tf; unsigned int err_mask = 0; const char *reason; + bool is_semb = class == ATA_DEV_SEMB; int may_fallback = 1, tried_spinup = 0; int rc; @@ -2090,6 +2094,8 @@ retry: ata_tf_init(dev, &tf); switch (class) { + case ATA_DEV_SEMB: + class = ATA_DEV_ATA; /* some hard drives report SEMB sig */ case ATA_DEV_ATA: tf.command = ATA_CMD_ID_ATA; break; @@ -2126,6 +2132,14 @@ retry: return -ENOENT; } + if (is_semb) { + ata_dev_printk(dev, KERN_INFO, "IDENTIFY failed on " + "device w/ SEMB sig, disabled\n"); + /* SEMB is not supported yet */ + *p_class = ATA_DEV_SEMB_UNSUP; + return 0; + } + if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) { /* Device or controller might have reported * the wrong device class. 
Give a shot at the @@ -2412,7 +2426,8 @@ int ata_dev_configure(struct ata_device *dev) /* ATA-specific feature tests */ if (dev->class == ATA_DEV_ATA) { if (ata_id_is_cfa(id)) { - if (id[162] & 1) /* CPRM may make this media unusable */ + /* CPRM may make this media unusable */ + if (id[ATA_ID_CFA_KEY_MGMT] & 1) ata_dev_printk(dev, KERN_WARNING, "supports DRM functions and may " "not be fully accessable.\n"); @@ -6110,13 +6125,11 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) ata_port_printk(ap, KERN_INFO, "DUMMY\n"); } - /* perform each probe synchronously */ - DPRINTK("probe begin\n"); + /* perform each probe asynchronously */ for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; async_schedule(async_port_probe, ap); } - DPRINTK("probe end\n"); return 0; } diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index b9747fa59e5..2733b0c90b7 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -647,23 +647,45 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg) return rc; } +static int ata_ioc32(struct ata_port *ap) +{ + if (ap->flags & ATA_FLAG_PIO_DMA) + return 1; + if (ap->pflags & ATA_PFLAG_PIO32) + return 1; + return 0; +} + int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev, int cmd, void __user *arg) { int val = -EINVAL, rc = -EINVAL; + unsigned long flags; switch (cmd) { case ATA_IOC_GET_IO32: - val = 0; + spin_lock_irqsave(ap->lock, flags); + val = ata_ioc32(ap); + spin_unlock_irqrestore(ap->lock, flags); if (copy_to_user(arg, &val, 1)) return -EFAULT; return 0; case ATA_IOC_SET_IO32: val = (unsigned long) arg; - if (val != 0) - return -EINVAL; - return 0; + rc = 0; + spin_lock_irqsave(ap->lock, flags); + if (ap->pflags & ATA_PFLAG_PIO32CHANGE) { + if (val) + ap->pflags |= ATA_PFLAG_PIO32; + else + ap->pflags &= ~ATA_PFLAG_PIO32; + } else { + if (val != ata_ioc32(ap)) + rc = -EINVAL; + } + spin_unlock_irqrestore(ap->lock, flags); + return rc; case HDIO_GET_IDENTITY: return ata_get_identity(ap, scsidev, arg); diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 8332e97a9de..bb18415d3d6 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -87,6 +87,7 @@ const struct ata_port_operations ata_bmdma32_port_ops = { .inherits = &ata_bmdma_port_ops, .sff_data_xfer = ata_sff_data_xfer32, + .port_start = ata_sff_port_start32, }; EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops); @@ -769,6 +770,9 @@ unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf, void __iomem *data_addr = ap->ioaddr.data_addr; unsigned int words = buflen >> 2; int slop = buflen & 3; + + if (!(ap->pflags & ATA_PFLAG_PIO32)) + return ata_sff_data_xfer(dev, buf, buflen, rw); /* Transfer multiple of 4 bytes */ if (rw == READ) @@ -2402,6 +2406,29 @@ int ata_sff_port_start(struct ata_port *ap) EXPORT_SYMBOL_GPL(ata_sff_port_start); /** + * ata_sff_port_start32 - Set port up for dma. + * @ap: Port to initialize + * + * Called just after data structures for each port are + * initialized. Allocates space for PRD table if the device + * is DMA capable SFF. + * + * May be used as the port_start() entry in ata_port_operations for + * devices that are capable of 32bit PIO. + * + * LOCKING: + * Inherited from caller. 
+ */ +int ata_sff_port_start32(struct ata_port *ap) +{ + ap->pflags |= ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE; + if (ap->ioaddr.bmdma_addr) + return ata_port_start(ap); + return 0; +} +EXPORT_SYMBOL_GPL(ata_sff_port_start32); + +/** * ata_sff_std_ports - initialize ioaddr with standard port offsets. * @ioaddr: IO address structure to be initialized * diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c index 81ab57003ab..122c786449a 100644 --- a/drivers/ata/pata_hpt37x.c +++ b/drivers/ata/pata_hpt37x.c @@ -8,7 +8,7 @@ * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org> * Portions Copyright (C) 2001 Sun Microsystems, Inc. * Portions Copyright (C) 2003 Red Hat Inc - * Portions Copyright (C) 2005-2007 MontaVista Software, Inc. + * Portions Copyright (C) 2005-2009 MontaVista Software, Inc. * * TODO * Look into engine reset on timeout errors. Should not be required. @@ -24,7 +24,7 @@ #include <linux/libata.h> #define DRV_NAME "pata_hpt37x" -#define DRV_VERSION "0.6.11" +#define DRV_VERSION "0.6.12" struct hpt_clock { u8 xfer_speed; @@ -445,23 +445,6 @@ static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev) } /** - * hpt370_bmdma_start - DMA engine begin - * @qc: ATA command - * - * The 370 and 370A want us to reset the DMA engine each time we - * use it. The 372 and later are fine. - */ - -static void hpt370_bmdma_start(struct ata_queued_cmd *qc) -{ - struct ata_port *ap = qc->ap; - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37); - udelay(10); - ata_bmdma_start(qc); -} - -/** * hpt370_bmdma_end - DMA engine stop * @qc: ATA command * @@ -598,7 +581,6 @@ static struct scsi_host_template hpt37x_sht = { static struct ata_port_operations hpt370_port_ops = { .inherits = &ata_bmdma_port_ops, - .bmdma_start = hpt370_bmdma_start, .bmdma_stop = hpt370_bmdma_stop, .mode_filter = hpt370_filter, diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c index 3f830f0fe2c..6f985bed8cb 100644 --- a/drivers/ata/pata_legacy.c +++ b/drivers/ata/pata_legacy.c @@ -108,6 +108,7 @@ struct legacy_controller { struct ata_port_operations *ops; unsigned int pio_mask; unsigned int flags; + unsigned int pflags; int (*setup)(struct platform_device *, struct legacy_probe *probe, struct legacy_data *data); }; @@ -285,7 +286,8 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev, { int slop = buflen & 3; /* 32bit I/O capable *and* we need to write a whole number of dwords */ - if (ata_id_has_dword_io(dev->id) && (slop == 0 || slop == 3)) { + if (ata_id_has_dword_io(dev->id) && (slop == 0 || slop == 3) + && (ap->pflags & ATA_PFLAG_PIO32)) { struct ata_port *ap = dev->link->ap; unsigned long flags; @@ -736,7 +738,8 @@ static unsigned int vlb32_data_xfer(struct ata_device *adev, unsigned char *buf, struct ata_port *ap = adev->link->ap; int slop = buflen & 3; - if (ata_id_has_dword_io(adev->id) && (slop == 0 || slop == 3)) { + if (ata_id_has_dword_io(adev->id) && (slop == 0 || slop == 3) + && (ap->pflags & ATA_PFLAG_PIO32)) { if (rw == WRITE) iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2); else @@ -858,27 +861,30 @@ static struct ata_port_operations winbond_port_ops = { static struct legacy_controller controllers[] = { {"BIOS", &legacy_port_ops, 0x1F, - ATA_FLAG_NO_IORDY, NULL }, + ATA_FLAG_NO_IORDY, 0, NULL }, {"Snooping", &simple_port_ops, 0x1F, - 0 , NULL }, + 0, 0, NULL }, {"PDC20230", &pdc20230_port_ops, 0x7, - ATA_FLAG_NO_IORDY, NULL }, + ATA_FLAG_NO_IORDY, + ATA_PFLAG_PIO32 | 
ATA_PFLAG_PIO32_CHANGE, NULL }, {"HT6560A", &ht6560a_port_ops, 0x07, - ATA_FLAG_NO_IORDY, NULL }, + ATA_FLAG_NO_IORDY, 0, NULL }, {"HT6560B", &ht6560b_port_ops, 0x1F, - ATA_FLAG_NO_IORDY, NULL }, + ATA_FLAG_NO_IORDY, 0, NULL }, {"OPTI82C611A", &opti82c611a_port_ops, 0x0F, - 0 , NULL }, + 0, 0, NULL }, {"OPTI82C46X", &opti82c46x_port_ops, 0x0F, - 0 , NULL }, + 0, 0, NULL }, {"QDI6500", &qdi6500_port_ops, 0x07, - ATA_FLAG_NO_IORDY, qdi_port }, + ATA_FLAG_NO_IORDY, + ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32_CHANGE, qdi_port }, {"QDI6580", &qdi6580_port_ops, 0x1F, - 0 , qdi_port }, + 0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32_CHANGE, qdi_port }, {"QDI6580DP", &qdi6580dp_port_ops, 0x1F, - 0 , qdi_port }, + 0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32_CHANGE, qdi_port }, {"W83759A", &winbond_port_ops, 0x1F, - 0 , winbond_port } + 0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32_CHANGE, + winbond_port } }; /** @@ -1008,6 +1014,7 @@ static __init int legacy_init_one(struct legacy_probe *probe) ap->ops = ops; ap->pio_mask = pio_modes; ap->flags |= ATA_FLAG_SLAVE_POSS | iordy; + ap->pflags |= controller->pflags; ap->ioaddr.cmd_addr = io_addr; ap->ioaddr.altstatus_addr = ctrl_addr; ap->ioaddr.ctl_addr = ctrl_addr; @@ -1032,6 +1039,7 @@ static __init int legacy_init_one(struct legacy_probe *probe) return 0; } } + ata_host_detach(host); fail: platform_device_unregister(pdev); return ret; diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c index 0fb6b1b1e63..dd53a66b19e 100644 --- a/drivers/ata/pata_ninja32.c +++ b/drivers/ata/pata_ninja32.c @@ -44,7 +44,7 @@ #include <linux/libata.h> #define DRV_NAME "pata_ninja32" -#define DRV_VERSION "0.1.3" +#define DRV_VERSION "0.1.5" /** @@ -86,6 +86,7 @@ static struct ata_port_operations ninja32_port_ops = { .sff_dev_select = ninja32_dev_select, .cable_detect = ata_cable_40wire, .set_piomode = ninja32_set_piomode, + .sff_data_xfer = ata_sff_data_xfer32 }; static void ninja32_program(void __iomem *base) @@ -144,6 +145,7 @@ static int ninja32_init_one(struct pci_dev *dev, const struct pci_device_id *id) ap->ioaddr.altstatus_addr = base + 0x1E; ap->ioaddr.bmdma_addr = base; ata_sff_std_ports(&ap->ioaddr); + ap->pflags = ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE; ninja32_program(base); /* FIXME: Should we disable them at remove ? 
*/ diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c index 98e8c50703b..bdd43c7f432 100644 --- a/drivers/ata/sata_via.c +++ b/drivers/ata/sata_via.c @@ -566,7 +566,7 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) static int printed_version; unsigned int i; int rc; - struct ata_host *host; + struct ata_host *host = NULL; int board_id = (int) ent->driver_data; const unsigned *bar_sizes; diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index be204308cc1..9359613addc 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -1059,7 +1059,7 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id) goto out; } - err = pci_set_dma_mask(dev, DMA_32BIT_MASK); + err = pci_set_dma_mask(dev, DMA_BIT_MASK(32)); if (err) { dev_warn(&dev->dev, "Failed to set 32-bit DMA mask\n"); goto out; diff --git a/drivers/block/brd.c b/drivers/block/brd.c index bdd4f5f4557..5f7e64ba87e 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -275,8 +275,10 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page, if (rw == READ) { copy_from_brd(mem + off, brd, sector, len); flush_dcache_page(page); - } else + } else { + flush_dcache_page(page); copy_to_brd(brd, mem + off, sector, len); + } kunmap_atomic(mem, KM_USER0); out: @@ -436,6 +438,7 @@ static struct brd_device *brd_alloc(int i) if (!brd->brd_queue) goto out_free_dev; blk_queue_make_request(brd->brd_queue, brd_make_request); + blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG, NULL); blk_queue_max_sectors(brd->brd_queue, 1024); blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY); diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 0ef6f08aa6e..4d4d5e0d3fa 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -3505,7 +3505,7 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u /* The Inbound Post Queue only accepts 32-bit physical addresses for the CCISS commands, so they must be allocated from the lower 4GiB of memory. 
*/ - err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { iounmap(vaddr); return -ENOMEM; diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index 6cccdc3f522..4aecf5dc6a9 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c @@ -563,7 +563,7 @@ static void ace_fsm_dostate(struct ace_device *ace) case ACE_FSM_STATE_IDENTIFY_PREPARE: /* Send identify command */ ace->fsm_task = ACE_TASK_IDENTIFY; - ace->data_ptr = &ace->cf_id; + ace->data_ptr = ace->cf_id; ace->data_count = ACE_BUF_PER_SECTOR; ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY); @@ -608,8 +608,8 @@ static void ace_fsm_dostate(struct ace_device *ace) break; case ACE_FSM_STATE_IDENTIFY_COMPLETE: - ace_fix_driveid(&ace->cf_id[0]); - ace_dump_mem(&ace->cf_id, 512); /* Debug: Dump out disk ID */ + ace_fix_driveid(ace->cf_id); + ace_dump_mem(ace->cf_id, 512); /* Debug: Dump out disk ID */ if (ace->data_result) { /* Error occured, disable the disk */ @@ -622,9 +622,9 @@ static void ace_fsm_dostate(struct ace_device *ace) /* Record disk parameters */ set_capacity(ace->gd, - ata_id_u32(&ace->cf_id, ATA_ID_LBA_CAPACITY)); + ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY)); dev_info(ace->dev, "capacity: %i sectors\n", - ata_id_u32(&ace->cf_id, ATA_ID_LBA_CAPACITY)); + ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY)); } /* We're done, drop to IDLE state and notify waiters */ @@ -923,7 +923,7 @@ static int ace_release(struct gendisk *disk, fmode_t mode) static int ace_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct ace_device *ace = bdev->bd_disk->private_data; - u16 *cf_id = &ace->cf_id[0]; + u16 *cf_id = ace->cf_id; dev_dbg(ace->dev, "ace_getgeo()\n"); diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index 9d9490e22e0..3686912427b 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -2131,6 +2131,8 @@ static const struct intel_driver_description { { PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, 0, "830M", &intel_845_driver, &intel_830_driver }, { PCI_DEVICE_ID_INTEL_82850_HB, 0, 0, "i850", &intel_850_driver, NULL }, + { PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, 0, "854", + &intel_845_driver, &intel_830_driver }, { PCI_DEVICE_ID_INTEL_82855PM_HB, 0, 0, "855PM", &intel_845_driver, NULL }, { PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, 0, "855GM", &intel_845_driver, &intel_830_driver }, @@ -2355,6 +2357,7 @@ static struct pci_device_id agp_intel_pci_table[] = { ID(PCI_DEVICE_ID_INTEL_82845_HB), ID(PCI_DEVICE_ID_INTEL_82845G_HB), ID(PCI_DEVICE_ID_INTEL_82850_HB), + ID(PCI_DEVICE_ID_INTEL_82854_HB), ID(PCI_DEVICE_ID_INTEL_82855PM_HB), ID(PCI_DEVICE_ID_INTEL_82855GM_HB), ID(PCI_DEVICE_ID_INTEL_82860_HB), diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c index 6de020d078e..b0a6a3e5192 100644 --- a/drivers/char/sysrq.c +++ b/drivers/char/sysrq.c @@ -35,7 +35,6 @@ #include <linux/vt_kern.h> #include <linux/workqueue.h> #include <linux/kexec.h> -#include <linux/interrupt.h> #include <linux/hrtimer.h> #include <linux/oom.h> diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h index 28f2c3f959b..6ad95c8d636 100644 --- a/drivers/edac/edac_core.h +++ b/drivers/edac/edac_core.h @@ -767,11 +767,19 @@ static inline void pci_write_bits16(struct pci_dev *pdev, int offset, pci_write_config_word(pdev, offset, value); } -/* write all or some bits in a dword-register*/ +/* + * pci_write_bits32 + * + * edac local routine to do 
pci_write_config_dword, but adds + * a mask parameter. If mask is all ones, ignore the mask. + * Otherwise utilize the mask to isolate specified bits + * + * write all or some bits in a dword-register + */ static inline void pci_write_bits32(struct pci_dev *pdev, int offset, u32 value, u32 mask) { - if (mask != 0xffff) { + if (mask != 0xffffffff) { u32 buf; pci_read_config_dword(pdev, offset, &buf); diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c index ca9113e1c10..a7d2c717d03 100644 --- a/drivers/edac/edac_device.c +++ b/drivers/edac/edac_device.c @@ -389,7 +389,7 @@ static void del_edac_device_from_global_list(struct edac_device_ctl_info */ static void edac_device_workq_function(struct work_struct *work_req) { - struct delayed_work *d_work = (struct delayed_work *)work_req; + struct delayed_work *d_work = to_delayed_work(work_req); struct edac_device_ctl_info *edac_dev = to_edac_device_ctl_work(d_work); mutex_lock(&device_ctls_mutex); diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 25d66940b4f..335b7ebdb11 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -260,7 +260,7 @@ static int edac_mc_assert_error_check_and_clear(void) */ static void edac_mc_workq_function(struct work_struct *work_req) { - struct delayed_work *d_work = (struct delayed_work *)work_req; + struct delayed_work *d_work = to_delayed_work(work_req); struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work); mutex_lock(&mem_ctls_mutex); diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c index 5b150aea703..30b585b1d60 100644 --- a/drivers/edac/edac_pci.c +++ b/drivers/edac/edac_pci.c @@ -233,7 +233,7 @@ EXPORT_SYMBOL_GPL(edac_pci_find); */ static void edac_pci_workq_function(struct work_struct *work_req) { - struct delayed_work *d_work = (struct delayed_work *)work_req; + struct delayed_work *d_work = to_delayed_work(work_req); struct edac_pci_ctl_info *pci = to_edac_pci_ctl_work(d_work); int msec; unsigned long delay; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 3750d800304..473a8f7fbdb 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -446,6 +446,9 @@ struct drm_i915_gem_object { uint32_t tiling_mode; uint32_t stride; + /** Record of address bit 17 of each page at last unbind. 
*/ + long *bit_17; + /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */ uint32_t agp_type; @@ -635,9 +638,13 @@ int i915_gem_attach_phys_object(struct drm_device *dev, void i915_gem_detach_phys_object(struct drm_device *dev, struct drm_gem_object *obj); void i915_gem_free_all_phys_object(struct drm_device *dev); +int i915_gem_object_get_pages(struct drm_gem_object *obj); +void i915_gem_object_put_pages(struct drm_gem_object *obj); /* i915_gem_tiling.c */ void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); +void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); +void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj); /* i915_gem_debug.c */ void i915_gem_dump_object(struct drm_gem_object *obj, int len, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 1449b452cc6..4642115902d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -43,8 +43,6 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, uint64_t offset, uint64_t size); static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); -static int i915_gem_object_get_pages(struct drm_gem_object *obj); -static void i915_gem_object_put_pages(struct drm_gem_object *obj); static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment); @@ -143,15 +141,27 @@ fast_shmem_read(struct page **pages, int length) { char __iomem *vaddr; - int ret; + int unwritten; vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0); if (vaddr == NULL) return -ENOMEM; - ret = __copy_to_user_inatomic(data, vaddr + page_offset, length); + unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length); kunmap_atomic(vaddr, KM_USER0); - return ret; + if (unwritten) + return -EFAULT; + + return 0; +} + +static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) +{ + drm_i915_private_t *dev_priv = obj->dev->dev_private; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + + return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && + obj_priv->tiling_mode != I915_TILING_NONE; } static inline int @@ -181,6 +191,64 @@ slow_shmem_copy(struct page *dst_page, return 0; } +static inline int +slow_shmem_bit17_copy(struct page *gpu_page, + int gpu_offset, + struct page *cpu_page, + int cpu_offset, + int length, + int is_read) +{ + char *gpu_vaddr, *cpu_vaddr; + + /* Use the unswizzled path if this page isn't affected. */ + if ((page_to_phys(gpu_page) & (1 << 17)) == 0) { + if (is_read) + return slow_shmem_copy(cpu_page, cpu_offset, + gpu_page, gpu_offset, length); + else + return slow_shmem_copy(gpu_page, gpu_offset, + cpu_page, cpu_offset, length); + } + + gpu_vaddr = kmap_atomic(gpu_page, KM_USER0); + if (gpu_vaddr == NULL) + return -ENOMEM; + + cpu_vaddr = kmap_atomic(cpu_page, KM_USER1); + if (cpu_vaddr == NULL) { + kunmap_atomic(gpu_vaddr, KM_USER0); + return -ENOMEM; + } + + /* Copy the data, XORing A6 with A17 (1). 
The user already knows he's + * XORing with the other bits (A9 for Y, A9 and A10 for X) + */ + while (length > 0) { + int cacheline_end = ALIGN(gpu_offset + 1, 64); + int this_length = min(cacheline_end - gpu_offset, length); + int swizzled_gpu_offset = gpu_offset ^ 64; + + if (is_read) { + memcpy(cpu_vaddr + cpu_offset, + gpu_vaddr + swizzled_gpu_offset, + this_length); + } else { + memcpy(gpu_vaddr + swizzled_gpu_offset, + cpu_vaddr + cpu_offset, + this_length); + } + cpu_offset += this_length; + gpu_offset += this_length; + length -= this_length; + } + + kunmap_atomic(cpu_vaddr, KM_USER1); + kunmap_atomic(gpu_vaddr, KM_USER0); + + return 0; +} + /** * This is the fast shmem pread path, which attempts to copy_from_user directly * from the backing pages of the object to the user's address space. On a @@ -269,6 +337,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, int page_length; int ret; uint64_t data_ptr = args->data_ptr; + int do_bit17_swizzling; remain = args->size; @@ -286,13 +355,15 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, down_read(&mm->mmap_sem); pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr, - num_pages, 0, 0, user_pages, NULL); + num_pages, 1, 0, user_pages, NULL); up_read(&mm->mmap_sem); if (pinned_pages < num_pages) { ret = -EFAULT; goto fail_put_user_pages; } + do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); + mutex_lock(&dev->struct_mutex); ret = i915_gem_object_get_pages(obj); @@ -327,11 +398,20 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, if ((data_page_offset + page_length) > PAGE_SIZE) page_length = PAGE_SIZE - data_page_offset; - ret = slow_shmem_copy(user_pages[data_page_index], - data_page_offset, - obj_priv->pages[shmem_page_index], - shmem_page_offset, - page_length); + if (do_bit17_swizzling) { + ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], + shmem_page_offset, + user_pages[data_page_index], + data_page_offset, + page_length, + 1); + } else { + ret = slow_shmem_copy(user_pages[data_page_index], + data_page_offset, + obj_priv->pages[shmem_page_index], + shmem_page_offset, + page_length); + } if (ret) goto fail_put_pages; @@ -383,9 +463,14 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); - if (ret != 0) + if (i915_gem_object_needs_bit17_swizzle(obj)) { ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); + } else { + ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); + if (ret != 0) + ret = i915_gem_shmem_pread_slow(dev, obj, args, + file_priv); + } drm_gem_object_unreference(obj); @@ -727,6 +812,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, int page_length; int ret; uint64_t data_ptr = args->data_ptr; + int do_bit17_swizzling; remain = args->size; @@ -751,6 +837,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, goto fail_put_user_pages; } + do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); + mutex_lock(&dev->struct_mutex); ret = i915_gem_object_get_pages(obj); @@ -785,11 +873,20 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, if ((data_page_offset + page_length) > PAGE_SIZE) page_length = PAGE_SIZE - data_page_offset; - ret = slow_shmem_copy(obj_priv->pages[shmem_page_index], - shmem_page_offset, - user_pages[data_page_index], - data_page_offset, - page_length); + if 
(do_bit17_swizzling) { + ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], + shmem_page_offset, + user_pages[data_page_index], + data_page_offset, + page_length, + 0); + } else { + ret = slow_shmem_copy(obj_priv->pages[shmem_page_index], + shmem_page_offset, + user_pages[data_page_index], + data_page_offset, + page_length); + } if (ret) goto fail_put_pages; @@ -854,6 +951,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, ret = i915_gem_gtt_pwrite_slow(dev, obj, args, file_priv); } + } else if (i915_gem_object_needs_bit17_swizzle(obj)) { + ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv); } else { ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv); if (ret == -EFAULT) { @@ -1285,7 +1384,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, return 0; } -static void +void i915_gem_object_put_pages(struct drm_gem_object *obj) { struct drm_i915_gem_object *obj_priv = obj->driver_private; @@ -1297,6 +1396,9 @@ i915_gem_object_put_pages(struct drm_gem_object *obj) if (--obj_priv->pages_refcount != 0) return; + if (obj_priv->tiling_mode != I915_TILING_NONE) + i915_gem_object_save_bit_17_swizzle(obj); + for (i = 0; i < page_count; i++) if (obj_priv->pages[i] != NULL) { if (obj_priv->dirty) @@ -1494,8 +1596,19 @@ i915_gem_retire_request(struct drm_device *dev, if (obj->write_domain != 0) i915_gem_object_move_to_flushing(obj); - else + else { + /* Take a reference on the object so it won't be + * freed while the spinlock is held. The list + * protection for this spinlock is safe when breaking + * the lock like this since the next thing we do + * is just get the head of the list again. + */ + drm_gem_object_reference(obj); i915_gem_object_move_to_inactive(obj); + spin_unlock(&dev_priv->mm.active_list_lock); + drm_gem_object_unreference(obj); + spin_lock(&dev_priv->mm.active_list_lock); + } } out: spin_unlock(&dev_priv->mm.active_list_lock); @@ -1884,7 +1997,7 @@ i915_gem_evict_everything(struct drm_device *dev) return ret; } -static int +int i915_gem_object_get_pages(struct drm_gem_object *obj) { struct drm_i915_gem_object *obj_priv = obj->driver_private; @@ -1922,6 +2035,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj) } obj_priv->pages[i] = page; } + + if (obj_priv->tiling_mode != I915_TILING_NONE) + i915_gem_object_do_bit_17_swizzle(obj); + return 0; } @@ -3002,13 +3119,13 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list, drm_free(*relocs, reloc_count * sizeof(**relocs), DRM_MEM_DRIVER); *relocs = NULL; - return ret; + return -EFAULT; } reloc_index += exec_list[i].relocation_count; } - return ret; + return 0; } static int @@ -3017,23 +3134,28 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list, struct drm_i915_gem_relocation_entry *relocs) { uint32_t reloc_count = 0, i; - int ret; + int ret = 0; for (i = 0; i < buffer_count; i++) { struct drm_i915_gem_relocation_entry __user *user_relocs; + int unwritten; user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; - if (ret == 0) { - ret = copy_to_user(user_relocs, - &relocs[reloc_count], - exec_list[i].relocation_count * - sizeof(*relocs)); + unwritten = copy_to_user(user_relocs, + &relocs[reloc_count], + exec_list[i].relocation_count * + sizeof(*relocs)); + + if (unwritten) { + ret = -EFAULT; + goto err; } reloc_count += exec_list[i].relocation_count; } +err: drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER); return ret; @@ -3243,7 +3365,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, exec_offset = 
exec_list[args->buffer_count - 1].offset; #if WATCH_EXEC - i915_gem_dump_object(object_list[args->buffer_count - 1], + i915_gem_dump_object(batch_obj, args->batch_len, __func__, ~0); @@ -3308,10 +3430,12 @@ err: (uintptr_t) args->buffers_ptr, exec_list, sizeof(*exec_list) * args->buffer_count); - if (ret) + if (ret) { + ret = -EFAULT; DRM_ERROR("failed to copy %d exec entries " "back to user (%d)\n", args->buffer_count, ret); + } } /* Copy the updated relocations out regardless of current error @@ -3593,6 +3717,7 @@ void i915_gem_free_object(struct drm_gem_object *obj) i915_gem_free_mmap_offset(obj); drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); + kfree(obj_priv->bit_17); drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); } diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c index a1ac0c5e730..986f1082c59 100644 --- a/drivers/gpu/drm/i915/i915_gem_debugfs.c +++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c @@ -234,6 +234,96 @@ static int i915_hws_info(struct seq_file *m, void *data) return 0; } +static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count) +{ + int page, i; + uint32_t *mem; + + for (page = 0; page < page_count; page++) { + mem = kmap(pages[page]); + for (i = 0; i < PAGE_SIZE; i += 4) + seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); + kunmap(pages[page]); + } +} + +static int i915_batchbuffer_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + int ret; + + spin_lock(&dev_priv->mm.active_list_lock); + + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { + obj = obj_priv->obj; + if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { + ret = i915_gem_object_get_pages(obj); + if (ret) { + DRM_ERROR("Failed to get pages: %d\n", ret); + spin_unlock(&dev_priv->mm.active_list_lock); + return ret; + } + + seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset); + i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE); + + i915_gem_object_put_pages(obj); + } + } + + spin_unlock(&dev_priv->mm.active_list_lock); + + return 0; +} + +static int i915_ringbuffer_data(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + u8 *virt; + uint32_t *ptr, off; + + if (!dev_priv->ring.ring_obj) { + seq_printf(m, "No ringbuffer setup\n"); + return 0; + } + + virt = dev_priv->ring.virtual_start; + + for (off = 0; off < dev_priv->ring.Size; off += 4) { + ptr = (uint32_t *)(virt + off); + seq_printf(m, "%08x : %08x\n", off, *ptr); + } + + return 0; +} + +static int i915_ringbuffer_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + unsigned int head, tail, mask; + + head = I915_READ(PRB0_HEAD) & HEAD_ADDR; + tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; + mask = dev_priv->ring.tail_mask; + + seq_printf(m, "RingHead : %08x\n", head); + seq_printf(m, "RingTail : %08x\n", tail); + seq_printf(m, "RingMask : %08x\n", mask); + seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size); + seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? 
ACTHD_I965 : ACTHD)); + + return 0; +} + + static struct drm_info_list i915_gem_debugfs_list[] = { {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST}, {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST}, @@ -243,6 +333,9 @@ static struct drm_info_list i915_gem_debugfs_list[] = { {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, {"i915_gem_interrupt", i915_interrupt_info, 0}, {"i915_gem_hws", i915_hws_info, 0}, + {"i915_ringbuffer_data", i915_ringbuffer_data, 0}, + {"i915_ringbuffer_info", i915_ringbuffer_info, 0}, + {"i915_batchbuffers", i915_batchbuffer_info, 0}, }; #define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list) diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 6be3f927c86..f27e523c764 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -25,6 +25,8 @@ * */ +#include "linux/string.h" +#include "linux/bitops.h" #include "drmP.h" #include "drm.h" #include "i915_drm.h" @@ -127,8 +129,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) swizzle_y = I915_BIT_6_SWIZZLE_9_11; } else { /* Bit 17 swizzling by the CPU in addition. */ - swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; - swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; + swizzle_x = I915_BIT_6_SWIZZLE_9_10_17; + swizzle_y = I915_BIT_6_SWIZZLE_9_17; } break; } @@ -288,6 +290,19 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; else args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; + + /* Hide bit 17 swizzling from the user. This prevents old Mesa + * from aborting the application on sw fallbacks to bit 17, + * and we use the pread/pwrite bit17 paths to swizzle for it. + * If there was a user that was relying on the swizzle + * information for drm_intel_bo_map()ed reads/writes this would + * break it, but we don't have any of those. + */ + if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) + args->swizzle_mode = I915_BIT_6_SWIZZLE_9; + if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) + args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; + /* If we can't handle the swizzling, make it untiled. */ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) { args->tiling_mode = I915_TILING_NONE; @@ -354,8 +369,100 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, DRM_ERROR("unknown tiling mode\n"); } + /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */ + if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) + args->swizzle_mode = I915_BIT_6_SWIZZLE_9; + if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) + args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; + drm_gem_object_unreference(obj); mutex_unlock(&dev->struct_mutex); return 0; } + +/** + * Swap every 64 bytes of this page around, to account for it having a new + * bit 17 of its physical address and therefore being interpreted differently + * by the GPU. 
+ */ +static int +i915_gem_swizzle_page(struct page *page) +{ + char *vaddr; + int i; + char temp[64]; + + vaddr = kmap(page); + if (vaddr == NULL) + return -ENOMEM; + + for (i = 0; i < PAGE_SIZE; i += 128) { + memcpy(temp, &vaddr[i], 64); + memcpy(&vaddr[i], &vaddr[i + 64], 64); + memcpy(&vaddr[i + 64], temp, 64); + } + + kunmap(page); + + return 0; +} + +void +i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + int page_count = obj->size >> PAGE_SHIFT; + int i; + + if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) + return; + + if (obj_priv->bit_17 == NULL) + return; + + for (i = 0; i < page_count; i++) { + char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17; + if ((new_bit_17 & 0x1) != + (test_bit(i, obj_priv->bit_17) != 0)) { + int ret = i915_gem_swizzle_page(obj_priv->pages[i]); + if (ret != 0) { + DRM_ERROR("Failed to swizzle page\n"); + return; + } + set_page_dirty(obj_priv->pages[i]); + } + } +} + +void +i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + int page_count = obj->size >> PAGE_SHIFT; + int i; + + if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) + return; + + if (obj_priv->bit_17 == NULL) { + obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * + sizeof(long), GFP_KERNEL); + if (obj_priv->bit_17 == NULL) { + DRM_ERROR("Failed to allocate memory for bit 17 " + "record\n"); + return; + } + } + + for (i = 0; i < page_count; i++) { + if (page_to_phys(obj_priv->pages[i]) & (1 << 17)) + __set_bit(i, obj_priv->bit_17); + else + __clear_bit(i, obj_priv->bit_17); + } +} diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 64773ce5296..c2c8e95ff14 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -367,6 +367,7 @@ static const intel_limit_t intel_limits[] = { .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, .p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST }, + .find_pll = intel_find_best_PLL, }, { /* INTEL_LIMIT_IGD_LVDS */ .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, @@ -380,6 +381,7 @@ static const intel_limit_t intel_limits[] = { /* IGD only supports single-channel mode. 
*/ .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, + .find_pll = intel_find_best_PLL, }, }; diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index b7f0ebe9f81..3e094beecb9 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -864,8 +864,8 @@ static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3) static struct sysrq_key_op sysrq_intelfb_restore_op = { .handler = intelfb_sysrq, - .help_msg = "force fb", - .action_msg = "force restore of fb console", + .help_msg = "force-fb(G)", + .action_msg = "Restore framebuffer console", }; int intelfb_probe(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index b06a4a3ff08..55037422538 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -38,7 +38,7 @@ struct intel_hdmi_priv { u32 sdvox_reg; u32 save_SDVOX; - int has_hdmi_sink; + bool has_hdmi_sink; }; static void intel_hdmi_mode_set(struct drm_encoder *encoder, @@ -128,6 +128,22 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder, return true; } +static void +intel_hdmi_sink_detect(struct drm_connector *connector) +{ + struct intel_output *intel_output = to_intel_output(connector); + struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; + struct edid *edid = NULL; + + edid = drm_get_edid(&intel_output->base, + &intel_output->ddc_bus->adapter); + if (edid != NULL) { + hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid); + kfree(edid); + intel_output->base.display_info.raw_edid = NULL; + } +} + static enum drm_connector_status intel_hdmi_detect(struct drm_connector *connector) { @@ -158,9 +174,10 @@ intel_hdmi_detect(struct drm_connector *connector) return connector_status_unknown; } - if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) + if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) { + intel_hdmi_sink_detect(connector); return connector_status_connected; - else + } else return connector_status_disconnected; } diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 7b31f55f55c..9913651c1e1 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1357,6 +1357,23 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) intel_sdvo_read_response(intel_output, &response, 2); } +static void +intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) +{ + struct intel_output *intel_output = to_intel_output(connector); + struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; + struct edid *edid = NULL; + + intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus); + edid = drm_get_edid(&intel_output->base, + &intel_output->ddc_bus->adapter); + if (edid != NULL) { + sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid); + kfree(edid); + intel_output->base.display_info.raw_edid = NULL; + } +} + static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector) { u8 response[2]; @@ -1371,9 +1388,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect if (status != SDVO_CMD_STATUS_SUCCESS) return connector_status_unknown; - if ((response[0] != 0) || (response[1] != 0)) + if ((response[0] != 0) || (response[1] != 0)) { + intel_sdvo_hdmi_sink_detect(connector); return connector_status_connected; - else + } else return connector_status_disconnected; } diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 
0e8a9185f67..d73f5f473e3 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -692,6 +692,16 @@ config SENSORS_PCF8591 These devices are hard to detect and rarely found on mainstream hardware. If unsure, say N. +config SENSORS_SHT15 + tristate "Sensirion humidity and temperature sensors. SHT15 and compat." + depends on GENERIC_GPIO + help + If you say yes here you get support for the Sensirion SHT10, SHT11, + SHT15, SHT71, SHT75 humidity and temperature sensors. + + This driver can also be built as a module. If so, the module + will be called sht15. + config SENSORS_SIS5595 tristate "Silicon Integrated Systems Corp. SiS5595" depends on PCI diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 1d3757837b4..0ae26984ba4 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -76,6 +76,7 @@ obj-$(CONFIG_SENSORS_MAX6650) += max6650.o obj-$(CONFIG_SENSORS_PC87360) += pc87360.o obj-$(CONFIG_SENSORS_PC87427) += pc87427.o obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o +obj-$(CONFIG_SENSORS_SHT15) += sht15.o obj-$(CONFIG_SENSORS_SIS5595) += sis5595.o obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c index 55d3dc565be..abca7e9f953 100644 --- a/drivers/hwmon/hp_accel.c +++ b/drivers/hwmon/hp_accel.c @@ -34,7 +34,6 @@ #include <linux/wait.h> #include <linux/poll.h> #include <linux/freezer.h> -#include <linux/version.h> #include <linux/uaccess.h> #include <linux/leds.h> #include <acpi/acpi_drivers.h> diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c new file mode 100644 index 00000000000..6cbdc2fea73 --- /dev/null +++ b/drivers/hwmon/sht15.c @@ -0,0 +1,692 @@ +/* + * sht15.c - support for the SHT15 Temperature and Humidity Sensor + * + * Copyright (c) 2009 Jonathan Cameron + * + * Copyright (c) 2007 Wouter Horre + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Currently ignoring checksum on readings. + * Default resolution only (14bit temp, 12bit humidity) + * Ignoring battery status. + * Heater not enabled. + * Timings are all conservative. + * + * Data sheet available (1/2009) at + * http://www.sensirion.ch/en/pdf/product_information/Datasheet-humidity-sensor-SHT1x.pdf + * + * Regulator supply name = vcc + */ + +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/gpio.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/jiffies.h> +#include <linux/err.h> +#include <linux/sht15.h> +#include <linux/regulator/consumer.h> +#include <asm/atomic.h> + +#define SHT15_MEASURE_TEMP 3 +#define SHT15_MEASURE_RH 5 + +#define SHT15_READING_NOTHING 0 +#define SHT15_READING_TEMP 1 +#define SHT15_READING_HUMID 2 + +/* Min timings in nsecs */ +#define SHT15_TSCKL 100 /* clock low */ +#define SHT15_TSCKH 100 /* clock high */ +#define SHT15_TSU 150 /* data setup time */ + +/** + * struct sht15_temppair - elements of voltage dependent temp calc + * @vdd: supply voltage in microvolts + * @d1: see data sheet + */ +struct sht15_temppair { + int vdd; /* microvolts */ + int d1; +}; + +/* Table 9 from data sheet - relates temperature calculation + * to supply voltage.
+ */ +static const struct sht15_temppair temppoints[] = { + { 2500000, -39400 }, + { 3000000, -39600 }, + { 3500000, -39700 }, + { 4000000, -39800 }, + { 5000000, -40100 }, +}; + +/** + * struct sht15_data - device instance specific data + * @pdata: platform data (gpio's etc) + * @read_work: bh of interrupt handler + * @wait_queue: wait queue for getting values from device + * @val_temp: last temperature value read from device + * @val_humid: last humidity value read from device + * @flag: status flag used to identify what the last request was + * @valid: are the current stored values valid (start condition) + * @last_updat: time of last update + * @read_lock: mutex to ensure only one read in progress + * at a time. + * @dev: associated device structure + * @hwmon_dev: device associated with hwmon subsystem + * @reg: associated regulator (if specified) + * @nb: notifier block to handle notifications of voltage changes + * @supply_uV: local copy of supply voltage used to allow + * use of regulator consumer if available + * @supply_uV_valid: indicates that an updated value has not yet + * been obtained from the regulator and so any calculations + * based upon it will be invalid. + * @update_supply_work: work struct that is used to update the supply_uV + * @interrupt_handled: flag used to indicate a handler has been scheduled + */ +struct sht15_data { + struct sht15_platform_data *pdata; + struct work_struct read_work; + wait_queue_head_t wait_queue; + uint16_t val_temp; + uint16_t val_humid; + u8 flag; + u8 valid; + unsigned long last_updat; + struct mutex read_lock; + struct device *dev; + struct device *hwmon_dev; + struct regulator *reg; + struct notifier_block nb; + int supply_uV; + int supply_uV_valid; + struct work_struct update_supply_work; + atomic_t interrupt_handled; +}; + +/** + * sht15_connection_reset() - reset the comms interface + * @data: sht15 specific data + * + * This implements section 3.4 of the data sheet + */ +static void sht15_connection_reset(struct sht15_data *data) +{ + int i; + gpio_direction_output(data->pdata->gpio_data, 1); + ndelay(SHT15_TSCKL); + gpio_set_value(data->pdata->gpio_sck, 0); + ndelay(SHT15_TSCKL); + for (i = 0; i < 9; ++i) { + gpio_set_value(data->pdata->gpio_sck, 1); + ndelay(SHT15_TSCKH); + gpio_set_value(data->pdata->gpio_sck, 0); + ndelay(SHT15_TSCKL); + } +} +/** + * sht15_send_bit() - send an individual bit to the device + * @data: device state data + * @val: value of bit to be sent + **/ +static inline void sht15_send_bit(struct sht15_data *data, int val) +{ + + gpio_set_value(data->pdata->gpio_data, val); + ndelay(SHT15_TSU); + gpio_set_value(data->pdata->gpio_sck, 1); + ndelay(SHT15_TSCKH); + gpio_set_value(data->pdata->gpio_sck, 0); + ndelay(SHT15_TSCKL); /* clock low time */ +} + +/** + * sht15_transmission_start() - specific sequence for new transmission + * + * @data: device state data + * Timings for this are not documented on the data sheet, so very + * conservative ones used in implementation. This implements + * figure 12 on the data sheet.
+ **/ +static void sht15_transmission_start(struct sht15_data *data) +{ + /* ensure data is high and output */ + gpio_direction_output(data->pdata->gpio_data, 1); + ndelay(SHT15_TSU); + gpio_set_value(data->pdata->gpio_sck, 0); + ndelay(SHT15_TSCKL); + gpio_set_value(data->pdata->gpio_sck, 1); + ndelay(SHT15_TSCKH); + gpio_set_value(data->pdata->gpio_data, 0); + ndelay(SHT15_TSU); + gpio_set_value(data->pdata->gpio_sck, 0); + ndelay(SHT15_TSCKL); + gpio_set_value(data->pdata->gpio_sck, 1); + ndelay(SHT15_TSCKH); + gpio_set_value(data->pdata->gpio_data, 1); + ndelay(SHT15_TSU); + gpio_set_value(data->pdata->gpio_sck, 0); + ndelay(SHT15_TSCKL); +} +/** + * sht15_send_byte() - send a single byte to the device + * @data: device state + * @byte: value to be sent + **/ +static void sht15_send_byte(struct sht15_data *data, u8 byte) +{ + int i; + for (i = 0; i < 8; i++) { + sht15_send_bit(data, !!(byte & 0x80)); + byte <<= 1; + } +} +/** + * sht15_wait_for_response() - checks for ack from device + * @data: device state + **/ +static int sht15_wait_for_response(struct sht15_data *data) +{ + gpio_direction_input(data->pdata->gpio_data); + gpio_set_value(data->pdata->gpio_sck, 1); + ndelay(SHT15_TSCKH); + if (gpio_get_value(data->pdata->gpio_data)) { + gpio_set_value(data->pdata->gpio_sck, 0); + dev_err(data->dev, "Command not acknowledged\n"); + sht15_connection_reset(data); + return -EIO; + } + gpio_set_value(data->pdata->gpio_sck, 0); + ndelay(SHT15_TSCKL); + return 0; +} + +/** + * sht15_send_cmd() - Sends a command to the device. + * @data: device state + * @cmd: command byte to be sent + * + * On entry, sck is output low, data is output pull high + * and the interrupt disabled. + **/ +static int sht15_send_cmd(struct sht15_data *data, u8 cmd) +{ + int ret = 0; + sht15_transmission_start(data); + sht15_send_byte(data, cmd); + ret = sht15_wait_for_response(data); + return ret; +} +/** + * sht15_update_single_val() - get a new value from device + * @data: device instance specific data + * @command: command sent to request value + * @timeout_msecs: timeout after which comms are assumed + * to have failed and are reset. + **/ +static inline int sht15_update_single_val(struct sht15_data *data, + int command, + int timeout_msecs) +{ + int ret; + ret = sht15_send_cmd(data, command); + if (ret) + return ret; + + gpio_direction_input(data->pdata->gpio_data); + atomic_set(&data->interrupt_handled, 0); + + enable_irq(gpio_to_irq(data->pdata->gpio_data)); + if (gpio_get_value(data->pdata->gpio_data) == 0) { + disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data)); + /* Only relevant if the interrupt hasn't occurred.
*/ + if (!atomic_read(&data->interrupt_handled)) + schedule_work(&data->read_work); + } + ret = wait_event_timeout(data->wait_queue, + (data->flag == SHT15_READING_NOTHING), + msecs_to_jiffies(timeout_msecs)); + if (ret == 0) {/* timeout occurred */ + disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data)); + sht15_connection_reset(data); + return -ETIME; + } + return 0; +} + +/** + * sht15_update_vals() - get updated readings from device if too old + * @data: device state + **/ +static int sht15_update_vals(struct sht15_data *data) +{ + int ret = 0; + int timeout = HZ; + + mutex_lock(&data->read_lock); + if (time_after(jiffies, data->last_updat + timeout) + || !data->valid) { + data->flag = SHT15_READING_HUMID; + ret = sht15_update_single_val(data, SHT15_MEASURE_RH, 160); + if (ret) + goto error_ret; + data->flag = SHT15_READING_TEMP; + ret = sht15_update_single_val(data, SHT15_MEASURE_TEMP, 400); + if (ret) + goto error_ret; + data->valid = 1; + data->last_updat = jiffies; + } +error_ret: + mutex_unlock(&data->read_lock); + + return ret; +} + +/** + * sht15_calc_temp() - convert the raw reading to a temperature + * @data: device state + * + * As per section 4.3 of the data sheet. + **/ +static inline int sht15_calc_temp(struct sht15_data *data) +{ + int d1 = 0; + int i; + + for (i = 1; i < ARRAY_SIZE(temppoints) - 1; i++) + /* Find pointer to interpolate */ + if (data->supply_uV > temppoints[i - 1].vdd) { + d1 = (data->supply_uV - temppoints[i - 1].vdd) + * (temppoints[i].d1 - temppoints[i - 1].d1) + / (temppoints[i].vdd - temppoints[i - 1].vdd) + + temppoints[i - 1].d1; + break; + } + + return data->val_temp*10 + d1; +} + +/** + * sht15_calc_humid() - using last temperature convert raw to humid + * @data: device state + * + * This is the temperature compensated version as per section 4.2 of + * the data sheet. + **/ +static inline int sht15_calc_humid(struct sht15_data *data) +{ + int RHlinear; /* milli percent */ + int temp = sht15_calc_temp(data); + + const int c1 = -4; + const int c2 = 40500; /* x 10 ^ -6 */ + const int c3 = 2800; /* x10 ^ -9 */ + + RHlinear = c1*1000 + + c2 * data->val_humid/1000 + + (data->val_humid * data->val_humid * c3)/1000000; + return (temp - 25000) * (10000 + 800 * data->val_humid) + / 1000000 + RHlinear; +} + +static ssize_t sht15_show_temp(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int ret; + struct sht15_data *data = dev_get_drvdata(dev); + + /* Technically no need to read humidity as well */ + ret = sht15_update_vals(data); + + return ret ? ret : sprintf(buf, "%d\n", + sht15_calc_temp(data)); +} + +static ssize_t sht15_show_humidity(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int ret; + struct sht15_data *data = dev_get_drvdata(dev); + + ret = sht15_update_vals(data); + + return ret ?
ret : sprintf(buf, "%d\n", sht15_calc_humid(data)); + +}; +static ssize_t show_name(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct platform_device *pdev = to_platform_device(dev); + return sprintf(buf, "%s\n", pdev->name); +} + +static SENSOR_DEVICE_ATTR(temp1_input, + S_IRUGO, sht15_show_temp, + NULL, 0); +static SENSOR_DEVICE_ATTR(humidity1_input, + S_IRUGO, sht15_show_humidity, + NULL, 0); +static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); +static struct attribute *sht15_attrs[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_humidity1_input.dev_attr.attr, + &dev_attr_name.attr, + NULL, +}; + +static const struct attribute_group sht15_attr_group = { + .attrs = sht15_attrs, +}; + +static irqreturn_t sht15_interrupt_fired(int irq, void *d) +{ + struct sht15_data *data = d; + /* First disable the interrupt */ + disable_irq_nosync(irq); + atomic_inc(&data->interrupt_handled); + /* Then schedule a reading work struct */ + if (data->flag != SHT15_READING_NOTHING) + schedule_work(&data->read_work); + return IRQ_HANDLED; +} + +/* Each byte of data is acknowledged by pulling the data line + * low for one clock pulse. + */ +static void sht15_ack(struct sht15_data *data) +{ + gpio_direction_output(data->pdata->gpio_data, 0); + ndelay(SHT15_TSU); + gpio_set_value(data->pdata->gpio_sck, 1); + ndelay(SHT15_TSU); + gpio_set_value(data->pdata->gpio_sck, 0); + ndelay(SHT15_TSU); + gpio_set_value(data->pdata->gpio_data, 1); + + gpio_direction_input(data->pdata->gpio_data); +} +/** + * sht15_end_transmission() - notify device of end of transmission + * @data: device state + * + * This is basically a NAK. (single clock pulse, data high) + **/ +static void sht15_end_transmission(struct sht15_data *data) +{ + gpio_direction_output(data->pdata->gpio_data, 1); + ndelay(SHT15_TSU); + gpio_set_value(data->pdata->gpio_sck, 1); + ndelay(SHT15_TSCKH); + gpio_set_value(data->pdata->gpio_sck, 0); + ndelay(SHT15_TSCKL); +} + +static void sht15_bh_read_data(struct work_struct *work_s) +{ + int i; + uint16_t val = 0; + struct sht15_data *data + = container_of(work_s, struct sht15_data, + read_work); + /* Firstly, verify the line is low */ + if (gpio_get_value(data->pdata->gpio_data)) { + /* If not, then start the interrupt again - care + here as could have gone low in meantime so verify + it hasn't! 
+ */ + atomic_set(&data->interrupt_handled, 0); + enable_irq(gpio_to_irq(data->pdata->gpio_data)); + /* If still not occurred or another handler has been scheduled */ + if (gpio_get_value(data->pdata->gpio_data) + || atomic_read(&data->interrupt_handled)) + return; + } + /* Read the data back from the device */ + for (i = 0; i < 16; ++i) { + val <<= 1; + gpio_set_value(data->pdata->gpio_sck, 1); + ndelay(SHT15_TSCKH); + val |= !!gpio_get_value(data->pdata->gpio_data); + gpio_set_value(data->pdata->gpio_sck, 0); + ndelay(SHT15_TSCKL); + if (i == 7) + sht15_ack(data); + } + /* Tell the device we are done */ + sht15_end_transmission(data); + + switch (data->flag) { + case SHT15_READING_TEMP: + data->val_temp = val; + break; + case SHT15_READING_HUMID: + data->val_humid = val; + break; + } + + data->flag = SHT15_READING_NOTHING; + wake_up(&data->wait_queue); +} + +static void sht15_update_voltage(struct work_struct *work_s) +{ + struct sht15_data *data + = container_of(work_s, struct sht15_data, + update_supply_work); + data->supply_uV = regulator_get_voltage(data->reg); +} + +/** + * sht15_invalidate_voltage() - mark supply voltage invalid when notified by reg + * @nb: associated notification structure + * @event: voltage regulator state change event code + * @ignored: function parameter - ignored here + * + * Note that as the notification code holds the regulator lock, we have + * to schedule an update of the supply voltage rather than getting it directly. + **/ +static int sht15_invalidate_voltage(struct notifier_block *nb, + unsigned long event, + void *ignored) +{ + struct sht15_data *data = container_of(nb, struct sht15_data, nb); + + if (event == REGULATOR_EVENT_VOLTAGE_CHANGE) + data->supply_uV_valid = false; + schedule_work(&data->update_supply_work); + + return NOTIFY_OK; +} + +static int __devinit sht15_probe(struct platform_device *pdev) +{ + int ret = 0; + struct sht15_data *data = kzalloc(sizeof(*data), GFP_KERNEL); + + if (!data) { + ret = -ENOMEM; + dev_err(&pdev->dev, "kzalloc failed"); + goto error_ret; + } + + INIT_WORK(&data->read_work, sht15_bh_read_data); + INIT_WORK(&data->update_supply_work, sht15_update_voltage); + platform_set_drvdata(pdev, data); + mutex_init(&data->read_lock); + data->dev = &pdev->dev; + init_waitqueue_head(&data->wait_queue); + + if (pdev->dev.platform_data == NULL) { + dev_err(&pdev->dev, "no platform data supplied"); + ret = -EINVAL; + goto err_free_data; + } + data->pdata = pdev->dev.platform_data; + data->supply_uV = data->pdata->supply_mv*1000; + +/* If a regulator is available, query what the supply voltage actually is!*/ + data->reg = regulator_get(data->dev, "vcc"); + if (!IS_ERR(data->reg)) { + data->supply_uV = regulator_get_voltage(data->reg); + regulator_enable(data->reg); + /* setup a notifier block to update this if another device + * causes the voltage to change */ + data->nb.notifier_call = &sht15_invalidate_voltage; + ret = regulator_register_notifier(data->reg, &data->nb); + } +/* Try requesting the GPIOs */ + ret = gpio_request(data->pdata->gpio_sck, "SHT15 sck"); + if (ret) { + dev_err(&pdev->dev, "gpio request failed"); + goto err_free_data; + } + gpio_direction_output(data->pdata->gpio_sck, 0); + ret = gpio_request(data->pdata->gpio_data, "SHT15 data"); + if (ret) { + dev_err(&pdev->dev, "gpio request failed"); + goto err_release_gpio_sck; + } + ret = sysfs_create_group(&pdev->dev.kobj, &sht15_attr_group); + if (ret) { + dev_err(&pdev->dev, "sysfs create failed"); + goto err_release_gpio_data; + } + + ret =
request_irq(gpio_to_irq(data->pdata->gpio_data), + sht15_interrupt_fired, + IRQF_TRIGGER_FALLING, + "sht15 data", + data); + if (ret) { + dev_err(&pdev->dev, "failed to get irq for data line"); + goto err_release_gpio_data; + } + disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data)); + sht15_connection_reset(data); + sht15_send_cmd(data, 0x1E); + + data->hwmon_dev = hwmon_device_register(data->dev); + if (IS_ERR(data->hwmon_dev)) { + ret = PTR_ERR(data->hwmon_dev); + goto err_release_gpio_data; + } + return 0; + +err_release_gpio_data: + gpio_free(data->pdata->gpio_data); +err_release_gpio_sck: + gpio_free(data->pdata->gpio_sck); +err_free_data: + kfree(data); +error_ret: + + return ret; +} + +static int __devexit sht15_remove(struct platform_device *pdev) +{ + struct sht15_data *data = platform_get_drvdata(pdev); + + /* Make sure any reads from the device are done and + * prevent new ones beginning */ + mutex_lock(&data->read_lock); + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&pdev->dev.kobj, &sht15_attr_group); + if (!IS_ERR(data->reg)) { + regulator_unregister_notifier(data->reg, &data->nb); + regulator_disable(data->reg); + regulator_put(data->reg); + } + + free_irq(gpio_to_irq(data->pdata->gpio_data), data); + gpio_free(data->pdata->gpio_data); + gpio_free(data->pdata->gpio_sck); + mutex_unlock(&data->read_lock); + kfree(data); + return 0; +} + + +static struct platform_driver sht_drivers[] = { + { + .driver = { + .name = "sht10", + .owner = THIS_MODULE, + }, + .probe = sht15_probe, + .remove = sht15_remove, + }, { + .driver = { + .name = "sht11", + .owner = THIS_MODULE, + }, + .probe = sht15_probe, + .remove = sht15_remove, + }, { + .driver = { + .name = "sht15", + .owner = THIS_MODULE, + }, + .probe = sht15_probe, + .remove = sht15_remove, + }, { + .driver = { + .name = "sht71", + .owner = THIS_MODULE, + }, + .probe = sht15_probe, + .remove = sht15_remove, + }, { + .driver = { + .name = "sht75", + .owner = THIS_MODULE, + }, + .probe = sht15_probe, + .remove = sht15_remove, + }, +}; + + +static int __init sht15_init(void) +{ + int ret; + int i; + + for (i = 0; i < ARRAY_SIZE(sht_drivers); i++) { + ret = platform_driver_register(&sht_drivers[i]); + if (ret) + goto error_unreg; + } + + return 0; + +error_unreg: + while (--i >= 0) + platform_driver_unregister(&sht_drivers[i]); + + return ret; +} +module_init(sht15_init); + +static void __exit sht15_exit(void) +{ + int i; + for (i = ARRAY_SIZE(sht_drivers) - 1; i >= 0; i--) + platform_driver_unregister(&sht_drivers[i]); +} +module_exit(sht15_exit); + +MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c index f68e5f8e23e..6318f7ddc1d 100644 --- a/drivers/i2c/algos/i2c-algo-pca.c +++ b/drivers/i2c/algos/i2c-algo-pca.c @@ -190,7 +190,7 @@ static int pca_xfer(struct i2c_adapter *i2c_adap, int completed = 1; unsigned long timeout = jiffies + i2c_adap->timeout; - while (pca_status(adap) != 0xf8) { + while ((state = pca_status(adap)) != 0xf8) { if (time_before(jiffies, timeout)) { msleep(10); } else { diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 94eae5c3cbc..a48c8aee021 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -604,12 +604,14 @@ comment "Graphics adapter I2C/DDC channel drivers" depends on PCI config I2C_VOODOO3 - tristate "Voodoo 3" + tristate "Voodoo 3 (DEPRECATED)" depends on PCI select I2C_ALGOBIT help If you say yes to this option, support will be included for the - Voodoo 3 I2C interface.
+ Voodoo 3 I2C interface. This driver is deprecated and you should + use the tdfxfb driver instead, which additionally provides + framebuffer support. This driver can also be built as a module. If so, the module will be called i2c-voodoo3. diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index b6f3a0de6ca..85e2e919d1c 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -716,8 +716,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver) /* new style driver methods can't mix with legacy ones */ if (is_newstyle_driver(driver)) { - if (driver->attach_adapter || driver->detach_adapter - || driver->detach_client) { + if (driver->detach_adapter || driver->detach_client) { printk(KERN_WARNING "i2c-core: driver [%s] is confused\n", driver->driver.name); diff --git a/drivers/ide/at91_ide.c b/drivers/ide/at91_ide.c index 8eda552326e..403d0e4265d 100644 --- a/drivers/ide/at91_ide.c +++ b/drivers/ide/at91_ide.c @@ -20,7 +20,6 @@ * */ -#include <linux/version.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/clk.h> @@ -175,90 +174,6 @@ static void at91_ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, leave_16bit(chipselect, mode); } -static u8 ide_mm_inb(unsigned long port) -{ - return readb((void __iomem *) port); -} - -static void ide_mm_outb(u8 value, unsigned long port) -{ - writeb(value, (void __iomem *) port); -} - -static void at91_ide_tf_load(ide_drive_t *drive, struct ide_cmd *cmd) -{ - ide_hwif_t *hwif = drive->hwif; - struct ide_io_ports *io_ports = &hwif->io_ports; - struct ide_taskfile *tf = &cmd->tf; - u8 HIHI = (cmd->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF; - - if (cmd->ftf_flags & IDE_FTFLAG_FLAGGED) - HIHI = 0xFF; - - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE) - ide_mm_outb(tf->hob_feature, io_ports->feature_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_NSECT) - ide_mm_outb(tf->hob_nsect, io_ports->nsect_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAL) - ide_mm_outb(tf->hob_lbal, io_ports->lbal_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAM) - ide_mm_outb(tf->hob_lbam, io_ports->lbam_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAH) - ide_mm_outb(tf->hob_lbah, io_ports->lbah_addr); - - if (cmd->tf_flags & IDE_TFLAG_OUT_FEATURE) - ide_mm_outb(tf->feature, io_ports->feature_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_NSECT) - ide_mm_outb(tf->nsect, io_ports->nsect_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_LBAL) - ide_mm_outb(tf->lbal, io_ports->lbal_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_LBAM) - ide_mm_outb(tf->lbam, io_ports->lbam_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_LBAH) - ide_mm_outb(tf->lbah, io_ports->lbah_addr); - - if (cmd->tf_flags & IDE_TFLAG_OUT_DEVICE) - ide_mm_outb((tf->device & HIHI) | drive->select, io_ports->device_addr); -} - -static void at91_ide_tf_read(ide_drive_t *drive, struct ide_cmd *cmd) -{ - ide_hwif_t *hwif = drive->hwif; - struct ide_io_ports *io_ports = &hwif->io_ports; - struct ide_taskfile *tf = &cmd->tf; - - /* be sure we're looking at the low order bits */ - ide_mm_outb(ATA_DEVCTL_OBS, io_ports->ctl_addr); - - if (cmd->tf_flags & IDE_TFLAG_IN_ERROR) - tf->error = ide_mm_inb(io_ports->feature_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_NSECT) - tf->nsect = ide_mm_inb(io_ports->nsect_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_LBAL) - tf->lbal = ide_mm_inb(io_ports->lbal_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_LBAM) - tf->lbam = ide_mm_inb(io_ports->lbam_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_LBAH) - tf->lbah = ide_mm_inb(io_ports->lbah_addr); - if 
(cmd->tf_flags & IDE_TFLAG_IN_DEVICE) - tf->device = ide_mm_inb(io_ports->device_addr); - - if (cmd->tf_flags & IDE_TFLAG_LBA48) { - ide_mm_outb(ATA_HOB | ATA_DEVCTL_OBS, io_ports->ctl_addr); - - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_ERROR) - tf->hob_error = ide_mm_inb(io_ports->feature_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_NSECT) - tf->hob_nsect = ide_mm_inb(io_ports->nsect_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAL) - tf->hob_lbal = ide_mm_inb(io_ports->lbal_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAM) - tf->hob_lbam = ide_mm_inb(io_ports->lbam_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAH) - tf->hob_lbah = ide_mm_inb(io_ports->lbah_addr); - } -} - static void at91_ide_set_pio_mode(ide_drive_t *drive, const u8 pio) { struct ide_timing *timing; @@ -284,8 +199,8 @@ static const struct ide_tp_ops at91_ide_tp_ops = { .write_devctl = ide_write_devctl, .dev_select = ide_dev_select, - .tf_load = at91_ide_tf_load, - .tf_read = at91_ide_tf_read, + .tf_load = ide_tf_load, + .tf_read = ide_tf_read, .input_data = at91_ide_input_data, .output_data = at91_ide_output_data, @@ -300,7 +215,7 @@ static const struct ide_port_info at91_ide_port_info __initdata = { .tp_ops = &at91_ide_tp_ops, .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA | IDE_HFLAG_SINGLE | IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_UNMASK_IRQS, - .pio_mask = ATA_PIO5, + .pio_mask = ATA_PIO6, }; /* diff --git a/drivers/ide/falconide.c b/drivers/ide/falconide.c index afa2af9a362..0e2df6755ec 100644 --- a/drivers/ide/falconide.c +++ b/drivers/ide/falconide.c @@ -20,6 +20,7 @@ #include <asm/atarihw.h> #include <asm/atariints.h> #include <asm/atari_stdma.h> +#include <asm/ide.h> #define DRV_NAME "falconide" @@ -67,8 +68,10 @@ static void falconide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, { unsigned long data_addr = drive->hwif->io_ports.data_addr; - if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) - return insw(data_addr, buf, (len + 1) / 2); + if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) { + __ide_mm_insw(data_addr, buf, (len + 1) / 2); + return; + } raw_insw_swapw((u16 *)data_addr, buf, (len + 1) / 2); } @@ -78,8 +81,10 @@ static void falconide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, { unsigned long data_addr = drive->hwif->io_ports.data_addr; - if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) - return outsw(data_addr, buf, (len + 1) / 2); + if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) { + __ide_mm_outsw(data_addr, buf, (len + 1) / 2); + return; + } raw_outsw_swapw((u16 *)data_addr, buf, (len + 1) / 2); } diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c index 12f436951bf..77f79d26b26 100644 --- a/drivers/ide/ide-acpi.c +++ b/drivers/ide/ide-acpi.c @@ -318,8 +318,9 @@ static int do_drive_set_taskfiles(ide_drive_t *drive, /* convert GTF to taskfile */ memset(&cmd, 0, sizeof(cmd)); - memcpy(&cmd.tf_array[7], gtf, REGS_PER_GTF); - cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; + memcpy(&cmd.tf.feature, gtf, REGS_PER_GTF); + cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; + cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; err = ide_no_data_taskfile(drive, &cmd); if (err) { diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c index 3e43b889dd6..7201b176d75 100644 --- a/drivers/ide/ide-atapi.c +++ b/drivers/ide/ide-atapi.c @@ -254,16 +254,13 @@ EXPORT_SYMBOL_GPL(ide_cd_get_xferlen); void ide_read_bcount_and_ireason(ide_drive_t *drive, u16 *bcount, u8 *ireason) { - struct 
ide_cmd cmd; + struct ide_taskfile tf; - memset(&cmd, 0, sizeof(cmd)); - cmd.tf_flags = IDE_TFLAG_IN_LBAH | IDE_TFLAG_IN_LBAM | - IDE_TFLAG_IN_NSECT; + drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_NSECT | + IDE_VALID_LBAM | IDE_VALID_LBAH); - drive->hwif->tp_ops->tf_read(drive, &cmd); - - *bcount = (cmd.tf.lbah << 8) | cmd.tf.lbam; - *ireason = cmd.tf.nsect & 3; + *bcount = (tf.lbah << 8) | tf.lbam; + *ireason = tf.nsect & 3; } EXPORT_SYMBOL_GPL(ide_read_bcount_and_ireason); @@ -439,12 +436,12 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) return ide_started; } -static void ide_init_packet_cmd(struct ide_cmd *cmd, u32 tf_flags, +static void ide_init_packet_cmd(struct ide_cmd *cmd, u8 valid_tf, u16 bcount, u8 dma) { - cmd->protocol = dma ? ATAPI_PROT_DMA : ATAPI_PROT_PIO; - cmd->tf_flags |= IDE_TFLAG_OUT_LBAH | IDE_TFLAG_OUT_LBAM | - IDE_TFLAG_OUT_FEATURE | tf_flags; + cmd->protocol = dma ? ATAPI_PROT_DMA : ATAPI_PROT_PIO; + cmd->valid.out.tf = IDE_VALID_LBAH | IDE_VALID_LBAM | + IDE_VALID_FEATURE | valid_tf; cmd->tf.command = ATA_CMD_PACKET; cmd->tf.feature = dma; /* Use PIO/DMA */ cmd->tf.lbam = bcount & 0xff; @@ -453,14 +450,11 @@ static void ide_init_packet_cmd(struct ide_cmd *cmd, u32 tf_flags, static u8 ide_read_ireason(ide_drive_t *drive) { - struct ide_cmd cmd; - - memset(&cmd, 0, sizeof(cmd)); - cmd.tf_flags = IDE_TFLAG_IN_NSECT; + struct ide_taskfile tf; - drive->hwif->tp_ops->tf_read(drive, &cmd); + drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_NSECT); - return cmd.tf.nsect & 3; + return tf.nsect & 3; } static u8 ide_wait_ireason(ide_drive_t *drive, u8 ireason) @@ -588,12 +582,12 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd) ide_expiry_t *expiry = NULL; struct request *rq = hwif->rq; unsigned int timeout; - u32 tf_flags; u16 bcount; + u8 valid_tf; u8 drq_int = !!(drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT); if (dev_is_idecd(drive)) { - tf_flags = IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL; + valid_tf = IDE_VALID_NSECT | IDE_VALID_LBAL; bcount = ide_cd_get_xferlen(rq); expiry = ide_cd_expiry; timeout = ATAPI_WAIT_PC; @@ -607,7 +601,7 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd) pc->xferred = 0; pc->cur_pos = pc->buf; - tf_flags = IDE_TFLAG_OUT_DEVICE; + valid_tf = IDE_VALID_DEVICE; bcount = ((drive->media == ide_tape) ? pc->req_xfer : min(pc->req_xfer, 63 * 1024)); @@ -627,7 +621,7 @@ ide_startstop_t ide_issue_pc(ide_drive_t *drive, struct ide_cmd *cmd) : WAIT_TAPE_CMD; } - ide_init_packet_cmd(cmd, tf_flags, bcount, drive->dma); + ide_init_packet_cmd(cmd, valid_tf, bcount, drive->dma); (void)do_rw_taskfile(drive, cmd); diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 35729a47f79..3aec19d1fdf 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -265,35 +265,62 @@ static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq) cdrom_analyze_sense_data(drive, NULL, sense); } + /* + * Allow the drive 5 seconds to recover; some devices will return NOT_READY + * while flushing data from cache. 
+ * + * returns: 0 failed (write timeout expired) + * 1 success + */ +static int ide_cd_breathe(ide_drive_t *drive, struct request *rq) +{ + + struct cdrom_info *info = drive->driver_data; + + if (!rq->errors) + info->write_timeout = jiffies + ATAPI_WAIT_WRITE_BUSY; + + rq->errors = 1; + + if (time_after(jiffies, info->write_timeout)) + return 0; + else { + struct request_queue *q = drive->queue; + unsigned long flags; + + /* + * take a breather relying on the unplug timer to kick us again + */ + + spin_lock_irqsave(q->queue_lock, flags); + blk_plug_device(q); + spin_unlock_irqrestore(q->queue_lock, flags); + + return 1; + } +} + +/** * Returns: * 0: if the request should be continued. * 1: if the request will be going through error recovery. * 2: if the request should be ended. */ -static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret) +static int cdrom_decode_status(ide_drive_t *drive, u8 stat) { ide_hwif_t *hwif = drive->hwif; struct request *rq = hwif->rq; - int stat, err, sense_key; - - /* check for errors */ - stat = hwif->tp_ops->read_status(hwif); - - if (stat_ret) - *stat_ret = stat; - - if (OK_STAT(stat, good_stat, BAD_R_STAT)) - return 0; + int err, sense_key, do_end_request = 0; + u8 quiet = rq->cmd_flags & REQ_QUIET; /* get the IDE error register */ err = ide_read_error(drive); sense_key = err >> 4; - ide_debug_log(IDE_DBG_RQ, "stat: 0x%x, good_stat: 0x%x, cmd[0]: 0x%x, " - "rq->cmd_type: 0x%x, err: 0x%x", - stat, good_stat, rq->cmd[0], rq->cmd_type, - err); + ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, rq->cmd_type: 0x%x, err: 0x%x, " + "stat 0x%x", + rq->cmd[0], rq->cmd_type, err, stat); if (blk_sense_request(rq)) { /* @@ -303,151 +330,108 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret) */ rq->cmd_flags |= REQ_FAILED; return 2; - } else if (blk_pc_request(rq) || rq->cmd_type == REQ_TYPE_ATA_PC) { - /* All other functions, except for READ. */ + } - /* - * if we have an error, pass back CHECK_CONDITION as the - * scsi status byte - */ - if (blk_pc_request(rq) && !rq->errors) - rq->errors = SAM_STAT_CHECK_CONDITION; + /* if we have an error, pass CHECK_CONDITION as the SCSI status byte */ + if (blk_pc_request(rq) && !rq->errors) + rq->errors = SAM_STAT_CHECK_CONDITION; - /* check for tray open */ - if (sense_key == NOT_READY) { - cdrom_saw_media_change(drive); - } else if (sense_key == UNIT_ATTENTION) { - /* check for media change */ + if (blk_noretry_request(rq)) + do_end_request = 1; + + switch (sense_key) { + case NOT_READY: + if (blk_fs_request(rq) && rq_data_dir(rq) == WRITE) { + if (ide_cd_breathe(drive, rq)) + return 1; + } else { cdrom_saw_media_change(drive); - return 0; - } else if (sense_key == ILLEGAL_REQUEST && - rq->cmd[0] == GPCMD_START_STOP_UNIT) { - /* - * Don't print error message for this condition-- - * SFF8090i indicates that 5/24/00 is the correct - * response to a request to close the tray if the - * drive doesn't have that capability. - * cdrom_log_sense() knows this! 
- */ - } else if (!(rq->cmd_flags & REQ_QUIET)) { - /* otherwise, print an error */ - ide_dump_status(drive, "packet command error", stat); + + if (blk_fs_request(rq) && !quiet) + printk(KERN_ERR PFX "%s: tray open\n", + drive->name); } + do_end_request = 1; + break; + case UNIT_ATTENTION: + cdrom_saw_media_change(drive); - rq->cmd_flags |= REQ_FAILED; + if (blk_fs_request(rq) == 0) + return 0; /* - * instead of playing games with moving completions around, - * remove failed request completely and end it when the - * request sense has completed + * Arrange to retry the request but be sure to give up if we've + * retried too many times. */ - goto end_request; - - } else if (blk_fs_request(rq)) { - int do_end_request = 0; - - /* handle errors from READ and WRITE requests */ - - if (blk_noretry_request(rq)) + if (++rq->errors > ERROR_MAX) do_end_request = 1; - - if (sense_key == NOT_READY) { - /* tray open */ - if (rq_data_dir(rq) == READ) { - cdrom_saw_media_change(drive); - - /* fail the request */ - printk(KERN_ERR PFX "%s: tray open\n", - drive->name); - do_end_request = 1; - } else { - struct cdrom_info *info = drive->driver_data; - - /* - * Allow the drive 5 seconds to recover, some - * devices will return this error while flushing - * data from cache. - */ - if (!rq->errors) - info->write_timeout = jiffies + - ATAPI_WAIT_WRITE_BUSY; - rq->errors = 1; - if (time_after(jiffies, info->write_timeout)) - do_end_request = 1; - else { - struct request_queue *q = drive->queue; - unsigned long flags; - - /* - * take a breather relying on the unplug - * timer to kick us again - */ - spin_lock_irqsave(q->queue_lock, flags); - blk_plug_device(q); - spin_unlock_irqrestore(q->queue_lock, flags); - - return 1; - } - } - } else if (sense_key == UNIT_ATTENTION) { - /* media change */ - cdrom_saw_media_change(drive); - - /* - * Arrange to retry the request but be sure to give up - * if we've retried too many times. - */ - if (++rq->errors > ERROR_MAX) - do_end_request = 1; - } else if (sense_key == ILLEGAL_REQUEST || - sense_key == DATA_PROTECT) { - /* - * No point in retrying after an illegal request or data - * protect error. - */ + break; + case ILLEGAL_REQUEST: + /* + * Don't print error message for this condition -- SFF8090i + * indicates that 5/24/00 is the correct response to a request + * to close the tray if the drive doesn't have that capability. + * + * cdrom_log_sense() knows this! + */ + if (rq->cmd[0] == GPCMD_START_STOP_UNIT) + break; + /* fall-through */ + case DATA_PROTECT: + /* + * No point in retrying after an illegal request or data + * protect error. + */ + if (!quiet) ide_dump_status(drive, "command error", stat); - do_end_request = 1; - } else if (sense_key == MEDIUM_ERROR) { - /* - * No point in re-trying a zillion times on a bad - * sector. If we got here the error is not correctable. - */ - ide_dump_status(drive, "media error (bad sector)", + do_end_request = 1; + break; + case MEDIUM_ERROR: + /* + * No point in re-trying a zillion times on a bad sector. + * If we got here the error is not correctable. + */ + if (!quiet) + ide_dump_status(drive, "media error " + "(bad sector)", stat); + do_end_request = 1; + break; + case BLANK_CHECK: + /* disk appears blank? */ + if (!quiet) + ide_dump_status(drive, "media error (blank)", stat); - do_end_request = 1; - } else if (sense_key == BLANK_CHECK) { - /* disk appears blank ?? 
*/ - ide_dump_status(drive, "media error (blank)", stat); - do_end_request = 1; - } else if ((err & ~ATA_ABORTED) != 0) { + do_end_request = 1; + break; + default: + if (blk_fs_request(rq) == 0) + break; + if (err & ~ATA_ABORTED) { /* go to the default handler for other errors */ ide_error(drive, "cdrom_decode_status", stat); return 1; - } else if ((++rq->errors > ERROR_MAX)) { + } else if (++rq->errors > ERROR_MAX) /* we've racked up too many retries, abort */ do_end_request = 1; - } - - /* - * End a request through request sense analysis when we have - * sense data. We need this in order to perform end of media - * processing. - */ - if (do_end_request) - goto end_request; + } - /* - * If we got a CHECK_CONDITION status, queue - * a request sense command. - */ - if (stat & ATA_ERR) - cdrom_queue_request_sense(drive, NULL, NULL); - return 1; - } else { - blk_dump_rq_flags(rq, PFX "bad rq"); - return 2; + if (blk_fs_request(rq) == 0) { + rq->cmd_flags |= REQ_FAILED; + do_end_request = 1; } + /* + * End a request through request sense analysis when we have sense data. + * We need this in order to perform end of media processing. + */ + if (do_end_request) + goto end_request; + + /* if we got a CHECK_CONDITION status, queue a request sense command */ + if (stat & ATA_ERR) + cdrom_queue_request_sense(drive, NULL, NULL); + return 1; + end_request: if (stat & ATA_ERR) { struct request_queue *q = drive->queue; @@ -624,15 +608,14 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) struct ide_cmd *cmd = &hwif->cmd; struct request *rq = hwif->rq; ide_expiry_t *expiry = NULL; - int dma_error = 0, dma, stat, thislen, uptodate = 0; + int dma_error = 0, dma, thislen, uptodate = 0; int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc, nsectors; int sense = blk_sense_request(rq); unsigned int timeout; u16 len; - u8 ireason; + u8 ireason, stat; - ide_debug_log(IDE_DBG_PC, "cmd[0]: 0x%x, write: 0x%x", - rq->cmd[0], write); + ide_debug_log(IDE_DBG_PC, "cmd: 0x%x, write: 0x%x", rq->cmd[0], write); /* check for errors */ dma = drive->dma; @@ -648,11 +631,16 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) } } - rc = cdrom_decode_status(drive, 0, &stat); - if (rc) { - if (rc == 2) - goto out_end; - return ide_stopped; + /* check status */ + stat = hwif->tp_ops->read_status(hwif); + + if (!OK_STAT(stat, 0, BAD_R_STAT)) { + rc = cdrom_decode_status(drive, stat); + if (rc) { + if (rc == 2) + goto out_end; + return ide_stopped; + } } /* using dma, transfer is complete now */ diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index c998cf8e971..a9fbe2c3121 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -97,35 +97,38 @@ static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq, } memset(&cmd, 0, sizeof(cmd)); - cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; + cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; + cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; if (drive->dev_flags & IDE_DFLAG_LBA) { if (lba48) { pr_debug("%s: LBA=0x%012llx\n", drive->name, (unsigned long long)block); - tf->hob_nsect = (nsectors >> 8) & 0xff; - tf->hob_lbal = (u8)(block >> 24); - if (sizeof(block) != 4) { - tf->hob_lbam = (u8)((u64)block >> 32); - tf->hob_lbah = (u8)((u64)block >> 40); - } - tf->nsect = nsectors & 0xff; tf->lbal = (u8) block; tf->lbam = (u8)(block >> 8); tf->lbah = (u8)(block >> 16); + tf->device = ATA_LBA; - cmd.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB); + tf = &cmd.hob; + tf->nsect = (nsectors >> 8) & 0xff; + tf->lbal = 
(u8)(block >> 24); + if (sizeof(block) != 4) { + tf->lbam = (u8)((u64)block >> 32); + tf->lbah = (u8)((u64)block >> 40); + } + + cmd.valid.out.hob = IDE_VALID_OUT_HOB; + cmd.valid.in.hob = IDE_VALID_IN_HOB; + cmd.tf_flags |= IDE_TFLAG_LBA48; } else { tf->nsect = nsectors & 0xff; tf->lbal = block; tf->lbam = block >>= 8; tf->lbah = block >>= 8; - tf->device = (block >> 8) & 0xf; + tf->device = ((block >> 8) & 0xf) | ATA_LBA; } - - tf->device |= ATA_LBA; } else { unsigned int sect, head, cyl, track; @@ -220,15 +223,19 @@ static u64 idedisk_read_native_max_address(ide_drive_t *drive, int lba48) tf->command = ATA_CMD_READ_NATIVE_MAX; tf->device = ATA_LBA; - cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; - if (lba48) - cmd.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB); + cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; + cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; + if (lba48) { + cmd.valid.out.hob = IDE_VALID_OUT_HOB; + cmd.valid.in.hob = IDE_VALID_IN_HOB; + cmd.tf_flags = IDE_TFLAG_LBA48; + } ide_no_data_taskfile(drive, &cmd); /* if OK, compute maximum address value */ if (!(tf->status & ATA_ERR)) - addr = ide_get_lba_addr(tf, lba48) + 1; + addr = ide_get_lba_addr(&cmd, lba48) + 1; return addr; } @@ -250,9 +257,9 @@ static u64 idedisk_set_max_address(ide_drive_t *drive, u64 addr_req, int lba48) tf->lbam = (addr_req >>= 8) & 0xff; tf->lbah = (addr_req >>= 8) & 0xff; if (lba48) { - tf->hob_lbal = (addr_req >>= 8) & 0xff; - tf->hob_lbam = (addr_req >>= 8) & 0xff; - tf->hob_lbah = (addr_req >>= 8) & 0xff; + cmd.hob.lbal = (addr_req >>= 8) & 0xff; + cmd.hob.lbam = (addr_req >>= 8) & 0xff; + cmd.hob.lbah = (addr_req >>= 8) & 0xff; tf->command = ATA_CMD_SET_MAX_EXT; } else { tf->device = (addr_req >>= 8) & 0x0f; @@ -260,15 +267,19 @@ static u64 idedisk_set_max_address(ide_drive_t *drive, u64 addr_req, int lba48) } tf->device |= ATA_LBA; - cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; - if (lba48) - cmd.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB); + cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; + cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; + if (lba48) { + cmd.valid.out.hob = IDE_VALID_OUT_HOB; + cmd.valid.in.hob = IDE_VALID_IN_HOB; + cmd.tf_flags = IDE_TFLAG_LBA48; + } ide_no_data_taskfile(drive, &cmd); /* if OK, compute maximum address value */ if (!(tf->status & ATA_ERR)) - addr_set = ide_get_lba_addr(tf, lba48) + 1; + addr_set = ide_get_lba_addr(&cmd, lba48) + 1; return addr_set; } @@ -395,8 +406,8 @@ static void idedisk_prepare_flush(struct request_queue *q, struct request *rq) cmd->tf.command = ATA_CMD_FLUSH_EXT; else cmd->tf.command = ATA_CMD_FLUSH; - cmd->tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE | - IDE_TFLAG_DYN; + cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; + cmd->tf_flags = IDE_TFLAG_DYN; cmd->protocol = ATA_PROT_NODATA; rq->cmd_type = REQ_TYPE_ATA_TASKFILE; @@ -457,7 +468,8 @@ static int ide_do_setfeature(ide_drive_t *drive, u8 feature, u8 nsect) cmd.tf.feature = feature; cmd.tf.nsect = nsect; cmd.tf.command = ATA_CMD_SET_FEATURES; - cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; + cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; + cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; return ide_no_data_taskfile(drive, &cmd); } @@ -533,7 +545,8 @@ static int do_idedisk_flushcache(ide_drive_t *drive) cmd.tf.command = ATA_CMD_FLUSH_EXT; else cmd.tf.command = ATA_CMD_FLUSH; - cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; + cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; + cmd.valid.in.tf = IDE_VALID_IN_TF | 
IDE_VALID_DEVICE; return ide_no_data_taskfile(drive, &cmd); } @@ -715,7 +728,8 @@ static int ide_disk_set_doorlock(ide_drive_t *drive, struct gendisk *disk, memset(&cmd, 0, sizeof(cmd)); cmd.tf.command = on ? ATA_CMD_MEDIA_LOCK : ATA_CMD_MEDIA_UNLOCK; - cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; + cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; + cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; ret = ide_no_data_taskfile(drive, &cmd); diff --git a/drivers/ide/ide-disk_proc.c b/drivers/ide/ide-disk_proc.c index eaea3bef207..19f263bf0a9 100644 --- a/drivers/ide/ide-disk_proc.c +++ b/drivers/ide/ide-disk_proc.c @@ -13,7 +13,8 @@ static int smart_enable(ide_drive_t *drive) tf->lbam = ATA_SMART_LBAM_PASS; tf->lbah = ATA_SMART_LBAH_PASS; tf->command = ATA_CMD_SMART; - cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; + cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; + cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; return ide_no_data_taskfile(drive, &cmd); } @@ -29,7 +30,8 @@ static int get_smart_data(ide_drive_t *drive, u8 *buf, u8 sub_cmd) tf->lbam = ATA_SMART_LBAM_PASS; tf->lbah = ATA_SMART_LBAH_PASS; tf->command = ATA_CMD_SMART; - cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; + cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; + cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; cmd.protocol = ATA_PROT_PIO; return ide_raw_taskfile(drive, &cmd, buf, 1); diff --git a/drivers/ide/ide-dma-sff.c b/drivers/ide/ide-dma-sff.c index 16fc46edc32..e4cdf78cc3e 100644 --- a/drivers/ide/ide-dma-sff.c +++ b/drivers/ide/ide-dma-sff.c @@ -277,8 +277,6 @@ void ide_dma_start(ide_drive_t *drive) dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); outb(dma_cmd | ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD); } - - wmb(); } EXPORT_SYMBOL_GPL(ide_dma_start); @@ -286,7 +284,7 @@ EXPORT_SYMBOL_GPL(ide_dma_start); int ide_dma_end(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; - u8 dma_stat = 0, dma_cmd = 0, mask; + u8 dma_stat = 0, dma_cmd = 0; /* stop DMA */ if (hwif->host_flags & IDE_HFLAG_MMIO) { @@ -304,11 +302,10 @@ int ide_dma_end(ide_drive_t *drive) /* clear INTR & ERROR bits */ ide_dma_sff_write_status(hwif, dma_stat | ATA_DMA_ERR | ATA_DMA_INTR); - wmb(); +#define CHECK_DMA_MASK (ATA_DMA_ACTIVE | ATA_DMA_ERR | ATA_DMA_INTR) /* verify good DMA status */ - mask = ATA_DMA_ACTIVE | ATA_DMA_ERR | ATA_DMA_INTR; - if ((dma_stat & mask) != ATA_DMA_INTR) + if ((dma_stat & CHECK_DMA_MASK) != ATA_DMA_INTR) return 0x10 | dma_stat; return 0; } diff --git a/drivers/ide/ide-h8300.c b/drivers/ide/ide-h8300.c index dac9a6d4496..c06ebdc4a13 100644 --- a/drivers/ide/ide-h8300.c +++ b/drivers/ide/ide-h8300.c @@ -22,103 +22,6 @@ (r); \ }) -static void mm_outw(u16 d, unsigned long a) -{ - __asm__("mov.b %w0,r2h\n\t" - "mov.b %x0,r2l\n\t" - "mov.w r2,@%1" - : - :"r"(d),"r"(a) - :"er2"); -} - -static u16 mm_inw(unsigned long a) -{ - register u16 r __asm__("er0"); - __asm__("mov.w @%1,r2\n\t" - "mov.b r2l,%x0\n\t" - "mov.b r2h,%w0" - :"=r"(r) - :"r"(a) - :"er2"); - return r; -} - -static void h8300_tf_load(ide_drive_t *drive, struct ide_cmd *cmd) -{ - ide_hwif_t *hwif = drive->hwif; - struct ide_io_ports *io_ports = &hwif->io_ports; - struct ide_taskfile *tf = &cmd->tf; - u8 HIHI = (cmd->tf_flags & IDE_TFLAG_LBA48) ? 
0xE0 : 0xEF; - - if (cmd->ftf_flags & IDE_FTFLAG_FLAGGED) - HIHI = 0xFF; - - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE) - outb(tf->hob_feature, io_ports->feature_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_NSECT) - outb(tf->hob_nsect, io_ports->nsect_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAL) - outb(tf->hob_lbal, io_ports->lbal_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAM) - outb(tf->hob_lbam, io_ports->lbam_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAH) - outb(tf->hob_lbah, io_ports->lbah_addr); - - if (cmd->tf_flags & IDE_TFLAG_OUT_FEATURE) - outb(tf->feature, io_ports->feature_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_NSECT) - outb(tf->nsect, io_ports->nsect_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_LBAL) - outb(tf->lbal, io_ports->lbal_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_LBAM) - outb(tf->lbam, io_ports->lbam_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_LBAH) - outb(tf->lbah, io_ports->lbah_addr); - - if (cmd->tf_flags & IDE_TFLAG_OUT_DEVICE) - outb((tf->device & HIHI) | drive->select, - io_ports->device_addr); -} - -static void h8300_tf_read(ide_drive_t *drive, struct ide_cmd *cmd) -{ - ide_hwif_t *hwif = drive->hwif; - struct ide_io_ports *io_ports = &hwif->io_ports; - struct ide_taskfile *tf = &cmd->tf; - - /* be sure we're looking at the low order bits */ - outb(ATA_DEVCTL_OBS, io_ports->ctl_addr); - - if (cmd->tf_flags & IDE_TFLAG_IN_ERROR) - tf->error = inb(io_ports->feature_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_NSECT) - tf->nsect = inb(io_ports->nsect_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_LBAL) - tf->lbal = inb(io_ports->lbal_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_LBAM) - tf->lbam = inb(io_ports->lbam_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_LBAH) - tf->lbah = inb(io_ports->lbah_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_DEVICE) - tf->device = inb(io_ports->device_addr); - - if (cmd->tf_flags & IDE_TFLAG_LBA48) { - outb(ATA_HOB | ATA_DEVCTL_OBS, io_ports->ctl_addr); - - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_ERROR) - tf->hob_error = inb(io_ports->feature_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_NSECT) - tf->hob_nsect = inb(io_ports->nsect_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAL) - tf->hob_lbal = inb(io_ports->lbal_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAM) - tf->hob_lbam = inb(io_ports->lbam_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAH) - tf->hob_lbah = inb(io_ports->lbah_addr); - } -} - static void mm_outsw(unsigned long addr, void *buf, u32 len) { unsigned short *bp = (unsigned short *)buf; @@ -152,8 +55,8 @@ static const struct ide_tp_ops h8300_tp_ops = { .write_devctl = ide_write_devctl, .dev_select = ide_dev_select, - .tf_load = h8300_tf_load, - .tf_read = h8300_tf_read, + .tf_load = ide_tf_load, + .tf_read = ide_tf_read, .input_data = h8300_input_data, .output_data = h8300_output_data, diff --git a/drivers/ide/ide-io-std.c b/drivers/ide/ide-io-std.c index 9cac281d82c..46721c45451 100644 --- a/drivers/ide/ide-io-std.c +++ b/drivers/ide/ide-io-std.c @@ -85,98 +85,57 @@ void ide_dev_select(ide_drive_t *drive) } EXPORT_SYMBOL_GPL(ide_dev_select); -void ide_tf_load(ide_drive_t *drive, struct ide_cmd *cmd) +void ide_tf_load(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid) { ide_hwif_t *hwif = drive->hwif; struct ide_io_ports *io_ports = &hwif->io_ports; - struct ide_taskfile *tf = &cmd->tf; void (*tf_outb)(u8 addr, unsigned long port); u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0; - u8 HIHI = (cmd->tf_flags & IDE_TFLAG_LBA48) ? 
0xE0 : 0xEF; if (mmio) tf_outb = ide_mm_outb; else tf_outb = ide_outb; - if (cmd->ftf_flags & IDE_FTFLAG_FLAGGED) - HIHI = 0xFF; - - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE) - tf_outb(tf->hob_feature, io_ports->feature_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_NSECT) - tf_outb(tf->hob_nsect, io_ports->nsect_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAL) - tf_outb(tf->hob_lbal, io_ports->lbal_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAM) - tf_outb(tf->hob_lbam, io_ports->lbam_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAH) - tf_outb(tf->hob_lbah, io_ports->lbah_addr); - - if (cmd->tf_flags & IDE_TFLAG_OUT_FEATURE) + if (valid & IDE_VALID_FEATURE) tf_outb(tf->feature, io_ports->feature_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_NSECT) + if (valid & IDE_VALID_NSECT) tf_outb(tf->nsect, io_ports->nsect_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_LBAL) + if (valid & IDE_VALID_LBAL) tf_outb(tf->lbal, io_ports->lbal_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_LBAM) + if (valid & IDE_VALID_LBAM) tf_outb(tf->lbam, io_ports->lbam_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_LBAH) + if (valid & IDE_VALID_LBAH) tf_outb(tf->lbah, io_ports->lbah_addr); - - if (cmd->tf_flags & IDE_TFLAG_OUT_DEVICE) - tf_outb((tf->device & HIHI) | drive->select, - io_ports->device_addr); + if (valid & IDE_VALID_DEVICE) + tf_outb(tf->device, io_ports->device_addr); } EXPORT_SYMBOL_GPL(ide_tf_load); -void ide_tf_read(ide_drive_t *drive, struct ide_cmd *cmd) +void ide_tf_read(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid) { ide_hwif_t *hwif = drive->hwif; struct ide_io_ports *io_ports = &hwif->io_ports; - struct ide_taskfile *tf = &cmd->tf; - void (*tf_outb)(u8 addr, unsigned long port); u8 (*tf_inb)(unsigned long port); u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 
1 : 0; - if (mmio) { - tf_outb = ide_mm_outb; + if (mmio) tf_inb = ide_mm_inb; - } else { - tf_outb = ide_outb; + else tf_inb = ide_inb; - } - - /* be sure we're looking at the low order bits */ - tf_outb(ATA_DEVCTL_OBS, io_ports->ctl_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_ERROR) + if (valid & IDE_VALID_ERROR) tf->error = tf_inb(io_ports->feature_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_NSECT) + if (valid & IDE_VALID_NSECT) tf->nsect = tf_inb(io_ports->nsect_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_LBAL) + if (valid & IDE_VALID_LBAL) tf->lbal = tf_inb(io_ports->lbal_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_LBAM) + if (valid & IDE_VALID_LBAM) tf->lbam = tf_inb(io_ports->lbam_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_LBAH) + if (valid & IDE_VALID_LBAH) tf->lbah = tf_inb(io_ports->lbah_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_DEVICE) + if (valid & IDE_VALID_DEVICE) tf->device = tf_inb(io_ports->device_addr); - - if (cmd->tf_flags & IDE_TFLAG_LBA48) { - tf_outb(ATA_HOB | ATA_DEVCTL_OBS, io_ports->ctl_addr); - - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_ERROR) - tf->hob_error = tf_inb(io_ports->feature_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_NSECT) - tf->hob_nsect = tf_inb(io_ports->nsect_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAL) - tf->hob_lbal = tf_inb(io_ports->lbal_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAM) - tf->hob_lbam = tf_inb(io_ports->lbam_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAH) - tf->hob_lbah = tf_inb(io_ports->lbah_addr); - } } EXPORT_SYMBOL_GPL(ide_tf_read); diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 1deb6d29b18..2ae02b8d7f8 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -86,18 +86,18 @@ void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err) tp_ops->input_data(drive, cmd, data, 2); - tf->data = data[0]; - tf->hob_data = data[1]; + cmd->tf.data = data[0]; + cmd->hob.data = data[1]; } - tp_ops->tf_read(drive, cmd); + ide_tf_readback(drive, cmd); if ((cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) && tf_cmd == ATA_CMD_IDLEIMMEDIATE) { if (tf->lbal != 0xc4) { printk(KERN_ERR "%s: head unload failed!\n", drive->name); - ide_tf_dump(drive->name, tf); + ide_tf_dump(drive->name, cmd); } else drive->dev_flags |= IDE_DFLAG_PARKED; } @@ -205,8 +205,9 @@ static ide_startstop_t ide_disk_special(ide_drive_t *drive) return ide_stopped; } - cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE | - IDE_TFLAG_CUSTOM_HANDLER; + cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; + cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; + cmd.tf_flags = IDE_TFLAG_CUSTOM_HANDLER; do_rw_taskfile(drive, &cmd); diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c index 77014276743..c1c25ebbaa1 100644 --- a/drivers/ide/ide-ioctls.c +++ b/drivers/ide/ide-ioctls.c @@ -141,11 +141,12 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg) tf->lbal = args[1]; tf->lbam = 0x4f; tf->lbah = 0xc2; - cmd.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_IN_NSECT; + cmd.valid.out.tf = IDE_VALID_OUT_TF; + cmd.valid.in.tf = IDE_VALID_NSECT; } else { tf->nsect = args[1]; - cmd.tf_flags = IDE_TFLAG_OUT_FEATURE | IDE_TFLAG_OUT_NSECT | - IDE_TFLAG_IN_NSECT; + cmd.valid.out.tf = IDE_VALID_FEATURE | IDE_VALID_NSECT; + cmd.valid.in.tf = IDE_VALID_NSECT; } tf->command = args[0]; cmd.protocol = args[3] ? 
ATA_PROT_PIO : ATA_PROT_NODATA; @@ -205,14 +206,15 @@ static int ide_task_ioctl(ide_drive_t *drive, unsigned long arg) return -EFAULT; memset(&cmd, 0, sizeof(cmd)); - memcpy(&cmd.tf_array[7], &args[1], 6); + memcpy(&cmd.tf.feature, &args[1], 6); cmd.tf.command = args[0]; - cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; + cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; + cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; err = ide_no_data_taskfile(drive, &cmd); args[0] = cmd.tf.command; - memcpy(&args[1], &cmd.tf_array[7], 6); + memcpy(&args[1], &cmd.tf.feature, 6); if (copy_to_user(p, args, 7)) err = -EFAULT; diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c index 27bb70ddd45..c19a221b1e1 100644 --- a/drivers/ide/ide-iops.c +++ b/drivers/ide/ide-iops.c @@ -37,14 +37,11 @@ void SELECT_MASK(ide_drive_t *drive, int mask) u8 ide_read_error(ide_drive_t *drive) { - struct ide_cmd cmd; + struct ide_taskfile tf; - memset(&cmd, 0, sizeof(cmd)); - cmd.tf_flags = IDE_TFLAG_IN_ERROR; + drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_ERROR); - drive->hwif->tp_ops->tf_read(drive, &cmd); - - return cmd.tf.error; + return tf.error; } EXPORT_SYMBOL_GPL(ide_read_error); @@ -312,10 +309,10 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed) { ide_hwif_t *hwif = drive->hwif; const struct ide_tp_ops *tp_ops = hwif->tp_ops; + struct ide_taskfile tf; u16 *id = drive->id, i; int error = 0; u8 stat; - struct ide_cmd cmd; #ifdef CONFIG_BLK_DEV_IDEDMA if (hwif->dma_ops) /* check if host supports DMA */ @@ -347,12 +344,11 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed) udelay(1); tp_ops->write_devctl(hwif, ATA_NIEN | ATA_DEVCTL_OBS); - memset(&cmd, 0, sizeof(cmd)); - cmd.tf_flags = IDE_TFLAG_OUT_FEATURE | IDE_TFLAG_OUT_NSECT; - cmd.tf.feature = SETFEATURES_XFER; - cmd.tf.nsect = speed; + memset(&tf, 0, sizeof(tf)); + tf.feature = SETFEATURES_XFER; + tf.nsect = speed; - tp_ops->tf_load(drive, &cmd); + tp_ops->tf_load(drive, &tf, IDE_VALID_FEATURE | IDE_VALID_NSECT); tp_ops->exec_command(hwif, ATA_CMD_SET_FEATURES); diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c index 217b7fdf2b1..56ff8c46c7d 100644 --- a/drivers/ide/ide-lib.c +++ b/drivers/ide/ide-lib.c @@ -49,16 +49,17 @@ static void ide_dump_opcode(ide_drive_t *drive) printk(KERN_CONT "0x%02x\n", cmd->tf.command); } -u64 ide_get_lba_addr(struct ide_taskfile *tf, int lba48) +u64 ide_get_lba_addr(struct ide_cmd *cmd, int lba48) { + struct ide_taskfile *tf = &cmd->tf; u32 high, low; - if (lba48) - high = (tf->hob_lbah << 16) | (tf->hob_lbam << 8) | - tf->hob_lbal; - else - high = tf->device & 0xf; low = (tf->lbah << 16) | (tf->lbam << 8) | tf->lbal; + if (lba48) { + tf = &cmd->hob; + high = (tf->lbah << 16) | (tf->lbam << 8) | tf->lbal; + } else + high = tf->device & 0xf; return ((u64)high << 24) | low; } @@ -71,17 +72,18 @@ static void ide_dump_sector(ide_drive_t *drive) u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48); memset(&cmd, 0, sizeof(cmd)); - if (lba48) - cmd.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_HOB_LBA | - IDE_TFLAG_LBA48; - else - cmd.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_DEVICE; + if (lba48) { + cmd.valid.in.tf = IDE_VALID_LBA; + cmd.valid.in.hob = IDE_VALID_LBA; + cmd.tf_flags = IDE_TFLAG_LBA48; + } else + cmd.valid.in.tf = IDE_VALID_LBA | IDE_VALID_DEVICE; - drive->hwif->tp_ops->tf_read(drive, &cmd); + ide_tf_readback(drive, &cmd); if (lba48 || (tf->device & ATA_LBA)) printk(KERN_CONT ", LBAsect=%llu", - (unsigned long long)ide_get_lba_addr(tf, lba48)); + (unsigned long 
long)ide_get_lba_addr(&cmd, lba48)); else printk(KERN_CONT ", CHS=%d/%d/%d", (tf->lbah << 8) + tf->lbam, tf->device & 0xf, tf->lbal); diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c index 9490b446519..310d03f2b5b 100644 --- a/drivers/ide/ide-park.c +++ b/drivers/ide/ide-park.c @@ -74,7 +74,8 @@ ide_startstop_t ide_do_park_unpark(ide_drive_t *drive, struct request *rq) tf->lbal = 0x4c; tf->lbam = 0x4e; tf->lbah = 0x55; - cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; + cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; + cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; } else /* cmd == REQ_UNPARK_HEADS */ tf->command = ATA_CMD_CHK_POWER; diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c index bb7858ebb7d..0d8a151c0a0 100644 --- a/drivers/ide/ide-pm.c +++ b/drivers/ide/ide-pm.c @@ -163,7 +163,8 @@ ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq) return ide_stopped; out_do_tf: - cmd->tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; + cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; + cmd->valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; cmd->protocol = ATA_PROT_NODATA; return do_rw_taskfile(drive, cmd); diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index d8c1c3e735b..7f264ed1141 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -283,13 +283,11 @@ int ide_dev_read_id(ide_drive_t *drive, u8 cmd, u16 *id) * identify command to be sure of reply */ if (cmd == ATA_CMD_ID_ATAPI) { - struct ide_cmd cmd; + struct ide_taskfile tf; - memset(&cmd, 0, sizeof(cmd)); + memset(&tf, 0, sizeof(tf)); /* disable DMA & overlap */ - cmd.tf_flags = IDE_TFLAG_OUT_FEATURE; - - tp_ops->tf_load(drive, &cmd); + tp_ops->tf_load(drive, &tf, IDE_VALID_FEATURE); } /* ask drive for ID */ @@ -337,14 +335,11 @@ int ide_busy_sleep(ide_hwif_t *hwif, unsigned long timeout, int altstatus) static u8 ide_read_device(ide_drive_t *drive) { - struct ide_cmd cmd; - - memset(&cmd, 0, sizeof(cmd)); - cmd.tf_flags = IDE_TFLAG_IN_DEVICE; + struct ide_taskfile tf; - drive->hwif->tp_ops->tf_read(drive, &cmd); + drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_DEVICE); - return cmd.tf.device; + return tf.device; } /** @@ -1314,6 +1309,7 @@ struct ide_host *ide_host_alloc(const struct ide_port_info *d, hw_regs_t **hws) host->get_lock = d->get_lock; host->release_lock = d->release_lock; host->host_flags = d->host_flags; + host->irq_flags = d->irq_flags; } return host; diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c index 10a88bf3eef..3242698832a 100644 --- a/drivers/ide/ide-proc.c +++ b/drivers/ide/ide-proc.c @@ -204,8 +204,8 @@ static int set_xfer_rate (ide_drive_t *drive, int arg) cmd.tf.command = ATA_CMD_SET_FEATURES; cmd.tf.feature = SETFEATURES_XFER; cmd.tf.nsect = (u8)arg; - cmd.tf_flags = IDE_TFLAG_OUT_FEATURE | IDE_TFLAG_OUT_NSECT | - IDE_TFLAG_IN_NSECT; + cmd.valid.out.tf = IDE_VALID_FEATURE | IDE_VALID_NSECT; + cmd.valid.in.tf = IDE_VALID_NSECT; err = ide_no_data_taskfile(drive, &cmd); diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index 243421ce40d..4aa6223c11b 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c @@ -23,17 +23,33 @@ #include <asm/uaccess.h> #include <asm/io.h> -void ide_tf_dump(const char *s, struct ide_taskfile *tf) +void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd) +{ + ide_hwif_t *hwif = drive->hwif; + const struct ide_tp_ops *tp_ops = hwif->tp_ops; + + /* Be sure we're looking at the low order bytes */ + tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS); 
+ + tp_ops->tf_read(drive, &cmd->tf, cmd->valid.in.tf); + + if (cmd->tf_flags & IDE_TFLAG_LBA48) { + tp_ops->write_devctl(hwif, ATA_HOB | ATA_DEVCTL_OBS); + + tp_ops->tf_read(drive, &cmd->hob, cmd->valid.in.hob); + } +} + +void ide_tf_dump(const char *s, struct ide_cmd *cmd) { #ifdef DEBUG printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x " "lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n", - s, tf->feature, tf->nsect, tf->lbal, - tf->lbam, tf->lbah, tf->device, tf->command); - printk("%s: hob: nsect 0x%02x lbal 0x%02x " - "lbam 0x%02x lbah 0x%02x\n", - s, tf->hob_nsect, tf->hob_lbal, - tf->hob_lbam, tf->hob_lbah); + s, cmd->tf.feature, cmd->tf.nsect, + cmd->tf.lbal, cmd->tf.lbam, cmd->tf.lbah, + cmd->tf.device, cmd->tf.command); + printk("%s: hob: nsect 0x%02x lbal 0x%02x lbam 0x%02x lbah 0x%02x\n", + s, cmd->hob.nsect, cmd->hob.lbal, cmd->hob.lbam, cmd->hob.lbah); #endif } @@ -47,7 +63,8 @@ int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf) cmd.tf.command = ATA_CMD_ID_ATA; else cmd.tf.command = ATA_CMD_ID_ATAPI; - cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; + cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; + cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; cmd.protocol = ATA_PROT_PIO; return ide_raw_taskfile(drive, &cmd, buf, 1); @@ -79,16 +96,27 @@ ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd) memcpy(cmd, orig_cmd, sizeof(*cmd)); if ((cmd->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) { - ide_tf_dump(drive->name, tf); + ide_tf_dump(drive->name, cmd); tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS); SELECT_MASK(drive, 0); if (cmd->ftf_flags & IDE_FTFLAG_OUT_DATA) { - u8 data[2] = { tf->data, tf->hob_data }; + u8 data[2] = { cmd->tf.data, cmd->hob.data }; tp_ops->output_data(drive, cmd, data, 2); } - tp_ops->tf_load(drive, cmd); + + if (cmd->valid.out.tf & IDE_VALID_DEVICE) { + u8 HIHI = (cmd->tf_flags & IDE_TFLAG_LBA48) ? 
+ 0xE0 : 0xEF; + + if (!(cmd->ftf_flags & IDE_FTFLAG_FLAGGED)) + cmd->tf.device &= HIHI; + cmd->tf.device |= drive->select; + } + + tp_ops->tf_load(drive, &cmd->hob, cmd->valid.out.hob); + tp_ops->tf_load(drive, &cmd->tf, cmd->valid.out.tf); } switch (cmd->protocol) { @@ -489,16 +517,17 @@ int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg) memset(&cmd, 0, sizeof(cmd)); - memcpy(&cmd.tf_array[0], req_task->hob_ports, - HDIO_DRIVE_HOB_HDR_SIZE - 2); - memcpy(&cmd.tf_array[6], req_task->io_ports, - HDIO_DRIVE_TASK_HDR_SIZE); + memcpy(&cmd.hob, req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE - 2); + memcpy(&cmd.tf, req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE); - cmd.tf_flags = IDE_TFLAG_IO_16BIT | IDE_TFLAG_DEVICE | - IDE_TFLAG_IN_TF; + cmd.valid.out.tf = IDE_VALID_DEVICE; + cmd.valid.in.tf = IDE_VALID_DEVICE | IDE_VALID_IN_TF; + cmd.tf_flags = IDE_TFLAG_IO_16BIT; - if (drive->dev_flags & IDE_DFLAG_LBA48) - cmd.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_IN_HOB); + if (drive->dev_flags & IDE_DFLAG_LBA48) { + cmd.tf_flags |= IDE_TFLAG_LBA48; + cmd.valid.in.hob = IDE_VALID_IN_HOB; + } if (req_task->out_flags.all) { cmd.ftf_flags |= IDE_FTFLAG_FLAGGED; @@ -507,28 +536,28 @@ int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg) cmd.ftf_flags |= IDE_FTFLAG_OUT_DATA; if (req_task->out_flags.b.nsector_hob) - cmd.tf_flags |= IDE_TFLAG_OUT_HOB_NSECT; + cmd.valid.out.hob |= IDE_VALID_NSECT; if (req_task->out_flags.b.sector_hob) - cmd.tf_flags |= IDE_TFLAG_OUT_HOB_LBAL; + cmd.valid.out.hob |= IDE_VALID_LBAL; if (req_task->out_flags.b.lcyl_hob) - cmd.tf_flags |= IDE_TFLAG_OUT_HOB_LBAM; + cmd.valid.out.hob |= IDE_VALID_LBAM; if (req_task->out_flags.b.hcyl_hob) - cmd.tf_flags |= IDE_TFLAG_OUT_HOB_LBAH; + cmd.valid.out.hob |= IDE_VALID_LBAH; if (req_task->out_flags.b.error_feature) - cmd.tf_flags |= IDE_TFLAG_OUT_FEATURE; + cmd.valid.out.tf |= IDE_VALID_FEATURE; if (req_task->out_flags.b.nsector) - cmd.tf_flags |= IDE_TFLAG_OUT_NSECT; + cmd.valid.out.tf |= IDE_VALID_NSECT; if (req_task->out_flags.b.sector) - cmd.tf_flags |= IDE_TFLAG_OUT_LBAL; + cmd.valid.out.tf |= IDE_VALID_LBAL; if (req_task->out_flags.b.lcyl) - cmd.tf_flags |= IDE_TFLAG_OUT_LBAM; + cmd.valid.out.tf |= IDE_VALID_LBAM; if (req_task->out_flags.b.hcyl) - cmd.tf_flags |= IDE_TFLAG_OUT_LBAH; + cmd.valid.out.tf |= IDE_VALID_LBAH; } else { - cmd.tf_flags |= IDE_TFLAG_OUT_TF; + cmd.valid.out.tf |= IDE_VALID_OUT_TF; if (cmd.tf_flags & IDE_TFLAG_LBA48) - cmd.tf_flags |= IDE_TFLAG_OUT_HOB; + cmd.valid.out.hob |= IDE_VALID_OUT_HOB; } if (req_task->in_flags.b.data) @@ -594,7 +623,7 @@ int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg) if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA) nsect = 0; else if (!nsect) { - nsect = (cmd.tf.hob_nsect << 8) | cmd.tf.nsect; + nsect = (cmd.hob.nsect << 8) | cmd.tf.nsect; if (!nsect) { printk(KERN_ERR "%s: in/out command without data\n", @@ -606,10 +635,8 @@ int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg) err = ide_raw_taskfile(drive, &cmd, data_buf, nsect); - memcpy(req_task->hob_ports, &cmd.tf_array[0], - HDIO_DRIVE_HOB_HDR_SIZE - 2); - memcpy(req_task->io_ports, &cmd.tf_array[6], - HDIO_DRIVE_TASK_HDR_SIZE); + memcpy(req_task->hob_ports, &cmd.hob, HDIO_DRIVE_HOB_HDR_SIZE - 2); + memcpy(req_task->io_ports, &cmd.tf, HDIO_DRIVE_TASK_HDR_SIZE); if ((cmd.ftf_flags & IDE_FTFLAG_SET_IN_FLAGS) && req_task->in_flags.all == 0) { diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c index 71a39fb3856..95327a2c242 100644 --- a/drivers/ide/ns87415.c +++ b/drivers/ide/ns87415.c @@ 
-61,41 +61,23 @@ static u8 superio_dma_sff_read_status(ide_hwif_t *hwif) return superio_ide_inb(hwif->dma_base + ATA_DMA_STATUS); } -static void superio_tf_read(ide_drive_t *drive, struct ide_cmd *cmd) +static void superio_tf_read(ide_drive_t *drive, struct ide_taskfile *tf, + u8 valid) { struct ide_io_ports *io_ports = &drive->hwif->io_ports; - struct ide_taskfile *tf = &cmd->tf; - /* be sure we're looking at the low order bits */ - outb(ATA_DEVCTL_OBS, io_ports->ctl_addr); - - if (cmd->tf_flags & IDE_TFLAG_IN_ERROR) + if (valid & IDE_VALID_ERROR) tf->error = inb(io_ports->feature_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_NSECT) + if (valid & IDE_VALID_NSECT) tf->nsect = inb(io_ports->nsect_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_LBAL) + if (valid & IDE_VALID_LBAL) tf->lbal = inb(io_ports->lbal_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_LBAM) + if (valid & IDE_VALID_LBAM) tf->lbam = inb(io_ports->lbam_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_LBAH) + if (valid & IDE_VALID_LBAH) tf->lbah = inb(io_ports->lbah_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_DEVICE) + if (valid & IDE_VALID_DEVICE) tf->device = superio_ide_inb(io_ports->device_addr); - - if (cmd->tf_flags & IDE_TFLAG_LBA48) { - outb(ATA_HOB | ATA_DEVCTL_OBS, io_ports->ctl_addr); - - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_ERROR) - tf->hob_error = inb(io_ports->feature_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_NSECT) - tf->hob_nsect = inb(io_ports->nsect_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAL) - tf->hob_lbal = inb(io_ports->lbal_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAM) - tf->hob_lbam = inb(io_ports->lbam_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAH) - tf->hob_lbah = inb(io_ports->lbah_addr); - } } static void ns87415_dev_select(ide_drive_t *drive); diff --git a/drivers/ide/q40ide.c b/drivers/ide/q40ide.c index d007e7f6659..c7934667924 100644 --- a/drivers/ide/q40ide.c +++ b/drivers/ide/q40ide.c @@ -16,6 +16,8 @@ #include <linux/blkdev.h> #include <linux/ide.h> +#include <asm/ide.h> + /* * Bases of the IDE interfaces */ @@ -77,8 +79,10 @@ static void q40ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, { unsigned long data_addr = drive->hwif->io_ports.data_addr; - if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) - return insw(data_addr, buf, (len + 1) / 2); + if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) { + __ide_mm_insw(data_addr, buf, (len + 1) / 2); + return; + } raw_insw_swapw((u16 *)data_addr, buf, (len + 1) / 2); } @@ -88,8 +92,10 @@ static void q40ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, { unsigned long data_addr = drive->hwif->io_ports.data_addr; - if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) - return outsw(data_addr, buf, (len + 1) / 2); + if (drive->media == ide_disk && cmd && (cmd->tf_flags & IDE_TFLAG_FS)) { + __ide_mm_outsw(data_addr, buf, (len + 1) / 2); + return; + } raw_outsw_swapw((u16 *)data_addr, buf, (len + 1) / 2); } diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c index 6d8dbd9c10b..5be41f25204 100644 --- a/drivers/ide/scc_pata.c +++ b/drivers/ide/scc_pata.c @@ -337,7 +337,6 @@ static void scc_dma_start(ide_drive_t *drive) /* start DMA */ scc_ide_outb(dma_cmd | 1, hwif->dma_base); - wmb(); } static int __scc_dma_end(ide_drive_t *drive) @@ -354,7 +353,6 @@ static int __scc_dma_end(ide_drive_t *drive) /* clear the INTR & ERROR bits */ scc_ide_outb(dma_stat | 6, hwif->dma_base + 4); /* verify good DMA status */ - wmb(); return (dma_stat & 7) != 4 ? 
(0x10 | dma_stat) : 0; } @@ -647,77 +645,40 @@ static int __devinit init_setup_scc(struct pci_dev *dev, return rc; } -static void scc_tf_load(ide_drive_t *drive, struct ide_cmd *cmd) +static void scc_tf_load(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid) { struct ide_io_ports *io_ports = &drive->hwif->io_ports; - struct ide_taskfile *tf = &cmd->tf; - u8 HIHI = (cmd->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF; - - if (cmd->ftf_flags & IDE_FTFLAG_FLAGGED) - HIHI = 0xFF; - - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE) - scc_ide_outb(tf->hob_feature, io_ports->feature_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_NSECT) - scc_ide_outb(tf->hob_nsect, io_ports->nsect_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAL) - scc_ide_outb(tf->hob_lbal, io_ports->lbal_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAM) - scc_ide_outb(tf->hob_lbam, io_ports->lbam_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAH) - scc_ide_outb(tf->hob_lbah, io_ports->lbah_addr); - - if (cmd->tf_flags & IDE_TFLAG_OUT_FEATURE) + + if (valid & IDE_VALID_FEATURE) scc_ide_outb(tf->feature, io_ports->feature_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_NSECT) + if (valid & IDE_VALID_NSECT) scc_ide_outb(tf->nsect, io_ports->nsect_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_LBAL) + if (valid & IDE_VALID_LBAL) scc_ide_outb(tf->lbal, io_ports->lbal_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_LBAM) + if (valid & IDE_VALID_LBAM) scc_ide_outb(tf->lbam, io_ports->lbam_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_LBAH) + if (valid & IDE_VALID_LBAH) scc_ide_outb(tf->lbah, io_ports->lbah_addr); - - if (cmd->tf_flags & IDE_TFLAG_OUT_DEVICE) - scc_ide_outb((tf->device & HIHI) | drive->select, - io_ports->device_addr); + if (valid & IDE_VALID_DEVICE) + scc_ide_outb(tf->device, io_ports->device_addr); } -static void scc_tf_read(ide_drive_t *drive, struct ide_cmd *cmd) +static void scc_tf_read(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid) { struct ide_io_ports *io_ports = &drive->hwif->io_ports; - struct ide_taskfile *tf = &cmd->tf; - - /* be sure we're looking at the low order bits */ - scc_ide_outb(ATA_DEVCTL_OBS, io_ports->ctl_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_ERROR) + if (valid & IDE_VALID_ERROR) tf->error = scc_ide_inb(io_ports->feature_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_NSECT) + if (valid & IDE_VALID_NSECT) tf->nsect = scc_ide_inb(io_ports->nsect_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_LBAL) + if (valid & IDE_VALID_LBAL) tf->lbal = scc_ide_inb(io_ports->lbal_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_LBAM) + if (valid & IDE_VALID_LBAM) tf->lbam = scc_ide_inb(io_ports->lbam_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_LBAH) + if (valid & IDE_VALID_LBAH) tf->lbah = scc_ide_inb(io_ports->lbah_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_DEVICE) + if (valid & IDE_VALID_DEVICE) tf->device = scc_ide_inb(io_ports->device_addr); - - if (cmd->tf_flags & IDE_TFLAG_LBA48) { - scc_ide_outb(ATA_HOB | ATA_DEVCTL_OBS, io_ports->ctl_addr); - - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_ERROR) - tf->hob_error = scc_ide_inb(io_ports->feature_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_NSECT) - tf->hob_nsect = scc_ide_inb(io_ports->nsect_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAL) - tf->hob_lbal = scc_ide_inb(io_ports->lbal_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAM) - tf->hob_lbam = scc_ide_inb(io_ports->lbam_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAH) - tf->hob_lbah = scc_ide_inb(io_ports->lbah_addr); - } } static void scc_input_data(ide_drive_t *drive, struct ide_cmd *cmd, diff --git 
a/drivers/ide/tx4938ide.c b/drivers/ide/tx4938ide.c index 4cb79c4c260..e33d764e294 100644 --- a/drivers/ide/tx4938ide.c +++ b/drivers/ide/tx4938ide.c @@ -72,91 +72,6 @@ static void tx4938ide_set_pio_mode(ide_drive_t *drive, const u8 pio) #ifdef __BIG_ENDIAN /* custom iops (independent from SWAP_IO_SPACE) */ -static u8 tx4938ide_inb(unsigned long port) -{ - return __raw_readb((void __iomem *)port); -} - -static void tx4938ide_outb(u8 value, unsigned long port) -{ - __raw_writeb(value, (void __iomem *)port); -} - -static void tx4938ide_tf_load(ide_drive_t *drive, struct ide_cmd *cmd) -{ - ide_hwif_t *hwif = drive->hwif; - struct ide_io_ports *io_ports = &hwif->io_ports; - struct ide_taskfile *tf = &cmd->tf; - u8 HIHI = cmd->tf_flags & IDE_TFLAG_LBA48 ? 0xE0 : 0xEF; - - if (cmd->ftf_flags & IDE_FTFLAG_FLAGGED) - HIHI = 0xFF; - - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE) - tx4938ide_outb(tf->hob_feature, io_ports->feature_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_NSECT) - tx4938ide_outb(tf->hob_nsect, io_ports->nsect_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAL) - tx4938ide_outb(tf->hob_lbal, io_ports->lbal_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAM) - tx4938ide_outb(tf->hob_lbam, io_ports->lbam_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAH) - tx4938ide_outb(tf->hob_lbah, io_ports->lbah_addr); - - if (cmd->tf_flags & IDE_TFLAG_OUT_FEATURE) - tx4938ide_outb(tf->feature, io_ports->feature_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_NSECT) - tx4938ide_outb(tf->nsect, io_ports->nsect_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_LBAL) - tx4938ide_outb(tf->lbal, io_ports->lbal_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_LBAM) - tx4938ide_outb(tf->lbam, io_ports->lbam_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_LBAH) - tx4938ide_outb(tf->lbah, io_ports->lbah_addr); - - if (cmd->tf_flags & IDE_TFLAG_OUT_DEVICE) - tx4938ide_outb((tf->device & HIHI) | drive->select, - io_ports->device_addr); -} - -static void tx4938ide_tf_read(ide_drive_t *drive, struct ide_cmd *cmd) -{ - ide_hwif_t *hwif = drive->hwif; - struct ide_io_ports *io_ports = &hwif->io_ports; - struct ide_taskfile *tf = &cmd->tf; - - /* be sure we're looking at the low order bits */ - tx4938ide_outb(ATA_DEVCTL_OBS, io_ports->ctl_addr); - - if (cmd->tf_flags & IDE_TFLAG_IN_ERROR) - tf->error = tx4938ide_inb(io_ports->feature_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_NSECT) - tf->nsect = tx4938ide_inb(io_ports->nsect_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_LBAL) - tf->lbal = tx4938ide_inb(io_ports->lbal_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_LBAM) - tf->lbam = tx4938ide_inb(io_ports->lbam_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_LBAH) - tf->lbah = tx4938ide_inb(io_ports->lbah_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_DEVICE) - tf->device = tx4938ide_inb(io_ports->device_addr); - - if (cmd->tf_flags & IDE_TFLAG_LBA48) { - tx4938ide_outb(ATA_HOB | ATA_DEVCTL_OBS, io_ports->ctl_addr); - - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_ERROR) - tf->hob_error = tx4938ide_inb(io_ports->feature_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_NSECT) - tf->hob_nsect = tx4938ide_inb(io_ports->nsect_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAL) - tf->hob_lbal = tx4938ide_inb(io_ports->lbal_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAM) - tf->hob_lbam = tx4938ide_inb(io_ports->lbam_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAH) - tf->hob_lbah = tx4938ide_inb(io_ports->lbah_addr); - } -} - static void tx4938ide_input_data_swap(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, unsigned int len) { @@ -190,8 +105,8 @@ 
static const struct ide_tp_ops tx4938ide_tp_ops = { .write_devctl = ide_write_devctl, .dev_select = ide_dev_select, - .tf_load = tx4938ide_tf_load, - .tf_read = tx4938ide_tf_read, + .tf_load = ide_tf_load, + .tf_read = ide_tf_read, .input_data = tx4938ide_input_data_swap, .output_data = tx4938ide_output_data_swap, diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c index 0040a9a3e26..564422d2397 100644 --- a/drivers/ide/tx4939ide.c +++ b/drivers/ide/tx4939ide.c @@ -327,15 +327,15 @@ static int tx4939ide_dma_end(ide_drive_t *drive) /* read and clear the INTR & ERROR bits */ dma_stat = tx4939ide_clear_dma_status(base); - wmb(); +#define CHECK_DMA_MASK (ATA_DMA_ACTIVE | ATA_DMA_ERR | ATA_DMA_INTR) /* verify good DMA status */ - if ((dma_stat & (ATA_DMA_INTR | ATA_DMA_ERR | ATA_DMA_ACTIVE)) == 0 && + if ((dma_stat & CHECK_DMA_MASK) == 0 && (ctl & (TX4939IDE_INT_XFEREND | TX4939IDE_INT_HOST)) == (TX4939IDE_INT_XFEREND | TX4939IDE_INT_HOST)) /* INT_IDE lost... bug? */ return 0; - return ((dma_stat & (ATA_DMA_INTR | ATA_DMA_ERR | ATA_DMA_ACTIVE)) != + return ((dma_stat & CHECK_DMA_MASK) != ATA_DMA_INTR) ? 0x10 | dma_stat : 0; } @@ -434,97 +434,19 @@ static void tx4939ide_tf_load_fixup(ide_drive_t *drive) tx4939ide_writew(sysctl, base, TX4939IDE_Sys_Ctl); } -#ifdef __BIG_ENDIAN - -/* custom iops (independent from SWAP_IO_SPACE) */ -static u8 tx4939ide_inb(unsigned long port) +static void tx4939ide_tf_load(ide_drive_t *drive, struct ide_taskfile *tf, + u8 valid) { - return __raw_readb((void __iomem *)port); -} + ide_tf_load(drive, tf, valid); -static void tx4939ide_outb(u8 value, unsigned long port) -{ - __raw_writeb(value, (void __iomem *)port); -} - -static void tx4939ide_tf_load(ide_drive_t *drive, struct ide_cmd *cmd) -{ - ide_hwif_t *hwif = drive->hwif; - struct ide_io_ports *io_ports = &hwif->io_ports; - struct ide_taskfile *tf = &cmd->tf; - u8 HIHI = cmd->tf_flags & IDE_TFLAG_LBA48 ? 
0xE0 : 0xEF; - - if (cmd->ftf_flags & IDE_FTFLAG_FLAGGED) - HIHI = 0xFF; - - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE) - tx4939ide_outb(tf->hob_feature, io_ports->feature_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_NSECT) - tx4939ide_outb(tf->hob_nsect, io_ports->nsect_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAL) - tx4939ide_outb(tf->hob_lbal, io_ports->lbal_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAM) - tx4939ide_outb(tf->hob_lbam, io_ports->lbam_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_HOB_LBAH) - tx4939ide_outb(tf->hob_lbah, io_ports->lbah_addr); - - if (cmd->tf_flags & IDE_TFLAG_OUT_FEATURE) - tx4939ide_outb(tf->feature, io_ports->feature_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_NSECT) - tx4939ide_outb(tf->nsect, io_ports->nsect_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_LBAL) - tx4939ide_outb(tf->lbal, io_ports->lbal_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_LBAM) - tx4939ide_outb(tf->lbam, io_ports->lbam_addr); - if (cmd->tf_flags & IDE_TFLAG_OUT_LBAH) - tx4939ide_outb(tf->lbah, io_ports->lbah_addr); - - if (cmd->tf_flags & IDE_TFLAG_OUT_DEVICE) { - tx4939ide_outb((tf->device & HIHI) | drive->select, - io_ports->device_addr); + if (valid & IDE_VALID_DEVICE) tx4939ide_tf_load_fixup(drive); - } } -static void tx4939ide_tf_read(ide_drive_t *drive, struct ide_cmd *cmd) -{ - ide_hwif_t *hwif = drive->hwif; - struct ide_io_ports *io_ports = &hwif->io_ports; - struct ide_taskfile *tf = &cmd->tf; - - /* be sure we're looking at the low order bits */ - tx4939ide_outb(ATA_DEVCTL_OBS, io_ports->ctl_addr); - - if (cmd->tf_flags & IDE_TFLAG_IN_ERROR) - tf->error = tx4939ide_inb(io_ports->feature_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_NSECT) - tf->nsect = tx4939ide_inb(io_ports->nsect_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_LBAL) - tf->lbal = tx4939ide_inb(io_ports->lbal_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_LBAM) - tf->lbam = tx4939ide_inb(io_ports->lbam_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_LBAH) - tf->lbah = tx4939ide_inb(io_ports->lbah_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_DEVICE) - tf->device = tx4939ide_inb(io_ports->device_addr); - - if (cmd->tf_flags & IDE_TFLAG_LBA48) { - tx4939ide_outb(ATA_HOB | ATA_DEVCTL_OBS, io_ports->ctl_addr); - - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_ERROR) - tf->hob_error = tx4939ide_inb(io_ports->feature_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_NSECT) - tf->hob_nsect = tx4939ide_inb(io_ports->nsect_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAL) - tf->hob_lbal = tx4939ide_inb(io_ports->lbal_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAM) - tf->hob_lbam = tx4939ide_inb(io_ports->lbam_addr); - if (cmd->tf_flags & IDE_TFLAG_IN_HOB_LBAH) - tf->hob_lbah = tx4939ide_inb(io_ports->lbah_addr); - } -} +#ifdef __BIG_ENDIAN -static void tx4939ide_input_data_swap(ide_drive_t *drive, struct request *rq, +/* custom iops (independent from SWAP_IO_SPACE) */ +static void tx4939ide_input_data_swap(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, unsigned int len) { unsigned long port = drive->hwif->io_ports.data_addr; @@ -536,7 +458,7 @@ static void tx4939ide_input_data_swap(ide_drive_t *drive, struct request *rq, __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2)); } -static void tx4939ide_output_data_swap(ide_drive_t *drive, struct request *rq, +static void tx4939ide_output_data_swap(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, unsigned int len) { unsigned long port = drive->hwif->io_ports.data_addr; @@ -558,7 +480,7 @@ static const struct ide_tp_ops tx4939ide_tp_ops = { .dev_select = ide_dev_select, 
.tf_load = tx4939ide_tf_load, - .tf_read = tx4939ide_tf_read, + .tf_read = ide_tf_read, .input_data = tx4939ide_input_data_swap, .output_data = tx4939ide_output_data_swap, @@ -566,14 +488,6 @@ static const struct ide_tp_ops tx4939ide_tp_ops = { #else /* __LITTLE_ENDIAN */ -static void tx4939ide_tf_load(ide_drive_t *drive, struct ide_cmd *cmd) -{ - ide_tf_load(drive, cmd); - - if (cmd->tf_flags & IDE_TFLAG_OUT_DEVICE) - tx4939ide_tf_load_fixup(drive); -} - static const struct ide_tp_ops tx4939ide_tp_ops = { .exec_command = ide_exec_command, .read_status = ide_read_status, diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 2a2e50871b4..851de83ff45 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -297,21 +297,25 @@ static void cma_detach_from_dev(struct rdma_id_private *id_priv) id_priv->cma_dev = NULL; } -static int cma_set_qkey(struct ib_device *device, u8 port_num, - enum rdma_port_space ps, - struct rdma_dev_addr *dev_addr, u32 *qkey) +static int cma_set_qkey(struct rdma_id_private *id_priv) { struct ib_sa_mcmember_rec rec; int ret = 0; - switch (ps) { + if (id_priv->qkey) + return 0; + + switch (id_priv->id.ps) { case RDMA_PS_UDP: - *qkey = RDMA_UDP_QKEY; + id_priv->qkey = RDMA_UDP_QKEY; break; case RDMA_PS_IPOIB: - ib_addr_get_mgid(dev_addr, &rec.mgid); - ret = ib_sa_get_mcmember_rec(device, port_num, &rec.mgid, &rec); - *qkey = be32_to_cpu(rec.qkey); + ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid); + ret = ib_sa_get_mcmember_rec(id_priv->id.device, + id_priv->id.port_num, &rec.mgid, + &rec); + if (!ret) + id_priv->qkey = be32_to_cpu(rec.qkey); break; default: break; @@ -341,12 +345,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv) ret = ib_find_cached_gid(cma_dev->device, &gid, &id_priv->id.port_num, NULL); if (!ret) { - ret = cma_set_qkey(cma_dev->device, - id_priv->id.port_num, - id_priv->id.ps, dev_addr, - &id_priv->qkey); - if (!ret) - cma_attach_to_dev(id_priv, cma_dev); + cma_attach_to_dev(id_priv, cma_dev); break; } } @@ -578,6 +577,10 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv, *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT; if (cma_is_ud_ps(id_priv->id.ps)) { + ret = cma_set_qkey(id_priv); + if (ret) + return ret; + qp_attr->qkey = id_priv->qkey; *qp_attr_mask |= IB_QP_QKEY; } else { @@ -2201,6 +2204,12 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, event.status = ib_event->param.sidr_rep_rcvd.status; break; } + ret = cma_set_qkey(id_priv); + if (ret) { + event.event = RDMA_CM_EVENT_ADDR_ERROR; + event.status = -EINVAL; + break; + } if (id_priv->qkey != rep->qkey) { event.event = RDMA_CM_EVENT_UNREACHABLE; event.status = -EINVAL; @@ -2480,10 +2489,14 @@ static int cma_send_sidr_rep(struct rdma_id_private *id_priv, const void *private_data, int private_data_len) { struct ib_cm_sidr_rep_param rep; + int ret; memset(&rep, 0, sizeof rep); rep.status = status; if (status == IB_SIDR_SUCCESS) { + ret = cma_set_qkey(id_priv); + if (ret) + return ret; rep.qp_num = id_priv->qp_num; rep.qkey = id_priv->qkey; } @@ -2713,6 +2726,10 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv, IB_SA_MCMEMBER_REC_FLOW_LABEL | IB_SA_MCMEMBER_REC_TRAFFIC_CLASS; + if (id_priv->id.ps == RDMA_PS_IPOIB) + comp_mask |= IB_SA_MCMEMBER_REC_RATE | + IB_SA_MCMEMBER_REC_RATE_SELECTOR; + mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device, id_priv->id.port_num, &rec, comp_mask, GFP_KERNEL, diff --git 
a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c index a4a82bff710..8d71086f5a1 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.c +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c @@ -152,7 +152,7 @@ static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid) sge_cmd = qpid << 8 | 3; wqe->sge_cmd = cpu_to_be64(sge_cmd); skb->priority = CPL_PRIORITY_CONTROL; - return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb)); + return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb); } int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq) @@ -571,7 +571,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p) (unsigned long long) rdev_p->ctrl_qp.dma_addr, rdev_p->ctrl_qp.workq, 1 << T3_CTRL_QP_SIZE_LOG2); skb->priority = CPL_PRIORITY_CONTROL; - return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb)); + return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb); err: kfree_skb(skb); return err; @@ -701,7 +701,7 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry, u32 stag_idx; u32 wptr; - if (rdev_p->flags) + if (cxio_fatal_error(rdev_p)) return -EIO; stag_state = stag_state > 0; @@ -858,7 +858,7 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr) wqe->qp_dma_size = cpu_to_be32(attr->qp_dma_size); wqe->irs = cpu_to_be32(attr->irs); skb->priority = 0; /* 0=>ToeQ; 1=>CtrlQ */ - return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb)); + return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb); } void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb) @@ -1041,9 +1041,9 @@ void cxio_rdev_close(struct cxio_rdev *rdev_p) cxio_hal_pblpool_destroy(rdev_p); cxio_hal_rqtpool_destroy(rdev_p); list_del(&rdev_p->entry); - rdev_p->t3cdev_p->ulp = NULL; cxio_hal_destroy_ctrl_qp(rdev_p); cxio_hal_destroy_resource(rdev_p->rscp); + rdev_p->t3cdev_p->ulp = NULL; } } diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h index 094a66d1480..bfd03bf8be5 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.h +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h @@ -115,6 +115,11 @@ struct cxio_rdev { #define CXIO_ERROR_FATAL 1 }; +static inline int cxio_fatal_error(struct cxio_rdev *rdev_p) +{ + return rdev_p->flags & CXIO_ERROR_FATAL; +} + static inline int cxio_num_stags(struct cxio_rdev *rdev_p) { return min((int)T3_MAX_NUM_STAG, (int)((rdev_p->rnic_info.tpt_top - rdev_p->rnic_info.tpt_base) >> 5)); @@ -188,6 +193,7 @@ void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count); void cxio_flush_hw_cq(struct t3_cq *cq); int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe, u8 *cqe_flushed, u64 *cookie, u32 *credit); +int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb); #define MOD "iw_cxgb3: " #define PDBG(fmt, args...) 
pr_debug(MOD fmt, ## args) diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c index 37a4fc264a0..26fc0a4eaa7 100644 --- a/drivers/infiniband/hw/cxgb3/iwch.c +++ b/drivers/infiniband/hw/cxgb3/iwch.c @@ -165,12 +165,19 @@ static void close_rnic_dev(struct t3cdev *tdev) static void iwch_err_handler(struct t3cdev *tdev, u32 status, u32 error) { struct cxio_rdev *rdev = tdev->ulp; + struct iwch_dev *rnicp = rdev_to_iwch_dev(rdev); + struct ib_event event; - if (status == OFFLOAD_STATUS_DOWN) + if (status == OFFLOAD_STATUS_DOWN) { rdev->flags = CXIO_ERROR_FATAL; - return; + event.device = &rnicp->ibdev; + event.event = IB_EVENT_DEVICE_FATAL; + event.element.port_num = 0; + ib_dispatch_event(&event); + } + return; } static int __init iwch_init_module(void) diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h index 3773453b2cf..84735506333 100644 --- a/drivers/infiniband/hw/cxgb3/iwch.h +++ b/drivers/infiniband/hw/cxgb3/iwch.h @@ -117,6 +117,11 @@ static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev) return container_of(ibdev, struct iwch_dev, ibdev); } +static inline struct iwch_dev *rdev_to_iwch_dev(struct cxio_rdev *rdev) +{ + return container_of(rdev, struct iwch_dev, rdev); +} + static inline int t3b_device(const struct iwch_dev *rhp) { return rhp->rdev.t3cdev_p->type == T3B; diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index 8699947aaf6..fef3f1ae722 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -139,6 +139,38 @@ static void stop_ep_timer(struct iwch_ep *ep) put_ep(&ep->com); } +int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_entry *l2e) +{ + int error = 0; + struct cxio_rdev *rdev; + + rdev = (struct cxio_rdev *)tdev->ulp; + if (cxio_fatal_error(rdev)) { + kfree_skb(skb); + return -EIO; + } + error = l2t_send(tdev, skb, l2e); + if (error) + kfree_skb(skb); + return error; +} + +int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb) +{ + int error = 0; + struct cxio_rdev *rdev; + + rdev = (struct cxio_rdev *)tdev->ulp; + if (cxio_fatal_error(rdev)) { + kfree_skb(skb); + return -EIO; + } + error = cxgb3_ofld_send(tdev, skb); + if (error) + kfree_skb(skb); + return error; +} + static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb) { struct cpl_tid_release *req; @@ -150,7 +182,7 @@ static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb) req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid)); skb->priority = CPL_PRIORITY_SETUP; - cxgb3_ofld_send(tdev, skb); + iwch_cxgb3_ofld_send(tdev, skb); return; } @@ -172,8 +204,7 @@ int iwch_quiesce_tid(struct iwch_ep *ep) req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE); skb->priority = CPL_PRIORITY_DATA; - cxgb3_ofld_send(ep->com.tdev, skb); - return 0; + return iwch_cxgb3_ofld_send(ep->com.tdev, skb); } int iwch_resume_tid(struct iwch_ep *ep) @@ -194,8 +225,7 @@ int iwch_resume_tid(struct iwch_ep *ep) req->val = 0; skb->priority = CPL_PRIORITY_DATA; - cxgb3_ofld_send(ep->com.tdev, skb); - return 0; + return iwch_cxgb3_ofld_send(ep->com.tdev, skb); } static void set_emss(struct iwch_ep *ep, u16 opt) @@ -252,18 +282,22 @@ static void *alloc_ep(int size, gfp_t gfp) void __free_ep(struct kref *kref) { - struct iwch_ep_common *epc; - epc = container_of(kref, struct iwch_ep_common, kref); - PDBG("%s ep %p state %s\n", __func__, epc, 
states[state_read(epc)]); - kfree(epc); + struct iwch_ep *ep; + ep = container_of(container_of(kref, struct iwch_ep_common, kref), + struct iwch_ep, com); + PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]); + if (ep->com.flags & RELEASE_RESOURCES) { + cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid); + dst_release(ep->dst); + l2t_release(L2DATA(ep->com.tdev), ep->l2t); + } + kfree(ep); } static void release_ep_resources(struct iwch_ep *ep) { PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid); - cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid); - dst_release(ep->dst); - l2t_release(L2DATA(ep->com.tdev), ep->l2t); + ep->com.flags |= RELEASE_RESOURCES; put_ep(&ep->com); } @@ -382,7 +416,7 @@ static void abort_arp_failure(struct t3cdev *dev, struct sk_buff *skb) PDBG("%s t3cdev %p\n", __func__, dev); req->cmd = CPL_ABORT_NO_RST; - cxgb3_ofld_send(dev, skb); + iwch_cxgb3_ofld_send(dev, skb); } static int send_halfclose(struct iwch_ep *ep, gfp_t gfp) @@ -402,8 +436,7 @@ static int send_halfclose(struct iwch_ep *ep, gfp_t gfp) req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON)); req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid)); - l2t_send(ep->com.tdev, skb, ep->l2t); - return 0; + return iwch_l2t_send(ep->com.tdev, skb, ep->l2t); } static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp) @@ -424,8 +457,7 @@ static int send_abort(struct iwch_ep *ep, struct sk_buff *skb, gfp_t gfp) req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); req->cmd = CPL_ABORT_SEND_RST; - l2t_send(ep->com.tdev, skb, ep->l2t); - return 0; + return iwch_l2t_send(ep->com.tdev, skb, ep->l2t); } static int send_connect(struct iwch_ep *ep) @@ -469,8 +501,7 @@ static int send_connect(struct iwch_ep *ep) req->opt0l = htonl(opt0l); req->params = 0; req->opt2 = htonl(opt2); - l2t_send(ep->com.tdev, skb, ep->l2t); - return 0; + return iwch_l2t_send(ep->com.tdev, skb, ep->l2t); } static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb) @@ -527,7 +558,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb) req->sndseq = htonl(ep->snd_seq); BUG_ON(ep->mpa_skb); ep->mpa_skb = skb; - l2t_send(ep->com.tdev, skb, ep->l2t); + iwch_l2t_send(ep->com.tdev, skb, ep->l2t); start_ep_timer(ep); state_set(&ep->com, MPA_REQ_SENT); return; @@ -578,8 +609,7 @@ static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen) req->sndseq = htonl(ep->snd_seq); BUG_ON(ep->mpa_skb); ep->mpa_skb = skb; - l2t_send(ep->com.tdev, skb, ep->l2t); - return 0; + return iwch_l2t_send(ep->com.tdev, skb, ep->l2t); } static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen) @@ -630,8 +660,7 @@ static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen) req->sndseq = htonl(ep->snd_seq); ep->mpa_skb = skb; state_set(&ep->com, MPA_REP_SENT); - l2t_send(ep->com.tdev, skb, ep->l2t); - return 0; + return iwch_l2t_send(ep->com.tdev, skb, ep->l2t); } static int act_establish(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) @@ -795,7 +824,7 @@ static int update_rx_credits(struct iwch_ep *ep, u32 credits) OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid)); req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1)); skb->priority = CPL_PRIORITY_ACK; - cxgb3_ofld_send(ep->com.tdev, skb); + iwch_cxgb3_ofld_send(ep->com.tdev, skb); return credits; } @@ -1127,8 +1156,8 @@ static int abort_rpl(struct t3cdev *tdev, 
struct sk_buff *skb, void *ctx) * We get 2 abort replies from the HW. The first one must * be ignored except for scribbling that we need one more. */ - if (!(ep->flags & ABORT_REQ_IN_PROGRESS)) { - ep->flags |= ABORT_REQ_IN_PROGRESS; + if (!(ep->com.flags & ABORT_REQ_IN_PROGRESS)) { + ep->com.flags |= ABORT_REQ_IN_PROGRESS; return CPL_RET_BUF_DONE; } @@ -1203,8 +1232,7 @@ static int listen_start(struct iwch_listen_ep *ep) req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK)); skb->priority = 1; - cxgb3_ofld_send(ep->com.tdev, skb); - return 0; + return iwch_cxgb3_ofld_send(ep->com.tdev, skb); } static int pass_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) @@ -1237,8 +1265,7 @@ static int listen_stop(struct iwch_listen_ep *ep) req->cpu_idx = 0; OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid)); skb->priority = 1; - cxgb3_ofld_send(ep->com.tdev, skb); - return 0; + return iwch_cxgb3_ofld_send(ep->com.tdev, skb); } static int close_listsrv_rpl(struct t3cdev *tdev, struct sk_buff *skb, @@ -1286,7 +1313,7 @@ static void accept_cr(struct iwch_ep *ep, __be32 peer_ip, struct sk_buff *skb) rpl->opt2 = htonl(opt2); rpl->rsvd = rpl->opt2; /* workaround for HW bug */ skb->priority = CPL_PRIORITY_SETUP; - l2t_send(ep->com.tdev, skb, ep->l2t); + iwch_l2t_send(ep->com.tdev, skb, ep->l2t); return; } @@ -1315,7 +1342,7 @@ static void reject_cr(struct t3cdev *tdev, u32 hwtid, __be32 peer_ip, rpl->opt0l_status = htonl(CPL_PASS_OPEN_REJECT); rpl->opt2 = 0; rpl->rsvd = rpl->opt2; - cxgb3_ofld_send(tdev, skb); + iwch_cxgb3_ofld_send(tdev, skb); } } @@ -1534,8 +1561,8 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) * We get 2 peer aborts from the HW. The first one must * be ignored except for scribbling that we need one more. 
*/ - if (!(ep->flags & PEER_ABORT_IN_PROGRESS)) { - ep->flags |= PEER_ABORT_IN_PROGRESS; + if (!(ep->com.flags & PEER_ABORT_IN_PROGRESS)) { + ep->com.flags |= PEER_ABORT_IN_PROGRESS; return CPL_RET_BUF_DONE; } @@ -1613,7 +1640,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) rpl->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid)); rpl->cmd = CPL_ABORT_NO_RST; - cxgb3_ofld_send(ep->com.tdev, rpl_skb); + iwch_cxgb3_ofld_send(ep->com.tdev, rpl_skb); out: if (release) release_ep_resources(ep); @@ -2017,8 +2044,11 @@ int iwch_destroy_listen(struct iw_cm_id *cm_id) ep->com.rpl_done = 0; ep->com.rpl_err = 0; err = listen_stop(ep); + if (err) + goto done; wait_event(ep->com.waitq, ep->com.rpl_done); cxgb3_free_stid(ep->com.tdev, ep->stid); +done: err = ep->com.rpl_err; cm_id->rem_ref(cm_id); put_ep(&ep->com); @@ -2030,12 +2060,22 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp) int ret=0; unsigned long flags; int close = 0; + int fatal = 0; + struct t3cdev *tdev; + struct cxio_rdev *rdev; spin_lock_irqsave(&ep->com.lock, flags); PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep, states[ep->com.state], abrupt); + tdev = (struct t3cdev *)ep->com.tdev; + rdev = (struct cxio_rdev *)tdev->ulp; + if (cxio_fatal_error(rdev)) { + fatal = 1; + close_complete_upcall(ep); + ep->com.state = DEAD; + } switch (ep->com.state) { case MPA_REQ_WAIT: case MPA_REQ_SENT: @@ -2075,7 +2115,11 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp) ret = send_abort(ep, NULL, gfp); else ret = send_halfclose(ep, gfp); + if (ret) + fatal = 1; } + if (fatal) + release_ep_resources(ep); return ret; } diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.h b/drivers/infiniband/hw/cxgb3/iwch_cm.h index d7c7e09f099..43c0aea7ead 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.h +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.h @@ -147,6 +147,7 @@ enum iwch_ep_state { enum iwch_ep_flags { PEER_ABORT_IN_PROGRESS = (1 << 0), ABORT_REQ_IN_PROGRESS = (1 << 1), + RELEASE_RESOURCES = (1 << 2), }; struct iwch_ep_common { @@ -161,6 +162,7 @@ struct iwch_ep_common { wait_queue_head_t waitq; int rpl_done; int rpl_err; + u32 flags; }; struct iwch_listen_ep { @@ -188,7 +190,6 @@ struct iwch_ep { u16 plen; u32 ird; u32 ord; - u32 flags; }; static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id) diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c index c758fbd5847..2f546a62533 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_qp.c +++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c @@ -751,7 +751,7 @@ int iwch_post_zb_read(struct iwch_qp *qhp) wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)| V_FW_RIWR_LEN(flit_cnt)); skb->priority = CPL_PRIORITY_DATA; - return cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb); + return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb); } /* @@ -783,7 +783,7 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg) V_FW_RIWR_FLAGS(T3_COMPLETION_FLAG | T3_NOTIFY_FLAG)); wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)); skb->priority = CPL_PRIORITY_DATA; - return cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb); + return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb); } /* diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 2ccb9d31771..ae3d7590346 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -394,8 +394,7 @@ static int 
mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) PAGE_SIZE, vma->vm_page_prot)) return -EAGAIN; } else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) { - /* FIXME want pgprot_writecombine() for BlueFlame pages */ - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); if (io_remap_pfn_range(vma, vma->vm_start, to_mucontext(context)->uar.pfn + diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h index 04b12ad2339..17621de54a9 100644 --- a/drivers/infiniband/hw/nes/nes.h +++ b/drivers/infiniband/hw/nes/nes.h @@ -289,8 +289,8 @@ static inline __le32 get_crc_value(struct nes_v4_quad *nes_quad) static inline void set_wqe_64bit_value(__le32 *wqe_words, u32 index, u64 value) { - wqe_words[index] = cpu_to_le32((u32) ((unsigned long)value)); - wqe_words[index + 1] = cpu_to_le32((u32)(upper_32_bits((unsigned long)value))); + wqe_words[index] = cpu_to_le32((u32) value); + wqe_words[index + 1] = cpu_to_le32(upper_32_bits(value)); } static inline void diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 52425154acd..dbd9a75474e 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -426,6 +426,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, if (type == NES_TIMER_TYPE_CLOSE) { new_send->timetosend += (HZ/10); if (cm_node->recv_entry) { + kfree(new_send); WARN_ON(1); return -EINVAL; } @@ -445,8 +446,8 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb, if (ret != NETDEV_TX_OK) { nes_debug(NES_DBG_CM, "Error sending packet %p " "(jiffies = %lu)\n", new_send, jiffies); - atomic_dec(&new_send->skb->users); new_send->timetosend = jiffies; + ret = NETDEV_TX_OK; } else { cm_packets_sent++; if (!send_retrans) { @@ -630,7 +631,6 @@ static void nes_cm_timer_tick(unsigned long pass) nes_debug(NES_DBG_CM, "rexmit failed for " "node=%p\n", cm_node); cm_packets_bounced++; - atomic_dec(&send_entry->skb->users); send_entry->retrycount--; nexttimeout = jiffies + NES_SHORT_TIME; settimer = 1; @@ -666,11 +666,6 @@ static void nes_cm_timer_tick(unsigned long pass) spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); rem_ref_cm_node(cm_node->cm_core, cm_node); - if (ret != NETDEV_TX_OK) { - nes_debug(NES_DBG_CM, "rexmit failed for cm_node=%p\n", - cm_node); - break; - } } if (settimer) { @@ -1262,7 +1257,6 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core, cm_node->nesqp = NULL; } - cm_node->freed = 1; kfree(cm_node); return 0; } @@ -1999,13 +1993,17 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core, if (loopbackremotelistener == NULL) { create_event(cm_node, NES_CM_EVENT_ABORTED); } else { - atomic_inc(&cm_loopbacks); loopback_cm_info = *cm_info; loopback_cm_info.loc_port = cm_info->rem_port; loopback_cm_info.rem_port = cm_info->loc_port; loopback_cm_info.cm_id = loopbackremotelistener->cm_id; loopbackremotenode = make_cm_node(cm_core, nesvnic, &loopback_cm_info, loopbackremotelistener); + if (!loopbackremotenode) { + rem_ref_cm_node(cm_node->cm_core, cm_node); + return NULL; + } + atomic_inc(&cm_loopbacks); loopbackremotenode->loopbackpartner = cm_node; loopbackremotenode->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE; @@ -2690,6 +2688,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) struct ib_mr *ibmr = NULL; struct ib_phys_buf ibphysbuf; struct nes_pd *nespd; + u64 tagged_offset; @@ -2755,10 
+2754,11 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) ibphysbuf.addr = nesqp->ietf_frame_pbase; ibphysbuf.size = conn_param->private_data_len + sizeof(struct ietf_mpa_frame); + tagged_offset = (u64)(unsigned long)nesqp->ietf_frame; ibmr = nesibdev->ibdev.reg_phys_mr((struct ib_pd *)nespd, &ibphysbuf, 1, IB_ACCESS_LOCAL_WRITE, - (u64 *)&nesqp->ietf_frame); + &tagged_offset); if (!ibmr) { nes_debug(NES_DBG_CM, "Unable to register memory region" "for lSMM for cm_node = %p \n", @@ -2782,7 +2782,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) sizeof(struct ietf_mpa_frame)); set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_FRAG0_LOW_IDX, - (u64)nesqp->ietf_frame); + (u64)(unsigned long)nesqp->ietf_frame); wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] = cpu_to_le32(conn_param->private_data_len + sizeof(struct ietf_mpa_frame)); diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h index d5f778202eb..80bba189257 100644 --- a/drivers/infiniband/hw/nes/nes_cm.h +++ b/drivers/infiniband/hw/nes/nes_cm.h @@ -298,7 +298,6 @@ struct nes_cm_node { struct nes_vnic *nesvnic; int apbvt_set; int accept_pend; - int freed; struct list_head timer_entry; struct list_head reset_entry; struct nes_qp *nesqp; diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 52e734042b8..d6fc9ae4406 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -46,6 +46,10 @@ static unsigned int nes_lro_max_aggr = NES_LRO_MAX_AGGR; module_param(nes_lro_max_aggr, uint, 0444); MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation"); +static int wide_ppm_offset; +module_param(wide_ppm_offset, int, 0644); +MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm"); + static u32 crit_err_count; u32 int_mod_timer_init; u32 int_mod_cq_depth_256; @@ -546,8 +550,11 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) { msleep(1); } if (int_cnt > 1) { + u32 sds; spin_lock_irqsave(&nesadapter->phy_lock, flags); - nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, 0x0000F088); + sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1); + sds |= 0x00000040; + nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, sds); mh_detected++; reset_value = nes_read32(nesdev->regs+NES_SOFTWARE_RESET); reset_value |= 0x0000003d; @@ -736,39 +743,49 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count, { int i; u32 u32temp; - u32 serdes_common_control; + u32 sds; if (hw_rev != NE020_REV) { /* init serdes 0 */ + if (wide_ppm_offset && (nesadapter->phy_type[0] == NES_PHY_TYPE_CX4)) + nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000FFFAA); + else + nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF); - nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF); if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) { - serdes_common_control = nes_read_indexed(nesdev, - NES_IDX_ETH_SERDES_COMMON_CONTROL0); - serdes_common_control |= 0x000000100; - nes_write_indexed(nesdev, - NES_IDX_ETH_SERDES_COMMON_CONTROL0, - serdes_common_control); - } else if (!OneG_Mode) { - nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000); + sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0); + sds |= 0x00000100; + nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds); } - if 
(((port_count > 1) && - (nesadapter->phy_type[0] != NES_PHY_TYPE_PUMA_1G)) || - ((port_count > 2) && - (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G))) { - /* init serdes 1 */ - nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000FF); - if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) { - serdes_common_control = nes_read_indexed(nesdev, - NES_IDX_ETH_SERDES_COMMON_CONTROL1); - serdes_common_control |= 0x000000100; - nes_write_indexed(nesdev, - NES_IDX_ETH_SERDES_COMMON_CONTROL1, - serdes_common_control); - } else if (!OneG_Mode) { - nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE1, 0x11110000); - } + if (!OneG_Mode) + nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000); + + if (port_count < 2) + return 0; + + /* init serdes 1 */ + switch (nesadapter->phy_type[1]) { + case NES_PHY_TYPE_ARGUS: + case NES_PHY_TYPE_SFP_D: + nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x00000000); + nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x00000000); + break; + case NES_PHY_TYPE_CX4: + sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1); + sds &= 0xFFFFFFBF; + nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, sds); + if (wide_ppm_offset) + nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000FFFAA); + else + nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000000FF); + break; + case NES_PHY_TYPE_PUMA_1G: + sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1); + sds |= 0x000000100; + nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1, sds); } + if (!OneG_Mode) + nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE1, 0x11110000); } else { /* init serdes 0 */ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, 0x00000008); @@ -1259,203 +1276,162 @@ int nes_init_phy(struct nes_device *nesdev) { struct nes_adapter *nesadapter = nesdev->nesadapter; u32 counter = 0; - u32 sds_common_control0; + u32 sds; u32 mac_index = nesdev->mac_index; u32 tx_config = 0; u16 phy_data; u32 temp_phy_data = 0; u32 temp_phy_data2 = 0; - u32 i = 0; + u8 phy_type = nesadapter->phy_type[mac_index]; + u8 phy_index = nesadapter->phy_index[mac_index]; if ((nesadapter->OneG_Mode) && - (nesadapter->phy_type[mac_index] != NES_PHY_TYPE_PUMA_1G)) { + (phy_type != NES_PHY_TYPE_PUMA_1G)) { nes_debug(NES_DBG_PHY, "1G PHY, mac_index = %d.\n", mac_index); - if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_1G) { - printk(PFX "%s: Programming mdc config for 1G\n", __func__); + if (phy_type == NES_PHY_TYPE_1G) { tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG); tx_config &= 0xFFFFFFE3; tx_config |= 0x04; nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config); } - nes_read_1G_phy_reg(nesdev, 1, nesadapter->phy_index[mac_index], &phy_data); - nes_debug(NES_DBG_PHY, "Phy data from register 1 phy address %u = 0x%X.\n", - nesadapter->phy_index[mac_index], phy_data); - nes_write_1G_phy_reg(nesdev, 23, nesadapter->phy_index[mac_index], 0xb000); + nes_read_1G_phy_reg(nesdev, 1, phy_index, &phy_data); + nes_write_1G_phy_reg(nesdev, 23, phy_index, 0xb000); /* Reset the PHY */ - nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], 0x8000); + nes_write_1G_phy_reg(nesdev, 0, phy_index, 0x8000); udelay(100); counter = 0; do { - nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data); - nes_debug(NES_DBG_PHY, "Phy data from register 0 = 0x%X.\n", phy_data); - if (counter++ > 100) break; + nes_read_1G_phy_reg(nesdev, 0, phy_index, 
&phy_data); + if (counter++ > 100) + break; } while (phy_data & 0x8000); /* Setting no phy loopback */ phy_data &= 0xbfff; phy_data |= 0x1140; - nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], phy_data); - nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data); - nes_debug(NES_DBG_PHY, "Phy data from register 0 = 0x%X.\n", phy_data); - - nes_read_1G_phy_reg(nesdev, 0x17, nesadapter->phy_index[mac_index], &phy_data); - nes_debug(NES_DBG_PHY, "Phy data from register 0x17 = 0x%X.\n", phy_data); - - nes_read_1G_phy_reg(nesdev, 0x1e, nesadapter->phy_index[mac_index], &phy_data); - nes_debug(NES_DBG_PHY, "Phy data from register 0x1e = 0x%X.\n", phy_data); + nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data); + nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); + nes_read_1G_phy_reg(nesdev, 0x17, phy_index, &phy_data); + nes_read_1G_phy_reg(nesdev, 0x1e, phy_index, &phy_data); /* Setting the interrupt mask */ - nes_read_1G_phy_reg(nesdev, 0x19, nesadapter->phy_index[mac_index], &phy_data); - nes_debug(NES_DBG_PHY, "Phy data from register 0x19 = 0x%X.\n", phy_data); - nes_write_1G_phy_reg(nesdev, 0x19, nesadapter->phy_index[mac_index], 0xffee); - - nes_read_1G_phy_reg(nesdev, 0x19, nesadapter->phy_index[mac_index], &phy_data); - nes_debug(NES_DBG_PHY, "Phy data from register 0x19 = 0x%X.\n", phy_data); + nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data); + nes_write_1G_phy_reg(nesdev, 0x19, phy_index, 0xffee); + nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data); /* turning on flow control */ - nes_read_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index], &phy_data); - nes_debug(NES_DBG_PHY, "Phy data from register 0x4 = 0x%X.\n", phy_data); - nes_write_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index], - (phy_data & ~(0x03E0)) | 0xc00); - /* nes_write_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index], - phy_data | 0xc00); */ - nes_read_1G_phy_reg(nesdev, 4, nesadapter->phy_index[mac_index], &phy_data); - nes_debug(NES_DBG_PHY, "Phy data from register 0x4 = 0x%X.\n", phy_data); - - nes_read_1G_phy_reg(nesdev, 9, nesadapter->phy_index[mac_index], &phy_data); - nes_debug(NES_DBG_PHY, "Phy data from register 0x9 = 0x%X.\n", phy_data); - /* Clear Half duplex */ - nes_write_1G_phy_reg(nesdev, 9, nesadapter->phy_index[mac_index], - phy_data & ~(0x0100)); - nes_read_1G_phy_reg(nesdev, 9, nesadapter->phy_index[mac_index], &phy_data); - nes_debug(NES_DBG_PHY, "Phy data from register 0x9 = 0x%X.\n", phy_data); - - nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data); - nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], phy_data | 0x0300); - } else { - if ((nesadapter->phy_type[mac_index] == NES_PHY_TYPE_IRIS) || - (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_ARGUS)) { - /* setup 10G MDIO operation */ - tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG); - tx_config &= 0xFFFFFFE3; - tx_config |= 0x15; - nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config); - } - if ((nesadapter->phy_type[mac_index] == NES_PHY_TYPE_ARGUS)) { - nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7ee); + nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data); + nes_write_1G_phy_reg(nesdev, 4, phy_index, (phy_data & ~(0x03E0)) | 0xc00); + nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data); - temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); - mdelay(10); - nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7ee); - temp_phy_data2 = 
(u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); + /* Clear Half duplex */ + nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data); + nes_write_1G_phy_reg(nesdev, 9, phy_index, phy_data & ~(0x0100)); + nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data); - /* - * if firmware is already running (like from a - * driver un-load/load, don't do anything. - */ - if (temp_phy_data == temp_phy_data2) { - /* configure QT2505 AMCC PHY */ - nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0x0000, 0x8000); - nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc300, 0x0000); - nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc302, 0x0044); - nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc318, 0x0052); - nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc319, 0x0008); - nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc31a, 0x0098); - nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0026, 0x0E00); - nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0027, 0x0001); - nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0028, 0xA528); + nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); + nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data | 0x0300); - /* - * remove micro from reset; chip boots from ROM, - * uploads EEPROM f/w image, uC executes f/w - */ - nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc300, 0x0002); + return 0; + } - /* - * wait for heart beat to start to - * know loading is done - */ - counter = 0; - do { - nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7ee); - temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); - if (counter++ > 1000) { - nes_debug(NES_DBG_PHY, "AMCC PHY- breaking from heartbeat check <this is bad!!!> \n"); - break; - } - mdelay(100); - nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7ee); - temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); - } while ((temp_phy_data2 == temp_phy_data)); + if ((phy_type == NES_PHY_TYPE_IRIS) || + (phy_type == NES_PHY_TYPE_ARGUS) || + (phy_type == NES_PHY_TYPE_SFP_D)) { + /* setup 10G MDIO operation */ + tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG); + tx_config &= 0xFFFFFFE3; + tx_config |= 0x15; + nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config); + } + if ((phy_type == NES_PHY_TYPE_ARGUS) || + (phy_type == NES_PHY_TYPE_SFP_D)) { + /* Check firmware heartbeat */ + nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); + temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); + udelay(1500); + nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); + temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); + + if (temp_phy_data != temp_phy_data2) + return 0; - /* - * wait for tracking to start to know - * f/w is good to go - */ - counter = 0; - do { - nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7fd); - temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); - if (counter++ > 1000) { - nes_debug(NES_DBG_PHY, "AMCC PHY- breaking from status check <this is bad!!!> \n"); - break; - } - mdelay(1000); - /* - * nes_debug(NES_DBG_PHY, "AMCC PHY- phy_status not ready yet = 0x%02X\n", - * temp_phy_data); - */ - } while (((temp_phy_data & 0xff) != 0x50) && ((temp_phy_data & 0xff) != 0x70)); - - /* set LOS Control invert RXLOSB_I_PADINV */ - 
nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xd003, 0x0000); - /* set LOS Control to mask of RXLOSB_I */ - nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc314, 0x0042); - /* set LED1 to input mode (LED1 and LED2 share same LED) */ - nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xd006, 0x0007); - /* set LED2 to RX link_status and activity */ - nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xd007, 0x000A); - /* set LED3 to RX link_status */ - nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xd008, 0x0009); + /* no heartbeat, configure the PHY */ + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0x0000, 0x8000); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0000); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052); + if (phy_type == NES_PHY_TYPE_ARGUS) { + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0008); + } else { + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x0004); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0038); + } + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0098); + nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00); + nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0001); - /* - * reset the res-calibration on t2 - * serdes; ensures it is stable after - * the amcc phy is stable - */ + /* setup LEDs */ + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x0007); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x000A); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0009); - sds_common_control0 = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0); - sds_common_control0 |= 0x1; - nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds_common_control0); + nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0028, 0xA528); - /* release the res-calibration reset */ - sds_common_control0 &= 0xfffffffe; - nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds_common_control0); + /* Bring PHY out of reset */ + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0002); - i = 0; - while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040) - && (i++ < 5000)) { - /* mdelay(1); */ - } + /* Check for heartbeat */ + counter = 0; + mdelay(690); + nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); + temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); + do { + if (counter++ > 150) { + nes_debug(NES_DBG_PHY, "No PHY heartbeat\n"); + break; + } + mdelay(1); + nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); + temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); + } while ((temp_phy_data2 == temp_phy_data)); - /* - * wait for link train done before moving on, - * or will get an interupt storm - */ - counter = 0; - do { - temp_phy_data = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + - (0x200 * (nesdev->mac_index & 1))); - if (counter++ > 1000) { - nes_debug(NES_DBG_PHY, "AMCC PHY- breaking from link train wait <this is bad, link didnt train!!!>\n"); - break; - } - mdelay(1); - } while (((temp_phy_data & 0x0f1f0000) != 0x0f0f0000)); + /* wait for tracking */ + counter = 0; + do { + nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd); + temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); + if (counter++ > 300) { + 
nes_debug(NES_DBG_PHY, "PHY did not track\n"); + break; } - } + mdelay(10); + } while (((temp_phy_data & 0xff) != 0x50) && ((temp_phy_data & 0xff) != 0x70)); + + /* setup signal integrity */ + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd003, 0x0000); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00D, 0x00FE); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00E, 0x0032); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00F, 0x0002); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc314, 0x0063); + + /* reset serdes */ + sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + + mac_index * 0x200); + sds |= 0x1; + nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + + mac_index * 0x200, sds); + sds &= 0xfffffffe; + nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + + mac_index * 0x200, sds); + + counter = 0; + while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040) + && (counter++ < 5000)) + ; } return 0; } @@ -2359,6 +2335,7 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) u16 temp_phy_data; u32 pcs_val = 0x0f0f0000; u32 pcs_mask = 0x0f1f0000; + u32 cdr_ctrl; spin_lock_irqsave(&nesadapter->phy_lock, flags); if (nesadapter->mac_sw_state[mac_number] != NES_MAC_SW_IDLE) { @@ -2473,6 +2450,7 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) break; case NES_PHY_TYPE_ARGUS: + case NES_PHY_TYPE_SFP_D: /* clear the alarms */ nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0x0008); nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc001); @@ -2483,19 +2461,18 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9004); nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9005); /* check link status */ - nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1); + nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9003); temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); - u32temp = 100; - do { - nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1); - phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); - if ((phy_data == temp_phy_data) || (!(--u32temp))) - break; - temp_phy_data = phy_data; - } while (1); + nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021); + nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); + nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 3, 0x0021); + phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); + + phy_data = (!temp_phy_data && (phy_data == 0x8000)) ? 0x4 : 0x0; + nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n", - __func__, phy_data, nesadapter->mac_link_down ? "DOWN" : "UP"); + __func__, phy_data, nesadapter->mac_link_down[mac_index] ? 
"DOWN" : "UP"); break; case NES_PHY_TYPE_PUMA_1G: @@ -2511,6 +2488,17 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) } if (phy_data & 0x0004) { + if (wide_ppm_offset && + (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_CX4) && + (nesadapter->hw_rev != NE020_REV)) { + cdr_ctrl = nes_read_indexed(nesdev, + NES_IDX_ETH_SERDES_CDR_CONTROL0 + + mac_index * 0x200); + nes_write_indexed(nesdev, + NES_IDX_ETH_SERDES_CDR_CONTROL0 + + mac_index * 0x200, + cdr_ctrl | 0x000F0000); + } nesadapter->mac_link_down[mac_index] = 0; list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) { nes_debug(NES_DBG_PHY, "The Link is UP!!. linkup was %d\n", @@ -2525,6 +2513,17 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) } } } else { + if (wide_ppm_offset && + (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_CX4) && + (nesadapter->hw_rev != NE020_REV)) { + cdr_ctrl = nes_read_indexed(nesdev, + NES_IDX_ETH_SERDES_CDR_CONTROL0 + + mac_index * 0x200); + nes_write_indexed(nesdev, + NES_IDX_ETH_SERDES_CDR_CONTROL0 + + mac_index * 0x200, + cdr_ctrl & 0xFFF0FFFF); + } nesadapter->mac_link_down[mac_index] = 1; list_for_each_entry(nesvnic, &nesadapter->nesvnic_list[mac_index], list) { nes_debug(NES_DBG_PHY, "The Link is Down!!. linkup was %d\n", diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h index f41a8710d2a..c3654c6383f 100644 --- a/drivers/infiniband/hw/nes/nes_hw.h +++ b/drivers/infiniband/hw/nes/nes_hw.h @@ -35,12 +35,14 @@ #include <linux/inet_lro.h> +#define NES_PHY_TYPE_CX4 1 #define NES_PHY_TYPE_1G 2 #define NES_PHY_TYPE_IRIS 3 #define NES_PHY_TYPE_ARGUS 4 #define NES_PHY_TYPE_PUMA_1G 5 #define NES_PHY_TYPE_PUMA_10G 6 #define NES_PHY_TYPE_GLADIUS 7 +#define NES_PHY_TYPE_SFP_D 8 #define NES_MULTICAST_PF_MAX 8 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index ecb1f6fd627..c6e6611d301 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -1426,49 +1426,55 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd struct nes_vnic *nesvnic = netdev_priv(netdev); struct nes_device *nesdev = nesvnic->nesdev; struct nes_adapter *nesadapter = nesdev->nesadapter; + u32 mac_index = nesdev->mac_index; + u8 phy_type = nesadapter->phy_type[mac_index]; + u8 phy_index = nesadapter->phy_index[mac_index]; u16 phy_data; et_cmd->duplex = DUPLEX_FULL; et_cmd->port = PORT_MII; + et_cmd->maxtxpkt = 511; + et_cmd->maxrxpkt = 511; if (nesadapter->OneG_Mode) { et_cmd->speed = SPEED_1000; - if (nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_PUMA_1G) { + if (phy_type == NES_PHY_TYPE_PUMA_1G) { et_cmd->supported = SUPPORTED_1000baseT_Full; et_cmd->advertising = ADVERTISED_1000baseT_Full; et_cmd->autoneg = AUTONEG_DISABLE; et_cmd->transceiver = XCVR_INTERNAL; - et_cmd->phy_address = nesdev->mac_index; + et_cmd->phy_address = mac_index; } else { - et_cmd->supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg; - et_cmd->advertising = ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg; - nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index], &phy_data); + et_cmd->supported = SUPPORTED_1000baseT_Full + | SUPPORTED_Autoneg; + et_cmd->advertising = ADVERTISED_1000baseT_Full + | ADVERTISED_Autoneg; + nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); if (phy_data & 0x1000) et_cmd->autoneg = AUTONEG_ENABLE; else et_cmd->autoneg = AUTONEG_DISABLE; et_cmd->transceiver = XCVR_EXTERNAL; - 
et_cmd->phy_address = nesadapter->phy_index[nesdev->mac_index]; + et_cmd->phy_address = phy_index; } + return 0; + } + if ((phy_type == NES_PHY_TYPE_IRIS) || + (phy_type == NES_PHY_TYPE_ARGUS) || + (phy_type == NES_PHY_TYPE_SFP_D)) { + et_cmd->transceiver = XCVR_EXTERNAL; + et_cmd->port = PORT_FIBRE; + et_cmd->supported = SUPPORTED_FIBRE; + et_cmd->advertising = ADVERTISED_FIBRE; + et_cmd->phy_address = phy_index; } else { - if ((nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_IRIS) || - (nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_ARGUS)) { - et_cmd->transceiver = XCVR_EXTERNAL; - et_cmd->port = PORT_FIBRE; - et_cmd->supported = SUPPORTED_FIBRE; - et_cmd->advertising = ADVERTISED_FIBRE; - et_cmd->phy_address = nesadapter->phy_index[nesdev->mac_index]; - } else { - et_cmd->transceiver = XCVR_INTERNAL; - et_cmd->supported = SUPPORTED_10000baseT_Full; - et_cmd->advertising = ADVERTISED_10000baseT_Full; - et_cmd->phy_address = nesdev->mac_index; - } - et_cmd->speed = SPEED_10000; - et_cmd->autoneg = AUTONEG_DISABLE; + et_cmd->transceiver = XCVR_INTERNAL; + et_cmd->supported = SUPPORTED_10000baseT_Full; + et_cmd->advertising = ADVERTISED_10000baseT_Full; + et_cmd->phy_address = mac_index; } - et_cmd->maxtxpkt = 511; - et_cmd->maxrxpkt = 511; + et_cmd->speed = SPEED_10000; + et_cmd->autoneg = AUTONEG_DISABLE; return 0; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 5a76a551035..4c57f329dd5 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c @@ -70,12 +70,14 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) */ if (ppriv->pkey == pkey) { result = -ENOTUNIQ; + priv = NULL; goto err; } list_for_each_entry(priv, &ppriv->child_intfs, list) { if (priv->pkey == pkey) { result = -ENOTUNIQ; + priv = NULL; goto err; } } @@ -96,7 +98,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) result = ipoib_set_dev_features(priv, ppriv->ca); if (result) - goto device_init_failed; + goto err; priv->pkey = pkey; @@ -109,7 +111,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) ipoib_warn(ppriv, "failed to initialize subinterface: " "device %s, port %d", ppriv->ca->name, ppriv->port); - goto device_init_failed; + goto err; } result = register_netdevice(priv->dev); @@ -146,19 +148,19 @@ sysfs_failed: register_failed: ipoib_dev_cleanup(priv->dev); -device_init_failed: - free_netdev(priv->dev); - err: mutex_unlock(&ppriv->vlan_mutex); rtnl_unlock(); + if (priv) + free_netdev(priv->dev); + return result; } int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) { struct ipoib_dev_priv *ppriv, *priv, *tpriv; - int ret = -ENOENT; + struct net_device *dev = NULL; if (!capable(CAP_NET_ADMIN)) return -EPERM; @@ -172,14 +174,17 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) unregister_netdevice(priv->dev); ipoib_dev_cleanup(priv->dev); list_del(&priv->list); - free_netdev(priv->dev); - - ret = 0; + dev = priv->dev; break; } } mutex_unlock(&ppriv->vlan_mutex); rtnl_unlock(); - return ret; + if (dev) { + free_netdev(dev); + return 0; + } + + return -ENODEV; } diff --git a/drivers/input/input.c b/drivers/input/input.c index ec3db3ade11..d44065d2e66 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -132,6 +132,11 @@ static void input_start_autorepeat(struct input_dev *dev, int code) } } +static void input_stop_autorepeat(struct input_dev *dev) +{ + del_timer(&dev->timer); +} + #define 
INPUT_IGNORE_EVENT 0 #define INPUT_PASS_TO_HANDLERS 1 #define INPUT_PASS_TO_DEVICE 2 @@ -167,6 +172,8 @@ static void input_handle_event(struct input_dev *dev, __change_bit(code, dev->key); if (value) input_start_autorepeat(dev, code); + else + input_stop_autorepeat(dev); } disposition = INPUT_PASS_TO_HANDLERS; @@ -737,11 +744,11 @@ static inline void input_wakeup_procfs_readers(void) static unsigned int input_proc_devices_poll(struct file *file, poll_table *wait) { - int state = input_devices_state; - poll_wait(file, &input_devices_poll_wait, wait); - if (state != input_devices_state) + if (file->f_version != input_devices_state) { + file->f_version = input_devices_state; return POLLIN | POLLRDNORM; + } return 0; } diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index 45470f18d7e..f999dc60c3b 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c @@ -229,7 +229,8 @@ struct atkbd { /* * System-specific ketymap fixup routine */ -static void (*atkbd_platform_fixup)(struct atkbd *); +static void (*atkbd_platform_fixup)(struct atkbd *, const void *data); +static void *atkbd_platform_fixup_data; static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf, ssize_t (*handler)(struct atkbd *, char *)); @@ -834,87 +835,64 @@ static void atkbd_disconnect(struct serio *serio) } /* - * Most special keys (Fn+F?) on Dell laptops do not generate release - * events so we have to do it ourselves. + * generate release events for the keycodes given in data */ -static void atkbd_dell_laptop_keymap_fixup(struct atkbd *atkbd) +static void atkbd_apply_forced_release_keylist(struct atkbd* atkbd, + const void *data) { - static const unsigned int forced_release_keys[] = { - 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8f, 0x93, - }; - int i; + const unsigned int *keys = data; + unsigned int i; if (atkbd->set == 2) - for (i = 0; i < ARRAY_SIZE(forced_release_keys); i++) - __set_bit(forced_release_keys[i], - atkbd->force_release_mask); + for (i = 0; keys[i] != -1U; i++) + __set_bit(keys[i], atkbd->force_release_mask); } /* + * Most special keys (Fn+F?) on Dell laptops do not generate release + * events so we have to do it ourselves. 
+ */ +static unsigned int atkbd_dell_laptop_forced_release_keys[] = { + 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8f, 0x93, -1U +}; + +/* * Perform fixup for HP system that doesn't generate release * for its video switch */ -static void atkbd_hp_keymap_fixup(struct atkbd *atkbd) -{ - static const unsigned int forced_release_keys[] = { - 0x94, - }; - int i; - - if (atkbd->set == 2) - for (i = 0; i < ARRAY_SIZE(forced_release_keys); i++) - __set_bit(forced_release_keys[i], - atkbd->force_release_mask); -} +static unsigned int atkbd_hp_forced_release_keys[] = { + 0x94, -1U +}; /* * Inventec system with broken key release on volume keys */ -static void atkbd_inventec_keymap_fixup(struct atkbd *atkbd) -{ - const unsigned int forced_release_keys[] = { - 0xae, 0xb0, - }; - int i; - - if (atkbd->set == 2) - for (i = 0; i < ARRAY_SIZE(forced_release_keys); i++) - __set_bit(forced_release_keys[i], - atkbd->force_release_mask); -} +static unsigned int atkbd_inventec_forced_release_keys[] = { + 0xae, 0xb0, -1U +}; /* * Perform fixup for HP Pavilion ZV6100 laptop that doesn't generate release * for its volume buttons */ -static void atkbd_hp_zv6100_keymap_fixup(struct atkbd *atkbd) -{ - const unsigned int forced_release_keys[] = { - 0xae, 0xb0, - }; - int i; - - if (atkbd->set == 2) - for (i = 0; i < ARRAY_SIZE(forced_release_keys); i++) - __set_bit(forced_release_keys[i], - atkbd->force_release_mask); -} +static unsigned int atkbd_hp_zv6100_forced_release_keys[] = { + 0xae, 0xb0, -1U +}; /* * Samsung NC10 with Fn+F? key release not working */ -static void atkbd_samsung_keymap_fixup(struct atkbd *atkbd) -{ - const unsigned int forced_release_keys[] = { - 0x82, 0x83, 0x84, 0x86, 0x88, 0x89, 0xb3, 0xf7, 0xf9, - }; - int i; +static unsigned int atkbd_samsung_forced_release_keys[] = { + 0x82, 0x83, 0x84, 0x86, 0x88, 0x89, 0xb3, 0xf7, 0xf9, -1U +}; - if (atkbd->set == 2) - for (i = 0; i < ARRAY_SIZE(forced_release_keys); i++) - __set_bit(forced_release_keys[i], - atkbd->force_release_mask); -} +/* + * The volume up and volume down special keys on a Fujitsu Amilo PA 1510 laptop + * do not generate release events so we have to do it ourselves. 
+ */ +static unsigned int atkbd_amilo_pa1510_forced_release_keys[] = { + 0xb0, 0xae, -1U +}; /* * atkbd_set_keycode_table() initializes keyboard's keycode table @@ -967,7 +945,7 @@ static void atkbd_set_keycode_table(struct atkbd *atkbd) * Perform additional fixups */ if (atkbd_platform_fixup) - atkbd_platform_fixup(atkbd); + atkbd_platform_fixup(atkbd, atkbd_platform_fixup_data); } /* @@ -1492,9 +1470,11 @@ static ssize_t atkbd_show_err_count(struct atkbd *atkbd, char *buf) return sprintf(buf, "%lu\n", atkbd->err_count); } -static int __init atkbd_setup_fixup(const struct dmi_system_id *id) +static int __init atkbd_setup_forced_release(const struct dmi_system_id *id) { - atkbd_platform_fixup = id->driver_data; + atkbd_platform_fixup = atkbd_apply_forced_release_keylist; + atkbd_platform_fixup_data = id->driver_data; + return 0; } @@ -1505,8 +1485,8 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */ }, - .callback = atkbd_setup_fixup, - .driver_data = atkbd_dell_laptop_keymap_fixup, + .callback = atkbd_setup_forced_release, + .driver_data = atkbd_dell_laptop_forced_release_keys, }, { .ident = "Dell Laptop", @@ -1514,8 +1494,8 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), DMI_MATCH(DMI_CHASSIS_TYPE, "8"), /* Portable */ }, - .callback = atkbd_setup_fixup, - .driver_data = atkbd_dell_laptop_keymap_fixup, + .callback = atkbd_setup_forced_release, + .driver_data = atkbd_dell_laptop_forced_release_keys, }, { .ident = "HP 2133", @@ -1523,8 +1503,8 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "HP 2133"), }, - .callback = atkbd_setup_fixup, - .driver_data = atkbd_hp_keymap_fixup, + .callback = atkbd_setup_forced_release, + .driver_data = atkbd_hp_forced_release_keys, }, { .ident = "HP Pavilion ZV6100", @@ -1532,8 +1512,8 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion ZV6100"), }, - .callback = atkbd_setup_fixup, - .driver_data = atkbd_hp_zv6100_keymap_fixup, + .callback = atkbd_setup_forced_release, + .driver_data = atkbd_hp_zv6100_forced_release_keys, }, { .ident = "Inventec Symphony", @@ -1541,8 +1521,8 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = { DMI_MATCH(DMI_SYS_VENDOR, "INVENTEC"), DMI_MATCH(DMI_PRODUCT_NAME, "SYMPHONY 6.0/7.0"), }, - .callback = atkbd_setup_fixup, - .driver_data = atkbd_inventec_keymap_fixup, + .callback = atkbd_setup_forced_release, + .driver_data = atkbd_inventec_forced_release_keys, }, { .ident = "Samsung NC10", @@ -1550,8 +1530,17 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = { DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), DMI_MATCH(DMI_PRODUCT_NAME, "NC10"), }, - .callback = atkbd_setup_fixup, - .driver_data = atkbd_samsung_keymap_fixup, + .callback = atkbd_setup_forced_release, + .driver_data = atkbd_samsung_forced_release_keys, + }, + { + .ident = "Fujitsu Amilo PA 1510", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), + DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pa 1510"), + }, + .callback = atkbd_setup_forced_release, + .driver_data = atkbd_amilo_pa1510_forced_release_keys, }, { } }; diff --git a/drivers/input/keyboard/bf54x-keys.c b/drivers/input/keyboard/bf54x-keys.c index ee855c5202e..e94b7d735ac 100644 --- 
a/drivers/input/keyboard/bf54x-keys.c +++ b/drivers/input/keyboard/bf54x-keys.c @@ -211,8 +211,8 @@ static int __devinit bfin_kpad_probe(struct platform_device *pdev) if (!pdata->debounce_time || pdata->debounce_time > MAX_MULT || !pdata->coldrive_time || pdata->coldrive_time > MAX_MULT) { - printk(KERN_ERR DRV_NAME - ": Invalid Debounce/Columdrive Time from pdata\n"); + printk(KERN_WARNING DRV_NAME + ": Invalid Debounce/Columndrive Time in platform data\n"); bfin_write_KPAD_MSEL(0xFF0); /* Default MSEL */ } else { bfin_write_KPAD_MSEL( diff --git a/drivers/input/keyboard/hilkbd.c b/drivers/input/keyboard/hilkbd.c index aacf71f3cd4..e9d639ec283 100644 --- a/drivers/input/keyboard/hilkbd.c +++ b/drivers/input/keyboard/hilkbd.c @@ -198,45 +198,28 @@ static void hil_do(unsigned char cmd, unsigned char *data, unsigned int len) } -/* initialise HIL */ -static int __init -hil_keyb_init(void) +/* initialize HIL */ +static int __devinit hil_keyb_init(void) { unsigned char c; unsigned int i, kbid; wait_queue_head_t hil_wait; int err; - if (hil_dev.dev) { + if (hil_dev.dev) return -ENODEV; /* already initialized */ - } + init_waitqueue_head(&hil_wait); spin_lock_init(&hil_dev.lock); + hil_dev.dev = input_allocate_device(); if (!hil_dev.dev) return -ENOMEM; -#if defined(CONFIG_HP300) - if (!MACH_IS_HP300) { - err = -ENODEV; - goto err1; - } - if (!hwreg_present((void *)(HILBASE + HIL_DATA))) { - printk(KERN_ERR "HIL: hardware register was not found\n"); - err = -ENODEV; - goto err1; - } - if (!request_region(HILBASE + HIL_DATA, 2, "hil")) { - printk(KERN_ERR "HIL: IOPORT region already used\n"); - err = -EIO; - goto err1; - } -#endif - err = request_irq(HIL_IRQ, hil_interrupt, 0, "hil", hil_dev.dev_id); if (err) { printk(KERN_ERR "HIL: Can't get IRQ\n"); - goto err2; + goto err1; } /* Turn on interrupts */ @@ -246,11 +229,9 @@ hil_keyb_init(void) hil_dev.valid = 0; /* clear any pending data */ hil_do(HIL_READKBDSADR, NULL, 0); - init_waitqueue_head(&hil_wait); - wait_event_interruptible_timeout(hil_wait, hil_dev.valid, 3*HZ); - if (!hil_dev.valid) { + wait_event_interruptible_timeout(hil_wait, hil_dev.valid, 3 * HZ); + if (!hil_dev.valid) printk(KERN_WARNING "HIL: timed out, assuming no keyboard present\n"); - } c = hil_dev.c; hil_dev.valid = 0; @@ -268,7 +249,7 @@ hil_keyb_init(void) for (i = 0; i < HIL_KEYCODES_SET1_TBLSIZE; i++) if (hphilkeyb_keycode[i] != KEY_RESERVED) - set_bit(hphilkeyb_keycode[i], hil_dev.dev->keybit); + __set_bit(hphilkeyb_keycode[i], hil_dev.dev->keybit); hil_dev.dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); hil_dev.dev->ledbit[0] = BIT_MASK(LED_NUML) | BIT_MASK(LED_CAPSL) | @@ -287,34 +268,45 @@ hil_keyb_init(void) err = input_register_device(hil_dev.dev); if (err) { printk(KERN_ERR "HIL: Can't register device\n"); - goto err3; + goto err2; } + printk(KERN_INFO "input: %s, ID %d at 0x%08lx (irq %d) found and attached\n", hil_dev.dev->name, kbid, HILBASE, HIL_IRQ); return 0; -err3: +err2: hil_do(HIL_INTOFF, NULL, 0); - disable_irq(HIL_IRQ); free_irq(HIL_IRQ, hil_dev.dev_id); -err2: -#if defined(CONFIG_HP300) - release_region(HILBASE + HIL_DATA, 2); err1: -#endif input_free_device(hil_dev.dev); hil_dev.dev = NULL; return err; } +static void __devexit hil_keyb_exit(void) +{ + if (HIL_IRQ) + free_irq(HIL_IRQ, hil_dev.dev_id); + + /* Turn off interrupts */ + hil_do(HIL_INTOFF, NULL, 0); + + input_unregister_device(hil_dev.dev); + hil_dev.dev = NULL; +} #if defined(CONFIG_PARISC) -static int __init -hil_init_chip(struct parisc_device *dev) +static int __devinit 
hil_probe_chip(struct parisc_device *dev) { + /* Only allow one HIL keyboard */ + if (hil_dev.dev) + return -ENODEV; + if (!dev->irq) { - printk(KERN_WARNING "HIL: IRQ not found for HIL bus at 0x%08lx\n", dev->hpa.start); + printk(KERN_WARNING "HIL: IRQ not found for HIL bus at 0x%p\n", + (void *)dev->hpa.start); return -ENODEV; } @@ -327,51 +319,79 @@ hil_init_chip(struct parisc_device *dev) return hil_keyb_init(); } +static int __devexit hil_remove_chip(struct parisc_device *dev) +{ + hil_keyb_exit(); + + return 0; +} + static struct parisc_device_id hil_tbl[] = { { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00073 }, { 0, } }; +#if 0 +/* Disabled to avoid conflicts with the HP SDC HIL drivers */ MODULE_DEVICE_TABLE(parisc, hil_tbl); +#endif static struct parisc_driver hil_driver = { - .name = "hil", - .id_table = hil_tbl, - .probe = hil_init_chip, + .name = "hil", + .id_table = hil_tbl, + .probe = hil_probe_chip, + .remove = __devexit_p(hil_remove_chip), }; -#endif /* CONFIG_PARISC */ - static int __init hil_init(void) { -#if defined(CONFIG_PARISC) return register_parisc_driver(&hil_driver); -#else - return hil_keyb_init(); -#endif } - static void __exit hil_exit(void) { - if (HIL_IRQ) { - disable_irq(HIL_IRQ); - free_irq(HIL_IRQ, hil_dev.dev_id); + unregister_parisc_driver(&hil_driver); +} + +#else /* !CONFIG_PARISC */ + +static int __init hil_init(void) +{ + int error; + + /* Only allow one HIL keyboard */ + if (hil_dev.dev) + return -EBUSY; + + if (!MACH_IS_HP300) + return -ENODEV; + + if (!hwreg_present((void *)(HILBASE + HIL_DATA))) { + printk(KERN_ERR "HIL: hardware register was not found\n"); + return -ENODEV; } - /* Turn off interrupts */ - hil_do(HIL_INTOFF, NULL, 0); + if (!request_region(HILBASE + HIL_DATA, 2, "hil")) { + printk(KERN_ERR "HIL: IOPORT region already used\n"); + return -EIO; + } - input_unregister_device(hil_dev.dev); + error = hil_keyb_init(); + if (error) { + release_region(HILBASE + HIL_DATA, 2); + return error; + } - hil_dev.dev = NULL; + return 0; +} -#if defined(CONFIG_PARISC) - unregister_parisc_driver(&hil_driver); -#else - release_region(HILBASE+HIL_DATA, 2); -#endif +static void __exit hil_exit(void) +{ + hil_keyb_exit(); + release_region(HILBASE + HIL_DATA, 2); } +#endif /* CONFIG_PARISC */ + module_init(hil_init); module_exit(hil_exit); diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 67e5553f699..203abac1e23 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -227,4 +227,27 @@ config INPUT_PCF50633_PMU Say Y to include support for delivering PMU events via input layer on NXP PCF50633. +config INPUT_GPIO_ROTARY_ENCODER + tristate "Rotary encoders connected to GPIO pins" + depends on GPIOLIB && GENERIC_GPIO + help + Say Y here to add support for rotary encoders connected to GPIO lines. + Check file:Documentation/input/rotary_encoder.txt for more + information. + + To compile this driver as a module, choose M here: the + module will be called rotary_encoder. + +config INPUT_RB532_BUTTON + tristate "Mikrotik Routerboard 532 button interface" + depends on MIKROTIK_RB532 + depends on GPIOLIB && GENERIC_GPIO + select INPUT_POLLDEV + help + Say Y here if you want support for the S1 button built into + Mikrotik's Routerboard 532. + + To compile this driver as a module, choose M here: the + module will be called rb532_button.
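The rotary-encoder driver that the INPUT_GPIO_ROTARY_ENCODER option builds (added as drivers/input/misc/rotary_encoder.c further down in this patch) is driven entirely by platform data, so a board file has to describe the two quadrature GPIO lines and the scale of the encoder before the option does anything. The block below is only an illustrative sketch and is not part of the patch: the GPIO numbers, step count and axis are made-up example values, while the field names match the platform data the new driver reads and the device name matches its DRV_NAME.

/*
 * Hypothetical board support code, shown for illustration only.  The GPIO
 * numbers and step count are example values; the structure fields mirror
 * what rotary_encoder.c reads from dev.platform_data, and the device name
 * must match the driver's DRV_NAME ("rotary-encoder").
 */
#include <linux/init.h>
#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/rotary_encoder.h>

static struct rotary_encoder_platform_data board_encoder_pdata = {
	.steps		= 24,		/* detents per full revolution (example) */
	.axis		= ABS_X,	/* position reported as EV_ABS/ABS_X */
	.gpio_a		= 19,		/* quadrature channel A GPIO (example) */
	.gpio_b		= 20,		/* quadrature channel B GPIO (example) */
	.inverted_a	= 0,
	.inverted_b	= 0,
};

static struct platform_device board_encoder_device = {
	.name	= "rotary-encoder",	/* matches DRV_NAME in rotary_encoder.c */
	.id	= 0,
	.dev	= {
		.platform_data = &board_encoder_pdata,
	},
};

static int __init board_add_rotary_encoder(void)
{
	return platform_device_register(&board_encoder_device);
}
arch_initcall(board_add_rotary_encoder);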
+ endif diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index bb62e6efacf..eb3f407baed 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -4,21 +4,23 @@ # Each configuration option enables a list of files. -obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o -obj-$(CONFIG_INPUT_PCSPKR) += pcspkr.o -obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o -obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o -obj-$(CONFIG_INPUT_COBALT_BTNS) += cobalt_btns.o -obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o -obj-$(CONFIG_INPUT_ATLAS_BTNS) += atlas_btns.o +obj-$(CONFIG_INPUT_APANEL) += apanel.o obj-$(CONFIG_INPUT_ATI_REMOTE) += ati_remote.o obj-$(CONFIG_INPUT_ATI_REMOTE2) += ati_remote2.o -obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o -obj-$(CONFIG_INPUT_POWERMATE) += powermate.o -obj-$(CONFIG_INPUT_YEALINK) += yealink.o +obj-$(CONFIG_INPUT_ATLAS_BTNS) += atlas_btns.o obj-$(CONFIG_INPUT_CM109) += cm109.o +obj-$(CONFIG_INPUT_COBALT_BTNS) += cobalt_btns.o obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o -obj-$(CONFIG_INPUT_UINPUT) += uinput.o -obj-$(CONFIG_INPUT_APANEL) += apanel.o -obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o +obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o +obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o +obj-$(CONFIG_INPUT_M68K_BEEP) += m68kspkr.o obj-$(CONFIG_INPUT_PCF50633_PMU) += pcf50633-input.o +obj-$(CONFIG_INPUT_PCSPKR) += pcspkr.o +obj-$(CONFIG_INPUT_POWERMATE) += powermate.o +obj-$(CONFIG_INPUT_RB532_BUTTON) += rb532_button.o +obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o +obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o +obj-$(CONFIG_INPUT_SPARCSPKR) += sparcspkr.o +obj-$(CONFIG_INPUT_UINPUT) += uinput.o +obj-$(CONFIG_INPUT_WISTRON_BTNS) += wistron_btns.o +obj-$(CONFIG_INPUT_YEALINK) += yealink.o diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c index 3c9988dc0e9..922c0514158 100644 --- a/drivers/input/misc/ati_remote2.c +++ b/drivers/input/misc/ati_remote2.c @@ -31,12 +31,73 @@ MODULE_LICENSE("GPL"); * newly configured "channel". 
*/ -static unsigned int channel_mask = 0xFFFF; -module_param(channel_mask, uint, 0644); +enum { + ATI_REMOTE2_MAX_CHANNEL_MASK = 0xFFFF, + ATI_REMOTE2_MAX_MODE_MASK = 0x1F, +}; + +static int ati_remote2_set_mask(const char *val, + struct kernel_param *kp, unsigned int max) +{ + unsigned long mask; + int ret; + + if (!val) + return -EINVAL; + + ret = strict_strtoul(val, 0, &mask); + if (ret) + return ret; + + if (mask & ~max) + return -EINVAL; + + *(unsigned int *)kp->arg = mask; + + return 0; +} + +static int ati_remote2_set_channel_mask(const char *val, + struct kernel_param *kp) +{ + pr_debug("%s()\n", __func__); + + return ati_remote2_set_mask(val, kp, ATI_REMOTE2_MAX_CHANNEL_MASK); +} + +static int ati_remote2_get_channel_mask(char *buffer, struct kernel_param *kp) +{ + pr_debug("%s()\n", __func__); + + return sprintf(buffer, "0x%04x", *(unsigned int *)kp->arg); +} + +static int ati_remote2_set_mode_mask(const char *val, struct kernel_param *kp) +{ + pr_debug("%s()\n", __func__); + + return ati_remote2_set_mask(val, kp, ATI_REMOTE2_MAX_MODE_MASK); +} + +static int ati_remote2_get_mode_mask(char *buffer, struct kernel_param *kp) +{ + pr_debug("%s()\n", __func__); + + return sprintf(buffer, "0x%02x", *(unsigned int *)kp->arg); +} + +static unsigned int channel_mask = ATI_REMOTE2_MAX_CHANNEL_MASK; +#define param_check_channel_mask(name, p) __param_check(name, p, unsigned int) +#define param_set_channel_mask ati_remote2_set_channel_mask +#define param_get_channel_mask ati_remote2_get_channel_mask +module_param(channel_mask, channel_mask, 0644); MODULE_PARM_DESC(channel_mask, "Bitmask of channels to accept <15:Channel16>...<1:Channel2><0:Channel1>"); -static unsigned int mode_mask = 0x1F; -module_param(mode_mask, uint, 0644); +static unsigned int mode_mask = ATI_REMOTE2_MAX_MODE_MASK; +#define param_check_mode_mask(name, p) __param_check(name, p, unsigned int) +#define param_set_mode_mask ati_remote2_set_mode_mask +#define param_get_mode_mask ati_remote2_get_mode_mask +module_param(mode_mask, mode_mask, 0644); MODULE_PARM_DESC(mode_mask, "Bitmask of modes to accept <4:PC><3:AUX4><2:AUX3><1:AUX2><0:AUX1>"); static struct usb_device_id ati_remote2_id_table[] = { @@ -133,12 +194,18 @@ struct ati_remote2 { u16 keycode[ATI_REMOTE2_MODES][ARRAY_SIZE(ati_remote2_key_table)]; unsigned int flags; + + unsigned int channel_mask; + unsigned int mode_mask; }; static int ati_remote2_probe(struct usb_interface *interface, const struct usb_device_id *id); static void ati_remote2_disconnect(struct usb_interface *interface); static int ati_remote2_suspend(struct usb_interface *interface, pm_message_t message); static int ati_remote2_resume(struct usb_interface *interface); +static int ati_remote2_reset_resume(struct usb_interface *interface); +static int ati_remote2_pre_reset(struct usb_interface *interface); +static int ati_remote2_post_reset(struct usb_interface *interface); static struct usb_driver ati_remote2_driver = { .name = "ati_remote2", @@ -147,6 +214,9 @@ static struct usb_driver ati_remote2_driver = { .id_table = ati_remote2_id_table, .suspend = ati_remote2_suspend, .resume = ati_remote2_resume, + .reset_resume = ati_remote2_reset_resume, + .pre_reset = ati_remote2_pre_reset, + .post_reset = ati_remote2_post_reset, .supports_autosuspend = 1, }; @@ -238,7 +308,7 @@ static void ati_remote2_input_mouse(struct ati_remote2 *ar2) channel = data[0] >> 4; - if (!((1 << channel) & channel_mask)) + if (!((1 << channel) & ar2->channel_mask)) return; mode = data[0] & 0x0F; @@ -250,7 +320,7 @@ static void 
ati_remote2_input_mouse(struct ati_remote2 *ar2) return; } - if (!((1 << mode) & mode_mask)) + if (!((1 << mode) & ar2->mode_mask)) return; input_event(idev, EV_REL, REL_X, (s8) data[1]); @@ -277,7 +347,7 @@ static void ati_remote2_input_key(struct ati_remote2 *ar2) channel = data[0] >> 4; - if (!((1 << channel) & channel_mask)) + if (!((1 << channel) & ar2->channel_mask)) return; mode = data[0] & 0x0F; @@ -305,7 +375,7 @@ static void ati_remote2_input_key(struct ati_remote2 *ar2) ar2->mode = mode; } - if (!((1 << mode) & mode_mask)) + if (!((1 << mode) & ar2->mode_mask)) return; index = ati_remote2_lookup(hw_code); @@ -410,7 +480,7 @@ static int ati_remote2_getkeycode(struct input_dev *idev, int index, mode; mode = scancode >> 8; - if (mode > ATI_REMOTE2_PC || !((1 << mode) & mode_mask)) + if (mode > ATI_REMOTE2_PC || !((1 << mode) & ar2->mode_mask)) return -EINVAL; index = ati_remote2_lookup(scancode & 0xFF); @@ -427,7 +497,7 @@ static int ati_remote2_setkeycode(struct input_dev *idev, int scancode, int keyc int index, mode, old_keycode; mode = scancode >> 8; - if (mode > ATI_REMOTE2_PC || !((1 << mode) & mode_mask)) + if (mode > ATI_REMOTE2_PC || !((1 << mode) & ar2->mode_mask)) return -EINVAL; index = ati_remote2_lookup(scancode & 0xFF); @@ -550,7 +620,7 @@ static void ati_remote2_urb_cleanup(struct ati_remote2 *ar2) } } -static int ati_remote2_setup(struct ati_remote2 *ar2) +static int ati_remote2_setup(struct ati_remote2 *ar2, unsigned int ch_mask) { int r, i, channel; @@ -565,8 +635,8 @@ static int ati_remote2_setup(struct ati_remote2 *ar2) channel = 0; for (i = 0; i < 16; i++) { - if ((1 << i) & channel_mask) { - if (!(~(1 << i) & 0xFFFF & channel_mask)) + if ((1 << i) & ch_mask) { + if (!(~(1 << i) & ch_mask)) channel = i + 1; break; } @@ -585,6 +655,99 @@ static int ati_remote2_setup(struct ati_remote2 *ar2) return 0; } +static ssize_t ati_remote2_show_channel_mask(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct usb_device *udev = to_usb_device(dev); + struct usb_interface *intf = usb_ifnum_to_if(udev, 0); + struct ati_remote2 *ar2 = usb_get_intfdata(intf); + + return sprintf(buf, "0x%04x\n", ar2->channel_mask); +} + +static ssize_t ati_remote2_store_channel_mask(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct usb_device *udev = to_usb_device(dev); + struct usb_interface *intf = usb_ifnum_to_if(udev, 0); + struct ati_remote2 *ar2 = usb_get_intfdata(intf); + unsigned long mask; + int r; + + if (strict_strtoul(buf, 0, &mask)) + return -EINVAL; + + if (mask & ~ATI_REMOTE2_MAX_CHANNEL_MASK) + return -EINVAL; + + r = usb_autopm_get_interface(ar2->intf[0]); + if (r) { + dev_err(&ar2->intf[0]->dev, + "%s(): usb_autopm_get_interface() = %d\n", __func__, r); + return r; + } + + mutex_lock(&ati_remote2_mutex); + + if (mask != ar2->channel_mask && !ati_remote2_setup(ar2, mask)) + ar2->channel_mask = mask; + + mutex_unlock(&ati_remote2_mutex); + + usb_autopm_put_interface(ar2->intf[0]); + + return count; +} + +static ssize_t ati_remote2_show_mode_mask(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct usb_device *udev = to_usb_device(dev); + struct usb_interface *intf = usb_ifnum_to_if(udev, 0); + struct ati_remote2 *ar2 = usb_get_intfdata(intf); + + return sprintf(buf, "0x%02x\n", ar2->mode_mask); +} + +static ssize_t ati_remote2_store_mode_mask(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct usb_device *udev = to_usb_device(dev); + 
struct usb_interface *intf = usb_ifnum_to_if(udev, 0); + struct ati_remote2 *ar2 = usb_get_intfdata(intf); + unsigned long mask; + + if (strict_strtoul(buf, 0, &mask)) + return -EINVAL; + + if (mask & ~ATI_REMOTE2_MAX_MODE_MASK) + return -EINVAL; + + ar2->mode_mask = mask; + + return count; +} + +static DEVICE_ATTR(channel_mask, 0644, ati_remote2_show_channel_mask, + ati_remote2_store_channel_mask); + +static DEVICE_ATTR(mode_mask, 0644, ati_remote2_show_mode_mask, + ati_remote2_store_mode_mask); + +static struct attribute *ati_remote2_attrs[] = { + &dev_attr_channel_mask.attr, + &dev_attr_mode_mask.attr, + NULL, +}; + +static struct attribute_group ati_remote2_attr_group = { + .attrs = ati_remote2_attrs, +}; + static int ati_remote2_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); @@ -615,7 +778,10 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d if (r) goto fail2; - r = ati_remote2_setup(ar2); + ar2->channel_mask = channel_mask; + ar2->mode_mask = mode_mask; + + r = ati_remote2_setup(ar2, ar2->channel_mask); if (r) goto fail2; @@ -624,19 +790,24 @@ static int ati_remote2_probe(struct usb_interface *interface, const struct usb_d strlcat(ar2->name, "ATI Remote Wonder II", sizeof(ar2->name)); - r = ati_remote2_input_init(ar2); + r = sysfs_create_group(&udev->dev.kobj, &ati_remote2_attr_group); if (r) goto fail2; + r = ati_remote2_input_init(ar2); + if (r) + goto fail3; + usb_set_intfdata(interface, ar2); interface->needs_remote_wakeup = 1; return 0; + fail3: + sysfs_remove_group(&udev->dev.kobj, &ati_remote2_attr_group); fail2: ati_remote2_urb_cleanup(ar2); - usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]); fail1: kfree(ar2); @@ -657,6 +828,8 @@ static void ati_remote2_disconnect(struct usb_interface *interface) input_unregister_device(ar2->idev); + sysfs_remove_group(&ar2->udev->dev.kobj, &ati_remote2_attr_group); + ati_remote2_urb_cleanup(ar2); usb_driver_release_interface(&ati_remote2_driver, ar2->intf[1]); @@ -715,6 +888,78 @@ static int ati_remote2_resume(struct usb_interface *interface) return r; } +static int ati_remote2_reset_resume(struct usb_interface *interface) +{ + struct ati_remote2 *ar2; + struct usb_host_interface *alt = interface->cur_altsetting; + int r = 0; + + if (alt->desc.bInterfaceNumber) + return 0; + + ar2 = usb_get_intfdata(interface); + + dev_dbg(&ar2->intf[0]->dev, "%s()\n", __func__); + + mutex_lock(&ati_remote2_mutex); + + r = ati_remote2_setup(ar2, ar2->channel_mask); + if (r) + goto out; + + if (ar2->flags & ATI_REMOTE2_OPENED) + r = ati_remote2_submit_urbs(ar2); + + if (!r) + ar2->flags &= ~ATI_REMOTE2_SUSPENDED; + + out: + mutex_unlock(&ati_remote2_mutex); + + return r; +} + +static int ati_remote2_pre_reset(struct usb_interface *interface) +{ + struct ati_remote2 *ar2; + struct usb_host_interface *alt = interface->cur_altsetting; + + if (alt->desc.bInterfaceNumber) + return 0; + + ar2 = usb_get_intfdata(interface); + + dev_dbg(&ar2->intf[0]->dev, "%s()\n", __func__); + + mutex_lock(&ati_remote2_mutex); + + if (ar2->flags == ATI_REMOTE2_OPENED) + ati_remote2_kill_urbs(ar2); + + return 0; +} + +static int ati_remote2_post_reset(struct usb_interface *interface) +{ + struct ati_remote2 *ar2; + struct usb_host_interface *alt = interface->cur_altsetting; + int r = 0; + + if (alt->desc.bInterfaceNumber) + return 0; + + ar2 = usb_get_intfdata(interface); + + dev_dbg(&ar2->intf[0]->dev, "%s()\n", __func__); + + if (ar2->flags == 
ATI_REMOTE2_OPENED) + r = ati_remote2_submit_urbs(ar2); + + mutex_unlock(&ati_remote2_mutex); + + return r; +} + static int __init ati_remote2_init(void) { int r; diff --git a/drivers/input/misc/rb532_button.c b/drivers/input/misc/rb532_button.c new file mode 100644 index 00000000000..e2c7f622a0b --- /dev/null +++ b/drivers/input/misc/rb532_button.c @@ -0,0 +1,120 @@ +/* + * Support for the S1 button on Routerboard 532 + * + * Copyright (C) 2009 Phil Sutter <n0-1@freewrt.org> + */ + +#include <linux/input-polldev.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#include <asm/mach-rc32434/gpio.h> +#include <asm/mach-rc32434/rb.h> + +#define DRV_NAME "rb532-button" + +#define RB532_BTN_RATE 100 /* msec */ +#define RB532_BTN_KSYM BTN_0 + +/* The S1 button state is provided by GPIO pin 1. But as this + * pin is also used for uart input as alternate function, the + * operational modes must be switched first: + * 1) disable uart using set_latch_u5() + * 2) turn off alternate function implicitly through + * gpio_direction_input() + * 3) read the GPIO's current value + * 4) undo step 2 by enabling alternate function (in this + * mode the GPIO direction is fixed, so no change needed) + * 5) turn on uart again + * The GPIO value occurs to be inverted, so pin high means + * button is not pressed. + */ +static bool rb532_button_pressed(void) +{ + int val; + + set_latch_u5(0, LO_FOFF); + gpio_direction_input(GPIO_BTN_S1); + + val = gpio_get_value(GPIO_BTN_S1); + + rb532_gpio_set_func(GPIO_BTN_S1); + set_latch_u5(LO_FOFF, 0); + + return !val; +} + +static void rb532_button_poll(struct input_polled_dev *poll_dev) +{ + input_report_key(poll_dev->input, RB532_BTN_KSYM, + rb532_button_pressed()); + input_sync(poll_dev->input); +} + +static int __devinit rb532_button_probe(struct platform_device *pdev) +{ + struct input_polled_dev *poll_dev; + int error; + + poll_dev = input_allocate_polled_device(); + if (!poll_dev) + return -ENOMEM; + + poll_dev->poll = rb532_button_poll; + poll_dev->poll_interval = RB532_BTN_RATE; + + poll_dev->input->name = "rb532 button"; + poll_dev->input->phys = "rb532/button0"; + poll_dev->input->id.bustype = BUS_HOST; + poll_dev->input->dev.parent = &pdev->dev; + + dev_set_drvdata(&pdev->dev, poll_dev); + + input_set_capability(poll_dev->input, EV_KEY, RB532_BTN_KSYM); + + error = input_register_polled_device(poll_dev); + if (error) { + input_free_polled_device(poll_dev); + return error; + } + + return 0; +} + +static int __devexit rb532_button_remove(struct platform_device *pdev) +{ + struct input_polled_dev *poll_dev = dev_get_drvdata(&pdev->dev); + + input_unregister_polled_device(poll_dev); + input_free_polled_device(poll_dev); + dev_set_drvdata(&pdev->dev, NULL); + + return 0; +} + +static struct platform_driver rb532_button_driver = { + .probe = rb532_button_probe, + .remove = __devexit_p(rb532_button_remove), + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, +}; + +static int __init rb532_button_init(void) +{ + return platform_driver_register(&rb532_button_driver); +} + +static void __exit rb532_button_exit(void) +{ + platform_driver_unregister(&rb532_button_driver); +} + +module_init(rb532_button_init); +module_exit(rb532_button_exit); + +MODULE_AUTHOR("Phil Sutter <n0-1@freewrt.org>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Support for S1 button on Routerboard 532"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c new file mode 100644 index 
00000000000..5bb3ab51b8c --- /dev/null +++ b/drivers/input/misc/rotary_encoder.c @@ -0,0 +1,221 @@ +/* + * rotary_encoder.c + * + * (c) 2009 Daniel Mack <daniel@caiaq.de> + * + * state machine code inspired by code from Tim Ruetz + * + * A generic driver for rotary encoders connected to GPIO lines. + * See file:Documentation/input/rotary_encoder.txt for more information + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/input.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/gpio.h> +#include <linux/rotary_encoder.h> + +#define DRV_NAME "rotary-encoder" + +struct rotary_encoder { + unsigned int irq_a; + unsigned int irq_b; + unsigned int pos; + unsigned int armed; + unsigned int dir; + struct input_dev *input; + struct rotary_encoder_platform_data *pdata; +}; + +static irqreturn_t rotary_encoder_irq(int irq, void *dev_id) +{ + struct rotary_encoder *encoder = dev_id; + struct rotary_encoder_platform_data *pdata = encoder->pdata; + int a = !!gpio_get_value(pdata->gpio_a); + int b = !!gpio_get_value(pdata->gpio_b); + int state; + + a ^= pdata->inverted_a; + b ^= pdata->inverted_b; + state = (a << 1) | b; + + switch (state) { + + case 0x0: + if (!encoder->armed) + break; + + if (encoder->dir) { + /* turning counter-clockwise */ + encoder->pos += pdata->steps; + encoder->pos--; + encoder->pos %= pdata->steps; + } else { + /* turning clockwise */ + encoder->pos++; + encoder->pos %= pdata->steps; + } + + input_report_abs(encoder->input, pdata->axis, encoder->pos); + input_sync(encoder->input); + + encoder->armed = 0; + break; + + case 0x1: + case 0x2: + if (encoder->armed) + encoder->dir = state - 1; + break; + + case 0x3: + encoder->armed = 1; + break; + } + + return IRQ_HANDLED; +} + +static int __devinit rotary_encoder_probe(struct platform_device *pdev) +{ + struct rotary_encoder_platform_data *pdata = pdev->dev.platform_data; + struct rotary_encoder *encoder; + struct input_dev *input; + int err; + + if (!pdata || !pdata->steps) { + dev_err(&pdev->dev, "invalid platform data\n"); + return -ENOENT; + } + + encoder = kzalloc(sizeof(struct rotary_encoder), GFP_KERNEL); + input = input_allocate_device(); + if (!encoder || !input) { + dev_err(&pdev->dev, "failed to allocate memory for device\n"); + err = -ENOMEM; + goto exit_free_mem; + } + + encoder->input = input; + encoder->pdata = pdata; + encoder->irq_a = gpio_to_irq(pdata->gpio_a); + encoder->irq_b = gpio_to_irq(pdata->gpio_b); + + /* create and register the input driver */ + input->name = pdev->name; + input->id.bustype = BUS_HOST; + input->dev.parent = &pdev->dev; + input->evbit[0] = BIT_MASK(EV_ABS); + input_set_abs_params(encoder->input, + pdata->axis, 0, pdata->steps, 0, 1); + + err = input_register_device(input); + if (err) { + dev_err(&pdev->dev, "failed to register input device\n"); + goto exit_free_mem; + } + + /* request the GPIOs */ + err = gpio_request(pdata->gpio_a, DRV_NAME); + if (err) { + dev_err(&pdev->dev, "unable to request GPIO %d\n", + pdata->gpio_a); + goto exit_unregister_input; + } + + err = gpio_request(pdata->gpio_b, DRV_NAME); + if (err) { + dev_err(&pdev->dev, "unable to request GPIO %d\n", + pdata->gpio_b); + goto exit_free_gpio_a; + } + + /* request the IRQs */ + err = request_irq(encoder->irq_a, 
&rotary_encoder_irq, + IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE, + DRV_NAME, encoder); + if (err) { + dev_err(&pdev->dev, "unable to request IRQ %d\n", + encoder->irq_a); + goto exit_free_gpio_b; + } + + err = request_irq(encoder->irq_b, &rotary_encoder_irq, + IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE, + DRV_NAME, encoder); + if (err) { + dev_err(&pdev->dev, "unable to request IRQ %d\n", + encoder->irq_b); + goto exit_free_irq_a; + } + + platform_set_drvdata(pdev, encoder); + + return 0; + +exit_free_irq_a: + free_irq(encoder->irq_a, encoder); +exit_free_gpio_b: + gpio_free(pdata->gpio_b); +exit_free_gpio_a: + gpio_free(pdata->gpio_a); +exit_unregister_input: + input_unregister_device(input); + input = NULL; /* so we don't try to free it */ +exit_free_mem: + input_free_device(input); + kfree(encoder); + return err; +} + +static int __devexit rotary_encoder_remove(struct platform_device *pdev) +{ + struct rotary_encoder *encoder = platform_get_drvdata(pdev); + struct rotary_encoder_platform_data *pdata = pdev->dev.platform_data; + + free_irq(encoder->irq_a, encoder); + free_irq(encoder->irq_b, encoder); + gpio_free(pdata->gpio_a); + gpio_free(pdata->gpio_b); + input_unregister_device(encoder->input); + platform_set_drvdata(pdev, NULL); + kfree(encoder); + + return 0; +} + +static struct platform_driver rotary_encoder_driver = { + .probe = rotary_encoder_probe, + .remove = __devexit_p(rotary_encoder_remove), + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + } +}; + +static int __init rotary_encoder_init(void) +{ + return platform_driver_register(&rotary_encoder_driver); +} + +static void __exit rotary_encoder_exit(void) +{ + platform_driver_unregister(&rotary_encoder_driver); +} + +module_init(rotary_encoder_init); +module_exit(rotary_encoder_exit); + +MODULE_ALIAS("platform:" DRV_NAME); +MODULE_DESCRIPTION("GPIO rotary encoder driver"); +MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); +MODULE_LICENSE("GPL v2"); + diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig index 4f38e6f7dfd..c66cc3d08c2 100644 --- a/drivers/input/mouse/Kconfig +++ b/drivers/input/mouse/Kconfig @@ -292,4 +292,15 @@ config MOUSE_PXA930_TRKBALL help Say Y here to support PXA930 Trackball mouse. +config MOUSE_MAPLE + tristate "Maple mouse (for the Dreamcast)" + depends on MAPLE + help + This driver supports the Maple mouse on the SEGA Dreamcast. + + Most Dreamcast users, who have a mouse, will say Y here. + + To compile this driver as a module choose M here: the module will be + called maplemouse. 
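The state machine added in rotary_encoder_irq() above is worth restating outside the interrupt handler: both GPIO lines high arms the decoder, the first single-line transition latches the direction, and the return to both-low commits one step of the wrapped absolute position. The sketch below mirrors that logic in plain C; struct enc, process_state() and main() are illustrative names only and are not part of the patch or of any kernel API.

/* Standalone sketch of the half-period decoding used by rotary_encoder_irq().
 * The caller feeds in the two GPIO levels, already corrected for inversion.
 */
#include <stdio.h>

struct enc {
	unsigned int pos;	/* current detent, 0 .. steps-1 */
	unsigned int steps;	/* detents per revolution */
	unsigned int armed;	/* saw the both-high rest state */
	unsigned int dir;	/* latched direction, as in the driver */
};

static void process_state(struct enc *e, int a, int b)
{
	int state = (a << 1) | b;

	switch (state) {
	case 0x3:				/* rest position: arm */
		e->armed = 1;
		break;
	case 0x2:				/* first edge: latch direction */
	case 0x1:
		if (e->armed)
			e->dir = state - 1;
		break;
	case 0x0:				/* opposite rest: commit one step */
		if (!e->armed)
			break;
		if (e->dir)
			e->pos = (e->pos + e->steps - 1) % e->steps;
		else
			e->pos = (e->pos + 1) % e->steps;
		e->armed = 0;
		break;
	}
}

int main(void)
{
	struct enc e = { .steps = 24 };
	int seq[][2] = { {1, 1}, {0, 1}, {0, 0} };	/* one click */
	unsigned int i;

	for (i = 0; i < 3; i++)
		process_state(&e, seq[i][0], seq[i][1]);

	printf("position after one click: %u\n", e.pos);	/* prints 1 */
	return 0;
}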
+ endif diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile index 8c8a1f236e2..472189468d6 100644 --- a/drivers/input/mouse/Makefile +++ b/drivers/input/mouse/Makefile @@ -6,18 +6,19 @@ obj-$(CONFIG_MOUSE_AMIGA) += amimouse.o obj-$(CONFIG_MOUSE_APPLETOUCH) += appletouch.o -obj-$(CONFIG_MOUSE_BCM5974) += bcm5974.o obj-$(CONFIG_MOUSE_ATARI) += atarimouse.o -obj-$(CONFIG_MOUSE_RISCPC) += rpcmouse.o +obj-$(CONFIG_MOUSE_BCM5974) += bcm5974.o +obj-$(CONFIG_MOUSE_GPIO) += gpio_mouse.o +obj-$(CONFIG_MOUSE_HIL) += hil_ptr.o obj-$(CONFIG_MOUSE_INPORT) += inport.o obj-$(CONFIG_MOUSE_LOGIBM) += logibm.o +obj-$(CONFIG_MOUSE_MAPLE) += maplemouse.o obj-$(CONFIG_MOUSE_PC110PAD) += pc110pad.o obj-$(CONFIG_MOUSE_PS2) += psmouse.o obj-$(CONFIG_MOUSE_PXA930_TRKBALL) += pxa930_trkball.o +obj-$(CONFIG_MOUSE_RISCPC) += rpcmouse.o obj-$(CONFIG_MOUSE_SERIAL) += sermouse.o -obj-$(CONFIG_MOUSE_HIL) += hil_ptr.o obj-$(CONFIG_MOUSE_VSXXXAA) += vsxxxaa.o -obj-$(CONFIG_MOUSE_GPIO) += gpio_mouse.o psmouse-objs := psmouse-base.o synaptics.o diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c index 55cd0fa6833..a1ad2f1a7bb 100644 --- a/drivers/input/mouse/hgpk.c +++ b/drivers/input/mouse/hgpk.c @@ -472,7 +472,7 @@ static enum hgpk_model_t hgpk_get_model(struct psmouse *psmouse) return -EIO; } - hgpk_dbg(psmouse, "ID: %02x %02x %02x", param[0], param[1], param[2]); + hgpk_dbg(psmouse, "ID: %02x %02x %02x\n", param[0], param[1], param[2]); /* HGPK signature: 0x67, 0x00, 0x<model> */ if (param[0] != 0x67 || param[1] != 0x00) diff --git a/drivers/input/mouse/maplemouse.c b/drivers/input/mouse/maplemouse.c new file mode 100644 index 00000000000..d196abfb68b --- /dev/null +++ b/drivers/input/mouse/maplemouse.c @@ -0,0 +1,147 @@ +/* + * SEGA Dreamcast mouse driver + * Based on drivers/usb/usbmouse.c + * + * Copyright Yaegashi Takeshi, 2001 + * Adrian McMenamin, 2008 + */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/input.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/timer.h> +#include <linux/maple.h> + +MODULE_AUTHOR("Adrian McMenamin <adrian@mcmen.demon.co.uk>"); +MODULE_DESCRIPTION("SEGA Dreamcast mouse driver"); +MODULE_LICENSE("GPL"); + +struct dc_mouse { + struct input_dev *dev; + struct maple_device *mdev; +}; + +static void dc_mouse_callback(struct mapleq *mq) +{ + int buttons, relx, rely, relz; + struct maple_device *mapledev = mq->dev; + struct dc_mouse *mse = maple_get_drvdata(mapledev); + struct input_dev *dev = mse->dev; + unsigned char *res = mq->recvbuf; + + buttons = ~res[8]; + relx = *(unsigned short *)(res + 12) - 512; + rely = *(unsigned short *)(res + 14) - 512; + relz = *(unsigned short *)(res + 16) - 512; + + input_report_key(dev, BTN_LEFT, buttons & 4); + input_report_key(dev, BTN_MIDDLE, buttons & 9); + input_report_key(dev, BTN_RIGHT, buttons & 2); + input_report_rel(dev, REL_X, relx); + input_report_rel(dev, REL_Y, rely); + input_report_rel(dev, REL_WHEEL, relz); + input_sync(dev); +} + +static int dc_mouse_open(struct input_dev *dev) +{ + struct dc_mouse *mse = dev->dev.platform_data; + + maple_getcond_callback(mse->mdev, dc_mouse_callback, HZ/50, + MAPLE_FUNC_MOUSE); + + return 0; +} + +static void dc_mouse_close(struct input_dev *dev) +{ + struct dc_mouse *mse = dev->dev.platform_data; + + maple_getcond_callback(mse->mdev, dc_mouse_callback, 0, + MAPLE_FUNC_MOUSE); +} + + +static int __devinit probe_maple_mouse(struct device *dev) +{ + struct maple_device *mdev = to_maple_dev(dev); + struct maple_driver *mdrv = 
to_maple_driver(dev->driver); + struct input_dev *input_dev; + struct dc_mouse *mse; + int error; + + mse = kzalloc(sizeof(struct dc_mouse), GFP_KERNEL); + input_dev = input_allocate_device(); + + if (!mse || !input_dev) { + error = -ENOMEM; + goto fail; + } + + mse->dev = input_dev; + mse->mdev = mdev; + + input_set_drvdata(input_dev, mse); + input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); + input_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_LEFT) | + BIT_MASK(BTN_RIGHT) | BIT_MASK(BTN_MIDDLE); + input_dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y) | + BIT_MASK(REL_WHEEL); + input_dev->name = mdev->product_name; + input_dev->id.bustype = BUS_HOST; + input_dev->open = dc_mouse_open; + input_dev->close = dc_mouse_close; + + mdev->driver = mdrv; + maple_set_drvdata(mdev, mse); + + error = input_register_device(input_dev); + if (error) + goto fail; + + return 0; + +fail: + input_free_device(input_dev); + maple_set_drvdata(mdev, NULL); + kfree(mse); + mdev->driver = NULL; + return error; +} + +static int __devexit remove_maple_mouse(struct device *dev) +{ + struct maple_device *mdev = to_maple_dev(dev); + struct dc_mouse *mse = maple_get_drvdata(mdev); + + mdev->callback = NULL; + input_unregister_device(mse->dev); + maple_set_drvdata(mdev, NULL); + kfree(mse); + + return 0; +} + +static struct maple_driver dc_mouse_driver = { + .function = MAPLE_FUNC_MOUSE, + .drv = { + .name = "Dreamcast_mouse", + .probe = probe_maple_mouse, + .remove = __devexit_p(remove_maple_mouse), + }, +}; + +static int __init dc_mouse_init(void) +{ + return maple_driver_register(&dc_mouse_driver); +} + +static void __exit dc_mouse_exit(void) +{ + maple_driver_unregister(&dc_mouse_driver); +} + +module_init(dc_mouse_init); +module_exit(dc_mouse_exit); diff --git a/drivers/input/mouse/pc110pad.c b/drivers/input/mouse/pc110pad.c index fd09c8df81f..f63995f854f 100644 --- a/drivers/input/mouse/pc110pad.c +++ b/drivers/input/mouse/pc110pad.c @@ -111,11 +111,8 @@ static int __init pc110pad_init(void) struct pci_dev *dev; int err; - dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL); - if (dev) { - pci_dev_put(dev); + if (!no_pci_devices()) return -ENODEV; - } if (!request_region(pc110pad_io, 4, "pc110pad")) { printk(KERN_ERR "pc110pad: I/O area %#x-%#x in use.\n", diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 6fa2deff744..83ed2d56b92 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -151,6 +151,14 @@ static struct dmi_system_id __initdata i8042_dmi_noloop_table[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "01"), }, }, + { + .ident = "HP DV9700", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv9700"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"), + }, + }, { } }; diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index bb6486a8c07..b01fd61dadc 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -29,6 +29,51 @@ config TOUCHSCREEN_ADS7846 To compile this driver as a module, choose M here: the module will be called ads7846. +config TOUCHSCREEN_AD7877 + tristate "AD7877 based touchscreens" + depends on SPI_MASTER + help + Say Y here if you have a touchscreen interface using the + AD7877 controller, and your board-specific initialization + code includes that in its table of SPI devices. + + If unsure, say N (but it's safe to say "Y"). 
+ + To compile this driver as a module, choose M here: the + module will be called ad7877. + +config TOUCHSCREEN_AD7879_I2C + tristate "AD7879 based touchscreens: AD7879-1 I2C Interface" + depends on I2C + select TOUCHSCREEN_AD7879 + help + Say Y here if you have a touchscreen interface using the + AD7879-1 controller, and your board-specific initialization + code includes that in its table of I2C devices. + + If unsure, say N (but it's safe to say "Y"). + + To compile this driver as a module, choose M here: the + module will be called ad7879. + +config TOUCHSCREEN_AD7879_SPI + tristate "AD7879 based touchscreens: AD7879 SPI Interface" + depends on SPI_MASTER && TOUCHSCREEN_AD7879_I2C = n + select TOUCHSCREEN_AD7879 + help + Say Y here if you have a touchscreen interface using the + AD7879 controller, and your board-specific initialization + code includes that in its table of SPI devices. + + If unsure, say N (but it's safe to say "Y"). + + To compile this driver as a module, choose M here: the + module will be called ad7879. + +config TOUCHSCREEN_AD7879 + tristate + default n + config TOUCHSCREEN_BITSY tristate "Compaq iPAQ H3600 (Bitsy) touchscreen" depends on SA1100_BITSY @@ -308,6 +353,19 @@ config TOUCHSCREEN_WM97XX_MAINSTONE To compile this driver as a module, choose M here: the module will be called mainstone-wm97xx. +config TOUCHSCREEN_WM97XX_ZYLONITE + tristate "Zylonite accelerated touch" + depends on TOUCHSCREEN_WM97XX && MACH_ZYLONITE + select TOUCHSCREEN_WM9713 + help + Say Y here for support for streaming mode with the touchscreen + on Zylonite systems. + + If unsure, say N. + + To compile this driver as a module, choose M here: the + module will be called zylonite-wm97xx. + config TOUCHSCREEN_USB_COMPOSITE tristate "USB Touchscreen Driver" depends on USB_ARCH_HAS_HCD diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile index d3375aff46f..6700f7b9d16 100644 --- a/drivers/input/touchscreen/Makefile +++ b/drivers/input/touchscreen/Makefile @@ -6,6 +6,8 @@ wm97xx-ts-y := wm97xx-core.o +obj-$(CONFIG_TOUCHSCREEN_AD7877) += ad7877.o +obj-$(CONFIG_TOUCHSCREEN_AD7879) += ad7879.o obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o @@ -34,3 +36,4 @@ wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9705) += wm9705.o wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9712) += wm9712.o wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9713) += wm9713.o obj-$(CONFIG_TOUCHSCREEN_WM97XX_MAINSTONE) += mainstone-wm97xx.o +obj-$(CONFIG_TOUCHSCREEN_WM97XX_ZYLONITE) += zylonite-wm97xx.o diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c new file mode 100644 index 00000000000..e4728a28f49 --- /dev/null +++ b/drivers/input/touchscreen/ad7877.c @@ -0,0 +1,844 @@ +/* + * Copyright (C) 2006-2008 Michael Hennerich, Analog Devices Inc. + * + * Description: AD7877 based touchscreen, sensor (ADCs), DAC and GPIO driver + * Based on: ads7846.c + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + * History: + * Copyright (c) 2005 David Brownell + * Copyright (c) 2006 Nokia Corporation + * Various changes: Imre Deak <imre.deak@nokia.com> + * + * Using code from: + * - corgi_ts.c + * Copyright (C) 2004-2005 Richard Purdie + * - omap_ts.[hc], ads7846.h, ts_osk.c + * Copyright (C) 2002 MontaVista Software + * Copyright (C) 2004 Texas Instruments + * Copyright (C) 2005 Dirk Behme + */ + + +#include <linux/device.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/input.h> +#include <linux/interrupt.h> +#include <linux/slab.h> +#include <linux/spi/spi.h> +#include <linux/spi/ad7877.h> +#include <asm/irq.h> + +#define TS_PEN_UP_TIMEOUT msecs_to_jiffies(50) + +#define MAX_SPI_FREQ_HZ 20000000 +#define MAX_12BIT ((1<<12)-1) + +#define AD7877_REG_ZEROS 0 +#define AD7877_REG_CTRL1 1 +#define AD7877_REG_CTRL2 2 +#define AD7877_REG_ALERT 3 +#define AD7877_REG_AUX1HIGH 4 +#define AD7877_REG_AUX1LOW 5 +#define AD7877_REG_BAT1HIGH 6 +#define AD7877_REG_BAT1LOW 7 +#define AD7877_REG_BAT2HIGH 8 +#define AD7877_REG_BAT2LOW 9 +#define AD7877_REG_TEMP1HIGH 10 +#define AD7877_REG_TEMP1LOW 11 +#define AD7877_REG_SEQ0 12 +#define AD7877_REG_SEQ1 13 +#define AD7877_REG_DAC 14 +#define AD7877_REG_NONE1 15 +#define AD7877_REG_EXTWRITE 15 +#define AD7877_REG_XPLUS 16 +#define AD7877_REG_YPLUS 17 +#define AD7877_REG_Z2 18 +#define AD7877_REG_aux1 19 +#define AD7877_REG_aux2 20 +#define AD7877_REG_aux3 21 +#define AD7877_REG_bat1 22 +#define AD7877_REG_bat2 23 +#define AD7877_REG_temp1 24 +#define AD7877_REG_temp2 25 +#define AD7877_REG_Z1 26 +#define AD7877_REG_GPIOCTRL1 27 +#define AD7877_REG_GPIOCTRL2 28 +#define AD7877_REG_GPIODATA 29 +#define AD7877_REG_NONE2 30 +#define AD7877_REG_NONE3 31 + +#define AD7877_SEQ_YPLUS_BIT (1<<11) +#define AD7877_SEQ_XPLUS_BIT (1<<10) +#define AD7877_SEQ_Z2_BIT (1<<9) +#define AD7877_SEQ_AUX1_BIT (1<<8) +#define AD7877_SEQ_AUX2_BIT (1<<7) +#define AD7877_SEQ_AUX3_BIT (1<<6) +#define AD7877_SEQ_BAT1_BIT (1<<5) +#define AD7877_SEQ_BAT2_BIT (1<<4) +#define AD7877_SEQ_TEMP1_BIT (1<<3) +#define AD7877_SEQ_TEMP2_BIT (1<<2) +#define AD7877_SEQ_Z1_BIT (1<<1) + +enum { + AD7877_SEQ_YPOS = 0, + AD7877_SEQ_XPOS = 1, + AD7877_SEQ_Z2 = 2, + AD7877_SEQ_AUX1 = 3, + AD7877_SEQ_AUX2 = 4, + AD7877_SEQ_AUX3 = 5, + AD7877_SEQ_BAT1 = 6, + AD7877_SEQ_BAT2 = 7, + AD7877_SEQ_TEMP1 = 8, + AD7877_SEQ_TEMP2 = 9, + AD7877_SEQ_Z1 = 10, + AD7877_NR_SENSE = 11, +}; + +/* DAC Register Default RANGE 0 to Vcc, Volatge Mode, DAC On */ +#define AD7877_DAC_CONF 0x1 + +/* If gpio3 is set AUX3/GPIO3 acts as GPIO Output */ +#define AD7877_EXTW_GPIO_3_CONF 0x1C4 +#define AD7877_EXTW_GPIO_DATA 0x200 + +/* Control REG 2 */ +#define AD7877_TMR(x) ((x & 0x3) << 0) +#define AD7877_REF(x) ((x & 0x1) << 2) +#define AD7877_POL(x) ((x & 0x1) << 3) +#define AD7877_FCD(x) ((x & 0x3) << 4) +#define AD7877_PM(x) ((x & 0x3) << 6) +#define AD7877_ACQ(x) ((x & 0x3) << 8) +#define AD7877_AVG(x) ((x & 0x3) << 10) + +/* Control REG 1 */ +#define AD7877_SER (1 << 11) /* non-differential */ +#define AD7877_DFR (0 << 11) /* differential */ + +#define AD7877_MODE_NOC (0) /* Do not convert */ +#define AD7877_MODE_SCC (1) /* Single channel conversion */ +#define AD7877_MODE_SEQ0 (2) /* Sequence 0 in Slave Mode */ 
+#define AD7877_MODE_SEQ1 (3) /* Sequence 1 in Master Mode */ + +#define AD7877_CHANADD(x) ((x&0xF)<<7) +#define AD7877_READADD(x) ((x)<<2) +#define AD7877_WRITEADD(x) ((x)<<12) + +#define AD7877_READ_CHAN(x) (AD7877_WRITEADD(AD7877_REG_CTRL1) | AD7877_SER | \ + AD7877_MODE_SCC | AD7877_CHANADD(AD7877_REG_ ## x) | \ + AD7877_READADD(AD7877_REG_ ## x)) + +#define AD7877_MM_SEQUENCE (AD7877_SEQ_YPLUS_BIT | AD7877_SEQ_XPLUS_BIT | \ + AD7877_SEQ_Z2_BIT | AD7877_SEQ_Z1_BIT) + +/* + * Non-touchscreen sensors only use single-ended conversions. + */ + +struct ser_req { + u16 reset; + u16 ref_on; + u16 command; + u16 sample; + struct spi_message msg; + struct spi_transfer xfer[6]; +}; + +struct ad7877 { + struct input_dev *input; + char phys[32]; + + struct spi_device *spi; + u16 model; + u16 vref_delay_usecs; + u16 x_plate_ohms; + u16 pressure_max; + + u16 cmd_crtl1; + u16 cmd_crtl2; + u16 cmd_dummy; + u16 dac; + + u8 stopacq_polarity; + u8 first_conversion_delay; + u8 acquisition_time; + u8 averaging; + u8 pen_down_acc_interval; + + u16 conversion_data[AD7877_NR_SENSE]; + + struct spi_transfer xfer[AD7877_NR_SENSE + 2]; + struct spi_message msg; + + struct mutex mutex; + unsigned disabled:1; /* P: mutex */ + unsigned gpio3:1; /* P: mutex */ + unsigned gpio4:1; /* P: mutex */ + + spinlock_t lock; + struct timer_list timer; /* P: lock */ + unsigned pending:1; /* P: lock */ +}; + +static int gpio3; +module_param(gpio3, int, 0); +MODULE_PARM_DESC(gpio3, "If gpio3 is set to 1 AUX3 acts as GPIO3"); + +/* + * ad7877_read/write are only used for initial setup and for sysfs controls. + * The main traffic is done using spi_async() in the interrupt handler. + */ + +static int ad7877_read(struct spi_device *spi, u16 reg) +{ + struct ser_req *req; + int status, ret; + + req = kzalloc(sizeof *req, GFP_KERNEL); + if (!req) + return -ENOMEM; + + spi_message_init(&req->msg); + + req->command = (u16) (AD7877_WRITEADD(AD7877_REG_CTRL1) | + AD7877_READADD(reg)); + req->xfer[0].tx_buf = &req->command; + req->xfer[0].len = 2; + + req->xfer[1].rx_buf = &req->sample; + req->xfer[1].len = 2; + + spi_message_add_tail(&req->xfer[0], &req->msg); + spi_message_add_tail(&req->xfer[1], &req->msg); + + status = spi_sync(spi, &req->msg); + ret = status ? 
: req->sample; + + kfree(req); + + return ret; +} + +static int ad7877_write(struct spi_device *spi, u16 reg, u16 val) +{ + struct ser_req *req; + int status; + + req = kzalloc(sizeof *req, GFP_KERNEL); + if (!req) + return -ENOMEM; + + spi_message_init(&req->msg); + + req->command = (u16) (AD7877_WRITEADD(reg) | (val & MAX_12BIT)); + req->xfer[0].tx_buf = &req->command; + req->xfer[0].len = 2; + + spi_message_add_tail(&req->xfer[0], &req->msg); + + status = spi_sync(spi, &req->msg); + + kfree(req); + + return status; +} + +static int ad7877_read_adc(struct spi_device *spi, unsigned command) +{ + struct ad7877 *ts = dev_get_drvdata(&spi->dev); + struct ser_req *req; + int status; + int sample; + int i; + + req = kzalloc(sizeof *req, GFP_KERNEL); + if (!req) + return -ENOMEM; + + spi_message_init(&req->msg); + + /* activate reference, so it has time to settle; */ + req->ref_on = AD7877_WRITEADD(AD7877_REG_CTRL2) | + AD7877_POL(ts->stopacq_polarity) | + AD7877_AVG(0) | AD7877_PM(2) | AD7877_TMR(0) | + AD7877_ACQ(ts->acquisition_time) | AD7877_FCD(0); + + req->reset = AD7877_WRITEADD(AD7877_REG_CTRL1) | AD7877_MODE_NOC; + + req->command = (u16) command; + + req->xfer[0].tx_buf = &req->reset; + req->xfer[0].len = 2; + + req->xfer[1].tx_buf = &req->ref_on; + req->xfer[1].len = 2; + req->xfer[1].delay_usecs = ts->vref_delay_usecs; + + req->xfer[2].tx_buf = &req->command; + req->xfer[2].len = 2; + req->xfer[2].delay_usecs = ts->vref_delay_usecs; + + req->xfer[3].rx_buf = &req->sample; + req->xfer[3].len = 2; + + req->xfer[4].tx_buf = &ts->cmd_crtl2; /*REF OFF*/ + req->xfer[4].len = 2; + + req->xfer[5].tx_buf = &ts->cmd_crtl1; /*DEFAULT*/ + req->xfer[5].len = 2; + + /* group all the transfers together, so we can't interfere with + * reading touchscreen state; disable penirq while sampling + */ + for (i = 0; i < 6; i++) + spi_message_add_tail(&req->xfer[i], &req->msg); + + status = spi_sync(spi, &req->msg); + sample = req->sample; + + kfree(req); + + return status ? : sample; +} + +static void ad7877_rx(struct ad7877 *ts) +{ + struct input_dev *input_dev = ts->input; + unsigned Rt; + u16 x, y, z1, z2; + + x = ts->conversion_data[AD7877_SEQ_XPOS] & MAX_12BIT; + y = ts->conversion_data[AD7877_SEQ_YPOS] & MAX_12BIT; + z1 = ts->conversion_data[AD7877_SEQ_Z1] & MAX_12BIT; + z2 = ts->conversion_data[AD7877_SEQ_Z2] & MAX_12BIT; + + /* + * The samples processed here are already preprocessed by the AD7877. + * The preprocessing function consists of an averaging filter. + * The combination of 'first conversion delay' and averaging provides a robust solution, + * discarding the spurious noise in the signal and keeping only the data of interest. + * The size of the averaging filter is programmable. (dev.platform_data, see linux/spi/ad7877.h) + * Other user-programmable conversion controls include variable acquisition time, + * and first conversion delay. Up to 16 averages can be taken per conversion. 
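 * As a purely illustrative example of the "equation #1" computation below
 * (made-up sample values): with x = 2000, z1 = 400, z2 = 800 and
 * x_plate_ohms = 400, Rt = (800 - 400) * 2000 * 400 / 400 = 800000, and the
 * final (800000 + 2047) >> 12 scaling reports a pressure value of 195.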
+ */ + + if (likely(x && z1)) { + /* compute touch pressure resistance using equation #1 */ + Rt = (z2 - z1) * x * ts->x_plate_ohms; + Rt /= z1; + Rt = (Rt + 2047) >> 12; + + input_report_abs(input_dev, ABS_X, x); + input_report_abs(input_dev, ABS_Y, y); + input_report_abs(input_dev, ABS_PRESSURE, Rt); + input_sync(input_dev); + } +} + +static inline void ad7877_ts_event_release(struct ad7877 *ts) +{ + struct input_dev *input_dev = ts->input; + + input_report_abs(input_dev, ABS_PRESSURE, 0); + input_sync(input_dev); +} + +static void ad7877_timer(unsigned long handle) +{ + struct ad7877 *ts = (void *)handle; + + ad7877_ts_event_release(ts); +} + +static irqreturn_t ad7877_irq(int irq, void *handle) +{ + struct ad7877 *ts = handle; + unsigned long flags; + int status; + + /* + * The repeated conversion sequencer controlled by TMR kicked off + * too fast. We ignore the last and process the sample sequence + * currently in the queue. It can't be older than 9.4ms, and we + * need to avoid that ts->msg doesn't get issued twice while in work. + */ + + spin_lock_irqsave(&ts->lock, flags); + if (!ts->pending) { + ts->pending = 1; + + status = spi_async(ts->spi, &ts->msg); + if (status) + dev_err(&ts->spi->dev, "spi_sync --> %d\n", status); + } + spin_unlock_irqrestore(&ts->lock, flags); + + return IRQ_HANDLED; +} + +static void ad7877_callback(void *_ts) +{ + struct ad7877 *ts = _ts; + + spin_lock_irq(&ts->lock); + + ad7877_rx(ts); + ts->pending = 0; + mod_timer(&ts->timer, jiffies + TS_PEN_UP_TIMEOUT); + + spin_unlock_irq(&ts->lock); +} + +static void ad7877_disable(struct ad7877 *ts) +{ + mutex_lock(&ts->mutex); + + if (!ts->disabled) { + ts->disabled = 1; + disable_irq(ts->spi->irq); + + /* Wait for spi_async callback */ + while (ts->pending) + msleep(1); + + if (del_timer_sync(&ts->timer)) + ad7877_ts_event_release(ts); + } + + /* we know the chip's in lowpower mode since we always + * leave it that way after every request + */ + + mutex_unlock(&ts->mutex); +} + +static void ad7877_enable(struct ad7877 *ts) +{ + mutex_lock(&ts->mutex); + + if (ts->disabled) { + ts->disabled = 0; + enable_irq(ts->spi->irq); + } + + mutex_unlock(&ts->mutex); +} + +#define SHOW(name) static ssize_t \ +name ## _show(struct device *dev, struct device_attribute *attr, char *buf) \ +{ \ + struct ad7877 *ts = dev_get_drvdata(dev); \ + ssize_t v = ad7877_read_adc(ts->spi, \ + AD7877_READ_CHAN(name)); \ + if (v < 0) \ + return v; \ + return sprintf(buf, "%u\n", (unsigned) v); \ +} \ +static DEVICE_ATTR(name, S_IRUGO, name ## _show, NULL); + +SHOW(aux1) +SHOW(aux2) +SHOW(aux3) +SHOW(bat1) +SHOW(bat2) +SHOW(temp1) +SHOW(temp2) + +static ssize_t ad7877_disable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ad7877 *ts = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", ts->disabled); +} + +static ssize_t ad7877_disable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ad7877 *ts = dev_get_drvdata(dev); + unsigned long val; + int error; + + error = strict_strtoul(buf, 10, &val); + if (error) + return error; + + if (val) + ad7877_disable(ts); + else + ad7877_enable(ts); + + return count; +} + +static DEVICE_ATTR(disable, 0664, ad7877_disable_show, ad7877_disable_store); + +static ssize_t ad7877_dac_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ad7877 *ts = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", ts->dac); +} + +static ssize_t ad7877_dac_store(struct device *dev, + struct device_attribute 
*attr, + const char *buf, size_t count) +{ + struct ad7877 *ts = dev_get_drvdata(dev); + unsigned long val; + int error; + + error = strict_strtoul(buf, 10, &val); + if (error) + return error; + + mutex_lock(&ts->mutex); + ts->dac = val & 0xFF; + ad7877_write(ts->spi, AD7877_REG_DAC, (ts->dac << 4) | AD7877_DAC_CONF); + mutex_unlock(&ts->mutex); + + return count; +} + +static DEVICE_ATTR(dac, 0664, ad7877_dac_show, ad7877_dac_store); + +static ssize_t ad7877_gpio3_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ad7877 *ts = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", ts->gpio3); +} + +static ssize_t ad7877_gpio3_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ad7877 *ts = dev_get_drvdata(dev); + unsigned long val; + int error; + + error = strict_strtoul(buf, 10, &val); + if (error) + return error; + + mutex_lock(&ts->mutex); + ts->gpio3 = !!val; + ad7877_write(ts->spi, AD7877_REG_EXTWRITE, AD7877_EXTW_GPIO_DATA | + (ts->gpio4 << 4) | (ts->gpio3 << 5)); + mutex_unlock(&ts->mutex); + + return count; +} + +static DEVICE_ATTR(gpio3, 0664, ad7877_gpio3_show, ad7877_gpio3_store); + +static ssize_t ad7877_gpio4_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ad7877 *ts = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", ts->gpio4); +} + +static ssize_t ad7877_gpio4_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ad7877 *ts = dev_get_drvdata(dev); + unsigned long val; + int error; + + error = strict_strtoul(buf, 10, &val); + if (error) + return error; + + mutex_lock(&ts->mutex); + ts->gpio4 = !!val; + ad7877_write(ts->spi, AD7877_REG_EXTWRITE, AD7877_EXTW_GPIO_DATA | + (ts->gpio4 << 4) | (ts->gpio3 << 5)); + mutex_unlock(&ts->mutex); + + return count; +} + +static DEVICE_ATTR(gpio4, 0664, ad7877_gpio4_show, ad7877_gpio4_store); + +static struct attribute *ad7877_attributes[] = { + &dev_attr_temp1.attr, + &dev_attr_temp2.attr, + &dev_attr_aux1.attr, + &dev_attr_aux2.attr, + &dev_attr_bat1.attr, + &dev_attr_bat2.attr, + &dev_attr_disable.attr, + &dev_attr_dac.attr, + &dev_attr_gpio4.attr, + NULL +}; + +static const struct attribute_group ad7877_attr_group = { + .attrs = ad7877_attributes, +}; + +static void ad7877_setup_ts_def_msg(struct spi_device *spi, struct ad7877 *ts) +{ + struct spi_message *m; + int i; + + ts->cmd_crtl2 = AD7877_WRITEADD(AD7877_REG_CTRL2) | + AD7877_POL(ts->stopacq_polarity) | + AD7877_AVG(ts->averaging) | AD7877_PM(1) | + AD7877_TMR(ts->pen_down_acc_interval) | + AD7877_ACQ(ts->acquisition_time) | + AD7877_FCD(ts->first_conversion_delay); + + ad7877_write(spi, AD7877_REG_CTRL2, ts->cmd_crtl2); + + ts->cmd_crtl1 = AD7877_WRITEADD(AD7877_REG_CTRL1) | + AD7877_READADD(AD7877_REG_XPLUS-1) | + AD7877_MODE_SEQ1 | AD7877_DFR; + + ad7877_write(spi, AD7877_REG_CTRL1, ts->cmd_crtl1); + + ts->cmd_dummy = 0; + + m = &ts->msg; + + spi_message_init(m); + + m->complete = ad7877_callback; + m->context = ts; + + ts->xfer[0].tx_buf = &ts->cmd_crtl1; + ts->xfer[0].len = 2; + + spi_message_add_tail(&ts->xfer[0], m); + + ts->xfer[1].tx_buf = &ts->cmd_dummy; /* Send ZERO */ + ts->xfer[1].len = 2; + + spi_message_add_tail(&ts->xfer[1], m); + + for (i = 0; i < 11; i++) { + ts->xfer[i + 2].rx_buf = &ts->conversion_data[AD7877_SEQ_YPOS + i]; + ts->xfer[i + 2].len = 2; + spi_message_add_tail(&ts->xfer[i + 2], m); + } +} + +static int __devinit ad7877_probe(struct spi_device *spi) +{ + struct ad7877 *ts; + struct 
input_dev *input_dev; + struct ad7877_platform_data *pdata = spi->dev.platform_data; + int err; + u16 verify; + + if (!spi->irq) { + dev_dbg(&spi->dev, "no IRQ?\n"); + return -ENODEV; + } + + if (!pdata) { + dev_dbg(&spi->dev, "no platform data?\n"); + return -ENODEV; + } + + /* don't exceed max specified SPI CLK frequency */ + if (spi->max_speed_hz > MAX_SPI_FREQ_HZ) { + dev_dbg(&spi->dev, "SPI CLK %d Hz?\n",spi->max_speed_hz); + return -EINVAL; + } + + ts = kzalloc(sizeof(struct ad7877), GFP_KERNEL); + input_dev = input_allocate_device(); + if (!ts || !input_dev) { + err = -ENOMEM; + goto err_free_mem; + } + + dev_set_drvdata(&spi->dev, ts); + ts->spi = spi; + ts->input = input_dev; + + setup_timer(&ts->timer, ad7877_timer, (unsigned long) ts); + mutex_init(&ts->mutex); + spin_lock_init(&ts->lock); + + ts->model = pdata->model ? : 7877; + ts->vref_delay_usecs = pdata->vref_delay_usecs ? : 100; + ts->x_plate_ohms = pdata->x_plate_ohms ? : 400; + ts->pressure_max = pdata->pressure_max ? : ~0; + + ts->stopacq_polarity = pdata->stopacq_polarity; + ts->first_conversion_delay = pdata->first_conversion_delay; + ts->acquisition_time = pdata->acquisition_time; + ts->averaging = pdata->averaging; + ts->pen_down_acc_interval = pdata->pen_down_acc_interval; + + snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&spi->dev)); + + input_dev->name = "AD7877 Touchscreen"; + input_dev->phys = ts->phys; + input_dev->dev.parent = &spi->dev; + + __set_bit(EV_ABS, input_dev->evbit); + __set_bit(ABS_X, input_dev->absbit); + __set_bit(ABS_Y, input_dev->absbit); + __set_bit(ABS_PRESSURE, input_dev->absbit); + + input_set_abs_params(input_dev, ABS_X, + pdata->x_min ? : 0, + pdata->x_max ? : MAX_12BIT, + 0, 0); + input_set_abs_params(input_dev, ABS_Y, + pdata->y_min ? : 0, + pdata->y_max ? : MAX_12BIT, + 0, 0); + input_set_abs_params(input_dev, ABS_PRESSURE, + pdata->pressure_min, pdata->pressure_max, 0, 0); + + ad7877_write(spi, AD7877_REG_SEQ1, AD7877_MM_SEQUENCE); + + verify = ad7877_read(spi, AD7877_REG_SEQ1); + + if (verify != AD7877_MM_SEQUENCE){ + dev_err(&spi->dev, "%s: Failed to probe %s\n", + dev_name(&spi->dev), input_dev->name); + err = -ENODEV; + goto err_free_mem; + } + + if (gpio3) + ad7877_write(spi, AD7877_REG_EXTWRITE, AD7877_EXTW_GPIO_3_CONF); + + ad7877_setup_ts_def_msg(spi, ts); + + /* Request AD7877 /DAV GPIO interrupt */ + + err = request_irq(spi->irq, ad7877_irq, IRQF_TRIGGER_FALLING | + IRQF_SAMPLE_RANDOM, spi->dev.driver->name, ts); + if (err) { + dev_dbg(&spi->dev, "irq %d busy?\n", spi->irq); + goto err_free_mem; + } + + err = sysfs_create_group(&spi->dev.kobj, &ad7877_attr_group); + if (err) + goto err_free_irq; + + err = device_create_file(&spi->dev, + gpio3 ? &dev_attr_gpio3 : &dev_attr_aux3); + if (err) + goto err_remove_attr_group; + + err = input_register_device(input_dev); + if (err) + goto err_remove_attr; + + return 0; + +err_remove_attr: + device_remove_file(&spi->dev, + gpio3 ? &dev_attr_gpio3 : &dev_attr_aux3); +err_remove_attr_group: + sysfs_remove_group(&spi->dev.kobj, &ad7877_attr_group); +err_free_irq: + free_irq(spi->irq, ts); +err_free_mem: + input_free_device(input_dev); + kfree(ts); + dev_set_drvdata(&spi->dev, NULL); + return err; +} + +static int __devexit ad7877_remove(struct spi_device *spi) +{ + struct ad7877 *ts = dev_get_drvdata(&spi->dev); + + sysfs_remove_group(&spi->dev.kobj, &ad7877_attr_group); + device_remove_file(&spi->dev, + gpio3 ? 
&dev_attr_gpio3 : &dev_attr_aux3); + + ad7877_disable(ts); + free_irq(ts->spi->irq, ts); + + input_unregister_device(ts->input); + kfree(ts); + + dev_dbg(&spi->dev, "unregistered touchscreen\n"); + dev_set_drvdata(&spi->dev, NULL); + + return 0; +} + +#ifdef CONFIG_PM +static int ad7877_suspend(struct spi_device *spi, pm_message_t message) +{ + struct ad7877 *ts = dev_get_drvdata(&spi->dev); + + ad7877_disable(ts); + + return 0; +} + +static int ad7877_resume(struct spi_device *spi) +{ + struct ad7877 *ts = dev_get_drvdata(&spi->dev); + + ad7877_enable(ts); + + return 0; +} +#else +#define ad7877_suspend NULL +#define ad7877_resume NULL +#endif + +static struct spi_driver ad7877_driver = { + .driver = { + .name = "ad7877", + .bus = &spi_bus_type, + .owner = THIS_MODULE, + }, + .probe = ad7877_probe, + .remove = __devexit_p(ad7877_remove), + .suspend = ad7877_suspend, + .resume = ad7877_resume, +}; + +static int __init ad7877_init(void) +{ + return spi_register_driver(&ad7877_driver); +} +module_init(ad7877_init); + +static void __exit ad7877_exit(void) +{ + spi_unregister_driver(&ad7877_driver); +} +module_exit(ad7877_exit); + +MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); +MODULE_DESCRIPTION("AD7877 touchscreen Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c new file mode 100644 index 00000000000..ea4c61d6868 --- /dev/null +++ b/drivers/input/touchscreen/ad7879.c @@ -0,0 +1,782 @@ +/* + * Copyright (C) 2008 Michael Hennerich, Analog Devices Inc. + * + * Description: AD7879 based touchscreen, and GPIO driver (I2C/SPI Interface) + * + * Bugs: Enter bugs at http://blackfin.uclinux.org/ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see the file COPYING, or write + * to the Free Software Foundation, Inc., + * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * + * History: + * Copyright (c) 2005 David Brownell + * Copyright (c) 2006 Nokia Corporation + * Various changes: Imre Deak <imre.deak@nokia.com> + * + * Using code from: + * - corgi_ts.c + * Copyright (C) 2004-2005 Richard Purdie + * - omap_ts.[hc], ads7846.h, ts_osk.c + * Copyright (C) 2002 MontaVista Software + * Copyright (C) 2004 Texas Instruments + * Copyright (C) 2005 Dirk Behme + * - ad7877.c + * Copyright (C) 2006-2008 Analog Devices Inc. 
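 * This file builds for either bus: with CONFIG_TOUCHSCREEN_AD7879_SPI the
 * bus_device typedef below resolves to struct spi_device, with
 * CONFIG_TOUCHSCREEN_AD7879_I2C it resolves to struct i2c_client, and
 * ad7879_read(), ad7879_write() and ad7879_collect() are supplied by the
 * matching #if block at the end of the file.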
+ */ + +#include <linux/device.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/input.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/slab.h> +#include <linux/workqueue.h> +#include <linux/spi/spi.h> +#include <linux/i2c.h> + +#include <linux/spi/ad7879.h> + +#define AD7879_REG_ZEROS 0 +#define AD7879_REG_CTRL1 1 +#define AD7879_REG_CTRL2 2 +#define AD7879_REG_CTRL3 3 +#define AD7879_REG_AUX1HIGH 4 +#define AD7879_REG_AUX1LOW 5 +#define AD7879_REG_TEMP1HIGH 6 +#define AD7879_REG_TEMP1LOW 7 +#define AD7879_REG_XPLUS 8 +#define AD7879_REG_YPLUS 9 +#define AD7879_REG_Z1 10 +#define AD7879_REG_Z2 11 +#define AD7879_REG_AUXVBAT 12 +#define AD7879_REG_TEMP 13 +#define AD7879_REG_REVID 14 + +/* Control REG 1 */ +#define AD7879_TMR(x) ((x & 0xFF) << 0) +#define AD7879_ACQ(x) ((x & 0x3) << 8) +#define AD7879_MODE_NOC (0 << 10) /* Do not convert */ +#define AD7879_MODE_SCC (1 << 10) /* Single channel conversion */ +#define AD7879_MODE_SEQ0 (2 << 10) /* Sequence 0 in Slave Mode */ +#define AD7879_MODE_SEQ1 (3 << 10) /* Sequence 1 in Master Mode */ +#define AD7879_MODE_INT (1 << 15) /* PENIRQ disabled INT enabled */ + +/* Control REG 2 */ +#define AD7879_FCD(x) ((x & 0x3) << 0) +#define AD7879_RESET (1 << 4) +#define AD7879_MFS(x) ((x & 0x3) << 5) +#define AD7879_AVG(x) ((x & 0x3) << 7) +#define AD7879_SER (1 << 9) /* non-differential */ +#define AD7879_DFR (0 << 9) /* differential */ +#define AD7879_GPIOPOL (1 << 10) +#define AD7879_GPIODIR (1 << 11) +#define AD7879_GPIO_DATA (1 << 12) +#define AD7879_GPIO_EN (1 << 13) +#define AD7879_PM(x) ((x & 0x3) << 14) +#define AD7879_PM_SHUTDOWN (0) +#define AD7879_PM_DYN (1) +#define AD7879_PM_FULLON (2) + +/* Control REG 3 */ +#define AD7879_TEMPMASK_BIT (1<<15) +#define AD7879_AUXVBATMASK_BIT (1<<14) +#define AD7879_INTMODE_BIT (1<<13) +#define AD7879_GPIOALERTMASK_BIT (1<<12) +#define AD7879_AUXLOW_BIT (1<<11) +#define AD7879_AUXHIGH_BIT (1<<10) +#define AD7879_TEMPLOW_BIT (1<<9) +#define AD7879_TEMPHIGH_BIT (1<<8) +#define AD7879_YPLUS_BIT (1<<7) +#define AD7879_XPLUS_BIT (1<<6) +#define AD7879_Z1_BIT (1<<5) +#define AD7879_Z2_BIT (1<<4) +#define AD7879_AUX_BIT (1<<3) +#define AD7879_VBAT_BIT (1<<2) +#define AD7879_TEMP_BIT (1<<1) + +enum { + AD7879_SEQ_XPOS = 0, + AD7879_SEQ_YPOS = 1, + AD7879_SEQ_Z1 = 2, + AD7879_SEQ_Z2 = 3, + AD7879_NR_SENSE = 4, +}; + +#define MAX_12BIT ((1<<12)-1) +#define TS_PEN_UP_TIMEOUT msecs_to_jiffies(50) + +#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE) +#define AD7879_DEVID 0x7A +typedef struct spi_device bus_device; +#elif defined(CONFIG_TOUCHSCREEN_AD7879_I2C) || defined(CONFIG_TOUCHSCREEN_AD7879_I2C_MODULE) +#define AD7879_DEVID 0x79 +typedef struct i2c_client bus_device; +#endif + +struct ad7879 { + bus_device *bus; + struct input_dev *input; + struct work_struct work; + struct timer_list timer; + + struct mutex mutex; + unsigned disabled:1; /* P: mutex */ + +#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE) + struct spi_message msg; + struct spi_transfer xfer[AD7879_NR_SENSE + 1]; + u16 cmd; +#endif + u16 conversion_data[AD7879_NR_SENSE]; + char phys[32]; + u8 first_conversion_delay; + u8 acquisition_time; + u8 averaging; + u8 pen_down_acc_interval; + u8 median; + u16 x_plate_ohms; + u16 pressure_max; + u16 gpio_init; + u16 cmd_crtl1; + u16 cmd_crtl2; + u16 cmd_crtl3; + unsigned gpio:1; +}; + +static int ad7879_read(bus_device *, u8); +static int ad7879_write(bus_device *, u8, u16); 
+static void ad7879_collect(struct ad7879 *); + +static void ad7879_report(struct ad7879 *ts) +{ + struct input_dev *input_dev = ts->input; + unsigned Rt; + u16 x, y, z1, z2; + + x = ts->conversion_data[AD7879_SEQ_XPOS] & MAX_12BIT; + y = ts->conversion_data[AD7879_SEQ_YPOS] & MAX_12BIT; + z1 = ts->conversion_data[AD7879_SEQ_Z1] & MAX_12BIT; + z2 = ts->conversion_data[AD7879_SEQ_Z2] & MAX_12BIT; + + /* + * The samples processed here are already preprocessed by the AD7879. + * The preprocessing function consists of a median and an averaging filter. + * The combination of these two techniques provides a robust solution, + * discarding the spurious noise in the signal and keeping only the data of interest. + * The size of both filters is programmable. (dev.platform_data, see linux/spi/ad7879.h) + * Other user-programmable conversion controls include variable acquisition time, + * and first conversion delay. Up to 16 averages can be taken per conversion. + */ + + if (likely(x && z1)) { + /* compute touch pressure resistance using equation #1 */ + Rt = (z2 - z1) * x * ts->x_plate_ohms; + Rt /= z1; + Rt = (Rt + 2047) >> 12; + + input_report_abs(input_dev, ABS_X, x); + input_report_abs(input_dev, ABS_Y, y); + input_report_abs(input_dev, ABS_PRESSURE, Rt); + input_sync(input_dev); + } +} + +static void ad7879_work(struct work_struct *work) +{ + struct ad7879 *ts = container_of(work, struct ad7879, work); + + /* use keventd context to read the result registers */ + ad7879_collect(ts); + ad7879_report(ts); + mod_timer(&ts->timer, jiffies + TS_PEN_UP_TIMEOUT); +} + +static void ad7879_ts_event_release(struct ad7879 *ts) +{ + struct input_dev *input_dev = ts->input; + + input_report_abs(input_dev, ABS_PRESSURE, 0); + input_sync(input_dev); +} + +static void ad7879_timer(unsigned long handle) +{ + struct ad7879 *ts = (void *)handle; + + ad7879_ts_event_release(ts); +} + +static irqreturn_t ad7879_irq(int irq, void *handle) +{ + struct ad7879 *ts = handle; + + /* The repeated conversion sequencer controlled by TMR kicked off too fast. + * We ignore the last and process the sample sequence currently in the queue. 
+ * It can't be older than 9.4ms + */ + + if (!work_pending(&ts->work)) + schedule_work(&ts->work); + + return IRQ_HANDLED; +} + +static void ad7879_setup(struct ad7879 *ts) +{ + ts->cmd_crtl3 = AD7879_YPLUS_BIT | + AD7879_XPLUS_BIT | + AD7879_Z2_BIT | + AD7879_Z1_BIT | + AD7879_TEMPMASK_BIT | + AD7879_AUXVBATMASK_BIT | + AD7879_GPIOALERTMASK_BIT; + + ts->cmd_crtl2 = AD7879_PM(AD7879_PM_DYN) | AD7879_DFR | + AD7879_AVG(ts->averaging) | + AD7879_MFS(ts->median) | + AD7879_FCD(ts->first_conversion_delay) | + ts->gpio_init; + + ts->cmd_crtl1 = AD7879_MODE_INT | AD7879_MODE_SEQ1 | + AD7879_ACQ(ts->acquisition_time) | + AD7879_TMR(ts->pen_down_acc_interval); + + ad7879_write(ts->bus, AD7879_REG_CTRL2, ts->cmd_crtl2); + ad7879_write(ts->bus, AD7879_REG_CTRL3, ts->cmd_crtl3); + ad7879_write(ts->bus, AD7879_REG_CTRL1, ts->cmd_crtl1); +} + +static void ad7879_disable(struct ad7879 *ts) +{ + mutex_lock(&ts->mutex); + + if (!ts->disabled) { + + ts->disabled = 1; + disable_irq(ts->bus->irq); + + cancel_work_sync(&ts->work); + + if (del_timer_sync(&ts->timer)) + ad7879_ts_event_release(ts); + + ad7879_write(ts->bus, AD7879_REG_CTRL2, + AD7879_PM(AD7879_PM_SHUTDOWN)); + } + + mutex_unlock(&ts->mutex); +} + +static void ad7879_enable(struct ad7879 *ts) +{ + mutex_lock(&ts->mutex); + + if (ts->disabled) { + ad7879_setup(ts); + ts->disabled = 0; + enable_irq(ts->bus->irq); + } + + mutex_unlock(&ts->mutex); +} + +static ssize_t ad7879_disable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ad7879 *ts = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", ts->disabled); +} + +static ssize_t ad7879_disable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ad7879 *ts = dev_get_drvdata(dev); + unsigned long val; + int error; + + error = strict_strtoul(buf, 10, &val); + if (error) + return error; + + if (val) + ad7879_disable(ts); + else + ad7879_enable(ts); + + return count; +} + +static DEVICE_ATTR(disable, 0664, ad7879_disable_show, ad7879_disable_store); + +static ssize_t ad7879_gpio_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ad7879 *ts = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", ts->gpio); +} + +static ssize_t ad7879_gpio_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ad7879 *ts = dev_get_drvdata(dev); + unsigned long val; + int error; + + error = strict_strtoul(buf, 10, &val); + if (error) + return error; + + mutex_lock(&ts->mutex); + ts->gpio = !!val; + error = ad7879_write(ts->bus, AD7879_REG_CTRL2, + ts->gpio ? + ts->cmd_crtl2 & ~AD7879_GPIO_DATA : + ts->cmd_crtl2 | AD7879_GPIO_DATA); + mutex_unlock(&ts->mutex); + + return error ? 
: count; +} + +static DEVICE_ATTR(gpio, 0664, ad7879_gpio_show, ad7879_gpio_store); + +static struct attribute *ad7879_attributes[] = { + &dev_attr_disable.attr, + &dev_attr_gpio.attr, + NULL +}; + +static const struct attribute_group ad7879_attr_group = { + .attrs = ad7879_attributes, +}; + +static int __devinit ad7879_construct(bus_device *bus, struct ad7879 *ts) +{ + struct input_dev *input_dev; + struct ad7879_platform_data *pdata = bus->dev.platform_data; + int err; + u16 revid; + + if (!bus->irq) { + dev_err(&bus->dev, "no IRQ?\n"); + return -ENODEV; + } + + if (!pdata) { + dev_err(&bus->dev, "no platform data?\n"); + return -ENODEV; + } + + input_dev = input_allocate_device(); + if (!input_dev) + return -ENOMEM; + + ts->input = input_dev; + + setup_timer(&ts->timer, ad7879_timer, (unsigned long) ts); + INIT_WORK(&ts->work, ad7879_work); + mutex_init(&ts->mutex); + + ts->x_plate_ohms = pdata->x_plate_ohms ? : 400; + ts->pressure_max = pdata->pressure_max ? : ~0; + + ts->first_conversion_delay = pdata->first_conversion_delay; + ts->acquisition_time = pdata->acquisition_time; + ts->averaging = pdata->averaging; + ts->pen_down_acc_interval = pdata->pen_down_acc_interval; + ts->median = pdata->median; + + if (pdata->gpio_output) + ts->gpio_init = AD7879_GPIO_EN | + (pdata->gpio_default ? 0 : AD7879_GPIO_DATA); + else + ts->gpio_init = AD7879_GPIO_EN | AD7879_GPIODIR; + + snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&bus->dev)); + + input_dev->name = "AD7879 Touchscreen"; + input_dev->phys = ts->phys; + input_dev->dev.parent = &bus->dev; + + __set_bit(EV_ABS, input_dev->evbit); + __set_bit(ABS_X, input_dev->absbit); + __set_bit(ABS_Y, input_dev->absbit); + __set_bit(ABS_PRESSURE, input_dev->absbit); + + input_set_abs_params(input_dev, ABS_X, + pdata->x_min ? : 0, + pdata->x_max ? : MAX_12BIT, + 0, 0); + input_set_abs_params(input_dev, ABS_Y, + pdata->y_min ? : 0, + pdata->y_max ? 
: MAX_12BIT, + 0, 0); + input_set_abs_params(input_dev, ABS_PRESSURE, + pdata->pressure_min, pdata->pressure_max, 0, 0); + + err = ad7879_write(bus, AD7879_REG_CTRL2, AD7879_RESET); + + if (err < 0) { + dev_err(&bus->dev, "Failed to write %s\n", input_dev->name); + goto err_free_mem; + } + + revid = ad7879_read(bus, AD7879_REG_REVID); + + if ((revid & 0xFF) != AD7879_DEVID) { + dev_err(&bus->dev, "Failed to probe %s\n", input_dev->name); + err = -ENODEV; + goto err_free_mem; + } + + ad7879_setup(ts); + + err = request_irq(bus->irq, ad7879_irq, + IRQF_TRIGGER_FALLING | IRQF_SAMPLE_RANDOM, + bus->dev.driver->name, ts); + + if (err) { + dev_err(&bus->dev, "irq %d busy?\n", bus->irq); + goto err_free_mem; + } + + err = sysfs_create_group(&bus->dev.kobj, &ad7879_attr_group); + if (err) + goto err_free_irq; + + err = input_register_device(input_dev); + if (err) + goto err_remove_attr; + + dev_info(&bus->dev, "Rev.%d touchscreen, irq %d\n", + revid >> 8, bus->irq); + + return 0; + +err_remove_attr: + sysfs_remove_group(&bus->dev.kobj, &ad7879_attr_group); +err_free_irq: + free_irq(bus->irq, ts); +err_free_mem: + input_free_device(input_dev); + + return err; +} + +static int __devexit ad7879_destroy(bus_device *bus, struct ad7879 *ts) +{ + ad7879_disable(ts); + sysfs_remove_group(&ts->bus->dev.kobj, &ad7879_attr_group); + free_irq(ts->bus->irq, ts); + input_unregister_device(ts->input); + dev_dbg(&bus->dev, "unregistered touchscreen\n"); + + return 0; +} + +#ifdef CONFIG_PM +static int ad7879_suspend(bus_device *bus, pm_message_t message) +{ + struct ad7879 *ts = dev_get_drvdata(&bus->dev); + + ad7879_disable(ts); + + return 0; +} + +static int ad7879_resume(bus_device *bus) +{ + struct ad7879 *ts = dev_get_drvdata(&bus->dev); + + ad7879_enable(ts); + + return 0; +} +#else +#define ad7879_suspend NULL +#define ad7879_resume NULL +#endif + +#if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE) +#define MAX_SPI_FREQ_HZ 5000000 +#define AD7879_CMD_MAGIC 0xE000 +#define AD7879_CMD_READ (1 << 10) +#define AD7879_WRITECMD(reg) (AD7879_CMD_MAGIC | (reg & 0xF)) +#define AD7879_READCMD(reg) (AD7879_CMD_MAGIC | AD7879_CMD_READ | (reg & 0xF)) + +struct ser_req { + u16 command; + u16 data; + struct spi_message msg; + struct spi_transfer xfer[2]; +}; + +/* + * ad7879_read/write are only used for initial setup and for sysfs controls. + * The main traffic is done in ad7879_collect(). + */ + +static int ad7879_read(struct spi_device *spi, u8 reg) +{ + struct ser_req *req; + int status, ret; + + req = kzalloc(sizeof *req, GFP_KERNEL); + if (!req) + return -ENOMEM; + + spi_message_init(&req->msg); + + req->command = (u16) AD7879_READCMD(reg); + req->xfer[0].tx_buf = &req->command; + req->xfer[0].len = 2; + + req->xfer[1].rx_buf = &req->data; + req->xfer[1].len = 2; + + spi_message_add_tail(&req->xfer[0], &req->msg); + spi_message_add_tail(&req->xfer[1], &req->msg); + + status = spi_sync(spi, &req->msg); + ret = status ? 
: req->data; + + kfree(req); + + return ret; +} + +static int ad7879_write(struct spi_device *spi, u8 reg, u16 val) +{ + struct ser_req *req; + int status; + + req = kzalloc(sizeof *req, GFP_KERNEL); + if (!req) + return -ENOMEM; + + spi_message_init(&req->msg); + + req->command = (u16) AD7879_WRITECMD(reg); + req->xfer[0].tx_buf = &req->command; + req->xfer[0].len = 2; + + req->data = val; + req->xfer[1].tx_buf = &req->data; + req->xfer[1].len = 2; + + spi_message_add_tail(&req->xfer[0], &req->msg); + spi_message_add_tail(&req->xfer[1], &req->msg); + + status = spi_sync(spi, &req->msg); + + kfree(req); + + return status; +} + +static void ad7879_collect(struct ad7879 *ts) +{ + int status = spi_sync(ts->bus, &ts->msg); + + if (status) + dev_err(&ts->bus->dev, "spi_sync --> %d\n", status); +} + +static void ad7879_setup_ts_def_msg(struct ad7879 *ts) +{ + struct spi_message *m; + int i; + + ts->cmd = (u16) AD7879_READCMD(AD7879_REG_XPLUS); + + m = &ts->msg; + spi_message_init(m); + ts->xfer[0].tx_buf = &ts->cmd; + ts->xfer[0].len = 2; + + spi_message_add_tail(&ts->xfer[0], m); + + for (i = 0; i < AD7879_NR_SENSE; i++) { + ts->xfer[i + 1].rx_buf = &ts->conversion_data[i]; + ts->xfer[i + 1].len = 2; + spi_message_add_tail(&ts->xfer[i + 1], m); + } +} + +static int __devinit ad7879_probe(struct spi_device *spi) +{ + struct ad7879 *ts; + int error; + + /* don't exceed max specified SPI CLK frequency */ + if (spi->max_speed_hz > MAX_SPI_FREQ_HZ) { + dev_err(&spi->dev, "SPI CLK %d Hz?\n", spi->max_speed_hz); + return -EINVAL; + } + + ts = kzalloc(sizeof(struct ad7879), GFP_KERNEL); + if (!ts) + return -ENOMEM; + + dev_set_drvdata(&spi->dev, ts); + ts->bus = spi; + + ad7879_setup_ts_def_msg(ts); + + error = ad7879_construct(spi, ts); + if (error) { + dev_set_drvdata(&spi->dev, NULL); + kfree(ts); + } + + return 0; +} + +static int __devexit ad7879_remove(struct spi_device *spi) +{ + struct ad7879 *ts = dev_get_drvdata(&spi->dev); + + ad7879_destroy(spi, ts); + dev_set_drvdata(&spi->dev, NULL); + kfree(ts); + + return 0; +} + +static struct spi_driver ad7879_driver = { + .driver = { + .name = "ad7879", + .bus = &spi_bus_type, + .owner = THIS_MODULE, + }, + .probe = ad7879_probe, + .remove = __devexit_p(ad7879_remove), + .suspend = ad7879_suspend, + .resume = ad7879_resume, +}; + +static int __init ad7879_init(void) +{ + return spi_register_driver(&ad7879_driver); +} +module_init(ad7879_init); + +static void __exit ad7879_exit(void) +{ + spi_unregister_driver(&ad7879_driver); +} +module_exit(ad7879_exit); + +#elif defined(CONFIG_TOUCHSCREEN_AD7879_I2C) || defined(CONFIG_TOUCHSCREEN_AD7879_I2C_MODULE) + +/* All registers are word-sized. + * AD7879 uses a high-byte first convention. 
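 * For example (illustrative value only), a register holding 0x1234 is sent
 * as 0x12 then 0x34; i2c_smbus_read_word_data() treats the first byte
 * received as the low byte and returns 0x3412, and swab16() restores 0x1234.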
+ */ +static int ad7879_read(struct i2c_client *client, u8 reg) +{ + return swab16(i2c_smbus_read_word_data(client, reg)); +} + +static int ad7879_write(struct i2c_client *client, u8 reg, u16 val) +{ + return i2c_smbus_write_word_data(client, reg, swab16(val)); +} + +static void ad7879_collect(struct ad7879 *ts) +{ + int i; + + for (i = 0; i < AD7879_NR_SENSE; i++) + ts->conversion_data[i] = ad7879_read(ts->bus, + AD7879_REG_XPLUS + i); +} + +static int __devinit ad7879_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct ad7879 *ts; + int error; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_WORD_DATA)) { + dev_err(&client->dev, "SMBUS Word Data not Supported\n"); + return -EIO; + } + + ts = kzalloc(sizeof(struct ad7879), GFP_KERNEL); + if (!ts) + return -ENOMEM; + + i2c_set_clientdata(client, ts); + ts->bus = client; + + error = ad7879_construct(client, ts); + if (error) { + i2c_set_clientdata(client, NULL); + kfree(ts); + } + + return 0; +} + +static int __devexit ad7879_remove(struct i2c_client *client) +{ + struct ad7879 *ts = dev_get_drvdata(&client->dev); + + ad7879_destroy(client, ts); + i2c_set_clientdata(client, NULL); + kfree(ts); + + return 0; +} + +static const struct i2c_device_id ad7879_id[] = { + { "ad7879", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, ad7879_id); + +static struct i2c_driver ad7879_driver = { + .driver = { + .name = "ad7879", + .owner = THIS_MODULE, + }, + .probe = ad7879_probe, + .remove = __devexit_p(ad7879_remove), + .suspend = ad7879_suspend, + .resume = ad7879_resume, + .id_table = ad7879_id, +}; + +static int __init ad7879_init(void) +{ + return i2c_add_driver(&ad7879_driver); +} +module_init(ad7879_init); + +static void __exit ad7879_exit(void) +{ + i2c_del_driver(&ad7879_driver); +} +module_exit(ad7879_exit); +#endif + +MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); +MODULE_DESCRIPTION("AD7879(-1) touchscreen Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c index 1d11e2be9ef..dfa6a84ab50 100644 --- a/drivers/input/touchscreen/mainstone-wm97xx.c +++ b/drivers/input/touchscreen/mainstone-wm97xx.c @@ -162,6 +162,7 @@ static int wm97xx_acc_pen_down(struct wm97xx *wm) input_report_abs(wm->input_dev, ABS_X, x & 0xfff); input_report_abs(wm->input_dev, ABS_Y, y & 0xfff); input_report_abs(wm->input_dev, ABS_PRESSURE, p & 0xfff); + input_report_key(wm->input_dev, BTN_TOUCH, (p != 0)); input_sync(wm->input_dev); reads++; } while (reads < cinfo[sp_idx].reads); @@ -245,7 +246,7 @@ static void wm97xx_irq_enable(struct wm97xx *wm, int enable) if (enable) enable_irq(wm->pen_irq); else - disable_irq(wm->pen_irq); + disable_irq_nosync(wm->pen_irq); } static struct wm97xx_mach_ops mainstone_mach_ops = { diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c index 54986627def..e868264fe79 100644 --- a/drivers/input/touchscreen/ucb1400_ts.c +++ b/drivers/input/touchscreen/ucb1400_ts.c @@ -151,12 +151,14 @@ static void ucb1400_ts_evt_add(struct input_dev *idev, u16 pressure, u16 x, u16 input_report_abs(idev, ABS_X, x); input_report_abs(idev, ABS_Y, y); input_report_abs(idev, ABS_PRESSURE, pressure); + input_report_key(idev, BTN_TOUCH, 1); input_sync(idev); } static void ucb1400_ts_event_release(struct input_dev *idev) { input_report_abs(idev, ABS_PRESSURE, 0); + input_report_key(idev, BTN_TOUCH, 0); input_sync(idev); } @@ -377,7 +379,8 @@ static int ucb1400_ts_probe(struct 
platform_device *dev) ucb->ts_idev->id.product = ucb->id; ucb->ts_idev->open = ucb1400_ts_open; ucb->ts_idev->close = ucb1400_ts_close; - ucb->ts_idev->evbit[0] = BIT_MASK(EV_ABS); + ucb->ts_idev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY); + ucb->ts_idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); ucb1400_adc_enable(ucb->ac97); x_res = ucb1400_ts_read_xres(ucb); diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c index d15aa11d705..cec480bffe3 100644 --- a/drivers/input/touchscreen/wm97xx-core.c +++ b/drivers/input/touchscreen/wm97xx-core.c @@ -409,6 +409,7 @@ static int wm97xx_read_samples(struct wm97xx *wm) wm->pen_is_down = 0; dev_dbg(wm->dev, "pen up\n"); input_report_abs(wm->input_dev, ABS_PRESSURE, 0); + input_report_key(wm->input_dev, BTN_TOUCH, 0); input_sync(wm->input_dev); } else if (!(rc & RC_AGAIN)) { /* We need high frequency updates only while @@ -433,6 +434,7 @@ static int wm97xx_read_samples(struct wm97xx *wm) input_report_abs(wm->input_dev, ABS_X, data.x & 0xfff); input_report_abs(wm->input_dev, ABS_Y, data.y & 0xfff); input_report_abs(wm->input_dev, ABS_PRESSURE, data.p & 0xfff); + input_report_key(wm->input_dev, BTN_TOUCH, 1); input_sync(wm->input_dev); wm->pen_is_down = 1; wm->ts_reader_interval = wm->ts_reader_min_interval; @@ -628,18 +630,21 @@ static int wm97xx_probe(struct device *dev) wm->input_dev->phys = "wm97xx"; wm->input_dev->open = wm97xx_ts_input_open; wm->input_dev->close = wm97xx_ts_input_close; - set_bit(EV_ABS, wm->input_dev->evbit); - set_bit(ABS_X, wm->input_dev->absbit); - set_bit(ABS_Y, wm->input_dev->absbit); - set_bit(ABS_PRESSURE, wm->input_dev->absbit); + + __set_bit(EV_ABS, wm->input_dev->evbit); + __set_bit(EV_KEY, wm->input_dev->evbit); + __set_bit(BTN_TOUCH, wm->input_dev->keybit); + input_set_abs_params(wm->input_dev, ABS_X, abs_x[0], abs_x[1], abs_x[2], 0); input_set_abs_params(wm->input_dev, ABS_Y, abs_y[0], abs_y[1], abs_y[2], 0); input_set_abs_params(wm->input_dev, ABS_PRESSURE, abs_p[0], abs_p[1], abs_p[2], 0); + input_set_drvdata(wm->input_dev, wm); wm->input_dev->dev.parent = dev; + ret = input_register_device(wm->input_dev); if (ret < 0) goto dev_alloc_err; diff --git a/drivers/input/touchscreen/zylonite-wm97xx.c b/drivers/input/touchscreen/zylonite-wm97xx.c new file mode 100644 index 00000000000..41e4359c277 --- /dev/null +++ b/drivers/input/touchscreen/zylonite-wm97xx.c @@ -0,0 +1,240 @@ +/* + * zylonite-wm97xx.c -- Zylonite Continuous Touch screen driver + * + * Copyright 2004, 2007, 2008 Wolfson Microelectronics PLC. + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * Parts Copyright : Ian Molton <spyro@f2s.com> + * Andrew Zabolotny <zap@homelink.ru> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * Notes: + * This is a wm97xx extended touch driver supporting interrupt driven + * and continuous operation on Marvell Zylonite development systems + * (which have a WM9713 on board). 
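 * The number of coordinate reads per cycle is derived from the sample rate
 * by WM_READS(sp) = sp / HZ + 1 below; with HZ = 100 (an illustrative value
 * only), the 94 Hz mode does one read per cycle and the 188 Hz mode two.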
+ */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/irq.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/wm97xx.h> + +#include <mach/hardware.h> +#include <mach/mfp.h> +#include <mach/regs-ac97.h> + +struct continuous { + u16 id; /* codec id */ + u8 code; /* continuous code */ + u8 reads; /* number of coord reads per read cycle */ + u32 speed; /* number of coords per second */ +}; + +#define WM_READS(sp) ((sp / HZ) + 1) + +static const struct continuous cinfo[] = { + { WM9713_ID2, 0, WM_READS(94), 94 }, + { WM9713_ID2, 1, WM_READS(120), 120 }, + { WM9713_ID2, 2, WM_READS(154), 154 }, + { WM9713_ID2, 3, WM_READS(188), 188 }, +}; + +/* continuous speed index */ +static int sp_idx; + +/* + * Pen sampling frequency (Hz) in continuous mode. + */ +static int cont_rate = 200; +module_param(cont_rate, int, 0); +MODULE_PARM_DESC(cont_rate, "Sampling rate in continuous mode (Hz)"); + +/* + * Pressure readback. + * + * Set to 1 to read back pen down pressure + */ +static int pressure; +module_param(pressure, int, 0); +MODULE_PARM_DESC(pressure, "Pressure readback (1 = pressure, 0 = no pressure)"); + +/* + * AC97 touch data slot. + * + * Touch screen readback data ac97 slot + */ +static int ac97_touch_slot = 5; +module_param(ac97_touch_slot, int, 0); +MODULE_PARM_DESC(ac97_touch_slot, "Touch screen data slot AC97 number"); + + +/* flush AC97 slot 5 FIFO machines */ +static void wm97xx_acc_pen_up(struct wm97xx *wm) +{ + int i; + + msleep(1); + + for (i = 0; i < 16; i++) + MODR; +} + +static int wm97xx_acc_pen_down(struct wm97xx *wm) +{ + u16 x, y, p = 0x100 | WM97XX_ADCSEL_PRES; + int reads = 0; + static u16 last, tries; + + /* When the AC97 queue has been drained we need to allow time + * to buffer up samples otherwise we end up spinning polling + * for samples. The controller can't have a suitably low + * threashold set to use the notifications it gives. 
+ */ + msleep(1); + + if (tries > 5) { + tries = 0; + return RC_PENUP; + } + + x = MODR; + if (x == last) { + tries++; + return RC_AGAIN; + } + last = x; + do { + if (reads) + x = MODR; + y = MODR; + if (pressure) + p = MODR; + + /* are samples valid */ + if ((x & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_X || + (y & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_Y || + (p & WM97XX_ADCSRC_MASK) != WM97XX_ADCSEL_PRES) + goto up; + + /* coordinate is good */ + tries = 0; + input_report_abs(wm->input_dev, ABS_X, x & 0xfff); + input_report_abs(wm->input_dev, ABS_Y, y & 0xfff); + input_report_abs(wm->input_dev, ABS_PRESSURE, p & 0xfff); + input_report_key(wm->input_dev, BTN_TOUCH, (p != 0)); + input_sync(wm->input_dev); + reads++; + } while (reads < cinfo[sp_idx].reads); +up: + return RC_PENDOWN | RC_AGAIN; +} + +static int wm97xx_acc_startup(struct wm97xx *wm) +{ + int idx; + + /* check we have a codec */ + if (wm->ac97 == NULL) + return -ENODEV; + + /* Go you big red fire engine */ + for (idx = 0; idx < ARRAY_SIZE(cinfo); idx++) { + if (wm->id != cinfo[idx].id) + continue; + sp_idx = idx; + if (cont_rate <= cinfo[idx].speed) + break; + } + wm->acc_rate = cinfo[sp_idx].code; + wm->acc_slot = ac97_touch_slot; + dev_info(wm->dev, + "zylonite accelerated touchscreen driver, %d samples/sec\n", + cinfo[sp_idx].speed); + + return 0; +} + +static void wm97xx_irq_enable(struct wm97xx *wm, int enable) +{ + if (enable) + enable_irq(wm->pen_irq); + else + disable_irq_nosync(wm->pen_irq); +} + +static struct wm97xx_mach_ops zylonite_mach_ops = { + .acc_enabled = 1, + .acc_pen_up = wm97xx_acc_pen_up, + .acc_pen_down = wm97xx_acc_pen_down, + .acc_startup = wm97xx_acc_startup, + .irq_enable = wm97xx_irq_enable, + .irq_gpio = WM97XX_GPIO_2, +}; + +static int zylonite_wm97xx_probe(struct platform_device *pdev) +{ + struct wm97xx *wm = platform_get_drvdata(pdev); + int gpio_touch_irq; + + if (cpu_is_pxa320()) + gpio_touch_irq = mfp_to_gpio(MFP_PIN_GPIO15); + else + gpio_touch_irq = mfp_to_gpio(MFP_PIN_GPIO26); + + wm->pen_irq = IRQ_GPIO(gpio_touch_irq); + set_irq_type(IRQ_GPIO(gpio_touch_irq), IRQ_TYPE_EDGE_BOTH); + + wm97xx_config_gpio(wm, WM97XX_GPIO_13, WM97XX_GPIO_IN, + WM97XX_GPIO_POL_HIGH, + WM97XX_GPIO_STICKY, + WM97XX_GPIO_WAKE); + wm97xx_config_gpio(wm, WM97XX_GPIO_2, WM97XX_GPIO_OUT, + WM97XX_GPIO_POL_HIGH, + WM97XX_GPIO_NOTSTICKY, + WM97XX_GPIO_NOWAKE); + + return wm97xx_register_mach_ops(wm, &zylonite_mach_ops); +} + +static int zylonite_wm97xx_remove(struct platform_device *pdev) +{ + struct wm97xx *wm = platform_get_drvdata(pdev); + + wm97xx_unregister_mach_ops(wm); + + return 0; +} + +static struct platform_driver zylonite_wm97xx_driver = { + .probe = zylonite_wm97xx_probe, + .remove = zylonite_wm97xx_remove, + .driver = { + .name = "wm97xx-touch", + }, +}; + +static int __init zylonite_wm97xx_init(void) +{ + return platform_driver_register(&zylonite_wm97xx_driver); +} + +static void __exit zylonite_wm97xx_exit(void) +{ + platform_driver_unregister(&zylonite_wm97xx_driver); +} + +module_init(zylonite_wm97xx_init); +module_exit(zylonite_wm97xx_exit); + +/* Module information */ +MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); +MODULE_DESCRIPTION("wm97xx continuous touch driver for Zylonite"); +MODULE_LICENSE("GPL"); diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c index 102ef4a14c5..d2109054de8 100644 --- a/drivers/leds/leds-gpio.c +++ b/drivers/leds/leds-gpio.c @@ -82,7 +82,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template, if 
(!gpio_is_valid(template->gpio)) { printk(KERN_INFO "Skipping unavilable LED gpio %d (%s)\n", template->gpio, template->name); - return; + return 0; } ret = gpio_request(template->gpio, template->name); diff --git a/drivers/md/dm-bio-list.h b/drivers/md/dm-bio-list.h deleted file mode 100644 index 345098b4ca7..00000000000 --- a/drivers/md/dm-bio-list.h +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright (C) 2004 Red Hat UK Ltd. - * - * This file is released under the GPL. - */ - -#ifndef DM_BIO_LIST_H -#define DM_BIO_LIST_H - -#include <linux/bio.h> - -#ifdef CONFIG_BLOCK - -struct bio_list { - struct bio *head; - struct bio *tail; -}; - -static inline int bio_list_empty(const struct bio_list *bl) -{ - return bl->head == NULL; -} - -static inline void bio_list_init(struct bio_list *bl) -{ - bl->head = bl->tail = NULL; -} - -#define bio_list_for_each(bio, bl) \ - for (bio = (bl)->head; bio; bio = bio->bi_next) - -static inline unsigned bio_list_size(const struct bio_list *bl) -{ - unsigned sz = 0; - struct bio *bio; - - bio_list_for_each(bio, bl) - sz++; - - return sz; -} - -static inline void bio_list_add(struct bio_list *bl, struct bio *bio) -{ - bio->bi_next = NULL; - - if (bl->tail) - bl->tail->bi_next = bio; - else - bl->head = bio; - - bl->tail = bio; -} - -static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio) -{ - bio->bi_next = bl->head; - - bl->head = bio; - - if (!bl->tail) - bl->tail = bio; -} - -static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2) -{ - if (!bl2->head) - return; - - if (bl->tail) - bl->tail->bi_next = bl2->head; - else - bl->head = bl2->head; - - bl->tail = bl2->tail; -} - -static inline void bio_list_merge_head(struct bio_list *bl, - struct bio_list *bl2) -{ - if (!bl2->head) - return; - - if (bl->head) - bl2->tail->bi_next = bl->head; - else - bl->tail = bl2->tail; - - bl->head = bl2->head; -} - -static inline struct bio *bio_list_pop(struct bio_list *bl) -{ - struct bio *bio = bl->head; - - if (bio) { - bl->head = bl->head->bi_next; - if (!bl->head) - bl->tail = NULL; - - bio->bi_next = NULL; - } - - return bio; -} - -static inline struct bio *bio_list_get(struct bio_list *bl) -{ - struct bio *bio = bl->head; - - bl->head = bl->tail = NULL; - - return bio; -} - -#endif /* CONFIG_BLOCK */ -#endif diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index 59ee1b015d2..559dbb52bc8 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -15,8 +15,6 @@ #include <linux/device-mapper.h> -#include "dm-bio-list.h" - #define DM_MSG_PREFIX "delay" struct delay_c { diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index f01096549a9..823ceba6efa 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1047,6 +1047,19 @@ static int populate_table(struct dm_table *table, return dm_table_complete(table); } +static int table_prealloc_integrity(struct dm_table *t, + struct mapped_device *md) +{ + struct list_head *devices = dm_table_get_devices(t); + struct dm_dev_internal *dd; + + list_for_each_entry(dd, devices, list) + if (bdev_get_integrity(dd->dm_dev.bdev)) + return blk_integrity_register(dm_disk(md), NULL); + + return 0; +} + static int table_load(struct dm_ioctl *param, size_t param_size) { int r; @@ -1068,6 +1081,14 @@ static int table_load(struct dm_ioctl *param, size_t param_size) goto out; } + r = table_prealloc_integrity(t, md); + if (r) { + DMERR("%s: could not register integrity profile.", + dm_device_name(md)); + dm_table_destroy(t); + goto out; + } + down_write(&_hash_lock); hc = 
dm_get_mdptr(md); if (!hc || hc->md != md) { diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index 0a225da2127..3e3fc06cb86 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -297,7 +297,8 @@ static int run_complete_job(struct kcopyd_job *job) dm_kcopyd_notify_fn fn = job->fn; struct dm_kcopyd_client *kc = job->kc; - kcopyd_put_pages(kc, job->pages); + if (job->pages) + kcopyd_put_pages(kc, job->pages); mempool_free(job, kc->job_pool); fn(read_err, write_err, context); @@ -461,6 +462,7 @@ static void segment_complete(int read_err, unsigned long write_err, sector_t progress = 0; sector_t count = 0; struct kcopyd_job *job = (struct kcopyd_job *) context; + struct dm_kcopyd_client *kc = job->kc; mutex_lock(&job->lock); @@ -490,7 +492,7 @@ static void segment_complete(int read_err, unsigned long write_err, if (count) { int i; - struct kcopyd_job *sub_job = mempool_alloc(job->kc->job_pool, + struct kcopyd_job *sub_job = mempool_alloc(kc->job_pool, GFP_NOIO); *sub_job = *job; @@ -509,13 +511,16 @@ static void segment_complete(int read_err, unsigned long write_err, } else if (atomic_dec_and_test(&job->sub_jobs)) { /* - * To avoid a race we must keep the job around - * until after the notify function has completed. - * Otherwise the client may try and stop the job - * after we've completed. + * Queue the completion callback to the kcopyd thread. + * + * Some callers assume that all the completions are called + * from a single thread and don't race with each other. + * + * We must not call the callback directly here because this + * code may not be executing in the thread. */ - job->fn(read_err, write_err, job->context); - mempool_free(job, job->kc->job_pool); + push(&kc->complete_jobs, job); + wake(kc); } } @@ -528,6 +533,8 @@ static void split_job(struct kcopyd_job *job) { int i; + atomic_inc(&job->kc->nr_jobs); + atomic_set(&job->sub_jobs, SPLIT_COUNT); for (i = 0; i < SPLIT_COUNT; i++) segment_complete(0, 0u, job); diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index bfa107f59d9..79fb53e51c7 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -142,7 +142,6 @@ static struct target_type linear_target = { .status = linear_status, .ioctl = linear_ioctl, .merge = linear_merge, - .features = DM_TARGET_SUPPORTS_BARRIERS, }; int __init dm_linear_init(void) diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 095f77bf968..6a386ab4f7e 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -8,7 +8,6 @@ #include <linux/device-mapper.h> #include "dm-path-selector.h" -#include "dm-bio-list.h" #include "dm-bio-record.h" #include "dm-uevent.h" diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 536ef0bef15..076fbb4e967 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -5,7 +5,6 @@ * This file is released under the GPL. 
*/ -#include "dm-bio-list.h" #include "dm-bio-record.h" #include <linux/init.h> diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c index 59f8d9df9e1..7b899be0b08 100644 --- a/drivers/md/dm-region-hash.c +++ b/drivers/md/dm-region-hash.c @@ -14,7 +14,6 @@ #include <linux/vmalloc.h> #include "dm.h" -#include "dm-bio-list.h" #define DM_MSG_PREFIX "region hash" diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 981a0413068..d73f17fc777 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -22,7 +22,6 @@ #include <linux/workqueue.h> #include "dm-exception-store.h" -#include "dm-bio-list.h" #define DM_MSG_PREFIX "snapshots" diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index e8361b191b9..429b50b975d 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -52,8 +52,6 @@ struct dm_table { sector_t *highs; struct dm_target *targets; - unsigned barriers_supported:1; - /* * Indicates the rw permissions for the new logical * device. This should be a combination of FMODE_READ @@ -243,7 +241,6 @@ int dm_table_create(struct dm_table **result, fmode_t mode, INIT_LIST_HEAD(&t->devices); atomic_set(&t->holders, 0); - t->barriers_supported = 1; if (!num_targets) num_targets = KEYS_PER_NODE; @@ -751,10 +748,6 @@ int dm_table_add_target(struct dm_table *t, const char *type, /* FIXME: the plan is to combine high here and then have * the merge fn apply the target level restrictions. */ combine_restrictions_low(&t->limits, &tgt->limits); - - if (!(tgt->type->features & DM_TARGET_SUPPORTS_BARRIERS)) - t->barriers_supported = 0; - return 0; bad: @@ -799,12 +792,6 @@ int dm_table_complete(struct dm_table *t) check_for_valid_limits(&t->limits); - /* - * We only support barriers if there is exactly one underlying device. - */ - if (!list_is_singular(&t->devices)) - t->barriers_supported = 0; - /* how many indexes will the btree have ? */ leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE); t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE); @@ -879,6 +866,45 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) return &t->targets[(KEYS_PER_NODE * n) + k]; } +/* + * Set the integrity profile for this device if all devices used have + * matching profiles. 
+ */ +static void dm_table_set_integrity(struct dm_table *t) +{ + struct list_head *devices = dm_table_get_devices(t); + struct dm_dev_internal *prev = NULL, *dd = NULL; + + if (!blk_get_integrity(dm_disk(t->md))) + return; + + list_for_each_entry(dd, devices, list) { + if (prev && + blk_integrity_compare(prev->dm_dev.bdev->bd_disk, + dd->dm_dev.bdev->bd_disk) < 0) { + DMWARN("%s: integrity not set: %s and %s mismatch", + dm_device_name(t->md), + prev->dm_dev.bdev->bd_disk->disk_name, + dd->dm_dev.bdev->bd_disk->disk_name); + goto no_integrity; + } + prev = dd; + } + + if (!prev || !bdev_get_integrity(prev->dm_dev.bdev)) + goto no_integrity; + + blk_integrity_register(dm_disk(t->md), + bdev_get_integrity(prev->dm_dev.bdev)); + + return; + +no_integrity: + blk_integrity_register(dm_disk(t->md), NULL); + + return; +} + void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q) { /* @@ -899,6 +925,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q) else queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q); + dm_table_set_integrity(t); } unsigned int dm_table_get_num_targets(struct dm_table *t) @@ -1019,12 +1046,6 @@ struct mapped_device *dm_table_get_md(struct dm_table *t) return t->md; } -int dm_table_barrier_ok(struct dm_table *t) -{ - return t->barriers_supported; -} -EXPORT_SYMBOL(dm_table_barrier_ok); - EXPORT_SYMBOL(dm_vcalloc); EXPORT_SYMBOL(dm_get_device); EXPORT_SYMBOL(dm_put_device); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 788ba96a625..424f7b048c3 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -6,7 +6,6 @@ */ #include "dm.h" -#include "dm-bio-list.h" #include "dm-uevent.h" #include <linux/init.h> @@ -89,12 +88,13 @@ union map_info *dm_get_mapinfo(struct bio *bio) /* * Bits for the md->flags field. */ -#define DMF_BLOCK_IO 0 +#define DMF_BLOCK_IO_FOR_SUSPEND 0 #define DMF_SUSPENDED 1 #define DMF_FROZEN 2 #define DMF_FREEING 3 #define DMF_DELETING 4 #define DMF_NOFLUSH_SUSPENDING 5 +#define DMF_QUEUE_IO_TO_THREAD 6 /* * Work processed by per-device workqueue. @@ -124,6 +124,11 @@ struct mapped_device { spinlock_t deferred_lock; /* + * An error from the barrier request currently being processed. + */ + int barrier_error; + + /* * Processing queue (flush/barriers) */ struct workqueue_struct *wq; @@ -424,6 +429,10 @@ static void end_io_acct(struct dm_io *io) part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration); part_stat_unlock(); + /* + * After this is decremented the bio must not be touched if it is + * a barrier. + */ dm_disk(md)->part0.in_flight = pending = atomic_dec_return(&md->pending); @@ -435,21 +444,18 @@ static void end_io_acct(struct dm_io *io) /* * Add the bio to the list of deferred io. */ -static int queue_io(struct mapped_device *md, struct bio *bio) +static void queue_io(struct mapped_device *md, struct bio *bio) { down_write(&md->io_lock); - if (!test_bit(DMF_BLOCK_IO, &md->flags)) { - up_write(&md->io_lock); - return 1; - } - spin_lock_irq(&md->deferred_lock); bio_list_add(&md->deferred, bio); spin_unlock_irq(&md->deferred_lock); + if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) + queue_work(md->wq, &md->work); + up_write(&md->io_lock); - return 0; /* deferred successfully */ } /* @@ -533,25 +539,35 @@ static void dec_pending(struct dm_io *io, int error) */ spin_lock_irqsave(&md->deferred_lock, flags); if (__noflush_suspending(md)) - bio_list_add(&md->deferred, io->bio); + bio_list_add_head(&md->deferred, io->bio); else /* noflush suspend was interrupted. 
*/ io->error = -EIO; spin_unlock_irqrestore(&md->deferred_lock, flags); } - end_io_acct(io); - io_error = io->error; bio = io->bio; - free_io(md, io); + if (bio_barrier(bio)) { + /* + * There can be just one barrier request so we use + * a per-device variable for error reporting. + * Note that you can't touch the bio after end_io_acct + */ + md->barrier_error = io_error; + end_io_acct(io); + } else { + end_io_acct(io); - if (io_error != DM_ENDIO_REQUEUE) { - trace_block_bio_complete(md->queue, bio); + if (io_error != DM_ENDIO_REQUEUE) { + trace_block_bio_complete(md->queue, bio); - bio_endio(bio, io_error); + bio_endio(bio, io_error); + } } + + free_io(md, io); } } @@ -693,13 +709,19 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector, clone->bi_sector = sector; clone->bi_bdev = bio->bi_bdev; - clone->bi_rw = bio->bi_rw; + clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER); clone->bi_vcnt = 1; clone->bi_size = to_bytes(len); clone->bi_io_vec->bv_offset = offset; clone->bi_io_vec->bv_len = clone->bi_size; clone->bi_flags |= 1 << BIO_CLONED; + if (bio_integrity(bio)) { + bio_integrity_clone(clone, bio, GFP_NOIO); + bio_integrity_trim(clone, + bio_sector_offset(bio, idx, offset), len); + } + return clone; } @@ -714,6 +736,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector, clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs); __bio_clone(clone, bio); + clone->bi_rw &= ~(1 << BIO_RW_BARRIER); clone->bi_destructor = dm_bio_destructor; clone->bi_sector = sector; clone->bi_idx = idx; @@ -721,6 +744,14 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector, clone->bi_size = to_bytes(len); clone->bi_flags &= ~(1 << BIO_SEG_VALID); + if (bio_integrity(bio)) { + bio_integrity_clone(clone, bio, GFP_NOIO); + + if (idx != bio->bi_idx || clone->bi_size < bio->bi_size) + bio_integrity_trim(clone, + bio_sector_offset(bio, idx, 0), len); + } + return clone; } @@ -834,14 +865,13 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio) ci.map = dm_get_table(md); if (unlikely(!ci.map)) { - bio_io_error(bio); - return; - } - if (unlikely(bio_barrier(bio) && !dm_table_barrier_ok(ci.map))) { - dm_table_put(ci.map); - bio_endio(bio, -EOPNOTSUPP); + if (!bio_barrier(bio)) + bio_io_error(bio); + else + md->barrier_error = -EIO; return; } + ci.md = md; ci.bio = bio; ci.io = alloc_io(md); @@ -918,7 +948,6 @@ out: */ static int dm_request(struct request_queue *q, struct bio *bio) { - int r = -EIO; int rw = bio_data_dir(bio); struct mapped_device *md = q->queuedata; int cpu; @@ -931,34 +960,27 @@ static int dm_request(struct request_queue *q, struct bio *bio) part_stat_unlock(); /* - * If we're suspended we have to queue - * this io for later. + * If we're suspended or the thread is processing barriers + * we have to queue this io for later. */ - while (test_bit(DMF_BLOCK_IO, &md->flags)) { + if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) || + unlikely(bio_barrier(bio))) { up_read(&md->io_lock); - if (bio_rw(bio) != READA) - r = queue_io(md, bio); + if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) && + bio_rw(bio) == READA) { + bio_io_error(bio); + return 0; + } - if (r <= 0) - goto out_req; + queue_io(md, bio); - /* - * We're in a while loop, because someone could suspend - * before we get to the following read lock. 
- */ - down_read(&md->io_lock); + return 0; } __split_and_process_bio(md, bio); up_read(&md->io_lock); return 0; - -out_req: - if (r < 0) - bio_io_error(bio); - - return 0; } static void dm_unplug_all(struct request_queue *q) @@ -978,7 +1000,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits) struct mapped_device *md = congested_data; struct dm_table *map; - if (!test_bit(DMF_BLOCK_IO, &md->flags)) { + if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { map = dm_get_table(md); if (map) { r = dm_table_any_congested(map, bdi_bits); @@ -1193,6 +1215,7 @@ static void free_dev(struct mapped_device *md) mempool_destroy(md->tio_pool); mempool_destroy(md->io_pool); bioset_free(md->bs); + blk_integrity_unregister(md->disk); del_gendisk(md->disk); free_minor(minor); @@ -1406,6 +1429,36 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible) return r; } +static int dm_flush(struct mapped_device *md) +{ + dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); + return 0; +} + +static void process_barrier(struct mapped_device *md, struct bio *bio) +{ + int error = dm_flush(md); + + if (unlikely(error)) { + bio_endio(bio, error); + return; + } + if (bio_empty_barrier(bio)) { + bio_endio(bio, 0); + return; + } + + __split_and_process_bio(md, bio); + + error = dm_flush(md); + + if (!error && md->barrier_error) + error = md->barrier_error; + + if (md->barrier_error != DM_ENDIO_REQUEUE) + bio_endio(bio, error); +} + /* * Process the deferred bios */ @@ -1417,25 +1470,34 @@ static void dm_wq_work(struct work_struct *work) down_write(&md->io_lock); -next_bio: - spin_lock_irq(&md->deferred_lock); - c = bio_list_pop(&md->deferred); - spin_unlock_irq(&md->deferred_lock); + while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { + spin_lock_irq(&md->deferred_lock); + c = bio_list_pop(&md->deferred); + spin_unlock_irq(&md->deferred_lock); - if (c) { - __split_and_process_bio(md, c); - goto next_bio; - } + if (!c) { + clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags); + break; + } - clear_bit(DMF_BLOCK_IO, &md->flags); + up_write(&md->io_lock); + + if (bio_barrier(c)) + process_barrier(md, c); + else + __split_and_process_bio(md, c); + + down_write(&md->io_lock); + } up_write(&md->io_lock); } static void dm_queue_flush(struct mapped_device *md) { + clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); + smp_mb__after_clear_bit(); queue_work(md->wq, &md->work); - flush_workqueue(md->wq); } /* @@ -1553,20 +1615,36 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) } /* - * First we set the BLOCK_IO flag so no more ios will be mapped. + * Here we must make sure that no processes are submitting requests + * to target drivers i.e. no one may be executing + * __split_and_process_bio. This is called from dm_request and + * dm_wq_work. + * + * To get all processes out of __split_and_process_bio in dm_request, + * we take the write lock. To prevent any process from reentering + * __split_and_process_bio from dm_request, we set + * DMF_QUEUE_IO_TO_THREAD. + * + * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND + * and call flush_workqueue(md->wq). flush_workqueue will wait until + * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any + * further calls to __split_and_process_bio from dm_wq_work. 
*/ down_write(&md->io_lock); - set_bit(DMF_BLOCK_IO, &md->flags); - + set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); + set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags); up_write(&md->io_lock); + flush_workqueue(md->wq); + /* - * Wait for the already-mapped ios to complete. + * At this point no more requests are entering target request routines. + * We call dm_wait_for_completion to wait for all existing requests + * to finish. */ r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE); down_write(&md->io_lock); - if (noflush) clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); up_write(&md->io_lock); @@ -1579,6 +1657,12 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) goto out; /* pushback list is already flushed, so skip flush */ } + /* + * If dm_wait_for_completion returned 0, the device is completely + * quiescent now. There is no request-processing activity. All new + * requests are being added to md->deferred list. + */ + dm_table_postsuspend_targets(map); set_bit(DMF_SUSPENDED, &md->flags); diff --git a/drivers/md/dm.h b/drivers/md/dm.h index b48397c0abb..a31506d93e9 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -52,7 +52,6 @@ int dm_table_any_congested(struct dm_table *t, int bdi_bits); * To check the return value from dm_table_find_target(). */ #define dm_target_is_valid(t) ((t)->table) -int dm_table_barrier_ok(struct dm_table *t); /*----------------------------------------------------------------- * A registry of target types. diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 274b491a11c..36df9109cde 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -35,7 +35,6 @@ #include <linux/blkdev.h> #include <linux/seq_file.h> #include "md.h" -#include "dm-bio-list.h" #include "raid1.h" #include "bitmap.h" diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index e293d92641a..81a54f17417 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -22,7 +22,6 @@ #include <linux/blkdev.h> #include <linux/seq_file.h> #include "md.h" -#include "dm-bio-list.h" #include "raid10.h" #include "bitmap.h" diff --git a/drivers/media/dvb/ttpci/Kconfig b/drivers/media/dvb/ttpci/Kconfig index 772990415f9..68eb4493f99 100644 --- a/drivers/media/dvb/ttpci/Kconfig +++ b/drivers/media/dvb/ttpci/Kconfig @@ -28,25 +28,12 @@ config DVB_AV7110 download/extract it, and then copy it to /usr/lib/hotplug/firmware or /lib/firmware (depending on configuration of firmware hotplug). - Say Y if you own such a card and want to use it. - -config DVB_AV7110_FIRMWARE - bool "Compile AV7110 firmware into the driver" - depends on DVB_AV7110 && !STANDALONE - default y if DVB_AV7110=y - help - The AV7110 firmware is normally loaded by the firmware hotplug manager. - If you want to compile the firmware into the driver you need to say - Y here and provide the correct path of the firmware. You need this - option if you want to compile the whole driver statically into the - kernel. + Alternatively, you can download the file and use the kernel's + EXTRA_FIRMWARE configuration option to build it into your + kernel image by adding the filename to the EXTRA_FIRMWARE + configuration option string. - All other people say N. - -config DVB_AV7110_FIRMWARE_FILE - string "Full pathname of av7110 firmware file" - depends on DVB_AV7110_FIRMWARE - default "/usr/lib/hotplug/firmware/dvb-ttpci-01.fw" + Say Y if you own such a card and want to use it. 
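The help text above describes the runtime loading path that the rest of this series switches av7110 to: the blob is fetched by name through request_firmware(), either from /lib/firmware or from the kernel image when the name is listed in CONFIG_EXTRA_FIRMWARE at build time. A minimal sketch of that interface follows; the blob name, function and device are placeholders, not the driver's code.

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>

static int example_load_blob(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	/* Served by the firmware loader from /lib/firmware, or directly
	 * from the image if "example/blob.bin" was named in
	 * CONFIG_EXTRA_FIRMWARE at build time. */
	ret = request_firmware(&fw, "example/blob.bin", dev);
	if (ret)
		return ret;

	/* ... hand fw->data (fw->size bytes) to the hardware here ... */

	release_firmware(fw);
	return 0;
}
MODULE_FIRMWARE("example/blob.bin");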
config DVB_AV7110_OSD bool "AV7110 OSD support" diff --git a/drivers/media/dvb/ttpci/Makefile b/drivers/media/dvb/ttpci/Makefile index 71451237294..8a4d5bb20a5 100644 --- a/drivers/media/dvb/ttpci/Makefile +++ b/drivers/media/dvb/ttpci/Makefile @@ -19,12 +19,3 @@ obj-$(CONFIG_DVB_AV7110) += dvb-ttpci.o EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/ EXTRA_CFLAGS += -Idrivers/media/common/tuners - -hostprogs-y := fdump - -ifeq ($(CONFIG_DVB_AV7110_FIRMWARE),y) -$(obj)/av7110.o: $(obj)/av7110_firm.h - -$(obj)/av7110_firm.h: $(obj)/fdump - $(obj)/fdump $(CONFIG_DVB_AV7110_FIRMWARE_FILE) dvb_ttpci_fw $@ -endif diff --git a/drivers/media/dvb/ttpci/av7110.c b/drivers/media/dvb/ttpci/av7110.c index 4624cee93e7..d1d959ed37b 100644 --- a/drivers/media/dvb/ttpci/av7110.c +++ b/drivers/media/dvb/ttpci/av7110.c @@ -1518,20 +1518,6 @@ static int check_firmware(struct av7110* av7110) return 0; } -#ifdef CONFIG_DVB_AV7110_FIRMWARE_FILE -#include "av7110_firm.h" -static void put_firmware(struct av7110* av7110) -{ - av7110->bin_fw = NULL; -} - -static inline int get_firmware(struct av7110* av7110) -{ - av7110->bin_fw = dvb_ttpci_fw; - av7110->size_fw = sizeof(dvb_ttpci_fw); - return check_firmware(av7110); -} -#else static void put_firmware(struct av7110* av7110) { vfree(av7110->bin_fw); @@ -1580,8 +1566,6 @@ static int get_firmware(struct av7110* av7110) release_firmware(fw); return ret; } -#endif - static int alps_bsrv2_tuner_set_params(struct dvb_frontend* fe, struct dvb_frontend_parameters *params) { diff --git a/drivers/media/dvb/ttpci/av7110_hw.c b/drivers/media/dvb/ttpci/av7110_hw.c index 3a3f5279e92..5e3f88911a1 100644 --- a/drivers/media/dvb/ttpci/av7110_hw.c +++ b/drivers/media/dvb/ttpci/av7110_hw.c @@ -198,29 +198,10 @@ static int load_dram(struct av7110 *av7110, u32 *data, int len) /* we cannot write av7110 DRAM directly, so load a bootloader into * the DPRAM which implements a simple boot protocol */ -static u8 bootcode[] = { - 0xea, 0x00, 0x00, 0x0e, 0xe1, 0xb0, 0xf0, 0x0e, 0xe2, 0x5e, 0xf0, 0x04, - 0xe2, 0x5e, 0xf0, 0x04, 0xe2, 0x5e, 0xf0, 0x08, 0xe2, 0x5e, 0xf0, 0x04, - 0xe2, 0x5e, 0xf0, 0x04, 0xe2, 0x5e, 0xf0, 0x04, 0x2c, 0x00, 0x00, 0x24, - 0x00, 0x00, 0x00, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x2c, 0x00, 0x00, 0x34, - 0x00, 0x00, 0x00, 0x00, 0xa5, 0xa5, 0x5a, 0x5a, 0x00, 0x1f, 0x15, 0x55, - 0x00, 0x00, 0x00, 0x09, 0xe5, 0x9f, 0xd0, 0x7c, 0xe5, 0x9f, 0x40, 0x74, - 0xe3, 0xa0, 0x00, 0x00, 0xe5, 0x84, 0x00, 0x00, 0xe5, 0x84, 0x00, 0x04, - 0xe5, 0x9f, 0x10, 0x70, 0xe5, 0x9f, 0x20, 0x70, 0xe5, 0x9f, 0x30, 0x64, - 0xe8, 0xb1, 0x1f, 0xe0, 0xe8, 0xa3, 0x1f, 0xe0, 0xe1, 0x51, 0x00, 0x02, - 0xda, 0xff, 0xff, 0xfb, 0xe5, 0x9f, 0xf0, 0x50, 0xe1, 0xd4, 0x10, 0xb0, - 0xe3, 0x51, 0x00, 0x00, 0x0a, 0xff, 0xff, 0xfc, 0xe1, 0xa0, 0x10, 0x0d, - 0xe5, 0x94, 0x30, 0x04, 0xe1, 0xd4, 0x20, 0xb2, 0xe2, 0x82, 0x20, 0x3f, - 0xe1, 0xb0, 0x23, 0x22, 0x03, 0xa0, 0x00, 0x02, 0xe1, 0xc4, 0x00, 0xb0, - 0x0a, 0xff, 0xff, 0xf4, 0xe8, 0xb1, 0x1f, 0xe0, 0xe8, 0xa3, 0x1f, 0xe0, - 0xe8, 0xb1, 0x1f, 0xe0, 0xe8, 0xa3, 0x1f, 0xe0, 0xe2, 0x52, 0x20, 0x01, - 0x1a, 0xff, 0xff, 0xf9, 0xe2, 0x2d, 0xdb, 0x05, 0xea, 0xff, 0xff, 0xec, - 0x2c, 0x00, 0x03, 0xf8, 0x2c, 0x00, 0x04, 0x00, 0x9e, 0x00, 0x08, 0x00, - 0x2c, 0x00, 0x00, 0x74, 0x2c, 0x00, 0x00, 0xc0 -}; - int av7110_bootarm(struct av7110 *av7110) { + const struct firmware *fw; + const char *fw_name = "av7110/bootcode.bin"; struct saa7146_dev *dev = av7110->dev; u32 ret; int i; @@ -261,7 +242,15 @@ int av7110_bootarm(struct av7110 *av7110) 
//saa7146_setgpio(dev, DEBI_DONE_LINE, SAA7146_GPIO_INPUT); //saa7146_setgpio(dev, 3, SAA7146_GPIO_INPUT); - mwdebi(av7110, DEBISWAB, DPRAM_BASE, bootcode, sizeof(bootcode)); + ret = request_firmware(&fw, fw_name, &dev->pci->dev); + if (ret) { + printk(KERN_ERR "dvb-ttpci: Failed to load firmware \"%s\"\n", + fw_name); + return ret; + } + + mwdebi(av7110, DEBISWAB, DPRAM_BASE, fw->data, fw->size); + release_firmware(fw); iwdebi(av7110, DEBINOSWAP, AV7110_BOOT_STATE, BOOTSTATE_BUFFER_FULL, 2); if (saa7146_wait_for_debi_done(av7110->dev, 1)) { @@ -302,7 +291,7 @@ int av7110_bootarm(struct av7110 *av7110) av7110->arm_ready = 1; return 0; } - +MODULE_FIRMWARE("av7110/bootcode.bin"); /**************************************************************************** * DEBI command polling diff --git a/drivers/media/dvb/ttpci/av7110_hw.h b/drivers/media/dvb/ttpci/av7110_hw.h index ca99e5c1fc8..1634aba5cb8 100644 --- a/drivers/media/dvb/ttpci/av7110_hw.h +++ b/drivers/media/dvb/ttpci/av7110_hw.h @@ -390,7 +390,8 @@ static inline void iwdebi(struct av7110 *av7110, u32 config, int addr, u32 val, } /* buffer writes */ -static inline void mwdebi(struct av7110 *av7110, u32 config, int addr, u8 *val, int count) +static inline void mwdebi(struct av7110 *av7110, u32 config, int addr, + const u8 *val, int count) { memcpy(av7110->debi_virt, val, count); av7110_debiwrite(av7110, config, addr, 0, count); diff --git a/drivers/media/dvb/ttpci/fdump.c b/drivers/media/dvb/ttpci/fdump.c deleted file mode 100644 index c90001d35e7..00000000000 --- a/drivers/media/dvb/ttpci/fdump.c +++ /dev/null @@ -1,44 +0,0 @@ -#include <stdio.h> -#include <sys/types.h> -#include <sys/stat.h> -#include <fcntl.h> -#include <unistd.h> - -int main(int argc, char **argv) -{ - unsigned char buf[8]; - unsigned int i, count, bytes = 0; - FILE *fd_in, *fd_out; - - if (argc != 4) { - fprintf(stderr, "\n\tusage: %s <ucode.bin> <array_name> <output_name>\n\n", argv[0]); - return -1; - } - - fd_in = fopen(argv[1], "rb"); - if (fd_in == NULL) { - fprintf(stderr, "firmware file '%s' not found\n", argv[1]); - return -1; - } - - fd_out = fopen(argv[3], "w+"); - if (fd_out == NULL) { - fprintf(stderr, "cannot create output file '%s'\n", argv[3]); - return -1; - } - - fprintf(fd_out, "\n#include <asm/types.h>\n\nu8 %s [] = {", argv[2]); - - while ((count = fread(buf, 1, 8, fd_in)) > 0) { - fprintf(fd_out, "\n\t"); - for (i = 0; i < count; i++, bytes++) - fprintf(fd_out, "0x%02x, ", buf[i]); - } - - fprintf(fd_out, "\n};\n\n"); - - fclose(fd_in); - fclose(fd_out); - - return 0; -} diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index d184dfab963..db39f4a52f5 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -278,7 +278,7 @@ static ssize_t at24_bin_read(struct kobject *kobj, struct bin_attribute *attr, * We only use page mode writes; the alternative is sloooow. This routine * writes at most one page. 
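The comment above is why the caller can presumably stay a simple loop: the single-page helper writes at most one EEPROM page per I2C transaction, and the caller just advances by the returned byte count until the buffer is drained. A generic sketch of that chunking idea follows; the function-pointer indirection and names are illustrative only, standing in for at24_eeprom_write() and the driver-local struct at24_data.

#include <linux/types.h>

/* Sketch, not the driver's code: keep calling a one-page writer until
 * everything has been pushed out, reporting partial progress on error. */
static ssize_t example_write_in_pages(ssize_t (*writer)(void *ctx,
							const char *buf,
							unsigned int off,
							size_t len),
				      void *ctx, const char *buf,
				      unsigned int off, size_t len)
{
	ssize_t written = 0;

	while (len) {
		ssize_t n = writer(ctx, buf, off, len);

		if (n < 0)
			return written ? written : n;

		buf += n;
		off += n;
		len -= n;
		written += n;
	}

	return written;
}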
*/ -static ssize_t at24_eeprom_write(struct at24_data *at24, char *buf, +static ssize_t at24_eeprom_write(struct at24_data *at24, const char *buf, unsigned offset, size_t count) { struct i2c_client *client; @@ -347,8 +347,8 @@ static ssize_t at24_eeprom_write(struct at24_data *at24, char *buf, return -ETIMEDOUT; } -static ssize_t at24_write(struct at24_data *at24, - char *buf, loff_t off, size_t count) +static ssize_t at24_write(struct at24_data *at24, const char *buf, loff_t off, + size_t count) { ssize_t retval = 0; @@ -406,7 +406,7 @@ static ssize_t at24_macc_read(struct memory_accessor *macc, char *buf, return at24_read(at24, buf, offset, count); } -static ssize_t at24_macc_write(struct memory_accessor *macc, char *buf, +static ssize_t at24_macc_write(struct memory_accessor *macc, const char *buf, off_t offset, size_t count) { struct at24_data *at24 = container_of(macc, struct at24_data, macc); diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index 6bc0dac5c1e..b34cb5f79ee 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c @@ -140,7 +140,8 @@ at25_bin_read(struct kobject *kobj, struct bin_attribute *bin_attr, static ssize_t -at25_ee_write(struct at25_data *at25, char *buf, loff_t off, size_t count) +at25_ee_write(struct at25_data *at25, const char *buf, loff_t off, + size_t count) { ssize_t status = 0; unsigned written = 0; @@ -276,7 +277,7 @@ static ssize_t at25_mem_read(struct memory_accessor *mem, char *buf, return at25_ee_read(at25, buf, offset, count); } -static ssize_t at25_mem_write(struct memory_accessor *mem, char *buf, +static ssize_t at25_mem_write(struct memory_accessor *mem, const char *buf, off_t offset, size_t count) { struct at25_data *at25 = container_of(mem, struct at25_data, mem); diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h index 114444cfd49..b94d5f76770 100644 --- a/drivers/misc/sgi-xp/xpc.h +++ b/drivers/misc/sgi-xp/xpc.h @@ -90,18 +90,21 @@ struct xpc_rsvd_page { short max_npartitions; /* value of XPC_MAX_PARTITIONS */ u8 version; u8 pad1[3]; /* align to next u64 in 1st 64-byte cacheline */ + unsigned long ts_jiffies; /* timestamp when rsvd pg was setup by XPC */ union { - unsigned long vars_pa; /* phys address of struct xpc_vars */ - unsigned long activate_gru_mq_desc_gpa; /* phys addr of */ - /* activate mq's */ - /* gru mq descriptor */ + struct { + unsigned long vars_pa; /* phys addr */ + } sn2; + struct { + unsigned long heartbeat_gpa; /* phys addr */ + unsigned long activate_gru_mq_desc_gpa; /* phys addr */ + } uv; } sn; - unsigned long ts_jiffies; /* timestamp when rsvd pg was setup by XPC */ - u64 pad2[10]; /* align to last u64 in 2nd 64-byte cacheline */ + u64 pad2[9]; /* align to last u64 in 2nd 64-byte cacheline */ u64 SAL_nasids_size; /* SAL: size of each nasid mask in bytes */ }; -#define XPC_RP_VERSION _XPC_VERSION(2, 0) /* version 2.0 of the reserved page */ +#define XPC_RP_VERSION _XPC_VERSION(3, 0) /* version 3.0 of the reserved page */ /* * Define the structures by which XPC variables can be exported to other @@ -182,6 +185,17 @@ struct xpc_vars_part_sn2 { (XPC_RP_MACH_NASIDS(_rp) + \ xpc_nasid_mask_nlongs)) + +/* + * The following structure describes the partition's heartbeat info which + * will be periodically read by other partitions to determine whether this + * XPC is still 'alive'. 
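That comment describes the consumer side of the new layout: the xpc_heartbeat_uv structure defined just below is read remotely, so a peer can only infer liveness from whether the value keeps advancing and whether the owner has flagged itself offline. An illustrative check follows; it is not the driver's polling code (which lives in the uv-specific file outside this excerpt), and "last_seen" is simply whatever the peer cached on its previous poll.

/* Illustrative only. */
static int example_heartbeat_stalled(const struct xpc_heartbeat_uv *hb,
				     unsigned long last_seen)
{
	if (hb->offline)
		return 0;	/* deliberately quiet, not dead */

	return hb->value == last_seen;
}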
+ */ +struct xpc_heartbeat_uv { + unsigned long value; + unsigned long offline; /* if 0, heartbeat should be changing */ +}; + /* * Info pertinent to a GRU message queue using a watch list for irq generation. */ @@ -198,7 +212,7 @@ struct xpc_gru_mq_uv { /* * The activate_mq is used to send/receive GRU messages that affect XPC's - * heartbeat, partition active state, and channel state. This is UV only. + * partition active state and channel state. This is uv only. */ struct xpc_activate_mq_msghdr_uv { unsigned int gru_msg_hdr; /* FOR GRU INTERNAL USE ONLY */ @@ -210,33 +224,27 @@ struct xpc_activate_mq_msghdr_uv { /* activate_mq defined message types */ #define XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV 0 -#define XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV 1 -#define XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV 2 -#define XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV 3 -#define XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV 4 -#define XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV 5 +#define XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV 1 +#define XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV 2 -#define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV 6 -#define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV 7 -#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV 8 -#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV 9 +#define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV 3 +#define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV 4 +#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV 5 +#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV 6 +#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV 7 -#define XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV 10 -#define XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV 11 +#define XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV 8 +#define XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV 9 struct xpc_activate_mq_msg_uv { struct xpc_activate_mq_msghdr_uv hdr; }; -struct xpc_activate_mq_msg_heartbeat_req_uv { - struct xpc_activate_mq_msghdr_uv hdr; - u64 heartbeat; -}; - struct xpc_activate_mq_msg_activate_req_uv { struct xpc_activate_mq_msghdr_uv hdr; unsigned long rp_gpa; + unsigned long heartbeat_gpa; unsigned long activate_gru_mq_desc_gpa; }; @@ -271,6 +279,11 @@ struct xpc_activate_mq_msg_chctl_openreply_uv { unsigned long notify_gru_mq_desc_gpa; }; +struct xpc_activate_mq_msg_chctl_opencomplete_uv { + struct xpc_activate_mq_msghdr_uv hdr; + short ch_number; +}; + /* * Functions registered by add_timer() or called by kernel_thread() only * allow for a single 64-bit argument. 
The following macros can be used to @@ -576,30 +589,32 @@ struct xpc_channel { #define XPC_C_WASCONNECTED 0x00000001 /* channel was connected */ -#define XPC_C_ROPENREPLY 0x00000002 /* remote open channel reply */ -#define XPC_C_OPENREPLY 0x00000004 /* local open channel reply */ -#define XPC_C_ROPENREQUEST 0x00000008 /* remote open channel request */ -#define XPC_C_OPENREQUEST 0x00000010 /* local open channel request */ +#define XPC_C_ROPENCOMPLETE 0x00000002 /* remote open channel complete */ +#define XPC_C_OPENCOMPLETE 0x00000004 /* local open channel complete */ +#define XPC_C_ROPENREPLY 0x00000008 /* remote open channel reply */ +#define XPC_C_OPENREPLY 0x00000010 /* local open channel reply */ +#define XPC_C_ROPENREQUEST 0x00000020 /* remote open channel request */ +#define XPC_C_OPENREQUEST 0x00000040 /* local open channel request */ -#define XPC_C_SETUP 0x00000020 /* channel's msgqueues are alloc'd */ -#define XPC_C_CONNECTEDCALLOUT 0x00000040 /* connected callout initiated */ +#define XPC_C_SETUP 0x00000080 /* channel's msgqueues are alloc'd */ +#define XPC_C_CONNECTEDCALLOUT 0x00000100 /* connected callout initiated */ #define XPC_C_CONNECTEDCALLOUT_MADE \ - 0x00000080 /* connected callout completed */ -#define XPC_C_CONNECTED 0x00000100 /* local channel is connected */ -#define XPC_C_CONNECTING 0x00000200 /* channel is being connected */ + 0x00000200 /* connected callout completed */ +#define XPC_C_CONNECTED 0x00000400 /* local channel is connected */ +#define XPC_C_CONNECTING 0x00000800 /* channel is being connected */ -#define XPC_C_RCLOSEREPLY 0x00000400 /* remote close channel reply */ -#define XPC_C_CLOSEREPLY 0x00000800 /* local close channel reply */ -#define XPC_C_RCLOSEREQUEST 0x00001000 /* remote close channel request */ -#define XPC_C_CLOSEREQUEST 0x00002000 /* local close channel request */ +#define XPC_C_RCLOSEREPLY 0x00001000 /* remote close channel reply */ +#define XPC_C_CLOSEREPLY 0x00002000 /* local close channel reply */ +#define XPC_C_RCLOSEREQUEST 0x00004000 /* remote close channel request */ +#define XPC_C_CLOSEREQUEST 0x00008000 /* local close channel request */ -#define XPC_C_DISCONNECTED 0x00004000 /* channel is disconnected */ -#define XPC_C_DISCONNECTING 0x00008000 /* channel is being disconnected */ +#define XPC_C_DISCONNECTED 0x00010000 /* channel is disconnected */ +#define XPC_C_DISCONNECTING 0x00020000 /* channel is being disconnected */ #define XPC_C_DISCONNECTINGCALLOUT \ - 0x00010000 /* disconnecting callout initiated */ + 0x00040000 /* disconnecting callout initiated */ #define XPC_C_DISCONNECTINGCALLOUT_MADE \ - 0x00020000 /* disconnecting callout completed */ -#define XPC_C_WDISCONNECT 0x00040000 /* waiting for channel disconnect */ + 0x00080000 /* disconnecting callout completed */ +#define XPC_C_WDISCONNECT 0x00100000 /* waiting for channel disconnect */ /* * The channel control flags (chctl) union consists of a 64-bit variable which @@ -618,11 +633,13 @@ union xpc_channel_ctl_flags { #define XPC_CHCTL_CLOSEREPLY 0x02 #define XPC_CHCTL_OPENREQUEST 0x04 #define XPC_CHCTL_OPENREPLY 0x08 -#define XPC_CHCTL_MSGREQUEST 0x10 +#define XPC_CHCTL_OPENCOMPLETE 0x10 +#define XPC_CHCTL_MSGREQUEST 0x20 #define XPC_OPENCLOSE_CHCTL_FLAGS \ (XPC_CHCTL_CLOSEREQUEST | XPC_CHCTL_CLOSEREPLY | \ - XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY) + XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY | \ + XPC_CHCTL_OPENCOMPLETE) #define XPC_MSG_CHCTL_FLAGS XPC_CHCTL_MSGREQUEST static inline int @@ -687,6 +704,9 @@ struct xpc_partition_sn2 { }; struct xpc_partition_uv { + 
unsigned long heartbeat_gpa; /* phys addr of partition's heartbeat */ + struct xpc_heartbeat_uv cached_heartbeat; /* cached copy of */ + /* partition's heartbeat */ unsigned long activate_gru_mq_desc_gpa; /* phys addr of parititon's */ /* activate mq's gru mq */ /* descriptor */ @@ -698,14 +718,12 @@ struct xpc_partition_uv { u8 remote_act_state; /* remote partition's act_state */ u8 act_state_req; /* act_state request from remote partition */ enum xp_retval reason; /* reason for deactivate act_state request */ - u64 heartbeat; /* incremented by remote partition */ }; /* struct xpc_partition_uv flags */ -#define XPC_P_HEARTBEAT_OFFLINE_UV 0x00000001 +#define XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV 0x00000001 #define XPC_P_ENGAGED_UV 0x00000002 -#define XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV 0x00000004 /* struct xpc_partition_uv act_state change requests */ @@ -762,6 +780,62 @@ struct xpc_partition { } ____cacheline_aligned; +struct xpc_arch_operations { + int (*setup_partitions) (void); + void (*teardown_partitions) (void); + void (*process_activate_IRQ_rcvd) (void); + enum xp_retval (*get_partition_rsvd_page_pa) + (void *, u64 *, unsigned long *, size_t *); + int (*setup_rsvd_page) (struct xpc_rsvd_page *); + + void (*allow_hb) (short); + void (*disallow_hb) (short); + void (*disallow_all_hbs) (void); + void (*increment_heartbeat) (void); + void (*offline_heartbeat) (void); + void (*online_heartbeat) (void); + void (*heartbeat_init) (void); + void (*heartbeat_exit) (void); + enum xp_retval (*get_remote_heartbeat) (struct xpc_partition *); + + void (*request_partition_activation) (struct xpc_rsvd_page *, + unsigned long, int); + void (*request_partition_reactivation) (struct xpc_partition *); + void (*request_partition_deactivation) (struct xpc_partition *); + void (*cancel_partition_deactivation_request) (struct xpc_partition *); + enum xp_retval (*setup_ch_structures) (struct xpc_partition *); + void (*teardown_ch_structures) (struct xpc_partition *); + + enum xp_retval (*make_first_contact) (struct xpc_partition *); + + u64 (*get_chctl_all_flags) (struct xpc_partition *); + void (*send_chctl_closerequest) (struct xpc_channel *, unsigned long *); + void (*send_chctl_closereply) (struct xpc_channel *, unsigned long *); + void (*send_chctl_openrequest) (struct xpc_channel *, unsigned long *); + void (*send_chctl_openreply) (struct xpc_channel *, unsigned long *); + void (*send_chctl_opencomplete) (struct xpc_channel *, unsigned long *); + void (*process_msg_chctl_flags) (struct xpc_partition *, int); + + enum xp_retval (*save_remote_msgqueue_pa) (struct xpc_channel *, + unsigned long); + + enum xp_retval (*setup_msg_structures) (struct xpc_channel *); + void (*teardown_msg_structures) (struct xpc_channel *); + + void (*indicate_partition_engaged) (struct xpc_partition *); + void (*indicate_partition_disengaged) (struct xpc_partition *); + void (*assume_partition_disengaged) (short); + int (*partition_engaged) (short); + int (*any_partition_engaged) (void); + + int (*n_of_deliverable_payloads) (struct xpc_channel *); + enum xp_retval (*send_payload) (struct xpc_channel *, u32, void *, + u16, u8, xpc_notify_func, void *); + void *(*get_deliverable_payload) (struct xpc_channel *); + void (*received_payload) (struct xpc_channel *, void *); + void (*notify_senders_of_disconnect) (struct xpc_channel *); +}; + /* struct xpc_partition act_state values (for XPC HB) */ #define XPC_P_AS_INACTIVE 0x00 /* partition is not active */ @@ -802,67 +876,17 @@ extern struct xpc_registration xpc_registrations[]; /* 
found in xpc_main.c */ extern struct device *xpc_part; extern struct device *xpc_chan; +extern struct xpc_arch_operations xpc_arch_ops; extern int xpc_disengage_timelimit; extern int xpc_disengage_timedout; extern int xpc_activate_IRQ_rcvd; extern spinlock_t xpc_activate_IRQ_rcvd_lock; extern wait_queue_head_t xpc_activate_IRQ_wq; -extern void *xpc_heartbeating_to_mask; extern void *xpc_kzalloc_cacheline_aligned(size_t, gfp_t, void **); extern void xpc_activate_partition(struct xpc_partition *); extern void xpc_activate_kthreads(struct xpc_channel *, int); extern void xpc_create_kthreads(struct xpc_channel *, int, int); extern void xpc_disconnect_wait(int); -extern int (*xpc_setup_partitions_sn) (void); -extern void (*xpc_teardown_partitions_sn) (void); -extern enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *, u64 *, - unsigned long *, - size_t *); -extern int (*xpc_setup_rsvd_page_sn) (struct xpc_rsvd_page *); -extern void (*xpc_heartbeat_init) (void); -extern void (*xpc_heartbeat_exit) (void); -extern void (*xpc_increment_heartbeat) (void); -extern void (*xpc_offline_heartbeat) (void); -extern void (*xpc_online_heartbeat) (void); -extern enum xp_retval (*xpc_get_remote_heartbeat) (struct xpc_partition *); -extern enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *); -extern u64 (*xpc_get_chctl_all_flags) (struct xpc_partition *); -extern enum xp_retval (*xpc_setup_msg_structures) (struct xpc_channel *); -extern void (*xpc_teardown_msg_structures) (struct xpc_channel *); -extern void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *); -extern void (*xpc_process_msg_chctl_flags) (struct xpc_partition *, int); -extern int (*xpc_n_of_deliverable_payloads) (struct xpc_channel *); -extern void *(*xpc_get_deliverable_payload) (struct xpc_channel *); -extern void (*xpc_request_partition_activation) (struct xpc_rsvd_page *, - unsigned long, int); -extern void (*xpc_request_partition_reactivation) (struct xpc_partition *); -extern void (*xpc_request_partition_deactivation) (struct xpc_partition *); -extern void (*xpc_cancel_partition_deactivation_request) ( - struct xpc_partition *); -extern void (*xpc_process_activate_IRQ_rcvd) (void); -extern enum xp_retval (*xpc_setup_ch_structures_sn) (struct xpc_partition *); -extern void (*xpc_teardown_ch_structures_sn) (struct xpc_partition *); - -extern void (*xpc_indicate_partition_engaged) (struct xpc_partition *); -extern int (*xpc_partition_engaged) (short); -extern int (*xpc_any_partition_engaged) (void); -extern void (*xpc_indicate_partition_disengaged) (struct xpc_partition *); -extern void (*xpc_assume_partition_disengaged) (short); - -extern void (*xpc_send_chctl_closerequest) (struct xpc_channel *, - unsigned long *); -extern void (*xpc_send_chctl_closereply) (struct xpc_channel *, - unsigned long *); -extern void (*xpc_send_chctl_openrequest) (struct xpc_channel *, - unsigned long *); -extern void (*xpc_send_chctl_openreply) (struct xpc_channel *, unsigned long *); - -extern enum xp_retval (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *, - unsigned long); - -extern enum xp_retval (*xpc_send_payload) (struct xpc_channel *, u32, void *, - u16, u8, xpc_notify_func, void *); -extern void (*xpc_received_payload) (struct xpc_channel *, void *); /* found in xpc_sn2.c */ extern int xpc_init_sn2(void); @@ -909,40 +933,6 @@ extern void xpc_disconnect_channel(const int, struct xpc_channel *, extern void xpc_disconnect_callout(struct xpc_channel *, enum xp_retval); extern void xpc_partition_going_down(struct xpc_partition 
*, enum xp_retval); -static inline int -xpc_hb_allowed(short partid, void *heartbeating_to_mask) -{ - return test_bit(partid, heartbeating_to_mask); -} - -static inline int -xpc_any_hbs_allowed(void) -{ - DBUG_ON(xpc_heartbeating_to_mask == NULL); - return !bitmap_empty(xpc_heartbeating_to_mask, xp_max_npartitions); -} - -static inline void -xpc_allow_hb(short partid) -{ - DBUG_ON(xpc_heartbeating_to_mask == NULL); - set_bit(partid, xpc_heartbeating_to_mask); -} - -static inline void -xpc_disallow_hb(short partid) -{ - DBUG_ON(xpc_heartbeating_to_mask == NULL); - clear_bit(partid, xpc_heartbeating_to_mask); -} - -static inline void -xpc_disallow_all_hbs(void) -{ - DBUG_ON(xpc_heartbeating_to_mask == NULL); - bitmap_zero(xpc_heartbeating_to_mask, xp_max_npartitions); -} - static inline void xpc_wakeup_channel_mgr(struct xpc_partition *part) { diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c index 99a2534c38a..652593fc486 100644 --- a/drivers/misc/sgi-xp/xpc_channel.c +++ b/drivers/misc/sgi-xp/xpc_channel.c @@ -3,7 +3,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. + * Copyright (c) 2004-2009 Silicon Graphics, Inc. All Rights Reserved. */ /* @@ -39,34 +39,38 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags) if (!(ch->flags & XPC_C_SETUP)) { spin_unlock_irqrestore(&ch->lock, *irq_flags); - ret = xpc_setup_msg_structures(ch); + ret = xpc_arch_ops.setup_msg_structures(ch); spin_lock_irqsave(&ch->lock, *irq_flags); if (ret != xpSuccess) XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags); + else + ch->flags |= XPC_C_SETUP; - ch->flags |= XPC_C_SETUP; - - if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) + if (ch->flags & XPC_C_DISCONNECTING) return; } if (!(ch->flags & XPC_C_OPENREPLY)) { ch->flags |= XPC_C_OPENREPLY; - xpc_send_chctl_openreply(ch, irq_flags); + xpc_arch_ops.send_chctl_openreply(ch, irq_flags); } if (!(ch->flags & XPC_C_ROPENREPLY)) return; - ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */ + if (!(ch->flags & XPC_C_OPENCOMPLETE)) { + ch->flags |= (XPC_C_OPENCOMPLETE | XPC_C_CONNECTED); + xpc_arch_ops.send_chctl_opencomplete(ch, irq_flags); + } + + if (!(ch->flags & XPC_C_ROPENCOMPLETE)) + return; dev_info(xpc_chan, "channel %d to partition %d connected\n", ch->number, ch->partid); - spin_unlock_irqrestore(&ch->lock, *irq_flags); - xpc_create_kthreads(ch, 1, 0); - spin_lock_irqsave(&ch->lock, *irq_flags); + ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */ } /* @@ -96,7 +100,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) if (part->act_state == XPC_P_AS_DEACTIVATING) { /* can't proceed until the other side disengages from us */ - if (xpc_partition_engaged(ch->partid)) + if (xpc_arch_ops.partition_engaged(ch->partid)) return; } else { @@ -108,7 +112,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) if (!(ch->flags & XPC_C_CLOSEREPLY)) { ch->flags |= XPC_C_CLOSEREPLY; - xpc_send_chctl_closereply(ch, irq_flags); + xpc_arch_ops.send_chctl_closereply(ch, irq_flags); } if (!(ch->flags & XPC_C_RCLOSEREPLY)) @@ -118,7 +122,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) /* wake those waiting for notify completion */ if (atomic_read(&ch->n_to_notify) > 0) { /* we do callout while holding ch->lock, callout can't block */ - xpc_notify_senders_of_disconnect(ch); + 
xpc_arch_ops.notify_senders_of_disconnect(ch); } /* both sides are disconnected now */ @@ -132,7 +136,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) DBUG_ON(atomic_read(&ch->n_to_notify) != 0); /* it's now safe to free the channel's message queues */ - xpc_teardown_msg_structures(ch); + xpc_arch_ops.teardown_msg_structures(ch); ch->func = NULL; ch->key = NULL; @@ -144,8 +148,9 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) /* * Mark the channel disconnected and clear all other flags, including - * XPC_C_SETUP (because of call to xpc_teardown_msg_structures()) but - * not including XPC_C_WDISCONNECT (if it was set). + * XPC_C_SETUP (because of call to + * xpc_arch_ops.teardown_msg_structures()) but not including + * XPC_C_WDISCONNECT (if it was set). */ ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT)); @@ -184,6 +189,7 @@ xpc_process_openclose_chctl_flags(struct xpc_partition *part, int ch_number, struct xpc_channel *ch = &part->channels[ch_number]; enum xp_retval reason; enum xp_retval ret; + int create_kthread = 0; spin_lock_irqsave(&ch->lock, irq_flags); @@ -196,8 +202,7 @@ again: * has had a chance to see that the channel is disconnected. */ ch->delayed_chctl_flags |= chctl_flags; - spin_unlock_irqrestore(&ch->lock, irq_flags); - return; + goto out; } if (chctl_flags & XPC_CHCTL_CLOSEREQUEST) { @@ -239,8 +244,7 @@ again: XPC_CHCTL_CLOSEREQUEST; spin_unlock(&part->chctl_lock); } - spin_unlock_irqrestore(&ch->lock, irq_flags); - return; + goto out; } XPC_SET_REASON(ch, 0, 0); @@ -250,7 +254,8 @@ again: ch->flags |= (XPC_C_CONNECTING | XPC_C_ROPENREQUEST); } - chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY); + chctl_flags &= ~(XPC_CHCTL_OPENREQUEST | XPC_CHCTL_OPENREPLY | + XPC_CHCTL_OPENCOMPLETE); /* * The meaningful CLOSEREQUEST connection state fields are: @@ -269,8 +274,7 @@ again: XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); DBUG_ON(chctl_flags & XPC_CHCTL_CLOSEREPLY); - spin_unlock_irqrestore(&ch->lock, irq_flags); - return; + goto out; } xpc_process_disconnect(ch, &irq_flags); @@ -283,8 +287,7 @@ again: if (ch->flags & XPC_C_DISCONNECTED) { DBUG_ON(part->act_state != XPC_P_AS_DEACTIVATING); - spin_unlock_irqrestore(&ch->lock, irq_flags); - return; + goto out; } DBUG_ON(!(ch->flags & XPC_C_CLOSEREQUEST)); @@ -299,8 +302,7 @@ again: XPC_CHCTL_CLOSEREPLY; spin_unlock(&part->chctl_lock); } - spin_unlock_irqrestore(&ch->lock, irq_flags); - return; + goto out; } ch->flags |= XPC_C_RCLOSEREPLY; @@ -320,14 +322,12 @@ again: if (part->act_state == XPC_P_AS_DEACTIVATING || (ch->flags & XPC_C_ROPENREQUEST)) { - spin_unlock_irqrestore(&ch->lock, irq_flags); - return; + goto out; } if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_WDISCONNECT)) { ch->delayed_chctl_flags |= XPC_CHCTL_OPENREQUEST; - spin_unlock_irqrestore(&ch->lock, irq_flags); - return; + goto out; } DBUG_ON(!(ch->flags & (XPC_C_DISCONNECTED | XPC_C_OPENREQUEST))); @@ -341,8 +341,7 @@ again: */ if (args->entry_size == 0 || args->local_nentries == 0) { /* assume OPENREQUEST was delayed by mistake */ - spin_unlock_irqrestore(&ch->lock, irq_flags); - return; + goto out; } ch->flags |= (XPC_C_ROPENREQUEST | XPC_C_CONNECTING); @@ -352,8 +351,7 @@ again: if (args->entry_size != ch->entry_size) { XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes, &irq_flags); - spin_unlock_irqrestore(&ch->lock, irq_flags); - return; + goto out; } } else { ch->entry_size = args->entry_size; @@ -375,15 +373,13 @@ again: args->local_msgqueue_pa, args->local_nentries, 
args->remote_nentries, ch->partid, ch->number); - if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) { - spin_unlock_irqrestore(&ch->lock, irq_flags); - return; - } + if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) + goto out; + if (!(ch->flags & XPC_C_OPENREQUEST)) { XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError, &irq_flags); - spin_unlock_irqrestore(&ch->lock, irq_flags); - return; + goto out; } DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST)); @@ -400,11 +396,11 @@ again: DBUG_ON(args->local_nentries == 0); DBUG_ON(args->remote_nentries == 0); - ret = xpc_save_remote_msgqueue_pa(ch, args->local_msgqueue_pa); + ret = xpc_arch_ops.save_remote_msgqueue_pa(ch, + args->local_msgqueue_pa); if (ret != xpSuccess) { XPC_DISCONNECT_CHANNEL(ch, ret, &irq_flags); - spin_unlock_irqrestore(&ch->lock, irq_flags); - return; + goto out; } ch->flags |= XPC_C_ROPENREPLY; @@ -430,7 +426,36 @@ again: xpc_process_connect(ch, &irq_flags); } + if (chctl_flags & XPC_CHCTL_OPENCOMPLETE) { + + dev_dbg(xpc_chan, "XPC_CHCTL_OPENCOMPLETE received from " + "partid=%d, channel=%d\n", ch->partid, ch->number); + + if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) + goto out; + + if (!(ch->flags & XPC_C_OPENREQUEST) || + !(ch->flags & XPC_C_OPENREPLY)) { + XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError, + &irq_flags); + goto out; + } + + DBUG_ON(!(ch->flags & XPC_C_ROPENREQUEST)); + DBUG_ON(!(ch->flags & XPC_C_ROPENREPLY)); + DBUG_ON(!(ch->flags & XPC_C_CONNECTED)); + + ch->flags |= XPC_C_ROPENCOMPLETE; + + xpc_process_connect(ch, &irq_flags); + create_kthread = 1; + } + +out: spin_unlock_irqrestore(&ch->lock, irq_flags); + + if (create_kthread) + xpc_create_kthreads(ch, 1, 0); } /* @@ -508,7 +533,7 @@ xpc_connect_channel(struct xpc_channel *ch) /* initiate the connection */ ch->flags |= (XPC_C_OPENREQUEST | XPC_C_CONNECTING); - xpc_send_chctl_openrequest(ch, &irq_flags); + xpc_arch_ops.send_chctl_openrequest(ch, &irq_flags); xpc_process_connect(ch, &irq_flags); @@ -526,7 +551,7 @@ xpc_process_sent_chctl_flags(struct xpc_partition *part) int ch_number; u32 ch_flags; - chctl.all_flags = xpc_get_chctl_all_flags(part); + chctl.all_flags = xpc_arch_ops.get_chctl_all_flags(part); /* * Initiate channel connections for registered channels. 
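
The xpc_process_openclose_chctl_flags() rework above routes every early return through one out: label and only calls xpc_create_kthreads() after ch->lock has been dropped, since thread creation can sleep. Below is a minimal, hypothetical sketch of that defer-until-unlocked pattern; the demo_* names and flag are invented for illustration and are not part of the XPC driver.

/*
 * Illustrative sketch only: note work that may sleep while the spinlock
 * is held, and perform it only after the lock has been released.
 */
#include <linux/spinlock.h>
#include <linux/kthread.h>

struct demo_channel {
	spinlock_t lock;
	unsigned long flags;
};

#define DEMO_ROPENCOMPLETE	0x1UL

static int demo_worker(void *arg)
{
	/* a real worker would deliver channel payloads here */
	return 0;
}

static void demo_process_chctl(struct demo_channel *ch, unsigned long chctl)
{
	unsigned long irq_flags;
	int create_kthread = 0;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (chctl & DEMO_ROPENCOMPLETE) {
		ch->flags |= DEMO_ROPENCOMPLETE;
		create_kthread = 1;	/* only record the work while locked */
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);

	if (create_kthread)		/* kthread_run() may sleep; lock is gone now */
		kthread_run(demo_worker, ch, "demo_worker");
}

The same idea drives the new create_kthread flag in the hunk above: the state transition stays under the spinlock, while the blocking call is pushed past the unlock.
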
@@ -564,10 +589,6 @@ xpc_process_sent_chctl_flags(struct xpc_partition *part) if (!(ch_flags & XPC_C_OPENREQUEST)) { DBUG_ON(ch_flags & XPC_C_SETUP); (void)xpc_connect_channel(ch); - } else { - spin_lock_irqsave(&ch->lock, irq_flags); - xpc_process_connect(ch, &irq_flags); - spin_unlock_irqrestore(&ch->lock, irq_flags); } continue; } @@ -579,7 +600,7 @@ xpc_process_sent_chctl_flags(struct xpc_partition *part) */ if (chctl.flags[ch_number] & XPC_MSG_CHCTL_FLAGS) - xpc_process_msg_chctl_flags(part, ch_number); + xpc_arch_ops.process_msg_chctl_flags(part, ch_number); } } @@ -755,7 +776,7 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch, XPC_C_ROPENREQUEST | XPC_C_ROPENREPLY | XPC_C_CONNECTING | XPC_C_CONNECTED); - xpc_send_chctl_closerequest(ch, irq_flags); + xpc_arch_ops.send_chctl_closerequest(ch, irq_flags); if (channel_was_connected) ch->flags |= XPC_C_WASCONNECTED; @@ -862,8 +883,8 @@ xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload, DBUG_ON(payload == NULL); if (xpc_part_ref(part)) { - ret = xpc_send_payload(&part->channels[ch_number], flags, - payload, payload_size, 0, NULL, NULL); + ret = xpc_arch_ops.send_payload(&part->channels[ch_number], + flags, payload, payload_size, 0, NULL, NULL); xpc_part_deref(part); } @@ -914,9 +935,8 @@ xpc_initiate_send_notify(short partid, int ch_number, u32 flags, void *payload, DBUG_ON(func == NULL); if (xpc_part_ref(part)) { - ret = xpc_send_payload(&part->channels[ch_number], flags, - payload, payload_size, XPC_N_CALL, func, - key); + ret = xpc_arch_ops.send_payload(&part->channels[ch_number], + flags, payload, payload_size, XPC_N_CALL, func, key); xpc_part_deref(part); } return ret; @@ -930,7 +950,7 @@ xpc_deliver_payload(struct xpc_channel *ch) { void *payload; - payload = xpc_get_deliverable_payload(ch); + payload = xpc_arch_ops.get_deliverable_payload(ch); if (payload != NULL) { /* @@ -984,7 +1004,7 @@ xpc_initiate_received(short partid, int ch_number, void *payload) DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); ch = &part->channels[ch_number]; - xpc_received_payload(ch, payload); + xpc_arch_ops.received_payload(ch, payload); /* the call to xpc_msgqueue_ref() was done by xpc_deliver_payload() */ xpc_msgqueue_deref(ch); diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index 1ab9fda87fa..fd3688a3e23 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -3,7 +3,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved. + * Copyright (c) 2004-2009 Silicon Graphics, Inc. All Rights Reserved. 
*/ /* @@ -150,7 +150,6 @@ DECLARE_WAIT_QUEUE_HEAD(xpc_activate_IRQ_wq); static unsigned long xpc_hb_check_timeout; static struct timer_list xpc_hb_timer; -void *xpc_heartbeating_to_mask; /* notification that the xpc_hb_checker thread has exited */ static DECLARE_COMPLETION(xpc_hb_checker_exited); @@ -170,62 +169,7 @@ static struct notifier_block xpc_die_notifier = { .notifier_call = xpc_system_die, }; -int (*xpc_setup_partitions_sn) (void); -void (*xpc_teardown_partitions_sn) (void); -enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *buf, u64 *cookie, - unsigned long *rp_pa, - size_t *len); -int (*xpc_setup_rsvd_page_sn) (struct xpc_rsvd_page *rp); -void (*xpc_heartbeat_init) (void); -void (*xpc_heartbeat_exit) (void); -void (*xpc_increment_heartbeat) (void); -void (*xpc_offline_heartbeat) (void); -void (*xpc_online_heartbeat) (void); -enum xp_retval (*xpc_get_remote_heartbeat) (struct xpc_partition *part); - -enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part); -void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *ch); -u64 (*xpc_get_chctl_all_flags) (struct xpc_partition *part); -enum xp_retval (*xpc_setup_msg_structures) (struct xpc_channel *ch); -void (*xpc_teardown_msg_structures) (struct xpc_channel *ch); -void (*xpc_process_msg_chctl_flags) (struct xpc_partition *part, int ch_number); -int (*xpc_n_of_deliverable_payloads) (struct xpc_channel *ch); -void *(*xpc_get_deliverable_payload) (struct xpc_channel *ch); - -void (*xpc_request_partition_activation) (struct xpc_rsvd_page *remote_rp, - unsigned long remote_rp_pa, - int nasid); -void (*xpc_request_partition_reactivation) (struct xpc_partition *part); -void (*xpc_request_partition_deactivation) (struct xpc_partition *part); -void (*xpc_cancel_partition_deactivation_request) (struct xpc_partition *part); - -void (*xpc_process_activate_IRQ_rcvd) (void); -enum xp_retval (*xpc_setup_ch_structures_sn) (struct xpc_partition *part); -void (*xpc_teardown_ch_structures_sn) (struct xpc_partition *part); - -void (*xpc_indicate_partition_engaged) (struct xpc_partition *part); -int (*xpc_partition_engaged) (short partid); -int (*xpc_any_partition_engaged) (void); -void (*xpc_indicate_partition_disengaged) (struct xpc_partition *part); -void (*xpc_assume_partition_disengaged) (short partid); - -void (*xpc_send_chctl_closerequest) (struct xpc_channel *ch, - unsigned long *irq_flags); -void (*xpc_send_chctl_closereply) (struct xpc_channel *ch, - unsigned long *irq_flags); -void (*xpc_send_chctl_openrequest) (struct xpc_channel *ch, - unsigned long *irq_flags); -void (*xpc_send_chctl_openreply) (struct xpc_channel *ch, - unsigned long *irq_flags); - -enum xp_retval (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *ch, - unsigned long msgqueue_pa); - -enum xp_retval (*xpc_send_payload) (struct xpc_channel *ch, u32 flags, - void *payload, u16 payload_size, - u8 notify_type, xpc_notify_func func, - void *key); -void (*xpc_received_payload) (struct xpc_channel *ch, void *payload); +struct xpc_arch_operations xpc_arch_ops; /* * Timer function to enforce the timelimit on the partition disengage. 
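
The hunk above removes the long list of individually assigned per-architecture function pointers and leaves a single global, struct xpc_arch_operations xpc_arch_ops, which the sn2 and uv init paths later fill in by structure assignment (see xpc_init_sn2() and xpc_init_uv() further down). A minimal sketch of that ops-table pattern follows, using made-up demo_* names rather than the real xpc interface.

/* Sketch of the ops-table pattern; every demo_* identifier is invented here. */
struct demo_arch_operations {
	int  (*setup_partitions)(void);
	void (*increment_heartbeat)(void);
};

static int demo_setup_partitions_sn2(void)
{
	return 0;			/* nothing to do on this backend */
}

static void demo_increment_heartbeat_sn2(void)
{
}

static struct demo_arch_operations demo_arch_ops_sn2 = {
	.setup_partitions	= demo_setup_partitions_sn2,
	.increment_heartbeat	= demo_increment_heartbeat_sn2,
};

struct demo_arch_operations demo_arch_ops;	/* the one global, filled at init */

int demo_init_sn2(void)
{
	demo_arch_ops = demo_arch_ops_sn2;	/* one struct copy replaces dozens of pointer stores */
	return demo_arch_ops.setup_partitions();
}

Callers then go through the table, as in xpc_arch_ops.increment_heartbeat() in the hunks that follow, so supporting a new backend means filling in one structure instead of editing dozens of globals.
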
@@ -240,7 +184,7 @@ xpc_timeout_partition_disengage(unsigned long data) (void)xpc_partition_disengaged(part); DBUG_ON(part->disengage_timeout != 0); - DBUG_ON(xpc_partition_engaged(XPC_PARTID(part))); + DBUG_ON(xpc_arch_ops.partition_engaged(XPC_PARTID(part))); } /* @@ -251,7 +195,7 @@ xpc_timeout_partition_disengage(unsigned long data) static void xpc_hb_beater(unsigned long dummy) { - xpc_increment_heartbeat(); + xpc_arch_ops.increment_heartbeat(); if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) wake_up_interruptible(&xpc_activate_IRQ_wq); @@ -263,7 +207,7 @@ xpc_hb_beater(unsigned long dummy) static void xpc_start_hb_beater(void) { - xpc_heartbeat_init(); + xpc_arch_ops.heartbeat_init(); init_timer(&xpc_hb_timer); xpc_hb_timer.function = xpc_hb_beater; xpc_hb_beater(0); @@ -273,7 +217,7 @@ static void xpc_stop_hb_beater(void) { del_timer_sync(&xpc_hb_timer); - xpc_heartbeat_exit(); + xpc_arch_ops.heartbeat_exit(); } /* @@ -302,7 +246,7 @@ xpc_check_remote_hb(void) continue; } - ret = xpc_get_remote_heartbeat(part); + ret = xpc_arch_ops.get_remote_heartbeat(part); if (ret != xpSuccess) XPC_DEACTIVATE_PARTITION(part, ret); } @@ -353,7 +297,7 @@ xpc_hb_checker(void *ignore) force_IRQ = 0; dev_dbg(xpc_part, "processing activate IRQs " "received\n"); - xpc_process_activate_IRQ_rcvd(); + xpc_arch_ops.process_activate_IRQ_rcvd(); } /* wait for IRQ or timeout */ @@ -528,7 +472,7 @@ xpc_setup_ch_structures(struct xpc_partition *part) init_waitqueue_head(&ch->idle_wq); } - ret = xpc_setup_ch_structures_sn(part); + ret = xpc_arch_ops.setup_ch_structures(part); if (ret != xpSuccess) goto out_2; @@ -572,7 +516,7 @@ xpc_teardown_ch_structures(struct xpc_partition *part) /* now we can begin tearing down the infrastructure */ - xpc_teardown_ch_structures_sn(part); + xpc_arch_ops.teardown_ch_structures(part); kfree(part->remote_openclose_args_base); part->remote_openclose_args = NULL; @@ -620,12 +564,12 @@ xpc_activating(void *__partid) dev_dbg(xpc_part, "activating partition %d\n", partid); - xpc_allow_hb(partid); + xpc_arch_ops.allow_hb(partid); if (xpc_setup_ch_structures(part) == xpSuccess) { (void)xpc_part_ref(part); /* this will always succeed */ - if (xpc_make_first_contact(part) == xpSuccess) { + if (xpc_arch_ops.make_first_contact(part) == xpSuccess) { xpc_mark_partition_active(part); xpc_channel_mgr(part); /* won't return until partition is deactivating */ @@ -635,12 +579,12 @@ xpc_activating(void *__partid) xpc_teardown_ch_structures(part); } - xpc_disallow_hb(partid); + xpc_arch_ops.disallow_hb(partid); xpc_mark_partition_inactive(part); if (part->reason == xpReactivating) { /* interrupting ourselves results in activating partition */ - xpc_request_partition_reactivation(part); + xpc_arch_ops.request_partition_reactivation(part); } return 0; @@ -713,10 +657,13 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed) static void xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch) { + int (*n_of_deliverable_payloads) (struct xpc_channel *) = + xpc_arch_ops.n_of_deliverable_payloads; + do { /* deliver messages to their intended recipients */ - while (xpc_n_of_deliverable_payloads(ch) > 0 && + while (n_of_deliverable_payloads(ch) > 0 && !(ch->flags & XPC_C_DISCONNECTING)) { xpc_deliver_payload(ch); } @@ -732,7 +679,7 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch) "wait_event_interruptible_exclusive()\n"); (void)wait_event_interruptible_exclusive(ch->idle_wq, - (xpc_n_of_deliverable_payloads(ch) > 0 || + (n_of_deliverable_payloads(ch) > 0 
|| (ch->flags & XPC_C_DISCONNECTING))); atomic_dec(&ch->kthreads_idle); @@ -749,6 +696,8 @@ xpc_kthread_start(void *args) struct xpc_channel *ch; int n_needed; unsigned long irq_flags; + int (*n_of_deliverable_payloads) (struct xpc_channel *) = + xpc_arch_ops.n_of_deliverable_payloads; dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n", partid, ch_number); @@ -777,7 +726,7 @@ xpc_kthread_start(void *args) * additional kthreads to help deliver them. We only * need one less than total #of messages to deliver. */ - n_needed = xpc_n_of_deliverable_payloads(ch) - 1; + n_needed = n_of_deliverable_payloads(ch) - 1; if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING)) xpc_activate_kthreads(ch, n_needed); @@ -805,7 +754,7 @@ xpc_kthread_start(void *args) if (atomic_dec_return(&ch->kthreads_assigned) == 0 && atomic_dec_return(&part->nchannels_engaged) == 0) { - xpc_indicate_partition_disengaged(part); + xpc_arch_ops.indicate_partition_disengaged(part); } xpc_msgqueue_deref(ch); @@ -837,6 +786,8 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed, u64 args = XPC_PACK_ARGS(ch->partid, ch->number); struct xpc_partition *part = &xpc_partitions[ch->partid]; struct task_struct *kthread; + void (*indicate_partition_disengaged) (struct xpc_partition *) = + xpc_arch_ops.indicate_partition_disengaged; while (needed-- > 0) { @@ -858,7 +809,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed, } else if (atomic_inc_return(&ch->kthreads_assigned) == 1 && atomic_inc_return(&part->nchannels_engaged) == 1) { - xpc_indicate_partition_engaged(part); + xpc_arch_ops.indicate_partition_engaged(part); } (void)xpc_part_ref(part); xpc_msgqueue_ref(ch); @@ -880,7 +831,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed, if (atomic_dec_return(&ch->kthreads_assigned) == 0 && atomic_dec_return(&part->nchannels_engaged) == 0) { - xpc_indicate_partition_disengaged(part); + indicate_partition_disengaged(part); } xpc_msgqueue_deref(ch); xpc_part_deref(part); @@ -993,13 +944,13 @@ xpc_setup_partitions(void) atomic_set(&part->references, 0); } - return xpc_setup_partitions_sn(); + return xpc_arch_ops.setup_partitions(); } static void xpc_teardown_partitions(void) { - xpc_teardown_partitions_sn(); + xpc_arch_ops.teardown_partitions(); kfree(xpc_partitions); } @@ -1055,7 +1006,7 @@ xpc_do_exit(enum xp_retval reason) disengage_timeout = part->disengage_timeout; } - if (xpc_any_partition_engaged()) { + if (xpc_arch_ops.any_partition_engaged()) { if (time_is_before_jiffies(printmsg_time)) { dev_info(xpc_part, "waiting for remote " "partitions to deactivate, timeout in " @@ -1086,8 +1037,7 @@ xpc_do_exit(enum xp_retval reason) } while (1); - DBUG_ON(xpc_any_partition_engaged()); - DBUG_ON(xpc_any_hbs_allowed() != 0); + DBUG_ON(xpc_arch_ops.any_partition_engaged()); xpc_teardown_rsvd_page(); @@ -1152,15 +1102,15 @@ xpc_die_deactivate(void) /* keep xpc_hb_checker thread from doing anything (just in case) */ xpc_exiting = 1; - xpc_disallow_all_hbs(); /*indicate we're deactivated */ + xpc_arch_ops.disallow_all_hbs(); /*indicate we're deactivated */ for (partid = 0; partid < xp_max_npartitions; partid++) { part = &xpc_partitions[partid]; - if (xpc_partition_engaged(partid) || + if (xpc_arch_ops.partition_engaged(partid) || part->act_state != XPC_P_AS_INACTIVE) { - xpc_request_partition_deactivation(part); - xpc_indicate_partition_disengaged(part); + xpc_arch_ops.request_partition_deactivation(part); + xpc_arch_ops.indicate_partition_disengaged(part); } } @@ -1177,7 +1127,7 @@ xpc_die_deactivate(void) 
wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5; while (1) { - any_engaged = xpc_any_partition_engaged(); + any_engaged = xpc_arch_ops.any_partition_engaged(); if (!any_engaged) { dev_info(xpc_part, "all partitions have deactivated\n"); break; @@ -1186,7 +1136,7 @@ xpc_die_deactivate(void) if (!keep_waiting--) { for (partid = 0; partid < xp_max_npartitions; partid++) { - if (xpc_partition_engaged(partid)) { + if (xpc_arch_ops.partition_engaged(partid)) { dev_info(xpc_part, "deactivate from " "remote partition %d timed " "out\n", partid); @@ -1233,7 +1183,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused) /* fall through */ case DIE_MCA_MONARCH_ENTER: case DIE_INIT_MONARCH_ENTER: - xpc_offline_heartbeat(); + xpc_arch_ops.offline_heartbeat(); break; case DIE_KDEBUG_LEAVE: @@ -1244,7 +1194,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused) /* fall through */ case DIE_MCA_MONARCH_LEAVE: case DIE_INIT_MONARCH_LEAVE: - xpc_online_heartbeat(); + xpc_arch_ops.online_heartbeat(); break; } #else diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index 6722f6fe4dc..65877bc5eda 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c @@ -70,6 +70,9 @@ xpc_get_rsvd_page_pa(int nasid) size_t buf_len = 0; void *buf = buf; void *buf_base = NULL; + enum xp_retval (*get_partition_rsvd_page_pa) + (void *, u64 *, unsigned long *, size_t *) = + xpc_arch_ops.get_partition_rsvd_page_pa; while (1) { @@ -79,8 +82,7 @@ xpc_get_rsvd_page_pa(int nasid) * ??? function or have two versions? Rename rp_pa for UV to * ??? rp_gpa? */ - ret = xpc_get_partition_rsvd_page_pa(buf, &cookie, &rp_pa, - &len); + ret = get_partition_rsvd_page_pa(buf, &cookie, &rp_pa, &len); dev_dbg(xpc_part, "SAL returned with ret=%d, cookie=0x%016lx, " "address=0x%016lx, len=0x%016lx\n", ret, @@ -172,7 +174,7 @@ xpc_setup_rsvd_page(void) xpc_part_nasids = XPC_RP_PART_NASIDS(rp); xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp); - ret = xpc_setup_rsvd_page_sn(rp); + ret = xpc_arch_ops.setup_rsvd_page(rp); if (ret != 0) return ret; @@ -264,7 +266,7 @@ xpc_partition_disengaged(struct xpc_partition *part) short partid = XPC_PARTID(part); int disengaged; - disengaged = !xpc_partition_engaged(partid); + disengaged = !xpc_arch_ops.partition_engaged(partid); if (part->disengage_timeout) { if (!disengaged) { if (time_is_after_jiffies(part->disengage_timeout)) { @@ -280,7 +282,7 @@ xpc_partition_disengaged(struct xpc_partition *part) dev_info(xpc_part, "deactivate request to remote " "partition %d timed out\n", partid); xpc_disengage_timedout = 1; - xpc_assume_partition_disengaged(partid); + xpc_arch_ops.assume_partition_disengaged(partid); disengaged = 1; } part->disengage_timeout = 0; @@ -294,7 +296,7 @@ xpc_partition_disengaged(struct xpc_partition *part) if (part->act_state != XPC_P_AS_INACTIVE) xpc_wakeup_channel_mgr(part); - xpc_cancel_partition_deactivation_request(part); + xpc_arch_ops.cancel_partition_deactivation_request(part); } return disengaged; } @@ -339,7 +341,7 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part, spin_unlock_irqrestore(&part->act_lock, irq_flags); if (reason == xpReactivating) { /* we interrupt ourselves to reactivate partition */ - xpc_request_partition_reactivation(part); + xpc_arch_ops.request_partition_reactivation(part); } return; } @@ -358,7 +360,7 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part, spin_unlock_irqrestore(&part->act_lock, 
irq_flags); /* ask remote partition to deactivate with regard to us */ - xpc_request_partition_deactivation(part); + xpc_arch_ops.request_partition_deactivation(part); /* set a timelimit on the disengage phase of the deactivation request */ part->disengage_timeout = jiffies + (xpc_disengage_timelimit * HZ); @@ -496,7 +498,7 @@ xpc_discovery(void) continue; } - xpc_request_partition_activation(remote_rp, + xpc_arch_ops.request_partition_activation(remote_rp, remote_rp_pa, nasid); } } diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c index eaaa964942d..915a3b495da 100644 --- a/drivers/misc/sgi-xp/xpc_sn2.c +++ b/drivers/misc/sgi-xp/xpc_sn2.c @@ -3,7 +3,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. + * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved. */ /* @@ -60,14 +60,14 @@ static struct xpc_vars_sn2 *xpc_vars_sn2; static struct xpc_vars_part_sn2 *xpc_vars_part_sn2; static int -xpc_setup_partitions_sn_sn2(void) +xpc_setup_partitions_sn2(void) { /* nothing needs to be done */ return 0; } static void -xpc_teardown_partitions_sn_sn2(void) +xpc_teardown_partitions_sn2(void) { /* nothing needs to be done */ } @@ -431,6 +431,13 @@ xpc_send_chctl_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags) } static void +xpc_send_chctl_opencomplete_sn2(struct xpc_channel *ch, + unsigned long *irq_flags) +{ + XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENCOMPLETE, irq_flags); +} + +static void xpc_send_chctl_msgrequest_sn2(struct xpc_channel *ch) { XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST, NULL); @@ -621,7 +628,7 @@ xpc_get_partition_rsvd_page_pa_sn2(void *buf, u64 *cookie, unsigned long *rp_pa, static int -xpc_setup_rsvd_page_sn_sn2(struct xpc_rsvd_page *rp) +xpc_setup_rsvd_page_sn2(struct xpc_rsvd_page *rp) { struct amo *amos_page; int i; @@ -629,7 +636,7 @@ xpc_setup_rsvd_page_sn_sn2(struct xpc_rsvd_page *rp) xpc_vars_sn2 = XPC_RP_VARS(rp); - rp->sn.vars_pa = xp_pa(xpc_vars_sn2); + rp->sn.sn2.vars_pa = xp_pa(xpc_vars_sn2); /* vars_part array follows immediately after vars */ xpc_vars_part_sn2 = (struct xpc_vars_part_sn2 *)((u8 *)XPC_RP_VARS(rp) + @@ -693,6 +700,33 @@ xpc_setup_rsvd_page_sn_sn2(struct xpc_rsvd_page *rp) return 0; } +static int +xpc_hb_allowed_sn2(short partid, void *heartbeating_to_mask) +{ + return test_bit(partid, heartbeating_to_mask); +} + +static void +xpc_allow_hb_sn2(short partid) +{ + DBUG_ON(xpc_vars_sn2 == NULL); + set_bit(partid, xpc_vars_sn2->heartbeating_to_mask); +} + +static void +xpc_disallow_hb_sn2(short partid) +{ + DBUG_ON(xpc_vars_sn2 == NULL); + clear_bit(partid, xpc_vars_sn2->heartbeating_to_mask); +} + +static void +xpc_disallow_all_hbs_sn2(void) +{ + DBUG_ON(xpc_vars_sn2 == NULL); + bitmap_zero(xpc_vars_sn2->heartbeating_to_mask, xp_max_npartitions); +} + static void xpc_increment_heartbeat_sn2(void) { @@ -719,7 +753,6 @@ xpc_heartbeat_init_sn2(void) DBUG_ON(xpc_vars_sn2 == NULL); bitmap_zero(xpc_vars_sn2->heartbeating_to_mask, XP_MAX_NPARTITIONS_SN2); - xpc_heartbeating_to_mask = &xpc_vars_sn2->heartbeating_to_mask[0]; xpc_online_heartbeat_sn2(); } @@ -751,9 +784,9 @@ xpc_get_remote_heartbeat_sn2(struct xpc_partition *part) remote_vars->heartbeating_to_mask[0]); if ((remote_vars->heartbeat == part->last_heartbeat && - remote_vars->heartbeat_offline == 0) || - !xpc_hb_allowed(sn_partition_id, - &remote_vars->heartbeating_to_mask)) { + !remote_vars->heartbeat_offline) || + 
!xpc_hb_allowed_sn2(sn_partition_id, + remote_vars->heartbeating_to_mask)) { ret = xpNoHeartbeat; } else { part->last_heartbeat = remote_vars->heartbeat; @@ -972,7 +1005,7 @@ xpc_identify_activate_IRQ_req_sn2(int nasid) return; } - remote_vars_pa = remote_rp->sn.vars_pa; + remote_vars_pa = remote_rp->sn.sn2.vars_pa; remote_rp_version = remote_rp->version; remote_rp_ts_jiffies = remote_rp->ts_jiffies; @@ -1129,7 +1162,7 @@ xpc_process_activate_IRQ_rcvd_sn2(void) * Setup the channel structures that are sn2 specific. */ static enum xp_retval -xpc_setup_ch_structures_sn_sn2(struct xpc_partition *part) +xpc_setup_ch_structures_sn2(struct xpc_partition *part) { struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2; struct xpc_channel_sn2 *ch_sn2; @@ -1251,7 +1284,7 @@ out_1: * Teardown the channel structures that are sn2 specific. */ static void -xpc_teardown_ch_structures_sn_sn2(struct xpc_partition *part) +xpc_teardown_ch_structures_sn2(struct xpc_partition *part) { struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2; short partid = XPC_PARTID(part); @@ -2315,61 +2348,70 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload) xpc_acknowledge_msgs_sn2(ch, get, msg->flags); } +static struct xpc_arch_operations xpc_arch_ops_sn2 = { + .setup_partitions = xpc_setup_partitions_sn2, + .teardown_partitions = xpc_teardown_partitions_sn2, + .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2, + .get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_sn2, + .setup_rsvd_page = xpc_setup_rsvd_page_sn2, + + .allow_hb = xpc_allow_hb_sn2, + .disallow_hb = xpc_disallow_hb_sn2, + .disallow_all_hbs = xpc_disallow_all_hbs_sn2, + .increment_heartbeat = xpc_increment_heartbeat_sn2, + .offline_heartbeat = xpc_offline_heartbeat_sn2, + .online_heartbeat = xpc_online_heartbeat_sn2, + .heartbeat_init = xpc_heartbeat_init_sn2, + .heartbeat_exit = xpc_heartbeat_exit_sn2, + .get_remote_heartbeat = xpc_get_remote_heartbeat_sn2, + + .request_partition_activation = + xpc_request_partition_activation_sn2, + .request_partition_reactivation = + xpc_request_partition_reactivation_sn2, + .request_partition_deactivation = + xpc_request_partition_deactivation_sn2, + .cancel_partition_deactivation_request = + xpc_cancel_partition_deactivation_request_sn2, + + .setup_ch_structures = xpc_setup_ch_structures_sn2, + .teardown_ch_structures = xpc_teardown_ch_structures_sn2, + + .make_first_contact = xpc_make_first_contact_sn2, + + .get_chctl_all_flags = xpc_get_chctl_all_flags_sn2, + .send_chctl_closerequest = xpc_send_chctl_closerequest_sn2, + .send_chctl_closereply = xpc_send_chctl_closereply_sn2, + .send_chctl_openrequest = xpc_send_chctl_openrequest_sn2, + .send_chctl_openreply = xpc_send_chctl_openreply_sn2, + .send_chctl_opencomplete = xpc_send_chctl_opencomplete_sn2, + .process_msg_chctl_flags = xpc_process_msg_chctl_flags_sn2, + + .save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_sn2, + + .setup_msg_structures = xpc_setup_msg_structures_sn2, + .teardown_msg_structures = xpc_teardown_msg_structures_sn2, + + .indicate_partition_engaged = xpc_indicate_partition_engaged_sn2, + .indicate_partition_disengaged = xpc_indicate_partition_disengaged_sn2, + .partition_engaged = xpc_partition_engaged_sn2, + .any_partition_engaged = xpc_any_partition_engaged_sn2, + .assume_partition_disengaged = xpc_assume_partition_disengaged_sn2, + + .n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_sn2, + .send_payload = xpc_send_payload_sn2, + .get_deliverable_payload = xpc_get_deliverable_payload_sn2, + 
.received_payload = xpc_received_payload_sn2, + .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2, +}; + int xpc_init_sn2(void) { int ret; size_t buf_size; - xpc_setup_partitions_sn = xpc_setup_partitions_sn_sn2; - xpc_teardown_partitions_sn = xpc_teardown_partitions_sn_sn2; - xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_sn2; - xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_sn2; - xpc_increment_heartbeat = xpc_increment_heartbeat_sn2; - xpc_offline_heartbeat = xpc_offline_heartbeat_sn2; - xpc_online_heartbeat = xpc_online_heartbeat_sn2; - xpc_heartbeat_init = xpc_heartbeat_init_sn2; - xpc_heartbeat_exit = xpc_heartbeat_exit_sn2; - xpc_get_remote_heartbeat = xpc_get_remote_heartbeat_sn2; - - xpc_request_partition_activation = xpc_request_partition_activation_sn2; - xpc_request_partition_reactivation = - xpc_request_partition_reactivation_sn2; - xpc_request_partition_deactivation = - xpc_request_partition_deactivation_sn2; - xpc_cancel_partition_deactivation_request = - xpc_cancel_partition_deactivation_request_sn2; - - xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2; - xpc_setup_ch_structures_sn = xpc_setup_ch_structures_sn_sn2; - xpc_teardown_ch_structures_sn = xpc_teardown_ch_structures_sn_sn2; - xpc_make_first_contact = xpc_make_first_contact_sn2; - - xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_sn2; - xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_sn2; - xpc_send_chctl_closereply = xpc_send_chctl_closereply_sn2; - xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_sn2; - xpc_send_chctl_openreply = xpc_send_chctl_openreply_sn2; - - xpc_save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_sn2; - - xpc_setup_msg_structures = xpc_setup_msg_structures_sn2; - xpc_teardown_msg_structures = xpc_teardown_msg_structures_sn2; - - xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2; - xpc_process_msg_chctl_flags = xpc_process_msg_chctl_flags_sn2; - xpc_n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_sn2; - xpc_get_deliverable_payload = xpc_get_deliverable_payload_sn2; - - xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_sn2; - xpc_indicate_partition_disengaged = - xpc_indicate_partition_disengaged_sn2; - xpc_partition_engaged = xpc_partition_engaged_sn2; - xpc_any_partition_engaged = xpc_any_partition_engaged_sn2; - xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_sn2; - - xpc_send_payload = xpc_send_payload_sn2; - xpc_received_payload = xpc_received_payload_sn2; + xpc_arch_ops = xpc_arch_ops_sn2; if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) { dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is " diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index f7fff4727ed..9172fcdee4e 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -46,8 +46,7 @@ struct uv_IO_APIC_route_entry { }; #endif -static atomic64_t xpc_heartbeat_uv; -static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV); +static struct xpc_heartbeat_uv *xpc_heartbeat_uv; #define XPC_ACTIVATE_MSG_SIZE_UV (1 * GRU_CACHE_LINE_BYTES) #define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \ @@ -63,7 +62,7 @@ static struct xpc_gru_mq_uv *xpc_activate_mq_uv; static struct xpc_gru_mq_uv *xpc_notify_mq_uv; static int -xpc_setup_partitions_sn_uv(void) +xpc_setup_partitions_uv(void) { short partid; struct xpc_partition_uv *part_uv; @@ -79,7 +78,7 @@ xpc_setup_partitions_sn_uv(void) } static void 
-xpc_teardown_partitions_sn_uv(void) +xpc_teardown_partitions_uv(void) { short partid; struct xpc_partition_uv *part_uv; @@ -423,41 +422,6 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, /* syncing of remote_act_state was just done above */ break; - case XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV: { - struct xpc_activate_mq_msg_heartbeat_req_uv *msg; - - msg = container_of(msg_hdr, - struct xpc_activate_mq_msg_heartbeat_req_uv, - hdr); - part_uv->heartbeat = msg->heartbeat; - break; - } - case XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV: { - struct xpc_activate_mq_msg_heartbeat_req_uv *msg; - - msg = container_of(msg_hdr, - struct xpc_activate_mq_msg_heartbeat_req_uv, - hdr); - part_uv->heartbeat = msg->heartbeat; - - spin_lock_irqsave(&part_uv->flags_lock, irq_flags); - part_uv->flags |= XPC_P_HEARTBEAT_OFFLINE_UV; - spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); - break; - } - case XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV: { - struct xpc_activate_mq_msg_heartbeat_req_uv *msg; - - msg = container_of(msg_hdr, - struct xpc_activate_mq_msg_heartbeat_req_uv, - hdr); - part_uv->heartbeat = msg->heartbeat; - - spin_lock_irqsave(&part_uv->flags_lock, irq_flags); - part_uv->flags &= ~XPC_P_HEARTBEAT_OFFLINE_UV; - spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); - break; - } case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: { struct xpc_activate_mq_msg_activate_req_uv *msg; @@ -475,6 +439,7 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV; part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */ part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies; + part_uv->heartbeat_gpa = msg->heartbeat_gpa; if (msg->activate_gru_mq_desc_gpa != part_uv->activate_gru_mq_desc_gpa) { @@ -569,6 +534,17 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, xpc_wakeup_channel_mgr(part); break; } + case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV: { + struct xpc_activate_mq_msg_chctl_opencomplete_uv *msg; + + msg = container_of(msg_hdr, struct + xpc_activate_mq_msg_chctl_opencomplete_uv, hdr); + spin_lock_irqsave(&part->chctl_lock, irq_flags); + part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENCOMPLETE; + spin_unlock_irqrestore(&part->chctl_lock, irq_flags); + + xpc_wakeup_channel_mgr(part); + } case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV: spin_lock_irqsave(&part_uv->flags_lock, irq_flags); part_uv->flags |= XPC_P_ENGAGED_UV; @@ -759,7 +735,7 @@ xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req) /* * !!! Make our side think that the remote partition sent an activate - * !!! message our way by doing what the activate IRQ handler would + * !!! mq message our way by doing what the activate IRQ handler would * !!! do had one really been sent. */ @@ -806,90 +782,82 @@ xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa, } static int -xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp) +xpc_setup_rsvd_page_uv(struct xpc_rsvd_page *rp) { - rp->sn.activate_gru_mq_desc_gpa = + xpc_heartbeat_uv = + &xpc_partitions[sn_partition_id].sn.uv.cached_heartbeat; + rp->sn.uv.heartbeat_gpa = uv_gpa(xpc_heartbeat_uv); + rp->sn.uv.activate_gru_mq_desc_gpa = uv_gpa(xpc_activate_mq_uv->gru_mq_desc); return 0; } static void -xpc_send_heartbeat_uv(int msg_type) +xpc_allow_hb_uv(short partid) { - short partid; - struct xpc_partition *part; - struct xpc_activate_mq_msg_heartbeat_req_uv msg; - - /* - * !!! On uv we're broadcasting a heartbeat message every 5 seconds. - * !!! 
Whereas on sn2 we're bte_copy'ng the heartbeat info every 20 - * !!! seconds. This is an increase in numalink traffic. - * ??? Is this good? - */ - - msg.heartbeat = atomic64_inc_return(&xpc_heartbeat_uv); - - partid = find_first_bit(xpc_heartbeating_to_mask_uv, - XP_MAX_NPARTITIONS_UV); - - while (partid < XP_MAX_NPARTITIONS_UV) { - part = &xpc_partitions[partid]; +} - xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), - msg_type); +static void +xpc_disallow_hb_uv(short partid) +{ +} - partid = find_next_bit(xpc_heartbeating_to_mask_uv, - XP_MAX_NPARTITIONS_UV, partid + 1); - } +static void +xpc_disallow_all_hbs_uv(void) +{ } static void xpc_increment_heartbeat_uv(void) { - xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV); + xpc_heartbeat_uv->value++; } static void xpc_offline_heartbeat_uv(void) { - xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV); + xpc_increment_heartbeat_uv(); + xpc_heartbeat_uv->offline = 1; } static void xpc_online_heartbeat_uv(void) { - xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV); + xpc_increment_heartbeat_uv(); + xpc_heartbeat_uv->offline = 0; } static void xpc_heartbeat_init_uv(void) { - atomic64_set(&xpc_heartbeat_uv, 0); - bitmap_zero(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV); - xpc_heartbeating_to_mask = &xpc_heartbeating_to_mask_uv[0]; + xpc_heartbeat_uv->value = 1; + xpc_heartbeat_uv->offline = 0; } static void xpc_heartbeat_exit_uv(void) { - xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV); + xpc_offline_heartbeat_uv(); } static enum xp_retval xpc_get_remote_heartbeat_uv(struct xpc_partition *part) { struct xpc_partition_uv *part_uv = &part->sn.uv; - enum xp_retval ret = xpNoHeartbeat; + enum xp_retval ret; - if (part_uv->remote_act_state != XPC_P_AS_INACTIVE && - part_uv->remote_act_state != XPC_P_AS_DEACTIVATING) { + ret = xp_remote_memcpy(uv_gpa(&part_uv->cached_heartbeat), + part_uv->heartbeat_gpa, + sizeof(struct xpc_heartbeat_uv)); + if (ret != xpSuccess) + return ret; - if (part_uv->heartbeat != part->last_heartbeat || - (part_uv->flags & XPC_P_HEARTBEAT_OFFLINE_UV)) { + if (part_uv->cached_heartbeat.value == part->last_heartbeat && + !part_uv->cached_heartbeat.offline) { - part->last_heartbeat = part_uv->heartbeat; - ret = xpSuccess; - } + ret = xpNoHeartbeat; + } else { + part->last_heartbeat = part_uv->cached_heartbeat.value; } return ret; } @@ -904,8 +872,9 @@ xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp, part->remote_rp_pa = remote_rp_gpa; /* !!! _pa here is really _gpa */ part->remote_rp_ts_jiffies = remote_rp->ts_jiffies; + part->sn.uv.heartbeat_gpa = remote_rp->sn.uv.heartbeat_gpa; part->sn.uv.activate_gru_mq_desc_gpa = - remote_rp->sn.activate_gru_mq_desc_gpa; + remote_rp->sn.uv.activate_gru_mq_desc_gpa; /* * ??? Is it a good idea to make this conditional on what is @@ -913,8 +882,9 @@ xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp, */ if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) { msg.rp_gpa = uv_gpa(xpc_rsvd_page); + msg.heartbeat_gpa = xpc_rsvd_page->sn.uv.heartbeat_gpa; msg.activate_gru_mq_desc_gpa = - xpc_rsvd_page->sn.activate_gru_mq_desc_gpa; + xpc_rsvd_page->sn.uv.activate_gru_mq_desc_gpa; xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV); } @@ -1010,7 +980,7 @@ xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head) * Setup the channel structures that are uv specific. 
*/ static enum xp_retval -xpc_setup_ch_structures_sn_uv(struct xpc_partition *part) +xpc_setup_ch_structures_uv(struct xpc_partition *part) { struct xpc_channel_uv *ch_uv; int ch_number; @@ -1029,7 +999,7 @@ xpc_setup_ch_structures_sn_uv(struct xpc_partition *part) * Teardown the channel structures that are uv specific. */ static void -xpc_teardown_ch_structures_sn_uv(struct xpc_partition *part) +xpc_teardown_ch_structures_uv(struct xpc_partition *part) { /* nothing needs to be done */ return; @@ -1243,6 +1213,16 @@ xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags) } static void +xpc_send_chctl_opencomplete_uv(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_activate_mq_msg_chctl_opencomplete_uv msg; + + msg.ch_number = ch->number; + xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), + XPC_ACTIVATE_MQ_MSG_CHCTL_OPENCOMPLETE_UV); +} + +static void xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number) { unsigned long irq_flags; @@ -1669,58 +1649,67 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload) msg->hdr.msg_slot_number += ch->remote_nentries; } +static struct xpc_arch_operations xpc_arch_ops_uv = { + .setup_partitions = xpc_setup_partitions_uv, + .teardown_partitions = xpc_teardown_partitions_uv, + .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv, + .get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv, + .setup_rsvd_page = xpc_setup_rsvd_page_uv, + + .allow_hb = xpc_allow_hb_uv, + .disallow_hb = xpc_disallow_hb_uv, + .disallow_all_hbs = xpc_disallow_all_hbs_uv, + .increment_heartbeat = xpc_increment_heartbeat_uv, + .offline_heartbeat = xpc_offline_heartbeat_uv, + .online_heartbeat = xpc_online_heartbeat_uv, + .heartbeat_init = xpc_heartbeat_init_uv, + .heartbeat_exit = xpc_heartbeat_exit_uv, + .get_remote_heartbeat = xpc_get_remote_heartbeat_uv, + + .request_partition_activation = + xpc_request_partition_activation_uv, + .request_partition_reactivation = + xpc_request_partition_reactivation_uv, + .request_partition_deactivation = + xpc_request_partition_deactivation_uv, + .cancel_partition_deactivation_request = + xpc_cancel_partition_deactivation_request_uv, + + .setup_ch_structures = xpc_setup_ch_structures_uv, + .teardown_ch_structures = xpc_teardown_ch_structures_uv, + + .make_first_contact = xpc_make_first_contact_uv, + + .get_chctl_all_flags = xpc_get_chctl_all_flags_uv, + .send_chctl_closerequest = xpc_send_chctl_closerequest_uv, + .send_chctl_closereply = xpc_send_chctl_closereply_uv, + .send_chctl_openrequest = xpc_send_chctl_openrequest_uv, + .send_chctl_openreply = xpc_send_chctl_openreply_uv, + .send_chctl_opencomplete = xpc_send_chctl_opencomplete_uv, + .process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv, + + .save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv, + + .setup_msg_structures = xpc_setup_msg_structures_uv, + .teardown_msg_structures = xpc_teardown_msg_structures_uv, + + .indicate_partition_engaged = xpc_indicate_partition_engaged_uv, + .indicate_partition_disengaged = xpc_indicate_partition_disengaged_uv, + .assume_partition_disengaged = xpc_assume_partition_disengaged_uv, + .partition_engaged = xpc_partition_engaged_uv, + .any_partition_engaged = xpc_any_partition_engaged_uv, + + .n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv, + .send_payload = xpc_send_payload_uv, + .get_deliverable_payload = xpc_get_deliverable_payload_uv, + .received_payload = xpc_received_payload_uv, + .notify_senders_of_disconnect = 
xpc_notify_senders_of_disconnect_uv, +}; + int xpc_init_uv(void) { - xpc_setup_partitions_sn = xpc_setup_partitions_sn_uv; - xpc_teardown_partitions_sn = xpc_teardown_partitions_sn_uv; - xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv; - xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv; - xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_uv; - xpc_increment_heartbeat = xpc_increment_heartbeat_uv; - xpc_offline_heartbeat = xpc_offline_heartbeat_uv; - xpc_online_heartbeat = xpc_online_heartbeat_uv; - xpc_heartbeat_init = xpc_heartbeat_init_uv; - xpc_heartbeat_exit = xpc_heartbeat_exit_uv; - xpc_get_remote_heartbeat = xpc_get_remote_heartbeat_uv; - - xpc_request_partition_activation = xpc_request_partition_activation_uv; - xpc_request_partition_reactivation = - xpc_request_partition_reactivation_uv; - xpc_request_partition_deactivation = - xpc_request_partition_deactivation_uv; - xpc_cancel_partition_deactivation_request = - xpc_cancel_partition_deactivation_request_uv; - - xpc_setup_ch_structures_sn = xpc_setup_ch_structures_sn_uv; - xpc_teardown_ch_structures_sn = xpc_teardown_ch_structures_sn_uv; - - xpc_make_first_contact = xpc_make_first_contact_uv; - - xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_uv; - xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_uv; - xpc_send_chctl_closereply = xpc_send_chctl_closereply_uv; - xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_uv; - xpc_send_chctl_openreply = xpc_send_chctl_openreply_uv; - - xpc_save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv; - - xpc_setup_msg_structures = xpc_setup_msg_structures_uv; - xpc_teardown_msg_structures = xpc_teardown_msg_structures_uv; - - xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_uv; - xpc_indicate_partition_disengaged = - xpc_indicate_partition_disengaged_uv; - xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_uv; - xpc_partition_engaged = xpc_partition_engaged_uv; - xpc_any_partition_engaged = xpc_any_partition_engaged_uv; - - xpc_n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv; - xpc_process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv; - xpc_send_payload = xpc_send_payload_uv; - xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv; - xpc_get_deliverable_payload = xpc_get_deliverable_payload_uv; - xpc_received_payload = xpc_received_payload_uv; + xpc_arch_ops = xpc_arch_ops_uv; if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) { dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n", diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index c232d11a7ed..06084dbf127 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -208,7 +208,7 @@ static int mmc_read_ext_csd(struct mmc_card *card) } ext_csd_struct = ext_csd[EXT_CSD_REV]; - if (ext_csd_struct > 2) { + if (ext_csd_struct > 3) { printk(KERN_ERR "%s: unrecognised EXT_CSD structure " "version %d\n", mmc_hostname(card->host), ext_csd_struct); diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 26fc098d77c..cd81c395e16 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -363,15 +363,6 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, goto err; /* - * For SPI, enable CRC as appropriate. - */ - if (mmc_host_is_spi(host)) { - err = mmc_spi_set_crc(host, use_spi_crc); - if (err) - goto err; - } - - /* * Fetch CID from card. 
*/ if (mmc_host_is_spi(host)) @@ -458,6 +449,18 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, } /* + * For SPI, enable CRC as appropriate. + * This CRC enable is located AFTER the reading of the + * card registers because some SDHC cards are not able + * to provide valid CRCs for non-512-byte blocks. + */ + if (mmc_host_is_spi(host)) { + err = mmc_spi_set_crc(host, use_spi_crc); + if (err) + goto free_card; + } + + /* * Attempt to change to high-speed (if supported) */ err = mmc_switch_hs(card); diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c index eb29b1d933a..e0be21a4a69 100644 --- a/drivers/mmc/host/imxmmc.c +++ b/drivers/mmc/host/imxmmc.c @@ -307,13 +307,6 @@ static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data) wmb(); - if (host->actual_bus_width == MMC_BUS_WIDTH_4) - BLR(host->dma) = 0; /* burst 64 byte read / 64 bytes write */ - else - BLR(host->dma) = 16; /* burst 16 byte read / 16 bytes write */ - - RSSR(host->dma) = DMA_REQ_SDHC; - set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events); clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events); @@ -818,9 +811,11 @@ static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (ios->bus_width == MMC_BUS_WIDTH_4) { host->actual_bus_width = MMC_BUS_WIDTH_4; imx_gpio_mode(PB11_PF_SD_DAT3); + BLR(host->dma) = 0; /* burst 64 byte read/write */ } else { host->actual_bus_width = MMC_BUS_WIDTH_1; imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11); + BLR(host->dma) = 16; /* burst 16 byte read/write */ } if (host->power_mode != ios->power_mode) { @@ -938,7 +933,7 @@ static void imxmci_check_status(unsigned long data) mod_timer(&host->timer, jiffies + (HZ>>1)); } -static int imxmci_probe(struct platform_device *pdev) +static int __init imxmci_probe(struct platform_device *pdev) { struct mmc_host *mmc; struct imxmci_host *host = NULL; @@ -1034,6 +1029,7 @@ static int imxmci_probe(struct platform_device *pdev) } host->dma_allocated = 1; imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host); + RSSR(host->dma) = DMA_REQ_SDHC; tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host); host->status_reg=0; @@ -1079,7 +1075,7 @@ out: return ret; } -static int imxmci_remove(struct platform_device *pdev) +static int __exit imxmci_remove(struct platform_device *pdev) { struct mmc_host *mmc = platform_get_drvdata(pdev); @@ -1145,8 +1141,7 @@ static int imxmci_resume(struct platform_device *dev) #endif /* CONFIG_PM */ static struct platform_driver imxmci_driver = { - .probe = imxmci_probe, - .remove = imxmci_remove, + .remove = __exit_p(imxmci_remove), .suspend = imxmci_suspend, .resume = imxmci_resume, .driver = { @@ -1157,7 +1152,7 @@ static struct platform_driver imxmci_driver = { static int __init imxmci_init(void) { - return platform_driver_register(&imxmci_driver); + return platform_driver_probe(&imxmci_driver, imxmci_probe); } static void __exit imxmci_exit(void) diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 72f8bde4877..f48349d18c9 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -24,7 +24,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ -#include <linux/hrtimer.h> +#include <linux/sched.h> #include <linux/delay.h> #include <linux/bio.h> #include <linux/dma-mapping.h> @@ -95,7 +95,7 @@ * reads which takes nowhere near that long. Older cards may be able to use * shorter timeouts ... but why bother? 
*/ -#define r1b_timeout ktime_set(3, 0) +#define r1b_timeout (HZ * 3) /****************************************************************************/ @@ -183,12 +183,11 @@ mmc_spi_readbytes(struct mmc_spi_host *host, unsigned len) return status; } -static int -mmc_spi_skip(struct mmc_spi_host *host, ktime_t timeout, unsigned n, u8 byte) +static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout, + unsigned n, u8 byte) { u8 *cp = host->data->status; - - timeout = ktime_add(timeout, ktime_get()); + unsigned long start = jiffies; while (1) { int status; @@ -203,22 +202,26 @@ mmc_spi_skip(struct mmc_spi_host *host, ktime_t timeout, unsigned n, u8 byte) return cp[i]; } - /* REVISIT investigate msleep() to avoid busy-wait I/O - * in at least some cases. - */ - if (ktime_to_ns(ktime_sub(ktime_get(), timeout)) > 0) + if (time_is_before_jiffies(start + timeout)) break; + + /* If we need long timeouts, we may release the CPU. + * We use jiffies here because we want to have a relation + * between elapsed time and the blocking of the scheduler. + */ + if (time_is_before_jiffies(start+1)) + schedule(); } return -ETIMEDOUT; } static inline int -mmc_spi_wait_unbusy(struct mmc_spi_host *host, ktime_t timeout) +mmc_spi_wait_unbusy(struct mmc_spi_host *host, unsigned long timeout) { return mmc_spi_skip(host, timeout, sizeof(host->data->status), 0); } -static int mmc_spi_readtoken(struct mmc_spi_host *host, ktime_t timeout) +static int mmc_spi_readtoken(struct mmc_spi_host *host, unsigned long timeout) { return mmc_spi_skip(host, timeout, 1, 0xff); } @@ -251,6 +254,10 @@ static int mmc_spi_response_get(struct mmc_spi_host *host, u8 *cp = host->data->status; u8 *end = cp + host->t.len; int value = 0; + int bitshift; + u8 leftover = 0; + unsigned short rotator; + int i; char tag[32]; snprintf(tag, sizeof(tag), " ... CMD%d response SPI_%s", @@ -268,9 +275,8 @@ static int mmc_spi_response_get(struct mmc_spi_host *host, /* Data block reads (R1 response types) may need more data... */ if (cp == end) { - unsigned i; - cp = host->data->status; + end = cp+1; /* Card sends N(CR) (== 1..8) bytes of all-ones then one * status byte ... and we already scanned 2 bytes. @@ -295,20 +301,34 @@ static int mmc_spi_response_get(struct mmc_spi_host *host, } checkstatus: - if (*cp & 0x80) { - dev_dbg(&host->spi->dev, "%s: INVALID RESPONSE, %02x\n", - tag, *cp); - value = -EBADR; - goto done; + bitshift = 0; + if (*cp & 0x80) { + /* Houston, we have an ugly card with a bit-shifted response */ + rotator = *cp++ << 8; + /* read the next byte */ + if (cp == end) { + value = mmc_spi_readbytes(host, 1); + if (value < 0) + goto done; + cp = host->data->status; + end = cp+1; + } + rotator |= *cp++; + while (rotator & 0x8000) { + bitshift++; + rotator <<= 1; + } + cmd->resp[0] = rotator >> 8; + leftover = rotator; + } else { + cmd->resp[0] = *cp++; } - - cmd->resp[0] = *cp++; cmd->error = 0; /* Status byte: the entire seven-bit R1 response. 
*/ if (cmd->resp[0] != 0) { if ((R1_SPI_PARAMETER | R1_SPI_ADDRESS - | R1_SPI_ILLEGAL_COMMAND) + | R1_SPI_ILLEGAL_COMMAND) & cmd->resp[0]) value = -EINVAL; else if (R1_SPI_COM_CRC & cmd->resp[0]) @@ -336,12 +356,45 @@ checkstatus: * SPI R5 == R1 + data byte; IO_RW_DIRECT */ case MMC_RSP_SPI_R2: - cmd->resp[0] |= *cp << 8; + /* read the next byte */ + if (cp == end) { + value = mmc_spi_readbytes(host, 1); + if (value < 0) + goto done; + cp = host->data->status; + end = cp+1; + } + if (bitshift) { + rotator = leftover << 8; + rotator |= *cp << bitshift; + cmd->resp[0] |= (rotator & 0xFF00); + } else { + cmd->resp[0] |= *cp << 8; + } break; /* SPI R3, R4, or R7 == R1 + 4 bytes */ case MMC_RSP_SPI_R3: - cmd->resp[1] = get_unaligned_be32(cp); + rotator = leftover << 8; + cmd->resp[1] = 0; + for (i = 0; i < 4; i++) { + cmd->resp[1] <<= 8; + /* read the next byte */ + if (cp == end) { + value = mmc_spi_readbytes(host, 1); + if (value < 0) + goto done; + cp = host->data->status; + end = cp+1; + } + if (bitshift) { + rotator |= *cp++ << bitshift; + cmd->resp[1] |= (rotator >> 8); + rotator <<= 8; + } else { + cmd->resp[1] |= *cp++; + } + } break; /* SPI R1 == just one status byte */ @@ -607,7 +660,7 @@ mmc_spi_setup_data_message( */ static int mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t, - ktime_t timeout) + unsigned long timeout) { struct spi_device *spi = host->spi; int status, i; @@ -717,11 +770,13 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t, */ static int mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t, - ktime_t timeout) + unsigned long timeout) { struct spi_device *spi = host->spi; int status; struct scratch *scratch = host->data; + unsigned int bitshift; + u8 leftover; /* At least one SD card sends an all-zeroes byte when N(CX) * applies, before the all-ones bytes ... just cope with that. @@ -733,38 +788,60 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t, if (status == 0xff || status == 0) status = mmc_spi_readtoken(host, timeout); - if (status == SPI_TOKEN_SINGLE) { - if (host->dma_dev) { - dma_sync_single_for_device(host->dma_dev, - host->data_dma, sizeof(*scratch), - DMA_BIDIRECTIONAL); - dma_sync_single_for_device(host->dma_dev, - t->rx_dma, t->len, - DMA_FROM_DEVICE); - } + if (status < 0) { + dev_dbg(&spi->dev, "read error %02x (%d)\n", status, status); + return status; + } - status = spi_sync(spi, &host->m); + /* The token may be bit-shifted... + * the first 0-bit precedes the data stream. + */ + bitshift = 7; + while (status & 0x80) { + status <<= 1; + bitshift--; + } + leftover = status << 1; - if (host->dma_dev) { - dma_sync_single_for_cpu(host->dma_dev, - host->data_dma, sizeof(*scratch), - DMA_BIDIRECTIONAL); - dma_sync_single_for_cpu(host->dma_dev, - t->rx_dma, t->len, - DMA_FROM_DEVICE); - } + if (host->dma_dev) { + dma_sync_single_for_device(host->dma_dev, + host->data_dma, sizeof(*scratch), + DMA_BIDIRECTIONAL); + dma_sync_single_for_device(host->dma_dev, + t->rx_dma, t->len, + DMA_FROM_DEVICE); + } - } else { - dev_dbg(&spi->dev, "read error %02x (%d)\n", status, status); + status = spi_sync(spi, &host->m); - /* we've read extra garbage, timed out, etc */ - if (status < 0) - return status; + if (host->dma_dev) { + dma_sync_single_for_cpu(host->dma_dev, + host->data_dma, sizeof(*scratch), + DMA_BIDIRECTIONAL); + dma_sync_single_for_cpu(host->dma_dev, + t->rx_dma, t->len, + DMA_FROM_DEVICE); + } - /* low four bits are an R2 subset, fifth seems to be - * vendor specific ... 
map them all to generic error.. + if (bitshift) { + /* Walk through the data and the crc and do + * all the magic to get byte-aligned data. */ - return -EIO; + u8 *cp = t->rx_buf; + unsigned int len; + unsigned int bitright = 8 - bitshift; + u8 temp; + for (len = t->len; len; len--) { + temp = *cp; + *cp++ = leftover | (temp >> bitshift); + leftover = temp << bitright; + } + cp = (u8 *) &scratch->crc_val; + temp = *cp; + *cp++ = leftover | (temp >> bitshift); + leftover = temp << bitright; + temp = *cp; + *cp = leftover | (temp >> bitshift); } if (host->mmc->use_spi_crc) { @@ -803,7 +880,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd, unsigned n_sg; int multiple = (data->blocks > 1); u32 clock_rate; - ktime_t timeout; + unsigned long timeout; if (data->flags & MMC_DATA_READ) direction = DMA_FROM_DEVICE; @@ -817,8 +894,9 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd, else clock_rate = spi->max_speed_hz; - timeout = ktime_add_ns(ktime_set(0, 0), data->timeout_ns + - data->timeout_clks * 1000000 / clock_rate); + timeout = data->timeout_ns + + data->timeout_clks * 1000000 / clock_rate; + timeout = usecs_to_jiffies((unsigned int)(timeout / 1000)) + 1; /* Handle scatterlist segments one at a time, with synch for * each 512-byte block diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index d183be6f2a5..e62a22a7f00 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -298,7 +298,6 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data) struct mmc_request *mrq = host->mrq; host->mrq = NULL; - mmc_omap_fclk_lazy_disable(host); mmc_request_done(host->mmc, mrq); return; } @@ -434,6 +433,8 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id) if (host->mrq == NULL) { OMAP_HSMMC_WRITE(host->base, STAT, OMAP_HSMMC_READ(host->base, STAT)); + /* Flush posted write */ + OMAP_HSMMC_READ(host->base, STAT); return IRQ_HANDLED; } @@ -489,8 +490,10 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id) } OMAP_HSMMC_WRITE(host->base, STAT, status); + /* Flush posted write */ + OMAP_HSMMC_READ(host->base, STAT); - if (end_cmd || (status & CC)) + if (end_cmd || ((status & CC) && host->cmd)) mmc_omap_cmd_done(host, host->cmd); if (end_trans || (status & TC)) mmc_omap_xfer_done(host, data); diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index c5b316e2237..cd37962ec44 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c @@ -729,6 +729,6 @@ static void __exit sdhci_drv_exit(void) module_init(sdhci_drv_init); module_exit(sdhci_drv_exit); -MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>"); +MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 30d8e3d4e6f..9234be2226e 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1935,7 +1935,7 @@ module_exit(sdhci_drv_exit); module_param(debug_quirks, uint, 0444); -MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>"); +MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index adda3795203..89bf8cd25ca 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c @@ -2036,7 +2036,7 @@ module_param_named(irq, param_irq, uint, 0444); module_param_named(dma, param_dma, 
int, 0444); MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Pierre Ossman <drzeus@drzeus.cx>"); +MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); MODULE_DESCRIPTION("Winbond W83L51xD SD/MMC card interface driver"); #ifdef CONFIG_PNP diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 9e7baec4572..9e921544ba2 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -977,6 +977,8 @@ config ETHOC depends on NET_ETHERNET && HAS_IOMEM select MII select PHYLIB + select CRC32 + select BITREVERSE help Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC. @@ -2056,6 +2058,27 @@ config IGB_DCA driver. DCA is a method for warming the CPU cache before data is used, with the intent of lessening the impact of cache misses. +config IGBVF + tristate "Intel(R) 82576 Virtual Function Ethernet support" + depends on PCI + ---help--- + This driver supports Intel(R) 82576 virtual functions. For more + information on how to identify your adapter, go to the Adapter & + Driver ID Guide at: + + <http://support.intel.com/support/network/adapter/pro100/21397.htm> + + For general information and support, go to the Intel support + website at: + + <http://support.intel.com> + + More specific information on configuring the driver is in + <file:Documentation/networking/e1000.txt>. + + To compile this driver as a module, choose M here. The module + will be called igbvf. + source "drivers/net/ixp2000/Kconfig" config MYRI_SBUS diff --git a/drivers/net/Makefile b/drivers/net/Makefile index edc9a0d6171..1fc4602a6ff 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_E1000) += e1000/ obj-$(CONFIG_E1000E) += e1000e/ obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac/ obj-$(CONFIG_IGB) += igb/ +obj-$(CONFIG_IGBVF) += igbvf/ obj-$(CONFIG_IXGBE) += ixgbe/ obj-$(CONFIG_IXGB) += ixgb/ obj-$(CONFIG_IP1000) += ipg.o diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c index d0d0c2fee05..02f64d57864 100644 --- a/drivers/net/a2065.c +++ b/drivers/net/a2065.c @@ -692,6 +692,17 @@ static struct zorro_driver a2065_driver = { .remove = __devexit_p(a2065_remove_one), }; +static const struct net_device_ops lance_netdev_ops = { + .ndo_open = lance_open, + .ndo_stop = lance_close, + .ndo_start_xmit = lance_start_xmit, + .ndo_tx_timeout = lance_tx_timeout, + .ndo_set_multicast_list = lance_set_multicast, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +}; + static int __devinit a2065_init_one(struct zorro_dev *z, const struct zorro_device_id *ent) { @@ -753,12 +764,8 @@ static int __devinit a2065_init_one(struct zorro_dev *z, priv->rx_ring_mod_mask = RX_RING_MOD_MASK; priv->tx_ring_mod_mask = TX_RING_MOD_MASK; - dev->open = &lance_open; - dev->stop = &lance_close; - dev->hard_start_xmit = &lance_start_xmit; - dev->tx_timeout = &lance_tx_timeout; + dev->netdev_ops = &lance_netdev_ops; dev->watchdog_timeo = 5*HZ; - dev->set_multicast_list = &lance_set_multicast; dev->dma = 0; init_timer(&priv->multicast_timer); diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c index e1d72e06f3e..58e8d522e5b 100644 --- a/drivers/net/ariadne.c +++ b/drivers/net/ariadne.c @@ -155,6 +155,18 @@ static struct zorro_driver ariadne_driver = { .remove = __devexit_p(ariadne_remove_one), }; +static const struct net_device_ops ariadne_netdev_ops = { + .ndo_open = ariadne_open, + .ndo_stop = ariadne_close, + .ndo_start_xmit = ariadne_start_xmit, + .ndo_tx_timeout = ariadne_tx_timeout, + .ndo_get_stats = ariadne_get_stats, + .ndo_set_multicast_list 
= set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +}; + static int __devinit ariadne_init_one(struct zorro_dev *z, const struct zorro_device_id *ent) { @@ -197,13 +209,8 @@ static int __devinit ariadne_init_one(struct zorro_dev *z, dev->mem_start = ZTWO_VADDR(mem_start); dev->mem_end = dev->mem_start+ARIADNE_RAM_SIZE; - dev->open = &ariadne_open; - dev->stop = &ariadne_close; - dev->hard_start_xmit = &ariadne_start_xmit; - dev->tx_timeout = &ariadne_tx_timeout; + dev->netdev_ops = &ariadne_netdev_ops; dev->watchdog_timeo = 5*HZ; - dev->get_stats = &ariadne_get_stats; - dev->set_multicast_list = &set_multicast_list; err = register_netdev(dev); if (err) { diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c index 4bc6901b381..627bc75da17 100644 --- a/drivers/net/arm/am79c961a.c +++ b/drivers/net/arm/am79c961a.c @@ -665,6 +665,20 @@ static void __init am79c961_banner(void) if (net_debug && version_printed++ == 0) printk(KERN_INFO "%s", version); } +static const struct net_device_ops am79c961_netdev_ops = { + .ndo_open = am79c961_open, + .ndo_stop = am79c961_close, + .ndo_start_xmit = am79c961_sendpacket, + .ndo_get_stats = am79c961_getstats, + .ndo_set_multicast_list = am79c961_setmulticastlist, + .ndo_tx_timeout = am79c961_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = am79c961_poll_controller, +#endif +}; static int __init am79c961_probe(struct platform_device *pdev) { @@ -732,15 +746,7 @@ static int __init am79c961_probe(struct platform_device *pdev) if (am79c961_hw_init(dev)) goto release; - dev->open = am79c961_open; - dev->stop = am79c961_close; - dev->hard_start_xmit = am79c961_sendpacket; - dev->get_stats = am79c961_getstats; - dev->set_multicast_list = am79c961_setmulticastlist; - dev->tx_timeout = am79c961_timeout; -#ifdef CONFIG_NET_POLL_CONTROLLER - dev->poll_controller = am79c961_poll_controller; -#endif + dev->netdev_ops = &am79c961_netdev_ops; ret = register_netdev(dev); if (ret == 0) { diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c index 442938d5038..7f4bc8ae546 100644 --- a/drivers/net/arm/at91_ether.c +++ b/drivers/net/arm/at91_ether.c @@ -577,7 +577,7 @@ static void at91ether_sethashtable(struct net_device *dev) /* * Enable/Disable promiscuous and multicast modes. */ -static void at91ether_set_rx_mode(struct net_device *dev) +static void at91ether_set_multicast_list(struct net_device *dev) { unsigned long cfg; @@ -808,7 +808,7 @@ static int at91ether_close(struct net_device *dev) /* * Transmit packet. 
*/ -static int at91ether_tx(struct sk_buff *skb, struct net_device *dev) +static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct at91_private *lp = netdev_priv(dev); @@ -828,7 +828,7 @@ static int at91ether_tx(struct sk_buff *skb, struct net_device *dev) dev->trans_start = jiffies; } else { - printk(KERN_ERR "at91_ether.c: at91ether_tx() called, but device is busy!\n"); + printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n"); return 1; /* if we return anything but zero, dev.c:1055 calls kfree_skb(skb) on this skb, he also reports -ENETDOWN and printk's, so either we free and return(0) or don't free and return 1 */ @@ -965,6 +965,21 @@ static void at91ether_poll_controller(struct net_device *dev) } #endif +static const struct net_device_ops at91ether_netdev_ops = { + .ndo_open = at91ether_open, + .ndo_stop = at91ether_close, + .ndo_start_xmit = at91ether_start_xmit, + .ndo_get_stats = at91ether_stats, + .ndo_set_multicast_list = at91ether_set_multicast_list, + .ndo_set_mac_address = set_mac_address, + .ndo_do_ioctl = at91ether_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = at91ether_poll_controller, +#endif +}; + /* * Initialize the ethernet interface */ @@ -1005,17 +1020,8 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add spin_lock_init(&lp->lock); ether_setup(dev); - dev->open = at91ether_open; - dev->stop = at91ether_close; - dev->hard_start_xmit = at91ether_tx; - dev->get_stats = at91ether_stats; - dev->set_multicast_list = at91ether_set_rx_mode; - dev->set_mac_address = set_mac_address; + dev->netdev_ops = &at91ether_netdev_ops; dev->ethtool_ops = &at91ether_ethtool_ops; - dev->do_ioctl = at91ether_ioctl; -#ifdef CONFIG_NET_POLL_CONTROLLER - dev->poll_controller = at91ether_poll_controller; -#endif SET_NETDEV_DEV(dev, &pdev->dev); diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c index cc7708775da..41736772c1d 100644 --- a/drivers/net/arm/ep93xx_eth.c +++ b/drivers/net/arm/ep93xx_eth.c @@ -770,7 +770,18 @@ static struct ethtool_ops ep93xx_ethtool_ops = { .get_link = ep93xx_get_link, }; -struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data) +static const struct net_device_ops ep93xx_netdev_ops = { + .ndo_open = ep93xx_open, + .ndo_stop = ep93xx_close, + .ndo_start_xmit = ep93xx_xmit, + .ndo_get_stats = ep93xx_get_stats, + .ndo_do_ioctl = ep93xx_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +}; + +static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data) { struct net_device *dev; @@ -780,12 +791,8 @@ struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data) memcpy(dev->dev_addr, data->dev_addr, ETH_ALEN); - dev->get_stats = ep93xx_get_stats; dev->ethtool_ops = &ep93xx_ethtool_ops; - dev->hard_start_xmit = ep93xx_xmit; - dev->open = ep93xx_open; - dev->stop = ep93xx_close; - dev->do_ioctl = ep93xx_ioctl; + dev->netdev_ops = &ep93xx_netdev_ops; dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c index e380de45446..edf770f639f 100644 --- a/drivers/net/arm/ether1.c +++ b/drivers/net/arm/ether1.c @@ -991,6 +991,18 @@ static void __devinit ether1_banner(void) printk(KERN_INFO "%s", version); } +static const struct net_device_ops ether1_netdev_ops = { + .ndo_open = ether1_open, + .ndo_stop = ether1_close, + 
.ndo_start_xmit = ether1_sendpacket, + .ndo_get_stats = ether1_getstats, + .ndo_set_multicast_list = ether1_setmulticastlist, + .ndo_tx_timeout = ether1_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +}; + static int __devinit ether1_probe(struct expansion_card *ec, const struct ecard_id *id) { @@ -1031,12 +1043,7 @@ ether1_probe(struct expansion_card *ec, const struct ecard_id *id) goto free; } - dev->open = ether1_open; - dev->stop = ether1_close; - dev->hard_start_xmit = ether1_sendpacket; - dev->get_stats = ether1_getstats; - dev->set_multicast_list = ether1_setmulticastlist; - dev->tx_timeout = ether1_timeout; + dev->netdev_ops = &ether1_netdev_ops; dev->watchdog_timeo = 5 * HZ / 100; ret = register_netdev(dev); diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c index 21a7bef12d3..ec8a1ae1e88 100644 --- a/drivers/net/arm/ether3.c +++ b/drivers/net/arm/ether3.c @@ -770,6 +770,18 @@ static void __devinit ether3_banner(void) printk(KERN_INFO "%s", version); } +static const struct net_device_ops ether3_netdev_ops = { + .ndo_open = ether3_open, + .ndo_stop = ether3_close, + .ndo_start_xmit = ether3_sendpacket, + .ndo_get_stats = ether3_getstats, + .ndo_set_multicast_list = ether3_setmulticastlist, + .ndo_tx_timeout = ether3_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +}; + static int __devinit ether3_probe(struct expansion_card *ec, const struct ecard_id *id) { @@ -846,12 +858,7 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id) goto free; } - dev->open = ether3_open; - dev->stop = ether3_close; - dev->hard_start_xmit = ether3_sendpacket; - dev->get_stats = ether3_getstats; - dev->set_multicast_list = ether3_setmulticastlist; - dev->tx_timeout = ether3_timeout; + dev->netdev_ops = &ether3_netdev_ops; dev->watchdog_timeo = 5 * HZ / 100; ret = register_netdev(dev); diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c index 2d81f6afcb5..5425ab0c38c 100644 --- a/drivers/net/atarilance.c +++ b/drivers/net/atarilance.c @@ -453,6 +453,16 @@ static noinline int __init addr_accessible(volatile void *regp, int wordflag, return( ret ); } +static const struct net_device_ops lance_netdev_ops = { + .ndo_open = lance_open, + .ndo_stop = lance_close, + .ndo_start_xmit = lance_start_xmit, + .ndo_set_multicast_list = set_multicast_list, + .ndo_set_mac_address = lance_set_mac_address, + .ndo_tx_timeout = lance_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; static unsigned long __init lance_probe1( struct net_device *dev, struct lance_addr *init_rec ) @@ -623,15 +633,9 @@ static unsigned long __init lance_probe1( struct net_device *dev, if (did_version++ == 0) DPRINTK( 1, ( version )); - /* The LANCE-specific entries in the device structure. 
*/ - dev->open = &lance_open; - dev->hard_start_xmit = &lance_start_xmit; - dev->stop = &lance_close; - dev->set_multicast_list = &set_multicast_list; - dev->set_mac_address = &lance_set_mac_address; + dev->netdev_ops = &lance_netdev_ops; /* XXX MSch */ - dev->tx_timeout = lance_tx_timeout; dev->watchdog_timeo = TX_TIMEOUT; return( 1 ); diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c index deb7b53167e..83a12125b94 100644 --- a/drivers/net/atl1c/atl1c_main.c +++ b/drivers/net/atl1c/atl1c_main.c @@ -2532,8 +2532,8 @@ static int __devinit atl1c_probe(struct pci_dev *pdev, * various kernel subsystems to support the mechanics required by a * fixed-high-32-bit system. */ - if ((pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) || - (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK) != 0)) { + if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) || + (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) { dev_err(&pdev->dev, "No usable DMA configuration,aborting\n"); goto err_dma; } diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index 4274e4ac963..d58c105fc77 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c @@ -1004,12 +1004,12 @@ static void au1000_tx_timeout(struct net_device *dev) netif_wake_queue(dev); } -static void set_rx_mode(struct net_device *dev) +static void au1000_multicast_list(struct net_device *dev) { struct au1000_private *aup = netdev_priv(dev); if (au1000_debug > 4) - printk("%s: set_rx_mode: flags=%x\n", dev->name, dev->flags); + printk("%s: au1000_multicast_list: flags=%x\n", dev->name, dev->flags); if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ aup->mac->control |= MAC_PROMISCUOUS; @@ -1047,6 +1047,18 @@ static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd); } +static const struct net_device_ops au1000_netdev_ops = { + .ndo_open = au1000_open, + .ndo_stop = au1000_close, + .ndo_start_xmit = au1000_tx, + .ndo_set_multicast_list = au1000_multicast_list, + .ndo_do_ioctl = au1000_ioctl, + .ndo_tx_timeout = au1000_tx_timeout, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + static struct net_device * au1000_probe(int port_num) { static unsigned version_printed = 0; @@ -1197,13 +1209,8 @@ static struct net_device * au1000_probe(int port_num) dev->base_addr = base; dev->irq = irq; - dev->open = au1000_open; - dev->hard_start_xmit = au1000_tx; - dev->stop = au1000_close; - dev->set_multicast_list = &set_rx_mode; - dev->do_ioctl = &au1000_ioctl; + dev->netdev_ops = &au1000_netdev_ops; SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops); - dev->tx_timeout = au1000_tx_timeout; dev->watchdog_timeo = ETH_TX_TIMEOUT; /* diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c index 04f4b73fa8d..9592f22e4c8 100644 --- a/drivers/net/benet/be_ethtool.c +++ b/drivers/net/benet/be_ethtool.c @@ -319,7 +319,7 @@ be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd) be_cmd_get_flow_control(&adapter->ctrl, &ecmd->tx_pause, &ecmd->rx_pause); - ecmd->autoneg = AUTONEG_ENABLE; + ecmd->autoneg = 0; } static int @@ -328,7 +328,7 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd) struct be_adapter *adapter = netdev_priv(netdev); int status; - if (ecmd->autoneg != AUTONEG_ENABLE) + if (ecmd->autoneg != 0) return -EINVAL; status = be_cmd_set_flow_control(&adapter->ctrl, ecmd->tx_pause, diff --git a/drivers/net/benet/be_main.c 
b/drivers/net/benet/be_main.c index 9b75aa63006..30d0c81c989 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -1821,11 +1821,11 @@ static int __devinit be_probe(struct pci_dev *pdev, be_msix_enable(adapter); - status = pci_set_dma_mask(pdev, DMA_64BIT_MASK); + status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (!status) { netdev->features |= NETIF_F_HIGHDMA; } else { - status = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (status) { dev_err(&pdev->dev, "Could not set PCI DMA Mask\n"); goto free_netdev; diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index 9afe8092dfc..9f971ed6b58 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c @@ -979,6 +979,20 @@ static int bfin_mac_open(struct net_device *dev) return 0; } +static const struct net_device_ops bfin_mac_netdev_ops = { + .ndo_open = bfin_mac_open, + .ndo_stop = bfin_mac_close, + .ndo_start_xmit = bfin_mac_hard_start_xmit, + .ndo_set_mac_address = bfin_mac_set_mac_address, + .ndo_tx_timeout = bfin_mac_timeout, + .ndo_set_multicast_list = bfin_mac_set_multicast_list, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = bfin_mac_poll, +#endif +}; + /* * * this makes the board clean up everything that it can @@ -1086,15 +1100,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev) /* Fill in the fields of the device structure with ethernet values. */ ether_setup(ndev); - ndev->open = bfin_mac_open; - ndev->stop = bfin_mac_close; - ndev->hard_start_xmit = bfin_mac_hard_start_xmit; - ndev->set_mac_address = bfin_mac_set_mac_address; - ndev->tx_timeout = bfin_mac_timeout; - ndev->set_multicast_list = bfin_mac_set_multicast_list; -#ifdef CONFIG_NET_POLL_CONTROLLER - ndev->poll_controller = bfin_mac_poll; -#endif + ndev->netdev_ops = &bfin_mac_netdev_ops; ndev->ethtool_ops = &bfin_mac_ethtool_ops; spin_lock_init(&lp->lock); diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 9d268be0b67..d47839184a0 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -3427,8 +3427,8 @@ static int __devinit bnx2_request_firmware(struct bnx2 *bp) { const char *mips_fw_file, *rv2p_fw_file; - const struct bnx2_mips_fw_file *mips; - const struct bnx2_rv2p_fw_file *rv2p; + const struct bnx2_mips_fw_file *mips_fw; + const struct bnx2_rv2p_fw_file *rv2p_fw; int rc; if (CHIP_NUM(bp) == CHIP_NUM_5709) { @@ -3452,21 +3452,21 @@ bnx2_request_firmware(struct bnx2 *bp) rv2p_fw_file); return rc; } - mips = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data; - rv2p = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data; - if (bp->mips_firmware->size < sizeof(*mips) || - check_mips_fw_entry(bp->mips_firmware, &mips->com) || - check_mips_fw_entry(bp->mips_firmware, &mips->cp) || - check_mips_fw_entry(bp->mips_firmware, &mips->rxp) || - check_mips_fw_entry(bp->mips_firmware, &mips->tpat) || - check_mips_fw_entry(bp->mips_firmware, &mips->txp)) { + mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data; + rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data; + if (bp->mips_firmware->size < sizeof(*mips_fw) || + check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) || + check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) || + check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) || + check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) || + check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) { printk(KERN_ERR PFX 
"Firmware file \"%s\" is invalid\n", mips_fw_file); return -EINVAL; } - if (bp->rv2p_firmware->size < sizeof(*rv2p) || - check_fw_section(bp->rv2p_firmware, &rv2p->proc1.rv2p, 8, true) || - check_fw_section(bp->rv2p_firmware, &rv2p->proc2.rv2p, 8, true)) { + if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) || + check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) || + check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) { printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n", rv2p_fw_file); return -EINVAL; diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 8dc6fbb9a41..553a8991977 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -370,8 +370,6 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct if (arp->op_code == htons(ARPOP_REPLY)) { /* update rx hash table for this ARP */ - printk("rar: update orig %s bond_dev %s\n", orig_dev->name, - bond_dev->name); bond = netdev_priv(bond_dev); rlb_update_entry_from_arp(bond, arp); pr_debug("Server received an ARP Reply from client\n"); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 99610f358c4..63369b6b14d 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2570,7 +2570,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { if (!targets[i]) - continue; + break; pr_debug("basa: target %x\n", targets[i]); if (list_empty(&bond->vlan_list)) { pr_debug("basa: empty vlan: arp_send\n"); @@ -2677,7 +2677,6 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 int i; __be32 *targets = bond->params.arp_targets; - targets = bond->params.arp_targets; for (i = 0; (i < BOND_MAX_ARP_TARGETS) && targets[i]; i++) { pr_debug("bva: sip %pI4 tip %pI4 t[%d] %pI4 bhti(tip) %d\n", &sip, &tip, i, &targets[i], bond_has_this_ip(bond, tip)); @@ -3303,7 +3302,7 @@ static void bond_info_show_master(struct seq_file *seq) for(i = 0; (i < BOND_MAX_ARP_TARGETS) ;i++) { if (!bond->params.arp_targets[i]) - continue; + break; if (printed) seq_printf(seq, ","); seq_printf(seq, " %pI4", &bond->params.arp_targets[i]); diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 18cf4787874..d2873153522 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -684,17 +684,15 @@ static ssize_t bonding_store_arp_targets(struct device *d, goto out; } /* look for an empty slot to put the target in, and check for dupes */ - for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { + for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) { if (targets[i] == newtarget) { /* duplicate */ printk(KERN_ERR DRV_NAME ": %s: ARP target %pI4 is already present\n", bond->dev->name, &newtarget); - if (done) - targets[i] = 0; ret = -EINVAL; goto out; } - if (targets[i] == 0 && !done) { + if (targets[i] == 0) { printk(KERN_INFO DRV_NAME ": %s: adding ARP target %pI4.\n", bond->dev->name, &newtarget); @@ -720,12 +718,16 @@ static ssize_t bonding_store_arp_targets(struct device *d, goto out; } - for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) { + for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) { if (targets[i] == newtarget) { + int j; printk(KERN_INFO DRV_NAME ": %s: removing ARP target %pI4.\n", bond->dev->name, &newtarget); - targets[i] = 0; + for (j = i; (j < (BOND_MAX_ARP_TARGETS-1)) && targets[j+1]; j++) + targets[j] = targets[j+1]; + + targets[j] = 0; done = 1; } 
} diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c index c9806c58b2f..7a18dc7e5c7 100644 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c @@ -257,6 +257,23 @@ struct transceiver_ops transceivers[] = struct transceiver_ops* transceiver = &transceivers[0]; +static const struct net_device_ops e100_netdev_ops = { + .ndo_open = e100_open, + .ndo_stop = e100_close, + .ndo_start_xmit = e100_send_packet, + .ndo_tx_timeout = e100_tx_timeout, + .ndo_get_stats = e100_get_stats, + .ndo_set_multicast_list = set_multicast_list, + .ndo_do_ioctl = e100_ioctl, + .ndo_set_mac_address = e100_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_config = e100_set_config, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e100_netpoll, +#endif +}; + #define tx_done(dev) (*R_DMA_CH0_CMD == 0) /* @@ -300,19 +317,8 @@ etrax_ethernet_init(void) /* fill in our handlers so the network layer can talk to us in the future */ - dev->open = e100_open; - dev->hard_start_xmit = e100_send_packet; - dev->stop = e100_close; - dev->get_stats = e100_get_stats; - dev->set_multicast_list = set_multicast_list; - dev->set_mac_address = e100_set_mac_address; dev->ethtool_ops = &e100_ethtool_ops; - dev->do_ioctl = e100_ioctl; - dev->set_config = e100_set_config; - dev->tx_timeout = e100_tx_timeout; -#ifdef CONFIG_NET_POLL_CONTROLLER - dev->poll_controller = e100_netpoll; -#endif + dev->netdev_ops = &e100_netdev_ops; spin_lock_init(&np->lock); spin_lock_init(&np->led_lock); diff --git a/drivers/net/declance.c b/drivers/net/declance.c index 861c867fca8..b62405a6918 100644 --- a/drivers/net/declance.c +++ b/drivers/net/declance.c @@ -1010,6 +1010,17 @@ static void lance_set_multicast_retry(unsigned long _opaque) lance_set_multicast(dev); } +static const struct net_device_ops lance_netdev_ops = { + .ndo_open = lance_open, + .ndo_stop = lance_close, + .ndo_start_xmit = lance_start_xmit, + .ndo_tx_timeout = lance_tx_timeout, + .ndo_set_multicast_list = lance_set_multicast, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, +}; + static int __init dec_lance_probe(struct device *bdev, const int type) { static unsigned version_printed; @@ -1223,12 +1234,8 @@ static int __init dec_lance_probe(struct device *bdev, const int type) printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq); - dev->open = &lance_open; - dev->stop = &lance_close; - dev->hard_start_xmit = &lance_start_xmit; - dev->tx_timeout = &lance_tx_timeout; + dev->netdev_ops = &lance_netdev_ops; dev->watchdog_timeo = 5*HZ; - dev->set_multicast_list = &lance_set_multicast; /* lp->ll is the location of the registers for lance card */ lp->ll = ll; diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index ddc5c533e89..ef12931d302 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -156,8 +156,8 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid); static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid); static void e1000_restore_vlan(struct e1000_adapter *adapter); -static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); #ifdef CONFIG_PM +static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); static int e1000_resume(struct pci_dev *pdev); #endif static void e1000_shutdown(struct pci_dev *pdev); @@ -3834,7 +3834,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, struct e1000_buffer 
*buffer_info; unsigned int i, eop; unsigned int count = 0; - bool cleaned; + bool cleaned = false; unsigned int total_tx_bytes=0, total_tx_packets=0; i = tx_ring->next_to_clean; @@ -4601,7 +4601,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) return 0; } -static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) +static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); @@ -4664,22 +4664,18 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) ew32(WUC, E1000_WUC_PME_EN); ew32(WUFC, wufc); - pci_enable_wake(pdev, PCI_D3hot, 1); - pci_enable_wake(pdev, PCI_D3cold, 1); } else { ew32(WUC, 0); ew32(WUFC, 0); - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); } e1000_release_manageability(adapter); + *enable_wake = !!wufc; + /* make sure adapter isn't asleep if manageability is enabled */ - if (adapter->en_mng_pt) { - pci_enable_wake(pdev, PCI_D3hot, 1); - pci_enable_wake(pdev, PCI_D3cold, 1); - } + if (adapter->en_mng_pt) + *enable_wake = true; if (hw->phy_type == e1000_phy_igp_3) e1000_phy_powerdown_workaround(hw); @@ -4693,12 +4689,29 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - return 0; } #ifdef CONFIG_PM +static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) +{ + int retval; + bool wake; + + retval = __e1000_shutdown(pdev, &wake); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + + return 0; +} + static int e1000_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -4753,7 +4766,14 @@ static int e1000_resume(struct pci_dev *pdev) static void e1000_shutdown(struct pci_dev *pdev) { - e1000_suspend(pdev, PMSG_SUSPEND); + bool wake; + + __e1000_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } } #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 409b58cad0e..1693ed116b1 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c @@ -621,7 +621,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) struct e1000_buffer *buffer_info; unsigned int i, eop; unsigned int count = 0; - bool cleaned; + bool cleaned = false; unsigned int total_tx_bytes = 0, total_tx_packets = 0; i = tx_ring->next_to_clean; @@ -4346,7 +4346,7 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) } } -static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) +static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) { struct net_device *netdev = pci_get_drvdata(pdev); struct e1000_adapter *adapter = netdev_priv(netdev); @@ -4409,20 +4409,16 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) ew32(WUC, E1000_WUC_PME_EN); ew32(WUFC, wufc); - pci_enable_wake(pdev, PCI_D3hot, 1); - pci_enable_wake(pdev, PCI_D3cold, 1); } else { ew32(WUC, 0); ew32(WUFC, 0); - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_enable_wake(pdev, PCI_D3cold, 0); } + *enable_wake = !!wufc; + /* make sure adapter isn't asleep if manageability is enabled */ - if (adapter->flags & FLAG_MNG_PT_ENABLED) { - pci_enable_wake(pdev, PCI_D3hot, 1); - 
pci_enable_wake(pdev, PCI_D3cold, 1); - } + if (adapter->flags & FLAG_MNG_PT_ENABLED) + *enable_wake = true; if (adapter->hw.phy.type == e1000_phy_igp_3) e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); @@ -4435,6 +4431,26 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) pci_disable_device(pdev); + return 0; +} + +static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake) +{ + if (sleep && wake) { + pci_prepare_to_sleep(pdev); + return; + } + + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); +} + +static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep, + bool wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + /* * The pci-e switch on some quad port adapters will report a * correctable error when the MAC transitions from D0 to D3. To @@ -4450,14 +4466,12 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, (devctl & ~PCI_EXP_DEVCTL_CERE)); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); + e1000_power_off(pdev, sleep, wake); pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl); } else { - pci_set_power_state(pdev, pci_choose_state(pdev, state)); + e1000_power_off(pdev, sleep, wake); } - - return 0; } static void e1000e_disable_l1aspm(struct pci_dev *pdev) @@ -4486,6 +4500,18 @@ static void e1000e_disable_l1aspm(struct pci_dev *pdev) } #ifdef CONFIG_PM +static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) +{ + int retval; + bool wake; + + retval = __e1000_shutdown(pdev, &wake); + if (!retval) + e1000_complete_shutdown(pdev, true, wake); + + return retval; +} + static int e1000_resume(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -4549,7 +4575,12 @@ static int e1000_resume(struct pci_dev *pdev) static void e1000_shutdown(struct pci_dev *pdev) { - e1000_suspend(pdev, PMSG_SUSPEND); + bool wake = false; + + __e1000_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) + e1000_complete_shutdown(pdev, false, wake); } #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index ac0c5b438e0..604c844d076 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -3080,7 +3080,8 @@ static const struct net_device_ops ehea_netdev_ops = { .ndo_change_mtu = ehea_change_mtu, .ndo_vlan_rx_register = ehea_vlan_rx_register, .ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid + .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid, + .ndo_tx_timeout = ehea_tx_watchdog, }; struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, @@ -3142,7 +3143,6 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER | NETIF_F_LLTX; - dev->tx_timeout = &ehea_tx_watchdog; dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; INIT_WORK(&port->reset_task, ehea_reset_port); diff --git a/drivers/net/eql.c b/drivers/net/eql.c index 51ead7941f8..5210bb1027c 100644 --- a/drivers/net/eql.c +++ b/drivers/net/eql.c @@ -542,6 +542,8 @@ static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp) } spin_unlock_bh(&eql->queue.lock); + dev_put(slave_dev); + return ret; } diff --git a/drivers/net/fec.c b/drivers/net/fec.c index a515acccc61..682e7f0b558 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c 
@@ -1240,6 +1240,7 @@ static void __inline__ fec_phy_ack_intr(void) icrp = (volatile unsigned long *) (MCF_MBAR + MCFSIM_ICR1); *icrp = 0x0d000000; } +#endif #ifdef CONFIG_M5272 static void __inline__ fec_get_mac(struct net_device *dev) diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index d37465020bc..11d5db16ed9 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -3745,14 +3745,14 @@ static int nv_napi_poll(struct napi_struct *napi, int budget) mod_timer(&np->nic_poll, jiffies + POLL_WAIT); } spin_unlock_irqrestore(&np->lock, flags); - __napi_complete(napi); + napi_complete(napi); return rx_work; } if (rx_work < budget) { /* re-enable interrupts (msix not enabled in napi) */ - __napi_complete(napi); + napi_complete(napi); writel(np->irqmask, base + NvRegIrqMask); } diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index b037ce9857b..a9cbc3191a2 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c @@ -1019,6 +1019,22 @@ out_put_phy: #define IS_FEC(match) 0 #endif +static const struct net_device_ops fs_enet_netdev_ops = { + .ndo_open = fs_enet_open, + .ndo_stop = fs_enet_close, + .ndo_get_stats = fs_enet_get_stats, + .ndo_start_xmit = fs_enet_start_xmit, + .ndo_tx_timeout = fs_timeout, + .ndo_set_multicast_list = fs_set_multicast_list, + .ndo_do_ioctl = fs_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = fs_enet_netpoll, +#endif +}; + static int __devinit fs_enet_probe(struct of_device *ofdev, const struct of_device_id *match) { @@ -1093,22 +1109,13 @@ static int __devinit fs_enet_probe(struct of_device *ofdev, fep->tx_ring = fpi->tx_ring; fep->rx_ring = fpi->rx_ring; - ndev->open = fs_enet_open; - ndev->hard_start_xmit = fs_enet_start_xmit; - ndev->tx_timeout = fs_timeout; + ndev->netdev_ops = &fs_enet_netdev_ops; ndev->watchdog_timeo = 2 * HZ; - ndev->stop = fs_enet_close; - ndev->get_stats = fs_enet_get_stats; - ndev->set_multicast_list = fs_set_multicast_list; -#ifdef CONFIG_NET_POLL_CONTROLLER - ndev->poll_controller = fs_enet_netpoll; -#endif if (fpi->use_napi) netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, fpi->napi_weight); ndev->ethtool_ops = &fs_ethtool_ops; - ndev->do_ioctl = fs_ioctl; init_timer(&fep->phy_timer_list); diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 65f55877be9..b2c49679bba 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -1583,8 +1583,10 @@ static void gfar_reset_task(struct work_struct *work) struct net_device *dev = priv->ndev; if (dev->flags & IFF_UP) { + netif_stop_queue(dev); stop_gfar(dev); startup_gfar(dev); + netif_start_queue(dev); } netif_tx_schedule_all(dev); diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c index 77e4b5b52fc..806533c831c 100644 --- a/drivers/net/ibm_newemac/core.c +++ b/drivers/net/ibm_newemac/core.c @@ -2686,6 +2686,32 @@ static int __devinit emac_init_config(struct emac_instance *dev) return 0; } +static const struct net_device_ops emac_netdev_ops = { + .ndo_open = emac_open, + .ndo_stop = emac_close, + .ndo_get_stats = emac_stats, + .ndo_set_multicast_list = emac_set_multicast_list, + .ndo_do_ioctl = emac_ioctl, + .ndo_tx_timeout = emac_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_start_xmit = emac_start_xmit, + .ndo_change_mtu = eth_change_mtu, +}; + +static const struct 
net_device_ops emac_gige_netdev_ops = { + .ndo_open = emac_open, + .ndo_stop = emac_close, + .ndo_get_stats = emac_stats, + .ndo_set_multicast_list = emac_set_multicast_list, + .ndo_do_ioctl = emac_ioctl, + .ndo_tx_timeout = emac_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_start_xmit = emac_start_xmit_sg, + .ndo_change_mtu = emac_change_mtu, +}; + static int __devinit emac_probe(struct of_device *ofdev, const struct of_device_id *match) { @@ -2827,23 +2853,14 @@ static int __devinit emac_probe(struct of_device *ofdev, if (err != 0) goto err_detach_tah; - /* Fill in the driver function table */ - ndev->open = &emac_open; if (dev->tah_dev) ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; - ndev->tx_timeout = &emac_tx_timeout; ndev->watchdog_timeo = 5 * HZ; - ndev->stop = &emac_close; - ndev->get_stats = &emac_stats; - ndev->set_multicast_list = &emac_set_multicast_list; - ndev->do_ioctl = &emac_ioctl; if (emac_phy_supports_gige(dev->phy_mode)) { - ndev->hard_start_xmit = &emac_start_xmit_sg; - ndev->change_mtu = &emac_change_mtu; + ndev->netdev_ops = &emac_gige_netdev_ops; dev->commac.ops = &emac_commac_sg_ops; - } else { - ndev->hard_start_xmit = &emac_start_xmit; - } + } else + ndev->netdev_ops = &emac_netdev_ops; SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops); netif_carrier_off(ndev); diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c index f4c315b5a90..472f3f12484 100644 --- a/drivers/net/igb/e1000_mac.c +++ b/drivers/net/igb/e1000_mac.c @@ -111,7 +111,7 @@ void igb_clear_vfta(struct e1000_hw *hw) * Writes value at the given offset in the register array which stores * the VLAN filter table. **/ -void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) { array_wr32(E1000_VFTA, offset, value); wrfl(); diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h index a34de526963..1d690b4c9ae 100644 --- a/drivers/net/igb/e1000_mac.h +++ b/drivers/net/igb/e1000_mac.h @@ -66,7 +66,6 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); s32 igb_check_alt_mac_addr(struct e1000_hw *hw); void igb_reset_adaptive(struct e1000_hw *hw); void igb_update_adaptive(struct e1000_hw *hw); -void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); bool igb_enable_mng_pass_thru(struct e1000_hw *hw); diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c index fe71c7ddaa0..840782fb573 100644 --- a/drivers/net/igb/e1000_mbx.c +++ b/drivers/net/igb/e1000_mbx.c @@ -188,7 +188,7 @@ out: * returns SUCCESS if it successfully received a message notification and * copied it into the receive buffer. 
**/ -s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = -E1000_ERR_MBX; @@ -214,7 +214,7 @@ out: * returns SUCCESS if it successfully copied message into the buffer and * received an ack to that message within delay * timeout period **/ -s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) { struct e1000_mbx_info *mbx = &hw->mbx; s32 ret_val = 0; @@ -232,19 +232,6 @@ out: return ret_val; } -/** - * e1000_init_mbx_ops_generic - Initialize NVM function pointers - * @hw: pointer to the HW structure - * - * Setups up the function pointers to no-op functions - **/ -void e1000_init_mbx_ops_generic(struct e1000_hw *hw) -{ - struct e1000_mbx_info *mbx = &hw->mbx; - mbx->ops.read_posted = igb_read_posted_mbx; - mbx->ops.write_posted = igb_write_posted_mbx; -} - static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask) { u32 mbvficr = rd32(E1000_MBVFICR); diff --git a/drivers/net/igb/e1000_mbx.h b/drivers/net/igb/e1000_mbx.h index 6ec9890a8f7..ebc02ea3f19 100644 --- a/drivers/net/igb/e1000_mbx.h +++ b/drivers/net/igb/e1000_mbx.h @@ -67,8 +67,6 @@ s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16); s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16); -s32 igb_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16); -s32 igb_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16); s32 igb_check_for_msg(struct e1000_hw *, u16); s32 igb_check_for_ack(struct e1000_hw *, u16); s32 igb_check_for_rst(struct e1000_hw *, u16); diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index 6b0697c565b..08c801490c7 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -152,14 +152,13 @@ static struct notifier_block dca_notifier = { /* for netdump / net console */ static void igb_netpoll(struct net_device *); #endif - #ifdef CONFIG_PCI_IOV -static ssize_t igb_set_num_vfs(struct device *, struct device_attribute *, - const char *, size_t); -static ssize_t igb_show_num_vfs(struct device *, struct device_attribute *, - char *); -DEVICE_ATTR(num_vfs, S_IRUGO | S_IWUSR, igb_show_num_vfs, igb_set_num_vfs); -#endif +static unsigned int max_vfs = 0; +module_param(max_vfs, uint, 0); +MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate " + "per physical function"); +#endif /* CONFIG_PCI_IOV */ + static pci_ers_result_t igb_io_error_detected(struct pci_dev *, pci_channel_state_t); static pci_ers_result_t igb_io_slot_reset(struct pci_dev *); @@ -671,6 +670,21 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter) /* If we can't do MSI-X, try MSI */ msi_only: +#ifdef CONFIG_PCI_IOV + /* disable SR-IOV for non MSI-X configurations */ + if (adapter->vf_data) { + struct e1000_hw *hw = &adapter->hw; + /* disable iov and allow time for transactions to clear */ + pci_disable_sriov(adapter->pdev); + msleep(500); + + kfree(adapter->vf_data); + adapter->vf_data = NULL; + wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); + msleep(100); + dev_info(&adapter->pdev->dev, "IOV Disabled\n"); + } +#endif adapter->num_rx_queues = 1; adapter->num_tx_queues = 1; if (!pci_enable_msi(adapter->pdev)) @@ -1238,6 +1252,46 @@ static int __devinit igb_probe(struct pci_dev *pdev, if (err) goto err_sw_init; +#ifdef CONFIG_PCI_IOV + /* since iov functionality isn't critical to base device function we + * can 
accept failure. If it fails we don't allow iov to be enabled */ + if (hw->mac.type == e1000_82576) { + /* 82576 supports a maximum of 7 VFs in addition to the PF */ + unsigned int num_vfs = (max_vfs > 7) ? 7 : max_vfs; + int i; + unsigned char mac_addr[ETH_ALEN]; + + if (num_vfs) { + adapter->vf_data = kcalloc(num_vfs, + sizeof(struct vf_data_storage), + GFP_KERNEL); + if (!adapter->vf_data) { + dev_err(&pdev->dev, + "Could not allocate VF private data - " + "IOV enable failed\n"); + } else { + err = pci_enable_sriov(pdev, num_vfs); + if (!err) { + adapter->vfs_allocated_count = num_vfs; + dev_info(&pdev->dev, + "%d vfs allocated\n", + num_vfs); + for (i = 0; + i < adapter->vfs_allocated_count; + i++) { + random_ether_addr(mac_addr); + igb_set_vf_mac(adapter, i, + mac_addr); + } + } else { + kfree(adapter->vf_data); + adapter->vf_data = NULL; + } + } + } + } + +#endif /* setup the private structure */ err = igb_sw_init(adapter); if (err) @@ -1397,19 +1451,6 @@ static int __devinit igb_probe(struct pci_dev *pdev, if (err) goto err_register; -#ifdef CONFIG_PCI_IOV - /* since iov functionality isn't critical to base device function we - * can accept failure. If it fails we don't allow iov to be enabled */ - if (hw->mac.type == e1000_82576) { - err = pci_enable_sriov(pdev, 0); - if (!err) - err = device_create_file(&netdev->dev, - &dev_attr_num_vfs); - if (err) - dev_err(&pdev->dev, "Failed to initialize IOV\n"); - } - -#endif #ifdef CONFIG_IGB_DCA if (dca_add_requester(&pdev->dev) == 0) { adapter->flags |= IGB_FLAG_DCA_ENABLED; @@ -5422,89 +5463,4 @@ static void igb_vmm_control(struct igb_adapter *adapter) igb_vmdq_set_replication_pf(hw, true); } -#ifdef CONFIG_PCI_IOV -static ssize_t igb_show_num_vfs(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct igb_adapter *adapter = netdev_priv(to_net_dev(dev)); - - return sprintf(buf, "%d\n", adapter->vfs_allocated_count); -} - -static ssize_t igb_set_num_vfs(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct net_device *netdev = to_net_dev(dev); - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - struct pci_dev *pdev = adapter->pdev; - unsigned int num_vfs, i; - unsigned char mac_addr[ETH_ALEN]; - int err; - - sscanf(buf, "%u", &num_vfs); - - if (num_vfs > 7) - num_vfs = 7; - - /* value unchanged do nothing */ - if (num_vfs == adapter->vfs_allocated_count) - return count; - - if (netdev->flags & IFF_UP) - igb_close(netdev); - - igb_reset_interrupt_capability(adapter); - igb_free_queues(adapter); - adapter->tx_ring = NULL; - adapter->rx_ring = NULL; - adapter->vfs_allocated_count = 0; - - /* reclaim resources allocated to VFs since we are changing count */ - if (adapter->vf_data) { - /* disable iov and allow time for transactions to clear */ - pci_disable_sriov(pdev); - msleep(500); - - kfree(adapter->vf_data); - adapter->vf_data = NULL; - wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ); - msleep(100); - dev_info(&pdev->dev, "IOV Disabled\n"); - } - - if (num_vfs) { - adapter->vf_data = kcalloc(num_vfs, - sizeof(struct vf_data_storage), - GFP_KERNEL); - if (!adapter->vf_data) { - dev_err(&pdev->dev, "Could not allocate VF private " - "data - IOV enable failed\n"); - } else { - err = pci_enable_sriov(pdev, num_vfs); - if (!err) { - adapter->vfs_allocated_count = num_vfs; - dev_info(&pdev->dev, "%d vfs allocated\n", num_vfs); - for (i = 0; i < adapter->vfs_allocated_count; i++) { - random_ether_addr(mac_addr); - igb_set_vf_mac(adapter, i, 
mac_addr); - } - } else { - kfree(adapter->vf_data); - adapter->vf_data = NULL; - } - } - } - - igb_set_interrupt_capability(adapter); - igb_alloc_queues(adapter); - igb_reset(adapter); - - if (netdev->flags & IFF_UP) - igb_open(netdev); - - return count; -} -#endif /* CONFIG_PCI_IOV */ /* igb_main.c */ diff --git a/drivers/net/igbvf/Makefile b/drivers/net/igbvf/Makefile new file mode 100644 index 00000000000..c2f150d8f2d --- /dev/null +++ b/drivers/net/igbvf/Makefile @@ -0,0 +1,38 @@ +################################################################################ +# +# Intel(R) 82576 Virtual Function Linux driver +# Copyright(c) 2009 Intel Corporation. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Contact Information: +# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> +# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 +# +################################################################################ + +# +# Makefile for the Intel(R) 82576 VF ethernet driver +# + +obj-$(CONFIG_IGBVF) += igbvf.o + +igbvf-objs := vf.o \ + mbx.o \ + ethtool.o \ + netdev.o + diff --git a/drivers/net/igbvf/defines.h b/drivers/net/igbvf/defines.h new file mode 100644 index 00000000000..88a47537518 --- /dev/null +++ b/drivers/net/igbvf/defines.h @@ -0,0 +1,125 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 1999 - 2009 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_DEFINES_H_ +#define _E1000_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* IVAR valid bit */ +#define E1000_IVAR_VALID 0x80 + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ + +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +/* Device Control */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ + +#define MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* 802.1q VLAN Packet Size */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ + +/* Error Codes */ +#define E1000_SUCCESS 0 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_MBX 15 + +#ifndef ETH_ADDR_LEN +#define ETH_ADDR_LEN 6 +#endif + +/* SRRCTL bit definitions */ +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 + +#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 + +/* Additional Descriptor Control definitions */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Tx Queue */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */ + +/* Direct Cache Access (DCA) definitions */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN 
(1 << 11) /* Tx Desc writeback RO bit */ + +#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ + +#endif /* _E1000_DEFINES_H_ */ diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c new file mode 100644 index 00000000000..1dcaa690531 --- /dev/null +++ b/drivers/net/igbvf/ethtool.c @@ -0,0 +1,540 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 2009 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* ethtool support for igbvf */ + +#include <linux/netdevice.h> +#include <linux/ethtool.h> +#include <linux/pci.h> +#include <linux/vmalloc.h> +#include <linux/delay.h> + +#include "igbvf.h" +#include <linux/if_vlan.h> + + +struct igbvf_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; + int base_stat_offset; +}; + +#define IGBVF_STAT(current, base) \ + sizeof(((struct igbvf_adapter *)0)->current), \ + offsetof(struct igbvf_adapter, current), \ + offsetof(struct igbvf_adapter, base) + +static const struct igbvf_stats igbvf_gstrings_stats[] = { + { "rx_packets", IGBVF_STAT(stats.gprc, stats.base_gprc) }, + { "tx_packets", IGBVF_STAT(stats.gptc, stats.base_gptc) }, + { "rx_bytes", IGBVF_STAT(stats.gorc, stats.base_gorc) }, + { "tx_bytes", IGBVF_STAT(stats.gotc, stats.base_gotc) }, + { "multicast", IGBVF_STAT(stats.mprc, stats.base_mprc) }, + { "lbrx_bytes", IGBVF_STAT(stats.gorlbc, stats.base_gorlbc) }, + { "lbrx_packets", IGBVF_STAT(stats.gprlbc, stats.base_gprlbc) }, + { "tx_restart_queue", IGBVF_STAT(restart_queue, zero_base) }, + { "rx_long_byte_count", IGBVF_STAT(stats.gorc, stats.base_gorc) }, + { "rx_csum_offload_good", IGBVF_STAT(hw_csum_good, zero_base) }, + { "rx_csum_offload_errors", IGBVF_STAT(hw_csum_err, zero_base) }, + { "rx_header_split", IGBVF_STAT(rx_hdr_split, zero_base) }, + { "alloc_rx_buff_failed", IGBVF_STAT(alloc_rx_buff_failed, zero_base) }, +}; + +#define IGBVF_GLOBAL_STATS_LEN ARRAY_SIZE(igbvf_gstrings_stats) + +static const char igbvf_gstrings_test[][ETH_GSTRING_LEN] = { + "Link test (on/offline)" +}; + +#define IGBVF_TEST_LEN ARRAY_SIZE(igbvf_gstrings_test) + +static int igbvf_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 status; + + ecmd->supported = SUPPORTED_1000baseT_Full; + + ecmd->advertising = ADVERTISED_1000baseT_Full; + + ecmd->port = -1; + ecmd->transceiver = XCVR_DUMMY1; + + status = er32(STATUS); + if (status & 
E1000_STATUS_LU) { + if (status & E1000_STATUS_SPEED_1000) + ecmd->speed = 1000; + else if (status & E1000_STATUS_SPEED_100) + ecmd->speed = 100; + else + ecmd->speed = 10; + + if (status & E1000_STATUS_FD) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ecmd->speed = -1; + ecmd->duplex = -1; + } + + ecmd->autoneg = AUTONEG_DISABLE; + + return 0; +} + +static u32 igbvf_get_link(struct net_device *netdev) +{ + return netif_carrier_ok(netdev); +} + +static int igbvf_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + return -EOPNOTSUPP; +} + +static void igbvf_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + return; +} + +static int igbvf_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + return -EOPNOTSUPP; +} + +static u32 igbvf_get_tx_csum(struct net_device *netdev) +{ + return ((netdev->features & NETIF_F_IP_CSUM) != 0); +} + +static int igbvf_set_tx_csum(struct net_device *netdev, u32 data) +{ + if (data) + netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + else + netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + return 0; +} + +static int igbvf_set_tso(struct net_device *netdev, u32 data) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + int i; + struct net_device *v_netdev; + + if (data) { + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + } else { + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; + /* disable TSO on all VLANs if they're present */ + if (!adapter->vlgrp) + goto tso_out; + for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { + v_netdev = vlan_group_get_device(adapter->vlgrp, i); + if (!v_netdev) + continue; + + v_netdev->features &= ~NETIF_F_TSO; + v_netdev->features &= ~NETIF_F_TSO6; + vlan_group_set_device(adapter->vlgrp, i, v_netdev); + } + } + +tso_out: + dev_info(&adapter->pdev->dev, "TSO is %s\n", + data ? 
"Enabled" : "Disabled"); + adapter->flags |= FLAG_TSO_FORCE; + return 0; +} + +static u32 igbvf_get_msglevel(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void igbvf_set_msglevel(struct net_device *netdev, u32 data) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int igbvf_get_regs_len(struct net_device *netdev) +{ +#define IGBVF_REGS_LEN 8 + return IGBVF_REGS_LEN * sizeof(u32); +} + +static void igbvf_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u8 revision_id; + + memset(p, 0, IGBVF_REGS_LEN * sizeof(u32)); + + pci_read_config_byte(adapter->pdev, PCI_REVISION_ID, &revision_id); + + regs->version = (1 << 24) | (revision_id << 16) | adapter->pdev->device; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RDLEN(0)); + regs_buff[3] = er32(RDH(0)); + regs_buff[4] = er32(RDT(0)); + + regs_buff[5] = er32(TDLEN(0)); + regs_buff[6] = er32(TDH(0)); + regs_buff[7] = er32(TDT(0)); +} + +static int igbvf_get_eeprom_len(struct net_device *netdev) +{ + return 0; +} + +static int igbvf_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + return -EOPNOTSUPP; +} + +static int igbvf_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + return -EOPNOTSUPP; +} + +static void igbvf_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + char firmware_version[32] = "N/A"; + + strncpy(drvinfo->driver, igbvf_driver_name, 32); + strncpy(drvinfo->version, igbvf_driver_version, 32); + strncpy(drvinfo->fw_version, firmware_version, 32); + strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + drvinfo->regdump_len = igbvf_get_regs_len(netdev); + drvinfo->eedump_len = igbvf_get_eeprom_len(netdev); +} + +static void igbvf_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct igbvf_ring *tx_ring = adapter->tx_ring; + struct igbvf_ring *rx_ring = adapter->rx_ring; + + ring->rx_max_pending = IGBVF_MAX_RXD; + ring->tx_max_pending = IGBVF_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = rx_ring->count; + ring->tx_pending = tx_ring->count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} + +static int igbvf_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct igbvf_ring *temp_ring; + int err; + u32 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_rx_count = max(ring->rx_pending, (u32)IGBVF_MIN_RXD); + new_rx_count = min(new_rx_count, (u32)IGBVF_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = max(ring->tx_pending, (u32)IGBVF_MIN_TXD); + new_tx_count = min(new_tx_count, (u32)IGBVF_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring->count) && + (new_rx_count == adapter->rx_ring->count)) { + /* nothing to do */ + return 0; + } + + temp_ring = vmalloc(sizeof(struct igbvf_ring)); + if (!temp_ring) + return -ENOMEM; + + while (test_and_set_bit(__IGBVF_RESETTING, 
&adapter->state)) + msleep(1); + + if (netif_running(adapter->netdev)) + igbvf_down(adapter); + + /* + * We can't just free everything and then setup again, + * because the ISRs in MSI-X mode get passed pointers + * to the tx and rx ring structs. + */ + if (new_tx_count != adapter->tx_ring->count) { + memcpy(temp_ring, adapter->tx_ring, sizeof(struct igbvf_ring)); + + temp_ring->count = new_tx_count; + err = igbvf_setup_tx_resources(adapter, temp_ring); + if (err) + goto err_setup; + + igbvf_free_tx_resources(adapter->tx_ring); + + memcpy(adapter->tx_ring, temp_ring, sizeof(struct igbvf_ring)); + } + + if (new_rx_count != adapter->rx_ring->count) { + memcpy(temp_ring, adapter->rx_ring, sizeof(struct igbvf_ring)); + + temp_ring->count = new_rx_count; + err = igbvf_setup_rx_resources(adapter, temp_ring); + if (err) + goto err_setup; + + igbvf_free_rx_resources(adapter->rx_ring); + + memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring)); + } + + err = 0; +err_setup: + if (netif_running(adapter->netdev)) + igbvf_up(adapter); + + clear_bit(__IGBVF_RESETTING, &adapter->state); + vfree(temp_ring); + return err; +} + +static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + *data = 0; + + hw->mac.ops.check_for_link(hw); + + if (!(er32(STATUS) & E1000_STATUS_LU)) + *data = 1; + + return *data; +} + +static int igbvf_get_self_test_count(struct net_device *netdev) +{ + return IGBVF_TEST_LEN; +} + +static int igbvf_get_stats_count(struct net_device *netdev) +{ + return IGBVF_GLOBAL_STATS_LEN; +} + +static void igbvf_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + set_bit(__IGBVF_TESTING, &adapter->state); + + /* + * Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (igbvf_link_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + clear_bit(__IGBVF_TESTING, &adapter->state); + msleep_interruptible(4 * 1000); +} + +static void igbvf_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + wol->supported = 0; + wol->wolopts = 0; + + return; +} + +static int igbvf_set_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + return -EOPNOTSUPP; +} + +static int igbvf_phys_id(struct net_device *netdev, u32 data) +{ + return 0; +} + +static int igbvf_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + if (adapter->itr_setting <= 3) + ec->rx_coalesce_usecs = adapter->itr_setting; + else + ec->rx_coalesce_usecs = adapter->itr_setting >> 2; + + return 0; +} + +static int igbvf_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 3) && + (ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + /* convert to rate of irq's per second */ + if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) { + adapter->itr = IGBVF_START_ITR; + adapter->itr_setting = ec->rx_coalesce_usecs; + } else { + adapter->itr = ec->rx_coalesce_usecs << 2; + adapter->itr_setting = adapter->itr; + } + + writel(adapter->itr, + hw->hw_addr + adapter->rx_ring[0].itr_register); + + return 0; +} + +static int igbvf_nway_reset(struct net_device *netdev) +{ + struct igbvf_adapter 
*adapter = netdev_priv(netdev); + if (netif_running(netdev)) + igbvf_reinit_locked(adapter); + return 0; +} + + +static void igbvf_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + int i; + + igbvf_update_stats(adapter); + for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) { + char *p = (char *)adapter + + igbvf_gstrings_stats[i].stat_offset; + char *b = (char *)adapter + + igbvf_gstrings_stats[i].base_stat_offset; + data[i] = ((igbvf_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? (*(u64 *)p - *(u64 *)b) : + (*(u32 *)p - *(u32 *)b)); + } + +} + +static void igbvf_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *igbvf_gstrings_test, sizeof(igbvf_gstrings_test)); + break; + case ETH_SS_STATS: + for (i = 0; i < IGBVF_GLOBAL_STATS_LEN; i++) { + memcpy(p, igbvf_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static const struct ethtool_ops igbvf_ethtool_ops = { + .get_settings = igbvf_get_settings, + .set_settings = igbvf_set_settings, + .get_drvinfo = igbvf_get_drvinfo, + .get_regs_len = igbvf_get_regs_len, + .get_regs = igbvf_get_regs, + .get_wol = igbvf_get_wol, + .set_wol = igbvf_set_wol, + .get_msglevel = igbvf_get_msglevel, + .set_msglevel = igbvf_set_msglevel, + .nway_reset = igbvf_nway_reset, + .get_link = igbvf_get_link, + .get_eeprom_len = igbvf_get_eeprom_len, + .get_eeprom = igbvf_get_eeprom, + .set_eeprom = igbvf_set_eeprom, + .get_ringparam = igbvf_get_ringparam, + .set_ringparam = igbvf_set_ringparam, + .get_pauseparam = igbvf_get_pauseparam, + .set_pauseparam = igbvf_set_pauseparam, + .get_tx_csum = igbvf_get_tx_csum, + .set_tx_csum = igbvf_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, + .get_tso = ethtool_op_get_tso, + .set_tso = igbvf_set_tso, + .self_test = igbvf_diag_test, + .get_strings = igbvf_get_strings, + .phys_id = igbvf_phys_id, + .get_ethtool_stats = igbvf_get_ethtool_stats, + .self_test_count = igbvf_get_self_test_count, + .get_stats_count = igbvf_get_stats_count, + .get_coalesce = igbvf_get_coalesce, + .set_coalesce = igbvf_set_coalesce, +}; + +void igbvf_set_ethtool_ops(struct net_device *netdev) +{ + /* have to "undeclare" const on this struct to remove warnings */ + SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igbvf_ethtool_ops); +} diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h new file mode 100644 index 00000000000..4bff35e4687 --- /dev/null +++ b/drivers/net/igbvf/igbvf.h @@ -0,0 +1,332 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 2009 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _IGBVF_H_
+#define _IGBVF_H_
+
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+
+
+#include "vf.h"
+
+/* Forward declarations */
+struct igbvf_info;
+struct igbvf_adapter;
+
+/* Interrupt defines */
+#define IGBVF_START_ITR 648 /* ~6000 ints/sec */
+
+/* Interrupt modes, as used by the IntMode parameter */
+#define IGBVF_INT_MODE_LEGACY 0
+#define IGBVF_INT_MODE_MSI 1
+#define IGBVF_INT_MODE_MSIX 2
+
+/* Tx/Rx descriptor defines */
+#define IGBVF_DEFAULT_TXD 256
+#define IGBVF_MAX_TXD 4096
+#define IGBVF_MIN_TXD 80
+
+#define IGBVF_DEFAULT_RXD 256
+#define IGBVF_MAX_RXD 4096
+#define IGBVF_MIN_RXD 80
+
+#define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */
+#define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */
+
+/* RX descriptor control thresholds.
+ * PTHRESH - MAC will consider prefetch if it has fewer than this number of
+ *           descriptors available in its onboard memory.
+ *           Setting this to 0 disables RX descriptor prefetch.
+ * HTHRESH - MAC will only prefetch if there are at least this many descriptors
+ *           available in host memory.
+ *           If PTHRESH is 0, this should also be 0.
+ * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
+ *           descriptors until either it has this many to write back, or the
+ *           ITR timer expires.
+ */
+#define IGBVF_RX_PTHRESH 16
+#define IGBVF_RX_HTHRESH 8
+#define IGBVF_RX_WTHRESH 1
+
+/* this is the size past which hardware will drop packets when setting LPE=0 */
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+
+#define IGBVF_FC_PAUSE_TIME 0x0680 /* 858 usec */
+
+/* How many Tx Descriptors do we need to call netif_wake_queue ? */
+#define IGBVF_TX_QUEUE_WAKE 32
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define IGBVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+#define AUTO_ALL_MODES 0
+#define IGBVF_EEPROM_APME 0x0400
+
+#define IGBVF_MNG_VLAN_NONE (-1)
+
+/* Number of packet split data buffers (not including the header buffer) */
+#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
+
+enum igbvf_boards {
+	board_vf,
+};
+
+struct igbvf_queue_stats {
+	u64 packets;
+	u64 bytes;
+};
+
+/*
+ * wrappers around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct igbvf_buffer {
+	dma_addr_t dma;
+	struct sk_buff *skb;
+	union {
+		/* Tx */
+		struct {
+			unsigned long time_stamp;
+			u16 length;
+			u16 next_to_watch;
+		};
+		/* Rx */
+		struct {
+			struct page *page;
+			u64 page_dma;
+			unsigned int page_offset;
+		};
+	};
+	struct page *page;
+};
+
+union igbvf_desc {
+	union e1000_adv_rx_desc rx_desc;
+	union e1000_adv_tx_desc tx_desc;
+	struct e1000_adv_tx_context_desc tx_context_desc;
+};
+
+struct igbvf_ring {
+	struct igbvf_adapter *adapter; /* backlink */
+	union igbvf_desc *desc; /* pointer to ring memory */
+	dma_addr_t dma; /* phys address of ring */
+	unsigned int size; /* length of ring in bytes */
+	unsigned int count; /* number of desc. in ring */
+
+	u16 next_to_use;
+	u16 next_to_clean;
+
+	u16 head;
+	u16 tail;
+
+	/* array of buffer information structs */
+	struct igbvf_buffer *buffer_info;
+	struct napi_struct napi;
+
+	char name[IFNAMSIZ + 5];
+	u32 eims_value;
+	u32 itr_val;
+	u16 itr_register;
+	int set_itr;
+
+	struct sk_buff *rx_skb_top;
+
+	struct igbvf_queue_stats stats;
+};
+
+/* board specific private data structure */
+struct igbvf_adapter {
+	struct timer_list watchdog_timer;
+	struct timer_list blink_timer;
+
+	struct work_struct reset_task;
+	struct work_struct watchdog_task;
+
+	const struct igbvf_info *ei;
+
+	struct vlan_group *vlgrp;
+	u32 bd_number;
+	u32 rx_buffer_len;
+	u32 polling_interval;
+	u16 mng_vlan_id;
+	u16 link_speed;
+	u16 link_duplex;
+
+	spinlock_t tx_queue_lock; /* prevent concurrent tail updates */
+
+	/* track device up/down/testing state */
+	unsigned long state;
+
+	/* Interrupt Throttle Rate */
+	u32 itr;
+	u32 itr_setting;
+	u16 tx_itr;
+	u16 rx_itr;
+
+	/*
+	 * Tx
+	 */
+	struct igbvf_ring *tx_ring /* One per active queue */
+	____cacheline_aligned_in_smp;
+
+	unsigned long tx_queue_len;
+	unsigned int restart_queue;
+	u32 txd_cmd;
+
+	bool detect_tx_hung;
+	u8 tx_timeout_factor;
+
+	u32 tx_int_delay;
+	u32 tx_abs_int_delay;
+
+	unsigned int total_tx_bytes;
+	unsigned int total_tx_packets;
+	unsigned int total_rx_bytes;
+	unsigned int total_rx_packets;
+
+	/* Tx stats */
+	u32 tx_timeout_count;
+	u32 tx_fifo_head;
+	u32 tx_head_addr;
+	u32 tx_fifo_size;
+	u32 tx_dma_failed;
+
+	/*
+	 * Rx
+	 */
+	struct igbvf_ring *rx_ring;
+
+	u32 rx_int_delay;
+	u32 rx_abs_int_delay;
+
+	/* Rx stats */
+	u64 hw_csum_err;
+	u64 hw_csum_good;
+	u64 rx_hdr_split;
+	u32 alloc_rx_buff_failed;
+	u32 rx_dma_failed;
+
+	unsigned int rx_ps_hdr_size;
+	u32 max_frame_size;
+	u32 min_frame_size;
+
+	/* OS defined structs */
+	struct net_device *netdev;
+	struct pci_dev *pdev;
+	struct net_device_stats net_stats;
+	spinlock_t stats_lock; /* prevent concurrent stats updates */
+
+	/* structs defined in e1000_hw.h */
+	struct e1000_hw hw;
+
+	/* The VF counters don't clear on read so we have to get a base
+	 * count on driver start up and always subtract that base
+	 * on the first update, thus the flag.
+ */ + struct e1000_vf_stats stats; + u64 zero_base; + + struct igbvf_ring test_tx_ring; + struct igbvf_ring test_rx_ring; + u32 test_icr; + + u32 msg_enable; + struct msix_entry *msix_entries; + int int_mode; + u32 eims_enable_mask; + u32 eims_other; + u32 int_counter0; + u32 int_counter1; + + u32 eeprom_wol; + u32 wol; + u32 pba; + + bool fc_autoneg; + + unsigned long led_status; + + unsigned int flags; +}; + +struct igbvf_info { + enum e1000_mac_type mac; + unsigned int flags; + u32 pba; + void (*init_ops)(struct e1000_hw *); + s32 (*get_variants)(struct igbvf_adapter *); +}; + +/* hardware capability, feature, and workaround flags */ +#define FLAG_HAS_HW_VLAN_FILTER (1 << 0) +#define FLAG_HAS_JUMBO_FRAMES (1 << 1) +#define FLAG_MSI_ENABLED (1 << 2) +#define FLAG_RX_CSUM_ENABLED (1 << 3) +#define FLAG_TSO_FORCE (1 << 4) + +#define IGBVF_RX_DESC_ADV(R, i) \ + (&((((R).desc))[i].rx_desc)) +#define IGBVF_TX_DESC_ADV(R, i) \ + (&((((R).desc))[i].tx_desc)) +#define IGBVF_TX_CTXTDESC_ADV(R, i) \ + (&((((R).desc))[i].tx_context_desc)) + +enum igbvf_state_t { + __IGBVF_TESTING, + __IGBVF_RESETTING, + __IGBVF_DOWN +}; + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +extern char igbvf_driver_name[]; +extern const char igbvf_driver_version[]; + +extern void igbvf_check_options(struct igbvf_adapter *); +extern void igbvf_set_ethtool_ops(struct net_device *); + +extern int igbvf_up(struct igbvf_adapter *); +extern void igbvf_down(struct igbvf_adapter *); +extern void igbvf_reinit_locked(struct igbvf_adapter *); +extern int igbvf_setup_rx_resources(struct igbvf_adapter *, struct igbvf_ring *); +extern int igbvf_setup_tx_resources(struct igbvf_adapter *, struct igbvf_ring *); +extern void igbvf_free_rx_resources(struct igbvf_ring *); +extern void igbvf_free_tx_resources(struct igbvf_ring *); +extern void igbvf_update_stats(struct igbvf_adapter *); + +extern unsigned int copybreak; + +#endif /* _IGBVF_H_ */ diff --git a/drivers/net/igbvf/mbx.c b/drivers/net/igbvf/mbx.c new file mode 100644 index 00000000000..819a8ec901d --- /dev/null +++ b/drivers/net/igbvf/mbx.c @@ -0,0 +1,350 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 2009 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "mbx.h" + +/** + * e1000_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * + * returns SUCCESS if it successfully received a message notification + **/ +static s32 e1000_poll_for_msg(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!mbx->ops.check_for_msg) + goto out; + + while (countdown && mbx->ops.check_for_msg(hw)) { + countdown--; + udelay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; +} + +/** + * e1000_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +static s32 e1000_poll_for_ack(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + if (!mbx->ops.check_for_ack) + goto out; + + while (countdown && mbx->ops.check_for_ack(hw)) { + countdown--; + udelay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; +} + +/** + * e1000_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. + **/ +static s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + if (!mbx->ops.read) + goto out; + + ret_val = e1000_poll_for_msg(hw); + + /* if ack received read message, otherwise we timed out */ + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size); +out: + return ret_val; +} + +/** + * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +static s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + /* exit if we either can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; + + /* send msg*/ + ret_val = mbx->ops.write(hw, msg, size); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = e1000_poll_for_ack(hw); +out: + return ret_val; +} + +/** + * e1000_read_v2p_mailbox - read v2p mailbox + * @hw: pointer to the HW structure + * + * This function is used to read the v2p mailbox without losing the read to + * clear status bits. 
+ **/ +static u32 e1000_read_v2p_mailbox(struct e1000_hw *hw) +{ + u32 v2p_mailbox = er32(V2PMAILBOX(0)); + + v2p_mailbox |= hw->dev_spec.vf.v2p_mailbox; + hw->dev_spec.vf.v2p_mailbox |= v2p_mailbox & E1000_V2PMAILBOX_R2C_BITS; + + return v2p_mailbox; +} + +/** + * e1000_check_for_bit_vf - Determine if a status bit was set + * @hw: pointer to the HW structure + * @mask: bitmask for bits to be tested and cleared + * + * This function is used to check for the read to clear bits within + * the V2P mailbox. + **/ +static s32 e1000_check_for_bit_vf(struct e1000_hw *hw, u32 mask) +{ + u32 v2p_mailbox = e1000_read_v2p_mailbox(hw); + s32 ret_val = -E1000_ERR_MBX; + + if (v2p_mailbox & mask) + ret_val = E1000_SUCCESS; + + hw->dev_spec.vf.v2p_mailbox &= ~mask; + + return ret_val; +} + +/** + * e1000_check_for_msg_vf - checks to see if the PF has sent mail + * @hw: pointer to the HW structure + * + * returns SUCCESS if the PF has set the Status bit or else ERR_MBX + **/ +static s32 e1000_check_for_msg_vf(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_MBX; + + if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFSTS)) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * e1000_check_for_ack_vf - checks to see if the PF has ACK'd + * @hw: pointer to the HW structure + * + * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX + **/ +static s32 e1000_check_for_ack_vf(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_MBX; + + if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFACK)) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.acks++; + } + + return ret_val; +} + +/** + * e1000_check_for_rst_vf - checks to see if the PF has reset + * @hw: pointer to the HW structure + * + * returns true if the PF has set the reset done bit or else false + **/ +static s32 e1000_check_for_rst_vf(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_MBX; + + if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD | + E1000_V2PMAILBOX_RSTI))) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * e1000_obtain_mbx_lock_vf - obtain mailbox lock + * @hw: pointer to the HW structure + * + * return SUCCESS if we obtained the mailbox lock + **/ +static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_MBX; + + /* Take ownership of the buffer */ + ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); + + /* reserve mailbox for vf use */ + if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) + ret_val = E1000_SUCCESS; + + return ret_val; +} + +/** + * e1000_write_mbx_vf - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size) +{ + s32 err; + u16 i; + + /* lock the mailbox to prevent pf/vf race condition */ + err = e1000_obtain_mbx_lock_vf(hw); + if (err) + goto out_no_write; + + /* flush any ack or msg as we are going to overwrite mailbox */ + e1000_check_for_ack_vf(hw); + e1000_check_for_msg_vf(hw); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + array_ew32(VMBMEM(0), i, msg[i]); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + + /* Drop VFU and interrupt the PF to tell it a message has been sent */ + ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_REQ); + +out_no_write: + return err; +} + +/** + * e1000_read_mbx_vf - Reads a message from the inbox intended for vf + * 
@hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
+{
+	s32 err;
+	u16 i;
+
+	/* lock the mailbox to prevent pf/vf race condition */
+	err = e1000_obtain_mbx_lock_vf(hw);
+	if (err)
+		goto out_no_read;
+
+	/* copy the message from the mailbox memory buffer */
+	for (i = 0; i < size; i++)
+		msg[i] = array_er32(VMBMEM(0), i);
+
+	/* Acknowledge receipt and release mailbox, then we're done */
+	ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_ACK);
+
+	/* update stats */
+	hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+	return err;
+}
+
+/**
+ * e1000_init_mbx_params_vf - set initial values for vf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for vf mailbox
+ */
+s32 e1000_init_mbx_params_vf(struct e1000_hw *hw)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+
+	/* start mailbox as timed out and let the reset_hw call set the timeout
+	 * value to begin communications */
+	mbx->timeout = 0;
+	mbx->usec_delay = E1000_VF_MBX_INIT_DELAY;
+
+	mbx->size = E1000_VFMAILBOX_SIZE;
+
+	mbx->ops.read = e1000_read_mbx_vf;
+	mbx->ops.write = e1000_write_mbx_vf;
+	mbx->ops.read_posted = e1000_read_posted_mbx;
+	mbx->ops.write_posted = e1000_write_posted_mbx;
+	mbx->ops.check_for_msg = e1000_check_for_msg_vf;
+	mbx->ops.check_for_ack = e1000_check_for_ack_vf;
+	mbx->ops.check_for_rst = e1000_check_for_rst_vf;
+
+	mbx->stats.msgs_tx = 0;
+	mbx->stats.msgs_rx = 0;
+	mbx->stats.reqs = 0;
+	mbx->stats.acks = 0;
+	mbx->stats.rsts = 0;
+
+	return E1000_SUCCESS;
+}
+
diff --git a/drivers/net/igbvf/mbx.h b/drivers/net/igbvf/mbx.h new file mode 100644 index 00000000000..4938609dbfb --- /dev/null +++ b/drivers/net/igbvf/mbx.h @@ -0,0 +1,75 @@
+/*******************************************************************************
+
+  Intel(R) 82576 Virtual Function Linux driver
+  Copyright(c) 1999 - 2009 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_MBX_H_
+#define _E1000_MBX_H_
+
+#include "vf.h"
+
+#define E1000_V2PMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
+#define E1000_V2PMAILBOX_ACK 0x00000002 /* Ack PF message received */
+#define E1000_V2PMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define E1000_V2PMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
+#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
+#define E1000_V2PMAILBOX_RSTI 0x00000040 /* PF has reset indication */
+#define E1000_V2PMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
+#define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
+
+#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+
+/* If it's an E1000_VF_* msg then it originates in the VF and is sent to the
+ * PF. The reverse is true if it is E1000_PF_*.
+ * Message ACK's are the value or'd with 0xF0000000
+ */
+#define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
+                                         * this are the ACK */
+#define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
+                                          * this are the NACK */
+#define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+                                           clear to send requests */
+
+/* We have a total wait time of 1s for vf mailbox posted messages */
+#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* retry count for mailbox timeout */
+#define E1000_VF_MBX_INIT_DELAY 500 /* usec delay between retries */
+
+#define E1000_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
+
+#define E1000_VF_RESET 0x01 /* VF requests reset */
+#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+
+#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
+s32 e1000_init_mbx_params_vf(struct e1000_hw *);
+
+#endif /* _E1000_MBX_H_ */
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c new file mode 100644 index 00000000000..b774666ad3c --- /dev/null +++ b/drivers/net/igbvf/netdev.c @@ -0,0 +1,2922 @@
+/*******************************************************************************
+
+  Intel(R) 82576 Virtual Function Linux driver
+  Copyright(c) 2009 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+ + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/vmalloc.h> +#include <linux/pagemap.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/tcp.h> +#include <linux/ipv6.h> +#include <net/checksum.h> +#include <net/ip6_checksum.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/if_vlan.h> +#include <linux/pm_qos_params.h> + +#include "igbvf.h" + +#define DRV_VERSION "1.0.0-k0" +char igbvf_driver_name[] = "igbvf"; +const char igbvf_driver_version[] = DRV_VERSION; +static const char igbvf_driver_string[] = + "Intel(R) Virtual Function Network Driver"; +static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation."; + +static int igbvf_poll(struct napi_struct *napi, int budget); +static void igbvf_reset(struct igbvf_adapter *); +static void igbvf_set_interrupt_capability(struct igbvf_adapter *); +static void igbvf_reset_interrupt_capability(struct igbvf_adapter *); + +static struct igbvf_info igbvf_vf_info = { + .mac = e1000_vfadapt, + .flags = FLAG_HAS_JUMBO_FRAMES + | FLAG_RX_CSUM_ENABLED, + .pba = 10, + .init_ops = e1000_init_function_pointers_vf, +}; + +static const struct igbvf_info *igbvf_info_tbl[] = { + [board_vf] = &igbvf_vf_info, +}; + +/** + * igbvf_desc_unused - calculate if we have unused descriptors + **/ +static int igbvf_desc_unused(struct igbvf_ring *ring) +{ + if (ring->next_to_clean > ring->next_to_use) + return ring->next_to_clean - ring->next_to_use - 1; + + return ring->count + ring->next_to_clean - ring->next_to_use - 1; +} + +/** + * igbvf_receive_skb - helper function to handle Rx indications + * @adapter: board private structure + * @status: descriptor status field as written by hardware + * @vlan: descriptor vlan field as written by hardware (no le/be conversion) + * @skb: pointer to sk_buff to be indicated to stack + **/ +static void igbvf_receive_skb(struct igbvf_adapter *adapter, + struct net_device *netdev, + struct sk_buff *skb, + u32 status, u16 vlan) +{ + if (adapter->vlgrp && (status & E1000_RXD_STAT_VP)) + vlan_hwaccel_receive_skb(skb, adapter->vlgrp, + le16_to_cpu(vlan) & + E1000_RXD_SPC_VLAN_MASK); + else + netif_receive_skb(skb); + + netdev->last_rx = jiffies; +} + +static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter, + u32 status_err, struct sk_buff *skb) +{ + skb->ip_summed = CHECKSUM_NONE; + + /* Ignore Checksum bit is set or checksum is disabled through ethtool */ + if ((status_err & E1000_RXD_STAT_IXSM)) + return; + /* TCP/UDP checksum error bit is set */ + if (status_err & + (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) { + /* let the stack verify checksum errors */ + adapter->hw_csum_err++; + return; + } + /* It must be a TCP or UDP packet with a valid checksum */ + if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + adapter->hw_csum_good++; +} + +/** + * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split + * @rx_ring: address of ring structure to repopulate + * @cleaned_count: number of buffers to repopulate + **/ +static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring, + int cleaned_count) +{ + struct igbvf_adapter *adapter = rx_ring->adapter; + struct net_device 
*netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union e1000_adv_rx_desc *rx_desc; + struct igbvf_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + int bufsz; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + if (adapter->rx_ps_hdr_size) + bufsz = adapter->rx_ps_hdr_size; + else + bufsz = adapter->rx_buffer_len; + bufsz += NET_IP_ALIGN; + + while (cleaned_count--) { + rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i); + + if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) { + if (!buffer_info->page) { + buffer_info->page = alloc_page(GFP_ATOMIC); + if (!buffer_info->page) { + adapter->alloc_rx_buff_failed++; + goto no_buffers; + } + buffer_info->page_offset = 0; + } else { + buffer_info->page_offset ^= PAGE_SIZE / 2; + } + buffer_info->page_dma = + pci_map_page(pdev, buffer_info->page, + buffer_info->page_offset, + PAGE_SIZE / 2, + PCI_DMA_FROMDEVICE); + } + + if (!buffer_info->skb) { + skb = netdev_alloc_skb(netdev, bufsz); + if (!skb) { + adapter->alloc_rx_buff_failed++; + goto no_buffers; + } + + /* Make buffer alignment 2 beyond a 16 byte boundary + * this will result in a 16 byte aligned IP header after + * the 14 byte MAC header is removed + */ + skb_reserve(skb, NET_IP_ALIGN); + + buffer_info->skb = skb; + buffer_info->dma = pci_map_single(pdev, skb->data, + bufsz, + PCI_DMA_FROMDEVICE); + } + /* Refresh the desc even if buffer_addrs didn't change because + * each write-back erases this info. */ + if (adapter->rx_ps_hdr_size) { + rx_desc->read.pkt_addr = + cpu_to_le64(buffer_info->page_dma); + rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); + } else { + rx_desc->read.pkt_addr = + cpu_to_le64(buffer_info->dma); + rx_desc->read.hdr_addr = 0; + } + + i++; + if (i == rx_ring->count) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + +no_buffers: + if (rx_ring->next_to_use != i) { + rx_ring->next_to_use = i; + if (i == 0) + i = (rx_ring->count - 1); + else + i--; + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). */ + wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->tail); + } +} + +/** + * igbvf_clean_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter, + int *work_done, int work_to_do) +{ + struct igbvf_ring *rx_ring = adapter->rx_ring; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union e1000_adv_rx_desc *rx_desc, *next_rxd; + struct igbvf_buffer *buffer_info, *next_buffer; + struct sk_buff *skb; + bool cleaned = false; + int cleaned_count = 0; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int i; + u32 length, hlen, staterr; + + i = rx_ring->next_to_clean; + rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i); + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + while (staterr & E1000_RXD_STAT_DD) { + if (*work_done >= work_to_do) + break; + (*work_done)++; + + buffer_info = &rx_ring->buffer_info[i]; + + /* HW will not DMA in data larger than the given buffer, even + * if it parses the (NFS, of course) header to be larger. In + * that case, it fills the header buffer and spills the rest + * into the page. 
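+ * The header length reported in hdr_info is therefore clamped to
+ * rx_ps_hdr_size before it is used below.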
+ */ + hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) & + E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; + if (hlen > adapter->rx_ps_hdr_size) + hlen = adapter->rx_ps_hdr_size; + + length = le16_to_cpu(rx_desc->wb.upper.length); + cleaned = true; + cleaned_count++; + + skb = buffer_info->skb; + prefetch(skb->data - NET_IP_ALIGN); + buffer_info->skb = NULL; + if (!adapter->rx_ps_hdr_size) { + pci_unmap_single(pdev, buffer_info->dma, + adapter->rx_buffer_len, + PCI_DMA_FROMDEVICE); + buffer_info->dma = 0; + skb_put(skb, length); + goto send_up; + } + + if (!skb_shinfo(skb)->nr_frags) { + pci_unmap_single(pdev, buffer_info->dma, + adapter->rx_ps_hdr_size + NET_IP_ALIGN, + PCI_DMA_FROMDEVICE); + skb_put(skb, hlen); + } + + if (length) { + pci_unmap_page(pdev, buffer_info->page_dma, + PAGE_SIZE / 2, + PCI_DMA_FROMDEVICE); + buffer_info->page_dma = 0; + + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++, + buffer_info->page, + buffer_info->page_offset, + length); + + if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || + (page_count(buffer_info->page) != 1)) + buffer_info->page = NULL; + else + get_page(buffer_info->page); + + skb->len += length; + skb->data_len += length; + skb->truesize += length; + } +send_up: + i++; + if (i == rx_ring->count) + i = 0; + next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i); + prefetch(next_rxd); + next_buffer = &rx_ring->buffer_info[i]; + + if (!(staterr & E1000_RXD_STAT_EOP)) { + buffer_info->skb = next_buffer->skb; + buffer_info->dma = next_buffer->dma; + next_buffer->skb = skb; + next_buffer->dma = 0; + goto next_desc; + } + + if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { + dev_kfree_skb_irq(skb); + goto next_desc; + } + + total_bytes += skb->len; + total_packets++; + + igbvf_rx_checksum_adv(adapter, staterr, skb); + + skb->protocol = eth_type_trans(skb, netdev); + + igbvf_receive_skb(adapter, netdev, skb, staterr, + rx_desc->wb.upper.vlan); + + netdev->last_rx = jiffies; + +next_desc: + rx_desc->wb.upper.status_error = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) { + igbvf_alloc_rx_buffers(rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + } + + rx_ring->next_to_clean = i; + cleaned_count = igbvf_desc_unused(rx_ring); + + if (cleaned_count) + igbvf_alloc_rx_buffers(rx_ring, cleaned_count); + + adapter->total_rx_packets += total_packets; + adapter->total_rx_bytes += total_bytes; + adapter->net_stats.rx_bytes += total_bytes; + adapter->net_stats.rx_packets += total_packets; + return cleaned; +} + +static void igbvf_put_txbuf(struct igbvf_adapter *adapter, + struct igbvf_buffer *buffer_info) +{ + buffer_info->dma = 0; + if (buffer_info->skb) { + skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb, + DMA_TO_DEVICE); + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } + buffer_info->time_stamp = 0; +} + +static void igbvf_print_tx_hang(struct igbvf_adapter *adapter) +{ + struct igbvf_ring *tx_ring = adapter->tx_ring; + unsigned int i = tx_ring->next_to_clean; + unsigned int eop = tx_ring->buffer_info[i].next_to_watch; + union e1000_adv_tx_desc *eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop); + + /* detected Tx unit hang */ + dev_err(&adapter->pdev->dev, + "Detected Tx Unit Hang:\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]:\n" + " time_stamp 
<%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n", + readl(adapter->hw.hw_addr + tx_ring->head), + readl(adapter->hw.hw_addr + tx_ring->tail), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->buffer_info[eop].time_stamp, + eop, + jiffies, + eop_desc->wb.status); +} + +/** + * igbvf_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int igbvf_setup_tx_resources(struct igbvf_adapter *adapter, + struct igbvf_ring *tx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + int size; + + size = sizeof(struct igbvf_buffer) * tx_ring->count; + tx_ring->buffer_info = vmalloc(size); + if (!tx_ring->buffer_info) + goto err; + memset(tx_ring->buffer_info, 0, size); + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, + &tx_ring->dma); + + if (!tx_ring->desc) + goto err; + + tx_ring->adapter = adapter; + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; +err: + vfree(tx_ring->buffer_info); + dev_err(&adapter->pdev->dev, + "Unable to allocate memory for the transmit descriptor ring\n"); + return -ENOMEM; +} + +/** + * igbvf_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * + * Returns 0 on success, negative on failure + **/ +int igbvf_setup_rx_resources(struct igbvf_adapter *adapter, + struct igbvf_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + int size, desc_len; + + size = sizeof(struct igbvf_buffer) * rx_ring->count; + rx_ring->buffer_info = vmalloc(size); + if (!rx_ring->buffer_info) + goto err; + memset(rx_ring->buffer_info, 0, size); + + desc_len = sizeof(union e1000_adv_rx_desc); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * desc_len; + rx_ring->size = ALIGN(rx_ring->size, 4096); + + rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, + &rx_ring->dma); + + if (!rx_ring->desc) + goto err; + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + rx_ring->adapter = adapter; + + return 0; + +err: + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + dev_err(&adapter->pdev->dev, + "Unable to allocate memory for the receive descriptor ring\n"); + return -ENOMEM; +} + +/** + * igbvf_clean_tx_ring - Free Tx Buffers + * @tx_ring: ring to be cleaned + **/ +static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring) +{ + struct igbvf_adapter *adapter = tx_ring->adapter; + struct igbvf_buffer *buffer_info; + unsigned long size; + unsigned int i; + + if (!tx_ring->buffer_info) + return; + + /* Free all the Tx ring sk_buffs */ + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + igbvf_put_txbuf(adapter, buffer_info); + } + + size = sizeof(struct igbvf_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + writel(0, adapter->hw.hw_addr + tx_ring->head); + writel(0, adapter->hw.hw_addr + tx_ring->tail); +} + +/** + * igbvf_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: ring to free resources from + * + * Free all transmit software resources + **/ +void igbvf_free_tx_resources(struct igbvf_ring *tx_ring) +{ + struct pci_dev *pdev = tx_ring->adapter->pdev; + + igbvf_clean_tx_ring(tx_ring); + 
+ vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * igbvf_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + **/ +static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring) +{ + struct igbvf_adapter *adapter = rx_ring->adapter; + struct igbvf_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + if (!rx_ring->buffer_info) + return; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (buffer_info->dma) { + if (adapter->rx_ps_hdr_size){ + pci_unmap_single(pdev, buffer_info->dma, + adapter->rx_ps_hdr_size, + PCI_DMA_FROMDEVICE); + } else { + pci_unmap_single(pdev, buffer_info->dma, + adapter->rx_buffer_len, + PCI_DMA_FROMDEVICE); + } + buffer_info->dma = 0; + } + + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; + } + + if (buffer_info->page) { + if (buffer_info->page_dma) + pci_unmap_page(pdev, buffer_info->page_dma, + PAGE_SIZE / 2, + PCI_DMA_FROMDEVICE); + put_page(buffer_info->page); + buffer_info->page = NULL; + buffer_info->page_dma = 0; + buffer_info->page_offset = 0; + } + } + + size = sizeof(struct igbvf_buffer) * rx_ring->count; + memset(rx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + writel(0, adapter->hw.hw_addr + rx_ring->head); + writel(0, adapter->hw.hw_addr + rx_ring->tail); +} + +/** + * igbvf_free_rx_resources - Free Rx Resources + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ + +void igbvf_free_rx_resources(struct igbvf_ring *rx_ring) +{ + struct pci_dev *pdev = rx_ring->adapter->pdev; + + igbvf_clean_rx_ring(rx_ring); + + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + rx_ring->desc = NULL; +} + +/** + * igbvf_update_itr - update the dynamic ITR value based on statistics + * @adapter: pointer to adapter + * @itr_setting: current adapter->itr + * @packets: the number of packets during this measurement interval + * @bytes: the number of bytes during this measurement interval + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. This functionality is controlled + * by the InterruptThrottleRate module parameter. 
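+ * The value returned is one of the latency_range values (lowest_latency,
+ * low_latency or bulk_latency) declared in igbvf.h.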
+ **/ +static unsigned int igbvf_update_itr(struct igbvf_adapter *adapter, + u16 itr_setting, int packets, + int bytes) +{ + unsigned int retval = itr_setting; + + if (packets == 0) + goto update_itr_done; + + switch (itr_setting) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + retval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 10) || ((bytes/packets) > 1200)) + retval = bulk_latency; + else if ((packets > 35)) + retval = lowest_latency; + } else if (bytes/packets > 2000) { + retval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + retval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + retval = low_latency; + } else if (bytes < 6000) { + retval = low_latency; + } + break; + } + +update_itr_done: + return retval; +} + +static void igbvf_set_itr(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 current_itr; + u32 new_itr = adapter->itr; + + adapter->tx_itr = igbvf_update_itr(adapter, adapter->tx_itr, + adapter->total_tx_packets, + adapter->total_tx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) + adapter->tx_itr = low_latency; + + adapter->rx_itr = igbvf_update_itr(adapter, adapter->rx_itr, + adapter->total_rx_packets, + adapter->total_rx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) + adapter->rx_itr = low_latency; + + current_itr = max(adapter->rx_itr, adapter->tx_itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = 70000; + break; + case low_latency: + new_itr = 20000; /* aka hwitr = ~200 */ + break; + case bulk_latency: + new_itr = 4000; + break; + default: + break; + } + + if (new_itr != adapter->itr) { + /* + * this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > adapter->itr ? 
+ min(adapter->itr + (new_itr >> 2), new_itr) : + new_itr; + adapter->itr = new_itr; + adapter->rx_ring->itr_val = 1952; + + if (adapter->msix_entries) + adapter->rx_ring->set_itr = 1; + else + ew32(ITR, 1952); + } +} + +/** + * igbvf_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + * returns true if ring is completely cleaned + **/ +static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) +{ + struct igbvf_adapter *adapter = tx_ring->adapter; + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct igbvf_buffer *buffer_info; + struct sk_buff *skb; + union e1000_adv_tx_desc *tx_desc, *eop_desc; + unsigned int total_bytes = 0, total_packets = 0; + unsigned int i, eop, count = 0; + bool cleaned = false; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop); + + while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) && + (count < tx_ring->count)) { + for (cleaned = false; !cleaned; count++) { + tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); + skb = buffer_info->skb; + + if (skb) { + unsigned int segs, bytecount; + + /* gso_segs is currently only valid for tcp */ + segs = skb_shinfo(skb)->gso_segs ?: 1; + /* multiply data chunks by size of headers */ + bytecount = ((segs - 1) * skb_headlen(skb)) + + skb->len; + total_packets += segs; + total_bytes += bytecount; + } + + igbvf_put_txbuf(adapter, buffer_info); + tx_desc->wb.status = 0; + + i++; + if (i == tx_ring->count) + i = 0; + } + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop); + } + + tx_ring->next_to_clean = i; + + if (unlikely(count && + netif_carrier_ok(netdev) && + igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
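+ * smp_mb() orders the next_to_clean update before the queue-stopped
+ * test that follows.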
+ */ + smp_mb(); + if (netif_queue_stopped(netdev) && + !(test_bit(__IGBVF_DOWN, &adapter->state))) { + netif_wake_queue(netdev); + ++adapter->restart_queue; + } + } + + if (adapter->detect_tx_hung) { + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i */ + adapter->detect_tx_hung = false; + if (tx_ring->buffer_info[i].time_stamp && + time_after(jiffies, tx_ring->buffer_info[i].time_stamp + + (adapter->tx_timeout_factor * HZ)) + && !(er32(STATUS) & E1000_STATUS_TXOFF)) { + + tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); + /* detected Tx unit hang */ + igbvf_print_tx_hang(adapter); + + netif_stop_queue(netdev); + } + } + adapter->net_stats.tx_bytes += total_bytes; + adapter->net_stats.tx_packets += total_packets; + return (count < tx_ring->count); +} + +static irqreturn_t igbvf_msix_other(int irq, void *data) +{ + struct net_device *netdev = data; + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + adapter->int_counter1++; + + netif_carrier_off(netdev); + hw->mac.get_link_status = 1; + if (!test_bit(__IGBVF_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + + ew32(EIMS, adapter->eims_other); + + return IRQ_HANDLED; +} + +static irqreturn_t igbvf_intr_msix_tx(int irq, void *data) +{ + struct net_device *netdev = data; + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct igbvf_ring *tx_ring = adapter->tx_ring; + + + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + + /* auto mask will automatically reenable the interrupt when we write + * EICS */ + if (!igbvf_clean_tx_irq(tx_ring)) + /* Ring was not completely cleaned, so fire another interrupt */ + ew32(EICS, tx_ring->eims_value); + else + ew32(EIMS, tx_ring->eims_value); + + return IRQ_HANDLED; +} + +static irqreturn_t igbvf_intr_msix_rx(int irq, void *data) +{ + struct net_device *netdev = data; + struct igbvf_adapter *adapter = netdev_priv(netdev); + + adapter->int_counter0++; + + /* Write the ITR value calculated at the end of the + * previous interrupt. + */ + if (adapter->rx_ring->set_itr) { + writel(adapter->rx_ring->itr_val, + adapter->hw.hw_addr + adapter->rx_ring->itr_register); + adapter->rx_ring->set_itr = 0; + } + + if (napi_schedule_prep(&adapter->rx_ring->napi)) { + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->rx_ring->napi); + } + + return IRQ_HANDLED; +} + +#define IGBVF_NO_QUEUE -1 + +static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue, + int tx_queue, int msix_vector) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ivar, index; + + /* 82576 uses a table-based method for assigning vectors. + Each queue has a single entry in the table to which we write + a vector number along with a "valid" bit. Sadly, the layout + of the table is somewhat counterintuitive. 
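+   Each IVAR0 register holds entries for two queues: even-numbered rx
+   queues use byte 0 and odd ones byte 2, while even tx queues use byte 1
+   and odd ones byte 3, which is what the masking and shifting below
+   implements.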
*/ + if (rx_queue > IGBVF_NO_QUEUE) { + index = (rx_queue >> 1); + ivar = array_er32(IVAR0, index); + if (rx_queue & 0x1) { + /* vector goes into third byte of register */ + ivar = ivar & 0xFF00FFFF; + ivar |= (msix_vector | E1000_IVAR_VALID) << 16; + } else { + /* vector goes into low byte of register */ + ivar = ivar & 0xFFFFFF00; + ivar |= msix_vector | E1000_IVAR_VALID; + } + adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector; + array_ew32(IVAR0, index, ivar); + } + if (tx_queue > IGBVF_NO_QUEUE) { + index = (tx_queue >> 1); + ivar = array_er32(IVAR0, index); + if (tx_queue & 0x1) { + /* vector goes into high byte of register */ + ivar = ivar & 0x00FFFFFF; + ivar |= (msix_vector | E1000_IVAR_VALID) << 24; + } else { + /* vector goes into second byte of register */ + ivar = ivar & 0xFFFF00FF; + ivar |= (msix_vector | E1000_IVAR_VALID) << 8; + } + adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector; + array_ew32(IVAR0, index, ivar); + } +} + +/** + * igbvf_configure_msix - Configure MSI-X hardware + * + * igbvf_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. + **/ +static void igbvf_configure_msix(struct igbvf_adapter *adapter) +{ + u32 tmp; + struct e1000_hw *hw = &adapter->hw; + struct igbvf_ring *tx_ring = adapter->tx_ring; + struct igbvf_ring *rx_ring = adapter->rx_ring; + int vector = 0; + + adapter->eims_enable_mask = 0; + + igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++); + adapter->eims_enable_mask |= tx_ring->eims_value; + if (tx_ring->itr_val) + writel(tx_ring->itr_val, + hw->hw_addr + tx_ring->itr_register); + else + writel(1952, hw->hw_addr + tx_ring->itr_register); + + igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++); + adapter->eims_enable_mask |= rx_ring->eims_value; + if (rx_ring->itr_val) + writel(rx_ring->itr_val, + hw->hw_addr + rx_ring->itr_register); + else + writel(1952, hw->hw_addr + rx_ring->itr_register); + + /* set vector for other causes, i.e. link changes */ + + tmp = (vector++ | E1000_IVAR_VALID); + + ew32(IVAR_MISC, tmp); + + adapter->eims_enable_mask = (1 << (vector)) - 1; + adapter->eims_other = 1 << (vector - 1); + e1e_flush(); +} + +static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter) +{ + if (adapter->msix_entries) { + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } +} + +/** + * igbvf_set_interrupt_capability - set MSI or MSI-X if supported + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter) +{ + int err = -ENOMEM; + int i; + + /* we allocate 3 vectors, 1 for tx, 1 for rx, one for pf messages */ + adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry), + GFP_KERNEL); + if (adapter->msix_entries) { + for (i = 0; i < 3; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix(adapter->pdev, + adapter->msix_entries, 3); + } + + if (err) { + /* MSI-X failed */ + dev_err(&adapter->pdev->dev, + "Failed to initialize MSI-X interrupts.\n"); + igbvf_reset_interrupt_capability(adapter); + } +} + +/** + * igbvf_request_msix - Initialize MSI-X interrupts + * + * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. 
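+ * Three vectors are requested: one for tx cleanup, one for rx, and one
+ * for other causes such as PF mailbox messages and link changes.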
+ **/ +static int igbvf_request_msix(struct igbvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err = 0, vector = 0; + + if (strlen(netdev->name) < (IFNAMSIZ - 5)) { + sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name); + sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name); + } else { + memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); + memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); + } + + err = request_irq(adapter->msix_entries[vector].vector, + &igbvf_intr_msix_tx, 0, adapter->tx_ring->name, + netdev); + if (err) + goto out; + + adapter->tx_ring->itr_register = E1000_EITR(vector); + adapter->tx_ring->itr_val = 1952; + vector++; + + err = request_irq(adapter->msix_entries[vector].vector, + &igbvf_intr_msix_rx, 0, adapter->rx_ring->name, + netdev); + if (err) + goto out; + + adapter->rx_ring->itr_register = E1000_EITR(vector); + adapter->rx_ring->itr_val = 1952; + vector++; + + err = request_irq(adapter->msix_entries[vector].vector, + &igbvf_msix_other, 0, netdev->name, netdev); + if (err) + goto out; + + igbvf_configure_msix(adapter); + return 0; +out: + return err; +} + +/** + * igbvf_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + **/ +static int __devinit igbvf_alloc_queues(struct igbvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL); + if (!adapter->tx_ring) + return -ENOMEM; + + adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL); + if (!adapter->rx_ring) { + kfree(adapter->tx_ring); + return -ENOMEM; + } + + netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64); + + return 0; +} + +/** + * igbvf_request_irq - initialize interrupts + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. 
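+ * The VF only supports MSI-X, so no MSI or legacy fallback is attempted.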
+ **/ +static int igbvf_request_irq(struct igbvf_adapter *adapter) +{ + int err = -1; + + /* igbvf supports msi-x only */ + if (adapter->msix_entries) + err = igbvf_request_msix(adapter); + + if (!err) + return err; + + dev_err(&adapter->pdev->dev, + "Unable to allocate interrupt, Error: %d\n", err); + + return err; +} + +static void igbvf_free_irq(struct igbvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int vector; + + if (adapter->msix_entries) { + for (vector = 0; vector < 3; vector++) + free_irq(adapter->msix_entries[vector].vector, netdev); + } +} + +/** + * igbvf_irq_disable - Mask off interrupt generation on the NIC + **/ +static void igbvf_irq_disable(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(EIMC, ~0); + + if (adapter->msix_entries) + ew32(EIAC, 0); +} + +/** + * igbvf_irq_enable - Enable default interrupt generation settings + **/ +static void igbvf_irq_enable(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(EIAC, adapter->eims_enable_mask); + ew32(EIAM, adapter->eims_enable_mask); + ew32(EIMS, adapter->eims_enable_mask); +} + +/** + * igbvf_poll - NAPI Rx polling callback + * @napi: struct associated with this polling callback + * @budget: amount of packets driver is allowed to process this poll + **/ +static int igbvf_poll(struct napi_struct *napi, int budget) +{ + struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi); + struct igbvf_adapter *adapter = rx_ring->adapter; + struct e1000_hw *hw = &adapter->hw; + int work_done = 0; + + igbvf_clean_rx_irq(adapter, &work_done, budget); + + /* If not enough Rx work done, exit the polling mode */ + if (work_done < budget) { + napi_complete(napi); + + if (adapter->itr_setting & 3) + igbvf_set_itr(adapter); + + if (!test_bit(__IGBVF_DOWN, &adapter->state)) + ew32(EIMS, adapter->rx_ring->eims_value); + } + + return work_done; +} + +/** + * igbvf_set_rlpml - set receive large packet maximum length + * @adapter: board private structure + * + * Configure the maximum size of packets that will be received + */ +static void igbvf_set_rlpml(struct igbvf_adapter *adapter) +{ + int max_frame_size = adapter->max_frame_size; + struct e1000_hw *hw = &adapter->hw; + + if (adapter->vlgrp) + max_frame_size += VLAN_TAG_SIZE; + + e1000_rlpml_set_vf(hw, max_frame_size); +} + +static void igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (hw->mac.ops.set_vfta(hw, vid, true)) + dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid); +} + +static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + igbvf_irq_disable(adapter); + vlan_group_set_device(adapter->vlgrp, vid, NULL); + + if (!test_bit(__IGBVF_DOWN, &adapter->state)) + igbvf_irq_enable(adapter); + + if (hw->mac.ops.set_vfta(hw, vid, false)) + dev_err(&adapter->pdev->dev, + "Failed to remove vlan id %d\n", vid); +} + +static void igbvf_vlan_rx_register(struct net_device *netdev, + struct vlan_group *grp) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + adapter->vlgrp = grp; +} + +static void igbvf_restore_vlan(struct igbvf_adapter *adapter) +{ + u16 vid; + + if (!adapter->vlgrp) + return; + + for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { + if (!vlan_group_get_device(adapter->vlgrp, vid)) + continue; + igbvf_vlan_rx_add_vid(adapter->netdev, 
vid); + } + + igbvf_set_rlpml(adapter); +} + +/** + * igbvf_configure_tx - Configure Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void igbvf_configure_tx(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct igbvf_ring *tx_ring = adapter->tx_ring; + u64 tdba; + u32 txdctl, dca_txctrl; + + /* disable transmits */ + txdctl = er32(TXDCTL(0)); + ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); + msleep(10); + + /* Setup the HW Tx Head and Tail descriptor pointers */ + ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc)); + tdba = tx_ring->dma; + ew32(TDBAL(0), (tdba & DMA_32BIT_MASK)); + ew32(TDBAH(0), (tdba >> 32)); + ew32(TDH(0), 0); + ew32(TDT(0), 0); + tx_ring->head = E1000_TDH(0); + tx_ring->tail = E1000_TDT(0); + + /* Turn off Relaxed Ordering on head write-backs. The writebacks + * MUST be delivered in order or it will completely screw up + * our bookeeping. + */ + dca_txctrl = er32(DCA_TXCTRL(0)); + dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; + ew32(DCA_TXCTRL(0), dca_txctrl); + + /* enable transmits */ + txdctl |= E1000_TXDCTL_QUEUE_ENABLE; + ew32(TXDCTL(0), txdctl); + + /* Setup Transmit Descriptor Settings for eop descriptor */ + adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS; + + /* enable Report Status bit */ + adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS; + + adapter->tx_queue_len = adapter->netdev->tx_queue_len; +} + +/** + * igbvf_setup_srrctl - configure the receive control registers + * @adapter: Board private structure + **/ +static void igbvf_setup_srrctl(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 srrctl = 0; + + srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK | + E1000_SRRCTL_BSIZEHDR_MASK | + E1000_SRRCTL_BSIZEPKT_MASK); + + /* Enable queue drop to avoid head of line blocking */ + srrctl |= E1000_SRRCTL_DROP_EN; + + /* Setup buffer sizes */ + srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >> + E1000_SRRCTL_BSIZEPKT_SHIFT; + + if (adapter->rx_buffer_len < 2048) { + adapter->rx_ps_hdr_size = 0; + srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; + } else { + adapter->rx_ps_hdr_size = 128; + srrctl |= adapter->rx_ps_hdr_size << + E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; + srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; + } + + ew32(SRRCTL(0), srrctl); +} + +/** + * igbvf_configure_rx - Configure Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. 
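+ * Queue 0's descriptor ring base, length, head and tail are programmed and
+ * the RXDCTL threshold fields (IGBVF_RX_PTHRESH/HTHRESH/WTHRESH) are set
+ * before the queue is re-enabled.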
+ **/ +static void igbvf_configure_rx(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct igbvf_ring *rx_ring = adapter->rx_ring; + u64 rdba; + u32 rdlen, rxdctl; + + /* disable receives */ + rxdctl = er32(RXDCTL(0)); + ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); + msleep(10); + + rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc); + + /* + * Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + rdba = rx_ring->dma; + ew32(RDBAL(0), (rdba & DMA_32BIT_MASK)); + ew32(RDBAH(0), (rdba >> 32)); + ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc)); + rx_ring->head = E1000_RDH(0); + rx_ring->tail = E1000_RDT(0); + ew32(RDH(0), 0); + ew32(RDT(0), 0); + + rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; + rxdctl &= 0xFFF00000; + rxdctl |= IGBVF_RX_PTHRESH; + rxdctl |= IGBVF_RX_HTHRESH << 8; + rxdctl |= IGBVF_RX_WTHRESH << 16; + + igbvf_set_rlpml(adapter); + + /* enable receives */ + ew32(RXDCTL(0), rxdctl); +} + +/** + * igbvf_set_multi - Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. + **/ +static void igbvf_set_multi(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct dev_mc_list *mc_ptr; + u8 *mta_list = NULL; + int i; + + if (netdev->mc_count) { + mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC); + if (!mta_list) { + dev_err(&adapter->pdev->dev, + "failed to allocate multicast filter list\n"); + return; + } + } + + /* prepare a packed array of only addresses. */ + mc_ptr = netdev->mc_list; + + for (i = 0; i < netdev->mc_count; i++) { + if (!mc_ptr) + break; + memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, + ETH_ALEN); + mc_ptr = mc_ptr->next; + } + + hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0); + kfree(mta_list); +} + +/** + * igbvf_configure - configure the hardware for Rx and Tx + * @adapter: private board structure + **/ +static void igbvf_configure(struct igbvf_adapter *adapter) +{ + igbvf_set_multi(adapter->netdev); + + igbvf_restore_vlan(adapter); + + igbvf_configure_tx(adapter); + igbvf_setup_srrctl(adapter); + igbvf_configure_rx(adapter); + igbvf_alloc_rx_buffers(adapter->rx_ring, + igbvf_desc_unused(adapter->rx_ring)); +} + +/* igbvf_reset - bring the hardware into a known good state + * + * This function boots the hardware and enables some settings that + * require a configuration cycle of the hardware - those cannot be + * set/changed during runtime. After reset the device needs to be + * properly configured for Rx, Tx etc. 
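+ * The reset itself is requested from the PF over the mailbox; if the PF has
+ * not finished its own reset yet, reset_hw() fails and a message is logged.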
+ */ +static void igbvf_reset(struct igbvf_adapter *adapter) +{ + struct e1000_mac_info *mac = &adapter->hw.mac; + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + + /* Allow time for pending master requests to run */ + if (mac->ops.reset_hw(hw)) + dev_err(&adapter->pdev->dev, "PF still resetting\n"); + + mac->ops.init_hw(hw); + + if (is_valid_ether_addr(adapter->hw.mac.addr)) { + memcpy(netdev->dev_addr, adapter->hw.mac.addr, + netdev->addr_len); + memcpy(netdev->perm_addr, adapter->hw.mac.addr, + netdev->addr_len); + } +} + +int igbvf_up(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + igbvf_configure(adapter); + + clear_bit(__IGBVF_DOWN, &adapter->state); + + napi_enable(&adapter->rx_ring->napi); + if (adapter->msix_entries) + igbvf_configure_msix(adapter); + + /* Clear any pending interrupts. */ + er32(EICR); + igbvf_irq_enable(adapter); + + /* start the watchdog */ + hw->mac.get_link_status = 1; + mod_timer(&adapter->watchdog_timer, jiffies + 1); + + + return 0; +} + +void igbvf_down(struct igbvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 rxdctl, txdctl; + + /* + * signal that we're down so the interrupt handler does not + * reschedule our watchdog timer + */ + set_bit(__IGBVF_DOWN, &adapter->state); + + /* disable receives in the hardware */ + rxdctl = er32(RXDCTL(0)); + ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); + + netif_stop_queue(netdev); + + /* disable transmits in the hardware */ + txdctl = er32(TXDCTL(0)); + ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); + + /* flush both disables and wait for them to finish */ + e1e_flush(); + msleep(10); + + napi_disable(&adapter->rx_ring->napi); + + igbvf_irq_disable(adapter); + + del_timer_sync(&adapter->watchdog_timer); + + netdev->tx_queue_len = adapter->tx_queue_len; + netif_carrier_off(netdev); + + /* record the stats before reset*/ + igbvf_update_stats(adapter); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + igbvf_reset(adapter); + igbvf_clean_tx_ring(adapter->tx_ring); + igbvf_clean_rx_ring(adapter->rx_ring); +} + +void igbvf_reinit_locked(struct igbvf_adapter *adapter) +{ + might_sleep(); + while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) + msleep(1); + igbvf_down(adapter); + igbvf_up(adapter); + clear_bit(__IGBVF_RESETTING, &adapter->state); +} + +/** + * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter) + * @adapter: board private structure to initialize + * + * igbvf_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
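+ * This also picks the default interrupt throttle (ITR) settings, allocates
+ * the single Tx/Rx ring pair and attempts to enable MSI-X.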
+ **/ +static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + s32 rc; + + adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; + adapter->rx_ps_hdr_size = 0; + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + + adapter->tx_int_delay = 8; + adapter->tx_abs_int_delay = 32; + adapter->rx_int_delay = 0; + adapter->rx_abs_int_delay = 8; + adapter->itr_setting = 3; + adapter->itr = 20000; + + /* Set various function pointers */ + adapter->ei->init_ops(&adapter->hw); + + rc = adapter->hw.mac.ops.init_params(&adapter->hw); + if (rc) + return rc; + + rc = adapter->hw.mbx.ops.init_params(&adapter->hw); + if (rc) + return rc; + + igbvf_set_interrupt_capability(adapter); + + if (igbvf_alloc_queues(adapter)) + return -ENOMEM; + + spin_lock_init(&adapter->tx_queue_lock); + + /* Explicitly disable IRQ since the NIC can be in any state. */ + igbvf_irq_disable(adapter); + + spin_lock_init(&adapter->stats_lock); + + set_bit(__IGBVF_DOWN, &adapter->state); + return 0; +} + +static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + adapter->stats.last_gprc = er32(VFGPRC); + adapter->stats.last_gorc = er32(VFGORC); + adapter->stats.last_gptc = er32(VFGPTC); + adapter->stats.last_gotc = er32(VFGOTC); + adapter->stats.last_mprc = er32(VFMPRC); + adapter->stats.last_gotlbc = er32(VFGOTLBC); + adapter->stats.last_gptlbc = er32(VFGPTLBC); + adapter->stats.last_gorlbc = er32(VFGORLBC); + adapter->stats.last_gprlbc = er32(VFGPRLBC); + + adapter->stats.base_gprc = er32(VFGPRC); + adapter->stats.base_gorc = er32(VFGORC); + adapter->stats.base_gptc = er32(VFGPTC); + adapter->stats.base_gotc = er32(VFGOTC); + adapter->stats.base_mprc = er32(VFMPRC); + adapter->stats.base_gotlbc = er32(VFGOTLBC); + adapter->stats.base_gptlbc = er32(VFGPTLBC); + adapter->stats.base_gorlbc = er32(VFGORLBC); + adapter->stats.base_gprlbc = er32(VFGPRLBC); +} + +/** + * igbvf_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +static int igbvf_open(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + /* disallow open during test */ + if (test_bit(__IGBVF_TESTING, &adapter->state)) + return -EBUSY; + + /* allocate transmit descriptors */ + err = igbvf_setup_tx_resources(adapter, adapter->tx_ring); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = igbvf_setup_rx_resources(adapter, adapter->rx_ring); + if (err) + goto err_setup_rx; + + /* + * before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. 
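+ * That is why igbvf_configure() below runs before igbvf_request_irq().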
+ */ + igbvf_configure(adapter); + + err = igbvf_request_irq(adapter); + if (err) + goto err_req_irq; + + /* From here on the code is the same as igbvf_up() */ + clear_bit(__IGBVF_DOWN, &adapter->state); + + napi_enable(&adapter->rx_ring->napi); + + /* clear any pending interrupts */ + er32(EICR); + + igbvf_irq_enable(adapter); + + /* start the watchdog */ + hw->mac.get_link_status = 1; + mod_timer(&adapter->watchdog_timer, jiffies + 1); + + return 0; + +err_req_irq: + igbvf_free_rx_resources(adapter->rx_ring); +err_setup_rx: + igbvf_free_tx_resources(adapter->tx_ring); +err_setup_tx: + igbvf_reset(adapter); + + return err; +} + +/** + * igbvf_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +static int igbvf_close(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state)); + igbvf_down(adapter); + + igbvf_free_irq(adapter); + + igbvf_free_tx_resources(adapter->tx_ring); + igbvf_free_rx_resources(adapter->rx_ring); + + return 0; +} +/** + * igbvf_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int igbvf_set_mac(struct net_device *netdev, void *p) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + if (memcmp(addr->sa_data, hw->mac.addr, 6)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + + return 0; +} + +#define UPDATE_VF_COUNTER(reg, name) \ + { \ + u32 current_counter = er32(reg); \ + if (current_counter < adapter->stats.last_##name) \ + adapter->stats.name += 0x100000000LL; \ + adapter->stats.last_##name = current_counter; \ + adapter->stats.name &= 0xFFFFFFFF00000000LL; \ + adapter->stats.name |= current_counter; \ + } + +/** + * igbvf_update_stats - Update the board statistics counters + * @adapter: board private structure +**/ +void igbvf_update_stats(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + + /* + * Prevent stats update while adapter is being reset, link is down + * or if the pci connection is down. 
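+ * (The UPDATE_VF_COUNTER() invocations below widen the 32-bit hardware
+ * counters to 64 bits: when a reading is lower than the previous one the
+ * counter wrapped, so 2^32 is added before the low 32 bits are replaced,
+ * e.g. last_gprc 0xFFFFFFF0 followed by a VFGPRC reading of 0x10.)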
+ */ + if (adapter->link_speed == 0) + return; + + if (test_bit(__IGBVF_RESETTING, &adapter->state)) + return; + + if (pci_channel_offline(pdev)) + return; + + UPDATE_VF_COUNTER(VFGPRC, gprc); + UPDATE_VF_COUNTER(VFGORC, gorc); + UPDATE_VF_COUNTER(VFGPTC, gptc); + UPDATE_VF_COUNTER(VFGOTC, gotc); + UPDATE_VF_COUNTER(VFMPRC, mprc); + UPDATE_VF_COUNTER(VFGOTLBC, gotlbc); + UPDATE_VF_COUNTER(VFGPTLBC, gptlbc); + UPDATE_VF_COUNTER(VFGORLBC, gorlbc); + UPDATE_VF_COUNTER(VFGPRLBC, gprlbc); + + /* Fill out the OS statistics structure */ + adapter->net_stats.multicast = adapter->stats.mprc; +} + +static void igbvf_print_link_info(struct igbvf_adapter *adapter) +{ + dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s\n", + adapter->link_speed, + ((adapter->link_duplex == FULL_DUPLEX) ? + "Full Duplex" : "Half Duplex")); +} + +static bool igbvf_has_link(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + s32 ret_val = E1000_SUCCESS; + bool link_active; + + ret_val = hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; + + /* if check for link returns error we will need to reset */ + if (ret_val) + schedule_work(&adapter->reset_task); + + return link_active; +} + +/** + * igbvf_watchdog - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void igbvf_watchdog(unsigned long data) +{ + struct igbvf_adapter *adapter = (struct igbvf_adapter *) data; + + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); +} + +static void igbvf_watchdog_task(struct work_struct *work) +{ + struct igbvf_adapter *adapter = container_of(work, + struct igbvf_adapter, + watchdog_task); + struct net_device *netdev = adapter->netdev; + struct e1000_mac_info *mac = &adapter->hw.mac; + struct igbvf_ring *tx_ring = adapter->tx_ring; + struct e1000_hw *hw = &adapter->hw; + u32 link; + int tx_pending = 0; + + link = igbvf_has_link(adapter); + + if (link) { + if (!netif_carrier_ok(netdev)) { + bool txb2b = 1; + + mac->ops.get_link_up_info(&adapter->hw, + &adapter->link_speed, + &adapter->link_duplex); + igbvf_print_link_info(adapter); + + /* + * tweak tx_queue_len according to speed/duplex + * and adjust the timeout factor + */ + netdev->tx_queue_len = adapter->tx_queue_len; + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + txb2b = 0; + netdev->tx_queue_len = 10; + adapter->tx_timeout_factor = 16; + break; + case SPEED_100: + txb2b = 0; + netdev->tx_queue_len = 100; + /* maybe add some timeout factor ? */ + break; + } + + netif_carrier_on(netdev); + netif_wake_queue(netdev); + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + dev_info(&adapter->pdev->dev, "Link is Down\n"); + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + } + + if (netif_carrier_ok(netdev)) { + igbvf_update_stats(adapter); + } else { + tx_pending = (igbvf_desc_unused(tx_ring) + 1 < + tx_ring->count); + if (tx_pending) { + /* + * We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). 
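+ * tx_pending above is set when the ring still holds descriptors that have
+ * not been cleaned.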
+ */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + } + } + + /* Cause software interrupt to ensure Rx ring is cleaned */ + ew32(EICS, adapter->rx_ring->eims_value); + + /* Force detection of hung controller every watchdog period */ + adapter->detect_tx_hung = 1; + + /* Reset the timer */ + if (!test_bit(__IGBVF_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + (2 * HZ))); +} + +#define IGBVF_TX_FLAGS_CSUM 0x00000001 +#define IGBVF_TX_FLAGS_VLAN 0x00000002 +#define IGBVF_TX_FLAGS_TSO 0x00000004 +#define IGBVF_TX_FLAGS_IPV4 0x00000008 +#define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGBVF_TX_FLAGS_VLAN_SHIFT 16 + +static int igbvf_tso(struct igbvf_adapter *adapter, + struct igbvf_ring *tx_ring, + struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) +{ + struct e1000_adv_tx_context_desc *context_desc; + unsigned int i; + int err; + struct igbvf_buffer *buffer_info; + u32 info = 0, tu_cmd = 0; + u32 mss_l4len_idx, l4len; + *hdr_len = 0; + + if (skb_header_cloned(skb)) { + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (err) { + dev_err(&adapter->pdev->dev, + "igbvf_tso returning an error\n"); + return err; + } + } + + l4len = tcp_hdrlen(skb); + *hdr_len += l4len; + + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + } + + i = tx_ring->next_to_use; + + buffer_info = &tx_ring->buffer_info[i]; + context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i); + /* VLAN MACLEN IPLEN */ + if (tx_flags & IGBVF_TX_FLAGS_VLAN) + info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK); + info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); + *hdr_len += skb_network_offset(skb); + info |= (skb_transport_header(skb) - skb_network_header(skb)); + *hdr_len += (skb_transport_header(skb) - skb_network_header(skb)); + context_desc->vlan_macip_lens = cpu_to_le32(info); + + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); + + if (skb->protocol == htons(ETH_P_IP)) + tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; + tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; + + context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); + + /* MSS L4LEN IDX */ + mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT); + mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT); + + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + context_desc->seqnum_seed = 0; + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->dma = 0; + i++; + if (i == tx_ring->count) + i = 0; + + tx_ring->next_to_use = i; + + return true; +} + +static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, + struct igbvf_ring *tx_ring, + struct sk_buff *skb, u32 tx_flags) +{ + struct e1000_adv_tx_context_desc *context_desc; + unsigned int i; + struct igbvf_buffer *buffer_info; + u32 info = 0, tu_cmd = 0; + + if ((skb->ip_summed == CHECKSUM_PARTIAL) || + (tx_flags & IGBVF_TX_FLAGS_VLAN)) { + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i); + + if (tx_flags & IGBVF_TX_FLAGS_VLAN) + info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK); + + info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); + if 
(skb->ip_summed == CHECKSUM_PARTIAL) + info |= (skb_transport_header(skb) - + skb_network_header(skb)); + + + context_desc->vlan_macip_lens = cpu_to_le32(info); + + tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + switch (skb->protocol) { + case __constant_htons(ETH_P_IP): + tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; + break; + case __constant_htons(ETH_P_IPV6): + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; + break; + default: + break; + } + } + + context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); + context_desc->seqnum_seed = 0; + context_desc->mss_l4len_idx = 0; + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->dma = 0; + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + return true; + } + + return false; +} + +static int igbvf_maybe_stop_tx(struct net_device *netdev, int size) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + /* there is enough descriptors then we don't need to worry */ + if (igbvf_desc_unused(adapter->tx_ring) >= size) + return 0; + + netif_stop_queue(netdev); + + smp_mb(); + + /* We need to check again just in case room has been made available */ + if (igbvf_desc_unused(adapter->tx_ring) < size) + return -EBUSY; + + netif_wake_queue(netdev); + + ++adapter->restart_queue; + return 0; +} + +#define IGBVF_MAX_TXD_PWR 16 +#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR) + +static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, + struct igbvf_ring *tx_ring, + struct sk_buff *skb, + unsigned int first) +{ + struct igbvf_buffer *buffer_info; + unsigned int len = skb_headlen(skb); + unsigned int count = 0, i; + unsigned int f; + dma_addr_t *map; + + i = tx_ring->next_to_use; + + if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) { + dev_err(&adapter->pdev->dev, "TX DMA map failed\n"); + return 0; + } + + map = skb_shinfo(skb)->dma_maps; + + buffer_info = &tx_ring->buffer_info[i]; + BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD); + buffer_info->length = len; + /* set time_stamp *before* dma to help avoid a possible race */ + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->dma = map[count]; + count++; + + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { + struct skb_frag_struct *frag; + + i++; + if (i == tx_ring->count) + i = 0; + + frag = &skb_shinfo(skb)->frags[f]; + len = frag->size; + + buffer_info = &tx_ring->buffer_info[i]; + BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD); + buffer_info->length = len; + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->dma = map[count]; + count++; + } + + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[first].next_to_watch = i; + + return count; +} + +static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, + struct igbvf_ring *tx_ring, + int tx_flags, int count, u32 paylen, + u8 hdr_len) +{ + union e1000_adv_tx_desc *tx_desc = NULL; + struct igbvf_buffer *buffer_info; + u32 olinfo_status = 0, cmd_type_len; + unsigned int i; + + cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | + E1000_ADVTXD_DCMD_DEXT); + + if (tx_flags & IGBVF_TX_FLAGS_VLAN) + cmd_type_len |= E1000_ADVTXD_DCMD_VLE; + + if (tx_flags & IGBVF_TX_FLAGS_TSO) { + cmd_type_len |= E1000_ADVTXD_DCMD_TSE; + + /* insert tcp checksum */ + olinfo_status |= E1000_TXD_POPTS_TXSM << 8; + + /* insert ip checksum */ + if (tx_flags & 
IGBVF_TX_FLAGS_IPV4) + olinfo_status |= E1000_TXD_POPTS_IXSM << 8; + + } else if (tx_flags & IGBVF_TX_FLAGS_CSUM) { + olinfo_status |= E1000_TXD_POPTS_TXSM << 8; + } + + olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT); + + i = tx_ring->next_to_use; + while (count--) { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); + tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); + tx_desc->read.cmd_type_len = + cpu_to_le32(cmd_type_len | buffer_info->length); + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + i++; + if (i == tx_ring->count) + i = 0; + } + + tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd); + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). */ + wmb(); + + tx_ring->next_to_use = i; + writel(i, adapter->hw.hw_addr + tx_ring->tail); + /* we need this if more than one processor can write to our tail + * at a time, it syncronizes IO on IA64/Altix systems */ + mmiowb(); +} + +static int igbvf_xmit_frame_ring_adv(struct sk_buff *skb, + struct net_device *netdev, + struct igbvf_ring *tx_ring) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + unsigned int first, tx_flags = 0; + u8 hdr_len = 0; + int count = 0; + int tso = 0; + + if (test_bit(__IGBVF_DOWN, &adapter->state)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (skb->len <= 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* + * need: count + 4 desc gap to keep tail from touching + * + 2 desc gap to keep tail from touching head, + * + 1 desc for skb->data, + * + 1 desc for context descriptor, + * head, otherwise try next time + */ + if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) { + /* this is a hard error */ + return NETDEV_TX_BUSY; + } + + if (adapter->vlgrp && vlan_tx_tag_present(skb)) { + tx_flags |= IGBVF_TX_FLAGS_VLAN; + tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT); + } + + if (skb->protocol == htons(ETH_P_IP)) + tx_flags |= IGBVF_TX_FLAGS_IPV4; + + first = tx_ring->next_to_use; + + tso = skb_is_gso(skb) ? + igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0; + if (unlikely(tso < 0)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (tso) + tx_flags |= IGBVF_TX_FLAGS_TSO; + else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) && + (skb->ip_summed == CHECKSUM_PARTIAL)) + tx_flags |= IGBVF_TX_FLAGS_CSUM; + + /* + * count reflects descriptors mapped, if 0 then mapping error + * has occured and we need to rewind the descriptor queue + */ + count = igbvf_tx_map_adv(adapter, tx_ring, skb, first); + + if (count) { + igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count, + skb->len, hdr_len); + netdev->trans_start = jiffies; + /* Make sure there is space in the ring for the next send. 
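+ * Stop the queue now if a worst-case frame (MAX_SKB_FRAGS + 4 descriptors)
+ * would no longer fit.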
*/ + igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4); + } else { + dev_kfree_skb_any(skb); + tx_ring->buffer_info[first].time_stamp = 0; + tx_ring->next_to_use = first; + } + + return NETDEV_TX_OK; +} + +static int igbvf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct igbvf_ring *tx_ring; + int retval; + + if (test_bit(__IGBVF_DOWN, &adapter->state)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + tx_ring = &adapter->tx_ring[0]; + + retval = igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring); + + return retval; +} + +/** + * igbvf_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void igbvf_tx_timeout(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); +} + +static void igbvf_reset_task(struct work_struct *work) +{ + struct igbvf_adapter *adapter; + adapter = container_of(work, struct igbvf_adapter, reset_task); + + igbvf_reinit_locked(adapter); +} + +/** + * igbvf_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. + **/ +static struct net_device_stats *igbvf_get_stats(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + /* only return the current stats */ + return &adapter->net_stats; +} + +/** + * igbvf_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int igbvf_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) { + dev_err(&adapter->pdev->dev, "Invalid MTU setting\n"); + return -EINVAL; + } + + /* Jumbo frame size limits */ + if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) { + if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { + dev_err(&adapter->pdev->dev, + "Jumbo Frames not supported.\n"); + return -EINVAL; + } + } + +#define MAX_STD_JUMBO_FRAME_SIZE 9234 + if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { + dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n"); + return -EINVAL; + } + + while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) + msleep(1); + /* igbvf_down has a dependency on max_frame_size */ + adapter->max_frame_size = max_frame; + if (netif_running(netdev)) + igbvf_down(adapter); + + /* + * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + * means we reserve 2 more, this pushes us to allocate from the next + * larger slab size. + * i.e. 
RXBUFFER_2048 --> size-4096 slab + * However with the new *_jumbo_rx* routines, jumbo receives will use + * fragmented skbs + */ + + if (max_frame <= 1024) + adapter->rx_buffer_len = 1024; + else if (max_frame <= 2048) + adapter->rx_buffer_len = 2048; + else +#if (PAGE_SIZE / 2) > 16384 + adapter->rx_buffer_len = 16384; +#else + adapter->rx_buffer_len = PAGE_SIZE / 2; +#endif + + + /* adjust allocation if LPE protects us, and we aren't using SBP */ + if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || + (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) + adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + + ETH_FCS_LEN; + + dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n", + netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + igbvf_up(adapter); + else + igbvf_reset(adapter); + + clear_bit(__IGBVF_RESETTING, &adapter->state); + + return 0; +} + +static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + default: + return -EOPNOTSUPP; + } +} + +static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igbvf_adapter *adapter = netdev_priv(netdev); +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state)); + igbvf_down(adapter); + igbvf_free_irq(adapter); + } + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; +#endif + + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int igbvf_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igbvf_adapter *adapter = netdev_priv(netdev); + u32 err; + + pci_restore_state(pdev); + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); + return err; + } + + pci_set_master(pdev); + + if (netif_running(netdev)) { + err = igbvf_request_irq(adapter); + if (err) + return err; + } + + igbvf_reset(adapter); + + if (netif_running(netdev)) + igbvf_up(adapter); + + netif_device_attach(netdev); + + return 0; +} +#endif + +static void igbvf_shutdown(struct pci_dev *pdev) +{ + igbvf_suspend(pdev, PMSG_SUSPEND); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void igbvf_netpoll(struct net_device *netdev) +{ + struct igbvf_adapter *adapter = netdev_priv(netdev); + + disable_irq(adapter->pdev->irq); + + igbvf_clean_tx_irq(adapter->tx_ring); + + enable_irq(adapter->pdev->irq); +} +#endif + +/** + * igbvf_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igbvf_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (netif_running(netdev)) + igbvf_down(adapter); + pci_disable_device(pdev); + + /* Request a slot slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * igbvf_io_slot_reset - called after the pci bus has been reset. 
+ * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the igbvf_resume routine. + */ +static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igbvf_adapter *adapter = netdev_priv(netdev); + + if (pci_enable_device_mem(pdev)) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + igbvf_reset(adapter); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * igbvf_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the igbvf_resume routine. + */ +static void igbvf_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igbvf_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) { + if (igbvf_up(adapter)) { + dev_err(&pdev->dev, + "can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); +} + +static void igbvf_print_device_info(struct igbvf_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + + dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n"); + dev_info(&pdev->dev, "Address: %02x:%02x:%02x:%02x:%02x:%02x\n", + /* MAC address */ + netdev->dev_addr[0], netdev->dev_addr[1], + netdev->dev_addr[2], netdev->dev_addr[3], + netdev->dev_addr[4], netdev->dev_addr[5]); + dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type); +} + +static const struct net_device_ops igbvf_netdev_ops = { + .ndo_open = igbvf_open, + .ndo_stop = igbvf_close, + .ndo_start_xmit = igbvf_xmit_frame, + .ndo_get_stats = igbvf_get_stats, + .ndo_set_multicast_list = igbvf_set_multi, + .ndo_set_mac_address = igbvf_set_mac, + .ndo_change_mtu = igbvf_change_mtu, + .ndo_do_ioctl = igbvf_ioctl, + .ndo_tx_timeout = igbvf_tx_timeout, + .ndo_vlan_rx_register = igbvf_vlan_rx_register, + .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = igbvf_netpoll, +#endif +}; + +/** + * igbvf_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in igbvf_pci_tbl + * + * Returns 0 on success, negative on failure + * + * igbvf_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. 
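+ * If the PF has not completed its own reset the MAC address cannot be read
+ * from hardware, so a random locally administered address is assigned
+ * instead.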
+ **/ +static int __devinit igbvf_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct igbvf_adapter *adapter; + struct e1000_hw *hw; + const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data]; + + static int cards_found; + int err, pci_using_dac; + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + pci_using_dac = 0; + err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); + if (!err) { + err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); + if (!err) + pci_using_dac = 1; + } else { + err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { + err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + if (err) { + dev_err(&pdev->dev, "No usable DMA " + "configuration, aborting\n"); + goto err_dma; + } + } + } + + err = pci_request_regions(pdev, igbvf_driver_name); + if (err) + goto err_pci_reg; + + pci_set_master(pdev); + + err = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct igbvf_adapter)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + hw = &adapter->hw; + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->ei = ei; + adapter->pba = ei->pba; + adapter->flags = ei->flags; + adapter->hw.back = adapter; + adapter->hw.mac.type = ei->mac; + adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; + + /* PCI config space info */ + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + + pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); + + err = -EIO; + adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + + if (!adapter->hw.hw_addr) + goto err_ioremap; + + if (ei->get_variants) { + err = ei->get_variants(adapter); + if (err) + goto err_ioremap; + } + + /* setup adapter struct */ + err = igbvf_sw_init(adapter); + if (err) + goto err_sw_init; + + /* construct the net_device struct */ + netdev->netdev_ops = &igbvf_netdev_ops; + + igbvf_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + adapter->bd_number = cards_found++; + + netdev->features = NETIF_F_SG | + NETIF_F_IP_CSUM | + NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX | + NETIF_F_HW_VLAN_FILTER; + + netdev->features |= NETIF_F_IPV6_CSUM; + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + + if (pci_using_dac) + netdev->features |= NETIF_F_HIGHDMA; + + netdev->vlan_features |= NETIF_F_TSO; + netdev->vlan_features |= NETIF_F_TSO6; + netdev->vlan_features |= NETIF_F_IP_CSUM; + netdev->vlan_features |= NETIF_F_IPV6_CSUM; + netdev->vlan_features |= NETIF_F_SG; + + /*reset the controller to put the device in a known good state */ + err = hw->mac.ops.reset_hw(hw); + if (err) { + dev_info(&pdev->dev, + "PF still in reset state, assigning new address\n"); + random_ether_addr(hw->mac.addr); + } else { + err = hw->mac.ops.read_mac_addr(hw); + if (err) { + dev_err(&pdev->dev, "Error reading MAC address\n"); + goto err_hw_init; + } + } + + memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); + memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->perm_addr)) { + dev_err(&pdev->dev, "Invalid MAC Address: " + "%02x:%02x:%02x:%02x:%02x:%02x\n", + netdev->dev_addr[0], netdev->dev_addr[1], + netdev->dev_addr[2], netdev->dev_addr[3], + 
netdev->dev_addr[4], netdev->dev_addr[5]); + err = -EIO; + goto err_hw_init; + } + + setup_timer(&adapter->watchdog_timer, &igbvf_watchdog, + (unsigned long) adapter); + + INIT_WORK(&adapter->reset_task, igbvf_reset_task); + INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task); + + /* ring size defaults */ + adapter->rx_ring->count = 1024; + adapter->tx_ring->count = 1024; + + /* reset the hardware with the new settings */ + igbvf_reset(adapter); + + /* tell the stack to leave us alone until igbvf_open() is called */ + netif_carrier_off(netdev); + netif_stop_queue(netdev); + + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_hw_init; + + igbvf_print_device_info(adapter); + + igbvf_initialize_last_counter_stats(adapter); + + return 0; + +err_hw_init: + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); +err_sw_init: + igbvf_reset_interrupt_capability(adapter); + iounmap(adapter->hw.hw_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * igbvf_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * igbvf_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void __devexit igbvf_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct igbvf_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* + * flush_scheduled work may reschedule our watchdog task, so + * explicitly disable watchdog tasks from being rescheduled + */ + set_bit(__IGBVF_DOWN, &adapter->state); + del_timer_sync(&adapter->watchdog_timer); + + flush_scheduled_work(); + + unregister_netdev(netdev); + + igbvf_reset_interrupt_capability(adapter); + + /* + * it is important to delete the napi struct prior to freeing the + * rx ring so that you do not end up with null pointer refs + */ + netif_napi_del(&adapter->rx_ring->napi); + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + iounmap(hw->hw_addr); + if (hw->flash_address) + iounmap(hw->flash_address); + pci_release_regions(pdev); + + free_netdev(netdev); + + pci_disable_device(pdev); +} + +/* PCI Error Recovery (ERS) */ +static struct pci_error_handlers igbvf_err_handler = { + .error_detected = igbvf_io_error_detected, + .slot_reset = igbvf_io_slot_reset, + .resume = igbvf_io_resume, +}; + +static struct pci_device_id igbvf_pci_tbl[] = { + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf }, + { } /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl); + +/* PCI Device API Driver */ +static struct pci_driver igbvf_driver = { + .name = igbvf_driver_name, + .id_table = igbvf_pci_tbl, + .probe = igbvf_probe, + .remove = __devexit_p(igbvf_remove), +#ifdef CONFIG_PM + /* Power Management Hooks */ + .suspend = igbvf_suspend, + .resume = igbvf_resume, +#endif + .shutdown = igbvf_shutdown, + .err_handler = &igbvf_err_handler +}; + +/** + * igbvf_init_module - Driver Registration Routine + * + * igbvf_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. 
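+ * It also registers a PM QoS requirement for CPU DMA latency, which is
+ * removed again in igbvf_exit_module().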
+ **/ +static int __init igbvf_init_module(void) +{ + int ret; + printk(KERN_INFO "%s - version %s\n", + igbvf_driver_string, igbvf_driver_version); + printk(KERN_INFO "%s\n", igbvf_copyright); + + ret = pci_register_driver(&igbvf_driver); + pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name, + PM_QOS_DEFAULT_VALUE); + + return ret; +} +module_init(igbvf_init_module); + +/** + * igbvf_exit_module - Driver Exit Cleanup Routine + * + * igbvf_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit igbvf_exit_module(void) +{ + pci_unregister_driver(&igbvf_driver); + pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name); +} +module_exit(igbvf_exit_module); + + +MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); +MODULE_DESCRIPTION("Intel(R) 82576 Virtual Function Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +/* netdev.c */ diff --git a/drivers/net/igbvf/regs.h b/drivers/net/igbvf/regs.h new file mode 100644 index 00000000000..b9e24ed70d0 --- /dev/null +++ b/drivers/net/igbvf/regs.h @@ -0,0 +1,108 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 2009 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_REGS_H_ +#define _E1000_REGS_H_ + +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) +#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAM 0x01530 /* Ext. Interrupt Ack Auto Clear Mask - RW */ +#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +/* + * Convenience macros + * + * Note: "_n" is the queue number of the register to be written to. + * + * Example usage: + * E1000_RDBAL_REG(current_rx_queue) + */ +#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ + (0x0C000 + ((_n) * 0x40))) +#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ + (0x0C004 + ((_n) * 0x40))) +#define E1000_RDLEN(_n) ((_n) < 4 ? 
(0x02808 + ((_n) * 0x100)) : \ + (0x0C008 + ((_n) * 0x40))) +#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ + (0x0C00C + ((_n) * 0x40))) +#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ + (0x0C010 + ((_n) * 0x40))) +#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ + (0x0C018 + ((_n) * 0x40))) +#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ + (0x0C028 + ((_n) * 0x40))) +#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ + (0x0E000 + ((_n) * 0x40))) +#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \ + (0x0E004 + ((_n) * 0x40))) +#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ + (0x0E008 + ((_n) * 0x40))) +#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ + (0x0E010 + ((_n) * 0x40))) +#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ + (0x0E018 + ((_n) * 0x40))) +#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ + (0x0E028 + ((_n) * 0x40))) +#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) +#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) +#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x054E0 + ((_i - 16) * 8))) +#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ + (0x054E4 + ((_i - 16) * 8))) + +/* Statistics registers */ +#define E1000_VFGPRC 0x00F10 +#define E1000_VFGORC 0x00F18 +#define E1000_VFMPRC 0x00F3C +#define E1000_VFGPTC 0x00F14 +#define E1000_VFGOTC 0x00F34 +#define E1000_VFGOTLBC 0x00F50 +#define E1000_VFGPTLBC 0x00F44 +#define E1000_VFGORLBC 0x00F48 +#define E1000_VFGPRLBC 0x00F40 + +/* These act per VF so an array friendly macro is used */ +#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) +#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) + +/* Define macros for handling registers */ +#define er32(reg) readl(hw->hw_addr + E1000_##reg) +#define ew32(reg, val) writel((val), hw->hw_addr + E1000_##reg) +#define array_er32(reg, offset) \ + readl(hw->hw_addr + E1000_##reg + (offset << 2)) +#define array_ew32(reg, offset, val) \ + writel((val), hw->hw_addr + E1000_##reg + (offset << 2)) +#define e1e_flush() er32(STATUS) + +#endif diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c new file mode 100644 index 00000000000..2a4faf9ade6 --- /dev/null +++ b/drivers/net/igbvf/vf.c @@ -0,0 +1,398 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 2009 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + + +#include "vf.h" + +static s32 e1000_check_for_link_vf(struct e1000_hw *hw); +static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +static s32 e1000_init_hw_vf(struct e1000_hw *hw); +static s32 e1000_reset_hw_vf(struct e1000_hw *hw); + +static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *, + u32, u32, u32); +static void e1000_rar_set_vf(struct e1000_hw *, u8 *, u32); +static s32 e1000_read_mac_addr_vf(struct e1000_hw *); +static s32 e1000_set_vfta_vf(struct e1000_hw *, u16, bool); + +/** + * e1000_init_mac_params_vf - Inits MAC params + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_mac_params_vf(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + /* VF's have no MTA Registers - PF feature only */ + mac->mta_reg_count = 128; + /* VF's have no access to RAR entries */ + mac->rar_entry_count = 1; + + /* Function pointers */ + /* reset */ + mac->ops.reset_hw = e1000_reset_hw_vf; + /* hw initialization */ + mac->ops.init_hw = e1000_init_hw_vf; + /* check for link */ + mac->ops.check_for_link = e1000_check_for_link_vf; + /* link info */ + mac->ops.get_link_up_info = e1000_get_link_up_info_vf; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf; + /* set mac address */ + mac->ops.rar_set = e1000_rar_set_vf; + /* read mac address */ + mac->ops.read_mac_addr = e1000_read_mac_addr_vf; + /* set vlan filter table array */ + mac->ops.set_vfta = e1000_set_vfta_vf; + + return E1000_SUCCESS; +} + +/** + * e1000_init_function_pointers_vf - Inits function pointers + * @hw: pointer to the HW structure + **/ +void e1000_init_function_pointers_vf(struct e1000_hw *hw) +{ + hw->mac.ops.init_params = e1000_init_mac_params_vf; + hw->mbx.ops.init_params = e1000_init_mbx_params_vf; +} + +/** + * e1000_get_link_up_info_vf - Gets link info. + * @hw: pointer to the HW structure + * @speed: pointer to 16 bit value to store link speed. + * @duplex: pointer to 16 bit value to store duplex. + * + * Since we cannot read the PHY and get accurate link info, we must rely upon + * the status register's data which is often stale and inaccurate. + **/ +static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 status; + + status = er32(STATUS); + if (status & E1000_STATUS_SPEED_1000) + *speed = SPEED_1000; + else if (status & E1000_STATUS_SPEED_100) + *speed = SPEED_100; + else + *speed = SPEED_10; + + if (status & E1000_STATUS_FD) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + return E1000_SUCCESS; +} + +/** + * e1000_reset_hw_vf - Resets the HW + * @hw: pointer to the HW structure + * + * VF's provide a function level reset. This is done using bit 26 of ctrl_reg. + * This is all the reset we can perform on a VF. 
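+ * The VF then notifies the PF over the mailbox and, on acknowledgement,
+ * stores the permanent MAC address supplied by the PF.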
+ **/ +static s32 e1000_reset_hw_vf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 timeout = E1000_VF_INIT_TIMEOUT; + u32 ret_val = -E1000_ERR_MAC_INIT; + u32 msgbuf[3]; + u8 *addr = (u8 *)(&msgbuf[1]); + u32 ctrl; + + /* assert vf queue/interrupt reset */ + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_RST); + + /* we cannot initialize while the RSTI / RSTD bits are asserted */ + while (!mbx->ops.check_for_rst(hw) && timeout) { + timeout--; + udelay(5); + } + + if (timeout) { + /* mailbox timeout can now become active */ + mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT; + + /* notify pf of vf reset completion */ + msgbuf[0] = E1000_VF_RESET; + mbx->ops.write_posted(hw, msgbuf, 1); + + msleep(10); + + /* set our "perm_addr" based on info provided by PF */ + ret_val = mbx->ops.read_posted(hw, msgbuf, 3); + if (!ret_val) { + if (msgbuf[0] == (E1000_VF_RESET | E1000_VT_MSGTYPE_ACK)) + memcpy(hw->mac.perm_addr, addr, 6); + else + ret_val = -E1000_ERR_MAC_INIT; + } + } + + return ret_val; +} + +/** + * e1000_init_hw_vf - Inits the HW + * @hw: pointer to the HW structure + * + * Not much to do here except clear the PF Reset indication if there is one. + **/ +static s32 e1000_init_hw_vf(struct e1000_hw *hw) +{ + /* attempt to set and restore our mac address */ + e1000_rar_set_vf(hw, hw->mac.addr, 0); + + return E1000_SUCCESS; +} + +/** + * e1000_hash_mc_addr_vf - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. See + * e1000_mta_set_generic() + **/ +static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* + * The bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16) mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * e1000_update_mc_addr_list_vf - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * @rar_used_count: the first RAR register free to program + * @rar_count: total number of supported Receive Address Registers + * + * Updates the Receive Address Registers and Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + * The parameter rar_count will usually be hw->mac.rar_entry_count + * unless there are workarounds that change this. + **/ +void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count, + u32 rar_used_count, u32 rar_count) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 msgbuf[E1000_VFMAILBOX_SIZE]; + u16 *hash_list = (u16 *)&msgbuf[1]; + u32 hash_value; + u32 cnt, i; + + /* Each entry in the list uses 1 16 bit word. We have 30 + * 16 bit words available in our HW msg buffer (minus 1 for the + * msg type). That's 30 hash values if we pack 'em right. If + * there are more than 30 MC addresses to add then punt the + * extras for now and then add code to handle more than 30 later. 
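
Editorial worked example, not part of the driver: the bit_shift loop in e1000_hash_mc_addr_vf() above is easier to follow with concrete numbers, assuming mta_reg_count = 128 as set in e1000_init_mac_params_vf().

#include <linux/types.h>

static u32 vf_mc_hash_example(const u8 mc_addr[6])
{
        u32 hash_mask = 128 * 32 - 1;   /* mta_reg_count = 128 -> 0x0FFF */
        u8 bit_shift = 0;

        while ((hash_mask >> bit_shift) != 0xFF)  /* stops at bit_shift = 4 */
                bit_shift++;

        /* e.g. mc_addr[4] = 0x12, mc_addr[5] = 0x34:
         *   (0x12 >> (8 - 4)) | (0x34 << 4) = 0x0001 | 0x0340 = 0x0341
         * masked with 0x0FFF -> hash 0x0341, the 12-bit value packed into
         * the E1000_VF_SET_MULTICAST mailbox message below.
         */
        return hash_mask & ((mc_addr[4] >> (8 - bit_shift)) |
                            ((u16)mc_addr[5] << bit_shift));
}
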
+ * It would be unusual for a server to request that many multi-cast + * addresses except for in large enterprise network environments. + */ + + cnt = (mc_addr_count > 30) ? 30 : mc_addr_count; + msgbuf[0] = E1000_VF_SET_MULTICAST; + msgbuf[0] |= cnt << E1000_VT_MSGINFO_SHIFT; + + for (i = 0; i < cnt; i++) { + hash_value = e1000_hash_mc_addr_vf(hw, mc_addr_list); + hash_list[i] = hash_value & 0x0FFFF; + mc_addr_list += ETH_ADDR_LEN; + } + + mbx->ops.write_posted(hw, msgbuf, E1000_VFMAILBOX_SIZE); +} + +/** + * e1000_set_vfta_vf - Set/Unset vlan filter table address + * @hw: pointer to the HW structure + * @vid: determines the vfta register and bit to set/unset + * @set: if true then set bit, else clear bit + **/ +static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 msgbuf[2]; + s32 err; + + msgbuf[0] = E1000_VF_SET_VLAN; + msgbuf[1] = vid; + /* Setting the 8 bit field MSG INFO to true indicates "add" */ + if (set) + msgbuf[0] |= 1 << E1000_VT_MSGINFO_SHIFT; + + mbx->ops.write_posted(hw, msgbuf, 2); + + err = mbx->ops.read_posted(hw, msgbuf, 2); + + /* if nacked the vlan was rejected */ + if (!err && (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK))) + err = -E1000_ERR_MAC_INIT; + + return err; +} + +/** e1000_rlpml_set_vf - Set the maximum receive packet length + * @hw: pointer to the HW structure + * @max_size: value to assign to max frame size + **/ +void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 msgbuf[2]; + + msgbuf[0] = E1000_VF_SET_LPE; + msgbuf[1] = max_size; + + mbx->ops.write_posted(hw, msgbuf, 2); +} + +/** + * e1000_rar_set_vf - set device MAC address + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index receive address array register + **/ +static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + u32 msgbuf[3]; + u8 *msg_addr = (u8 *)(&msgbuf[1]); + s32 ret_val; + + memset(msgbuf, 0, 12); + msgbuf[0] = E1000_VF_SET_MAC_ADDR; + memcpy(msg_addr, addr, 6); + ret_val = mbx->ops.write_posted(hw, msgbuf, 3); + + if (!ret_val) + ret_val = mbx->ops.read_posted(hw, msgbuf, 3); + + /* if nacked the address was rejected, use "perm_addr" */ + if (!ret_val && + (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK))) + e1000_read_mac_addr_vf(hw); +} + +/** + * e1000_read_mac_addr_vf - Read device MAC address + * @hw: pointer to the HW structure + **/ +static s32 e1000_read_mac_addr_vf(struct e1000_hw *hw) +{ + int i; + + for (i = 0; i < ETH_ADDR_LEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return E1000_SUCCESS; +} + +/** + * e1000_check_for_link_vf - Check for link for a virtual interface + * @hw: pointer to the HW structure + * + * Checks to see if the underlying PF is still talking to the VF and + * if it is then it reports the link state to the hardware, otherwise + * it reports link down and returns an error. + **/ +static s32 e1000_check_for_link_vf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = E1000_SUCCESS; + u32 in_msg = 0; + + /* + * We only want to run this if there has been a rst asserted. 
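
Editorial note, not part of the patch: the helpers above (set_vfta, rlpml_set, rar_set) all follow the same mailbox convention, where the VF posts a command word and the PF echoes it back with E1000_VT_MSGTYPE_ACK or E1000_VT_MSGTYPE_NACK or'ed in. A minimal sketch of that request/reply shape, assuming the mbx ops and defines from this patch; vf_mbx_cmd() is a hypothetical wrapper, not a driver function.

static s32 vf_mbx_cmd(struct e1000_hw *hw, u32 *msgbuf, u16 size)
{
        struct e1000_mbx_info *mbx = &hw->mbx;
        u32 cmd = msgbuf[0];
        s32 err;

        err = mbx->ops.write_posted(hw, msgbuf, size);
        if (!err)
                err = mbx->ops.read_posted(hw, msgbuf, size);

        /* a NACKed echo of the command means the PF rejected the request */
        if (!err && msgbuf[0] == (cmd | E1000_VT_MSGTYPE_NACK))
                err = -E1000_ERR_MAC_INIT;

        return err;
}
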
+ * in this case that could mean a link change, device reset, + * or a virtual function reset + */ + + /* If we were hit with a reset drop the link */ + if (!mbx->ops.check_for_rst(hw)) + mac->get_link_status = true; + + if (!mac->get_link_status) + goto out; + + /* if link status is down no point in checking to see if pf is up */ + if (!(er32(STATUS) & E1000_STATUS_LU)) + goto out; + + /* if the read failed it could just be a mailbox collision, best wait + * until we are called again and don't report an error */ + if (mbx->ops.read(hw, &in_msg, 1)) + goto out; + + /* if incoming message isn't clear to send we are waiting on response */ + if (!(in_msg & E1000_VT_MSGTYPE_CTS)) { + /* message is not CTS and is NACK we must have lost CTS status */ + if (in_msg & E1000_VT_MSGTYPE_NACK) + ret_val = -E1000_ERR_MAC_INIT; + goto out; + } + + /* the pf is talking, if we timed out in the past we reinit */ + if (!mbx->timeout) { + ret_val = -E1000_ERR_MAC_INIT; + goto out; + } + + /* if we passed all the tests above then the link is up and we no + * longer need to check for link */ + mac->get_link_status = false; + +out: + return ret_val; +} + diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h new file mode 100644 index 00000000000..1e8ce3741a6 --- /dev/null +++ b/drivers/net/igbvf/vf.h @@ -0,0 +1,264 @@ +/******************************************************************************* + + Intel(R) 82576 Virtual Function Linux driver + Copyright(c) 2009 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_VF_H_ +#define _E1000_VF_H_ + +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/if_ether.h> + +#include "regs.h" +#include "defines.h" + +struct e1000_hw; + +#define E1000_DEV_ID_82576_VF 0x10CA +#define E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 +#define E1000_REVISION_4 4 + +#define E1000_FUNC_0 0 +#define E1000_FUNC_1 1 + +/* + * Receive Address Register Count + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * These entries are also used for MAC-based filtering. 
+ */ +#define E1000_RAR_ENTRIES_VF 1 + +/* Receive Descriptor - Advanced */ +union e1000_adv_rx_desc { + struct { + u64 pkt_addr; /* Packet buffer address */ + u64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + u32 data; + struct { + u16 pkt_info; /* RSS/Packet type */ + u16 hdr_info; /* Split Header, + * hdr buffer length */ + } hs_rss; + } lo_dword; + union { + u32 rss; /* RSS Hash */ + struct { + u16 ip_id; /* IP id */ + u16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + u32 status_error; /* ext status/error */ + u16 length; /* Packet length */ + u16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 + +/* Transmit Descriptor - Advanced */ +union e1000_adv_tx_desc { + struct { + u64 buffer_addr; /* Address of descriptor's data buf */ + u32 cmd_type_len; + u32 olinfo_status; + } read; + struct { + u64 rsvd; /* Reserved */ + u32 nxtseq_seed; + u32 status; + } wb; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +/* Context descriptors */ +struct e1000_adv_tx_context_desc { + u32 vlan_macip_lens; + u32 seqnum_seed; + u32 type_tucmd_mlhl; + u32 mss_l4len_idx; +}; + +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ + +enum e1000_mac_type { + e1000_undefined = 0, + e1000_vfadapt, + e1000_num_macs /* List is 1-based, so subtract 1 for true count. */ +}; + +struct e1000_vf_stats { + u64 base_gprc; + u64 base_gptc; + u64 base_gorc; + u64 base_gotc; + u64 base_mprc; + u64 base_gotlbc; + u64 base_gptlbc; + u64 base_gorlbc; + u64 base_gprlbc; + + u32 last_gprc; + u32 last_gptc; + u32 last_gorc; + u32 last_gotc; + u32 last_mprc; + u32 last_gotlbc; + u32 last_gptlbc; + u32 last_gorlbc; + u32 last_gprlbc; + + u64 gprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 mprc; + u64 gotlbc; + u64 gptlbc; + u64 gorlbc; + u64 gprlbc; +}; + +#include "mbx.h" + +struct e1000_mac_operations { + /* Function pointers for the MAC. 
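
Editorial sketch, not part of the patch: the writeback half of the union above is what a receive path consumes. This hypothetical helper shows how the packet length and split-header length come out of it, assuming only the vf.h definitions in this patch.

#include "vf.h"

static void rx_wb_example(union e1000_adv_rx_desc *rx_desc,
                          u16 *pkt_len, u16 *hdr_len)
{
        /* total packet length reported by hardware */
        *pkt_len = le16_to_cpu(rx_desc->wb.upper.length);

        /* header length for packet-split buffers lives in hdr_info,
         * bits 5..14, hence the mask/shift pair defined above */
        *hdr_len = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) &
                    E1000_RXDADV_HDRBUFLEN_MASK) >>
                   E1000_RXDADV_HDRBUFLEN_SHIFT;
}
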
*/ + s32 (*init_params)(struct e1000_hw *); + s32 (*check_for_link)(struct e1000_hw *); + void (*clear_vfta)(struct e1000_hw *); + s32 (*get_bus_info)(struct e1000_hw *); + s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); + void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + s32 (*setup_link)(struct e1000_hw *); + void (*write_vfta)(struct e1000_hw *, u32, u32); + void (*mta_set)(struct e1000_hw *, u32); + void (*rar_set)(struct e1000_hw *, u8*, u32); + s32 (*read_mac_addr)(struct e1000_hw *); + s32 (*set_vfta)(struct e1000_hw *, u16, bool); +}; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + u8 addr[6]; + u8 perm_addr[6]; + + enum e1000_mac_type type; + + u16 mta_reg_count; + u16 rar_entry_count; + + bool get_link_status; +}; + +struct e1000_mbx_operations { + s32 (*init_params)(struct e1000_hw *hw); + s32 (*read)(struct e1000_hw *, u32 *, u16); + s32 (*write)(struct e1000_hw *, u32 *, u16); + s32 (*read_posted)(struct e1000_hw *, u32 *, u16); + s32 (*write_posted)(struct e1000_hw *, u32 *, u16); + s32 (*check_for_msg)(struct e1000_hw *); + s32 (*check_for_ack)(struct e1000_hw *); + s32 (*check_for_rst)(struct e1000_hw *); +}; + +struct e1000_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct e1000_mbx_info { + struct e1000_mbx_operations ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u16 size; +}; + +struct e1000_dev_spec_vf { + u32 vf_number; + u32 v2p_mailbox; +}; + +struct e1000_hw { + void *back; + + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + unsigned long io_base; + + struct e1000_mac_info mac; + struct e1000_mbx_info mbx; + + union { + struct e1000_dev_spec_vf vf; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +/* These functions must be implemented by drivers */ +void e1000_rlpml_set_vf(struct e1000_hw *, u16); +void e1000_init_function_pointers_vf(struct e1000_hw *hw); + + +#endif /* _E1000_VF_H_ */ diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c index cbc63ff13ad..c5593f4665a 100644 --- a/drivers/net/ioc3-eth.c +++ b/drivers/net/ioc3-eth.c @@ -1214,6 +1214,19 @@ static void __devinit ioc3_serial_probe(struct pci_dev *pdev, struct ioc3 *ioc3) } #endif +static const struct net_device_ops ioc3_netdev_ops = { + .ndo_open = ioc3_open, + .ndo_stop = ioc3_close, + .ndo_start_xmit = ioc3_start_xmit, + .ndo_tx_timeout = ioc3_timeout, + .ndo_get_stats = ioc3_get_stats, + .ndo_set_multicast_list = ioc3_set_multicast_list, + .ndo_do_ioctl = ioc3_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ioc3_set_mac_address, + .ndo_change_mtu = eth_change_mtu, +}; + static int __devinit ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -1310,15 +1323,8 @@ static int __devinit ioc3_probe(struct pci_dev *pdev, ioc3_get_eaddr(ip); /* The IOC3-specific entries in the device structure. 
*/ - dev->open = ioc3_open; - dev->hard_start_xmit = ioc3_start_xmit; - dev->tx_timeout = ioc3_timeout; dev->watchdog_timeo = 5 * HZ; - dev->stop = ioc3_close; - dev->get_stats = ioc3_get_stats; - dev->do_ioctl = ioc3_ioctl; - dev->set_multicast_list = ioc3_set_multicast_list; - dev->set_mac_address = ioc3_set_mac_address; + dev->netdev_ops = &ioc3_netdev_ops; dev->ethtool_ops = &ioc3_ethtool_ops; dev->features = NETIF_F_IP_CSUM; diff --git a/drivers/net/isa-skeleton.c b/drivers/net/isa-skeleton.c index 3126678bdd3..73585fd8f29 100644 --- a/drivers/net/isa-skeleton.c +++ b/drivers/net/isa-skeleton.c @@ -181,6 +181,18 @@ out: } #endif +static const struct net_device_ops netcard_netdev_ops = { + .ndo_open = net_open, + .ndo_stop = net_close, + .ndo_start_xmit = net_send_packet, + .ndo_get_stats = net_get_stats, + .ndo_set_multicast_list = set_multicast_list, + .ndo_tx_timeout = net_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +}; + /* * This is the real probe routine. Linux has a history of friendly device * probes on the ISA bus. A good device probes avoids doing writes, and @@ -303,13 +315,7 @@ static int __init netcard_probe1(struct net_device *dev, int ioaddr) np = netdev_priv(dev); spin_lock_init(&np->lock); - dev->open = net_open; - dev->stop = net_close; - dev->hard_start_xmit = net_send_packet; - dev->get_stats = net_get_stats; - dev->set_multicast_list = &set_multicast_list; - - dev->tx_timeout = &net_tx_timeout; + dev->netdev_ops = &netcard_netdev_ops; dev->watchdog_timeo = MY_TX_TIMEOUT; err = register_netdev(dev); diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c index de4db0dc787..4791238c3f6 100644 --- a/drivers/net/ixgbe/ixgbe_82598.c +++ b/drivers/net/ixgbe/ixgbe_82598.c @@ -885,61 +885,6 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) } /** - * ixgbe_blink_led_start_82598 - Blink LED based on index. - * @hw: pointer to hardware structure - * @index: led number to blink - **/ -static s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index) -{ - ixgbe_link_speed speed = 0; - bool link_up = 0; - u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); - u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - - /* - * Link must be up to auto-blink the LEDs on the 82598EB MAC; - * force it if link is down. - */ - hw->mac.ops.check_link(hw, &speed, &link_up, false); - - if (!link_up) { - autoc_reg |= IXGBE_AUTOC_FLU; - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); - msleep(10); - } - - led_reg &= ~IXGBE_LED_MODE_MASK(index); - led_reg |= IXGBE_LED_BLINK(index); - IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); - IXGBE_WRITE_FLUSH(hw); - - return 0; -} - -/** - * ixgbe_blink_led_stop_82598 - Stop blinking LED based on index. 
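
Editorial note, not part of the patch: the ioc3 and isa-skeleton hunks above are two instances of the tree-wide net_device_ops conversion; the same mechanical change appears again below for mac89x0, macb, macsonic, sh_eth, sun3_82586, tc35815, tsi108 and xtsonic. A minimal sketch of the pattern with hypothetical my_* names, not taken from any of these drivers.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static int my_open(struct net_device *dev) { return 0; }
static int my_stop(struct net_device *dev) { return 0; }
static int my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}

static const struct net_device_ops my_netdev_ops = {
        .ndo_open            = my_open,
        .ndo_stop            = my_stop,
        .ndo_start_xmit      = my_start_xmit,
        .ndo_validate_addr   = eth_validate_addr,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_change_mtu      = eth_change_mtu,
};

static void my_setup(struct net_device *dev)
{
        /* one assignment replaces the old dev->open/stop/hard_start_xmit/... */
        dev->netdev_ops = &my_netdev_ops;
}
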
- * @hw: pointer to hardware structure - * @index: led number to stop blinking - **/ -static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index) -{ - u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); - u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - - autoc_reg &= ~IXGBE_AUTOC_FLU; - autoc_reg |= IXGBE_AUTOC_AN_RESTART; - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); - - led_reg &= ~IXGBE_LED_MODE_MASK(index); - led_reg &= ~IXGBE_LED_BLINK(index); - led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); - IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); - IXGBE_WRITE_FLUSH(hw); - - return 0; -} - -/** * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register * @hw: pointer to hardware structure * @reg: analog register to read @@ -1128,8 +1073,8 @@ static struct ixgbe_mac_operations mac_ops_82598 = { .get_link_capabilities = &ixgbe_get_link_capabilities_82598, .led_on = &ixgbe_led_on_generic, .led_off = &ixgbe_led_off_generic, - .blink_led_start = &ixgbe_blink_led_start_82598, - .blink_led_stop = &ixgbe_blink_led_stop_82598, + .blink_led_start = &ixgbe_blink_led_start_generic, + .blink_led_stop = &ixgbe_blink_led_stop_generic, .set_rar = &ixgbe_set_rar_generic, .clear_rar = &ixgbe_clear_rar_generic, .set_vmdq = &ixgbe_set_vmdq_82598, diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c index beae7e01260..29771fbaa42 100644 --- a/drivers/net/ixgbe/ixgbe_82599.c +++ b/drivers/net/ixgbe/ixgbe_82599.c @@ -68,8 +68,6 @@ s32 ixgbe_clear_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq); s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on); s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw); -s32 ixgbe_blink_led_stop_82599(struct ixgbe_hw *hw, u32 index); -s32 ixgbe_blink_led_start_82599(struct ixgbe_hw *hw, u32 index); s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw); s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val); s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val); @@ -991,40 +989,6 @@ s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw) } /** - * ixgbe_blink_led_start_82599 - Blink LED based on index. - * @hw: pointer to hardware structure - * @index: led number to blink - **/ -s32 ixgbe_blink_led_start_82599(struct ixgbe_hw *hw, u32 index) -{ - u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - - led_reg &= ~IXGBE_LED_MODE_MASK(index); - led_reg |= IXGBE_LED_BLINK(index); - IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); - IXGBE_WRITE_FLUSH(hw); - - return 0; -} - -/** - * ixgbe_blink_led_stop_82599 - Stop blinking LED based on index. 
- * @hw: pointer to hardware structure - * @index: led number to stop blinking - **/ -s32 ixgbe_blink_led_stop_82599(struct ixgbe_hw *hw, u32 index) -{ - u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); - - led_reg &= ~IXGBE_LED_MODE_MASK(index); - led_reg &= ~IXGBE_LED_BLINK(index); - IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); - IXGBE_WRITE_FLUSH(hw); - - return 0; -} - -/** * ixgbe_init_uta_tables_82599 - Initialize the Unicast Table Array * @hw: pointer to hardware structure **/ @@ -1243,8 +1207,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = { .get_link_capabilities = &ixgbe_get_link_capabilities_82599, .led_on = &ixgbe_led_on_generic, .led_off = &ixgbe_led_off_generic, - .blink_led_start = &ixgbe_blink_led_start_82599, - .blink_led_stop = &ixgbe_blink_led_stop_82599, + .blink_led_start = &ixgbe_blink_led_start_generic, + .blink_led_stop = &ixgbe_blink_led_stop_generic, .set_rar = &ixgbe_set_rar_generic, .clear_rar = &ixgbe_clear_rar_generic, .set_vmdq = &ixgbe_set_vmdq_82599, diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c index 63ab6671d08..5567519676d 100644 --- a/drivers/net/ixgbe/ixgbe_common.c +++ b/drivers/net/ixgbe/ixgbe_common.c @@ -2071,3 +2071,58 @@ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) return 0; } + +/** + * ixgbe_blink_led_start_generic - Blink LED based on index. + * @hw: pointer to hardware structure + * @index: led number to blink + **/ +s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) +{ + ixgbe_link_speed speed = 0; + bool link_up = 0; + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + /* + * Link must be up to auto-blink the LEDs; + * Force it if link is down. + */ + hw->mac.ops.check_link(hw, &speed, &link_up, false); + + if (!link_up) { + autoc_reg |= IXGBE_AUTOC_FLU; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + msleep(10); + } + + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_BLINK(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} + +/** + * ixgbe_blink_led_stop_generic - Stop blinking LED based on index. 
+ * @hw: pointer to hardware structure + * @index: led number to stop blinking + **/ +s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) +{ + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + + autoc_reg &= ~IXGBE_AUTOC_FLU; + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); + + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg &= ~IXGBE_LED_BLINK(index); + led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + + return 0; +} diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h index 24f73e719c3..dd260890ad0 100644 --- a/drivers/net/ixgbe/ixgbe_common.h +++ b/drivers/net/ixgbe/ixgbe_common.h @@ -76,6 +76,9 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val); s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val); +s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); +s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); + #define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) #ifndef writeq diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index aafc120f164..f0a20facc65 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -943,6 +943,24 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, } +static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter, + struct ethtool_wolinfo *wol) +{ + struct ixgbe_hw *hw = &adapter->hw; + int retval = 1; + + switch(hw->device_id) { + case IXGBE_DEV_ID_82599_KX4: + retval = 0; + break; + default: + wol->supported = 0; + retval = 0; + } + + return retval; +} + static void ixgbe_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { @@ -952,7 +970,8 @@ static void ixgbe_get_wol(struct net_device *netdev, WAKE_BCAST | WAKE_MAGIC; wol->wolopts = 0; - if (!device_can_wakeup(&adapter->pdev->dev)) + if (ixgbe_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) return; if (adapter->wol & IXGBE_WUFC_EX) @@ -974,6 +993,9 @@ static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) return -EOPNOTSUPP; + if (ixgbe_wol_exclusion(adapter, wol)) + return wol->wolopts ? -EOPNOTSUPP : 0; + adapter->wol = 0; if (wol->wolopts & WAKE_UCAST) diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 9ef128ae645..febde45cf9f 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -2723,17 +2723,21 @@ static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) **/ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) { - /* Start with base case */ - adapter->num_rx_queues = 1; - adapter->num_tx_queues = 1; - #ifdef CONFIG_IXGBE_DCB if (ixgbe_set_dcb_queues(adapter)) - return; + goto done; #endif if (ixgbe_set_rss_queues(adapter)) - return; + goto done; + + /* fallback to base case */ + adapter->num_rx_queues = 1; + adapter->num_tx_queues = 1; + +done: + /* Notify the stack of the (possibly) reduced Tx Queue count. */ + adapter->netdev->real_num_tx_queues = adapter->num_tx_queues; } static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, @@ -2992,9 +2996,6 @@ try_msi: } out: - /* Notify the stack of the (possibly) reduced Tx Queue count. 
*/ - adapter->netdev->real_num_tx_queues = adapter->num_tx_queues; - return err; } @@ -3611,9 +3612,9 @@ static int ixgbe_resume(struct pci_dev *pdev) return 0; } - #endif /* CONFIG_PM */ -static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) + +static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) { struct net_device *netdev = pci_get_drvdata(pdev); struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -3672,18 +3673,46 @@ static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) pci_enable_wake(pdev, PCI_D3cold, 0); } + *enable_wake = !!wufc; + ixgbe_release_hw_control(adapter); pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); + return 0; +} + +#ifdef CONFIG_PM +static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) +{ + int retval; + bool wake; + + retval = __ixgbe_shutdown(pdev, &wake); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } return 0; } +#endif /* CONFIG_PM */ static void ixgbe_shutdown(struct pci_dev *pdev) { - ixgbe_suspend(pdev, PMSG_SUSPEND); + bool wake; + + __ixgbe_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } } /** @@ -4342,7 +4371,7 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) int count = 0; unsigned int f; - r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping; + r_idx = skb->queue_mapping; tx_ring = &adapter->tx_ring[r_idx]; if (adapter->vlgrp && vlan_tx_tag_present(skb)) { diff --git a/drivers/net/jme.c b/drivers/net/jme.c index ece35040288..621a7c0c46b 100644 --- a/drivers/net/jme.c +++ b/drivers/net/jme.c @@ -2591,13 +2591,13 @@ static int jme_pci_dma64(struct pci_dev *pdev) { if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 && - !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) - if (!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) + if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) return 1; if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 && - !pci_set_dma_mask(pdev, DMA_40BIT_MASK)) - if (!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK)) + !pci_set_dma_mask(pdev, DMA_BIT_MASK(40))) + if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40))) return 1; if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c index 380a1a54d53..384e072de2e 100644 --- a/drivers/net/mac89x0.c +++ b/drivers/net/mac89x0.c @@ -168,6 +168,17 @@ writereg(struct net_device *dev, int portno, int value) nubus_writew(swab16(value), dev->mem_start + portno); } +static const struct net_device_ops mac89x0_netdev_ops = { + .ndo_open = net_open, + .ndo_stop = net_close, + .ndo_start_xmit = net_send_packet, + .ndo_get_stats = net_get_stats, + .ndo_set_multicast_list = set_multicast_list, + .ndo_set_mac_address = set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + /* Probe for the CS8900 card in slot E. We won't bother looking anywhere else until we have a really good reason to do so. 
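
Editorial sketch, not part of the patch: the ixgbe rework above factors the common teardown into __ixgbe_shutdown(), which reports whether wake-on-LAN is armed; suspend and shutdown then differ only in the PCI power-management calls they issue. The my_* names below are hypothetical, with my_dev_quiesce() standing in for __ixgbe_shutdown().

#include <linux/kernel.h>
#include <linux/pci.h>

static int my_dev_quiesce(struct pci_dev *pdev, bool *wake)
{
        *wake = false;  /* a real driver stops I/O and checks its WoL config */
        return 0;
}

static int my_suspend(struct pci_dev *pdev, pm_message_t state)
{
        bool wake;
        int err = my_dev_quiesce(pdev, &wake);

        if (err)
                return err;

        if (wake) {
                /* arm PME and pick the deepest wake-capable D-state */
                pci_prepare_to_sleep(pdev);
        } else {
                pci_wake_from_d3(pdev, false);
                pci_set_power_state(pdev, PCI_D3hot);
        }
        return 0;
}

static void my_shutdown(struct pci_dev *pdev)
{
        bool wake;

        my_dev_quiesce(pdev, &wake);

        /* only program D3/wake state when the machine is powering off */
        if (system_state == SYSTEM_POWER_OFF) {
                pci_wake_from_d3(pdev, wake);
                pci_set_power_state(pdev, PCI_D3hot);
        }
}
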
*/ struct net_device * __init mac89x0_probe(int unit) @@ -280,12 +291,7 @@ struct net_device * __init mac89x0_probe(int unit) printk(" IRQ %d ADDR %pM\n", dev->irq, dev->dev_addr); - dev->open = net_open; - dev->stop = net_close; - dev->hard_start_xmit = net_send_packet; - dev->get_stats = net_get_stats; - dev->set_multicast_list = &set_multicast_list; - dev->set_mac_address = &set_mac_address; + dev->netdev_ops = &mac89x0_netdev_ops; err = register_netdev(dev); if (err) diff --git a/drivers/net/macb.c b/drivers/net/macb.c index f50501013b1..46073de290c 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c @@ -1100,6 +1100,18 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return phy_mii_ioctl(phydev, if_mii(rq), cmd); } +static const struct net_device_ops macb_netdev_ops = { + .ndo_open = macb_open, + .ndo_stop = macb_close, + .ndo_start_xmit = macb_start_xmit, + .ndo_set_multicast_list = macb_set_rx_mode, + .ndo_get_stats = macb_get_stats, + .ndo_do_ioctl = macb_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +}; + static int __init macb_probe(struct platform_device *pdev) { struct eth_platform_data *pdata; @@ -1175,12 +1187,7 @@ static int __init macb_probe(struct platform_device *pdev) goto err_out_iounmap; } - dev->open = macb_open; - dev->stop = macb_close; - dev->hard_start_xmit = macb_start_xmit; - dev->get_stats = macb_get_stats; - dev->set_multicast_list = macb_set_rx_mode; - dev->do_ioctl = macb_ioctl; + dev->netdev_ops = &macb_netdev_ops; netif_napi_add(dev, &bp->napi, macb_poll, 64); dev->ethtool_ops = &macb_ethtool_ops; diff --git a/drivers/net/macsonic.c b/drivers/net/macsonic.c index 527166e35d5..acd143da161 100644 --- a/drivers/net/macsonic.c +++ b/drivers/net/macsonic.c @@ -167,6 +167,18 @@ static int macsonic_close(struct net_device* dev) return err; } +static const struct net_device_ops macsonic_netdev_ops = { + .ndo_open = macsonic_open, + .ndo_stop = macsonic_close, + .ndo_start_xmit = sonic_send_packet, + .ndo_set_multicast_list = sonic_multicast_list, + .ndo_tx_timeout = sonic_tx_timeout, + .ndo_get_stats = sonic_get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +}; + static int __init macsonic_init(struct net_device *dev) { struct sonic_local* lp = netdev_priv(dev); @@ -198,12 +210,7 @@ static int __init macsonic_init(struct net_device *dev) lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS * SONIC_BUS_SCALE(lp->dma_bitmode)); - dev->open = macsonic_open; - dev->stop = macsonic_close; - dev->hard_start_xmit = sonic_send_packet; - dev->get_stats = sonic_get_stats; - dev->set_multicast_list = &sonic_multicast_list; - dev->tx_timeout = sonic_tx_timeout; + dev->netdev_ops = &macsonic_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; /* diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c index 7cce3342ef8..606aa58afde 100644 --- a/drivers/net/mlx4/port.c +++ b/drivers/net/mlx4/port.c @@ -299,13 +299,14 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port) struct mlx4_cmd_mailbox *mailbox; int err; + if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) + return 0; + mailbox = mlx4_alloc_cmd_mailbox(dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); memset(mailbox->buf, 0, 256); - if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) - return 0; ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port]; err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT, 
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c index a56d9d2df73..b3185bf2c15 100644 --- a/drivers/net/mv643xx_eth.c +++ b/drivers/net/mv643xx_eth.c @@ -2274,8 +2274,6 @@ static void port_start(struct mv643xx_eth_private *mp) pscr |= FORCE_LINK_PASS; wrlp(mp, PORT_SERIAL_CONTROL, pscr); - wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE); - /* * Configure TX path and queues. */ @@ -2957,6 +2955,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev) netif_carrier_off(dev); + wrlp(mp, SDMA_CONFIG, PORT_SDMA_CONFIG_DEFAULT_VALUE); + set_rx_coal(mp, 250); set_tx_coal(mp, 0); diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 9eed126a82f..f2c4a665e93 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -2447,6 +2447,7 @@ static int myri10ge_open(struct net_device *dev) lro_mgr->lro_arr = ss->rx_done.lro_desc; lro_mgr->get_frag_header = myri10ge_get_frag_header; lro_mgr->max_aggr = myri10ge_lro_max_pkts; + lro_mgr->frag_align_pad = 2; if (lro_mgr->max_aggr > MAX_SKB_FRAGS) lro_mgr->max_aggr = MAX_SKB_FRAGS; diff --git a/drivers/net/niu.c b/drivers/net/niu.c index 73cac6c78cb..2b1745328cf 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c @@ -4834,6 +4834,7 @@ static int niu_compute_rbr_cfig_b(struct rx_ring_info *rp, u64 *ret) { u64 val = 0; + *ret = 0; switch (rp->rbr_block_size) { case 4 * 1024: val |= (RBR_BLKSIZE_4K << RBR_CFIG_B_BLKSIZE_SHIFT); @@ -9542,7 +9543,7 @@ static struct niu_parent * __devinit niu_new_parent(struct niu *np, plat_dev = platform_device_register_simple("niu", niu_parent_index, NULL, 0); - if (!plat_dev) + if (IS_ERR(plat_dev)) return NULL; for (i = 0; attr_name(niu_parent_attributes[i]); i++) { diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c index cf24cc34deb..e7070515d2e 100644 --- a/drivers/net/phy/fixed.c +++ b/drivers/net/phy/fixed.c @@ -19,6 +19,7 @@ #include <linux/mii.h> #include <linux/phy.h> #include <linux/phy_fixed.h> +#include <linux/err.h> #define MII_REGS_NUM 29 @@ -207,8 +208,8 @@ static int __init fixed_mdio_bus_init(void) int ret; pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0); - if (!pdev) { - ret = -ENOMEM; + if (IS_ERR(pdev)) { + ret = PTR_ERR(pdev); goto err_pdev; } diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index eb6411c4694..7a3ec9d39a9 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -69,6 +69,11 @@ #define MII_M1111_COPPER 0 #define MII_M1111_FIBER 1 +#define MII_88E1121_PHY_LED_CTRL 16 +#define MII_88E1121_PHY_LED_PAGE 3 +#define MII_88E1121_PHY_LED_DEF 0x0030 +#define MII_88E1121_PHY_PAGE 22 + #define MII_M1011_PHY_STATUS 0x11 #define MII_M1011_PHY_STATUS_1000 0x8000 #define MII_M1011_PHY_STATUS_100 0x4000 @@ -154,6 +159,30 @@ static int marvell_config_aneg(struct phy_device *phydev) return err; } +static int m88e1121_config_aneg(struct phy_device *phydev) +{ + int err, temp; + + err = phy_write(phydev, MII_BMCR, BMCR_RESET); + if (err < 0) + return err; + + err = phy_write(phydev, MII_M1011_PHY_SCR, + MII_M1011_PHY_SCR_AUTO_CROSS); + if (err < 0) + return err; + + temp = phy_read(phydev, MII_88E1121_PHY_PAGE); + + phy_write(phydev, MII_88E1121_PHY_PAGE, MII_88E1121_PHY_LED_PAGE); + phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF); + phy_write(phydev, MII_88E1121_PHY_PAGE, temp); + + err = genphy_config_aneg(phydev); + + return err; +} + static int m88e1111_config_init(struct phy_device *phydev) { int err; @@ -429,6 +458,18 @@ static 
int marvell_read_status(struct phy_device *phydev) return 0; } +static int m88e1121_did_interrupt(struct phy_device *phydev) +{ + int imask; + + imask = phy_read(phydev, MII_M1011_IEVENT); + + if (imask & MII_M1011_IMASK_INIT) + return 1; + + return 0; +} + static struct phy_driver marvell_drivers[] = { { .phy_id = 0x01410c60, @@ -482,6 +523,19 @@ static struct phy_driver marvell_drivers[] = { .driver = {.owner = THIS_MODULE,}, }, { + .phy_id = 0x01410cb0, + .phy_id_mask = 0xfffffff0, + .name = "Marvell 88E1121R", + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_aneg = &m88e1121_config_aneg, + .read_status = &marvell_read_status, + .ack_interrupt = &marvell_ack_interrupt, + .config_intr = &marvell_config_intr, + .did_interrupt = &m88e1121_did_interrupt, + .driver = { .owner = THIS_MODULE }, + }, + { .phy_id = 0x01410cd0, .phy_id_mask = 0xfffffff0, .name = "Marvell 88E1145", diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 3ff1f425f1b..61755cbd978 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -434,7 +434,7 @@ void phy_start_machine(struct phy_device *phydev, phydev->adjust_state = handler; INIT_DELAYED_WORK(&phydev->state_queue, phy_state_machine); - schedule_delayed_work(&phydev->state_queue, jiffies + HZ); + schedule_delayed_work(&phydev->state_queue, HZ); } /** @@ -655,6 +655,10 @@ static void phy_change(struct work_struct *work) struct phy_device *phydev = container_of(work, struct phy_device, phy_queue); + if (phydev->drv->did_interrupt && + !phydev->drv->did_interrupt(phydev)) + goto ignore; + err = phy_disable_interrupts(phydev); if (err) @@ -681,6 +685,11 @@ static void phy_change(struct work_struct *work) return; +ignore: + atomic_dec(&phydev->irq_disable); + enable_irq(phydev->irq); + return; + irq_enable_err: disable_irq(phydev->irq); atomic_inc(&phydev->irq_disable); @@ -937,6 +946,5 @@ static void phy_state_machine(struct work_struct *work) if (err < 0) phy_error(phydev); - schedule_delayed_work(&phydev->state_queue, - jiffies + PHY_STATE_TIME * HZ); + schedule_delayed_work(&phydev->state_queue, PHY_STATE_TIME * HZ); } diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index 5e8540b6ffa..6f97b47d74a 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c @@ -160,6 +160,7 @@ MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>," "Florian Fainelli <florian@openwrt.org>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver"); +MODULE_VERSION(DRV_VERSION " " DRV_RELDATE); /* RX and TX interrupts that we handle */ #define RX_INTS (RX_FIFO_FULL | RX_NO_DESC | RX_FINISH) diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index dee23b159df..7269a426051 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c @@ -448,9 +448,6 @@ static void efx_init_channels(struct efx_nic *efx) WARN_ON(channel->rx_pkt != NULL); efx_rx_strategy(channel); - - netif_napi_add(channel->napi_dev, &channel->napi_str, - efx_poll, napi_weight); } } @@ -1321,6 +1318,8 @@ static int efx_init_napi(struct efx_nic *efx) efx_for_each_channel(channel, efx) { channel->napi_dev = efx->net_dev; + netif_napi_add(channel->napi_dev, &channel->napi_str, + efx_poll, napi_weight); } return 0; } @@ -1330,6 +1329,8 @@ static void efx_fini_napi(struct efx_nic *efx) struct efx_channel *channel; efx_for_each_channel(channel, efx) { + if (channel->napi_dev) + netif_napi_del(&channel->napi_str); channel->napi_dev = NULL; } } diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index d4629ab2c61..466a8abb005 100644 
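
Editorial note, not part of the patch: the phy.c hunk above fixes a unit error. The second argument of schedule_delayed_work() is a delay relative to now, measured in jiffies, not an absolute jiffies timestamp. A small example with hypothetical example_* names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void example_fn(struct work_struct *work)
{
}

static DECLARE_DELAYED_WORK(example_work, example_fn);

static void queue_example(void)
{
        /* run roughly one second from now */
        schedule_delayed_work(&example_work, HZ);

        /* Wrong: passing "jiffies + HZ" treats an absolute timestamp as a
         * delay, postponing the work by the system's entire uptime in ticks
         * plus one second -- the bug removed from phy_start_machine() and
         * phy_state_machine() above.
         */
}
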
--- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c @@ -1176,9 +1176,9 @@ void falcon_sim_phy_event(struct efx_nic *efx) EFX_POPULATE_QWORD_1(phy_event, EV_CODE, GLOBAL_EV_DECODE); if (EFX_IS10G(efx)) - EFX_SET_OWORD_FIELD(phy_event, XG_PHY_INTR, 1); + EFX_SET_QWORD_FIELD(phy_event, XG_PHY_INTR, 1); else - EFX_SET_OWORD_FIELD(phy_event, G_PHY0_INTR, 1); + EFX_SET_QWORD_FIELD(phy_event, G_PHY0_INTR, 1); falcon_generate_event(&efx->channel[0], &phy_event); } diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c index 7b1882765a0..3ab28bb00c1 100644 --- a/drivers/net/sh_eth.c +++ b/drivers/net/sh_eth.c @@ -1188,6 +1188,19 @@ out: return ret; } +static const struct net_device_ops sh_eth_netdev_ops = { + .ndo_open = sh_eth_open, + .ndo_stop = sh_eth_close, + .ndo_start_xmit = sh_eth_start_xmit, + .ndo_get_stats = sh_eth_get_stats, + .ndo_set_multicast_list = sh_eth_set_multicast_list, + .ndo_tx_timeout = sh_eth_tx_timeout, + .ndo_do_ioctl = sh_eth_do_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +}; + static int sh_eth_drv_probe(struct platform_device *pdev) { int ret, i, devno = 0; @@ -1240,13 +1253,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) mdp->edmac_endian = pd->edmac_endian; /* set function */ - ndev->open = sh_eth_open; - ndev->hard_start_xmit = sh_eth_start_xmit; - ndev->stop = sh_eth_close; - ndev->get_stats = sh_eth_get_stats; - ndev->set_multicast_list = sh_eth_set_multicast_list; - ndev->do_ioctl = sh_eth_do_ioctl; - ndev->tx_timeout = sh_eth_tx_timeout; + ndev->netdev_ops = &sh_eth_netdev_ops; ndev->watchdog_timeo = TX_TIMEOUT; mdp->post_rx = POST_RX >> (devno << 1); diff --git a/drivers/net/skge.c b/drivers/net/skge.c index b8978d4af1b..c11cdd08ec5 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -2674,7 +2674,7 @@ static int skge_down(struct net_device *dev) if (netif_msg_ifdown(skge)) printk(KERN_INFO PFX "%s: disabling interface\n", dev->name); - netif_stop_queue(dev); + netif_tx_disable(dev); if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC) del_timer_sync(&skge->link_timer); @@ -2881,7 +2881,6 @@ static void skge_tx_clean(struct net_device *dev) } skge->tx_ring.to_clean = e; - netif_wake_queue(dev); } static void skge_tx_timeout(struct net_device *dev) @@ -2893,6 +2892,7 @@ static void skge_tx_timeout(struct net_device *dev) skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP); skge_tx_clean(dev); + netif_wake_queue(dev); } static int skge_change_mtu(struct net_device *dev, int new_mtu) diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index 912308eec86..329f890e290 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h @@ -369,7 +369,7 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r, * MN10300/AM33 configuration */ -#include <asm/unit/smc91111.h> +#include <unit/smc91111.h> #else diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index 6da67812982..eb7db032a78 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c @@ -317,7 +317,7 @@ static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx) goto out; } - SMSC_WARNING(HW, "Timed out waiting for MII write to finish"); + SMSC_WARNING(HW, "Timed out waiting for MII read to finish"); reg = -EIO; out: diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c index e0d84772771..a39c0b9ba8b 100644 --- a/drivers/net/sun3_82586.c +++ b/drivers/net/sun3_82586.c @@ -331,6 +331,18 @@ out: return ERR_PTR(err); } 
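
Editorial note, not part of the patch: the niu.c and phy/fixed.c hunks earlier switch their failure checks to IS_ERR() because platform_device_register_simple() reports errors with an ERR_PTR-encoded pointer rather than NULL (the sun3 code above likewise hands errors back as ERR_PTR(err)). A minimal example of the pattern, with a made-up device name:

#include <linux/platform_device.h>
#include <linux/err.h>

static int register_example_bus(void)
{
        struct platform_device *pdev;

        pdev = platform_device_register_simple("example-bus", 0, NULL, 0);
        if (IS_ERR(pdev))
                return PTR_ERR(pdev);   /* e.g. -ENOMEM, -EEXIST, ... */

        platform_device_unregister(pdev);
        return 0;
}
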
+static const struct net_device_ops sun3_82586_netdev_ops = { + .ndo_open = sun3_82586_open, + .ndo_stop = sun3_82586_close, + .ndo_start_xmit = sun3_82586_send_packet, + .ndo_set_multicast_list = set_multicast_list, + .ndo_tx_timeout = sun3_82586_timeout, + .ndo_get_stats = sun3_82586_get_stats, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_change_mtu = eth_change_mtu, +}; + static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr) { int i, size, retval; @@ -381,13 +393,8 @@ static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr) printk("Memaddr: 0x%lx, Memsize: %d, IRQ %d\n",dev->mem_start,size, dev->irq); - dev->open = sun3_82586_open; - dev->stop = sun3_82586_close; - dev->get_stats = sun3_82586_get_stats; - dev->tx_timeout = sun3_82586_timeout; + dev->netdev_ops = &sun3_82586_netdev_ops; dev->watchdog_timeo = HZ/20; - dev->hard_start_xmit = sun3_82586_send_packet; - dev->set_multicast_list = set_multicast_list; dev->if_port = 0; return 0; diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c index d91e95b237b..0ce2db6ce2b 100644 --- a/drivers/net/tc35815.c +++ b/drivers/net/tc35815.c @@ -862,6 +862,22 @@ static int __devinit tc35815_init_dev_addr(struct net_device *dev) return 0; } +static const struct net_device_ops tc35815_netdev_ops = { + .ndo_open = tc35815_open, + .ndo_stop = tc35815_close, + .ndo_start_xmit = tc35815_send_packet, + .ndo_get_stats = tc35815_get_stats, + .ndo_set_multicast_list = tc35815_set_multicast_list, + .ndo_tx_timeout = tc35815_tx_timeout, + .ndo_do_ioctl = tc35815_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = tc35815_poll_controller, +#endif +}; + static int __devinit tc35815_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -904,21 +920,12 @@ static int __devinit tc35815_init_one(struct pci_dev *pdev, ioaddr = pcim_iomap_table(pdev)[1]; /* Initialize the device structure. */ - dev->open = tc35815_open; - dev->hard_start_xmit = tc35815_send_packet; - dev->stop = tc35815_close; - dev->get_stats = tc35815_get_stats; - dev->set_multicast_list = tc35815_set_multicast_list; - dev->do_ioctl = tc35815_ioctl; + dev->netdev_ops = &tc35815_netdev_ops; dev->ethtool_ops = &tc35815_ethtool_ops; - dev->tx_timeout = tc35815_tx_timeout; dev->watchdog_timeo = TC35815_TX_TIMEOUT; #ifdef TC35815_NAPI netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT); #endif -#ifdef CONFIG_NET_POLL_CONTROLLER - dev->poll_controller = tc35815_poll_controller; -#endif dev->irq = pdev->irq; dev->base_addr = (unsigned long)ioaddr; diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 6a736dda3ee..7a837c46596 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -12443,8 +12443,13 @@ static int __devinit tg3_get_device_address(struct tg3 *tp) /* Next, try NVRAM. */ if (!tg3_nvram_read_be32(tp, mac_offset + 0, &hi) && !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) { - memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2); - memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo)); + dev->dev_addr[0] = ((hi >> 16) & 0xff); + dev->dev_addr[1] = ((hi >> 24) & 0xff); + dev->dev_addr[2] = ((lo >> 0) & 0xff); + dev->dev_addr[3] = ((lo >> 8) & 0xff); + dev->dev_addr[4] = ((lo >> 16) & 0xff); + dev->dev_addr[5] = ((lo >> 24) & 0xff); + } /* Finally just fetch it out of the MAC control regs. 
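
Editorial illustration, not part of the patch: the tg3 hunk above stops memcpy()ing bytes out of the in-memory representation of hi and lo, which depends on host byte order, and instead extracts each MAC byte with an explicit shift, which yields the same bytes on any CPU. The register values below are made up.

#include <linux/types.h>

/* With hi = 0xDDCC0000 and lo = 0x55443322, this produces
 *   dev_addr[] = { 0xCC, 0xDD, 0x22, 0x33, 0x44, 0x55 }
 * regardless of whether the host is big- or little-endian.
 */
static void extract_mac_example(u32 hi, u32 lo, u8 dev_addr[6])
{
        dev_addr[0] = (hi >> 16) & 0xff;
        dev_addr[1] = (hi >> 24) & 0xff;
        dev_addr[2] = (lo >>  0) & 0xff;
        dev_addr[3] = (lo >>  8) & 0xff;
        dev_addr[4] = (lo >> 16) & 0xff;
        dev_addr[5] = (lo >> 24) & 0xff;
}
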
*/ else { diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c index bb43e7fb2a5..0f78f99f9b2 100644 --- a/drivers/net/tsi108_eth.c +++ b/drivers/net/tsi108_eth.c @@ -1561,6 +1561,18 @@ static const struct ethtool_ops tsi108_ethtool_ops = { .set_settings = tsi108_set_settings, }; +static const struct net_device_ops tsi108_netdev_ops = { + .ndo_open = tsi108_open, + .ndo_stop = tsi108_close, + .ndo_start_xmit = tsi108_send_packet, + .ndo_set_multicast_list = tsi108_set_rx_mode, + .ndo_get_stats = tsi108_get_stats, + .ndo_do_ioctl = tsi108_do_ioctl, + .ndo_set_mac_address = tsi108_set_mac, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, +}; + static int tsi108_init_one(struct platform_device *pdev) { @@ -1616,14 +1628,8 @@ tsi108_init_one(struct platform_device *pdev) data->phy_type = einfo->phy_type; data->irq_num = einfo->irq_num; data->id = pdev->id; - dev->open = tsi108_open; - dev->stop = tsi108_close; - dev->hard_start_xmit = tsi108_send_packet; - dev->set_mac_address = tsi108_set_mac; - dev->set_multicast_list = tsi108_set_rx_mode; - dev->get_stats = tsi108_get_stats; netif_napi_add(dev, &data->napi, tsi108_poll, 64); - dev->do_ioctl = tsi108_do_ioctl; + dev->netdev_ops = &tsi108_netdev_ops; dev->ethtool_ops = &tsi108_ethtool_ops; /* Apparently, the Linux networking code won't use scatter-gather diff --git a/drivers/net/tun.c b/drivers/net/tun.c index a1b0697340b..16716aef184 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -518,7 +518,7 @@ static inline struct sk_buff *tun_alloc_skb(struct tun_struct *tun, int err; /* Under a page? Don't bother with paged skb. */ - if (prepad + len < PAGE_SIZE) + if (prepad + len < PAGE_SIZE || !linear) linear = len; skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, @@ -565,7 +565,8 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) { align = NET_IP_ALIGN; - if (unlikely(len < ETH_HLEN)) + if (unlikely(len < ETH_HLEN || + (gso.hdr_len && gso.hdr_len < ETH_HLEN))) return -EINVAL; } diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index fb53ef872df..754a4b182c1 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c @@ -377,7 +377,7 @@ static void velocity_print_info(struct velocity_info *vptr); static int velocity_open(struct net_device *dev); static int velocity_change_mtu(struct net_device *dev, int mtu); static int velocity_xmit(struct sk_buff *skb, struct net_device *dev); -static int velocity_intr(int irq, void *dev_instance); +static irqreturn_t velocity_intr(int irq, void *dev_instance); static void velocity_set_multi(struct net_device *dev); static struct net_device_stats *velocity_get_stats(struct net_device *dev); static int velocity_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); @@ -2215,7 +2215,7 @@ out: * efficiently as possible. 
*/ -static int velocity_intr(int irq, void *dev_instance) +static irqreturn_t velocity_intr(int irq, void *dev_instance) { struct net_device *dev = dev_instance; struct velocity_info *vptr = netdev_priv(dev); diff --git a/drivers/net/wireless/ath9k/pci.c b/drivers/net/wireless/ath9k/pci.c index 6dbc58580ab..168411d322a 100644 --- a/drivers/net/wireless/ath9k/pci.c +++ b/drivers/net/wireless/ath9k/pci.c @@ -93,14 +93,14 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (pci_enable_device(pdev)) return -EIO; - ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { printk(KERN_ERR "ath9k: 32-bit DMA not available\n"); goto bad; } - ret = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { printk(KERN_ERR "ath9k: 32-bit DMA consistent " diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c index e3569a0a952..b1610ea4bb3 100644 --- a/drivers/net/wireless/p54/p54pci.c +++ b/drivers/net/wireless/p54/p54pci.c @@ -492,8 +492,8 @@ static int __devinit p54p_probe(struct pci_dev *pdev, goto err_disable_dev; } - if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) || - pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) { + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) || + pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { dev_err(&pdev->dev, "No suitable DMA available\n"); goto err_free_reg; } diff --git a/drivers/net/xtsonic.c b/drivers/net/xtsonic.c index a12a7211c98..5a4ad156f63 100644 --- a/drivers/net/xtsonic.c +++ b/drivers/net/xtsonic.c @@ -108,6 +108,18 @@ static int xtsonic_close(struct net_device *dev) return err; } +static const struct net_device_ops xtsonic_netdev_ops = { + .ndo_open = xtsonic_open, + .ndo_stop = xtsonic_close, + .ndo_start_xmit = sonic_send_packet, + .ndo_get_stats = sonic_get_stats, + .ndo_set_multicast_list = sonic_multicast_list, + .ndo_tx_timeout = sonic_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = eth_change_mtu, + .ndo_set_mac_address = eth_mac_addr, +}; + static int __init sonic_probe1(struct net_device *dev) { static unsigned version_printed = 0; @@ -205,12 +217,7 @@ static int __init sonic_probe1(struct net_device *dev) lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS * SONIC_BUS_SCALE(lp->dma_bitmode)); - dev->open = xtsonic_open; - dev->stop = xtsonic_close; - dev->hard_start_xmit = sonic_send_packet; - dev->get_stats = sonic_get_stats; - dev->set_multicast_list = &sonic_multicast_list; - dev->tx_timeout = sonic_tx_timeout; + dev->netdev_ops = &xtsonic_netdev_ops; dev->watchdog_timeo = TX_TIMEOUT; /* diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c index 4fa3bb2ddfe..33e5ade774c 100644 --- a/drivers/parisc/superio.c +++ b/drivers/parisc/superio.c @@ -434,7 +434,8 @@ static void __init superio_parport_init(void) 0 /*base_hi*/, PAR_IRQ, PARPORT_DMA_NONE /* dma */, - NULL /*struct pci_dev* */) ) + NULL /*struct pci_dev* */), + 0 /* shared irq flags */ ) printk(KERN_WARNING PFX "Probing parallel port failed.\n"); #endif /* CONFIG_PARPORT_PC */ diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index 25a00ce4f24..fa3a11365ec 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c @@ -173,12 +173,21 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header) struct dmar_drhd_unit *dmaru; int ret = 0; + drhd = (struct acpi_dmar_hardware_unit *)header; + if (!drhd->address) { + /* Promote an attitude of violence to a BIOS engineer 
today */ + WARN(1, "Your BIOS is broken; DMAR reported at address zero!\n" + "BIOS vendor: %s; Ver: %s; Product Version: %s\n", + dmi_get_system_info(DMI_BIOS_VENDOR), + dmi_get_system_info(DMI_BIOS_VERSION), + dmi_get_system_info(DMI_PRODUCT_VERSION)); + return -ENODEV; + } dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL); if (!dmaru) return -ENOMEM; dmaru->hdr = header; - drhd = (struct acpi_dmar_hardware_unit *)header; dmaru->reg_base_addr = drhd->address; dmaru->segment = drhd->segment; dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */ diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index fb3a3f3fca7..001b328adf8 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -733,8 +733,8 @@ static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end) start &= (((u64)1) << addr_width) - 1; end &= (((u64)1) << addr_width) - 1; /* in case it's partial page */ - start = PAGE_ALIGN(start); - end &= PAGE_MASK; + start &= PAGE_MASK; + end = PAGE_ALIGN(end); npages = (end - start) / VTD_PAGE_SIZE; /* we don't need lock here, nobody else touches the iova range */ diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index 45940f31fe9..218b9a16ac3 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c @@ -174,8 +174,7 @@ struct fujitsu_hotkey_t { static struct fujitsu_hotkey_t *fujitsu_hotkey; -static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event, - void *data); +static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event); #ifdef CONFIG_LEDS_CLASS static enum led_brightness logolamp_get(struct led_classdev *cdev); @@ -203,7 +202,7 @@ struct led_classdev kblamps_led = { static u32 dbg_level = 0x03; #endif -static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data); +static void acpi_fujitsu_notify(struct acpi_device *device, u32 event); /* Fujitsu ACPI interface function */ @@ -658,7 +657,6 @@ static struct dmi_system_id fujitsu_dmi_table[] = { static int acpi_fujitsu_add(struct acpi_device *device) { - acpi_status status; acpi_handle handle; int result = 0; int state = 0; @@ -673,20 +671,10 @@ static int acpi_fujitsu_add(struct acpi_device *device) sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS); device->driver_data = fujitsu; - status = acpi_install_notify_handler(device->handle, - ACPI_DEVICE_NOTIFY, - acpi_fujitsu_notify, fujitsu); - - if (ACPI_FAILURE(status)) { - printk(KERN_ERR "Error installing notify handler\n"); - error = -ENODEV; - goto err_stop; - } - fujitsu->input = input = input_allocate_device(); if (!input) { error = -ENOMEM; - goto err_uninstall_notify; + goto err_stop; } snprintf(fujitsu->phys, sizeof(fujitsu->phys), @@ -743,9 +731,6 @@ static int acpi_fujitsu_add(struct acpi_device *device) end: err_free_input_dev: input_free_device(input); -err_uninstall_notify: - acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, - acpi_fujitsu_notify); err_stop: return result; @@ -753,7 +738,6 @@ err_stop: static int acpi_fujitsu_remove(struct acpi_device *device, int type) { - acpi_status status; struct fujitsu_t *fujitsu = NULL; if (!device || !acpi_driver_data(device)) @@ -761,10 +745,6 @@ static int acpi_fujitsu_remove(struct acpi_device *device, int type) fujitsu = acpi_driver_data(device); - status = acpi_remove_notify_handler(fujitsu->acpi_handle, - ACPI_DEVICE_NOTIFY, - acpi_fujitsu_notify); - if (!device || !acpi_driver_data(device)) return -EINVAL; @@ -775,7 +755,7 @@ 
static int acpi_fujitsu_remove(struct acpi_device *device, int type) /* Brightness notify */ -static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data) +static void acpi_fujitsu_notify(struct acpi_device *device, u32 event) { struct input_dev *input; int keycode; @@ -829,15 +809,12 @@ static void acpi_fujitsu_notify(acpi_handle handle, u32 event, void *data) input_report_key(input, keycode, 0); input_sync(input); } - - return; } /* ACPI device for hotkey handling */ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) { - acpi_status status; acpi_handle handle; int result = 0; int state = 0; @@ -854,17 +831,6 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS); device->driver_data = fujitsu_hotkey; - status = acpi_install_notify_handler(device->handle, - ACPI_DEVICE_NOTIFY, - acpi_fujitsu_hotkey_notify, - fujitsu_hotkey); - - if (ACPI_FAILURE(status)) { - printk(KERN_ERR "Error installing notify handler\n"); - error = -ENODEV; - goto err_stop; - } - /* kfifo */ spin_lock_init(&fujitsu_hotkey->fifo_lock); fujitsu_hotkey->fifo = @@ -879,7 +845,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) fujitsu_hotkey->input = input = input_allocate_device(); if (!input) { error = -ENOMEM; - goto err_uninstall_notify; + goto err_free_fifo; } snprintf(fujitsu_hotkey->phys, sizeof(fujitsu_hotkey->phys), @@ -975,9 +941,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) end: err_free_input_dev: input_free_device(input); -err_uninstall_notify: - acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, - acpi_fujitsu_hotkey_notify); +err_free_fifo: kfifo_free(fujitsu_hotkey->fifo); err_stop: @@ -986,7 +950,6 @@ err_stop: static int acpi_fujitsu_hotkey_remove(struct acpi_device *device, int type) { - acpi_status status; struct fujitsu_hotkey_t *fujitsu_hotkey = NULL; if (!device || !acpi_driver_data(device)) @@ -994,10 +957,6 @@ static int acpi_fujitsu_hotkey_remove(struct acpi_device *device, int type) fujitsu_hotkey = acpi_driver_data(device); - status = acpi_remove_notify_handler(fujitsu_hotkey->acpi_handle, - ACPI_DEVICE_NOTIFY, - acpi_fujitsu_hotkey_notify); - fujitsu_hotkey->acpi_handle = NULL; kfifo_free(fujitsu_hotkey->fifo); @@ -1005,8 +964,7 @@ static int acpi_fujitsu_hotkey_remove(struct acpi_device *device, int type) return 0; } -static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event, - void *data) +static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event) { struct input_dev *input; int keycode, keycode_r; @@ -1089,8 +1047,6 @@ static void acpi_fujitsu_hotkey_notify(acpi_handle handle, u32 event, input_sync(input); break; } - - return; } /* Initialization */ @@ -1107,6 +1063,7 @@ static struct acpi_driver acpi_fujitsu_driver = { .ops = { .add = acpi_fujitsu_add, .remove = acpi_fujitsu_remove, + .notify = acpi_fujitsu_notify, }, }; @@ -1122,6 +1079,7 @@ static struct acpi_driver acpi_fujitsu_hotkey_driver = { .ops = { .add = acpi_fujitsu_hotkey_add, .remove = acpi_fujitsu_hotkey_remove, + .notify = acpi_fujitsu_hotkey_notify, }, }; diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c index a5ce4bc202e..fe7cf0188ac 100644 --- a/drivers/platform/x86/panasonic-laptop.c +++ b/drivers/platform/x86/panasonic-laptop.c @@ -176,6 +176,7 @@ enum SINF_BITS { SINF_NUM_BATTERIES = 0, static int acpi_pcc_hotkey_add(struct acpi_device *device); static int acpi_pcc_hotkey_remove(struct acpi_device 
*device, int type); static int acpi_pcc_hotkey_resume(struct acpi_device *device); +static void acpi_pcc_hotkey_notify(struct acpi_device *device, u32 event); static const struct acpi_device_id pcc_device_ids[] = { { "MAT0012", 0}, @@ -194,6 +195,7 @@ static struct acpi_driver acpi_pcc_driver = { .add = acpi_pcc_hotkey_add, .remove = acpi_pcc_hotkey_remove, .resume = acpi_pcc_hotkey_resume, + .notify = acpi_pcc_hotkey_notify, }, }; @@ -271,7 +273,7 @@ static int acpi_pcc_retrieve_biosdata(struct pcc_acpi *pcc, u32 *sinf) union acpi_object *hkey = NULL; int i; - status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SINF, 0, + status = acpi_evaluate_object(pcc->handle, METHOD_HKEY_SINF, NULL, &buffer); if (ACPI_FAILURE(status)) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, @@ -527,9 +529,9 @@ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc) return; } -static void acpi_pcc_hotkey_notify(acpi_handle handle, u32 event, void *data) +static void acpi_pcc_hotkey_notify(struct acpi_device *device, u32 event) { - struct pcc_acpi *pcc = (struct pcc_acpi *) data; + struct pcc_acpi *pcc = acpi_driver_data(device); switch (event) { case HKEY_NOTIFY: @@ -599,7 +601,6 @@ static int acpi_pcc_hotkey_resume(struct acpi_device *device) static int acpi_pcc_hotkey_add(struct acpi_device *device) { - acpi_status status; struct pcc_acpi *pcc; int num_sifr, result; @@ -640,22 +641,11 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device) goto out_sinf; } - /* initialize hotkey input device */ - status = acpi_install_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY, - acpi_pcc_hotkey_notify, pcc); - - if (ACPI_FAILURE(status)) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Error installing notify handler\n")); - result = -ENODEV; - goto out_input; - } - /* initialize backlight */ pcc->backlight = backlight_device_register("panasonic", NULL, pcc, &pcc_backlight_ops); if (IS_ERR(pcc->backlight)) - goto out_notify; + goto out_input; if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf)) { ACPI_DEBUG_PRINT((ACPI_DB_ERROR, @@ -680,9 +670,6 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device) out_backlight: backlight_device_unregister(pcc->backlight); -out_notify: - acpi_remove_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY, - acpi_pcc_hotkey_notify); out_input: input_unregister_device(pcc->input_dev); /* no need to input_free_device() since core input API refcount and @@ -723,9 +710,6 @@ static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type) backlight_device_unregister(pcc->backlight); - acpi_remove_notify_handler(pcc->handle, ACPI_DEVICE_NOTIFY, - acpi_pcc_hotkey_notify); - input_unregister_device(pcc->input_dev); /* no need to input_free_device() since core input API refcount and * free()s the device */ diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index a90ec5cb2f2..d3c92d777bd 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -914,7 +914,7 @@ static struct sony_nc_event sony_127_events[] = { /* * ACPI callbacks */ -static void sony_acpi_notify(acpi_handle handle, u32 event, void *data) +static void sony_nc_notify(struct acpi_device *device, u32 event) { u32 ev = event; @@ -933,7 +933,7 @@ static void sony_acpi_notify(acpi_handle handle, u32 event, void *data) struct sony_nc_event *key_event; if (sony_call_snc_handle(key_handle, 0x200, &result)) { - dprintk("sony_acpi_notify, unable to decode" + dprintk("sony_nc_notify, unable to decode" " event 0x%.2x 0x%.2x\n", key_handle, ev); /* restore the original event */ @@ 
-968,7 +968,7 @@ static void sony_acpi_notify(acpi_handle handle, u32 event, void *data) } else sony_laptop_report_input_event(ev); - dprintk("sony_acpi_notify, event: 0x%.2x\n", ev); + dprintk("sony_nc_notify, event: 0x%.2x\n", ev); acpi_bus_generate_proc_event(sony_nc_acpi_device, 1, ev); } @@ -1276,15 +1276,6 @@ static int sony_nc_add(struct acpi_device *device) goto outwalk; } - status = acpi_install_notify_handler(sony_nc_acpi_handle, - ACPI_DEVICE_NOTIFY, - sony_acpi_notify, NULL); - if (ACPI_FAILURE(status)) { - printk(KERN_WARNING DRV_PFX "unable to install notify handler (%u)\n", status); - result = -ENODEV; - goto outinput; - } - if (acpi_video_backlight_support()) { printk(KERN_INFO DRV_PFX "brightness ignored, must be " "controlled by ACPI video driver\n"); @@ -1362,13 +1353,6 @@ static int sony_nc_add(struct acpi_device *device) if (sony_backlight_device) backlight_device_unregister(sony_backlight_device); - status = acpi_remove_notify_handler(sony_nc_acpi_handle, - ACPI_DEVICE_NOTIFY, - sony_acpi_notify); - if (ACPI_FAILURE(status)) - printk(KERN_WARNING DRV_PFX "unable to remove notify handler\n"); - - outinput: sony_laptop_remove_input(); outwalk: @@ -1378,7 +1362,6 @@ static int sony_nc_add(struct acpi_device *device) static int sony_nc_remove(struct acpi_device *device, int type) { - acpi_status status; struct sony_nc_value *item; if (sony_backlight_device) @@ -1386,12 +1369,6 @@ static int sony_nc_remove(struct acpi_device *device, int type) sony_nc_acpi_device = NULL; - status = acpi_remove_notify_handler(sony_nc_acpi_handle, - ACPI_DEVICE_NOTIFY, - sony_acpi_notify); - if (ACPI_FAILURE(status)) - printk(KERN_WARNING DRV_PFX "unable to remove notify handler\n"); - for (item = sony_nc_values; item->name; ++item) { device_remove_file(&sony_pf_device->dev, &item->devattr); } @@ -1425,6 +1402,7 @@ static struct acpi_driver sony_nc_driver = { .add = sony_nc_add, .remove = sony_nc_remove, .resume = sony_nc_resume, + .notify = sony_nc_notify, }, }; diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index 2f269e117b8..043b208d971 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -81,6 +81,7 @@ static struct wmi_block wmi_blocks; static int acpi_wmi_remove(struct acpi_device *device, int type); static int acpi_wmi_add(struct acpi_device *device); +static void acpi_wmi_notify(struct acpi_device *device, u32 event); static const struct acpi_device_id wmi_device_ids[] = { {"PNP0C14", 0}, @@ -96,6 +97,7 @@ static struct acpi_driver acpi_wmi_driver = { .ops = { .add = acpi_wmi_add, .remove = acpi_wmi_remove, + .notify = acpi_wmi_notify, }, }; @@ -643,12 +645,11 @@ acpi_wmi_ec_space_handler(u32 function, acpi_physical_address address, } } -static void acpi_wmi_notify(acpi_handle handle, u32 event, void *data) +static void acpi_wmi_notify(struct acpi_device *device, u32 event) { struct guid_block *block; struct wmi_block *wblock; struct list_head *p; - struct acpi_device *device = data; list_for_each(p, &wmi_blocks.list) { wblock = list_entry(p, struct wmi_block, list); @@ -669,9 +670,6 @@ static void acpi_wmi_notify(acpi_handle handle, u32 event, void *data) static int acpi_wmi_remove(struct acpi_device *device, int type) { - acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, - acpi_wmi_notify); - acpi_remove_address_space_handler(device->handle, ACPI_ADR_SPACE_EC, &acpi_wmi_ec_space_handler); @@ -683,13 +681,6 @@ static int __init acpi_wmi_add(struct acpi_device *device) acpi_status status; int result = 0; - status = 
acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY, - acpi_wmi_notify, device); - if (ACPI_FAILURE(status)) { - printk(KERN_ERR PREFIX "Error installing notify handler\n"); - return -ENODEV; - } - status = acpi_install_address_space_handler(device->handle, ACPI_ADR_SPACE_EC, &acpi_wmi_ec_space_handler, diff --git a/drivers/power/pcf50633-charger.c b/drivers/power/pcf50633-charger.c index 41aec2acbb9..e8b278f7178 100644 --- a/drivers/power/pcf50633-charger.c +++ b/drivers/power/pcf50633-charger.c @@ -36,6 +36,8 @@ struct pcf50633_mbc { struct power_supply usb; struct power_supply adapter; + + struct delayed_work charging_restart_work; }; int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma) @@ -43,6 +45,8 @@ int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma) struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev); int ret = 0; u8 bits; + int charging_start = 1; + u8 mbcs2, chgmod; if (ma >= 1000) bits = PCF50633_MBCC7_USB_1000mA; @@ -50,8 +54,10 @@ int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma) bits = PCF50633_MBCC7_USB_500mA; else if (ma >= 100) bits = PCF50633_MBCC7_USB_100mA; - else + else { bits = PCF50633_MBCC7_USB_SUSPEND; + charging_start = 0; + } ret = pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC7, PCF50633_MBCC7_USB_MASK, bits); @@ -60,6 +66,22 @@ int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma) else dev_info(pcf->dev, "usb curlim to %d mA\n", ma); + /* Manual charging start */ + mbcs2 = pcf50633_reg_read(pcf, PCF50633_REG_MBCS2); + chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK); + + /* If chgmod == BATFULL, setting chgena has no effect. + * We need to set resume instead. + */ + if (chgmod != PCF50633_MBCS2_MBC_BAT_FULL) + pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC1, + PCF50633_MBCC1_CHGENA, PCF50633_MBCC1_CHGENA); + else + pcf50633_reg_set_bit_mask(pcf, PCF50633_REG_MBCC1, + PCF50633_MBCC1_RESUME, PCF50633_MBCC1_RESUME); + + mbc->usb_active = charging_start; + power_supply_changed(&mbc->usb); return ret; @@ -84,21 +106,6 @@ int pcf50633_mbc_get_status(struct pcf50633 *pcf) } EXPORT_SYMBOL_GPL(pcf50633_mbc_get_status); -void pcf50633_mbc_set_status(struct pcf50633 *pcf, int what, int status) -{ - struct pcf50633_mbc *mbc = platform_get_drvdata(pcf->mbc_pdev); - - if (what & PCF50633_MBC_USB_ONLINE) - mbc->usb_online = !!status; - if (what & PCF50633_MBC_USB_ACTIVE) - mbc->usb_active = !!status; - if (what & PCF50633_MBC_ADAPTER_ONLINE) - mbc->adapter_online = !!status; - if (what & PCF50633_MBC_ADAPTER_ACTIVE) - mbc->adapter_active = !!status; -} -EXPORT_SYMBOL_GPL(pcf50633_mbc_set_status); - static ssize_t show_chgmode(struct device *dev, struct device_attribute *attr, char *buf) { @@ -160,10 +167,44 @@ static struct attribute_group mbc_attr_group = { .attrs = pcf50633_mbc_sysfs_entries, }; +/* MBC state machine switches into charging mode when the battery voltage + * falls below 96% of a battery float voltage. But the voltage drop in Li-ion + * batteries is marginal(1~2 %) till about 80% of its capacity - which means, + * after a BATFULL, charging won't be restarted until 80%. 
+ * + * This work_struct function restarts charging at regular intervals to make + * sure we don't discharge too much + */ + +static void pcf50633_mbc_charging_restart(struct work_struct *work) +{ + struct pcf50633_mbc *mbc; + u8 mbcs2, chgmod; + + mbc = container_of(work, struct pcf50633_mbc, + charging_restart_work.work); + + mbcs2 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2); + chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK); + + if (chgmod != PCF50633_MBCS2_MBC_BAT_FULL) + return; + + /* Restart charging */ + pcf50633_reg_set_bit_mask(mbc->pcf, PCF50633_REG_MBCC1, + PCF50633_MBCC1_RESUME, PCF50633_MBCC1_RESUME); + mbc->usb_active = 1; + power_supply_changed(&mbc->usb); + + dev_info(mbc->pcf->dev, "Charging restarted\n"); +} + static void pcf50633_mbc_irq_handler(int irq, void *data) { struct pcf50633_mbc *mbc = data; + int chg_restart_interval = + mbc->pcf->pdata->charging_restart_interval; /* USB */ if (irq == PCF50633_IRQ_USBINS) { @@ -172,6 +213,7 @@ pcf50633_mbc_irq_handler(int irq, void *data) mbc->usb_online = 0; mbc->usb_active = 0; pcf50633_mbc_usb_curlim_set(mbc->pcf, 0); + cancel_delayed_work_sync(&mbc->charging_restart_work); } /* Adapter */ @@ -186,7 +228,14 @@ pcf50633_mbc_irq_handler(int irq, void *data) if (irq == PCF50633_IRQ_BATFULL) { mbc->usb_active = 0; mbc->adapter_active = 0; - } + + if (chg_restart_interval > 0) + schedule_delayed_work(&mbc->charging_restart_work, + chg_restart_interval); + } else if (irq == PCF50633_IRQ_USBLIMON) + mbc->usb_active = 0; + else if (irq == PCF50633_IRQ_USBLIMOFF) + mbc->usb_active = 1; power_supply_changed(&mbc->usb); power_supply_changed(&mbc->adapter); @@ -303,6 +352,9 @@ static int __devinit pcf50633_mbc_probe(struct platform_device *pdev) return ret; } + INIT_DELAYED_WORK(&mbc->charging_restart_work, + pcf50633_mbc_charging_restart); + ret = sysfs_create_group(&pdev->dev.kobj, &mbc_attr_group); if (ret) dev_err(mbc->pcf->dev, "failed to create sysfs entries\n"); @@ -328,6 +380,8 @@ static int __devexit pcf50633_mbc_remove(struct platform_device *pdev) power_supply_unregister(&mbc->usb); power_supply_unregister(&mbc->adapter); + cancel_delayed_work_sync(&mbc->charging_restart_work); + kfree(mbc); return 0; diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c index b56a704409d..a232de6a570 100644 --- a/drivers/power/pda_power.c +++ b/drivers/power/pda_power.c @@ -12,11 +12,14 @@ #include <linux/module.h> #include <linux/platform_device.h> +#include <linux/err.h> #include <linux/interrupt.h> #include <linux/power_supply.h> #include <linux/pda_power.h> +#include <linux/regulator/consumer.h> #include <linux/timer.h> #include <linux/jiffies.h> +#include <linux/usb/otg.h> static inline unsigned int get_irq_flags(struct resource *res) { @@ -35,6 +38,11 @@ static struct timer_list supply_timer; static struct timer_list polling_timer; static int polling; +#ifdef CONFIG_USB_OTG_UTILS +static struct otg_transceiver *transceiver; +#endif +static struct regulator *ac_draw; + enum { PDA_PSY_OFFLINE = 0, PDA_PSY_ONLINE = 1, @@ -104,18 +112,35 @@ static void update_status(void) static void update_charger(void) { - if (!pdata->set_charge) - return; - - if (new_ac_status > 0) { - dev_dbg(dev, "charger on (AC)\n"); - pdata->set_charge(PDA_POWER_CHARGE_AC); - } else if (new_usb_status > 0) { - dev_dbg(dev, "charger on (USB)\n"); - pdata->set_charge(PDA_POWER_CHARGE_USB); - } else { - dev_dbg(dev, "charger off\n"); - pdata->set_charge(0); + static int regulator_enabled; + int max_uA = pdata->ac_max_uA; + + if (pdata->set_charge) { + if 
(new_ac_status > 0) { + dev_dbg(dev, "charger on (AC)\n"); + pdata->set_charge(PDA_POWER_CHARGE_AC); + } else if (new_usb_status > 0) { + dev_dbg(dev, "charger on (USB)\n"); + pdata->set_charge(PDA_POWER_CHARGE_USB); + } else { + dev_dbg(dev, "charger off\n"); + pdata->set_charge(0); + } + } else if (ac_draw) { + if (new_ac_status > 0) { + regulator_set_current_limit(ac_draw, max_uA, max_uA); + if (!regulator_enabled) { + dev_dbg(dev, "charger on (AC)\n"); + regulator_enable(ac_draw); + regulator_enabled = 1; + } + } else { + if (regulator_enabled) { + dev_dbg(dev, "charger off\n"); + regulator_disable(ac_draw); + regulator_enabled = 0; + } + } } } @@ -194,6 +219,13 @@ static void polling_timer_func(unsigned long unused) jiffies + msecs_to_jiffies(pdata->polling_interval)); } +#ifdef CONFIG_USB_OTG_UTILS +static int otg_is_usb_online(void) +{ + return (transceiver->state == OTG_STATE_B_PERIPHERAL); +} +#endif + static int pda_power_probe(struct platform_device *pdev) { int ret = 0; @@ -227,6 +259,9 @@ static int pda_power_probe(struct platform_device *pdev) if (!pdata->polling_interval) pdata->polling_interval = 2000; + if (!pdata->ac_max_uA) + pdata->ac_max_uA = 500000; + setup_timer(&charger_timer, charger_timer_func, 0); setup_timer(&supply_timer, supply_timer_func, 0); @@ -240,6 +275,13 @@ static int pda_power_probe(struct platform_device *pdev) pda_psy_usb.num_supplicants = pdata->num_supplicants; } + ac_draw = regulator_get(dev, "ac_draw"); + if (IS_ERR(ac_draw)) { + dev_dbg(dev, "couldn't get ac_draw regulator\n"); + ac_draw = NULL; + ret = PTR_ERR(ac_draw); + } + if (pdata->is_ac_online) { ret = power_supply_register(&pdev->dev, &pda_psy_ac); if (ret) { @@ -261,6 +303,13 @@ static int pda_power_probe(struct platform_device *pdev) } } +#ifdef CONFIG_USB_OTG_UTILS + transceiver = otg_get_transceiver(); + if (transceiver && !pdata->is_usb_online) { + pdata->is_usb_online = otg_is_usb_online; + } +#endif + if (pdata->is_usb_online) { ret = power_supply_register(&pdev->dev, &pda_psy_usb); if (ret) { @@ -300,10 +349,18 @@ usb_irq_failed: usb_supply_failed: if (pdata->is_ac_online && ac_irq) free_irq(ac_irq->start, &pda_psy_ac); +#ifdef CONFIG_USB_OTG_UTILS + if (transceiver) + otg_put_transceiver(transceiver); +#endif ac_irq_failed: if (pdata->is_ac_online) power_supply_unregister(&pda_psy_ac); ac_supply_failed: + if (ac_draw) { + regulator_put(ac_draw); + ac_draw = NULL; + } if (pdata->exit) pdata->exit(dev); init_failed: @@ -327,6 +384,14 @@ static int pda_power_remove(struct platform_device *pdev) power_supply_unregister(&pda_psy_usb); if (pdata->is_ac_online) power_supply_unregister(&pda_psy_ac); +#ifdef CONFIG_USB_OTG_UTILS + if (transceiver) + otg_put_transceiver(transceiver); +#endif + if (ac_draw) { + regulator_put(ac_draw); + ac_draw = NULL; + } if (pdata->exit) pdata->exit(dev); diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 0570794ccf1..d1815272c43 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -20,6 +20,7 @@ #include <linux/slab.h> #include <linux/buffer_head.h> #include <linux/hdreg.h> +#include <linux/async.h> #include <asm/ccwdev.h> #include <asm/ebcdic.h> @@ -480,8 +481,10 @@ static void dasd_change_state(struct dasd_device *device) if (rc && rc != -EAGAIN) device->target = device->state; - if (device->state == device->target) + if (device->state == device->target) { wake_up(&dasd_init_waitq); + dasd_put_device(device); + } /* let user-space know that the device status changed */ kobject_uevent(&device->cdev->dev.kobj, 
KOBJ_CHANGE); @@ -513,12 +516,15 @@ void dasd_kick_device(struct dasd_device *device) */ void dasd_set_target_state(struct dasd_device *device, int target) { + dasd_get_device(device); /* If we are in probeonly mode stop at DASD_STATE_READY. */ if (dasd_probeonly && target > DASD_STATE_READY) target = DASD_STATE_READY; if (device->target != target) { - if (device->state == target) + if (device->state == target) { wake_up(&dasd_init_waitq); + dasd_put_device(device); + } device->target = target; } if (device->state != device->target) @@ -2148,6 +2154,22 @@ dasd_exit(void) * SECTION: common functions for ccw_driver use */ +static void dasd_generic_auto_online(void *data, async_cookie_t cookie) +{ + struct ccw_device *cdev = data; + int ret; + + ret = ccw_device_set_online(cdev); + if (ret) + pr_warning("%s: Setting the DASD online failed with rc=%d\n", + dev_name(&cdev->dev), ret); + else { + struct dasd_device *device = dasd_device_from_cdev(cdev); + wait_event(dasd_init_waitq, _wait_for_device(device)); + dasd_put_device(device); + } +} + /* * Initial attempt at a probe function. this can be simplified once * the other detection code is gone. @@ -2180,10 +2202,7 @@ int dasd_generic_probe(struct ccw_device *cdev, */ if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) || (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) - ret = ccw_device_set_online(cdev); - if (ret) - pr_warning("%s: Setting the DASD online failed with rc=%d\n", - dev_name(&cdev->dev), ret); + async_schedule(dasd_generic_auto_online, cdev); return 0; } @@ -2290,13 +2309,7 @@ int dasd_generic_set_online(struct ccw_device *cdev, } else pr_debug("dasd_generic device %s found\n", dev_name(&cdev->dev)); - - /* FIXME: we have to wait for the root device but we don't want - * to wait for each single device but for all at once. */ - wait_event(dasd_init_waitq, _wait_for_device(device)); - dasd_put_device(device); - return rc; } diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 21254793c60..cb52da033f0 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -2019,15 +2019,23 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( ccw++; recid += count; new_track = 0; + /* first idaw for a ccw may start anywhere */ + if (!idaw_dst) + idaw_dst = dst; } - /* If we start a new idaw, everything is fine and the - * start of the new idaw is the start of this segment. + /* If we start a new idaw, we must make sure that it + * starts on an IDA_BLOCK_SIZE boundary. 
* If we continue an idaw, we must make sure that the * current segment begins where the so far accumulated * idaw ends */ - if (!idaw_dst) - idaw_dst = dst; + if (!idaw_dst) { + if (__pa(dst) & (IDA_BLOCK_SIZE-1)) { + dasd_sfree_request(cqr, startdev); + return ERR_PTR(-ERANGE); + } else + idaw_dst = dst; + } if ((idaw_dst + idaw_len) != dst) { dasd_sfree_request(cqr, startdev); return ERR_PTR(-ERANGE); diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 9e8a2914259..accd957454e 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -881,42 +881,6 @@ no_handler: qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); } -static void qdio_call_shutdown(struct work_struct *work) -{ - struct ccw_device_private *priv; - struct ccw_device *cdev; - - priv = container_of(work, struct ccw_device_private, kick_work); - cdev = priv->cdev; - qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); - put_device(&cdev->dev); -} - -static void qdio_int_error(struct ccw_device *cdev) -{ - struct qdio_irq *irq_ptr = cdev->private->qdio_data; - - switch (irq_ptr->state) { - case QDIO_IRQ_STATE_INACTIVE: - case QDIO_IRQ_STATE_CLEANUP: - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); - break; - case QDIO_IRQ_STATE_ESTABLISHED: - case QDIO_IRQ_STATE_ACTIVE: - qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); - if (get_device(&cdev->dev)) { - /* Can't call shutdown from interrupt context. */ - PREPARE_WORK(&cdev->private->kick_work, - qdio_call_shutdown); - queue_work(ccw_device_work, &cdev->private->kick_work); - } - break; - default: - WARN_ON(1); - } - wake_up(&cdev->private->wait_q); -} - static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat, int dstat) { @@ -973,10 +937,8 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, switch (PTR_ERR(irb)) { case -EIO: DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no); - return; - case -ETIMEDOUT: - DBF_ERROR("%4x IO timeout", irq_ptr->schid.sch_no); - qdio_int_error(cdev); + qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); + wake_up(&cdev->private->wait_q); return; default: WARN_ON(1); @@ -1001,7 +963,6 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, case QDIO_IRQ_STATE_ACTIVE: if (cstat & SCHN_STAT_PCI) { qdio_int_handler_pci(irq_ptr); - /* no state change so no need to wake up wait_q */ return; } if ((cstat & ~SCHN_STAT_PCI) || dstat) { diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c index e6d1fc8c54f..a85ad05e854 100644 --- a/drivers/sbus/char/jsflash.c +++ b/drivers/sbus/char/jsflash.c @@ -383,18 +383,22 @@ static int jsf_ioctl_program(void __user *arg) return 0; } -static int jsf_ioctl(struct inode *inode, struct file *f, unsigned int cmd, - unsigned long arg) +static long jsf_ioctl(struct file *f, unsigned int cmd, unsigned long arg) { + lock_kernel(); int error = -ENOTTY; void __user *argp = (void __user *)arg; - if (!capable(CAP_SYS_ADMIN)) + if (!capable(CAP_SYS_ADMIN)) { + unlock_kernel(); return -EPERM; + } switch (cmd) { case JSFLASH_IDENT: - if (copy_to_user(argp, &jsf0.id, JSFIDSZ)) + if (copy_to_user(argp, &jsf0.id, JSFIDSZ)) { + unlock_kernel(); return -EFAULT; + } break; case JSFLASH_ERASE: error = jsf_ioctl_erase(arg); @@ -404,6 +408,7 @@ static int jsf_ioctl(struct inode *inode, struct file *f, unsigned int cmd, break; } + unlock_kernel(); return error; } @@ -439,7 +444,7 @@ static const struct file_operations jsf_fops = { .llseek = jsf_lseek, .read = jsf_read, .write = jsf_write, - .ioctl = jsf_ioctl, + .unlocked_ioctl = 
jsf_ioctl, .mmap = jsf_mmap, .open = jsf_open, .release = jsf_release, diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c index 27993c37775..2c56fd56ec6 100644 --- a/drivers/sbus/char/uctrl.c +++ b/drivers/sbus/char/uctrl.c @@ -197,9 +197,8 @@ static struct uctrl_driver { static void uctrl_get_event_status(struct uctrl_driver *); static void uctrl_get_external_status(struct uctrl_driver *); -static int -uctrl_ioctl(struct inode *inode, struct file *file, - unsigned int cmd, unsigned long arg) +static long +uctrl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { switch (cmd) { default: @@ -226,7 +225,7 @@ static irqreturn_t uctrl_interrupt(int irq, void *dev_id) static const struct file_operations uctrl_fops = { .owner = THIS_MODULE, .llseek = no_llseek, - .ioctl = uctrl_ioctl, + .unlocked_ioctl = uctrl_ioctl, .open = uctrl_open, }; diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index fdb14ec4fd4..8b7983aba8f 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -2234,10 +2234,10 @@ static int twa_resume(struct pci_dev *pdev) pci_set_master(pdev); pci_try_set_mwi(pdev); - if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) - || pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) - if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) - || pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) { + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) + || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) + || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume"); retval = -ENODEV; goto out_disable_device; diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index 280261c451d..2a889853a10 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -1378,7 +1378,7 @@ int aac_get_adapter_info(struct aac_dev* dev) if (dev->nondasd_support && !dev->in_reset) printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id); - if (dma_get_required_mask(&dev->pdev->dev) > DMA_32BIT_MASK) + if (dma_get_required_mask(&dev->pdev->dev) > DMA_BIT_MASK(32)) dev->needs_dac = 1; dev->dac_support = 0; if ((sizeof(dma_addr_t) > 4) && dev->needs_dac && diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index b1bd3fc7bae..36fd2e75da1 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -1394,7 +1394,7 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, */ cmd->sense_buffer[8] = 0; /* Information */ cmd->sense_buffer[9] = 0xa; /* Add. 
length */ - do_div(bghm, cmd->device->sector_size); + bghm /= cmd->device->sector_size; failing_sector = scsi_get_lba(cmd); failing_sector += bghm; diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 52427a8324f..a91f5143cea 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -855,9 +855,9 @@ _base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev) if (sizeof(dma_addr_t) > 4) { const uint64_t required_mask = dma_get_required_mask(&pdev->dev); - if ((required_mask > DMA_32BIT_MASK) && !pci_set_dma_mask(pdev, - DMA_64BIT_MASK) && !pci_set_consistent_dma_mask(pdev, - DMA_64BIT_MASK)) { + if ((required_mask > DMA_BIT_MASK(32)) && !pci_set_dma_mask(pdev, + DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(pdev, + DMA_BIT_MASK(64))) { ioc->base_add_sg_single = &_base_add_sg_single_64; ioc->sge_size = sizeof(Mpi2SGESimple64_t); desc = "64"; @@ -865,8 +865,8 @@ _base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev) } } - if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK) - && !pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) { + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) + && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { ioc->base_add_sg_single = &_base_add_sg_single_32; ioc->sge_size = sizeof(Mpi2SGESimple32_t); desc = "32"; diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index 687dcf2d015..5defe5ea5ed 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c @@ -1663,7 +1663,7 @@ qla1280_load_firmware_pio(struct scsi_qla_host *ha) /* Load RISC code. */ risc_address = ha->fwstart; - fw_data = (const __le16 *)&fw->data[4]; + fw_data = (const __le16 *)&fw->data[6]; risc_code_size = (fw->size - 6) / 2; for (i = 0; i < risc_code_size; i++) { @@ -1722,7 +1722,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha) /* Load RISC code. */ risc_address = ha->fwstart; - fw_data = (const __le16 *)&fw->data[4]; + fw_data = (const __le16 *)&fw->data[6]; risc_code_size = (fw->size - 6) / 2; dprintk(1, "%s: DMA RISC code (%i) words\n", diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index e1850904ff7..fbc83bebdd8 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -38,9 +38,6 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) { }; #endif -/* scsi_scan.c */ -int scsi_complete_async_scans(void); - /* scsi_devinfo.c */ extern int scsi_get_device_flags(struct scsi_device *sdev, const unsigned char *vendor, diff --git a/drivers/scsi/scsi_wait_scan.c b/drivers/scsi/scsi_wait_scan.c index 8a636103083..2f21af21269 100644 --- a/drivers/scsi/scsi_wait_scan.c +++ b/drivers/scsi/scsi_wait_scan.c @@ -11,7 +11,7 @@ */ #include <linux/module.h> -#include "scsi_priv.h" +#include <scsi/scsi_scan.h> static int __init wait_scan_init(void) { diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index 0328fd4006e..343e3a35b6a 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig @@ -854,7 +854,7 @@ config SERIAL_IMX_CONSOLE config SERIAL_UARTLITE tristate "Xilinx uartlite serial port support" - depends on PPC32 + depends on PPC32 || MICROBLAZE select SERIAL_CORE help Say Y here if you want to use the Xilinx uartlite serial controller. 
@@ -1340,7 +1340,7 @@ config SERIAL_NETX_CONSOLE config SERIAL_OF_PLATFORM tristate "Serial port on Open Firmware platform bus" - depends on PPC_OF + depends on PPC_OF || MICROBLAZE depends on SERIAL_8250 || SERIAL_OF_PLATFORM_NWPSERIAL help If you have a PowerPC based system that has serial ports diff --git a/drivers/serial/max3100.c b/drivers/serial/max3100.c new file mode 100644 index 00000000000..9fd33e5622b --- /dev/null +++ b/drivers/serial/max3100.c @@ -0,0 +1,927 @@ +/* + * + * Copyright (C) 2008 Christian Pellegrin <chripell@evolware.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * + * Notes: the MAX3100 doesn't provide an interrupt on CTS so we have + * to use polling for flow control. TX empty IRQ is unusable, since + * writing conf clears FIFO buffer and we cannot have this interrupt + * always asking us for attention. + * + * Example platform data: + + static struct plat_max3100 max3100_plat_data = { + .loopback = 0, + .crystal = 0, + .poll_time = 100, + }; + + static struct spi_board_info spi_board_info[] = { + { + .modalias = "max3100", + .platform_data = &max3100_plat_data, + .irq = IRQ_EINT12, + .max_speed_hz = 5*1000*1000, + .chip_select = 0, + }, + }; + + * The initial minor number is 209 in the low-density serial port: + * mknod /dev/ttyMAX0 c 204 209 + */ + +#define MAX3100_MAJOR 204 +#define MAX3100_MINOR 209 +/* 4 MAX3100s should be enough for everyone */ +#define MAX_MAX3100 4 + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/serial_core.h> +#include <linux/serial.h> +#include <linux/spi/spi.h> +#include <linux/freezer.h> + +#include <linux/serial_max3100.h> + +#define MAX3100_C (1<<14) +#define MAX3100_D (0<<14) +#define MAX3100_W (1<<15) +#define MAX3100_RX (0<<15) + +#define MAX3100_WC (MAX3100_W | MAX3100_C) +#define MAX3100_RC (MAX3100_RX | MAX3100_C) +#define MAX3100_WD (MAX3100_W | MAX3100_D) +#define MAX3100_RD (MAX3100_RX | MAX3100_D) +#define MAX3100_CMD (3 << 14) + +#define MAX3100_T (1<<14) +#define MAX3100_R (1<<15) + +#define MAX3100_FEN (1<<13) +#define MAX3100_SHDN (1<<12) +#define MAX3100_TM (1<<11) +#define MAX3100_RM (1<<10) +#define MAX3100_PM (1<<9) +#define MAX3100_RAM (1<<8) +#define MAX3100_IR (1<<7) +#define MAX3100_ST (1<<6) +#define MAX3100_PE (1<<5) +#define MAX3100_L (1<<4) +#define MAX3100_BAUD (0xf) + +#define MAX3100_TE (1<<10) +#define MAX3100_RAFE (1<<10) +#define MAX3100_RTS (1<<9) +#define MAX3100_CTS (1<<9) +#define MAX3100_PT (1<<8) +#define MAX3100_DATA (0xff) + +#define MAX3100_RT (MAX3100_R | MAX3100_T) +#define MAX3100_RTC (MAX3100_RT | MAX3100_CTS | MAX3100_RAFE) + +/* the following simulate a status reg for ignore_status_mask */ +#define MAX3100_STATUS_PE 1 +#define MAX3100_STATUS_FE 2 +#define MAX3100_STATUS_OE 4 + +struct max3100_port { + struct uart_port port; + struct spi_device *spi; + + int cts; /* last CTS received for flow ctrl */ + int tx_empty; /* last TX empty bit */ + + spinlock_t conf_lock; /* shared data */ + int conf_commit; /* need to make changes */ + int conf; /* configuration for the MAX31000 + * (bits 0-7, bits 8-11 are irqs) */ + int rts_commit; /* need to change rts */ + int rts; /* rts status */ + int baud; /* current baud rate */ + + int parity; /* keeps track if we should send parity */ +#define MAX3100_PARITY_ON 1 +#define MAX3100_PARITY_ODD 2 +#define 
MAX3100_7BIT 4 + int rx_enabled; /* if we should rx chars */ + + int irq; /* irq assigned to the max3100 */ + + int minor; /* minor number */ + int crystal; /* 1 if 3.6864Mhz crystal 0 for 1.8432 */ + int loopback; /* 1 if we are in loopback mode */ + + /* for handling irqs: need workqueue since we do spi_sync */ + struct workqueue_struct *workqueue; + struct work_struct work; + /* set to 1 to make the workhandler exit as soon as possible */ + int force_end_work; + /* need to know we are suspending to avoid deadlock on workqueue */ + int suspending; + + /* hook for suspending MAX3100 via dedicated pin */ + void (*max3100_hw_suspend) (int suspend); + + /* poll time (in ms) for ctrl lines */ + int poll_time; + /* and its timer */ + struct timer_list timer; +}; + +static struct max3100_port *max3100s[MAX_MAX3100]; /* the chips */ +static DEFINE_MUTEX(max3100s_lock); /* race on probe */ + +static int max3100_do_parity(struct max3100_port *s, u16 c) +{ + int parity; + + if (s->parity & MAX3100_PARITY_ODD) + parity = 1; + else + parity = 0; + + if (s->parity & MAX3100_7BIT) + c &= 0x7f; + else + c &= 0xff; + + parity = parity ^ (hweight8(c) & 1); + return parity; +} + +static int max3100_check_parity(struct max3100_port *s, u16 c) +{ + return max3100_do_parity(s, c) == ((c >> 8) & 1); +} + +static void max3100_calc_parity(struct max3100_port *s, u16 *c) +{ + if (s->parity & MAX3100_7BIT) + *c &= 0x7f; + else + *c &= 0xff; + + if (s->parity & MAX3100_PARITY_ON) + *c |= max3100_do_parity(s, *c) << 8; +} + +static void max3100_work(struct work_struct *w); + +static void max3100_dowork(struct max3100_port *s) +{ + if (!s->force_end_work && !work_pending(&s->work) && + !freezing(current) && !s->suspending) + queue_work(s->workqueue, &s->work); +} + +static void max3100_timeout(unsigned long data) +{ + struct max3100_port *s = (struct max3100_port *)data; + + if (s->port.info) { + max3100_dowork(s); + mod_timer(&s->timer, jiffies + s->poll_time); + } +} + +static int max3100_sr(struct max3100_port *s, u16 tx, u16 *rx) +{ + struct spi_message message; + u16 etx, erx; + int status; + struct spi_transfer tran = { + .tx_buf = &etx, + .rx_buf = &erx, + .len = 2, + }; + + etx = cpu_to_be16(tx); + spi_message_init(&message); + spi_message_add_tail(&tran, &message); + status = spi_sync(s->spi, &message); + if (status) { + dev_warn(&s->spi->dev, "error while calling spi_sync\n"); + return -EIO; + } + *rx = be16_to_cpu(erx); + s->tx_empty = (*rx & MAX3100_T) > 0; + dev_dbg(&s->spi->dev, "%04x - %04x\n", tx, *rx); + return 0; +} + +static int max3100_handlerx(struct max3100_port *s, u16 rx) +{ + unsigned int ch, flg, status = 0; + int ret = 0, cts; + + if (rx & MAX3100_R && s->rx_enabled) { + dev_dbg(&s->spi->dev, "%s\n", __func__); + ch = rx & (s->parity & MAX3100_7BIT ? 0x7f : 0xff); + if (rx & MAX3100_RAFE) { + s->port.icount.frame++; + flg = TTY_FRAME; + status |= MAX3100_STATUS_FE; + } else { + if (s->parity & MAX3100_PARITY_ON) { + if (max3100_check_parity(s, rx)) { + s->port.icount.rx++; + flg = TTY_NORMAL; + } else { + s->port.icount.parity++; + flg = TTY_PARITY; + status |= MAX3100_STATUS_PE; + } + } else { + s->port.icount.rx++; + flg = TTY_NORMAL; + } + } + uart_insert_char(&s->port, status, MAX3100_STATUS_OE, ch, flg); + ret = 1; + } + + cts = (rx & MAX3100_CTS) > 0; + if (s->cts != cts) { + s->cts = cts; + uart_handle_cts_change(&s->port, cts ? 
TIOCM_CTS : 0); + } + + return ret; +} + +static void max3100_work(struct work_struct *w) +{ + struct max3100_port *s = container_of(w, struct max3100_port, work); + int rxchars; + u16 tx, rx; + int conf, cconf, rts, crts; + struct circ_buf *xmit = &s->port.info->xmit; + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + rxchars = 0; + do { + spin_lock(&s->conf_lock); + conf = s->conf; + cconf = s->conf_commit; + s->conf_commit = 0; + rts = s->rts; + crts = s->rts_commit; + s->rts_commit = 0; + spin_unlock(&s->conf_lock); + if (cconf) + max3100_sr(s, MAX3100_WC | conf, &rx); + if (crts) { + max3100_sr(s, MAX3100_WD | MAX3100_TE | + (s->rts ? MAX3100_RTS : 0), &rx); + rxchars += max3100_handlerx(s, rx); + } + + max3100_sr(s, MAX3100_RD, &rx); + rxchars += max3100_handlerx(s, rx); + + if (rx & MAX3100_T) { + tx = 0xffff; + if (s->port.x_char) { + tx = s->port.x_char; + s->port.icount.tx++; + s->port.x_char = 0; + } else if (!uart_circ_empty(xmit) && + !uart_tx_stopped(&s->port)) { + tx = xmit->buf[xmit->tail]; + xmit->tail = (xmit->tail + 1) & + (UART_XMIT_SIZE - 1); + s->port.icount.tx++; + } + if (tx != 0xffff) { + max3100_calc_parity(s, &tx); + tx |= MAX3100_WD | (s->rts ? MAX3100_RTS : 0); + max3100_sr(s, tx, &rx); + rxchars += max3100_handlerx(s, rx); + } + } + + if (rxchars > 16 && s->port.info->port.tty != NULL) { + tty_flip_buffer_push(s->port.info->port.tty); + rxchars = 0; + } + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(&s->port); + + } while (!s->force_end_work && + !freezing(current) && + ((rx & MAX3100_R) || + (!uart_circ_empty(xmit) && + !uart_tx_stopped(&s->port)))); + + if (rxchars > 0 && s->port.info->port.tty != NULL) + tty_flip_buffer_push(s->port.info->port.tty); +} + +static irqreturn_t max3100_irq(int irqno, void *dev_id) +{ + struct max3100_port *s = dev_id; + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + max3100_dowork(s); + return IRQ_HANDLED; +} + +static void max3100_enable_ms(struct uart_port *port) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + + if (s->poll_time > 0) + mod_timer(&s->timer, jiffies); + dev_dbg(&s->spi->dev, "%s\n", __func__); +} + +static void max3100_start_tx(struct uart_port *port) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + max3100_dowork(s); +} + +static void max3100_stop_rx(struct uart_port *port) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + s->rx_enabled = 0; + spin_lock(&s->conf_lock); + s->conf &= ~MAX3100_RM; + s->conf_commit = 1; + spin_unlock(&s->conf_lock); + max3100_dowork(s); +} + +static unsigned int max3100_tx_empty(struct uart_port *port) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + /* may not be truly up-to-date */ + max3100_dowork(s); + return s->tx_empty; +} + +static unsigned int max3100_get_mctrl(struct uart_port *port) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + /* may not be truly up-to-date */ + max3100_dowork(s); + /* always assert DCD and DSR since these lines are not wired */ + return (s->cts ? 
TIOCM_CTS : 0) | TIOCM_DSR | TIOCM_CAR; +} + +static void max3100_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + int rts; + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + rts = (mctrl & TIOCM_RTS) > 0; + + spin_lock(&s->conf_lock); + if (s->rts != rts) { + s->rts = rts; + s->rts_commit = 1; + max3100_dowork(s); + } + spin_unlock(&s->conf_lock); +} + +static void +max3100_set_termios(struct uart_port *port, struct ktermios *termios, + struct ktermios *old) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + int baud = 0; + unsigned cflag; + u32 param_new, param_mask, parity = 0; + struct tty_struct *tty = s->port.info->port.tty; + + dev_dbg(&s->spi->dev, "%s\n", __func__); + if (!tty) + return; + + cflag = termios->c_cflag; + param_new = 0; + param_mask = 0; + + baud = tty_get_baud_rate(tty); + param_new = s->conf & MAX3100_BAUD; + switch (baud) { + case 300: + if (s->crystal) + baud = s->baud; + else + param_new = 15; + break; + case 600: + param_new = 14 + s->crystal; + break; + case 1200: + param_new = 13 + s->crystal; + break; + case 2400: + param_new = 12 + s->crystal; + break; + case 4800: + param_new = 11 + s->crystal; + break; + case 9600: + param_new = 10 + s->crystal; + break; + case 19200: + param_new = 9 + s->crystal; + break; + case 38400: + param_new = 8 + s->crystal; + break; + case 57600: + param_new = 1 + s->crystal; + break; + case 115200: + param_new = 0 + s->crystal; + break; + case 230400: + if (s->crystal) + param_new = 0; + else + baud = s->baud; + break; + default: + baud = s->baud; + } + tty_encode_baud_rate(tty, baud, baud); + s->baud = baud; + param_mask |= MAX3100_BAUD; + + if ((cflag & CSIZE) == CS8) { + param_new &= ~MAX3100_L; + parity &= ~MAX3100_7BIT; + } else { + param_new |= MAX3100_L; + parity |= MAX3100_7BIT; + cflag = (cflag & ~CSIZE) | CS7; + } + param_mask |= MAX3100_L; + + if (cflag & CSTOPB) + param_new |= MAX3100_ST; + else + param_new &= ~MAX3100_ST; + param_mask |= MAX3100_ST; + + if (cflag & PARENB) { + param_new |= MAX3100_PE; + parity |= MAX3100_PARITY_ON; + } else { + param_new &= ~MAX3100_PE; + parity &= ~MAX3100_PARITY_ON; + } + param_mask |= MAX3100_PE; + + if (cflag & PARODD) + parity |= MAX3100_PARITY_ODD; + else + parity &= ~MAX3100_PARITY_ODD; + + /* mask termios capabilities we don't support */ + cflag &= ~CMSPAR; + termios->c_cflag = cflag; + + s->port.ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + s->port.ignore_status_mask |= + MAX3100_STATUS_PE | MAX3100_STATUS_FE | + MAX3100_STATUS_OE; + + /* we are sending char from a workqueue so enable */ + s->port.info->port.tty->low_latency = 1; + + if (s->poll_time > 0) + del_timer_sync(&s->timer); + + uart_update_timeout(port, termios->c_cflag, baud); + + spin_lock(&s->conf_lock); + s->conf = (s->conf & ~param_mask) | (param_new & param_mask); + s->conf_commit = 1; + s->parity = parity; + spin_unlock(&s->conf_lock); + max3100_dowork(s); + + if (UART_ENABLE_MS(&s->port, termios->c_cflag)) + max3100_enable_ms(&s->port); +} + +static void max3100_shutdown(struct uart_port *port) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + if (s->suspending) + return; + + s->force_end_work = 1; + + if (s->poll_time > 0) + del_timer_sync(&s->timer); + + if (s->workqueue) { + flush_workqueue(s->workqueue); + destroy_workqueue(s->workqueue); + s->workqueue = NULL; + } + if (s->irq) + 
free_irq(s->irq, s); + + /* set shutdown mode to save power */ + if (s->max3100_hw_suspend) + s->max3100_hw_suspend(1); + else { + u16 tx, rx; + + tx = MAX3100_WC | MAX3100_SHDN; + max3100_sr(s, tx, &rx); + } +} + +static int max3100_startup(struct uart_port *port) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + char b[12]; + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + s->conf = MAX3100_RM; + s->baud = s->crystal ? 230400 : 115200; + s->rx_enabled = 1; + + if (s->suspending) + return 0; + + s->force_end_work = 0; + s->parity = 0; + s->rts = 0; + + sprintf(b, "max3100-%d", s->minor); + s->workqueue = create_freezeable_workqueue(b); + if (!s->workqueue) { + dev_warn(&s->spi->dev, "cannot create workqueue\n"); + return -EBUSY; + } + INIT_WORK(&s->work, max3100_work); + + if (request_irq(s->irq, max3100_irq, + IRQF_TRIGGER_FALLING, "max3100", s) < 0) { + dev_warn(&s->spi->dev, "cannot allocate irq %d\n", s->irq); + s->irq = 0; + destroy_workqueue(s->workqueue); + s->workqueue = NULL; + return -EBUSY; + } + + if (s->loopback) { + u16 tx, rx; + tx = 0x4001; + max3100_sr(s, tx, &rx); + } + + if (s->max3100_hw_suspend) + s->max3100_hw_suspend(0); + s->conf_commit = 1; + max3100_dowork(s); + /* wait for clock to settle */ + msleep(50); + + max3100_enable_ms(&s->port); + + return 0; +} + +static const char *max3100_type(struct uart_port *port) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + return s->port.type == PORT_MAX3100 ? "MAX3100" : NULL; +} + +static void max3100_release_port(struct uart_port *port) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + + dev_dbg(&s->spi->dev, "%s\n", __func__); +} + +static void max3100_config_port(struct uart_port *port, int flags) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + if (flags & UART_CONFIG_TYPE) + s->port.type = PORT_MAX3100; +} + +static int max3100_verify_port(struct uart_port *port, + struct serial_struct *ser) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + int ret = -EINVAL; + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + if (ser->type == PORT_UNKNOWN || ser->type == PORT_MAX3100) + ret = 0; + return ret; +} + +static void max3100_stop_tx(struct uart_port *port) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + + dev_dbg(&s->spi->dev, "%s\n", __func__); +} + +static int max3100_request_port(struct uart_port *port) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + + dev_dbg(&s->spi->dev, "%s\n", __func__); + return 0; +} + +static void max3100_break_ctl(struct uart_port *port, int break_state) +{ + struct max3100_port *s = container_of(port, + struct max3100_port, + port); + + dev_dbg(&s->spi->dev, "%s\n", __func__); +} + +static struct uart_ops max3100_ops = { + .tx_empty = max3100_tx_empty, + .set_mctrl = max3100_set_mctrl, + .get_mctrl = max3100_get_mctrl, + .stop_tx = max3100_stop_tx, + .start_tx = max3100_start_tx, + .stop_rx = max3100_stop_rx, + .enable_ms = max3100_enable_ms, + .break_ctl = max3100_break_ctl, + .startup = max3100_startup, + .shutdown = max3100_shutdown, + .set_termios = max3100_set_termios, + .type = max3100_type, + .release_port = max3100_release_port, + .request_port = max3100_request_port, + .config_port = max3100_config_port, + .verify_port = max3100_verify_port, +}; + +static struct 
uart_driver max3100_uart_driver = { + .owner = THIS_MODULE, + .driver_name = "ttyMAX", + .dev_name = "ttyMAX", + .major = MAX3100_MAJOR, + .minor = MAX3100_MINOR, + .nr = MAX_MAX3100, +}; +static int uart_driver_registered; + +static int __devinit max3100_probe(struct spi_device *spi) +{ + int i, retval; + struct plat_max3100 *pdata; + u16 tx, rx; + + mutex_lock(&max3100s_lock); + + if (!uart_driver_registered) { + uart_driver_registered = 1; + retval = uart_register_driver(&max3100_uart_driver); + if (retval) { + printk(KERN_ERR "Couldn't register max3100 uart driver\n"); + mutex_unlock(&max3100s_lock); + return retval; + } + } + + for (i = 0; i < MAX_MAX3100; i++) + if (!max3100s[i]) + break; + if (i == MAX_MAX3100) { + dev_warn(&spi->dev, "too many MAX3100 chips\n"); + mutex_unlock(&max3100s_lock); + return -ENOMEM; + } + + max3100s[i] = kzalloc(sizeof(struct max3100_port), GFP_KERNEL); + if (!max3100s[i]) { + dev_warn(&spi->dev, + "kmalloc for max3100 structure %d failed!\n", i); + mutex_unlock(&max3100s_lock); + return -ENOMEM; + } + max3100s[i]->spi = spi; + max3100s[i]->irq = spi->irq; + spin_lock_init(&max3100s[i]->conf_lock); + dev_set_drvdata(&spi->dev, max3100s[i]); + pdata = spi->dev.platform_data; + max3100s[i]->crystal = pdata->crystal; + max3100s[i]->loopback = pdata->loopback; + max3100s[i]->poll_time = pdata->poll_time * HZ / 1000; + if (pdata->poll_time > 0 && max3100s[i]->poll_time == 0) + max3100s[i]->poll_time = 1; + max3100s[i]->max3100_hw_suspend = pdata->max3100_hw_suspend; + max3100s[i]->minor = i; + init_timer(&max3100s[i]->timer); + max3100s[i]->timer.function = max3100_timeout; + max3100s[i]->timer.data = (unsigned long) max3100s[i]; + + dev_dbg(&spi->dev, "%s: adding port %d\n", __func__, i); + max3100s[i]->port.irq = max3100s[i]->irq; + max3100s[i]->port.uartclk = max3100s[i]->crystal ? 3686400 : 1843200; + max3100s[i]->port.fifosize = 16; + max3100s[i]->port.ops = &max3100_ops; + max3100s[i]->port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF; + max3100s[i]->port.line = i; + max3100s[i]->port.type = PORT_MAX3100; + max3100s[i]->port.dev = &spi->dev; + retval = uart_add_one_port(&max3100_uart_driver, &max3100s[i]->port); + if (retval < 0) + dev_warn(&spi->dev, + "uart_add_one_port failed for line %d with error %d\n", + i, retval); + + /* set shutdown mode to save power. 
Will be woken-up on open */ + if (max3100s[i]->max3100_hw_suspend) + max3100s[i]->max3100_hw_suspend(1); + else { + tx = MAX3100_WC | MAX3100_SHDN; + max3100_sr(max3100s[i], tx, &rx); + } + mutex_unlock(&max3100s_lock); + return 0; +} + +static int __devexit max3100_remove(struct spi_device *spi) +{ + struct max3100_port *s = dev_get_drvdata(&spi->dev); + int i; + + mutex_lock(&max3100s_lock); + + /* find out the index for the chip we are removing */ + for (i = 0; i < MAX_MAX3100; i++) + if (max3100s[i] == s) + break; + + dev_dbg(&spi->dev, "%s: removing port %d\n", __func__, i); + uart_remove_one_port(&max3100_uart_driver, &max3100s[i]->port); + kfree(max3100s[i]); + max3100s[i] = NULL; + + /* check if this is the last chip we have */ + for (i = 0; i < MAX_MAX3100; i++) + if (max3100s[i]) { + mutex_unlock(&max3100s_lock); + return 0; + } + pr_debug("removing max3100 driver\n"); + uart_unregister_driver(&max3100_uart_driver); + + mutex_unlock(&max3100s_lock); + return 0; +} + +#ifdef CONFIG_PM + +static int max3100_suspend(struct spi_device *spi, pm_message_t state) +{ + struct max3100_port *s = dev_get_drvdata(&spi->dev); + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + disable_irq(s->irq); + + s->suspending = 1; + uart_suspend_port(&max3100_uart_driver, &s->port); + + if (s->max3100_hw_suspend) + s->max3100_hw_suspend(1); + else { + /* no HW suspend, so do SW one */ + u16 tx, rx; + + tx = MAX3100_WC | MAX3100_SHDN; + max3100_sr(s, tx, &rx); + } + return 0; +} + +static int max3100_resume(struct spi_device *spi) +{ + struct max3100_port *s = dev_get_drvdata(&spi->dev); + + dev_dbg(&s->spi->dev, "%s\n", __func__); + + if (s->max3100_hw_suspend) + s->max3100_hw_suspend(0); + uart_resume_port(&max3100_uart_driver, &s->port); + s->suspending = 0; + + enable_irq(s->irq); + + s->conf_commit = 1; + if (s->workqueue) + max3100_dowork(s); + + return 0; +} + +#else +#define max3100_suspend NULL +#define max3100_resume NULL +#endif + +static struct spi_driver max3100_driver = { + .driver = { + .name = "max3100", + .bus = &spi_bus_type, + .owner = THIS_MODULE, + }, + + .probe = max3100_probe, + .remove = __devexit_p(max3100_remove), + .suspend = max3100_suspend, + .resume = max3100_resume, +}; + +static int __init max3100_init(void) +{ + return spi_register_driver(&max3100_driver); +} +module_init(max3100_init); + +static void __exit max3100_exit(void) +{ + spi_unregister_driver(&max3100_driver); +} +module_exit(max3100_exit); + +MODULE_DESCRIPTION("MAX3100 driver"); +MODULE_AUTHOR("Christian Pellegrin <chripell@evolware.org>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c index a4dc79b1d7a..47c6837850b 100644 --- a/drivers/serial/sunsu.c +++ b/drivers/serial/sunsu.c @@ -1178,7 +1178,7 @@ static struct uart_driver sunsu_reg = { .major = TTY_MAJOR, }; -static int __init sunsu_kbd_ms_init(struct uart_sunsu_port *up) +static int __devinit sunsu_kbd_ms_init(struct uart_sunsu_port *up) { int quot, baud; #ifdef CONFIG_SERIO diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c index 7fb9b5c4669..12d13d99b6f 100644 --- a/drivers/sh/intc.c +++ b/drivers/sh/intc.c @@ -44,6 +44,7 @@ struct intc_handle_int { struct intc_desc_int { struct list_head list; struct sys_device sysdev; + pm_message_t state; unsigned long *reg; #ifdef CONFIG_SMP unsigned long *smp; @@ -786,18 +787,44 @@ static int intc_suspend(struct sys_device *dev, pm_message_t state) /* get intc controller associated with this sysdev */ d = container_of(dev, struct intc_desc_int, sysdev); - /* enable wakeup irqs 
belonging to this intc controller */ - for_each_irq_desc(irq, desc) { - if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip)) - intc_enable(irq); + switch (state.event) { + case PM_EVENT_ON: + if (d->state.event != PM_EVENT_FREEZE) + break; + for_each_irq_desc(irq, desc) { + if (desc->chip != &d->chip) + continue; + if (desc->status & IRQ_DISABLED) + intc_disable(irq); + else + intc_enable(irq); + } + break; + case PM_EVENT_FREEZE: + /* nothing has to be done */ + break; + case PM_EVENT_SUSPEND: + /* enable wakeup irqs belonging to this intc controller */ + for_each_irq_desc(irq, desc) { + if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip)) + intc_enable(irq); + } + break; } + d->state = state; return 0; } +static int intc_resume(struct sys_device *dev) +{ + return intc_suspend(dev, PMSG_ON); +} + static struct sysdev_class intc_sysdev_class = { .name = "intc", .suspend = intc_suspend, + .resume = intc_resume, }; /* register this intc as sysdev to allow suspend/resume */ diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 643908b74bc..8eba98c8ed1 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -658,7 +658,7 @@ int spi_write_then_read(struct spi_device *spi, int status; struct spi_message message; - struct spi_transfer x; + struct spi_transfer x[2]; u8 *local_buf; /* Use preallocated DMA-safe buffer. We can't avoid copying here, @@ -669,9 +669,15 @@ int spi_write_then_read(struct spi_device *spi, return -EINVAL; spi_message_init(&message); - memset(&x, 0, sizeof x); - x.len = n_tx + n_rx; - spi_message_add_tail(&x, &message); + memset(x, 0, sizeof x); + if (n_tx) { + x[0].len = n_tx; + spi_message_add_tail(&x[0], &message); + } + if (n_rx) { + x[1].len = n_rx; + spi_message_add_tail(&x[1], &message); + } /* ... unless someone else is using the pre-allocated buffer */ if (!mutex_trylock(&lock)) { @@ -682,15 +688,15 @@ int spi_write_then_read(struct spi_device *spi, local_buf = buf; memcpy(local_buf, txbuf, n_tx); - x.tx_buf = local_buf; - x.rx_buf = local_buf; + x[0].tx_buf = local_buf; + x[1].rx_buf = local_buf + n_tx; /* do the i/o */ status = spi_sync(spi, &message); if (status == 0) - memcpy(rxbuf, x.rx_buf + n_tx, n_rx); + memcpy(rxbuf, x[1].rx_buf, n_rx); - if (x.tx_buf == buf) + if (x[0].tx_buf == buf) mutex_unlock(&lock); else kfree(local_buf); diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c index 0348072b3ab..75ebe338c6f 100644 --- a/drivers/staging/b3dfg/b3dfg.c +++ b/drivers/staging/b3dfg/b3dfg.c @@ -1000,7 +1000,7 @@ static int __devinit b3dfg_probe(struct pci_dev *pdev, pci_set_master(pdev); - r = pci_set_dma_mask(pdev, DMA_32BIT_MASK); + r = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (r) { dev_err(&pdev->dev, "no usable DMA configuration\n"); goto err_free_res; diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 869d47cb6db..0a69c0977e3 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -546,10 +546,6 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp) tty->driver_data = acm; acm->tty = tty; - /* force low_latency on so that our tty_push actually forces the data through, - otherwise it is scheduled, and with high data rates data can get lost. 
*/ - tty->low_latency = 1; - if (usb_autopm_get_interface(acm->control) < 0) goto early_bail; else diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index 4ed228a8994..bb5e6f67157 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c @@ -280,7 +280,7 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev) * are always powered while this driver is active, and use * active-low power switches. */ - for (i = 0; i < pdata->ports; i++) { + for (i = 0; i < ARRAY_SIZE(pdata->vbus_pin); i++) { if (pdata->vbus_pin[i] <= 0) continue; gpio_request(pdata->vbus_pin[i], "ohci_vbus"); @@ -298,7 +298,7 @@ static int ohci_hcd_at91_drv_remove(struct platform_device *pdev) int i; if (pdata) { - for (i = 0; i < pdata->ports; i++) { + for (i = 0; i < ARRAY_SIZE(pdata->vbus_pin); i++) { if (pdata->vbus_pin[i] <= 0) continue; gpio_direction_output(pdata->vbus_pin[i], 1); diff --git a/drivers/usb/otg/nop-usb-xceiv.c b/drivers/usb/otg/nop-usb-xceiv.c index 4b933f646f2..c567168f89a 100644 --- a/drivers/usb/otg/nop-usb-xceiv.c +++ b/drivers/usb/otg/nop-usb-xceiv.c @@ -36,14 +36,14 @@ struct nop_usb_xceiv { struct device *dev; }; -static u64 nop_xceiv_dmamask = DMA_32BIT_MASK; +static u64 nop_xceiv_dmamask = DMA_BIT_MASK(32); static struct platform_device nop_xceiv_device = { .name = "nop_usb_xceiv", .id = -1, .dev = { .dma_mask = &nop_xceiv_dmamask, - .coherent_dma_mask = DMA_32BIT_MASK, + .coherent_dma_mask = DMA_BIT_MASK(32), .platform_data = NULL, }, }; diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 2620bf6fe5e..9c4c700c7cc 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -1215,20 +1215,22 @@ static void ti_bulk_in_callback(struct urb *urb) } tty = tty_port_tty_get(&port->port); - if (tty && urb->actual_length) { - usb_serial_debug_data(debug, dev, __func__, - urb->actual_length, urb->transfer_buffer); - - if (!tport->tp_is_open) - dbg("%s - port closed, dropping data", __func__); - else - ti_recv(&urb->dev->dev, tty, + if (tty) { + if (urb->actual_length) { + usb_serial_debug_data(debug, dev, __func__, + urb->actual_length, urb->transfer_buffer); + + if (!tport->tp_is_open) + dbg("%s - port closed, dropping data", + __func__); + else + ti_recv(&urb->dev->dev, tty, urb->transfer_buffer, urb->actual_length); - - spin_lock(&tport->tp_lock); - tport->tp_icount.rx += urb->actual_length; - spin_unlock(&tport->tp_lock); + spin_lock(&tport->tp_lock); + tport->tp_icount.rx += urb->actual_length; + spin_unlock(&tport->tp_lock); + } tty_kref_put(tty); } diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c index 16bb7e3c031..6c37e8ee5ef 100644 --- a/drivers/video/aty/radeon_base.c +++ b/drivers/video/aty/radeon_base.c @@ -698,8 +698,8 @@ static void __devinit radeon_get_pllinfo(struct radeonfb_info *rinfo) found: /* * Some methods fail to retrieve SCLK and MCLK values, we apply default - * settings in this case (200Mhz). If that really happne often, we could - * fetch from registers instead... + * settings in this case (200Mhz). If that really happens often, we + * could fetch from registers instead... 
*/ if (rinfo->pll.mclk == 0) rinfo->pll.mclk = 20000; diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index dd37cbcaf8c..157057c79ca 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c @@ -35,8 +35,6 @@ static int fb_notifier_callback(struct notifier_block *self, return 0; bd = container_of(self, struct backlight_device, fb_notif); - if (!lock_fb_info(evdata->info)) - return -ENODEV; mutex_lock(&bd->ops_lock); if (bd->ops) if (!bd->ops->check_fb || @@ -49,7 +47,6 @@ static int fb_notifier_callback(struct notifier_block *self, backlight_update_status(bd); } mutex_unlock(&bd->ops_lock); - unlock_fb_info(evdata->info); return 0; } diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c index 0bb13df0fa8..b6449470106 100644 --- a/drivers/video/backlight/lcd.c +++ b/drivers/video/backlight/lcd.c @@ -40,8 +40,6 @@ static int fb_notifier_callback(struct notifier_block *self, if (!ld->ops) return 0; - if (!lock_fb_info(evdata->info)) - return -ENODEV; mutex_lock(&ld->ops_lock); if (!ld->ops->check_fb || ld->ops->check_fb(ld, evdata->info)) { if (event == FB_EVENT_BLANK) { @@ -53,7 +51,6 @@ static int fb_notifier_callback(struct notifier_block *self, } } mutex_unlock(&ld->ops_lock); - unlock_fb_info(evdata->info); return 0; } diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c index d42e385f091..4c2bf923418 100644 --- a/drivers/video/cirrusfb.c +++ b/drivers/video/cirrusfb.c @@ -567,9 +567,7 @@ static int cirrusfb_check_var(struct fb_var_screeninfo *var, default: dev_dbg(info->device, "Unsupported bpp size: %d\n", var->bits_per_pixel); - assert(false); - /* should never occur */ - break; + return -EINVAL; } if (var->xres_virtual < var->xres) diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 2cd500a304f..471a9a60376 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -2263,9 +2263,12 @@ static void fbcon_generic_blank(struct vc_data *vc, struct fb_info *info, } + if (!lock_fb_info(info)) + return; event.info = info; event.data = &blank; fb_notifier_call_chain(FB_EVENT_CONBLANK, &event); + unlock_fb_info(info); } static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch) @@ -2956,8 +2959,6 @@ static int fbcon_fb_unregistered(struct fb_info *info) { int i, idx; - if (!lock_fb_info(info)) - return -ENODEV; idx = info->node; for (i = first_fb_vc; i <= last_fb_vc; i++) { if (con2fb_map[i] == idx) @@ -2985,8 +2986,6 @@ static int fbcon_fb_unregistered(struct fb_info *info) if (primary_device == idx) primary_device = -1; - unlock_fb_info(info); - if (!num_registered_fb) unregister_con_driver(&fb_con); @@ -3027,11 +3026,8 @@ static int fbcon_fb_registered(struct fb_info *info) { int ret = 0, i, idx; - if (!lock_fb_info(info)) - return -ENODEV; idx = info->node; fbcon_select_primary(info); - unlock_fb_info(info); if (info_idx == -1) { for (i = first_fb_vc; i <= last_fb_vc; i++) { @@ -3152,53 +3148,23 @@ static int fbcon_event_notify(struct notifier_block *self, switch(action) { case FB_EVENT_SUSPEND: - if (!lock_fb_info(info)) { - ret = -ENODEV; - goto done; - } fbcon_suspended(info); - unlock_fb_info(info); break; case FB_EVENT_RESUME: - if (!lock_fb_info(info)) { - ret = -ENODEV; - goto done; - } fbcon_resumed(info); - unlock_fb_info(info); break; case FB_EVENT_MODE_CHANGE: - if (!lock_fb_info(info)) { - ret = -ENODEV; - goto done; - } fbcon_modechanged(info); - unlock_fb_info(info); break; case FB_EVENT_MODE_CHANGE_ALL: - if 
(!lock_fb_info(info)) { - ret = -ENODEV; - goto done; - } fbcon_set_all_vcs(info); - unlock_fb_info(info); break; case FB_EVENT_MODE_DELETE: mode = event->data; - if (!lock_fb_info(info)) { - ret = -ENODEV; - goto done; - } ret = fbcon_mode_deleted(info, mode); - unlock_fb_info(info); break; case FB_EVENT_FB_UNBIND: - if (!lock_fb_info(info)) { - ret = -ENODEV; - goto done; - } idx = info->node; - unlock_fb_info(info); ret = fbcon_fb_unbind(idx); break; case FB_EVENT_FB_REGISTERED: @@ -3217,29 +3183,14 @@ static int fbcon_event_notify(struct notifier_block *self, con2fb->framebuffer = con2fb_map[con2fb->console - 1]; break; case FB_EVENT_BLANK: - if (!lock_fb_info(info)) { - ret = -ENODEV; - goto done; - } fbcon_fb_blanked(info, *(int *)event->data); - unlock_fb_info(info); break; case FB_EVENT_NEW_MODELIST: - if (!lock_fb_info(info)) { - ret = -ENODEV; - goto done; - } fbcon_new_modelist(info); - unlock_fb_info(info); break; case FB_EVENT_GET_REQ: caps = event->data; - if (!lock_fb_info(info)) { - ret = -ENODEV; - goto done; - } fbcon_get_requirement(info, caps); - unlock_fb_info(info); break; } done: diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c index 0c5b9a9fd56..8dea2bc9270 100644 --- a/drivers/video/efifb.c +++ b/drivers/video/efifb.c @@ -210,12 +210,15 @@ static int __init efifb_probe(struct platform_device *dev) unsigned int size_total; int request_succeeded = 0; - printk(KERN_INFO "efifb: probing for efifb\n"); - if (!screen_info.lfb_depth) screen_info.lfb_depth = 32; if (!screen_info.pages) screen_info.pages = 1; + if (!screen_info.lfb_base) { + printk(KERN_DEBUG "efifb: invalid framebuffer address\n"); + return -ENODEV; + } + printk(KERN_INFO "efifb: probing for efifb\n"); /* just assume they're all unset if any are */ if (!screen_info.blue_size) { diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index 2ac32e6b595..d412a1ddc12 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c @@ -1097,8 +1097,11 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, return -EINVAL; con2fb.framebuffer = -1; event.data = &con2fb; + if (!lock_fb_info(info)) + return -ENODEV; event.info = info; fb_notifier_call_chain(FB_EVENT_GET_CONSOLE_MAP, &event); + unlock_fb_info(info); ret = copy_to_user(argp, &con2fb, sizeof(con2fb)) ? 
-EFAULT : 0; break; case FBIOPUT_CON2FBMAP: @@ -1115,8 +1118,11 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, break; } event.data = &con2fb; + if (!lock_fb_info(info)) + return -ENODEV; event.info = info; ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event); + unlock_fb_info(info); break; case FBIOBLANK: if (!lock_fb_info(info)) @@ -1521,7 +1527,10 @@ register_framebuffer(struct fb_info *fb_info) registered_fb[i] = fb_info; event.info = fb_info; + if (!lock_fb_info(fb_info)) + return -ENODEV; fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event); + unlock_fb_info(fb_info); return 0; } @@ -1555,8 +1564,12 @@ unregister_framebuffer(struct fb_info *fb_info) goto done; } + + if (!lock_fb_info(fb_info)) + return -ENODEV; event.info = fb_info; ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event); + unlock_fb_info(fb_info); if (ret) { ret = -EINVAL; @@ -1590,6 +1603,8 @@ void fb_set_suspend(struct fb_info *info, int state) { struct fb_event event; + if (!lock_fb_info(info)) + return; event.info = info; if (state) { fb_notifier_call_chain(FB_EVENT_SUSPEND, &event); @@ -1598,6 +1613,7 @@ void fb_set_suspend(struct fb_info *info, int state) info->state = FBINFO_STATE_RUNNING; fb_notifier_call_chain(FB_EVENT_RESUME, &event); } + unlock_fb_info(info); } /** @@ -1667,8 +1683,11 @@ int fb_new_modelist(struct fb_info *info) err = 1; if (!list_empty(&info->modelist)) { + if (!lock_fb_info(info)) + return -ENODEV; event.info = info; err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event); + unlock_fb_info(info); } return err; diff --git a/drivers/video/intelfb/intelfb.h b/drivers/video/intelfb/intelfb.h index a50bea61480..40984551c92 100644 --- a/drivers/video/intelfb/intelfb.h +++ b/drivers/video/intelfb/intelfb.h @@ -53,6 +53,7 @@ #define PCI_DEVICE_ID_INTEL_830M 0x3577 #define PCI_DEVICE_ID_INTEL_845G 0x2562 #define PCI_DEVICE_ID_INTEL_85XGM 0x3582 +#define PCI_DEVICE_ID_INTEL_854 0x358E #define PCI_DEVICE_ID_INTEL_865G 0x2572 #define PCI_DEVICE_ID_INTEL_915G 0x2582 #define PCI_DEVICE_ID_INTEL_915GM 0x2592 @@ -154,6 +155,7 @@ enum intel_chips { INTEL_85XGM, INTEL_852GM, INTEL_852GME, + INTEL_854, INTEL_855GM, INTEL_855GME, INTEL_865G, diff --git a/drivers/video/intelfb/intelfb_i2c.c b/drivers/video/intelfb/intelfb_i2c.c index b3065492bb2..487f2be4746 100644 --- a/drivers/video/intelfb/intelfb_i2c.c +++ b/drivers/video/intelfb/intelfb_i2c.c @@ -156,6 +156,7 @@ void intelfb_create_i2c_busses(struct intelfb_info *dinfo) switch(dinfo->chipset) { case INTEL_830M: case INTEL_845G: + case INTEL_854: case INTEL_855GM: case INTEL_865G: dinfo->output[i].type = INTELFB_OUTPUT_DVO; diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c index 6d8e5415c80..ace14fe02fc 100644 --- a/drivers/video/intelfb/intelfbdrv.c +++ b/drivers/video/intelfb/intelfbdrv.c @@ -182,6 +182,7 @@ static struct pci_device_id intelfb_pci_table[] __devinitdata = { { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_845G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_845G }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_85XGM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_85XGM }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_865G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_865G }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_854, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_854 }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_915G, PCI_ANY_ID, PCI_ANY_ID, 
PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_915G }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_915GM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_915GM }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_945G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_945G }, diff --git a/drivers/video/intelfb/intelfbhw.c b/drivers/video/intelfb/intelfbhw.c index 8b26b27c2db..0689f97c523 100644 --- a/drivers/video/intelfb/intelfbhw.c +++ b/drivers/video/intelfb/intelfbhw.c @@ -84,6 +84,11 @@ int intelfbhw_get_chipset(struct pci_dev *pdev, struct intelfb_info *dinfo) dinfo->mobile = 0; dinfo->pll_index = PLLS_I8xx; return 0; + case PCI_DEVICE_ID_INTEL_854: + dinfo->mobile = 1; + dinfo->name = "Intel(R) 854"; + dinfo->chipset = INTEL_854; + return 0; case PCI_DEVICE_ID_INTEL_85XGM: tmp = 0; dinfo->mobile = 1; diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c index 4dcec48a1d7..c3fad34309e 100644 --- a/drivers/video/s3fb.c +++ b/drivers/video/s3fb.c @@ -45,11 +45,11 @@ struct s3fb_info { static const struct svga_fb_format s3fb_formats[] = { { 0, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0, FB_TYPE_TEXT, FB_AUX_TEXT_SVGA_STEP4, FB_VISUAL_PSEUDOCOLOR, 8, 16}, - { 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0, + { 4, {0, 4, 0}, {0, 4, 0}, {0, 4, 0}, {0, 0, 0}, 0, FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 8, 16}, - { 4, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 1, + { 4, {0, 4, 0}, {0, 4, 0}, {0, 4, 0}, {0, 0, 0}, 1, FB_TYPE_INTERLEAVED_PLANES, 1, FB_VISUAL_PSEUDOCOLOR, 8, 16}, - { 8, {0, 6, 0}, {0, 6, 0}, {0, 6, 0}, {0, 0, 0}, 0, + { 8, {0, 8, 0}, {0, 8, 0}, {0, 8, 0}, {0, 0, 0}, 0, FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_PSEUDOCOLOR, 4, 8}, {16, {10, 5, 0}, {5, 5, 0}, {0, 5, 0}, {0, 0, 0}, 0, FB_TYPE_PACKED_PIXELS, 0, FB_VISUAL_TRUECOLOR, 2, 4}, diff --git a/drivers/video/sa1100fb.c b/drivers/video/sa1100fb.c index fad58cf9ef7..10ddad8e17d 100644 --- a/drivers/video/sa1100fb.c +++ b/drivers/video/sa1100fb.c @@ -199,16 +199,20 @@ extern void (*sa1100fb_backlight_power)(int on); extern void (*sa1100fb_lcd_power)(int on); -/* - * IMHO this looks wrong. In 8BPP, length should be 8. 
- */ -static struct sa1100fb_rgb rgb_8 = { +static struct sa1100fb_rgb rgb_4 = { .red = { .offset = 0, .length = 4, }, .green = { .offset = 0, .length = 4, }, .blue = { .offset = 0, .length = 4, }, .transp = { .offset = 0, .length = 0, }, }; +static struct sa1100fb_rgb rgb_8 = { + .red = { .offset = 0, .length = 8, }, + .green = { .offset = 0, .length = 8, }, + .blue = { .offset = 0, .length = 8, }, + .transp = { .offset = 0, .length = 0, }, +}; + static struct sa1100fb_rgb def_rgb_16 = { .red = { .offset = 11, .length = 5, }, .green = { .offset = 5, .length = 6, }, @@ -613,7 +617,7 @@ sa1100fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) DPRINTK("var->bits_per_pixel=%d\n", var->bits_per_pixel); switch (var->bits_per_pixel) { case 4: - rgbidx = RGB_8; + rgbidx = RGB_4; break; case 8: rgbidx = RGB_8; @@ -1382,6 +1386,7 @@ static struct sa1100fb_info * __init sa1100fb_init_fbinfo(struct device *dev) fbi->fb.monspecs = monspecs; fbi->fb.pseudo_palette = (fbi + 1); + fbi->rgb[RGB_4] = &rgb_4; fbi->rgb[RGB_8] = &rgb_8; fbi->rgb[RGB_16] = &def_rgb_16; diff --git a/drivers/video/sa1100fb.h b/drivers/video/sa1100fb.h index 86831db9a04..1c3b459865d 100644 --- a/drivers/video/sa1100fb.h +++ b/drivers/video/sa1100fb.h @@ -57,9 +57,10 @@ struct sa1100fb_lcd_reg { unsigned long lccr3; }; -#define RGB_8 (0) -#define RGB_16 (1) -#define NR_RGB 2 +#define RGB_4 (0) +#define RGB_8 (1) +#define RGB_16 (2) +#define NR_RGB 3 struct sa1100fb_info { struct fb_info fb; diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c index 346d6458cf7..7e17ee95a97 100644 --- a/drivers/video/sis/sis_main.c +++ b/drivers/video/sis/sis_main.c @@ -1129,7 +1129,7 @@ sisfb_bpp_to_var(struct sis_video_info *ivideo, struct fb_var_screeninfo *var) switch(var->bits_per_pixel) { case 8: var->red.offset = var->green.offset = var->blue.offset = 0; - var->red.length = var->green.length = var->blue.length = 6; + var->red.length = var->green.length = var->blue.length = 8; break; case 16: var->red.offset = 11; diff --git a/drivers/video/skeletonfb.c b/drivers/video/skeletonfb.c index a439159204a..89158bc71da 100644 --- a/drivers/video/skeletonfb.c +++ b/drivers/video/skeletonfb.c @@ -308,9 +308,11 @@ static int xxxfb_setcolreg(unsigned regno, unsigned red, unsigned green, * color depth = SUM(var->{color}.length) * * Pseudocolor: - * var->{color}.offset is 0 - * var->{color}.length contains width of DAC or the number of unique - * colors available (color depth) + * var->{color}.offset is 0 unless the palette index takes less than + * bits_per_pixel bits and is stored in the upper + * bits of the pixel value + * var->{color}.length is set so that 1 << length is the number of + * available palette entries * pseudo_palette is not used * RAMDAC[X] is programmed to (red, green, blue) * color depth = var->{color}.length diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c index 0b370aebdbf..421770b5e6a 100644 --- a/drivers/video/uvesafb.c +++ b/drivers/video/uvesafb.c @@ -55,6 +55,7 @@ static u16 maxvf __devinitdata; /* maximum vertical frequency */ static u16 maxhf __devinitdata; /* maximum horizontal frequency */ static u16 vbemode __devinitdata; /* force use of a specific VBE mode */ static char *mode_option __devinitdata; +static u8 dac_width = 6; static struct uvesafb_ktask *uvfb_tasks[UVESAFB_TASKS_MAX]; static DEFINE_MUTEX(uvfb_lock); @@ -303,22 +304,10 @@ static void uvesafb_setup_var(struct fb_var_screeninfo *var, var->blue.offset = 0; var->transp.offset = 0; - /* - * We're assuming that we 
can switch the DAC to 8 bits. If - * this proves to be incorrect, we'll update the fields - * later in set_par(). - */ - if (par->vbe_ib.capabilities & VBE_CAP_CAN_SWITCH_DAC) { - var->red.length = 8; - var->green.length = 8; - var->blue.length = 8; - var->transp.length = 0; - } else { - var->red.length = 6; - var->green.length = 6; - var->blue.length = 6; - var->transp.length = 0; - } + var->red.length = 8; + var->green.length = 8; + var->blue.length = 8; + var->transp.length = 0; } } @@ -1006,7 +995,7 @@ static int uvesafb_setcolreg(unsigned regno, unsigned red, unsigned green, struct fb_info *info) { struct uvesafb_pal_entry entry; - int shift = 16 - info->var.green.length; + int shift = 16 - dac_width; int err = 0; if (regno >= info->cmap.len) @@ -1055,7 +1044,7 @@ static int uvesafb_setcolreg(unsigned regno, unsigned red, unsigned green, static int uvesafb_setcmap(struct fb_cmap *cmap, struct fb_info *info) { struct uvesafb_pal_entry *entries; - int shift = 16 - info->var.green.length; + int shift = 16 - dac_width; int i, err = 0; if (info->var.bits_per_pixel == 8) { @@ -1317,13 +1306,9 @@ setmode: err = uvesafb_exec(task); if (err || (task->t.regs.eax & 0xffff) != 0x004f || ((task->t.regs.ebx & 0xff00) >> 8) != 8) { - /* - * We've failed to set the DAC palette format - - * time to correct var. - */ - info->var.red.length = 6; - info->var.green.length = 6; - info->var.blue.length = 6; + dac_width = 6; + } else { + dac_width = 8; } } diff --git a/drivers/video/vfb.c b/drivers/video/vfb.c index cc919ae4657..050d432c7d9 100644 --- a/drivers/video/vfb.c +++ b/drivers/video/vfb.c @@ -318,13 +318,16 @@ static int vfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, * {hardwarespecific} contains width of RAMDAC * cmap[X] is programmed to (X << red.offset) | (X << green.offset) | (X << blue.offset) * RAMDAC[X] is programmed to (red, green, blue) - * + * * Pseudocolor: - * uses offset = 0 && length = RAMDAC register width. - * var->{color}.offset is 0 - * var->{color}.length contains widht of DAC + * var->{color}.offset is 0 unless the palette index takes less than + * bits_per_pixel bits and is stored in the upper + * bits of the pixel value + * var->{color}.length is set so that 1 << length is the number of available + * palette entries * cmap is not used * RAMDAC[X] is programmed to (red, green, blue) + * * Truecolor: * does not use DAC. Usually 3 are present. * var->{color}.offset contains start of bitfield diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 63024145215..5eb8f21da82 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -240,8 +240,6 @@ config ORION5X_WATCHDOG To compile this driver as a module, choose M here: the module will be called orion5x_wdt. 
-# ARM26 Architecture - # AVR32 Architecture config AT32AP700X_WDT diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index 806b3eb0853..7f8c56b14f5 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -42,8 +42,6 @@ obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o obj-$(CONFIG_ORION5X_WATCHDOG) += orion5x_wdt.o -# ARM26 Architecture - # AVR32 Architecture obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o diff --git a/drivers/watchdog/at91rm9200_wdt.c b/drivers/watchdog/at91rm9200_wdt.c index e35d5458923..29e52c237a3 100644 --- a/drivers/watchdog/at91rm9200_wdt.c +++ b/drivers/watchdog/at91rm9200_wdt.c @@ -197,7 +197,7 @@ static struct miscdevice at91wdt_miscdev = { .fops = &at91wdt_fops, }; -static int __init at91wdt_probe(struct platform_device *pdev) +static int __devinit at91wdt_probe(struct platform_device *pdev) { int res; @@ -214,7 +214,7 @@ static int __init at91wdt_probe(struct platform_device *pdev) return 0; } -static int __exit at91wdt_remove(struct platform_device *pdev) +static int __devexit at91wdt_remove(struct platform_device *pdev) { int res; @@ -252,7 +252,7 @@ static int at91wdt_resume(struct platform_device *pdev) static struct platform_driver at91wdt_driver = { .probe = at91wdt_probe, - .remove = __exit_p(at91wdt_remove), + .remove = __devexit_p(at91wdt_remove), .shutdown = at91wdt_shutdown, .suspend = at91wdt_suspend, .resume = at91wdt_resume, diff --git a/drivers/watchdog/i6300esb.c b/drivers/watchdog/i6300esb.c index 2dbe83570d6..7ba0b11ec52 100644 --- a/drivers/watchdog/i6300esb.c +++ b/drivers/watchdog/i6300esb.c @@ -52,10 +52,10 @@ #define ESB_LOCK_REG 0x68 /* WDT lock register */ /* Memory mapped registers */ -#define ESB_TIMER1_REG BASEADDR + 0x00 /* Timer1 value after each reset */ -#define ESB_TIMER2_REG BASEADDR + 0x04 /* Timer2 value after each reset */ -#define ESB_GINTSR_REG BASEADDR + 0x08 /* General Interrupt Status Register */ -#define ESB_RELOAD_REG BASEADDR + 0x0c /* Reload register */ +#define ESB_TIMER1_REG (BASEADDR + 0x00)/* Timer1 value after each reset */ +#define ESB_TIMER2_REG (BASEADDR + 0x04)/* Timer2 value after each reset */ +#define ESB_GINTSR_REG (BASEADDR + 0x08)/* General Interrupt Status Register */ +#define ESB_RELOAD_REG (BASEADDR + 0x0c)/* Reload register */ /* Lock register bits */ #define ESB_WDT_FUNC (0x01 << 2) /* Watchdog functionality */ @@ -68,6 +68,7 @@ #define ESB_WDT_INTTYPE (0x11 << 0) /* Interrupt type on timer1 timeout */ /* Reload register bits */ +#define ESB_WDT_TIMEOUT (0x01 << 9) /* Watchdog timed out */ #define ESB_WDT_RELOAD (0x01 << 8) /* prevent timeout */ /* Magic constants */ @@ -87,7 +88,6 @@ static struct platform_device *esb_platform_device; /* 30 sec default heartbeat (1 < heartbeat < 2*1023) */ #define WATCHDOG_HEARTBEAT 30 static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */ - module_param(heartbeat, int, 0); MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds. (1<heartbeat<2046, default=" @@ -123,7 +123,7 @@ static int esb_timer_start(void) esb_unlock_registers(); writew(ESB_WDT_RELOAD, ESB_RELOAD_REG); /* Enable or Enable + Lock? */ - val = 0x02 | (nowayout ? 0x01 : 0x00); + val = ESB_WDT_ENABLE | (nowayout ? 
ESB_WDT_LOCK : 0x00); pci_write_config_byte(esb_pci, ESB_LOCK_REG, val); spin_unlock(&esb_lock); return 0; @@ -143,7 +143,7 @@ static int esb_timer_stop(void) spin_unlock(&esb_lock); /* Returns 0 if the timer was disabled, non-zero otherwise */ - return (val & 0x01); + return val & ESB_WDT_ENABLE; } static void esb_timer_keepalive(void) @@ -190,18 +190,6 @@ static int esb_timer_set_heartbeat(int time) return 0; } -static int esb_timer_read(void) -{ - u32 count; - - /* This isn't documented, and doesn't take into - * acount which stage is running, but it looks - * like a 20 bit count down, so we might as well report it. - */ - pci_read_config_dword(esb_pci, 0x64, &count); - return (int)count; -} - /* * /dev/watchdog handling */ @@ -282,7 +270,7 @@ static long esb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) sizeof(ident)) ? -EFAULT : 0; case WDIOC_GETSTATUS: - return put_user(esb_timer_read(), p); + return put_user(0, p); case WDIOC_GETBOOTSTATUS: return put_user(triggered, p); @@ -362,8 +350,6 @@ MODULE_DEVICE_TABLE(pci, esb_pci_tbl); static unsigned char __devinit esb_getdevice(void) { - u8 val1; - unsigned short val2; /* * Find the PCI device */ @@ -371,66 +357,79 @@ static unsigned char __devinit esb_getdevice(void) esb_pci = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_9, NULL); - if (esb_pci) { - if (pci_enable_device(esb_pci)) { - printk(KERN_ERR PFX "failed to enable device\n"); - goto err_devput; - } + if (!esb_pci) + return 0; - if (pci_request_region(esb_pci, 0, ESB_MODULE_NAME)) { - printk(KERN_ERR PFX "failed to request region\n"); - goto err_disable; - } + if (pci_enable_device(esb_pci)) { + printk(KERN_ERR PFX "failed to enable device\n"); + goto err_devput; + } - BASEADDR = pci_ioremap_bar(esb_pci, 0); - if (BASEADDR == NULL) { - /* Something's wrong here, BASEADDR has to be set */ - printk(KERN_ERR PFX "failed to get BASEADDR\n"); - goto err_release; - } + if (pci_request_region(esb_pci, 0, ESB_MODULE_NAME)) { + printk(KERN_ERR PFX "failed to request region\n"); + goto err_disable; + } - /* - * The watchdog has two timers, it can be setup so that the - * expiry of timer1 results in an interrupt and the expiry of - * timer2 results in a reboot. We set it to not generate - * any interrupts as there is not much we can do with it - * right now. - * - * We also enable reboots and set the timer frequency to - * the PCI clock divided by 2^15 (approx 1KHz). 
- */ - pci_write_config_word(esb_pci, ESB_CONFIG_REG, 0x0003); - - /* Check that the WDT isn't already locked */ - pci_read_config_byte(esb_pci, ESB_LOCK_REG, &val1); - if (val1 & ESB_WDT_LOCK) - printk(KERN_WARNING PFX "nowayout already set\n"); - - /* Set the timer to watchdog mode and disable it for now */ - pci_write_config_byte(esb_pci, ESB_LOCK_REG, 0x00); - - /* Check if the watchdog was previously triggered */ - esb_unlock_registers(); - val2 = readw(ESB_RELOAD_REG); - triggered = (val2 & (0x01 << 9) >> 9); - - /* Reset trigger flag and timers */ - esb_unlock_registers(); - writew((0x11 << 8), ESB_RELOAD_REG); - - /* Done */ - return 1; + BASEADDR = pci_ioremap_bar(esb_pci, 0); + if (BASEADDR == NULL) { + /* Something's wrong here, BASEADDR has to be set */ + printk(KERN_ERR PFX "failed to get BASEADDR\n"); + goto err_release; + } + + /* Done */ + return 1; err_release: - pci_release_region(esb_pci, 0); + pci_release_region(esb_pci, 0); err_disable: - pci_disable_device(esb_pci); + pci_disable_device(esb_pci); err_devput: - pci_dev_put(esb_pci); - } + pci_dev_put(esb_pci); return 0; } +static void __devinit esb_initdevice(void) +{ + u8 val1; + u16 val2; + + /* + * Config register: + * Bit 5 : 0 = Enable WDT_OUTPUT + * Bit 2 : 0 = set the timer frequency to the PCI clock + * divided by 2^15 (approx 1KHz). + * Bits 1:0 : 11 = WDT_INT_TYPE Disabled. + * The watchdog has two timers, it can be setup so that the + * expiry of timer1 results in an interrupt and the expiry of + * timer2 results in a reboot. We set it to not generate + * any interrupts as there is not much we can do with it + * right now. + */ + pci_write_config_word(esb_pci, ESB_CONFIG_REG, 0x0003); + + /* Check that the WDT isn't already locked */ + pci_read_config_byte(esb_pci, ESB_LOCK_REG, &val1); + if (val1 & ESB_WDT_LOCK) + printk(KERN_WARNING PFX "nowayout already set\n"); + + /* Set the timer to watchdog mode and disable it for now */ + pci_write_config_byte(esb_pci, ESB_LOCK_REG, 0x00); + + /* Check if the watchdog was previously triggered */ + esb_unlock_registers(); + val2 = readw(ESB_RELOAD_REG); + if (val2 & ESB_WDT_TIMEOUT) + triggered = WDIOF_CARDRESET; + + /* Reset WDT_TIMEOUT flag and timers */ + esb_unlock_registers(); + writew((ESB_WDT_TIMEOUT | ESB_WDT_RELOAD), ESB_RELOAD_REG); + + /* And set the correct timeout value */ + esb_timer_set_heartbeat(heartbeat); +} + static int __devinit esb_probe(struct platform_device *dev) { int ret; @@ -441,13 +440,17 @@ static int __devinit esb_probe(struct platform_device *dev) /* Check that the heartbeat value is within it's range; if not reset to the default */ - if (esb_timer_set_heartbeat(heartbeat)) { - esb_timer_set_heartbeat(WATCHDOG_HEARTBEAT); + if (heartbeat < 0x1 || heartbeat > 2 * 0x03ff) { + heartbeat = WATCHDOG_HEARTBEAT; printk(KERN_INFO PFX "heartbeat value must be 1<heartbeat<2046, using %d\n", heartbeat); } + /* Initialize the watchdog and make sure it does not run */ + esb_initdevice(); + + /* Register the watchdog so that userspace has access to it */ ret = misc_register(&esb_miscdev); if (ret != 0) { printk(KERN_ERR PFX @@ -455,7 +458,6 @@ static int __devinit esb_probe(struct platform_device *dev) WATCHDOG_MINOR, ret); goto err_unmap; } - esb_timer_stop(); printk(KERN_INFO PFX "initialized (0x%p). 
heartbeat=%d sec (nowayout=%d)\n", BASEADDR, heartbeat, nowayout); @@ -463,11 +465,8 @@ static int __devinit esb_probe(struct platform_device *dev) err_unmap: iounmap(BASEADDR); -/* err_release: */ pci_release_region(esb_pci, 0); -/* err_disable: */ pci_disable_device(esb_pci); -/* err_devput: */ pci_dev_put(esb_pci); return ret; } diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c index 74c92d38411..ae3832110ac 100644 --- a/drivers/watchdog/ks8695_wdt.c +++ b/drivers/watchdog/ks8695_wdt.c @@ -221,7 +221,7 @@ static struct miscdevice ks8695wdt_miscdev = { .fops = &ks8695wdt_fops, }; -static int __init ks8695wdt_probe(struct platform_device *pdev) +static int __devinit ks8695wdt_probe(struct platform_device *pdev) { int res; @@ -238,7 +238,7 @@ static int __init ks8695wdt_probe(struct platform_device *pdev) return 0; } -static int __exit ks8695wdt_remove(struct platform_device *pdev) +static int __devexit ks8695wdt_remove(struct platform_device *pdev) { int res; @@ -276,7 +276,7 @@ static int ks8695wdt_resume(struct platform_device *pdev) static struct platform_driver ks8695wdt_driver = { .probe = ks8695wdt_probe, - .remove = __exit_p(ks8695wdt_remove), + .remove = __devexit_p(ks8695wdt_remove), .shutdown = ks8695wdt_shutdown, .suspend = ks8695wdt_suspend, .resume = ks8695wdt_resume, diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c index aa5ad6e33f0..f2713851aaa 100644 --- a/drivers/watchdog/omap_wdt.c +++ b/drivers/watchdog/omap_wdt.c @@ -258,7 +258,7 @@ static const struct file_operations omap_wdt_fops = { .release = omap_wdt_release, }; -static int __init omap_wdt_probe(struct platform_device *pdev) +static int __devinit omap_wdt_probe(struct platform_device *pdev) { struct resource *res, *mem; struct omap_wdt_dev *wdev; @@ -367,7 +367,7 @@ static void omap_wdt_shutdown(struct platform_device *pdev) omap_wdt_disable(wdev); } -static int omap_wdt_remove(struct platform_device *pdev) +static int __devexit omap_wdt_remove(struct platform_device *pdev) { struct omap_wdt_dev *wdev = platform_get_drvdata(pdev); struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -426,7 +426,7 @@ static int omap_wdt_resume(struct platform_device *pdev) static struct platform_driver omap_wdt_driver = { .probe = omap_wdt_probe, - .remove = omap_wdt_remove, + .remove = __devexit_p(omap_wdt_remove), .shutdown = omap_wdt_shutdown, .suspend = omap_wdt_suspend, .resume = omap_wdt_resume, diff --git a/drivers/watchdog/orion5x_wdt.c b/drivers/watchdog/orion5x_wdt.c index e81441f103d..7529616739d 100644 --- a/drivers/watchdog/orion5x_wdt.c +++ b/drivers/watchdog/orion5x_wdt.c @@ -42,7 +42,17 @@ static unsigned int wdt_tclk; static unsigned long wdt_status; static spinlock_t wdt_lock; -static void wdt_enable(void) +static void orion5x_wdt_ping(void) +{ + spin_lock(&wdt_lock); + + /* Reload watchdog duration */ + writel(wdt_tclk * heartbeat, WDT_VAL); + + spin_unlock(&wdt_lock); +} + +static void orion5x_wdt_enable(void) { u32 reg; @@ -69,7 +79,7 @@ static void wdt_enable(void) spin_unlock(&wdt_lock); } -static void wdt_disable(void) +static void orion5x_wdt_disable(void) { u32 reg; @@ -101,7 +111,7 @@ static int orion5x_wdt_open(struct inode *inode, struct file *file) if (test_and_set_bit(WDT_IN_USE, &wdt_status)) return -EBUSY; clear_bit(WDT_OK_TO_CLOSE, &wdt_status); - wdt_enable(); + orion5x_wdt_enable(); return nonseekable_open(inode, file); } @@ -122,18 +132,28 @@ static ssize_t orion5x_wdt_write(struct file *file, const char *data, 
set_bit(WDT_OK_TO_CLOSE, &wdt_status); } } - wdt_enable(); + orion5x_wdt_ping(); } return len; } -static struct watchdog_info ident = { +static int orion5x_wdt_settimeout(int new_time) +{ + if ((new_time <= 0) || (new_time > wdt_max_duration)) + return -EINVAL; + + /* Set new watchdog time to be used when + * orion5x_wdt_enable() or orion5x_wdt_ping() is called. */ + heartbeat = new_time; + return 0; +} + +static const struct watchdog_info ident = { .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, .identity = "Orion5x Watchdog", }; - static long orion5x_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { @@ -152,7 +172,7 @@ static long orion5x_wdt_ioctl(struct file *file, unsigned int cmd, break; case WDIOC_KEEPALIVE: - wdt_enable(); + orion5x_wdt_ping(); ret = 0; break; @@ -161,12 +181,11 @@ static long orion5x_wdt_ioctl(struct file *file, unsigned int cmd, if (ret) break; - if (time <= 0 || time > wdt_max_duration) { + if (orion5x_wdt_settimeout(time)) { ret = -EINVAL; break; } - heartbeat = time; - wdt_enable(); + orion5x_wdt_ping(); /* Fall through */ case WDIOC_GETTIMEOUT: @@ -187,7 +206,7 @@ static long orion5x_wdt_ioctl(struct file *file, unsigned int cmd, static int orion5x_wdt_release(struct inode *inode, struct file *file) { if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) - wdt_disable(); + orion5x_wdt_disable(); else printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - " "timer will not stop\n"); @@ -230,7 +249,7 @@ static int __devinit orion5x_wdt_probe(struct platform_device *pdev) orion5x_wdt_miscdev.parent = &pdev->dev; wdt_max_duration = WDT_MAX_CYCLE_COUNT / wdt_tclk; - if (heartbeat <= 0 || heartbeat > wdt_max_duration) + if (orion5x_wdt_settimeout(heartbeat)) heartbeat = wdt_max_duration; ret = misc_register(&orion5x_wdt_miscdev); @@ -247,7 +266,7 @@ static int __devexit orion5x_wdt_remove(struct platform_device *pdev) int ret; if (test_bit(WDT_IN_USE, &wdt_status)) { - wdt_disable(); + orion5x_wdt_disable(); clear_bit(WDT_IN_USE, &wdt_status); } @@ -258,9 +277,16 @@ static int __devexit orion5x_wdt_remove(struct platform_device *pdev) return ret; } +static void orion5x_wdt_shutdown(struct platform_device *pdev) +{ + if (test_bit(WDT_IN_USE, &wdt_status)) + orion5x_wdt_disable(); +} + static struct platform_driver orion5x_wdt_driver = { .probe = orion5x_wdt_probe, .remove = __devexit_p(orion5x_wdt_remove), + .shutdown = orion5x_wdt_shutdown, .driver = { .owner = THIS_MODULE, .name = "orion5x_wdt", @@ -285,10 +311,11 @@ MODULE_AUTHOR("Sylver Bruneau <sylver.bruneau@googlemail.com>"); MODULE_DESCRIPTION("Orion5x Processor Watchdog"); module_param(heartbeat, int, 0); -MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds"); +MODULE_PARM_DESC(heartbeat, "Initial watchdog heartbeat in seconds"); module_param(nowayout, int, 0); -MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c index 5f54c01c156..bdfd584ad85 100644 --- a/drivers/xen/cpu_hotplug.c +++ b/drivers/xen/cpu_hotplug.c @@ -21,29 +21,41 @@ static void disable_hotplug_cpu(int cpu) set_cpu_present(cpu, false); } -static void vcpu_hotplug(unsigned int cpu) +static int vcpu_online(unsigned int cpu) { int err; char dir[32], state[32]; - if (!cpu_possible(cpu)) - return; - sprintf(dir, "cpu/%u", cpu); err = 
xenbus_scanf(XBT_NIL, dir, "availability", "%s", state); if (err != 1) { printk(KERN_ERR "XENBUS: Unable to read cpu state\n"); - return; + return err; } - if (strcmp(state, "online") == 0) { + if (strcmp(state, "online") == 0) + return 1; + else if (strcmp(state, "offline") == 0) + return 0; + + printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n", state, cpu); + return -EINVAL; +} +static void vcpu_hotplug(unsigned int cpu) +{ + if (!cpu_possible(cpu)) + return; + + switch (vcpu_online(cpu)) { + case 1: enable_hotplug_cpu(cpu); - } else if (strcmp(state, "offline") == 0) { + break; + case 0: (void)cpu_down(cpu); disable_hotplug_cpu(cpu); - } else { - printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n", - state, cpu); + break; + default: + break; } } @@ -64,12 +76,20 @@ static void handle_vcpu_hotplug_event(struct xenbus_watch *watch, static int setup_cpu_watcher(struct notifier_block *notifier, unsigned long event, void *data) { + int cpu; static struct xenbus_watch cpu_watch = { .node = "cpu", .callback = handle_vcpu_hotplug_event}; (void)register_xenbus_watch(&cpu_watch); + for_each_possible_cpu(cpu) { + if (vcpu_online(cpu) == 0) { + (void)cpu_down(cpu); + cpu_clear(cpu, cpu_present_map); + } + } + return NOTIFY_DONE; } diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index 0d61db1e7b4..4b5b84837ee 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -62,14 +62,15 @@ static int xen_suspend(void *data) gnttab_resume(); xen_mm_unpin_all(); - sysdev_resume(); - if (!*cancelled) { xen_irq_resume(); xen_console_resume(); xen_timer_resume(); } + sysdev_resume(); + device_power_up(PMSG_RESUME); + return 0; }
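
For readability, the tail of xen_suspend() as it reads once the final drivers/xen/manage.c hunk is applied is sketched below. It is reconstructed only from the context and +/- lines shown above (the earlier suspend steps and error handling are elided), so treat it as an illustrative sketch rather than a verbatim copy of the file.

	/* ... suspend path above elided ... */

	gnttab_resume();
	xen_mm_unpin_all();

	/* Bring Xen-specific interrupt, console and timer state back first */
	if (!*cancelled) {
		xen_irq_resume();
		xen_console_resume();
		xen_timer_resume();
	}

	/* Only then resume system devices and restore device power state */
	sysdev_resume();
	device_power_up(PMSG_RESUME);

	return 0;
}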