author    | Grant Likely <grant.likely@secretlab.ca> | 2010-12-23 00:41:14 -0700
committer | Grant Likely <grant.likely@secretlab.ca> | 2010-12-23 00:41:14 -0700
commit    | cfb13c5db08c90311a5defdde9a0328ee788cca5 (patch)
tree      | 842cb60d1d19b99e550fdbc653723e4b13e51ee5 /drivers
parent    | 4b6ba8aacbb3185703b797286547d0f8f3859b02 (diff)
parent    | 90a8a73c06cc32b609a880d48449d7083327e11a (diff)
Merge commit 'v2.6.37-rc7' into devicetree/next
Diffstat (limited to 'drivers')
1004 files changed, 22275 insertions, 11174 deletions
diff --git a/drivers/Makefile b/drivers/Makefile index 14cf9077bb2..f3ebb30f1b7 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -26,6 +26,7 @@ obj-$(CONFIG_REGULATOR) += regulator/ # char/ comes before serial/ etc so that the VT console is the boot-time # default. +obj-y += tty/ obj-y += char/ # gpu/ comes after char for AGP vs DRM startup diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index ba9afeaa23a..25d3aaebc10 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -100,24 +100,7 @@ static const struct file_operations acpi_ac_fops = { .release = single_release, }; #endif -static int get_ac_property(struct power_supply *psy, - enum power_supply_property psp, - union power_supply_propval *val) -{ - struct acpi_ac *ac = to_acpi_ac(psy); - switch (psp) { - case POWER_SUPPLY_PROP_ONLINE: - val->intval = ac->state; - break; - default: - return -EINVAL; - } - return 0; -} -static enum power_supply_property ac_props[] = { - POWER_SUPPLY_PROP_ONLINE, -}; /* -------------------------------------------------------------------------- AC Adapter Management -------------------------------------------------------------------------- */ @@ -140,6 +123,35 @@ static int acpi_ac_get_state(struct acpi_ac *ac) return 0; } +/* -------------------------------------------------------------------------- + sysfs I/F + -------------------------------------------------------------------------- */ +static int get_ac_property(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct acpi_ac *ac = to_acpi_ac(psy); + + if (!ac) + return -ENODEV; + + if (acpi_ac_get_state(ac)) + return -ENODEV; + + switch (psp) { + case POWER_SUPPLY_PROP_ONLINE: + val->intval = ac->state; + break; + default: + return -EINVAL; + } + return 0; +} + +static enum power_supply_property ac_props[] = { + POWER_SUPPLY_PROP_ONLINE, +}; + #ifdef CONFIG_ACPI_PROCFS_POWER /* -------------------------------------------------------------------------- FS Interface (/proc) diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c index 1211c03149e..5850d320404 100644 --- a/drivers/acpi/apei/erst.c +++ b/drivers/acpi/apei/erst.c @@ -86,7 +86,7 @@ static struct erst_erange { * It is used to provide exclusive accessing for ERST Error Log * Address Range too. 
*/ -static DEFINE_SPINLOCK(erst_lock); +static DEFINE_RAW_SPINLOCK(erst_lock); static inline int erst_errno(int command_status) { @@ -421,9 +421,9 @@ ssize_t erst_get_record_count(void) if (erst_disable) return -ENODEV; - spin_lock_irqsave(&erst_lock, flags); + raw_spin_lock_irqsave(&erst_lock, flags); count = __erst_get_record_count(); - spin_unlock_irqrestore(&erst_lock, flags); + raw_spin_unlock_irqrestore(&erst_lock, flags); return count; } @@ -456,9 +456,9 @@ int erst_get_next_record_id(u64 *record_id) if (erst_disable) return -ENODEV; - spin_lock_irqsave(&erst_lock, flags); + raw_spin_lock_irqsave(&erst_lock, flags); rc = __erst_get_next_record_id(record_id); - spin_unlock_irqrestore(&erst_lock, flags); + raw_spin_unlock_irqrestore(&erst_lock, flags); return rc; } @@ -624,17 +624,17 @@ int erst_write(const struct cper_record_header *record) return -EINVAL; if (erst_erange.attr & ERST_RANGE_NVRAM) { - if (!spin_trylock_irqsave(&erst_lock, flags)) + if (!raw_spin_trylock_irqsave(&erst_lock, flags)) return -EBUSY; rc = __erst_write_to_nvram(record); - spin_unlock_irqrestore(&erst_lock, flags); + raw_spin_unlock_irqrestore(&erst_lock, flags); return rc; } if (record->record_length > erst_erange.size) return -EINVAL; - if (!spin_trylock_irqsave(&erst_lock, flags)) + if (!raw_spin_trylock_irqsave(&erst_lock, flags)) return -EBUSY; memcpy(erst_erange.vaddr, record, record->record_length); rcd_erange = erst_erange.vaddr; @@ -642,7 +642,7 @@ int erst_write(const struct cper_record_header *record) memcpy(&rcd_erange->persistence_information, "ER", 2); rc = __erst_write_to_storage(0); - spin_unlock_irqrestore(&erst_lock, flags); + raw_spin_unlock_irqrestore(&erst_lock, flags); return rc; } @@ -696,9 +696,9 @@ ssize_t erst_read(u64 record_id, struct cper_record_header *record, if (erst_disable) return -ENODEV; - spin_lock_irqsave(&erst_lock, flags); + raw_spin_lock_irqsave(&erst_lock, flags); len = __erst_read(record_id, record, buflen); - spin_unlock_irqrestore(&erst_lock, flags); + raw_spin_unlock_irqrestore(&erst_lock, flags); return len; } EXPORT_SYMBOL_GPL(erst_read); @@ -719,20 +719,20 @@ ssize_t erst_read_next(struct cper_record_header *record, size_t buflen) if (erst_disable) return -ENODEV; - spin_lock_irqsave(&erst_lock, flags); + raw_spin_lock_irqsave(&erst_lock, flags); rc = __erst_get_next_record_id(&record_id); if (rc) { - spin_unlock_irqrestore(&erst_lock, flags); + raw_spin_unlock_irqrestore(&erst_lock, flags); return rc; } /* no more record */ if (record_id == APEI_ERST_INVALID_RECORD_ID) { - spin_unlock_irqrestore(&erst_lock, flags); + raw_spin_unlock_irqrestore(&erst_lock, flags); return 0; } len = __erst_read(record_id, record, buflen); - spin_unlock_irqrestore(&erst_lock, flags); + raw_spin_unlock_irqrestore(&erst_lock, flags); return len; } @@ -746,12 +746,12 @@ int erst_clear(u64 record_id) if (erst_disable) return -ENODEV; - spin_lock_irqsave(&erst_lock, flags); + raw_spin_lock_irqsave(&erst_lock, flags); if (erst_erange.attr & ERST_RANGE_NVRAM) rc = __erst_clear_from_nvram(record_id); else rc = __erst_clear_from_storage(record_id); - spin_unlock_irqrestore(&erst_lock, flags); + raw_spin_unlock_irqrestore(&erst_lock, flags); return rc; } diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c index 1a3508a7fe0..daa7bc63f1d 100644 --- a/drivers/acpi/apei/hest.c +++ b/drivers/acpi/apei/hest.c @@ -46,9 +46,9 @@ EXPORT_SYMBOL_GPL(hest_disable); /* HEST table parsing */ -static struct acpi_table_hest *hest_tab; +static struct acpi_table_hest *__read_mostly hest_tab; 
-static int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = { +static const int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = { [ACPI_HEST_TYPE_IA32_CHECK] = -1, /* need further calculation */ [ACPI_HEST_TYPE_IA32_CORRECTED_CHECK] = -1, [ACPI_HEST_TYPE_IA32_NMI] = sizeof(struct acpi_hest_ia_nmi), @@ -126,7 +126,7 @@ struct ghes_arr { unsigned int count; }; -static int hest_parse_ghes_count(struct acpi_hest_header *hest_hdr, void *data) +static int __init hest_parse_ghes_count(struct acpi_hest_header *hest_hdr, void *data) { int *count = data; @@ -135,7 +135,7 @@ static int hest_parse_ghes_count(struct acpi_hest_header *hest_hdr, void *data) return 0; } -static int hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data) +static int __init hest_parse_ghes(struct acpi_hest_header *hest_hdr, void *data) { struct platform_device *ghes_dev; struct ghes_arr *ghes_arr = data; @@ -165,7 +165,7 @@ err: return rc; } -static int hest_ghes_dev_register(unsigned int ghes_count) +static int __init hest_ghes_dev_register(unsigned int ghes_count) { int rc, i; struct ghes_arr ghes_arr; diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 95649d37307..9fb9d5ac939 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -130,6 +130,8 @@ struct acpi_battery { unsigned long flags; }; +static int acpi_battery_update(struct acpi_battery *battery); + #define to_acpi_battery(x) container_of(x, struct acpi_battery, bat); inline int acpi_battery_present(struct acpi_battery *battery) @@ -184,6 +186,9 @@ static int acpi_battery_get_property(struct power_supply *psy, int ret = 0; struct acpi_battery *battery = to_acpi_battery(psy); + if (acpi_battery_update(battery)) + return -ENODEV; + if (acpi_battery_present(battery)) { /* run battery update only if it is present */ acpi_battery_get_state(battery); diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c index 6355b575ee5..5df67f1d6c6 100644 --- a/drivers/acpi/debugfs.c +++ b/drivers/acpi/debugfs.c @@ -80,7 +80,7 @@ int __init acpi_debugfs_init(void) if (!acpi_dir) goto err; - cm_dentry = debugfs_create_file("custom_method", S_IWUGO, + cm_dentry = debugfs_create_file("custom_method", S_IWUSR, acpi_dir, NULL, &cm_fops); if (!cm_dentry) goto err; diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 372ff80b7b0..302b31ed31f 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -934,6 +934,9 @@ static struct dmi_system_id __initdata ec_dmi_table[] = { ec_flag_msi, "MSI hardware", { DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star")}, NULL}, { + ec_flag_msi, "MSI hardware", { + DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR")}, NULL}, + { ec_validate_ecdt, "ASUS hardware", { DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL}, {}, diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 966feddf6b1..055d7b701ff 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -110,9 +110,6 @@ struct acpi_ioremap { static LIST_HEAD(acpi_ioremaps); static DEFINE_SPINLOCK(acpi_ioremap_lock); -#define OSI_STRING_LENGTH_MAX 64 /* arbitrary */ -static char osi_setup_string[OSI_STRING_LENGTH_MAX]; - static void __init acpi_osi_setup_late(void); /* @@ -152,8 +149,7 @@ static struct osi_linux { unsigned int enable:1; unsigned int dmi:1; unsigned int cmdline:1; - unsigned int known:1; -} osi_linux = { 0, 0, 0, 0}; +} osi_linux = {0, 0, 0}; static u32 acpi_osi_handler(acpi_string interface, u32 supported) { @@ -1055,13 +1051,53 @@ static int __init acpi_os_name_setup(char *str) __setup("acpi_os_name=", acpi_os_name_setup); +#define OSI_STRING_LENGTH_MAX 64 /* 
arbitrary */ +#define OSI_STRING_ENTRIES_MAX 16 /* arbitrary */ + +struct osi_setup_entry { + char string[OSI_STRING_LENGTH_MAX]; + bool enable; +}; + +static struct osi_setup_entry __initdata osi_setup_entries[OSI_STRING_ENTRIES_MAX]; + +void __init acpi_osi_setup(char *str) +{ + struct osi_setup_entry *osi; + bool enable = true; + int i; + + if (!acpi_gbl_create_osi_method) + return; + + if (str == NULL || *str == '\0') { + printk(KERN_INFO PREFIX "_OSI method disabled\n"); + acpi_gbl_create_osi_method = FALSE; + return; + } + + if (*str == '!') { + str++; + enable = false; + } + + for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) { + osi = &osi_setup_entries[i]; + if (!strcmp(osi->string, str)) { + osi->enable = enable; + break; + } else if (osi->string[0] == '\0') { + osi->enable = enable; + strncpy(osi->string, str, OSI_STRING_LENGTH_MAX); + break; + } + } +} + static void __init set_osi_linux(unsigned int enable) { - if (osi_linux.enable != enable) { + if (osi_linux.enable != enable) osi_linux.enable = enable; - printk(KERN_NOTICE PREFIX "%sed _OSI(Linux)\n", - enable ? "Add": "Delet"); - } if (osi_linux.enable) acpi_osi_setup("Linux"); @@ -1073,7 +1109,8 @@ static void __init set_osi_linux(unsigned int enable) static void __init acpi_cmdline_osi_linux(unsigned int enable) { - osi_linux.cmdline = 1; /* cmdline set the default */ + osi_linux.cmdline = 1; /* cmdline set the default and override DMI */ + osi_linux.dmi = 0; set_osi_linux(enable); return; @@ -1081,15 +1118,12 @@ static void __init acpi_cmdline_osi_linux(unsigned int enable) void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d) { - osi_linux.dmi = 1; /* DMI knows that this box asks OSI(Linux) */ - printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident); if (enable == -1) return; - osi_linux.known = 1; /* DMI knows which OSI(Linux) default needed */ - + osi_linux.dmi = 1; /* DMI knows that this box asks OSI(Linux) */ set_osi_linux(enable); return; @@ -1104,37 +1138,44 @@ void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d) */ static void __init acpi_osi_setup_late(void) { - char *str = osi_setup_string; + struct osi_setup_entry *osi; + char *str; + int i; + acpi_status status; - if (*str == '\0') - return; + for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) { + osi = &osi_setup_entries[i]; + str = osi->string; - if (!strcmp("!Linux", str)) { - acpi_cmdline_osi_linux(0); /* !enable */ - } else if (*str == '!') { - if (acpi_remove_interface(++str) == AE_OK) - printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str); - } else if (!strcmp("Linux", str)) { - acpi_cmdline_osi_linux(1); /* enable */ - } else { - if (acpi_install_interface(str) == AE_OK) - printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str); + if (*str == '\0') + break; + if (osi->enable) { + status = acpi_install_interface(str); + + if (ACPI_SUCCESS(status)) + printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str); + } else { + status = acpi_remove_interface(str); + + if (ACPI_SUCCESS(status)) + printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str); + } } } -int __init acpi_osi_setup(char *str) +static int __init osi_setup(char *str) { - if (str == NULL || *str == '\0') { - printk(KERN_INFO PREFIX "_OSI method disabled\n"); - acpi_gbl_create_osi_method = FALSE; - } else { - strncpy(osi_setup_string, str, OSI_STRING_LENGTH_MAX); - } + if (str && !strcmp("Linux", str)) + acpi_cmdline_osi_linux(1); + else if (str && !strcmp("!Linux", str)) + acpi_cmdline_osi_linux(0); + else + acpi_osi_setup(str); return 1; } -__setup("acpi_osi=", acpi_osi_setup); 
+__setup("acpi_osi=", osi_setup); /* enable serialization to combat AE_ALREADY_EXISTS errors */ static int __init acpi_serialize_setup(char *str) @@ -1530,7 +1571,7 @@ acpi_status __init acpi_os_initialize(void) return AE_OK; } -acpi_status acpi_os_initialize1(void) +acpi_status __init acpi_os_initialize1(void) { kacpid_wq = create_workqueue("kacpid"); kacpi_notify_wq = create_workqueue("kacpi_notify"); diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 67dedeed144..4c9c2fb5d98 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -213,11 +213,13 @@ static int acpi_power_on(acpi_handle handle) resource->name)); } else { result = __acpi_power_on(resource); + if (result) + resource->ref_count--; } mutex_unlock(&resource->resource_lock); - return 0; + return result; } static int acpi_power_off_device(acpi_handle handle) @@ -465,10 +467,12 @@ int acpi_power_transition(struct acpi_device *device, int state) struct acpi_handle_list *tl = NULL; /* Target Resources */ int i = 0; - if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3)) return -EINVAL; + if (device->power.state == state) + return 0; + if ((device->power.state < ACPI_STATE_D0) || (device->power.state > ACPI_STATE_D3)) return -ENODEV; @@ -488,10 +492,6 @@ int acpi_power_transition(struct acpi_device *device, int state) goto end; } - if (device->power.state == state) { - goto end; - } - /* * Then we dereference all power resources used in the current list. */ diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c index fde49b9b1d9..79cb6533289 100644 --- a/drivers/acpi/processor_thermal.c +++ b/drivers/acpi/processor_thermal.c @@ -156,15 +156,6 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state) return 0; } -static int acpi_thermal_cpufreq_increase(unsigned int cpu) -{ - return -ENODEV; -} -static int acpi_thermal_cpufreq_decrease(unsigned int cpu) -{ - return -ENODEV; -} - #endif int acpi_processor_get_limit_info(struct acpi_processor *pr) diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 721d93b3cee..febb153b5a6 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -27,8 +27,6 @@ static u8 sleep_states[ACPI_S_STATE_COUNT]; -static u32 acpi_target_sleep_state = ACPI_STATE_S0; - static void acpi_sleep_tts_switch(u32 acpi_state) { union acpi_object in_arg = { ACPI_TYPE_INTEGER }; @@ -81,6 +79,8 @@ static int acpi_sleep_prepare(u32 acpi_state) } #ifdef CONFIG_ACPI_SLEEP +static u32 acpi_target_sleep_state = ACPI_STATE_S0; + /* * The ACPI specification wants us to save NVS memory regions during hibernation * and to restore them during the subsequent resume. Windows does that also for @@ -427,6 +427,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"), }, }, + { + .callback = init_nvs_nosave, + .ident = "Sony Vaio VGN-NW130D", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"), + }, + }, {}, }; #endif /* CONFIG_SUSPEND */ diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index d050e073e57..66aa4bee80a 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -2552,8 +2552,11 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc) * * If door lock fails, always clear sdev->locked to * avoid this infinite loop. + * + * This may happen before SCSI scan is complete. Make + * sure qc->dev->sdev isn't NULL before dereferencing. 
*/ - if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL) + if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev) qc->dev->sdev->locked = 0; qc->scsicmd->result = SAM_STAT_CHECK_CONDITION; @@ -3163,8 +3166,8 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, /** * ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device + * @shost: SCSI host of command to be sent * @cmd: SCSI command to be sent - * @done: Completion function, called when command is complete * * In some cases, this function translates SCSI commands into * ATA taskfiles, and queues the taskfiles to be sent to @@ -3174,37 +3177,36 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, * ATA and ATAPI devices appearing as SCSI devices. * * LOCKING: - * Releases scsi-layer-held lock, and obtains host lock. + * ATA host lock * * RETURNS: * Return value from __ata_scsi_queuecmd() if @cmd can be queued, * 0 otherwise. */ -int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) +int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd) { struct ata_port *ap; struct ata_device *dev; struct scsi_device *scsidev = cmd->device; - struct Scsi_Host *shost = scsidev->host; int rc = 0; + unsigned long irq_flags; ap = ata_shost_to_port(shost); - spin_unlock(shost->host_lock); - spin_lock(ap->lock); + spin_lock_irqsave(ap->lock, irq_flags); ata_scsi_dump_cdb(ap, cmd); dev = ata_scsi_find_dev(ap, scsidev); if (likely(dev)) - rc = __ata_scsi_queuecmd(cmd, done, dev); + rc = __ata_scsi_queuecmd(cmd, cmd->scsi_done, dev); else { cmd->result = (DID_BAD_TARGET << 16); - done(cmd); + cmd->scsi_done(cmd); } - spin_unlock(ap->lock); - spin_lock(shost->host_lock); + spin_unlock_irqrestore(ap->lock, irq_flags); + return rc; } diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c index eaf194138f2..6bd9425ba5a 100644 --- a/drivers/ata/pata_legacy.c +++ b/drivers/ata/pata_legacy.c @@ -142,7 +142,7 @@ static int autospeed; /* Chip present which snoops speed changes */ static int pio_mask = ATA_PIO4; /* PIO range for autospeed devices */ static int iordy_mask = 0xFFFFFFFF; /* Use iordy if available */ -#ifdef PATA_WINBOND_VLB_MODULE +#ifdef CONFIG_PATA_WINBOND_VLB_MODULE static int winbond = 1; /* Set to probe Winbond controllers, give I/O port if non standard */ #else diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index 74b82981789..fa1b95a9a7f 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c @@ -653,8 +653,6 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance) ap = host->ports[i]; ocd = ap->dev->platform_data; - - ocd = ap->dev->platform_data; cf_port = ap->private_data; dma_int.u64 = cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine)); diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c index c21589986c6..8b677bbf2d3 100644 --- a/drivers/ata/sata_via.c +++ b/drivers/ata/sata_via.c @@ -538,7 +538,7 @@ static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host) return 0; } -static void svia_configure(struct pci_dev *pdev) +static void svia_configure(struct pci_dev *pdev, int board_id) { u8 tmp8; @@ -577,7 +577,7 @@ static void svia_configure(struct pci_dev *pdev) } /* - * vt6421 has problems talking to some drives. The following + * vt6420/1 has problems talking to some drives. The following * is the fix from Joseph Chan <JosephChan@via.com.tw>. 
* * When host issues HOLD, device may send up to 20DW of data @@ -596,8 +596,9 @@ static void svia_configure(struct pci_dev *pdev) * * https://bugzilla.kernel.org/show_bug.cgi?id=15173 * http://article.gmane.org/gmane.linux.ide/46352 + * http://thread.gmane.org/gmane.linux.kernel/1062139 */ - if (pdev->device == 0x3249) { + if (board_id == vt6420 || board_id == vt6421) { pci_read_config_byte(pdev, 0x52, &tmp8); tmp8 |= 1 << 2; pci_write_config_byte(pdev, 0x52, tmp8); @@ -652,7 +653,7 @@ static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; - svia_configure(pdev); + svia_configure(pdev, board_id); pci_set_master(pdev); return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c index 46b94762125..f9b983ae687 100644 --- a/drivers/atm/adummy.c +++ b/drivers/atm/adummy.c @@ -154,7 +154,7 @@ static int __init adummy_init(void) err = -ENOMEM; goto out; } - atm_dev = atm_dev_register(DEV_LABEL, &adummy_ops, -1, NULL); + atm_dev = atm_dev_register(DEV_LABEL, NULL, &adummy_ops, -1, NULL); if (!atm_dev) { printk(KERN_ERR DEV_LABEL ": atm_dev_register() failed\n"); err = -ENODEV; diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c index a33896a482e..ffe9b655292 100644 --- a/drivers/atm/ambassador.c +++ b/drivers/atm/ambassador.c @@ -2244,7 +2244,8 @@ static int __devinit amb_probe(struct pci_dev *pci_dev, const struct pci_device_ goto out_reset; } - dev->atm_dev = atm_dev_register (DEV_LABEL, &amb_ops, -1, NULL); + dev->atm_dev = atm_dev_register (DEV_LABEL, &pci_dev->dev, &amb_ops, -1, + NULL); if (!dev->atm_dev) { PRINTD (DBG_ERR, "failed to register Madge ATM adapter"); err = -EINVAL; diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c index b9101818b47..2b464b631f2 100644 --- a/drivers/atm/atmtcp.c +++ b/drivers/atm/atmtcp.c @@ -366,7 +366,7 @@ static int atmtcp_create(int itf,int persist,struct atm_dev **result) if (!dev_data) return -ENOMEM; - dev = atm_dev_register(DEV_LABEL,&atmtcp_v_dev_ops,itf,NULL); + dev = atm_dev_register(DEV_LABEL,NULL,&atmtcp_v_dev_ops,itf,NULL); if (!dev) { kfree(dev_data); return itf == -1 ? 
-ENOMEM : -EBUSY; diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index 97c5898cd76..c495fae7420 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -2244,7 +2244,7 @@ static int __devinit eni_init_one(struct pci_dev *pci_dev, &zeroes); if (!cpu_zeroes) goto out1; } - dev = atm_dev_register(DEV_LABEL,&ops,-1,NULL); + dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &ops, -1, NULL); if (!dev) goto out2; pci_set_drvdata(pci_dev, dev); eni_dev->pci_dev = pci_dev; diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index 5d86bb803e9..7d912baf01d 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -1911,7 +1911,7 @@ static int __devinit firestream_init_one (struct pci_dev *pci_dev, fs_dev, sizeof (struct fs_dev)); if (!fs_dev) goto err_out; - atm_dev = atm_dev_register("fs", &ops, -1, NULL); + atm_dev = atm_dev_register("fs", &pci_dev->dev, &ops, -1, NULL); if (!atm_dev) goto err_out_free_fs_dev; diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index c8fc69c85a0..962c309b40c 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c @@ -2567,14 +2567,14 @@ release: static int __devinit -fore200e_register(struct fore200e* fore200e) +fore200e_register(struct fore200e* fore200e, struct device *parent) { struct atm_dev* atm_dev; DPRINTK(2, "device %s being registered\n", fore200e->name); - atm_dev = atm_dev_register(fore200e->bus->proc_name, &fore200e_ops, -1, - NULL); + atm_dev = atm_dev_register(fore200e->bus->proc_name, parent, &fore200e_ops, + -1, NULL); if (atm_dev == NULL) { printk(FORE200E "unable to register device %s\n", fore200e->name); return -ENODEV; @@ -2594,9 +2594,9 @@ fore200e_register(struct fore200e* fore200e) static int __devinit -fore200e_init(struct fore200e* fore200e) +fore200e_init(struct fore200e* fore200e, struct device *parent) { - if (fore200e_register(fore200e) < 0) + if (fore200e_register(fore200e, parent) < 0) return -ENODEV; if (fore200e->bus->configure(fore200e) < 0) @@ -2662,7 +2662,7 @@ static int __devinit fore200e_sba_probe(struct platform_device *op, sprintf(fore200e->name, "%s-%d", bus->model_name, index); - err = fore200e_init(fore200e); + err = fore200e_init(fore200e, &op->dev); if (err < 0) { fore200e_shutdown(fore200e); kfree(fore200e); @@ -2740,7 +2740,7 @@ fore200e_pca_detect(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent sprintf(fore200e->name, "%s-%d", bus->model_name, index); - err = fore200e_init(fore200e); + err = fore200e_init(fore200e, &pci_dev->dev); if (err < 0) { fore200e_shutdown(fore200e); goto out_free; diff --git a/drivers/atm/he.c b/drivers/atm/he.c index 801e8b6e9d1..6cf59bf281d 100644 --- a/drivers/atm/he.c +++ b/drivers/atm/he.c @@ -366,7 +366,7 @@ he_init_one(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent) goto init_one_failure; } - atm_dev = atm_dev_register(DEV_LABEL, &he_ops, -1, NULL); + atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &he_ops, -1, NULL); if (!atm_dev) { err = -ENODEV; goto init_one_failure; diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c index a95790452a6..24761e1d664 100644 --- a/drivers/atm/horizon.c +++ b/drivers/atm/horizon.c @@ -2733,7 +2733,8 @@ static int __devinit hrz_probe(struct pci_dev *pci_dev, const struct pci_device_ PRINTD(DBG_INFO, "found Madge ATM adapter (hrz) at: IO %x, IRQ %u, MEM %p", iobase, irq, membase); - dev->atm_dev = atm_dev_register(DEV_LABEL, &hrz_ops, -1, NULL); + dev->atm_dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &hrz_ops, -1, + NULL); if (!(dev->atm_dev)) { PRINTD(DBG_ERR, 
"failed to register Madge ATM adapter"); err = -EINVAL; diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index bce57328ddd..bfb7feee040 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -3698,7 +3698,8 @@ idt77252_init_one(struct pci_dev *pcidev, const struct pci_device_id *id) goto err_out_iounmap; } - dev = atm_dev_register("idt77252", &idt77252_ops, -1, NULL); + dev = atm_dev_register("idt77252", &pcidev->dev, &idt77252_ops, -1, + NULL); if (!dev) { printk("%s: can't register atm device\n", card->name); err = -EIO; diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index 9309d4724e1..72925405375 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -3172,7 +3172,7 @@ static int __devinit ia_init_one(struct pci_dev *pdev, ret = -ENODEV; goto err_out_free_iadev; } - dev = atm_dev_register(DEV_LABEL, &ops, -1, NULL); + dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL); if (!dev) { ret = -ENOMEM; goto err_out_disable_dev; diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c index cbe15a86c66..a395c9aab14 100644 --- a/drivers/atm/lanai.c +++ b/drivers/atm/lanai.c @@ -2591,7 +2591,7 @@ static int __devinit lanai_init_one(struct pci_dev *pci, return -ENOMEM; } - atmdev = atm_dev_register(DEV_LABEL, &ops, -1, NULL); + atmdev = atm_dev_register(DEV_LABEL, &pci->dev, &ops, -1, NULL); if (atmdev == NULL) { printk(KERN_ERR DEV_LABEL ": couldn't register atm device!\n"); diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c index 2f3516b7f11..6b313ee9231 100644 --- a/drivers/atm/nicstar.c +++ b/drivers/atm/nicstar.c @@ -771,7 +771,8 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev) } /* Register device */ - card->atmdev = atm_dev_register("nicstar", &atm_ops, -1, NULL); + card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops, + -1, NULL); if (card->atmdev == NULL) { printk("nicstar%d: can't register device.\n", i); error = 17; diff --git a/drivers/atm/solos-attrlist.c b/drivers/atm/solos-attrlist.c index 1a9332e4efe..9a676ee3082 100644 --- a/drivers/atm/solos-attrlist.c +++ b/drivers/atm/solos-attrlist.c @@ -1,6 +1,7 @@ SOLOS_ATTR_RO(DriverVersion) SOLOS_ATTR_RO(APIVersion) SOLOS_ATTR_RO(FirmwareVersion) +SOLOS_ATTR_RO(Version) // SOLOS_ATTR_RO(DspVersion) // SOLOS_ATTR_RO(CommonHandshake) SOLOS_ATTR_RO(Connected) diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index f46138ab38b..73fb1c4f4cd 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c @@ -166,7 +166,7 @@ static irqreturn_t solos_irq(int irq, void *dev_id); static struct atm_vcc* find_vcc(struct atm_dev *dev, short vpi, int vci); static int list_vccs(int vci); static void release_vccs(struct atm_dev *dev); -static int atm_init(struct solos_card *); +static int atm_init(struct solos_card *, struct device *); static void atm_remove(struct solos_card *); static int send_command(struct solos_card *card, int dev, const char *buf, size_t size); static void solos_bh(unsigned long); @@ -1161,6 +1161,14 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id) dev_info(&dev->dev, "Solos FPGA Version %d.%02d svn-%d\n", major_ver, minor_ver, fpga_ver); + if (fpga_ver < 37 && (fpga_upgrade || firmware_upgrade || + db_fpga_upgrade || db_firmware_upgrade)) { + dev_warn(&dev->dev, + "FPGA too old; cannot upgrade flash. 
Use JTAG.\n"); + fpga_upgrade = firmware_upgrade = 0; + db_fpga_upgrade = db_firmware_upgrade = 0; + } + if (card->fpga_version >= DMA_SUPPORTED){ card->using_dma = 1; } else { @@ -1202,7 +1210,7 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id) if (db_firmware_upgrade) flash_upgrade(card, 3); - err = atm_init(card); + err = atm_init(card, &dev->dev); if (err) goto out_free_irq; @@ -1225,7 +1233,7 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id) return err; } -static int atm_init(struct solos_card *card) +static int atm_init(struct solos_card *card, struct device *parent) { int i; @@ -1236,7 +1244,7 @@ static int atm_init(struct solos_card *card) skb_queue_head_init(&card->tx_queue[i]); skb_queue_head_init(&card->cli_queue[i]); - card->atmdev[i] = atm_dev_register("solos-pci", &fpga_ops, -1, NULL); + card->atmdev[i] = atm_dev_register("solos-pci", parent, &fpga_ops, -1, NULL); if (!card->atmdev[i]) { dev_err(&card->dev->dev, "Could not register ATM device %d\n", i); atm_remove(card); diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index 4e885d2da49..624917902b6 100644 --- a/drivers/atm/zatm.c +++ b/drivers/atm/zatm.c @@ -1597,7 +1597,7 @@ static int __devinit zatm_init_one(struct pci_dev *pci_dev, goto out; } - dev = atm_dev_register(DEV_LABEL, &ops, -1, NULL); + dev = atm_dev_register(DEV_LABEL, &pci_dev->dev, &ops, -1, NULL); if (!dev) goto out_free; diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 31b526661ec..ead3e79d6fc 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -475,20 +475,33 @@ End: */ void dpm_resume_noirq(pm_message_t state) { - struct device *dev; + struct list_head list; ktime_t starttime = ktime_get(); + INIT_LIST_HEAD(&list); mutex_lock(&dpm_list_mtx); transition_started = false; - list_for_each_entry(dev, &dpm_list, power.entry) + while (!list_empty(&dpm_list)) { + struct device *dev = to_device(dpm_list.next); + + get_device(dev); if (dev->power.status > DPM_OFF) { int error; dev->power.status = DPM_OFF; + mutex_unlock(&dpm_list_mtx); + error = device_resume_noirq(dev, state); + + mutex_lock(&dpm_list_mtx); if (error) pm_dev_err(dev, state, " early", error); } + if (!list_empty(&dev->power.entry)) + list_move_tail(&dev->power.entry, &list); + put_device(dev); + } + list_splice(&list, &dpm_list); mutex_unlock(&dpm_list_mtx); dpm_show_time(starttime, state, "early"); resume_device_irqs(); @@ -789,20 +802,33 @@ End: */ int dpm_suspend_noirq(pm_message_t state) { - struct device *dev; + struct list_head list; ktime_t starttime = ktime_get(); int error = 0; + INIT_LIST_HEAD(&list); suspend_device_irqs(); mutex_lock(&dpm_list_mtx); - list_for_each_entry_reverse(dev, &dpm_list, power.entry) { + while (!list_empty(&dpm_list)) { + struct device *dev = to_device(dpm_list.prev); + + get_device(dev); + mutex_unlock(&dpm_list_mtx); + error = device_suspend_noirq(dev, state); + + mutex_lock(&dpm_list_mtx); if (error) { pm_dev_err(dev, state, " late", error); + put_device(dev); break; } dev->power.status = DPM_OFF_IRQ; + if (!list_empty(&dev->power.entry)) + list_move(&dev->power.entry, &list); + put_device(dev); } + list_splice_tail(&list, &dpm_list); mutex_unlock(&dpm_list_mtx); if (error) dpm_resume_noirq(resume_event(state)); diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index a1725e6488d..7888501ad9e 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -1341,7 +1341,7 @@ static struct request *set_next_request(void) { struct 
request_queue *q; int cnt = FD_MAX_UNITS; - struct request *rq; + struct request *rq = NULL; /* Find next queue we can dispatch from */ fdc_queue = fdc_queue + 1; diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index 541e1887996..528f6318ded 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -180,9 +180,6 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio) BUG(); bio_endio(bio, -ENXIO); return 0; - } else if (bio->bi_rw & REQ_HARDBARRIER) { - bio_endio(bio, -EOPNOTSUPP); - return 0; } else if (bio->bi_io_vec == NULL) { printk(KERN_ERR "aoe: bi_io_vec is NULL\n"); BUG(); diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index 4e4cc6c828c..605a67e40bb 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -1399,7 +1399,7 @@ static struct request *set_next_request(void) { struct request_queue *q; int old_pos = fdc_queue; - struct request *rq; + struct request *rq = NULL; do { q = unit[fdc_queue].disk->queue; diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 2cc4dda4627..8e0f9256eb5 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -66,6 +66,7 @@ MODULE_VERSION("3.6.26"); MODULE_LICENSE("GPL"); static DEFINE_MUTEX(cciss_mutex); +static struct proc_dir_entry *proc_cciss; #include "cciss_cmd.h" #include "cciss.h" @@ -113,6 +114,8 @@ static struct board_type products[] = { {0x409D0E11, "Smart Array 6400 EM", &SA5_access}, {0x40910E11, "Smart Array 6i", &SA5_access}, {0x3225103C, "Smart Array P600", &SA5_access}, + {0x3223103C, "Smart Array P800", &SA5_access}, + {0x3234103C, "Smart Array P400", &SA5_access}, {0x3235103C, "Smart Array P400i", &SA5_access}, {0x3211103C, "Smart Array E200i", &SA5_access}, {0x3212103C, "Smart Array E200", &SA5_access}, @@ -361,8 +364,6 @@ static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", #define ENG_GIG_FACTOR (ENG_GIG/512) #define ENGAGE_SCSI "engage scsi" -static struct proc_dir_entry *proc_cciss; - static void cciss_seq_show_header(struct seq_file *seq) { ctlr_info_t *h = seq->private; @@ -2833,6 +2834,8 @@ static int cciss_revalidate(struct gendisk *disk) InquiryData_struct *inq_buff = NULL; for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) { + if (!h->drv[logvol]) + continue; if (memcmp(h->drv[logvol]->LunID, drv->LunID, sizeof(drv->LunID)) == 0) { FOUND = 1; @@ -3753,7 +3756,7 @@ static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h) for (i = 0; i < MAX_CONFIG_WAIT; i++) { if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) break; - msleep(10); + usleep_range(10000, 20000); } } @@ -3937,10 +3940,9 @@ static int __devinit cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id) *board_id = ((subsystem_device_id << 16) & 0xffff0000) | subsystem_vendor_id; - for (i = 0; i < ARRAY_SIZE(products); i++) { + for (i = 0; i < ARRAY_SIZE(products); i++) if (*board_id == products[i].board_id) return i; - } dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n", *board_id); return -ENODEV; @@ -3971,18 +3973,31 @@ static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev, return -ENODEV; } -static int __devinit cciss_wait_for_board_ready(ctlr_info_t *h) +static int __devinit cciss_wait_for_board_state(struct pci_dev *pdev, + void __iomem *vaddr, int wait_for_ready) +#define BOARD_READY 1 +#define BOARD_NOT_READY 0 { - int i; + int i, iterations; u32 scratchpad; - for (i = 0; i < CCISS_BOARD_READY_ITERATIONS; i++) { - scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); - if (scratchpad == 
CCISS_FIRMWARE_READY) - return 0; + if (wait_for_ready) + iterations = CCISS_BOARD_READY_ITERATIONS; + else + iterations = CCISS_BOARD_NOT_READY_ITERATIONS; + + for (i = 0; i < iterations; i++) { + scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); + if (wait_for_ready) { + if (scratchpad == CCISS_FIRMWARE_READY) + return 0; + } else { + if (scratchpad != CCISS_FIRMWARE_READY) + return 0; + } msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS); } - dev_warn(&h->pdev->dev, "board not ready, timed out.\n"); + dev_warn(&pdev->dev, "board not ready, timed out.\n"); return -ENODEV; } @@ -4031,6 +4046,11 @@ static int __devinit cciss_find_cfgtables(ctlr_info_t *h) static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h) { h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); + + /* Limit commands in memory limited kdump scenario. */ + if (reset_devices && h->max_commands > 32) + h->max_commands = 32; + if (h->max_commands < 16) { dev_warn(&h->pdev->dev, "Controller reports " "max supported commands of %d, an obvious lie. " @@ -4148,7 +4168,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h) err = -ENOMEM; goto err_out_free_res; } - err = cciss_wait_for_board_ready(h); + err = cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); if (err) goto err_out_free_res; err = cciss_find_cfgtables(h); @@ -4313,36 +4333,6 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u #define cciss_soft_reset_controller(p) cciss_message(p, 1, 0) #define cciss_noop(p) cciss_message(p, 3, 0) -static __devinit int cciss_reset_msi(struct pci_dev *pdev) -{ -/* the #defines are stolen from drivers/pci/msi.h. */ -#define msi_control_reg(base) (base + PCI_MSI_FLAGS) -#define PCI_MSIX_FLAGS_ENABLE (1 << 15) - - int pos; - u16 control = 0; - - pos = pci_find_capability(pdev, PCI_CAP_ID_MSI); - if (pos) { - pci_read_config_word(pdev, msi_control_reg(pos), &control); - if (control & PCI_MSI_FLAGS_ENABLE) { - dev_info(&pdev->dev, "resetting MSI\n"); - pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE); - } - } - - pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); - if (pos) { - pci_read_config_word(pdev, msi_control_reg(pos), &control); - if (control & PCI_MSIX_FLAGS_ENABLE) { - dev_info(&pdev->dev, "resetting MSI-X\n"); - pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE); - } - } - - return 0; -} - static int cciss_controller_hard_reset(struct pci_dev *pdev, void * __iomem vaddr, bool use_doorbell) { @@ -4397,17 +4387,17 @@ static int cciss_controller_hard_reset(struct pci_dev *pdev, * states or using the doorbell register. */ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) { - u16 saved_config_space[32]; u64 cfg_offset; u32 cfg_base_addr; u64 cfg_base_addr_index; void __iomem *vaddr; unsigned long paddr; u32 misc_fw_support, active_transport; - int rc, i; + int rc; CfgTable_struct __iomem *cfgtable; bool use_doorbell; u32 board_id; + u16 command_register; /* For controllers as old a the p600, this is very nearly * the same thing as @@ -4417,14 +4407,6 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) * pci_set_power_state(pci_dev, PCI_D0); * pci_restore_state(pci_dev); * - * but we can't use these nice canned kernel routines on - * kexec, because they also check the MSI/MSI-X state in PCI - * configuration space and do the wrong thing when it is - * set/cleared. 
Also, the pci_save/restore_state functions - * violate the ordering requirements for restoring the - * configuration space from the CCISS document (see the - * comment below). So we roll our own .... - * * For controllers newer than the P600, the pci power state * method of resetting doesn't work so we have another way * using the doorbell register. @@ -4443,8 +4425,13 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) return -ENODEV; } - for (i = 0; i < 32; i++) - pci_read_config_word(pdev, 2*i, &saved_config_space[i]); + /* Save the PCI command register */ + pci_read_config_word(pdev, 4, &command_register); + /* Turn the board off. This is so that later pci_restore_state() + * won't turn the board on before the rest of config space is ready. + */ + pci_disable_device(pdev); + pci_save_state(pdev); /* find the first memory BAR, so we can find the cfg table */ rc = cciss_pci_find_memory_BAR(pdev, &paddr); @@ -4479,26 +4466,32 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell); if (rc) goto unmap_cfgtable; - - /* Restore the PCI configuration space. The Open CISS - * Specification says, "Restore the PCI Configuration - * Registers, offsets 00h through 60h. It is important to - * restore the command register, 16-bits at offset 04h, - * last. Do not restore the configuration status register, - * 16-bits at offset 06h." Note that the offset is 2*i. - */ - for (i = 0; i < 32; i++) { - if (i == 2 || i == 3) - continue; - pci_write_config_word(pdev, 2*i, saved_config_space[i]); + pci_restore_state(pdev); + rc = pci_enable_device(pdev); + if (rc) { + dev_warn(&pdev->dev, "failed to enable device.\n"); + goto unmap_cfgtable; } - wmb(); - pci_write_config_word(pdev, 4, saved_config_space[2]); + pci_write_config_word(pdev, 4, command_register); /* Some devices (notably the HP Smart Array 5i Controller) need a little pause here */ msleep(CCISS_POST_RESET_PAUSE_MSECS); + /* Wait for board to become not ready, then ready. */ + dev_info(&pdev->dev, "Waiting for board to become ready.\n"); + rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY); + if (rc) /* Don't bail, might be E500, etc. which can't be reset */ + dev_warn(&pdev->dev, + "failed waiting for board to become not ready\n"); + rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY); + if (rc) { + dev_warn(&pdev->dev, + "failed waiting for board to become ready\n"); + goto unmap_cfgtable; + } + dev_info(&pdev->dev, "board ready.\n"); + /* Controller should be in simple mode at this point. If it's not, * It means we're on one of those controllers which doesn't support * the doorbell reset method and on which the PCI power management reset @@ -4539,8 +4532,6 @@ static __devinit int cciss_init_reset_devices(struct pci_dev *pdev) return 0; /* just try to do the kdump anyhow. */ if (rc) return -ENODEV; - if (cciss_reset_msi(pdev)) - return -ENODEV; /* Now try to get the controller to respond to a no-op */ for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) { @@ -4936,7 +4927,8 @@ static void __exit cciss_cleanup(void) } } kthread_stop(cciss_scan_thread); - remove_proc_entry("driver/cciss", NULL); + if (proc_cciss) + remove_proc_entry("driver/cciss", NULL); bus_unregister(&cciss_bus_type); } diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h index ae340ffc8f8..4b8933d778f 100644 --- a/drivers/block/cciss.h +++ b/drivers/block/cciss.h @@ -200,10 +200,14 @@ struct ctlr_info * the above. 
*/ #define CCISS_BOARD_READY_WAIT_SECS (120) +#define CCISS_BOARD_NOT_READY_WAIT_SECS (10) #define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100) #define CCISS_BOARD_READY_ITERATIONS \ ((CCISS_BOARD_READY_WAIT_SECS * 1000) / \ CCISS_BOARD_READY_POLL_INTERVAL_MSECS) +#define CCISS_BOARD_NOT_READY_ITERATIONS \ + ((CCISS_BOARD_NOT_READY_WAIT_SECS * 1000) / \ + CCISS_BOARD_READY_POLL_INTERVAL_MSECS) #define CCISS_POST_RESET_PAUSE_MSECS (3000) #define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (1000) #define CCISS_POST_RESET_NOOP_RETRIES (12) diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c index 575495f3c4b..727d0225b7d 100644 --- a/drivers/block/cciss_scsi.c +++ b/drivers/block/cciss_scsi.c @@ -62,8 +62,8 @@ static int cciss_scsi_proc_info( int length, /* length of data in buffer */ int func); /* 0 == read, 1 == write */ -static int cciss_scsi_queue_command (struct scsi_cmnd *cmd, - void (* done)(struct scsi_cmnd *)); +static int cciss_scsi_queue_command (struct Scsi_Host *h, + struct scsi_cmnd *cmd); static int cciss_eh_device_reset_handler(struct scsi_cmnd *); static int cciss_eh_abort_handler(struct scsi_cmnd *); @@ -1406,7 +1406,7 @@ static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *c, static int -cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) +cciss_scsi_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { ctlr_info_t *h; int rc; @@ -1504,6 +1504,8 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd return 0; } +static DEF_SCSI_QCMD(cciss_scsi_queue_command) + static void cciss_unregister_scsi(ctlr_info_t *h) { struct cciss_scsi_adapter_data_t *sa; diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index ac04ef97eac..ba95cba192b 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -78,11 +78,10 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev, init_completion(&md_io.event); md_io.error = 0; - if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags)) - rw |= REQ_HARDBARRIER; + if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags)) + rw |= REQ_FUA; rw |= REQ_UNPLUG | REQ_SYNC; - retry: bio = bio_alloc(GFP_NOIO, 1); bio->bi_bdev = bdev->md_bdev; bio->bi_sector = sector; @@ -100,17 +99,6 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev, wait_for_completion(&md_io.event); ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0; - /* check for unsupported barrier op. - * would rather check on EOPNOTSUPP, but that is not reliable. - * don't try again for ANY return value != 0 */ - if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) { - /* Try again with no barrier */ - dev_warn(DEV, "Barriers not supported on meta data device - disabling\n"); - set_bit(MD_NO_BARRIER, &mdev->flags); - rw &= ~REQ_HARDBARRIER; - bio_put(bio); - goto retry; - } out: bio_put(bio); return ok; @@ -284,18 +272,32 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused) u32 xor_sum = 0; if (!get_ldev(mdev)) { - dev_err(DEV, "get_ldev() failed in w_al_write_transaction\n"); + dev_err(DEV, + "disk is %s, cannot start al transaction (-%d +%d)\n", + drbd_disk_str(mdev->state.disk), evicted, new_enr); complete(&((struct update_al_work *)w)->event); return 1; } /* do we have to do a bitmap write, first? * TODO reduce maximum latency: * submit both bios, then wait for both, - * instead of doing two synchronous sector writes. */ + * instead of doing two synchronous sector writes. 
+ * For now, we must not write the transaction, + * if we cannot write out the bitmap of the evicted extent. */ if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE) drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT); - mutex_lock(&mdev->md_io_mutex); /* protects md_io_page, al_tr_cycle, ... */ + /* The bitmap write may have failed, causing a state change. */ + if (mdev->state.disk < D_INCONSISTENT) { + dev_err(DEV, + "disk is %s, cannot write al transaction (-%d +%d)\n", + drbd_disk_str(mdev->state.disk), evicted, new_enr); + complete(&((struct update_al_work *)w)->event); + put_ldev(mdev); + return 1; + } + + mutex_lock(&mdev->md_io_mutex); /* protects md_io_buffer, al_tr_cycle, ... */ buffer = (struct al_transaction *)page_address(mdev->md_io_page); buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC); @@ -739,7 +741,7 @@ void drbd_al_apply_to_bm(struct drbd_conf *mdev) unsigned int enr; unsigned long add = 0; char ppb[10]; - int i; + int i, tmp; wait_event(mdev->al_wait, lc_try_lock(mdev->act_log)); @@ -747,7 +749,9 @@ void drbd_al_apply_to_bm(struct drbd_conf *mdev) enr = lc_element_by_index(mdev->act_log, i)->lc_number; if (enr == LC_FREE) continue; - add += drbd_bm_ALe_set_all(mdev, enr); + tmp = drbd_bm_ALe_set_all(mdev, enr); + dynamic_dev_dbg(DEV, "AL: set %d bits in extent %u\n", tmp, enr); + add += tmp; } lc_unlock(mdev->act_log); diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 9bdcf4393c0..1ea1a34e78b 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -114,11 +114,11 @@ struct drbd_conf; #define D_ASSERT(exp) if (!(exp)) \ dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__) -#define ERR_IF(exp) if (({ \ - int _b = (exp) != 0; \ - if (_b) dev_err(DEV, "%s: (%s) in %s:%d\n", \ - __func__, #exp, __FILE__, __LINE__); \ - _b; \ +#define ERR_IF(exp) if (({ \ + int _b = (exp) != 0; \ + if (_b) dev_err(DEV, "ASSERT FAILED: %s: (%s) in %s:%d\n", \ + __func__, #exp, __FILE__, __LINE__); \ + _b; \ })) /* Defines to control fault insertion */ @@ -749,17 +749,12 @@ struct drbd_epoch { /* drbd_epoch flag bits */ enum { - DE_BARRIER_IN_NEXT_EPOCH_ISSUED, - DE_BARRIER_IN_NEXT_EPOCH_DONE, - DE_CONTAINS_A_BARRIER, DE_HAVE_BARRIER_NUMBER, - DE_IS_FINISHING, }; enum epoch_event { EV_PUT, EV_GOT_BARRIER_NR, - EV_BARRIER_DONE, EV_BECAME_LAST, EV_CLEANUP = 32, /* used as flag */ }; @@ -801,11 +796,6 @@ enum { __EE_CALL_AL_COMPLETE_IO, __EE_MAY_SET_IN_SYNC, - /* This epoch entry closes an epoch using a barrier. - * On sucessful completion, the epoch is released, - * and the P_BARRIER_ACK send. */ - __EE_IS_BARRIER, - /* In case a barrier failed, * we need to resubmit without the barrier flag. */ __EE_RESUBMITTED, @@ -820,7 +810,6 @@ enum { }; #define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO) #define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC) -#define EE_IS_BARRIER (1<<__EE_IS_BARRIER) #define EE_RESUBMITTED (1<<__EE_RESUBMITTED) #define EE_WAS_ERROR (1<<__EE_WAS_ERROR) #define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST) @@ -843,16 +832,15 @@ enum { * Gets cleared when the state.conn * goes into C_CONNECTED state. 
*/ WRITE_BM_AFTER_RESYNC, /* A kmalloc() during resync failed */ - NO_BARRIER_SUPP, /* underlying block device doesn't implement barriers */ CONSIDER_RESYNC, - MD_NO_BARRIER, /* meta data device does not support barriers, - so don't even try */ + MD_NO_FUA, /* Users wants us to not use FUA/FLUSH on meta data dev */ SUSPEND_IO, /* suspend application io */ BITMAP_IO, /* suspend application io; once no more io in flight, start bitmap io */ BITMAP_IO_QUEUED, /* Started bitmap IO */ - GO_DISKLESS, /* Disk failed, local_cnt reached zero, we are going diskless */ + GO_DISKLESS, /* Disk is being detached, on io-error or admin request. */ + WAS_IO_ERROR, /* Local disk failed returned IO error */ RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */ NET_CONGESTED, /* The data socket is congested */ @@ -947,7 +935,6 @@ enum write_ordering_e { WO_none, WO_drain_io, WO_bdev_flush, - WO_bio_barrier }; struct fifo_buffer { @@ -1281,6 +1268,7 @@ extern int drbd_bmio_set_n_write(struct drbd_conf *mdev); extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev); extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why); extern void drbd_go_diskless(struct drbd_conf *mdev); +extern void drbd_ldev_destroy(struct drbd_conf *mdev); /* Meta data layout @@ -1798,17 +1786,17 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, case EP_PASS_ON: if (!forcedetach) { if (__ratelimit(&drbd_ratelimit_state)) - dev_err(DEV, "Local IO failed in %s." - "Passing error on...\n", where); + dev_err(DEV, "Local IO failed in %s.\n", where); break; } /* NOTE fall through to detach case if forcedetach set */ case EP_DETACH: case EP_CALL_HELPER: + set_bit(WAS_IO_ERROR, &mdev->flags); if (mdev->state.disk > D_FAILED) { _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL); - dev_err(DEV, "Local IO failed in %s." - "Detaching...\n", where); + dev_err(DEV, + "Local IO failed in %s. Detaching...\n", where); } break; } @@ -1874,7 +1862,7 @@ static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev) static inline sector_t drbd_get_capacity(struct block_device *bdev) { /* return bdev ? get_capacity(bdev->bd_disk) : 0; */ - return bdev ? bdev->bd_inode->i_size >> 9 : 0; + return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0; } /** @@ -2127,7 +2115,11 @@ static inline void put_ldev(struct drbd_conf *mdev) __release(local); D_ASSERT(i >= 0); if (i == 0) { + if (mdev->state.disk == D_DISKLESS) + /* even internal references gone, safe to destroy */ + drbd_ldev_destroy(mdev); if (mdev->state.disk == D_FAILED) + /* all application IO references gone. 
*/ drbd_go_diskless(mdev); wake_up(&mdev->misc_wait); } @@ -2138,6 +2130,10 @@ static inline int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_stat { int io_allowed; + /* never get a reference while D_DISKLESS */ + if (mdev->state.disk == D_DISKLESS) + return 0; + atomic_inc(&mdev->local_cnt); io_allowed = (mdev->state.disk >= mins); if (!io_allowed) @@ -2406,12 +2402,12 @@ static inline void drbd_md_flush(struct drbd_conf *mdev) { int r; - if (test_bit(MD_NO_BARRIER, &mdev->flags)) + if (test_bit(MD_NO_FUA, &mdev->flags)) return; r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL); if (r) { - set_bit(MD_NO_BARRIER, &mdev->flags); + set_bit(MD_NO_FUA, &mdev->flags); dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r); } } diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 25c7a73c506..6be5401d0e8 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -835,6 +835,15 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN) ns.conn = os.conn; + /* we cannot fail (again) if we already detached */ + if (ns.disk == D_FAILED && os.disk == D_DISKLESS) + ns.disk = D_DISKLESS; + + /* if we are only D_ATTACHING yet, + * we can (and should) go directly to D_DISKLESS. */ + if (ns.disk == D_FAILED && os.disk == D_ATTACHING) + ns.disk = D_DISKLESS; + /* After C_DISCONNECTING only C_STANDALONE may follow */ if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE) ns.conn = os.conn; @@ -1056,7 +1065,15 @@ int __drbd_set_state(struct drbd_conf *mdev, !test_and_set_bit(CONFIG_PENDING, &mdev->flags)) set_bit(DEVICE_DYING, &mdev->flags); - mdev->state.i = ns.i; + /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference + * on the ldev here, to be sure the transition -> D_DISKLESS resp. + * drbd_ldev_destroy() won't happen before our corresponding + * after_state_ch works run, where we put_ldev again. */ + if ((os.disk != D_FAILED && ns.disk == D_FAILED) || + (os.disk != D_DISKLESS && ns.disk == D_DISKLESS)) + atomic_inc(&mdev->local_cnt); + + mdev->state = ns; wake_up(&mdev->misc_wait); wake_up(&mdev->state_wait); @@ -1268,7 +1285,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, if (test_bit(NEW_CUR_UUID, &mdev->flags)) { drbd_uuid_new_current(mdev); clear_bit(NEW_CUR_UUID, &mdev->flags); - drbd_md_sync(mdev); } spin_lock_irq(&mdev->req_lock); _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL); @@ -1365,63 +1381,64 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT) drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate"); - /* first half of local IO error */ - if (os.disk > D_FAILED && ns.disk == D_FAILED) { - enum drbd_io_error_p eh = EP_PASS_ON; + /* first half of local IO error, failure to attach, + * or administrative detach */ + if (os.disk != D_FAILED && ns.disk == D_FAILED) { + enum drbd_io_error_p eh; + int was_io_error; + /* corresponding get_ldev was in __drbd_set_state, to serialize + * our cleanup here with the transition to D_DISKLESS, + * so it is safe to dreference ldev here. */ + eh = mdev->ldev->dc.on_io_error; + was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags); + + /* current state still has to be D_FAILED, + * there is only one way out: to D_DISKLESS, + * and that may only happen after our put_ldev below. 
*/ + if (mdev->state.disk != D_FAILED) + dev_err(DEV, + "ASSERT FAILED: disk is %s during detach\n", + drbd_disk_str(mdev->state.disk)); if (drbd_send_state(mdev)) - dev_warn(DEV, "Notified peer that my disk is broken.\n"); + dev_warn(DEV, "Notified peer that I am detaching my disk\n"); else - dev_err(DEV, "Sending state for drbd_io_error() failed\n"); + dev_err(DEV, "Sending state for detaching disk failed\n"); drbd_rs_cancel_all(mdev); - if (get_ldev_if_state(mdev, D_FAILED)) { - eh = mdev->ldev->dc.on_io_error; - put_ldev(mdev); - } - if (eh == EP_CALL_HELPER) + /* In case we want to get something to stable storage still, + * this may be the last chance. + * Following put_ldev may transition to D_DISKLESS. */ + drbd_md_sync(mdev); + put_ldev(mdev); + + if (was_io_error && eh == EP_CALL_HELPER) drbd_khelper(mdev, "local-io-error"); } + /* second half of local IO error, failure to attach, + * or administrative detach, + * after local_cnt references have reached zero again */ + if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) { + /* We must still be diskless, + * re-attach has to be serialized with this! */ + if (mdev->state.disk != D_DISKLESS) + dev_err(DEV, + "ASSERT FAILED: disk is %s while going diskless\n", + drbd_disk_str(mdev->state.disk)); - /* second half of local IO error handling, - * after local_cnt references have reached zero: */ - if (os.disk == D_FAILED && ns.disk == D_DISKLESS) { - mdev->rs_total = 0; - mdev->rs_failed = 0; - atomic_set(&mdev->rs_pending_cnt, 0); - } - - if (os.disk > D_DISKLESS && ns.disk == D_DISKLESS) { - /* We must still be diskless, - * re-attach has to be serialized with this! */ - if (mdev->state.disk != D_DISKLESS) - dev_err(DEV, - "ASSERT FAILED: disk is %s while going diskless\n", - drbd_disk_str(mdev->state.disk)); + mdev->rs_total = 0; + mdev->rs_failed = 0; + atomic_set(&mdev->rs_pending_cnt, 0); - /* we cannot assert local_cnt == 0 here, as get_ldev_if_state - * will inc/dec it frequently. Since we became D_DISKLESS, no - * one has touched the protected members anymore, though, so we - * are safe to free them here. */ if (drbd_send_state(mdev)) - dev_warn(DEV, "Notified peer that I detached my disk.\n"); + dev_warn(DEV, "Notified peer that I'm now diskless.\n"); else - dev_err(DEV, "Sending state for detach failed\n"); - - lc_destroy(mdev->resync); - mdev->resync = NULL; - lc_destroy(mdev->act_log); - mdev->act_log = NULL; - __no_warn(local, - drbd_free_bc(mdev->ldev); - mdev->ldev = NULL;); - - if (mdev->md_io_tmpp) { - __free_page(mdev->md_io_tmpp); - mdev->md_io_tmpp = NULL; - } + dev_err(DEV, "Sending state for being diskless failed\n"); + /* corresponding get_ldev in __drbd_set_state + * this may finaly trigger drbd_ldev_destroy. 
*/ + put_ldev(mdev); } /* Disks got bigger while they were detached */ @@ -2772,11 +2789,6 @@ void drbd_init_set_defaults(struct drbd_conf *mdev) drbd_set_defaults(mdev); - /* for now, we do NOT yet support it, - * even though we start some framework - * to eventually support barriers */ - set_bit(NO_BARRIER_SUPP, &mdev->flags); - atomic_set(&mdev->ap_bio_cnt, 0); atomic_set(&mdev->ap_pending_cnt, 0); atomic_set(&mdev->rs_pending_cnt, 0); @@ -2842,7 +2854,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev) drbd_thread_init(mdev, &mdev->asender, drbd_asender); mdev->agreed_pro_version = PRO_VERSION_MAX; - mdev->write_ordering = WO_bio_barrier; + mdev->write_ordering = WO_bdev_flush; mdev->resync_wenr = LC_FREE; } @@ -2899,7 +2911,6 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev) D_ASSERT(list_empty(&mdev->resync_work.list)); D_ASSERT(list_empty(&mdev->unplug_work.list)); D_ASSERT(list_empty(&mdev->go_diskless.list)); - } @@ -3660,6 +3671,8 @@ void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local) get_random_bytes(&val, sizeof(u64)); _drbd_uuid_set(mdev, UI_CURRENT, val); + /* get it to stable storage _now_ */ + drbd_md_sync(mdev); } void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local) @@ -3756,19 +3769,31 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused) return 1; } +void drbd_ldev_destroy(struct drbd_conf *mdev) +{ + lc_destroy(mdev->resync); + mdev->resync = NULL; + lc_destroy(mdev->act_log); + mdev->act_log = NULL; + __no_warn(local, + drbd_free_bc(mdev->ldev); + mdev->ldev = NULL;); + + if (mdev->md_io_tmpp) { + __free_page(mdev->md_io_tmpp); + mdev->md_io_tmpp = NULL; + } + clear_bit(GO_DISKLESS, &mdev->flags); +} + static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused) { D_ASSERT(mdev->state.disk == D_FAILED); /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will * inc/dec it frequently. Once we are D_DISKLESS, no one will touch - * the protected members anymore, though, so in the after_state_ch work - * it will be safe to free them. */ + * the protected members anymore, though, so once put_ldev reaches zero + * again, it will be safe to free them. */ drbd_force_state(mdev, NS(disk, D_DISKLESS)); - /* We need to wait for return of references checked out while we still - * have been D_FAILED, though (drbd_md_sync, bitmap io). */ - wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt)); - - clear_bit(GO_DISKLESS, &mdev->flags); return 1; } @@ -3777,9 +3802,6 @@ void drbd_go_diskless(struct drbd_conf *mdev) D_ASSERT(mdev->state.disk == D_FAILED); if (!test_and_set_bit(GO_DISKLESS, &mdev->flags)) drbd_queue_work(&mdev->data.work, &mdev->go_diskless); - /* don't drbd_queue_work_front, - * we need to serialize with the after_state_ch work - * of the -> D_FAILED transition. */ } /** diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 87925e97e61..29e5c70e4e2 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -870,6 +870,11 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp retcode = ERR_DISK_CONFIGURED; goto fail; } + /* It may just now have detached because of IO error. Make sure + * drbd_ldev_destroy is done already, we may end up here very fast, + * e.g. 
if someone calls attach from the on-io-error handler, + * to realize a "hot spare" feature (not that I'd recommend that) */ + wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt)); /* allocation not in the IO path, cqueue thread context */ nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL); @@ -1098,9 +1103,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp /* Reset the "barriers don't work" bits here, then force meta data to * be written, to ensure we determine if barriers are supported. */ if (nbc->dc.no_md_flush) - set_bit(MD_NO_BARRIER, &mdev->flags); + set_bit(MD_NO_FUA, &mdev->flags); else - clear_bit(MD_NO_BARRIER, &mdev->flags); + clear_bit(MD_NO_FUA, &mdev->flags); /* Point of no return reached. * Devices and memory are no longer released by error cleanup below. @@ -1112,8 +1117,8 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp nbc = NULL; resync_lru = NULL; - mdev->write_ordering = WO_bio_barrier; - drbd_bump_write_ordering(mdev, WO_bio_barrier); + mdev->write_ordering = WO_bdev_flush; + drbd_bump_write_ordering(mdev, WO_bdev_flush); if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY)) set_bit(CRASHED_PRIMARY, &mdev->flags); @@ -1262,7 +1267,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp force_diskless_dec: put_ldev(mdev); force_diskless: - drbd_force_state(mdev, NS(disk, D_DISKLESS)); + drbd_force_state(mdev, NS(disk, D_FAILED)); drbd_md_sync(mdev); release_bdev2_fail: if (nbc) @@ -1285,10 +1290,19 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp return 0; } +/* Detaching the disk is a process in multiple stages. First we need to lock + * out application IO, in-flight IO, IO stuck in drbd_al_begin_io. + * Then we transition to D_DISKLESS, and wait for put_ldev() to return all + * internal references as well. + * Only then we have finally detached. 
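Both this wait in the attach path and the detach comment that follows rely on the same step: block until every internal reference (local_cnt) has been dropped. A compact userspace sketch of that "wait until the count drains to zero" step, using a condition variable in place of wait_event()/wake_up() on misc_wait (names are invented for the example):

#include <pthread.h>

struct refgate {
	pthread_mutex_t lock;
	pthread_cond_t  drained;
	int refs;
};

static struct refgate gate = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
};

static void refgate_get(struct refgate *g)
{
	pthread_mutex_lock(&g->lock);
	g->refs++;
	pthread_mutex_unlock(&g->lock);
}

static void refgate_put(struct refgate *g)
{
	pthread_mutex_lock(&g->lock);
	if (--g->refs == 0)
		pthread_cond_broadcast(&g->drained);  /* the wake_up() analogue */
	pthread_mutex_unlock(&g->lock);
}

/* The wait_event(misc_wait, !atomic_read(&local_cnt)) analogue. */
static void refgate_wait_drained(struct refgate *g)
{
	pthread_mutex_lock(&g->lock);
	while (g->refs)   /* re-check: condition variables can wake spuriously */
		pthread_cond_wait(&g->drained, &g->lock);
	pthread_mutex_unlock(&g->lock);
}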
*/ static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, struct drbd_nl_cfg_reply *reply) { + drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */ reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS)); + if (mdev->state.disk == D_DISKLESS) + wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt)); + drbd_resume_io(mdev); return 0; } @@ -1953,7 +1967,6 @@ static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp if (test_bit(NEW_CUR_UUID, &mdev->flags)) { drbd_uuid_new_current(mdev); clear_bit(NEW_CUR_UUID, &mdev->flags); - drbd_md_sync(mdev); } drbd_suspend_io(mdev); reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0)); diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c index ad325c5d0ce..7e6ac307e2d 100644 --- a/drivers/block/drbd/drbd_proc.c +++ b/drivers/block/drbd/drbd_proc.c @@ -158,7 +158,6 @@ static int drbd_seq_show(struct seq_file *seq, void *v) [WO_none] = 'n', [WO_drain_io] = 'd', [WO_bdev_flush] = 'f', - [WO_bio_barrier] = 'b', }; seq_printf(seq, "version: " REL_VERSION " (api:%d/proto:%d-%d)\n%s\n", diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index efd6169acf2..24487d4fb20 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -36,7 +36,6 @@ #include <linux/memcontrol.h> #include <linux/mm_inline.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/pkt_sched.h> #define __KERNEL_SYSCALLS__ #include <linux/unistd.h> @@ -49,11 +48,6 @@ #include "drbd_vli.h" -struct flush_work { - struct drbd_work w; - struct drbd_epoch *epoch; -}; - enum finish_epoch { FE_STILL_LIVE, FE_DESTROYED, @@ -66,16 +60,6 @@ static int drbd_do_auth(struct drbd_conf *mdev); static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event); static int e_end_block(struct drbd_conf *, struct drbd_work *, int); -static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch) -{ - struct drbd_epoch *prev; - spin_lock(&mdev->epoch_lock); - prev = list_entry(epoch->list.prev, struct drbd_epoch, list); - if (prev == epoch || prev == mdev->current_epoch) - prev = NULL; - spin_unlock(&mdev->epoch_lock); - return prev; -} #define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN) @@ -981,7 +965,7 @@ static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsi return TRUE; } -static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch) +static void drbd_flush(struct drbd_conf *mdev) { int rv; @@ -997,24 +981,6 @@ static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct d } put_ldev(mdev); } - - return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE); -} - -static int w_flush(struct drbd_conf *mdev, struct drbd_work *w, int cancel) -{ - struct flush_work *fw = (struct flush_work *)w; - struct drbd_epoch *epoch = fw->epoch; - - kfree(w); - - if (!test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags)) - drbd_flush_after_epoch(mdev, epoch); - - drbd_may_finish_epoch(mdev, epoch, EV_PUT | - (mdev->state.conn < C_CONNECTED ? 
EV_CLEANUP : 0)); - - return 1; } /** @@ -1027,15 +993,13 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch, enum epoch_event ev) { - int finish, epoch_size; + int epoch_size; struct drbd_epoch *next_epoch; - int schedule_flush = 0; enum finish_epoch rv = FE_STILL_LIVE; spin_lock(&mdev->epoch_lock); do { next_epoch = NULL; - finish = 0; epoch_size = atomic_read(&epoch->epoch_size); @@ -1045,16 +1009,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev, break; case EV_GOT_BARRIER_NR: set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags); - - /* Special case: If we just switched from WO_bio_barrier to - WO_bdev_flush we should not finish the current epoch */ - if (test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags) && epoch_size == 1 && - mdev->write_ordering != WO_bio_barrier && - epoch == mdev->current_epoch) - clear_bit(DE_CONTAINS_A_BARRIER, &epoch->flags); - break; - case EV_BARRIER_DONE: - set_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags); break; case EV_BECAME_LAST: /* nothing to do*/ @@ -1063,23 +1017,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev, if (epoch_size != 0 && atomic_read(&epoch->active) == 0 && - test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) && - epoch->list.prev == &mdev->current_epoch->list && - !test_bit(DE_IS_FINISHING, &epoch->flags)) { - /* Nearly all conditions are met to finish that epoch... */ - if (test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) || - mdev->write_ordering == WO_none || - (epoch_size == 1 && test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) || - ev & EV_CLEANUP) { - finish = 1; - set_bit(DE_IS_FINISHING, &epoch->flags); - } else if (!test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) && - mdev->write_ordering == WO_bio_barrier) { - atomic_inc(&epoch->active); - schedule_flush = 1; - } - } - if (finish) { + test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) { if (!(ev & EV_CLEANUP)) { spin_unlock(&mdev->epoch_lock); drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size); @@ -1102,6 +1040,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev, /* atomic_set(&epoch->active, 0); is already zero */ if (rv == FE_STILL_LIVE) rv = FE_RECYCLED; + wake_up(&mdev->ee_wait); } } @@ -1113,22 +1052,6 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev, spin_unlock(&mdev->epoch_lock); - if (schedule_flush) { - struct flush_work *fw; - fw = kmalloc(sizeof(*fw), GFP_ATOMIC); - if (fw) { - fw->w.cb = w_flush; - fw->epoch = epoch; - drbd_queue_work(&mdev->data.work, &fw->w); - } else { - dev_warn(DEV, "Could not kmalloc a flush_work obj\n"); - set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags); - /* That is not a recursion, only one level */ - drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE); - drbd_may_finish_epoch(mdev, epoch, EV_PUT); - } - } - return rv; } @@ -1144,19 +1067,16 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) [WO_none] = "none", [WO_drain_io] = "drain", [WO_bdev_flush] = "flush", - [WO_bio_barrier] = "barrier", }; pwo = mdev->write_ordering; wo = min(pwo, wo); - if (wo == WO_bio_barrier && mdev->ldev->dc.no_disk_barrier) - wo = WO_bdev_flush; if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush) wo = WO_drain_io; if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain) wo = WO_none; mdev->write_ordering = wo; - if (pwo != mdev->write_ordering || wo == WO_bio_barrier) + if (pwo != mdev->write_ordering || wo == WO_bdev_flush) dev_info(DEV, "Method to ensure write 
ordering: %s\n", write_ordering_str[mdev->write_ordering]); } @@ -1192,7 +1112,7 @@ next_bio: bio->bi_sector = sector; bio->bi_bdev = mdev->ldev->backing_bdev; /* we special case some flags in the multi-bio case, see below - * (REQ_UNPLUG, REQ_HARDBARRIER) */ + * (REQ_UNPLUG) */ bio->bi_rw = rw; bio->bi_private = e; bio->bi_end_io = drbd_endio_sec; @@ -1226,11 +1146,6 @@ next_bio: bio->bi_rw &= ~REQ_UNPLUG; drbd_generic_make_request(mdev, fault_type, bio); - - /* strip off REQ_HARDBARRIER, - * unless it is the first or last bio */ - if (bios && bios->bi_next) - bios->bi_rw &= ~REQ_HARDBARRIER; } while (bios); maybe_kick_lo(mdev); return 0; @@ -1244,45 +1159,9 @@ fail: return -ENOMEM; } -/** - * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set - * @mdev: DRBD device. - * @w: work object. - * @cancel: The connection will be closed anyways (unused in this callback) - */ -int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local) -{ - struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w; - /* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place, - (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch) - so that we can finish that epoch in drbd_may_finish_epoch(). - That is necessary if we already have a long chain of Epochs, before - we realize that REQ_HARDBARRIER is actually not supported */ - - /* As long as the -ENOTSUPP on the barrier is reported immediately - that will never trigger. If it is reported late, we will just - print that warning and continue correctly for all future requests - with WO_bdev_flush */ - if (previous_epoch(mdev, e->epoch)) - dev_warn(DEV, "Write ordering was not enforced (one time event)\n"); - - /* we still have a local reference, - * get_ldev was done in receive_Data. */ - - e->w.cb = e_end_block; - if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR) != 0) { - /* drbd_submit_ee fails for one reason only: - * if was not able to allocate sufficient bios. - * requeue, try again later. */ - e->w.cb = w_e_reissue; - drbd_queue_work(&mdev->data.work, &e->w); - } - return 1; -} - static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) { - int rv, issue_flush; + int rv; struct p_barrier *p = &mdev->data.rbuf.barrier; struct drbd_epoch *epoch; @@ -1300,44 +1179,40 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign * Therefore we must send the barrier_ack after the barrier request was * completed. */ switch (mdev->write_ordering) { - case WO_bio_barrier: case WO_none: if (rv == FE_RECYCLED) return TRUE; - break; + + /* receiver context, in the writeout path of the other node. + * avoid potential distributed deadlock */ + epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); + if (epoch) + break; + else + dev_warn(DEV, "Allocation of an epoch failed, slowing down\n"); + /* Fall through */ case WO_bdev_flush: case WO_drain_io: - if (rv == FE_STILL_LIVE) { - set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags); - drbd_wait_ee_list_empty(mdev, &mdev->active_ee); - rv = drbd_flush_after_epoch(mdev, mdev->current_epoch); - } - if (rv == FE_RECYCLED) - return TRUE; - - /* The asender will send all the ACKs and barrier ACKs out, since - all EEs moved from the active_ee to the done_ee. We need to - provide a new epoch object for the EEs that come in soon */ - break; - } - - /* receiver context, in the writeout path of the other node. 
- * avoid potential distributed deadlock */ - epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); - if (!epoch) { - dev_warn(DEV, "Allocation of an epoch failed, slowing down\n"); - issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags); drbd_wait_ee_list_empty(mdev, &mdev->active_ee); - if (issue_flush) { - rv = drbd_flush_after_epoch(mdev, mdev->current_epoch); - if (rv == FE_RECYCLED) - return TRUE; + drbd_flush(mdev); + + if (atomic_read(&mdev->current_epoch->epoch_size)) { + epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); + if (epoch) + break; } - drbd_wait_ee_list_empty(mdev, &mdev->done_ee); + epoch = mdev->current_epoch; + wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0); + + D_ASSERT(atomic_read(&epoch->active) == 0); + D_ASSERT(epoch->flags == 0); return TRUE; + default: + dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering); + return FALSE; } epoch->flags = 0; @@ -1652,15 +1527,8 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel) { struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w; sector_t sector = e->sector; - struct drbd_epoch *epoch; int ok = 1, pcmd; - if (e->flags & EE_IS_BARRIER) { - epoch = previous_epoch(mdev, e->epoch); - if (epoch) - drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE + (cancel ? EV_CLEANUP : 0)); - } - if (mdev->net_conf->wire_protocol == DRBD_PROT_C) { if (likely((e->flags & EE_WAS_ERROR) == 0)) { pcmd = (mdev->state.conn >= C_SYNC_SOURCE && @@ -1817,27 +1685,6 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned e->epoch = mdev->current_epoch; atomic_inc(&e->epoch->epoch_size); atomic_inc(&e->epoch->active); - - if (mdev->write_ordering == WO_bio_barrier && atomic_read(&e->epoch->epoch_size) == 1) { - struct drbd_epoch *epoch; - /* Issue a barrier if we start a new epoch, and the previous epoch - was not a epoch containing a single request which already was - a Barrier. 
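The rewritten receive_Barrier() above prefers a fresh epoch allocated with GFP_NOIO (it runs in the peer's writeout path, where waiting on reclaim that itself needs IO could deadlock) and, if the allocation fails, flushes and recycles the current epoch once it has drained. A small userspace sketch of only the shape of that fallback (the stubs and names are placeholders, not the DRBD code):

#include <stdlib.h>

struct epoch { int pending; };                 /* epoch_size stand-in  */

static void flush_backing_store(void)         { /* drbd_flush() stub   */ }
static void wait_until_empty(struct epoch *e) { while (e->pending) { } /* stub */ }

static struct epoch *next_epoch(struct epoch *cur)
{
	struct epoch *e = malloc(sizeof(*e));  /* kmalloc(..., GFP_NOIO) analogue */

	if (e) {
		e->pending = 0;
		return e;          /* fresh epoch; the old one completes on its own */
	}
	/* Allocation failed under memory pressure: slow path.  Make the current
	 * epoch durable, wait for it to drain, then reuse it instead. */
	flush_backing_store();
	wait_until_empty(cur);
	return cur;
}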
*/ - epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list); - if (epoch == e->epoch) { - set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags); - rw |= REQ_HARDBARRIER; - e->flags |= EE_IS_BARRIER; - } else { - if (atomic_read(&epoch->epoch_size) > 1 || - !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) { - set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags); - set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags); - rw |= REQ_HARDBARRIER; - e->flags |= EE_IS_BARRIER; - } - } - } spin_unlock(&mdev->epoch_lock); dp_flags = be32_to_cpu(p->dp_flags); @@ -1995,10 +1842,11 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned break; } - if (mdev->state.pdsk == D_DISKLESS) { + if (mdev->state.pdsk < D_INCONSISTENT) { /* In case we have the only disk of the cluster, */ drbd_set_out_of_sync(mdev, e->sector, e->size); e->flags |= EE_CALL_AL_COMPLETE_IO; + e->flags &= ~EE_MAY_SET_IN_SYNC; drbd_al_begin_io(mdev, e->sector); } @@ -3362,7 +3210,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned if (ns.conn == C_MASK) { ns.conn = C_CONNECTED; if (mdev->state.disk == D_NEGOTIATING) { - drbd_force_state(mdev, NS(disk, D_DISKLESS)); + drbd_force_state(mdev, NS(disk, D_FAILED)); } else if (peer_state.disk == D_NEGOTIATING) { dev_err(DEV, "Disk attach process on the peer node was aborted.\n"); peer_state.disk = D_DISKLESS; @@ -3779,17 +3627,19 @@ static void drbdd(struct drbd_conf *mdev) } shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header); - rv = drbd_recv(mdev, &header->h80.payload, shs); - if (unlikely(rv != shs)) { - dev_err(DEV, "short read while reading sub header: rv=%d\n", rv); - goto err_out; - } - if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) { dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size); goto err_out; } + if (shs) { + rv = drbd_recv(mdev, &header->h80.payload, shs); + if (unlikely(rv != shs)) { + dev_err(DEV, "short read while reading sub header: rv=%d\n", rv); + goto err_out; + } + } + rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs); if (unlikely(!rv)) { diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 9e91a2545fc..11a75d32a2e 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -258,7 +258,7 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m) if (!hlist_unhashed(&req->colision)) hlist_del(&req->colision); else - D_ASSERT((s & RQ_NET_MASK) == 0); + D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0); /* for writes we need to do some extra housekeeping */ if (rw == WRITE) @@ -813,7 +813,8 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio) mdev->state.conn >= C_CONNECTED)); if (!(local || remote) && !is_susp(mdev->state)) { - dev_err(DEV, "IO ERROR: neither local nor remote disk\n"); + if (__ratelimit(&drbd_ratelimit_state)) + dev_err(DEV, "IO ERROR: neither local nor remote disk\n"); goto fail_free_complete; } @@ -942,12 +943,21 @@ allocate_barrier: if (local) { req->private_bio->bi_bdev = mdev->ldev->backing_bdev; - if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR - : rw == READ ? DRBD_FAULT_DT_RD - : DRBD_FAULT_DT_RA)) + /* State may have changed since we grabbed our reference on the + * mdev->ldev member. Double check, and short-circuit to endio. + * In case the last activity log transaction failed to get on + * stable storage, and this is a WRITE, we may not even submit + * this bio. 
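The comment just above explains the re-check that the following hunk adds around the submit: the reference taken earlier only pins the structure, not the "disk is still usable" state, so the submit path takes a short-lived reference and falls back to completing the bio with an error if that fails. Roughly, as a sketch with placeholder names:

#include <errno.h>
#include <stdbool.h>

struct disk { bool attached; };
struct io   { int status; };

static bool try_get_disk(struct disk *d) { return d && d->attached; }  /* get_ldev() stand-in  */
static void put_disk_ref(struct disk *d) { (void)d; }                  /* put_ldev() stand-in  */
static void submit(struct disk *d, struct io *req) { (void)d; req->status = 0; }
static void complete_io(struct io *req, int err) { req->status = err; } /* bio_endio() stand-in */

static void submit_local_io(struct disk *d, struct io *req)
{
	if (try_get_disk(d)) {          /* double check right before the submit */
		submit(d, req);
		put_disk_ref(d);
	} else {
		complete_io(req, -EIO); /* short-circuit straight to the endio path */
	}
}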
*/ + if (get_ldev(mdev)) { + if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR + : rw == READ ? DRBD_FAULT_DT_RD + : DRBD_FAULT_DT_RA)) + bio_endio(req->private_bio, -EIO); + else + generic_make_request(req->private_bio); + put_ldev(mdev); + } else bio_endio(req->private_bio, -EIO); - else - generic_make_request(req->private_bio); } /* we need to plug ALWAYS since we possibly need to kick lo_dev. @@ -1022,20 +1032,6 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio) return 0; } - /* Reject barrier requests if we know the underlying device does - * not support them. - * XXX: Need to get this info from peer as well some how so we - * XXX: reject if EITHER side/data/metadata area does not support them. - * - * because of those XXX, this is not yet enabled, - * i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit. - */ - if (unlikely(bio->bi_rw & REQ_HARDBARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags)) { - /* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */ - bio_endio(bio, -EOPNOTSUPP); - return 0; - } - /* * what we "blindly" assume: */ diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h index 181ea036482..ab2bd09d54b 100644 --- a/drivers/block/drbd/drbd_req.h +++ b/drivers/block/drbd/drbd_req.h @@ -339,7 +339,8 @@ static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what) } /* completion of master bio is outside of spinlock. - * If you need it irqsave, do it your self! */ + * If you need it irqsave, do it your self! + * Which means: don't use from bio endio callback. */ static inline int req_mod(struct drbd_request *req, enum drbd_req_event what) { diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 108d58015cd..34f224b018b 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -26,7 +26,6 @@ #include <linux/module.h> #include <linux/drbd.h> #include <linux/sched.h> -#include <linux/smp_lock.h> #include <linux/wait.h> #include <linux/mm.h> #include <linux/memcontrol.h> @@ -102,12 +101,6 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local) put_ldev(mdev); } -static int is_failed_barrier(int ee_flags) -{ - return (ee_flags & (EE_IS_BARRIER|EE_WAS_ERROR|EE_RESUBMITTED)) - == (EE_IS_BARRIER|EE_WAS_ERROR); -} - /* writes on behalf of the partner, or resync writes, * "submitted" by the receiver, final stage. */ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local) @@ -119,21 +112,6 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo int is_syncer_req; int do_al_complete_io; - /* if this is a failed barrier request, disable use of barriers, - * and schedule for resubmission */ - if (is_failed_barrier(e->flags)) { - drbd_bump_write_ordering(mdev, WO_bdev_flush); - spin_lock_irqsave(&mdev->req_lock, flags); - list_del(&e->w.list); - e->flags = (e->flags & ~EE_WAS_ERROR) | EE_RESUBMITTED; - e->w.cb = w_e_reissue; - /* put_ldev actually happens below, once we come here again. 
*/ - __release(local); - spin_unlock_irqrestore(&mdev->req_lock, flags); - drbd_queue_work(&mdev->data.work, &e->w); - return; - } - D_ASSERT(e->block_id != ID_VACANT); /* after we moved e to done_ee, @@ -215,8 +193,10 @@ void drbd_endio_sec(struct bio *bio, int error) */ void drbd_endio_pri(struct bio *bio, int error) { + unsigned long flags; struct drbd_request *req = bio->bi_private; struct drbd_conf *mdev = req->mdev; + struct bio_and_error m; enum drbd_req_event what; int uptodate = bio_flagged(bio, BIO_UPTODATE); @@ -242,7 +222,13 @@ void drbd_endio_pri(struct bio *bio, int error) bio_put(req->private_bio); req->private_bio = ERR_PTR(error); - req_mod(req, what); + /* not req_mod(), we need irqsave here! */ + spin_lock_irqsave(&mdev->req_lock, flags); + __req_mod(req, what, &m); + spin_unlock_irqrestore(&mdev->req_lock, flags); + + if (m.bio) + complete_master_bio(mdev, &m); } int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel) @@ -925,7 +911,7 @@ out: drbd_md_sync(mdev); if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) { - dev_warn(DEV, "Writing the whole bitmap, due to failed kmalloc\n"); + dev_info(DEV, "Writing the whole bitmap\n"); drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished"); } diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 767107cce98..3951020e494 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -4363,9 +4363,9 @@ out_unreg_blkdev: out_put_disk: while (dr--) { del_timer(&motor_off_timer[dr]); - put_disk(disks[dr]); if (disks[dr]->queue) blk_cleanup_queue(disks[dr]->queue); + put_disk(disks[dr]); } return err; } @@ -4573,8 +4573,8 @@ static void __exit floppy_module_exit(void) device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos); platform_device_unregister(&floppy_device[drive]); } - put_disk(disks[drive]); blk_cleanup_queue(disks[drive]->queue); + put_disk(disks[drive]); } del_timer_sync(&fd_timeout); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 1e5284ef65f..7ea0bea2f7e 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -481,12 +481,6 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio) if (bio_rw(bio) == WRITE) { struct file *file = lo->lo_backing_file; - /* REQ_HARDBARRIER is deprecated */ - if (bio->bi_rw & REQ_HARDBARRIER) { - ret = -EOPNOTSUPP; - goto out; - } - if (bio->bi_rw & REQ_FLUSH) { ret = vfs_fsync(file, 0); if (unlikely(ret && ret != -EINVAL)) { diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 6ec9d53806c..008d4a00b50 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -21,80 +21,9 @@ - Instructions for use - -------------------- + For usage instructions, please refer to: - 1) Map a Linux block device to an existing rbd image. - - Usage: <mon ip addr> <options> <pool name> <rbd image name> [snap name] - - $ echo "192.168.0.1 name=admin rbd foo" > /sys/class/rbd/add - - The snapshot name can be "-" or omitted to map the image read/write. - - 2) List all active blkdev<->object mappings. - - In this example, we have performed step #1 twice, creating two blkdevs, - mapped to two separate rados objects in the rados rbd pool - - $ cat /sys/class/rbd/list - #id major client_name pool name snap KB - 0 254 client4143 rbd foo - 1024000 - - The columns, in order, are: - - blkdev unique id - - blkdev assigned major - - rados client id - - rados pool name - - rados block device name - - mapped snapshot ("-" if none) - - device size in KB - - - 3) Create a snapshot. 
- - Usage: <blkdev id> <snapname> - - $ echo "0 mysnap" > /sys/class/rbd/snap_create - - - 4) Listing a snapshot. - - $ cat /sys/class/rbd/snaps_list - #id snap KB - 0 - 1024000 (*) - 0 foo 1024000 - - The columns, in order, are: - - blkdev unique id - - snapshot name, '-' means none (active read/write version) - - size of device at time of snapshot - - the (*) indicates this is the active version - - 5) Rollback to snapshot. - - Usage: <blkdev id> <snapname> - - $ echo "0 mysnap" > /sys/class/rbd/snap_rollback - - - 6) Mapping an image using snapshot. - - A snapshot mapping is read-only. This is being done by passing - snap=<snapname> to the options when adding a device. - - $ echo "192.168.0.1 name=admin,snap=mysnap rbd foo" > /sys/class/rbd/add - - - 7) Remove an active blkdev<->rbd image mapping. - - In this example, we remove the mapping with blkdev unique id 1. - - $ echo 1 > /sys/class/rbd/remove - - - NOTE: The actual creation and deletion of rados objects is outside the scope - of this driver. + Documentation/ABI/testing/sysfs-bus-rbd */ @@ -163,6 +92,14 @@ struct rbd_request { u64 len; }; +struct rbd_snap { + struct device dev; + const char *name; + size_t size; + struct list_head node; + u64 id; +}; + /* * a single device */ @@ -193,21 +130,60 @@ struct rbd_device { int read_only; struct list_head node; + + /* list of snapshots */ + struct list_head snaps; + + /* sysfs related */ + struct device dev; +}; + +static struct bus_type rbd_bus_type = { + .name = "rbd", }; static spinlock_t node_lock; /* protects client get/put */ -static struct class *class_rbd; /* /sys/class/rbd */ static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */ static LIST_HEAD(rbd_dev_list); /* devices */ static LIST_HEAD(rbd_client_list); /* clients */ +static int __rbd_init_snaps_header(struct rbd_device *rbd_dev); +static void rbd_dev_release(struct device *dev); +static ssize_t rbd_snap_rollback(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t size); +static ssize_t rbd_snap_add(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count); +static void __rbd_remove_snap_dev(struct rbd_device *rbd_dev, + struct rbd_snap *snap);; + + +static struct rbd_device *dev_to_rbd(struct device *dev) +{ + return container_of(dev, struct rbd_device, dev); +} + +static struct device *rbd_get_dev(struct rbd_device *rbd_dev) +{ + return get_device(&rbd_dev->dev); +} + +static void rbd_put_dev(struct rbd_device *rbd_dev) +{ + put_device(&rbd_dev->dev); +} static int rbd_open(struct block_device *bdev, fmode_t mode) { struct gendisk *disk = bdev->bd_disk; struct rbd_device *rbd_dev = disk->private_data; + rbd_get_dev(rbd_dev); + set_device_ro(bdev, rbd_dev->read_only); if ((mode & FMODE_WRITE) && rbd_dev->read_only) @@ -216,9 +192,19 @@ static int rbd_open(struct block_device *bdev, fmode_t mode) return 0; } +static int rbd_release(struct gendisk *disk, fmode_t mode) +{ + struct rbd_device *rbd_dev = disk->private_data; + + rbd_put_dev(rbd_dev); + + return 0; +} + static const struct block_device_operations rbd_bd_ops = { .owner = THIS_MODULE, .open = rbd_open, + .release = rbd_release, }; /* @@ -361,7 +347,6 @@ static int rbd_header_from_disk(struct rbd_image_header *header, int ret = -ENOMEM; init_rwsem(&header->snap_rwsem); - header->snap_names_len = le64_to_cpu(ondisk->snap_names_len); header->snapc = kmalloc(sizeof(struct ceph_snap_context) + snap_count * @@ -1256,10 +1241,20 @@ bad: return -ERANGE; } +static void 
__rbd_remove_all_snaps(struct rbd_device *rbd_dev) +{ + struct rbd_snap *snap; + + while (!list_empty(&rbd_dev->snaps)) { + snap = list_first_entry(&rbd_dev->snaps, struct rbd_snap, node); + __rbd_remove_snap_dev(rbd_dev, snap); + } +} + /* * only read the first part of the ondisk header, without the snaps info */ -static int rbd_update_snaps(struct rbd_device *rbd_dev) +static int __rbd_update_snaps(struct rbd_device *rbd_dev) { int ret; struct rbd_image_header h; @@ -1280,12 +1275,15 @@ static int rbd_update_snaps(struct rbd_device *rbd_dev) rbd_dev->header.total_snaps = h.total_snaps; rbd_dev->header.snapc = h.snapc; rbd_dev->header.snap_names = h.snap_names; + rbd_dev->header.snap_names_len = h.snap_names_len; rbd_dev->header.snap_sizes = h.snap_sizes; rbd_dev->header.snapc->seq = snap_seq; + ret = __rbd_init_snaps_header(rbd_dev); + up_write(&rbd_dev->header.snap_rwsem); - return 0; + return ret; } static int rbd_init_disk(struct rbd_device *rbd_dev) @@ -1300,6 +1298,11 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) if (rc) return rc; + /* no need to lock here, as rbd_dev is not registered yet */ + rc = __rbd_init_snaps_header(rbd_dev); + if (rc) + return rc; + rc = rbd_header_set_snap(rbd_dev, rbd_dev->snap_name, &total_size); if (rc) return rc; @@ -1343,54 +1346,360 @@ out: return rc; } -/******************************************************************** - * /sys/class/rbd/ - * add map rados objects to blkdev - * remove unmap rados objects - * list show mappings - *******************************************************************/ +/* + sysfs +*/ + +static ssize_t rbd_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rbd_device *rbd_dev = dev_to_rbd(dev); + + return sprintf(buf, "%llu\n", (unsigned long long)rbd_dev->header.image_size); +} + +static ssize_t rbd_major_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rbd_device *rbd_dev = dev_to_rbd(dev); -static void class_rbd_release(struct class *cls) + return sprintf(buf, "%d\n", rbd_dev->major); +} + +static ssize_t rbd_client_id_show(struct device *dev, + struct device_attribute *attr, char *buf) { - kfree(cls); + struct rbd_device *rbd_dev = dev_to_rbd(dev); + + return sprintf(buf, "client%lld\n", ceph_client_id(rbd_dev->client)); } -static ssize_t class_rbd_list(struct class *c, - struct class_attribute *attr, - char *data) +static ssize_t rbd_pool_show(struct device *dev, + struct device_attribute *attr, char *buf) { - int n = 0; - struct list_head *tmp; - int max = PAGE_SIZE; + struct rbd_device *rbd_dev = dev_to_rbd(dev); + + return sprintf(buf, "%s\n", rbd_dev->pool_name); +} + +static ssize_t rbd_name_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rbd_device *rbd_dev = dev_to_rbd(dev); + + return sprintf(buf, "%s\n", rbd_dev->obj); +} + +static ssize_t rbd_snap_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct rbd_device *rbd_dev = dev_to_rbd(dev); + + return sprintf(buf, "%s\n", rbd_dev->snap_name); +} + +static ssize_t rbd_image_refresh(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t size) +{ + struct rbd_device *rbd_dev = dev_to_rbd(dev); + int rc; + int ret = size; mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); - n += snprintf(data, max, - "#id\tmajor\tclient_name\tpool\tname\tsnap\tKB\n"); + rc = __rbd_update_snaps(rbd_dev); + if (rc < 0) + ret = rc; - list_for_each(tmp, &rbd_dev_list) { - struct rbd_device *rbd_dev; + 
mutex_unlock(&ctl_mutex); + return ret; +} - rbd_dev = list_entry(tmp, struct rbd_device, node); - n += snprintf(data+n, max-n, - "%d\t%d\tclient%lld\t%s\t%s\t%s\t%lld\n", - rbd_dev->id, - rbd_dev->major, - ceph_client_id(rbd_dev->client), - rbd_dev->pool_name, - rbd_dev->obj, rbd_dev->snap_name, - rbd_dev->header.image_size >> 10); - if (n == max) +static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL); +static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL); +static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL); +static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL); +static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL); +static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh); +static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL); +static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add); +static DEVICE_ATTR(rollback_snap, S_IWUSR, NULL, rbd_snap_rollback); + +static struct attribute *rbd_attrs[] = { + &dev_attr_size.attr, + &dev_attr_major.attr, + &dev_attr_client_id.attr, + &dev_attr_pool.attr, + &dev_attr_name.attr, + &dev_attr_current_snap.attr, + &dev_attr_refresh.attr, + &dev_attr_create_snap.attr, + &dev_attr_rollback_snap.attr, + NULL +}; + +static struct attribute_group rbd_attr_group = { + .attrs = rbd_attrs, +}; + +static const struct attribute_group *rbd_attr_groups[] = { + &rbd_attr_group, + NULL +}; + +static void rbd_sysfs_dev_release(struct device *dev) +{ +} + +static struct device_type rbd_device_type = { + .name = "rbd", + .groups = rbd_attr_groups, + .release = rbd_sysfs_dev_release, +}; + + +/* + sysfs - snapshots +*/ + +static ssize_t rbd_snap_size_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev); + + return sprintf(buf, "%lld\n", (long long)snap->size); +} + +static ssize_t rbd_snap_id_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev); + + return sprintf(buf, "%lld\n", (long long)snap->id); +} + +static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL); +static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL); + +static struct attribute *rbd_snap_attrs[] = { + &dev_attr_snap_size.attr, + &dev_attr_snap_id.attr, + NULL, +}; + +static struct attribute_group rbd_snap_attr_group = { + .attrs = rbd_snap_attrs, +}; + +static void rbd_snap_dev_release(struct device *dev) +{ + struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev); + kfree(snap->name); + kfree(snap); +} + +static const struct attribute_group *rbd_snap_attr_groups[] = { + &rbd_snap_attr_group, + NULL +}; + +static struct device_type rbd_snap_device_type = { + .groups = rbd_snap_attr_groups, + .release = rbd_snap_dev_release, +}; + +static void __rbd_remove_snap_dev(struct rbd_device *rbd_dev, + struct rbd_snap *snap) +{ + list_del(&snap->node); + device_unregister(&snap->dev); +} + +static int rbd_register_snap_dev(struct rbd_device *rbd_dev, + struct rbd_snap *snap, + struct device *parent) +{ + struct device *dev = &snap->dev; + int ret; + + dev->type = &rbd_snap_device_type; + dev->parent = parent; + dev->release = rbd_snap_dev_release; + dev_set_name(dev, "snap_%s", snap->name); + ret = device_register(dev); + + return ret; +} + +static int __rbd_add_snap_dev(struct rbd_device *rbd_dev, + int i, const char *name, + struct rbd_snap **snapp) +{ + int ret; + struct rbd_snap *snap = kzalloc(sizeof(*snap), GFP_KERNEL); + if (!snap) + return -ENOMEM; + snap->name = 
kstrdup(name, GFP_KERNEL); + snap->size = rbd_dev->header.snap_sizes[i]; + snap->id = rbd_dev->header.snapc->snaps[i]; + if (device_is_registered(&rbd_dev->dev)) { + ret = rbd_register_snap_dev(rbd_dev, snap, + &rbd_dev->dev); + if (ret < 0) + goto err; + } + *snapp = snap; + return 0; +err: + kfree(snap->name); + kfree(snap); + return ret; +} + +/* + * search for the previous snap in a null delimited string list + */ +const char *rbd_prev_snap_name(const char *name, const char *start) +{ + if (name < start + 2) + return NULL; + + name -= 2; + while (*name) { + if (name == start) + return start; + name--; + } + return name + 1; +} + +/* + * compare the old list of snapshots that we have to what's in the header + * and update it accordingly. Note that the header holds the snapshots + * in a reverse order (from newest to oldest) and we need to go from + * older to new so that we don't get a duplicate snap name when + * doing the process (e.g., removed snapshot and recreated a new + * one with the same name. + */ +static int __rbd_init_snaps_header(struct rbd_device *rbd_dev) +{ + const char *name, *first_name; + int i = rbd_dev->header.total_snaps; + struct rbd_snap *snap, *old_snap = NULL; + int ret; + struct list_head *p, *n; + + first_name = rbd_dev->header.snap_names; + name = first_name + rbd_dev->header.snap_names_len; + + list_for_each_prev_safe(p, n, &rbd_dev->snaps) { + u64 cur_id; + + old_snap = list_entry(p, struct rbd_snap, node); + + if (i) + cur_id = rbd_dev->header.snapc->snaps[i - 1]; + + if (!i || old_snap->id < cur_id) { + /* old_snap->id was skipped, thus was removed */ + __rbd_remove_snap_dev(rbd_dev, old_snap); + continue; + } + if (old_snap->id == cur_id) { + /* we have this snapshot already */ + i--; + name = rbd_prev_snap_name(name, first_name); + continue; + } + for (; i > 0; + i--, name = rbd_prev_snap_name(name, first_name)) { + if (!name) { + WARN_ON(1); + return -EINVAL; + } + cur_id = rbd_dev->header.snapc->snaps[i]; + /* snapshot removal? 
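rbd_prev_snap_name(), added above, steps backwards through header.snap_names, which packs the snapshot names as consecutive NUL-terminated strings; __rbd_init_snaps_header() uses it to walk the list from oldest to newest. A standalone userspace demo of the same walk (the sample data and main() are invented for illustration):

#include <stdio.h>

static const char *prev_name(const char *name, const char *start)
{
	if (name < start + 2)
		return NULL;            /* no earlier entry                     */
	name -= 2;                      /* step over the preceding NUL          */
	while (*name) {
		if (name == start)
			return start;
		name--;
	}
	return name + 1;                /* first char after the NUL we hit      */
}

int main(void)
{
	static const char names[] = "alpha\0beta\0gamma\0";
	const char *p = names + sizeof(names) - 1;   /* one past the last entry */

	while ((p = prev_name(p, names)) != NULL)
		printf("%s\n", p);      /* prints gamma, beta, alpha            */
	return 0;
}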
handle it above */ + if (cur_id >= old_snap->id) + break; + /* a new snapshot */ + ret = __rbd_add_snap_dev(rbd_dev, i - 1, name, &snap); + if (ret < 0) + return ret; + + /* note that we add it backward so using n and not p */ + list_add(&snap->node, n); + p = &snap->node; + } + } + /* we're done going over the old snap list, just add what's left */ + for (; i > 0; i--) { + name = rbd_prev_snap_name(name, first_name); + if (!name) { + WARN_ON(1); + return -EINVAL; + } + ret = __rbd_add_snap_dev(rbd_dev, i - 1, name, &snap); + if (ret < 0) + return ret; + list_add(&snap->node, &rbd_dev->snaps); + } + + return 0; +} + + +static void rbd_root_dev_release(struct device *dev) +{ +} + +static struct device rbd_root_dev = { + .init_name = "rbd", + .release = rbd_root_dev_release, +}; + +static int rbd_bus_add_dev(struct rbd_device *rbd_dev) +{ + int ret = -ENOMEM; + struct device *dev; + struct rbd_snap *snap; + + mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); + dev = &rbd_dev->dev; + + dev->bus = &rbd_bus_type; + dev->type = &rbd_device_type; + dev->parent = &rbd_root_dev; + dev->release = rbd_dev_release; + dev_set_name(dev, "%d", rbd_dev->id); + ret = device_register(dev); + if (ret < 0) + goto done_free; + + list_for_each_entry(snap, &rbd_dev->snaps, node) { + ret = rbd_register_snap_dev(rbd_dev, snap, + &rbd_dev->dev); + if (ret < 0) break; } mutex_unlock(&ctl_mutex); - return n; + return 0; +done_free: + mutex_unlock(&ctl_mutex); + return ret; } -static ssize_t class_rbd_add(struct class *c, - struct class_attribute *attr, - const char *buf, size_t count) +static void rbd_bus_del_dev(struct rbd_device *rbd_dev) +{ + device_unregister(&rbd_dev->dev); +} + +static ssize_t rbd_add(struct bus_type *bus, const char *buf, size_t count) { struct ceph_osd_client *osdc; struct rbd_device *rbd_dev; @@ -1419,6 +1728,7 @@ static ssize_t class_rbd_add(struct class *c, /* static rbd_device initialization */ spin_lock_init(&rbd_dev->lock); INIT_LIST_HEAD(&rbd_dev->node); + INIT_LIST_HEAD(&rbd_dev->snaps); /* generate unique id: find highest unique id, add one */ mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); @@ -1478,6 +1788,9 @@ static ssize_t class_rbd_add(struct class *c, } rbd_dev->major = irc; + rc = rbd_bus_add_dev(rbd_dev); + if (rc) + goto err_out_disk; /* set up and announce blkdev mapping */ rc = rbd_init_disk(rbd_dev); if (rc) @@ -1487,6 +1800,8 @@ static ssize_t class_rbd_add(struct class *c, err_out_blkdev: unregister_blkdev(rbd_dev->major, rbd_dev->name); +err_out_disk: + rbd_free_disk(rbd_dev); err_out_client: rbd_put_client(rbd_dev); mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); @@ -1518,35 +1833,10 @@ static struct rbd_device *__rbd_get_dev(unsigned long id) return NULL; } -static ssize_t class_rbd_remove(struct class *c, - struct class_attribute *attr, - const char *buf, - size_t count) +static void rbd_dev_release(struct device *dev) { - struct rbd_device *rbd_dev = NULL; - int target_id, rc; - unsigned long ul; - - rc = strict_strtoul(buf, 10, &ul); - if (rc) - return rc; - - /* convert to int; abort if we lost anything in the conversion */ - target_id = (int) ul; - if (target_id != ul) - return -EINVAL; - - /* remove object from list immediately */ - mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); - - rbd_dev = __rbd_get_dev(target_id); - if (rbd_dev) - list_del_init(&rbd_dev->node); - - mutex_unlock(&ctl_mutex); - - if (!rbd_dev) - return -ENOENT; + struct rbd_device *rbd_dev = + container_of(dev, struct rbd_device, dev); rbd_put_client(rbd_dev); @@ -1557,67 
+1847,11 @@ static ssize_t class_rbd_remove(struct class *c, /* release module ref */ module_put(THIS_MODULE); - - return count; } -static ssize_t class_rbd_snaps_list(struct class *c, - struct class_attribute *attr, - char *data) -{ - struct rbd_device *rbd_dev = NULL; - struct list_head *tmp; - struct rbd_image_header *header; - int i, n = 0, max = PAGE_SIZE; - int ret; - - mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); - - n += snprintf(data, max, "#id\tsnap\tKB\n"); - - list_for_each(tmp, &rbd_dev_list) { - char *names, *p; - struct ceph_snap_context *snapc; - - rbd_dev = list_entry(tmp, struct rbd_device, node); - header = &rbd_dev->header; - - down_read(&header->snap_rwsem); - - names = header->snap_names; - snapc = header->snapc; - - n += snprintf(data + n, max - n, "%d\t%s\t%lld%s\n", - rbd_dev->id, RBD_SNAP_HEAD_NAME, - header->image_size >> 10, - (!rbd_dev->cur_snap ? " (*)" : "")); - if (n == max) - break; - - p = names; - for (i = 0; i < header->total_snaps; i++, p += strlen(p) + 1) { - n += snprintf(data + n, max - n, "%d\t%s\t%lld%s\n", - rbd_dev->id, p, header->snap_sizes[i] >> 10, - (rbd_dev->cur_snap && - (snap_index(header, i) == rbd_dev->cur_snap) ? - " (*)" : "")); - if (n == max) - break; - } - - up_read(&header->snap_rwsem); - } - - - ret = n; - mutex_unlock(&ctl_mutex); - return ret; -} - -static ssize_t class_rbd_snaps_refresh(struct class *c, - struct class_attribute *attr, - const char *buf, - size_t count) +static ssize_t rbd_remove(struct bus_type *bus, + const char *buf, + size_t count) { struct rbd_device *rbd_dev = NULL; int target_id, rc; @@ -1641,95 +1875,70 @@ static ssize_t class_rbd_snaps_refresh(struct class *c, goto done; } - rc = rbd_update_snaps(rbd_dev); - if (rc < 0) - ret = rc; + list_del_init(&rbd_dev->node); + + __rbd_remove_all_snaps(rbd_dev); + rbd_bus_del_dev(rbd_dev); done: mutex_unlock(&ctl_mutex); return ret; } -static ssize_t class_rbd_snap_create(struct class *c, - struct class_attribute *attr, - const char *buf, - size_t count) +static ssize_t rbd_snap_add(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) { - struct rbd_device *rbd_dev = NULL; - int target_id, ret; - char *name; - - name = kmalloc(RBD_MAX_SNAP_NAME_LEN + 1, GFP_KERNEL); + struct rbd_device *rbd_dev = dev_to_rbd(dev); + int ret; + char *name = kmalloc(count + 1, GFP_KERNEL); if (!name) return -ENOMEM; - /* parse snaps add command */ - if (sscanf(buf, "%d " - "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s", - &target_id, - name) != 2) { - ret = -EINVAL; - goto done; - } + snprintf(name, count, "%s", buf); mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); - rbd_dev = __rbd_get_dev(target_id); - if (!rbd_dev) { - ret = -ENOENT; - goto done_unlock; - } - ret = rbd_header_add_snap(rbd_dev, name, GFP_KERNEL); if (ret < 0) goto done_unlock; - ret = rbd_update_snaps(rbd_dev); + ret = __rbd_update_snaps(rbd_dev); if (ret < 0) goto done_unlock; ret = count; done_unlock: mutex_unlock(&ctl_mutex); -done: kfree(name); return ret; } -static ssize_t class_rbd_rollback(struct class *c, - struct class_attribute *attr, - const char *buf, - size_t count) +static ssize_t rbd_snap_rollback(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) { - struct rbd_device *rbd_dev = NULL; - int target_id, ret; + struct rbd_device *rbd_dev = dev_to_rbd(dev); + int ret; u64 snapid; - char snap_name[RBD_MAX_SNAP_NAME_LEN]; u64 cur_ofs; - char *seg_name; + char *seg_name = NULL; + char *snap_name = kmalloc(count + 1, 
GFP_KERNEL); + ret = -ENOMEM; + if (!snap_name) + return ret; /* parse snaps add command */ - if (sscanf(buf, "%d " - "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s", - &target_id, - snap_name) != 2) { - return -EINVAL; - } - - ret = -ENOMEM; + snprintf(snap_name, count, "%s", buf); seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO); if (!seg_name) - return ret; + goto done; mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); - rbd_dev = __rbd_get_dev(target_id); - if (!rbd_dev) { - ret = -ENOENT; - goto done_unlock; - } - ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL); if (ret < 0) goto done_unlock; @@ -1750,7 +1959,7 @@ static ssize_t class_rbd_rollback(struct class *c, seg_name, ret); } - ret = rbd_update_snaps(rbd_dev); + ret = __rbd_update_snaps(rbd_dev); if (ret < 0) goto done_unlock; @@ -1758,57 +1967,42 @@ static ssize_t class_rbd_rollback(struct class *c, done_unlock: mutex_unlock(&ctl_mutex); +done: kfree(seg_name); + kfree(snap_name); return ret; } -static struct class_attribute class_rbd_attrs[] = { - __ATTR(add, 0200, NULL, class_rbd_add), - __ATTR(remove, 0200, NULL, class_rbd_remove), - __ATTR(list, 0444, class_rbd_list, NULL), - __ATTR(snaps_refresh, 0200, NULL, class_rbd_snaps_refresh), - __ATTR(snap_create, 0200, NULL, class_rbd_snap_create), - __ATTR(snaps_list, 0444, class_rbd_snaps_list, NULL), - __ATTR(snap_rollback, 0200, NULL, class_rbd_rollback), +static struct bus_attribute rbd_bus_attrs[] = { + __ATTR(add, S_IWUSR, NULL, rbd_add), + __ATTR(remove, S_IWUSR, NULL, rbd_remove), __ATTR_NULL }; /* * create control files in sysfs - * /sys/class/rbd/... + * /sys/bus/rbd/... */ static int rbd_sysfs_init(void) { - int ret = -ENOMEM; + int ret; - class_rbd = kzalloc(sizeof(*class_rbd), GFP_KERNEL); - if (!class_rbd) - goto out; + rbd_bus_type.bus_attrs = rbd_bus_attrs; - class_rbd->name = DRV_NAME; - class_rbd->owner = THIS_MODULE; - class_rbd->class_release = class_rbd_release; - class_rbd->class_attrs = class_rbd_attrs; + ret = bus_register(&rbd_bus_type); + if (ret < 0) + return ret; - ret = class_register(class_rbd); - if (ret) - goto out_class; - return 0; + ret = device_register(&rbd_root_dev); -out_class: - kfree(class_rbd); - class_rbd = NULL; - pr_err(DRV_NAME ": failed to create class rbd\n"); -out: return ret; } static void rbd_sysfs_cleanup(void) { - if (class_rbd) - class_destroy(class_rbd); - class_rbd = NULL; + device_unregister(&rbd_root_dev); + bus_unregister(&rbd_bus_type); } int __init rbd_init(void) diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 06e2812ba12..657873e4328 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -65,14 +65,14 @@ enum blkif_state { struct blk_shadow { struct blkif_request req; - unsigned long request; + struct request *request; unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST]; }; static DEFINE_MUTEX(blkfront_mutex); static const struct block_device_operations xlvbd_block_fops; -#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE) +#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE) /* * We have one of these per vbd, whether ide, scsi or 'other'. 
They @@ -136,7 +136,7 @@ static void add_id_to_freelist(struct blkfront_info *info, unsigned long id) { info->shadow[id].req.id = info->shadow_free; - info->shadow[id].request = 0; + info->shadow[id].request = NULL; info->shadow_free = id; } @@ -245,14 +245,11 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode, } /* - * blkif_queue_request + * Generate a Xen blkfront IO request from a blk layer request. Reads + * and writes are handled as expected. Since we lack a loose flush + * request, we map flushes into a full ordered barrier. * - * request block io - * - * id: for guest use only. - * operation: BLKIF_OP_{READ,WRITE,PROBE} - * buffer: buffer to read/write into. this should be a - * virtual address in the guest os. + * @req: a request struct */ static int blkif_queue_request(struct request *req) { @@ -281,7 +278,7 @@ static int blkif_queue_request(struct request *req) /* Fill out a communications ring structure. */ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); id = get_id_from_freelist(info); - info->shadow[id].request = (unsigned long)req; + info->shadow[id].request = req; ring_req->id = id; ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req); @@ -289,8 +286,18 @@ static int blkif_queue_request(struct request *req) ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ; - if (req->cmd_flags & REQ_HARDBARRIER) + + if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) { + /* + * Ideally we could just do an unordered + * flush-to-disk, but all we have is a full write + * barrier at the moment. However, a barrier write is + * a superset of FUA, so we can implement it the same + * way. (It's also a FLUSH+FUA, since it is + * guaranteed ordered WRT previous writes.) + */ ring_req->operation = BLKIF_OP_WRITE_BARRIER; + } ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); @@ -636,7 +643,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) bret = RING_GET_RESPONSE(&info->ring, i); id = bret->id; - req = (struct request *)info->shadow[id].request; + req = info->shadow[id].request; blkif_completion(&info->shadow[id]); @@ -649,6 +656,16 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) printk(KERN_WARNING "blkfront: %s: write barrier op failed\n", info->gd->disk_name); error = -EOPNOTSUPP; + } + if (unlikely(bret->status == BLKIF_RSP_ERROR && + info->shadow[id].req.nr_segments == 0)) { + printk(KERN_WARNING "blkfront: %s: empty write barrier op failed\n", + info->gd->disk_name); + error = -EOPNOTSUPP; + } + if (unlikely(error)) { + if (error == -EOPNOTSUPP) + error = 0; info->feature_flush = 0; xlvbd_flush(info); } @@ -901,7 +918,7 @@ static int blkif_recover(struct blkfront_info *info) /* Stage 3: Find pending requests and requeue them. */ for (i = 0; i < BLK_RING_SIZE; i++) { /* Not in use? */ - if (copy[i].request == 0) + if (!copy[i].request) continue; /* Grab a request slot and copy shadow state into it. */ @@ -918,9 +935,7 @@ static int blkif_recover(struct blkfront_info *info) req->seg[j].gref, info->xbdev->otherend_id, pfn_to_mfn(info->shadow[req->id].frame[j]), - rq_data_dir( - (struct request *) - info->shadow[req->id].request)); + rq_data_dir(info->shadow[req->id].request)); info->shadow[req->id].req = *req; info->ring.req_prod_pvt++; @@ -1069,14 +1084,8 @@ static void blkfront_connect(struct blkfront_info *info) */ info->feature_flush = 0; - /* - * The driver doesn't properly handled empty flushes, so - * lets disable barrier support for now. 
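The blkif_queue_request() comment earlier in this hunk explains the mapping: the ring protocol used here has no standalone flush, so both REQ_FLUSH and REQ_FUA are expressed as a full write barrier, which is a superset of either. A tiny sketch of that flag translation (the enum and flag values below are placeholders, not the Xen interface constants):

#include <stdbool.h>

enum ring_op { OP_READ, OP_WRITE, OP_WRITE_BARRIER };

#define RQ_WRITE  (1u << 0)
#define RQ_FLUSH  (1u << 1)
#define RQ_FUA    (1u << 2)

static enum ring_op to_ring_op(unsigned int cmd_flags)
{
	enum ring_op op = (cmd_flags & RQ_WRITE) ? OP_WRITE : OP_READ;

	if (cmd_flags & (RQ_FLUSH | RQ_FUA))
		op = OP_WRITE_BARRIER;  /* ordered and durable, so it covers FUA too */
	return op;
}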
- */ -#if 0 if (!err && barrier) - info->feature_flush = REQ_FLUSH; -#endif + info->feature_flush = REQ_FLUSH | REQ_FUA; err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size); if (err) { diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index 128cae4e862..949ed09c636 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -35,6 +35,10 @@ static struct usb_device_id ath3k_table[] = { /* Atheros AR3011 */ { USB_DEVICE(0x0CF3, 0x3000) }, + + /* Atheros AR3011 with sflash firmware*/ + { USB_DEVICE(0x0CF3, 0x3002) }, + { } /* Terminating entry */ }; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index d120a5c1c09..1da773f899a 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -68,6 +68,9 @@ static struct usb_device_id btusb_table[] = { /* Apple MacBookPro6,2 */ { USB_DEVICE(0x05ac, 0x8218) }, + /* Apple MacBookAir3,1, MacBookAir3,2 */ + { USB_DEVICE(0x05ac, 0x821b) }, + /* AVM BlueFRITZ! USB v2.0 */ { USB_DEVICE(0x057c, 0x3800) }, @@ -96,6 +99,9 @@ static struct usb_device_id blacklist_table[] = { /* Broadcom BCM2033 without firmware */ { USB_DEVICE(0x0a5c, 0x2033), .driver_info = BTUSB_IGNORE }, + /* Atheros 3011 with sflash firmware */ + { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, + /* Broadcom BCM2035 */ { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, @@ -236,7 +242,8 @@ static void btusb_intr_complete(struct urb *urb) err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { - BT_ERR("%s urb %p failed to resubmit (%d)", + if (err != -EPERM) + BT_ERR("%s urb %p failed to resubmit (%d)", hdev->name, urb, -err); usb_unanchor_urb(urb); } @@ -320,7 +327,8 @@ static void btusb_bulk_complete(struct urb *urb) err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { - BT_ERR("%s urb %p failed to resubmit (%d)", + if (err != -EPERM) + BT_ERR("%s urb %p failed to resubmit (%d)", hdev->name, urb, -err); usb_unanchor_urb(urb); } @@ -409,7 +417,8 @@ static void btusb_isoc_complete(struct urb *urb) err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { - BT_ERR("%s urb %p failed to resubmit (%d)", + if (err != -EPERM) + BT_ERR("%s urb %p failed to resubmit (%d)", hdev->name, urb, -err); usb_unanchor_urb(urb); } @@ -1029,6 +1038,8 @@ static int btusb_probe(struct usb_interface *intf, usb_set_intfdata(intf, data); + usb_enable_autosuspend(interface_to_usbdev(intf)); + return 0; } diff --git a/drivers/char/Makefile b/drivers/char/Makefile index 3a9c0141683..ba53ec956c9 100644 --- a/drivers/char/Makefile +++ b/drivers/char/Makefile @@ -2,24 +2,10 @@ # Makefile for the kernel character device drivers. 
# -# -# This file contains the font map for the default (hardware) font -# -FONTMAPFILE = cp437.uni - -obj-y += mem.o random.o tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o tty_buffer.o tty_port.o - -obj-y += tty_mutex.o -obj-$(CONFIG_LEGACY_PTYS) += pty.o -obj-$(CONFIG_UNIX98_PTYS) += pty.o +obj-y += mem.o random.o obj-$(CONFIG_TTY_PRINTK) += ttyprintk.o obj-y += misc.o -obj-$(CONFIG_VT) += vt_ioctl.o vc_screen.o selection.o keyboard.o obj-$(CONFIG_BFIN_JTAG_COMM) += bfin_jtag_comm.o -obj-$(CONFIG_CONSOLE_TRANSLATIONS) += consolemap.o consolemap_deftbl.o -obj-$(CONFIG_HW_CONSOLE) += vt.o defkeymap.o -obj-$(CONFIG_AUDIT) += tty_audit.o -obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o obj-$(CONFIG_MVME147_SCC) += generic_serial.o vme_scc.o obj-$(CONFIG_MVME162_SCC) += generic_serial.o vme_scc.o obj-$(CONFIG_BVME6000_SCC) += generic_serial.o vme_scc.o @@ -41,8 +27,6 @@ obj-$(CONFIG_ISI) += isicom.o obj-$(CONFIG_SYNCLINK) += synclink.o obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o -obj-$(CONFIG_N_HDLC) += n_hdlc.o -obj-$(CONFIG_N_GSM) += n_gsm.o obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o obj-$(CONFIG_SX) += sx.o generic_serial.o obj-$(CONFIG_RIO) += rio/ generic_serial.o @@ -74,7 +58,6 @@ obj-$(CONFIG_PRINTER) += lp.o obj-$(CONFIG_APM_EMULATION) += apm-emulation.o obj-$(CONFIG_DTLK) += dtlk.o -obj-$(CONFIG_R3964) += n_r3964.o obj-$(CONFIG_APPLICOM) += applicom.o obj-$(CONFIG_SONYPI) += sonypi.o obj-$(CONFIG_RTC) += rtc.o @@ -115,28 +98,3 @@ obj-$(CONFIG_RAMOOPS) += ramoops.o obj-$(CONFIG_JS_RTC) += js-rtc.o js-rtc-y = rtc.o - -# Files generated that shall be removed upon make clean -clean-files := consolemap_deftbl.c defkeymap.c - -quiet_cmd_conmk = CONMK $@ - cmd_conmk = scripts/conmakehash $< > $@ - -$(obj)/consolemap_deftbl.c: $(src)/$(FONTMAPFILE) - $(call cmd,conmk) - -$(obj)/defkeymap.o: $(obj)/defkeymap.c - -# Uncomment if you're changing the keymap and have an appropriate -# loadkeys version for the map. By default, we'll use the shipped -# versions. 
-# GENERATE_KEYMAP := 1 - -ifdef GENERATE_KEYMAP - -$(obj)/defkeymap.c: $(obj)/%.c: $(src)/%.map - loadkeys --mktable $< > $@.tmp - sed -e 's/^static *//' $@.tmp > $@ - rm $@.tmp - -endif diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c index 43412c03969..3cb4539a98b 100644 --- a/drivers/char/agp/frontend.c +++ b/drivers/char/agp/frontend.c @@ -39,7 +39,6 @@ #include <linux/mm.h> #include <linux/fs.h> #include <linux/sched.h> -#include <linux/smp_lock.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include "agp.h" diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 6b6760ea243..16a2847b7cd 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -812,8 +812,10 @@ static int intel_fake_agp_fetch_size(void) static void i830_cleanup(void) { - kunmap(intel_private.i8xx_page); - intel_private.i8xx_flush_page = NULL; + if (intel_private.i8xx_flush_page) { + kunmap(intel_private.i8xx_flush_page); + intel_private.i8xx_flush_page = NULL; + } __free_page(intel_private.i8xx_page); intel_private.i8xx_page = NULL; @@ -1210,14 +1212,14 @@ static void gen6_write_entry(dma_addr_t addr, unsigned int entry, unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT; u32 pte_flags; - if (type_mask == AGP_USER_UNCACHED_MEMORY) + if (type_mask == AGP_USER_MEMORY) pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID; else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) { - pte_flags = GEN6_PTE_LLC | I810_PTE_VALID; + pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID; if (gfdt) pte_flags |= GEN6_PTE_GFDT; } else { /* set 'normal'/'cached' to LLC by default */ - pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID; + pte_flags = GEN6_PTE_LLC | I810_PTE_VALID; if (gfdt) pte_flags |= GEN6_PTE_GFDT; } diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c index b0a70461a12..6ee3348bc3e 100644 --- a/drivers/char/amiserial.c +++ b/drivers/char/amiserial.c @@ -81,7 +81,6 @@ static char *serial_version = "4.30"; #include <linux/mm.h> #include <linux/seq_file.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/init.h> #include <linux/bitops.h> #include <linux/platform_device.h> @@ -1299,7 +1298,6 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file, { struct async_struct * info = tty->driver_data; struct async_icount cprev, cnow; /* kernel counter temps */ - struct serial_icounter_struct icount; void __user *argp = (void __user *)arg; unsigned long flags; diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c index f6718f05dad..095ab90535c 100644 --- a/drivers/char/briq_panel.c +++ b/drivers/char/briq_panel.c @@ -6,7 +6,6 @@ #include <linux/module.h> -#include <linux/smp_lock.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/tty.h> diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index 55b8667f739..7066e801b9d 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -14,7 +14,6 @@ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/kernel.h> -#include <linux/smp_lock.h> #include <linux/types.h> #include <linux/miscdevice.h> #include <linux/major.h> diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c index 788da05190c..2016aad8520 100644 --- a/drivers/char/hw_random/core.c +++ b/drivers/char/hw_random/core.c @@ -37,7 +37,6 @@ #include <linux/kernel.h> #include <linux/fs.h> #include <linux/sched.h> -#include <linux/smp_lock.h> #include <linux/init.h> #include <linux/miscdevice.h> #include <linux/delay.h> diff --git a/drivers/char/i8k.c 
b/drivers/char/i8k.c index 3bc0eef8871..d72433f2d31 100644 --- a/drivers/char/i8k.c +++ b/drivers/char/i8k.c @@ -120,7 +120,7 @@ static int i8k_smm(struct smm_regs *regs) int eax = regs->eax; #if defined(CONFIG_X86_64) - asm("pushq %%rax\n\t" + asm volatile("pushq %%rax\n\t" "movl 0(%%rax),%%edx\n\t" "pushq %%rdx\n\t" "movl 4(%%rax),%%ebx\n\t" @@ -146,7 +146,7 @@ static int i8k_smm(struct smm_regs *regs) : "a"(regs) : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory"); #else - asm("pushl %%eax\n\t" + asm volatile("pushl %%eax\n\t" "movl 0(%%eax),%%edx\n\t" "push %%edx\n\t" "movl 4(%%eax),%%ebx\n\t" @@ -167,7 +167,8 @@ static int i8k_smm(struct smm_regs *regs) "movl %%edx,0(%%eax)\n\t" "lahf\n\t" "shrl $8,%%eax\n\t" - "andl $1,%%eax\n":"=a"(rc) + "andl $1,%%eax\n" + :"=a"(rc) : "a"(regs) : "%ebx", "%ecx", "%edx", "%esi", "%edi", "memory"); #endif diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c index 667abd23ad6..7c6de4c9245 100644 --- a/drivers/char/istallion.c +++ b/drivers/char/istallion.c @@ -21,7 +21,6 @@ #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/interrupt.h> #include <linux/tty.h> #include <linux/tty_flip.h> diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c index dd3f9b1f11b..294d03e8c61 100644 --- a/drivers/char/nozomi.c +++ b/drivers/char/nozomi.c @@ -1828,7 +1828,6 @@ static int ntty_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg) { struct port *port = tty->driver_data; - void __user *argp = (void __user *)arg; int rval = -ENOIOCTLCMD; DBG1("******** IOCTL, cmd: %d", cmd); diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c index d962f25dcc2..777181a2e60 100644 --- a/drivers/char/pcmcia/cm4000_cs.c +++ b/drivers/char/pcmcia/cm4000_cs.c @@ -979,8 +979,9 @@ static ssize_t cmm_read(struct file *filp, __user char *buf, size_t count, if (dev->flags0 & 1) { set_bit(IS_CMM_ABSENT, &dev->flags); rc = -ENODEV; + } else { + rc = -EIO; } - rc = -EIO; goto release_io; } diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c index bfc10f89d95..eaa41992fbe 100644 --- a/drivers/char/pcmcia/synclink_cs.c +++ b/drivers/char/pcmcia/synclink_cs.c @@ -2796,6 +2796,7 @@ static const struct tty_operations mgslpc_ops = { .hangup = mgslpc_hangup, .tiocmget = tiocmget, .tiocmset = tiocmset, + .get_icount = mgslpc_get_icount, .proc_fops = &mgslpc_proc_fops, }; diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c index f646725bd56..748c3b0ecd8 100644 --- a/drivers/char/serial167.c +++ b/drivers/char/serial167.c @@ -52,7 +52,6 @@ #include <linux/interrupt.h> #include <linux/serial.h> #include <linux/serialP.h> -#include <linux/smp_lock.h> #include <linux/string.h> #include <linux/fcntl.h> #include <linux/ptrace.h> diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c index 9f8495b4fc8..a7616d226a4 100644 --- a/drivers/char/specialix.c +++ b/drivers/char/specialix.c @@ -87,7 +87,6 @@ #include <linux/tty_flip.h> #include <linux/mm.h> #include <linux/serial.h> -#include <linux/smp_lock.h> #include <linux/fcntl.h> #include <linux/major.h> #include <linux/delay.h> diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c index 4bef6ab8362..461a5a04551 100644 --- a/drivers/char/stallion.c +++ b/drivers/char/stallion.c @@ -40,7 +40,6 @@ #include <linux/stallion.h> #include <linux/ioport.h> #include <linux/init.h> -#include <linux/smp_lock.h> #include <linux/device.h> #include <linux/delay.h> 
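The i8k.c change above marks the SMM call sequences as asm volatile. Without the qualifier, GCC is free to drop or move an asm statement whose outputs appear unused, even though the instruction sequence has side effects. A small, self-contained illustration of the same qualifier (the POST-port write below is only an example, not code from the patch):

/* A side-effecting asm must be volatile so the compiler keeps it in
 * place even when its result is never read. */
static inline void post_code(unsigned char code)
{
	asm volatile ("outb %0, $0x80"	/* write a byte to the POST port */
		      : /* no outputs */
		      : "a" (code)
		      : "memory");
}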
#include <linux/ctype.h> diff --git a/drivers/char/sx.c b/drivers/char/sx.c index e53f1686539..a786326cea2 100644 --- a/drivers/char/sx.c +++ b/drivers/char/sx.c @@ -216,7 +216,6 @@ #include <linux/eisa.h> #include <linux/pci.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/init.h> #include <linux/miscdevice.h> #include <linux/bitops.h> diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 1030f842013..c17a305ecb2 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -25,6 +25,7 @@ #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/wait.h> +#include <linux/acpi.h> #include "tpm.h" #define TPM_HEADER_SIZE 10 @@ -78,6 +79,26 @@ enum tis_defaults { static LIST_HEAD(tis_chips); static DEFINE_SPINLOCK(tis_lock); +#ifdef CONFIG_ACPI +static int is_itpm(struct pnp_dev *dev) +{ + struct acpi_device *acpi = pnp_acpi_device(dev); + struct acpi_hardware_id *id; + + list_for_each_entry(id, &acpi->pnp.ids, list) { + if (!strcmp("INTC0102", id->id)) + return 1; + } + + return 0; +} +#else +static int is_itpm(struct pnp_dev *dev) +{ + return 0; +} +#endif + static int check_locality(struct tpm_chip *chip, int l) { if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) & @@ -472,6 +493,9 @@ static int tpm_tis_init(struct device *dev, resource_size_t start, "1.2 TPM (device-id 0x%X, rev-id %d)\n", vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0))); + if (is_itpm(to_pnp_dev(dev))) + itpm = 1; + if (itpm) dev_info(dev, "Intel iTPM workaround enabled\n"); diff --git a/drivers/char/uv_mmtimer.c b/drivers/char/uv_mmtimer.c index 493b47a0d51..956ebe2080a 100644 --- a/drivers/char/uv_mmtimer.c +++ b/drivers/char/uv_mmtimer.c @@ -23,7 +23,6 @@ #include <linux/interrupt.h> #include <linux/time.h> #include <linux/math64.h> -#include <linux/smp_lock.h> #include <asm/genapic.h> #include <asm/uv/uv_hub.h> diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 6c1b676643a..896a2ced1d2 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1547,31 +1547,16 @@ static int init_vqs(struct ports_device *portdev) nr_queues = use_multiport(portdev) ? 
(nr_ports + 1) * 2 : 2; vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL); - if (!vqs) { - err = -ENOMEM; - goto fail; - } io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL); - if (!io_callbacks) { - err = -ENOMEM; - goto free_vqs; - } io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL); - if (!io_names) { - err = -ENOMEM; - goto free_callbacks; - } portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), GFP_KERNEL); - if (!portdev->in_vqs) { - err = -ENOMEM; - goto free_names; - } portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), GFP_KERNEL); - if (!portdev->out_vqs) { + if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs || + !portdev->out_vqs) { err = -ENOMEM; - goto free_invqs; + goto free; } /* @@ -1605,7 +1590,7 @@ static int init_vqs(struct ports_device *portdev) io_callbacks, (const char **)io_names); if (err) - goto free_outvqs; + goto free; j = 0; portdev->in_vqs[0] = vqs[0]; @@ -1621,23 +1606,19 @@ static int init_vqs(struct ports_device *portdev) portdev->out_vqs[i] = vqs[j + 1]; } } - kfree(io_callbacks); kfree(io_names); + kfree(io_callbacks); kfree(vqs); return 0; -free_names: - kfree(io_names); -free_callbacks: - kfree(io_callbacks); -free_outvqs: +free: kfree(portdev->out_vqs); -free_invqs: kfree(portdev->in_vqs); -free_vqs: + kfree(io_names); + kfree(io_callbacks); kfree(vqs); -fail: + return err; } diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index a4461165228..f975d24890f 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -283,16 +283,21 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p, } while (delay); } -static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta) +static void __sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta) { - unsigned long flags; - if (delta > p->max_match_value) dev_warn(&p->pdev->dev, "delta out of range\n"); - spin_lock_irqsave(&p->lock, flags); p->next_match_value = delta; sh_cmt_clock_event_program_verify(p, 0); +} + +static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta) +{ + unsigned long flags; + + spin_lock_irqsave(&p->lock, flags); + __sh_cmt_set_next(p, delta); spin_unlock_irqrestore(&p->lock, flags); } @@ -359,7 +364,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag) /* setup timeout if no clockevent */ if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT))) - sh_cmt_set_next(p, p->max_match_value); + __sh_cmt_set_next(p, p->max_match_value); out: spin_unlock_irqrestore(&p->lock, flags); @@ -381,7 +386,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag) /* adjust the timeout to maximum if only clocksource left */ if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE)) - sh_cmt_set_next(p, p->max_match_value); + __sh_cmt_set_next(p, p->max_match_value); spin_unlock_irqrestore(&p->lock, flags); } @@ -616,13 +621,9 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev) /* get hold of clock */ p->clk = clk_get(&p->pdev->dev, "cmt_fck"); if (IS_ERR(p->clk)) { - dev_warn(&p->pdev->dev, "using deprecated clock lookup\n"); - p->clk = clk_get(&p->pdev->dev, cfg->clk); - if (IS_ERR(p->clk)) { - dev_err(&p->pdev->dev, "cannot get clock\n"); - ret = PTR_ERR(p->clk); - goto err1; - } + dev_err(&p->pdev->dev, "cannot get clock\n"); + ret = PTR_ERR(p->clk); + goto err1; } if (resource_size(res) == 6) { diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c 
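The virtio_console init_vqs rework above collapses five error labels into a single free: label. That is safe because kfree(NULL) is defined to do nothing, so every pointer can be freed unconditionally whether or not its allocation succeeded. A compact sketch of the same idiom with made-up names:

#include <linux/errno.h>
#include <linux/slab.h>

struct three_bufs {
	int *a, *b, *c;
};

static int alloc_three(struct three_bufs *t)
{
	int err = 0;

	t->a = kmalloc(sizeof(*t->a), GFP_KERNEL);
	t->b = kmalloc(sizeof(*t->b), GFP_KERNEL);
	t->c = kmalloc(sizeof(*t->c), GFP_KERNEL);
	if (!t->a || !t->b || !t->c) {
		err = -ENOMEM;
		goto free;
	}
	return 0;

free:
	/* kfree(NULL) is a no-op, so no per-allocation labels are needed */
	kfree(t->c);
	kfree(t->b);
	kfree(t->a);
	return err;
}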
index ef7a5be8a09..40630cb9823 100644 --- a/drivers/clocksource/sh_mtu2.c +++ b/drivers/clocksource/sh_mtu2.c @@ -287,13 +287,9 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev) /* get hold of clock */ p->clk = clk_get(&p->pdev->dev, "mtu2_fck"); if (IS_ERR(p->clk)) { - dev_warn(&p->pdev->dev, "using deprecated clock lookup\n"); - p->clk = clk_get(&p->pdev->dev, cfg->clk); - if (IS_ERR(p->clk)) { - dev_err(&p->pdev->dev, "cannot get clock\n"); - ret = PTR_ERR(p->clk); - goto err1; - } + dev_err(&p->pdev->dev, "cannot get clock\n"); + ret = PTR_ERR(p->clk); + goto err1; } return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev), diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index de715901b82..36aba992306 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c @@ -393,13 +393,9 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev) /* get hold of clock */ p->clk = clk_get(&p->pdev->dev, "tmu_fck"); if (IS_ERR(p->clk)) { - dev_warn(&p->pdev->dev, "using deprecated clock lookup\n"); - p->clk = clk_get(&p->pdev->dev, cfg->clk); - if (IS_ERR(p->clk)) { - dev_err(&p->pdev->dev, "cannot get clock\n"); - ret = PTR_ERR(p->clk); - goto err1; - } + dev_err(&p->pdev->dev, "cannot get clock\n"); + ret = PTR_ERR(p->clk); + goto err1; } return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev), diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c index e16c3fa8d2e..05117f1ad86 100644 --- a/drivers/connector/connector.c +++ b/drivers/connector/connector.c @@ -36,6 +36,7 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>"); MODULE_DESCRIPTION("Generic userspace <-> kernelspace connector."); +MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_CONNECTOR); static struct cn_dev cdev; diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 88ee01510ec..76141262ea1 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -1832,7 +1832,7 @@ static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node, return -ENODEV; ino = mdesc_get_property(mdesc, node, "ino", &ino_len); - if (!intr) + if (!ino) return -ENODEV; if (intr_len != ino_len) diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 2e992bc8015..8a515baa38f 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -286,7 +286,7 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, if (initial) asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ : "+S" (input), "+D" (output), "+a" (iv) - : "d" (control_word), "b" (key), "c" (count)); + : "d" (control_word), "b" (key), "c" (initial)); asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ : "+S" (input), "+D" (output), "+a" (iv) diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index a8a84f4587f..64b21f5cd74 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -1,8 +1,8 @@ ifeq ($(CONFIG_DMADEVICES_DEBUG),y) - EXTRA_CFLAGS += -DDEBUG + ccflags-y += -DDEBUG endif ifeq ($(CONFIG_DMADEVICES_VDEBUG),y) - EXTRA_CFLAGS += -DVERBOSE_DEBUG + ccflags-y += -DVERBOSE_DEBUG endif obj-$(CONFIG_DMA_ENGINE) += dmaengine.o diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index a0f3e6a06e0..ea0ee81cff5 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -722,7 +722,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, desc->lli.daddr = mem; desc->lli.ctrla = ctrla | 
ATC_DST_WIDTH(mem_width) - | len >> mem_width; + | len >> reg_width; desc->lli.ctrlb = ctrlb; if (!first) { diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 286c3ac6bdc..e5e172d2169 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -50,9 +50,11 @@ static void dma_init(struct fsldma_chan *chan) * EIE - Error interrupt enable * EOSIE - End of segments interrupt enable (basic mode) * EOLNIE - End of links interrupt enable + * BWC - Bandwidth sharing among channels */ - DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EIE - | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32); + DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC + | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE + | FSL_DMA_MR_EOSIE, 32); break; case FSL_DMA_IP_83XX: /* Set the channel to below modes: diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index cb4d6ff5159..ba9f403c0fb 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. + * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved. * * Author: * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 @@ -36,6 +36,13 @@ #define FSL_DMA_MR_DAHE 0x00002000 #define FSL_DMA_MR_SAHE 0x00001000 +/* + * Bandwidth/pause control determines how many bytes a given + * channel is allowed to transfer before the DMA engine pauses + * the current channel and switches to the next channel + */ +#define FSL_DMA_MR_BWC 0x08000000 + /* Special MR definition for MPC8349 */ #define FSL_DMA_MR_EOTIE 0x00000080 #define FSL_DMA_MR_PRC_RM 0x00000800 diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index f629e4961af..e53d438142b 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -379,7 +379,7 @@ static int __init imxdma_probe(struct platform_device *pdev) return 0; err_init: - while (i-- >= 0) { + while (--i >= 0) { struct imxdma_channel *imxdmac = &imxdma->channel[i]; imx_dma_free(imxdmac->imxdma_channel); } diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 0834323a059..d0602dd5d1b 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -951,7 +951,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; int param; - bd->buffer_addr = sgl->dma_address; + bd->buffer_addr = sg->dma_address; count = sg->length; @@ -1385,7 +1385,7 @@ static int __init sdma_module_init(void) { return platform_driver_probe(&sdma_driver, sdma_probe); } -subsys_initcall(sdma_module_init); +module_init(sdma_module_init); MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>"); MODULE_DESCRIPTION("i.MX SDMA driver"); diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index 338bc4eed1f..3109bd94bc4 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c @@ -1075,7 +1075,6 @@ static int mid_setup_dma(struct pci_dev *pdev) if (NULL == dma->dma_pool) { pr_err("ERR_MDMA:pci_pool_create failed\n"); err = -ENOMEM; - kfree(dma); goto err_dma_pool; } @@ -1186,7 +1185,6 @@ err_engine: free_irq(pdev->irq, dma); err_irq: pci_pool_destroy(dma->dma_pool); - kfree(dma); err_dma_pool: pr_err("ERR_MDMA:setup_dma failed: %d\n", err); return err; @@ -1413,7 +1411,7 @@ static const struct dev_pm_ops intel_mid_dma_pm = { .runtime_idle = dma_runtime_idle, }; -static struct pci_driver intel_mid_dma_pci = { +static struct pci_driver intel_mid_dma_pci_driver = { .name = "Intel MID DMA", .id_table = intel_mid_dma_ids, .probe = intel_mid_dma_probe, @@ -1431,13 +1429,13 @@ 
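The imx-dma fix above (the same correction appears again further down for mce_amd_inj) changes the unwind loop from while (i-- >= 0) to while (--i >= 0). With i equal to the index that failed, the post-decrement form still runs one extra pass after i reaches 0 and touches index -1; the pre-decrement form releases exactly entries i-1 down to 0. A minimal sketch with invented names:

struct slot;
void release_slot(struct slot *s);	/* assumed helper, not from the patch */

static void unwind(struct slot **slots, int failed_index)
{
	int i = failed_index;		/* entries 0 .. i-1 were set up */

	while (--i >= 0)		/* stops before touching slots[-1] */
		release_slot(slots[i]);
}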
static int __init intel_mid_dma_init(void) { pr_debug("INFO_MDMA: LNW DMA Driver Version %s\n", INTEL_MID_DMA_DRIVER_VERSION); - return pci_register_driver(&intel_mid_dma_pci); + return pci_register_driver(&intel_mid_dma_pci_driver); } fs_initcall(intel_mid_dma_init); static void __exit intel_mid_dma_exit(void) { - pci_unregister_driver(&intel_mid_dma_pci); + pci_unregister_driver(&intel_mid_dma_pci_driver); } module_exit(intel_mid_dma_exit); diff --git a/drivers/dma/ioat/Makefile b/drivers/dma/ioat/Makefile index 8997d3fb905..0ff7270af25 100644 --- a/drivers/dma/ioat/Makefile +++ b/drivers/dma/ioat/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o -ioatdma-objs := pci.o dma.o dma_v2.o dma_v3.o dca.o +ioatdma-y := pci.o dma.o dma_v2.o dma_v3.o dca.o diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 92b679024fe..c064c89420d 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -259,11 +259,6 @@ static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc) return; } - channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr); - channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr); - channel_writel(pd_chan, SIZE, desc->regs.size); - channel_writel(pd_chan, NEXT, desc->regs.next); - dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n", pd_chan->chan.chan_id, desc->regs.dev_addr); dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n", @@ -273,10 +268,16 @@ static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc) dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n", pd_chan->chan.chan_id, desc->regs.next); - if (list_empty(&desc->tx_list)) + if (list_empty(&desc->tx_list)) { + channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr); + channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr); + channel_writel(pd_chan, SIZE, desc->regs.size); + channel_writel(pd_chan, NEXT, desc->regs.next); pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT); - else + } else { + channel_writel(pd_chan, NEXT, desc->txd.phys); pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG); + } val = dma_readl(pd, CTL2); val |= 1 << (DMA_CTL2_START_SHIFT_BITS + pd_chan->chan.chan_id); diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 0d58a4a4487..cef584533ee 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -4449,9 +4449,8 @@ static int __devinit ppc440spe_adma_probe(struct platform_device *ofdev, if (!request_mem_region(res.start, resource_size(&res), dev_driver_string(&ofdev->dev))) { - dev_err(&ofdev->dev, "failed to request memory region " - "(0x%016llx-0x%016llx)\n", - (u64)res.start, (u64)res.end); + dev_err(&ofdev->dev, "failed to request memory region %pR\n", + &res); initcode = PPC_ADMA_INIT_MEMREG; ret = -EBUSY; goto out; diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index eb6b54dbb80..85ffd5e38c5 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c @@ -1213,3 +1213,4 @@ module_exit(sh_dmae_exit); MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>"); MODULE_DESCRIPTION("Renesas SH DMA Engine driver"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:sh-dma-engine"); diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index b3781399b38..ba2898b3639 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile @@ -10,16 +10,16 @@ obj-$(CONFIG_EDAC) := edac_stub.o obj-$(CONFIG_EDAC_MM_EDAC) += edac_core.o obj-$(CONFIG_EDAC_MCE) += edac_mce.o -edac_core-objs := edac_mc.o edac_device.o edac_mc_sysfs.o edac_pci_sysfs.o -edac_core-objs += edac_module.o 
edac_device_sysfs.o +edac_core-y := edac_mc.o edac_device.o edac_mc_sysfs.o edac_pci_sysfs.o +edac_core-y += edac_module.o edac_device_sysfs.o ifdef CONFIG_PCI -edac_core-objs += edac_pci.o edac_pci_sysfs.o +edac_core-y += edac_pci.o edac_pci_sysfs.o endif obj-$(CONFIG_EDAC_MCE_INJ) += mce_amd_inj.o -edac_mce_amd-objs := mce_amd.o +edac_mce_amd-y := mce_amd.o obj-$(CONFIG_EDAC_DECODE_MCE) += edac_mce_amd.o obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 8521401bbd7..eca9ba193e9 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -1572,7 +1572,7 @@ static int f10_match_to_this_node(struct amd64_pvt *pvt, int dram_range, debugf1(" HoleOffset=0x%x HoleValid=0x%x IntlvSel=0x%x\n", hole_off, hole_valid, intlv_sel); - if (intlv_en || + if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en))) return -EINVAL; diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h index d7ca43a828b..251440cd50a 100644 --- a/drivers/edac/edac_core.h +++ b/drivers/edac/edac_core.h @@ -41,10 +41,10 @@ #define MC_PROC_NAME_MAX_LEN 7 #if PAGE_SHIFT < 20 -#define PAGES_TO_MiB( pages ) ( ( pages ) >> ( 20 - PAGE_SHIFT ) ) -#define MiB_TO_PAGES(mb) ((mb) >> (20 - PAGE_SHIFT)) +#define PAGES_TO_MiB(pages) ((pages) >> (20 - PAGE_SHIFT)) +#define MiB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) #else /* PAGE_SHIFT > 20 */ -#define PAGES_TO_MiB( pages ) ( ( pages ) << ( PAGE_SHIFT - 20 ) ) +#define PAGES_TO_MiB(pages) ((pages) << (PAGE_SHIFT - 20)) #define MiB_TO_PAGES(mb) ((mb) >> (PAGE_SHIFT - 20)) #endif diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index ba6586a69cc..795ea69c4d8 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -586,14 +586,16 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev) return NULL; } - /* marking MCI offline */ - mci->op_state = OP_OFFLINE; - del_mc_from_global_list(mci); mutex_unlock(&mem_ctls_mutex); - /* flush workq processes and remove sysfs */ + /* flush workq processes */ edac_mc_workq_teardown(mci); + + /* marking MCI offline */ + mci->op_state = OP_OFFLINE; + + /* remove from sysfs */ edac_remove_sysfs_mci_device(mci); edac_printk(KERN_INFO, EDAC_MC, diff --git a/drivers/edac/mce_amd_inj.c b/drivers/edac/mce_amd_inj.c index 8d0688f36d4..39faded3cad 100644 --- a/drivers/edac/mce_amd_inj.c +++ b/drivers/edac/mce_amd_inj.c @@ -139,7 +139,7 @@ static int __init edac_init_mce_inject(void) return 0; err_sysfs_create: - while (i-- >= 0) + while (--i >= 0) sysfs_remove_file(mce_kobj, &sysfs_attrs[i]->attr); kobject_del(mce_kobj); diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index 18fdd9703b4..1a467a91fb0 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -7,6 +7,7 @@ */ #include <linux/bug.h> +#include <linux/delay.h> #include <linux/device.h> #include <linux/firewire.h> #include <linux/firewire-constants.h> @@ -26,8 +27,14 @@ #include <asm/unaligned.h> #include <net/arp.h> -#define FWNET_MAX_FRAGMENTS 25 /* arbitrary limit */ -#define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16 * 1024 ? 4 : 2) +/* rx limits */ +#define FWNET_MAX_FRAGMENTS 30 /* arbitrary, > TX queue depth */ +#define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16*1024 ? 4 : 2) + +/* tx limits */ +#define FWNET_MAX_QUEUED_DATAGRAMS 20 /* < 64 = number of tlabels */ +#define FWNET_MIN_QUEUED_DATAGRAMS 10 /* should keep AT DMA busy enough */ +#define FWNET_TX_QUEUE_LEN FWNET_MAX_QUEUED_DATAGRAMS /* ? 
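The edac_core.h hunk above corrects MiB_TO_PAGES for the PAGE_SHIFT < 20 case: converting MiB to pages has to shift left by (20 - PAGE_SHIFT), not right. A standalone check of the corrected arithmetic, assuming 4 KiB pages:

#include <assert.h>

#define PAGE_SHIFT 12	/* 4 KiB pages assumed for this example */
#define PAGES_TO_MiB(pages) ((pages) >> (20 - PAGE_SHIFT))
#define MiB_TO_PAGES(mb)    ((mb) << (20 - PAGE_SHIFT))

int main(void)
{
	assert(MiB_TO_PAGES(1) == 256);	/* 1 MiB = 2^20 / 2^12 = 256 pages */
	assert(PAGES_TO_MiB(512) == 2);	/* 512 pages of 4 KiB = 2 MiB */
	return 0;
}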
*/ #define IEEE1394_BROADCAST_CHANNEL 31 #define IEEE1394_ALL_NODES (0xffc0 | 0x003f) @@ -169,15 +176,8 @@ struct fwnet_device { struct fw_address_handler handler; u64 local_fifo; - /* List of packets to be sent */ - struct list_head packet_list; - /* - * List of packets that were broadcasted. When we get an ISO interrupt - * one of them has been sent - */ - struct list_head broadcasted_list; - /* List of packets that have been sent but not yet acked */ - struct list_head sent_list; + /* Number of tx datagrams that have been queued but not yet acked */ + int queued_datagrams; struct list_head peer_list; struct fw_card *card; @@ -195,7 +195,7 @@ struct fwnet_peer { unsigned pdg_size; /* pd_list size */ u16 datagram_label; /* outgoing datagram label */ - unsigned max_payload; /* includes RFC2374_FRAG_HDR_SIZE overhead */ + u16 max_payload; /* includes RFC2374_FRAG_HDR_SIZE overhead */ int node_id; int generation; unsigned speed; @@ -203,22 +203,18 @@ struct fwnet_peer { /* This is our task struct. It's used for the packet complete callback. */ struct fwnet_packet_task { - /* - * ptask can actually be on dev->packet_list, dev->broadcasted_list, - * or dev->sent_list depending on its current state. - */ - struct list_head pt_link; struct fw_transaction transaction; struct rfc2734_header hdr; struct sk_buff *skb; struct fwnet_device *dev; int outstanding_pkts; - unsigned max_payload; u64 fifo_addr; u16 dest_node; + u16 max_payload; u8 generation; u8 speed; + u8 enqueued; }; /* @@ -650,8 +646,6 @@ static int fwnet_finish_incoming_packet(struct net_device *net, net->stats.rx_packets++; net->stats.rx_bytes += skb->len; } - if (netif_queue_stopped(net)) - netif_wake_queue(net); return 0; @@ -660,8 +654,6 @@ static int fwnet_finish_incoming_packet(struct net_device *net, net->stats.rx_dropped++; dev_kfree_skb_any(skb); - if (netif_queue_stopped(net)) - netif_wake_queue(net); return -ENOENT; } @@ -793,15 +785,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, * Datagram is not complete, we're done for the * moment. */ - spin_unlock_irqrestore(&dev->lock, flags); - - return 0; + retval = 0; fail: spin_unlock_irqrestore(&dev->lock, flags); - if (netif_queue_stopped(net)) - netif_wake_queue(net); - return retval; } @@ -901,11 +888,19 @@ static void fwnet_free_ptask(struct fwnet_packet_task *ptask) kmem_cache_free(fwnet_packet_task_cache, ptask); } +/* Caller must hold dev->lock. */ +static void dec_queued_datagrams(struct fwnet_device *dev) +{ + if (--dev->queued_datagrams == FWNET_MIN_QUEUED_DATAGRAMS) + netif_wake_queue(dev->netdev); +} + static int fwnet_send_packet(struct fwnet_packet_task *ptask); static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) { struct fwnet_device *dev = ptask->dev; + struct sk_buff *skb = ptask->skb; unsigned long flags; bool free; @@ -914,10 +909,14 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) ptask->outstanding_pkts--; /* Check whether we or the networking TX soft-IRQ is last user. 
*/ - free = (ptask->outstanding_pkts == 0 && !list_empty(&ptask->pt_link)); + free = (ptask->outstanding_pkts == 0 && ptask->enqueued); + if (free) + dec_queued_datagrams(dev); - if (ptask->outstanding_pkts == 0) - list_del(&ptask->pt_link); + if (ptask->outstanding_pkts == 0) { + dev->netdev->stats.tx_packets++; + dev->netdev->stats.tx_bytes += skb->len; + } spin_unlock_irqrestore(&dev->lock, flags); @@ -926,7 +925,6 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) u16 fg_off; u16 datagram_label; u16 lf; - struct sk_buff *skb; /* Update the ptask to point to the next fragment and send it */ lf = fwnet_get_hdr_lf(&ptask->hdr); @@ -953,7 +951,7 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) datagram_label = fwnet_get_hdr_dgl(&ptask->hdr); break; } - skb = ptask->skb; + skb_pull(skb, ptask->max_payload); if (ptask->outstanding_pkts > 1) { fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG, @@ -970,6 +968,31 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) fwnet_free_ptask(ptask); } +static void fwnet_transmit_packet_failed(struct fwnet_packet_task *ptask) +{ + struct fwnet_device *dev = ptask->dev; + unsigned long flags; + bool free; + + spin_lock_irqsave(&dev->lock, flags); + + /* One fragment failed; don't try to send remaining fragments. */ + ptask->outstanding_pkts = 0; + + /* Check whether we or the networking TX soft-IRQ is last user. */ + free = ptask->enqueued; + if (free) + dec_queued_datagrams(dev); + + dev->netdev->stats.tx_dropped++; + dev->netdev->stats.tx_errors++; + + spin_unlock_irqrestore(&dev->lock, flags); + + if (free) + fwnet_free_ptask(ptask); +} + static void fwnet_write_complete(struct fw_card *card, int rcode, void *payload, size_t length, void *data) { @@ -977,11 +1000,12 @@ static void fwnet_write_complete(struct fw_card *card, int rcode, ptask = data; - if (rcode == RCODE_COMPLETE) + if (rcode == RCODE_COMPLETE) { fwnet_transmit_packet_done(ptask); - else + } else { fw_error("fwnet_write_complete: failed: %x\n", rcode); - /* ??? error recovery */ + fwnet_transmit_packet_failed(ptask); + } } static int fwnet_send_packet(struct fwnet_packet_task *ptask) @@ -1039,9 +1063,11 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask) spin_lock_irqsave(&dev->lock, flags); /* If the AT tasklet already ran, we may be last user. */ - free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link)); + free = (ptask->outstanding_pkts == 0 && !ptask->enqueued); if (!free) - list_add_tail(&ptask->pt_link, &dev->broadcasted_list); + ptask->enqueued = true; + else + dec_queued_datagrams(dev); spin_unlock_irqrestore(&dev->lock, flags); @@ -1056,9 +1082,11 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask) spin_lock_irqsave(&dev->lock, flags); /* If the AT tasklet already ran, we may be last user. */ - free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link)); + free = (ptask->outstanding_pkts == 0 && !ptask->enqueued); if (!free) - list_add_tail(&ptask->pt_link, &dev->sent_list); + ptask->enqueued = true; + else + dec_queued_datagrams(dev); spin_unlock_irqrestore(&dev->lock, flags); @@ -1224,6 +1252,15 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net) struct fwnet_peer *peer; unsigned long flags; + spin_lock_irqsave(&dev->lock, flags); + + /* Can this happen? 
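The firewire net.c rework in these hunks replaces the three per-device packet lists with a queued_datagrams counter, waking the TX queue when it drains to a low watermark and, later in the same diff, stopping it at a high watermark. The general shape of that throttle, sketched with invented names and limits:

#include <linux/netdevice.h>

#define EX_MAX_QUEUED 20	/* stop the TX queue at this depth */
#define EX_MIN_QUEUED 10	/* wake it again once drained to this */

struct ex_dev {
	struct net_device *netdev;
	int queued;		/* protected by the device lock */
};

/* both helpers are assumed to run with the device lock held */
static void ex_tx_enqueued(struct ex_dev *dev)
{
	if (++dev->queued == EX_MAX_QUEUED)
		netif_stop_queue(dev->netdev);
}

static void ex_tx_completed(struct ex_dev *dev)
{
	if (--dev->queued == EX_MIN_QUEUED)
		netif_wake_queue(dev->netdev);
}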
*/ + if (netif_queue_stopped(dev->netdev)) { + spin_unlock_irqrestore(&dev->lock, flags); + + return NETDEV_TX_BUSY; + } + ptask = kmem_cache_alloc(fwnet_packet_task_cache, GFP_ATOMIC); if (ptask == NULL) goto fail; @@ -1242,9 +1279,6 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net) proto = hdr_buf.h_proto; dg_size = skb->len; - /* serialize access to peer, including peer->datagram_label */ - spin_lock_irqsave(&dev->lock, flags); - /* * Set the transmission type for the packet. ARP packets and IP * broadcast packets are sent via GASP. @@ -1266,7 +1300,7 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net) peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid)); if (!peer || peer->fifo == FWNET_NO_FIFO_ADDR) - goto fail_unlock; + goto fail; generation = peer->generation; dest_node = peer->node_id; @@ -1320,18 +1354,21 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net) max_payload += RFC2374_FRAG_HDR_SIZE; } + if (++dev->queued_datagrams == FWNET_MAX_QUEUED_DATAGRAMS) + netif_stop_queue(dev->netdev); + spin_unlock_irqrestore(&dev->lock, flags); ptask->max_payload = max_payload; - INIT_LIST_HEAD(&ptask->pt_link); + ptask->enqueued = 0; fwnet_send_packet(ptask); return NETDEV_TX_OK; - fail_unlock: - spin_unlock_irqrestore(&dev->lock, flags); fail: + spin_unlock_irqrestore(&dev->lock, flags); + if (ptask) kmem_cache_free(fwnet_packet_task_cache, ptask); @@ -1377,7 +1414,7 @@ static void fwnet_init_dev(struct net_device *net) net->addr_len = FWNET_ALEN; net->hard_header_len = FWNET_HLEN; net->type = ARPHRD_IEEE1394; - net->tx_queue_len = 10; + net->tx_queue_len = FWNET_TX_QUEUE_LEN; } /* caller must hold fwnet_device_mutex */ @@ -1457,14 +1494,9 @@ static int fwnet_probe(struct device *_dev) dev->broadcast_rcv_context = NULL; dev->broadcast_xmt_max_payload = 0; dev->broadcast_xmt_datagramlabel = 0; - dev->local_fifo = FWNET_NO_FIFO_ADDR; - - INIT_LIST_HEAD(&dev->packet_list); - INIT_LIST_HEAD(&dev->broadcasted_list); - INIT_LIST_HEAD(&dev->sent_list); + dev->queued_datagrams = 0; INIT_LIST_HEAD(&dev->peer_list); - dev->card = card; dev->netdev = net; @@ -1522,7 +1554,7 @@ static int fwnet_remove(struct device *_dev) struct fwnet_peer *peer = dev_get_drvdata(_dev); struct fwnet_device *dev = peer->dev; struct net_device *net; - struct fwnet_packet_task *ptask, *pt_next; + int i; mutex_lock(&fwnet_device_mutex); @@ -1540,21 +1572,9 @@ static int fwnet_remove(struct device *_dev) dev->card); fw_iso_context_destroy(dev->broadcast_rcv_context); } - list_for_each_entry_safe(ptask, pt_next, - &dev->packet_list, pt_link) { - dev_kfree_skb_any(ptask->skb); - kmem_cache_free(fwnet_packet_task_cache, ptask); - } - list_for_each_entry_safe(ptask, pt_next, - &dev->broadcasted_list, pt_link) { - dev_kfree_skb_any(ptask->skb); - kmem_cache_free(fwnet_packet_task_cache, ptask); - } - list_for_each_entry_safe(ptask, pt_next, - &dev->sent_list, pt_link) { - dev_kfree_skb_any(ptask->skb); - kmem_cache_free(fwnet_packet_task_cache, ptask); - } + for (i = 0; dev->queued_datagrams && i < 5; i++) + ssleep(1); + WARN_ON(dev->queued_datagrams); list_del(&dev->dev_link); free_netdev(net); diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 9dcb17d51ae..e3c8b60bd86 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -242,6 +242,7 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card) static char ohci_driver_name[] = KBUILD_MODNAME; +#define PCI_DEVICE_ID_AGERE_FW643 0x5901 #define 
PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380 #define PCI_DEVICE_ID_TI_TSB12LV22 0x8009 @@ -253,18 +254,34 @@ static char ohci_driver_name[] = KBUILD_MODNAME; /* In case of multiple matches in ohci_quirks[], only the first one is used. */ static const struct { - unsigned short vendor, device, flags; + unsigned short vendor, device, revision, flags; } ohci_quirks[] = { - {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, QUIRK_CYCLE_TIMER | - QUIRK_RESET_PACKET | - QUIRK_NO_1394A}, - {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET}, - {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, - {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, QUIRK_NO_MSI}, - {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, - {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, - {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, - {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS}, + {PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID, + QUIRK_CYCLE_TIMER}, + + {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID, + QUIRK_BE_HEADERS}, + + {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6, + QUIRK_NO_MSI}, + + {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID, + QUIRK_NO_MSI}, + + {PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID, + QUIRK_CYCLE_TIMER}, + + {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID, + QUIRK_CYCLE_TIMER}, + + {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID, + QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A}, + + {PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID, + QUIRK_RESET_PACKET}, + + {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID, + QUIRK_CYCLE_TIMER | QUIRK_NO_MSI}, }; /* This overrides anything that was found in ohci_quirks[]. */ @@ -577,17 +594,11 @@ static int ohci_update_phy_reg(struct fw_card *card, int addr, return ret; } -static int ar_context_add_page(struct ar_context *ctx) +static void ar_context_link_page(struct ar_context *ctx, + struct ar_buffer *ab, dma_addr_t ab_bus) { - struct device *dev = ctx->ohci->card.device; - struct ar_buffer *ab; - dma_addr_t uninitialized_var(ab_bus); size_t offset; - ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC); - if (ab == NULL) - return -ENOMEM; - ab->next = NULL; memset(&ab->descriptor, 0, sizeof(ab->descriptor)); ab->descriptor.control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | @@ -606,6 +617,19 @@ static int ar_context_add_page(struct ar_context *ctx) reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); flush_writes(ctx->ohci); +} + +static int ar_context_add_page(struct ar_context *ctx) +{ + struct device *dev = ctx->ohci->card.device; + struct ar_buffer *ab; + dma_addr_t uninitialized_var(ab_bus); + + ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC); + if (ab == NULL) + return -ENOMEM; + + ar_context_link_page(ctx, ab, ab_bus); return 0; } @@ -730,16 +754,17 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) static void ar_context_tasklet(unsigned long data) { struct ar_context *ctx = (struct ar_context *)data; - struct fw_ohci *ohci = ctx->ohci; struct ar_buffer *ab; struct descriptor *d; void *buffer, *end; + __le16 res_count; ab = ctx->current_buffer; d = &ab->descriptor; - if (d->res_count == 0) { - size_t size, rest, offset; + res_count = ACCESS_ONCE(d->res_count); + if (res_count == 0) { + size_t size, size2, rest, pktsize, size3, offset; dma_addr_t start_bus; void *start; @@ -750,29 +775,63 @@ static void ar_context_tasklet(unsigned long data) */ offset = offsetof(struct ar_buffer, data); - start = buffer = ab; + start = ab; start_bus = 
le32_to_cpu(ab->descriptor.data_address) - offset; + buffer = ab->data; ab = ab->next; d = &ab->descriptor; - size = buffer + PAGE_SIZE - ctx->pointer; + size = start + PAGE_SIZE - ctx->pointer; + /* valid buffer data in the next page */ rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count); + /* what actually fits in this page */ + size2 = min(rest, (size_t)PAGE_SIZE - offset - size); memmove(buffer, ctx->pointer, size); - memcpy(buffer + size, ab->data, rest); - ctx->current_buffer = ab; - ctx->pointer = (void *) ab->data + rest; - end = buffer + size + rest; + memcpy(buffer + size, ab->data, size2); + + while (size > 0) { + void *next = handle_ar_packet(ctx, buffer); + pktsize = next - buffer; + if (pktsize >= size) { + /* + * We have handled all the data that was + * originally in this page, so we can now + * continue in the next page. + */ + buffer = next; + break; + } + /* move the next packet to the start of the buffer */ + memmove(buffer, next, size + size2 - pktsize); + size -= pktsize; + /* fill up this page again */ + size3 = min(rest - size2, + (size_t)PAGE_SIZE - offset - size - size2); + memcpy(buffer + size + size2, + (void *) ab->data + size2, size3); + size2 += size3; + } - while (buffer < end) - buffer = handle_ar_packet(ctx, buffer); + if (rest > 0) { + /* handle the packets that are fully in the next page */ + buffer = (void *) ab->data + + (buffer - (start + offset + size)); + end = (void *) ab->data + rest; - dma_free_coherent(ohci->card.device, PAGE_SIZE, - start, start_bus); - ar_context_add_page(ctx); + while (buffer < end) + buffer = handle_ar_packet(ctx, buffer); + + ctx->current_buffer = ab; + ctx->pointer = end; + + ar_context_link_page(ctx, start, start_bus); + } else { + ctx->pointer = start + PAGE_SIZE; + } } else { buffer = ctx->pointer; ctx->pointer = end = - (void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count); + (void *) ab + PAGE_SIZE - le16_to_cpu(res_count); while (buffer < end) buffer = handle_ar_packet(ctx, buffer); @@ -2885,9 +2944,11 @@ static int __devinit pci_probe(struct pci_dev *dev, } for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++) - if (ohci_quirks[i].vendor == dev->vendor && - (ohci_quirks[i].device == dev->device || - ohci_quirks[i].device == (unsigned short)PCI_ANY_ID)) { + if ((ohci_quirks[i].vendor == dev->vendor) && + (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID || + ohci_quirks[i].device == dev->device) && + (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID || + ohci_quirks[i].revision >= dev->revision)) { ohci->quirks = ohci_quirks[i].flags; break; } diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index bfae4b30979..afa576a75a8 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c @@ -1468,7 +1468,7 @@ static int sbp2_map_scatterlist(struct sbp2_command_orb *orb, /* SCSI stack integration */ -static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) +static int sbp2_scsi_queuecommand_lck(struct scsi_cmnd *cmd, scsi_done_fn_t done) { struct sbp2_logical_unit *lu = cmd->device->hostdata; struct fw_device *device = target_device(lu->tgt); @@ -1534,6 +1534,8 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done) return retval; } +static DEF_SCSI_QCMD(sbp2_scsi_queuecommand) + static int sbp2_scsi_slave_alloc(struct scsi_device *sdev) { struct sbp2_logical_unit *lu = sdev->hostdata; diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c index e23c06893d1..599f6c9e0fb 100644 --- a/drivers/gpio/cs5535-gpio.c +++ 
b/drivers/gpio/cs5535-gpio.c @@ -56,6 +56,18 @@ static struct cs5535_gpio_chip { * registers, see include/linux/cs5535.h. */ +static void errata_outl(u32 val, unsigned long addr) +{ + /* + * According to the CS5536 errata (#36), after suspend + * a write to the high bank GPIO register will clear all + * non-selected bits; the recommended workaround is a + * read-modify-write operation. + */ + val |= inl(addr); + outl(val, addr); +} + static void __cs5535_gpio_set(struct cs5535_gpio_chip *chip, unsigned offset, unsigned int reg) { @@ -64,7 +76,7 @@ static void __cs5535_gpio_set(struct cs5535_gpio_chip *chip, unsigned offset, outl(1 << offset, chip->base + reg); else /* high bank register */ - outl(1 << (offset - 16), chip->base + 0x80 + reg); + errata_outl(1 << (offset - 16), chip->base + 0x80 + reg); } void cs5535_gpio_set(unsigned offset, unsigned int reg) @@ -86,7 +98,7 @@ static void __cs5535_gpio_clear(struct cs5535_gpio_chip *chip, unsigned offset, outl(1 << (offset + 16), chip->base + reg); else /* high bank register */ - outl(1 << offset, chip->base + 0x80 + reg); + errata_outl(1 << offset, chip->base + 0x80 + reg); } void cs5535_gpio_clear(unsigned offset, unsigned int reg) diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 6985cb1da72..2baa6708e44 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -156,12 +156,12 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] = { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 }, { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 }, { DRM_MODE_CONNECTOR_Component, "Component", 0 }, - { DRM_MODE_CONNECTOR_9PinDIN, "9-pin DIN", 0 }, - { DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 }, - { DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 }, - { DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 }, + { DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 }, + { DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 }, + { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 }, + { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 }, { DRM_MODE_CONNECTOR_TV, "TV", 0 }, - { DRM_MODE_CONNECTOR_eDP, "Embedded DisplayPort", 0 }, + { DRM_MODE_CONNECTOR_eDP, "eDP", 0 }, }; static struct drm_prop_enum_list drm_encoder_enum_list[] = diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index dcbeb98f195..bede10a0340 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -241,7 +241,7 @@ void drm_helper_disable_unused_functions(struct drm_device *dev) } list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - if (!drm_helper_encoder_in_use(encoder)) { + if (encoder->crtc && !drm_helper_encoder_in_use(encoder)) { drm_encoder_disable(encoder); /* disconnector encoder from any connector */ encoder->crtc = NULL; @@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder, struct drm_crtc *tmp; int crtc_mask = 1; - WARN(!crtc, "checking null crtc?"); + WARN(!crtc, "checking null crtc?\n"); dev = crtc->dev; @@ -471,6 +471,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) int count = 0, ro, fail = 0; struct drm_crtc_helper_funcs *crtc_funcs; int ret = 0; + int i; DRM_DEBUG_KMS("\n"); @@ -666,6 +667,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) if (ret != 0) goto fail; } + DRM_DEBUG_KMS("Setting connector DPMS state to on\n"); + for (i = 0; i < set->num_connectors; i++) { + DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id, + drm_get_connector_name(set->connectors[i])); + set->connectors[i]->dpms = DRM_MODE_DPMS_ON; + } 
kfree(save_connectors); kfree(save_encoders); @@ -841,7 +848,7 @@ static void output_poll_execute(struct work_struct *work) struct delayed_work *delayed_work = to_delayed_work(work); struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work); struct drm_connector *connector; - enum drm_connector_status old_status, status; + enum drm_connector_status old_status; bool repoll = false, changed = false; if (!drm_kms_helper_poll) @@ -866,8 +873,9 @@ static void output_poll_execute(struct work_struct *work) !(connector->polled & DRM_CONNECTOR_POLL_HPD)) continue; - status = connector->funcs->detect(connector, false); - if (old_status != status) + connector->status = connector->funcs->detect(connector, false); + DRM_DEBUG_KMS("connector status updated to %d\n", connector->status); + if (old_status != connector->status) changed = true; } diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index c1a26217a53..a245d17165a 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -240,7 +240,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf, .addr = DDC_ADDR, .flags = I2C_M_RD, .len = len, - .buf = buf + start, + .buf = buf, } }; @@ -253,7 +253,7 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf, static u8 * drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) { - int i, j = 0; + int i, j = 0, valid_extensions = 0; u8 *block, *new; if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL) @@ -280,14 +280,28 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) for (j = 1; j <= block[0x7e]; j++) { for (i = 0; i < 4; i++) { - if (drm_do_probe_ddc_edid(adapter, block, j, - EDID_LENGTH)) + if (drm_do_probe_ddc_edid(adapter, + block + (valid_extensions + 1) * EDID_LENGTH, + j, EDID_LENGTH)) goto out; - if (drm_edid_block_valid(block + j * EDID_LENGTH)) + if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) { + valid_extensions++; break; + } } if (i == 4) - goto carp; + dev_warn(connector->dev->dev, + "%s: Ignoring invalid EDID block %d.\n", + drm_get_connector_name(connector), j); + } + + if (valid_extensions != block[0x7e]) { + block[EDID_LENGTH-1] += block[0x7e] - valid_extensions; + block[0x7e] = valid_extensions; + new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL); + if (!new) + goto out; + block = new; } return block; diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index b744dad5c23..a39794bac04 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -37,7 +37,6 @@ #include "drmP.h" #include <linux/poll.h> #include <linux/slab.h> -#include <linux/smp_lock.h> /* from BKL pushdown: note that nothing else serializes idr_find() */ DEFINE_MUTEX(drm_global_mutex); diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 9d3a5030b6e..16d5155edad 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -585,10 +585,13 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe, struct timeval now; unsigned long flags; unsigned int seq; + int ret; e = kzalloc(sizeof *e, GFP_KERNEL); - if (e == NULL) - return -ENOMEM; + if (e == NULL) { + ret = -ENOMEM; + goto err_put; + } e->pipe = pipe; e->base.pid = current->pid; @@ -603,9 +606,8 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe, spin_lock_irqsave(&dev->event_lock, flags); if (file_priv->event_space < sizeof e->event) { - 
spin_unlock_irqrestore(&dev->event_lock, flags); - kfree(e); - return -ENOMEM; + ret = -EBUSY; + goto err_unlock; } file_priv->event_space -= sizeof e->event; @@ -626,7 +628,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe, if ((seq - vblwait->request.sequence) <= (1 << 23)) { e->event.tv_sec = now.tv_sec; e->event.tv_usec = now.tv_usec; - drm_vblank_put(dev, e->pipe); + drm_vblank_put(dev, pipe); list_add_tail(&e->base.link, &e->base.file_priv->event_list); wake_up_interruptible(&e->base.file_priv->event_wait); trace_drm_vblank_event_delivered(current->pid, pipe, @@ -638,6 +640,13 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe, spin_unlock_irqrestore(&dev->event_lock, flags); return 0; + +err_unlock: + spin_unlock_irqrestore(&dev->event_lock, flags); + kfree(e); +err_put: + drm_vblank_put(dev, pipe); + return ret; } /** diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 7a26f4dd21a..e6800819bca 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -767,6 +767,9 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_HAS_BLT: value = HAS_BLT(dev); break; + case I915_PARAM_HAS_COHERENT_RINGS: + value = 1; + break; default: DRM_DEBUG_DRIVER("Unknown parameter %d\n", param->param); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 3467dd42076..f737960712e 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -44,7 +44,7 @@ unsigned int i915_fbpercrtc = 0; module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); unsigned int i915_powersave = 1; -module_param_named(powersave, i915_powersave, int, 0400); +module_param_named(powersave, i915_powersave, int, 0600); unsigned int i915_lvds_downclock = 0; module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); @@ -150,7 +150,8 @@ static const struct intel_device_info intel_ironlake_d_info = { static const struct intel_device_info intel_ironlake_m_info = { .gen = 5, .is_mobile = 1, - .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1, + .need_gfx_hws = 1, .has_rc6 = 1, .has_hotplug = 1, + .has_fbc = 0, /* disabled due to buggy hardware */ .has_bsd_ring = 1, }; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2c2c19b6285..409826da309 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1045,6 +1045,8 @@ void i915_gem_clflush_object(struct drm_gem_object *obj); int i915_gem_object_set_domain(struct drm_gem_object *obj, uint32_t read_domains, uint32_t write_domain); +int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, + bool interruptible); int i915_gem_init_ringbuffer(struct drm_device *dev); void i915_gem_cleanup_ringbuffer(struct drm_device *dev); int i915_gem_do_init(struct drm_device *dev, unsigned long start, @@ -1321,6 +1323,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type) #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) +#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) #define PRIMARY_RINGBUFFER_SIZE (128*1024) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 8eb8453208b..275ec6ed43a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -38,8 +38,7 @@ static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj); -static int 
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, - bool pipelined); +static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, @@ -547,6 +546,19 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_object *obj_priv; int ret = 0; + if (args->size == 0) + return 0; + + if (!access_ok(VERIFY_WRITE, + (char __user *)(uintptr_t)args->data_ptr, + args->size)) + return -EFAULT; + + ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr, + args->size); + if (ret) + return -EFAULT; + ret = i915_mutex_lock_interruptible(dev); if (ret) return ret; @@ -564,23 +576,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, goto out; } - if (args->size == 0) - goto out; - - if (!access_ok(VERIFY_WRITE, - (char __user *)(uintptr_t)args->data_ptr, - args->size)) { - ret = -EFAULT; - goto out; - } - - ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr, - args->size); - if (ret) { - ret = -EFAULT; - goto out; - } - ret = i915_gem_object_get_pages_or_evict(obj); if (ret) goto out; @@ -981,7 +976,20 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_pwrite *args = data; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; - int ret = 0; + int ret; + + if (args->size == 0) + return 0; + + if (!access_ok(VERIFY_READ, + (char __user *)(uintptr_t)args->data_ptr, + args->size)) + return -EFAULT; + + ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr, + args->size); + if (ret) + return -EFAULT; ret = i915_mutex_lock_interruptible(dev); if (ret) @@ -994,30 +1002,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, } obj_priv = to_intel_bo(obj); - /* Bounds check destination. */ if (args->offset > obj->size || args->size > obj->size - args->offset) { ret = -EINVAL; goto out; } - if (args->size == 0) - goto out; - - if (!access_ok(VERIFY_READ, - (char __user *)(uintptr_t)args->data_ptr, - args->size)) { - ret = -EFAULT; - goto out; - } - - ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr, - args->size); - if (ret) { - ret = -EFAULT; - goto out; - } - /* We can only do the GTT pwrite on untiled buffers, as otherwise * it would end up going through the fenced access, and we'll get * different detiling behavior between reading and writing. 
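The i915 pread/pwrite hunks above hoist the user-buffer checks (size == 0, access_ok(), fault_in_pages_*) ahead of i915_mutex_lock_interruptible(), so a bad pointer is rejected before the driver lock is ever taken. A sketch of the same ordering with a hypothetical device and helper:

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>

struct ex_dev {
	struct mutex lock;
};

/* assumed helper that copies user data under the lock, not from the patch */
int ex_copy_in(struct ex_dev *ed, const char __user *ubuf, size_t len);

static int ex_write(struct ex_dev *ed, const char __user *ubuf, size_t len)
{
	int ret;

	if (len == 0)
		return 0;
	/* validate the user range before taking any locks */
	if (!access_ok(VERIFY_READ, ubuf, len))
		return -EFAULT;

	mutex_lock(&ed->lock);
	ret = ex_copy_in(ed, ubuf, len);
	mutex_unlock(&ed->lock);

	return ret;
}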
@@ -2172,7 +2162,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) static int i915_ring_idle(struct drm_device *dev, struct intel_ring_buffer *ring) { - if (list_empty(&ring->gpu_write_list)) + if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list)) return 0; i915_gem_flush_ring(dev, NULL, ring, @@ -2190,9 +2180,7 @@ i915_gpu_idle(struct drm_device *dev) int ret; lists_empty = (list_empty(&dev_priv->mm.flushing_list) && - list_empty(&dev_priv->render_ring.active_list) && - list_empty(&dev_priv->bsd_ring.active_list) && - list_empty(&dev_priv->blt_ring.active_list)); + list_empty(&dev_priv->mm.active_list)); if (lists_empty) return 0; @@ -2605,7 +2593,7 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj, if (reg->gpu) { int ret; - ret = i915_gem_object_flush_gpu_write_domain(obj, true); + ret = i915_gem_object_flush_gpu_write_domain(obj); if (ret) return ret; @@ -2753,8 +2741,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj) /** Flushes any GPU write domain for the object if it's dirty. */ static int -i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, - bool pipelined) +i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; uint32_t old_write_domain; @@ -2773,10 +2760,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, obj->read_domains, old_write_domain); - if (pipelined) - return 0; - - return i915_gem_object_wait_rendering(obj, true); + return 0; } /** Flushes the GTT write domain for the object if it's dirty. */ @@ -2837,18 +2821,15 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) if (obj_priv->gtt_space == NULL) return -EINVAL; - ret = i915_gem_object_flush_gpu_write_domain(obj, false); + ret = i915_gem_object_flush_gpu_write_domain(obj); if (ret != 0) return ret; + ret = i915_gem_object_wait_rendering(obj, true); + if (ret) + return ret; i915_gem_object_flush_cpu_write_domain(obj); - if (write) { - ret = i915_gem_object_wait_rendering(obj, true); - if (ret) - return ret; - } - old_write_domain = obj->write_domain; old_read_domains = obj->read_domains; @@ -2886,7 +2867,7 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, if (obj_priv->gtt_space == NULL) return -EINVAL; - ret = i915_gem_object_flush_gpu_write_domain(obj, true); + ret = i915_gem_object_flush_gpu_write_domain(obj); if (ret) return ret; @@ -2909,6 +2890,20 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, return 0; } +int +i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, + bool interruptible) +{ + if (!obj->active) + return 0; + + if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) + i915_gem_flush_ring(obj->base.dev, NULL, obj->ring, + 0, obj->base.write_domain); + + return i915_gem_object_wait_rendering(&obj->base, interruptible); +} + /** * Moves a single object to the CPU read, and possibly write domain. 
* @@ -2921,9 +2916,12 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) uint32_t old_write_domain, old_read_domains; int ret; - ret = i915_gem_object_flush_gpu_write_domain(obj, false); + ret = i915_gem_object_flush_gpu_write_domain(obj); if (ret != 0) return ret; + ret = i915_gem_object_wait_rendering(obj, true); + if (ret) + return ret; i915_gem_object_flush_gtt_write_domain(obj); @@ -2932,12 +2930,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) */ i915_gem_object_set_to_full_cpu_read_domain(obj); - if (write) { - ret = i915_gem_object_wait_rendering(obj, true); - if (ret) - return ret; - } - old_write_domain = obj->write_domain; old_read_domains = obj->read_domains; @@ -3108,7 +3100,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, * write domain */ if (obj->write_domain && - obj->write_domain != obj->pending_read_domains) { + (obj->write_domain != obj->pending_read_domains || + obj_priv->ring != ring)) { flush_domains |= obj->write_domain; invalidate_domains |= obj->pending_read_domains & ~obj->write_domain; @@ -3201,9 +3194,13 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, if (offset == 0 && size == obj->size) return i915_gem_object_set_to_cpu_domain(obj, 0); - ret = i915_gem_object_flush_gpu_write_domain(obj, false); + ret = i915_gem_object_flush_gpu_write_domain(obj); if (ret != 0) return ret; + ret = i915_gem_object_wait_rendering(obj, true); + if (ret) + return ret; + i915_gem_object_flush_gtt_write_domain(obj); /* If we're already fully in the CPU read domain, we're done. */ @@ -3250,192 +3247,230 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, return 0; } -/** - * Pin an object to the GTT and evaluate the relocations landing in it. 
- */ static int -i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj, - struct drm_file *file_priv, - struct drm_i915_gem_exec_object2 *entry) +i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, + struct drm_file *file_priv, + struct drm_i915_gem_exec_object2 *entry, + struct drm_i915_gem_relocation_entry *reloc) { struct drm_device *dev = obj->base.dev; - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_i915_gem_relocation_entry __user *user_relocs; - struct drm_gem_object *target_obj = NULL; - uint32_t target_handle = 0; - int i, ret = 0; + struct drm_gem_object *target_obj; + uint32_t target_offset; + int ret = -EINVAL; - user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr; - for (i = 0; i < entry->relocation_count; i++) { - struct drm_i915_gem_relocation_entry reloc; - uint32_t target_offset; + target_obj = drm_gem_object_lookup(dev, file_priv, + reloc->target_handle); + if (target_obj == NULL) + return -ENOENT; - if (__copy_from_user_inatomic(&reloc, - user_relocs+i, - sizeof(reloc))) { - ret = -EFAULT; - break; - } + target_offset = to_intel_bo(target_obj)->gtt_offset; - if (reloc.target_handle != target_handle) { - drm_gem_object_unreference(target_obj); +#if WATCH_RELOC + DRM_INFO("%s: obj %p offset %08x target %d " + "read %08x write %08x gtt %08x " + "presumed %08x delta %08x\n", + __func__, + obj, + (int) reloc->offset, + (int) reloc->target_handle, + (int) reloc->read_domains, + (int) reloc->write_domain, + (int) target_offset, + (int) reloc->presumed_offset, + reloc->delta); +#endif - target_obj = drm_gem_object_lookup(dev, file_priv, - reloc.target_handle); - if (target_obj == NULL) { - ret = -ENOENT; - break; - } + /* The target buffer should have appeared before us in the + * exec_object list, so it should have a GTT space bound by now. 
+ */ + if (target_offset == 0) { + DRM_ERROR("No GTT space found for object %d\n", + reloc->target_handle); + goto err; + } - target_handle = reloc.target_handle; - } - target_offset = to_intel_bo(target_obj)->gtt_offset; + /* Validate that the target is in a valid r/w GPU domain */ + if (reloc->write_domain & (reloc->write_domain - 1)) { + DRM_ERROR("reloc with multiple write domains: " + "obj %p target %d offset %d " + "read %08x write %08x", + obj, reloc->target_handle, + (int) reloc->offset, + reloc->read_domains, + reloc->write_domain); + goto err; + } + if (reloc->write_domain & I915_GEM_DOMAIN_CPU || + reloc->read_domains & I915_GEM_DOMAIN_CPU) { + DRM_ERROR("reloc with read/write CPU domains: " + "obj %p target %d offset %d " + "read %08x write %08x", + obj, reloc->target_handle, + (int) reloc->offset, + reloc->read_domains, + reloc->write_domain); + goto err; + } + if (reloc->write_domain && target_obj->pending_write_domain && + reloc->write_domain != target_obj->pending_write_domain) { + DRM_ERROR("Write domain conflict: " + "obj %p target %d offset %d " + "new %08x old %08x\n", + obj, reloc->target_handle, + (int) reloc->offset, + reloc->write_domain, + target_obj->pending_write_domain); + goto err; + } -#if WATCH_RELOC - DRM_INFO("%s: obj %p offset %08x target %d " - "read %08x write %08x gtt %08x " - "presumed %08x delta %08x\n", - __func__, - obj, - (int) reloc.offset, - (int) reloc.target_handle, - (int) reloc.read_domains, - (int) reloc.write_domain, - (int) target_offset, - (int) reloc.presumed_offset, - reloc.delta); -#endif + target_obj->pending_read_domains |= reloc->read_domains; + target_obj->pending_write_domain |= reloc->write_domain; - /* The target buffer should have appeared before us in the - * exec_object list, so it should have a GTT space bound by now. - */ - if (target_offset == 0) { - DRM_ERROR("No GTT space found for object %d\n", - reloc.target_handle); - ret = -EINVAL; - break; - } + /* If the relocation already has the right value in it, no + * more work needs to be done. + */ + if (target_offset == reloc->presumed_offset) + goto out; - /* Validate that the target is in a valid r/w GPU domain */ - if (reloc.write_domain & (reloc.write_domain - 1)) { - DRM_ERROR("reloc with multiple write domains: " - "obj %p target %d offset %d " - "read %08x write %08x", - obj, reloc.target_handle, - (int) reloc.offset, - reloc.read_domains, - reloc.write_domain); - ret = -EINVAL; - break; - } - if (reloc.write_domain & I915_GEM_DOMAIN_CPU || - reloc.read_domains & I915_GEM_DOMAIN_CPU) { - DRM_ERROR("reloc with read/write CPU domains: " - "obj %p target %d offset %d " - "read %08x write %08x", - obj, reloc.target_handle, - (int) reloc.offset, - reloc.read_domains, - reloc.write_domain); - ret = -EINVAL; - break; - } - if (reloc.write_domain && target_obj->pending_write_domain && - reloc.write_domain != target_obj->pending_write_domain) { - DRM_ERROR("Write domain conflict: " - "obj %p target %d offset %d " - "new %08x old %08x\n", - obj, reloc.target_handle, - (int) reloc.offset, - reloc.write_domain, - target_obj->pending_write_domain); - ret = -EINVAL; - break; - } + /* Check that the relocation address is valid... 
*/ + if (reloc->offset > obj->base.size - 4) { + DRM_ERROR("Relocation beyond object bounds: " + "obj %p target %d offset %d size %d.\n", + obj, reloc->target_handle, + (int) reloc->offset, + (int) obj->base.size); + goto err; + } + if (reloc->offset & 3) { + DRM_ERROR("Relocation not 4-byte aligned: " + "obj %p target %d offset %d.\n", + obj, reloc->target_handle, + (int) reloc->offset); + goto err; + } - target_obj->pending_read_domains |= reloc.read_domains; - target_obj->pending_write_domain |= reloc.write_domain; + /* and points to somewhere within the target object. */ + if (reloc->delta >= target_obj->size) { + DRM_ERROR("Relocation beyond target object bounds: " + "obj %p target %d delta %d size %d.\n", + obj, reloc->target_handle, + (int) reloc->delta, + (int) target_obj->size); + goto err; + } - /* If the relocation already has the right value in it, no - * more work needs to be done. - */ - if (target_offset == reloc.presumed_offset) - continue; + reloc->delta += target_offset; + if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) { + uint32_t page_offset = reloc->offset & ~PAGE_MASK; + char *vaddr; - /* Check that the relocation address is valid... */ - if (reloc.offset > obj->base.size - 4) { - DRM_ERROR("Relocation beyond object bounds: " - "obj %p target %d offset %d size %d.\n", - obj, reloc.target_handle, - (int) reloc.offset, (int) obj->base.size); - ret = -EINVAL; - break; - } - if (reloc.offset & 3) { - DRM_ERROR("Relocation not 4-byte aligned: " - "obj %p target %d offset %d.\n", - obj, reloc.target_handle, - (int) reloc.offset); - ret = -EINVAL; - break; - } + vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]); + *(uint32_t *)(vaddr + page_offset) = reloc->delta; + kunmap_atomic(vaddr); + } else { + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t __iomem *reloc_entry; + void __iomem *reloc_page; - /* and points to somewhere within the target object. */ - if (reloc.delta >= target_obj->size) { - DRM_ERROR("Relocation beyond target object bounds: " - "obj %p target %d delta %d size %d.\n", - obj, reloc.target_handle, - (int) reloc.delta, (int) target_obj->size); - ret = -EINVAL; - break; - } + ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1); + if (ret) + goto err; - reloc.delta += target_offset; - if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) { - uint32_t page_offset = reloc.offset & ~PAGE_MASK; - char *vaddr; + /* Map the page containing the relocation we're going to perform. */ + reloc->offset += obj->gtt_offset; + reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, + reloc->offset & PAGE_MASK); + reloc_entry = (uint32_t __iomem *) + (reloc_page + (reloc->offset & ~PAGE_MASK)); + iowrite32(reloc->delta, reloc_entry); + io_mapping_unmap_atomic(reloc_page); + } - vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]); - *(uint32_t *)(vaddr + page_offset) = reloc.delta; - kunmap_atomic(vaddr); - } else { - uint32_t __iomem *reloc_entry; - void __iomem *reloc_page; + /* and update the user's relocation entry */ + reloc->presumed_offset = target_offset; - ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1); - if (ret) - break; +out: + ret = 0; +err: + drm_gem_object_unreference(target_obj); + return ret; +} - /* Map the page containing the relocation we're going to perform. 
*/ - reloc.offset += obj->gtt_offset; - reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, - reloc.offset & PAGE_MASK); - reloc_entry = (uint32_t __iomem *) - (reloc_page + (reloc.offset & ~PAGE_MASK)); - iowrite32(reloc.delta, reloc_entry); - io_mapping_unmap_atomic(reloc_page); - } +static int +i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj, + struct drm_file *file_priv, + struct drm_i915_gem_exec_object2 *entry) +{ + struct drm_i915_gem_relocation_entry __user *user_relocs; + int i, ret; + + user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr; + for (i = 0; i < entry->relocation_count; i++) { + struct drm_i915_gem_relocation_entry reloc; + + if (__copy_from_user_inatomic(&reloc, + user_relocs+i, + sizeof(reloc))) + return -EFAULT; + + ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &reloc); + if (ret) + return ret; - /* and update the user's relocation entry */ - reloc.presumed_offset = target_offset; if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset, - &reloc.presumed_offset, - sizeof(reloc.presumed_offset))) { - ret = -EFAULT; - break; - } + &reloc.presumed_offset, + sizeof(reloc.presumed_offset))) + return -EFAULT; } - drm_gem_object_unreference(target_obj); - return ret; + return 0; +} + +static int +i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, + struct drm_file *file_priv, + struct drm_i915_gem_exec_object2 *entry, + struct drm_i915_gem_relocation_entry *relocs) +{ + int i, ret; + + for (i = 0; i < entry->relocation_count; i++) { + ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &relocs[i]); + if (ret) + return ret; + } + + return 0; } static int -i915_gem_execbuffer_pin(struct drm_device *dev, - struct drm_file *file, - struct drm_gem_object **object_list, - struct drm_i915_gem_exec_object2 *exec_list, - int count) +i915_gem_execbuffer_relocate(struct drm_device *dev, + struct drm_file *file, + struct drm_gem_object **object_list, + struct drm_i915_gem_exec_object2 *exec_list, + int count) +{ + int i, ret; + + for (i = 0; i < count; i++) { + struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); + obj->base.pending_read_domains = 0; + obj->base.pending_write_domain = 0; + ret = i915_gem_execbuffer_relocate_object(obj, file, + &exec_list[i]); + if (ret) + return ret; + } + + return 0; +} + +static int +i915_gem_execbuffer_reserve(struct drm_device *dev, + struct drm_file *file, + struct drm_gem_object **object_list, + struct drm_i915_gem_exec_object2 *exec_list, + int count) { struct drm_i915_private *dev_priv = dev->dev_private; int ret, i, retry; @@ -3497,6 +3532,133 @@ i915_gem_execbuffer_pin(struct drm_device *dev, return 0; } +static int +i915_gem_execbuffer_relocate_slow(struct drm_device *dev, + struct drm_file *file, + struct drm_gem_object **object_list, + struct drm_i915_gem_exec_object2 *exec_list, + int count) +{ + struct drm_i915_gem_relocation_entry *reloc; + int i, total, ret; + + for (i = 0; i < count; i++) { + struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); + obj->in_execbuffer = false; + } + + mutex_unlock(&dev->struct_mutex); + + total = 0; + for (i = 0; i < count; i++) + total += exec_list[i].relocation_count; + + reloc = drm_malloc_ab(total, sizeof(*reloc)); + if (reloc == NULL) { + mutex_lock(&dev->struct_mutex); + return -ENOMEM; + } + + total = 0; + for (i = 0; i < count; i++) { + struct drm_i915_gem_relocation_entry __user *user_relocs; + + user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr; + + if 
(copy_from_user(reloc+total, user_relocs, + exec_list[i].relocation_count * + sizeof(*reloc))) { + ret = -EFAULT; + mutex_lock(&dev->struct_mutex); + goto err; + } + + total += exec_list[i].relocation_count; + } + + ret = i915_mutex_lock_interruptible(dev); + if (ret) { + mutex_lock(&dev->struct_mutex); + goto err; + } + + ret = i915_gem_execbuffer_reserve(dev, file, + object_list, exec_list, + count); + if (ret) + goto err; + + total = 0; + for (i = 0; i < count; i++) { + struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); + obj->base.pending_read_domains = 0; + obj->base.pending_write_domain = 0; + ret = i915_gem_execbuffer_relocate_object_slow(obj, file, + &exec_list[i], + reloc + total); + if (ret) + goto err; + + total += exec_list[i].relocation_count; + } + + /* Leave the user relocations as are, this is the painfully slow path, + * and we want to avoid the complication of dropping the lock whilst + * having buffers reserved in the aperture and so causing spurious + * ENOSPC for random operations. + */ + +err: + drm_free_large(reloc); + return ret; +} + +static int +i915_gem_execbuffer_move_to_gpu(struct drm_device *dev, + struct drm_file *file, + struct intel_ring_buffer *ring, + struct drm_gem_object **objects, + int count) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int ret, i; + + /* Zero the global flush/invalidate flags. These + * will be modified as new domains are computed + * for each object + */ + dev->invalidate_domains = 0; + dev->flush_domains = 0; + dev_priv->mm.flush_rings = 0; + for (i = 0; i < count; i++) + i915_gem_object_set_to_gpu_domain(objects[i], ring); + + if (dev->invalidate_domains | dev->flush_domains) { +#if WATCH_EXEC + DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", + __func__, + dev->invalidate_domains, + dev->flush_domains); +#endif + i915_gem_flush(dev, file, + dev->invalidate_domains, + dev->flush_domains, + dev_priv->mm.flush_rings); + } + + for (i = 0; i < count; i++) { + struct drm_i915_gem_object *obj = to_intel_bo(objects[i]); + /* XXX replace with semaphores */ + if (obj->ring && ring != obj->ring) { + ret = i915_gem_object_wait_rendering(&obj->base, true); + if (ret) + return ret; + } + } + + return 0; +} + /* Throttle our rendering by waiting until the ring has completed our requests * emitted over 20 msec ago. * @@ -3580,8 +3742,15 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, for (i = 0; i < count; i++) { char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; - size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry); + int length; /* limited by fault_in_pages_readable() */ + + /* First check for malicious input causing overflow */ + if (exec[i].relocation_count > + INT_MAX / sizeof(struct drm_i915_gem_relocation_entry)) + return -EINVAL; + length = exec[i].relocation_count * + sizeof(struct drm_i915_gem_relocation_entry); if (!access_ok(VERIFY_READ, ptr, length)) return -EFAULT; @@ -3724,18 +3893,24 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } /* Move the objects en-masse into the GTT, evicting if necessary. */ - ret = i915_gem_execbuffer_pin(dev, file, - object_list, exec_list, - args->buffer_count); + ret = i915_gem_execbuffer_reserve(dev, file, + object_list, exec_list, + args->buffer_count); if (ret) goto err; /* The objects are in their final locations, apply the relocations. 
*/ - for (i = 0; i < args->buffer_count; i++) { - struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); - obj->base.pending_read_domains = 0; - obj->base.pending_write_domain = 0; - ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]); + ret = i915_gem_execbuffer_relocate(dev, file, + object_list, exec_list, + args->buffer_count); + if (ret) { + if (ret == -EFAULT) { + ret = i915_gem_execbuffer_relocate_slow(dev, file, + object_list, + exec_list, + args->buffer_count); + BUG_ON(!mutex_is_locked(&dev->struct_mutex)); + } if (ret) goto err; } @@ -3757,33 +3932,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; } - /* Zero the global flush/invalidate flags. These - * will be modified as new domains are computed - * for each object - */ - dev->invalidate_domains = 0; - dev->flush_domains = 0; - dev_priv->mm.flush_rings = 0; - - for (i = 0; i < args->buffer_count; i++) { - struct drm_gem_object *obj = object_list[i]; - - /* Compute new gpu domains and update invalidate/flush */ - i915_gem_object_set_to_gpu_domain(obj, ring); - } - - if (dev->invalidate_domains | dev->flush_domains) { -#if WATCH_EXEC - DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", - __func__, - dev->invalidate_domains, - dev->flush_domains); -#endif - i915_gem_flush(dev, file, - dev->invalidate_domains, - dev->flush_domains, - dev_priv->mm.flush_rings); - } + ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring, + object_list, args->buffer_count); + if (ret) + goto err; for (i = 0; i < args->buffer_count; i++) { struct drm_gem_object *obj = object_list[i]; @@ -4043,8 +4195,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) alignment = i915_gem_get_gtt_alignment(obj); if (obj_priv->gtt_offset & (alignment - 1)) { WARN(obj_priv->pin_count, - "bo is already pinned with incorrect alignment:" - " offset=%x, req.alignment=%x\n", + "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n", obj_priv->gtt_offset, alignment); ret = i915_gem_object_unbind(obj); if (ret) @@ -4223,10 +4374,20 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, * use this buffer rather sooner than later, so issuing the required * flush earlier is beneficial. */ - if (obj->write_domain & I915_GEM_GPU_DOMAINS) + if (obj->write_domain & I915_GEM_GPU_DOMAINS) { i915_gem_flush_ring(dev, file_priv, obj_priv->ring, 0, obj->write_domain); + } else if (obj_priv->ring->outstanding_lazy_request) { + /* This ring is not being cleared by active usage, + * so emit a request to do so. + */ + u32 seqno = i915_add_request(dev, + NULL, NULL, + obj_priv->ring); + if (seqno == 0) + ret = -ENOMEM; + } /* Update the active list for the hardware's current position. 
* Otherwise this only updates on a delayed timer or when irqs @@ -4856,17 +5017,24 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, struct drm_file *file_priv) { struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); - void *obj_addr; - int ret; - char __user *user_data; + void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset; + char __user *user_data = (char __user *) (uintptr_t) args->data_ptr; - user_data = (char __user *) (uintptr_t) args->data_ptr; - obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset; + DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size); - DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size); - ret = copy_from_user(obj_addr, user_data, args->size); - if (ret) - return -EFAULT; + if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { + unsigned long unwritten; + + /* The physical object once assigned is fixed for the lifetime + * of the obj, so we can safely drop the lock and continue + * to access vaddr. + */ + mutex_unlock(&dev->struct_mutex); + unwritten = copy_from_user(vaddr, user_data, args->size); + mutex_lock(&dev->struct_mutex); + if (unwritten) + return -EFAULT; + } drm_agp_chipset_flush(dev); return 0; @@ -4900,9 +5068,7 @@ i915_gpu_is_active(struct drm_device *dev) int lists_empty; lists_empty = list_empty(&dev_priv->mm.flushing_list) && - list_empty(&dev_priv->render_ring.active_list) && - list_empty(&dev_priv->bsd_ring.active_list) && - list_empty(&dev_priv->blt_ring.active_list); + list_empty(&dev_priv->mm.active_list); return !lists_empty; } diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 43a4013f53f..d8ae7d1d0cc 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -165,9 +165,7 @@ i915_gem_evict_everything(struct drm_device *dev) lists_empty = (list_empty(&dev_priv->mm.inactive_list) && list_empty(&dev_priv->mm.flushing_list) && - list_empty(&dev_priv->render_ring.active_list) && - list_empty(&dev_priv->bsd_ring.active_list) && - list_empty(&dev_priv->blt_ring.active_list)); + list_empty(&dev_priv->mm.active_list)); if (lists_empty) return -ENOSPC; @@ -184,9 +182,7 @@ i915_gem_evict_everything(struct drm_device *dev) lists_empty = (list_empty(&dev_priv->mm.inactive_list) && list_empty(&dev_priv->mm.flushing_list) && - list_empty(&dev_priv->render_ring.active_list) && - list_empty(&dev_priv->bsd_ring.active_list) && - list_empty(&dev_priv->blt_ring.active_list)); + list_empty(&dev_priv->mm.active_list)); BUG_ON(!lists_empty); return 0; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 25ed911a311..878fc766a12 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3033,6 +3033,7 @@ #define TRANS_DP_10BPC (1<<9) #define TRANS_DP_6BPC (2<<9) #define TRANS_DP_12BPC (3<<9) +#define TRANS_DP_BPC_MASK (3<<9) #define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4) #define TRANS_DP_VSYNC_ACTIVE_LOW 0 #define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3) diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 989c19d2d95..42729d25da5 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -239,6 +239,16 @@ static void i915_save_modeset_reg(struct drm_device *dev) if (drm_core_check_feature(dev, DRIVER_MODESET)) return; + /* Cursor state */ + dev_priv->saveCURACNTR = I915_READ(CURACNTR); + dev_priv->saveCURAPOS = I915_READ(CURAPOS); + dev_priv->saveCURABASE = I915_READ(CURABASE); + 
dev_priv->saveCURBCNTR = I915_READ(CURBCNTR); + dev_priv->saveCURBPOS = I915_READ(CURBPOS); + dev_priv->saveCURBBASE = I915_READ(CURBBASE); + if (IS_GEN2(dev)) + dev_priv->saveCURSIZE = I915_READ(CURSIZE); + if (HAS_PCH_SPLIT(dev)) { dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); @@ -529,6 +539,16 @@ static void i915_restore_modeset_reg(struct drm_device *dev) I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); I915_WRITE(DSPBADDR, I915_READ(DSPBADDR)); + /* Cursor state */ + I915_WRITE(CURAPOS, dev_priv->saveCURAPOS); + I915_WRITE(CURACNTR, dev_priv->saveCURACNTR); + I915_WRITE(CURABASE, dev_priv->saveCURABASE); + I915_WRITE(CURBPOS, dev_priv->saveCURBPOS); + I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR); + I915_WRITE(CURBBASE, dev_priv->saveCURBBASE); + if (IS_GEN2(dev)) + I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); + return; } @@ -543,16 +563,6 @@ void i915_save_display(struct drm_device *dev) /* Don't save them in KMS mode */ i915_save_modeset_reg(dev); - /* Cursor state */ - dev_priv->saveCURACNTR = I915_READ(CURACNTR); - dev_priv->saveCURAPOS = I915_READ(CURAPOS); - dev_priv->saveCURABASE = I915_READ(CURABASE); - dev_priv->saveCURBCNTR = I915_READ(CURBCNTR); - dev_priv->saveCURBPOS = I915_READ(CURBPOS); - dev_priv->saveCURBBASE = I915_READ(CURBBASE); - if (IS_GEN2(dev)) - dev_priv->saveCURSIZE = I915_READ(CURSIZE); - /* CRT state */ if (HAS_PCH_SPLIT(dev)) { dev_priv->saveADPA = I915_READ(PCH_ADPA); @@ -657,16 +667,6 @@ void i915_restore_display(struct drm_device *dev) /* Don't restore them in KMS mode */ i915_restore_modeset_reg(dev); - /* Cursor state */ - I915_WRITE(CURAPOS, dev_priv->saveCURAPOS); - I915_WRITE(CURACNTR, dev_priv->saveCURACNTR); - I915_WRITE(CURABASE, dev_priv->saveCURABASE); - I915_WRITE(CURBPOS, dev_priv->saveCURBPOS); - I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR); - I915_WRITE(CURBBASE, dev_priv->saveCURBBASE); - if (IS_GEN2(dev)) - I915_WRITE(CURSIZE, dev_priv->saveCURSIZE); - /* CRT state */ if (HAS_PCH_SPLIT(dev)) I915_WRITE(PCH_ADPA, dev_priv->saveADPA); @@ -862,8 +862,10 @@ int i915_restore_state(struct drm_device *dev) /* Clock gating state */ intel_init_clock_gating(dev); - if (HAS_PCH_SPLIT(dev)) + if (HAS_PCH_SPLIT(dev)) { ironlake_enable_drps(dev); + intel_init_emon(dev); + } /* Cache mode state */ I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c index 65c88f9ba12..2cb8e0b9f1e 100644 --- a/drivers/gpu/drm/i915/intel_acpi.c +++ b/drivers/gpu/drm/i915/intel_acpi.c @@ -190,37 +190,6 @@ out: kfree(output.pointer); } -static int intel_dsm_switchto(enum vga_switcheroo_client_id id) -{ - return 0; -} - -static int intel_dsm_power_state(enum vga_switcheroo_client_id id, - enum vga_switcheroo_state state) -{ - return 0; -} - -static int intel_dsm_init(void) -{ - return 0; -} - -static int intel_dsm_get_client_id(struct pci_dev *pdev) -{ - if (intel_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) - return VGA_SWITCHEROO_IGD; - else - return VGA_SWITCHEROO_DIS; -} - -static struct vga_switcheroo_handler intel_dsm_handler = { - .switchto = intel_dsm_switchto, - .power_state = intel_dsm_power_state, - .init = intel_dsm_init, - .get_client_id = intel_dsm_get_client_id, -}; - static bool intel_dsm_pci_probe(struct pci_dev *pdev) { acpi_handle dhandle, intel_handle; @@ -276,11 +245,8 @@ void intel_register_dsm_handler(void) { if (!intel_dsm_detect()) return; - - 
vga_switcheroo_register_handler(&intel_dsm_handler); } void intel_unregister_dsm_handler(void) { - vga_switcheroo_unregister_handler(); } diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index c55c7704335..8df57431606 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -34,6 +34,25 @@ #include "i915_drm.h" #include "i915_drv.h" +/* Here's the desired hotplug mode */ +#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \ + ADPA_CRT_HOTPLUG_WARMUP_10MS | \ + ADPA_CRT_HOTPLUG_SAMPLE_4S | \ + ADPA_CRT_HOTPLUG_VOLTAGE_50 | \ + ADPA_CRT_HOTPLUG_VOLREF_325MV | \ + ADPA_CRT_HOTPLUG_ENABLE) + +struct intel_crt { + struct intel_encoder base; + bool force_hotplug_required; +}; + +static struct intel_crt *intel_attached_crt(struct drm_connector *connector) +{ + return container_of(intel_attached_encoder(connector), + struct intel_crt, base); +} + static void intel_crt_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; @@ -129,7 +148,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); } - adpa = 0; + adpa = ADPA_HOTPLUG_BITS; if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) adpa |= ADPA_HSYNC_ACTIVE_HIGH; if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) @@ -157,53 +176,44 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) { struct drm_device *dev = connector->dev; + struct intel_crt *crt = intel_attached_crt(connector); struct drm_i915_private *dev_priv = dev->dev_private; - u32 adpa, temp; + u32 adpa; bool ret; - bool turn_off_dac = false; - temp = adpa = I915_READ(PCH_ADPA); + /* The first time through, trigger an explicit detection cycle */ + if (crt->force_hotplug_required) { + bool turn_off_dac = HAS_PCH_SPLIT(dev); + u32 save_adpa; - if (HAS_PCH_SPLIT(dev)) - turn_off_dac = true; - - adpa &= ~ADPA_CRT_HOTPLUG_MASK; - if (turn_off_dac) - adpa &= ~ADPA_DAC_ENABLE; - - /* disable HPD first */ - I915_WRITE(PCH_ADPA, adpa); - (void)I915_READ(PCH_ADPA); - - adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 | - ADPA_CRT_HOTPLUG_WARMUP_10MS | - ADPA_CRT_HOTPLUG_SAMPLE_4S | - ADPA_CRT_HOTPLUG_VOLTAGE_50 | /* default */ - ADPA_CRT_HOTPLUG_VOLREF_325MV | - ADPA_CRT_HOTPLUG_ENABLE | - ADPA_CRT_HOTPLUG_FORCE_TRIGGER); - - DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa); - I915_WRITE(PCH_ADPA, adpa); - - if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, - 1000)) - DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); - - if (turn_off_dac) { - /* Make sure hotplug is enabled */ - I915_WRITE(PCH_ADPA, temp | ADPA_CRT_HOTPLUG_ENABLE); - (void)I915_READ(PCH_ADPA); + crt->force_hotplug_required = 0; + + save_adpa = adpa = I915_READ(PCH_ADPA); + DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); + + adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; + if (turn_off_dac) + adpa &= ~ADPA_DAC_ENABLE; + + I915_WRITE(PCH_ADPA, adpa); + + if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, + 1000)) + DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); + + if (turn_off_dac) { + I915_WRITE(PCH_ADPA, save_adpa); + POSTING_READ(PCH_ADPA); + } } /* Check the status to see if both blue and green are on now */ adpa = I915_READ(PCH_ADPA); - adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK; - if ((adpa == ADPA_CRT_HOTPLUG_MONITOR_COLOR) || - (adpa == ADPA_CRT_HOTPLUG_MONITOR_MONO)) + if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) ret = true; else ret = false; + 
DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret); return ret; } @@ -277,13 +287,12 @@ static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus) return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1; } -static bool intel_crt_detect_ddc(struct drm_encoder *encoder) +static bool intel_crt_detect_ddc(struct intel_crt *crt) { - struct intel_encoder *intel_encoder = to_intel_encoder(encoder); - struct drm_i915_private *dev_priv = encoder->dev->dev_private; + struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private; /* CRT should always be at 0, but check anyway */ - if (intel_encoder->type != INTEL_OUTPUT_ANALOG) + if (crt->base.type != INTEL_OUTPUT_ANALOG) return false; if (intel_crt_ddc_probe(dev_priv, dev_priv->crt_ddc_pin)) { @@ -291,7 +300,7 @@ static bool intel_crt_detect_ddc(struct drm_encoder *encoder) return true; } - if (intel_ddc_probe(intel_encoder, dev_priv->crt_ddc_pin)) { + if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) { DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); return true; } @@ -300,9 +309,9 @@ static bool intel_crt_detect_ddc(struct drm_encoder *encoder) } static enum drm_connector_status -intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder) +intel_crt_load_detect(struct drm_crtc *crtc, struct intel_crt *crt) { - struct drm_encoder *encoder = &intel_encoder->base; + struct drm_encoder *encoder = &crt->base.base; struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); @@ -434,7 +443,7 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connector, bool force) { struct drm_device *dev = connector->dev; - struct intel_encoder *encoder = intel_attached_encoder(connector); + struct intel_crt *crt = intel_attached_crt(connector); struct drm_crtc *crtc; int dpms_mode; enum drm_connector_status status; @@ -443,28 +452,31 @@ intel_crt_detect(struct drm_connector *connector, bool force) if (intel_crt_detect_hotplug(connector)) { DRM_DEBUG_KMS("CRT detected via hotplug\n"); return connector_status_connected; - } else + } else { + DRM_DEBUG_KMS("CRT not detected via hotplug\n"); return connector_status_disconnected; + } } - if (intel_crt_detect_ddc(&encoder->base)) + if (intel_crt_detect_ddc(crt)) return connector_status_connected; if (!force) return connector->status; /* for pre-945g platforms use load detect */ - if (encoder->base.crtc && encoder->base.crtc->enabled) { - status = intel_crt_load_detect(encoder->base.crtc, encoder); + crtc = crt->base.base.crtc; + if (crtc && crtc->enabled) { + status = intel_crt_load_detect(crtc, crt); } else { - crtc = intel_get_load_detect_pipe(encoder, connector, + crtc = intel_get_load_detect_pipe(&crt->base, connector, NULL, &dpms_mode); if (crtc) { - if (intel_crt_detect_ddc(&encoder->base)) + if (intel_crt_detect_ddc(crt)) status = connector_status_connected; else - status = intel_crt_load_detect(crtc, encoder); - intel_release_load_detect_pipe(encoder, + status = intel_crt_load_detect(crtc, crt); + intel_release_load_detect_pipe(&crt->base, connector, dpms_mode); } else status = connector_status_unknown; @@ -536,17 +548,17 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = { void intel_crt_init(struct drm_device *dev) { struct drm_connector *connector; - struct intel_encoder *intel_encoder; + struct intel_crt *crt; struct intel_connector *intel_connector; struct drm_i915_private *dev_priv = dev->dev_private; - 
intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL); - if (!intel_encoder) + crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL); + if (!crt) return; intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); if (!intel_connector) { - kfree(intel_encoder); + kfree(crt); return; } @@ -554,20 +566,20 @@ void intel_crt_init(struct drm_device *dev) drm_connector_init(dev, &intel_connector->base, &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); - drm_encoder_init(dev, &intel_encoder->base, &intel_crt_enc_funcs, + drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC); - intel_connector_attach_encoder(intel_connector, intel_encoder); + intel_connector_attach_encoder(intel_connector, &crt->base); - intel_encoder->type = INTEL_OUTPUT_ANALOG; - intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | - (1 << INTEL_ANALOG_CLONE_BIT) | - (1 << INTEL_SDVO_LVDS_CLONE_BIT); - intel_encoder->crtc_mask = (1 << 0) | (1 << 1); + crt->base.type = INTEL_OUTPUT_ANALOG; + crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT | + 1 << INTEL_ANALOG_CLONE_BIT | + 1 << INTEL_SDVO_LVDS_CLONE_BIT); + crt->base.crtc_mask = (1 << 0) | (1 << 1); connector->interlace_allowed = 1; connector->doublescan_allowed = 0; - drm_encoder_helper_add(&intel_encoder->base, &intel_crt_helper_funcs); + drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs); drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); drm_sysfs_connector_add(connector); @@ -577,5 +589,22 @@ void intel_crt_init(struct drm_device *dev) else connector->polled = DRM_CONNECTOR_POLL_CONNECT; + /* + * Configure the automatic hotplug detection stuff + */ + crt->force_hotplug_required = 0; + if (HAS_PCH_SPLIT(dev)) { + u32 adpa; + + adpa = I915_READ(PCH_ADPA); + adpa &= ~ADPA_CRT_HOTPLUG_MASK; + adpa |= ADPA_HOTPLUG_BITS; + I915_WRITE(PCH_ADPA, adpa); + POSTING_READ(PCH_ADPA); + + DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa); + crt->force_hotplug_required = 1; + } + dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 990f065374b..d9b7092439e 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1611,6 +1611,18 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, wait_event(dev_priv->pending_flip_queue, atomic_read(&obj_priv->pending_flip) == 0); + + /* Big Hammer, we also need to ensure that any pending + * MI_WAIT_FOR_EVENT inside a user batch buffer on the + * current scanout is retired before unpinning the old + * framebuffer. 
+ */ + ret = i915_gem_object_flush_gpu(obj_priv, false); + if (ret) { + i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); + mutex_unlock(&dev->struct_mutex); + return ret; + } } ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, @@ -1681,6 +1693,37 @@ static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock) udelay(500); } +static void intel_fdi_normal_train(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + u32 reg, temp; + + /* enable normal train */ + reg = FDI_TX_CTL(pipe); + temp = I915_READ(reg); + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; + I915_WRITE(reg, temp); + + reg = FDI_RX_CTL(pipe); + temp = I915_READ(reg); + if (HAS_PCH_CPT(dev)) { + temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; + temp |= FDI_LINK_TRAIN_NORMAL_CPT; + } else { + temp &= ~FDI_LINK_TRAIN_NONE; + temp |= FDI_LINK_TRAIN_NONE; + } + I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); + + /* wait one idle pattern time */ + POSTING_READ(reg); + udelay(1000); +} + /* The FDI link training functions for ILK/Ibexpeak. */ static void ironlake_fdi_link_train(struct drm_crtc *crtc) { @@ -1767,27 +1810,6 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) DRM_DEBUG_KMS("FDI train done\n"); - /* enable normal train */ - reg = FDI_TX_CTL(pipe); - temp = I915_READ(reg); - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; - I915_WRITE(reg, temp); - - reg = FDI_RX_CTL(pipe); - temp = I915_READ(reg); - if (HAS_PCH_CPT(dev)) { - temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; - temp |= FDI_LINK_TRAIN_NORMAL_CPT; - } else { - temp &= ~FDI_LINK_TRAIN_NONE; - temp |= FDI_LINK_TRAIN_NONE; - } - I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); - - /* wait one idle pattern time */ - POSTING_READ(reg); - udelay(1000); } static const int const snb_b_fdi_train_param [] = { @@ -2090,15 +2112,19 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); + intel_fdi_normal_train(crtc); + /* For PCH DP, enable TRANS_DP_CTL */ if (HAS_PCH_CPT(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { reg = TRANS_DP_CTL(pipe); temp = I915_READ(reg); temp &= ~(TRANS_DP_PORT_SEL_MASK | - TRANS_DP_SYNC_MASK); + TRANS_DP_SYNC_MASK | + TRANS_DP_BPC_MASK); temp |= (TRANS_DP_OUTPUT_ENABLE | TRANS_DP_ENH_FRAMING); + temp |= TRANS_DP_8BPC; if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; @@ -2200,9 +2226,10 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) udelay(100); /* Ironlake workaround, disable clock pointer after downing FDI */ - I915_WRITE(FDI_RX_CHICKEN(pipe), - I915_READ(FDI_RX_CHICKEN(pipe) & - ~FDI_RX_PHASE_SYNC_POINTER_ENABLE)); + if (HAS_PCH_IBX(dev)) + I915_WRITE(FDI_RX_CHICKEN(pipe), + I915_READ(FDI_RX_CHICKEN(pipe) & + ~FDI_RX_PHASE_SYNC_POINTER_ENABLE)); /* still set train pattern 1 */ reg = FDI_TX_CTL(pipe); @@ -2687,27 +2714,19 @@ fdi_reduce_ratio(u32 *num, u32 *den) } } -#define DATA_N 0x800000 -#define LINK_N 0x80000 - static void ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock, int link_clock, struct fdi_m_n *m_n) { - u64 temp; - m_n->tu = 64; /* default size */ - temp = (u64) DATA_N * pixel_clock; - temp = div_u64(temp, link_clock); - m_n->gmch_m = div_u64(temp * bits_per_pixel, 
nlanes); - m_n->gmch_m >>= 3; /* convert to bytes_per_pixel */ - m_n->gmch_n = DATA_N; + /* BUG_ON(pixel_clock > INT_MAX / 36); */ + m_n->gmch_m = bits_per_pixel * pixel_clock; + m_n->gmch_n = link_clock * nlanes * 8; fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); - temp = (u64) LINK_N * pixel_clock; - m_n->link_m = div_u64(temp, link_clock); - m_n->link_n = LINK_N; + m_n->link_m = pixel_clock; + m_n->link_n = link_clock; fdi_reduce_ratio(&m_n->link_m, &m_n->link_n); } @@ -3691,6 +3710,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* FDI link */ if (HAS_PCH_SPLIT(dev)) { + int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); int lane = 0, link_bw, bpp; /* CPU eDP doesn't require FDI link, so just set DP M/N according to current link config */ @@ -3774,6 +3794,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, intel_crtc->fdi_lanes = lane; + if (pixel_multiplier > 1) + link_bw *= pixel_multiplier; ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); } @@ -5211,6 +5233,55 @@ static const struct drm_crtc_funcs intel_crtc_funcs = { .page_flip = intel_crtc_page_flip, }; +static void intel_sanitize_modesetting(struct drm_device *dev, + int pipe, int plane) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 reg, val; + + if (HAS_PCH_SPLIT(dev)) + return; + + /* Who knows what state these registers were left in by the BIOS or + * grub? + * + * If we leave the registers in a conflicting state (e.g. with the + * display plane reading from the other pipe than the one we intend + * to use) then when we attempt to teardown the active mode, we will + * not disable the pipes and planes in the correct order -- leaving + * a plane reading from a disabled pipe and possibly leading to + * undefined behaviour. + */ + + reg = DSPCNTR(plane); + val = I915_READ(reg); + + if ((val & DISPLAY_PLANE_ENABLE) == 0) + return; + if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe) + return; + + /* This display plane is active and attached to the other CPU pipe. */ + pipe = !pipe; + + /* Disable the plane and wait for it to stop reading from the pipe. */ + I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); + intel_flush_display_plane(dev, plane); + + if (IS_GEN2(dev)) + intel_wait_for_vblank(dev, pipe); + + if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) + return; + + /* Switch off the pipe. 
*/ + reg = PIPECONF(pipe); + val = I915_READ(reg); + if (val & PIPECONF_ENABLE) { + I915_WRITE(reg, val & ~PIPECONF_ENABLE); + intel_wait_for_pipe_off(dev, pipe); + } +} static void intel_crtc_init(struct drm_device *dev, int pipe) { @@ -5262,6 +5333,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer, (unsigned long)intel_crtc); + + intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); } int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, @@ -5311,9 +5384,14 @@ static void intel_setup_outputs(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *encoder; bool dpd_is_edp = false; + bool has_lvds = false; if (IS_MOBILE(dev) && !IS_I830(dev)) - intel_lvds_init(dev); + has_lvds = intel_lvds_init(dev); + if (!has_lvds && !HAS_PCH_SPLIT(dev)) { + /* disable the panel fitter on everything but LVDS */ + I915_WRITE(PFIT_CONTROL, 0); + } if (HAS_PCH_SPLIT(dev)) { dpd_is_edp = intel_dpd_is_edp(dev); @@ -5581,20 +5659,19 @@ void ironlake_enable_drps(struct drm_device *dev) fmin = (rgvmodectl & MEMMODE_FMIN_MASK); fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT; - fstart = fmax; vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT; - dev_priv->fmax = fstart; /* IPS callback will increase this */ + dev_priv->fmax = fmax; /* IPS callback will increase this */ dev_priv->fstart = fstart; - dev_priv->max_delay = fmax; + dev_priv->max_delay = fstart; dev_priv->min_delay = fmin; dev_priv->cur_delay = fstart; - DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin, - fstart); + DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", + fmax, fmin, fstart); I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 891f4f1d63b..df648cb4c29 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -584,17 +584,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, mode->clock = dev_priv->panel_fixed_mode->clock; } - /* Just use VBT values for eDP */ - if (is_edp(intel_dp)) { - intel_dp->lane_count = dev_priv->edp.lanes; - intel_dp->link_bw = dev_priv->edp.rate; - adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); - DRM_DEBUG_KMS("eDP link bw %02x lane count %d clock %d\n", - intel_dp->link_bw, intel_dp->lane_count, - adjusted_mode->clock); - return true; - } - for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { for (clock = 0; clock <= max_clock; clock++) { int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); @@ -613,6 +602,19 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, } } + if (is_edp(intel_dp)) { + /* okay we failed just pick the highest */ + intel_dp->lane_count = max_lane_count; + intel_dp->link_bw = bws[max_clock]; + adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); + DRM_DEBUG_KMS("Force picking display port link bw %02x lane " + "count %d clock %d\n", + intel_dp->link_bw, intel_dp->lane_count, + adjusted_mode->clock); + + return true; + } + return false; } @@ -1087,21 +1089,11 @@ intel_get_adjust_train(struct intel_dp *intel_dp) } static uint32_t -intel_dp_signal_levels(struct intel_dp *intel_dp) +intel_dp_signal_levels(uint8_t train_set, int lane_count) { - struct drm_device *dev = intel_dp->base.base.dev; - struct drm_i915_private 
*dev_priv = dev->dev_private; - uint32_t signal_levels = 0; - u8 train_set = intel_dp->train_set[0]; - u32 vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK; - u32 preemphasis = train_set & DP_TRAIN_PRE_EMPHASIS_MASK; + uint32_t signal_levels = 0; - if (is_edp(intel_dp)) { - vswing = dev_priv->edp.vswing; - preemphasis = dev_priv->edp.preemphasis; - } - - switch (vswing) { + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { case DP_TRAIN_VOLTAGE_SWING_400: default: signal_levels |= DP_VOLTAGE_0_4; @@ -1116,7 +1108,7 @@ intel_dp_signal_levels(struct intel_dp *intel_dp) signal_levels |= DP_VOLTAGE_1_2; break; } - switch (preemphasis) { + switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { case DP_TRAIN_PRE_EMPHASIS_0: default: signal_levels |= DP_PRE_EMPHASIS_0; @@ -1203,18 +1195,6 @@ intel_channel_eq_ok(struct intel_dp *intel_dp) } static bool -intel_dp_aux_handshake_required(struct intel_dp *intel_dp) -{ - struct drm_device *dev = intel_dp->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - - if (is_edp(intel_dp) && dev_priv->no_aux_handshake) - return false; - - return true; -} - -static bool intel_dp_set_link_train(struct intel_dp *intel_dp, uint32_t dp_reg_value, uint8_t dp_train_pat) @@ -1226,9 +1206,6 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, I915_WRITE(intel_dp->output_reg, dp_reg_value); POSTING_READ(intel_dp->output_reg); - if (!intel_dp_aux_handshake_required(intel_dp)) - return true; - intel_dp_aux_native_write_1(intel_dp, DP_TRAINING_PATTERN_SET, dp_train_pat); @@ -1261,11 +1238,10 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) POSTING_READ(intel_dp->output_reg); intel_wait_for_vblank(dev, intel_crtc->pipe); - if (intel_dp_aux_handshake_required(intel_dp)) - /* Write the link configuration data */ - intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, - intel_dp->link_configuration, - DP_LINK_CONFIGURATION_SIZE); + /* Write the link configuration data */ + intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, + intel_dp->link_configuration, + DP_LINK_CONFIGURATION_SIZE); DP |= DP_PORT_EN; if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) @@ -1283,7 +1259,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; } else { - signal_levels = intel_dp_signal_levels(intel_dp); + signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } @@ -1297,37 +1273,33 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) break; /* Set training pattern 1 */ - udelay(500); - if (intel_dp_aux_handshake_required(intel_dp)) { + udelay(100); + if (!intel_dp_get_link_status(intel_dp)) break; - } else { - if (!intel_dp_get_link_status(intel_dp)) - break; - if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { - clock_recovery = true; - break; - } + if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { + clock_recovery = true; + break; + } - /* Check to see if we've tried the max voltage */ - for (i = 0; i < intel_dp->lane_count; i++) - if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) - break; - if (i == intel_dp->lane_count) + /* Check to see if we've tried the max voltage */ + for (i = 0; i < intel_dp->lane_count; i++) + if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) break; + if (i == intel_dp->lane_count) + break; - /* Check to see if we've tried the same voltage 5 
times */ - if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { - ++tries; - if (tries == 5) - break; - } else - tries = 0; - voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; + /* Check to see if we've tried the same voltage 5 times */ + if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { + ++tries; + if (tries == 5) + break; + } else + tries = 0; + voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; - /* Compute new intel_dp->train_set as requested by target */ - intel_get_adjust_train(intel_dp); - } + /* Compute new intel_dp->train_set as requested by target */ + intel_get_adjust_train(intel_dp); } intel_dp->DP = DP; @@ -1354,7 +1326,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; } else { - signal_levels = intel_dp_signal_levels(intel_dp); + signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; } @@ -1368,28 +1340,24 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) DP_TRAINING_PATTERN_2)) break; - udelay(500); - - if (!intel_dp_aux_handshake_required(intel_dp)) { + udelay(400); + if (!intel_dp_get_link_status(intel_dp)) break; - } else { - if (!intel_dp_get_link_status(intel_dp)) - break; - if (intel_channel_eq_ok(intel_dp)) { - channel_eq = true; - break; - } + if (intel_channel_eq_ok(intel_dp)) { + channel_eq = true; + break; + } - /* Try 5 times */ - if (tries > 5) - break; + /* Try 5 times */ + if (tries > 5) + break; - /* Compute new intel_dp->train_set as requested by target */ - intel_get_adjust_train(intel_dp); - ++tries; - } + /* Compute new intel_dp->train_set as requested by target */ + intel_get_adjust_train(intel_dp); + ++tries; } + if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) reg = DP | DP_LINK_TRAIN_OFF_CPT; else @@ -1408,6 +1376,9 @@ intel_dp_link_down(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = dev->dev_private; uint32_t DP = intel_dp->DP; + if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0) + return; + DRM_DEBUG_KMS("\n"); if (is_edp(intel_dp)) { @@ -1430,6 +1401,28 @@ intel_dp_link_down(struct intel_dp *intel_dp) if (is_edp(intel_dp)) DP |= DP_LINK_TRAIN_OFF; + + if (!HAS_PCH_CPT(dev) && + I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { + struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); + /* Hardware workaround: leaving our transcoder select + * set to transcoder B while it's off will prevent the + * corresponding HDMI output on transcoder A. + * + * Combine this with another hardware workaround: + * transcoder select bit can only be cleared while the + * port is enabled. + */ + DP &= ~DP_PIPEB_SELECT; + I915_WRITE(intel_dp->output_reg, DP); + + /* Changes to enable or select take place the vblank + * after being written. 
+ */ + intel_wait_for_vblank(intel_dp->base.base.dev, + intel_crtc->pipe); + } + I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); POSTING_READ(intel_dp->output_reg); } @@ -1517,7 +1510,7 @@ g4x_dp_detect(struct intel_dp *intel_dp) status = connector_status_connected; } - return bit; + return status; } /** diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 9af9f86a876..e52c6125bb1 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -237,7 +237,7 @@ extern bool intel_sdvo_init(struct drm_device *dev, int output_device); extern void intel_dvo_init(struct drm_device *dev); extern void intel_tv_init(struct drm_device *dev); extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj); -extern void intel_lvds_init(struct drm_device *dev); +extern bool intel_lvds_init(struct drm_device *dev); extern void intel_dp_init(struct drm_device *dev, int dp_reg); void intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, @@ -296,6 +296,7 @@ extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, extern void intel_init_clock_gating(struct drm_device *dev); extern void ironlake_enable_drps(struct drm_device *dev); extern void ironlake_disable_drps(struct drm_device *dev); +extern void intel_init_emon(struct drm_device *dev); extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj, diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 2be4f728ed0..3dba086e7ee 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -160,7 +160,7 @@ intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin) }; struct intel_gpio *gpio; - if (pin < 1 || pin > 7) + if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin]) return NULL; gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL); @@ -172,7 +172,8 @@ intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin) gpio->reg += PCH_GPIOA - GPIOA; gpio->dev_priv = dev_priv; - snprintf(gpio->adapter.name, I2C_NAME_SIZE, "GPIO%c", "?BACDEF?"[pin]); + snprintf(gpio->adapter.name, sizeof(gpio->adapter.name), + "i915 GPIO%c", "?BACDE?F"[pin]); gpio->adapter.owner = THIS_MODULE; gpio->adapter.algo_data = &gpio->algo; gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev; @@ -349,7 +350,7 @@ int intel_setup_gmbus(struct drm_device *dev) "panel", "dpc", "dpb", - "reserved" + "reserved", "dpd", }; struct drm_i915_private *dev_priv = dev->dev_private; @@ -366,8 +367,8 @@ int intel_setup_gmbus(struct drm_device *dev) bus->adapter.owner = THIS_MODULE; bus->adapter.class = I2C_CLASS_DDC; snprintf(bus->adapter.name, - I2C_NAME_SIZE, - "gmbus %s", + sizeof(bus->adapter.name), + "i915 gmbus %s", names[i]); bus->adapter.dev.parent = &dev->pdev->dev; diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index f1a649990ea..25bcedf386f 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -68,7 +68,7 @@ static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector) /** * Sets the power state for the panel. 
*/ -static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on) +static void intel_lvds_enable(struct intel_lvds *intel_lvds) { struct drm_device *dev = intel_lvds->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -82,26 +82,61 @@ static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on) lvds_reg = LVDS; } - if (on) { - I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); - I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); - intel_panel_set_backlight(dev, dev_priv->backlight_level); - } else { - dev_priv->backlight_level = intel_panel_get_backlight(dev); - - intel_panel_set_backlight(dev, 0); - I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); + I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); - if (intel_lvds->pfit_control) { - if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) - DRM_ERROR("timed out waiting for panel to power off\n"); - I915_WRITE(PFIT_CONTROL, 0); - intel_lvds->pfit_control = 0; + if (intel_lvds->pfit_dirty) { + /* + * Enable automatic panel scaling so that non-native modes + * fill the screen. The panel fitter should only be + * adjusted whilst the pipe is disabled, according to + * register description and PRM. + */ + DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", + intel_lvds->pfit_control, + intel_lvds->pfit_pgm_ratios); + if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) { + DRM_ERROR("timed out waiting for panel to power off\n"); + } else { + I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); + I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); intel_lvds->pfit_dirty = false; } + } + + I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); + POSTING_READ(lvds_reg); + + intel_panel_set_backlight(dev, dev_priv->backlight_level); +} + +static void intel_lvds_disable(struct intel_lvds *intel_lvds) +{ + struct drm_device *dev = intel_lvds->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 ctl_reg, lvds_reg; + + if (HAS_PCH_SPLIT(dev)) { + ctl_reg = PCH_PP_CONTROL; + lvds_reg = PCH_LVDS; + } else { + ctl_reg = PP_CONTROL; + lvds_reg = LVDS; + } + + dev_priv->backlight_level = intel_panel_get_backlight(dev); + intel_panel_set_backlight(dev, 0); - I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); + I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); + + if (intel_lvds->pfit_control) { + if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) + DRM_ERROR("timed out waiting for panel to power off\n"); + + I915_WRITE(PFIT_CONTROL, 0); + intel_lvds->pfit_dirty = true; } + + I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); POSTING_READ(lvds_reg); } @@ -110,9 +145,9 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode) struct intel_lvds *intel_lvds = to_intel_lvds(encoder); if (mode == DRM_MODE_DPMS_ON) - intel_lvds_set_power(intel_lvds, true); + intel_lvds_enable(intel_lvds); else - intel_lvds_set_power(intel_lvds, false); + intel_lvds_disable(intel_lvds); /* XXX: We never power down the LVDS pairs. */ } @@ -411,43 +446,18 @@ static void intel_lvds_commit(struct drm_encoder *encoder) /* Always do a full power on as we do not know what state * we were left in. 
*/ - intel_lvds_set_power(intel_lvds, true); + intel_lvds_enable(intel_lvds); } static void intel_lvds_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - struct drm_device *dev = encoder->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_lvds *intel_lvds = to_intel_lvds(encoder); - /* * The LVDS pin pair will already have been turned on in the * intel_crtc_mode_set since it has a large impact on the DPLL * settings. */ - - if (HAS_PCH_SPLIT(dev)) - return; - - if (!intel_lvds->pfit_dirty) - return; - - /* - * Enable automatic panel scaling so that non-native modes fill the - * screen. Should be enabled before the pipe is enabled, according to - * register description and PRM. - */ - DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", - intel_lvds->pfit_control, - intel_lvds->pfit_pgm_ratios); - if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) - DRM_ERROR("timed out waiting for panel to power off\n"); - - I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); - I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); - intel_lvds->pfit_dirty = false; } /** @@ -481,11 +491,8 @@ static int intel_lvds_get_modes(struct drm_connector *connector) struct drm_device *dev = connector->dev; struct drm_display_mode *mode; - if (intel_lvds->edid) { - drm_mode_connector_update_edid_property(connector, - intel_lvds->edid); + if (intel_lvds->edid) return drm_add_edid_modes(connector, intel_lvds->edid); - } mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode); if (mode == 0) @@ -840,7 +847,7 @@ static bool intel_lvds_ddc_probe(struct drm_device *dev, u8 pin) * Create the connector, register the LVDS DDC bus, and try to figure out what * modes we can display on the LVDS panel (if present). 
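 *
 * The return type changes below from void to bool: true when an LVDS
 * connector is actually registered, false on every skipped or failed
 * path (DMI quirk, no VBT entry, eDP taking precedence, failed DDC
 * probe, allocation failure, or no usable modes found).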
*/ -void intel_lvds_init(struct drm_device *dev) +bool intel_lvds_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_lvds *intel_lvds; @@ -856,37 +863,37 @@ void intel_lvds_init(struct drm_device *dev) /* Skip init on machines we know falsely report LVDS */ if (dmi_check_system(intel_no_lvds)) - return; + return false; pin = GMBUS_PORT_PANEL; if (!lvds_is_present_in_vbt(dev, &pin)) { DRM_DEBUG_KMS("LVDS is not present in VBT\n"); - return; + return false; } if (HAS_PCH_SPLIT(dev)) { if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) - return; + return false; if (dev_priv->edp.support) { DRM_DEBUG_KMS("disable LVDS for eDP support\n"); - return; + return false; } } if (!intel_lvds_ddc_probe(dev, pin)) { DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n"); - return; + return false; } intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL); if (!intel_lvds) { - return; + return false; } intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); if (!intel_connector) { kfree(intel_lvds); - return; + return false; } if (!HAS_PCH_SPLIT(dev)) { @@ -939,7 +946,16 @@ void intel_lvds_init(struct drm_device *dev) */ intel_lvds->edid = drm_get_edid(connector, &dev_priv->gmbus[pin].adapter); - + if (intel_lvds->edid) { + if (drm_add_edid_modes(connector, + intel_lvds->edid)) { + drm_mode_connector_update_edid_property(connector, + intel_lvds->edid); + } else { + kfree(intel_lvds->edid); + intel_lvds->edid = NULL; + } + } if (!intel_lvds->edid) { /* Didn't get an EDID, so * Set wide sync ranges so we get all modes @@ -1020,7 +1036,7 @@ out: /* keep the LVDS connector */ dev_priv->int_lvds_connector = connector; drm_sysfs_connector_add(connector); - return; + return true; failed: DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); @@ -1028,4 +1044,5 @@ failed: drm_encoder_cleanup(encoder); kfree(intel_lvds); kfree(intel_connector); + return false; } diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 917c7dc3cd6..9b0d9a867ae 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -512,6 +512,6 @@ int intel_opregion_setup(struct drm_device *dev) return 0; err_out: - iounmap(opregion->header); + iounmap(base); return err; } diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index afb96d25219..02ff0a481f4 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -946,7 +946,9 @@ static int check_overlay_src(struct drm_device *dev, { int uv_hscale = uv_hsubsampling(rec->flags); int uv_vscale = uv_vsubsampling(rec->flags); - u32 stride_mask, depth, tmp; + u32 stride_mask; + int depth; + u32 tmp; /* check src dimensions */ if (IS_845G(dev) || IS_I830(dev)) { diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 09f2dc353ae..89a65be8a3f 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -156,28 +156,30 @@ static int init_ring_common(struct drm_device *dev, /* G45 ring initialization fails to reset head to zero */ if (head != 0) { - DRM_ERROR("%s head not reset to zero " - "ctl %08x head %08x tail %08x start %08x\n", - ring->name, - I915_READ_CTL(ring), - I915_READ_HEAD(ring), - I915_READ_TAIL(ring), - I915_READ_START(ring)); + DRM_DEBUG_KMS("%s head not reset to zero " + "ctl %08x head %08x tail %08x start %08x\n", + ring->name, + I915_READ_CTL(ring), + I915_READ_HEAD(ring), + 
I915_READ_TAIL(ring), + I915_READ_START(ring)); I915_WRITE_HEAD(ring, 0); - DRM_ERROR("%s head forced to zero " - "ctl %08x head %08x tail %08x start %08x\n", - ring->name, - I915_READ_CTL(ring), - I915_READ_HEAD(ring), - I915_READ_TAIL(ring), - I915_READ_START(ring)); + if (I915_READ_HEAD(ring) & HEAD_ADDR) { + DRM_ERROR("failed to set %s head to zero " + "ctl %08x head %08x tail %08x start %08x\n", + ring->name, + I915_READ_CTL(ring), + I915_READ_HEAD(ring), + I915_READ_TAIL(ring), + I915_READ_START(ring)); + } } I915_WRITE_CTL(ring, ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) - | RING_NO_REPORT | RING_VALID); + | RING_REPORT_64K | RING_VALID); head = I915_READ_HEAD(ring) & HEAD_ADDR; /* If the head is still not zero, the ring is dead */ @@ -654,6 +656,10 @@ void intel_cleanup_ring_buffer(struct drm_device *dev, i915_gem_object_unpin(ring->gem_object); drm_gem_object_unreference(ring->gem_object); ring->gem_object = NULL; + + if (ring->cleanup) + ring->cleanup(ring); + cleanup_status_page(dev, ring); } @@ -688,6 +694,17 @@ int intel_wait_ring_buffer(struct drm_device *dev, { unsigned long end; drm_i915_private_t *dev_priv = dev->dev_private; + u32 head; + + head = intel_read_status_page(ring, 4); + if (head) { + ring->head = head & HEAD_ADDR; + ring->space = ring->head - (ring->tail + 8); + if (ring->space < 0) + ring->space += ring->size; + if (ring->space >= n) + return 0; + } trace_i915_ring_wait_begin (dev); end = jiffies + 3 * HZ; @@ -854,19 +871,125 @@ blt_ring_put_user_irq(struct drm_device *dev, /* do nothing */ } + +/* Workaround for some stepping of SNB, + * each time when BLT engine ring tail moved, + * the first command in the ring to be parsed + * should be MI_BATCH_BUFFER_START + */ +#define NEED_BLT_WORKAROUND(dev) \ + (IS_GEN6(dev) && (dev->pdev->revision < 8)) + +static inline struct drm_i915_gem_object * +to_blt_workaround(struct intel_ring_buffer *ring) +{ + return ring->private; +} + +static int blt_ring_init(struct drm_device *dev, + struct intel_ring_buffer *ring) +{ + if (NEED_BLT_WORKAROUND(dev)) { + struct drm_i915_gem_object *obj; + u32 __iomem *ptr; + int ret; + + obj = to_intel_bo(i915_gem_alloc_object(dev, 4096)); + if (obj == NULL) + return -ENOMEM; + + ret = i915_gem_object_pin(&obj->base, 4096); + if (ret) { + drm_gem_object_unreference(&obj->base); + return ret; + } + + ptr = kmap(obj->pages[0]); + iowrite32(MI_BATCH_BUFFER_END, ptr); + iowrite32(MI_NOOP, ptr+1); + kunmap(obj->pages[0]); + + ret = i915_gem_object_set_to_gtt_domain(&obj->base, false); + if (ret) { + i915_gem_object_unpin(&obj->base); + drm_gem_object_unreference(&obj->base); + return ret; + } + + ring->private = obj; + } + + return init_ring_common(dev, ring); +} + +static void blt_ring_begin(struct drm_device *dev, + struct intel_ring_buffer *ring, + int num_dwords) +{ + if (ring->private) { + intel_ring_begin(dev, ring, num_dwords+2); + intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START); + intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset); + } else + intel_ring_begin(dev, ring, 4); +} + +static void blt_ring_flush(struct drm_device *dev, + struct intel_ring_buffer *ring, + u32 invalidate_domains, + u32 flush_domains) +{ + blt_ring_begin(dev, ring, 4); + intel_ring_emit(dev, ring, MI_FLUSH_DW); + intel_ring_emit(dev, ring, 0); + intel_ring_emit(dev, ring, 0); + intel_ring_emit(dev, ring, 0); + intel_ring_advance(dev, ring); +} + +static u32 +blt_ring_add_request(struct drm_device *dev, + struct intel_ring_buffer *ring, + u32 flush_domains) +{ + u32 seqno = 
i915_gem_get_seqno(dev); + + blt_ring_begin(dev, ring, 4); + intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX); + intel_ring_emit(dev, ring, + I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + intel_ring_emit(dev, ring, seqno); + intel_ring_emit(dev, ring, MI_USER_INTERRUPT); + intel_ring_advance(dev, ring); + + DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); + return seqno; +} + +static void blt_ring_cleanup(struct intel_ring_buffer *ring) +{ + if (!ring->private) + return; + + i915_gem_object_unpin(ring->private); + drm_gem_object_unreference(ring->private); + ring->private = NULL; +} + static const struct intel_ring_buffer gen6_blt_ring = { .name = "blt ring", .id = RING_BLT, .mmio_base = BLT_RING_BASE, .size = 32 * PAGE_SIZE, - .init = init_ring_common, + .init = blt_ring_init, .write_tail = ring_write_tail, - .flush = gen6_ring_flush, - .add_request = ring_add_request, + .flush = blt_ring_flush, + .add_request = blt_ring_add_request, .get_seqno = ring_status_page_get_seqno, .user_irq_get = blt_ring_get_user_irq, .user_irq_put = blt_ring_put_user_irq, .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, + .cleanup = blt_ring_cleanup, }; int intel_init_render_ring_buffer(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index a05aff0e576..3126c268198 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -63,6 +63,7 @@ struct intel_ring_buffer { struct drm_i915_gem_execbuffer2 *exec, struct drm_clip_rect *cliprects, uint64_t exec_offset); + void (*cleanup)(struct intel_ring_buffer *ring); /** * List of objects currently involved in rendering from the @@ -98,6 +99,8 @@ struct intel_ring_buffer { wait_queue_head_t irq_queue; drm_local_map_t map; + + void *private; }; static inline u32 diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index de158b76bcd..d97e6cb52d3 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -107,7 +107,8 @@ struct intel_sdvo { * This is set if we treat the device as HDMI, instead of DVI. 
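 *
 * The has_hdmi_monitor and has_hdmi_audio flags introduced below are
 * distinct from this one: is_hdmi records that the output was set up
 * as HDMI-capable, while the new flags track what the attached sink's
 * EDID actually reports (via drm_detect_hdmi_monitor() and
 * drm_detect_monitor_audio(), optionally overridden by the
 * force_audio property).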
*/ bool is_hdmi; - bool has_audio; + bool has_hdmi_monitor; + bool has_hdmi_audio; /** * This is set if we detect output of sdvo device as LVDS and @@ -1023,7 +1024,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, if (!intel_sdvo_set_target_input(intel_sdvo)) return; - if (intel_sdvo->is_hdmi && + if (intel_sdvo->has_hdmi_monitor && !intel_sdvo_set_avi_infoframe(intel_sdvo)) return; @@ -1063,7 +1064,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, } if (intel_crtc->pipe == 1) sdvox |= SDVO_PIPE_B_SELECT; - if (intel_sdvo->has_audio) + if (intel_sdvo->has_hdmi_audio) sdvox |= SDVO_AUDIO_ENABLE; if (INTEL_INFO(dev)->gen >= 4) { @@ -1295,55 +1296,14 @@ intel_sdvo_get_edid(struct drm_connector *connector) return drm_get_edid(connector, &sdvo->ddc); } -static struct drm_connector * -intel_find_analog_connector(struct drm_device *dev) -{ - struct drm_connector *connector; - struct intel_sdvo *encoder; - - list_for_each_entry(encoder, - &dev->mode_config.encoder_list, - base.base.head) { - if (encoder->base.type == INTEL_OUTPUT_ANALOG) { - list_for_each_entry(connector, - &dev->mode_config.connector_list, - head) { - if (&encoder->base == - intel_attached_encoder(connector)) - return connector; - } - } - } - - return NULL; -} - -static int -intel_analog_is_connected(struct drm_device *dev) -{ - struct drm_connector *analog_connector; - - analog_connector = intel_find_analog_connector(dev); - if (!analog_connector) - return false; - - if (analog_connector->funcs->detect(analog_connector, false) == - connector_status_disconnected) - return false; - - return true; -} - /* Mac mini hack -- use the same DDC as the analog connector */ static struct edid * intel_sdvo_get_analog_edid(struct drm_connector *connector) { struct drm_i915_private *dev_priv = connector->dev->dev_private; - if (!intel_analog_is_connected(connector->dev)) - return NULL; - - return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); + return drm_get_edid(connector, + &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); } enum drm_connector_status @@ -1388,8 +1348,10 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) /* DDC bus is shared, match EDID to connector type */ if (edid->input & DRM_EDID_INPUT_DIGITAL) { status = connector_status_connected; - intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid); - intel_sdvo->has_audio = drm_detect_monitor_audio(edid); + if (intel_sdvo->is_hdmi) { + intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); + intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); + } } connector->display_info.raw_edid = NULL; kfree(edid); @@ -1398,7 +1360,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) if (status == connector_status_connected) { struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); if (intel_sdvo_connector->force_audio) - intel_sdvo->has_audio = intel_sdvo_connector->force_audio > 0; + intel_sdvo->has_hdmi_audio = intel_sdvo_connector->force_audio > 0; } return status; @@ -1415,10 +1377,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0)) return connector_status_unknown; - if (intel_sdvo->is_tv) { - /* add 30ms delay when the output type is SDVO-TV */ + + /* add 30ms delay when the output type might be TV */ + if (intel_sdvo->caps.output_flags & + (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0)) mdelay(30); - } + if (!intel_sdvo_read_response(intel_sdvo, &response, 2)) return 
connector_status_unknown; @@ -1472,8 +1436,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) edid = intel_sdvo_get_analog_edid(connector); if (edid != NULL) { - drm_mode_connector_update_edid_property(connector, edid); - drm_add_edid_modes(connector, edid); + if (edid->input & DRM_EDID_INPUT_DIGITAL) { + drm_mode_connector_update_edid_property(connector, edid); + drm_add_edid_modes(connector, edid); + } connector->display_info.raw_edid = NULL; kfree(edid); } @@ -1713,12 +1679,12 @@ intel_sdvo_set_property(struct drm_connector *connector, intel_sdvo_connector->force_audio = val; - if (val > 0 && intel_sdvo->has_audio) + if (val > 0 && intel_sdvo->has_hdmi_audio) return 0; - if (val < 0 && !intel_sdvo->has_audio) + if (val < 0 && !intel_sdvo->has_hdmi_audio) return 0; - intel_sdvo->has_audio = val > 0; + intel_sdvo->has_hdmi_audio = val > 0; goto done; } @@ -2070,6 +2036,8 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) intel_sdvo_set_colorimetry(intel_sdvo, SDVO_COLORIMETRY_RGB256); connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; + + intel_sdvo_add_hdmi_properties(intel_sdvo_connector); intel_sdvo->is_hdmi = true; } intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | @@ -2077,8 +2045,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); - intel_sdvo_add_hdmi_properties(intel_sdvo_connector); - return true; } diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index 406228f4a2a..b14c8111057 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -31,6 +31,7 @@ */ #include <linux/backlight.h> +#include <linux/acpi.h> #include "drmP.h" #include "nouveau_drv.h" @@ -136,6 +137,14 @@ int nouveau_backlight_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; +#ifdef CONFIG_ACPI + if (acpi_video_backlight_support()) { + NV_INFO(dev, "ACPI backlight interface available, " + "not registering our own\n"); + return 0; + } +#endif + switch (dev_priv->card_type) { case NV_40: return nouveau_nv40_backlight_init(dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 5f21030a293..b2293576f27 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -6829,7 +6829,7 @@ nouveau_bios_posted(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; unsigned htotal; - if (dev_priv->chipset >= NV_50) { + if (dev_priv->card_type >= NV_50) { if (NVReadVgaCrtc(dev, 0, 0x00) == 0 && NVReadVgaCrtc(dev, 0, 0x1a) == 0) return false; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 80353e2b840..c41e1c200ef 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -143,8 +143,10 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, nvbo->no_vm = no_vm; nvbo->tile_mode = tile_mode; nvbo->tile_flags = tile_flags; + nvbo->bo.bdev = &dev_priv->ttm.bdev; - nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size); + nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo), + &align, &size); align >>= PAGE_SHIFT; nouveau_bo_placement_set(nvbo, flags, 0); @@ -176,6 +178,31 @@ set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags) pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags; } +static void 
+set_placement_range(struct nouveau_bo *nvbo, uint32_t type) +{ + struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); + + if (dev_priv->card_type == NV_10 && + nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) { + /* + * Make sure that the color and depth buffers are handled + * by independent memory controller units. Up to a 9x + * speed up when alpha-blending and depth-test are enabled + * at the same time. + */ + int vram_pages = dev_priv->vram_size >> PAGE_SHIFT; + + if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) { + nvbo->placement.fpfn = vram_pages / 2; + nvbo->placement.lpfn = ~0; + } else { + nvbo->placement.fpfn = 0; + nvbo->placement.lpfn = vram_pages / 2; + } + } +} + void nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy) { @@ -190,6 +217,8 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy) pl->busy_placement = nvbo->busy_placements; set_placement_list(nvbo->busy_placements, &pl->num_busy_placement, type | busy, flags); + + set_placement_range(nvbo, type); } int @@ -525,7 +554,8 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, stride = 16 * 4; height = amount / stride; - if (new_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) { + if (new_mem->mem_type == TTM_PL_VRAM && + nouveau_bo_tile_layout(nvbo)) { ret = RING_SPACE(chan, 8); if (ret) return ret; @@ -546,7 +576,8 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, BEGIN_RING(chan, NvSubM2MF, 0x0200, 1); OUT_RING (chan, 1); } - if (old_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) { + if (old_mem->mem_type == TTM_PL_VRAM && + nouveau_bo_tile_layout(nvbo)) { ret = RING_SPACE(chan, 8); if (ret) return ret; @@ -753,7 +784,8 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, if (dev_priv->card_type == NV_50) { ret = nv50_mem_vm_bind_linear(dev, offset + dev_priv->vm_vram_base, - new_mem->size, nvbo->tile_flags, + new_mem->size, + nouveau_bo_tile_layout(nvbo), offset); if (ret) return ret; @@ -894,7 +926,8 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) * nothing to do here. 
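 *
 * Note that this check now goes through nouveau_bo_tile_layout(), so
 * only the NOUVEAU_GEM_TILE_LAYOUT_MASK bits of tile_flags decide
 * whether the buffer needs special handling, consistent with the
 * other tile_flags users converted in this patch.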
*/ if (bo->mem.mem_type != TTM_PL_VRAM) { - if (dev_priv->card_type < NV_50 || !nvbo->tile_flags) + if (dev_priv->card_type < NV_50 || + !nouveau_bo_tile_layout(nvbo)) return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 0871495096f..52c356e9a3d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -281,7 +281,7 @@ detect_analog: nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG); if (!nv_encoder && !nouveau_tv_disable) nv_encoder = find_encoder_by_type(connector, OUTPUT_TV); - if (nv_encoder) { + if (nv_encoder && force) { struct drm_encoder *encoder = to_drm_encoder(nv_encoder); struct drm_encoder_helper_funcs *helper = encoder->helper_private; @@ -641,11 +641,28 @@ nouveau_connector_get_modes(struct drm_connector *connector) return ret; } +static unsigned +get_tmds_link_bandwidth(struct drm_connector *connector) +{ + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct drm_nouveau_private *dev_priv = connector->dev->dev_private; + struct dcb_entry *dcb = nv_connector->detected_encoder->dcb; + + if (dcb->location != DCB_LOC_ON_CHIP || + dev_priv->chipset >= 0x46) + return 165000; + else if (dev_priv->chipset >= 0x40) + return 155000; + else if (dev_priv->chipset >= 0x18) + return 135000; + else + return 112000; +} + static int nouveau_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct drm_nouveau_private *dev_priv = connector->dev->dev_private; struct nouveau_connector *nv_connector = nouveau_connector(connector); struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; struct drm_encoder *encoder = to_drm_encoder(nv_encoder); @@ -663,11 +680,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector, max_clock = 400000; break; case OUTPUT_TMDS: - if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) || - !nv_encoder->dcb->duallink_possible) - max_clock = 165000; - else - max_clock = 330000; + max_clock = get_tmds_link_bandwidth(connector); + if (nouveau_duallink && nv_encoder->dcb->duallink_possible) + max_clock *= 2; break; case OUTPUT_ANALOG: max_clock = nv_encoder->dcb->crtconf.maxfreq; @@ -709,44 +724,6 @@ nouveau_connector_best_encoder(struct drm_connector *connector) return NULL; } -void -nouveau_connector_set_polling(struct drm_connector *connector) -{ - struct drm_device *dev = connector->dev; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct drm_crtc *crtc; - bool spare_crtc = false; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) - spare_crtc |= !crtc->enabled; - - connector->polled = 0; - - switch (connector->connector_type) { - case DRM_MODE_CONNECTOR_VGA: - case DRM_MODE_CONNECTOR_TV: - if (dev_priv->card_type >= NV_50 || - (nv_gf4_disp_arch(dev) && spare_crtc)) - connector->polled = DRM_CONNECTOR_POLL_CONNECT; - break; - - case DRM_MODE_CONNECTOR_DVII: - case DRM_MODE_CONNECTOR_DVID: - case DRM_MODE_CONNECTOR_HDMIA: - case DRM_MODE_CONNECTOR_DisplayPort: - case DRM_MODE_CONNECTOR_eDP: - if (dev_priv->card_type >= NV_50) - connector->polled = DRM_CONNECTOR_POLL_HPD; - else if (connector->connector_type == DRM_MODE_CONNECTOR_DVID || - spare_crtc) - connector->polled = DRM_CONNECTOR_POLL_CONNECT; - break; - - default: - break; - } -} - static const struct drm_connector_helper_funcs nouveau_connector_helper_funcs = { .get_modes = nouveau_connector_get_modes, @@ -872,6 +849,7 @@ nouveau_connector_create(struct drm_device 
*dev, int index) dev->mode_config.scaling_mode_property, nv_connector->scaling_mode); } + connector->polled = DRM_CONNECTOR_POLL_CONNECT; /* fall-through */ case DCB_CONNECTOR_TV_0: case DCB_CONNECTOR_TV_1: @@ -888,11 +866,16 @@ nouveau_connector_create(struct drm_device *dev, int index) dev->mode_config.dithering_mode_property, nv_connector->use_dithering ? DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF); + + if (dcb->type != DCB_CONNECTOR_LVDS) { + if (dev_priv->card_type >= NV_50) + connector->polled = DRM_CONNECTOR_POLL_HPD; + else + connector->polled = DRM_CONNECTOR_POLL_CONNECT; + } break; } - nouveau_connector_set_polling(connector); - drm_sysfs_connector_add(connector); dcb->drm = connector; return dcb->drm; diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h index c21ed6b16f8..711b1e9203a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.h +++ b/drivers/gpu/drm/nouveau/nouveau_connector.h @@ -52,9 +52,6 @@ static inline struct nouveau_connector *nouveau_connector( struct drm_connector * nouveau_connector_create(struct drm_device *, int index); -void -nouveau_connector_set_polling(struct drm_connector *); - int nouveau_connector_bpp(struct drm_connector *); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 3a07e580d27..1c7db64c03b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -100,6 +100,9 @@ struct nouveau_bo { int pin_refcnt; }; +#define nouveau_bo_tile_layout(nvbo) \ + ((nvbo)->tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) + static inline struct nouveau_bo * nouveau_bo(struct ttm_buffer_object *bo) { @@ -304,6 +307,7 @@ struct nouveau_fifo_engine { void (*destroy_context)(struct nouveau_channel *); int (*load_context)(struct nouveau_channel *); int (*unload_context)(struct drm_device *); + void (*tlb_flush)(struct drm_device *dev); }; struct nouveau_pgraph_object_method { @@ -336,6 +340,7 @@ struct nouveau_pgraph_engine { void (*destroy_context)(struct nouveau_channel *); int (*load_context)(struct nouveau_channel *); int (*unload_context)(struct drm_device *); + void (*tlb_flush)(struct drm_device *dev); void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr, uint32_t size, uint32_t pitch); @@ -485,13 +490,13 @@ enum nv04_fp_display_regs { }; struct nv04_crtc_reg { - unsigned char MiscOutReg; /* */ + unsigned char MiscOutReg; uint8_t CRTC[0xa0]; uint8_t CR58[0x10]; uint8_t Sequencer[5]; uint8_t Graphics[9]; uint8_t Attribute[21]; - unsigned char DAC[768]; /* Internal Colorlookuptable */ + unsigned char DAC[768]; /* PCRTC regs */ uint32_t fb_start; @@ -539,43 +544,9 @@ struct nv04_output_reg { }; struct nv04_mode_state { - uint32_t bpp; - uint32_t width; - uint32_t height; - uint32_t interlace; - uint32_t repaint0; - uint32_t repaint1; - uint32_t screen; - uint32_t scale; - uint32_t dither; - uint32_t extra; - uint32_t fifo; - uint32_t pixel; - uint32_t horiz; - int arbitration0; - int arbitration1; - uint32_t pll; - uint32_t pllB; - uint32_t vpll; - uint32_t vpll2; - uint32_t vpllB; - uint32_t vpll2B; + struct nv04_crtc_reg crtc_reg[2]; uint32_t pllsel; uint32_t sel_clk; - uint32_t general; - uint32_t crtcOwner; - uint32_t head; - uint32_t head2; - uint32_t cursorConfig; - uint32_t cursor0; - uint32_t cursor1; - uint32_t cursor2; - uint32_t timingH; - uint32_t timingV; - uint32_t displayV; - uint32_t crtcSync; - - struct nv04_crtc_reg crtc_reg[2]; }; enum nouveau_card_type { @@ -613,6 +584,12 @@ struct 
drm_nouveau_private { struct work_struct irq_work; struct work_struct hpd_work; + struct { + spinlock_t lock; + uint32_t hpd0_bits; + uint32_t hpd1_bits; + } hpd_state; + struct list_head vbl_waiting; struct { @@ -1045,6 +1022,7 @@ extern int nv50_fifo_create_context(struct nouveau_channel *); extern void nv50_fifo_destroy_context(struct nouveau_channel *); extern int nv50_fifo_load_context(struct nouveau_channel *); extern int nv50_fifo_unload_context(struct drm_device *); +extern void nv50_fifo_tlb_flush(struct drm_device *dev); /* nvc0_fifo.c */ extern int nvc0_fifo_init(struct drm_device *); @@ -1122,6 +1100,8 @@ extern int nv50_graph_load_context(struct nouveau_channel *); extern int nv50_graph_unload_context(struct drm_device *); extern void nv50_graph_context_switch(struct drm_device *); extern int nv50_grctx_init(struct nouveau_grctx *); +extern void nv50_graph_tlb_flush(struct drm_device *dev); +extern void nv86_graph_tlb_flush(struct drm_device *dev); /* nvc0_graph.c */ extern int nvc0_graph_init(struct drm_device *); @@ -1239,7 +1219,6 @@ extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index); extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val); extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index); extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val); -extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *); /* nouveau_fence.c */ struct nouveau_fence; diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 441b12420bb..ab1bbfbf266 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -249,6 +249,7 @@ alloc_semaphore(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_semaphore *sema; + int ret; if (!USE_SEMA(dev)) return NULL; @@ -257,10 +258,14 @@ alloc_semaphore(struct drm_device *dev) if (!sema) goto fail; + ret = drm_mm_pre_get(&dev_priv->fence.heap); + if (ret) + goto fail; + spin_lock(&dev_priv->fence.lock); sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0); if (sema->mem) - sema->mem = drm_mm_get_block(sema->mem, 4, 0); + sema->mem = drm_mm_get_block_atomic(sema->mem, 4, 0); spin_unlock(&dev_priv->fence.lock); if (!sema->mem) diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 5c4c929d7f7..9a1fdcf400c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -107,23 +107,29 @@ nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep) } static bool -nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags) { - switch (tile_flags) { - case 0x0000: - case 0x1800: - case 0x2800: - case 0x4800: - case 0x7000: - case 0x7400: - case 0x7a00: - case 0xe000: - break; - default: - NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags); - return false; +nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->card_type >= NV_50) { + switch (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) { + case 0x0000: + case 0x1800: + case 0x2800: + case 0x4800: + case 0x7000: + case 0x7400: + case 0x7a00: + case 0xe000: + return true; + } + } else { + if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)) + return true; } - return true; + NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags); + return false; } int diff --git 
a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c index bed669a54a2..b9672a05c41 100644 --- a/drivers/gpu/drm/nouveau/nouveau_hw.c +++ b/drivers/gpu/drm/nouveau/nouveau_hw.c @@ -519,11 +519,11 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head) struct pll_lims pll_lim; struct nouveau_pll_vals pv; - uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF; + enum pll_types pll = head ? PLL_VPLL1 : PLL_VPLL0; - if (get_pll_limits(dev, pllreg, &pll_lim)) + if (get_pll_limits(dev, pll, &pll_lim)) return; - nouveau_hw_get_pllvals(dev, pllreg, &pv); + nouveau_hw_get_pllvals(dev, pll, &pv); if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m && pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n && @@ -536,7 +536,7 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head) pv.M1 = pll_lim.vco1.max_m; pv.N1 = pll_lim.vco1.min_n; pv.log2P = pll_lim.max_usable_log2p; - nouveau_hw_setpll(dev, pllreg, &pv); + nouveau_hw_setpll(dev, pll_lim.reg, &pv); } /* diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.h b/drivers/gpu/drm/nouveau/nouveau_hw.h index 869130f8360..2989090b943 100644 --- a/drivers/gpu/drm/nouveau/nouveau_hw.h +++ b/drivers/gpu/drm/nouveau/nouveau_hw.h @@ -416,6 +416,25 @@ nv_fix_nv40_hw_cursor(struct drm_device *dev, int head) } static inline void +nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + NVWriteCRTC(dev, head, NV_PCRTC_START, offset); + + if (dev_priv->card_type == NV_04) { + /* + * Hilarious, the 24th bit doesn't want to stick to + * PCRTC_START... + */ + int cre_heb = NVReadVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX); + + NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HEB__INDEX, + (cre_heb & ~0x40) | ((offset >> 18) & 0x40)); + } +} + +static inline void nv_show_cursor(struct drm_device *dev, int head, bool show) { struct drm_nouveau_private *dev_priv = dev->dev_private; diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c index fdd7e3de79c..cb389d01432 100644 --- a/drivers/gpu/drm/nouveau/nouveau_i2c.c +++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c @@ -256,7 +256,7 @@ nouveau_i2c_find(struct drm_device *dev, int index) if (index >= DCB_MAX_NUM_I2C_ENTRIES) return NULL; - if (dev_priv->chipset >= NV_50 && (i2c->entry & 0x00000100)) { + if (dev_priv->card_type >= NV_50 && (i2c->entry & 0x00000100)) { uint32_t reg = 0xe500, val; if (i2c->port_type == 6) { diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 6fd51a51c60..7bfd9e6c9d6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -42,6 +42,13 @@ #include "nouveau_connector.h" #include "nv50_display.h" +static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20); + +static int nouveau_ratelimit(void) +{ + return __ratelimit(&nouveau_ratelimit_state); +} + void nouveau_irq_preinstall(struct drm_device *dev) { @@ -53,6 +60,7 @@ nouveau_irq_preinstall(struct drm_device *dev) if (dev_priv->card_type >= NV_50) { INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh); INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh); + spin_lock_init(&dev_priv->hpd_state.lock); INIT_LIST_HEAD(&dev_priv->vbl_waiting); } } @@ -202,8 +210,8 @@ nouveau_fifo_irq_handler(struct drm_device *dev) } if (status & NV_PFIFO_INTR_DMA_PUSHER) { - u32 get = nv_rd32(dev, 0x003244); - u32 put = nv_rd32(dev, 0x003240); + u32 dma_get = nv_rd32(dev, 0x003244); + u32 dma_put = nv_rd32(dev, 
0x003240); u32 push = nv_rd32(dev, 0x003220); u32 state = nv_rd32(dev, 0x003228); @@ -213,16 +221,18 @@ nouveau_fifo_irq_handler(struct drm_device *dev) u32 ib_get = nv_rd32(dev, 0x003334); u32 ib_put = nv_rd32(dev, 0x003330); - NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x " + if (nouveau_ratelimit()) + NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x " "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x " "State 0x%08x Push 0x%08x\n", - chid, ho_get, get, ho_put, put, ib_get, ib_put, - state, push); + chid, ho_get, dma_get, ho_put, + dma_put, ib_get, ib_put, state, + push); /* METHOD_COUNT, in DMA_STATE on earlier chipsets */ nv_wr32(dev, 0x003364, 0x00000000); - if (get != put || ho_get != ho_put) { - nv_wr32(dev, 0x003244, put); + if (dma_get != dma_put || ho_get != ho_put) { + nv_wr32(dev, 0x003244, dma_put); nv_wr32(dev, 0x003328, ho_put); } else if (ib_get != ib_put) { @@ -231,10 +241,10 @@ nouveau_fifo_irq_handler(struct drm_device *dev) } else { NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x " "Put 0x%08x State 0x%08x Push 0x%08x\n", - chid, get, put, state, push); + chid, dma_get, dma_put, state, push); - if (get != put) - nv_wr32(dev, 0x003244, put); + if (dma_get != dma_put) + nv_wr32(dev, 0x003244, dma_put); } nv_wr32(dev, 0x003228, 0x00000000); @@ -266,8 +276,9 @@ nouveau_fifo_irq_handler(struct drm_device *dev) } if (status) { - NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", - status, chid); + if (nouveau_ratelimit()) + NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n", + status, chid); nv_wr32(dev, NV03_PFIFO_INTR_0, status); status = 0; } @@ -544,13 +555,6 @@ nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource) nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap); } -static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20); - -static int nouveau_ratelimit(void) -{ - return __ratelimit(&nouveau_ratelimit_state); -} - static inline void nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource) diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index a163c7c612e..fe4a30dc4b4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -33,9 +33,9 @@ #include "drmP.h" #include "drm.h" #include "drm_sarea.h" -#include "nouveau_drv.h" -#define MIN(a,b) a < b ? 
a : b +#include "nouveau_drv.h" +#include "nouveau_pm.h" /* * NV10-NV40 tiling helpers @@ -175,11 +175,10 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, } } } - dev_priv->engine.instmem.flush(dev); - nv50_vm_flush(dev, 5); - nv50_vm_flush(dev, 0); - nv50_vm_flush(dev, 4); + dev_priv->engine.instmem.flush(dev); + dev_priv->engine.fifo.tlb_flush(dev); + dev_priv->engine.graph.tlb_flush(dev); nv50_vm_flush(dev, 6); return 0; } @@ -209,11 +208,10 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) pte++; } } - dev_priv->engine.instmem.flush(dev); - nv50_vm_flush(dev, 5); - nv50_vm_flush(dev, 0); - nv50_vm_flush(dev, 4); + dev_priv->engine.instmem.flush(dev); + dev_priv->engine.fifo.tlb_flush(dev); + dev_priv->engine.graph.tlb_flush(dev); nv50_vm_flush(dev, 6); } @@ -653,6 +651,7 @@ nouveau_mem_gart_init(struct drm_device *dev) void nouveau_mem_timing_init(struct drm_device *dev) { + /* cards < NVC0 only */ struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct nouveau_pm_memtimings *memtimings = &pm->memtimings; @@ -719,14 +718,14 @@ nouveau_mem_timing_init(struct drm_device *dev) tUNK_19 = 1; tUNK_20 = 0; tUNK_21 = 0; - switch (MIN(recordlen,21)) { - case 21: + switch (min(recordlen, 22)) { + case 22: tUNK_21 = entry[21]; - case 20: + case 21: tUNK_20 = entry[20]; - case 19: + case 20: tUNK_19 = entry[19]; - case 18: + case 19: tUNK_18 = entry[18]; default: tUNK_0 = entry[0]; @@ -756,24 +755,30 @@ nouveau_mem_timing_init(struct drm_device *dev) timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10); if(recordlen > 19) { timing->reg_100228 += (tUNK_19 - 1) << 24; - } else { + }/* I cannot back-up this else-statement right now + else { timing->reg_100228 += tUNK_12 << 24; - } + }*/ /* XXX: reg_10022c */ + timing->reg_10022c = tUNK_2 - 1; timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 | tUNK_13 << 8 | tUNK_13); /* XXX: +6? */ timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC); - if(tUNK_10 > tUNK_11) { - timing->reg_100234 += tUNK_10 << 16; - } else { - timing->reg_100234 += tUNK_11 << 16; + timing->reg_100234 += max(tUNK_10,tUNK_11) << 16; + + /* XXX; reg_100238, reg_10023c + * reg: 0x00?????? 
+ * reg_10023c: + * 0 for pre-NV50 cards + * 0x????0202 for NV50+ cards (empirical evidence) */ + if(dev_priv->card_type >= NV_50) { + timing->reg_10023c = 0x202; } - /* XXX; reg_100238, reg_10023c */ NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i, timing->reg_100220, timing->reg_100224, timing->reg_100228, timing->reg_10022c); diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index 896cf863414..dd572adca02 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -129,7 +129,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, if (ramin == NULL) { spin_unlock(&dev_priv->ramin_lock); nouveau_gpuobj_ref(NULL, &gpuobj); - return ret; + return -ENOMEM; } ramin = drm_mm_get_block_atomic(ramin, size, align); diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index 1c99c55d6d4..9f7b158f582 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c @@ -284,6 +284,7 @@ nouveau_sysfs_fini(struct drm_device *dev) } } +#ifdef CONFIG_HWMON static ssize_t nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf) { @@ -395,10 +396,12 @@ static struct attribute *hwmon_attributes[] = { static const struct attribute_group hwmon_attrgroup = { .attrs = hwmon_attributes, }; +#endif static int nouveau_hwmon_init(struct drm_device *dev) { +#ifdef CONFIG_HWMON struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; struct device *hwmon_dev; @@ -425,13 +428,14 @@ nouveau_hwmon_init(struct drm_device *dev) } pm->hwmon = hwmon_dev; - +#endif return 0; } static void nouveau_hwmon_fini(struct drm_device *dev) { +#ifdef CONFIG_HWMON struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pm_engine *pm = &dev_priv->engine.pm; @@ -439,6 +443,7 @@ nouveau_hwmon_fini(struct drm_device *dev) sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup); hwmon_device_unregister(pm->hwmon); } +#endif } int diff --git a/drivers/gpu/drm/nouveau/nouveau_ramht.c b/drivers/gpu/drm/nouveau/nouveau_ramht.c index 7f16697cc96..2d8580927ca 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ramht.c +++ b/drivers/gpu/drm/nouveau/nouveau_ramht.c @@ -153,26 +153,42 @@ nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle, return -ENOMEM; } +static struct nouveau_ramht_entry * +nouveau_ramht_remove_entry(struct nouveau_channel *chan, u32 handle) +{ + struct nouveau_ramht *ramht = chan ? 
chan->ramht : NULL; + struct nouveau_ramht_entry *entry; + unsigned long flags; + + if (!ramht) + return NULL; + + spin_lock_irqsave(&ramht->lock, flags); + list_for_each_entry(entry, &ramht->entries, head) { + if (entry->channel == chan && + (!handle || entry->handle == handle)) { + list_del(&entry->head); + spin_unlock_irqrestore(&ramht->lock, flags); + + return entry; + } + } + spin_unlock_irqrestore(&ramht->lock, flags); + + return NULL; +} + static void -nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle) +nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; struct nouveau_gpuobj *ramht = chan->ramht->gpuobj; - struct nouveau_ramht_entry *entry, *tmp; + unsigned long flags; u32 co, ho; - list_for_each_entry_safe(entry, tmp, &chan->ramht->entries, head) { - if (entry->channel != chan || entry->handle != handle) - continue; - - nouveau_gpuobj_ref(NULL, &entry->gpuobj); - list_del(&entry->head); - kfree(entry); - break; - } - + spin_lock_irqsave(&chan->ramht->lock, flags); co = ho = nouveau_ramht_hash_handle(chan, handle); do { if (nouveau_ramht_entry_valid(dev, ramht, co) && @@ -184,7 +200,7 @@ nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle) nv_wo32(ramht, co + 0, 0x00000000); nv_wo32(ramht, co + 4, 0x00000000); instmem->flush(dev); - return; + goto out; } co += 8; @@ -194,17 +210,22 @@ nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle) NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n", chan->id, handle); +out: + spin_unlock_irqrestore(&chan->ramht->lock, flags); } void nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle) { - struct nouveau_ramht *ramht = chan->ramht; - unsigned long flags; + struct nouveau_ramht_entry *entry; - spin_lock_irqsave(&ramht->lock, flags); - nouveau_ramht_remove_locked(chan, handle); - spin_unlock_irqrestore(&ramht->lock, flags); + entry = nouveau_ramht_remove_entry(chan, handle); + if (!entry) + return; + + nouveau_ramht_remove_hash(chan, entry->handle); + nouveau_gpuobj_ref(NULL, &entry->gpuobj); + kfree(entry); } struct nouveau_gpuobj * @@ -265,23 +286,19 @@ void nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr, struct nouveau_channel *chan) { - struct nouveau_ramht_entry *entry, *tmp; + struct nouveau_ramht_entry *entry; struct nouveau_ramht *ramht; - unsigned long flags; if (ref) kref_get(&ref->refcount); ramht = *ptr; if (ramht) { - spin_lock_irqsave(&ramht->lock, flags); - list_for_each_entry_safe(entry, tmp, &ramht->entries, head) { - if (entry->channel != chan) - continue; - - nouveau_ramht_remove_locked(chan, entry->handle); + while ((entry = nouveau_ramht_remove_entry(chan, 0))) { + nouveau_ramht_remove_hash(chan, entry->handle); + nouveau_gpuobj_ref(NULL, &entry->gpuobj); + kfree(entry); } - spin_unlock_irqrestore(&ramht->lock, flags); kref_put(&ramht->refcount, nouveau_ramht_del); } diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 288bacac7e5..d4ac9700703 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -120,8 +120,8 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) dev_priv->engine.instmem.flush(nvbe->dev); if (dev_priv->card_type == NV_50) { - nv50_vm_flush(dev, 5); /* PGRAPH */ - nv50_vm_flush(dev, 0); /* PFIFO */ + 
dev_priv->engine.fifo.tlb_flush(dev); + dev_priv->engine.graph.tlb_flush(dev); } nvbe->bound = true; @@ -162,8 +162,8 @@ nouveau_sgdma_unbind(struct ttm_backend *be) dev_priv->engine.instmem.flush(nvbe->dev); if (dev_priv->card_type == NV_50) { - nv50_vm_flush(dev, 5); - nv50_vm_flush(dev, 0); + dev_priv->engine.fifo.tlb_flush(dev); + dev_priv->engine.graph.tlb_flush(dev); } nvbe->bound = false; @@ -224,7 +224,11 @@ nouveau_sgdma_init(struct drm_device *dev) int i, ret; if (dev_priv->card_type < NV_50) { - aper_size = (64 * 1024 * 1024); + if(dev_priv->ramin_rsvd_vram < 2 * 1024 * 1024) + aper_size = 64 * 1024 * 1024; + else + aper_size = 512 * 1024 * 1024; + obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4; obj_size += 8; /* ctxdma header */ } else { diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index ed7757f1408..049f755567e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -354,6 +354,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->graph.destroy_context = nv50_graph_destroy_context; engine->graph.load_context = nv50_graph_load_context; engine->graph.unload_context = nv50_graph_unload_context; + if (dev_priv->chipset != 0x86) + engine->graph.tlb_flush = nv50_graph_tlb_flush; + else { + /* from what i can see nvidia do this on every + * pre-NVA3 board except NVAC, but, we've only + * ever seen problems on NV86 + */ + engine->graph.tlb_flush = nv86_graph_tlb_flush; + } engine->fifo.channels = 128; engine->fifo.init = nv50_fifo_init; engine->fifo.takedown = nv50_fifo_takedown; @@ -365,6 +374,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fifo.destroy_context = nv50_fifo_destroy_context; engine->fifo.load_context = nv50_fifo_load_context; engine->fifo.unload_context = nv50_fifo_unload_context; + engine->fifo.tlb_flush = nv50_fifo_tlb_flush; engine->display.early_init = nv50_display_early_init; engine->display.late_takedown = nv50_display_late_takedown; engine->display.create = nv50_display_create; @@ -1041,6 +1051,9 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, case NOUVEAU_GETPARAM_PTIMER_TIME: getparam->value = dev_priv->engine.timer.read(dev); break; + case NOUVEAU_GETPARAM_HAS_BO_USAGE: + getparam->value = 1; + break; case NOUVEAU_GETPARAM_GRAPH_UNITS: /* NV40 and NV50 versions are quite different, but register * address is the same. 
User is supposed to know the card @@ -1051,7 +1064,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, } /* FALLTHRU */ default: - NV_ERROR(dev, "unknown parameter %lld\n", getparam->param); + NV_DEBUG(dev, "unknown parameter %lld\n", getparam->param); return -EINVAL; } @@ -1066,7 +1079,7 @@ nouveau_ioctl_setparam(struct drm_device *dev, void *data, switch (setparam->param) { default: - NV_ERROR(dev, "unknown parameter %lld\n", setparam->param); + NV_DEBUG(dev, "unknown parameter %lld\n", setparam->param); return -EINVAL; } diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c index 16bbbf1eff6..7ecc4adc1e4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_temp.c +++ b/drivers/gpu/drm/nouveau/nouveau_temp.c @@ -191,7 +191,7 @@ nv40_temp_get(struct drm_device *dev) int offset = sensor->offset_mult / sensor->offset_div; int core_temp; - if (dev_priv->chipset >= 0x50) { + if (dev_priv->card_type >= NV_50) { core_temp = nv_rd32(dev, 0x20008); } else { core_temp = nv_rd32(dev, 0x0015b4) & 0x1fff; diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index c71abc2a34d..40e18074162 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c @@ -158,7 +158,6 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); struct drm_device *dev = crtc->dev; - struct drm_connector *connector; unsigned char seq1 = 0, crtc17 = 0; unsigned char crtc1A; @@ -213,10 +212,6 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode) NVVgaSeqReset(dev, nv_crtc->index, false); NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A); - - /* Update connector polling modes */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) - nouveau_connector_set_polling(connector); } static bool @@ -831,7 +826,7 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, /* Update the framebuffer location. */ regp->fb_start = nv_crtc->fb.offset & ~3; regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8); - NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_START, regp->fb_start); + nv_set_crtc_base(dev, nv_crtc->index, regp->fb_start); /* Update the arbitration parameters. 
*/ nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->bits_per_pixel, diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c index c936403b26e..ef23550407b 100644 --- a/drivers/gpu/drm/nouveau/nv04_dfp.c +++ b/drivers/gpu/drm/nouveau/nv04_dfp.c @@ -185,14 +185,15 @@ static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder, struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder); - /* For internal panels and gpu scaling on DVI we need the native mode */ - if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) { - if (!nv_connector->native_mode) - return false; + if (!nv_connector->native_mode || + nv_connector->scaling_mode == DRM_MODE_SCALE_NONE || + mode->hdisplay > nv_connector->native_mode->hdisplay || + mode->vdisplay > nv_connector->native_mode->vdisplay) { + nv_encoder->mode = *adjusted_mode; + + } else { nv_encoder->mode = *nv_connector->native_mode; adjusted_mode->clock = nv_connector->native_mode->clock; - } else { - nv_encoder->mode = *adjusted_mode; } return true; diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c index 6a6eb697d38..eb1c70dd82e 100644 --- a/drivers/gpu/drm/nouveau/nv04_pm.c +++ b/drivers/gpu/drm/nouveau/nv04_pm.c @@ -76,6 +76,15 @@ nv04_pm_clock_set(struct drm_device *dev, void *pre_state) reg += 4; nouveau_hw_setpll(dev, reg, &state->calc); + + if (dev_priv->card_type < NV_30 && reg == NV_PRAMDAC_MPLL_COEFF) { + if (dev_priv->card_type == NV_20) + nv_mask(dev, 0x1002c4, 0, 1 << 20); + + /* Reset the DLLs */ + nv_mask(dev, 0x1002c0, 0, 1 << 8); + } + kfree(state); } diff --git a/drivers/gpu/drm/nouveau/nv50_calc.c b/drivers/gpu/drm/nouveau/nv50_calc.c index 2cdc2bfe717..de81151648f 100644 --- a/drivers/gpu/drm/nouveau/nv50_calc.c +++ b/drivers/gpu/drm/nouveau/nv50_calc.c @@ -51,24 +51,28 @@ nv50_calc_pll2(struct drm_device *dev, struct pll_lims *pll, int clk, int *N, int *fN, int *M, int *P) { fixed20_12 fb_div, a, b; + u32 refclk = pll->refclk / 10; + u32 max_vco_freq = pll->vco1.maxfreq / 10; + u32 max_vco_inputfreq = pll->vco1.max_inputfreq / 10; + clk /= 10; - *P = pll->vco1.maxfreq / clk; + *P = max_vco_freq / clk; if (*P > pll->max_p) *P = pll->max_p; if (*P < pll->min_p) *P = pll->min_p; - /* *M = ceil(refclk / pll->vco.max_inputfreq); */ - a.full = dfixed_const(pll->refclk); - b.full = dfixed_const(pll->vco1.max_inputfreq); + /* *M = floor((refclk + max_vco_inputfreq) / max_vco_inputfreq); */ + a.full = dfixed_const(refclk + max_vco_inputfreq); + b.full = dfixed_const(max_vco_inputfreq); a.full = dfixed_div(a, b); - a.full = dfixed_ceil(a); + a.full = dfixed_floor(a); *M = dfixed_trunc(a); /* fb_div = (vco * *M) / refclk; */ fb_div.full = dfixed_const(clk * *P); fb_div.full = dfixed_mul(fb_div, a); - a.full = dfixed_const(pll->refclk); + a.full = dfixed_const(refclk); fb_div.full = dfixed_div(fb_div, a); /* *N = floor(fb_div); */ diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index 16380d52cd8..56476d0c6de 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -546,7 +546,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, } nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base; - nv_crtc->fb.tile_flags = fb->nvbo->tile_flags; + nv_crtc->fb.tile_flags = nouveau_bo_tile_layout(fb->nvbo); nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8; if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) { ret = RING_SPACE(evo, 2); 
@@ -578,7 +578,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, fb->nvbo->tile_mode); } if (dev_priv->chipset == 0x50) - OUT_RING(evo, (fb->nvbo->tile_flags << 8) | format); + OUT_RING(evo, (nv_crtc->fb.tile_flags << 8) | format); else OUT_RING(evo, format); diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 55c9663ef2b..f624c611dde 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -1032,11 +1032,18 @@ nv50_display_irq_hotplug_bh(struct work_struct *work) struct drm_connector *connector; const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; uint32_t unplug_mask, plug_mask, change_mask; - uint32_t hpd0, hpd1 = 0; + uint32_t hpd0, hpd1; - hpd0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050); + spin_lock_irq(&dev_priv->hpd_state.lock); + hpd0 = dev_priv->hpd_state.hpd0_bits; + dev_priv->hpd_state.hpd0_bits = 0; + hpd1 = dev_priv->hpd_state.hpd1_bits; + dev_priv->hpd_state.hpd1_bits = 0; + spin_unlock_irq(&dev_priv->hpd_state.lock); + + hpd0 &= nv_rd32(dev, 0xe050); if (dev_priv->chipset >= 0x90) - hpd1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070); + hpd1 &= nv_rd32(dev, 0xe070); plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16); unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000); @@ -1078,10 +1085,6 @@ nv50_display_irq_hotplug_bh(struct work_struct *work) helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF); } - nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054)); - if (dev_priv->chipset >= 0x90) - nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074)); - drm_helper_hpd_irq_event(dev); } @@ -1092,8 +1095,22 @@ nv50_display_irq_handler(struct drm_device *dev) uint32_t delayed = 0; if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) { - if (!work_pending(&dev_priv->hpd_work)) - queue_work(dev_priv->wq, &dev_priv->hpd_work); + uint32_t hpd0_bits, hpd1_bits = 0; + + hpd0_bits = nv_rd32(dev, 0xe054); + nv_wr32(dev, 0xe054, hpd0_bits); + + if (dev_priv->chipset >= 0x90) { + hpd1_bits = nv_rd32(dev, 0xe074); + nv_wr32(dev, 0xe074, hpd1_bits); + } + + spin_lock(&dev_priv->hpd_state.lock); + dev_priv->hpd_state.hpd0_bits |= hpd0_bits; + dev_priv->hpd_state.hpd1_bits |= hpd1_bits; + spin_unlock(&dev_priv->hpd_state.lock); + + queue_work(dev_priv->wq, &dev_priv->hpd_work); } while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) { diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index a46a961102f..1da65bd60c1 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c @@ -464,3 +464,8 @@ nv50_fifo_unload_context(struct drm_device *dev) return 0; } +void +nv50_fifo_tlb_flush(struct drm_device *dev) +{ + nv50_vm_flush(dev, 5); +} diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index cbf5ae2f67d..8b669d0af61 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -402,3 +402,55 @@ struct nouveau_pgraph_object_class nv50_graph_grclass[] = { { 0x8597, false, NULL }, /* tesla (nva3, nva5, nva8) */ {} }; + +void +nv50_graph_tlb_flush(struct drm_device *dev) +{ + nv50_vm_flush(dev, 0); +} + +void +nv86_graph_tlb_flush(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; + bool idle, timeout = false; + unsigned long flags; + u64 start; + u32 tmp; + + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + nv_mask(dev, 0x400500, 0x00000001, 0x00000000); + + start 
= ptimer->read(dev); + do { + idle = true; + + for (tmp = nv_rd32(dev, 0x400380); tmp && idle; tmp >>= 3) { + if ((tmp & 7) == 1) + idle = false; + } + + for (tmp = nv_rd32(dev, 0x400384); tmp && idle; tmp >>= 3) { + if ((tmp & 7) == 1) + idle = false; + } + + for (tmp = nv_rd32(dev, 0x400388); tmp && idle; tmp >>= 3) { + if ((tmp & 7) == 1) + idle = false; + } + } while (!idle && !(timeout = ptimer->read(dev) - start > 2000000000)); + + if (timeout) { + NV_ERROR(dev, "PGRAPH TLB flush idle timeout fail: " + "0x%08x 0x%08x 0x%08x 0x%08x\n", + nv_rd32(dev, 0x400700), nv_rd32(dev, 0x400380), + nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388)); + } + + nv50_vm_flush(dev, 0); + + nv_mask(dev, 0x400500, 0x00000001, 0x00000001); + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); +} diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index a53fc974332..b773229b764 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -402,7 +402,6 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) } dev_priv->engine.instmem.flush(dev); - nv50_vm_flush(dev, 4); nv50_vm_flush(dev, 6); gpuobj->im_bound = 1; diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index 8e421f644a5..05efb5b9f13 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -112,6 +112,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, base += 3; break; case ATOM_IIO_WRITE: + (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1)); ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp); base += 3; break; diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index f12a5b3ec05..4dc5b4714c5 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1650,7 +1650,36 @@ static void evergreen_gpu_init(struct radeon_device *rdev) } } - rdev->config.evergreen.tile_config = gb_addr_config; + /* setup tiling info dword. gb_addr_config is not adequate since it does + * not have bank info, so create a custom tiling dword. 
+ * bits 3:0 num_pipes + * bits 7:4 num_banks + * bits 11:8 group_size + * bits 15:12 row_size + */ + rdev->config.evergreen.tile_config = 0; + switch (rdev->config.evergreen.max_tile_pipes) { + case 1: + default: + rdev->config.evergreen.tile_config |= (0 << 0); + break; + case 2: + rdev->config.evergreen.tile_config |= (1 << 0); + break; + case 4: + rdev->config.evergreen.tile_config |= (2 << 0); + break; + case 8: + rdev->config.evergreen.tile_config |= (3 << 0); + break; + } + rdev->config.evergreen.tile_config |= + ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; + rdev->config.evergreen.tile_config |= + ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8; + rdev->config.evergreen.tile_config |= + ((gb_addr_config & 0x30000000) >> 28) << 12; + WREG32(GB_BACKEND_MAP, gb_backend_map); WREG32(GB_ADDR_CONFIG, gb_addr_config); WREG32(DMIF_ADDR_CONFIG, gb_addr_config); @@ -2033,7 +2062,7 @@ int evergreen_irq_set(struct radeon_device *rdev) u32 grbm_int_cntl = 0; if (!rdev->irq.installed) { - WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); + WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); return -EINVAL; } /* don't enable anything if the ih is disabled */ @@ -2295,6 +2324,7 @@ restart_ih: case 0: /* D1 vblank */ if (disp_int & LB_D1_VBLANK_INTERRUPT) { drm_handle_vblank(rdev->ddev, 0); + rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); disp_int &= ~LB_D1_VBLANK_INTERRUPT; DRM_DEBUG("IH: D1 vblank\n"); @@ -2316,6 +2346,7 @@ restart_ih: case 0: /* D2 vblank */ if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) { drm_handle_vblank(rdev->ddev, 1); + rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT; DRM_DEBUG("IH: D2 vblank\n"); @@ -2337,6 +2368,7 @@ restart_ih: case 0: /* D3 vblank */ if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) { drm_handle_vblank(rdev->ddev, 2); + rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT; DRM_DEBUG("IH: D3 vblank\n"); @@ -2358,6 +2390,7 @@ restart_ih: case 0: /* D4 vblank */ if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) { drm_handle_vblank(rdev->ddev, 3); + rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT; DRM_DEBUG("IH: D4 vblank\n"); @@ -2379,6 +2412,7 @@ restart_ih: case 0: /* D5 vblank */ if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) { drm_handle_vblank(rdev->ddev, 4); + rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT; DRM_DEBUG("IH: D5 vblank\n"); @@ -2400,6 +2434,7 @@ restart_ih: case 0: /* D6 vblank */ if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) { drm_handle_vblank(rdev->ddev, 5); + rdev->pm.vblank_sync = true; wake_up(&rdev->irq.vblank_queue); disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT; DRM_DEBUG("IH: D6 vblank\n"); diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c index ac3b6dde23d..e0e590110dd 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c @@ -459,7 +459,7 @@ int evergreen_blit_init(struct radeon_device *rdev) obj_size += evergreen_ps_size * 4; obj_size = ALIGN(obj_size, 256); - r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM, + r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, &rdev->r600_blit.shader_obj); if (r) { DRM_ERROR("evergreen failed to allocate shader\n"); diff --git a/drivers/gpu/drm/radeon/r100.c 
b/drivers/gpu/drm/radeon/r100.c index 0e8f28a6892..8e10aa9f74b 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -442,7 +442,7 @@ int r100_pci_gart_init(struct radeon_device *rdev) int r; if (rdev->gart.table.ram.ptr) { - WARN(1, "R100 PCI GART already initialized.\n"); + WARN(1, "R100 PCI GART already initialized\n"); return 0; } /* Initialize common gart structure */ @@ -516,7 +516,7 @@ int r100_irq_set(struct radeon_device *rdev) uint32_t tmp = 0; if (!rdev->irq.installed) { - WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); + WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); WREG32(R_000040_GEN_INT_CNTL, 0); return -EINVAL; } diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 34527e600fe..cde1d3480d9 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -91,7 +91,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev) int r; if (rdev->gart.table.vram.robj) { - WARN(1, "RV370 PCIE GART already initialized.\n"); + WARN(1, "RV370 PCIE GART already initialized\n"); return 0; } /* Initialize common gart structure */ diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 33952a12f0a..4d7a2e1bdb9 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -97,14 +97,8 @@ u32 rv6xx_get_temp(struct radeon_device *rdev) { u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >> ASIC_T_SHIFT; - u32 actual_temp = 0; - if ((temp >> 7) & 1) - actual_temp = 0; - else - actual_temp = (temp >> 1) & 0xff; - - return actual_temp * 1000; + return temp * 1000; } void r600_pm_get_dynpm_state(struct radeon_device *rdev) @@ -884,12 +878,15 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev) u32 tmp; /* flush hdp cache so updates hit vram */ - if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) { + if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && + !(rdev->flags & RADEON_IS_AGP)) { void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; u32 tmp; /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL + * This seems to cause problems on some AGP cards. Just use the old + * method for them. 
*/ WREG32(HDP_DEBUG1, 0); tmp = readl((void __iomem *)ptr); @@ -919,7 +916,7 @@ int r600_pcie_gart_init(struct radeon_device *rdev) int r; if (rdev->gart.table.vram.robj) { - WARN(1, "R600 PCIE GART already initialized.\n"); + WARN(1, "R600 PCIE GART already initialized\n"); return 0; } /* Initialize common gart structure */ @@ -1201,8 +1198,10 @@ void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) mc->vram_end, mc->real_vram_size >> 20); } else { u64 base = 0; - if (rdev->flags & RADEON_IS_IGP) - base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; + if (rdev->flags & RADEON_IS_IGP) { + base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF; + base <<= 24; + } radeon_vram_location(rdev, &rdev->mc, base); rdev->mc.gtt_base_align = 0; radeon_gtt_location(rdev, mc); @@ -2724,7 +2723,7 @@ static int r600_ih_ring_alloc(struct radeon_device *rdev) /* Allocate ring buffer */ if (rdev->ih.ring_obj == NULL) { r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size, - true, + PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT, &rdev->ih.ring_obj); if (r) { @@ -2995,7 +2994,7 @@ int r600_irq_set(struct radeon_device *rdev) u32 hdmi1, hdmi2; if (!rdev->irq.installed) { - WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); + WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); return -EINVAL; } /* don't enable anything if the ih is disabled */ @@ -3489,10 +3488,12 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev) void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) { /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read - * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL + * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL. + * This seems to cause problems on some AGP cards. Just use the old + * method for them. 
*/ if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) && - rdev->vram_scratch.ptr) { + rdev->vram_scratch.ptr && !(rdev->flags & RADEON_IS_AGP)) { void __iomem *ptr = (void *)rdev->vram_scratch.ptr; u32 tmp; diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 8362974ef41..86e5aa07f0d 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c @@ -501,7 +501,7 @@ int r600_blit_init(struct radeon_device *rdev) obj_size += r6xx_ps_size * 4; obj_size = ALIGN(obj_size, 256); - r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM, + r = radeon_bo_create(rdev, NULL, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, &rdev->r600_blit.shader_obj); if (r) { DRM_ERROR("r600 failed to allocate shader\n"); diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 37cc2aa9f92..0f90fc3482c 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -50,6 +50,7 @@ struct r600_cs_track { u32 nsamples; u32 cb_color_base_last[8]; struct radeon_bo *cb_color_bo[8]; + u64 cb_color_bo_mc[8]; u32 cb_color_bo_offset[8]; struct radeon_bo *cb_color_frag_bo[8]; struct radeon_bo *cb_color_tile_bo[8]; @@ -67,6 +68,7 @@ struct r600_cs_track { u32 db_depth_size; u32 db_offset; struct radeon_bo *db_bo; + u64 db_bo_mc; }; static inline int r600_bpe_from_format(u32 *bpe, u32 format) @@ -140,6 +142,68 @@ static inline int r600_bpe_from_format(u32 *bpe, u32 format) return 0; } +struct array_mode_checker { + int array_mode; + u32 group_size; + u32 nbanks; + u32 npipes; + u32 nsamples; + u32 bpe; +}; + +/* returns alignment in pixels for pitch/height/depth and bytes for base */ +static inline int r600_get_array_mode_alignment(struct array_mode_checker *values, + u32 *pitch_align, + u32 *height_align, + u32 *depth_align, + u64 *base_align) +{ + u32 tile_width = 8; + u32 tile_height = 8; + u32 macro_tile_width = values->nbanks; + u32 macro_tile_height = values->npipes; + u32 tile_bytes = tile_width * tile_height * values->bpe * values->nsamples; + u32 macro_tile_bytes = macro_tile_width * macro_tile_height * tile_bytes; + + switch (values->array_mode) { + case ARRAY_LINEAR_GENERAL: + /* technically tile_width/_height for pitch/height */ + *pitch_align = 1; /* tile_width */ + *height_align = 1; /* tile_height */ + *depth_align = 1; + *base_align = 1; + break; + case ARRAY_LINEAR_ALIGNED: + *pitch_align = max((u32)64, (u32)(values->group_size / values->bpe)); + *height_align = tile_height; + *depth_align = 1; + *base_align = values->group_size; + break; + case ARRAY_1D_TILED_THIN1: + *pitch_align = max((u32)tile_width, + (u32)(values->group_size / + (tile_height * values->bpe * values->nsamples))); + *height_align = tile_height; + *depth_align = 1; + *base_align = values->group_size; + break; + case ARRAY_2D_TILED_THIN1: + *pitch_align = max((u32)macro_tile_width, + (u32)(((values->group_size / tile_height) / + (values->bpe * values->nsamples)) * + values->nbanks)) * tile_width; + *height_align = macro_tile_height * tile_height; + *depth_align = 1; + *base_align = max(macro_tile_bytes, + (*pitch_align) * values->bpe * (*height_align) * values->nsamples); + break; + default: + return -EINVAL; + } + + return 0; +} + static void r600_cs_track_init(struct r600_cs_track *track) { int i; @@ -153,10 +217,12 @@ static void r600_cs_track_init(struct r600_cs_track *track) track->cb_color_info[i] = 0; track->cb_color_bo[i] = NULL; track->cb_color_bo_offset[i] = 
0xFFFFFFFF; + track->cb_color_bo_mc[i] = 0xFFFFFFFF; } track->cb_target_mask = 0xFFFFFFFF; track->cb_shader_mask = 0xFFFFFFFF; track->db_bo = NULL; + track->db_bo_mc = 0xFFFFFFFF; /* assume the biggest format and that htile is enabled */ track->db_depth_info = 7 | (1 << 25); track->db_depth_view = 0xFFFFC000; @@ -168,7 +234,10 @@ static void r600_cs_track_init(struct r600_cs_track *track) static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) { struct r600_cs_track *track = p->track; - u32 bpe = 0, pitch, slice_tile_max, size, tmp, height, pitch_align; + u32 bpe = 0, slice_tile_max, size, tmp; + u32 height, height_align, pitch, pitch_align, depth_align; + u64 base_offset, base_align; + struct array_mode_checker array_check; volatile u32 *ib = p->ib->ptr; unsigned array_mode; @@ -183,60 +252,40 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) i, track->cb_color_info[i]); return -EINVAL; } - /* pitch is the number of 8x8 tiles per row */ - pitch = G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1; + /* pitch in pixels */ + pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) * 8; slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1; slice_tile_max *= 64; - height = slice_tile_max / (pitch * 8); + height = slice_tile_max / pitch; if (height > 8192) height = 8192; array_mode = G_0280A0_ARRAY_MODE(track->cb_color_info[i]); + + base_offset = track->cb_color_bo_mc[i] + track->cb_color_bo_offset[i]; + array_check.array_mode = array_mode; + array_check.group_size = track->group_size; + array_check.nbanks = track->nbanks; + array_check.npipes = track->npipes; + array_check.nsamples = track->nsamples; + array_check.bpe = bpe; + if (r600_get_array_mode_alignment(&array_check, + &pitch_align, &height_align, &depth_align, &base_align)) { + dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, + G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i, + track->cb_color_info[i]); + return -EINVAL; + } switch (array_mode) { case V_0280A0_ARRAY_LINEAR_GENERAL: - /* technically height & 0x7 */ break; case V_0280A0_ARRAY_LINEAR_ALIGNED: - pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8; - if (!IS_ALIGNED(pitch, pitch_align)) { - dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", - __func__, __LINE__, pitch); - return -EINVAL; - } - if (!IS_ALIGNED(height, 8)) { - dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", - __func__, __LINE__, height); - return -EINVAL; - } break; case V_0280A0_ARRAY_1D_TILED_THIN1: - pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe * track->nsamples))) / 8; - if (!IS_ALIGNED(pitch, pitch_align)) { - dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", - __func__, __LINE__, pitch); - return -EINVAL; - } /* avoid breaking userspace */ if (height > 7) height &= ~0x7; - if (!IS_ALIGNED(height, 8)) { - dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", - __func__, __LINE__, height); - return -EINVAL; - } break; case V_0280A0_ARRAY_2D_TILED_THIN1: - pitch_align = max((u32)track->nbanks, - (u32)(((track->group_size / 8) / (bpe * track->nsamples)) * track->nbanks)) / 8; - if (!IS_ALIGNED(pitch, pitch_align)) { - dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", - __func__, __LINE__, pitch); - return -EINVAL; - } - if (!IS_ALIGNED((height / 8), track->npipes)) { - dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", - __func__, __LINE__, height); - return -EINVAL; - } break; default: dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, @@ -244,8 +293,24 @@ static 
inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) track->cb_color_info[i]); return -EINVAL; } + + if (!IS_ALIGNED(pitch, pitch_align)) { + dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", + __func__, __LINE__, pitch); + return -EINVAL; + } + if (!IS_ALIGNED(height, height_align)) { + dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", + __func__, __LINE__, height); + return -EINVAL; + } + if (!IS_ALIGNED(base_offset, base_align)) { + dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset); + return -EINVAL; + } + /* check offset */ - tmp = height * pitch * 8 * bpe; + tmp = height * pitch * bpe; if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) { /* the initial DDX does bad things with the CB size occasionally */ @@ -260,15 +325,11 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) return -EINVAL; } } - if (!IS_ALIGNED(track->cb_color_bo_offset[i], track->group_size)) { - dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->cb_color_bo_offset[i]); - return -EINVAL; - } /* limit max tile */ - tmp = (height * pitch * 8) >> 6; + tmp = (height * pitch) >> 6; if (tmp < slice_tile_max) slice_tile_max = tmp; - tmp = S_028060_PITCH_TILE_MAX(pitch - 1) | + tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) | S_028060_SLICE_TILE_MAX(slice_tile_max - 1); ib[track->cb_color_size_idx[i]] = tmp; return 0; @@ -310,7 +371,12 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) /* Check depth buffer */ if (G_028800_STENCIL_ENABLE(track->db_depth_control) || G_028800_Z_ENABLE(track->db_depth_control)) { - u32 nviews, bpe, ntiles, pitch, pitch_align, height, size, slice_tile_max; + u32 nviews, bpe, ntiles, size, slice_tile_max; + u32 height, height_align, pitch, pitch_align, depth_align; + u64 base_offset, base_align; + struct array_mode_checker array_check; + int array_mode; + if (track->db_bo == NULL) { dev_warn(p->dev, "z/stencil with no depth buffer\n"); return -EINVAL; @@ -353,41 +419,34 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); } else { size = radeon_bo_size(track->db_bo); - pitch = G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1; + /* pitch in pixels */ + pitch = (G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1) * 8; slice_tile_max = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; slice_tile_max *= 64; - height = slice_tile_max / (pitch * 8); + height = slice_tile_max / pitch; if (height > 8192) height = 8192; - switch (G_028010_ARRAY_MODE(track->db_depth_info)) { + base_offset = track->db_bo_mc + track->db_offset; + array_mode = G_028010_ARRAY_MODE(track->db_depth_info); + array_check.array_mode = array_mode; + array_check.group_size = track->group_size; + array_check.nbanks = track->nbanks; + array_check.npipes = track->npipes; + array_check.nsamples = track->nsamples; + array_check.bpe = bpe; + if (r600_get_array_mode_alignment(&array_check, + &pitch_align, &height_align, &depth_align, &base_align)) { + dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, + G_028010_ARRAY_MODE(track->db_depth_info), + track->db_depth_info); + return -EINVAL; + } + switch (array_mode) { case V_028010_ARRAY_1D_TILED_THIN1: - pitch_align = (max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8); - if (!IS_ALIGNED(pitch, pitch_align)) { - dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n", - __func__, 
__LINE__, pitch); - return -EINVAL; - } /* don't break userspace */ height &= ~0x7; - if (!IS_ALIGNED(height, 8)) { - dev_warn(p->dev, "%s:%d db height (%d) invalid\n", - __func__, __LINE__, height); - return -EINVAL; - } break; case V_028010_ARRAY_2D_TILED_THIN1: - pitch_align = max((u32)track->nbanks, - (u32)(((track->group_size / 8) / bpe) * track->nbanks)) / 8; - if (!IS_ALIGNED(pitch, pitch_align)) { - dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n", - __func__, __LINE__, pitch); - return -EINVAL; - } - if (!IS_ALIGNED((height / 8), track->npipes)) { - dev_warn(p->dev, "%s:%d db height (%d) invalid\n", - __func__, __LINE__, height); - return -EINVAL; - } break; default: dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, @@ -395,15 +454,27 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) track->db_depth_info); return -EINVAL; } - if (!IS_ALIGNED(track->db_offset, track->group_size)) { - dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->db_offset); + + if (!IS_ALIGNED(pitch, pitch_align)) { + dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n", + __func__, __LINE__, pitch); + return -EINVAL; + } + if (!IS_ALIGNED(height, height_align)) { + dev_warn(p->dev, "%s:%d db height (%d) invalid\n", + __func__, __LINE__, height); return -EINVAL; } + if (!IS_ALIGNED(base_offset, base_align)) { + dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset); + return -EINVAL; + } + ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; tmp = ntiles * bpe * 64 * nviews; if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { - dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %d have %ld)\n", + dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %u have %lu)\n", track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, radeon_bo_size(track->db_bo)); return -EINVAL; @@ -954,6 +1025,7 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); track->cb_color_base_last[tmp] = ib[idx]; track->cb_color_bo[tmp] = reloc->robj; + track->cb_color_bo_mc[tmp] = reloc->lobj.gpu_offset; break; case DB_DEPTH_BASE: r = r600_cs_packet_next_reloc(p, &reloc); @@ -965,6 +1037,7 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx track->db_offset = radeon_get_ib_value(p, idx) << 8; ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); track->db_bo = reloc->robj; + track->db_bo_mc = reloc->lobj.gpu_offset; break; case DB_HTILE_DATA_BASE: case SQ_PGM_START_FS: @@ -1086,16 +1159,25 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, struct radeon_bo *texture, struct radeon_bo *mipmap, + u64 base_offset, + u64 mip_offset, u32 tiling_flags) { struct r600_cs_track *track = p->track; u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0; - u32 word0, word1, l0_size, mipmap_size, pitch, pitch_align; + u32 word0, word1, l0_size, mipmap_size; + u32 height_align, pitch, pitch_align, depth_align; + u64 base_align; + struct array_mode_checker array_check; /* on legacy kernel we don't perform advanced check */ if (p->rdev == NULL) return 0; + /* convert to bytes */ + base_offset <<= 8; + mip_offset <<= 8; + word0 = radeon_get_ib_value(p, idx + 0); if (tiling_flags & RADEON_TILING_MACRO) word0 |= 
S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); @@ -1128,46 +1210,38 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i return -EINVAL; } - pitch = G_038000_PITCH(word0) + 1; - switch (G_038000_TILE_MODE(word0)) { - case V_038000_ARRAY_LINEAR_GENERAL: - pitch_align = 1; - /* XXX check height align */ - break; - case V_038000_ARRAY_LINEAR_ALIGNED: - pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8; - if (!IS_ALIGNED(pitch, pitch_align)) { - dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", - __func__, __LINE__, pitch); - return -EINVAL; - } - /* XXX check height align */ - break; - case V_038000_ARRAY_1D_TILED_THIN1: - pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8; - if (!IS_ALIGNED(pitch, pitch_align)) { - dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", - __func__, __LINE__, pitch); - return -EINVAL; - } - /* XXX check height align */ - break; - case V_038000_ARRAY_2D_TILED_THIN1: - pitch_align = max((u32)track->nbanks, - (u32)(((track->group_size / 8) / bpe) * track->nbanks)) / 8; - if (!IS_ALIGNED(pitch, pitch_align)) { - dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", - __func__, __LINE__, pitch); - return -EINVAL; - } - /* XXX check height align */ - break; - default: - dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, - G_038000_TILE_MODE(word0), word0); + /* pitch in texels */ + pitch = (G_038000_PITCH(word0) + 1) * 8; + array_check.array_mode = G_038000_TILE_MODE(word0); + array_check.group_size = track->group_size; + array_check.nbanks = track->nbanks; + array_check.npipes = track->npipes; + array_check.nsamples = 1; + array_check.bpe = bpe; + if (r600_get_array_mode_alignment(&array_check, + &pitch_align, &height_align, &depth_align, &base_align)) { + dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n", + __func__, __LINE__, G_038000_TILE_MODE(word0)); + return -EINVAL; + } + + /* XXX check height as well... 
*/ + + if (!IS_ALIGNED(pitch, pitch_align)) { + dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", + __func__, __LINE__, pitch); + return -EINVAL; + } + if (!IS_ALIGNED(base_offset, base_align)) { + dev_warn(p->dev, "%s:%d tex base offset (0x%llx) invalid\n", + __func__, __LINE__, base_offset); + return -EINVAL; + } + if (!IS_ALIGNED(mip_offset, base_align)) { + dev_warn(p->dev, "%s:%d tex mip offset (0x%llx) invalid\n", + __func__, __LINE__, mip_offset); return -EINVAL; } - /* XXX check offset align */ word0 = radeon_get_ib_value(p, idx + 4); word1 = radeon_get_ib_value(p, idx + 5); @@ -1402,7 +1476,10 @@ static int r600_packet3_check(struct radeon_cs_parser *p, mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); mipmap = reloc->robj; r = r600_check_texture_resource(p, idx+(i*7)+1, - texture, mipmap, reloc->lobj.tiling_flags); + texture, mipmap, + base_offset + radeon_get_ib_value(p, idx+1+(i*7)+2), + mip_offset + radeon_get_ib_value(p, idx+1+(i*7)+3), + reloc->lobj.tiling_flags); if (r) return r; ib[idx+1+(i*7)+2] += base_offset; diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h index d84612ae47e..33cda016b08 100644 --- a/drivers/gpu/drm/radeon/r600_reg.h +++ b/drivers/gpu/drm/radeon/r600_reg.h @@ -86,6 +86,7 @@ #define R600_HDP_NONSURFACE_BASE 0x2c04 #define R600_BUS_CNTL 0x5420 +# define R600_BIOS_ROM_DIS (1 << 1) #define R600_CONFIG_CNTL 0x5424 #define R600_CONFIG_MEMSIZE 0x5428 #define R600_CONFIG_F0_BASE 0x542C diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 966a793e225..bff4dc4f410 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -51,6 +51,12 @@ #define PTE_READABLE (1 << 5) #define PTE_WRITEABLE (1 << 6) +/* tiling bits */ +#define ARRAY_LINEAR_GENERAL 0x00000000 +#define ARRAY_LINEAR_ALIGNED 0x00000001 +#define ARRAY_1D_TILED_THIN1 0x00000002 +#define ARRAY_2D_TILED_THIN1 0x00000004 + /* Registers */ #define ARB_POP 0x2418 #define ENABLE_TC128 (1 << 30) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 73f600d39ad..3a7095743d4 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1262,6 +1262,10 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); (rdev->family == CHIP_RS400) || \ (rdev->family == CHIP_RS480)) #define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600)) +#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \ + (rdev->family == CHIP_RS690) || \ + (rdev->family == CHIP_RS740) || \ + (rdev->family >= CHIP_R600)) #define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620)) #define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730)) #define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR)) diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 04cac7ec903..bc5a2c3382d 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -98,6 +98,14 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev } } + /* some DCE3 boards have bad data for this entry */ + if (ASIC_IS_DCE3(rdev)) { + if ((i == 4) && + (gpio->usClkMaskRegisterIndex == 0x1fda) && + (gpio->sucI2cId.ucAccess == 0x94)) + gpio->sucI2cId.ucAccess = 0x14; + } + if (gpio->sucI2cId.ucAccess == id) { i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; @@ -174,6 +182,14 @@ void radeon_atombios_i2c_init(struct radeon_device 
*rdev) } } + /* some DCE3 boards have bad data for this entry */ + if (ASIC_IS_DCE3(rdev)) { + if ((i == 4) && + (gpio->usClkMaskRegisterIndex == 0x1fda) && + (gpio->sucI2cId.ucAccess == 0x94)) + gpio->sucI2cId.ucAccess = 0x14; + } + i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; @@ -526,8 +542,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) if (crev < 2) return false; - router.valid = false; - obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset); path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *) (ctx->bios + data_offset + @@ -624,6 +638,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) if (connector_type == DRM_MODE_CONNECTOR_Unknown) continue; + router.ddc_valid = false; + router.cd_valid = false; for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) { uint8_t grph_obj_id, grph_obj_num, grph_obj_type; @@ -647,9 +663,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) usDeviceTag)); } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) { - router.valid = false; for (k = 0; k < router_obj->ucNumberOfObjects; k++) { - u16 router_obj_id = le16_to_cpu(router_obj->asObjects[j].usObjectID); + u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID); if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) { ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *) (ctx->bios + data_offset + @@ -657,6 +672,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) ATOM_I2C_RECORD *i2c_record; ATOM_I2C_ID_CONFIG_ACCESS *i2c_config; ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path; + ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path; ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table = (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *) (ctx->bios + data_offset + @@ -690,10 +706,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE: ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *) record; - router.valid = true; - router.mux_type = ddc_path->ucMuxType; - router.mux_control_pin = ddc_path->ucMuxControlPin; - router.mux_state = ddc_path->ucMuxState[enum_id]; + router.ddc_valid = true; + router.ddc_mux_type = ddc_path->ucMuxType; + router.ddc_mux_control_pin = ddc_path->ucMuxControlPin; + router.ddc_mux_state = ddc_path->ucMuxState[enum_id]; + break; + case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE: + cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *) + record; + router.cd_valid = true; + router.cd_mux_type = cd_path->ucMuxType; + router.cd_mux_control_pin = cd_path->ucMuxControlPin; + router.cd_mux_state = cd_path->ucMuxState[enum_id]; break; } record = (ATOM_COMMON_RECORD_HEADER *) @@ -860,7 +884,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE; struct radeon_router router; - router.valid = false; + router.ddc_valid = false; + router.cd_valid = false; bios_connectors = kzalloc(bc_size, GFP_KERNEL); if (!bios_connectors) diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c index 7932dc4d6b9..c558685cc63 100644 --- a/drivers/gpu/drm/radeon/radeon_benchmark.c +++ b/drivers/gpu/drm/radeon/radeon_benchmark.c @@ -41,7 +41,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned 
bsize, size = bsize; n = 1024; - r = radeon_bo_create(rdev, NULL, size, true, sdomain, &sobj); + r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, sdomain, &sobj); if (r) { goto out_cleanup; } @@ -53,7 +53,7 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, if (r) { goto out_cleanup; } - r = radeon_bo_create(rdev, NULL, size, true, ddomain, &dobj); + r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, ddomain, &dobj); if (r) { goto out_cleanup; } diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 654787ec43f..8f2c7b50dcf 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -130,6 +130,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) } return true; } + static bool r700_read_disabled_bios(struct radeon_device *rdev) { uint32_t viph_control; @@ -143,7 +144,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev) bool r; viph_control = RREG32(RADEON_VIPH_CONTROL); - bus_cntl = RREG32(RADEON_BUS_CNTL); + bus_cntl = RREG32(R600_BUS_CNTL); d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); @@ -152,7 +153,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev) /* disable VIP */ WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); /* enable the rom */ - WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM)); + WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS)); /* Disable VGA mode */ WREG32(AVIVO_D1VGA_CONTROL, (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | @@ -191,7 +192,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev) cg_spll_status = RREG32(R600_CG_SPLL_STATUS); } WREG32(RADEON_VIPH_CONTROL, viph_control); - WREG32(RADEON_BUS_CNTL, bus_cntl); + WREG32(R600_BUS_CNTL, bus_cntl); WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); @@ -216,7 +217,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev) bool r; viph_control = RREG32(RADEON_VIPH_CONTROL); - bus_cntl = RREG32(RADEON_BUS_CNTL); + bus_cntl = RREG32(R600_BUS_CNTL); d1vga_control = RREG32(AVIVO_D1VGA_CONTROL); d2vga_control = RREG32(AVIVO_D2VGA_CONTROL); vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL); @@ -231,7 +232,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev) /* disable VIP */ WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN)); /* enable the rom */ - WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM)); + WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS)); /* Disable VGA mode */ WREG32(AVIVO_D1VGA_CONTROL, (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | @@ -262,7 +263,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev) /* restore regs */ WREG32(RADEON_VIPH_CONTROL, viph_control); - WREG32(RADEON_BUS_CNTL, bus_cntl); + WREG32(R600_BUS_CNTL, bus_cntl); WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 7b7ea269549..137b8075f6e 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -571,6 +571,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde } if (clk_mask && data_mask) { + /* system specific 
masks */ i2c.mask_clk_mask = clk_mask; i2c.mask_data_mask = data_mask; i2c.a_clk_mask = clk_mask; @@ -579,7 +580,19 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde i2c.en_data_mask = data_mask; i2c.y_clk_mask = clk_mask; i2c.y_data_mask = data_mask; + } else if ((ddc_line == RADEON_GPIOPAD_MASK) || + (ddc_line == RADEON_MDGPIO_MASK)) { + /* default gpiopad masks */ + i2c.mask_clk_mask = (0x20 << 8); + i2c.mask_data_mask = 0x80; + i2c.a_clk_mask = (0x20 << 8); + i2c.a_data_mask = 0x80; + i2c.en_clk_mask = (0x20 << 8); + i2c.en_data_mask = 0x80; + i2c.y_clk_mask = (0x20 << 8); + i2c.y_data_mask = 0x80; } else { + /* default masks for ddc pads */ i2c.mask_clk_mask = RADEON_GPIO_EN_1; i2c.mask_data_mask = RADEON_GPIO_EN_0; i2c.a_clk_mask = RADEON_GPIO_A_1; @@ -716,7 +729,7 @@ void radeon_combios_i2c_init(struct radeon_device *rdev) clk = RBIOS8(offset + 3 + (i * 5) + 3); data = RBIOS8(offset + 3 + (i * 5) + 4); i2c = combios_setup_i2c_bus(rdev, DDC_MONID, - clk, data); + (1 << clk), (1 << data)); rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK"); break; } diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 4dac4b0a02e..8afaf7a7459 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -183,13 +183,13 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector, continue; if (priority == true) { - DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); - DRM_INFO("in favor of %s\n", drm_get_connector_name(connector)); + DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); + DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector)); conflict->status = connector_status_disconnected; radeon_connector_update_scratch_regs(conflict, connector_status_disconnected); } else { - DRM_INFO("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector)); - DRM_INFO("in favor of %s\n", drm_get_connector_name(conflict)); + DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector)); + DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict)); current_status = connector_status_disconnected; } break; @@ -432,13 +432,13 @@ static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, mode->vdisplay == native_mode->vdisplay) { *native_mode = *mode; drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V); - DRM_INFO("Determined LVDS native mode details from EDID\n"); + DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n"); break; } } } if (!native_mode->clock) { - DRM_INFO("No LVDS native mode details, disabling RMX\n"); + DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n"); radeon_encoder->rmx_type = RMX_OFF; } } @@ -1008,9 +1008,21 @@ static void radeon_dp_connector_destroy(struct drm_connector *connector) static int radeon_dp_get_modes(struct drm_connector *connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; int ret; + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { + if (!radeon_dig_connector->edp_on) + atombios_set_edp_panel_power(connector, + ATOM_TRANSMITTER_ACTION_POWER_ON); + } ret = radeon_ddc_get_modes(radeon_connector); + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { + if (!radeon_dig_connector->edp_on) 
+ atombios_set_edp_panel_power(connector, + ATOM_TRANSMITTER_ACTION_POWER_OFF); + } + return ret; } @@ -1029,8 +1041,14 @@ radeon_dp_detect(struct drm_connector *connector, bool force) if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { /* eDP is always DP */ radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; + if (!radeon_dig_connector->edp_on) + atombios_set_edp_panel_power(connector, + ATOM_TRANSMITTER_ACTION_POWER_ON); if (radeon_dp_getdpcd(radeon_connector)) ret = connector_status_connected; + if (!radeon_dig_connector->edp_on) + atombios_set_edp_panel_power(connector, + ATOM_TRANSMITTER_ACTION_POWER_OFF); } else { radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { @@ -1116,7 +1134,7 @@ radeon_add_atom_connector(struct drm_device *dev, radeon_connector->shared_ddc = true; shared_ddc = true; } - if (radeon_connector->router_bus && router->valid && + if (radeon_connector->router_bus && router->ddc_valid && (radeon_connector->router.router_id == router->router_id)) { radeon_connector->shared_ddc = false; shared_ddc = false; @@ -1136,7 +1154,7 @@ radeon_add_atom_connector(struct drm_device *dev, radeon_connector->connector_object_id = connector_object_id; radeon_connector->hpd = *hpd; radeon_connector->router = *router; - if (router->valid) { + if (router->ddc_valid || router->cd_valid) { radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info); if (!radeon_connector->router_bus) goto failed; @@ -1157,6 +1175,8 @@ radeon_add_atom_connector(struct drm_device *dev, /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; connector->polled = DRM_CONNECTOR_POLL_CONNECT; + connector->interlace_allowed = true; + connector->doublescan_allowed = true; break; case DRM_MODE_CONNECTOR_DVIA: drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); @@ -1172,6 +1192,8 @@ radeon_add_atom_connector(struct drm_device *dev, 1); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; + connector->interlace_allowed = true; + connector->doublescan_allowed = true; break; case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_DVID: @@ -1208,6 +1230,11 @@ radeon_add_atom_connector(struct drm_device *dev, rdev->mode_info.load_detect_property, 1); } + connector->interlace_allowed = true; + if (connector_type == DRM_MODE_CONNECTOR_DVII) + connector->doublescan_allowed = true; + else + connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_HDMIA: case DRM_MODE_CONNECTOR_HDMIB: @@ -1238,6 +1265,11 @@ radeon_add_atom_connector(struct drm_device *dev, 0); } subpixel_order = SubPixelHorizontalRGB; + connector->interlace_allowed = true; + if (connector_type == DRM_MODE_CONNECTOR_HDMIB) + connector->doublescan_allowed = true; + else + connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_DisplayPort: case DRM_MODE_CONNECTOR_eDP: @@ -1275,6 +1307,9 @@ radeon_add_atom_connector(struct drm_device *dev, rdev->mode_info.underscan_vborder_property, 0); } + connector->interlace_allowed = true; + /* in theory with a DP to VGA converter... 
*/ + connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_Composite: @@ -1290,6 +1325,8 @@ radeon_add_atom_connector(struct drm_device *dev, radeon_atombios_get_tv_info(rdev)); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; + connector->interlace_allowed = false; + connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_LVDS: radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); @@ -1308,6 +1345,8 @@ radeon_add_atom_connector(struct drm_device *dev, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); subpixel_order = SubPixelHorizontalRGB; + connector->interlace_allowed = false; + connector->doublescan_allowed = false; break; } @@ -1385,6 +1424,8 @@ radeon_add_legacy_connector(struct drm_device *dev, /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; connector->polled = DRM_CONNECTOR_POLL_CONNECT; + connector->interlace_allowed = true; + connector->doublescan_allowed = true; break; case DRM_MODE_CONNECTOR_DVIA: drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); @@ -1400,6 +1441,8 @@ radeon_add_legacy_connector(struct drm_device *dev, 1); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; + connector->interlace_allowed = true; + connector->doublescan_allowed = true; break; case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_DVID: @@ -1417,6 +1460,11 @@ radeon_add_legacy_connector(struct drm_device *dev, 1); } subpixel_order = SubPixelHorizontalRGB; + connector->interlace_allowed = true; + if (connector_type == DRM_MODE_CONNECTOR_DVII) + connector->doublescan_allowed = true; + else + connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_Composite: @@ -1439,6 +1487,8 @@ radeon_add_legacy_connector(struct drm_device *dev, radeon_combios_get_tv_info(rdev)); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; + connector->interlace_allowed = false; + connector->doublescan_allowed = false; break; case DRM_MODE_CONNECTOR_LVDS: drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); @@ -1452,6 +1502,8 @@ radeon_add_legacy_connector(struct drm_device *dev, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN); subpixel_order = SubPixelHorizontalRGB; + connector->interlace_allowed = false; + connector->doublescan_allowed = false; break; } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 8adfedfe547..e12e79326cb 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -180,7 +180,7 @@ int radeon_wb_init(struct radeon_device *rdev) int r; if (rdev->wb.wb_obj == NULL) { - r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, + r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj); if (r) { dev_warn(rdev->dev, "(%d) create WB bo failed\n", r); @@ -286,7 +286,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 mc->mc_vram_size = mc->aper_size; } mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; - dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n", + dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", mc->mc_vram_size >> 20, mc->vram_start, mc->vram_end, mc->real_vram_size >> 20); } @@ -323,7 +323,7 @@ void 
radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align; } mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; - dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n", + dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n", mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end); } diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 0383631da69..1df4dc6c063 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -315,10 +315,14 @@ static void radeon_print_display_setup(struct drm_device *dev) radeon_connector->ddc_bus->rec.en_data_reg, radeon_connector->ddc_bus->rec.y_clk_reg, radeon_connector->ddc_bus->rec.y_data_reg); - if (radeon_connector->router_bus) + if (radeon_connector->router.ddc_valid) DRM_INFO(" DDC Router 0x%x/0x%x\n", - radeon_connector->router.mux_control_pin, - radeon_connector->router.mux_state); + radeon_connector->router.ddc_mux_control_pin, + radeon_connector->router.ddc_mux_state); + if (radeon_connector->router.cd_valid) + DRM_INFO(" Clock/Data Router 0x%x/0x%x\n", + radeon_connector->router.cd_mux_control_pin, + radeon_connector->router.cd_mux_state); } else { if (connector->connector_type == DRM_MODE_CONNECTOR_VGA || connector->connector_type == DRM_MODE_CONNECTOR_DVII || @@ -398,8 +402,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) int ret = 0; /* on hw with routers, select right port */ - if (radeon_connector->router.valid) - radeon_router_select_port(radeon_connector); + if (radeon_connector->router.ddc_valid) + radeon_router_select_ddc_port(radeon_connector); if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) { @@ -432,8 +436,8 @@ static int radeon_ddc_dump(struct drm_connector *connector) int ret = 0; /* on hw with routers, select right port */ - if (radeon_connector->router.valid) - radeon_router_select_port(radeon_connector); + if (radeon_connector->router.ddc_valid) + radeon_router_select_ddc_port(radeon_connector); if (!radeon_connector->ddc_bus) return -1; diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index ae58b6849a2..041943df966 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -176,6 +176,7 @@ static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder) return false; } } + void radeon_link_encoder_connector(struct drm_device *dev) { @@ -228,6 +229,27 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder) return NULL; } +struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct drm_encoder *other_encoder; + struct radeon_encoder *other_radeon_encoder; + + if (radeon_encoder->is_ext_encoder) + return NULL; + + list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) { + if (other_encoder == encoder) + continue; + other_radeon_encoder = to_radeon_encoder(other_encoder); + if (other_radeon_encoder->is_ext_encoder && + (radeon_encoder->devices & other_radeon_encoder->devices)) + return other_encoder; + } + return NULL; +} + void radeon_panel_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { @@ -426,52 +448,49 @@ atombios_tv_setup(struct 
drm_encoder *encoder, int action) } -void -atombios_external_tmds_setup(struct drm_encoder *encoder, int action) -{ - struct drm_device *dev = encoder->dev; - struct radeon_device *rdev = dev->dev_private; - struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION args; - int index = 0; - - memset(&args, 0, sizeof(args)); - - index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); - - args.sXTmdsEncoder.ucEnable = action; - - if (radeon_encoder->pixel_clock > 165000) - args.sXTmdsEncoder.ucMisc = PANEL_ENCODER_MISC_DUAL; - - /*if (pScrn->rgbBits == 8)*/ - args.sXTmdsEncoder.ucMisc |= (1 << 1); - - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - -} +union dvo_encoder_control { + ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds; + DVO_ENCODER_CONTROL_PS_ALLOCATION dvo; + DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3; +}; -static void -atombios_ddia_setup(struct drm_encoder *encoder, int action) +void +atombios_dvo_setup(struct drm_encoder *encoder, int action) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - DVO_ENCODER_CONTROL_PS_ALLOCATION args; - int index = 0; + union dvo_encoder_control args; + int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); memset(&args, 0, sizeof(args)); - index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl); + if (ASIC_IS_DCE3(rdev)) { + /* DCE3+ */ + args.dvo_v3.ucAction = action; + args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); + args.dvo_v3.ucDVOConfig = 0; /* XXX */ + } else if (ASIC_IS_DCE2(rdev)) { + /* DCE2 (pre-DCE3 R6xx, RS600/690/740 */ + args.dvo.sDVOEncoder.ucAction = action; + args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); + /* DFP1, CRT1, TV1 depending on the type of port */ + args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX; + + if (radeon_encoder->pixel_clock > 165000) + args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL; + } else { + /* R4xx, R5xx */ + args.ext_tmds.sXTmdsEncoder.ucEnable = action; - args.sDVOEncoder.ucAction = action; - args.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); + if (radeon_encoder->pixel_clock > 165000) + args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL; - if (radeon_encoder->pixel_clock > 165000) - args.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute = PANEL_ENCODER_MISC_DUAL; + /*if (pScrn->rgbBits == 8)*/ + args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB; + } atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); - } union lvds_encoder_control { @@ -532,14 +551,14 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL) args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB) - args.v1.ucMisc |= (1 << 1); + args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB; } else { if (dig->linkb) args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB; if (radeon_encoder->pixel_clock > 165000) args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; /*if (pScrn->rgbBits == 8) */ - args.v1.ucMisc |= (1 << 1); + args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB; } break; case 2: @@ -595,6 +614,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) int atombios_get_encoder_mode(struct drm_encoder *encoder) { + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct 
drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct drm_connector *connector; @@ -602,9 +622,20 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) struct radeon_connector_atom_dig *dig_connector; connector = radeon_get_connector_for_encoder(encoder); - if (!connector) - return 0; - + if (!connector) { + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: + return ATOM_ENCODER_MODE_DVI; + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: + default: + return ATOM_ENCODER_MODE_CRT; + } + } radeon_connector = to_radeon_connector(connector); switch (connector->connector_type) { @@ -834,6 +865,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t memset(&args, 0, sizeof(args)); switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: + index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); + break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: @@ -978,6 +1012,105 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } +void +atombios_set_edp_panel_power(struct drm_connector *connector, int action) +{ + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct drm_device *dev = radeon_connector->base.dev; + struct radeon_device *rdev = dev->dev_private; + union dig_transmitter_control args; + int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); + uint8_t frev, crev; + + if (connector->connector_type != DRM_MODE_CONNECTOR_eDP) + return; + + if (!ASIC_IS_DCE4(rdev)) + return; + + if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) || + (action != ATOM_TRANSMITTER_ACTION_POWER_OFF)) + return; + + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) + return; + + memset(&args, 0, sizeof(args)); + + args.v1.ucAction = action; + + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); +} + +union external_encoder_control { + EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1; +}; + +static void +atombios_external_encoder_setup(struct drm_encoder *encoder, + struct drm_encoder *ext_encoder, + int action) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + union external_encoder_control args; + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); + int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl); + u8 frev, crev; + int dp_clock = 0; + int dp_lane_count = 0; + int connector_object_id = 0; + + if (connector) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct radeon_connector_atom_dig *dig_connector = + radeon_connector->con_priv; + + dp_clock = dig_connector->dp_clock; + dp_lane_count = dig_connector->dp_lane_count; + connector_object_id = + (radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; + } + + memset(&args, 0, sizeof(args)); + + if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) + return; + + switch (frev) { + case 1: + /* no params on frev 1 */ + 
break; + case 2: + switch (crev) { + case 1: + case 2: + args.v1.sDigEncoder.ucAction = action; + args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); + args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder); + + if (args.v1.sDigEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) { + if (dp_clock == 270000) + args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; + args.v1.sDigEncoder.ucLaneNum = dp_lane_count; + } else if (radeon_encoder->pixel_clock > 165000) + args.v1.sDigEncoder.ucLaneNum = 8; + else + args.v1.sDigEncoder.ucLaneNum = 4; + break; + default: + DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); + return; + } + break; + default: + DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); + return; + } + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); +} + static void atombios_yuv_setup(struct drm_encoder *encoder, bool enable) { @@ -1021,6 +1154,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args; int index = 0; bool is_dig = false; @@ -1043,9 +1177,14 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) break; case ENCODER_OBJECT_ID_INTERNAL_DVO1: case ENCODER_OBJECT_ID_INTERNAL_DDI: - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); break; + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: + if (ASIC_IS_DCE3(rdev)) + is_dig = true; + else + index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl); + break; case ENCODER_OBJECT_ID_INTERNAL_LVDS: index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl); break; @@ -1082,34 +1221,85 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); + if (connector && + (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct radeon_connector_atom_dig *radeon_dig_connector = + radeon_connector->con_priv; + atombios_set_edp_panel_power(connector, + ATOM_TRANSMITTER_ACTION_POWER_ON); + radeon_dig_connector->edp_on = true; + } dp_link_train(encoder, connector); if (ASIC_IS_DCE4(rdev)) atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON); } + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); + if (ASIC_IS_DCE4(rdev)) atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF); + if (connector && + (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + struct radeon_connector_atom_dig *radeon_dig_connector = + radeon_connector->con_priv; + atombios_set_edp_panel_power(connector, + ATOM_TRANSMITTER_ACTION_POWER_OFF); + radeon_dig_connector->edp_on = 
false; + } } + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); break; } } else { switch (mode) { case DRM_MODE_DPMS_ON: args.ucAction = ATOM_ENABLE; + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { + args.ucAction = ATOM_LCD_BLON; + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + } break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: args.ucAction = ATOM_DISABLE; + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { + args.ucAction = ATOM_LCD_BLOFF; + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + } break; } - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } + + if (ext_encoder) { + int action; + + switch (mode) { + case DRM_MODE_DPMS_ON: + default: + action = ATOM_ENABLE; + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + action = ATOM_DISABLE; + break; + } + atombios_external_encoder_setup(encoder, ext_encoder, action); + } + radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); } @@ -1242,7 +1432,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) break; default: DRM_ERROR("Unknown table version: %d, %d\n", frev, crev); - break; + return; } atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); @@ -1357,6 +1547,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder); radeon_encoder->pixel_clock = adjusted_mode->clock; @@ -1400,11 +1591,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, } break; case ENCODER_OBJECT_ID_INTERNAL_DDI: - atombios_ddia_setup(encoder, ATOM_ENABLE); - break; case ENCODER_OBJECT_ID_INTERNAL_DVO1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: - atombios_external_tmds_setup(encoder, ATOM_ENABLE); + atombios_dvo_setup(encoder, ATOM_ENABLE); break; case ENCODER_OBJECT_ID_INTERNAL_DAC1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: @@ -1419,6 +1608,11 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, } break; } + + if (ext_encoder) { + atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); + } + atombios_apply_encoder_quirks(encoder, adjusted_mode); if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { @@ -1520,6 +1714,7 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); if (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) { @@ -1531,6 +1726,13 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) radeon_atom_output_lock(encoder, true); radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + /* select the clock/data port if it uses a router */ + if (connector) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); + if (radeon_connector->router.cd_valid) + 
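
/*
 * Illustrative ordering sketch for the eDP hunks above (hypothetical
 * helper names, not the real driver calls): the panel rail is raised
 * before link training and dropped only after the video stream is
 * stopped, mirroring the edp_on bookkeeping added to the connector.
 */
#include <stdbool.h>
#include <stdio.h>

struct edp_state { bool panel_on; };

static void edp_dpms(struct edp_state *s, bool on)
{
	if (on) {
		if (!s->panel_on) {
			puts("panel power on");		/* rail comes up first */
			s->panel_on = true;
		}
		puts("train DP link");			/* then the link */
		puts("enable video stream");		/* then pixels */
	} else {
		puts("disable transmitter output");
		puts("disable video stream");		/* stop pixels first */
		if (s->panel_on) {
			puts("panel power off");	/* rail goes last */
			s->panel_on = false;
		}
	}
}

int main(void)
{
	struct edp_state s = { .panel_on = false };

	edp_dpms(&s, true);
	edp_dpms(&s, false);
	return 0;
}
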
radeon_router_select_cd_port(radeon_connector); + } + /* this is needed for the pll/ss setup to work correctly in some cases */ atombios_set_encoder_crtc_source(encoder); } @@ -1547,6 +1749,23 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig; + + /* check for pre-DCE3 cards with shared encoders; + * can't really use the links individually, so don't disable + * the encoder if it's in use by another connector + */ + if (!ASIC_IS_DCE3(rdev)) { + struct drm_encoder *other_encoder; + struct radeon_encoder *other_radeon_encoder; + + list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) { + other_radeon_encoder = to_radeon_encoder(other_encoder); + if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) && + drm_helper_encoder_in_use(other_encoder)) + goto disable_done; + } + } + radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); switch (radeon_encoder->encoder_id) { @@ -1570,11 +1789,9 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) } break; case ENCODER_OBJECT_ID_INTERNAL_DDI: - atombios_ddia_setup(encoder, ATOM_DISABLE); - break; case ENCODER_OBJECT_ID_INTERNAL_DVO1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: - atombios_external_tmds_setup(encoder, ATOM_DISABLE); + atombios_dvo_setup(encoder, ATOM_DISABLE); break; case ENCODER_OBJECT_ID_INTERNAL_DAC1: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: @@ -1586,6 +1803,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) break; } +disable_done: if (radeon_encoder_is_digital(encoder)) { if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) r600_hdmi_disable(encoder); @@ -1595,6 +1813,53 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) radeon_encoder->active_device = 0; } +/* these are handled by the primary encoders */ +static void radeon_atom_ext_prepare(struct drm_encoder *encoder) +{ + +} + +static void radeon_atom_ext_commit(struct drm_encoder *encoder) +{ + +} + +static void +radeon_atom_ext_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + +} + +static void radeon_atom_ext_disable(struct drm_encoder *encoder) +{ + +} + +static void +radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode) +{ + +} + +static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = { + .dpms = radeon_atom_ext_dpms, + .mode_fixup = radeon_atom_ext_mode_fixup, + .prepare = radeon_atom_ext_prepare, + .mode_set = radeon_atom_ext_mode_set, + .commit = radeon_atom_ext_commit, + .disable = radeon_atom_ext_disable, + /* no detect for TMDS/LVDS yet */ +}; + static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = { .dpms = radeon_atom_encoder_dpms, .mode_fixup = radeon_atom_mode_fixup, @@ -1704,6 +1969,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t radeon_encoder->devices = supported_device; radeon_encoder->rmx_type = RMX_OFF; radeon_encoder->underscan_type = UNDERSCAN_OFF; + radeon_encoder->is_ext_encoder = false; switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_LVDS: @@ -1745,6 +2011,9 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, 
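
/*
 * Illustrative sketch of the pre-DCE3 guard above: before tearing an
 * encoder down, walk the encoder list and bail out if another entry
 * with the same hardware id is still in use.  A plain array stands in
 * for the DRM encoder list; names are hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>

struct enc { int id; bool in_use; };

static bool encoder_shared_and_busy(const struct enc *encs, size_t n, size_t self)
{
	for (size_t i = 0; i < n; i++) {
		if (i == self)
			continue;
		if (encs[i].id == encs[self].id && encs[i].in_use)
			return true;	/* someone else still drives this block */
	}
	return false;
}

int main(void)
{
	struct enc encs[] = {
		{ .id = 1, .in_use = true },
		{ .id = 1, .in_use = true },
		{ .id = 2, .in_use = false },
	};

	/* encoder 0 shares id 1 with the in-use encoder 1: skip teardown */
	return encoder_shared_and_busy(encs, 3, 0) ? 0 : 1;
}
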
uint32_t radeon_encoder->rmx_type = RMX_FULL; drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); + } else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); + radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); } else { drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); @@ -1753,5 +2022,22 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t } drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); break; + case ENCODER_OBJECT_ID_SI170B: + case ENCODER_OBJECT_ID_CH7303: + case ENCODER_OBJECT_ID_EXTERNAL_SDVOA: + case ENCODER_OBJECT_ID_EXTERNAL_SDVOB: + case ENCODER_OBJECT_ID_TITFP513: + case ENCODER_OBJECT_ID_VT1623: + case ENCODER_OBJECT_ID_HDMI_SI1930: + /* these are handled by the primary encoders */ + radeon_encoder->is_ext_encoder = true; + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); + else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC); + else + drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); + drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs); + break; } } diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 216392d0353..daacb281dfa 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -240,7 +240,8 @@ retry: */ if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) { /* good news we believe it's a lockup */ - WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq); + WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", + fence->seq, seq); /* FIXME: what should we do ? 
marking everyone * as signaled for now */ diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index e65b90317fa..65016117d95 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -79,8 +79,8 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev) if (rdev->gart.table.vram.robj == NULL) { r = radeon_bo_create(rdev, NULL, rdev->gart.table_size, - true, RADEON_GEM_DOMAIN_VRAM, - &rdev->gart.table.vram.robj); + PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, + &rdev->gart.table.vram.robj); if (r) { return r; } diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index d1e595d9172..df95eb83dac 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -67,7 +67,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size, if (alignment < PAGE_SIZE) { alignment = PAGE_SIZE; } - r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj); + r = radeon_bo_create(rdev, gobj, size, alignment, kernel, initial_domain, &robj); if (r) { if (r != -ERESTARTSYS) DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index 6a13ee38a5b..ded2a45bc95 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c @@ -53,8 +53,8 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector) }; /* on hw with routers, select right port */ - if (radeon_connector->router.valid) - radeon_router_select_port(radeon_connector); + if (radeon_connector->router.ddc_valid) + radeon_router_select_ddc_port(radeon_connector); ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2); if (ret == 2) @@ -896,7 +896,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, ((rdev->family <= CHIP_RS480) || ((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) { /* set the radeon hw i2c adapter */ - sprintf(i2c->adapter.name, "Radeon i2c hw bus %s", name); + snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), + "Radeon i2c hw bus %s", name); i2c->adapter.algo = &radeon_i2c_algo; ret = i2c_add_adapter(&i2c->adapter); if (ret) { @@ -905,7 +906,8 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, } } else { /* set the radeon bit adapter */ - sprintf(i2c->adapter.name, "Radeon i2c bit bus %s", name); + snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), + "Radeon i2c bit bus %s", name); i2c->adapter.algo_data = &i2c->algo.bit; i2c->algo.bit.pre_xfer = pre_xfer; i2c->algo.bit.post_xfer = post_xfer; @@ -946,6 +948,8 @@ struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev, i2c->rec = *rec; i2c->adapter.owner = THIS_MODULE; i2c->dev = dev; + snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), + "Radeon aux bus %s", name); i2c_set_adapdata(&i2c->adapter, i2c); i2c->adapter.algo_data = &i2c->algo.dp; i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch; @@ -1084,26 +1088,51 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus, addr, val); } -/* router switching */ -void radeon_router_select_port(struct radeon_connector *radeon_connector) +/* ddc router switching */ +void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector) { u8 val; - if (!radeon_connector->router.valid) + if (!radeon_connector->router.ddc_valid) return; radeon_i2c_get_byte(radeon_connector->router_bus, radeon_connector->router.i2c_addr, 0x3, &val); - val &= 
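
/*
 * Illustrative sketch of the sprintf->snprintf conversions above:
 * format into a fixed-size name buffer with an explicit bound so an
 * over-long suffix is truncated instead of overrunning the array.
 * The 48-byte buffer and helper name are arbitrary for the example.
 */
#include <stdio.h>

struct adapter { char name[48]; };

static void set_adapter_name(struct adapter *ad, const char *bus, const char *suffix)
{
	/* never writes more than sizeof(ad->name) bytes, NUL included */
	snprintf(ad->name, sizeof(ad->name), "Radeon i2c %s bus %s", bus, suffix);
}

int main(void)
{
	struct adapter ad;

	set_adapter_name(&ad, "hw", "DDC1");
	puts(ad.name);
	return 0;
}
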
radeon_connector->router.mux_control_pin; + val &= ~radeon_connector->router.ddc_mux_control_pin; radeon_i2c_put_byte(radeon_connector->router_bus, radeon_connector->router.i2c_addr, 0x3, val); radeon_i2c_get_byte(radeon_connector->router_bus, radeon_connector->router.i2c_addr, 0x1, &val); - val &= radeon_connector->router.mux_control_pin; - val |= radeon_connector->router.mux_state; + val &= ~radeon_connector->router.ddc_mux_control_pin; + val |= radeon_connector->router.ddc_mux_state; + radeon_i2c_put_byte(radeon_connector->router_bus, + radeon_connector->router.i2c_addr, + 0x1, val); +} + +/* clock/data router switching */ +void radeon_router_select_cd_port(struct radeon_connector *radeon_connector) +{ + u8 val; + + if (!radeon_connector->router.cd_valid) + return; + + radeon_i2c_get_byte(radeon_connector->router_bus, + radeon_connector->router.i2c_addr, + 0x3, &val); + val &= ~radeon_connector->router.cd_mux_control_pin; + radeon_i2c_put_byte(radeon_connector->router_bus, + radeon_connector->router.i2c_addr, + 0x3, val); + radeon_i2c_get_byte(radeon_connector->router_bus, + radeon_connector->router.i2c_addr, + 0x1, &val); + val &= ~radeon_connector->router.cd_mux_control_pin; + val |= radeon_connector->router.cd_mux_state; radeon_i2c_put_byte(radeon_connector->router_bus, radeon_connector->router.i2c_addr, 0x1, val); diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c index 2f349a30019..465746bd51b 100644 --- a/drivers/gpu/drm/radeon/radeon_irq.c +++ b/drivers/gpu/drm/radeon/radeon_irq.c @@ -76,7 +76,7 @@ int radeon_enable_vblank(struct drm_device *dev, int crtc) default: DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", crtc); - return EINVAL; + return -EINVAL; } } else { switch (crtc) { @@ -89,7 +89,7 @@ int radeon_enable_vblank(struct drm_device *dev, int crtc) default: DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", crtc); - return EINVAL; + return -EINVAL; } } diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 0b8397000f4..59f834ba283 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -670,7 +670,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, if (rdev->is_atom_bios) { radeon_encoder->pixel_clock = adjusted_mode->clock; - atombios_external_tmds_setup(encoder, ATOM_ENABLE); + atombios_dvo_setup(encoder, ATOM_ENABLE); fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); } else { fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 92457163d07..e301c6f9e05 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -375,6 +375,7 @@ struct radeon_encoder { int hdmi_config_offset; int hdmi_audio_workaround; int hdmi_buffer_status; + bool is_ext_encoder; }; struct radeon_connector_atom_dig { @@ -385,6 +386,7 @@ struct radeon_connector_atom_dig { u8 dp_sink_type; int dp_clock; int dp_lane_count; + bool edp_on; }; struct radeon_gpio_rec { @@ -401,13 +403,19 @@ struct radeon_hpd { }; struct radeon_router { - bool valid; u32 router_id; struct radeon_i2c_bus_rec i2c_info; u8 i2c_addr; - u8 mux_type; - u8 mux_control_pin; - u8 mux_state; + /* i2c mux */ + bool ddc_valid; + u8 ddc_mux_type; + u8 ddc_mux_control_pin; + u8 ddc_mux_state; + /* clock/data mux */ + bool cd_valid; + u8 cd_mux_type; + u8 cd_mux_control_pin; + u8 cd_mux_state; }; struct radeon_connector 
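
/*
 * Illustrative sketch of the DDC/clock-data mux selection above: read
 * the router register, clear only the control bits for this port, OR
 * in the desired state, and write the byte back.  Register access is
 * faked with a plain variable and the mask values are made up.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t mux_reg = 0xf0;	/* pretend router register contents */

static void select_port(uint8_t control_mask, uint8_t state)
{
	uint8_t val = mux_reg;		/* i2c read in the driver */

	val &= (uint8_t)~control_mask;	/* drop the old routing */
	val |= state;			/* select the new port */
	mux_reg = val;			/* i2c write in the driver */
}

int main(void)
{
	select_port(0x03, 0x01);
	printf("router register now 0x%02x\n", mux_reg);
	return 0;
}
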
{ @@ -488,7 +496,8 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c, u8 slave_addr, u8 addr, u8 val); -extern void radeon_router_select_port(struct radeon_connector *radeon_connector); +extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector); +extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector); extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); @@ -516,9 +525,10 @@ struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv); struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index); struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index); -extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action); +extern void atombios_dvo_setup(struct drm_encoder *encoder, int action); extern void atombios_digital_setup(struct drm_encoder *encoder, int action); extern int atombios_get_encoder_mode(struct drm_encoder *encoder); +extern void atombios_set_edp_panel_power(struct drm_connector *connector, int action); extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); extern void radeon_crtc_load_lut(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index d7ab9141641..a598d0049aa 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -69,7 +69,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) u32 c = 0; rbo->placement.fpfn = 0; - rbo->placement.lpfn = rbo->rdev->mc.active_vram_size >> PAGE_SHIFT; + rbo->placement.lpfn = 0; rbo->placement.placement = rbo->placements; rbo->placement.busy_placement = rbo->placements; if (domain & RADEON_GEM_DOMAIN_VRAM) @@ -86,11 +86,13 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) } int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, - unsigned long size, bool kernel, u32 domain, - struct radeon_bo **bo_ptr) + unsigned long size, int byte_align, bool kernel, u32 domain, + struct radeon_bo **bo_ptr) { struct radeon_bo *bo; enum ttm_bo_type type; + unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; + unsigned long max_size = 0; int r; if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { @@ -102,6 +104,16 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, type = ttm_bo_type_device; } *bo_ptr = NULL; + + /* maximun bo size is the minimun btw visible vram and gtt size */ + max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size); + if ((page_align << PAGE_SHIFT) >= max_size) { + printk(KERN_WARNING "%s:%d alloc size %ldM bigger than %ldMb limit\n", + __func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20); + return -ENOMEM; + } + +retry: bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); if (bo == NULL) return -ENOMEM; @@ -109,13 +121,11 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, bo->gobj = gobj; bo->surface_reg = -1; INIT_LIST_HEAD(&bo->list); - -retry: radeon_ttm_placement_from_domain(bo, domain); /* Kernel allocation are uninterruptible */ mutex_lock(&rdev->vram_mutex); r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type, - &bo->placement, 0, 0, !kernel, NULL, 
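
/*
 * Illustrative sketch of the new byte_align handling above: callers
 * now pass a byte alignment, which is rounded up to whole pages, and
 * a request is refused when it cannot fit in the smaller of the
 * visible VRAM and GTT apertures.  Page size and the sample sizes
 * below are made-up constants, not the device's values.
 */
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12
#define SKETCH_PAGE_SIZE  (1UL << SKETCH_PAGE_SHIFT)

static unsigned long roundup_ul(unsigned long x, unsigned long to)
{
	return ((x + to - 1) / to) * to;
}

static int bo_fits(unsigned long size, unsigned long byte_align,
		   unsigned long visible_vram, unsigned long gtt_size)
{
	unsigned long page_align = roundup_ul(byte_align, SKETCH_PAGE_SIZE) >> SKETCH_PAGE_SHIFT;
	unsigned long max_size = visible_vram < gtt_size ? visible_vram : gtt_size;

	if ((page_align << SKETCH_PAGE_SHIFT) >= max_size)
		return -1;	/* alignment alone exceeds the aperture */
	if (size >= max_size)
		return -1;	/* object cannot fit either */
	return 0;
}

int main(void)
{
	/* 1 MiB object, page alignment, 256 MiB visible VRAM, 512 MiB GTT */
	printf("fits: %d\n", bo_fits(1UL << 20, SKETCH_PAGE_SIZE,
				     256UL << 20, 512UL << 20));
	return 0;
}
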
size, + &bo->placement, page_align, 0, !kernel, NULL, size, &radeon_ttm_bo_destroy); mutex_unlock(&rdev->vram_mutex); if (unlikely(r != 0)) { diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 3481bc7f6f5..d143702b244 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -137,9 +137,10 @@ static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, } extern int radeon_bo_create(struct radeon_device *rdev, - struct drm_gem_object *gobj, unsigned long size, - bool kernel, u32 domain, - struct radeon_bo **bo_ptr); + struct drm_gem_object *gobj, unsigned long size, + int byte_align, + bool kernel, u32 domain, + struct radeon_bo **bo_ptr); extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); extern void radeon_bo_kunmap(struct radeon_bo *bo); extern void radeon_bo_unref(struct radeon_bo **bo); diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 6ea798ce821..06e79822a2b 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -176,8 +176,8 @@ int radeon_ib_pool_init(struct radeon_device *rdev) INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib); /* Allocate 1M object buffer */ r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, - true, RADEON_GEM_DOMAIN_GTT, - &rdev->ib_pool.robj); + PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT, + &rdev->ib_pool.robj); if (r) { DRM_ERROR("radeon: failed to ib pool (%d).\n", r); return r; @@ -332,7 +332,7 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size) rdev->cp.ring_size = ring_size; /* Allocate ring buffer */ if (rdev->cp.ring_obj == NULL) { - r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true, + r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT, &rdev->cp.ring_obj); if (r) { diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index 313c96bc09d..5b44f652145 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c @@ -52,7 +52,7 @@ void radeon_test_moves(struct radeon_device *rdev) goto out_cleanup; } - r = radeon_bo_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM, + r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, &vram_obj); if (r) { DRM_ERROR("Failed to create VRAM object\n"); @@ -71,7 +71,7 @@ void radeon_test_moves(struct radeon_device *rdev) void **gtt_start, **gtt_end; void **vram_start, **vram_end; - r = radeon_bo_create(rdev, NULL, size, true, + r = radeon_bo_create(rdev, NULL, size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT, gtt_obj + i); if (r) { DRM_ERROR("Failed to create GTT object %d\n", i); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index fe95bb35317..1272e4b6a1d 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -529,7 +529,7 @@ int radeon_ttm_init(struct radeon_device *rdev) DRM_ERROR("Failed initializing VRAM heap.\n"); return r; } - r = radeon_bo_create(rdev, NULL, 256 * 1024, true, + r = radeon_bo_create(rdev, NULL, 256 * 1024, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, &rdev->stollen_vga_memory); if (r) { @@ -689,7 +689,8 @@ static int radeon_ttm_backend_bind(struct ttm_backend *backend, gtt = container_of(backend, struct radeon_ttm_backend, backend); gtt->offset = bo_mem->start << PAGE_SHIFT; if (!gtt->num_pages) { - WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, 
backend); + WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", + gtt->num_pages, bo_mem, backend); } r = radeon_gart_bind(gtt->rdev, gtt->offset, gtt->num_pages, gtt->pages); diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index f683e51a2a0..5512e4e5e63 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -78,7 +78,7 @@ int rs400_gart_init(struct radeon_device *rdev) int r; if (rdev->gart.table.ram.ptr) { - WARN(1, "RS400 GART already initialized.\n"); + WARN(1, "RS400 GART already initialized\n"); return 0; } /* Check gart size */ diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index b091a1f6fa4..f1c6e02c2e6 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -375,7 +375,7 @@ int rs600_gart_init(struct radeon_device *rdev) int r; if (rdev->gart.table.vram.robj) { - WARN(1, "RS600 GART already initialized.\n"); + WARN(1, "RS600 GART already initialized\n"); return 0; } /* Initialize common gart structure */ @@ -505,7 +505,7 @@ int rs600_irq_set(struct radeon_device *rdev) ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1); if (!rdev->irq.installed) { - WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n"); + WARN(1, "Can't enable IRQ/MSI because no handler is installed\n"); WREG32(R_000040_GEN_INT_CNTL, 0); return -EINVAL; } diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 245374e2b77..4dfead8cee3 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -915,8 +915,8 @@ static int rv770_vram_scratch_init(struct radeon_device *rdev) if (rdev->vram_scratch.robj == NULL) { r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, - true, RADEON_GEM_DOMAIN_VRAM, - &rdev->vram_scratch.robj); + PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, + &rdev->vram_scratch.robj); if (r) { return r; } diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index a1cb783c713..148a322d8f5 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -27,14 +27,6 @@ /* * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> */ -/* Notes: - * - * We store bo pointer in drm_mm_node struct so we know which bo own a - * specific node. There is no protection on the pointer, thus to make - * sure things don't go berserk you have to access this pointer while - * holding the global lru lock and make sure anytime you free a node you - * reset the pointer to NULL. - */ #include "ttm/ttm_module.h" #include "ttm/ttm_bo_driver.h" @@ -45,6 +37,7 @@ #include <linux/mm.h> #include <linux/file.h> #include <linux/module.h> +#include <asm/atomic.h> #define TTM_ASSERT_LOCKED(param) #define TTM_DEBUG(fmt, arg...) @@ -231,6 +224,9 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, int ret; while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) { + /** + * Deadlock avoidance for multi-bo reserving. + */ if (use_sequence && bo->seq_valid && (sequence - bo->val_seq < (1 << 31))) { return -EAGAIN; @@ -248,6 +244,14 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo, } if (use_sequence) { + /** + * Wake up waiters that may need to recheck for deadlock, + * if we decreased the sequence number. 
+ */ + if (unlikely((bo->val_seq - sequence < (1 << 31)) + || !bo->seq_valid)) + wake_up_all(&bo->event_queue); + bo->val_seq = sequence; bo->seq_valid = true; } else { @@ -452,6 +456,11 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) ttm_bo_mem_put(bo, &bo->mem); atomic_set(&bo->reserved, 0); + + /* + * Make processes trying to reserve really pick it up. + */ + smp_mb__after_atomic_dec(); wake_up_all(&bo->event_queue); } @@ -460,7 +469,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) struct ttm_bo_device *bdev = bo->bdev; struct ttm_bo_global *glob = bo->glob; struct ttm_bo_driver *driver; - void *sync_obj; + void *sync_obj = NULL; void *sync_obj_arg; int put_count; int ret; @@ -495,17 +504,20 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) spin_lock(&glob->lru_lock); } queue: - sync_obj = bo->sync_obj; - sync_obj_arg = bo->sync_obj_arg; driver = bdev->driver; + if (bo->sync_obj) + sync_obj = driver->sync_obj_ref(bo->sync_obj); + sync_obj_arg = bo->sync_obj_arg; kref_get(&bo->list_kref); list_add_tail(&bo->ddestroy, &bdev->ddestroy); spin_unlock(&glob->lru_lock); spin_unlock(&bo->lock); - if (sync_obj) + if (sync_obj) { driver->sync_obj_flush(sync_obj, sync_obj_arg); + driver->sync_obj_unref(&sync_obj); + } schedule_delayed_work(&bdev->wq, ((HZ / 100) < 1) ? 1 : HZ / 100); } @@ -822,7 +834,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, bool no_wait_gpu) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_bo_global *glob = bdev->glob; struct ttm_mem_type_manager *man = &bdev->man[mem_type]; int ret; @@ -832,12 +843,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, return ret; if (mem->mm_node) break; - spin_lock(&glob->lru_lock); - if (list_empty(&man->lru)) { - spin_unlock(&glob->lru_lock); - break; - } - spin_unlock(&glob->lru_lock); ret = ttm_mem_evict_first(bdev, mem_type, interruptible, no_wait_reserve, no_wait_gpu); if (unlikely(ret != 0)) @@ -1125,35 +1130,9 @@ EXPORT_SYMBOL(ttm_bo_validate); int ttm_bo_check_placement(struct ttm_buffer_object *bo, struct ttm_placement *placement) { - int i; + BUG_ON((placement->fpfn || placement->lpfn) && + (bo->mem.num_pages > (placement->lpfn - placement->fpfn))); - if (placement->fpfn || placement->lpfn) { - if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) { - printk(KERN_ERR TTM_PFX "Page number range to small " - "Need %lu pages, range is [%u, %u]\n", - bo->mem.num_pages, placement->fpfn, - placement->lpfn); - return -EINVAL; - } - } - for (i = 0; i < placement->num_placement; i++) { - if (!capable(CAP_SYS_ADMIN)) { - if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) { - printk(KERN_ERR TTM_PFX "Need to be root to " - "modify NO_EVICT status.\n"); - return -EINVAL; - } - } - } - for (i = 0; i < placement->num_busy_placement; i++) { - if (!capable(CAP_SYS_ADMIN)) { - if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) { - printk(KERN_ERR TTM_PFX "Need to be root to " - "modify NO_EVICT status.\n"); - return -EINVAL; - } - } - } return 0; } @@ -1176,6 +1155,10 @@ int ttm_bo_init(struct ttm_bo_device *bdev, num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; if (num_pages == 0) { printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n"); + if (destroy) + (*destroy)(bo); + else + kfree(bo); return -EINVAL; } bo->destroy = destroy; @@ -1369,18 +1352,9 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, int ret = -EINVAL; struct ttm_mem_type_manager *man; - if (type >= TTM_NUM_MEM_TYPES) { - 
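
/*
 * Illustrative sketch of the val_seq checks above: with free-running
 * 32-bit sequence numbers, "a is b or newer" is decided by unsigned
 * subtraction against half the range, so the comparison stays correct
 * across wrap-around -- the (x - y < 1 << 31) idiom in the reserve path.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool seq_at_or_after(uint32_t a, uint32_t b)
{
	/* true when a equals b or came later, modulo 2^32 */
	return (uint32_t)(a - b) < 0x80000000u;
}

int main(void)
{
	assert(seq_at_or_after(2, 1));
	assert(seq_at_or_after(5, 5));
	assert(!seq_at_or_after(1, 2));
	/* once the counter wraps, 1 counts as newer than 0xfffffffe */
	assert(seq_at_or_after(1, 0xfffffffeu));
	return 0;
}
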
printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type); - return ret; - } - + BUG_ON(type >= TTM_NUM_MEM_TYPES); man = &bdev->man[type]; - if (man->has_type) { - printk(KERN_ERR TTM_PFX - "Memory manager already initialized for type %d\n", - type); - return ret; - } + BUG_ON(man->has_type); ret = bdev->driver->init_mem_type(bdev, type, man); if (ret) @@ -1389,13 +1363,6 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, ret = 0; if (type != TTM_PL_SYSTEM) { - if (!p_size) { - printk(KERN_ERR TTM_PFX - "Zero size memory manager type %d\n", - type); - return ret; - } - ret = (*man->func->init)(man, p_size); if (ret) return ret; diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c index 7410c190c89..038e947d00f 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c @@ -1,6 +1,6 @@ /************************************************************************** * - * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA + * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -31,20 +31,29 @@ #include "ttm/ttm_module.h" #include "ttm/ttm_bo_driver.h" #include "ttm/ttm_placement.h" -#include <linux/jiffies.h> +#include "drm_mm.h" #include <linux/slab.h> -#include <linux/sched.h> -#include <linux/mm.h> -#include <linux/file.h> +#include <linux/spinlock.h> #include <linux/module.h> +/** + * Currently we use a spinlock for the lock, but a mutex *may* be + * more appropriate to reduce scheduling latency if the range manager + * ends up with very fragmented allocation patterns. + */ + +struct ttm_range_manager { + struct drm_mm mm; + spinlock_t lock; +}; + static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, struct ttm_placement *placement, struct ttm_mem_reg *mem) { - struct ttm_bo_global *glob = man->bdev->glob; - struct drm_mm *mm = man->priv; + struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; + struct drm_mm *mm = &rman->mm; struct drm_mm_node *node = NULL; unsigned long lpfn; int ret; @@ -57,19 +66,19 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, if (unlikely(ret)) return ret; - spin_lock(&glob->lru_lock); + spin_lock(&rman->lock); node = drm_mm_search_free_in_range(mm, mem->num_pages, mem->page_alignment, placement->fpfn, lpfn, 1); if (unlikely(node == NULL)) { - spin_unlock(&glob->lru_lock); + spin_unlock(&rman->lock); return 0; } node = drm_mm_get_block_atomic_range(node, mem->num_pages, - mem->page_alignment, - placement->fpfn, - lpfn); - spin_unlock(&glob->lru_lock); + mem->page_alignment, + placement->fpfn, + lpfn); + spin_unlock(&rman->lock); } while (node == NULL); mem->mm_node = node; @@ -80,12 +89,12 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem) { - struct ttm_bo_global *glob = man->bdev->glob; + struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; if (mem->mm_node) { - spin_lock(&glob->lru_lock); + spin_lock(&rman->lock); drm_mm_put_block(mem->mm_node); - spin_unlock(&glob->lru_lock); + spin_unlock(&rman->lock); mem->mm_node = NULL; } } @@ -93,49 +102,49 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, static int ttm_bo_man_init(struct ttm_mem_type_manager *man, unsigned long p_size) { - struct drm_mm *mm; + struct ttm_range_manager *rman; int ret; - mm 
= kzalloc(sizeof(*mm), GFP_KERNEL); - if (!mm) + rman = kzalloc(sizeof(*rman), GFP_KERNEL); + if (!rman) return -ENOMEM; - ret = drm_mm_init(mm, 0, p_size); + ret = drm_mm_init(&rman->mm, 0, p_size); if (ret) { - kfree(mm); + kfree(rman); return ret; } - man->priv = mm; + spin_lock_init(&rman->lock); + man->priv = rman; return 0; } static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man) { - struct ttm_bo_global *glob = man->bdev->glob; - struct drm_mm *mm = man->priv; - int ret = 0; + struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; + struct drm_mm *mm = &rman->mm; - spin_lock(&glob->lru_lock); + spin_lock(&rman->lock); if (drm_mm_clean(mm)) { drm_mm_takedown(mm); - kfree(mm); + spin_unlock(&rman->lock); + kfree(rman); man->priv = NULL; - } else - ret = -EBUSY; - spin_unlock(&glob->lru_lock); - return ret; + return 0; + } + spin_unlock(&rman->lock); + return -EBUSY; } static void ttm_bo_man_debug(struct ttm_mem_type_manager *man, const char *prefix) { - struct ttm_bo_global *glob = man->bdev->glob; - struct drm_mm *mm = man->priv; + struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; - spin_lock(&glob->lru_lock); - drm_mm_debug_table(mm, prefix); - spin_unlock(&glob->lru_lock); + spin_lock(&rman->lock); + drm_mm_debug_table(&rman->mm, prefix); + spin_unlock(&rman->lock); } const struct ttm_mem_type_manager_func ttm_bo_manager_func = { diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index a7bab87a548..af789dc869b 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -440,10 +440,8 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) return ret; ret = be->func->bind(be, bo_mem); - if (ret) { - printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n"); + if (unlikely(ret != 0)) return ret; - } ttm->state = tt_bound; diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c index 9b5b4d9dd62..3e038a394c5 100644 --- a/drivers/gpu/drm/via/via_dmablit.c +++ b/drivers/gpu/drm/via/via_dmablit.c @@ -235,9 +235,9 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) - first_pfn + 1; - if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages))) + vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages); + if (NULL == vsg->pages) return -ENOMEM; - memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages); down_read(¤t->mm->mmap_sem); ret = get_user_pages(current, current->mm, (unsigned long)xfer->mem_addr, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 51d9f9f1d7f..76954e3528c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -691,6 +691,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, fence_rep.error = ret; fence_rep.fence_seq = (uint64_t) sequence; + fence_rep.pad64 = 0; user_fence_rep = (struct drm_vmw_fence_rep __user *) (unsigned long)arg->fence_rep; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 87c6e6156d7..cceeb42789b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -720,6 +720,8 @@ static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb) &vmw_vram_ne_placement, false, &vmw_dmabuf_bo_free); vmw_overlay_resume_all(dev_priv); + if (unlikely(ret != 0)) + vfbs->buffer = NULL; return ret; } @@ -730,6 +732,9 @@ static int 
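
/*
 * Illustrative sketch of the ttm_range_manager restructuring above:
 * the range allocator now carries its own lock instead of borrowing
 * the global LRU lock, so get/put on one manager only serialize
 * against each other.  A pthread mutex and a trivial bump allocator
 * stand in for the real spinlock and drm_mm.
 */
#include <pthread.h>
#include <stdlib.h>

struct range_manager {
	pthread_mutex_t lock;	/* private to this manager */
	unsigned long next;	/* toy allocator state */
	unsigned long size;
};

static struct range_manager *range_manager_init(unsigned long size)
{
	struct range_manager *rman = calloc(1, sizeof(*rman));

	if (!rman)
		return NULL;
	pthread_mutex_init(&rman->lock, NULL);
	rman->size = size;
	return rman;
}

static long range_manager_get(struct range_manager *rman, unsigned long pages)
{
	long start = -1;

	pthread_mutex_lock(&rman->lock);	/* only this manager is held up */
	if (rman->next + pages <= rman->size) {
		start = (long)rman->next;
		rman->next += pages;
	}
	pthread_mutex_unlock(&rman->lock);
	return start;
}

int main(void)
{
	struct range_manager *rman = range_manager_init(1024);
	long a = rman ? range_manager_get(rman, 16) : -1;

	if (rman) {
		pthread_mutex_destroy(&rman->lock);
		free(rman);
	}
	return a == 0 ? 0 : 1;
}
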
vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb) struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(&vfb->base); + if (unlikely(vfbs->buffer == NULL)) + return 0; + bo = &vfbs->buffer->base; ttm_bo_unref(&bo); vfbs->buffer = NULL; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index a01c47ddb5b..29113c9b26a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -557,7 +557,7 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv) return -EINVAL; } - dev_priv->ldu_priv = kmalloc(GFP_KERNEL, sizeof(*dev_priv->ldu_priv)); + dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL); if (!dev_priv->ldu_priv) return -ENOMEM; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index df2036ed18d..f1a52f9e729 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c @@ -585,7 +585,7 @@ int vmw_overlay_init(struct vmw_private *dev_priv) return -ENOSYS; } - overlay = kmalloc(GFP_KERNEL, sizeof(*overlay)); + overlay = kmalloc(sizeof(*overlay), GFP_KERNEL); if (!overlay) return -ENOMEM; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 36e129f0023..5408b1b7996 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -862,7 +862,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, &vmw_vram_sys_placement, true, &vmw_user_dmabuf_destroy); if (unlikely(ret != 0)) - return ret; + goto out_no_dmabuf; tmp = ttm_bo_reference(&vmw_user_bo->dma.base); ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile, @@ -870,19 +870,21 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data, false, ttm_buffer_type, &vmw_user_dmabuf_release, NULL); - if (unlikely(ret != 0)) { - ttm_bo_unref(&tmp); - } else { + if (unlikely(ret != 0)) + goto out_no_base_object; + else { rep->handle = vmw_user_bo->base.hash.key; rep->map_handle = vmw_user_bo->dma.base.addr_space_offset; rep->cur_gmr_id = vmw_user_bo->base.hash.key; rep->cur_gmr_offset = 0; } - ttm_bo_unref(&tmp); +out_no_base_object: + ttm_bo_unref(&tmp); +out_no_dmabuf: ttm_read_unlock(&vmaster->lock); - return 0; + return ret; } int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig index 742c423567c..0e1edd7311f 100644 --- a/drivers/gpu/stub/Kconfig +++ b/drivers/gpu/stub/Kconfig @@ -3,6 +3,9 @@ config STUB_POULSBO depends on PCI # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled # but for select to work, need to select ACPI_VIDEO's dependencies, ick + select VIDEO_OUTPUT_CONTROL if ACPI + select BACKLIGHT_CLASS_DEVICE if ACPI + select INPUT if ACPI select ACPI_VIDEO if ACPI help Choose this option if you have a system that has Intel GMA500 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 515345b11ac..88cb04e7962 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1386,6 +1386,7 @@ static const struct hid_device_id hid_blacklist[] = { { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) }, + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) }, { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, { 
HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, diff --git a/drivers/hid/hid-egalax.c b/drivers/hid/hid-egalax.c index 54b017ad258..5a1b52e0eb8 100644 --- a/drivers/hid/hid-egalax.c +++ b/drivers/hid/hid-egalax.c @@ -221,7 +221,7 @@ static int egalax_probe(struct hid_device *hdev, const struct hid_device_id *id) struct egalax_data *td; struct hid_report *report; - td = kmalloc(sizeof(struct egalax_data), GFP_KERNEL); + td = kzalloc(sizeof(struct egalax_data), GFP_KERNEL); if (!td) { dev_err(&hdev->dev, "cannot allocate eGalax data\n"); return -ENOMEM; diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index bb0b3659437..d8d372bae3c 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -174,7 +174,7 @@ static int hidinput_setkeycode(struct input_dev *dev, clear_bit(*old_keycode, dev->keybit); set_bit(usage->code, dev->keybit); - dbg_hid(KERN_DEBUG "Assigned keycode %d to HID usage code %x\n", + dbg_hid("Assigned keycode %d to HID usage code %x\n", usage->code, usage->hid); /* @@ -203,8 +203,8 @@ static int hidinput_setkeycode(struct input_dev *dev, * * as seen in the HID specification v1.11 6.2.2.7 Global Items. * - * Only exponent 1 length units are processed. Centimeters are converted to - * inches. Degrees are converted to radians. + * Only exponent 1 length units are processed. Centimeters and inches are + * converted to millimeters. Degrees are converted to radians. */ static __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code) { @@ -225,13 +225,16 @@ static __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code) */ if (code == ABS_X || code == ABS_Y || code == ABS_Z) { if (field->unit == 0x11) { /* If centimeters */ - /* Convert to inches */ - prev = logical_extents; - logical_extents *= 254; - if (logical_extents < prev) + /* Convert to millimeters */ + unit_exponent += 1; + } else if (field->unit == 0x13) { /* If inches */ + /* Convert to millimeters */ + prev = physical_extents; + physical_extents *= 254; + if (physical_extents < prev) return 0; - unit_exponent += 2; - } else if (field->unit != 0x13) { /* If not inches */ + unit_exponent -= 1; + } else { return 0; } } else if (code == ABS_RX || code == ABS_RY || code == ABS_RZ) { diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c index 15434c81479..25be4e1461b 100644 --- a/drivers/hid/hid-tmff.c +++ b/drivers/hid/hid-tmff.c @@ -256,6 +256,8 @@ static const struct hid_device_id tm_devices[] = { .driver_data = (unsigned long)ff_joystick }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654), /* FGT Force Feedback Wheel */ .driver_data = (unsigned long)ff_joystick }, + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a), /* F430 Force Feedback Wheel */ + .driver_data = (unsigned long)ff_joystick }, { } }; MODULE_DEVICE_TABLE(hid, tm_devices); diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index 8a4b32dca9f..e1f07483691 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -32,7 +32,6 @@ #include <linux/hid.h> #include <linux/mutex.h> #include <linux/sched.h> -#include <linux/smp_lock.h> #include <linux/hidraw.h> diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index fedd88df9a1..984feb351a5 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -29,7 +29,6 @@ #include <linux/slab.h> #include <linux/module.h> #include <linux/init.h> -#include <linux/smp_lock.h> #include <linux/input.h> #include <linux/usb.h> #include <linux/hid.h> diff --git a/drivers/hwmon/ad7414.c 
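
/*
 * Illustrative arithmetic for the hidinput_calc_abs_res() change
 * above: resolution is now reported in counts per millimetre, with
 * centimetres folded in by bumping the unit exponent (x10) and inches
 * by multiplying the physical extent by 254 and dropping the exponent
 * by one (25.4 mm per inch).  Doubles replace the driver's integer
 * and exponent bookkeeping; the helper name is hypothetical.
 */
#include <math.h>
#include <stdio.h>

/* HID unit codes used by the driver: 0x11 = centimetres, 0x13 = inches */
static double abs_res_per_mm(long logical_extent, long physical_extent,
			     int unit, int unit_exponent)
{
	double physical_mm;

	if (physical_extent == 0)
		return 0.0;

	switch (unit) {
	case 0x11:	/* centimetres -> millimetres */
		physical_mm = physical_extent * pow(10.0, unit_exponent + 1);
		break;
	case 0x13:	/* inches -> millimetres: *254, exponent - 1 */
		physical_mm = physical_extent * 254.0 * pow(10.0, unit_exponent - 1);
		break;
	default:
		return 0.0;
	}
	return logical_extent / physical_mm;
}

int main(void)
{
	/* 4096 counts over 10.16 inch (258.064 mm): about 15.9 counts/mm */
	printf("%.1f counts/mm\n", abs_res_per_mm(4096, 1016, 0x13, -2));
	return 0;
}
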
b/drivers/hwmon/ad7414.c index 1e4c21fc1a8..86d822aa9bb 100644 --- a/drivers/hwmon/ad7414.c +++ b/drivers/hwmon/ad7414.c @@ -178,11 +178,13 @@ static int ad7414_probe(struct i2c_client *client, { struct ad7414_data *data; int conf; - int err = 0; + int err; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA | - I2C_FUNC_SMBUS_READ_WORD_DATA)) + I2C_FUNC_SMBUS_READ_WORD_DATA)) { + err = -EOPNOTSUPP; goto exit; + } data = kzalloc(sizeof(struct ad7414_data), GFP_KERNEL); if (!data) { diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c index 4bf969c0a32..be0fdd58aa2 100644 --- a/drivers/hwmon/adm1026.c +++ b/drivers/hwmon/adm1026.c @@ -916,27 +916,27 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr, int nr = sensor_attr->index; struct i2c_client *client = to_i2c_client(dev); struct adm1026_data *data = i2c_get_clientdata(client); - int val, orig_div, new_div, shift; + int val, orig_div, new_div; val = simple_strtol(buf, NULL, 10); new_div = DIV_TO_REG(val); - if (new_div == 0) { - return -EINVAL; - } + mutex_lock(&data->update_lock); orig_div = data->fan_div[nr]; data->fan_div[nr] = DIV_FROM_REG(new_div); if (nr < 4) { /* 0 <= nr < 4 */ - shift = 2 * nr; adm1026_write_value(client, ADM1026_REG_FAN_DIV_0_3, - ((DIV_TO_REG(orig_div) & (~(0x03 << shift))) | - (new_div << shift))); + (DIV_TO_REG(data->fan_div[0]) << 0) | + (DIV_TO_REG(data->fan_div[1]) << 2) | + (DIV_TO_REG(data->fan_div[2]) << 4) | + (DIV_TO_REG(data->fan_div[3]) << 6)); } else { /* 3 < nr < 8 */ - shift = 2 * (nr - 4); adm1026_write_value(client, ADM1026_REG_FAN_DIV_4_7, - ((DIV_TO_REG(orig_div) & (~(0x03 << (2 * shift)))) | - (new_div << shift))); + (DIV_TO_REG(data->fan_div[4]) << 0) | + (DIV_TO_REG(data->fan_div[5]) << 2) | + (DIV_TO_REG(data->fan_div[6]) << 4) | + (DIV_TO_REG(data->fan_div[7]) << 6)); } if (data->fan_div[nr] != orig_div) { diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c index 9e775717abb..87d92a56a93 100644 --- a/drivers/hwmon/adt7470.c +++ b/drivers/hwmon/adt7470.c @@ -1286,8 +1286,10 @@ static int adt7470_probe(struct i2c_client *client, init_completion(&data->auto_update_stop); data->auto_update = kthread_run(adt7470_update_thread, client, dev_name(data->hwmon_dev)); - if (IS_ERR(data->auto_update)) + if (IS_ERR(data->auto_update)) { + err = PTR_ERR(data->auto_update); goto exit_unregister; + } return 0; diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c index fa9708c2d72..4033974d1bb 100644 --- a/drivers/hwmon/amc6821.c +++ b/drivers/hwmon/amc6821.c @@ -4,7 +4,7 @@ Copyright (C) 2009 T. Mertelj <tomaz.mertelj@guest.arnes.si> Based on max6650.c: - Copyright (C) 2007 Hans J. Koch <hjk@linutronix.de> + Copyright (C) 2007 Hans J. 
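
/*
 * Illustrative sketch of the adm1026 fan-divider fix above: each
 * register packs four 2-bit divider codes, so the register image is
 * rebuilt from all four cached per-fan values instead of patching one
 * field with shifts against a stale read.  div_to_reg() is a stand-in
 * for the driver's DIV_TO_REG() macro.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t div_to_reg(int div)
{
	/* divider 1, 2, 4, 8 -> code 0..3 */
	uint8_t code = 0;

	while (div > 1 && code < 3) {
		div >>= 1;
		code++;
	}
	return code;
}

static uint8_t pack_fan_div(const int fan_div[4])
{
	return (uint8_t)((div_to_reg(fan_div[0]) << 0) |
			 (div_to_reg(fan_div[1]) << 2) |
			 (div_to_reg(fan_div[2]) << 4) |
			 (div_to_reg(fan_div[3]) << 6));
}

int main(void)
{
	int fan_div[4] = { 1, 2, 4, 8 };

	printf("FAN_DIV register = 0x%02x\n", pack_fan_div(fan_div));
	return 0;
}
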
Koch <hjk@hansjkoch.de> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c index aa701a18370..f141a1de519 100644 --- a/drivers/hwmon/gpio-fan.c +++ b/drivers/hwmon/gpio-fan.c @@ -376,10 +376,6 @@ static int fan_ctrl_init(struct gpio_fan_data *fan_data, } } - err = sysfs_create_group(&pdev->dev.kobj, &gpio_fan_ctrl_group); - if (err) - goto err_free_gpio; - fan_data->num_ctrl = num_ctrl; fan_data->ctrl = ctrl; fan_data->num_speed = pdata->num_speed; @@ -391,6 +387,10 @@ static int fan_ctrl_init(struct gpio_fan_data *fan_data, goto err_free_gpio; } + err = sysfs_create_group(&pdev->dev.kobj, &gpio_fan_ctrl_group); + if (err) + goto err_free_gpio; + return 0; err_free_gpio: diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c index 937983407e2..c4c40be0edb 100644 --- a/drivers/hwmon/i5k_amb.c +++ b/drivers/hwmon/i5k_amb.c @@ -497,12 +497,14 @@ static unsigned long chipset_ids[] = { 0 }; +#ifdef MODULE static struct pci_device_id i5k_amb_ids[] __devinitdata = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR) }, { 0, } }; MODULE_DEVICE_TABLE(pci, i5k_amb_ids); +#endif static int __devinit i5k_amb_probe(struct platform_device *pdev) { diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c index 14a5d981be7..a428a926419 100644 --- a/drivers/hwmon/it87.c +++ b/drivers/hwmon/it87.c @@ -187,6 +187,7 @@ static const u8 IT87_REG_FANX_MIN[] = { 0x1b, 0x1c, 0x1d, 0x85, 0x87 }; #define IT87_REG_FAN_MAIN_CTRL 0x13 #define IT87_REG_FAN_CTL 0x14 #define IT87_REG_PWM(nr) (0x15 + (nr)) +#define IT87_REG_PWM_DUTY(nr) (0x63 + (nr) * 8) #define IT87_REG_VIN(nr) (0x20 + (nr)) #define IT87_REG_TEMP(nr) (0x29 + (nr)) @@ -251,12 +252,16 @@ struct it87_data { u8 fan_main_ctrl; /* Register value */ u8 fan_ctl; /* Register value */ - /* The following 3 arrays correspond to the same registers. The - * meaning of bits 6-0 depends on the value of bit 7, and we want - * to preserve settings on mode changes, so we have to track all - * values separately. */ + /* The following 3 arrays correspond to the same registers up to + * the IT8720F. The meaning of bits 6-0 depends on the value of bit + * 7, and we want to preserve settings on mode changes, so we have + * to track all values separately. + * Starting with the IT8721F, the manual PWM duty cycles are stored + * in separate registers (8-bit values), so the separate tracking + * is no longer needed, but it is still done to keep the driver + * simple. */ u8 pwm_ctrl[3]; /* Register value */ - u8 pwm_duty[3]; /* Manual PWM value set by user (bit 6-0) */ + u8 pwm_duty[3]; /* Manual PWM value set by user */ u8 pwm_temp_map[3]; /* PWM to temp. chan. mapping (bits 1-0) */ /* Automatic fan speed control registers */ @@ -832,7 +837,9 @@ static ssize_t set_pwm_enable(struct device *dev, data->fan_main_ctrl); } else { if (val == 1) /* Manual mode */ - data->pwm_ctrl[nr] = data->pwm_duty[nr]; + data->pwm_ctrl[nr] = data->type == it8721 ? 
+ data->pwm_temp_map[nr] : + data->pwm_duty[nr]; else /* Automatic mode */ data->pwm_ctrl[nr] = 0x80 | data->pwm_temp_map[nr]; it87_write_value(data, IT87_REG_PWM(nr), data->pwm_ctrl[nr]); @@ -858,12 +865,25 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr, return -EINVAL; mutex_lock(&data->update_lock); - data->pwm_duty[nr] = pwm_to_reg(data, val); - /* If we are in manual mode, write the duty cycle immediately; - * otherwise, just store it for later use. */ - if (!(data->pwm_ctrl[nr] & 0x80)) { - data->pwm_ctrl[nr] = data->pwm_duty[nr]; - it87_write_value(data, IT87_REG_PWM(nr), data->pwm_ctrl[nr]); + if (data->type == it8721) { + /* If we are in automatic mode, the PWM duty cycle register + * is read-only so we can't write the value */ + if (data->pwm_ctrl[nr] & 0x80) { + mutex_unlock(&data->update_lock); + return -EBUSY; + } + data->pwm_duty[nr] = pwm_to_reg(data, val); + it87_write_value(data, IT87_REG_PWM_DUTY(nr), + data->pwm_duty[nr]); + } else { + data->pwm_duty[nr] = pwm_to_reg(data, val); + /* If we are in manual mode, write the duty cycle immediately; + * otherwise, just store it for later use. */ + if (!(data->pwm_ctrl[nr] & 0x80)) { + data->pwm_ctrl[nr] = data->pwm_duty[nr]; + it87_write_value(data, IT87_REG_PWM(nr), + data->pwm_ctrl[nr]); + } } mutex_unlock(&data->update_lock); return count; @@ -1958,7 +1978,10 @@ static void __devinit it87_init_device(struct platform_device *pdev) * channels to use when later setting to automatic mode later. * Use a 1:1 mapping by default (we are clueless.) * In both cases, the value can (and should) be changed by the user - * prior to switching to a different mode. */ + * prior to switching to a different mode. + * Note that this is no longer needed for the IT8721F and later, as + * these have separate registers for the temperature mapping and the + * manual duty cycle. 
*/ for (i = 0; i < 3; i++) { data->pwm_temp_map[i] = i; data->pwm_duty[i] = 0x7f; /* Full speed */ @@ -2034,10 +2057,16 @@ static void __devinit it87_init_device(struct platform_device *pdev) static void it87_update_pwm_ctrl(struct it87_data *data, int nr) { data->pwm_ctrl[nr] = it87_read_value(data, IT87_REG_PWM(nr)); - if (data->pwm_ctrl[nr] & 0x80) /* Automatic mode */ + if (data->type == it8721) { data->pwm_temp_map[nr] = data->pwm_ctrl[nr] & 0x03; - else /* Manual mode */ - data->pwm_duty[nr] = data->pwm_ctrl[nr] & 0x7f; + data->pwm_duty[nr] = it87_read_value(data, + IT87_REG_PWM_DUTY(nr)); + } else { + if (data->pwm_ctrl[nr] & 0x80) /* Automatic mode */ + data->pwm_temp_map[nr] = data->pwm_ctrl[nr] & 0x03; + else /* Manual mode */ + data->pwm_duty[nr] = data->pwm_ctrl[nr] & 0x7f; + } if (has_old_autopwm(data)) { int i; diff --git a/drivers/hwmon/lis3lv02d_i2c.c b/drivers/hwmon/lis3lv02d_i2c.c index 9f4bae07f71..8853afce85c 100644 --- a/drivers/hwmon/lis3lv02d_i2c.c +++ b/drivers/hwmon/lis3lv02d_i2c.c @@ -186,7 +186,7 @@ static int __devexit lis3lv02d_i2c_remove(struct i2c_client *client) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int lis3lv02d_i2c_suspend(struct device *dev) { struct i2c_client *client = container_of(dev, struct i2c_client, dev); @@ -213,12 +213,9 @@ static int lis3lv02d_i2c_resume(struct device *dev) return 0; } -#else -#define lis3lv02d_i2c_suspend NULL -#define lis3lv02d_i2c_resume NULL -#define lis3lv02d_i2c_shutdown NULL -#endif +#endif /* CONFIG_PM_SLEEP */ +#ifdef CONFIG_PM_RUNTIME static int lis3_i2c_runtime_suspend(struct device *dev) { struct i2c_client *client = container_of(dev, struct i2c_client, dev); @@ -236,6 +233,7 @@ static int lis3_i2c_runtime_resume(struct device *dev) lis3lv02d_poweron(lis3); return 0; } +#endif /* CONFIG_PM_RUNTIME */ static const struct i2c_device_id lis3lv02d_id[] = { {"lis3lv02d", 0 }, diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c index 6669255aadc..c9ed14eba5a 100644 --- a/drivers/hwmon/lm93.c +++ b/drivers/hwmon/lm93.c @@ -20,7 +20,7 @@ Adapted to 2.6.20 by Carsten Emde <cbe@osadl.org> Copyright (c) 2006 Carsten Emde, Open Source Automation Development Lab - Modified for mainline integration by Hans J. Koch <hjk@linutronix.de> + Modified for mainline integration by Hans J. Koch <hjk@hansjkoch.de> Copyright (c) 2007 Hans J. Koch, Linutronix GmbH This program is free software; you can redistribute it and/or modify @@ -2629,7 +2629,7 @@ static void __exit lm93_exit(void) } MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>, " - "Hans J. Koch <hjk@linutronix.de"); + "Hans J. 
Koch <hjk@hansjkoch.de>"); MODULE_DESCRIPTION("LM93 driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c index 464340f2549..4546d82f024 100644 --- a/drivers/hwmon/lm95241.c +++ b/drivers/hwmon/lm95241.c @@ -128,9 +128,12 @@ static ssize_t set_interval(struct device *dev, struct device_attribute *attr, { struct i2c_client *client = to_i2c_client(dev); struct lm95241_data *data = i2c_get_clientdata(client); + unsigned long val; - strict_strtol(buf, 10, &data->interval); - data->interval = data->interval * HZ / 1000; + if (strict_strtoul(buf, 10, &val) < 0) + return -EINVAL; + + data->interval = val * HZ / 1000; return count; } @@ -188,7 +191,9 @@ static ssize_t set_type##flag(struct device *dev, \ struct lm95241_data *data = i2c_get_clientdata(client); \ \ long val; \ - strict_strtol(buf, 10, &val); \ +\ + if (strict_strtol(buf, 10, &val) < 0) \ + return -EINVAL; \ \ if ((val == 1) || (val == 2)) { \ \ @@ -227,7 +232,9 @@ static ssize_t set_min##flag(struct device *dev, \ struct lm95241_data *data = i2c_get_clientdata(client); \ \ long val; \ - strict_strtol(buf, 10, &val); \ +\ + if (strict_strtol(buf, 10, &val) < 0) \ + return -EINVAL;\ \ mutex_lock(&data->update_lock); \ \ @@ -256,7 +263,9 @@ static ssize_t set_max##flag(struct device *dev, \ struct lm95241_data *data = i2c_get_clientdata(client); \ \ long val; \ - strict_strtol(buf, 10, &val); \ +\ + if (strict_strtol(buf, 10, &val) < 0) \ + return -EINVAL; \ \ mutex_lock(&data->update_lock); \ \ diff --git a/drivers/hwmon/ltc4215.c b/drivers/hwmon/ltc4215.c index 00d975eb5b8..c7e6d8e8165 100644 --- a/drivers/hwmon/ltc4215.c +++ b/drivers/hwmon/ltc4215.c @@ -205,7 +205,6 @@ LTC4215_ALARM(curr1_max_alarm, (1 << 2), LTC4215_STATUS); /* Power (virtual) */ LTC4215_POWER(power1_input); -LTC4215_ALARM(power1_alarm, (1 << 3), LTC4215_STATUS); /* Input Voltage */ LTC4215_VOLTAGE(in1_input, LTC4215_ADIN); @@ -214,6 +213,7 @@ LTC4215_ALARM(in1_min_alarm, (1 << 1), LTC4215_STATUS); /* Output Voltage */ LTC4215_VOLTAGE(in2_input, LTC4215_SOURCE); +LTC4215_ALARM(in2_min_alarm, (1 << 3), LTC4215_STATUS); /* Finally, construct an array of pointers to members of the above objects, * as required for sysfs_create_group() @@ -223,13 +223,13 @@ static struct attribute *ltc4215_attributes[] = { &sensor_dev_attr_curr1_max_alarm.dev_attr.attr, &sensor_dev_attr_power1_input.dev_attr.attr, - &sensor_dev_attr_power1_alarm.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in1_max_alarm.dev_attr.attr, &sensor_dev_attr_in1_min_alarm.dev_attr.attr, &sensor_dev_attr_in2_input.dev_attr.attr, + &sensor_dev_attr_in2_min_alarm.dev_attr.attr, NULL, }; diff --git a/drivers/hwmon/ltc4261.c b/drivers/hwmon/ltc4261.c index 26762617867..4b50601027d 100644 --- a/drivers/hwmon/ltc4261.c +++ b/drivers/hwmon/ltc4261.c @@ -82,7 +82,7 @@ static struct ltc4261_data *ltc4261_update_device(struct device *dev) val = i2c_smbus_read_byte_data(client, i); if (unlikely(val < 0)) { dev_dbg(dev, - "Failed to read ADC value: error %d", + "Failed to read ADC value: error %d\n", val); ret = ERR_PTR(val); goto abort; @@ -230,8 +230,7 @@ static int ltc4261_probe(struct i2c_client *client, return -ENODEV; if (i2c_smbus_read_byte_data(client, LTC4261_STATUS) < 0) { - dev_err(&client->dev, "Failed to read register %d:%02x:%02x\n", - adapter->id, client->addr, LTC4261_STATUS); + dev_err(&client->dev, "Failed to read status register\n"); return -ENODEV; } diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c index 
a0160ee5cae..9a11532ecae 100644 --- a/drivers/hwmon/max6650.c +++ b/drivers/hwmon/max6650.c @@ -2,7 +2,7 @@ * max6650.c - Part of lm_sensors, Linux kernel modules for hardware * monitoring. * - * (C) 2007 by Hans J. Koch <hjk@linutronix.de> + * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de> * * based on code written by John Morris <john.morris@spirentcom.com> * Copyright (c) 2003 Spirent Communications diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c index 1d840aa8378..cdbc7448491 100644 --- a/drivers/hwmon/w83795.c +++ b/drivers/hwmon/w83795.c @@ -165,10 +165,14 @@ static const u8 IN_LSB_SHIFT_IDX[][2] = { #define W83795_REG_VID_CTRL 0x6A +#define W83795_REG_ALARM_CTRL 0x40 +#define ALARM_CTRL_RTSACS (1 << 7) #define W83795_REG_ALARM(index) (0x41 + (index)) +#define W83795_REG_CLR_CHASSIS 0x4D #define W83795_REG_BEEP(index) (0x50 + (index)) -#define W83795_REG_CLR_CHASSIS 0x4D +#define W83795_REG_OVT_CFG 0x58 +#define OVT_CFG_SEL (1 << 7) #define W83795_REG_FCMS1 0x201 @@ -178,6 +182,14 @@ static const u8 IN_LSB_SHIFT_IDX[][2] = { #define W83795_REG_TSS(index) (0x209 + (index)) +#define TSS_MAP_RESERVED 0xff +static const u8 tss_map[4][6] = { + { 0, 1, 2, 3, 4, 5}, + { 6, 7, 8, 9, 0, 1}, + {10, 11, 12, 13, 2, 3}, + { 4, 5, 4, 5, TSS_MAP_RESERVED, TSS_MAP_RESERVED}, +}; + #define PWM_OUTPUT 0 #define PWM_FREQ 1 #define PWM_START 2 @@ -369,6 +381,7 @@ struct w83795_data { u8 setup_pwm[3]; /* Register value */ u8 alarms[6]; /* Register value */ + u8 enable_beep; u8 beeps[6]; /* Register value */ char valid; @@ -499,8 +512,11 @@ static void w83795_update_limits(struct i2c_client *client) } /* Read beep settings */ - for (i = 0; i < ARRAY_SIZE(data->beeps); i++) - data->beeps[i] = w83795_read(client, W83795_REG_BEEP(i)); + if (data->enable_beep) { + for (i = 0; i < ARRAY_SIZE(data->beeps); i++) + data->beeps[i] = + w83795_read(client, W83795_REG_BEEP(i)); + } data->valid_limits = 1; } @@ -577,6 +593,7 @@ static struct w83795_data *w83795_update_device(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct w83795_data *data = i2c_get_clientdata(client); u16 tmp; + u8 intrusion; int i; mutex_lock(&data->update_lock); @@ -648,9 +665,24 @@ static struct w83795_data *w83795_update_device(struct device *dev) w83795_read(client, W83795_REG_PWM(i, PWM_OUTPUT)); } - /* update alarm */ + /* Update intrusion and alarms + * It is important to read intrusion first, because reading from + * register SMI STS6 clears the interrupt status temporarily. 
*/ + tmp = w83795_read(client, W83795_REG_ALARM_CTRL); + /* Switch to interrupt status for intrusion if needed */ + if (tmp & ALARM_CTRL_RTSACS) + w83795_write(client, W83795_REG_ALARM_CTRL, + tmp & ~ALARM_CTRL_RTSACS); + intrusion = w83795_read(client, W83795_REG_ALARM(5)) & (1 << 6); + /* Switch to real-time alarms */ + w83795_write(client, W83795_REG_ALARM_CTRL, tmp | ALARM_CTRL_RTSACS); for (i = 0; i < ARRAY_SIZE(data->alarms); i++) data->alarms[i] = w83795_read(client, W83795_REG_ALARM(i)); + data->alarms[5] |= intrusion; + /* Restore original configuration if needed */ + if (!(tmp & ALARM_CTRL_RTSACS)) + w83795_write(client, W83795_REG_ALARM_CTRL, + tmp & ~ALARM_CTRL_RTSACS); data->last_updated = jiffies; data->valid = 1; @@ -730,6 +762,10 @@ store_chassis_clear(struct device *dev, val = w83795_read(client, W83795_REG_CLR_CHASSIS); val |= 0x80; w83795_write(client, W83795_REG_CLR_CHASSIS, val); + + /* Clear status and force cache refresh */ + w83795_read(client, W83795_REG_ALARM(5)); + data->valid = 0; mutex_unlock(&data->update_lock); return count; } @@ -857,20 +893,20 @@ show_pwm_enable(struct device *dev, struct device_attribute *attr, char *buf) int index = sensor_attr->index; u8 tmp; - if (1 == (data->pwm_fcms[0] & (1 << index))) { + /* Speed cruise mode */ + if (data->pwm_fcms[0] & (1 << index)) { tmp = 2; goto out; } + /* Thermal cruise or SmartFan IV mode */ for (tmp = 0; tmp < 6; tmp++) { if (data->pwm_tfmr[tmp] & (1 << index)) { tmp = 3; goto out; } } - if (data->pwm_fomc & (1 << index)) - tmp = 0; - else - tmp = 1; + /* Manual mode */ + tmp = 1; out: return sprintf(buf, "%u\n", tmp); @@ -890,23 +926,21 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr, if (strict_strtoul(buf, 10, &val) < 0) return -EINVAL; - if (val > 2) + if (val < 1 || val > 2) return -EINVAL; mutex_lock(&data->update_lock); switch (val) { - case 0: case 1: + /* Clear speed cruise mode bits */ data->pwm_fcms[0] &= ~(1 << index); w83795_write(client, W83795_REG_FCMS1, data->pwm_fcms[0]); + /* Clear thermal cruise mode bits */ for (i = 0; i < 6; i++) { data->pwm_tfmr[i] &= ~(1 << index); w83795_write(client, W83795_REG_TFMR(i), data->pwm_tfmr[i]); } - data->pwm_fomc |= 1 << index; - data->pwm_fomc ^= val << index; - w83795_write(client, W83795_REG_FOMC, data->pwm_fomc); break; case 2: data->pwm_fcms[0] |= (1 << index); @@ -918,23 +952,60 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr, } static ssize_t +show_pwm_mode(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct w83795_data *data = w83795_update_pwm_config(dev); + int index = to_sensor_dev_attr_2(attr)->index; + unsigned int mode; + + if (data->pwm_fomc & (1 << index)) + mode = 0; /* DC */ + else + mode = 1; /* PWM */ + + return sprintf(buf, "%u\n", mode); +} + +/* + * Check whether a given temperature source can ever be useful. + * Returns the number of selectable temperature channels which are + * enabled. 
+ */ +static int w83795_tss_useful(const struct w83795_data *data, int tsrc) +{ + int useful = 0, i; + + for (i = 0; i < 4; i++) { + if (tss_map[i][tsrc] == TSS_MAP_RESERVED) + continue; + if (tss_map[i][tsrc] < 6) /* Analog */ + useful += (data->has_temp >> tss_map[i][tsrc]) & 1; + else /* Digital */ + useful += (data->has_dts >> (tss_map[i][tsrc] - 6)) & 1; + } + + return useful; +} + +static ssize_t show_temp_src(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); struct w83795_data *data = w83795_update_pwm_config(dev); int index = sensor_attr->index; - u8 val = index / 2; - u8 tmp = data->temp_src[val]; + u8 tmp = data->temp_src[index / 2]; if (index & 1) - val = 4; + tmp >>= 4; /* Pick high nibble */ else - val = 0; - tmp >>= val; - tmp &= 0x0f; + tmp &= 0x0f; /* Pick low nibble */ - return sprintf(buf, "%u\n", tmp); + /* Look-up the actual temperature channel number */ + if (tmp >= 4 || tss_map[tmp][index] == TSS_MAP_RESERVED) + return -EINVAL; /* Shouldn't happen */ + + return sprintf(buf, "%u\n", (unsigned int)tss_map[tmp][index] + 1); } static ssize_t @@ -946,12 +1017,21 @@ store_temp_src(struct device *dev, struct device_attribute *attr, struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int index = sensor_attr->index; - unsigned long tmp; + int tmp; + unsigned long channel; u8 val = index / 2; - if (strict_strtoul(buf, 10, &tmp) < 0) + if (strict_strtoul(buf, 10, &channel) < 0 || + channel < 1 || channel > 14) + return -EINVAL; + + /* Check if request can be fulfilled */ + for (tmp = 0; tmp < 4; tmp++) { + if (tss_map[tmp][index] == channel - 1) + break; + } + if (tmp == 4) /* No match */ return -EINVAL; - tmp = SENSORS_LIMIT(tmp, 0, 15); mutex_lock(&data->update_lock); if (index & 1) { @@ -1515,7 +1595,7 @@ store_sf_setup(struct device *dev, struct device_attribute *attr, #define NOT_USED -1 -/* Don't change the attribute order, _max and _min are accessed by index +/* Don't change the attribute order, _max, _min and _beep are accessed by index * somewhere else in the code */ #define SENSOR_ATTR_IN(index) { \ SENSOR_ATTR_2(in##index##_input, S_IRUGO, show_in, NULL, \ @@ -1530,6 +1610,8 @@ store_sf_setup(struct device *dev, struct device_attribute *attr, show_alarm_beep, store_beep, BEEP_ENABLE, \ index + ((index > 14) ? 
1 : 0)) } +/* Don't change the attribute order, _beep is accessed by index + * somewhere else in the code */ #define SENSOR_ATTR_FAN(index) { \ SENSOR_ATTR_2(fan##index##_input, S_IRUGO, show_fan, \ NULL, FAN_INPUT, index - 1), \ @@ -1553,9 +1635,13 @@ store_sf_setup(struct device *dev, struct device_attribute *attr, show_pwm, store_pwm, PWM_FREQ, index - 1), \ SENSOR_ATTR_2(pwm##index##_enable, S_IWUSR | S_IRUGO, \ show_pwm_enable, store_pwm_enable, NOT_USED, index - 1), \ + SENSOR_ATTR_2(pwm##index##_mode, S_IRUGO, \ + show_pwm_mode, NULL, NOT_USED, index - 1), \ SENSOR_ATTR_2(fan##index##_target, S_IWUSR | S_IRUGO, \ show_fanin, store_fanin, FANIN_TARGET, index - 1) } +/* Don't change the attribute order, _beep is accessed by index + * somewhere else in the code */ #define SENSOR_ATTR_DTS(index) { \ SENSOR_ATTR_2(temp##index##_type, S_IRUGO , \ show_dts_mode, NULL, NOT_USED, index - 7), \ @@ -1574,6 +1660,8 @@ store_sf_setup(struct device *dev, struct device_attribute *attr, SENSOR_ATTR_2(temp##index##_beep, S_IWUSR | S_IRUGO, \ show_alarm_beep, store_beep, BEEP_ENABLE, index + 17) } +/* Don't change the attribute order, _beep is accessed by index + * somewhere else in the code */ #define SENSOR_ATTR_TEMP(index) { \ SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 4 ? S_IWUSR : 0), \ show_temp_mode, store_temp_mode, NOT_USED, index - 1), \ @@ -1593,8 +1681,6 @@ store_sf_setup(struct device *dev, struct device_attribute *attr, SENSOR_ATTR_2(temp##index##_beep, S_IWUSR | S_IRUGO, \ show_alarm_beep, store_beep, BEEP_ENABLE, \ index + (index > 4 ? 11 : 17)), \ - SENSOR_ATTR_2(temp##index##_source_sel, S_IWUSR | S_IRUGO, \ - show_temp_src, store_temp_src, NOT_USED, index - 1), \ SENSOR_ATTR_2(temp##index##_pwm_enable, S_IWUSR | S_IRUGO, \ show_temp_pwm_enable, store_temp_pwm_enable, \ TEMP_PWM_ENABLE, index - 1), \ @@ -1680,7 +1766,7 @@ static const struct sensor_device_attribute_2 w83795_fan[][4] = { SENSOR_ATTR_FAN(14), }; -static const struct sensor_device_attribute_2 w83795_temp[][29] = { +static const struct sensor_device_attribute_2 w83795_temp[][28] = { SENSOR_ATTR_TEMP(1), SENSOR_ATTR_TEMP(2), SENSOR_ATTR_TEMP(3), @@ -1700,7 +1786,7 @@ static const struct sensor_device_attribute_2 w83795_dts[][8] = { SENSOR_ATTR_DTS(14), }; -static const struct sensor_device_attribute_2 w83795_pwm[][7] = { +static const struct sensor_device_attribute_2 w83795_pwm[][8] = { SENSOR_ATTR_PWM(1), SENSOR_ATTR_PWM(2), SENSOR_ATTR_PWM(3), @@ -1711,13 +1797,24 @@ static const struct sensor_device_attribute_2 w83795_pwm[][7] = { SENSOR_ATTR_PWM(8), }; +static const struct sensor_device_attribute_2 w83795_tss[6] = { + SENSOR_ATTR_2(temp1_source_sel, S_IWUSR | S_IRUGO, + show_temp_src, store_temp_src, NOT_USED, 0), + SENSOR_ATTR_2(temp2_source_sel, S_IWUSR | S_IRUGO, + show_temp_src, store_temp_src, NOT_USED, 1), + SENSOR_ATTR_2(temp3_source_sel, S_IWUSR | S_IRUGO, + show_temp_src, store_temp_src, NOT_USED, 2), + SENSOR_ATTR_2(temp4_source_sel, S_IWUSR | S_IRUGO, + show_temp_src, store_temp_src, NOT_USED, 3), + SENSOR_ATTR_2(temp5_source_sel, S_IWUSR | S_IRUGO, + show_temp_src, store_temp_src, NOT_USED, 4), + SENSOR_ATTR_2(temp6_source_sel, S_IWUSR | S_IRUGO, + show_temp_src, store_temp_src, NOT_USED, 5), +}; + static const struct sensor_device_attribute_2 sda_single_files[] = { SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_alarm_beep, store_chassis_clear, ALARM_STATUS, 46), - SENSOR_ATTR_2(intrusion0_beep, S_IWUSR | S_IRUGO, show_alarm_beep, - store_beep, BEEP_ENABLE, 46), - 
SENSOR_ATTR_2(beep_enable, S_IWUSR | S_IRUGO, show_alarm_beep, - store_beep, BEEP_ENABLE, 47), #ifdef CONFIG_SENSORS_W83795_FANCTRL SENSOR_ATTR_2(speed_cruise_tolerance, S_IWUSR | S_IRUGO, show_fanin, store_fanin, FANIN_TOL, NOT_USED), @@ -1730,6 +1827,13 @@ static const struct sensor_device_attribute_2 sda_single_files[] = { #endif }; +static const struct sensor_device_attribute_2 sda_beep_files[] = { + SENSOR_ATTR_2(intrusion0_beep, S_IWUSR | S_IRUGO, show_alarm_beep, + store_beep, BEEP_ENABLE, 46), + SENSOR_ATTR_2(beep_enable, S_IWUSR | S_IRUGO, show_alarm_beep, + store_beep, BEEP_ENABLE, 47), +}; + /* * Driver interface */ @@ -1859,6 +1963,8 @@ static int w83795_handle_files(struct device *dev, int (*fn)(struct device *, if (!(data->has_in & (1 << i))) continue; for (j = 0; j < ARRAY_SIZE(w83795_in[0]); j++) { + if (j == 4 && !data->enable_beep) + continue; err = fn(dev, &w83795_in[i][j].dev_attr); if (err) return err; @@ -1869,18 +1975,37 @@ static int w83795_handle_files(struct device *dev, int (*fn)(struct device *, if (!(data->has_fan & (1 << i))) continue; for (j = 0; j < ARRAY_SIZE(w83795_fan[0]); j++) { + if (j == 3 && !data->enable_beep) + continue; err = fn(dev, &w83795_fan[i][j].dev_attr); if (err) return err; } } + for (i = 0; i < ARRAY_SIZE(w83795_tss); i++) { + j = w83795_tss_useful(data, i); + if (!j) + continue; + err = fn(dev, &w83795_tss[i].dev_attr); + if (err) + return err; + } + for (i = 0; i < ARRAY_SIZE(sda_single_files); i++) { err = fn(dev, &sda_single_files[i].dev_attr); if (err) return err; } + if (data->enable_beep) { + for (i = 0; i < ARRAY_SIZE(sda_beep_files); i++) { + err = fn(dev, &sda_beep_files[i].dev_attr); + if (err) + return err; + } + } + #ifdef CONFIG_SENSORS_W83795_FANCTRL for (i = 0; i < data->has_pwm; i++) { for (j = 0; j < ARRAY_SIZE(w83795_pwm[0]); j++) { @@ -1899,6 +2024,8 @@ static int w83795_handle_files(struct device *dev, int (*fn)(struct device *, #else for (j = 0; j < 8; j++) { #endif + if (j == 7 && !data->enable_beep) + continue; err = fn(dev, &w83795_temp[i][j].dev_attr); if (err) return err; @@ -1910,6 +2037,8 @@ static int w83795_handle_files(struct device *dev, int (*fn)(struct device *, if (!(data->has_dts & (1 << i))) continue; for (j = 0; j < ARRAY_SIZE(w83795_dts[0]); j++) { + if (j == 7 && !data->enable_beep) + continue; err = fn(dev, &w83795_dts[i][j].dev_attr); if (err) return err; @@ -2049,6 +2178,18 @@ static int w83795_probe(struct i2c_client *client, else data->has_pwm = 2; + /* Check if BEEP pin is available */ + if (data->chip_type == w83795g) { + /* The W83795G has a dedicated BEEP pin */ + data->enable_beep = 1; + } else { + /* The W83795ADG has a shared pin for OVT# and BEEP, so you + * can't have both */ + tmp = w83795_read(client, W83795_REG_OVT_CFG); + if ((tmp & OVT_CFG_SEL) == 0) + data->enable_beep = 1; + } + err = w83795_handle_files(dev, device_create_file); if (err) goto exit_remove; diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig index b923074b2cb..30f06e956bf 100644 --- a/drivers/i2c/Kconfig +++ b/drivers/i2c/Kconfig @@ -75,8 +75,7 @@ config I2C_HELPER_AUTO In doubt, say Y. config I2C_SMBUS - tristate - prompt "SMBus-specific protocols" if !I2C_HELPER_AUTO + tristate "SMBus-specific protocols" if !I2C_HELPER_AUTO help Say Y here if you want support for SMBus extensions to the I2C specification. 
At the moment, the only supported extension is diff --git a/drivers/i2c/algos/Kconfig b/drivers/i2c/algos/Kconfig index 3998dd620a0..f1cfe7e5508 100644 --- a/drivers/i2c/algos/Kconfig +++ b/drivers/i2c/algos/Kconfig @@ -3,7 +3,7 @@ # menu "I2C Algorithms" - depends on !I2C_HELPER_AUTO + visible if !I2C_HELPER_AUTO config I2C_ALGOBIT tristate "I2C bit-banging interfaces" @@ -15,15 +15,3 @@ config I2C_ALGOPCA tristate "I2C PCA 9564 interfaces" endmenu - -# In automatic configuration mode, we still have to define the -# symbols to avoid unmet dependencies. - -if I2C_HELPER_AUTO -config I2C_ALGOBIT - tristate -config I2C_ALGOPCF - tristate -config I2C_ALGOPCA - tristate -endif diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index c950be3cce2..3a6321cb803 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -99,6 +99,7 @@ config I2C_I801 ICH10 5/3400 Series (PCH) Cougar Point (PCH) + Patsburg (PCH) This driver can also be built as a module. If so, the module will be called i2c-i801. diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 59d65981eed..02835ce7ff4 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -3,6 +3,8 @@ Philip Edelbrock <phil@netroedge.com>, and Mark D. Studebaker <mdsxyz123@yahoo.com> Copyright (C) 2007, 2008 Jean Delvare <khali@linux-fr.org> + Copyright (C) 2010 Intel Corporation, + David Woodhouse <dwmw2@infradead.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -43,6 +45,10 @@ ICH10 0x3a60 32 hard yes yes yes 5/3400 Series (PCH) 0x3b30 32 hard yes yes yes Cougar Point (PCH) 0x1c22 32 hard yes yes yes + Patsburg (PCH) 0x1d22 32 hard yes yes yes + Patsburg (PCH) IDF 0x1d70 32 hard yes yes yes + Patsburg (PCH) IDF 0x1d71 32 hard yes yes yes + Patsburg (PCH) IDF 0x1d72 32 hard yes yes yes Features supported by this driver: Software PEC no @@ -50,12 +56,11 @@ Block buffer yes Block process call transaction no I2C block read transaction yes (doesn't use the block buffer) + Slave mode no See the file Documentation/i2c/busses/i2c-i801 for details. 
*/ -/* Note: we assume there can only be one I801, with one SMBus interface */ - #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> @@ -69,16 +74,16 @@ #include <linux/dmi.h> /* I801 SMBus address offsets */ -#define SMBHSTSTS (0 + i801_smba) -#define SMBHSTCNT (2 + i801_smba) -#define SMBHSTCMD (3 + i801_smba) -#define SMBHSTADD (4 + i801_smba) -#define SMBHSTDAT0 (5 + i801_smba) -#define SMBHSTDAT1 (6 + i801_smba) -#define SMBBLKDAT (7 + i801_smba) -#define SMBPEC (8 + i801_smba) /* ICH3 and later */ -#define SMBAUXSTS (12 + i801_smba) /* ICH4 and later */ -#define SMBAUXCTL (13 + i801_smba) /* ICH4 and later */ +#define SMBHSTSTS(p) (0 + (p)->smba) +#define SMBHSTCNT(p) (2 + (p)->smba) +#define SMBHSTCMD(p) (3 + (p)->smba) +#define SMBHSTADD(p) (4 + (p)->smba) +#define SMBHSTDAT0(p) (5 + (p)->smba) +#define SMBHSTDAT1(p) (6 + (p)->smba) +#define SMBBLKDAT(p) (7 + (p)->smba) +#define SMBPEC(p) (8 + (p)->smba) /* ICH3 and later */ +#define SMBAUXSTS(p) (12 + (p)->smba) /* ICH4 and later */ +#define SMBAUXCTL(p) (13 + (p)->smba) /* ICH4 and later */ /* PCI Address Constants */ #define SMBBAR 4 @@ -127,16 +132,25 @@ SMBHSTSTS_BUS_ERR | SMBHSTSTS_DEV_ERR | \ SMBHSTSTS_INTR) -static unsigned long i801_smba; -static unsigned char i801_original_hstcfg; +/* Patsburg also has three 'Integrated Device Function' SMBus controllers */ +#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0 0x1d70 +#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1 0x1d71 +#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2 0x1d72 + +struct i801_priv { + struct i2c_adapter adapter; + unsigned long smba; + unsigned char original_hstcfg; + struct pci_dev *pci_dev; + unsigned int features; +}; + static struct pci_driver i801_driver; -static struct pci_dev *I801_dev; #define FEATURE_SMBUS_PEC (1 << 0) #define FEATURE_BLOCK_BUFFER (1 << 1) #define FEATURE_BLOCK_PROC (1 << 2) #define FEATURE_I2C_BLOCK_READ (1 << 3) -static unsigned int i801_features; static const char *i801_feature_names[] = { "SMBus PEC", @@ -151,24 +165,24 @@ MODULE_PARM_DESC(disable_features, "Disable selected driver features"); /* Make sure the SMBus host is ready to start transmitting. Return 0 if it is, -EBUSY if it is not. */ -static int i801_check_pre(void) +static int i801_check_pre(struct i801_priv *priv) { int status; - status = inb_p(SMBHSTSTS); + status = inb_p(SMBHSTSTS(priv)); if (status & SMBHSTSTS_HOST_BUSY) { - dev_err(&I801_dev->dev, "SMBus is busy, can't use it!\n"); + dev_err(&priv->pci_dev->dev, "SMBus is busy, can't use it!\n"); return -EBUSY; } status &= STATUS_FLAGS; if (status) { - dev_dbg(&I801_dev->dev, "Clearing status flags (%02x)\n", + dev_dbg(&priv->pci_dev->dev, "Clearing status flags (%02x)\n", status); - outb_p(status, SMBHSTSTS); - status = inb_p(SMBHSTSTS) & STATUS_FLAGS; + outb_p(status, SMBHSTSTS(priv)); + status = inb_p(SMBHSTSTS(priv)) & STATUS_FLAGS; if (status) { - dev_err(&I801_dev->dev, + dev_err(&priv->pci_dev->dev, "Failed clearing status flags (%02x)\n", status); return -EBUSY; @@ -179,48 +193,50 @@ static int i801_check_pre(void) } /* Convert the status register to an error code, and clear it. 
*/ -static int i801_check_post(int status, int timeout) +static int i801_check_post(struct i801_priv *priv, int status, int timeout) { int result = 0; /* If the SMBus is still busy, we give up */ if (timeout) { - dev_err(&I801_dev->dev, "Transaction timeout\n"); + dev_err(&priv->pci_dev->dev, "Transaction timeout\n"); /* try to stop the current command */ - dev_dbg(&I801_dev->dev, "Terminating the current operation\n"); - outb_p(inb_p(SMBHSTCNT) | SMBHSTCNT_KILL, SMBHSTCNT); + dev_dbg(&priv->pci_dev->dev, "Terminating the current operation\n"); + outb_p(inb_p(SMBHSTCNT(priv)) | SMBHSTCNT_KILL, + SMBHSTCNT(priv)); msleep(1); - outb_p(inb_p(SMBHSTCNT) & (~SMBHSTCNT_KILL), SMBHSTCNT); + outb_p(inb_p(SMBHSTCNT(priv)) & (~SMBHSTCNT_KILL), + SMBHSTCNT(priv)); /* Check if it worked */ - status = inb_p(SMBHSTSTS); + status = inb_p(SMBHSTSTS(priv)); if ((status & SMBHSTSTS_HOST_BUSY) || !(status & SMBHSTSTS_FAILED)) - dev_err(&I801_dev->dev, + dev_err(&priv->pci_dev->dev, "Failed terminating the transaction\n"); - outb_p(STATUS_FLAGS, SMBHSTSTS); + outb_p(STATUS_FLAGS, SMBHSTSTS(priv)); return -ETIMEDOUT; } if (status & SMBHSTSTS_FAILED) { result = -EIO; - dev_err(&I801_dev->dev, "Transaction failed\n"); + dev_err(&priv->pci_dev->dev, "Transaction failed\n"); } if (status & SMBHSTSTS_DEV_ERR) { result = -ENXIO; - dev_dbg(&I801_dev->dev, "No response\n"); + dev_dbg(&priv->pci_dev->dev, "No response\n"); } if (status & SMBHSTSTS_BUS_ERR) { result = -EAGAIN; - dev_dbg(&I801_dev->dev, "Lost arbitration\n"); + dev_dbg(&priv->pci_dev->dev, "Lost arbitration\n"); } if (result) { /* Clear error flags */ - outb_p(status & STATUS_FLAGS, SMBHSTSTS); - status = inb_p(SMBHSTSTS) & STATUS_FLAGS; + outb_p(status & STATUS_FLAGS, SMBHSTSTS(priv)); + status = inb_p(SMBHSTSTS(priv)) & STATUS_FLAGS; if (status) { - dev_warn(&I801_dev->dev, "Failed clearing status " + dev_warn(&priv->pci_dev->dev, "Failed clearing status " "flags at end of transaction (%02x)\n", status); } @@ -229,86 +245,88 @@ static int i801_check_post(int status, int timeout) return result; } -static int i801_transaction(int xact) +static int i801_transaction(struct i801_priv *priv, int xact) { int status; int result; int timeout = 0; - result = i801_check_pre(); + result = i801_check_pre(priv); if (result < 0) return result; /* the current contents of SMBHSTCNT can be overwritten, since PEC, * INTREN, SMBSCMD are passed in xact */ - outb_p(xact | I801_START, SMBHSTCNT); + outb_p(xact | I801_START, SMBHSTCNT(priv)); /* We will always wait for a fraction of a second! 
*/ do { msleep(1); - status = inb_p(SMBHSTSTS); + status = inb_p(SMBHSTSTS(priv)); } while ((status & SMBHSTSTS_HOST_BUSY) && (timeout++ < MAX_TIMEOUT)); - result = i801_check_post(status, timeout > MAX_TIMEOUT); + result = i801_check_post(priv, status, timeout > MAX_TIMEOUT); if (result < 0) return result; - outb_p(SMBHSTSTS_INTR, SMBHSTSTS); + outb_p(SMBHSTSTS_INTR, SMBHSTSTS(priv)); return 0; } /* wait for INTR bit as advised by Intel */ -static void i801_wait_hwpec(void) +static void i801_wait_hwpec(struct i801_priv *priv) { int timeout = 0; int status; do { msleep(1); - status = inb_p(SMBHSTSTS); + status = inb_p(SMBHSTSTS(priv)); } while ((!(status & SMBHSTSTS_INTR)) && (timeout++ < MAX_TIMEOUT)); if (timeout > MAX_TIMEOUT) - dev_dbg(&I801_dev->dev, "PEC Timeout!\n"); + dev_dbg(&priv->pci_dev->dev, "PEC Timeout!\n"); - outb_p(status, SMBHSTSTS); + outb_p(status, SMBHSTSTS(priv)); } -static int i801_block_transaction_by_block(union i2c_smbus_data *data, +static int i801_block_transaction_by_block(struct i801_priv *priv, + union i2c_smbus_data *data, char read_write, int hwpec) { int i, len; int status; - inb_p(SMBHSTCNT); /* reset the data buffer index */ + inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */ /* Use 32-byte buffer to process this transaction */ if (read_write == I2C_SMBUS_WRITE) { len = data->block[0]; - outb_p(len, SMBHSTDAT0); + outb_p(len, SMBHSTDAT0(priv)); for (i = 0; i < len; i++) - outb_p(data->block[i+1], SMBBLKDAT); + outb_p(data->block[i+1], SMBBLKDAT(priv)); } - status = i801_transaction(I801_BLOCK_DATA | ENABLE_INT9 | + status = i801_transaction(priv, I801_BLOCK_DATA | ENABLE_INT9 | I801_PEC_EN * hwpec); if (status) return status; if (read_write == I2C_SMBUS_READ) { - len = inb_p(SMBHSTDAT0); + len = inb_p(SMBHSTDAT0(priv)); if (len < 1 || len > I2C_SMBUS_BLOCK_MAX) return -EPROTO; data->block[0] = len; for (i = 0; i < len; i++) - data->block[i + 1] = inb_p(SMBBLKDAT); + data->block[i + 1] = inb_p(SMBBLKDAT(priv)); } return 0; } -static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data, +static int i801_block_transaction_byte_by_byte(struct i801_priv *priv, + union i2c_smbus_data *data, char read_write, int command, int hwpec) { @@ -318,15 +336,15 @@ static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data, int result; int timeout; - result = i801_check_pre(); + result = i801_check_pre(priv); if (result < 0) return result; len = data->block[0]; if (read_write == I2C_SMBUS_WRITE) { - outb_p(len, SMBHSTDAT0); - outb_p(data->block[1], SMBBLKDAT); + outb_p(len, SMBHSTDAT0(priv)); + outb_p(data->block[1], SMBBLKDAT(priv)); } for (i = 1; i <= len; i++) { @@ -342,34 +360,37 @@ static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data, else smbcmd = I801_BLOCK_DATA; } - outb_p(smbcmd | ENABLE_INT9, SMBHSTCNT); + outb_p(smbcmd | ENABLE_INT9, SMBHSTCNT(priv)); if (i == 1) - outb_p(inb(SMBHSTCNT) | I801_START, SMBHSTCNT); + outb_p(inb(SMBHSTCNT(priv)) | I801_START, + SMBHSTCNT(priv)); /* We will always wait for a fraction of a second! 
*/ timeout = 0; do { msleep(1); - status = inb_p(SMBHSTSTS); + status = inb_p(SMBHSTSTS(priv)); } while ((!(status & SMBHSTSTS_BYTE_DONE)) && (timeout++ < MAX_TIMEOUT)); - result = i801_check_post(status, timeout > MAX_TIMEOUT); + result = i801_check_post(priv, status, timeout > MAX_TIMEOUT); if (result < 0) return result; if (i == 1 && read_write == I2C_SMBUS_READ && command != I2C_SMBUS_I2C_BLOCK_DATA) { - len = inb_p(SMBHSTDAT0); + len = inb_p(SMBHSTDAT0(priv)); if (len < 1 || len > I2C_SMBUS_BLOCK_MAX) { - dev_err(&I801_dev->dev, + dev_err(&priv->pci_dev->dev, "Illegal SMBus block read size %d\n", len); /* Recover */ - while (inb_p(SMBHSTSTS) & SMBHSTSTS_HOST_BUSY) - outb_p(SMBHSTSTS_BYTE_DONE, SMBHSTSTS); - outb_p(SMBHSTSTS_INTR, SMBHSTSTS); + while (inb_p(SMBHSTSTS(priv)) & + SMBHSTSTS_HOST_BUSY) + outb_p(SMBHSTSTS_BYTE_DONE, + SMBHSTSTS(priv)); + outb_p(SMBHSTSTS_INTR, SMBHSTSTS(priv)); return -EPROTO; } data->block[0] = len; @@ -377,27 +398,28 @@ static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data, /* Retrieve/store value in SMBBLKDAT */ if (read_write == I2C_SMBUS_READ) - data->block[i] = inb_p(SMBBLKDAT); + data->block[i] = inb_p(SMBBLKDAT(priv)); if (read_write == I2C_SMBUS_WRITE && i+1 <= len) - outb_p(data->block[i+1], SMBBLKDAT); + outb_p(data->block[i+1], SMBBLKDAT(priv)); /* signals SMBBLKDAT ready */ - outb_p(SMBHSTSTS_BYTE_DONE | SMBHSTSTS_INTR, SMBHSTSTS); + outb_p(SMBHSTSTS_BYTE_DONE | SMBHSTSTS_INTR, SMBHSTSTS(priv)); } return 0; } -static int i801_set_block_buffer_mode(void) +static int i801_set_block_buffer_mode(struct i801_priv *priv) { - outb_p(inb_p(SMBAUXCTL) | SMBAUXCTL_E32B, SMBAUXCTL); - if ((inb_p(SMBAUXCTL) & SMBAUXCTL_E32B) == 0) + outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv)); + if ((inb_p(SMBAUXCTL(priv)) & SMBAUXCTL_E32B) == 0) return -EIO; return 0; } /* Block transaction function */ -static int i801_block_transaction(union i2c_smbus_data *data, char read_write, +static int i801_block_transaction(struct i801_priv *priv, + union i2c_smbus_data *data, char read_write, int command, int hwpec) { int result = 0; @@ -406,11 +428,11 @@ static int i801_block_transaction(union i2c_smbus_data *data, char read_write, if (command == I2C_SMBUS_I2C_BLOCK_DATA) { if (read_write == I2C_SMBUS_WRITE) { /* set I2C_EN bit in configuration register */ - pci_read_config_byte(I801_dev, SMBHSTCFG, &hostc); - pci_write_config_byte(I801_dev, SMBHSTCFG, + pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &hostc); + pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hostc | SMBHSTCFG_I2C_EN); - } else if (!(i801_features & FEATURE_I2C_BLOCK_READ)) { - dev_err(&I801_dev->dev, + } else if (!(priv->features & FEATURE_I2C_BLOCK_READ)) { + dev_err(&priv->pci_dev->dev, "I2C block read is unsupported!\n"); return -EOPNOTSUPP; } @@ -429,22 +451,23 @@ static int i801_block_transaction(union i2c_smbus_data *data, char read_write, /* Experience has shown that the block buffer can only be used for SMBus (not I2C) block transactions, even though the datasheet doesn't mention this limitation. 
*/ - if ((i801_features & FEATURE_BLOCK_BUFFER) + if ((priv->features & FEATURE_BLOCK_BUFFER) && command != I2C_SMBUS_I2C_BLOCK_DATA - && i801_set_block_buffer_mode() == 0) - result = i801_block_transaction_by_block(data, read_write, - hwpec); + && i801_set_block_buffer_mode(priv) == 0) + result = i801_block_transaction_by_block(priv, data, + read_write, hwpec); else - result = i801_block_transaction_byte_by_byte(data, read_write, + result = i801_block_transaction_byte_by_byte(priv, data, + read_write, command, hwpec); if (result == 0 && hwpec) - i801_wait_hwpec(); + i801_wait_hwpec(priv); if (command == I2C_SMBUS_I2C_BLOCK_DATA && read_write == I2C_SMBUS_WRITE) { /* restore saved configuration register value */ - pci_write_config_byte(I801_dev, SMBHSTCFG, hostc); + pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hostc); } return result; } @@ -457,81 +480,85 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, int hwpec; int block = 0; int ret, xact = 0; + struct i801_priv *priv = i2c_get_adapdata(adap); - hwpec = (i801_features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC) + hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC) && size != I2C_SMBUS_QUICK && size != I2C_SMBUS_I2C_BLOCK_DATA; switch (size) { case I2C_SMBUS_QUICK: outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), - SMBHSTADD); + SMBHSTADD(priv)); xact = I801_QUICK; break; case I2C_SMBUS_BYTE: outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), - SMBHSTADD); + SMBHSTADD(priv)); if (read_write == I2C_SMBUS_WRITE) - outb_p(command, SMBHSTCMD); + outb_p(command, SMBHSTCMD(priv)); xact = I801_BYTE; break; case I2C_SMBUS_BYTE_DATA: outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), - SMBHSTADD); - outb_p(command, SMBHSTCMD); + SMBHSTADD(priv)); + outb_p(command, SMBHSTCMD(priv)); if (read_write == I2C_SMBUS_WRITE) - outb_p(data->byte, SMBHSTDAT0); + outb_p(data->byte, SMBHSTDAT0(priv)); xact = I801_BYTE_DATA; break; case I2C_SMBUS_WORD_DATA: outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), - SMBHSTADD); - outb_p(command, SMBHSTCMD); + SMBHSTADD(priv)); + outb_p(command, SMBHSTCMD(priv)); if (read_write == I2C_SMBUS_WRITE) { - outb_p(data->word & 0xff, SMBHSTDAT0); - outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1); + outb_p(data->word & 0xff, SMBHSTDAT0(priv)); + outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1(priv)); } xact = I801_WORD_DATA; break; case I2C_SMBUS_BLOCK_DATA: outb_p(((addr & 0x7f) << 1) | (read_write & 0x01), - SMBHSTADD); - outb_p(command, SMBHSTCMD); + SMBHSTADD(priv)); + outb_p(command, SMBHSTCMD(priv)); block = 1; break; case I2C_SMBUS_I2C_BLOCK_DATA: /* NB: page 240 of ICH5 datasheet shows that the R/#W * bit should be cleared here, even when reading */ - outb_p((addr & 0x7f) << 1, SMBHSTADD); + outb_p((addr & 0x7f) << 1, SMBHSTADD(priv)); if (read_write == I2C_SMBUS_READ) { /* NB: page 240 of ICH5 datasheet also shows * that DATA1 is the cmd field when reading */ - outb_p(command, SMBHSTDAT1); + outb_p(command, SMBHSTDAT1(priv)); } else - outb_p(command, SMBHSTCMD); + outb_p(command, SMBHSTCMD(priv)); block = 1; break; default: - dev_err(&I801_dev->dev, "Unsupported transaction %d\n", size); + dev_err(&priv->pci_dev->dev, "Unsupported transaction %d\n", + size); return -EOPNOTSUPP; } if (hwpec) /* enable/disable hardware PEC */ - outb_p(inb_p(SMBAUXCTL) | SMBAUXCTL_CRC, SMBAUXCTL); + outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_CRC, SMBAUXCTL(priv)); else - outb_p(inb_p(SMBAUXCTL) & (~SMBAUXCTL_CRC), SMBAUXCTL); + outb_p(inb_p(SMBAUXCTL(priv)) & (~SMBAUXCTL_CRC), + SMBAUXCTL(priv)); 
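/*
 * A minimal sketch (not part of this patch; all names below are
 * illustrative) of the per-device private-data pattern that the
 * i2c-i801 conversion above is built on: state that used to live in
 * file-scope globals (i801_smba, i801_features, I801_dev) is allocated
 * per PCI device in probe(), attached to the embedded adapter, and
 * looked up again from the adapter in the transfer path.
 */
#include <linux/i2c.h>
#include <linux/pci.h>
#include <linux/slab.h>

struct example_priv {
	struct i2c_adapter adapter;	/* embedded, so adapdata maps back to priv */
	unsigned long smba;		/* per-device I/O base, no longer global */
	unsigned int features;
};

static int example_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct example_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;
	priv->smba = pci_resource_start(dev, 4);	/* SMBBAR in the real driver */
	i2c_set_adapdata(&priv->adapter, priv);	/* recovered via i2c_get_adapdata() */
	pci_set_drvdata(dev, priv);		/* recovered in remove()/suspend() */
	return 0;
}

static s32 example_access(struct i2c_adapter *adap)
{
	/* Transfer path: no globals, every register macro takes priv */
	struct example_priv *priv = i2c_get_adapdata(adap);

	return (s32)priv->smba;
}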
if (block) - ret = i801_block_transaction(data, read_write, size, hwpec); + ret = i801_block_transaction(priv, data, read_write, size, + hwpec); else - ret = i801_transaction(xact | ENABLE_INT9); + ret = i801_transaction(priv, xact | ENABLE_INT9); /* Some BIOSes don't like it when PEC is enabled at reboot or resume time, so we forcibly disable it after every transaction. Turn off E32B for the same reason. */ if (hwpec || block) - outb_p(inb_p(SMBAUXCTL) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), - SMBAUXCTL); + outb_p(inb_p(SMBAUXCTL(priv)) & + ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv)); if (block) return ret; @@ -543,10 +570,11 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, switch (xact & 0x7f) { case I801_BYTE: /* Result put in SMBHSTDAT0 */ case I801_BYTE_DATA: - data->byte = inb_p(SMBHSTDAT0); + data->byte = inb_p(SMBHSTDAT0(priv)); break; case I801_WORD_DATA: - data->word = inb_p(SMBHSTDAT0) + (inb_p(SMBHSTDAT1) << 8); + data->word = inb_p(SMBHSTDAT0(priv)) + + (inb_p(SMBHSTDAT1(priv)) << 8); break; } return 0; @@ -555,11 +583,13 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, static u32 i801_func(struct i2c_adapter *adapter) { + struct i801_priv *priv = i2c_get_adapdata(adapter); + return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_WRITE_I2C_BLOCK | - ((i801_features & FEATURE_SMBUS_PEC) ? I2C_FUNC_SMBUS_PEC : 0) | - ((i801_features & FEATURE_I2C_BLOCK_READ) ? + ((priv->features & FEATURE_SMBUS_PEC) ? I2C_FUNC_SMBUS_PEC : 0) | + ((priv->features & FEATURE_I2C_BLOCK_READ) ? I2C_FUNC_SMBUS_READ_I2C_BLOCK : 0); } @@ -568,12 +598,6 @@ static const struct i2c_algorithm smbus_algorithm = { .functionality = i801_func, }; -static struct i2c_adapter i801_adapter = { - .owner = THIS_MODULE, - .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, - .algo = &smbus_algorithm, -}; - static const struct pci_device_id i801_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_3) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_3) }, @@ -592,6 +616,10 @@ static const struct pci_device_id i801_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2) }, { 0, } }; @@ -704,16 +732,25 @@ static int __devinit i801_probe(struct pci_dev *dev, { unsigned char temp; int err, i; + struct i801_priv *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + i2c_set_adapdata(&priv->adapter, priv); + priv->adapter.owner = THIS_MODULE; + priv->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; + priv->adapter.algo = &smbus_algorithm; - I801_dev = dev; - i801_features = 0; + priv->pci_dev = dev; switch (dev->device) { default: - i801_features |= FEATURE_I2C_BLOCK_READ; + priv->features |= FEATURE_I2C_BLOCK_READ; /* fall through */ case PCI_DEVICE_ID_INTEL_82801DB_3: - i801_features |= FEATURE_SMBUS_PEC; - i801_features |= FEATURE_BLOCK_BUFFER; + priv->features |= FEATURE_SMBUS_PEC; + priv->features |= FEATURE_BLOCK_BUFFER; /* fall through */ case 
PCI_DEVICE_ID_INTEL_82801CA_3: case PCI_DEVICE_ID_INTEL_82801BA_2: @@ -724,11 +761,11 @@ static int __devinit i801_probe(struct pci_dev *dev, /* Disable features on user request */ for (i = 0; i < ARRAY_SIZE(i801_feature_names); i++) { - if (i801_features & disable_features & (1 << i)) + if (priv->features & disable_features & (1 << i)) dev_notice(&dev->dev, "%s disabled by user\n", i801_feature_names[i]); } - i801_features &= ~disable_features; + priv->features &= ~disable_features; err = pci_enable_device(dev); if (err) { @@ -738,8 +775,8 @@ static int __devinit i801_probe(struct pci_dev *dev, } /* Determine the address of the SMBus area */ - i801_smba = pci_resource_start(dev, SMBBAR); - if (!i801_smba) { + priv->smba = pci_resource_start(dev, SMBBAR); + if (!priv->smba) { dev_err(&dev->dev, "SMBus base address uninitialized, " "upgrade BIOS\n"); err = -ENODEV; @@ -755,19 +792,19 @@ static int __devinit i801_probe(struct pci_dev *dev, err = pci_request_region(dev, SMBBAR, i801_driver.name); if (err) { dev_err(&dev->dev, "Failed to request SMBus region " - "0x%lx-0x%Lx\n", i801_smba, + "0x%lx-0x%Lx\n", priv->smba, (unsigned long long)pci_resource_end(dev, SMBBAR)); goto exit; } - pci_read_config_byte(I801_dev, SMBHSTCFG, &temp); - i801_original_hstcfg = temp; + pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &temp); + priv->original_hstcfg = temp; temp &= ~SMBHSTCFG_I2C_EN; /* SMBus timing */ if (!(temp & SMBHSTCFG_HST_EN)) { dev_info(&dev->dev, "Enabling SMBus device\n"); temp |= SMBHSTCFG_HST_EN; } - pci_write_config_byte(I801_dev, SMBHSTCFG, temp); + pci_write_config_byte(priv->pci_dev, SMBHSTCFG, temp); if (temp & SMBHSTCFG_SMB_SMI_EN) dev_dbg(&dev->dev, "SMBus using interrupt SMI#\n"); @@ -775,19 +812,19 @@ static int __devinit i801_probe(struct pci_dev *dev, dev_dbg(&dev->dev, "SMBus using PCI Interrupt\n"); /* Clear special mode bits */ - if (i801_features & (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER)) - outb_p(inb_p(SMBAUXCTL) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), - SMBAUXCTL); + if (priv->features & (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER)) + outb_p(inb_p(SMBAUXCTL(priv)) & + ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv)); /* set up the sysfs linkage to our parent device */ - i801_adapter.dev.parent = &dev->dev; + priv->adapter.dev.parent = &dev->dev; /* Retry up to 3 times on lost arbitration */ - i801_adapter.retries = 3; + priv->adapter.retries = 3; - snprintf(i801_adapter.name, sizeof(i801_adapter.name), - "SMBus I801 adapter at %04lx", i801_smba); - err = i2c_add_adapter(&i801_adapter); + snprintf(priv->adapter.name, sizeof(priv->adapter.name), + "SMBus I801 adapter at %04lx", priv->smba); + err = i2c_add_adapter(&priv->adapter); if (err) { dev_err(&dev->dev, "Failed to add SMBus adapter\n"); goto exit_release; @@ -801,27 +838,33 @@ static int __devinit i801_probe(struct pci_dev *dev, memset(&info, 0, sizeof(struct i2c_board_info)); info.addr = apanel_addr; strlcpy(info.type, "fujitsu_apanel", I2C_NAME_SIZE); - i2c_new_device(&i801_adapter, &info); + i2c_new_device(&priv->adapter, &info); } #endif #if defined CONFIG_SENSORS_FSCHMD || defined CONFIG_SENSORS_FSCHMD_MODULE if (dmi_name_in_vendors("FUJITSU")) - dmi_walk(dmi_check_onboard_devices, &i801_adapter); + dmi_walk(dmi_check_onboard_devices, &priv->adapter); #endif + pci_set_drvdata(dev, priv); return 0; exit_release: pci_release_region(dev, SMBBAR); exit: + kfree(priv); return err; } static void __devexit i801_remove(struct pci_dev *dev) { - i2c_del_adapter(&i801_adapter); - pci_write_config_byte(I801_dev, 
SMBHSTCFG, i801_original_hstcfg); + struct i801_priv *priv = pci_get_drvdata(dev); + + i2c_del_adapter(&priv->adapter); + pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg); pci_release_region(dev, SMBBAR); + pci_set_drvdata(dev, NULL); + kfree(priv); /* * do not call pci_disable_device(dev) since it can cause hard hangs on * some systems during power-off (eg. Fujitsu-Siemens Lifebook E8010) @@ -831,8 +874,10 @@ static void __devexit i801_remove(struct pci_dev *dev) #ifdef CONFIG_PM static int i801_suspend(struct pci_dev *dev, pm_message_t mesg) { + struct i801_priv *priv = pci_get_drvdata(dev); + pci_save_state(dev); - pci_write_config_byte(dev, SMBHSTCFG, i801_original_hstcfg); + pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg); pci_set_power_state(dev, pci_choose_state(dev, mesg)); return 0; } diff --git a/drivers/i2c/busses/i2c-intel-mid.c b/drivers/i2c/busses/i2c-intel-mid.c index 80f70d3a744..c71492782bb 100644 --- a/drivers/i2c/busses/i2c-intel-mid.c +++ b/drivers/i2c/busses/i2c-intel-mid.c @@ -999,7 +999,7 @@ static int __devinit intel_mid_i2c_probe(struct pci_dev *dev, /* Initialize struct members */ snprintf(mrst->adap.name, sizeof(mrst->adap.name), - "MRST/Medfield I2C at %lx", start); + "Intel MID I2C at %lx", start); mrst->adap.owner = THIS_MODULE; mrst->adap.algo = &intel_mid_i2c_algorithm; mrst->adap.dev.parent = &dev->dev; diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index d231f683f57..6b4cc567645 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -848,6 +848,18 @@ static int i2c_register_adapter(struct i2c_adapter *adap) goto out_list; } + /* Sanity checks */ + if (unlikely(adap->name[0] == '\0')) { + pr_err("i2c-core: Attempt to register an adapter with " + "no name!\n"); + return -EINVAL; + } + if (unlikely(!adap->algo)) { + pr_err("i2c-core: Attempt to register adapter '%s' with " + "no algo!\n", adap->name); + return -EINVAL; + } + rt_mutex_init(&adap->bus_lock); mutex_init(&adap->userspace_clients_lock); INIT_LIST_HEAD(&adap->userspace_clients); diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c index d32a4843fc3..d7a4833be41 100644 --- a/drivers/i2c/i2c-mux.c +++ b/drivers/i2c/i2c-mux.c @@ -120,7 +120,6 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent, snprintf(priv->adap.name, sizeof(priv->adap.name), "i2c-%d-mux (chan_id %d)", i2c_adapter_id(parent), chan_id); priv->adap.owner = THIS_MODULE; - priv->adap.id = parent->id; priv->adap.algo = &priv->algo; priv->adap.algo_data = priv; priv->adap.dev.parent = &parent->dev; diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 41665d2f9f9..c131d58bcb5 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -273,8 +273,6 @@ static int intel_idle_probe(void) pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates); - if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */ - lapic_timer_reliable_states = 0xFFFFFFFF; if (boot_cpu_data.x86 != 6) /* family 6 */ return -ENODEV; @@ -286,8 +284,6 @@ static int intel_idle_probe(void) case 0x1F: /* Core i7 and i5 Processor - Nehalem */ case 0x2E: /* Nehalem-EX Xeon */ case 0x2F: /* Westmere-EX Xeon */ - lapic_timer_reliable_states = (1 << 1); /* C1 */ - case 0x25: /* Westmere */ case 0x2C: /* Westmere */ cpuidle_state_table = nehalem_cstates; @@ -295,7 +291,6 @@ static int intel_idle_probe(void) case 0x1C: /* 28 - Atom Processor */ case 0x26: /* 38 - Lincroft Atom Processor */ - lapic_timer_reliable_states = (1 << 1); /* C1 */ cpuidle_state_table = 
atom_cstates; break; @@ -303,10 +298,6 @@ static int intel_idle_probe(void) case 0x2D: /* SNB Xeon */ cpuidle_state_table = snb_cstates; break; -#ifdef FUTURE_USE - case 0x17: /* 23 - Core 2 Duo */ - lapic_timer_reliable_states = (1 << 2) | (1 << 1); /* C2, C1 */ -#endif default: pr_debug(PREFIX "does not run on family %d model %d\n", @@ -314,6 +305,9 @@ static int intel_idle_probe(void) return -ENODEV; } + if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */ + lapic_timer_reliable_states = 0xFFFFFFFF; + pr_debug(PREFIX "v" INTEL_IDLE_VERSION " model 0x%X\n", boot_cpu_data.x86_model); diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c index bb7e1928082..9b737ff133e 100644 --- a/drivers/infiniband/core/ud_header.c +++ b/drivers/infiniband/core/ud_header.c @@ -278,36 +278,6 @@ void ib_ud_header_init(int payload_bytes, EXPORT_SYMBOL(ib_ud_header_init); /** - * ib_lrh_header_pack - Pack LRH header struct into wire format - * @lrh:unpacked LRH header struct - * @buf:Buffer to pack into - * - * ib_lrh_header_pack() packs the LRH header structure @lrh into - * wire format in the buffer @buf. - */ -int ib_lrh_header_pack(struct ib_unpacked_lrh *lrh, void *buf) -{ - ib_pack(lrh_table, ARRAY_SIZE(lrh_table), lrh, buf); - return 0; -} -EXPORT_SYMBOL(ib_lrh_header_pack); - -/** - * ib_lrh_header_unpack - Unpack LRH structure from wire format - * @lrh:unpacked LRH header struct - * @buf:Buffer to pack into - * - * ib_lrh_header_unpack() unpacks the LRH header structure from - * wire format (in buf) into @lrh. - */ -int ib_lrh_header_unpack(void *buf, struct ib_unpacked_lrh *lrh) -{ - ib_unpack(lrh_table, ARRAY_SIZE(lrh_table), buf, lrh); - return 0; -} -EXPORT_SYMBOL(ib_lrh_header_unpack); - -/** * ib_ud_header_pack - Pack UD header struct into wire format * @header:UD header struct * @buf:Buffer to pack into diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index b342248aec0..c42699285f8 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -893,68 +893,81 @@ out: return ret ? 
ret : in_len; } +static int copy_wc_to_user(void __user *dest, struct ib_wc *wc) +{ + struct ib_uverbs_wc tmp; + + tmp.wr_id = wc->wr_id; + tmp.status = wc->status; + tmp.opcode = wc->opcode; + tmp.vendor_err = wc->vendor_err; + tmp.byte_len = wc->byte_len; + tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data; + tmp.qp_num = wc->qp->qp_num; + tmp.src_qp = wc->src_qp; + tmp.wc_flags = wc->wc_flags; + tmp.pkey_index = wc->pkey_index; + tmp.slid = wc->slid; + tmp.sl = wc->sl; + tmp.dlid_path_bits = wc->dlid_path_bits; + tmp.port_num = wc->port_num; + tmp.reserved = 0; + + if (copy_to_user(dest, &tmp, sizeof tmp)) + return -EFAULT; + + return 0; +} + ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file, const char __user *buf, int in_len, int out_len) { struct ib_uverbs_poll_cq cmd; - struct ib_uverbs_poll_cq_resp *resp; + struct ib_uverbs_poll_cq_resp resp; + u8 __user *header_ptr; + u8 __user *data_ptr; struct ib_cq *cq; - struct ib_wc *wc; - int ret = 0; - int i; - int rsize; + struct ib_wc wc; + int ret; if (copy_from_user(&cmd, buf, sizeof cmd)) return -EFAULT; - wc = kmalloc(cmd.ne * sizeof *wc, GFP_KERNEL); - if (!wc) - return -ENOMEM; - - rsize = sizeof *resp + cmd.ne * sizeof(struct ib_uverbs_wc); - resp = kmalloc(rsize, GFP_KERNEL); - if (!resp) { - ret = -ENOMEM; - goto out_wc; - } - cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0); - if (!cq) { - ret = -EINVAL; - goto out; - } + if (!cq) + return -EINVAL; - resp->count = ib_poll_cq(cq, cmd.ne, wc); + /* we copy a struct ib_uverbs_poll_cq_resp to user space */ + header_ptr = (void __user *)(unsigned long) cmd.response; + data_ptr = header_ptr + sizeof resp; - put_cq_read(cq); + memset(&resp, 0, sizeof resp); + while (resp.count < cmd.ne) { + ret = ib_poll_cq(cq, 1, &wc); + if (ret < 0) + goto out_put; + if (!ret) + break; + + ret = copy_wc_to_user(data_ptr, &wc); + if (ret) + goto out_put; - for (i = 0; i < resp->count; i++) { - resp->wc[i].wr_id = wc[i].wr_id; - resp->wc[i].status = wc[i].status; - resp->wc[i].opcode = wc[i].opcode; - resp->wc[i].vendor_err = wc[i].vendor_err; - resp->wc[i].byte_len = wc[i].byte_len; - resp->wc[i].ex.imm_data = (__u32 __force) wc[i].ex.imm_data; - resp->wc[i].qp_num = wc[i].qp->qp_num; - resp->wc[i].src_qp = wc[i].src_qp; - resp->wc[i].wc_flags = wc[i].wc_flags; - resp->wc[i].pkey_index = wc[i].pkey_index; - resp->wc[i].slid = wc[i].slid; - resp->wc[i].sl = wc[i].sl; - resp->wc[i].dlid_path_bits = wc[i].dlid_path_bits; - resp->wc[i].port_num = wc[i].port_num; + data_ptr += sizeof(struct ib_uverbs_wc); + ++resp.count; } - if (copy_to_user((void __user *) (unsigned long) cmd.response, resp, rsize)) + if (copy_to_user(header_ptr, &resp, sizeof resp)) { ret = -EFAULT; + goto out_put; + } -out: - kfree(resp); + ret = in_len; -out_wc: - kfree(wc); - return ret ? 
ret : in_len; +out_put: + put_cq_read(cq); + return ret; } ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file, diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c index 5440da0e59b..1b1146f8712 100644 --- a/drivers/infiniband/core/uverbs_marshall.c +++ b/drivers/infiniband/core/uverbs_marshall.c @@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst, dst->grh.sgid_index = src->grh.sgid_index; dst->grh.hop_limit = src->grh.hop_limit; dst->grh.traffic_class = src->grh.traffic_class; + memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved)); dst->dlid = src->dlid; dst->sl = src->sl; dst->src_path_bits = src->src_path_bits; dst->static_rate = src->static_rate; dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0; dst->port_num = src->port_num; + dst->reserved = 0; } EXPORT_SYMBOL(ib_copy_ah_attr_to_user); void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst, struct ib_qp_attr *src) { + dst->qp_state = src->qp_state; dst->cur_qp_state = src->cur_qp_state; dst->path_mtu = src->path_mtu; dst->path_mig_state = src->path_mig_state; @@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst, dst->rnr_retry = src->rnr_retry; dst->alt_port_num = src->alt_port_num; dst->alt_timeout = src->alt_timeout; + memset(dst->reserved, 0, sizeof(dst->reserved)); } EXPORT_SYMBOL(ib_copy_qp_attr_to_user); diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c index 6078992da3f..9292a15ad7c 100644 --- a/drivers/infiniband/hw/ipath/ipath_file_ops.c +++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c @@ -40,7 +40,6 @@ #include <linux/highmem.h> #include <linux/io.h> #include <linux/jiffies.h> -#include <linux/smp_lock.h> #include <asm/pgtable.h> #include "ipath_kernel.h" diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index bf3e20cd029..30e09caf0da 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -219,7 +219,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, struct net_device *ndev; enum ib_mtu tmp; - props->active_width = IB_WIDTH_4X; + props->active_width = IB_WIDTH_1X; props->active_speed = 4; props->port_cap_flags = IB_PORT_CM_SUP; props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port]; @@ -242,7 +242,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, tmp = iboe_get_mtu(ndev->mtu); props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256; - props->state = netif_running(ndev) && netif_oper_up(ndev) ? + props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ? IB_PORT_ACTIVE : IB_PORT_DOWN; props->phys_state = state_to_phys_state(props->state); diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 9a7794ac34c..2001f20a436 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -1816,6 +1816,11 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ? MLX4_WQE_CTRL_FENCE : 0) | size; + if (be16_to_cpu(vlan) < 0x1000) { + ctrl->ins_vlan = 1 << 6; + ctrl->vlan_tag = vlan; + } + /* * Make sure descriptor is fully written before * setting ownership bit (because HW can start @@ -1831,11 +1836,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] | (ind & qp->sq.wqe_cnt ? 
cpu_to_be32(1 << 31) : 0) | blh; - if (be16_to_cpu(vlan) < 0x1000) { - ctrl->ins_vlan = 1 << 6; - ctrl->vlan_tag = vlan; - } - stamp = ind + qp->sq_spare_wqes; ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift); diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index cfc1d65c457..1e1e347a771 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -1123,7 +1123,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr) } } -static int srp_queuecommand(struct scsi_cmnd *scmnd, +static int srp_queuecommand_lck(struct scsi_cmnd *scmnd, void (*done)(struct scsi_cmnd *)) { struct srp_target_port *target = host_to_target(scmnd->device->host); @@ -1196,6 +1196,8 @@ err: return SCSI_MLQUEUE_HOST_BUSY; } +static DEF_SCSI_QCMD(srp_queuecommand) + static int srp_alloc_iu_bufs(struct srp_target_port *target) { int i; diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index e3f7fc6f956..68f09a86843 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -534,76 +534,73 @@ static int handle_eviocgbit(struct input_dev *dev, } #undef OLD_KEY_MAX -static int evdev_handle_get_keycode(struct input_dev *dev, - void __user *p, size_t size) +static int evdev_handle_get_keycode(struct input_dev *dev, void __user *p) { - struct input_keymap_entry ke; + struct input_keymap_entry ke = { + .len = sizeof(unsigned int), + .flags = 0, + }; + int __user *ip = (int __user *)p; int error; - memset(&ke, 0, sizeof(ke)); - - if (size == sizeof(unsigned int[2])) { - /* legacy case */ - int __user *ip = (int __user *)p; + /* legacy case */ + if (copy_from_user(ke.scancode, p, sizeof(unsigned int))) + return -EFAULT; - if (copy_from_user(ke.scancode, p, sizeof(unsigned int))) - return -EFAULT; + error = input_get_keycode(dev, &ke); + if (error) + return error; - ke.len = sizeof(unsigned int); - ke.flags = 0; + if (put_user(ke.keycode, ip + 1)) + return -EFAULT; - error = input_get_keycode(dev, &ke); - if (error) - return error; + return 0; +} - if (put_user(ke.keycode, ip + 1)) - return -EFAULT; +static int evdev_handle_get_keycode_v2(struct input_dev *dev, void __user *p) +{ + struct input_keymap_entry ke; + int error; - } else { - size = min(size, sizeof(ke)); + if (copy_from_user(&ke, p, sizeof(ke))) + return -EFAULT; - if (copy_from_user(&ke, p, size)) - return -EFAULT; + error = input_get_keycode(dev, &ke); + if (error) + return error; - error = input_get_keycode(dev, &ke); - if (error) - return error; + if (copy_to_user(p, &ke, sizeof(ke))) + return -EFAULT; - if (copy_to_user(p, &ke, size)) - return -EFAULT; - } return 0; } -static int evdev_handle_set_keycode(struct input_dev *dev, - void __user *p, size_t size) +static int evdev_handle_set_keycode(struct input_dev *dev, void __user *p) { - struct input_keymap_entry ke; - - memset(&ke, 0, sizeof(ke)); + struct input_keymap_entry ke = { + .len = sizeof(unsigned int), + .flags = 0, + }; + int __user *ip = (int __user *)p; - if (size == sizeof(unsigned int[2])) { - /* legacy case */ - int __user *ip = (int __user *)p; + if (copy_from_user(ke.scancode, p, sizeof(unsigned int))) + return -EFAULT; - if (copy_from_user(ke.scancode, p, sizeof(unsigned int))) - return -EFAULT; + if (get_user(ke.keycode, ip + 1)) + return -EFAULT; - if (get_user(ke.keycode, ip + 1)) - return -EFAULT; + return input_set_keycode(dev, &ke); +} - ke.len = sizeof(unsigned int); - ke.flags = 0; +static int evdev_handle_set_keycode_v2(struct input_dev *dev, void __user *p) +{ + struct 
input_keymap_entry ke; - } else { - size = min(size, sizeof(ke)); + if (copy_from_user(&ke, p, sizeof(ke))) + return -EFAULT; - if (copy_from_user(&ke, p, size)) - return -EFAULT; - - if (ke.len > sizeof(ke.scancode)) - return -EINVAL; - } + if (ke.len > sizeof(ke.scancode)) + return -EINVAL; return input_set_keycode(dev, &ke); } @@ -669,6 +666,18 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd, return evdev_grab(evdev, client); else return evdev_ungrab(evdev, client); + + case EVIOCGKEYCODE: + return evdev_handle_get_keycode(dev, p); + + case EVIOCSKEYCODE: + return evdev_handle_set_keycode(dev, p); + + case EVIOCGKEYCODE_V2: + return evdev_handle_get_keycode_v2(dev, p); + + case EVIOCSKEYCODE_V2: + return evdev_handle_set_keycode_v2(dev, p); } size = _IOC_SIZE(cmd); @@ -708,12 +717,6 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd, return -EFAULT; return error; - - case EVIOC_MASK_SIZE(EVIOCGKEYCODE): - return evdev_handle_get_keycode(dev, p, size); - - case EVIOC_MASK_SIZE(EVIOCSKEYCODE): - return evdev_handle_set_keycode(dev, p, size); } /* Multi-number variable-length handlers */ diff --git a/drivers/input/input.c b/drivers/input/input.c index d092ef9291d..db409d6bd5d 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -24,7 +24,6 @@ #include <linux/device.h> #include <linux/mutex.h> #include <linux/rcupdate.h> -#include <linux/smp_lock.h> #include "input-compat.h" MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>"); @@ -74,6 +73,7 @@ static int input_defuzz_abs_event(int value, int old_val, int fuzz) * dev->event_lock held and interrupts disabled. */ static void input_pass_event(struct input_dev *dev, + struct input_handler *src_handler, unsigned int type, unsigned int code, int value) { struct input_handler *handler; @@ -92,6 +92,15 @@ static void input_pass_event(struct input_dev *dev, continue; handler = handle->handler; + + /* + * If this is the handler that injected this + * particular event we want to skip it to avoid + * filters firing again and again. + */ + if (handler == src_handler) + continue; + if (!handler->filter) { if (filtered) break; @@ -121,7 +130,7 @@ static void input_repeat_key(unsigned long data) if (test_bit(dev->repeat_key, dev->key) && is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) { - input_pass_event(dev, EV_KEY, dev->repeat_key, 2); + input_pass_event(dev, NULL, EV_KEY, dev->repeat_key, 2); if (dev->sync) { /* @@ -130,7 +139,7 @@ static void input_repeat_key(unsigned long data) * Otherwise assume that the driver will send * SYN_REPORT once it's done. 
*/ - input_pass_event(dev, EV_SYN, SYN_REPORT, 1); + input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1); } if (dev->rep[REP_PERIOD]) @@ -163,6 +172,7 @@ static void input_stop_autorepeat(struct input_dev *dev) #define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE) static int input_handle_abs_event(struct input_dev *dev, + struct input_handler *src_handler, unsigned int code, int *pval) { bool is_mt_event; @@ -206,13 +216,15 @@ static int input_handle_abs_event(struct input_dev *dev, /* Flush pending "slot" event */ if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) { input_abs_set_val(dev, ABS_MT_SLOT, dev->slot); - input_pass_event(dev, EV_ABS, ABS_MT_SLOT, dev->slot); + input_pass_event(dev, src_handler, + EV_ABS, ABS_MT_SLOT, dev->slot); } return INPUT_PASS_TO_HANDLERS; } static void input_handle_event(struct input_dev *dev, + struct input_handler *src_handler, unsigned int type, unsigned int code, int value) { int disposition = INPUT_IGNORE_EVENT; @@ -265,7 +277,8 @@ static void input_handle_event(struct input_dev *dev, case EV_ABS: if (is_event_supported(code, dev->absbit, ABS_MAX)) - disposition = input_handle_abs_event(dev, code, &value); + disposition = input_handle_abs_event(dev, src_handler, + code, &value); break; @@ -323,7 +336,7 @@ static void input_handle_event(struct input_dev *dev, dev->event(dev, type, code, value); if (disposition & INPUT_PASS_TO_HANDLERS) - input_pass_event(dev, type, code, value); + input_pass_event(dev, src_handler, type, code, value); } /** @@ -352,7 +365,7 @@ void input_event(struct input_dev *dev, spin_lock_irqsave(&dev->event_lock, flags); add_input_randomness(type, code, value); - input_handle_event(dev, type, code, value); + input_handle_event(dev, NULL, type, code, value); spin_unlock_irqrestore(&dev->event_lock, flags); } } @@ -382,7 +395,8 @@ void input_inject_event(struct input_handle *handle, rcu_read_lock(); grab = rcu_dereference(dev->grab); if (!grab || grab == handle) - input_handle_event(dev, type, code, value); + input_handle_event(dev, handle->handler, + type, code, value); rcu_read_unlock(); spin_unlock_irqrestore(&dev->event_lock, flags); @@ -595,10 +609,10 @@ static void input_dev_release_keys(struct input_dev *dev) for (code = 0; code <= KEY_MAX; code++) { if (is_event_supported(code, dev->keybit, KEY_MAX) && __test_and_clear_bit(code, dev->key)) { - input_pass_event(dev, EV_KEY, code, 0); + input_pass_event(dev, NULL, EV_KEY, code, 0); } } - input_pass_event(dev, EV_SYN, SYN_REPORT, 1); + input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1); } } @@ -738,7 +752,7 @@ static int input_default_setkeycode(struct input_dev *dev, if (index >= dev->keycodemax) return -EINVAL; - if (dev->keycodesize < sizeof(dev->keycode) && + if (dev->keycodesize < sizeof(ke->keycode) && (ke->keycode >> (dev->keycodesize * 8))) return -EINVAL; @@ -873,9 +887,9 @@ int input_set_keycode(struct input_dev *dev, !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && __test_and_clear_bit(old_keycode, dev->key)) { - input_pass_event(dev, EV_KEY, old_keycode, 0); + input_pass_event(dev, NULL, EV_KEY, old_keycode, 0); if (dev->sync) - input_pass_event(dev, EV_SYN, SYN_REPORT, 1); + input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1); } out: @@ -1565,8 +1579,7 @@ static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env) } \ } while (0) -#ifdef CONFIG_PM -static void input_dev_reset(struct input_dev *dev, bool activate) +static void input_dev_toggle(struct input_dev *dev, bool activate) { if (!dev->event) 
return; @@ -1580,12 +1593,44 @@ static void input_dev_reset(struct input_dev *dev, bool activate) } } +/** + * input_reset_device() - reset/restore the state of input device + * @dev: input device whose state needs to be reset + * + * This function tries to reset the state of an opened input device and + * bring internal state and state if the hardware in sync with each other. + * We mark all keys as released, restore LED state, repeat rate, etc. + */ +void input_reset_device(struct input_dev *dev) +{ + mutex_lock(&dev->mutex); + + if (dev->users) { + input_dev_toggle(dev, true); + + /* + * Keys that have been pressed at suspend time are unlikely + * to be still pressed when we resume. + */ + spin_lock_irq(&dev->event_lock); + input_dev_release_keys(dev); + spin_unlock_irq(&dev->event_lock); + } + + mutex_unlock(&dev->mutex); +} +EXPORT_SYMBOL(input_reset_device); + +#ifdef CONFIG_PM static int input_dev_suspend(struct device *dev) { struct input_dev *input_dev = to_input_dev(dev); mutex_lock(&input_dev->mutex); - input_dev_reset(input_dev, false); + + if (input_dev->users) + input_dev_toggle(input_dev, false); + mutex_unlock(&input_dev->mutex); return 0; @@ -1595,18 +1640,7 @@ static int input_dev_resume(struct device *dev) { struct input_dev *input_dev = to_input_dev(dev); - mutex_lock(&input_dev->mutex); - input_dev_reset(input_dev, true); - - /* - * Keys that have been pressed at suspend time are unlikely - * to be still pressed when we resume. - */ - spin_lock_irq(&input_dev->event_lock); - input_dev_release_keys(input_dev); - spin_unlock_irq(&input_dev->event_lock); - - mutex_unlock(&input_dev->mutex); + input_reset_device(input_dev); return 0; } diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c index d53b9e90023..27b6a3ce18c 100644 --- a/drivers/input/joystick/turbografx.c +++ b/drivers/input/joystick/turbografx.c @@ -245,6 +245,7 @@ static struct tgfx __init *tgfx_probe(int parport, int *n_buttons, int n_devs) goto err_free_tgfx; } + parport_put_port(pp); return tgfx; err_free_dev: diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index b8c51b9781d..3a87f3ba5f7 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -179,6 +179,22 @@ config KEYBOARD_GPIO To compile this driver as a module, choose M here: the module will be called gpio_keys. +config KEYBOARD_GPIO_POLLED + tristate "Polled GPIO buttons" + depends on GENERIC_GPIO + select INPUT_POLLDEV + help + This driver implements support for buttons connected + to GPIO pins that are not capable of generating interrupts. + + Say Y here if your device has buttons connected + directly to such GPIO pins. Your board-specific + setup logic must also provide a platform device, + with configuration data saying which GPIOs are used. + + To compile this driver as a module, choose M here: the + module will be called gpio_keys_polled. 
+ config KEYBOARD_TCA6416 tristate "TCA6416 Keypad Support" depends on I2C diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile index a34452e8ebe..622de73a445 100644 --- a/drivers/input/keyboard/Makefile +++ b/drivers/input/keyboard/Makefile @@ -14,6 +14,7 @@ obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o +obj-$(CONFIG_KEYBOARD_GPIO_POLLED) += gpio_keys_polled.o obj-$(CONFIG_KEYBOARD_TCA6416) += tca6416-keypad.o obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c index b92d1cd5cba..af45d275f68 100644 --- a/drivers/input/keyboard/adp5588-keys.c +++ b/drivers/input/keyboard/adp5588-keys.c @@ -4,7 +4,7 @@ * I2C QWERTY Keypad and IO Expander * Bugs: Enter bugs at http://blackfin.uclinux.org/ * - * Copyright (C) 2008-2009 Analog Devices Inc. + * Copyright (C) 2008-2010 Analog Devices Inc. * Licensed under the GPL-2 or later. */ @@ -24,29 +24,6 @@ #include <linux/i2c/adp5588.h> - /* Configuration Register1 */ -#define AUTO_INC (1 << 7) -#define GPIEM_CFG (1 << 6) -#define OVR_FLOW_M (1 << 5) -#define INT_CFG (1 << 4) -#define OVR_FLOW_IEN (1 << 3) -#define K_LCK_IM (1 << 2) -#define GPI_IEN (1 << 1) -#define KE_IEN (1 << 0) - -/* Interrupt Status Register */ -#define CMP2_INT (1 << 5) -#define CMP1_INT (1 << 4) -#define OVR_FLOW_INT (1 << 3) -#define K_LCK_INT (1 << 2) -#define GPI_INT (1 << 1) -#define KE_INT (1 << 0) - -/* Key Lock and Event Counter Register */ -#define K_LCK_EN (1 << 6) -#define LCK21 0x30 -#define KEC 0xF - /* Key Event Register xy */ #define KEY_EV_PRESSED (1 << 7) #define KEY_EV_MASK (0x7F) @@ -55,10 +32,6 @@ #define KEYP_MAX_EVENT 10 -#define MAXGPIO 18 -#define ADP_BANK(offs) ((offs) >> 3) -#define ADP_BIT(offs) (1u << ((offs) & 0x7)) - /* * Early pre 4.0 Silicon required to delay readout by at least 25ms, * since the Event Counter Register updated 25ms after the interrupt @@ -75,7 +48,7 @@ struct adp5588_kpad { const struct adp5588_gpi_map *gpimap; unsigned short gpimapsize; #ifdef CONFIG_GPIOLIB - unsigned char gpiomap[MAXGPIO]; + unsigned char gpiomap[ADP5588_MAXGPIO]; bool export_gpio; struct gpio_chip gc; struct mutex gpio_lock; /* Protect cached dir, dat_out */ @@ -103,8 +76,8 @@ static int adp5588_write(struct i2c_client *client, u8 reg, u8 val) static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off) { struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); - unsigned int bank = ADP_BANK(kpad->gpiomap[off]); - unsigned int bit = ADP_BIT(kpad->gpiomap[off]); + unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]); + unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]); return !!(adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank) & bit); } @@ -113,8 +86,8 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip, unsigned off, int val) { struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); - unsigned int bank = ADP_BANK(kpad->gpiomap[off]); - unsigned int bit = ADP_BIT(kpad->gpiomap[off]); + unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]); + unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]); mutex_lock(&kpad->gpio_lock); @@ -132,8 +105,8 @@ static void adp5588_gpio_set_value(struct gpio_chip *chip, static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off) { struct adp5588_kpad *kpad = container_of(chip, 
struct adp5588_kpad, gc); - unsigned int bank = ADP_BANK(kpad->gpiomap[off]); - unsigned int bit = ADP_BIT(kpad->gpiomap[off]); + unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]); + unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]); int ret; mutex_lock(&kpad->gpio_lock); @@ -150,8 +123,8 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip, unsigned off, int val) { struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc); - unsigned int bank = ADP_BANK(kpad->gpiomap[off]); - unsigned int bit = ADP_BIT(kpad->gpiomap[off]); + unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]); + unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]); int ret; mutex_lock(&kpad->gpio_lock); @@ -176,7 +149,7 @@ static int adp5588_gpio_direction_output(struct gpio_chip *chip, static int __devinit adp5588_build_gpiomap(struct adp5588_kpad *kpad, const struct adp5588_kpad_platform_data *pdata) { - bool pin_used[MAXGPIO]; + bool pin_used[ADP5588_MAXGPIO]; int n_unused = 0; int i; @@ -191,7 +164,7 @@ static int __devinit adp5588_build_gpiomap(struct adp5588_kpad *kpad, for (i = 0; i < kpad->gpimapsize; i++) pin_used[kpad->gpimap[i].pin - GPI_PIN_BASE] = true; - for (i = 0; i < MAXGPIO; i++) + for (i = 0; i < ADP5588_MAXGPIO; i++) if (!pin_used[i]) kpad->gpiomap[n_unused++] = i; @@ -234,7 +207,7 @@ static int __devinit adp5588_gpio_add(struct adp5588_kpad *kpad) return error; } - for (i = 0; i <= ADP_BANK(MAXGPIO); i++) { + for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) { kpad->dat_out[i] = adp5588_read(kpad->client, GPIO_DAT_OUT1 + i); kpad->dir[i] = adp5588_read(kpad->client, GPIO_DIR1 + i); @@ -318,11 +291,11 @@ static void adp5588_work(struct work_struct *work) status = adp5588_read(client, INT_STAT); - if (status & OVR_FLOW_INT) /* Unlikely and should never happen */ + if (status & ADP5588_OVR_FLOW_INT) /* Unlikely and should never happen */ dev_err(&client->dev, "Event Overflow Error\n"); - if (status & KE_INT) { - ev_cnt = adp5588_read(client, KEY_LCK_EC_STAT) & KEC; + if (status & ADP5588_KE_INT) { + ev_cnt = adp5588_read(client, KEY_LCK_EC_STAT) & ADP5588_KEC; if (ev_cnt) { adp5588_report_events(kpad, ev_cnt); input_sync(kpad->input); @@ -360,7 +333,7 @@ static int __devinit adp5588_setup(struct i2c_client *client) if (pdata->en_keylock) { ret |= adp5588_write(client, UNLOCK1, pdata->unlock_key1); ret |= adp5588_write(client, UNLOCK2, pdata->unlock_key2); - ret |= adp5588_write(client, KEY_LCK_EC_STAT, K_LCK_EN); + ret |= adp5588_write(client, KEY_LCK_EC_STAT, ADP5588_K_LCK_EN); } for (i = 0; i < KEYP_MAX_EVENT; i++) @@ -384,7 +357,7 @@ static int __devinit adp5588_setup(struct i2c_client *client) } if (gpio_data) { - for (i = 0; i <= ADP_BANK(MAXGPIO); i++) { + for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) { int pull_mask = gpio_data->pullup_dis_mask; ret |= adp5588_write(client, GPIO_PULL1 + i, @@ -392,11 +365,14 @@ static int __devinit adp5588_setup(struct i2c_client *client) } } - ret |= adp5588_write(client, INT_STAT, CMP2_INT | CMP1_INT | - OVR_FLOW_INT | K_LCK_INT | - GPI_INT | KE_INT); /* Status is W1C */ + ret |= adp5588_write(client, INT_STAT, + ADP5588_CMP2_INT | ADP5588_CMP1_INT | + ADP5588_OVR_FLOW_INT | ADP5588_K_LCK_INT | + ADP5588_GPI_INT | ADP5588_KE_INT); /* Status is W1C */ - ret |= adp5588_write(client, CFG, INT_CFG | OVR_FLOW_IEN | KE_IEN); + ret |= adp5588_write(client, CFG, ADP5588_INT_CFG | + ADP5588_OVR_FLOW_IEN | + ADP5588_KE_IEN); if (ret < 0) { dev_err(&client->dev, "Write Error\n"); diff --git a/drivers/input/keyboard/atkbd.c 
b/drivers/input/keyboard/atkbd.c index d358ef8623f..11478eb2c27 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c @@ -63,6 +63,10 @@ static bool atkbd_extra; module_param_named(extra, atkbd_extra, bool, 0); MODULE_PARM_DESC(extra, "Enable extra LEDs and keys on IBM RapidAcces, EzKey and similar keyboards"); +static bool atkbd_terminal; +module_param_named(terminal, atkbd_terminal, bool, 0); +MODULE_PARM_DESC(terminal, "Enable break codes on an IBM Terminal keyboard connected via AT/PS2"); + /* * Scancode to keycode tables. These are just the default setting, and * are loadable via a userland utility. @@ -136,7 +140,8 @@ static const unsigned short atkbd_unxlate_table[128] = { #define ATKBD_CMD_ENABLE 0x00f4 #define ATKBD_CMD_RESET_DIS 0x00f5 /* Reset to defaults and disable */ #define ATKBD_CMD_RESET_DEF 0x00f6 /* Reset to defaults */ -#define ATKBD_CMD_SETALL_MBR 0x00fa +#define ATKBD_CMD_SETALL_MB 0x00f8 /* Set all keys to give break codes */ +#define ATKBD_CMD_SETALL_MBR 0x00fa /* ... and repeat */ #define ATKBD_CMD_RESET_BAT 0x02ff #define ATKBD_CMD_RESEND 0x00fe #define ATKBD_CMD_EX_ENABLE 0x10ea @@ -764,6 +769,11 @@ static int atkbd_select_set(struct atkbd *atkbd, int target_set, int allow_extra } } + if (atkbd_terminal) { + ps2_command(ps2dev, param, ATKBD_CMD_SETALL_MB); + return 3; + } + if (target_set != 3) return 2; diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c new file mode 100644 index 00000000000..4c17aff2065 --- /dev/null +++ b/drivers/input/keyboard/gpio_keys_polled.c @@ -0,0 +1,261 @@ +/* + * Driver for buttons on GPIO lines not capable of generating interrupts + * + * Copyright (C) 2007-2010 Gabor Juhos <juhosg@openwrt.org> + * Copyright (C) 2010 Nuno Goncalves <nunojpg@gmail.com> + * + * This file was based on: /drivers/input/misc/cobalt_btns.c + * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp> + * + * also was based on: /drivers/input/keyboard/gpio_keys.c + * Copyright 2005 Phil Blundell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/input.h> +#include <linux/input-polldev.h> +#include <linux/ioport.h> +#include <linux/platform_device.h> +#include <linux/gpio.h> +#include <linux/gpio_keys.h> + +#define DRV_NAME "gpio-keys-polled" + +struct gpio_keys_button_data { + int last_state; + int count; + int threshold; + int can_sleep; +}; + +struct gpio_keys_polled_dev { + struct input_polled_dev *poll_dev; + struct device *dev; + struct gpio_keys_platform_data *pdata; + struct gpio_keys_button_data data[0]; +}; + +static void gpio_keys_polled_check_state(struct input_dev *input, + struct gpio_keys_button *button, + struct gpio_keys_button_data *bdata) +{ + int state; + + if (bdata->can_sleep) + state = !!gpio_get_value_cansleep(button->gpio); + else + state = !!gpio_get_value(button->gpio); + + if (state != bdata->last_state) { + unsigned int type = button->type ?: EV_KEY; + + input_event(input, type, button->code, + !!(state ^ button->active_low)); + input_sync(input); + bdata->count = 0; + bdata->last_state = state; + } +} + +static void gpio_keys_polled_poll(struct input_polled_dev *dev) +{ + struct gpio_keys_polled_dev *bdev = dev->private; + struct gpio_keys_platform_data *pdata = bdev->pdata; + struct input_dev *input = dev->input; + int i; + + for (i = 0; i < bdev->pdata->nbuttons; i++) { + struct gpio_keys_button_data *bdata = &bdev->data[i]; + + if (bdata->count < bdata->threshold) + bdata->count++; + else + gpio_keys_polled_check_state(input, &pdata->buttons[i], + bdata); + } +} + +static void gpio_keys_polled_open(struct input_polled_dev *dev) +{ + struct gpio_keys_polled_dev *bdev = dev->private; + struct gpio_keys_platform_data *pdata = bdev->pdata; + + if (pdata->enable) + pdata->enable(bdev->dev); +} + +static void gpio_keys_polled_close(struct input_polled_dev *dev) +{ + struct gpio_keys_polled_dev *bdev = dev->private; + struct gpio_keys_platform_data *pdata = bdev->pdata; + + if (pdata->disable) + pdata->disable(bdev->dev); +} + +static int __devinit gpio_keys_polled_probe(struct platform_device *pdev) +{ + struct gpio_keys_platform_data *pdata = pdev->dev.platform_data; + struct device *dev = &pdev->dev; + struct gpio_keys_polled_dev *bdev; + struct input_polled_dev *poll_dev; + struct input_dev *input; + int error; + int i; + + if (!pdata || !pdata->poll_interval) + return -EINVAL; + + bdev = kzalloc(sizeof(struct gpio_keys_polled_dev) + + pdata->nbuttons * sizeof(struct gpio_keys_button_data), + GFP_KERNEL); + if (!bdev) { + dev_err(dev, "no memory for private data\n"); + return -ENOMEM; + } + + poll_dev = input_allocate_polled_device(); + if (!poll_dev) { + dev_err(dev, "no memory for polled device\n"); + error = -ENOMEM; + goto err_free_bdev; + } + + poll_dev->private = bdev; + poll_dev->poll = gpio_keys_polled_poll; + poll_dev->poll_interval = pdata->poll_interval; + poll_dev->open = gpio_keys_polled_open; + poll_dev->close = gpio_keys_polled_close; + + input = poll_dev->input; + + input->evbit[0] = BIT(EV_KEY); + input->name = pdev->name; + input->phys = DRV_NAME"/input0"; + input->dev.parent = &pdev->dev; + + input->id.bustype = BUS_HOST; + input->id.vendor = 0x0001; + input->id.product = 0x0001; + input->id.version = 0x0100; + + for (i = 0; i < pdata->nbuttons; i++) { + struct gpio_keys_button *button = &pdata->buttons[i]; + struct gpio_keys_button_data *bdata = &bdev->data[i]; + unsigned int gpio = button->gpio; + unsigned int type = button->type ?: EV_KEY; + + if 
(button->wakeup) { + dev_err(dev, DRV_NAME " does not support wakeup\n"); + error = -EINVAL; + goto err_free_gpio; + } + + error = gpio_request(gpio, + button->desc ? button->desc : DRV_NAME); + if (error) { + dev_err(dev, "unable to claim gpio %u, err=%d\n", + gpio, error); + goto err_free_gpio; + } + + error = gpio_direction_input(gpio); + if (error) { + dev_err(dev, + "unable to set direction on gpio %u, err=%d\n", + gpio, error); + goto err_free_gpio; + } + + bdata->can_sleep = gpio_cansleep(gpio); + bdata->last_state = -1; + bdata->threshold = DIV_ROUND_UP(button->debounce_interval, + pdata->poll_interval); + + input_set_capability(input, type, button->code); + } + + bdev->poll_dev = poll_dev; + bdev->dev = dev; + bdev->pdata = pdata; + platform_set_drvdata(pdev, bdev); + + error = input_register_polled_device(poll_dev); + if (error) { + dev_err(dev, "unable to register polled device, err=%d\n", + error); + goto err_free_gpio; + } + + /* report initial state of the buttons */ + for (i = 0; i < pdata->nbuttons; i++) + gpio_keys_polled_check_state(input, &pdata->buttons[i], + &bdev->data[i]); + + return 0; + +err_free_gpio: + while (--i >= 0) + gpio_free(pdata->buttons[i].gpio); + + input_free_polled_device(poll_dev); + +err_free_bdev: + kfree(bdev); + + platform_set_drvdata(pdev, NULL); + return error; +} + +static int __devexit gpio_keys_polled_remove(struct platform_device *pdev) +{ + struct gpio_keys_polled_dev *bdev = platform_get_drvdata(pdev); + struct gpio_keys_platform_data *pdata = bdev->pdata; + int i; + + input_unregister_polled_device(bdev->poll_dev); + + for (i = 0; i < pdata->nbuttons; i++) + gpio_free(pdata->buttons[i].gpio); + + input_free_polled_device(bdev->poll_dev); + + kfree(bdev); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver gpio_keys_polled_driver = { + .probe = gpio_keys_polled_probe, + .remove = __devexit_p(gpio_keys_polled_remove), + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, +}; + +static int __init gpio_keys_polled_init(void) +{ + return platform_driver_register(&gpio_keys_polled_driver); +} + +static void __exit gpio_keys_polled_exit(void) +{ + platform_driver_unregister(&gpio_keys_polled_driver); +} + +module_init(gpio_keys_polled_init); +module_exit(gpio_keys_polled_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>"); +MODULE_DESCRIPTION("Polled GPIO Buttons driver"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c index 4b42ffc0532..d1583aea172 100644 --- a/drivers/input/misc/pcf8574_keypad.c +++ b/drivers/input/misc/pcf8574_keypad.c @@ -127,14 +127,6 @@ static int __devinit pcf8574_kp_probe(struct i2c_client *client, const struct i2 idev->id.product = 0x0001; idev->id.version = 0x0100; - input_set_drvdata(idev, lp); - - ret = input_register_device(idev); - if (ret) { - dev_err(&client->dev, "input_register_device() failed\n"); - goto fail_register; - } - lp->laststate = read_state(lp); ret = request_threaded_irq(client->irq, NULL, pcf8574_kp_irq_handler, @@ -142,16 +134,21 @@ static int __devinit pcf8574_kp_probe(struct i2c_client *client, const struct i2 DRV_NAME, lp); if (ret) { dev_err(&client->dev, "IRQ %d is not free\n", client->irq); - goto fail_irq; + goto fail_free_device; + } + + ret = input_register_device(idev); + if (ret) { + dev_err(&client->dev, "input_register_device() failed\n"); + goto fail_free_irq; } i2c_set_clientdata(client, lp); return 0; - fail_irq: - 
input_unregister_device(idev); - fail_register: - input_set_drvdata(idev, NULL); + fail_free_irq: + free_irq(client->irq, lp); + fail_free_device: input_free_device(idev); fail_allocate: kfree(lp); diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c index a9cf7683163..b77f9991278 100644 --- a/drivers/input/mouse/appletouch.c +++ b/drivers/input/mouse/appletouch.c @@ -630,7 +630,7 @@ static void atp_complete_geyser_3_4(struct urb *urb) /* Just update the base values (i.e. touchpad in untouched state) */ if (dev->data[dev->info->datalen - 1] & ATP_STATUS_BASE_UPDATE) { - dprintk(KERN_DEBUG "appletouch: updated base values\n"); + dprintk("appletouch: updated base values\n"); memcpy(dev->xy_old, dev->xy_cur, sizeof(dev->xy_old)); goto exit; diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h index 613a3652f98..0aefaa88587 100644 --- a/drivers/input/mouse/synaptics.h +++ b/drivers/input/mouse/synaptics.h @@ -51,7 +51,8 @@ #define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20) #define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12) #define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16) -#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100100) +#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */ +#define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */ #define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000) /* synaptics modes query bits */ diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c index 3c287dd879d..4225f5d6b15 100644 --- a/drivers/input/serio/gscps2.c +++ b/drivers/input/serio/gscps2.c @@ -358,7 +358,7 @@ static int __devinit gscps2_probe(struct parisc_device *dev) gscps2_reset(ps2port); ps2port->id = readb(ps2port->addr + GSC_ID) & 0x0f; - snprintf(serio->name, sizeof(serio->name), "GSC PS/2 %s", + snprintf(serio->name, sizeof(serio->name), "gsc-ps2-%s", (ps2port->id == GSC_ID_KEYBOARD) ? "keyboard" : "mouse"); strlcpy(serio->phys, dev_name(&dev->dev), sizeof(serio->phys)); serio->id.type = SERIO_8042; diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index ed7ad7416b2..a5475b57708 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -351,6 +351,17 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { }, }, { + /* + * Most (all?) VAIOs do not have external PS/2 ports nor + * they implement active multiplexing properly, and + * MUX discovery usually messes up keyboard/touchpad. 
+ */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "VAIO"), + }, + }, + { /* Amoi M636/A737 */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."), diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c index cd82bb12591..b7ba4597f7f 100644 --- a/drivers/input/serio/serio_raw.c +++ b/drivers/input/serio/serio_raw.c @@ -11,7 +11,6 @@ #include <linux/sched.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/poll.h> #include <linux/module.h> #include <linux/serio.h> diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c index aea9a9399a3..d94f7e9aa99 100644 --- a/drivers/input/tablet/acecad.c +++ b/drivers/input/tablet/acecad.c @@ -229,12 +229,13 @@ static int usb_acecad_probe(struct usb_interface *intf, const struct usb_device_ err = input_register_device(acecad->input); if (err) - goto fail2; + goto fail3; usb_set_intfdata(intf, acecad); return 0; + fail3: usb_free_urb(acecad->irq); fail2: usb_free_coherent(dev, 8, acecad->data, acecad->data_dma); fail1: input_free_device(input_dev); kfree(acecad); diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c index 57b25b84d1f..0a619c558bf 100644 --- a/drivers/input/tablet/aiptek.c +++ b/drivers/input/tablet/aiptek.c @@ -1097,7 +1097,7 @@ store_tabletPointerMode(struct device *dev, struct device_attribute *attr, const } static DEVICE_ATTR(pointer_mode, - S_IRUGO | S_IWUGO, + S_IRUGO | S_IWUSR, show_tabletPointerMode, store_tabletPointerMode); /*********************************************************************** @@ -1134,7 +1134,7 @@ store_tabletCoordinateMode(struct device *dev, struct device_attribute *attr, co } static DEVICE_ATTR(coordinate_mode, - S_IRUGO | S_IWUGO, + S_IRUGO | S_IWUSR, show_tabletCoordinateMode, store_tabletCoordinateMode); /*********************************************************************** @@ -1176,7 +1176,7 @@ store_tabletToolMode(struct device *dev, struct device_attribute *attr, const ch } static DEVICE_ATTR(tool_mode, - S_IRUGO | S_IWUGO, + S_IRUGO | S_IWUSR, show_tabletToolMode, store_tabletToolMode); /*********************************************************************** @@ -1219,7 +1219,7 @@ store_tabletXtilt(struct device *dev, struct device_attribute *attr, const char } static DEVICE_ATTR(xtilt, - S_IRUGO | S_IWUGO, show_tabletXtilt, store_tabletXtilt); + S_IRUGO | S_IWUSR, show_tabletXtilt, store_tabletXtilt); /*********************************************************************** * support routines for the 'ytilt' file. Note that this file @@ -1261,7 +1261,7 @@ store_tabletYtilt(struct device *dev, struct device_attribute *attr, const char } static DEVICE_ATTR(ytilt, - S_IRUGO | S_IWUGO, show_tabletYtilt, store_tabletYtilt); + S_IRUGO | S_IWUSR, show_tabletYtilt, store_tabletYtilt); /*********************************************************************** * support routines for the 'jitter' file. 
Note that this file @@ -1288,7 +1288,7 @@ store_tabletJitterDelay(struct device *dev, struct device_attribute *attr, const } static DEVICE_ATTR(jitter, - S_IRUGO | S_IWUGO, + S_IRUGO | S_IWUSR, show_tabletJitterDelay, store_tabletJitterDelay); /*********************************************************************** @@ -1317,7 +1317,7 @@ store_tabletProgrammableDelay(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR(delay, - S_IRUGO | S_IWUGO, + S_IRUGO | S_IWUSR, show_tabletProgrammableDelay, store_tabletProgrammableDelay); /*********************************************************************** @@ -1406,7 +1406,7 @@ store_tabletStylusUpper(struct device *dev, struct device_attribute *attr, const } static DEVICE_ATTR(stylus_upper, - S_IRUGO | S_IWUGO, + S_IRUGO | S_IWUSR, show_tabletStylusUpper, store_tabletStylusUpper); /*********************************************************************** @@ -1437,7 +1437,7 @@ store_tabletStylusLower(struct device *dev, struct device_attribute *attr, const } static DEVICE_ATTR(stylus_lower, - S_IRUGO | S_IWUGO, + S_IRUGO | S_IWUSR, show_tabletStylusLower, store_tabletStylusLower); /*********************************************************************** @@ -1475,7 +1475,7 @@ store_tabletMouseLeft(struct device *dev, struct device_attribute *attr, const c } static DEVICE_ATTR(mouse_left, - S_IRUGO | S_IWUGO, + S_IRUGO | S_IWUSR, show_tabletMouseLeft, store_tabletMouseLeft); /*********************************************************************** @@ -1505,7 +1505,7 @@ store_tabletMouseMiddle(struct device *dev, struct device_attribute *attr, const } static DEVICE_ATTR(mouse_middle, - S_IRUGO | S_IWUGO, + S_IRUGO | S_IWUSR, show_tabletMouseMiddle, store_tabletMouseMiddle); /*********************************************************************** @@ -1535,7 +1535,7 @@ store_tabletMouseRight(struct device *dev, struct device_attribute *attr, const } static DEVICE_ATTR(mouse_right, - S_IRUGO | S_IWUGO, + S_IRUGO | S_IWUSR, show_tabletMouseRight, store_tabletMouseRight); /*********************************************************************** @@ -1567,7 +1567,7 @@ store_tabletWheel(struct device *dev, struct device_attribute *attr, const char } static DEVICE_ATTR(wheel, - S_IRUGO | S_IWUGO, show_tabletWheel, store_tabletWheel); + S_IRUGO | S_IWUSR, show_tabletWheel, store_tabletWheel); /*********************************************************************** * support routines for the 'execute' file. Note that this file @@ -1600,7 +1600,7 @@ store_tabletExecute(struct device *dev, struct device_attribute *attr, const cha } static DEVICE_ATTR(execute, - S_IRUGO | S_IWUGO, show_tabletExecute, store_tabletExecute); + S_IRUGO | S_IWUSR, show_tabletExecute, store_tabletExecute); /*********************************************************************** * support routines for the 'odm_code' file. 
Note that this file diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index b3252ef1e27..435b0af401e 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c @@ -1436,6 +1436,14 @@ static struct wacom_features wacom_features_0xD2 = { "Wacom Bamboo Craft", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 63, BAMBOO_PT }; static struct wacom_features wacom_features_0xD3 = { "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, 63, BAMBOO_PT }; +static const struct wacom_features wacom_features_0xD4 = + { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 255, 63, BAMBOO_PT }; +static struct wacom_features wacom_features_0xD8 = + { "Wacom Bamboo Comic 2FG", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, 63, BAMBOO_PT }; +static struct wacom_features wacom_features_0xDA = + { "Wacom Bamboo 2FG 4x5 SE", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 63, BAMBOO_PT }; +static struct wacom_features wacom_features_0xDB = + { "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, 63, BAMBOO_PT }; #define USB_DEVICE_WACOM(prod) \ USB_DEVICE(USB_VENDOR_ID_WACOM, prod), \ @@ -1504,6 +1512,10 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0xD1) }, { USB_DEVICE_WACOM(0xD2) }, { USB_DEVICE_WACOM(0xD3) }, + { USB_DEVICE_WACOM(0xD4) }, + { USB_DEVICE_WACOM(0xD8) }, + { USB_DEVICE_WACOM(0xDA) }, + { USB_DEVICE_WACOM(0xDB) }, { USB_DEVICE_WACOM(0xF0) }, { USB_DEVICE_WACOM(0xCC) }, { USB_DEVICE_WACOM(0x90) }, diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c index ba6f0bd1e76..bc3b5187f3a 100644 --- a/drivers/input/touchscreen/ad7879.c +++ b/drivers/input/touchscreen/ad7879.c @@ -129,6 +129,9 @@ struct ad7879 { u16 cmd_crtl1; u16 cmd_crtl2; u16 cmd_crtl3; + int x; + int y; + int Rt; }; static int ad7879_read(struct ad7879 *ts, u8 reg) @@ -175,13 +178,32 @@ static int ad7879_report(struct ad7879 *ts) Rt /= z1; Rt = (Rt + 2047) >> 12; - if (!timer_pending(&ts->timer)) + /* + * Sample found inconsistent, pressure is beyond + * the maximum. Don't report it to user space. + */ + if (Rt > ts->pressure_max) + return -EINVAL; + + /* + * Note that we delay reporting events by one sample. + * This is done to avoid reporting last sample of the + * touch sequence, which may be incomplete if finger + * leaves the surface before last reading is taken. 
+ */ + if (timer_pending(&ts->timer)) { + /* Touch continues */ input_report_key(input_dev, BTN_TOUCH, 1); + input_report_abs(input_dev, ABS_X, ts->x); + input_report_abs(input_dev, ABS_Y, ts->y); + input_report_abs(input_dev, ABS_PRESSURE, ts->Rt); + input_sync(input_dev); + } + + ts->x = x; + ts->y = y; + ts->Rt = Rt; - input_report_abs(input_dev, ABS_X, x); - input_report_abs(input_dev, ABS_Y, y); - input_report_abs(input_dev, ABS_PRESSURE, Rt); - input_sync(input_dev); return 0; } diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c index ccde5860256..2ca9e5d6646 100644 --- a/drivers/input/touchscreen/bu21013_ts.c +++ b/drivers/input/touchscreen/bu21013_ts.c @@ -514,7 +514,7 @@ err_free_irq: err_cs_disable: pdata->cs_dis(pdata->cs_pin); err_free_mem: - input_free_device(bu21013_data->in_dev); + input_free_device(in_dev); kfree(bu21013_data); return error; diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c index f45f80f6d33..73fd6642b68 100644 --- a/drivers/input/touchscreen/usbtouchscreen.c +++ b/drivers/input/touchscreen/usbtouchscreen.c @@ -178,6 +178,7 @@ static const struct usb_device_id usbtouch_devices[] = { #ifdef CONFIG_TOUCHSCREEN_USB_ITM {USB_DEVICE(0x0403, 0xf9e9), .driver_info = DEVTYPE_ITM}, + {USB_DEVICE(0x16e3, 0xf9e9), .driver_info = DEVTYPE_ITM}, #endif #ifdef CONFIG_TOUCHSCREEN_USB_ETURBO diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c index 40b914bded8..2e72227bd07 100644 --- a/drivers/isdn/hisax/isar.c +++ b/drivers/isdn/hisax/isar.c @@ -1427,8 +1427,8 @@ modeisar(struct BCState *bcs, int mode, int bc) &bcs->hw.isar.reg->Flags)) bcs->hw.isar.dpath = 1; else { - printk(KERN_WARNING"isar modeisar analog funktions only with DP1\n"); - debugl1(cs, "isar modeisar analog funktions only with DP1"); + printk(KERN_WARNING"isar modeisar analog functions only with DP1\n"); + debugl1(cs, "isar modeisar analog functions only with DP1"); return(1); } break; diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c index 2e847a90bad..f2b5bab5e6a 100644 --- a/drivers/isdn/icn/icn.c +++ b/drivers/isdn/icn/icn.c @@ -1627,7 +1627,7 @@ __setup("icn=", icn_setup); static int __init icn_init(void) { char *p; - char rev[10]; + char rev[20]; memset(&dev, 0, sizeof(icn_dev)); dev.memaddr = (membase & 0x0ffc000); @@ -1637,9 +1637,10 @@ static int __init icn_init(void) spin_lock_init(&dev.devlock); if ((p = strchr(revision, ':'))) { - strcpy(rev, p + 1); + strncpy(rev, p + 1, 20); p = strchr(rev, '$'); - *p = 0; + if (p) + *p = 0; } else strcpy(rev, " ??? "); printk(KERN_NOTICE "ICN-ISDN-driver Rev%smem=0x%08lx\n", rev, diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index cc2a88d5192..6f190f4cdbc 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -7,20 +7,20 @@ menuconfig NEW_LEDS This is not related to standard keyboard LEDs which are controlled via the input system. -if NEW_LEDS - config LEDS_CLASS - tristate "LED Class Support" + bool "LED Class Support" + depends on NEW_LEDS help This option enables the led sysfs class in /sys/class/leds. You'll need this to do anything useful with LEDs. If unsure, say N. 
-if LEDS_CLASS +if NEW_LEDS comment "LED drivers" config LEDS_88PM860X tristate "LED Support for Marvell 88PM860x PMIC" + depends on LEDS_CLASS depends on MFD_88PM860X help This option enables support for on-chip LED drivers found on Marvell @@ -28,6 +28,7 @@ config LEDS_88PM860X config LEDS_ATMEL_PWM tristate "LED Support using Atmel PWM outputs" + depends on LEDS_CLASS depends on ATMEL_PWM help This option enables support for LEDs driven using outputs @@ -35,6 +36,7 @@ config LEDS_ATMEL_PWM config LEDS_LOCOMO tristate "LED Support for Locomo device" + depends on LEDS_CLASS depends on SHARP_LOCOMO help This option enables support for the LEDs on Sharp Locomo. @@ -42,6 +44,7 @@ config LEDS_LOCOMO config LEDS_MIKROTIK_RB532 tristate "LED Support for Mikrotik Routerboard 532" + depends on LEDS_CLASS depends on MIKROTIK_RB532 help This option enables support for the so called "User LED" of @@ -49,6 +52,7 @@ config LEDS_MIKROTIK_RB532 config LEDS_S3C24XX tristate "LED Support for Samsung S3C24XX GPIO LEDs" + depends on LEDS_CLASS depends on ARCH_S3C2410 help This option enables support for LEDs connected to GPIO lines @@ -56,12 +60,14 @@ config LEDS_S3C24XX config LEDS_AMS_DELTA tristate "LED Support for the Amstrad Delta (E3)" + depends on LEDS_CLASS depends on MACH_AMS_DELTA help This option enables support for the LEDs on Amstrad Delta (E3). config LEDS_NET48XX tristate "LED Support for Soekris net48xx series Error LED" + depends on LEDS_CLASS depends on SCx200_GPIO help This option enables support for the Soekris net4801 and net4826 error @@ -79,18 +85,21 @@ config LEDS_NET5501 config LEDS_FSG tristate "LED Support for the Freecom FSG-3" + depends on LEDS_CLASS depends on MACH_FSG help This option enables support for the LEDs on the Freecom FSG-3. config LEDS_WRAP tristate "LED Support for the WRAP series LEDs" + depends on LEDS_CLASS depends on SCx200_GPIO help This option enables support for the PCEngines WRAP programmable LEDs. config LEDS_ALIX2 tristate "LED Support for ALIX.2 and ALIX.3 series" + depends on LEDS_CLASS depends on X86 && !GPIO_CS5535 && !CS5535_GPIO help This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs. @@ -98,12 +107,14 @@ config LEDS_ALIX2 config LEDS_H1940 tristate "LED Support for iPAQ H1940 device" + depends on LEDS_CLASS depends on ARCH_H1940 help This option enables support for the LEDs on the h1940. config LEDS_COBALT_QUBE tristate "LED Support for the Cobalt Qube series front LED" + depends on LEDS_CLASS depends on MIPS_COBALT help This option enables support for the front LED on Cobalt Qube series @@ -117,6 +128,7 @@ config LEDS_COBALT_RAQ config LEDS_SUNFIRE tristate "LED support for SunFire servers." + depends on LEDS_CLASS depends on SPARC64 select LEDS_TRIGGERS help @@ -125,6 +137,7 @@ config LEDS_SUNFIRE config LEDS_HP6XX tristate "LED Support for the HP Jornada 6xx" + depends on LEDS_CLASS depends on SH_HP6XX help This option enables LED support for the handheld @@ -132,6 +145,7 @@ config LEDS_HP6XX config LEDS_PCA9532 tristate "LED driver for PCA9532 dimmer" + depends on LEDS_CLASS depends on I2C && INPUT && EXPERIMENTAL help This option enables support for NXP pca9532 @@ -140,6 +154,7 @@ config LEDS_PCA9532 config LEDS_GPIO tristate "LED Support for GPIO connected LEDs" + depends on LEDS_CLASS depends on GENERIC_GPIO help This option enables support for the LEDs connected to GPIO @@ -167,6 +182,7 @@ config LEDS_GPIO_OF config LEDS_LP3944 tristate "LED Support for N.S. 
LP3944 (Fun Light) I2C chip" + depends on LEDS_CLASS depends on I2C help This option enables support for LEDs connected to the National @@ -176,8 +192,27 @@ config LEDS_LP3944 To compile this driver as a module, choose M here: the module will be called leds-lp3944. +config LEDS_LP5521 + tristate "LED Support for N.S. LP5521 LED driver chip" + depends on LEDS_CLASS && I2C + help + If you say yes here you get support for the National Semiconductor + LP5521 LED driver. It is 3 channel chip with programmable engines. + Driver provides direct control via LED class and interface for + programming the engines. + +config LEDS_LP5523 + tristate "LED Support for N.S. LP5523 LED driver chip" + depends on LEDS_CLASS && I2C + help + If you say yes here you get support for the National Semiconductor + LP5523 LED driver. It is 9 channel chip with programmable engines. + Driver provides direct control via LED class and interface for + programming the engines. + config LEDS_CLEVO_MAIL tristate "Mail LED on Clevo notebook" + depends on LEDS_CLASS depends on X86 && SERIO_I8042 && DMI help This driver makes the mail LED accessible from userspace @@ -208,6 +243,7 @@ config LEDS_CLEVO_MAIL config LEDS_PCA955X tristate "LED Support for PCA955x I2C chips" + depends on LEDS_CLASS depends on I2C help This option enables support for LEDs connected to PCA955x @@ -216,6 +252,7 @@ config LEDS_PCA955X config LEDS_WM831X_STATUS tristate "LED support for status LEDs on WM831x PMICs" + depends on LEDS_CLASS depends on MFD_WM831X help This option enables support for the status LEDs of the WM831x @@ -223,6 +260,7 @@ config LEDS_WM831X_STATUS config LEDS_WM8350 tristate "LED Support for WM8350 AudioPlus PMIC" + depends on LEDS_CLASS depends on MFD_WM8350 help This option enables support for LEDs driven by the Wolfson @@ -230,6 +268,7 @@ config LEDS_WM8350 config LEDS_DA903X tristate "LED Support for DA9030/DA9034 PMIC" + depends on LEDS_CLASS depends on PMIC_DA903X help This option enables support for on-chip LED drivers found @@ -237,6 +276,7 @@ config LEDS_DA903X config LEDS_DAC124S085 tristate "LED Support for DAC124S085 SPI DAC" + depends on LEDS_CLASS depends on SPI help This option enables support for DAC124S085 SPI DAC from NatSemi, @@ -244,18 +284,21 @@ config LEDS_DAC124S085 config LEDS_PWM tristate "PWM driven LED Support" + depends on LEDS_CLASS depends on HAVE_PWM help This option enables support for pwm driven LEDs config LEDS_REGULATOR tristate "REGULATOR driven LED support" + depends on LEDS_CLASS depends on REGULATOR help This option enables support for regulator driven LEDs. 
config LEDS_BD2802 tristate "LED driver for BD2802 RGB LED" + depends on LEDS_CLASS depends on I2C help This option enables support for BD2802GU RGB LED driver chips @@ -263,6 +306,7 @@ config LEDS_BD2802 config LEDS_INTEL_SS4200 tristate "LED driver for Intel NAS SS4200 series" + depends on LEDS_CLASS depends on PCI && DMI help This option enables support for the Intel SS4200 series of @@ -272,6 +316,7 @@ config LEDS_INTEL_SS4200 config LEDS_LT3593 tristate "LED driver for LT3593 controllers" + depends on LEDS_CLASS depends on GENERIC_GPIO help This option enables support for LEDs driven by a Linear Technology @@ -280,6 +325,7 @@ config LEDS_LT3593 config LEDS_ADP5520 tristate "LED Support for ADP5520/ADP5501 PMIC" + depends on LEDS_CLASS depends on PMIC_ADP5520 help This option enables support for on-chip LED drivers found @@ -290,6 +336,7 @@ config LEDS_ADP5520 config LEDS_DELL_NETBOOKS tristate "External LED on Dell Business Netbooks" + depends on LEDS_CLASS depends on X86 && ACPI_WMI help This adds support for the Latitude 2100 and similar @@ -297,6 +344,7 @@ config LEDS_DELL_NETBOOKS config LEDS_MC13783 tristate "LED Support for MC13783 PMIC" + depends on LEDS_CLASS depends on MFD_MC13783 help This option enable support for on-chip LED drivers found @@ -304,6 +352,7 @@ config LEDS_MC13783 config LEDS_NS2 tristate "LED support for Network Space v2 GPIO LEDs" + depends on LEDS_CLASS depends on MACH_NETSPACE_V2 || MACH_INETSPACE_V2 || MACH_NETSPACE_MAX_V2 || D2NET_V2 default y help @@ -322,17 +371,17 @@ config LEDS_NETXBIG config LEDS_TRIGGERS bool "LED Trigger support" + depends on LEDS_CLASS help This option enables trigger support for the leds class. These triggers allow kernel events to drive the LEDs and can be configured via sysfs. If unsure, say Y. -if LEDS_TRIGGERS - comment "LED Triggers" config LEDS_TRIGGER_TIMER tristate "LED Timer Trigger" + depends on LEDS_TRIGGERS help This allows LEDs to be controlled by a programmable timer via sysfs. Some LED hardware can be programmed to start @@ -344,12 +393,14 @@ config LEDS_TRIGGER_TIMER config LEDS_TRIGGER_IDE_DISK bool "LED IDE Disk Trigger" depends on IDE_GD_ATA + depends on LEDS_TRIGGERS help This allows LEDs to be controlled by IDE disk activity. If unsure, say Y. config LEDS_TRIGGER_HEARTBEAT tristate "LED Heartbeat Trigger" + depends on LEDS_TRIGGERS help This allows LEDs to be controlled by a CPU load average. The flash frequency is a hyperbolic function of the 1-minute @@ -358,6 +409,7 @@ config LEDS_TRIGGER_HEARTBEAT config LEDS_TRIGGER_BACKLIGHT tristate "LED backlight Trigger" + depends on LEDS_TRIGGERS help This allows LEDs to be controlled as a backlight device: they turn off and on when the display is blanked and unblanked. @@ -366,6 +418,7 @@ config LEDS_TRIGGER_BACKLIGHT config LEDS_TRIGGER_GPIO tristate "LED GPIO Trigger" + depends on LEDS_TRIGGERS depends on GPIOLIB help This allows LEDs to be controlled by gpio events. It's good @@ -378,6 +431,7 @@ config LEDS_TRIGGER_GPIO config LEDS_TRIGGER_DEFAULT_ON tristate "LED Default ON Trigger" + depends on LEDS_TRIGGERS help This allows LEDs to be initialised in the ON state. If unsure, say Y. 
@@ -385,8 +439,4 @@ config LEDS_TRIGGER_DEFAULT_ON comment "iptables trigger is under Netfilter config (LED target)" depends on LEDS_TRIGGERS -endif # LEDS_TRIGGERS - -endif # LEDS_CLASS - endif # NEW_LEDS diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index 9c96db40ef6..aae6989ff6b 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -23,6 +23,8 @@ obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o +obj-$(CONFIG_LEDS_LP5521) += leds-lp5521.o +obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o obj-$(CONFIG_LEDS_FSG) += leds-fsg.o diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 26066007650..211e21f34bd 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -81,6 +81,79 @@ static struct device_attribute led_class_attrs[] = { __ATTR_NULL, }; +static void led_timer_function(unsigned long data) +{ + struct led_classdev *led_cdev = (void *)data; + unsigned long brightness; + unsigned long delay; + + if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) { + led_set_brightness(led_cdev, LED_OFF); + return; + } + + brightness = led_get_brightness(led_cdev); + if (!brightness) { + /* Time to switch the LED on. */ + brightness = led_cdev->blink_brightness; + delay = led_cdev->blink_delay_on; + } else { + /* Store the current brightness value to be able + * to restore it when the delay_off period is over. + */ + led_cdev->blink_brightness = brightness; + brightness = LED_OFF; + delay = led_cdev->blink_delay_off; + } + + led_set_brightness(led_cdev, brightness); + + mod_timer(&led_cdev->blink_timer, jiffies + msecs_to_jiffies(delay)); +} + +static void led_stop_software_blink(struct led_classdev *led_cdev) +{ + /* deactivate previous settings */ + del_timer_sync(&led_cdev->blink_timer); + led_cdev->blink_delay_on = 0; + led_cdev->blink_delay_off = 0; +} + +static void led_set_software_blink(struct led_classdev *led_cdev, + unsigned long delay_on, + unsigned long delay_off) +{ + int current_brightness; + + current_brightness = led_get_brightness(led_cdev); + if (current_brightness) + led_cdev->blink_brightness = current_brightness; + if (!led_cdev->blink_brightness) + led_cdev->blink_brightness = led_cdev->max_brightness; + + if (delay_on == led_cdev->blink_delay_on && + delay_off == led_cdev->blink_delay_off) + return; + + led_stop_software_blink(led_cdev); + + led_cdev->blink_delay_on = delay_on; + led_cdev->blink_delay_off = delay_off; + + /* never on - don't blink */ + if (!delay_on) + return; + + /* never off - just set to brightness */ + if (!delay_off) { + led_set_brightness(led_cdev, led_cdev->blink_brightness); + return; + } + + mod_timer(&led_cdev->blink_timer, jiffies + 1); +} + + /** * led_classdev_suspend - suspend an led_classdev. * @led_cdev: the led_classdev to suspend. 
@@ -148,6 +221,10 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev) led_update_brightness(led_cdev); + init_timer(&led_cdev->blink_timer); + led_cdev->blink_timer.function = led_timer_function; + led_cdev->blink_timer.data = (unsigned long)led_cdev; + #ifdef CONFIG_LEDS_TRIGGERS led_trigger_set_default(led_cdev); #endif @@ -157,7 +234,6 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev) return 0; } - EXPORT_SYMBOL_GPL(led_classdev_register); /** @@ -175,6 +251,9 @@ void led_classdev_unregister(struct led_classdev *led_cdev) up_write(&led_cdev->trigger_lock); #endif + /* Stop blinking */ + led_brightness_set(led_cdev, LED_OFF); + device_unregister(led_cdev->dev); down_write(&leds_list_lock); @@ -183,6 +262,30 @@ void led_classdev_unregister(struct led_classdev *led_cdev) } EXPORT_SYMBOL_GPL(led_classdev_unregister); +void led_blink_set(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off) +{ + if (led_cdev->blink_set && + led_cdev->blink_set(led_cdev, delay_on, delay_off)) + return; + + /* blink with 1 Hz as default if nothing specified */ + if (!*delay_on && !*delay_off) + *delay_on = *delay_off = 500; + + led_set_software_blink(led_cdev, *delay_on, *delay_off); +} +EXPORT_SYMBOL(led_blink_set); + +void led_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + led_stop_software_blink(led_cdev); + led_cdev->brightness_set(led_cdev, brightness); +} +EXPORT_SYMBOL(led_brightness_set); + static int __init leds_init(void) { leds_class = class_create(THIS_MODULE, "leds"); diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c index f1c00db88b5..c41eb6180c9 100644 --- a/drivers/leds/led-triggers.c +++ b/drivers/leds/led-triggers.c @@ -113,7 +113,7 @@ void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger) if (led_cdev->trigger->deactivate) led_cdev->trigger->deactivate(led_cdev); led_cdev->trigger = NULL; - led_set_brightness(led_cdev, LED_OFF); + led_brightness_set(led_cdev, LED_OFF); } if (trigger) { write_lock_irqsave(&trigger->leddev_list_lock, flags); diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c index ea57e05d08f..4d9fa38d9ff 100644 --- a/drivers/leds/leds-gpio.c +++ b/drivers/leds/leds-gpio.c @@ -316,7 +316,7 @@ static struct of_platform_driver of_gpio_leds_driver = { static int __init gpio_led_init(void) { - int ret; + int ret = 0; #ifdef CONFIG_LEDS_GPIO_PLATFORM ret = platform_driver_register(&gpio_led_driver); diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c new file mode 100644 index 00000000000..33facd0c45d --- /dev/null +++ b/drivers/leds/leds-lp5521.c @@ -0,0 +1,837 @@ +/* + * LP5521 LED chip driver. + * + * Copyright (C) 2010 Nokia Corporation + * + * Contact: Samu Onkalo <samu.p.onkalo@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/i2c.h> +#include <linux/mutex.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/ctype.h> +#include <linux/spinlock.h> +#include <linux/wait.h> +#include <linux/leds.h> +#include <linux/leds-lp5521.h> +#include <linux/workqueue.h> +#include <linux/slab.h> + +#define LP5521_PROGRAM_LENGTH 32 /* in bytes */ + +#define LP5521_MAX_LEDS 3 /* Maximum number of LEDs */ +#define LP5521_MAX_ENGINES 3 /* Maximum number of engines */ + +#define LP5521_ENG_MASK_BASE 0x30 /* 00110000 */ +#define LP5521_ENG_STATUS_MASK 0x07 /* 00000111 */ + +#define LP5521_CMD_LOAD 0x15 /* 00010101 */ +#define LP5521_CMD_RUN 0x2a /* 00101010 */ +#define LP5521_CMD_DIRECT 0x3f /* 00111111 */ +#define LP5521_CMD_DISABLED 0x00 /* 00000000 */ + +/* Registers */ +#define LP5521_REG_ENABLE 0x00 +#define LP5521_REG_OP_MODE 0x01 +#define LP5521_REG_R_PWM 0x02 +#define LP5521_REG_G_PWM 0x03 +#define LP5521_REG_B_PWM 0x04 +#define LP5521_REG_R_CURRENT 0x05 +#define LP5521_REG_G_CURRENT 0x06 +#define LP5521_REG_B_CURRENT 0x07 +#define LP5521_REG_CONFIG 0x08 +#define LP5521_REG_R_CHANNEL_PC 0x09 +#define LP5521_REG_G_CHANNEL_PC 0x0A +#define LP5521_REG_B_CHANNEL_PC 0x0B +#define LP5521_REG_STATUS 0x0C +#define LP5521_REG_RESET 0x0D +#define LP5521_REG_GPO 0x0E +#define LP5521_REG_R_PROG_MEM 0x10 +#define LP5521_REG_G_PROG_MEM 0x30 +#define LP5521_REG_B_PROG_MEM 0x50 + +#define LP5521_PROG_MEM_BASE LP5521_REG_R_PROG_MEM +#define LP5521_PROG_MEM_SIZE 0x20 + +/* Base register to set LED current */ +#define LP5521_REG_LED_CURRENT_BASE LP5521_REG_R_CURRENT + +/* Base register to set the brightness */ +#define LP5521_REG_LED_PWM_BASE LP5521_REG_R_PWM + +/* Bits in ENABLE register */ +#define LP5521_MASTER_ENABLE 0x40 /* Chip master enable */ +#define LP5521_LOGARITHMIC_PWM 0x80 /* Logarithmic PWM adjustment */ +#define LP5521_EXEC_RUN 0x2A + +/* Bits in CONFIG register */ +#define LP5521_PWM_HF 0x40 /* PWM: 0 = 256Hz, 1 = 558Hz */ +#define LP5521_PWRSAVE_EN 0x20 /* 1 = Power save mode */ +#define LP5521_CP_MODE_OFF 0 /* Charge pump (CP) off */ +#define LP5521_CP_MODE_BYPASS 8 /* CP forced to bypass mode */ +#define LP5521_CP_MODE_1X5 0x10 /* CP forced to 1.5x mode */ +#define LP5521_CP_MODE_AUTO 0x18 /* Automatic mode selection */ +#define LP5521_R_TO_BATT 4 /* R out: 0 = CP, 1 = Vbat */ +#define LP5521_CLK_SRC_EXT 0 /* Ext-clk source (CLK_32K) */ +#define LP5521_CLK_INT 1 /* Internal clock */ +#define LP5521_CLK_AUTO 2 /* Automatic clock selection */ + +/* Status */ +#define LP5521_EXT_CLK_USED 0x08 + +struct lp5521_engine { + const struct attribute_group *attributes; + int id; + u8 mode; + u8 prog_page; + u8 engine_mask; +}; + +struct lp5521_led { + int id; + u8 chan_nr; + u8 led_current; + u8 max_current; + struct led_classdev cdev; + struct work_struct brightness_work; + u8 brightness; +}; + +struct lp5521_chip { + struct lp5521_platform_data *pdata; + struct mutex lock; /* Serialize control */ + struct i2c_client *client; + struct lp5521_engine engines[LP5521_MAX_ENGINES]; + struct lp5521_led leds[LP5521_MAX_LEDS]; + u8 num_channels; + u8 num_leds; +}; + +static inline struct lp5521_led *cdev_to_led(struct led_classdev *cdev) +{ + return container_of(cdev, struct lp5521_led, cdev); +} + +static 
inline struct lp5521_chip *engine_to_lp5521(struct lp5521_engine *engine) +{ + return container_of(engine, struct lp5521_chip, + engines[engine->id - 1]); +} + +static inline struct lp5521_chip *led_to_lp5521(struct lp5521_led *led) +{ + return container_of(led, struct lp5521_chip, + leds[led->id]); +} + +static void lp5521_led_brightness_work(struct work_struct *work); + +static inline int lp5521_write(struct i2c_client *client, u8 reg, u8 value) +{ + return i2c_smbus_write_byte_data(client, reg, value); +} + +static int lp5521_read(struct i2c_client *client, u8 reg, u8 *buf) +{ + s32 ret; + + ret = i2c_smbus_read_byte_data(client, reg); + if (ret < 0) + return -EIO; + + *buf = ret; + return 0; +} + +static int lp5521_set_engine_mode(struct lp5521_engine *engine, u8 mode) +{ + struct lp5521_chip *chip = engine_to_lp5521(engine); + struct i2c_client *client = chip->client; + int ret; + u8 engine_state; + + /* Only transition between RUN and DIRECT mode are handled here */ + if (mode == LP5521_CMD_LOAD) + return 0; + + if (mode == LP5521_CMD_DISABLED) + mode = LP5521_CMD_DIRECT; + + ret = lp5521_read(client, LP5521_REG_OP_MODE, &engine_state); + + /* set mode only for this engine */ + engine_state &= ~(engine->engine_mask); + mode &= engine->engine_mask; + engine_state |= mode; + ret |= lp5521_write(client, LP5521_REG_OP_MODE, engine_state); + + return ret; +} + +static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern) +{ + struct lp5521_chip *chip = engine_to_lp5521(eng); + struct i2c_client *client = chip->client; + int ret; + int addr; + u8 mode; + + /* move current engine to direct mode and remember the state */ + ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT); + /* Mode change requires min 500 us delay. 1 - 2 ms with margin */ + usleep_range(1000, 2000); + ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode); + + /* For loading, all the engines to load mode */ + lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT); + /* Mode change requires min 500 us delay. 1 - 2 ms with margin */ + usleep_range(1000, 2000); + lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_LOAD); + /* Mode change requires min 500 us delay. 
1 - 2 ms with margin */ + usleep_range(1000, 2000); + + addr = LP5521_PROG_MEM_BASE + eng->prog_page * LP5521_PROG_MEM_SIZE; + i2c_smbus_write_i2c_block_data(client, + addr, + LP5521_PROG_MEM_SIZE, + pattern); + + ret |= lp5521_write(client, LP5521_REG_OP_MODE, mode); + return ret; +} + +static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr) +{ + return lp5521_write(chip->client, + LP5521_REG_LED_CURRENT_BASE + chip->leds[led].chan_nr, + curr); +} + +static void lp5521_init_engine(struct lp5521_chip *chip, + const struct attribute_group *attr_group) +{ + int i; + for (i = 0; i < ARRAY_SIZE(chip->engines); i++) { + chip->engines[i].id = i + 1; + chip->engines[i].engine_mask = LP5521_ENG_MASK_BASE >> (i * 2); + chip->engines[i].prog_page = i; + chip->engines[i].attributes = &attr_group[i]; + } +} + +static int lp5521_configure(struct i2c_client *client, + const struct attribute_group *attr_group) +{ + struct lp5521_chip *chip = i2c_get_clientdata(client); + int ret; + + lp5521_init_engine(chip, attr_group); + + /* Set all PWMs to direct control mode */ + ret = lp5521_write(client, LP5521_REG_OP_MODE, 0x3F); + + /* Enable auto-powersave, set charge pump to auto, red to battery */ + ret |= lp5521_write(client, LP5521_REG_CONFIG, + LP5521_PWRSAVE_EN | LP5521_CP_MODE_AUTO | LP5521_R_TO_BATT); + + /* Initialize all channels PWM to zero -> leds off */ + ret |= lp5521_write(client, LP5521_REG_R_PWM, 0); + ret |= lp5521_write(client, LP5521_REG_G_PWM, 0); + ret |= lp5521_write(client, LP5521_REG_B_PWM, 0); + + /* Set engines are set to run state when OP_MODE enables engines */ + ret |= lp5521_write(client, LP5521_REG_ENABLE, + LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM | + LP5521_EXEC_RUN); + /* enable takes 500us. 1 - 2 ms leaves some margin */ + usleep_range(1000, 2000); + + return ret; +} + +static int lp5521_run_selftest(struct lp5521_chip *chip, char *buf) +{ + int ret; + u8 status; + + ret = lp5521_read(chip->client, LP5521_REG_STATUS, &status); + if (ret < 0) + return ret; + + /* Check that ext clock is really in use if requested */ + if (chip->pdata && chip->pdata->clock_mode == LP5521_CLOCK_EXT) + if ((status & LP5521_EXT_CLK_USED) == 0) + return -EIO; + return 0; +} + +static void lp5521_set_brightness(struct led_classdev *cdev, + enum led_brightness brightness) +{ + struct lp5521_led *led = cdev_to_led(cdev); + led->brightness = (u8)brightness; + schedule_work(&led->brightness_work); +} + +static void lp5521_led_brightness_work(struct work_struct *work) +{ + struct lp5521_led *led = container_of(work, + struct lp5521_led, + brightness_work); + struct lp5521_chip *chip = led_to_lp5521(led); + struct i2c_client *client = chip->client; + + mutex_lock(&chip->lock); + lp5521_write(client, LP5521_REG_LED_PWM_BASE + led->chan_nr, + led->brightness); + mutex_unlock(&chip->lock); +} + +/* Detect the chip by setting its ENABLE register and reading it back. */ +static int lp5521_detect(struct i2c_client *client) +{ + int ret; + u8 buf; + + ret = lp5521_write(client, LP5521_REG_ENABLE, + LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM); + if (ret) + return ret; + /* enable takes 500us. 1 - 2 ms leaves some margin */ + usleep_range(1000, 2000); + ret = lp5521_read(client, LP5521_REG_ENABLE, &buf); + if (ret) + return ret; + if (buf != (LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM)) + return -ENODEV; + + return 0; +} + +/* Set engine mode and create appropriate sysfs attributes, if required. 
*/ +static int lp5521_set_mode(struct lp5521_engine *engine, u8 mode) +{ + struct lp5521_chip *chip = engine_to_lp5521(engine); + struct i2c_client *client = chip->client; + struct device *dev = &client->dev; + int ret = 0; + + /* if in that mode already do nothing, except for run */ + if (mode == engine->mode && mode != LP5521_CMD_RUN) + return 0; + + if (mode == LP5521_CMD_RUN) { + ret = lp5521_set_engine_mode(engine, LP5521_CMD_RUN); + } else if (mode == LP5521_CMD_LOAD) { + lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED); + lp5521_set_engine_mode(engine, LP5521_CMD_LOAD); + + ret = sysfs_create_group(&dev->kobj, engine->attributes); + if (ret) + return ret; + } else if (mode == LP5521_CMD_DISABLED) { + lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED); + } + + /* remove load attribute from sysfs if not in load mode */ + if (engine->mode == LP5521_CMD_LOAD && mode != LP5521_CMD_LOAD) + sysfs_remove_group(&dev->kobj, engine->attributes); + + engine->mode = mode; + + return ret; +} + +static int lp5521_do_store_load(struct lp5521_engine *engine, + const char *buf, size_t len) +{ + struct lp5521_chip *chip = engine_to_lp5521(engine); + struct i2c_client *client = chip->client; + int ret, nrchars, offset = 0, i = 0; + char c[3]; + unsigned cmd; + u8 pattern[LP5521_PROGRAM_LENGTH] = {0}; + + while ((offset < len - 1) && (i < LP5521_PROGRAM_LENGTH)) { + /* separate sscanfs because length is working only for %s */ + ret = sscanf(buf + offset, "%2s%n ", c, &nrchars); + ret = sscanf(c, "%2x", &cmd); + if (ret != 1) + goto fail; + pattern[i] = (u8)cmd; + + offset += nrchars; + i++; + } + + /* Each instruction is 16bit long. Check that length is even */ + if (i % 2) + goto fail; + + mutex_lock(&chip->lock); + ret = lp5521_load_program(engine, pattern); + mutex_unlock(&chip->lock); + + if (ret) { + dev_err(&client->dev, "failed loading pattern\n"); + return ret; + } + + return len; +fail: + dev_err(&client->dev, "wrong pattern format\n"); + return -EINVAL; +} + +static ssize_t store_engine_load(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len, int nr) +{ + struct i2c_client *client = to_i2c_client(dev); + struct lp5521_chip *chip = i2c_get_clientdata(client); + return lp5521_do_store_load(&chip->engines[nr - 1], buf, len); +} + +#define store_load(nr) \ +static ssize_t store_engine##nr##_load(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t len) \ +{ \ + return store_engine_load(dev, attr, buf, len, nr); \ +} +store_load(1) +store_load(2) +store_load(3) + +static ssize_t show_engine_mode(struct device *dev, + struct device_attribute *attr, + char *buf, int nr) +{ + struct i2c_client *client = to_i2c_client(dev); + struct lp5521_chip *chip = i2c_get_clientdata(client); + switch (chip->engines[nr - 1].mode) { + case LP5521_CMD_RUN: + return sprintf(buf, "run\n"); + case LP5521_CMD_LOAD: + return sprintf(buf, "load\n"); + case LP5521_CMD_DISABLED: + return sprintf(buf, "disabled\n"); + default: + return sprintf(buf, "disabled\n"); + } +} + +#define show_mode(nr) \ +static ssize_t show_engine##nr##_mode(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + return show_engine_mode(dev, attr, buf, nr); \ +} +show_mode(1) +show_mode(2) +show_mode(3) + +static ssize_t store_engine_mode(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len, int nr) +{ + struct i2c_client *client = to_i2c_client(dev); + struct lp5521_chip *chip = i2c_get_clientdata(client); + struct lp5521_engine 
*engine = &chip->engines[nr - 1]; + mutex_lock(&chip->lock); + + if (!strncmp(buf, "run", 3)) + lp5521_set_mode(engine, LP5521_CMD_RUN); + else if (!strncmp(buf, "load", 4)) + lp5521_set_mode(engine, LP5521_CMD_LOAD); + else if (!strncmp(buf, "disabled", 8)) + lp5521_set_mode(engine, LP5521_CMD_DISABLED); + + mutex_unlock(&chip->lock); + return len; +} + +#define store_mode(nr) \ +static ssize_t store_engine##nr##_mode(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t len) \ +{ \ + return store_engine_mode(dev, attr, buf, len, nr); \ +} +store_mode(1) +store_mode(2) +store_mode(3) + +static ssize_t show_max_current(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct lp5521_led *led = cdev_to_led(led_cdev); + + return sprintf(buf, "%d\n", led->max_current); +} + +static ssize_t show_current(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct lp5521_led *led = cdev_to_led(led_cdev); + + return sprintf(buf, "%d\n", led->led_current); +} + +static ssize_t store_current(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct lp5521_led *led = cdev_to_led(led_cdev); + struct lp5521_chip *chip = led_to_lp5521(led); + ssize_t ret; + unsigned long curr; + + if (strict_strtoul(buf, 0, &curr)) + return -EINVAL; + + if (curr > led->max_current) + return -EINVAL; + + mutex_lock(&chip->lock); + ret = lp5521_set_led_current(chip, led->id, curr); + mutex_unlock(&chip->lock); + + if (ret < 0) + return ret; + + led->led_current = (u8)curr; + + return len; +} + +static ssize_t lp5521_selftest(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct lp5521_chip *chip = i2c_get_clientdata(client); + int ret; + + mutex_lock(&chip->lock); + ret = lp5521_run_selftest(chip, buf); + mutex_unlock(&chip->lock); + return sprintf(buf, "%s\n", ret ? 
"FAIL" : "OK"); +} + +/* led class device attributes */ +static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current); +static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL); + +static struct attribute *lp5521_led_attributes[] = { + &dev_attr_led_current.attr, + &dev_attr_max_current.attr, + NULL, +}; + +static struct attribute_group lp5521_led_attribute_group = { + .attrs = lp5521_led_attributes +}; + +/* device attributes */ +static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO, + show_engine1_mode, store_engine1_mode); +static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO, + show_engine2_mode, store_engine2_mode); +static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO, + show_engine3_mode, store_engine3_mode); +static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load); +static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load); +static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load); +static DEVICE_ATTR(selftest, S_IRUGO, lp5521_selftest, NULL); + +static struct attribute *lp5521_attributes[] = { + &dev_attr_engine1_mode.attr, + &dev_attr_engine2_mode.attr, + &dev_attr_engine3_mode.attr, + &dev_attr_selftest.attr, + NULL +}; + +static struct attribute *lp5521_engine1_attributes[] = { + &dev_attr_engine1_load.attr, + NULL +}; + +static struct attribute *lp5521_engine2_attributes[] = { + &dev_attr_engine2_load.attr, + NULL +}; + +static struct attribute *lp5521_engine3_attributes[] = { + &dev_attr_engine3_load.attr, + NULL +}; + +static const struct attribute_group lp5521_group = { + .attrs = lp5521_attributes, +}; + +static const struct attribute_group lp5521_engine_group[] = { + {.attrs = lp5521_engine1_attributes }, + {.attrs = lp5521_engine2_attributes }, + {.attrs = lp5521_engine3_attributes }, +}; + +static int lp5521_register_sysfs(struct i2c_client *client) +{ + struct device *dev = &client->dev; + return sysfs_create_group(&dev->kobj, &lp5521_group); +} + +static void lp5521_unregister_sysfs(struct i2c_client *client) +{ + struct lp5521_chip *chip = i2c_get_clientdata(client); + struct device *dev = &client->dev; + int i; + + sysfs_remove_group(&dev->kobj, &lp5521_group); + + for (i = 0; i < ARRAY_SIZE(chip->engines); i++) { + if (chip->engines[i].mode == LP5521_CMD_LOAD) + sysfs_remove_group(&dev->kobj, + chip->engines[i].attributes); + } + + for (i = 0; i < chip->num_leds; i++) + sysfs_remove_group(&chip->leds[i].cdev.dev->kobj, + &lp5521_led_attribute_group); +} + +static int __init lp5521_init_led(struct lp5521_led *led, + struct i2c_client *client, + int chan, struct lp5521_platform_data *pdata) +{ + struct device *dev = &client->dev; + char name[32]; + int res; + + if (chan >= LP5521_MAX_LEDS) + return -EINVAL; + + if (pdata->led_config[chan].led_current == 0) + return 0; + + led->led_current = pdata->led_config[chan].led_current; + led->max_current = pdata->led_config[chan].max_current; + led->chan_nr = pdata->led_config[chan].chan_nr; + + if (led->chan_nr >= LP5521_MAX_LEDS) { + dev_err(dev, "Use channel numbers between 0 and %d\n", + LP5521_MAX_LEDS - 1); + return -EINVAL; + } + + snprintf(name, sizeof(name), "%s:channel%d", client->name, chan); + led->cdev.brightness_set = lp5521_set_brightness; + led->cdev.name = name; + res = led_classdev_register(dev, &led->cdev); + if (res < 0) { + dev_err(dev, "couldn't register led on channel %d\n", chan); + return res; + } + + res = sysfs_create_group(&led->cdev.dev->kobj, + &lp5521_led_attribute_group); + if (res < 0) { + dev_err(dev, "couldn't register current 
attribute\n"); + led_classdev_unregister(&led->cdev); + return res; + } + return 0; +} + +static int lp5521_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct lp5521_chip *chip; + struct lp5521_platform_data *pdata; + int ret, i, led; + + chip = kzalloc(sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + i2c_set_clientdata(client, chip); + chip->client = client; + + pdata = client->dev.platform_data; + + if (!pdata) { + dev_err(&client->dev, "no platform data\n"); + ret = -EINVAL; + goto fail1; + } + + mutex_init(&chip->lock); + + chip->pdata = pdata; + + if (pdata->setup_resources) { + ret = pdata->setup_resources(); + if (ret < 0) + goto fail1; + } + + if (pdata->enable) { + pdata->enable(0); + usleep_range(1000, 2000); /* Keep enable down at least 1ms */ + pdata->enable(1); + usleep_range(1000, 2000); /* 500us abs min. */ + } + + lp5521_write(client, LP5521_REG_RESET, 0xff); + usleep_range(10000, 20000); /* + * Exact value is not available. 10 - 20ms + * appears to be enough for reset. + */ + ret = lp5521_detect(client); + + if (ret) { + dev_err(&client->dev, "Chip not found\n"); + goto fail2; + } + + dev_info(&client->dev, "%s programmable led chip found\n", id->name); + + ret = lp5521_configure(client, lp5521_engine_group); + if (ret < 0) { + dev_err(&client->dev, "error configuring chip\n"); + goto fail2; + } + + /* Initialize leds */ + chip->num_channels = pdata->num_channels; + chip->num_leds = 0; + led = 0; + for (i = 0; i < pdata->num_channels; i++) { + /* Do not initialize channels that are not connected */ + if (pdata->led_config[i].led_current == 0) + continue; + + ret = lp5521_init_led(&chip->leds[led], client, i, pdata); + if (ret) { + dev_err(&client->dev, "error initializing leds\n"); + goto fail3; + } + chip->num_leds++; + + chip->leds[led].id = led; + /* Set initial LED current */ + lp5521_set_led_current(chip, led, + chip->leds[led].led_current); + + INIT_WORK(&(chip->leds[led].brightness_work), + lp5521_led_brightness_work); + + led++; + } + + ret = lp5521_register_sysfs(client); + if (ret) { + dev_err(&client->dev, "registering sysfs failed\n"); + goto fail3; + } + return ret; +fail3: + for (i = 0; i < chip->num_leds; i++) { + led_classdev_unregister(&chip->leds[i].cdev); + cancel_work_sync(&chip->leds[i].brightness_work); + } +fail2: + if (pdata->enable) + pdata->enable(0); + if (pdata->release_resources) + pdata->release_resources(); +fail1: + kfree(chip); + return ret; +} + +static int lp5521_remove(struct i2c_client *client) +{ + struct lp5521_chip *chip = i2c_get_clientdata(client); + int i; + + lp5521_unregister_sysfs(client); + + for (i = 0; i < chip->num_leds; i++) { + led_classdev_unregister(&chip->leds[i].cdev); + cancel_work_sync(&chip->leds[i].brightness_work); + } + + if (chip->pdata->enable) + chip->pdata->enable(0); + if (chip->pdata->release_resources) + chip->pdata->release_resources(); + kfree(chip); + return 0; +} + +static const struct i2c_device_id lp5521_id[] = { + { "lp5521", 0 }, /* Three channel chip */ + { } +}; +MODULE_DEVICE_TABLE(i2c, lp5521_id); + +static struct i2c_driver lp5521_driver = { + .driver = { + .name = "lp5521", + }, + .probe = lp5521_probe, + .remove = lp5521_remove, + .id_table = lp5521_id, +}; + +static int __init lp5521_init(void) +{ + int ret; + + ret = i2c_add_driver(&lp5521_driver); + + if (ret < 0) + printk(KERN_ALERT "Adding lp5521 driver failed\n"); + + return ret; +} + +static void __exit lp5521_exit(void) +{ + i2c_del_driver(&lp5521_driver); +} + +module_init(lp5521_init); 
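/*
 * A note on wiring this driver up: lp5521_probe() above only works when board
 * code supplies lp5521_platform_data through client->dev.platform_data. A
 * minimal sketch of such a definition follows; the field names mirror the
 * accesses made in lp5521_probe()/lp5521_init_led(), while the struct tag
 * lp5521_led_config, the current values and the enable callback name are
 * illustrative assumptions (the real definitions live in <linux/leds-lp5521.h>,
 * which is not part of this hunk).
 *
 *	static struct lp5521_led_config board_led_config[] = {
 *		{ .chan_nr = 0, .led_current = 50, .max_current = 130 },
 *		{ .chan_nr = 1, .led_current = 0,  .max_current = 130 },  (led_current == 0: channel not connected)
 *		{ .chan_nr = 2, .led_current = 50, .max_current = 130 },
 *	};
 *
 *	static struct lp5521_platform_data board_lp5521_data = {
 *		.led_config   = board_led_config,
 *		.num_channels = ARRAY_SIZE(board_led_config),
 *		.clock_mode   = LP5521_CLOCK_EXT,
 *		.enable       = board_lp5521_enable,  (toggles the chip enable line; optional, may be NULL)
 *	};
 */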
+module_exit(lp5521_exit); + +MODULE_AUTHOR("Mathias Nyman, Yuri Zaporozhets, Samu Onkalo"); +MODULE_DESCRIPTION("LP5521 LED engine"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c new file mode 100644 index 00000000000..0cc4ead2fd8 --- /dev/null +++ b/drivers/leds/leds-lp5523.c @@ -0,0 +1,1069 @@ +/* + * lp5523.c - LP5523 LED Driver + * + * Copyright (C) 2010 Nokia Corporation + * + * Contact: Samu Onkalo <samu.p.onkalo@nokia.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/i2c.h> +#include <linux/mutex.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/ctype.h> +#include <linux/spinlock.h> +#include <linux/wait.h> +#include <linux/leds.h> +#include <linux/leds-lp5523.h> +#include <linux/workqueue.h> +#include <linux/slab.h> + +#define LP5523_REG_ENABLE 0x00 +#define LP5523_REG_OP_MODE 0x01 +#define LP5523_REG_RATIOMETRIC_MSB 0x02 +#define LP5523_REG_RATIOMETRIC_LSB 0x03 +#define LP5523_REG_ENABLE_LEDS_MSB 0x04 +#define LP5523_REG_ENABLE_LEDS_LSB 0x05 +#define LP5523_REG_LED_CNTRL_BASE 0x06 +#define LP5523_REG_LED_PWM_BASE 0x16 +#define LP5523_REG_LED_CURRENT_BASE 0x26 +#define LP5523_REG_CONFIG 0x36 +#define LP5523_REG_CHANNEL1_PC 0x37 +#define LP5523_REG_CHANNEL2_PC 0x38 +#define LP5523_REG_CHANNEL3_PC 0x39 +#define LP5523_REG_STATUS 0x3a +#define LP5523_REG_GPO 0x3b +#define LP5523_REG_VARIABLE 0x3c +#define LP5523_REG_RESET 0x3d +#define LP5523_REG_TEMP_CTRL 0x3e +#define LP5523_REG_TEMP_READ 0x3f +#define LP5523_REG_TEMP_WRITE 0x40 +#define LP5523_REG_LED_TEST_CTRL 0x41 +#define LP5523_REG_LED_TEST_ADC 0x42 +#define LP5523_REG_ENG1_VARIABLE 0x45 +#define LP5523_REG_ENG2_VARIABLE 0x46 +#define LP5523_REG_ENG3_VARIABLE 0x47 +#define LP5523_REG_MASTER_FADER1 0x48 +#define LP5523_REG_MASTER_FADER2 0x49 +#define LP5523_REG_MASTER_FADER3 0x4a +#define LP5523_REG_CH1_PROG_START 0x4c +#define LP5523_REG_CH2_PROG_START 0x4d +#define LP5523_REG_CH3_PROG_START 0x4e +#define LP5523_REG_PROG_PAGE_SEL 0x4f +#define LP5523_REG_PROG_MEM 0x50 + +#define LP5523_CMD_LOAD 0x15 /* 00010101 */ +#define LP5523_CMD_RUN 0x2a /* 00101010 */ +#define LP5523_CMD_DISABLED 0x00 /* 00000000 */ + +#define LP5523_ENABLE 0x40 +#define LP5523_AUTO_INC 0x40 +#define LP5523_PWR_SAVE 0x20 +#define LP5523_PWM_PWR_SAVE 0x04 +#define LP5523_CP_1 0x08 +#define LP5523_CP_1_5 0x10 +#define LP5523_CP_AUTO 0x18 +#define LP5523_INT_CLK 0x01 +#define LP5523_AUTO_CLK 0x02 +#define LP5523_EN_LEDTEST 0x80 +#define LP5523_LEDTEST_DONE 0x80 + +#define LP5523_DEFAULT_CURRENT 50 /* microAmps */ +#define LP5523_PROGRAM_LENGTH 32 /* in bytes */ +#define LP5523_PROGRAM_PAGES 6 +#define LP5523_ADC_SHORTCIRC_LIM 80 + +#define LP5523_LEDS 9 +#define LP5523_ENGINES 3 + +#define LP5523_ENG_MASK_BASE 0x30 /* 00110000 */ + +#define LP5523_ENG_STATUS_MASK 0x07 /* 
00000111 */ + +#define LP5523_IRQ_FLAGS IRQF_TRIGGER_FALLING + +#define LP5523_EXT_CLK_USED 0x08 + +#define LED_ACTIVE(mux, led) (!!(mux & (0x0001 << led))) +#define SHIFT_MASK(id) (((id) - 1) * 2) + +struct lp5523_engine { + const struct attribute_group *attributes; + int id; + u8 mode; + u8 prog_page; + u8 mux_page; + u16 led_mux; + u8 engine_mask; +}; + +struct lp5523_led { + int id; + u8 chan_nr; + u8 led_current; + u8 max_current; + struct led_classdev cdev; + struct work_struct brightness_work; + u8 brightness; +}; + +struct lp5523_chip { + struct mutex lock; /* Serialize control */ + struct i2c_client *client; + struct lp5523_engine engines[LP5523_ENGINES]; + struct lp5523_led leds[LP5523_LEDS]; + struct lp5523_platform_data *pdata; + u8 num_channels; + u8 num_leds; +}; + +static inline struct lp5523_led *cdev_to_led(struct led_classdev *cdev) +{ + return container_of(cdev, struct lp5523_led, cdev); +} + +static inline struct lp5523_chip *engine_to_lp5523(struct lp5523_engine *engine) +{ + return container_of(engine, struct lp5523_chip, + engines[engine->id - 1]); +} + +static inline struct lp5523_chip *led_to_lp5523(struct lp5523_led *led) +{ + return container_of(led, struct lp5523_chip, + leds[led->id]); +} + +static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode); +static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode); +static int lp5523_load_program(struct lp5523_engine *engine, u8 *pattern); + +static void lp5523_led_brightness_work(struct work_struct *work); + +static int lp5523_write(struct i2c_client *client, u8 reg, u8 value) +{ + return i2c_smbus_write_byte_data(client, reg, value); +} + +static int lp5523_read(struct i2c_client *client, u8 reg, u8 *buf) +{ + s32 ret = i2c_smbus_read_byte_data(client, reg); + + if (ret < 0) + return -EIO; + + *buf = ret; + return 0; +} + +static int lp5523_detect(struct i2c_client *client) +{ + int ret; + u8 buf; + + ret = lp5523_write(client, LP5523_REG_ENABLE, 0x40); + if (ret) + return ret; + ret = lp5523_read(client, LP5523_REG_ENABLE, &buf); + if (ret) + return ret; + if (buf == 0x40) + return 0; + else + return -ENODEV; +} + +static int lp5523_configure(struct i2c_client *client) +{ + struct lp5523_chip *chip = i2c_get_clientdata(client); + int ret = 0; + u8 status; + + /* one pattern per engine setting led mux start and stop addresses */ + u8 pattern[][LP5523_PROGRAM_LENGTH] = { + { 0x9c, 0x30, 0x9c, 0xb0, 0x9d, 0x80, 0xd8, 0x00, 0}, + { 0x9c, 0x40, 0x9c, 0xc0, 0x9d, 0x80, 0xd8, 0x00, 0}, + { 0x9c, 0x50, 0x9c, 0xd0, 0x9d, 0x80, 0xd8, 0x00, 0}, + }; + + ret |= lp5523_write(client, LP5523_REG_ENABLE, LP5523_ENABLE); + /* Chip startup time is 500 us, 1 - 2 ms gives some margin */ + usleep_range(1000, 2000); + + ret |= lp5523_write(client, LP5523_REG_CONFIG, + LP5523_AUTO_INC | LP5523_PWR_SAVE | + LP5523_CP_AUTO | LP5523_AUTO_CLK | + LP5523_PWM_PWR_SAVE); + + /* turn on all leds */ + ret |= lp5523_write(client, LP5523_REG_ENABLE_LEDS_MSB, 0x01); + ret |= lp5523_write(client, LP5523_REG_ENABLE_LEDS_LSB, 0xff); + + /* hardcode 32 bytes of memory for each engine from program memory */ + ret |= lp5523_write(client, LP5523_REG_CH1_PROG_START, 0x00); + ret |= lp5523_write(client, LP5523_REG_CH2_PROG_START, 0x10); + ret |= lp5523_write(client, LP5523_REG_CH3_PROG_START, 0x20); + + /* write led mux address space for each channel */ + ret |= lp5523_load_program(&chip->engines[0], pattern[0]); + ret |= lp5523_load_program(&chip->engines[1], pattern[1]); + ret |= lp5523_load_program(&chip->engines[2], pattern[2]); + 
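/*
 * Each pattern above is one 32-byte engine program that sets the LED mux
 * start and stop addresses for its engine: lp5523_load_program() selects the
 * engine's program page via LP5523_REG_PROG_PAGE_SEL and writes the bytes as
 * a single I2C block transfer, so at this point 'ret' carries any accumulated
 * I2C write failure and is checked next.
 */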
+ if (ret) { + dev_err(&client->dev, "could not load mux programs\n"); + return -1; + } + + /* set all engines exec state and mode to run 00101010 */ + ret |= lp5523_write(client, LP5523_REG_ENABLE, + (LP5523_CMD_RUN | LP5523_ENABLE)); + + ret |= lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_RUN); + + if (ret) { + dev_err(&client->dev, "could not start mux programs\n"); + return -1; + } + + /* Let the programs run for couple of ms and check the engine status */ + usleep_range(3000, 6000); + lp5523_read(client, LP5523_REG_STATUS, &status); + status &= LP5523_ENG_STATUS_MASK; + + if (status == LP5523_ENG_STATUS_MASK) { + dev_dbg(&client->dev, "all engines configured\n"); + } else { + dev_info(&client->dev, "status == %x\n", status); + dev_err(&client->dev, "cound not configure LED engine\n"); + return -1; + } + + dev_info(&client->dev, "disabling engines\n"); + + ret |= lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_DISABLED); + + return ret; +} + +static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode) +{ + struct lp5523_chip *chip = engine_to_lp5523(engine); + struct i2c_client *client = chip->client; + int ret; + u8 engine_state; + + ret = lp5523_read(client, LP5523_REG_OP_MODE, &engine_state); + if (ret) + goto fail; + + engine_state &= ~(engine->engine_mask); + + /* set mode only for this engine */ + mode &= engine->engine_mask; + + engine_state |= mode; + + ret |= lp5523_write(client, LP5523_REG_OP_MODE, engine_state); +fail: + return ret; +} + +static int lp5523_load_mux(struct lp5523_engine *engine, u16 mux) +{ + struct lp5523_chip *chip = engine_to_lp5523(engine); + struct i2c_client *client = chip->client; + int ret = 0; + + ret |= lp5523_set_engine_mode(engine, LP5523_CMD_LOAD); + + ret |= lp5523_write(client, LP5523_REG_PROG_PAGE_SEL, engine->mux_page); + ret |= lp5523_write(client, LP5523_REG_PROG_MEM, + (u8)(mux >> 8)); + ret |= lp5523_write(client, LP5523_REG_PROG_MEM + 1, (u8)(mux)); + engine->led_mux = mux; + + return ret; +} + +static int lp5523_load_program(struct lp5523_engine *engine, u8 *pattern) +{ + struct lp5523_chip *chip = engine_to_lp5523(engine); + struct i2c_client *client = chip->client; + + int ret = 0; + + ret |= lp5523_set_engine_mode(engine, LP5523_CMD_LOAD); + + ret |= lp5523_write(client, LP5523_REG_PROG_PAGE_SEL, + engine->prog_page); + ret |= i2c_smbus_write_i2c_block_data(client, LP5523_REG_PROG_MEM, + LP5523_PROGRAM_LENGTH, pattern); + + return ret; +} + +static int lp5523_run_program(struct lp5523_engine *engine) +{ + struct lp5523_chip *chip = engine_to_lp5523(engine); + struct i2c_client *client = chip->client; + int ret; + + ret = lp5523_write(client, LP5523_REG_ENABLE, + LP5523_CMD_RUN | LP5523_ENABLE); + if (ret) + goto fail; + + ret = lp5523_set_engine_mode(engine, LP5523_CMD_RUN); +fail: + return ret; +} + +static int lp5523_mux_parse(const char *buf, u16 *mux, size_t len) +{ + int i; + u16 tmp_mux = 0; + len = len < LP5523_LEDS ? 
len : LP5523_LEDS; + for (i = 0; i < len; i++) { + switch (buf[i]) { + case '1': + tmp_mux |= (1 << i); + break; + case '0': + break; + case '\n': + i = len; + break; + default: + return -1; + } + } + *mux = tmp_mux; + + return 0; +} + +static void lp5523_mux_to_array(u16 led_mux, char *array) +{ + int i, pos = 0; + for (i = 0; i < LP5523_LEDS; i++) + pos += sprintf(array + pos, "%x", LED_ACTIVE(led_mux, i)); + + array[pos] = '\0'; +} + +/*--------------------------------------------------------------*/ +/* Sysfs interface */ +/*--------------------------------------------------------------*/ + +static ssize_t show_engine_leds(struct device *dev, + struct device_attribute *attr, + char *buf, int nr) +{ + struct i2c_client *client = to_i2c_client(dev); + struct lp5523_chip *chip = i2c_get_clientdata(client); + char mux[LP5523_LEDS + 1]; + + lp5523_mux_to_array(chip->engines[nr - 1].led_mux, mux); + + return sprintf(buf, "%s\n", mux); +} + +#define show_leds(nr) \ +static ssize_t show_engine##nr##_leds(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + return show_engine_leds(dev, attr, buf, nr); \ +} +show_leds(1) +show_leds(2) +show_leds(3) + +static ssize_t store_engine_leds(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len, int nr) +{ + struct i2c_client *client = to_i2c_client(dev); + struct lp5523_chip *chip = i2c_get_clientdata(client); + u16 mux = 0; + + if (lp5523_mux_parse(buf, &mux, len)) + return -EINVAL; + + if (lp5523_load_mux(&chip->engines[nr - 1], mux)) + return -EINVAL; + + return len; +} + +#define store_leds(nr) \ +static ssize_t store_engine##nr##_leds(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t len) \ +{ \ + return store_engine_leds(dev, attr, buf, len, nr); \ +} +store_leds(1) +store_leds(2) +store_leds(3) + +static ssize_t lp5523_selftest(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct lp5523_chip *chip = i2c_get_clientdata(client); + int i, ret, pos = 0; + int led = 0; + u8 status, adc, vdd; + + mutex_lock(&chip->lock); + + ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status); + if (ret < 0) + goto fail; + + /* Check that ext clock is really in use if requested */ + if ((chip->pdata) && (chip->pdata->clock_mode == LP5523_CLOCK_EXT)) + if ((status & LP5523_EXT_CLK_USED) == 0) + goto fail; + + /* Measure VDD (i.e. VBAT) first (channel 16 corresponds to VDD) */ + lp5523_write(chip->client, LP5523_REG_LED_TEST_CTRL, + LP5523_EN_LEDTEST | 16); + usleep_range(3000, 6000); /* ADC conversion time is typically 2.7 ms */ + ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status); + if (!(status & LP5523_LEDTEST_DONE)) + usleep_range(3000, 6000); /* Was not ready. 
Wait little bit */ + + ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &vdd); + vdd--; /* There may be some fluctuation in measurement */ + + for (i = 0; i < LP5523_LEDS; i++) { + /* Skip non-existing channels */ + if (chip->pdata->led_config[i].led_current == 0) + continue; + + /* Set default current */ + lp5523_write(chip->client, + LP5523_REG_LED_CURRENT_BASE + i, + chip->pdata->led_config[i].led_current); + + lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0xff); + /* let current stabilize 2 - 4ms before measurements start */ + usleep_range(2000, 4000); + lp5523_write(chip->client, + LP5523_REG_LED_TEST_CTRL, + LP5523_EN_LEDTEST | i); + /* ADC conversion time is 2.7 ms typically */ + usleep_range(3000, 6000); + ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status); + if (!(status & LP5523_LEDTEST_DONE)) + usleep_range(3000, 6000);/* Was not ready. Wait. */ + ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &adc); + + if (adc >= vdd || adc < LP5523_ADC_SHORTCIRC_LIM) + pos += sprintf(buf + pos, "LED %d FAIL\n", i); + + lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0x00); + + /* Restore current */ + lp5523_write(chip->client, + LP5523_REG_LED_CURRENT_BASE + i, + chip->leds[led].led_current); + led++; + } + if (pos == 0) + pos = sprintf(buf, "OK\n"); + goto release_lock; +fail: + pos = sprintf(buf, "FAIL\n"); + +release_lock: + mutex_unlock(&chip->lock); + + return pos; +} + +static void lp5523_set_brightness(struct led_classdev *cdev, + enum led_brightness brightness) +{ + struct lp5523_led *led = cdev_to_led(cdev); + + led->brightness = (u8)brightness; + + schedule_work(&led->brightness_work); +} + +static void lp5523_led_brightness_work(struct work_struct *work) +{ + struct lp5523_led *led = container_of(work, + struct lp5523_led, + brightness_work); + struct lp5523_chip *chip = led_to_lp5523(led); + struct i2c_client *client = chip->client; + + mutex_lock(&chip->lock); + + lp5523_write(client, LP5523_REG_LED_PWM_BASE + led->chan_nr, + led->brightness); + + mutex_unlock(&chip->lock); +} + +static int lp5523_do_store_load(struct lp5523_engine *engine, + const char *buf, size_t len) +{ + struct lp5523_chip *chip = engine_to_lp5523(engine); + struct i2c_client *client = chip->client; + int ret, nrchars, offset = 0, i = 0; + char c[3]; + unsigned cmd; + u8 pattern[LP5523_PROGRAM_LENGTH] = {0}; + + while ((offset < len - 1) && (i < LP5523_PROGRAM_LENGTH)) { + /* separate sscanfs because length is working only for %s */ + ret = sscanf(buf + offset, "%2s%n ", c, &nrchars); + ret = sscanf(c, "%2x", &cmd); + if (ret != 1) + goto fail; + pattern[i] = (u8)cmd; + + offset += nrchars; + i++; + } + + /* Each instruction is 16bit long. 
Check that length is even */ + if (i % 2) + goto fail; + + mutex_lock(&chip->lock); + + ret = lp5523_load_program(engine, pattern); + mutex_unlock(&chip->lock); + + if (ret) { + dev_err(&client->dev, "failed loading pattern\n"); + return ret; + } + + return len; +fail: + dev_err(&client->dev, "wrong pattern format\n"); + return -EINVAL; +} + +static ssize_t store_engine_load(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len, int nr) +{ + struct i2c_client *client = to_i2c_client(dev); + struct lp5523_chip *chip = i2c_get_clientdata(client); + return lp5523_do_store_load(&chip->engines[nr - 1], buf, len); +} + +#define store_load(nr) \ +static ssize_t store_engine##nr##_load(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t len) \ +{ \ + return store_engine_load(dev, attr, buf, len, nr); \ +} +store_load(1) +store_load(2) +store_load(3) + +static ssize_t show_engine_mode(struct device *dev, + struct device_attribute *attr, + char *buf, int nr) +{ + struct i2c_client *client = to_i2c_client(dev); + struct lp5523_chip *chip = i2c_get_clientdata(client); + switch (chip->engines[nr - 1].mode) { + case LP5523_CMD_RUN: + return sprintf(buf, "run\n"); + case LP5523_CMD_LOAD: + return sprintf(buf, "load\n"); + case LP5523_CMD_DISABLED: + return sprintf(buf, "disabled\n"); + default: + return sprintf(buf, "disabled\n"); + } +} + +#define show_mode(nr) \ +static ssize_t show_engine##nr##_mode(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + return show_engine_mode(dev, attr, buf, nr); \ +} +show_mode(1) +show_mode(2) +show_mode(3) + +static ssize_t store_engine_mode(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len, int nr) +{ + struct i2c_client *client = to_i2c_client(dev); + struct lp5523_chip *chip = i2c_get_clientdata(client); + struct lp5523_engine *engine = &chip->engines[nr - 1]; + mutex_lock(&chip->lock); + + if (!strncmp(buf, "run", 3)) + lp5523_set_mode(engine, LP5523_CMD_RUN); + else if (!strncmp(buf, "load", 4)) + lp5523_set_mode(engine, LP5523_CMD_LOAD); + else if (!strncmp(buf, "disabled", 8)) + lp5523_set_mode(engine, LP5523_CMD_DISABLED); + + mutex_unlock(&chip->lock); + return len; +} + +#define store_mode(nr) \ +static ssize_t store_engine##nr##_mode(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t len) \ +{ \ + return store_engine_mode(dev, attr, buf, len, nr); \ +} +store_mode(1) +store_mode(2) +store_mode(3) + +static ssize_t show_max_current(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct lp5523_led *led = cdev_to_led(led_cdev); + + return sprintf(buf, "%d\n", led->max_current); +} + +static ssize_t show_current(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct lp5523_led *led = cdev_to_led(led_cdev); + + return sprintf(buf, "%d\n", led->led_current); +} + +static ssize_t store_current(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct lp5523_led *led = cdev_to_led(led_cdev); + struct lp5523_chip *chip = led_to_lp5523(led); + ssize_t ret; + unsigned long curr; + + if (strict_strtoul(buf, 0, &curr)) + return -EINVAL; + + if (curr > led->max_current) + return -EINVAL; + + mutex_lock(&chip->lock); + ret = lp5523_write(chip->client, + 
LP5523_REG_LED_CURRENT_BASE + led->chan_nr, + (u8)curr); + mutex_unlock(&chip->lock); + + if (ret < 0) + return ret; + + led->led_current = (u8)curr; + + return len; +} + +/* led class device attributes */ +static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current); +static DEVICE_ATTR(max_current, S_IRUGO , show_max_current, NULL); + +static struct attribute *lp5523_led_attributes[] = { + &dev_attr_led_current.attr, + &dev_attr_max_current.attr, + NULL, +}; + +static struct attribute_group lp5523_led_attribute_group = { + .attrs = lp5523_led_attributes +}; + +/* device attributes */ +static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO, + show_engine1_mode, store_engine1_mode); +static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO, + show_engine2_mode, store_engine2_mode); +static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO, + show_engine3_mode, store_engine3_mode); +static DEVICE_ATTR(engine1_leds, S_IRUGO | S_IWUGO, + show_engine1_leds, store_engine1_leds); +static DEVICE_ATTR(engine2_leds, S_IRUGO | S_IWUGO, + show_engine2_leds, store_engine2_leds); +static DEVICE_ATTR(engine3_leds, S_IRUGO | S_IWUGO, + show_engine3_leds, store_engine3_leds); +static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load); +static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load); +static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load); +static DEVICE_ATTR(selftest, S_IRUGO, lp5523_selftest, NULL); + +static struct attribute *lp5523_attributes[] = { + &dev_attr_engine1_mode.attr, + &dev_attr_engine2_mode.attr, + &dev_attr_engine3_mode.attr, + &dev_attr_selftest.attr, + NULL +}; + +static struct attribute *lp5523_engine1_attributes[] = { + &dev_attr_engine1_load.attr, + &dev_attr_engine1_leds.attr, + NULL +}; + +static struct attribute *lp5523_engine2_attributes[] = { + &dev_attr_engine2_load.attr, + &dev_attr_engine2_leds.attr, + NULL +}; + +static struct attribute *lp5523_engine3_attributes[] = { + &dev_attr_engine3_load.attr, + &dev_attr_engine3_leds.attr, + NULL +}; + +static const struct attribute_group lp5523_group = { + .attrs = lp5523_attributes, +}; + +static const struct attribute_group lp5523_engine_group[] = { + {.attrs = lp5523_engine1_attributes }, + {.attrs = lp5523_engine2_attributes }, + {.attrs = lp5523_engine3_attributes }, +}; + +static int lp5523_register_sysfs(struct i2c_client *client) +{ + struct device *dev = &client->dev; + int ret; + + ret = sysfs_create_group(&dev->kobj, &lp5523_group); + if (ret < 0) + return ret; + + return 0; +} + +static void lp5523_unregister_sysfs(struct i2c_client *client) +{ + struct lp5523_chip *chip = i2c_get_clientdata(client); + struct device *dev = &client->dev; + int i; + + sysfs_remove_group(&dev->kobj, &lp5523_group); + + for (i = 0; i < ARRAY_SIZE(chip->engines); i++) + if (chip->engines[i].mode == LP5523_CMD_LOAD) + sysfs_remove_group(&dev->kobj, &lp5523_engine_group[i]); + + for (i = 0; i < chip->num_leds; i++) + sysfs_remove_group(&chip->leds[i].cdev.dev->kobj, + &lp5523_led_attribute_group); +} + +/*--------------------------------------------------------------*/ +/* Set chip operating mode */ +/*--------------------------------------------------------------*/ +static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode) +{ + /* engine to chip */ + struct lp5523_chip *chip = engine_to_lp5523(engine); + struct i2c_client *client = chip->client; + struct device *dev = &client->dev; + int ret = 0; + + /* if in that mode already do nothing, except for run */ + if (mode == engine->mode && mode 
!= LP5523_CMD_RUN) + return 0; + + if (mode == LP5523_CMD_RUN) { + ret = lp5523_run_program(engine); + } else if (mode == LP5523_CMD_LOAD) { + lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED); + lp5523_set_engine_mode(engine, LP5523_CMD_LOAD); + + ret = sysfs_create_group(&dev->kobj, engine->attributes); + if (ret) + return ret; + } else if (mode == LP5523_CMD_DISABLED) { + lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED); + } + + /* remove load attribute from sysfs if not in load mode */ + if (engine->mode == LP5523_CMD_LOAD && mode != LP5523_CMD_LOAD) + sysfs_remove_group(&dev->kobj, engine->attributes); + + engine->mode = mode; + + return ret; +} + +/*--------------------------------------------------------------*/ +/* Probe, Attach, Remove */ +/*--------------------------------------------------------------*/ +static int __init lp5523_init_engine(struct lp5523_engine *engine, int id) +{ + if (id < 1 || id > LP5523_ENGINES) + return -1; + engine->id = id; + engine->engine_mask = LP5523_ENG_MASK_BASE >> SHIFT_MASK(id); + engine->prog_page = id - 1; + engine->mux_page = id + 2; + engine->attributes = &lp5523_engine_group[id - 1]; + + return 0; +} + +static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev, + int chan, struct lp5523_platform_data *pdata) +{ + char name[32]; + int res; + + if (chan >= LP5523_LEDS) + return -EINVAL; + + if (pdata->led_config[chan].led_current) { + led->led_current = pdata->led_config[chan].led_current; + led->max_current = pdata->led_config[chan].max_current; + led->chan_nr = pdata->led_config[chan].chan_nr; + + if (led->chan_nr >= LP5523_LEDS) { + dev_err(dev, "Use channel numbers between 0 and %d\n", + LP5523_LEDS - 1); + return -EINVAL; + } + + snprintf(name, 32, "lp5523:channel%d", chan); + + led->cdev.name = name; + led->cdev.brightness_set = lp5523_set_brightness; + res = led_classdev_register(dev, &led->cdev); + if (res < 0) { + dev_err(dev, "couldn't register led on channel %d\n", + chan); + return res; + } + res = sysfs_create_group(&led->cdev.dev->kobj, + &lp5523_led_attribute_group); + if (res < 0) { + dev_err(dev, "couldn't register current attribute\n"); + led_classdev_unregister(&led->cdev); + return res; + } + } else { + led->led_current = 0; + } + return 0; +} + +static struct i2c_driver lp5523_driver; + +static int lp5523_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct lp5523_chip *chip; + struct lp5523_platform_data *pdata; + int ret, i, led; + + chip = kzalloc(sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + i2c_set_clientdata(client, chip); + chip->client = client; + + pdata = client->dev.platform_data; + + if (!pdata) { + dev_err(&client->dev, "no platform data\n"); + ret = -EINVAL; + goto fail1; + } + + mutex_init(&chip->lock); + + chip->pdata = pdata; + + if (pdata->setup_resources) { + ret = pdata->setup_resources(); + if (ret < 0) + goto fail1; + } + + if (pdata->enable) { + pdata->enable(0); + usleep_range(1000, 2000); /* Keep enable down at least 1ms */ + pdata->enable(1); + usleep_range(1000, 2000); /* 500us abs min. */ + } + + lp5523_write(client, LP5523_REG_RESET, 0xff); + usleep_range(10000, 20000); /* + * Exact value is not available. 10 - 20ms + * appears to be enough for reset. 
+ */ + ret = lp5523_detect(client); + if (ret) + goto fail2; + + dev_info(&client->dev, "LP5523 Programmable led chip found\n"); + + /* Initialize engines */ + for (i = 0; i < ARRAY_SIZE(chip->engines); i++) { + ret = lp5523_init_engine(&chip->engines[i], i + 1); + if (ret) { + dev_err(&client->dev, "error initializing engine\n"); + goto fail2; + } + } + ret = lp5523_configure(client); + if (ret < 0) { + dev_err(&client->dev, "error configuring chip\n"); + goto fail2; + } + + /* Initialize leds */ + chip->num_channels = pdata->num_channels; + chip->num_leds = 0; + led = 0; + for (i = 0; i < pdata->num_channels; i++) { + /* Do not initialize channels that are not connected */ + if (pdata->led_config[i].led_current == 0) + continue; + + ret = lp5523_init_led(&chip->leds[led], &client->dev, i, pdata); + if (ret) { + dev_err(&client->dev, "error initializing leds\n"); + goto fail3; + } + chip->num_leds++; + + chip->leds[led].id = led; + /* Set LED current */ + lp5523_write(client, + LP5523_REG_LED_CURRENT_BASE + chip->leds[led].chan_nr, + chip->leds[led].led_current); + + INIT_WORK(&(chip->leds[led].brightness_work), + lp5523_led_brightness_work); + + led++; + } + + ret = lp5523_register_sysfs(client); + if (ret) { + dev_err(&client->dev, "registering sysfs failed\n"); + goto fail3; + } + return ret; +fail3: + for (i = 0; i < chip->num_leds; i++) { + led_classdev_unregister(&chip->leds[i].cdev); + cancel_work_sync(&chip->leds[i].brightness_work); + } +fail2: + if (pdata->enable) + pdata->enable(0); + if (pdata->release_resources) + pdata->release_resources(); +fail1: + kfree(chip); + return ret; +} + +static int lp5523_remove(struct i2c_client *client) +{ + struct lp5523_chip *chip = i2c_get_clientdata(client); + int i; + + lp5523_unregister_sysfs(client); + + for (i = 0; i < chip->num_leds; i++) { + led_classdev_unregister(&chip->leds[i].cdev); + cancel_work_sync(&chip->leds[i].brightness_work); + } + + if (chip->pdata->enable) + chip->pdata->enable(0); + if (chip->pdata->release_resources) + chip->pdata->release_resources(); + kfree(chip); + return 0; +} + +static const struct i2c_device_id lp5523_id[] = { + { "lp5523", 0 }, + { } +}; + +MODULE_DEVICE_TABLE(i2c, lp5523_id); + +static struct i2c_driver lp5523_driver = { + .driver = { + .name = "lp5523", + }, + .probe = lp5523_probe, + .remove = lp5523_remove, + .id_table = lp5523_id, +}; + +static int __init lp5523_init(void) +{ + int ret; + + ret = i2c_add_driver(&lp5523_driver); + + if (ret < 0) + printk(KERN_ALERT "Adding lp5523 driver failed\n"); + + return ret; +} + +static void __exit lp5523_exit(void) +{ + i2c_del_driver(&lp5523_driver); +} + +module_init(lp5523_init); +module_exit(lp5523_exit); + +MODULE_AUTHOR("Mathias Nyman <mathias.nyman@nokia.com>"); +MODULE_DESCRIPTION("LP5523 LED engine"); +MODULE_LICENSE("GPL"); diff --git a/drivers/leds/leds-net5501.c b/drivers/leds/leds-net5501.c index 3063f591f0d..1739557a903 100644 --- a/drivers/leds/leds-net5501.c +++ b/drivers/leds/leds-net5501.c @@ -92,3 +92,5 @@ unmap: } arch_initcall(soekris_init); + +MODULE_LICENSE("GPL"); diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c index a688293abd0..614ebebaaa2 100644 --- a/drivers/leds/leds-ss4200.c +++ b/drivers/leds/leds-ss4200.c @@ -102,6 +102,7 @@ static struct dmi_system_id __initdata nas_led_whitelist[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "1.00.00") } }, + {} }; /* diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c index 82b77bd482f..b09bcbeade9 100644 --- a/drivers/leds/ledtrig-timer.c +++ 
b/drivers/leds/ledtrig-timer.c @@ -12,73 +12,25 @@ */ #include <linux/module.h> -#include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/init.h> -#include <linux/list.h> -#include <linux/spinlock.h> #include <linux/device.h> -#include <linux/sysdev.h> -#include <linux/timer.h> #include <linux/ctype.h> #include <linux/leds.h> -#include <linux/slab.h> #include "leds.h" -struct timer_trig_data { - int brightness_on; /* LED brightness during "on" period. - * (LED_OFF < brightness_on <= LED_FULL) - */ - unsigned long delay_on; /* milliseconds on */ - unsigned long delay_off; /* milliseconds off */ - struct timer_list timer; -}; - -static void led_timer_function(unsigned long data) -{ - struct led_classdev *led_cdev = (struct led_classdev *) data; - struct timer_trig_data *timer_data = led_cdev->trigger_data; - unsigned long brightness; - unsigned long delay; - - if (!timer_data->delay_on || !timer_data->delay_off) { - led_set_brightness(led_cdev, LED_OFF); - return; - } - - brightness = led_get_brightness(led_cdev); - if (!brightness) { - /* Time to switch the LED on. */ - brightness = timer_data->brightness_on; - delay = timer_data->delay_on; - } else { - /* Store the current brightness value to be able - * to restore it when the delay_off period is over. - */ - timer_data->brightness_on = brightness; - brightness = LED_OFF; - delay = timer_data->delay_off; - } - - led_set_brightness(led_cdev, brightness); - - mod_timer(&timer_data->timer, jiffies + msecs_to_jiffies(delay)); -} - static ssize_t led_delay_on_show(struct device *dev, struct device_attribute *attr, char *buf) { struct led_classdev *led_cdev = dev_get_drvdata(dev); - struct timer_trig_data *timer_data = led_cdev->trigger_data; - return sprintf(buf, "%lu\n", timer_data->delay_on); + return sprintf(buf, "%lu\n", led_cdev->blink_delay_on); } static ssize_t led_delay_on_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct led_classdev *led_cdev = dev_get_drvdata(dev); - struct timer_trig_data *timer_data = led_cdev->trigger_data; int ret = -EINVAL; char *after; unsigned long state = simple_strtoul(buf, &after, 10); @@ -88,21 +40,7 @@ static ssize_t led_delay_on_store(struct device *dev, count++; if (count == size) { - if (timer_data->delay_on != state) { - /* the new value differs from the previous */ - timer_data->delay_on = state; - - /* deactivate previous settings */ - del_timer_sync(&timer_data->timer); - - /* try to activate hardware acceleration, if any */ - if (!led_cdev->blink_set || - led_cdev->blink_set(led_cdev, - &timer_data->delay_on, &timer_data->delay_off)) { - /* no hardware acceleration, blink via timer */ - mod_timer(&timer_data->timer, jiffies + 1); - } - } + led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off); ret = count; } @@ -113,16 +51,14 @@ static ssize_t led_delay_off_show(struct device *dev, struct device_attribute *attr, char *buf) { struct led_classdev *led_cdev = dev_get_drvdata(dev); - struct timer_trig_data *timer_data = led_cdev->trigger_data; - return sprintf(buf, "%lu\n", timer_data->delay_off); + return sprintf(buf, "%lu\n", led_cdev->blink_delay_off); } static ssize_t led_delay_off_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct led_classdev *led_cdev = dev_get_drvdata(dev); - struct timer_trig_data *timer_data = led_cdev->trigger_data; int ret = -EINVAL; char *after; unsigned long state = simple_strtoul(buf, &after, 10); @@ -132,21 +68,7 @@ static ssize_t led_delay_off_store(struct 
device *dev, count++; if (count == size) { - if (timer_data->delay_off != state) { - /* the new value differs from the previous */ - timer_data->delay_off = state; - - /* deactivate previous settings */ - del_timer_sync(&timer_data->timer); - - /* try to activate hardware acceleration, if any */ - if (!led_cdev->blink_set || - led_cdev->blink_set(led_cdev, - &timer_data->delay_on, &timer_data->delay_off)) { - /* no hardware acceleration, blink via timer */ - mod_timer(&timer_data->timer, jiffies + 1); - } - } + led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state); ret = count; } @@ -158,60 +80,34 @@ static DEVICE_ATTR(delay_off, 0644, led_delay_off_show, led_delay_off_store); static void timer_trig_activate(struct led_classdev *led_cdev) { - struct timer_trig_data *timer_data; int rc; - timer_data = kzalloc(sizeof(struct timer_trig_data), GFP_KERNEL); - if (!timer_data) - return; - - timer_data->brightness_on = led_get_brightness(led_cdev); - if (timer_data->brightness_on == LED_OFF) - timer_data->brightness_on = led_cdev->max_brightness; - led_cdev->trigger_data = timer_data; - - init_timer(&timer_data->timer); - timer_data->timer.function = led_timer_function; - timer_data->timer.data = (unsigned long) led_cdev; + led_cdev->trigger_data = NULL; rc = device_create_file(led_cdev->dev, &dev_attr_delay_on); if (rc) - goto err_out; + return; rc = device_create_file(led_cdev->dev, &dev_attr_delay_off); if (rc) goto err_out_delayon; - /* If there is hardware support for blinking, start one - * user friendly blink rate chosen by the driver. - */ - if (led_cdev->blink_set) - led_cdev->blink_set(led_cdev, - &timer_data->delay_on, &timer_data->delay_off); + led_cdev->trigger_data = (void *)1; return; err_out_delayon: device_remove_file(led_cdev->dev, &dev_attr_delay_on); -err_out: - led_cdev->trigger_data = NULL; - kfree(timer_data); } static void timer_trig_deactivate(struct led_classdev *led_cdev) { - struct timer_trig_data *timer_data = led_cdev->trigger_data; - unsigned long on = 0, off = 0; - - if (timer_data) { + if (led_cdev->trigger_data) { device_remove_file(led_cdev->dev, &dev_attr_delay_on); device_remove_file(led_cdev->dev, &dev_attr_delay_off); - del_timer_sync(&timer_data->timer); - kfree(timer_data); } - /* If there is hardware support for blinking, stop it */ - if (led_cdev->blink_set) - led_cdev->blink_set(led_cdev, &on, &off); + /* Stop blinking */ + led_brightness_set(led_cdev, LED_OFF); } static struct led_trigger timer_led_trigger = { diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig index 3d7355ff730..fa51af11c6f 100644 --- a/drivers/macintosh/Kconfig +++ b/drivers/macintosh/Kconfig @@ -102,6 +102,7 @@ config ADB_PMU_LED config ADB_PMU_LED_IDE bool "Use front LED as IDE LED by default" depends on ADB_PMU_LED + depends on LEDS_CLASS select LEDS_TRIGGERS select LEDS_TRIGGER_IDE_DISK help diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c index 44469662517..f5f4da3d0b6 100644 --- a/drivers/macintosh/adb-iop.c +++ b/drivers/macintosh/adb-iop.c @@ -80,7 +80,7 @@ static void adb_iop_end_req(struct adb_request *req, int state) static void adb_iop_complete(struct iop_msg *msg) { struct adb_request *req; - uint flags; + unsigned long flags; local_irq_save(flags); @@ -103,7 +103,7 @@ static void adb_iop_listen(struct iop_msg *msg) { struct adb_iopmsg *amsg = (struct adb_iopmsg *) msg->message; struct adb_request *req; - uint flags; + unsigned long flags; #ifdef DEBUG_ADB_IOP int i; #endif diff --git a/drivers/md/dm-table.c 
b/drivers/md/dm-table.c index 90267f8d64e..4d705cea0f8 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -517,9 +517,8 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, */ if (q->merge_bvec_fn && !ti->type->merge) - limits->max_sectors = - min_not_zero(limits->max_sectors, - (unsigned int) (PAGE_SIZE >> 9)); + blk_limits_max_hw_sectors(limits, + (unsigned int) (PAGE_SIZE >> 9)); return 0; } EXPORT_SYMBOL_GPL(dm_set_device_limits); @@ -1131,11 +1130,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, */ q->limits = *limits; - if (limits->no_cluster) - queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); - else - queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q); - if (!dm_table_supports_discards(t)) queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); else diff --git a/drivers/md/md.c b/drivers/md/md.c index 4e957f3140a..175c424f201 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -371,10 +371,15 @@ static void md_end_flush(struct bio *bio, int err) bio_put(bio); } -static void submit_flushes(mddev_t *mddev) +static void md_submit_flush_data(struct work_struct *ws); + +static void submit_flushes(struct work_struct *ws) { + mddev_t *mddev = container_of(ws, mddev_t, flush_work); mdk_rdev_t *rdev; + INIT_WORK(&mddev->flush_work, md_submit_flush_data); + atomic_set(&mddev->flush_pending, 1); rcu_read_lock(); list_for_each_entry_rcu(rdev, &mddev->disks, same_set) if (rdev->raid_disk >= 0 && @@ -397,6 +402,8 @@ static void submit_flushes(mddev_t *mddev) rdev_dec_pending(rdev, mddev); } rcu_read_unlock(); + if (atomic_dec_and_test(&mddev->flush_pending)) + queue_work(md_wq, &mddev->flush_work); } static void md_submit_flush_data(struct work_struct *ws) @@ -404,8 +411,6 @@ static void md_submit_flush_data(struct work_struct *ws) mddev_t *mddev = container_of(ws, mddev_t, flush_work); struct bio *bio = mddev->flush_bio; - atomic_set(&mddev->flush_pending, 1); - if (bio->bi_size == 0) /* an empty barrier - all done */ bio_endio(bio, 0); @@ -414,10 +419,9 @@ static void md_submit_flush_data(struct work_struct *ws) if (mddev->pers->make_request(mddev, bio)) generic_make_request(bio); } - if (atomic_dec_and_test(&mddev->flush_pending)) { - mddev->flush_bio = NULL; - wake_up(&mddev->sb_wait); - } + + mddev->flush_bio = NULL; + wake_up(&mddev->sb_wait); } void md_flush_request(mddev_t *mddev, struct bio *bio) @@ -429,13 +433,8 @@ void md_flush_request(mddev_t *mddev, struct bio *bio) mddev->flush_bio = bio; spin_unlock_irq(&mddev->write_lock); - atomic_set(&mddev->flush_pending, 1); - INIT_WORK(&mddev->flush_work, md_submit_flush_data); - - submit_flushes(mddev); - - if (atomic_dec_and_test(&mddev->flush_pending)) - queue_work(md_wq, &mddev->flush_work); + INIT_WORK(&mddev->flush_work, submit_flushes); + queue_work(md_wq, &mddev->flush_work); } EXPORT_SYMBOL(md_flush_request); @@ -706,7 +705,7 @@ static struct mdk_personality *find_pers(int level, char *clevel) /* return the offset of the super block in 512byte sectors */ static inline sector_t calc_dev_sboffset(struct block_device *bdev) { - sector_t num_sectors = bdev->bd_inode->i_size / 512; + sector_t num_sectors = i_size_read(bdev->bd_inode) / 512; return MD_NEW_SIZE_SECTORS(num_sectors); } @@ -1337,7 +1336,7 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, rdev->sb_page); md_super_wait(rdev->mddev); - return num_sectors / 2; /* kB for sysfs */ + return num_sectors; } @@ -1386,7 +1385,7 @@ static int 
super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) */ switch(minor_version) { case 0: - sb_start = rdev->bdev->bd_inode->i_size >> 9; + sb_start = i_size_read(rdev->bdev->bd_inode) >> 9; sb_start -= 8*2; sb_start &= ~(sector_t)(4*2-1); break; @@ -1472,7 +1471,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) ret = 0; } if (minor_version) - rdev->sectors = (rdev->bdev->bd_inode->i_size >> 9) - + rdev->sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) - le64_to_cpu(sb->data_offset); else rdev->sectors = rdev->sb_start; @@ -1680,7 +1679,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) return 0; /* component must fit device */ if (rdev->sb_start < rdev->data_offset) { /* minor versions 1 and 2; superblock before data */ - max_sectors = rdev->bdev->bd_inode->i_size >> 9; + max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9; max_sectors -= rdev->data_offset; if (!num_sectors || num_sectors > max_sectors) num_sectors = max_sectors; @@ -1690,7 +1689,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) } else { /* minor version 0; superblock after data */ sector_t sb_start; - sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2; + sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2; sb_start &= ~(sector_t)(4*2 - 1); max_sectors = rdev->sectors + sb_start - rdev->sb_start; if (!num_sectors || num_sectors > max_sectors) @@ -1704,7 +1703,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, rdev->sb_page); md_super_wait(rdev->mddev); - return num_sectors / 2; /* kB for sysfs */ + return num_sectors; } static struct super_type super_types[] = { @@ -2584,7 +2583,7 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) if (!sectors) return -EBUSY; } else if (!sectors) - sectors = (rdev->bdev->bd_inode->i_size >> 9) - + sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) - rdev->data_offset; } if (sectors < my_mddev->dev_sectors) @@ -2797,7 +2796,7 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi kobject_init(&rdev->kobj, &rdev_ktype); - size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; + size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS; if (!size) { printk(KERN_WARNING "md: %s has zero or unknown size, marking faulty!\n", @@ -4296,9 +4295,6 @@ static int md_alloc(dev_t dev, char *name) goto abort; mddev->queue->queuedata = mddev; - /* Can be unlocked because the queue is new: no concurrency */ - queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue); - blk_queue_make_request(mddev->queue, md_make_request); disk = alloc_disk(1 << shift); @@ -4338,6 +4334,8 @@ static int md_alloc(dev_t dev, char *name) if (mddev->kobj.sd && sysfs_create_group(&mddev->kobj, &md_bitmap_group)) printk(KERN_DEBUG "pointless warning\n"); + + blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA); abort: mutex_unlock(&disks_mutex); if (!error && mddev->kobj.sd) { @@ -5158,7 +5156,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) PTR_ERR(rdev)); return PTR_ERR(rdev); } - /* set save_raid_disk if appropriate */ + /* set saved_raid_disk if appropriate */ if (!mddev->persistent) { if (info->state & (1<<MD_DISK_SYNC) && info->raid_disk < mddev->raid_disks) @@ -5168,7 +5166,10 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) } else super_types[mddev->major_version]. 
validate_super(mddev, rdev); - rdev->saved_raid_disk = rdev->raid_disk; + if (test_bit(In_sync, &rdev->flags)) + rdev->saved_raid_disk = rdev->raid_disk; + else + rdev->saved_raid_disk = -1; clear_bit(In_sync, &rdev->flags); /* just to be sure */ if (info->state & (1<<MD_DISK_WRITEMOSTLY)) @@ -5235,8 +5236,8 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) if (!mddev->persistent) { printk(KERN_INFO "md: nonpersistent superblock ...\n"); - rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; - } else + rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; + } else rdev->sb_start = calc_dev_sboffset(rdev->bdev); rdev->sectors = rdev->sb_start; @@ -5306,7 +5307,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev) if (mddev->persistent) rdev->sb_start = calc_dev_sboffset(rdev->bdev); else - rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; + rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; rdev->sectors = rdev->sb_start; @@ -6040,9 +6041,8 @@ static int md_thread(void * arg) || kthread_should_stop(), thread->timeout); - clear_bit(THREAD_WAKEUP, &thread->flags); - - thread->run(thread->mddev); + if (test_and_clear_bit(THREAD_WAKEUP, &thread->flags)) + thread->run(thread->mddev); } return 0; diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 45f8324196e..845cf95b612 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1161,6 +1161,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number) * is not possible. */ if (!test_bit(Faulty, &rdev->flags) && + !mddev->recovery_disabled && mddev->degraded < conf->raid_disks) { err = -EBUSY; goto abort; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index c67aa54694a..0641674827f 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -2397,13 +2397,13 @@ static int run(mddev_t *mddev) return 0; out_free_conf: + md_unregister_thread(mddev->thread); if (conf->r10bio_pool) mempool_destroy(conf->r10bio_pool); safe_put_page(conf->tmppage); kfree(conf->mirrors); kfree(conf); mddev->private = NULL; - md_unregister_thread(mddev->thread); out: return -EIO; } diff --git a/drivers/media/IR/ir-keytable.c b/drivers/media/IR/ir-keytable.c index 647d52b1a1b..f60107c3b09 100644 --- a/drivers/media/IR/ir-keytable.c +++ b/drivers/media/IR/ir-keytable.c @@ -389,6 +389,8 @@ static int ir_getkeycode(struct input_dev *dev, ke->len = sizeof(entry->scancode); memcpy(ke->scancode, &entry->scancode, sizeof(entry->scancode)); + retval = 0; + out: spin_unlock_irqrestore(&rc_tab->lock, flags); return retval; diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig index bad2cedb8d9..a28541b2b1a 100644 --- a/drivers/media/Kconfig +++ b/drivers/media/Kconfig @@ -19,7 +19,6 @@ comment "Multimedia core support" config VIDEO_DEV tristate "Video For Linux" - depends on BKL # used in many drivers for ioctl handling, need to kill ---help--- V4L core support for video capture and overlay devices, webcams and AM/FM radio cards. 
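The md.c hunks earlier in this patch rework flush handling so that md_flush_request() only queues work: the first work item (submit_flushes) issues the per-device flush bios and re-points mddev->flush_work at md_submit_flush_data(), which is queued once the last bio completes. Below is a minimal, hedged sketch of that counted hand-off pattern in kernel C; the demo_* names are illustrative and are not the md symbols.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>

struct demo_flush {
	struct work_struct work;	/* reused for both stages */
	atomic_t pending;		/* outstanding async requests + 1 */
};

static struct workqueue_struct *demo_wq;	/* allocated at module init */

static void demo_stage_two(struct work_struct *ws)
{
	/* every asynchronous request issued by stage one has completed */
}

static void demo_request_done(struct demo_flush *ctx)
{
	if (atomic_dec_and_test(&ctx->pending))
		queue_work(demo_wq, &ctx->work);
}

static void demo_stage_one(struct work_struct *ws)
{
	struct demo_flush *ctx = container_of(ws, struct demo_flush, work);
	int i;

	/* re-point the work item at stage two before issuing anything */
	INIT_WORK(&ctx->work, demo_stage_two);

	/* bias the counter so an early completion cannot start stage two */
	atomic_set(&ctx->pending, 1);
	for (i = 0; i < 4; i++) {
		atomic_inc(&ctx->pending);
		/* issue async request; its completion calls demo_request_done() */
	}
	demo_request_done(ctx);		/* drop the bias */
}

The initial bias of one mirrors the atomic_set(&mddev->flush_pending, 1) / atomic_dec_and_test() pairing in submit_flushes(): it keeps the second stage from running before every flush bio has actually been issued.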
diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c index 05bde9ccb77..1d1d8d20075 100644 --- a/drivers/media/common/saa7146_hlp.c +++ b/drivers/media/common/saa7146_hlp.c @@ -558,7 +558,7 @@ static void saa7146_set_window(struct saa7146_dev *dev, int width, int height, e static void saa7146_set_position(struct saa7146_dev *dev, int w_x, int w_y, int w_height, enum v4l2_field field, u32 pixelformat) { struct saa7146_vv *vv = dev->vv_data; - struct saa7146_format *sfmt = format_by_fourcc(dev, pixelformat); + struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev, pixelformat); int b_depth = vv->ov_fmt->depth; int b_bpl = vv->ov_fb.fmt.bytesperline; @@ -702,7 +702,7 @@ static int calculate_video_dma_grab_packed(struct saa7146_dev* dev, struct saa71 struct saa7146_vv *vv = dev->vv_data; struct saa7146_video_dma vdma1; - struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat); + struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat); int width = buf->fmt->width; int height = buf->fmt->height; @@ -827,7 +827,7 @@ static int calculate_video_dma_grab_planar(struct saa7146_dev* dev, struct saa71 struct saa7146_video_dma vdma2; struct saa7146_video_dma vdma3; - struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat); + struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat); int width = buf->fmt->width; int height = buf->fmt->height; @@ -994,7 +994,7 @@ static void program_capture_engine(struct saa7146_dev *dev, int planar) void saa7146_set_capture(struct saa7146_dev *dev, struct saa7146_buf *buf, struct saa7146_buf *next) { - struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat); + struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat); struct saa7146_vv *vv = dev->vv_data; u32 vdma1_prot_addr; diff --git a/drivers/media/common/saa7146_i2c.c b/drivers/media/common/saa7146_i2c.c index 3d88542612e..74ee172b5bc 100644 --- a/drivers/media/common/saa7146_i2c.c +++ b/drivers/media/common/saa7146_i2c.c @@ -391,7 +391,6 @@ static int saa7146_i2c_xfer(struct i2c_adapter* adapter, struct i2c_msg *msg, in /*****************************************************************************/ /* i2c-adapter helper functions */ -#include <linux/i2c-id.h> /* exported algorithm data */ static struct i2c_algorithm saa7146_algo = { diff --git a/drivers/media/common/saa7146_video.c b/drivers/media/common/saa7146_video.c index 741c5732b43..d246910129e 100644 --- a/drivers/media/common/saa7146_video.c +++ b/drivers/media/common/saa7146_video.c @@ -84,7 +84,7 @@ static struct saa7146_format formats[] = { static int NUM_FORMATS = sizeof(formats)/sizeof(struct saa7146_format); -struct saa7146_format* format_by_fourcc(struct saa7146_dev *dev, int fourcc) +struct saa7146_format* saa7146_format_by_fourcc(struct saa7146_dev *dev, int fourcc) { int i, j = NUM_FORMATS; @@ -266,7 +266,7 @@ static int saa7146_pgtable_build(struct saa7146_dev *dev, struct saa7146_buf *bu struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb); struct scatterlist *list = dma->sglist; int length = dma->sglen; - struct saa7146_format *sfmt = format_by_fourcc(dev,buf->fmt->pixelformat); + struct saa7146_format *sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat); DEB_EE(("dev:%p, buf:%p, sg_len:%d\n",dev,buf,length)); @@ -408,7 +408,7 @@ static int video_begin(struct saa7146_fh *fh) } } - fmt = format_by_fourcc(dev,fh->video_fmt.pixelformat); + fmt = 
saa7146_format_by_fourcc(dev,fh->video_fmt.pixelformat); /* we need to have a valid format set here */ BUG_ON(NULL == fmt); @@ -460,7 +460,7 @@ static int video_end(struct saa7146_fh *fh, struct file *file) return -EBUSY; } - fmt = format_by_fourcc(dev,fh->video_fmt.pixelformat); + fmt = saa7146_format_by_fourcc(dev,fh->video_fmt.pixelformat); /* we need to have a valid format set here */ BUG_ON(NULL == fmt); @@ -536,7 +536,7 @@ static int vidioc_s_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *f return -EPERM; /* check args */ - fmt = format_by_fourcc(dev, fb->fmt.pixelformat); + fmt = saa7146_format_by_fourcc(dev, fb->fmt.pixelformat); if (NULL == fmt) return -EINVAL; @@ -760,7 +760,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *fh, struct v4l2_forma DEB_EE(("V4L2_BUF_TYPE_VIDEO_CAPTURE: dev:%p, fh:%p\n", dev, fh)); - fmt = format_by_fourcc(dev, f->fmt.pix.pixelformat); + fmt = saa7146_format_by_fourcc(dev, f->fmt.pix.pixelformat); if (NULL == fmt) return -EINVAL; @@ -1264,7 +1264,7 @@ static int buffer_prepare(struct videobuf_queue *q, buf->fmt = &fh->video_fmt; buf->vb.field = fh->video_fmt.field; - sfmt = format_by_fourcc(dev,buf->fmt->pixelformat); + sfmt = saa7146_format_by_fourcc(dev,buf->fmt->pixelformat); release_all_pagetables(dev, buf); if( 0 != IS_PLANAR(sfmt->trans)) { @@ -1378,7 +1378,7 @@ static int video_open(struct saa7146_dev *dev, struct file *file) fh->video_fmt.pixelformat = V4L2_PIX_FMT_BGR24; fh->video_fmt.bytesperline = 0; fh->video_fmt.field = V4L2_FIELD_ANY; - sfmt = format_by_fourcc(dev,fh->video_fmt.pixelformat); + sfmt = saa7146_format_by_fourcc(dev,fh->video_fmt.pixelformat); fh->video_fmt.sizeimage = (fh->video_fmt.width * fh->video_fmt.height * sfmt->depth)/8; videobuf_queue_sg_init(&fh->video_q, &video_qops, diff --git a/drivers/media/common/tuners/Kconfig b/drivers/media/common/tuners/Kconfig index 2385e6cca63..78b089526e0 100644 --- a/drivers/media/common/tuners/Kconfig +++ b/drivers/media/common/tuners/Kconfig @@ -31,7 +31,7 @@ config MEDIA_TUNER select MEDIA_TUNER_TDA9887 if !MEDIA_TUNER_CUSTOMISE select MEDIA_TUNER_MC44S803 if !MEDIA_TUNER_CUSTOMISE -menuconfig MEDIA_TUNER_CUSTOMISE +config MEDIA_TUNER_CUSTOMISE bool "Customize analog and hybrid tuner modules to build" depends on MEDIA_TUNER default y if EMBEDDED @@ -44,7 +44,8 @@ menuconfig MEDIA_TUNER_CUSTOMISE If unsure say N. -if MEDIA_TUNER_CUSTOMISE +menu "Customize TV tuners" + visible if MEDIA_TUNER_CUSTOMISE config MEDIA_TUNER_SIMPLE tristate "Simple tuner support" @@ -185,5 +186,4 @@ config MEDIA_TUNER_TDA18218 default m if MEDIA_TUNER_CUSTOMISE help NXP TDA18218 silicon tuner driver. 
- -endif # MEDIA_TUNER_CUSTOMISE +endmenu diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c index 4d0646da608..7ea517b7e18 100644 --- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c +++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c @@ -36,7 +36,6 @@ #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/sched.h> -#include <linux/smp_lock.h> #include <linux/kthread.h> #include "dvb_ca_en50221.h" diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c index 1589d5a5cb4..cad6634610e 100644 --- a/drivers/media/dvb/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb/dvb-core/dvb_frontend.c @@ -36,7 +36,6 @@ #include <linux/list.h> #include <linux/freezer.h> #include <linux/jiffies.h> -#include <linux/smp_lock.h> #include <linux/kthread.h> #include <asm/processor.h> diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig index e9062b08a48..96b27016670 100644 --- a/drivers/media/dvb/frontends/Kconfig +++ b/drivers/media/dvb/frontends/Kconfig @@ -12,9 +12,8 @@ config DVB_FE_CUSTOMISE If unsure say N. -if DVB_FE_CUSTOMISE - menu "Customise DVB Frontends" + visible if DVB_FE_CUSTOMISE comment "Multistandard (satellite) frontends" depends on DVB_CORE @@ -619,5 +618,3 @@ config DVB_DUMMY_FE tristate "Dummy frontend driver" default n endmenu - -endif diff --git a/drivers/media/dvb/frontends/dibx000_common.c b/drivers/media/dvb/frontends/dibx000_common.c index a4991026254..2311c0a3406 100644 --- a/drivers/media/dvb/frontends/dibx000_common.c +++ b/drivers/media/dvb/frontends/dibx000_common.c @@ -130,6 +130,7 @@ static int i2c_adapter_init(struct i2c_adapter *i2c_adap, struct dibx000_i2c_master *mst) { strncpy(i2c_adap->name, name, sizeof(i2c_adap->name)); + i2c_adap->algo = algo; i2c_adap->algo_data = NULL; i2c_set_adapdata(i2c_adap, mst); if (i2c_add_adapter(i2c_adap) < 0) diff --git a/drivers/media/dvb/ngene/ngene-core.c b/drivers/media/dvb/ngene/ngene-core.c index 4caeb163a66..3a7ef71087b 100644 --- a/drivers/media/dvb/ngene/ngene-core.c +++ b/drivers/media/dvb/ngene/ngene-core.c @@ -34,7 +34,6 @@ #include <linux/io.h> #include <asm/div64.h> #include <linux/pci.h> -#include <linux/smp_lock.h> #include <linux/timer.h> #include <linux/byteorder/generic.h> #include <linux/firmware.h> diff --git a/drivers/media/dvb/ngene/ngene-dvb.c b/drivers/media/dvb/ngene/ngene-dvb.c index 48f980b21d6..3832e5983c1 100644 --- a/drivers/media/dvb/ngene/ngene-dvb.c +++ b/drivers/media/dvb/ngene/ngene-dvb.c @@ -35,7 +35,6 @@ #include <linux/io.h> #include <asm/div64.h> #include <linux/pci.h> -#include <linux/smp_lock.h> #include <linux/timer.h> #include <linux/byteorder/generic.h> #include <linux/firmware.h> diff --git a/drivers/media/dvb/ngene/ngene-i2c.c b/drivers/media/dvb/ngene/ngene-i2c.c index c3ae956714e..d28554f8ce9 100644 --- a/drivers/media/dvb/ngene/ngene-i2c.c +++ b/drivers/media/dvb/ngene/ngene-i2c.c @@ -37,7 +37,6 @@ #include <asm/div64.h> #include <linux/pci.h> #include <linux/pci_ids.h> -#include <linux/smp_lock.h> #include <linux/timer.h> #include <linux/byteorder/generic.h> #include <linux/firmware.h> diff --git a/drivers/media/radio/radio-aimslab.c b/drivers/media/radio/radio-aimslab.c index 5bf4985daed..05e832f61c3 100644 --- a/drivers/media/radio/radio-aimslab.c +++ b/drivers/media/radio/radio-aimslab.c @@ -361,7 +361,7 @@ static int vidioc_s_audio(struct file *file, void *priv, static const struct v4l2_file_operations rtrack_fops = { .owner = THIS_MODULE, - .ioctl = 
video_ioctl2, + .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops rtrack_ioctl_ops = { @@ -412,13 +412,6 @@ static int __init rtrack_init(void) rt->vdev.release = video_device_release_empty; video_set_drvdata(&rt->vdev, rt); - if (video_register_device(&rt->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { - v4l2_device_unregister(&rt->v4l2_dev); - release_region(rt->io, 2); - return -EINVAL; - } - v4l2_info(v4l2_dev, "AIMSlab RadioTrack/RadioReveal card driver.\n"); - /* Set up the I/O locking */ mutex_init(&rt->lock); @@ -430,6 +423,13 @@ static int __init rtrack_init(void) sleep_delay(2000000); /* make sure it's totally down */ outb(0xc0, rt->io); /* steady volume, mute card */ + if (video_register_device(&rt->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { + v4l2_device_unregister(&rt->v4l2_dev); + release_region(rt->io, 2); + return -EINVAL; + } + v4l2_info(v4l2_dev, "AIMSlab RadioTrack/RadioReveal card driver.\n"); + return 0; } diff --git a/drivers/media/radio/radio-aztech.c b/drivers/media/radio/radio-aztech.c index c2231139362..dd8a6ab0d43 100644 --- a/drivers/media/radio/radio-aztech.c +++ b/drivers/media/radio/radio-aztech.c @@ -324,7 +324,7 @@ static int vidioc_s_ctrl(struct file *file, void *priv, static const struct v4l2_file_operations aztech_fops = { .owner = THIS_MODULE, - .ioctl = video_ioctl2, + .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops aztech_ioctl_ops = { @@ -375,6 +375,8 @@ static int __init aztech_init(void) az->vdev.ioctl_ops = &aztech_ioctl_ops; az->vdev.release = video_device_release_empty; video_set_drvdata(&az->vdev, az); + /* mute card - prevents noisy bootups */ + outb(0, az->io); if (video_register_device(&az->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { v4l2_device_unregister(v4l2_dev); @@ -383,8 +385,6 @@ static int __init aztech_init(void) } v4l2_info(v4l2_dev, "Aztech radio card driver v1.00/19990224 rkroll@exploits.org\n"); - /* mute card - prevents noisy bootups */ - outb(0, az->io); return 0; } diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c index b701ea6e7c7..bc9ad0897c5 100644 --- a/drivers/media/radio/radio-cadet.c +++ b/drivers/media/radio/radio-cadet.c @@ -328,11 +328,10 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo unsigned char readbuf[RDS_BUFFER]; int i = 0; + mutex_lock(&dev->lock); if (dev->rdsstat == 0) { - mutex_lock(&dev->lock); dev->rdsstat = 1; outb(0x80, dev->io); /* Select RDS fifo */ - mutex_unlock(&dev->lock); init_timer(&dev->readtimer); dev->readtimer.function = cadet_handler; dev->readtimer.data = (unsigned long)dev; @@ -340,12 +339,15 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo add_timer(&dev->readtimer); } if (dev->rdsin == dev->rdsout) { + mutex_unlock(&dev->lock); if (file->f_flags & O_NONBLOCK) return -EWOULDBLOCK; interruptible_sleep_on(&dev->read_queue); + mutex_lock(&dev->lock); } while (i < count && dev->rdsin != dev->rdsout) readbuf[i++] = dev->rdsbuf[dev->rdsout++]; + mutex_unlock(&dev->lock); if (copy_to_user(data, readbuf, i)) return -EFAULT; @@ -525,9 +527,11 @@ static int cadet_open(struct file *file) { struct cadet *dev = video_drvdata(file); + mutex_lock(&dev->lock); dev->users++; if (1 == dev->users) init_waitqueue_head(&dev->read_queue); + mutex_unlock(&dev->lock); return 0; } @@ -535,11 +539,13 @@ static int cadet_release(struct file *file) { struct cadet *dev = video_drvdata(file); + mutex_lock(&dev->lock); dev->users--; if (0 == dev->users) { del_timer_sync(&dev->readtimer); 
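/*
 * A recurring pattern in the radio-driver hunks around here: with .ioctl
 * replaced by .unlocked_ioctl there is no BKL covering a half-initialized
 * device, so the init paths are reordered to mute the hardware and finish
 * all setup *before* video_register_device(), and only then print the
 * banner.  Hedged sketch of that ordering; struct demo_radio and
 * demo_hw_mute() are illustrative names, not taken from any driver here.
 */
#include <linux/mutex.h>
#include <media/v4l2-dev.h>

struct demo_radio {
	struct video_device vdev;	/* name/fops/release set up as above */
	struct mutex lock;
	int io;
};

static void demo_hw_mute(struct demo_radio *dev)
{
	/* program the card's mute/volume latch at dev->io */
}

static int demo_radio_setup(struct demo_radio *dev)
{
	int ret;

	mutex_init(&dev->lock);
	demo_hw_mute(dev);		/* hardware quiet before the node exists */

	ret = video_register_device(&dev->vdev, VFL_TYPE_RADIO, -1);
	if (ret < 0)
		return ret;		/* nothing registered, nothing to undo */

	/* from here on, ioctls may already be running against dev */
	return 0;
}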
dev->rdsstat = 0; } + mutex_unlock(&dev->lock); return 0; } @@ -559,7 +565,7 @@ static const struct v4l2_file_operations cadet_fops = { .open = cadet_open, .release = cadet_release, .read = cadet_read, - .ioctl = video_ioctl2, + .unlocked_ioctl = video_ioctl2, .poll = cadet_poll, }; diff --git a/drivers/media/radio/radio-gemtek-pci.c b/drivers/media/radio/radio-gemtek-pci.c index 79039674a0e..28fa85ba208 100644 --- a/drivers/media/radio/radio-gemtek-pci.c +++ b/drivers/media/radio/radio-gemtek-pci.c @@ -361,7 +361,7 @@ MODULE_DEVICE_TABLE(pci, gemtek_pci_id); static const struct v4l2_file_operations gemtek_pci_fops = { .owner = THIS_MODULE, - .ioctl = video_ioctl2, + .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops gemtek_pci_ioctl_ops = { @@ -422,11 +422,11 @@ static int __devinit gemtek_pci_probe(struct pci_dev *pdev, const struct pci_dev card->vdev.release = video_device_release_empty; video_set_drvdata(&card->vdev, card); + gemtek_pci_mute(card); + if (video_register_device(&card->vdev, VFL_TYPE_RADIO, nr_radio) < 0) goto err_video; - gemtek_pci_mute(card); - v4l2_info(v4l2_dev, "Gemtek PCI Radio (rev. %d) found at 0x%04x-0x%04x.\n", pdev->revision, card->iobase, card->iobase + card->length - 1); diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c index 73985f641f0..259936422e4 100644 --- a/drivers/media/radio/radio-gemtek.c +++ b/drivers/media/radio/radio-gemtek.c @@ -378,7 +378,7 @@ static int gemtek_probe(struct gemtek *gt) static const struct v4l2_file_operations gemtek_fops = { .owner = THIS_MODULE, - .ioctl = video_ioctl2, + .unlocked_ioctl = video_ioctl2, }; static int vidioc_querycap(struct file *file, void *priv, @@ -577,12 +577,6 @@ static int __init gemtek_init(void) gt->vdev.release = video_device_release_empty; video_set_drvdata(>->vdev, gt); - if (video_register_device(>->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { - v4l2_device_unregister(v4l2_dev); - release_region(gt->io, 1); - return -EBUSY; - } - /* Set defaults */ gt->lastfreq = GEMTEK_LOWFREQ; gt->bu2614data = 0; @@ -590,6 +584,12 @@ static int __init gemtek_init(void) if (initmute) gemtek_mute(gt); + if (video_register_device(>->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { + v4l2_device_unregister(v4l2_dev); + release_region(gt->io, 1); + return -EBUSY; + } + return 0; } diff --git a/drivers/media/radio/radio-maestro.c b/drivers/media/radio/radio-maestro.c index 08f1051979c..6af61bfeb17 100644 --- a/drivers/media/radio/radio-maestro.c +++ b/drivers/media/radio/radio-maestro.c @@ -299,7 +299,7 @@ static int vidioc_s_audio(struct file *file, void *priv, static const struct v4l2_file_operations maestro_fops = { .owner = THIS_MODULE, - .ioctl = video_ioctl2, + .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops maestro_ioctl_ops = { @@ -383,22 +383,20 @@ static int __devinit maestro_probe(struct pci_dev *pdev, dev->vdev.release = video_device_release_empty; video_set_drvdata(&dev->vdev, dev); + if (!radio_power_on(dev)) { + retval = -EIO; + goto errfr1; + } + retval = video_register_device(&dev->vdev, VFL_TYPE_RADIO, radio_nr); if (retval) { v4l2_err(v4l2_dev, "can't register video device!\n"); goto errfr1; } - if (!radio_power_on(dev)) { - retval = -EIO; - goto errunr; - } - v4l2_info(v4l2_dev, "version " DRIVER_VERSION "\n"); return 0; -errunr: - video_unregister_device(&dev->vdev); errfr1: v4l2_device_unregister(v4l2_dev); errfr: diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c index 255d40df4b4..6459a220b0d 
100644 --- a/drivers/media/radio/radio-maxiradio.c +++ b/drivers/media/radio/radio-maxiradio.c @@ -346,7 +346,7 @@ static int vidioc_s_ctrl(struct file *file, void *priv, static const struct v4l2_file_operations maxiradio_fops = { .owner = THIS_MODULE, - .ioctl = video_ioctl2, + .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops maxiradio_ioctl_ops = { diff --git a/drivers/media/radio/radio-miropcm20.c b/drivers/media/radio/radio-miropcm20.c index 4ff885445fd..3fb76e3834c 100644 --- a/drivers/media/radio/radio-miropcm20.c +++ b/drivers/media/radio/radio-miropcm20.c @@ -33,6 +33,7 @@ struct pcm20 { unsigned long freq; int muted; struct snd_miro_aci *aci; + struct mutex lock; }; static struct pcm20 pcm20_card = { @@ -72,7 +73,7 @@ static int pcm20_setfreq(struct pcm20 *dev, unsigned long freq) static const struct v4l2_file_operations pcm20_fops = { .owner = THIS_MODULE, - .ioctl = video_ioctl2, + .unlocked_ioctl = video_ioctl2, }; static int vidioc_querycap(struct file *file, void *priv, @@ -229,7 +230,7 @@ static int __init pcm20_init(void) return -ENODEV; } strlcpy(v4l2_dev->name, "miropcm20", sizeof(v4l2_dev->name)); - + mutex_init(&dev->lock); res = v4l2_device_register(NULL, v4l2_dev); if (res < 0) { @@ -242,6 +243,7 @@ static int __init pcm20_init(void) dev->vdev.fops = &pcm20_fops; dev->vdev.ioctl_ops = &pcm20_ioctl_ops; dev->vdev.release = video_device_release_empty; + dev->vdev.lock = &dev->lock; video_set_drvdata(&dev->vdev, dev); if (video_register_device(&dev->vdev, VFL_TYPE_RADIO, radio_nr) < 0) diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c index b540e8072e9..e6b2d085a44 100644 --- a/drivers/media/radio/radio-mr800.c +++ b/drivers/media/radio/radio-mr800.c @@ -58,7 +58,6 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/input.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> diff --git a/drivers/media/radio/radio-rtrack2.c b/drivers/media/radio/radio-rtrack2.c index a79296aac9a..8d6ea591bd1 100644 --- a/drivers/media/radio/radio-rtrack2.c +++ b/drivers/media/radio/radio-rtrack2.c @@ -266,7 +266,7 @@ static int vidioc_s_audio(struct file *file, void *priv, static const struct v4l2_file_operations rtrack2_fops = { .owner = THIS_MODULE, - .ioctl = video_ioctl2, + .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops rtrack2_ioctl_ops = { @@ -315,6 +315,10 @@ static int __init rtrack2_init(void) dev->vdev.release = video_device_release_empty; video_set_drvdata(&dev->vdev, dev); + /* mute card - prevents noisy bootups */ + outb(1, dev->io); + dev->muted = 1; + mutex_init(&dev->lock); if (video_register_device(&dev->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { v4l2_device_unregister(v4l2_dev); @@ -324,10 +328,6 @@ static int __init rtrack2_init(void) v4l2_info(v4l2_dev, "AIMSlab Radiotrack II card driver.\n"); - /* mute card - prevents noisy bootups */ - outb(1, dev->io); - dev->muted = 1; - return 0; } diff --git a/drivers/media/radio/radio-sf16fmi.c b/drivers/media/radio/radio-sf16fmi.c index 985359d18aa..b5a5f89e238 100644 --- a/drivers/media/radio/radio-sf16fmi.c +++ b/drivers/media/radio/radio-sf16fmi.c @@ -260,7 +260,7 @@ static int vidioc_s_audio(struct file *file, void *priv, static const struct v4l2_file_operations fmi_fops = { .owner = THIS_MODULE, - .ioctl = video_ioctl2, + .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops fmi_ioctl_ops = { @@ -382,6 +382,9 @@ static int __init fmi_init(void) 
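/*
 * radio-miropcm20 and radio-timb in this series add a per-device mutex and
 * point video_device::lock at it; the V4L2 core then serializes the file
 * operations through that lock, which is what lets these drivers move to
 * .unlocked_ioctl = video_ioctl2 (and lets radio-tea5764, further below,
 * drop its hand-rolled open()/release() exclusive-access counting).
 * Hedged sketch with illustrative demo_* names:
 */
#include <linux/module.h>
#include <linux/mutex.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>

struct demo_dev {
	struct video_device vdev;
	struct mutex lock;
};

static const struct v4l2_file_operations demo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= video_ioctl2,	/* serialized via vdev.lock */
};

static int demo_register(struct demo_dev *dev)
{
	mutex_init(&dev->lock);
	dev->vdev.fops = &demo_fops;
	dev->vdev.lock = &dev->lock;	/* one lock covers every fop */
	dev->vdev.release = video_device_release_empty;
	return video_register_device(&dev->vdev, VFL_TYPE_RADIO, -1);
}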
mutex_init(&fmi->lock); + /* mute card - prevents noisy bootups */ + fmi_mute(fmi); + if (video_register_device(&fmi->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { v4l2_device_unregister(v4l2_dev); release_region(fmi->io, 2); @@ -391,8 +394,6 @@ static int __init fmi_init(void) } v4l2_info(v4l2_dev, "card driver at 0x%x\n", fmi->io); - /* mute card - prevents noisy bootups */ - fmi_mute(fmi); return 0; } diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c index 52c7bbb32b8..dc3f04c52d5 100644 --- a/drivers/media/radio/radio-sf16fmr2.c +++ b/drivers/media/radio/radio-sf16fmr2.c @@ -376,7 +376,7 @@ static int vidioc_s_audio(struct file *file, void *priv, static const struct v4l2_file_operations fmr2_fops = { .owner = THIS_MODULE, - .ioctl = video_ioctl2, + .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops fmr2_ioctl_ops = { @@ -424,6 +424,10 @@ static int __init fmr2_init(void) fmr2->vdev.release = video_device_release_empty; video_set_drvdata(&fmr2->vdev, fmr2); + /* mute card - prevents noisy bootups */ + fmr2_mute(fmr2->io); + fmr2_product_info(fmr2); + if (video_register_device(&fmr2->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { v4l2_device_unregister(v4l2_dev); release_region(fmr2->io, 2); @@ -431,11 +435,6 @@ static int __init fmr2_init(void) } v4l2_info(v4l2_dev, "SF16FMR2 radio card driver at 0x%x.\n", fmr2->io); - /* mute card - prevents noisy bootups */ - mutex_lock(&fmr2->lock); - fmr2_mute(fmr2->io); - fmr2_product_info(fmr2); - mutex_unlock(&fmr2->lock); debug_print((KERN_DEBUG "card_type %d\n", fmr2->card_type)); return 0; } diff --git a/drivers/media/radio/radio-si4713.c b/drivers/media/radio/radio-si4713.c index 6a435786b63..726d367ad8d 100644 --- a/drivers/media/radio/radio-si4713.c +++ b/drivers/media/radio/radio-si4713.c @@ -53,7 +53,8 @@ struct radio_si4713_device { /* radio_si4713_fops - file operations interface */ static const struct v4l2_file_operations radio_si4713_fops = { .owner = THIS_MODULE, - .ioctl = video_ioctl2, + /* Note: locking is done at the subdev level in the i2c driver. 
*/ + .unlocked_ioctl = video_ioctl2, }; /* Video4Linux Interface */ @@ -291,7 +292,7 @@ static int radio_si4713_pdriver_probe(struct platform_device *pdev) goto unregister_v4l2_dev; } - sd = v4l2_i2c_new_subdev_board(&rsdev->v4l2_dev, adapter, NULL, + sd = v4l2_i2c_new_subdev_board(&rsdev->v4l2_dev, adapter, pdata->subdev_board_info, NULL); if (!sd) { dev_err(&pdev->dev, "Cannot get v4l2 subdevice\n"); diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c index 789d2ec66e1..0e71d816c72 100644 --- a/drivers/media/radio/radio-tea5764.c +++ b/drivers/media/radio/radio-tea5764.c @@ -142,7 +142,6 @@ struct tea5764_device { struct video_device *videodev; struct tea5764_regs regs; struct mutex mutex; - int users; }; /* I2C code related */ @@ -458,41 +457,10 @@ static int vidioc_s_audio(struct file *file, void *priv, return 0; } -static int tea5764_open(struct file *file) -{ - /* Currently we support only one device */ - struct tea5764_device *radio = video_drvdata(file); - - mutex_lock(&radio->mutex); - /* Only exclusive access */ - if (radio->users) { - mutex_unlock(&radio->mutex); - return -EBUSY; - } - radio->users++; - mutex_unlock(&radio->mutex); - file->private_data = radio; - return 0; -} - -static int tea5764_close(struct file *file) -{ - struct tea5764_device *radio = video_drvdata(file); - - if (!radio) - return -ENODEV; - mutex_lock(&radio->mutex); - radio->users--; - mutex_unlock(&radio->mutex); - return 0; -} - /* File system interface */ static const struct v4l2_file_operations tea5764_fops = { .owner = THIS_MODULE, - .open = tea5764_open, - .release = tea5764_close, - .ioctl = video_ioctl2, + .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops tea5764_ioctl_ops = { @@ -527,7 +495,7 @@ static int __devinit tea5764_i2c_probe(struct i2c_client *client, int ret; PDEBUG("probe"); - radio = kmalloc(sizeof(struct tea5764_device), GFP_KERNEL); + radio = kzalloc(sizeof(struct tea5764_device), GFP_KERNEL); if (!radio) return -ENOMEM; @@ -555,12 +523,7 @@ static int __devinit tea5764_i2c_probe(struct i2c_client *client, i2c_set_clientdata(client, radio); video_set_drvdata(radio->videodev, radio); - - ret = video_register_device(radio->videodev, VFL_TYPE_RADIO, radio_nr); - if (ret < 0) { - PWARN("Could not register video device!"); - goto errrel; - } + radio->videodev->lock = &radio->mutex; /* initialize and power off the chip */ tea5764_i2c_read(radio); @@ -568,6 +531,12 @@ static int __devinit tea5764_i2c_probe(struct i2c_client *client, tea5764_mute(radio, 1); tea5764_power_down(radio); + ret = video_register_device(radio->videodev, VFL_TYPE_RADIO, radio_nr); + if (ret < 0) { + PWARN("Could not register video device!"); + goto errrel; + } + PINFO("registered."); return 0; errrel: diff --git a/drivers/media/radio/radio-terratec.c b/drivers/media/radio/radio-terratec.c index fc1c860fd43..a3266391705 100644 --- a/drivers/media/radio/radio-terratec.c +++ b/drivers/media/radio/radio-terratec.c @@ -338,7 +338,7 @@ static int vidioc_s_audio(struct file *file, void *priv, static const struct v4l2_file_operations terratec_fops = { .owner = THIS_MODULE, - .ioctl = video_ioctl2, + .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops terratec_ioctl_ops = { @@ -389,6 +389,9 @@ static int __init terratec_init(void) mutex_init(&tt->lock); + /* mute card - prevents noisy bootups */ + tt_write_vol(tt, 0); + if (video_register_device(&tt->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { v4l2_device_unregister(&tt->v4l2_dev); release_region(tt->io, 
2); @@ -396,9 +399,6 @@ static int __init terratec_init(void) } v4l2_info(v4l2_dev, "TERRATEC ActivRadio Standalone card driver.\n"); - - /* mute card - prevents noisy bootups */ - tt_write_vol(tt, 0); return 0; } diff --git a/drivers/media/radio/radio-timb.c b/drivers/media/radio/radio-timb.c index b8bb3ef47df..a185610b376 100644 --- a/drivers/media/radio/radio-timb.c +++ b/drivers/media/radio/radio-timb.c @@ -34,6 +34,7 @@ struct timbradio { struct v4l2_subdev *sd_dsp; struct video_device video_dev; struct v4l2_device v4l2_dev; + struct mutex lock; }; @@ -142,7 +143,7 @@ static const struct v4l2_ioctl_ops timbradio_ioctl_ops = { static const struct v4l2_file_operations timbradio_fops = { .owner = THIS_MODULE, - .ioctl = video_ioctl2, + .unlocked_ioctl = video_ioctl2, }; static int __devinit timbradio_probe(struct platform_device *pdev) @@ -164,6 +165,7 @@ static int __devinit timbradio_probe(struct platform_device *pdev) } tr->pdata = *pdata; + mutex_init(&tr->lock); strlcpy(tr->video_dev.name, "Timberdale Radio", sizeof(tr->video_dev.name)); @@ -171,6 +173,7 @@ static int __devinit timbradio_probe(struct platform_device *pdev) tr->video_dev.ioctl_ops = &timbradio_ioctl_ops; tr->video_dev.release = video_device_release_empty; tr->video_dev.minor = -1; + tr->video_dev.lock = &tr->lock; strlcpy(tr->v4l2_dev.name, DRIVER_NAME, sizeof(tr->v4l2_dev.name)); err = v4l2_device_register(NULL, &tr->v4l2_dev); diff --git a/drivers/media/radio/radio-trust.c b/drivers/media/radio/radio-trust.c index 9d6dcf8af5b..22fa9cc28ab 100644 --- a/drivers/media/radio/radio-trust.c +++ b/drivers/media/radio/radio-trust.c @@ -344,7 +344,7 @@ static int vidioc_s_audio(struct file *file, void *priv, static const struct v4l2_file_operations trust_fops = { .owner = THIS_MODULE, - .ioctl = video_ioctl2, + .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops trust_ioctl_ops = { @@ -396,14 +396,6 @@ static int __init trust_init(void) tr->vdev.release = video_device_release_empty; video_set_drvdata(&tr->vdev, tr); - if (video_register_device(&tr->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { - v4l2_device_unregister(v4l2_dev); - release_region(tr->io, 2); - return -EINVAL; - } - - v4l2_info(v4l2_dev, "Trust FM Radio card driver v1.0.\n"); - write_i2c(tr, 2, TDA7318_ADDR, 0x80); /* speaker att. LF = 0 dB */ write_i2c(tr, 2, TDA7318_ADDR, 0xa0); /* speaker att. RF = 0 dB */ write_i2c(tr, 2, TDA7318_ADDR, 0xc0); /* speaker att. 
LR = 0 dB */ @@ -418,6 +410,14 @@ static int __init trust_init(void) /* mute card - prevents noisy bootups */ tr_setmute(tr, 1); + if (video_register_device(&tr->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { + v4l2_device_unregister(v4l2_dev); + release_region(tr->io, 2); + return -EINVAL; + } + + v4l2_info(v4l2_dev, "Trust FM Radio card driver v1.0.\n"); + return 0; } diff --git a/drivers/media/radio/radio-typhoon.c b/drivers/media/radio/radio-typhoon.c index b1f630527dc..8dbbf08f220 100644 --- a/drivers/media/radio/radio-typhoon.c +++ b/drivers/media/radio/radio-typhoon.c @@ -317,7 +317,7 @@ static int vidioc_log_status(struct file *file, void *priv) static const struct v4l2_file_operations typhoon_fops = { .owner = THIS_MODULE, - .ioctl = video_ioctl2, + .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops typhoon_ioctl_ops = { @@ -344,18 +344,18 @@ static int __init typhoon_init(void) strlcpy(v4l2_dev->name, "typhoon", sizeof(v4l2_dev->name)); dev->io = io; - dev->curfreq = dev->mutefreq = mutefreq; if (dev->io == -1) { v4l2_err(v4l2_dev, "You must set an I/O address with io=0x316 or io=0x336\n"); return -EINVAL; } - if (dev->mutefreq < 87000 || dev->mutefreq > 108500) { + if (mutefreq < 87000 || mutefreq > 108500) { v4l2_err(v4l2_dev, "You must set a frequency (in kHz) used when muting the card,\n"); v4l2_err(v4l2_dev, "e.g. with \"mutefreq=87500\" (87000 <= mutefreq <= 108500)\n"); return -EINVAL; } + dev->curfreq = dev->mutefreq = mutefreq << 4; mutex_init(&dev->lock); if (!request_region(dev->io, 8, "typhoon")) { @@ -378,17 +378,17 @@ static int __init typhoon_init(void) dev->vdev.ioctl_ops = &typhoon_ioctl_ops; dev->vdev.release = video_device_release_empty; video_set_drvdata(&dev->vdev, dev); + + /* mute card - prevents noisy bootups */ + typhoon_mute(dev); + if (video_register_device(&dev->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { v4l2_device_unregister(&dev->v4l2_dev); release_region(dev->io, 8); return -EINVAL; } v4l2_info(v4l2_dev, "port 0x%x.\n", dev->io); - v4l2_info(v4l2_dev, "mute frequency is %lu kHz.\n", dev->mutefreq); - dev->mutefreq <<= 4; - - /* mute card - prevents noisy bootups */ - typhoon_mute(dev); + v4l2_info(v4l2_dev, "mute frequency is %lu kHz.\n", mutefreq); return 0; } diff --git a/drivers/media/radio/radio-zoltrix.c b/drivers/media/radio/radio-zoltrix.c index f31eab99c94..af99c5bd88c 100644 --- a/drivers/media/radio/radio-zoltrix.c +++ b/drivers/media/radio/radio-zoltrix.c @@ -377,7 +377,7 @@ static int vidioc_s_audio(struct file *file, void *priv, static const struct v4l2_file_operations zoltrix_fops = { .owner = THIS_MODULE, - .ioctl = video_ioctl2, + .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops zoltrix_ioctl_ops = { @@ -424,20 +424,6 @@ static int __init zoltrix_init(void) return res; } - strlcpy(zol->vdev.name, v4l2_dev->name, sizeof(zol->vdev.name)); - zol->vdev.v4l2_dev = v4l2_dev; - zol->vdev.fops = &zoltrix_fops; - zol->vdev.ioctl_ops = &zoltrix_ioctl_ops; - zol->vdev.release = video_device_release_empty; - video_set_drvdata(&zol->vdev, zol); - - if (video_register_device(&zol->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { - v4l2_device_unregister(v4l2_dev); - release_region(zol->io, 2); - return -EINVAL; - } - v4l2_info(v4l2_dev, "Zoltrix Radio Plus card driver.\n"); - mutex_init(&zol->lock); /* mute card - prevents noisy bootups */ @@ -452,6 +438,20 @@ static int __init zoltrix_init(void) zol->curvol = 0; zol->stereo = 1; + strlcpy(zol->vdev.name, v4l2_dev->name, sizeof(zol->vdev.name)); + zol->vdev.v4l2_dev = v4l2_dev; 
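/*
 * The radio-typhoon hunk above moves the range check onto the raw module
 * parameter (in kHz) and only afterwards stores the shifted value the
 * driver keeps internally (mutefreq << 4), so both the validation and the
 * printed message refer to the user-supplied number.  Minimal illustration
 * of that order; the demo_* names are hypothetical.
 */
#include <linux/errno.h>

static unsigned long demo_mutefreq = 87500;	/* module parameter, in kHz */

static int demo_validate(unsigned long *stored)
{
	if (demo_mutefreq < 87000 || demo_mutefreq > 108500)
		return -EINVAL;			/* reject before any conversion */

	*stored = demo_mutefreq << 4;		/* driver-internal scaled units */
	return 0;
}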
+ zol->vdev.fops = &zoltrix_fops; + zol->vdev.ioctl_ops = &zoltrix_ioctl_ops; + zol->vdev.release = video_device_release_empty; + video_set_drvdata(&zol->vdev, zol); + + if (video_register_device(&zol->vdev, VFL_TYPE_RADIO, radio_nr) < 0) { + v4l2_device_unregister(v4l2_dev); + release_region(zol->io, 2); + return -EINVAL; + } + v4l2_info(v4l2_dev, "Zoltrix Radio Plus card driver.\n"); + return 0; } diff --git a/drivers/media/radio/si470x/radio-si470x.h b/drivers/media/radio/si470x/radio-si470x.h index ea12782359a..b9914d7a0c9 100644 --- a/drivers/media/radio/si470x/radio-si470x.h +++ b/drivers/media/radio/si470x/radio-si470x.h @@ -31,7 +31,6 @@ #include <linux/init.h> #include <linux/sched.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/input.h> #include <linux/version.h> #include <linux/videodev2.h> diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig index ac16e815e27..6830d2848bd 100644 --- a/drivers/media/video/Kconfig +++ b/drivers/media/video/Kconfig @@ -112,7 +112,7 @@ config VIDEO_IR_I2C # menu "Encoders/decoders and other helper chips" - depends on !VIDEO_HELPER_CHIPS_AUTO + visible if !VIDEO_HELPER_CHIPS_AUTO comment "Audio decoders" diff --git a/drivers/media/video/arv.c b/drivers/media/video/arv.c index 31e7a123d19..f989f2820d8 100644 --- a/drivers/media/video/arv.c +++ b/drivers/media/video/arv.c @@ -712,7 +712,7 @@ static int ar_initialize(struct ar *ar) static const struct v4l2_file_operations ar_fops = { .owner = THIS_MODULE, .read = ar_read, - .ioctl = video_ioctl2, + .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops ar_ioctl_ops = { diff --git a/drivers/media/video/au0828/au0828-cards.c b/drivers/media/video/au0828/au0828-cards.c index 0453816d4ec..01be89fa5c7 100644 --- a/drivers/media/video/au0828/au0828-cards.c +++ b/drivers/media/video/au0828/au0828-cards.c @@ -212,7 +212,7 @@ void au0828_card_setup(struct au0828_dev *dev) be abstracted out if we ever need to support a different demod) */ sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - NULL, "au8522", 0x8e >> 1, NULL); + "au8522", 0x8e >> 1, NULL); if (sd == NULL) printk(KERN_ERR "analog subdev registration failed\n"); } @@ -221,7 +221,7 @@ void au0828_card_setup(struct au0828_dev *dev) if (dev->board.tuner_type != TUNER_ABSENT) { /* Load the tuner module, which does the attach */ sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - NULL, "tuner", dev->board.tuner_addr, NULL); + "tuner", dev->board.tuner_addr, NULL); if (sd == NULL) printk(KERN_ERR "tuner subdev registration fail\n"); diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c index 87d8b006ef7..49efcf660ba 100644 --- a/drivers/media/video/bt8xx/bttv-cards.c +++ b/drivers/media/video/bt8xx/bttv-cards.c @@ -3529,7 +3529,7 @@ void __devinit bttv_init_card2(struct bttv *btv) struct v4l2_subdev *sd; sd = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, NULL, "saa6588", 0, addrs); + &btv->c.i2c_adap, "saa6588", 0, addrs); btv->has_saa6588 = (sd != NULL); } @@ -3554,7 +3554,7 @@ void __devinit bttv_init_card2(struct bttv *btv) }; btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, NULL, "msp3400", 0, addrs); + &btv->c.i2c_adap, "msp3400", 0, addrs); if (btv->sd_msp34xx) return; goto no_audio; @@ -3568,7 +3568,7 @@ void __devinit bttv_init_card2(struct bttv *btv) }; if (v4l2_i2c_new_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, NULL, "tda7432", 0, addrs)) + &btv->c.i2c_adap, "tda7432", 0, addrs)) return; goto 
no_audio; } @@ -3576,7 +3576,7 @@ void __devinit bttv_init_card2(struct bttv *btv) case 3: { /* The user specified that we should probe for tvaudio */ btv->sd_tvaudio = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, NULL, "tvaudio", 0, tvaudio_addrs()); + &btv->c.i2c_adap, "tvaudio", 0, tvaudio_addrs()); if (btv->sd_tvaudio) return; goto no_audio; @@ -3596,11 +3596,11 @@ void __devinit bttv_init_card2(struct bttv *btv) found is really something else (e.g. a tea6300). */ if (!bttv_tvcards[btv->c.type].no_msp34xx) { btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, NULL, "msp3400", + &btv->c.i2c_adap, "msp3400", 0, I2C_ADDRS(I2C_ADDR_MSP3400 >> 1)); } else if (bttv_tvcards[btv->c.type].msp34xx_alt) { btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, NULL, "msp3400", + &btv->c.i2c_adap, "msp3400", 0, I2C_ADDRS(I2C_ADDR_MSP3400_ALT >> 1)); } @@ -3616,13 +3616,13 @@ void __devinit bttv_init_card2(struct bttv *btv) }; if (v4l2_i2c_new_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, NULL, "tda7432", 0, addrs)) + &btv->c.i2c_adap, "tda7432", 0, addrs)) return; } /* Now see if we can find one of the tvaudio devices. */ btv->sd_tvaudio = v4l2_i2c_new_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, NULL, "tvaudio", 0, tvaudio_addrs()); + &btv->c.i2c_adap, "tvaudio", 0, tvaudio_addrs()); if (btv->sd_tvaudio) return; @@ -3646,13 +3646,13 @@ void __devinit bttv_init_tuner(struct bttv *btv) /* Load tuner module before issuing tuner config call! */ if (bttv_tvcards[btv->c.type].has_radio) v4l2_i2c_new_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, NULL, "tuner", + &btv->c.i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_RADIO)); v4l2_i2c_new_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, NULL, "tuner", + &btv->c.i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); v4l2_i2c_new_subdev(&btv->c.v4l2_dev, - &btv->c.i2c_adap, NULL, "tuner", + &btv->c.i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_TV_WITH_DEMOD)); tun_setup.mode_mask = T_ANALOG_TV | T_DIGITAL_TV; diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c index 3da6e80e104..0902ec041c7 100644 --- a/drivers/media/video/bt8xx/bttv-driver.c +++ b/drivers/media/video/bt8xx/bttv-driver.c @@ -42,7 +42,6 @@ #include <linux/fs.h> #include <linux/kernel.h> #include <linux/sched.h> -#include <linux/smp_lock.h> #include <linux/interrupt.h> #include <linux/kdev_t.h> #include "bttvp.h" @@ -855,7 +854,6 @@ int check_alloc_btres_lock(struct bttv *btv, struct bttv_fh *fh, int bit) xbits |= RESOURCE_VIDEO_READ | RESOURCE_VIDEO_STREAM; /* is it free? */ - mutex_lock(&btv->lock); if (btv->resources & xbits) { /* no, someone else uses it */ goto fail; @@ -885,11 +883,9 @@ int check_alloc_btres_lock(struct bttv *btv, struct bttv_fh *fh, int bit) /* it's free, grab it */ fh->resources |= bit; btv->resources |= bit; - mutex_unlock(&btv->lock); return 1; fail: - mutex_unlock(&btv->lock); return 0; } @@ -941,7 +937,6 @@ void free_btres_lock(struct bttv *btv, struct bttv_fh *fh, int bits) /* trying to free ressources not allocated by us ... */ printk("bttv: BUG! 
(btres)\n"); } - mutex_lock(&btv->lock); fh->resources &= ~bits; btv->resources &= ~bits; @@ -952,8 +947,6 @@ void free_btres_lock(struct bttv *btv, struct bttv_fh *fh, int bits) if (0 == (bits & VBI_RESOURCES)) disclaim_vbi_lines(btv); - - mutex_unlock(&btv->lock); } /* ----------------------------------------------------------------------- */ @@ -1714,28 +1707,20 @@ static int bttv_prepare_buffer(struct videobuf_queue *q,struct bttv *btv, /* Make sure tvnorm and vbi_end remain consistent until we're done. */ - mutex_lock(&btv->lock); norm = btv->tvnorm; /* In this mode capturing always starts at defrect.top (default VDELAY), ignoring cropping parameters. */ if (btv->vbi_end > bttv_tvnorms[norm].cropcap.defrect.top) { - mutex_unlock(&btv->lock); return -EINVAL; } - mutex_unlock(&btv->lock); - c.rect = bttv_tvnorms[norm].cropcap.defrect; } else { - mutex_lock(&btv->lock); - norm = btv->tvnorm; c = btv->crop[!!fh->do_crop]; - mutex_unlock(&btv->lock); - if (width < c.min_scaled_width || width > c.max_scaled_width || height < c.min_scaled_height) @@ -1859,7 +1844,6 @@ static int bttv_s_std(struct file *file, void *priv, v4l2_std_id *id) unsigned int i; int err; - mutex_lock(&btv->lock); err = v4l2_prio_check(&btv->prio, fh->prio); if (err) goto err; @@ -1875,7 +1859,6 @@ static int bttv_s_std(struct file *file, void *priv, v4l2_std_id *id) set_tvnorm(btv, i); err: - mutex_unlock(&btv->lock); return err; } @@ -1899,7 +1882,6 @@ static int bttv_enum_input(struct file *file, void *priv, struct bttv *btv = fh->btv; int rc = 0; - mutex_lock(&btv->lock); if (i->index >= bttv_tvcards[btv->c.type].video_inputs) { rc = -EINVAL; goto err; @@ -1929,7 +1911,6 @@ static int bttv_enum_input(struct file *file, void *priv, i->std = BTTV_NORMS; err: - mutex_unlock(&btv->lock); return rc; } @@ -1939,9 +1920,7 @@ static int bttv_g_input(struct file *file, void *priv, unsigned int *i) struct bttv_fh *fh = priv; struct bttv *btv = fh->btv; - mutex_lock(&btv->lock); *i = btv->input; - mutex_unlock(&btv->lock); return 0; } @@ -1953,7 +1932,6 @@ static int bttv_s_input(struct file *file, void *priv, unsigned int i) int err; - mutex_lock(&btv->lock); err = v4l2_prio_check(&btv->prio, fh->prio); if (unlikely(err)) goto err; @@ -1966,7 +1944,6 @@ static int bttv_s_input(struct file *file, void *priv, unsigned int i) set_input(btv, i, btv->tvnorm); err: - mutex_unlock(&btv->lock); return 0; } @@ -1980,7 +1957,6 @@ static int bttv_s_tuner(struct file *file, void *priv, if (unlikely(0 != t->index)) return -EINVAL; - mutex_lock(&btv->lock); if (unlikely(btv->tuner_type == TUNER_ABSENT)) { err = -EINVAL; goto err; @@ -1996,7 +1972,6 @@ static int bttv_s_tuner(struct file *file, void *priv, btv->audio_mode_gpio(btv, t, 1); err: - mutex_unlock(&btv->lock); return 0; } @@ -2007,10 +1982,8 @@ static int bttv_g_frequency(struct file *file, void *priv, struct bttv_fh *fh = priv; struct bttv *btv = fh->btv; - mutex_lock(&btv->lock); f->type = btv->radio_user ? 
V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV; f->frequency = btv->freq; - mutex_unlock(&btv->lock); return 0; } @@ -2025,7 +1998,6 @@ static int bttv_s_frequency(struct file *file, void *priv, if (unlikely(f->tuner != 0)) return -EINVAL; - mutex_lock(&btv->lock); err = v4l2_prio_check(&btv->prio, fh->prio); if (unlikely(err)) goto err; @@ -2040,7 +2012,6 @@ static int bttv_s_frequency(struct file *file, void *priv, if (btv->has_matchbox && btv->radio_user) tea5757_set_freq(btv, btv->freq); err: - mutex_unlock(&btv->lock); return 0; } @@ -2173,7 +2144,6 @@ limit_scaled_size_lock (struct bttv_fh * fh, /* Make sure tvnorm, vbi_end and the current cropping parameters remain consistent until we're done. */ - mutex_lock(&btv->lock); b = &bttv_tvnorms[btv->tvnorm].cropcap.bounds; @@ -2251,7 +2221,6 @@ limit_scaled_size_lock (struct bttv_fh * fh, rc = 0; /* success */ fail: - mutex_unlock(&btv->lock); return rc; } @@ -2283,9 +2252,7 @@ verify_window_lock (struct bttv_fh * fh, if (V4L2_FIELD_ANY == field) { __s32 height2; - mutex_lock(&fh->btv->lock); height2 = fh->btv->crop[!!fh->do_crop].rect.height >> 1; - mutex_unlock(&fh->btv->lock); field = (win->w.height > height2) ? V4L2_FIELD_INTERLACED : V4L2_FIELD_TOP; @@ -2361,7 +2328,6 @@ static int setup_window_lock(struct bttv_fh *fh, struct bttv *btv, } } - mutex_lock(&fh->cap.vb_lock); /* clip against screen */ if (NULL != btv->fbuf.base) n = btcx_screen_clips(btv->fbuf.fmt.width, btv->fbuf.fmt.height, @@ -2392,13 +2358,6 @@ static int setup_window_lock(struct bttv_fh *fh, struct bttv *btv, fh->ov.field = win->field; fh->ov.setup_ok = 1; - /* - * FIXME: btv is protected by btv->lock mutex, while btv->init - * is protected by fh->cap.vb_lock. This seems to open the - * possibility for some race situations. Maybe the better would - * be to unify those locks or to use another way to store the - * init values that will be consumed by videobuf callbacks - */ btv->init.ov.w.width = win->w.width; btv->init.ov.w.height = win->w.height; btv->init.ov.field = win->field; @@ -2413,7 +2372,6 @@ static int setup_window_lock(struct bttv_fh *fh, struct bttv *btv, bttv_overlay_risc(btv, &fh->ov, fh->ovfmt, new); retval = bttv_switch_overlay(btv,fh,new); } - mutex_unlock(&fh->cap.vb_lock); return retval; } @@ -2527,9 +2485,7 @@ static int bttv_try_fmt_vid_cap(struct file *file, void *priv, if (V4L2_FIELD_ANY == field) { __s32 height2; - mutex_lock(&btv->lock); height2 = btv->crop[!!fh->do_crop].rect.height >> 1; - mutex_unlock(&btv->lock); field = (f->fmt.pix.height > height2) ? 
V4L2_FIELD_INTERLACED : V4L2_FIELD_BOTTOM; @@ -2615,7 +2571,6 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv, fmt = format_by_fourcc(f->fmt.pix.pixelformat); /* update our state informations */ - mutex_lock(&fh->cap.vb_lock); fh->fmt = fmt; fh->cap.field = f->fmt.pix.field; fh->cap.last = V4L2_FIELD_NONE; @@ -2624,7 +2579,6 @@ static int bttv_s_fmt_vid_cap(struct file *file, void *priv, btv->init.fmt = fmt; btv->init.width = f->fmt.pix.width; btv->init.height = f->fmt.pix.height; - mutex_unlock(&fh->cap.vb_lock); return 0; } @@ -2650,11 +2604,9 @@ static int vidiocgmbuf(struct file *file, void *priv, struct video_mbuf *mbuf) unsigned int i; struct bttv_fh *fh = priv; - mutex_lock(&fh->cap.vb_lock); retval = __videobuf_mmap_setup(&fh->cap, gbuffers, gbufsize, V4L2_MEMORY_MMAP); if (retval < 0) { - mutex_unlock(&fh->cap.vb_lock); return retval; } @@ -2666,7 +2618,6 @@ static int vidiocgmbuf(struct file *file, void *priv, struct video_mbuf *mbuf) for (i = 0; i < gbuffers; i++) mbuf->offsets[i] = i * gbufsize; - mutex_unlock(&fh->cap.vb_lock); return 0; } #endif @@ -2776,10 +2727,8 @@ static int bttv_overlay(struct file *file, void *f, unsigned int on) int retval = 0; if (on) { - mutex_lock(&fh->cap.vb_lock); /* verify args */ if (unlikely(!btv->fbuf.base)) { - mutex_unlock(&fh->cap.vb_lock); return -EINVAL; } if (unlikely(!fh->ov.setup_ok)) { @@ -2788,13 +2737,11 @@ static int bttv_overlay(struct file *file, void *f, unsigned int on) } if (retval) return retval; - mutex_unlock(&fh->cap.vb_lock); } if (!check_alloc_btres_lock(btv, fh, RESOURCE_OVERLAY)) return -EBUSY; - mutex_lock(&fh->cap.vb_lock); if (on) { fh->ov.tvnorm = btv->tvnorm; new = videobuf_sg_alloc(sizeof(*new)); @@ -2806,7 +2753,6 @@ static int bttv_overlay(struct file *file, void *f, unsigned int on) /* switch over */ retval = bttv_switch_overlay(btv, fh, new); - mutex_unlock(&fh->cap.vb_lock); return retval; } @@ -2845,7 +2791,6 @@ static int bttv_s_fbuf(struct file *file, void *f, } /* ok, accept it */ - mutex_lock(&fh->cap.vb_lock); btv->fbuf.base = fb->base; btv->fbuf.fmt.width = fb->fmt.width; btv->fbuf.fmt.height = fb->fmt.height; @@ -2877,7 +2822,6 @@ static int bttv_s_fbuf(struct file *file, void *f, retval = bttv_switch_overlay(btv, fh, new); } } - mutex_unlock(&fh->cap.vb_lock); return retval; } @@ -2956,7 +2900,6 @@ static int bttv_queryctrl(struct file *file, void *priv, c->id >= V4L2_CID_PRIVATE_LASTP1)) return -EINVAL; - mutex_lock(&btv->lock); if (!btv->volume_gpio && (c->id == V4L2_CID_AUDIO_VOLUME)) *c = no_ctl; else { @@ -2964,7 +2907,6 @@ static int bttv_queryctrl(struct file *file, void *priv, *c = (NULL != ctrl) ? 
*ctrl : no_ctl; } - mutex_unlock(&btv->lock); return 0; } @@ -2975,10 +2917,8 @@ static int bttv_g_parm(struct file *file, void *f, struct bttv_fh *fh = f; struct bttv *btv = fh->btv; - mutex_lock(&btv->lock); v4l2_video_std_frame_period(bttv_tvnorms[btv->tvnorm].v4l2_id, &parm->parm.capture.timeperframe); - mutex_unlock(&btv->lock); return 0; } @@ -2994,7 +2934,6 @@ static int bttv_g_tuner(struct file *file, void *priv, if (0 != t->index) return -EINVAL; - mutex_lock(&btv->lock); t->rxsubchans = V4L2_TUNER_SUB_MONO; bttv_call_all(btv, tuner, g_tuner, t); strcpy(t->name, "Television"); @@ -3006,7 +2945,6 @@ static int bttv_g_tuner(struct file *file, void *priv, if (btv->audio_mode_gpio) btv->audio_mode_gpio(btv, t, 0); - mutex_unlock(&btv->lock); return 0; } @@ -3015,9 +2953,7 @@ static int bttv_g_priority(struct file *file, void *f, enum v4l2_priority *p) struct bttv_fh *fh = f; struct bttv *btv = fh->btv; - mutex_lock(&btv->lock); *p = v4l2_prio_max(&btv->prio); - mutex_unlock(&btv->lock); return 0; } @@ -3029,9 +2965,7 @@ static int bttv_s_priority(struct file *file, void *f, struct bttv *btv = fh->btv; int rc; - mutex_lock(&btv->lock); rc = v4l2_prio_change(&btv->prio, &fh->prio, prio); - mutex_unlock(&btv->lock); return rc; } @@ -3046,9 +2980,7 @@ static int bttv_cropcap(struct file *file, void *priv, cap->type != V4L2_BUF_TYPE_VIDEO_OVERLAY) return -EINVAL; - mutex_lock(&btv->lock); *cap = bttv_tvnorms[btv->tvnorm].cropcap; - mutex_unlock(&btv->lock); return 0; } @@ -3066,9 +2998,7 @@ static int bttv_g_crop(struct file *file, void *f, struct v4l2_crop *crop) inconsistent with fh->width or fh->height and apps do not expect a change here. */ - mutex_lock(&btv->lock); crop->c = btv->crop[!!fh->do_crop].rect; - mutex_unlock(&btv->lock); return 0; } @@ -3092,17 +3022,14 @@ static int bttv_s_crop(struct file *file, void *f, struct v4l2_crop *crop) /* Make sure tvnorm, vbi_end and the current cropping parameters remain consistent until we're done. Note read() may change vbi_end in check_alloc_btres_lock(). 
*/ - mutex_lock(&btv->lock); retval = v4l2_prio_check(&btv->prio, fh->prio); if (0 != retval) { - mutex_unlock(&btv->lock); return retval; } retval = -EBUSY; if (locked_btres(fh->btv, VIDEO_RESOURCES)) { - mutex_unlock(&btv->lock); return retval; } @@ -3114,7 +3041,6 @@ static int bttv_s_crop(struct file *file, void *f, struct v4l2_crop *crop) b_top = max(b->top, btv->vbi_end); if (b_top + 32 >= b_bottom) { - mutex_unlock(&btv->lock); return retval; } @@ -3137,12 +3063,8 @@ static int bttv_s_crop(struct file *file, void *f, struct v4l2_crop *crop) btv->crop[1] = c; - mutex_unlock(&btv->lock); - fh->do_crop = 1; - mutex_lock(&fh->cap.vb_lock); - if (fh->width < c.min_scaled_width) { fh->width = c.min_scaled_width; btv->init.width = c.min_scaled_width; @@ -3159,8 +3081,6 @@ static int bttv_s_crop(struct file *file, void *f, struct v4l2_crop *crop) btv->init.height = c.max_scaled_height; } - mutex_unlock(&fh->cap.vb_lock); - return 0; } @@ -3228,7 +3148,6 @@ static unsigned int bttv_poll(struct file *file, poll_table *wait) return videobuf_poll_stream(file, &fh->vbi, wait); } - mutex_lock(&fh->cap.vb_lock); if (check_btres(fh,RESOURCE_VIDEO_STREAM)) { /* streaming capture */ if (list_empty(&fh->cap.stream)) @@ -3263,7 +3182,6 @@ static unsigned int bttv_poll(struct file *file, poll_table *wait) else rc = 0; err: - mutex_unlock(&fh->cap.vb_lock); return rc; } @@ -3294,23 +3212,11 @@ static int bttv_open(struct file *file) return -ENOMEM; file->private_data = fh; - /* - * btv is protected by btv->lock mutex, while btv->init and other - * streaming vars are protected by fh->cap.vb_lock. We need to take - * care of both locks to avoid troubles. However, vb_lock is used also - * inside videobuf, without calling buf->lock. So, it is a very bad - * idea to hold both locks at the same time. - * Let's first copy btv->init at fh, holding cap.vb_lock, and then work - * with the rest of init, holding btv->lock. 
- */ - mutex_lock(&fh->cap.vb_lock); *fh = btv->init; - mutex_unlock(&fh->cap.vb_lock); fh->type = type; fh->ov.setup_ok = 0; - mutex_lock(&btv->lock); v4l2_prio_open(&btv->prio, &fh->prio); videobuf_queue_sg_init(&fh->cap, &bttv_video_qops, @@ -3318,13 +3224,13 @@ static int bttv_open(struct file *file) V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_INTERLACED, sizeof(struct bttv_buffer), - fh, NULL); + fh, &btv->lock); videobuf_queue_sg_init(&fh->vbi, &bttv_vbi_qops, &btv->c.pci->dev, &btv->s_lock, V4L2_BUF_TYPE_VBI_CAPTURE, V4L2_FIELD_SEQ_TB, sizeof(struct bttv_buffer), - fh, NULL); + fh, &btv->lock); set_tvnorm(btv,btv->tvnorm); set_input(btv, btv->input, btv->tvnorm); @@ -3347,7 +3253,6 @@ static int bttv_open(struct file *file) bttv_vbi_fmt_reset(&fh->vbi_fmt, btv->tvnorm); bttv_field_count(btv); - mutex_unlock(&btv->lock); return 0; } @@ -3356,7 +3261,6 @@ static int bttv_release(struct file *file) struct bttv_fh *fh = file->private_data; struct bttv *btv = fh->btv; - mutex_lock(&btv->lock); /* turn off overlay */ if (check_btres(fh, RESOURCE_OVERLAY)) bttv_switch_overlay(btv,fh,NULL); @@ -3382,14 +3286,8 @@ static int bttv_release(struct file *file) /* free stuff */ - /* - * videobuf uses cap.vb_lock - we should avoid holding btv->lock, - * otherwise we may have dead lock conditions - */ - mutex_unlock(&btv->lock); videobuf_mmap_free(&fh->cap); videobuf_mmap_free(&fh->vbi); - mutex_lock(&btv->lock); v4l2_prio_close(&btv->prio, fh->prio); file->private_data = NULL; kfree(fh); @@ -3399,7 +3297,6 @@ static int bttv_release(struct file *file) if (!btv->users) audio_mute(btv, 1); - mutex_unlock(&btv->lock); return 0; } @@ -3503,11 +3400,8 @@ static int radio_open(struct file *file) if (unlikely(!fh)) return -ENOMEM; file->private_data = fh; - mutex_lock(&fh->cap.vb_lock); *fh = btv->init; - mutex_unlock(&fh->cap.vb_lock); - mutex_lock(&btv->lock); v4l2_prio_open(&btv->prio, &fh->prio); btv->radio_user++; @@ -3515,7 +3409,6 @@ static int radio_open(struct file *file) bttv_call_all(btv, tuner, s_radio); audio_input(btv,TVAUDIO_INPUT_RADIO); - mutex_unlock(&btv->lock); return 0; } @@ -3525,7 +3418,6 @@ static int radio_release(struct file *file) struct bttv *btv = fh->btv; struct rds_command cmd; - mutex_lock(&btv->lock); v4l2_prio_close(&btv->prio, fh->prio); file->private_data = NULL; kfree(fh); @@ -3533,7 +3425,6 @@ static int radio_release(struct file *file) btv->radio_user--; bttv_call_all(btv, core, ioctl, RDS_CMD_CLOSE, &cmd); - mutex_unlock(&btv->lock); return 0; } @@ -3562,7 +3453,6 @@ static int radio_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t) return -EINVAL; if (0 != t->index) return -EINVAL; - mutex_lock(&btv->lock); strcpy(t->name, "Radio"); t->type = V4L2_TUNER_RADIO; @@ -3571,8 +3461,6 @@ static int radio_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t) if (btv->audio_mode_gpio) btv->audio_mode_gpio(btv, t, 0); - mutex_unlock(&btv->lock); - return 0; } @@ -3693,7 +3581,7 @@ static const struct v4l2_file_operations radio_fops = .open = radio_open, .read = radio_read, .release = radio_release, - .ioctl = video_ioctl2, + .unlocked_ioctl = video_ioctl2, .poll = radio_poll, }; diff --git a/drivers/media/video/bw-qcam.c b/drivers/media/video/bw-qcam.c index 935e0c9a967..c1193506131 100644 --- a/drivers/media/video/bw-qcam.c +++ b/drivers/media/video/bw-qcam.c @@ -860,7 +860,7 @@ static ssize_t qcam_read(struct file *file, char __user *buf, static const struct v4l2_file_operations qcam_fops = { .owner = THIS_MODULE, - .ioctl = video_ioctl2, + .unlocked_ioctl 
= video_ioctl2, .read = qcam_read, }; diff --git a/drivers/media/video/c-qcam.c b/drivers/media/video/c-qcam.c index 6e4b19698c1..24fc00965a1 100644 --- a/drivers/media/video/c-qcam.c +++ b/drivers/media/video/c-qcam.c @@ -718,7 +718,7 @@ static ssize_t qcam_read(struct file *file, char __user *buf, static const struct v4l2_file_operations qcam_fops = { .owner = THIS_MODULE, - .ioctl = video_ioctl2, + .unlocked_ioctl = video_ioctl2, .read = qcam_read, }; diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c index 2934770dacc..0dfff50891e 100644 --- a/drivers/media/video/cafe_ccic.c +++ b/drivers/media/video/cafe_ccic.c @@ -1775,7 +1775,7 @@ static const struct v4l2_file_operations cafe_v4l_fops = { .read = cafe_v4l_read, .poll = cafe_v4l_poll, .mmap = cafe_v4l_mmap, - .ioctl = video_ioctl2, + .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops cafe_v4l_ioctl_ops = { @@ -2065,8 +2065,8 @@ static int cafe_pci_probe(struct pci_dev *pdev, sensor_cfg.clock_speed = 45; cam->sensor_addr = 0x42; - cam->sensor = v4l2_i2c_new_subdev(&cam->v4l2_dev, &cam->i2c_adapter, - NULL, "ov7670", cam->sensor_addr, NULL); + cam->sensor = v4l2_i2c_new_subdev_cfg(&cam->v4l2_dev, &cam->i2c_adapter, + "ov7670", 0, &sensor_cfg, cam->sensor_addr, NULL); if (cam->sensor == NULL) { ret = -ENODEV; goto out_smbus; diff --git a/drivers/media/video/cx18/cx18-alsa-pcm.c b/drivers/media/video/cx18/cx18-alsa-pcm.c index 8f55692db36..82d195be919 100644 --- a/drivers/media/video/cx18/cx18-alsa-pcm.c +++ b/drivers/media/video/cx18/cx18-alsa-pcm.c @@ -218,7 +218,13 @@ static int snd_cx18_pcm_capture_close(struct snd_pcm_substream *substream) static int snd_cx18_pcm_ioctl(struct snd_pcm_substream *substream, unsigned int cmd, void *arg) { - return snd_pcm_lib_ioctl(substream, cmd, arg); + struct snd_cx18_card *cxsc = snd_pcm_substream_chip(substream); + int ret; + + snd_cx18_lock(cxsc); + ret = snd_pcm_lib_ioctl(substream, cmd, arg); + snd_cx18_unlock(cxsc); + return ret; } diff --git a/drivers/media/video/cx18/cx18-i2c.c b/drivers/media/video/cx18/cx18-i2c.c index a09caf88317..e71a026f341 100644 --- a/drivers/media/video/cx18/cx18-i2c.c +++ b/drivers/media/video/cx18/cx18-i2c.c @@ -122,15 +122,15 @@ int cx18_i2c_register(struct cx18 *cx, unsigned idx) if (hw == CX18_HW_TUNER) { /* special tuner group handling */ sd = v4l2_i2c_new_subdev(&cx->v4l2_dev, - adap, NULL, type, 0, cx->card_i2c->radio); + adap, type, 0, cx->card_i2c->radio); if (sd != NULL) sd->grp_id = hw; sd = v4l2_i2c_new_subdev(&cx->v4l2_dev, - adap, NULL, type, 0, cx->card_i2c->demod); + adap, type, 0, cx->card_i2c->demod); if (sd != NULL) sd->grp_id = hw; sd = v4l2_i2c_new_subdev(&cx->v4l2_dev, - adap, NULL, type, 0, cx->card_i2c->tv); + adap, type, 0, cx->card_i2c->tv); if (sd != NULL) sd->grp_id = hw; return sd != NULL ? 
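/*
 * Illustrative sketch, not part of this patch: the recurring conversion in
 * the hunks around here drops the BKL-serialized .ioctl handler in favour of
 * .unlocked_ioctl, with serialization pushed into the driver -- either by
 * wrapping the call in a driver lock (as snd_cx18_pcm_ioctl now does) or by
 * handing the V4L2 core a per-device mutex, as sh_vou does further down.
 * The foo_* names below are hypothetical.
 */
static const struct v4l2_file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= video_ioctl2,	/* was: .ioctl = video_ioctl2 */
};

static int foo_register(struct foo_dev *dev)
{
	mutex_init(&dev->lock);
	dev->vdev->fops = &foo_fops;
	dev->vdev->lock = &dev->lock;	/* v4l2 core serializes fops on this */
	return video_register_device(dev->vdev, VFL_TYPE_GRABBER, -1);
}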
0 : -1; @@ -144,7 +144,7 @@ int cx18_i2c_register(struct cx18 *cx, unsigned idx) return -1; /* It's an I2C device other than an analog tuner or IR chip */ - sd = v4l2_i2c_new_subdev(&cx->v4l2_dev, adap, NULL, type, hw_addrs[idx], + sd = v4l2_i2c_new_subdev(&cx->v4l2_dev, adap, type, hw_addrs[idx], NULL); if (sd != NULL) sd->grp_id = hw; diff --git a/drivers/media/video/cx18/cx18-streams.c b/drivers/media/video/cx18/cx18-streams.c index 9045f1ece0e..ab461e27d9d 100644 --- a/drivers/media/video/cx18/cx18-streams.c +++ b/drivers/media/video/cx18/cx18-streams.c @@ -41,7 +41,7 @@ static struct v4l2_file_operations cx18_v4l2_enc_fops = { .read = cx18_v4l2_read, .open = cx18_v4l2_open, /* FIXME change to video_ioctl2 if serialization lock can be removed */ - .ioctl = cx18_v4l2_ioctl, + .unlocked_ioctl = cx18_v4l2_ioctl, .release = cx18_v4l2_close, .poll = cx18_v4l2_enc_poll, }; diff --git a/drivers/media/video/cx231xx/cx231xx-417.c b/drivers/media/video/cx231xx/cx231xx-417.c index aab21f3ce47..4c7cac3b625 100644 --- a/drivers/media/video/cx231xx/cx231xx-417.c +++ b/drivers/media/video/cx231xx/cx231xx-417.c @@ -31,7 +31,6 @@ #include <linux/delay.h> #include <linux/device.h> #include <linux/firmware.h> -#include <linux/smp_lock.h> #include <linux/vmalloc.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> @@ -1927,10 +1926,9 @@ static int mpeg_open(struct file *file) dev = h; } - if (dev == NULL) { - unlock_kernel(); + if (dev == NULL) return -ENODEV; - } + mutex_lock(&dev->lock); /* allocate + initialize per filehandle data */ diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c index 56c2d8195ac..2c78d188bb0 100644 --- a/drivers/media/video/cx231xx/cx231xx-cards.c +++ b/drivers/media/video/cx231xx/cx231xx-cards.c @@ -560,7 +560,7 @@ void cx231xx_card_setup(struct cx231xx *dev) if (dev->board.decoder == CX231XX_AVDECODER) { dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_bus[0].i2c_adap, - NULL, "cx25840", 0x88 >> 1, NULL); + "cx25840", 0x88 >> 1, NULL); if (dev->sd_cx25840 == NULL) cx231xx_info("cx25840 subdev registration failure\n"); cx25840_call(dev, core, load_fw); @@ -571,7 +571,7 @@ void cx231xx_card_setup(struct cx231xx *dev) if (dev->board.tuner_type != TUNER_ABSENT) { dev->sd_tuner = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap, - NULL, "tuner", + "tuner", dev->tuner_addr, NULL); if (dev->sd_tuner == NULL) cx231xx_info("tuner subdev registration failure\n"); diff --git a/drivers/media/video/cx23885/cx23885-417.c b/drivers/media/video/cx23885/cx23885-417.c index a6cc12f8736..9a98dc55f65 100644 --- a/drivers/media/video/cx23885/cx23885-417.c +++ b/drivers/media/video/cx23885/cx23885-417.c @@ -31,7 +31,6 @@ #include <linux/delay.h> #include <linux/device.h> #include <linux/firmware.h> -#include <linux/smp_lock.h> #include <linux/slab.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> @@ -1576,12 +1575,8 @@ static int mpeg_open(struct file *file) /* allocate + initialize per filehandle data */ fh = kzalloc(sizeof(*fh), GFP_KERNEL); - if (NULL == fh) { - unlock_kernel(); + if (!fh) return -ENOMEM; - } - - lock_kernel(); file->private_data = fh; fh->dev = dev; @@ -1592,8 +1587,6 @@ static int mpeg_open(struct file *file) V4L2_FIELD_INTERLACED, sizeof(struct cx23885_buffer), fh, NULL); - unlock_kernel(); - return 0; } diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c index db054004e46..8861309268b 100644 --- 
a/drivers/media/video/cx23885/cx23885-cards.c +++ b/drivers/media/video/cx23885/cx23885-cards.c @@ -1247,7 +1247,7 @@ void cx23885_card_setup(struct cx23885_dev *dev) case CX23885_BOARD_LEADTEK_WINFAST_PXTV1200: dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_bus[2].i2c_adap, - NULL, "cx25840", 0x88 >> 1, NULL); + "cx25840", 0x88 >> 1, NULL); if (dev->sd_cx25840) { dev->sd_cx25840->grp_id = CX23885_HW_AV_CORE; v4l2_subdev_call(dev->sd_cx25840, core, load_fw); diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c index 93af9c65b48..8b2fb8a4375 100644 --- a/drivers/media/video/cx23885/cx23885-video.c +++ b/drivers/media/video/cx23885/cx23885-video.c @@ -26,7 +26,6 @@ #include <linux/kmod.h> #include <linux/kernel.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/kthread.h> @@ -743,8 +742,6 @@ static int video_open(struct file *file) if (NULL == fh) return -ENOMEM; - lock_kernel(); - file->private_data = fh; fh->dev = dev; fh->radio = radio; @@ -762,8 +759,6 @@ static int video_open(struct file *file) dprintk(1, "post videobuf_queue_init()\n"); - unlock_kernel(); - return 0; } @@ -1512,10 +1507,10 @@ int cx23885_video_register(struct cx23885_dev *dev) if (dev->tuner_addr) sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_bus[1].i2c_adap, - NULL, "tuner", dev->tuner_addr, NULL); + "tuner", dev->tuner_addr, NULL); else sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, - &dev->i2c_bus[1].i2c_adap, NULL, + &dev->i2c_bus[1].i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_TV)); if (sd) { struct tuner_setup tun_setup; diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c index 417d1d5c73c..d7c94848249 100644 --- a/drivers/media/video/cx88/cx88-blackbird.c +++ b/drivers/media/video/cx88/cx88-blackbird.c @@ -33,7 +33,6 @@ #include <linux/delay.h> #include <linux/device.h> #include <linux/firmware.h> -#include <linux/smp_lock.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/cx2341x.h> diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c index b26fcba8600..9b9e169cce9 100644 --- a/drivers/media/video/cx88/cx88-cards.c +++ b/drivers/media/video/cx88/cx88-cards.c @@ -3515,19 +3515,18 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr) later code configures a tea5767. */ v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap, - NULL, "tuner", - 0, v4l2_i2c_tuner_addrs(ADDRS_RADIO)); + "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_RADIO)); if (has_demod) v4l2_i2c_new_subdev(&core->v4l2_dev, - &core->i2c_adap, NULL, "tuner", + &core->i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); if (core->board.tuner_addr == ADDR_UNSET) { v4l2_i2c_new_subdev(&core->v4l2_dev, - &core->i2c_adap, NULL, "tuner", + &core->i2c_adap, "tuner", 0, has_demod ? 
tv_addrs + 4 : tv_addrs); } else { v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap, - NULL, "tuner", core->board.tuner_addr, NULL); + "tuner", core->board.tuner_addr, NULL); } } diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c index d2f159daa8b..62cea954940 100644 --- a/drivers/media/video/cx88/cx88-video.c +++ b/drivers/media/video/cx88/cx88-video.c @@ -31,7 +31,6 @@ #include <linux/kmod.h> #include <linux/kernel.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/delay.h> @@ -1896,14 +1895,13 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev, if (core->board.audio_chip == V4L2_IDENT_WM8775) v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap, - NULL, "wm8775", 0x36 >> 1, NULL); + "wm8775", 0x36 >> 1, NULL); if (core->board.audio_chip == V4L2_IDENT_TVAUDIO) { /* This probes for a tda9874 as is used on some Pixelview Ultra boards. */ - v4l2_i2c_new_subdev(&core->v4l2_dev, - &core->i2c_adap, - NULL, "tvaudio", 0, I2C_ADDRS(0xb0 >> 1)); + v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap, + "tvaudio", 0, I2C_ADDRS(0xb0 >> 1)); } switch (core->boardnr) { diff --git a/drivers/media/video/davinci/vpfe_capture.c b/drivers/media/video/davinci/vpfe_capture.c index d8e38cc4ec4..7333a9bb254 100644 --- a/drivers/media/video/davinci/vpfe_capture.c +++ b/drivers/media/video/davinci/vpfe_capture.c @@ -1986,7 +1986,6 @@ static __init int vpfe_probe(struct platform_device *pdev) vpfe_dev->sd[i] = v4l2_i2c_new_subdev_board(&vpfe_dev->v4l2_dev, i2c_adap, - NULL, &sdinfo->board_info, NULL); if (vpfe_dev->sd[i]) { diff --git a/drivers/media/video/davinci/vpif_capture.c b/drivers/media/video/davinci/vpif_capture.c index 6ac6acd1635..193abab6b35 100644 --- a/drivers/media/video/davinci/vpif_capture.c +++ b/drivers/media/video/davinci/vpif_capture.c @@ -2013,7 +2013,6 @@ static __init int vpif_probe(struct platform_device *pdev) vpif_obj.sd[i] = v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev, i2c_adap, - NULL, &subdevdata->board_info, NULL); diff --git a/drivers/media/video/davinci/vpif_display.c b/drivers/media/video/davinci/vpif_display.c index 685f6a6ee60..412c65d54fe 100644 --- a/drivers/media/video/davinci/vpif_display.c +++ b/drivers/media/video/davinci/vpif_display.c @@ -1553,7 +1553,7 @@ static __init int vpif_probe(struct platform_device *pdev) for (i = 0; i < subdev_count; i++) { vpif_obj.sd[i] = v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev, - i2c_adap, NULL, + i2c_adap, &subdevdata[i].board_info, NULL); if (!vpif_obj.sd[i]) { diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c index 54859233f31..f7e9168157a 100644 --- a/drivers/media/video/em28xx/em28xx-cards.c +++ b/drivers/media/video/em28xx/em28xx-cards.c @@ -2554,39 +2554,39 @@ void em28xx_card_setup(struct em28xx *dev) /* request some modules */ if (dev->board.has_msp34xx) v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - NULL, "msp3400", 0, msp3400_addrs); + "msp3400", 0, msp3400_addrs); if (dev->board.decoder == EM28XX_SAA711X) v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - NULL, "saa7115_auto", 0, saa711x_addrs); + "saa7115_auto", 0, saa711x_addrs); if (dev->board.decoder == EM28XX_TVP5150) v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - NULL, "tvp5150", 0, tvp5150_addrs); + "tvp5150", 0, tvp5150_addrs); if (dev->em28xx_sensor == EM28XX_MT9V011) { struct v4l2_subdev *sd; sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, - &dev->i2c_adap, NULL, 
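/*
 * Illustrative sketch, not part of this patch: the v4l2_i2c_new_subdev*()
 * helpers lose the module-name argument (the NULL, or MODULE_NAME, passed
 * right after the adapter), so callers simply drop that parameter; the same
 * applies to the _cfg() and _board() variants seen in these hunks.
 */
/* before */
sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
			 NULL, "tuner", dev->tuner_addr, NULL);
/* after */
sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
			 "tuner", dev->tuner_addr, NULL);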
"mt9v011", 0, mt9v011_addrs); + &dev->i2c_adap, "mt9v011", 0, mt9v011_addrs); v4l2_subdev_call(sd, core, s_config, 0, &dev->sensor_xtal); } if (dev->board.adecoder == EM28XX_TVAUDIO) v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - NULL, "tvaudio", dev->board.tvaudio_addr, NULL); + "tvaudio", dev->board.tvaudio_addr, NULL); if (dev->board.tuner_type != TUNER_ABSENT) { int has_demod = (dev->tda9887_conf & TDA9887_PRESENT); if (dev->board.radio.type) v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - NULL, "tuner", dev->board.radio_addr, NULL); + "tuner", dev->board.radio_addr, NULL); if (has_demod) v4l2_i2c_new_subdev(&dev->v4l2_dev, - &dev->i2c_adap, NULL, "tuner", + &dev->i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); if (dev->tuner_addr == 0) { enum v4l2_i2c_tuner_type type = @@ -2594,14 +2594,14 @@ void em28xx_card_setup(struct em28xx *dev) struct v4l2_subdev *sd; sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, - &dev->i2c_adap, NULL, "tuner", + &dev->i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(type)); if (sd) dev->tuner_addr = v4l2_i2c_subdev_addr(sd); } else { v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - NULL, "tuner", dev->tuner_addr, NULL); + "tuner", dev->tuner_addr, NULL); } } diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c index a5cfc76b40b..bb164099ea2 100644 --- a/drivers/media/video/et61x251/et61x251_core.c +++ b/drivers/media/video/et61x251/et61x251_core.c @@ -2530,7 +2530,7 @@ static const struct v4l2_file_operations et61x251_fops = { .owner = THIS_MODULE, .open = et61x251_open, .release = et61x251_release, - .ioctl = et61x251_ioctl, + .unlocked_ioctl = et61x251_ioctl, .read = et61x251_read, .poll = et61x251_poll, .mmap = et61x251_mmap, diff --git a/drivers/media/video/fsl-viu.c b/drivers/media/video/fsl-viu.c index 9a075d83dd1..b8faff2dd71 100644 --- a/drivers/media/video/fsl-viu.c +++ b/drivers/media/video/fsl-viu.c @@ -1486,7 +1486,7 @@ static int __devinit viu_of_probe(struct platform_device *op, ad = i2c_get_adapter(0); viu_dev->decoder = v4l2_i2c_new_subdev(&viu_dev->v4l2_dev, ad, - NULL, "saa7113", VIU_VIDEO_DECODER_ADDR, NULL); + "saa7113", VIU_VIDEO_DECODER_ADDR, NULL); viu_dev->vidq.timeout.function = viu_vid_timeout; viu_dev->vidq.timeout.data = (unsigned long)viu_dev; diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c index 330dadc0010..e23de57e2c7 100644 --- a/drivers/media/video/gspca/sonixj.c +++ b/drivers/media/video/gspca/sonixj.c @@ -63,7 +63,10 @@ struct sd { #define QUALITY_DEF 80 u8 jpegqual; /* webcam quality */ + u8 reg01; + u8 reg17; u8 reg18; + u8 flags; s8 ag_cnt; #define AG_CNT_START 13 @@ -96,6 +99,22 @@ enum sensors { SENSOR_SP80708, }; +/* device flags */ +#define PDN_INV 1 /* inverse pin S_PWR_DN / sn_xxx tables */ + +/* sn9c1xx definitions */ +/* register 0x01 */ +#define S_PWR_DN 0x01 /* sensor power down */ +#define S_PDN_INV 0x02 /* inverse pin S_PWR_DN */ +#define V_TX_EN 0x04 /* video transfer enable */ +#define LED 0x08 /* output to pin LED */ +#define SCL_SEL_OD 0x20 /* open-drain mode */ +#define SYS_SEL_48M 0x40 /* system clock 0: 24MHz, 1: 48MHz */ +/* register 0x17 */ +#define MCK_SIZE_MASK 0x1f /* sensor master clock */ +#define SEN_CLK_EN 0x20 /* enable sensor clock */ +#define DEF_EN 0x80 /* defect pixel by 0: soft, 1: hard */ + /* V4L2 controls supported by the driver */ static void setbrightness(struct gspca_dev *gspca_dev); static void setcontrast(struct gspca_dev *gspca_dev); @@ -1755,141 +1774,6 @@ static void 
po2030n_probe(struct gspca_dev *gspca_dev) } } -static void bridge_init(struct gspca_dev *gspca_dev, - const u8 *sn9c1xx) -{ - struct sd *sd = (struct sd *) gspca_dev; - u8 reg0102[2]; - const u8 *reg9a; - static const u8 reg9a_def[] = - {0x00, 0x40, 0x20, 0x00, 0x00, 0x00}; - static const u8 reg9a_spec[] = - {0x00, 0x40, 0x38, 0x30, 0x00, 0x20}; - static const u8 regd4[] = {0x60, 0x00, 0x00}; - - /* sensor clock already enabled in sd_init */ - /* reg_w1(gspca_dev, 0xf1, 0x00); */ - reg_w1(gspca_dev, 0x01, sn9c1xx[1]); - - /* configure gpio */ - reg0102[0] = sn9c1xx[1]; - reg0102[1] = sn9c1xx[2]; - if (gspca_dev->audio) - reg0102[1] |= 0x04; /* keep the audio connection */ - reg_w(gspca_dev, 0x01, reg0102, 2); - reg_w(gspca_dev, 0x08, &sn9c1xx[8], 2); - reg_w(gspca_dev, 0x17, &sn9c1xx[0x17], 5); - switch (sd->sensor) { - case SENSOR_GC0307: - case SENSOR_OV7660: - case SENSOR_PO1030: - case SENSOR_PO2030N: - case SENSOR_SOI768: - case SENSOR_SP80708: - reg9a = reg9a_spec; - break; - default: - reg9a = reg9a_def; - break; - } - reg_w(gspca_dev, 0x9a, reg9a, 6); - - reg_w(gspca_dev, 0xd4, regd4, sizeof regd4); - - reg_w(gspca_dev, 0x03, &sn9c1xx[3], 0x0f); - - switch (sd->sensor) { - case SENSOR_ADCM1700: - reg_w1(gspca_dev, 0x01, 0x43); - reg_w1(gspca_dev, 0x17, 0x62); - reg_w1(gspca_dev, 0x01, 0x42); - reg_w1(gspca_dev, 0x01, 0x42); - break; - case SENSOR_GC0307: - msleep(50); - reg_w1(gspca_dev, 0x01, 0x61); - reg_w1(gspca_dev, 0x17, 0x22); - reg_w1(gspca_dev, 0x01, 0x60); - reg_w1(gspca_dev, 0x01, 0x40); - msleep(50); - break; - case SENSOR_MI0360B: - reg_w1(gspca_dev, 0x01, 0x61); - reg_w1(gspca_dev, 0x17, 0x60); - reg_w1(gspca_dev, 0x01, 0x60); - reg_w1(gspca_dev, 0x01, 0x40); - break; - case SENSOR_MT9V111: - reg_w1(gspca_dev, 0x01, 0x61); - reg_w1(gspca_dev, 0x17, 0x61); - reg_w1(gspca_dev, 0x01, 0x60); - reg_w1(gspca_dev, 0x01, 0x40); - break; - case SENSOR_OM6802: - msleep(10); - reg_w1(gspca_dev, 0x02, 0x73); - reg_w1(gspca_dev, 0x17, 0x60); - reg_w1(gspca_dev, 0x01, 0x22); - msleep(100); - reg_w1(gspca_dev, 0x01, 0x62); - reg_w1(gspca_dev, 0x17, 0x64); - reg_w1(gspca_dev, 0x17, 0x64); - reg_w1(gspca_dev, 0x01, 0x42); - msleep(10); - reg_w1(gspca_dev, 0x01, 0x42); - i2c_w8(gspca_dev, om6802_init0[0]); - i2c_w8(gspca_dev, om6802_init0[1]); - msleep(15); - reg_w1(gspca_dev, 0x02, 0x71); - msleep(150); - break; - case SENSOR_OV7630: - reg_w1(gspca_dev, 0x01, 0x61); - reg_w1(gspca_dev, 0x17, 0xe2); - reg_w1(gspca_dev, 0x01, 0x60); - reg_w1(gspca_dev, 0x01, 0x40); - break; - case SENSOR_OV7648: - reg_w1(gspca_dev, 0x01, 0x63); - reg_w1(gspca_dev, 0x17, 0x20); - reg_w1(gspca_dev, 0x01, 0x62); - reg_w1(gspca_dev, 0x01, 0x42); - break; - case SENSOR_PO1030: - case SENSOR_SOI768: - reg_w1(gspca_dev, 0x01, 0x61); - reg_w1(gspca_dev, 0x17, 0x20); - reg_w1(gspca_dev, 0x01, 0x60); - reg_w1(gspca_dev, 0x01, 0x40); - break; - case SENSOR_PO2030N: - case SENSOR_OV7660: - reg_w1(gspca_dev, 0x01, 0x63); - reg_w1(gspca_dev, 0x17, 0x20); - reg_w1(gspca_dev, 0x01, 0x62); - reg_w1(gspca_dev, 0x01, 0x42); - break; - case SENSOR_SP80708: - reg_w1(gspca_dev, 0x01, 0x63); - reg_w1(gspca_dev, 0x17, 0x20); - reg_w1(gspca_dev, 0x01, 0x62); - reg_w1(gspca_dev, 0x01, 0x42); - msleep(100); - reg_w1(gspca_dev, 0x02, 0x62); - break; - default: -/* case SENSOR_HV7131R: */ -/* case SENSOR_MI0360: */ -/* case SENSOR_MO4000: */ - reg_w1(gspca_dev, 0x01, 0x43); - reg_w1(gspca_dev, 0x17, 0x61); - reg_w1(gspca_dev, 0x01, 0x42); - if (sd->sensor == SENSOR_HV7131R) - hv7131r_probe(gspca_dev); - break; - } -} - /* this 
function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) @@ -1898,7 +1782,8 @@ static int sd_config(struct gspca_dev *gspca_dev, struct cam *cam; sd->bridge = id->driver_info >> 16; - sd->sensor = id->driver_info; + sd->sensor = id->driver_info >> 8; + sd->flags = id->driver_info; cam = &gspca_dev->cam; if (sd->sensor == SENSOR_ADCM1700) { @@ -1929,7 +1814,7 @@ static int sd_init(struct gspca_dev *gspca_dev) /* setup a selector by bridge */ reg_w1(gspca_dev, 0xf1, 0x01); reg_r(gspca_dev, 0x00, 1); - reg_w1(gspca_dev, 0xf1, gspca_dev->usb_buf[0]); + reg_w1(gspca_dev, 0xf1, 0x00); reg_r(gspca_dev, 0x00, 1); /* get sonix chip id */ regF1 = gspca_dev->usb_buf[0]; if (gspca_dev->usb_err < 0) @@ -2423,10 +2308,17 @@ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int i; - u8 reg1, reg17; + u8 reg01, reg17; + u8 reg0102[2]; const u8 *sn9c1xx; const u8 (*init)[8]; + const u8 *reg9a; int mode; + static const u8 reg9a_def[] = + {0x00, 0x40, 0x20, 0x00, 0x00, 0x00}; + static const u8 reg9a_spec[] = + {0x00, 0x40, 0x38, 0x30, 0x00, 0x20}; + static const u8 regd4[] = {0x60, 0x00, 0x00}; static const u8 C0[] = { 0x2d, 0x2d, 0x3a, 0x05, 0x04, 0x3f }; static const u8 CA[] = { 0x28, 0xd8, 0x14, 0xec }; static const u8 CA_adcm1700[] = @@ -2448,7 +2340,85 @@ static int sd_start(struct gspca_dev *gspca_dev) /* initialize the bridge */ sn9c1xx = sn_tb[sd->sensor]; - bridge_init(gspca_dev, sn9c1xx); + + /* sensor clock already enabled in sd_init */ + /* reg_w1(gspca_dev, 0xf1, 0x00); */ + reg01 = sn9c1xx[1]; + if (sd->flags & PDN_INV) + reg01 ^= S_PDN_INV; /* power down inverted */ + reg_w1(gspca_dev, 0x01, reg01); + + /* configure gpio */ + reg0102[0] = reg01; + reg0102[1] = sn9c1xx[2]; + if (gspca_dev->audio) + reg0102[1] |= 0x04; /* keep the audio connection */ + reg_w(gspca_dev, 0x01, reg0102, 2); + reg_w(gspca_dev, 0x08, &sn9c1xx[8], 2); + reg_w(gspca_dev, 0x17, &sn9c1xx[0x17], 5); + switch (sd->sensor) { + case SENSOR_GC0307: + case SENSOR_OV7660: + case SENSOR_PO1030: + case SENSOR_PO2030N: + case SENSOR_SOI768: + case SENSOR_SP80708: + reg9a = reg9a_spec; + break; + default: + reg9a = reg9a_def; + break; + } + reg_w(gspca_dev, 0x9a, reg9a, 6); + + reg_w(gspca_dev, 0xd4, regd4, sizeof regd4); + + reg_w(gspca_dev, 0x03, &sn9c1xx[3], 0x0f); + + reg17 = sn9c1xx[0x17]; + switch (sd->sensor) { + case SENSOR_GC0307: + msleep(50); /*fixme: is it useful? */ + break; + case SENSOR_OM6802: + msleep(10); + reg_w1(gspca_dev, 0x02, 0x73); + reg17 |= SEN_CLK_EN; + reg_w1(gspca_dev, 0x17, reg17); + reg_w1(gspca_dev, 0x01, 0x22); + msleep(100); + reg01 = SCL_SEL_OD | S_PDN_INV; + reg17 &= MCK_SIZE_MASK; + reg17 |= 0x04; /* clock / 4 */ + break; + } + reg01 |= SYS_SEL_48M; + reg_w1(gspca_dev, 0x01, reg01); + reg17 |= SEN_CLK_EN; + reg_w1(gspca_dev, 0x17, reg17); + reg01 &= ~S_PWR_DN; /* sensor power on */ + reg_w1(gspca_dev, 0x01, reg01); + reg01 &= ~SYS_SEL_48M; + reg_w1(gspca_dev, 0x01, reg01); + + switch (sd->sensor) { + case SENSOR_HV7131R: + hv7131r_probe(gspca_dev); /*fixme: is it useful? 
*/ + break; + case SENSOR_OM6802: + msleep(10); + reg_w1(gspca_dev, 0x01, reg01); + i2c_w8(gspca_dev, om6802_init0[0]); + i2c_w8(gspca_dev, om6802_init0[1]); + msleep(15); + reg_w1(gspca_dev, 0x02, 0x71); + msleep(150); + break; + case SENSOR_SP80708: + msleep(100); + reg_w1(gspca_dev, 0x02, 0x62); + break; + } /* initialize the sensor */ i2c_w_seq(gspca_dev, sensor_init[sd->sensor]); @@ -2476,30 +2446,11 @@ static int sd_start(struct gspca_dev *gspca_dev) } reg_w1(gspca_dev, 0x18, sn9c1xx[0x18]); switch (sd->sensor) { - case SENSOR_GC0307: - reg17 = 0xa2; - break; - case SENSOR_MT9V111: - case SENSOR_MI0360B: - reg17 = 0xe0; - break; - case SENSOR_ADCM1700: - case SENSOR_OV7630: - reg17 = 0xe2; - break; - case SENSOR_OV7648: - reg17 = 0x20; - break; - case SENSOR_OV7660: - case SENSOR_SOI768: - reg17 = 0xa0; - break; - case SENSOR_PO1030: - case SENSOR_PO2030N: - reg17 = 0xa0; + case SENSOR_OM6802: +/* case SENSOR_OV7648: * fixme: sometimes */ break; default: - reg17 = 0x60; + reg17 |= DEF_EN; break; } reg_w1(gspca_dev, 0x17, reg17); @@ -2546,95 +2497,67 @@ static int sd_start(struct gspca_dev *gspca_dev) init = NULL; mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; - if (mode) - reg1 = 0x46; /* 320x240: clk 48Mhz, video trf enable */ - else - reg1 = 0x06; /* 640x480: clk 24Mhz, video trf enable */ - reg17 = 0x61; /* 0x:20: enable sensor clock */ + reg01 |= SYS_SEL_48M | V_TX_EN; + reg17 &= ~MCK_SIZE_MASK; + reg17 |= 0x02; /* clock / 2 */ switch (sd->sensor) { case SENSOR_ADCM1700: init = adcm1700_sensor_param1; - reg1 = 0x46; - reg17 = 0xe2; break; case SENSOR_GC0307: init = gc0307_sensor_param1; - reg17 = 0xa2; - reg1 = 0x44; + break; + case SENSOR_HV7131R: + case SENSOR_MI0360: + if (mode) + reg01 |= SYS_SEL_48M; /* 320x240: clk 48Mhz */ + else + reg01 &= ~SYS_SEL_48M; /* 640x480: clk 24Mhz */ + reg17 &= ~MCK_SIZE_MASK; + reg17 |= 0x01; /* clock / 1 */ break; case SENSOR_MI0360B: init = mi0360b_sensor_param1; - reg1 &= ~0x02; /* don't inverse pin S_PWR_DN */ - reg17 = 0xe2; break; case SENSOR_MO4000: - if (mode) { -/* reg1 = 0x46; * 320 clk 48Mhz 60fp/s */ - reg1 = 0x06; /* clk 24Mz */ - } else { - reg17 = 0x22; /* 640 MCKSIZE */ -/* reg1 = 0x06; * 640 clk 24Mz (done) */ + if (mode) { /* if 320x240 */ + reg01 &= ~SYS_SEL_48M; /* clk 24Mz */ + reg17 &= ~MCK_SIZE_MASK; + reg17 |= 0x01; /* clock / 1 */ } break; case SENSOR_MT9V111: init = mt9v111_sensor_param1; - if (mode) { - reg1 = 0x04; /* 320 clk 48Mhz */ - } else { -/* reg1 = 0x06; * 640 clk 24Mz (done) */ - reg17 = 0xc2; - } break; case SENSOR_OM6802: init = om6802_sensor_param1; - reg17 = 0x64; /* 640 MCKSIZE */ + if (!mode) { /* if 640x480 */ + reg17 &= ~MCK_SIZE_MASK; + reg17 |= 0x01; /* clock / 4 */ + } break; case SENSOR_OV7630: init = ov7630_sensor_param1; - reg17 = 0xe2; - reg1 = 0x44; break; case SENSOR_OV7648: init = ov7648_sensor_param1; - reg17 = 0x21; -/* reg1 = 0x42; * 42 - 46? 
*/ + reg17 &= ~MCK_SIZE_MASK; + reg17 |= 0x01; /* clock / 1 */ break; case SENSOR_OV7660: init = ov7660_sensor_param1; - if (sd->bridge == BRIDGE_SN9C120) { - if (mode) { /* 320x240 - 160x120 */ - reg17 = 0xa2; - reg1 = 0x44; /* 48 Mhz, video trf eneble */ - } - } else { - reg17 = 0x22; - reg1 = 0x06; /* 24 Mhz, video trf eneble - * inverse power down */ - } break; case SENSOR_PO1030: init = po1030_sensor_param1; - reg17 = 0xa2; - reg1 = 0x44; break; case SENSOR_PO2030N: init = po2030n_sensor_param1; - reg1 = 0x46; - reg17 = 0xa2; break; case SENSOR_SOI768: init = soi768_sensor_param1; - reg1 = 0x44; - reg17 = 0xa2; break; case SENSOR_SP80708: init = sp80708_sensor_param1; - if (mode) { -/*?? reg1 = 0x04; * 320 clk 48Mhz */ - } else { - reg1 = 0x46; /* 640 clk 48Mz */ - reg17 = 0xa2; - } break; } @@ -2684,7 +2607,9 @@ static int sd_start(struct gspca_dev *gspca_dev) setjpegqual(gspca_dev); reg_w1(gspca_dev, 0x17, reg17); - reg_w1(gspca_dev, 0x01, reg1); + reg_w1(gspca_dev, 0x01, reg01); + sd->reg01 = reg01; + sd->reg17 = reg17; sethvflip(gspca_dev); setbrightness(gspca_dev); @@ -2706,41 +2631,64 @@ static void sd_stopN(struct gspca_dev *gspca_dev) { 0xa1, 0x21, 0x76, 0x20, 0x00, 0x00, 0x00, 0x10 }; static const u8 stopsoi768[] = { 0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10 }; - u8 data; - const u8 *sn9c1xx; + u8 reg01; + u8 reg17; - data = 0x0b; + reg01 = sd->reg01; + reg17 = sd->reg17 & ~SEN_CLK_EN; switch (sd->sensor) { + case SENSOR_ADCM1700: case SENSOR_GC0307: - data = 0x29; + case SENSOR_PO2030N: + case SENSOR_SP80708: + reg01 |= LED; + reg_w1(gspca_dev, 0x01, reg01); + reg01 &= ~(LED | V_TX_EN); + reg_w1(gspca_dev, 0x01, reg01); +/* reg_w1(gspca_dev, 0x02, 0x??); * LED off ? */ break; case SENSOR_HV7131R: + reg01 &= ~V_TX_EN; + reg_w1(gspca_dev, 0x01, reg01); i2c_w8(gspca_dev, stophv7131); - data = 0x2b; break; case SENSOR_MI0360: case SENSOR_MI0360B: + reg01 &= ~V_TX_EN; + reg_w1(gspca_dev, 0x01, reg01); +/* reg_w1(gspca_dev, 0x02, 0x40); * LED off ? 
*/ i2c_w8(gspca_dev, stopmi0360); - data = 0x29; break; - case SENSOR_OV7648: - i2c_w8(gspca_dev, stopov7648); - /* fall thru */ case SENSOR_MT9V111: - case SENSOR_OV7630: + case SENSOR_OM6802: case SENSOR_PO1030: - data = 0x29; + reg01 &= ~V_TX_EN; + reg_w1(gspca_dev, 0x01, reg01); + break; + case SENSOR_OV7630: + case SENSOR_OV7648: + reg01 &= ~V_TX_EN; + reg_w1(gspca_dev, 0x01, reg01); + i2c_w8(gspca_dev, stopov7648); + break; + case SENSOR_OV7660: + reg01 &= ~V_TX_EN; + reg_w1(gspca_dev, 0x01, reg01); break; case SENSOR_SOI768: i2c_w8(gspca_dev, stopsoi768); - data = 0x29; break; } - sn9c1xx = sn_tb[sd->sensor]; - reg_w1(gspca_dev, 0x01, sn9c1xx[1]); - reg_w1(gspca_dev, 0x17, sn9c1xx[0x17]); - reg_w1(gspca_dev, 0x01, sn9c1xx[1]); - reg_w1(gspca_dev, 0x01, data); + + reg01 |= SCL_SEL_OD; + reg_w1(gspca_dev, 0x01, reg01); + reg01 |= S_PWR_DN; /* sensor power down */ + reg_w1(gspca_dev, 0x01, reg01); + reg_w1(gspca_dev, 0x17, reg17); + reg01 &= ~SYS_SEL_48M; /* clock 24MHz */ + reg_w1(gspca_dev, 0x01, reg01); + reg01 |= LED; + reg_w1(gspca_dev, 0x01, reg01); /* Don't disable sensor clock as that disables the button on the cam */ /* reg_w1(gspca_dev, 0xf1, 0x01); */ } @@ -2954,14 +2902,18 @@ static const struct sd_desc sd_desc = { /* -- module initialisation -- */ #define BS(bridge, sensor) \ .driver_info = (BRIDGE_ ## bridge << 16) \ - | SENSOR_ ## sensor + | (SENSOR_ ## sensor << 8) +#define BSF(bridge, sensor, flags) \ + .driver_info = (BRIDGE_ ## bridge << 16) \ + | (SENSOR_ ## sensor << 8) \ + | (flags) static const __devinitdata struct usb_device_id device_table[] = { #if !defined CONFIG_USB_SN9C102 && !defined CONFIG_USB_SN9C102_MODULE {USB_DEVICE(0x0458, 0x7025), BS(SN9C120, MI0360)}, {USB_DEVICE(0x0458, 0x702e), BS(SN9C120, OV7660)}, #endif - {USB_DEVICE(0x045e, 0x00f5), BS(SN9C105, OV7660)}, - {USB_DEVICE(0x045e, 0x00f7), BS(SN9C105, OV7660)}, + {USB_DEVICE(0x045e, 0x00f5), BSF(SN9C105, OV7660, PDN_INV)}, + {USB_DEVICE(0x045e, 0x00f7), BSF(SN9C105, OV7660, PDN_INV)}, {USB_DEVICE(0x0471, 0x0327), BS(SN9C105, MI0360)}, {USB_DEVICE(0x0471, 0x0328), BS(SN9C105, MI0360)}, {USB_DEVICE(0x0471, 0x0330), BS(SN9C105, MI0360)}, diff --git a/drivers/media/video/imx074.c b/drivers/media/video/imx074.c index 380e459f899..27b5dfdfbb9 100644 --- a/drivers/media/video/imx074.c +++ b/drivers/media/video/imx074.c @@ -451,7 +451,6 @@ static int imx074_probe(struct i2c_client *client, ret = imx074_video_probe(icd, client); if (ret < 0) { icd->ops = NULL; - i2c_set_clientdata(client, NULL); kfree(priv); return ret; } @@ -468,7 +467,6 @@ static int imx074_remove(struct i2c_client *client) icd->ops = NULL; if (icl->free_bus) icl->free_bus(icl); - i2c_set_clientdata(client, NULL); client->driver = NULL; kfree(priv); diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c index 5a000c65ae9..ce4a7537590 100644 --- a/drivers/media/video/ir-kbd-i2c.c +++ b/drivers/media/video/ir-kbd-i2c.c @@ -44,7 +44,6 @@ #include <linux/errno.h> #include <linux/slab.h> #include <linux/i2c.h> -#include <linux/i2c-id.h> #include <linux/workqueue.h> #include <media/ir-core.h> diff --git a/drivers/media/video/ivtv/ivtv-i2c.c b/drivers/media/video/ivtv/ivtv-i2c.c index 9e8039ac909..665191c9b40 100644 --- a/drivers/media/video/ivtv/ivtv-i2c.c +++ b/drivers/media/video/ivtv/ivtv-i2c.c @@ -239,19 +239,16 @@ int ivtv_i2c_register(struct ivtv *itv, unsigned idx) return -1; if (hw == IVTV_HW_TUNER) { /* special tuner handling */ - sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, - adap, NULL, type, - 0, 
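/*
 * Illustrative sketch, not part of this patch: plain user-space C showing how
 * the new BS()/BSF() macros pack bridge, sensor and flags into driver_info
 * (bridge in bits 16+, sensor in bits 8-15, flags in bits 0-7) and how
 * sd_config() recovers them through u8 truncation.  The numeric ids used
 * below are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define PDN_INV 1	/* inverse pin S_PWR_DN, as in the sonixj table */

int main(void)
{
	/* hypothetical ids: bridge 1, sensor 7, plus the PDN_INV flag */
	unsigned long driver_info = (1UL << 16) | (7UL << 8) | PDN_INV;
	uint8_t bridge = driver_info >> 16;
	uint8_t sensor = driver_info >> 8;	/* low byte after the shift */
	uint8_t flags  = driver_info;		/* low byte only */

	printf("bridge=%u sensor=%u flags=%#x\n",
	       (unsigned)bridge, (unsigned)sensor, (unsigned)flags);
	return 0;
}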
itv->card_i2c->radio); + sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, adap, type, 0, + itv->card_i2c->radio); if (sd) sd->grp_id = 1 << idx; - sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, - adap, NULL, type, - 0, itv->card_i2c->demod); + sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, adap, type, 0, + itv->card_i2c->demod); if (sd) sd->grp_id = 1 << idx; - sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, - adap, NULL, type, - 0, itv->card_i2c->tv); + sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, adap, type, 0, + itv->card_i2c->tv); if (sd) sd->grp_id = 1 << idx; return sd ? 0 : -1; @@ -267,17 +264,16 @@ int ivtv_i2c_register(struct ivtv *itv, unsigned idx) /* It's an I2C device other than an analog tuner or IR chip */ if (hw == IVTV_HW_UPD64031A || hw == IVTV_HW_UPD6408X) { sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, - adap, NULL, type, 0, I2C_ADDRS(hw_addrs[idx])); + adap, type, 0, I2C_ADDRS(hw_addrs[idx])); } else if (hw == IVTV_HW_CX25840) { struct cx25840_platform_data pdata; pdata.pvr150_workaround = itv->pvr150_workaround; sd = v4l2_i2c_new_subdev_cfg(&itv->v4l2_dev, - adap, NULL, type, 0, &pdata, hw_addrs[idx], - NULL); + adap, type, 0, &pdata, hw_addrs[idx], NULL); } else { sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, - adap, NULL, type, hw_addrs[idx], NULL); + adap, type, hw_addrs[idx], NULL); } if (sd) sd->grp_id = 1 << idx; diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c index 2be23bccd3c..48d2c2419c1 100644 --- a/drivers/media/video/meye.c +++ b/drivers/media/video/meye.c @@ -1659,7 +1659,7 @@ static const struct v4l2_file_operations meye_fops = { .open = meye_open, .release = meye_release, .mmap = meye_mmap, - .ioctl = video_ioctl2, + .unlocked_ioctl = video_ioctl2, .poll = meye_poll, }; @@ -1831,12 +1831,6 @@ static int __devinit meye_probe(struct pci_dev *pcidev, msleep(1); mchip_set(MCHIP_MM_INTA, MCHIP_MM_INTA_HIC_1_MASK); - if (video_register_device(meye.vdev, VFL_TYPE_GRABBER, - video_nr) < 0) { - v4l2_err(v4l2_dev, "video_register_device failed\n"); - goto outvideoreg; - } - mutex_init(&meye.lock); init_waitqueue_head(&meye.proc_list); meye.brightness = 32 << 10; @@ -1858,6 +1852,12 @@ static int __devinit meye_probe(struct pci_dev *pcidev, sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERAPICTURE, 0); sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERAAGC, 48); + if (video_register_device(meye.vdev, VFL_TYPE_GRABBER, + video_nr) < 0) { + v4l2_err(v4l2_dev, "video_register_device failed\n"); + goto outvideoreg; + } + v4l2_info(v4l2_dev, "Motion Eye Camera Driver v%s.\n", MEYE_DRIVER_VERSION); v4l2_info(v4l2_dev, "mchip KL5A72002 rev. 
%d, base %lx, irq %d\n", diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c index 4a27862da30..072bd2d1cfa 100644 --- a/drivers/media/video/mx2_camera.c +++ b/drivers/media/video/mx2_camera.c @@ -31,6 +31,7 @@ #include <media/v4l2-common.h> #include <media/v4l2-dev.h> +#include <media/videobuf-core.h> #include <media/videobuf-dma-contig.h> #include <media/soc_camera.h> #include <media/soc_mediabus.h> @@ -903,8 +904,6 @@ static int mx2_camera_set_crop(struct soc_camera_device *icd, static int mx2_camera_set_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { - struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); - struct mx2_camera_dev *pcdev = ici->priv; struct v4l2_subdev *sd = soc_camera_to_subdev(icd); const struct soc_camera_format_xlate *xlate; struct v4l2_pix_format *pix = &f->fmt.pix; @@ -943,8 +942,6 @@ static int mx2_camera_set_fmt(struct soc_camera_device *icd, static int mx2_camera_try_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { - struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); - struct mx2_camera_dev *pcdev = ici->priv; struct v4l2_subdev *sd = soc_camera_to_subdev(icd); const struct soc_camera_format_xlate *xlate; struct v4l2_pix_format *pix = &f->fmt.pix; @@ -1024,13 +1021,13 @@ static int mx2_camera_querycap(struct soc_camera_host *ici, return 0; } -static int mx2_camera_reqbufs(struct soc_camera_file *icf, +static int mx2_camera_reqbufs(struct soc_camera_device *icd, struct v4l2_requestbuffers *p) { int i; for (i = 0; i < p->count; i++) { - struct mx2_buffer *buf = container_of(icf->vb_vidq.bufs[i], + struct mx2_buffer *buf = container_of(icd->vb_vidq.bufs[i], struct mx2_buffer, vb); INIT_LIST_HEAD(&buf->vb.queue); } @@ -1151,9 +1148,9 @@ err_out: static unsigned int mx2_camera_poll(struct file *file, poll_table *pt) { - struct soc_camera_file *icf = file->private_data; + struct soc_camera_device *icd = file->private_data; - return videobuf_poll_stream(file, &icf->vb_vidq, pt); + return videobuf_poll_stream(file, &icd->vb_vidq, pt); } static struct soc_camera_host_ops mx2_soc_camera_host_ops = { diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c index 29c5fc34813..aa871c2936b 100644 --- a/drivers/media/video/mx3_camera.c +++ b/drivers/media/video/mx3_camera.c @@ -27,6 +27,7 @@ #include <mach/ipu.h> #include <mach/mx3_camera.h> +#include <mach/dma.h> #define MX3_CAM_DRV_NAME "mx3-camera" @@ -638,6 +639,9 @@ static bool chan_filter(struct dma_chan *chan, void *arg) struct dma_chan_request *rq = arg; struct mx3_camera_pdata *pdata; + if (!imx_dma_is_ipu(chan)) + return false; + if (!rq) return false; diff --git a/drivers/media/video/mxb.c b/drivers/media/video/mxb.c index 94ba698d0ad..4e8fd965f15 100644 --- a/drivers/media/video/mxb.c +++ b/drivers/media/video/mxb.c @@ -185,17 +185,17 @@ static int mxb_probe(struct saa7146_dev *dev) } mxb->saa7111a = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter, - NULL, "saa7111", I2C_SAA7111A, NULL); + "saa7111", I2C_SAA7111A, NULL); mxb->tea6420_1 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter, - NULL, "tea6420", I2C_TEA6420_1, NULL); + "tea6420", I2C_TEA6420_1, NULL); mxb->tea6420_2 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter, - NULL, "tea6420", I2C_TEA6420_2, NULL); + "tea6420", I2C_TEA6420_2, NULL); mxb->tea6415c = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter, - NULL, "tea6415c", I2C_TEA6415C, NULL); + "tea6415c", I2C_TEA6415C, NULL); mxb->tda9840 = v4l2_i2c_new_subdev(&dev->v4l2_dev, 
&mxb->i2c_adapter, - NULL, "tda9840", I2C_TDA9840, NULL); + "tda9840", I2C_TDA9840, NULL); mxb->tuner = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter, - NULL, "tuner", I2C_TUNER, NULL); + "tuner", I2C_TUNER, NULL); /* check if all devices are present */ if (!mxb->tea6420_1 || !mxb->tea6420_2 || !mxb->tea6415c || diff --git a/drivers/media/video/omap1_camera.c b/drivers/media/video/omap1_camera.c index 7c30e62b50d..cbfd07f2d9d 100644 --- a/drivers/media/video/omap1_camera.c +++ b/drivers/media/video/omap1_camera.c @@ -235,7 +235,7 @@ static void free_buffer(struct videobuf_queue *vq, struct omap1_cam_buf *buf, BUG_ON(in_interrupt()); - videobuf_waiton(vb, 0, 0); + videobuf_waiton(vq, vb, 0, 0); if (vb_mode == OMAP1_CAM_DMA_CONTIG) { videobuf_dma_contig_free(vq, vb); @@ -504,7 +504,7 @@ static void omap1_videobuf_queue(struct videobuf_queue *vq, * empty. Since the transfer of the DMA programming register set * content to the DMA working register set is done automatically * by the DMA hardware, this can pretty well happen while we - * are keeping the lock here. Levae fetching it from the queue + * are keeping the lock here. Leave fetching it from the queue * to be done when a next DMA interrupt occures instead. */ return; @@ -1365,12 +1365,12 @@ static void omap1_cam_init_videobuf(struct videobuf_queue *q, videobuf_queue_dma_contig_init(q, &omap1_videobuf_ops, icd->dev.parent, &pcdev->lock, V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE, - sizeof(struct omap1_cam_buf), icd); + sizeof(struct omap1_cam_buf), icd, NULL); else videobuf_queue_sg_init(q, &omap1_videobuf_ops, icd->dev.parent, &pcdev->lock, V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE, - sizeof(struct omap1_cam_buf), icd); + sizeof(struct omap1_cam_buf), icd, NULL); /* use videobuf mode (auto)selected with the module parameter */ pcdev->vb_mode = sg_mode ? 
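/*
 * Illustrative sketch, not part of this patch: videobuf_queue_sg_init() and
 * videobuf_queue_dma_contig_init() grow a trailing argument, which the bttv
 * and sh_vou hunks in this merge fill with a per-device mutex; passing NULL,
 * as omap1_camera does here, keeps locking in the driver's hands.  The foo_*
 * names and the "ext_lock" parameter name are assumptions.
 */
videobuf_queue_sg_init(&fh->queue, &foo_videobuf_ops,
		       icd->dev.parent, &pcdev->lock,
		       V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
		       sizeof(struct foo_buf), icd,
		       NULL /* ext_lock: or a driver mutex to let videobuf take it */);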
OMAP1_CAM_DMA_SG : OMAP1_CAM_DMA_CONTIG; @@ -1386,7 +1386,7 @@ static void omap1_cam_init_videobuf(struct videobuf_queue *q, } } -static int omap1_cam_reqbufs(struct soc_camera_file *icf, +static int omap1_cam_reqbufs(struct soc_camera_device *icd, struct v4l2_requestbuffers *p) { int i; @@ -1398,7 +1398,7 @@ static int omap1_cam_reqbufs(struct soc_camera_file *icf, * it hadn't triggered */ for (i = 0; i < p->count; i++) { - struct omap1_cam_buf *buf = container_of(icf->vb_vidq.bufs[i], + struct omap1_cam_buf *buf = container_of(icd->vb_vidq.bufs[i], struct omap1_cam_buf, vb); buf->inwork = 0; INIT_LIST_HEAD(&buf->vb.queue); @@ -1485,10 +1485,10 @@ static int omap1_cam_set_bus_param(struct soc_camera_device *icd, static unsigned int omap1_cam_poll(struct file *file, poll_table *pt) { - struct soc_camera_file *icf = file->private_data; + struct soc_camera_device *icd = file->private_data; struct omap1_cam_buf *buf; - buf = list_entry(icf->vb_vidq.stream.next, struct omap1_cam_buf, + buf = list_entry(icd->vb_vidq.stream.next, struct omap1_cam_buf, vb.stream); poll_wait(file, &buf->vb.done, pt); diff --git a/drivers/media/video/ov6650.c b/drivers/media/video/ov6650.c index b7cfeab0948..cf93de98806 100644 --- a/drivers/media/video/ov6650.c +++ b/drivers/media/video/ov6650.c @@ -754,7 +754,7 @@ static int ov6650_g_fmt(struct v4l2_subdev *sd, static bool is_unscaled_ok(int width, int height, struct v4l2_rect *rect) { - return (width > rect->width >> 1 || height > rect->height >> 1); + return width > rect->width >> 1 || height > rect->height >> 1; } static u8 to_clkrc(struct v4l2_fract *timeperframe, @@ -840,8 +840,6 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf) coma_mask |= COMA_BW | COMA_BYTE_SWAP | COMA_WORD_SWAP; coma_set |= COMA_RAW_RGB | COMA_RGB; break; - case 0: - break; default: dev_err(&client->dev, "Pixel format not handled: 0x%x\n", code); return -EINVAL; @@ -1176,7 +1174,6 @@ static int ov6650_probe(struct i2c_client *client, if (ret) { icd->ops = NULL; - i2c_set_clientdata(client, NULL); kfree(priv); } @@ -1187,7 +1184,6 @@ static int ov6650_remove(struct i2c_client *client) { struct ov6650 *priv = to_ov6650(client); - i2c_set_clientdata(client, NULL); kfree(priv); return 0; } diff --git a/drivers/media/video/pms.c b/drivers/media/video/pms.c index 7129b50757d..7551907f8c2 100644 --- a/drivers/media/video/pms.c +++ b/drivers/media/video/pms.c @@ -932,7 +932,7 @@ static ssize_t pms_read(struct file *file, char __user *buf, static const struct v4l2_file_operations pms_fops = { .owner = THIS_MODULE, - .ioctl = video_ioctl2, + .unlocked_ioctl = video_ioctl2, .read = pms_read, }; diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c index bef202752cc..66ad516bdfd 100644 --- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c +++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c @@ -2088,16 +2088,14 @@ static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw, " Setting up with specified i2c address 0x%x", mid, i2caddr[0]); sd = v4l2_i2c_new_subdev(&hdw->v4l2_dev, &hdw->i2c_adap, - NULL, fname, - i2caddr[0], NULL); + fname, i2caddr[0], NULL); } else { pvr2_trace(PVR2_TRACE_INIT, "Module ID %u:" " Setting up with address probe list", mid); sd = v4l2_i2c_new_subdev(&hdw->v4l2_dev, &hdw->i2c_adap, - NULL, fname, - 0, i2caddr); + fname, 0, i2caddr); } if (!sd) { diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c index e62beb4efdb..f3dc89da4c4 100644 --- a/drivers/media/video/pwc/pwc-if.c +++ 
b/drivers/media/video/pwc/pwc-if.c @@ -62,7 +62,6 @@ #include <linux/module.h> #include <linux/poll.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #ifdef CONFIG_USB_PWC_INPUT_EVDEV #include <linux/usb/input.h> #endif diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c index f5a46c45871..a845753665c 100644 --- a/drivers/media/video/s2255drv.c +++ b/drivers/media/video/s2255drv.c @@ -49,7 +49,6 @@ #include <linux/videodev2.h> #include <linux/version.h> #include <linux/mm.h> -#include <linux/smp_lock.h> #include <media/videobuf-vmalloc.h> #include <media/v4l2-common.h> #include <media/v4l2-device.h> diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c index e8f13d3e2df..1b93207c89e 100644 --- a/drivers/media/video/s5p-fimc/fimc-capture.c +++ b/drivers/media/video/s5p-fimc/fimc-capture.c @@ -44,7 +44,7 @@ static struct v4l2_subdev *fimc_subdev_register(struct fimc_dev *fimc, return ERR_PTR(-ENOMEM); sd = v4l2_i2c_new_subdev_board(&vid_cap->v4l2_dev, i2c_adap, - MODULE_NAME, isp_info->board_info, NULL); + isp_info->board_info, NULL); if (!sd) { v4l2_err(&vid_cap->v4l2_dev, "failed to acquire subdev\n"); return NULL; diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c index 10a6cbf6a79..1d4d0a49ea5 100644 --- a/drivers/media/video/saa7134/saa7134-cards.c +++ b/drivers/media/video/saa7134/saa7134-cards.c @@ -6661,6 +6661,18 @@ struct pci_device_id saa7134_pci_tbl[] = { .subdevice = 0x2804, .driver_data = SAA7134_BOARD_TECHNOTREND_BUDGET_T3000, }, { + .vendor = PCI_VENDOR_ID_PHILIPS, + .device = PCI_DEVICE_ID_PHILIPS_SAA7133, + .subvendor = 0x5ace, /* Beholder Intl. Ltd. */ + .subdevice = 0x7190, + .driver_data = SAA7134_BOARD_BEHOLD_H7, + }, { + .vendor = PCI_VENDOR_ID_PHILIPS, + .device = PCI_DEVICE_ID_PHILIPS_SAA7133, + .subvendor = 0x5ace, /* Beholder Intl. Ltd. */ + .subdevice = 0x7090, + .driver_data = SAA7134_BOARD_BEHOLD_A7, + }, { /* --- boards without eeprom + subsystem ID --- */ .vendor = PCI_VENDOR_ID_PHILIPS, .device = PCI_DEVICE_ID_PHILIPS_SAA7134, @@ -6698,18 +6710,6 @@ struct pci_device_id saa7134_pci_tbl[] = { .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .driver_data = SAA7134_BOARD_UNKNOWN, - }, { - .vendor = PCI_VENDOR_ID_PHILIPS, - .device = PCI_DEVICE_ID_PHILIPS_SAA7133, - .subvendor = 0x5ace, /* Beholder Intl. Ltd. */ - .subdevice = 0x7190, - .driver_data = SAA7134_BOARD_BEHOLD_H7, - }, { - .vendor = PCI_VENDOR_ID_PHILIPS, - .device = PCI_DEVICE_ID_PHILIPS_SAA7133, - .subvendor = 0x5ace, /* Beholder Intl. Ltd. */ - .subdevice = 0x7090, - .driver_data = SAA7134_BOARD_BEHOLD_A7, },{ /* --- end of list --- */ } @@ -7551,22 +7551,22 @@ int saa7134_board_init2(struct saa7134_dev *dev) so we do not need to probe for a radio tuner device. */ if (dev->radio_type != UNSET) v4l2_i2c_new_subdev(&dev->v4l2_dev, - &dev->i2c_adap, NULL, "tuner", + &dev->i2c_adap, "tuner", dev->radio_addr, NULL); if (has_demod) v4l2_i2c_new_subdev(&dev->v4l2_dev, - &dev->i2c_adap, NULL, "tuner", + &dev->i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); if (dev->tuner_addr == ADDR_UNSET) { enum v4l2_i2c_tuner_type type = has_demod ? 
ADDRS_TV_WITH_DEMOD : ADDRS_TV; v4l2_i2c_new_subdev(&dev->v4l2_dev, - &dev->i2c_adap, NULL, "tuner", + &dev->i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(type)); } else { v4l2_i2c_new_subdev(&dev->v4l2_dev, - &dev->i2c_adap, NULL, "tuner", + &dev->i2c_adap, "tuner", dev->tuner_addr, NULL); } } diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c index 764d7d219fe..756a2781226 100644 --- a/drivers/media/video/saa7134/saa7134-core.c +++ b/drivers/media/video/saa7134/saa7134-core.c @@ -991,7 +991,7 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev, if (card_is_empress(dev)) { struct v4l2_subdev *sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - NULL, "saa6752hs", + "saa6752hs", saa7134_boards[dev->board].empress_addr, NULL); if (sd) @@ -1002,7 +1002,7 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev, struct v4l2_subdev *sd; sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, - &dev->i2c_adap, NULL, "saa6588", + &dev->i2c_adap, "saa6588", 0, I2C_ADDRS(saa7134_boards[dev->board].rds_addr)); if (sd) { printk(KERN_INFO "%s: found RDS decoder\n", dev->name); diff --git a/drivers/media/video/saa7134/saa7134-empress.c b/drivers/media/video/saa7134/saa7134-empress.c index 1467a30a434..b890aafe7d6 100644 --- a/drivers/media/video/saa7134/saa7134-empress.c +++ b/drivers/media/video/saa7134/saa7134-empress.c @@ -21,7 +21,6 @@ #include <linux/list.h> #include <linux/module.h> #include <linux/kernel.h> -#include <linux/smp_lock.h> #include <linux/delay.h> #include "saa7134-reg.h" diff --git a/drivers/media/video/saa7164/saa7164.h b/drivers/media/video/saa7164/saa7164.h index 1d9c5cbbbc5..041ae8e20f6 100644 --- a/drivers/media/video/saa7164/saa7164.h +++ b/drivers/media/video/saa7164/saa7164.h @@ -58,7 +58,6 @@ #include <media/tveeprom.h> #include <media/videobuf-dma-sg.h> #include <media/videobuf-dvb.h> -#include <linux/smp_lock.h> #include <dvb_demux.h> #include <dvb_frontend.h> #include <dvb_net.h> diff --git a/drivers/media/video/se401.c b/drivers/media/video/se401.c index 41d0166c0f9..41360d7c3e9 100644 --- a/drivers/media/video/se401.c +++ b/drivers/media/video/se401.c @@ -31,7 +31,6 @@ static const char version[] = "0.24"; #include <linux/init.h> #include <linux/vmalloc.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/pagemap.h> #include <linux/usb.h> #include "se401.h" @@ -951,9 +950,9 @@ static int se401_open(struct file *file) struct usb_se401 *se401 = (struct usb_se401 *)dev; int err = 0; - lock_kernel(); + mutex_lock(&se401->lock); if (se401->user) { - unlock_kernel(); + mutex_unlock(&se401->lock); return -EBUSY; } se401->fbuf = rvmalloc(se401->maxframesize * SE401_NUMFRAMES); @@ -962,7 +961,7 @@ static int se401_open(struct file *file) else err = -ENOMEM; se401->user = !err; - unlock_kernel(); + mutex_unlock(&se401->lock); return err; } diff --git a/drivers/media/video/sh_vou.c b/drivers/media/video/sh_vou.c index 0f4906136b8..07cf0c6c7c1 100644 --- a/drivers/media/video/sh_vou.c +++ b/drivers/media/video/sh_vou.c @@ -75,6 +75,7 @@ struct sh_vou_device { int pix_idx; struct videobuf_buffer *active; enum sh_vou_status status; + struct mutex fop_lock; }; struct sh_vou_file { @@ -235,7 +236,7 @@ static void free_buffer(struct videobuf_queue *vq, struct videobuf_buffer *vb) vb->state = VIDEOBUF_NEEDS_INIT; } -/* Locking: caller holds vq->vb_lock mutex */ +/* Locking: caller holds fop_lock mutex */ static int sh_vou_buf_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size) { @@ -257,7 
+258,7 @@ static int sh_vou_buf_setup(struct videobuf_queue *vq, unsigned int *count, return 0; } -/* Locking: caller holds vq->vb_lock mutex */ +/* Locking: caller holds fop_lock mutex */ static int sh_vou_buf_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb, enum v4l2_field field) @@ -306,7 +307,7 @@ static int sh_vou_buf_prepare(struct videobuf_queue *vq, return 0; } -/* Locking: caller holds vq->vb_lock mutex and vq->irqlock spinlock */ +/* Locking: caller holds fop_lock mutex and vq->irqlock spinlock */ static void sh_vou_buf_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) { @@ -1190,7 +1191,7 @@ static int sh_vou_open(struct file *file) V4L2_BUF_TYPE_VIDEO_OUTPUT, V4L2_FIELD_NONE, sizeof(struct videobuf_buffer), vdev, - NULL); + &vou_dev->fop_lock); return 0; } @@ -1292,7 +1293,7 @@ static const struct v4l2_file_operations sh_vou_fops = { .owner = THIS_MODULE, .open = sh_vou_open, .release = sh_vou_release, - .ioctl = video_ioctl2, + .unlocked_ioctl = video_ioctl2, .mmap = sh_vou_mmap, .poll = sh_vou_poll, }; @@ -1331,6 +1332,7 @@ static int __devinit sh_vou_probe(struct platform_device *pdev) INIT_LIST_HEAD(&vou_dev->queue); spin_lock_init(&vou_dev->lock); + mutex_init(&vou_dev->fop_lock); atomic_set(&vou_dev->use_count, 0); vou_dev->pdata = vou_pdata; vou_dev->status = SH_VOU_IDLE; @@ -1388,6 +1390,7 @@ static int __devinit sh_vou_probe(struct platform_device *pdev) vdev->tvnorms |= V4L2_STD_PAL; vdev->v4l2_dev = &vou_dev->v4l2_dev; vdev->release = video_device_release; + vdev->lock = &vou_dev->fop_lock; vou_dev->vdev = vdev; video_set_drvdata(vdev, vou_dev); @@ -1406,7 +1409,7 @@ static int __devinit sh_vou_probe(struct platform_device *pdev) goto ereset; subdev = v4l2_i2c_new_subdev_board(&vou_dev->v4l2_dev, i2c_adap, - NULL, vou_pdata->board_info, NULL); + vou_pdata->board_info, NULL); if (!subdev) { ret = -ENOMEM; goto ei2cnd; diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c index 28e19daadec..f49fbfb7dc1 100644 --- a/drivers/media/video/sn9c102/sn9c102_core.c +++ b/drivers/media/video/sn9c102/sn9c102_core.c @@ -3238,7 +3238,7 @@ static const struct v4l2_file_operations sn9c102_fops = { .owner = THIS_MODULE, .open = sn9c102_open, .release = sn9c102_release, - .ioctl = sn9c102_ioctl, + .unlocked_ioctl = sn9c102_ioctl, .read = sn9c102_read, .poll = sn9c102_poll, .mmap = sn9c102_mmap, diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c index 43848a751d1..335120c2021 100644 --- a/drivers/media/video/soc_camera.c +++ b/drivers/media/video/soc_camera.c @@ -896,7 +896,7 @@ static int soc_camera_init_i2c(struct soc_camera_device *icd, icl->board_info->platform_data = icd; subdev = v4l2_i2c_new_subdev_board(&ici->v4l2_dev, adap, - NULL, icl->board_info, NULL); + icl->board_info, NULL); if (!subdev) goto ei2cnd; diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c index f07a0f6b71c..b5afe5f841c 100644 --- a/drivers/media/video/stk-webcam.c +++ b/drivers/media/video/stk-webcam.c @@ -27,7 +27,6 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/usb.h> #include <linux/mm.h> @@ -673,14 +672,11 @@ static int v4l_stk_open(struct file *fp) vdev = video_devdata(fp); dev = vdev_to_camera(vdev); - lock_kernel(); if (dev == NULL || !is_present(dev)) { - unlock_kernel(); return -ENXIO; } fp->private_data = dev; usb_autopm_get_interface(dev->interface); - unlock_kernel(); return 0; } diff --git 
a/drivers/media/video/tlg2300/pd-main.c b/drivers/media/video/tlg2300/pd-main.c index 4555f4a5f4c..c91424c0c13 100644 --- a/drivers/media/video/tlg2300/pd-main.c +++ b/drivers/media/video/tlg2300/pd-main.c @@ -36,7 +36,6 @@ #include <linux/string.h> #include <linux/types.h> #include <linux/firmware.h> -#include <linux/smp_lock.h> #include "vendorcmds.h" #include "pd-common.h" @@ -485,15 +484,11 @@ static void poseidon_disconnect(struct usb_interface *interface) /*unregister v4l2 device */ v4l2_device_unregister(&pd->v4l2_dev); - lock_kernel(); - { - pd_dvb_usb_device_exit(pd); - poseidon_fm_exit(pd); + pd_dvb_usb_device_exit(pd); + poseidon_fm_exit(pd); - poseidon_audio_free(pd); - pd_video_exit(pd); - } - unlock_kernel(); + poseidon_audio_free(pd); + pd_video_exit(pd); usb_set_intfdata(interface, NULL); kref_put(&pd->kref, poseidon_delete); diff --git a/drivers/media/video/usbvideo/vicam.c b/drivers/media/video/usbvideo/vicam.c index 5d6fd01f918..dc17cce2fbb 100644 --- a/drivers/media/video/usbvideo/vicam.c +++ b/drivers/media/video/usbvideo/vicam.c @@ -43,7 +43,6 @@ #include <linux/vmalloc.h> #include <linux/mm.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/mutex.h> #include <linux/firmware.h> #include <linux/ihex.h> @@ -483,29 +482,28 @@ vicam_open(struct file *file) return -EINVAL; } - /* the videodev_lock held above us protects us from - * simultaneous opens...for now. we probably shouldn't - * rely on this fact forever. + /* cam_lock/open_count protects us from simultaneous opens + * ... for now. we probably shouldn't rely on this fact forever. */ - lock_kernel(); + mutex_lock(&cam->cam_lock); if (cam->open_count > 0) { printk(KERN_INFO "vicam_open called on already opened camera"); - unlock_kernel(); + mutex_unlock(&cam->cam_lock); return -EBUSY; } cam->raw_image = kmalloc(VICAM_MAX_READ_SIZE, GFP_KERNEL); if (!cam->raw_image) { - unlock_kernel(); + mutex_unlock(&cam->cam_lock); return -ENOMEM; } cam->framebuf = rvmalloc(VICAM_MAX_FRAME_SIZE * VICAM_FRAMES); if (!cam->framebuf) { kfree(cam->raw_image); - unlock_kernel(); + mutex_unlock(&cam->cam_lock); return -ENOMEM; } @@ -513,10 +511,17 @@ vicam_open(struct file *file) if (!cam->cntrlbuf) { kfree(cam->raw_image); rvfree(cam->framebuf, VICAM_MAX_FRAME_SIZE * VICAM_FRAMES); - unlock_kernel(); + mutex_unlock(&cam->cam_lock); return -ENOMEM; } + cam->needsDummyRead = 1; + cam->open_count++; + + file->private_data = cam; + mutex_unlock(&cam->cam_lock); + + // First upload firmware, then turn the camera on if (!cam->is_initialized) { @@ -527,12 +532,6 @@ vicam_open(struct file *file) set_camera_power(cam, 1); - cam->needsDummyRead = 1; - cam->open_count++; - - file->private_data = cam; - unlock_kernel(); - return 0; } diff --git a/drivers/media/video/usbvision/usbvision-i2c.c b/drivers/media/video/usbvision/usbvision-i2c.c index e3bbae26e3c..81dd53bb526 100644 --- a/drivers/media/video/usbvision/usbvision-i2c.c +++ b/drivers/media/video/usbvision/usbvision-i2c.c @@ -251,7 +251,7 @@ int usbvision_i2c_register(struct usb_usbvision *usbvision) hit-and-miss. 
*/ mdelay(10); v4l2_i2c_new_subdev(&usbvision->v4l2_dev, - &usbvision->i2c_adap, NULL, + &usbvision->i2c_adap, "saa7115_auto", 0, saa711x_addrs); break; } @@ -261,14 +261,14 @@ int usbvision_i2c_register(struct usb_usbvision *usbvision) struct tuner_setup tun_setup; sd = v4l2_i2c_new_subdev(&usbvision->v4l2_dev, - &usbvision->i2c_adap, NULL, + &usbvision->i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); /* depending on whether we found a demod or not, select the tuner type. */ type = sd ? ADDRS_TV_WITH_DEMOD : ADDRS_TV; sd = v4l2_i2c_new_subdev(&usbvision->v4l2_dev, - &usbvision->i2c_adap, NULL, + &usbvision->i2c_adap, "tuner", 0, v4l2_i2c_tuner_addrs(type)); if (sd == NULL) diff --git a/drivers/media/video/usbvision/usbvision-video.c b/drivers/media/video/usbvision/usbvision-video.c index db6b828594f..011c0c38699 100644 --- a/drivers/media/video/usbvision/usbvision-video.c +++ b/drivers/media/video/usbvision/usbvision-video.c @@ -50,7 +50,6 @@ #include <linux/list.h> #include <linux/timer.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/vmalloc.h> diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c index f169f773667..59f8a9ad379 100644 --- a/drivers/media/video/uvc/uvc_ctrl.c +++ b/drivers/media/video/uvc/uvc_ctrl.c @@ -785,7 +785,7 @@ static void __uvc_find_control(struct uvc_entity *entity, __u32 v4l2_id, } } -struct uvc_control *uvc_find_control(struct uvc_video_chain *chain, +static struct uvc_control *uvc_find_control(struct uvc_video_chain *chain, __u32 v4l2_id, struct uvc_control_mapping **mapping) { struct uvc_control *ctrl = NULL; @@ -944,6 +944,52 @@ done: return ret; } +/* + * Mapping V4L2 controls to UVC controls can be straighforward if done well. + * Most of the UVC controls exist in V4L2, and can be mapped directly. Some + * must be grouped (for instance the Red Balance, Blue Balance and Do White + * Balance V4L2 controls use the White Balance Component UVC control) or + * otherwise translated. The approach we take here is to use a translation + * table for the controls that can be mapped directly, and handle the others + * manually. + */ +int uvc_query_v4l2_menu(struct uvc_video_chain *chain, + struct v4l2_querymenu *query_menu) +{ + struct uvc_menu_info *menu_info; + struct uvc_control_mapping *mapping; + struct uvc_control *ctrl; + u32 index = query_menu->index; + u32 id = query_menu->id; + int ret; + + memset(query_menu, 0, sizeof(*query_menu)); + query_menu->id = id; + query_menu->index = index; + + ret = mutex_lock_interruptible(&chain->ctrl_mutex); + if (ret < 0) + return -ERESTARTSYS; + + ctrl = uvc_find_control(chain, query_menu->id, &mapping); + if (ctrl == NULL || mapping->v4l2_type != V4L2_CTRL_TYPE_MENU) { + ret = -EINVAL; + goto done; + } + + if (query_menu->index >= mapping->menu_count) { + ret = -EINVAL; + goto done; + } + + menu_info = &mapping->menu_info[query_menu->index]; + strlcpy(query_menu->name, menu_info->name, sizeof query_menu->name); + +done: + mutex_unlock(&chain->ctrl_mutex); + return ret; +} + /* -------------------------------------------------------------------------- * Control transactions diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c index ed6d5449741..f14581bd707 100644 --- a/drivers/media/video/uvc/uvc_queue.c +++ b/drivers/media/video/uvc/uvc_queue.c @@ -90,6 +90,39 @@ void uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type, } /* + * Free the video buffers. 
+ * + * This function must be called with the queue lock held. + */ +static int __uvc_free_buffers(struct uvc_video_queue *queue) +{ + unsigned int i; + + for (i = 0; i < queue->count; ++i) { + if (queue->buffer[i].vma_use_count != 0) + return -EBUSY; + } + + if (queue->count) { + vfree(queue->mem); + queue->count = 0; + } + + return 0; +} + +int uvc_free_buffers(struct uvc_video_queue *queue) +{ + int ret; + + mutex_lock(&queue->mutex); + ret = __uvc_free_buffers(queue); + mutex_unlock(&queue->mutex); + + return ret; +} + +/* * Allocate the video buffers. * * Pages are reserved to make sure they will not be swapped, as they will be @@ -110,7 +143,7 @@ int uvc_alloc_buffers(struct uvc_video_queue *queue, unsigned int nbuffers, mutex_lock(&queue->mutex); - if ((ret = uvc_free_buffers(queue)) < 0) + if ((ret = __uvc_free_buffers(queue)) < 0) goto done; /* Bail out if no buffers should be allocated. */ @@ -152,28 +185,6 @@ done: } /* - * Free the video buffers. - * - * This function must be called with the queue lock held. - */ -int uvc_free_buffers(struct uvc_video_queue *queue) -{ - unsigned int i; - - for (i = 0; i < queue->count; ++i) { - if (queue->buffer[i].vma_use_count != 0) - return -EBUSY; - } - - if (queue->count) { - vfree(queue->mem); - queue->count = 0; - } - - return 0; -} - -/* * Check if buffers have been allocated. */ int uvc_queue_allocated(struct uvc_video_queue *queue) @@ -369,6 +380,82 @@ done: } /* + * VMA operations. + */ +static void uvc_vm_open(struct vm_area_struct *vma) +{ + struct uvc_buffer *buffer = vma->vm_private_data; + buffer->vma_use_count++; +} + +static void uvc_vm_close(struct vm_area_struct *vma) +{ + struct uvc_buffer *buffer = vma->vm_private_data; + buffer->vma_use_count--; +} + +static const struct vm_operations_struct uvc_vm_ops = { + .open = uvc_vm_open, + .close = uvc_vm_close, +}; + +/* + * Memory-map a video buffer. + * + * This function implements video buffers memory mapping and is intended to be + * used by the device mmap handler. + */ +int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma) +{ + struct uvc_buffer *uninitialized_var(buffer); + struct page *page; + unsigned long addr, start, size; + unsigned int i; + int ret = 0; + + start = vma->vm_start; + size = vma->vm_end - vma->vm_start; + + mutex_lock(&queue->mutex); + + for (i = 0; i < queue->count; ++i) { + buffer = &queue->buffer[i]; + if ((buffer->buf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff) + break; + } + + if (i == queue->count || size != queue->buf_size) { + ret = -EINVAL; + goto done; + } + + /* + * VM_IO marks the area as being an mmaped region for I/O to a + * device. It also prevents the region from being core dumped. + */ + vma->vm_flags |= VM_IO; + + addr = (unsigned long)queue->mem + buffer->buf.m.offset; + while (size > 0) { + page = vmalloc_to_page((void *)addr); + if ((ret = vm_insert_page(vma, start, page)) < 0) + goto done; + + start += PAGE_SIZE; + addr += PAGE_SIZE; + size -= PAGE_SIZE; + } + + vma->vm_ops = &uvc_vm_ops; + vma->vm_private_data = buffer; + uvc_vm_open(vma); + +done: + mutex_unlock(&queue->mutex); + return ret; +} + +/* * Poll the video queue. 
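The uvc_free_buffers() change above is the usual locked/unlocked helper split: the double-underscore variant assumes queue->mutex is already held, so uvc_alloc_buffers(), which takes the mutex itself, can reuse it without recursive locking. A minimal sketch of that idiom with hypothetical names (struct obj stands in for struct uvc_video_queue):

#include <linux/mutex.h>

/* Hypothetical object, standing in for struct uvc_video_queue. */
struct obj {
	struct mutex lock;
	unsigned int count;
	int busy;
};

/* Does the real work; caller must hold obj->lock. */
static int __obj_reset(struct obj *obj)
{
	if (obj->busy)
		return -EBUSY;
	obj->count = 0;
	return 0;
}

/* Public entry point: takes the lock itself. */
int obj_reset(struct obj *obj)
{
	int ret;

	mutex_lock(&obj->lock);
	ret = __obj_reset(obj);
	mutex_unlock(&obj->lock);
	return ret;
}

/* A second path that already holds the lock reuses the __ variant. */
int obj_resize(struct obj *obj, unsigned int count)
{
	int ret;

	mutex_lock(&obj->lock);
	ret = __obj_reset(obj);
	if (!ret)
		obj->count = count;
	mutex_unlock(&obj->lock);
	return ret;
}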
* * This function implements video queue polling and is intended to be used by diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c index 6d15de9b520..8cf61e8a634 100644 --- a/drivers/media/video/uvc/uvc_v4l2.c +++ b/drivers/media/video/uvc/uvc_v4l2.c @@ -101,40 +101,6 @@ done: */ /* - * Mapping V4L2 controls to UVC controls can be straighforward if done well. - * Most of the UVC controls exist in V4L2, and can be mapped directly. Some - * must be grouped (for instance the Red Balance, Blue Balance and Do White - * Balance V4L2 controls use the White Balance Component UVC control) or - * otherwise translated. The approach we take here is to use a translation - * table for the controls that can be mapped directly, and handle the others - * manually. - */ -static int uvc_v4l2_query_menu(struct uvc_video_chain *chain, - struct v4l2_querymenu *query_menu) -{ - struct uvc_menu_info *menu_info; - struct uvc_control_mapping *mapping; - struct uvc_control *ctrl; - u32 index = query_menu->index; - u32 id = query_menu->id; - - ctrl = uvc_find_control(chain, query_menu->id, &mapping); - if (ctrl == NULL || mapping->v4l2_type != V4L2_CTRL_TYPE_MENU) - return -EINVAL; - - if (query_menu->index >= mapping->menu_count) - return -EINVAL; - - memset(query_menu, 0, sizeof(*query_menu)); - query_menu->id = id; - query_menu->index = index; - - menu_info = &mapping->menu_info[query_menu->index]; - strlcpy(query_menu->name, menu_info->name, sizeof query_menu->name); - return 0; -} - -/* * Find the frame interval closest to the requested frame interval for the * given frame format and size. This should be done by the device as part of * the Video Probe and Commit negotiation, but some hardware don't implement @@ -260,12 +226,14 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream, * developers test their webcams with the Linux driver as well as with * the Windows driver). */ + mutex_lock(&stream->mutex); if (stream->dev->quirks & UVC_QUIRK_PROBE_EXTRAFIELDS) probe->dwMaxVideoFrameSize = stream->ctrl.dwMaxVideoFrameSize; /* Probe the device. 
*/ ret = uvc_probe_video(stream, probe); + mutex_unlock(&stream->mutex); if (ret < 0) goto done; @@ -289,14 +257,21 @@ done: static int uvc_v4l2_get_format(struct uvc_streaming *stream, struct v4l2_format *fmt) { - struct uvc_format *format = stream->cur_format; - struct uvc_frame *frame = stream->cur_frame; + struct uvc_format *format; + struct uvc_frame *frame; + int ret = 0; if (fmt->type != stream->type) return -EINVAL; - if (format == NULL || frame == NULL) - return -EINVAL; + mutex_lock(&stream->mutex); + format = stream->cur_format; + frame = stream->cur_frame; + + if (format == NULL || frame == NULL) { + ret = -EINVAL; + goto done; + } fmt->fmt.pix.pixelformat = format->fcc; fmt->fmt.pix.width = frame->wWidth; @@ -307,7 +282,9 @@ static int uvc_v4l2_get_format(struct uvc_streaming *stream, fmt->fmt.pix.colorspace = format->colorspace; fmt->fmt.pix.priv = 0; - return 0; +done: + mutex_unlock(&stream->mutex); + return ret; } static int uvc_v4l2_set_format(struct uvc_streaming *stream, @@ -321,18 +298,24 @@ static int uvc_v4l2_set_format(struct uvc_streaming *stream, if (fmt->type != stream->type) return -EINVAL; - if (uvc_queue_allocated(&stream->queue)) - return -EBUSY; - ret = uvc_v4l2_try_format(stream, fmt, &probe, &format, &frame); if (ret < 0) return ret; + mutex_lock(&stream->mutex); + + if (uvc_queue_allocated(&stream->queue)) { + ret = -EBUSY; + goto done; + } + memcpy(&stream->ctrl, &probe, sizeof probe); stream->cur_format = format; stream->cur_frame = frame; - return 0; +done: + mutex_unlock(&stream->mutex); + return ret; } static int uvc_v4l2_get_streamparm(struct uvc_streaming *stream, @@ -343,7 +326,10 @@ static int uvc_v4l2_get_streamparm(struct uvc_streaming *stream, if (parm->type != stream->type) return -EINVAL; + mutex_lock(&stream->mutex); numerator = stream->ctrl.dwFrameInterval; + mutex_unlock(&stream->mutex); + denominator = 10000000; uvc_simplify_fraction(&numerator, &denominator, 8, 333); @@ -370,7 +356,6 @@ static int uvc_v4l2_get_streamparm(struct uvc_streaming *stream, static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream, struct v4l2_streamparm *parm) { - struct uvc_frame *frame = stream->cur_frame; struct uvc_streaming_control probe; struct v4l2_fract timeperframe; uint32_t interval; @@ -379,28 +364,36 @@ static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream, if (parm->type != stream->type) return -EINVAL; - if (uvc_queue_streaming(&stream->queue)) - return -EBUSY; - if (parm->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) timeperframe = parm->parm.capture.timeperframe; else timeperframe = parm->parm.output.timeperframe; - memcpy(&probe, &stream->ctrl, sizeof probe); interval = uvc_fraction_to_interval(timeperframe.numerator, timeperframe.denominator); - uvc_trace(UVC_TRACE_FORMAT, "Setting frame interval to %u/%u (%u).\n", timeperframe.numerator, timeperframe.denominator, interval); - probe.dwFrameInterval = uvc_try_frame_interval(frame, interval); + + mutex_lock(&stream->mutex); + + if (uvc_queue_streaming(&stream->queue)) { + mutex_unlock(&stream->mutex); + return -EBUSY; + } + + memcpy(&probe, &stream->ctrl, sizeof probe); + probe.dwFrameInterval = + uvc_try_frame_interval(stream->cur_frame, interval); /* Probe the device with the new settings. */ ret = uvc_probe_video(stream, &probe); - if (ret < 0) + if (ret < 0) { + mutex_unlock(&stream->mutex); return ret; + } memcpy(&stream->ctrl, &probe, sizeof probe); + mutex_unlock(&stream->mutex); /* Return the actual frame period. 
*/ timeperframe.numerator = probe.dwFrameInterval; @@ -528,11 +521,9 @@ static int uvc_v4l2_release(struct file *file) if (uvc_has_privileges(handle)) { uvc_video_enable(stream, 0); - mutex_lock(&stream->queue.mutex); if (uvc_free_buffers(&stream->queue) < 0) uvc_printk(KERN_ERR, "uvc_v4l2_release: Unable to " "free buffers.\n"); - mutex_unlock(&stream->queue.mutex); } /* Release the file handle. */ @@ -624,7 +615,7 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg) } case VIDIOC_QUERYMENU: - return uvc_v4l2_query_menu(chain, arg); + return uvc_query_v4l2_menu(chain, arg); case VIDIOC_G_EXT_CTRLS: { @@ -905,15 +896,17 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg) case VIDIOC_CROPCAP: { struct v4l2_cropcap *ccap = arg; - struct uvc_frame *frame = stream->cur_frame; if (ccap->type != stream->type) return -EINVAL; ccap->bounds.left = 0; ccap->bounds.top = 0; - ccap->bounds.width = frame->wWidth; - ccap->bounds.height = frame->wHeight; + + mutex_lock(&stream->mutex); + ccap->bounds.width = stream->cur_frame->wWidth; + ccap->bounds.height = stream->cur_frame->wHeight; + mutex_unlock(&stream->mutex); ccap->defrect = ccap->bounds; @@ -930,8 +923,6 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg) case VIDIOC_REQBUFS: { struct v4l2_requestbuffers *rb = arg; - unsigned int bufsize = - stream->ctrl.dwMaxVideoFrameSize; if (rb->type != stream->type || rb->memory != V4L2_MEMORY_MMAP) @@ -940,7 +931,10 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg) if ((ret = uvc_acquire_privileges(handle)) < 0) return ret; - ret = uvc_alloc_buffers(&stream->queue, rb->count, bufsize); + mutex_lock(&stream->mutex); + ret = uvc_alloc_buffers(&stream->queue, rb->count, + stream->ctrl.dwMaxVideoFrameSize); + mutex_unlock(&stream->mutex); if (ret < 0) return ret; @@ -988,7 +982,9 @@ static long uvc_v4l2_do_ioctl(struct file *file, unsigned int cmd, void *arg) if (!uvc_has_privileges(handle)) return -EBUSY; + mutex_lock(&stream->mutex); ret = uvc_video_enable(stream, 1); + mutex_unlock(&stream->mutex); if (ret < 0) return ret; break; @@ -1068,79 +1064,14 @@ static ssize_t uvc_v4l2_read(struct file *file, char __user *data, return -EINVAL; } -/* - * VMA operations. - */ -static void uvc_vm_open(struct vm_area_struct *vma) -{ - struct uvc_buffer *buffer = vma->vm_private_data; - buffer->vma_use_count++; -} - -static void uvc_vm_close(struct vm_area_struct *vma) -{ - struct uvc_buffer *buffer = vma->vm_private_data; - buffer->vma_use_count--; -} - -static const struct vm_operations_struct uvc_vm_ops = { - .open = uvc_vm_open, - .close = uvc_vm_close, -}; - static int uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma) { struct uvc_fh *handle = file->private_data; struct uvc_streaming *stream = handle->stream; - struct uvc_video_queue *queue = &stream->queue; - struct uvc_buffer *uninitialized_var(buffer); - struct page *page; - unsigned long addr, start, size; - unsigned int i; - int ret = 0; uvc_trace(UVC_TRACE_CALLS, "uvc_v4l2_mmap\n"); - start = vma->vm_start; - size = vma->vm_end - vma->vm_start; - - mutex_lock(&queue->mutex); - - for (i = 0; i < queue->count; ++i) { - buffer = &queue->buffer[i]; - if ((buffer->buf.m.offset >> PAGE_SHIFT) == vma->vm_pgoff) - break; - } - - if (i == queue->count || size != queue->buf_size) { - ret = -EINVAL; - goto done; - } - - /* - * VM_IO marks the area as being an mmaped region for I/O to a - * device. 
It also prevents the region from being core dumped. - */ - vma->vm_flags |= VM_IO; - - addr = (unsigned long)queue->mem + buffer->buf.m.offset; - while (size > 0) { - page = vmalloc_to_page((void *)addr); - if ((ret = vm_insert_page(vma, start, page)) < 0) - goto done; - - start += PAGE_SIZE; - addr += PAGE_SIZE; - size -= PAGE_SIZE; - } - - vma->vm_ops = &uvc_vm_ops; - vma->vm_private_data = buffer; - uvc_vm_open(vma); - -done: - mutex_unlock(&queue->mutex); - return ret; + return uvc_queue_mmap(&stream->queue, vma); } static unsigned int uvc_v4l2_poll(struct file *file, poll_table *wait) @@ -1157,7 +1088,7 @@ const struct v4l2_file_operations uvc_fops = { .owner = THIS_MODULE, .open = uvc_v4l2_open, .release = uvc_v4l2_release, - .ioctl = uvc_v4l2_ioctl, + .unlocked_ioctl = uvc_v4l2_ioctl, .read = uvc_v4l2_read, .mmap = uvc_v4l2_mmap, .poll = uvc_v4l2_poll, diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c index 5555f010283..5673d673504 100644 --- a/drivers/media/video/uvc/uvc_video.c +++ b/drivers/media/video/uvc/uvc_video.c @@ -293,8 +293,6 @@ int uvc_probe_video(struct uvc_streaming *stream, unsigned int i; int ret; - mutex_lock(&stream->mutex); - /* Perform probing. The device should adjust the requested values * according to its capabilities. However, some devices, namely the * first generation UVC Logitech webcams, don't implement the Video @@ -346,7 +344,6 @@ int uvc_probe_video(struct uvc_streaming *stream, } done: - mutex_unlock(&stream->mutex); return ret; } diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h index d97cf6d6a4f..45f01e7e13d 100644 --- a/drivers/media/video/uvc/uvcvideo.h +++ b/drivers/media/video/uvc/uvcvideo.h @@ -436,7 +436,9 @@ struct uvc_streaming { struct uvc_streaming_control ctrl; struct uvc_format *cur_format; struct uvc_frame *cur_frame; - + /* Protect access to ctrl, cur_format, cur_frame and hardware video + * probe control. + */ struct mutex mutex; unsigned int frozen : 1; @@ -574,6 +576,8 @@ extern int uvc_queue_enable(struct uvc_video_queue *queue, int enable); extern void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect); extern struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue, struct uvc_buffer *buf); +extern int uvc_queue_mmap(struct uvc_video_queue *queue, + struct vm_area_struct *vma); extern unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file, poll_table *wait); extern int uvc_queue_allocated(struct uvc_video_queue *queue); @@ -606,10 +610,10 @@ extern int uvc_status_suspend(struct uvc_device *dev); extern int uvc_status_resume(struct uvc_device *dev); /* Controls */ -extern struct uvc_control *uvc_find_control(struct uvc_video_chain *chain, - __u32 v4l2_id, struct uvc_control_mapping **mapping); extern int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain, struct v4l2_queryctrl *v4l2_ctrl); +extern int uvc_query_v4l2_menu(struct uvc_video_chain *chain, + struct v4l2_querymenu *query_menu); extern int uvc_ctrl_add_mapping(struct uvc_video_chain *chain, const struct uvc_control_mapping *mapping); diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c index 9294282b5ad..b5eb1f3950b 100644 --- a/drivers/media/video/v4l2-common.c +++ b/drivers/media/video/v4l2-common.c @@ -368,18 +368,15 @@ EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init); /* Load an i2c sub-device. 
*/ struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev, - struct i2c_adapter *adapter, const char *module_name, - struct i2c_board_info *info, const unsigned short *probe_addrs) + struct i2c_adapter *adapter, struct i2c_board_info *info, + const unsigned short *probe_addrs) { struct v4l2_subdev *sd = NULL; struct i2c_client *client; BUG_ON(!v4l2_dev); - if (module_name) - request_module(module_name); - else - request_module(I2C_MODULE_PREFIX "%s", info->type); + request_module(I2C_MODULE_PREFIX "%s", info->type); /* Create the i2c client */ if (info->addr == 0 && probe_addrs) @@ -432,8 +429,7 @@ error: EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board); struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev, - struct i2c_adapter *adapter, - const char *module_name, const char *client_type, + struct i2c_adapter *adapter, const char *client_type, int irq, void *platform_data, u8 addr, const unsigned short *probe_addrs) { @@ -447,8 +443,7 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev, info.irq = irq; info.platform_data = platform_data; - return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, module_name, - &info, probe_addrs); + return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, &info, probe_addrs); } EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_cfg); diff --git a/drivers/media/video/v4l2-compat-ioctl32.c b/drivers/media/video/v4l2-compat-ioctl32.c index 86294ed35c9..e30e8dfb620 100644 --- a/drivers/media/video/v4l2-compat-ioctl32.c +++ b/drivers/media/video/v4l2-compat-ioctl32.c @@ -18,7 +18,6 @@ #include <linux/videodev.h> #include <linux/videodev2.h> #include <linux/module.h> -#include <linux/smp_lock.h> #include <media/v4l2-ioctl.h> #ifdef CONFIG_COMPAT diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c index 0ca7978654b..359e23290a7 100644 --- a/drivers/media/video/v4l2-dev.c +++ b/drivers/media/video/v4l2-dev.c @@ -25,7 +25,6 @@ #include <linux/init.h> #include <linux/kmod.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <asm/uaccess.h> #include <asm/system.h> @@ -187,12 +186,12 @@ static ssize_t v4l2_read(struct file *filp, char __user *buf, size_t sz, loff_t *off) { struct video_device *vdev = video_devdata(filp); - int ret = -EIO; + int ret = -ENODEV; if (!vdev->fops->read) return -EINVAL; - if (vdev->lock) - mutex_lock(vdev->lock); + if (vdev->lock && mutex_lock_interruptible(vdev->lock)) + return -ERESTARTSYS; if (video_is_registered(vdev)) ret = vdev->fops->read(filp, buf, sz, off); if (vdev->lock) @@ -204,12 +203,12 @@ static ssize_t v4l2_write(struct file *filp, const char __user *buf, size_t sz, loff_t *off) { struct video_device *vdev = video_devdata(filp); - int ret = -EIO; + int ret = -ENODEV; if (!vdev->fops->write) return -EINVAL; - if (vdev->lock) - mutex_lock(vdev->lock); + if (vdev->lock && mutex_lock_interruptible(vdev->lock)) + return -ERESTARTSYS; if (video_is_registered(vdev)) ret = vdev->fops->write(filp, buf, sz, off); if (vdev->lock) @@ -220,10 +219,10 @@ static ssize_t v4l2_write(struct file *filp, const char __user *buf, static unsigned int v4l2_poll(struct file *filp, struct poll_table_struct *poll) { struct video_device *vdev = video_devdata(filp); - int ret = DEFAULT_POLLMASK; + int ret = POLLERR | POLLHUP; if (!vdev->fops->poll) - return ret; + return DEFAULT_POLLMASK; if (vdev->lock) mutex_lock(vdev->lock); if (video_is_registered(vdev)) @@ -239,18 +238,45 @@ static long v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) int ret = -ENODEV; if 
(vdev->fops->unlocked_ioctl) { - if (vdev->lock) - mutex_lock(vdev->lock); + if (vdev->lock && mutex_lock_interruptible(vdev->lock)) + return -ERESTARTSYS; if (video_is_registered(vdev)) ret = vdev->fops->unlocked_ioctl(filp, cmd, arg); if (vdev->lock) mutex_unlock(vdev->lock); } else if (vdev->fops->ioctl) { - /* TODO: convert all drivers to unlocked_ioctl */ - lock_kernel(); + /* This code path is a replacement for the BKL. It is a major + * hack but it will have to do for those drivers that are not + * yet converted to use unlocked_ioctl. + * + * There are two options: if the driver implements struct + * v4l2_device, then the lock defined there is used to + * serialize the ioctls. Otherwise the v4l2 core lock defined + * below is used. This lock is really bad since it serializes + * completely independent devices. + * + * Both variants suffer from the same problem: if the driver + * sleeps, then it blocks all ioctls since the lock is still + * held. This is very common for VIDIOC_DQBUF since that + * normally waits for a frame to arrive. As a result any other + * ioctl calls will proceed very, very slowly since each call + * will have to wait for the VIDIOC_QBUF to finish. Things that + * should take 0.01s may now take 10-20 seconds. + * + * The workaround is to *not* take the lock for VIDIOC_DQBUF. + * This actually works OK for videobuf-based drivers, since + * videobuf will take its own internal lock. + */ + static DEFINE_MUTEX(v4l2_ioctl_mutex); + struct mutex *m = vdev->v4l2_dev ? + &vdev->v4l2_dev->ioctl_lock : &v4l2_ioctl_mutex; + + if (cmd != VIDIOC_DQBUF && mutex_lock_interruptible(m)) + return -ERESTARTSYS; if (video_is_registered(vdev)) ret = vdev->fops->ioctl(filp, cmd, arg); - unlock_kernel(); + if (cmd != VIDIOC_DQBUF) + mutex_unlock(m); } else ret = -ENOTTY; @@ -264,8 +290,8 @@ static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm) if (!vdev->fops->mmap) return ret; - if (vdev->lock) - mutex_lock(vdev->lock); + if (vdev->lock && mutex_lock_interruptible(vdev->lock)) + return -ERESTARTSYS; if (video_is_registered(vdev)) ret = vdev->fops->mmap(filp, vm); if (vdev->lock) @@ -283,7 +309,7 @@ static int v4l2_open(struct inode *inode, struct file *filp) mutex_lock(&videodev_lock); vdev = video_devdata(filp); /* return ENODEV if the video device has already been removed. */ - if (vdev == NULL) { + if (vdev == NULL || !video_is_registered(vdev)) { mutex_unlock(&videodev_lock); return -ENODEV; } @@ -291,8 +317,10 @@ static int v4l2_open(struct inode *inode, struct file *filp) video_get(vdev); mutex_unlock(&videodev_lock); if (vdev->fops->open) { - if (vdev->lock) - mutex_lock(vdev->lock); + if (vdev->lock && mutex_lock_interruptible(vdev->lock)) { + ret = -ERESTARTSYS; + goto err; + } if (video_is_registered(vdev)) ret = vdev->fops->open(filp); else @@ -301,6 +329,7 @@ static int v4l2_open(struct inode *inode, struct file *filp) mutex_unlock(vdev->lock); } +err: /* decrease the refcount in case of an error */ if (ret) video_put(vdev); @@ -595,7 +624,12 @@ void video_unregister_device(struct video_device *vdev) if (!vdev || !video_is_registered(vdev)) return; + mutex_lock(&videodev_lock); + /* This must be in a critical section to prevent a race with v4l2_open. + * Once this bit has been cleared video_get may never be called again. 
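The preferred way out of the serialization problem described in the comment above is the conversion sh_vou receives earlier in this diff (and sn9c102, uvcvideo, w9966 and zoran at least move to unlocked_ioctl): give the driver its own mutex, point vdev->lock at it and drop .ioctl, so the v4l2 core serializes open/read/write/mmap/ioctl on that per-device lock. A minimal sketch of the recipe for a hypothetical driver; foo_ioctl_ops and the usual release/v4l2_dev wiring are assumed to exist elsewhere:

#include <linux/module.h>
#include <linux/mutex.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>

/* Hypothetical driver state. */
struct foo_dev {
	struct video_device vdev;
	struct mutex fop_lock;
};

static const struct v4l2_file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= video_ioctl2,	/* no .ioctl, so the BKL-replacement path is never taken */
};

static int foo_register(struct foo_dev *foo)
{
	mutex_init(&foo->fop_lock);
	foo->vdev.fops = &foo_fops;
	foo->vdev.ioctl_ops = &foo_ioctl_ops;	/* assumed defined elsewhere */
	foo->vdev.lock = &foo->fop_lock;	/* core locks this around each file op */
	return video_register_device(&foo->vdev, VFL_TYPE_GRABBER, -1);
}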
+ */ clear_bit(V4L2_FL_REGISTERED, &vdev->flags); + mutex_unlock(&videodev_lock); device_unregister(&vdev->dev); } EXPORT_SYMBOL(video_unregister_device); diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c index 0b08f96b74a..7fe6f92af48 100644 --- a/drivers/media/video/v4l2-device.c +++ b/drivers/media/video/v4l2-device.c @@ -35,6 +35,7 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev) INIT_LIST_HEAD(&v4l2_dev->subdevs); spin_lock_init(&v4l2_dev->lock); + mutex_init(&v4l2_dev->ioctl_lock); v4l2_dev->dev = dev; if (dev == NULL) { /* If dev == NULL, then name must be filled in by the caller */ diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c index 02a21bccae1..9eda7cc0312 100644 --- a/drivers/media/video/via-camera.c +++ b/drivers/media/video/via-camera.c @@ -1360,7 +1360,7 @@ static __devinit int viacam_probe(struct platform_device *pdev) */ sensor_adapter = viafb_find_i2c_adapter(VIA_PORT_31); cam->sensor = v4l2_i2c_new_subdev(&cam->v4l2_dev, sensor_adapter, - "ov7670", "ov7670", 0x42 >> 1, NULL); + "ov7670", 0x42 >> 1, NULL); if (cam->sensor == NULL) { dev_err(&pdev->dev, "Unable to find the sensor!\n"); ret = -ENODEV; diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c index e5e005dc155..7e7eec48f8b 100644 --- a/drivers/media/video/vino.c +++ b/drivers/media/video/vino.c @@ -4334,10 +4334,10 @@ static int __init vino_module_init(void) vino_drvdata->decoder = v4l2_i2c_new_subdev(&vino_drvdata->v4l2_dev, &vino_i2c_adapter, - NULL, "saa7191", 0, I2C_ADDRS(0x45)); + "saa7191", 0, I2C_ADDRS(0x45)); vino_drvdata->camera = v4l2_i2c_new_subdev(&vino_drvdata->v4l2_dev, &vino_i2c_adapter, - NULL, "indycam", 0, I2C_ADDRS(0x2b)); + "indycam", 0, I2C_ADDRS(0x2b)); dprintk("init complete!\n"); diff --git a/drivers/media/video/w9966.c b/drivers/media/video/w9966.c index 635420d8d84..019ee206cbe 100644 --- a/drivers/media/video/w9966.c +++ b/drivers/media/video/w9966.c @@ -815,7 +815,7 @@ out: static const struct v4l2_file_operations w9966_fops = { .owner = THIS_MODULE, - .ioctl = video_ioctl2, + .unlocked_ioctl = video_ioctl2, .read = w9966_v4l_read, }; diff --git a/drivers/media/video/zoran/zoran.h b/drivers/media/video/zoran/zoran.h index 37fe16181e3..27f05551183 100644 --- a/drivers/media/video/zoran/zoran.h +++ b/drivers/media/video/zoran/zoran.h @@ -388,6 +388,7 @@ struct zoran { struct videocodec *vfe; /* video front end */ struct mutex resource_lock; /* prevent evil stuff */ + struct mutex other_lock; /* please merge with above */ u8 initialized; /* flag if zoran has been correctly initialized */ int user; /* number of current users */ diff --git a/drivers/media/video/zoran/zoran_card.c b/drivers/media/video/zoran/zoran_card.c index 0aac376c3f7..e520abf9f4c 100644 --- a/drivers/media/video/zoran/zoran_card.c +++ b/drivers/media/video/zoran/zoran_card.c @@ -1227,6 +1227,7 @@ static int __devinit zoran_probe(struct pci_dev *pdev, snprintf(ZR_DEVNAME(zr), sizeof(ZR_DEVNAME(zr)), "MJPEG[%u]", zr->id); spin_lock_init(&zr->spinlock); mutex_init(&zr->resource_lock); + mutex_init(&zr->other_lock); if (pci_enable_device(pdev)) goto zr_unreg; pci_read_config_byte(zr->pci_dev, PCI_CLASS_REVISION, &zr->revision); @@ -1342,13 +1343,12 @@ static int __devinit zoran_probe(struct pci_dev *pdev, } zr->decoder = v4l2_i2c_new_subdev(&zr->v4l2_dev, - &zr->i2c_adapter, NULL, zr->card.i2c_decoder, + &zr->i2c_adapter, zr->card.i2c_decoder, 0, zr->card.addrs_decoder); if (zr->card.i2c_encoder) zr->encoder = 
v4l2_i2c_new_subdev(&zr->v4l2_dev, - &zr->i2c_adapter, - NULL, zr->card.i2c_encoder, + &zr->i2c_adapter, zr->card.i2c_encoder, 0, zr->card.addrs_encoder); dprintk(2, diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c index 401082b853f..67a52e844ae 100644 --- a/drivers/media/video/zoran/zoran_driver.c +++ b/drivers/media/video/zoran/zoran_driver.c @@ -49,7 +49,6 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/pci.h> #include <linux/vmalloc.h> #include <linux/wait.h> @@ -913,7 +912,7 @@ static int zoran_open(struct file *file) dprintk(2, KERN_INFO "%s: %s(%s, pid=[%d]), users(-)=%d\n", ZR_DEVNAME(zr), __func__, current->comm, task_pid_nr(current), zr->user + 1); - lock_kernel(); + mutex_lock(&zr->other_lock); if (zr->user >= 2048) { dprintk(1, KERN_ERR "%s: too many users (%d) on device\n", @@ -963,14 +962,14 @@ static int zoran_open(struct file *file) file->private_data = fh; fh->zr = zr; zoran_open_init_session(fh); - unlock_kernel(); + mutex_unlock(&zr->other_lock); return 0; fail_fh: kfree(fh); fail_unlock: - unlock_kernel(); + mutex_unlock(&zr->other_lock); dprintk(2, KERN_INFO "%s: open failed (%d), users(-)=%d\n", ZR_DEVNAME(zr), res, zr->user); @@ -989,7 +988,7 @@ zoran_close(struct file *file) /* kernel locks (fs/device.c), so don't do that ourselves * (prevents deadlocks) */ - /*mutex_lock(&zr->resource_lock);*/ + mutex_lock(&zr->other_lock); zoran_close_end_session(fh); @@ -1023,6 +1022,7 @@ zoran_close(struct file *file) encoder_call(zr, video, s_routing, 2, 0, 0); } } + mutex_unlock(&zr->other_lock); file->private_data = NULL; kfree(fh->overlay_mask); @@ -3370,11 +3370,26 @@ static const struct v4l2_ioctl_ops zoran_ioctl_ops = { #endif }; +/* please use zr->resource_lock consistently and kill this wrapper */ +static long zoran_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct zoran_fh *fh = file->private_data; + struct zoran *zr = fh->zr; + int ret; + + mutex_lock(&zr->other_lock); + ret = video_ioctl2(file, cmd, arg); + mutex_unlock(&zr->other_lock); + + return ret; +} + static const struct v4l2_file_operations zoran_fops = { .owner = THIS_MODULE, .open = zoran_open, .release = zoran_close, - .ioctl = video_ioctl2, + .unlocked_ioctl = zoran_ioctl, .read = zoran_read, .write = zoran_write, .mmap = zoran_mmap, diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c index e15220ff52f..d784c36707c 100644 --- a/drivers/message/fusion/mptfc.c +++ b/drivers/message/fusion/mptfc.c @@ -97,8 +97,7 @@ static u8 mptfcInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; static int mptfc_target_alloc(struct scsi_target *starget); static int mptfc_slave_alloc(struct scsi_device *sdev); -static int mptfc_qcmd(struct scsi_cmnd *SCpnt, - void (*done)(struct scsi_cmnd *)); +static int mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt); static void mptfc_target_destroy(struct scsi_target *starget); static void mptfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout); static void __devexit mptfc_remove(struct pci_dev *pdev); @@ -650,7 +649,7 @@ mptfc_slave_alloc(struct scsi_device *sdev) } static int -mptfc_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) +mptfc_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { struct mptfc_rport_info *ri; struct fc_rport *rport = starget_to_rport(scsi_target(SCpnt->device)); @@ -681,6 +680,8 @@ mptfc_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct 
scsi_cmnd *)) return mptscsih_qcmd(SCpnt,done); } +static DEF_SCSI_QCMD(mptfc_qcmd) + /* * mptfc_display_port_link_speed - displaying link speed * @ioc: Pointer to MPT_ADAPTER structure diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 83a5115f025..d48c2c6058e 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -1889,7 +1889,7 @@ mptsas_slave_alloc(struct scsi_device *sdev) } static int -mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) +mptsas_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { MPT_SCSI_HOST *hd; MPT_ADAPTER *ioc; @@ -1913,6 +1913,8 @@ mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) return mptscsih_qcmd(SCpnt,done); } +static DEF_SCSI_QCMD(mptsas_qcmd) + /** * mptsas_mptsas_eh_timed_out - resets the scsi_cmnd timeout * if the device under question is currently in the diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c index 0e2803155ae..6d9568d2ec5 100644 --- a/drivers/message/fusion/mptspi.c +++ b/drivers/message/fusion/mptspi.c @@ -780,7 +780,7 @@ static int mptspi_slave_configure(struct scsi_device *sdev) } static int -mptspi_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) +mptspi_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { struct _MPT_SCSI_HOST *hd = shost_priv(SCpnt->device->host); VirtDevice *vdevice = SCpnt->device->hostdata; @@ -805,6 +805,8 @@ mptspi_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) return mptscsih_qcmd(SCpnt,done); } +static DEF_SCSI_QCMD(mptspi_qcmd) + static void mptspi_slave_destroy(struct scsi_device *sdev) { struct scsi_target *starget = scsi_target(sdev); diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c index ea6b2197da8..97bdf82ec90 100644 --- a/drivers/message/i2o/i2o_scsi.c +++ b/drivers/message/i2o/i2o_scsi.c @@ -506,7 +506,7 @@ static struct i2o_driver i2o_scsi_driver = { * Locks: takes the controller lock on error path only */ -static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, +static int i2o_scsi_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done) (struct scsi_cmnd *)) { struct i2o_controller *c; @@ -688,7 +688,9 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt, exit: return rc; -}; +} + +static DEF_SCSI_QCMD(i2o_scsi_queuecommand) /** * i2o_scsi_abort - abort a running command diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c index f9b91ba8900..644d4cd071c 100644 --- a/drivers/misc/apds9802als.c +++ b/drivers/misc/apds9802als.c @@ -123,7 +123,7 @@ static ssize_t als_sensing_range_store(struct device *dev, { struct i2c_client *client = to_i2c_client(dev); struct als_data *data = i2c_get_clientdata(client); - unsigned int ret_val; + int ret_val; unsigned long val; if (strict_strtoul(buf, 10, &val)) @@ -251,7 +251,6 @@ static int apds9802als_probe(struct i2c_client *client, return res; als_error1: - i2c_set_clientdata(client, NULL); kfree(data); return res; } diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c index cee632e645e..d79a972f2c7 100644 --- a/drivers/misc/bh1770glc.c +++ b/drivers/misc/bh1770glc.c @@ -649,7 +649,7 @@ static ssize_t bh1770_power_state_store(struct device *dev, { struct bh1770_chip *chip = dev_get_drvdata(dev); unsigned long value; - size_t ret; + ssize_t ret; if (strict_strtoul(buf, 0, &value)) return -EINVAL; @@ -659,8 +659,12 @@ static ssize_t bh1770_power_state_store(struct device *dev, pm_runtime_get_sync(dev); 
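The Fusion MPT and i2o conversions above all follow the same host-lock push-down recipe: the old two-argument queuecommand is renamed to *_lck() and DEF_SCSI_QCMD() emits the new Scsi_Host-taking entry point. Only as an approximation (the real macro lives in <scsi/scsi_host.h> and may do extra bookkeeping), the generated wrapper behaves roughly like:

/*
 * Rough approximation of what DEF_SCSI_QCMD(mptfc_qcmd) produces;
 * not the literal macro text.
 */
static int mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	unsigned long irq_flags;
	int rc;

	spin_lock_irqsave(shost->host_lock, irq_flags);
	rc = mptfc_qcmd_lck(cmd, cmd->scsi_done);
	spin_unlock_irqrestore(shost->host_lock, irq_flags);
	return rc;
}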
ret = bh1770_lux_rate(chip, chip->lux_rate_index); - ret |= bh1770_lux_interrupt_control(chip, BH1770_ENABLE); + if (ret < 0) { + pm_runtime_put(dev); + goto leave; + } + ret = bh1770_lux_interrupt_control(chip, BH1770_ENABLE); if (ret < 0) { pm_runtime_put(dev); goto leave; diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c index 34fe835921c..307aada5fff 100644 --- a/drivers/misc/isl29020.c +++ b/drivers/misc/isl29020.c @@ -87,7 +87,7 @@ static ssize_t als_sensing_range_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); - unsigned int ret_val; + int ret_val; unsigned long val; if (strict_strtoul(buf, 10, &val)) @@ -106,6 +106,8 @@ static ssize_t als_sensing_range_store(struct device *dev, val = 4; ret_val = i2c_smbus_read_byte_data(client, 0x00); + if (ret_val < 0) + return ret_val; ret_val &= 0xFC; /*reset the bit before setting them */ ret_val |= val - 1; @@ -181,9 +183,7 @@ static int isl29020_probe(struct i2c_client *client, static int isl29020_remove(struct i2c_client *client) { - struct als_data *data = i2c_get_clientdata(client); sysfs_remove_group(&client->dev.kobj, &m_als_gr); - kfree(data); return 0; } @@ -243,6 +243,6 @@ static void __exit sensor_isl29020_exit(void) module_init(sensor_isl29020_init); module_exit(sensor_isl29020_exit); -MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com"); +MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com>"); MODULE_DESCRIPTION("Intersil isl29020 ALS Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index d551f09ccb7..6956f7e7d43 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c @@ -439,18 +439,23 @@ xpc_discovery(void) * nodes that can comprise an access protection grouping. The access * protection is in regards to memory, IOI and IPI. 
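A worked example for the xpc_discovery() change that follows: on non-UV hardware the fall-through switch still applies, so with xp_region_size of 128 the initial 64 is doubled three times to 512, with 64 it is doubled twice to 256, and with 32 once to 128, and in every matched case region_size is forced to 16. The new UV branch bypasses the switch and simply fixes max_regions at 256.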
*/ - max_regions = 64; region_size = xp_region_size; - switch (region_size) { - case 128: - max_regions *= 2; - case 64: - max_regions *= 2; - case 32: - max_regions *= 2; - region_size = 16; - DBUG_ON(!is_shub2()); + if (is_uv()) + max_regions = 256; + else { + max_regions = 64; + + switch (region_size) { + case 128: + max_regions *= 2; + case 64: + max_regions *= 2; + case 32: + max_regions *= 2; + region_size = 16; + DBUG_ON(!is_shub2()); + } } for (region = 0; region < max_regions; region++) { diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 8f86d702e46..31ae07a3657 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -1559,7 +1559,7 @@ void mmc_stop_host(struct mmc_host *host) if (host->caps & MMC_CAP_DISABLE) cancel_delayed_work(&host->disable); - cancel_delayed_work(&host->detect); + cancel_delayed_work_sync(&host->detect); mmc_flush_scheduled_work(); /* clear pm flags now and let card drivers set them as needed */ diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 995261f7fd7..77f93c3b880 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -375,7 +375,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, struct mmc_card *oldcard) { struct mmc_card *card; - int err, ddr = MMC_SDR_MODE; + int err, ddr = 0; u32 cid[4]; unsigned int max_dtr; @@ -562,7 +562,11 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, 1 << bus_width, ddr); err = 0; } else { - mmc_card_set_ddr_mode(card); + if (ddr) + mmc_card_set_ddr_mode(card); + else + ddr = MMC_SDR_MODE; + mmc_set_bus_width_ddr(card->host, bus_width, ddr); } } diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index c3ad1058cd3..efef5f94ac4 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -547,9 +547,11 @@ static void mmc_sdio_detect(struct mmc_host *host) BUG_ON(!host->card); /* Make sure card is powered before detecting it */ - err = pm_runtime_get_sync(&host->card->dev); - if (err < 0) - goto out; + if (host->caps & MMC_CAP_POWER_OFF_CARD) { + err = pm_runtime_get_sync(&host->card->dev); + if (err < 0) + goto out; + } mmc_claim_host(host); @@ -560,6 +562,20 @@ static void mmc_sdio_detect(struct mmc_host *host) mmc_release_host(host); + /* + * Tell PM core it's OK to power off the card now. + * + * The _sync variant is used in order to ensure that the card + * is left powered off in case an error occurred, and the card + * is going to be removed. + * + * Since there is no specific reason to believe a new user + * is about to show up at this point, the _sync variant is + * desirable anyway. 
+ */ + if (host->caps & MMC_CAP_POWER_OFF_CARD) + pm_runtime_put_sync(&host->card->dev); + out: if (err) { mmc_sdio_remove(host); @@ -568,9 +584,6 @@ out: mmc_detach_bus(host); mmc_release_host(host); } - - /* Tell PM core that we're done */ - pm_runtime_put(&host->card->dev); } /* @@ -718,16 +731,21 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr) card = host->card; /* - * Let runtime PM core know our card is active + * Enable runtime PM only if supported by host+card+board */ - err = pm_runtime_set_active(&card->dev); - if (err) - goto remove; + if (host->caps & MMC_CAP_POWER_OFF_CARD) { + /* + * Let runtime PM core know our card is active + */ + err = pm_runtime_set_active(&card->dev); + if (err) + goto remove; - /* - * Enable runtime PM for this card - */ - pm_runtime_enable(&card->dev); + /* + * Enable runtime PM for this card + */ + pm_runtime_enable(&card->dev); + } /* * The number of functions on the card is encoded inside @@ -745,9 +763,10 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr) goto remove; /* - * Enable Runtime PM for this func + * Enable Runtime PM for this func (if supported) */ - pm_runtime_enable(&card->sdio_func[i]->dev); + if (host->caps & MMC_CAP_POWER_OFF_CARD) + pm_runtime_enable(&card->sdio_func[i]->dev); } mmc_release_host(host); diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index 2716c7ab6bb..203da443e33 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -17,6 +17,7 @@ #include <linux/pm_runtime.h> #include <linux/mmc/card.h> +#include <linux/mmc/host.h> #include <linux/mmc/sdio_func.h> #include "sdio_cis.h" @@ -132,9 +133,11 @@ static int sdio_bus_probe(struct device *dev) * it should call pm_runtime_put_noidle() in its probe routine and * pm_runtime_get_noresume() in its remove routine. */ - ret = pm_runtime_get_sync(dev); - if (ret < 0) - goto out; + if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) { + ret = pm_runtime_get_sync(dev); + if (ret < 0) + goto out; + } /* Set the default block size so the driver is sure it's something * sensible. 
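The MMC_CAP_POWER_OFF_CARD tests added throughout the SDIO core above make card power management strictly opt-in: runtime PM is only armed when the host driver states that the board can really cut and restore card power. A minimal sketch of the host-side opt-in, for a hypothetical controller driver:

#include <linux/mmc/host.h>

/* Hypothetical host driver probe fragment. */
static void foo_host_advertise_caps(struct mmc_host *mmc)
{
	/*
	 * Assumption: the board wiring allows card power to be cut and
	 * restored. Without this cap the SDIO core now skips runtime PM
	 * for the card and its functions entirely.
	 */
	mmc->caps |= MMC_CAP_POWER_OFF_CARD;
}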
*/ @@ -151,7 +154,8 @@ static int sdio_bus_probe(struct device *dev) return 0; disable_runtimepm: - pm_runtime_put_noidle(dev); + if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) + pm_runtime_put_noidle(dev); out: return ret; } @@ -160,12 +164,14 @@ static int sdio_bus_remove(struct device *dev) { struct sdio_driver *drv = to_sdio_driver(dev->driver); struct sdio_func *func = dev_to_sdio_func(dev); - int ret; + int ret = 0; /* Make sure card is powered before invoking ->remove() */ - ret = pm_runtime_get_sync(dev); - if (ret < 0) - goto out; + if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) { + ret = pm_runtime_get_sync(dev); + if (ret < 0) + goto out; + } drv->remove(func); @@ -178,10 +184,12 @@ static int sdio_bus_remove(struct device *dev) } /* First, undo the increment made directly above */ - pm_runtime_put_noidle(dev); + if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) + pm_runtime_put_noidle(dev); /* Then undo the runtime PM settings in sdio_bus_probe() */ - pm_runtime_put_noidle(dev); + if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) + pm_runtime_put_noidle(dev); out: return ret; @@ -191,6 +199,8 @@ out: static int sdio_bus_pm_prepare(struct device *dev) { + struct sdio_func *func = dev_to_sdio_func(dev); + /* * Resume an SDIO device which was suspended at run time at this * point, in order to allow standard SDIO suspend/resume paths @@ -212,7 +222,8 @@ static int sdio_bus_pm_prepare(struct device *dev) * since there is little point in failing system suspend if a * device can't be resumed. */ - pm_runtime_resume(dev); + if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) + pm_runtime_resume(dev); return 0; } diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 82a1079bbdc..5d46021cbb5 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -1002,7 +1002,7 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host, * Monitor a 0->1 transition first */ if (mmc_slot(host).features & HSMMC_HAS_UPDATED_RESET) { - while ((!(OMAP_HSMMC_READ(host, SYSCTL) & bit)) + while ((!(OMAP_HSMMC_READ(host->base, SYSCTL) & bit)) && (i++ < limit)) cpu_relax(); } diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 2e9cca19c90..9b82910b9db 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -17,6 +17,7 @@ #include <linux/clk.h> #include <linux/mmc/host.h> #include <linux/mmc/sdhci-pltfm.h> +#include <mach/hardware.h> #include "sdhci.h" #include "sdhci-pltfm.h" #include "sdhci-esdhc.h" @@ -112,6 +113,13 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd clk_enable(clk); pltfm_host->clk = clk; + if (cpu_is_mx35() || cpu_is_mx51()) + host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; + + /* Fix errata ENGcm07207 which is present on i.MX25 and i.MX35 */ + if (cpu_is_mx25() || cpu_is_mx35()) + host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK; + return 0; } @@ -133,10 +141,8 @@ static struct sdhci_ops sdhci_esdhc_ops = { }; struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { - .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_NO_MULTIBLOCK - | SDHCI_QUIRK_BROKEN_ADMA, + .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA, /* ADMA has issues. 
Might be fixable */ - /* NO_MULTIBLOCK might be MX35 only (Errata: ENGcm07207) */ .ops = &sdhci_esdhc_ops, .init = esdhc_pltfm_init, .exit = esdhc_pltfm_exit, diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index 55746bac2f4..3d9c2460d43 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c @@ -149,11 +149,11 @@ static const struct sdhci_pci_fixes sdhci_cafe = { * ADMA operation is disabled for Moorestown platform due to * hardware bugs. */ -static int mrst_hc1_probe(struct sdhci_pci_chip *chip) +static int mrst_hc_probe(struct sdhci_pci_chip *chip) { /* - * slots number is fixed here for MRST as SDIO3 is never used and has - * hardware bugs. + * slots number is fixed here for MRST as SDIO3/5 are never used and + * have hardware bugs. */ chip->num_slots = 1; return 0; @@ -163,9 +163,9 @@ static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = { .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT, }; -static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1 = { +static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = { .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT, - .probe = mrst_hc1_probe, + .probe = mrst_hc_probe, }; static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = { @@ -538,7 +538,15 @@ static const struct pci_device_id pci_ids[] __devinitdata = { .device = PCI_DEVICE_ID_INTEL_MRST_SD1, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, - .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc1, + .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc1_hc2, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_MRST_SD2, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc1_hc2, }, { @@ -637,6 +645,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state) { struct sdhci_pci_chip *chip; struct sdhci_pci_slot *slot; + mmc_pm_flag_t slot_pm_flags; mmc_pm_flag_t pm_flags = 0; int i, ret; @@ -657,7 +666,11 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state) return ret; } - pm_flags |= slot->host->mmc->pm_flags; + slot_pm_flags = slot->host->mmc->pm_flags; + if (slot_pm_flags & MMC_PM_WAKE_SDIO_IRQ) + sdhci_enable_irq_wakeups(slot->host); + + pm_flags |= slot_pm_flags; } if (chip->fixes && chip->fixes->suspend) { @@ -671,8 +684,10 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state) pci_save_state(pdev); if (pm_flags & MMC_PM_KEEP_POWER) { - if (pm_flags & MMC_PM_WAKE_SDIO_IRQ) + if (pm_flags & MMC_PM_WAKE_SDIO_IRQ) { + pci_pme_active(pdev, true); pci_enable_wake(pdev, PCI_D3hot, 1); + } pci_set_power_state(pdev, PCI_D3hot); } else { pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); diff --git a/drivers/mmc/host/sdhci-pxa.c b/drivers/mmc/host/sdhci-pxa.c index fc406ac5d19..5a61208cbc6 100644 --- a/drivers/mmc/host/sdhci-pxa.c +++ b/drivers/mmc/host/sdhci-pxa.c @@ -141,6 +141,10 @@ static int __devinit sdhci_pxa_probe(struct platform_device *pdev) if (pdata->quirks) host->quirks |= pdata->quirks; + /* If slot design supports 8 bit data, indicate this to MMC. 
*/ + if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT) + host->mmc->caps |= MMC_CAP_8_BIT_DATA; + ret = sdhci_add_host(host); if (ret) { dev_err(&pdev->dev, "failed to add host\n"); diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 782c0ee3c92..a25db426c91 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1185,17 +1185,31 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (host->ops->platform_send_init_74_clocks) host->ops->platform_send_init_74_clocks(host, ios->power_mode); - ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); - - if (ios->bus_width == MMC_BUS_WIDTH_8) - ctrl |= SDHCI_CTRL_8BITBUS; - else - ctrl &= ~SDHCI_CTRL_8BITBUS; + /* + * If your platform has 8-bit width support but is not a v3 controller, + * or if it requires special setup code, you should implement that in + * platform_8bit_width(). + */ + if (host->ops->platform_8bit_width) + host->ops->platform_8bit_width(host, ios->bus_width); + else { + ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); + if (ios->bus_width == MMC_BUS_WIDTH_8) { + ctrl &= ~SDHCI_CTRL_4BITBUS; + if (host->version >= SDHCI_SPEC_300) + ctrl |= SDHCI_CTRL_8BITBUS; + } else { + if (host->version >= SDHCI_SPEC_300) + ctrl &= ~SDHCI_CTRL_8BITBUS; + if (ios->bus_width == MMC_BUS_WIDTH_4) + ctrl |= SDHCI_CTRL_4BITBUS; + else + ctrl &= ~SDHCI_CTRL_4BITBUS; + } + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); + } - if (ios->bus_width == MMC_BUS_WIDTH_4) - ctrl |= SDHCI_CTRL_4BITBUS; - else - ctrl &= ~SDHCI_CTRL_4BITBUS; + ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); if ((ios->timing == MMC_TIMING_SD_HS || ios->timing == MMC_TIMING_MMC_HS) @@ -1681,6 +1695,16 @@ int sdhci_resume_host(struct sdhci_host *host) EXPORT_SYMBOL_GPL(sdhci_resume_host); +void sdhci_enable_irq_wakeups(struct sdhci_host *host) +{ + u8 val; + val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); + val |= SDHCI_WAKE_ON_INT; + sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); +} + +EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups); + #endif /* CONFIG_PM */ /*****************************************************************************\ @@ -1845,11 +1869,19 @@ int sdhci_add_host(struct sdhci_host *host) mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; else mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; + mmc->f_max = host->max_clk; mmc->caps |= MMC_CAP_SDIO_IRQ; + /* + * A controller may support 8-bit width, but the board itself + * might not have the pins brought out. Boards that support + * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in + * their platform code before calling sdhci_add_host(), and we + * won't assume 8-bit width for hosts without that CAP. 
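For controllers that have all eight data lines wired but are not v3 parts, the new hook called from sdhci_set_ios() above gives the platform glue a place to do its own bus-width switch. A sketch of how a platform driver might wire it up; the FOO_* register and bit values are made up, only the callback shape matches the new sdhci_ops member:

#include <linux/mmc/host.h>
#include "sdhci.h"

/* Hypothetical register layout for the platform's width switch. */
#define FOO_BUS_WIDTH_REG	0xe8
#define FOO_BUS_WIDTH_MASK	0x03
#define FOO_BUS_WIDTH_1		0x00
#define FOO_BUS_WIDTH_4		0x01
#define FOO_BUS_WIDTH_8		0x02

static int foo_sdhci_8bit_width(struct sdhci_host *host, int width)
{
	u8 ctrl = sdhci_readb(host, FOO_BUS_WIDTH_REG);

	ctrl &= ~FOO_BUS_WIDTH_MASK;
	if (width == MMC_BUS_WIDTH_8)
		ctrl |= FOO_BUS_WIDTH_8;
	else if (width == MMC_BUS_WIDTH_4)
		ctrl |= FOO_BUS_WIDTH_4;
	else
		ctrl |= FOO_BUS_WIDTH_1;
	sdhci_writeb(host, ctrl, FOO_BUS_WIDTH_REG);
	return 0;
}

static struct sdhci_ops foo_sdhci_ops = {
	.platform_8bit_width = foo_sdhci_8bit_width,
};

The board code still has to advertise MMC_CAP_8_BIT_DATA before calling sdhci_add_host(), exactly as the capability comment above requires.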
+ */ if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) - mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA; + mmc->caps |= MMC_CAP_4_BIT_DATA; if (caps & SDHCI_CAN_DO_HISPD) mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index b7b8a3b28b0..e42d7f00c06 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -76,7 +76,7 @@ #define SDHCI_CTRL_ADMA1 0x08 #define SDHCI_CTRL_ADMA32 0x10 #define SDHCI_CTRL_ADMA64 0x18 -#define SDHCI_CTRL_8BITBUS 0x20 +#define SDHCI_CTRL_8BITBUS 0x20 #define SDHCI_POWER_CONTROL 0x29 #define SDHCI_POWER_ON 0x01 @@ -87,6 +87,9 @@ #define SDHCI_BLOCK_GAP_CONTROL 0x2A #define SDHCI_WAKE_UP_CONTROL 0x2B +#define SDHCI_WAKE_ON_INT 0x01 +#define SDHCI_WAKE_ON_INSERT 0x02 +#define SDHCI_WAKE_ON_REMOVE 0x04 #define SDHCI_CLOCK_CONTROL 0x2C #define SDHCI_DIVIDER_SHIFT 8 @@ -152,6 +155,7 @@ #define SDHCI_CLOCK_BASE_SHIFT 8 #define SDHCI_MAX_BLOCK_MASK 0x00030000 #define SDHCI_MAX_BLOCK_SHIFT 16 +#define SDHCI_CAN_DO_8BIT 0x00040000 #define SDHCI_CAN_DO_ADMA2 0x00080000 #define SDHCI_CAN_DO_ADMA1 0x00100000 #define SDHCI_CAN_DO_HISPD 0x00200000 @@ -212,6 +216,8 @@ struct sdhci_ops { unsigned int (*get_max_clock)(struct sdhci_host *host); unsigned int (*get_min_clock)(struct sdhci_host *host); unsigned int (*get_timeout_clock)(struct sdhci_host *host); + int (*platform_8bit_width)(struct sdhci_host *host, + int width); void (*platform_send_init_74_clocks)(struct sdhci_host *host, u8 power_mode); unsigned int (*get_ro)(struct sdhci_host *host); @@ -317,6 +323,7 @@ extern void sdhci_remove_host(struct sdhci_host *host, int dead); #ifdef CONFIG_PM extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state); extern int sdhci_resume_host(struct sdhci_host *host); +extern void sdhci_enable_irq_wakeups(struct sdhci_host *host); #endif #endif /* __SDHCI_HW_H */ diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c index b4ead4a13c9..f8f65df9b01 100644 --- a/drivers/mmc/host/ushc.c +++ b/drivers/mmc/host/ushc.c @@ -425,7 +425,7 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id struct usb_device *usb_dev = interface_to_usbdev(intf); struct mmc_host *mmc; struct ushc_data *ushc; - int ret = -ENOMEM; + int ret; mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev); if (mmc == NULL) @@ -462,11 +462,15 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id mmc->max_blk_count = 511; ushc->int_urb = usb_alloc_urb(0, GFP_KERNEL); - if (ushc->int_urb == NULL) + if (ushc->int_urb == NULL) { + ret = -ENOMEM; goto err; + } ushc->int_data = kzalloc(sizeof(struct ushc_int_data), GFP_KERNEL); - if (ushc->int_data == NULL) + if (ushc->int_data == NULL) { + ret = -ENOMEM; goto err; + } usb_fill_int_urb(ushc->int_urb, ushc->usb_dev, usb_rcvintpipe(usb_dev, intf->cur_altsetting->endpoint[0].desc.bEndpointAddress), @@ -475,11 +479,15 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id intf->cur_altsetting->endpoint[0].desc.bInterval); ushc->cbw_urb = usb_alloc_urb(0, GFP_KERNEL); - if (ushc->cbw_urb == NULL) + if (ushc->cbw_urb == NULL) { + ret = -ENOMEM; goto err; + } ushc->cbw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL); - if (ushc->cbw == NULL) + if (ushc->cbw == NULL) { + ret = -ENOMEM; goto err; + } ushc->cbw->signature = USHC_CBW_SIGNATURE; usb_fill_bulk_urb(ushc->cbw_urb, ushc->usb_dev, usb_sndbulkpipe(usb_dev, 2), @@ -487,15 +495,21 @@ static int ushc_probe(struct 
usb_interface *intf, const struct usb_device_id *id cbw_callback, ushc); ushc->data_urb = usb_alloc_urb(0, GFP_KERNEL); - if (ushc->data_urb == NULL) + if (ushc->data_urb == NULL) { + ret = -ENOMEM; goto err; + } ushc->csw_urb = usb_alloc_urb(0, GFP_KERNEL); - if (ushc->csw_urb == NULL) + if (ushc->csw_urb == NULL) { + ret = -ENOMEM; goto err; + } ushc->csw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL); - if (ushc->csw == NULL) + if (ushc->csw == NULL) { + ret = -ENOMEM; goto err; + } usb_fill_bulk_urb(ushc->csw_urb, ushc->usb_dev, usb_rcvbulkpipe(usb_dev, 6), ushc->csw, sizeof(struct ushc_csw), csw_callback, ushc); diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c index dd90880048c..d8ae634d347 100644 --- a/drivers/mtd/maps/pxa2xx-flash.c +++ b/drivers/mtd/maps/pxa2xx-flash.c @@ -51,7 +51,7 @@ struct pxa2xx_flash_info { static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; -static int __init pxa2xx_flash_probe(struct platform_device *pdev) +static int __devinit pxa2xx_flash_probe(struct platform_device *pdev) { struct flash_platform_data *flash = pdev->dev.platform_data; struct pxa2xx_flash_info *info; diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index cd41c58b5bb..15682ec8530 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -7,7 +7,6 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ -#define CONFIG_MTD_NAND_OMAP_HWECC #include <linux/platform_device.h> #include <linux/dma-mapping.h> diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index c2960ac9f39..811775aa8ee 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c @@ -482,10 +482,17 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum) uint32_t data = 0; struct ubi_vid_hdr vid_hdr; - addr = (loff_t)pnum * ubi->peb_size + ubi->vid_hdr_aloffset; + /* + * It is important to first invalidate the EC header, and then the VID + * header. Otherwise a power cut may lead to valid EC header and + * invalid VID header, in which case UBI will treat this PEB as + * corrupted and will try to preserve it, and print scary warnings (see + * the header comment in scan.c for more information). + */ + addr = (loff_t)pnum * ubi->peb_size; err = ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data); if (!err) { - addr -= ubi->vid_hdr_aloffset; + addr += ubi->vid_hdr_aloffset; err = ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data); if (!err) @@ -494,18 +501,24 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum) /* * We failed to write to the media. This was observed with Spansion - * S29GL512N NOR flash. Most probably the eraseblock erasure was - * interrupted at a very inappropriate moment, so it became unwritable. - * In this case we probably anyway have garbage in this PEB. + * S29GL512N NOR flash. Most probably the previously eraseblock erasure + * was interrupted at a very inappropriate moment, so it became + * unwritable. In this case we probably anyway have garbage in this + * PEB. */ err1 = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0); - if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR) - /* - * The VID header is corrupted, so we can safely erase this - * PEB and not afraid that it will be treated as a valid PEB in - * case of an unclean reboot. 
- */ - return 0; + if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR) { + struct ubi_ec_hdr ec_hdr; + + err1 = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0); + if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR) + /* + * Both VID and EC headers are corrupted, so we can + * safely erase this PEB and not afraid that it will be + * treated as a valid PEB in case of an unclean reboot. + */ + return 0; + } /* * The PEB contains a valid VID header, but we cannot invalidate it. diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c index 3c631863bf4..79ca304fc4d 100644 --- a/drivers/mtd/ubi/scan.c +++ b/drivers/mtd/ubi/scan.c @@ -787,16 +787,15 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr, * erased, so it became unstable and corrupted, and should be * erased. */ - return 0; + err = 0; + goto out_unlock; } if (err) - return err; + goto out_unlock; - if (ubi_check_pattern(ubi->peb_buf1, 0xFF, ubi->leb_size)) { - mutex_unlock(&ubi->buf_mutex); - return 0; - } + if (ubi_check_pattern(ubi->peb_buf1, 0xFF, ubi->leb_size)) + goto out_unlock; ubi_err("PEB %d contains corrupted VID header, and the data does not " "contain all 0xFF, this may be a non-UBI PEB or a severe VID " @@ -806,8 +805,11 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr, pnum, ubi->leb_start, ubi->leb_size); ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, ubi->peb_buf1, ubi->leb_size, 1); + err = 1; + +out_unlock: mutex_unlock(&ubi->buf_mutex); - return 1; + return err; } /** @@ -951,6 +953,10 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, * impossible to distinguish it from a PEB which just * contains garbage because of a power cut during erase * operation. So we just schedule this PEB for erasure. + * + * Besides, in case of NOR flash, we deliberatly + * corrupt both headers because NOR flash erasure is + * slow and can start from the end. */ err = 0; else diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index e1da258bbfb..0a92436f053 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -699,7 +699,8 @@ DEFINE_WINDOW_IO(32) #define DEVICE_PCI(dev) NULL #endif -#define VORTEX_PCI(vp) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL) +#define VORTEX_PCI(vp) \ + ((struct pci_dev *) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL)) #ifdef CONFIG_EISA #define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL) @@ -707,7 +708,8 @@ DEFINE_WINDOW_IO(32) #define DEVICE_EISA(dev) NULL #endif -#define VORTEX_EISA(vp) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL) +#define VORTEX_EISA(vp) \ + ((struct eisa_device *) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL)) /* The action to take with a media selection timer tick. Note that we deviate from the 3Com order by checking 10base2 before AUI. 
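Stepping back to the ubi/io.c hunk above: the reordering guarantees that the erase-counter header is invalidated before the VID header, so a power cut in between can never leave a valid EC header paired with a corrupted VID header, which is the combination the scanner would otherwise preserve as a possibly corrupted PEB. A condensed sketch of just those two writes (not the driver function itself; it only uses fields the hunk already references):

static int example_nor_invalidate(struct ubi_device *ubi, int pnum)
{
        uint32_t data = 0;
        size_t written;
        loff_t addr = (loff_t)pnum * ubi->peb_size;     /* EC header first */
        int err;

        err = ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data);
        if (err)
                return err;

        addr += ubi->vid_hdr_aloffset;                  /* then VID header */
        return ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data);
}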
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c index ac422cd332e..dd16e83933a 100644 --- a/drivers/net/8139cp.c +++ b/drivers/net/8139cp.c @@ -490,13 +490,11 @@ static inline unsigned int cp_rx_csum_ok (u32 status) { unsigned int protocol = (status >> 16) & 0x3; - if (likely((protocol == RxProtoTCP) && (!(status & TCPFail)))) + if (((protocol == RxProtoTCP) && !(status & TCPFail)) || + ((protocol == RxProtoUDP) && !(status & UDPFail))) return 1; - else if ((protocol == RxProtoUDP) && (!(status & UDPFail))) - return 1; - else if ((protocol == RxProtoIP) && (!(status & IPFail))) - return 1; - return 0; + else + return 0; } static int cp_rx_poll(struct napi_struct *napi, int budget) diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index f6668cdaac8..4f1755bddf6 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -2543,10 +2543,10 @@ config PCH_GBE depends on PCI select MII ---help--- - This is a gigabit ethernet driver for Topcliff PCH. - Topcliff PCH is the platform controller hub that is used in Intel's + This is a gigabit ethernet driver for EG20T PCH. + EG20T PCH is the platform controller hub that is used in Intel's general embedded platform. - Topcliff PCH has Gigabit Ethernet interface. + EG20T PCH has Gigabit Ethernet interface. Using this interface, it is able to access system devices connected to Gigabit Ethernet. This driver enables Gigabit Ethernet function. @@ -2945,6 +2945,18 @@ source "drivers/s390/net/Kconfig" source "drivers/net/caif/Kconfig" +config TILE_NET + tristate "Tilera GBE/XGBE network driver support" + depends on TILE + default y + select CRC32 + help + This is a standard Linux network device driver for the + on-chip Tilera Gigabit Ethernet and XAUI interfaces. + + To compile this driver as a module, choose M here: the module + will be called tile_net. 
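For context on how the new TILE_NET entry above is consumed (illustrative, not part of the patch): selecting it as a module, e.g.

        CONFIG_TILE_NET=m

makes the Makefile hunk just below (obj-$(CONFIG_TILE_NET) += tile/) descend into drivers/net/tile/ and produce tile_net.ko, matching the help text; building it in (=y, the default on TILE) links the driver statically instead.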
+ config XEN_NETDEV_FRONTEND tristate "Xen network device frontend driver" depends on XEN diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 652fc6b9803..b90738d1399 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -301,3 +301,4 @@ obj-$(CONFIG_CAIF) += caif/ obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/ obj-$(CONFIG_PCH_GBE) += pch_gbe/ +obj-$(CONFIG_TILE_NET) += tile/ diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c index 919080b2c3a..1bf67200994 100644 --- a/drivers/net/atl1c/atl1c_hw.c +++ b/drivers/net/atl1c/atl1c_hw.c @@ -82,7 +82,7 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw) addr[0] = addr[1] = 0; AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data); if (atl1c_check_eeprom_exist(hw)) { - if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b) { + if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) { /* Enable OTP CLK */ if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) { otp_ctrl_data |= OTP_CTRL_CLK_EN; diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c index 43579b3b24a..53363108994 100644 --- a/drivers/net/atlx/atl1.c +++ b/drivers/net/atlx/atl1.c @@ -3043,7 +3043,6 @@ static int __devinit atl1_probe(struct pci_dev *pdev, atl1_pcie_patch(adapter); /* assume we have no link for now */ netif_carrier_off(netdev); - netif_stop_queue(netdev); setup_timer(&adapter->phy_config_timer, atl1_phy_config, (unsigned long)adapter); diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index 43489f89c14..53eff9ba6e9 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c @@ -155,10 +155,10 @@ static void au1000_enable_mac(struct net_device *dev, int force_reset) spin_lock_irqsave(&aup->lock, flags); if (force_reset || (!aup->mac_enabled)) { - writel(MAC_EN_CLOCK_ENABLE, &aup->enable); + writel(MAC_EN_CLOCK_ENABLE, aup->enable); au_sync_delay(2); writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2 - | MAC_EN_CLOCK_ENABLE), &aup->enable); + | MAC_EN_CLOCK_ENABLE), aup->enable); au_sync_delay(2); aup->mac_enabled = 1; @@ -503,9 +503,9 @@ static void au1000_reset_mac_unlocked(struct net_device *dev) au1000_hard_stop(dev); - writel(MAC_EN_CLOCK_ENABLE, &aup->enable); + writel(MAC_EN_CLOCK_ENABLE, aup->enable); au_sync_delay(2); - writel(0, &aup->enable); + writel(0, aup->enable); au_sync_delay(2); aup->tx_full = 0; @@ -1119,7 +1119,7 @@ static int __devinit au1000_probe(struct platform_device *pdev) /* set a random MAC now in case platform_data doesn't provide one */ random_ether_addr(dev->dev_addr); - writel(0, &aup->enable); + writel(0, aup->enable); aup->mac_enabled = 0; pd = pdev->dev.platform_data; diff --git a/drivers/net/b44.c b/drivers/net/b44.c index c6e86315b3f..2e2b76258ab 100644 --- a/drivers/net/b44.c +++ b/drivers/net/b44.c @@ -381,11 +381,11 @@ static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote) __b44_set_flow_ctrl(bp, pause_enab); } -#ifdef SSB_DRIVER_MIPS -extern char *nvram_get(char *name); +#ifdef CONFIG_BCM47XX +#include <asm/mach-bcm47xx/nvram.h> static void b44_wap54g10_workaround(struct b44 *bp) { - const char *str; + char buf[20]; u32 val; int err; @@ -394,10 +394,9 @@ static void b44_wap54g10_workaround(struct b44 *bp) * see https://dev.openwrt.org/ticket/146 * check and reset bit "isolate" */ - str = nvram_get("boardnum"); - if (!str) + if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0) return; - if (simple_strtoul(str, NULL, 0) == 2) { + if (simple_strtoul(buf, NULL, 0) == 2) { err = __b44_readphy(bp, 0, MII_BMCR, &val); if (err) goto error; diff --git 
a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index 36eca1ce75d..e4465d222a7 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c @@ -1235,7 +1235,7 @@ int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id, i = 0; netdev_for_each_mc_addr(ha, netdev) - memcpy(req->mac[i].byte, ha->addr, ETH_ALEN); + memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN); } else { req->promiscuous = 1; } diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index c36cd2ffbad..93354eee2cf 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -2458,6 +2458,12 @@ int be_load_fw(struct be_adapter *adapter, u8 *func) int status, i = 0, num_imgs = 0; const u8 *p; + if (!netif_running(adapter->netdev)) { + dev_err(&adapter->pdev->dev, + "Firmware load not allowed (interface is down)\n"); + return -EPERM; + } + strcpy(fw_file, func); status = request_firmware(&fw, fw_file, &adapter->pdev->dev); diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h index 9eea225deca..d255428122f 100644 --- a/drivers/net/bnx2x/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h @@ -20,8 +20,8 @@ * (you will need to reboot afterwards) */ /* #define BNX2X_STOP_ON_ERROR */ -#define DRV_MODULE_VERSION "1.60.00-3" -#define DRV_MODULE_RELDATE "2010/10/19" +#define DRV_MODULE_VERSION "1.60.01-0" +#define DRV_MODULE_RELDATE "2010/11/12" #define BNX2X_BC_VER 0x040200 #define BNX2X_MULTI_QUEUE diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c index 459614d2d7b..0af361e4e3d 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.c +++ b/drivers/net/bnx2x/bnx2x_cmn.c @@ -1680,7 +1680,7 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb) rc = XMIT_PLAIN; else { - if (skb->protocol == htons(ETH_P_IPV6)) { + if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) { rc = XMIT_CSUM_V6; if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) rc |= XMIT_CSUM_TCP; @@ -1782,15 +1782,15 @@ exit_lbl: } #endif -static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, - struct eth_tx_parse_bd_e2 *pbd, - u32 xmit_type) +static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, + u32 xmit_type) { - pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) << - ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT; + *parsing_data |= (skb_shinfo(skb)->gso_size << + ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) & + ETH_TX_PARSE_BD_E2_LSO_MSS; if ((xmit_type & XMIT_GSO_V6) && (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) - pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; + *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; } /** @@ -1835,15 +1835,15 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb, * @return header len */ static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb, - struct eth_tx_parse_bd_e2 *pbd, - u32 xmit_type) + u32 *parsing_data, u32 xmit_type) { - pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) << - ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT; + *parsing_data |= ((tcp_hdrlen(skb)/4) << + ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) & + ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW; - pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) - - skb->data) / 2) << - ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT; + *parsing_data |= ((((u8 *)tcp_hdr(skb) - skb->data) / 2) << + ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) & + ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W; return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data; } @@ -1912,6 +1912,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct 
net_device *dev) struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL; struct eth_tx_parse_bd_e1x *pbd_e1x = NULL; struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; + u32 pbd_e2_parsing_data = 0; u16 pkt_prod, bd_prod; int nbd, fp_index; dma_addr_t mapping; @@ -2033,8 +2034,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2)); /* Set PBD in checksum offload case */ if (xmit_type & XMIT_CSUM) - hlen = bnx2x_set_pbd_csum_e2(bp, - skb, pbd_e2, xmit_type); + hlen = bnx2x_set_pbd_csum_e2(bp, skb, + &pbd_e2_parsing_data, + xmit_type); } else { pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x; memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x)); @@ -2076,10 +2078,18 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd, hlen, bd_prod, ++nbd); if (CHIP_IS_E2(bp)) - bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type); + bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, + xmit_type); else bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type); } + + /* Set the PBD's parsing_data field if not zero + * (for the chips newer than 57711). + */ + if (pbd_e2_parsing_data) + pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data); + tx_data_bd = (struct eth_tx_bd *)tx_start_bd; /* Handle fragmented skb */ diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h index 18c8e23a0e8..4cfd4e9b558 100644 --- a/drivers/net/bnx2x/bnx2x_hsi.h +++ b/drivers/net/bnx2x/bnx2x_hsi.h @@ -244,7 +244,14 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */ u16 xgxs_config_tx[4]; /* 0x1A0 */ - u32 Reserved1[57]; /* 0x1A8 */ + u32 Reserved1[56]; /* 0x1A8 */ + u32 default_cfg; /* 0x288 */ + /* Enable BAM on KR */ +#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK 0x00100000 +#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT 20 +#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED 0x00000000 +#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED 0x00100000 + u32 speed_capability_mask2; /* 0x28C */ #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK 0x0000FFFF #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT 0 diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h index a306b0e46b6..66df29fcf75 100644 --- a/drivers/net/bnx2x/bnx2x_init_ops.h +++ b/drivers/net/bnx2x/bnx2x_init_ops.h @@ -838,7 +838,7 @@ static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count, /**************************************************************************** * SRC initializations ****************************************************************************/ - +#ifdef BCM_CNIC /* called during init func stage */ static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2, dma_addr_t t2_mapping, int src_cid_count) @@ -862,5 +862,5 @@ static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2, U64_HI((u64)t2_mapping + (src_cid_count-1) * sizeof(struct src_ent))); } - +#endif #endif /* BNX2X_INIT_OPS_H */ diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c index 2326774df84..58091961925 100644 --- a/drivers/net/bnx2x/bnx2x_link.c +++ b/drivers/net/bnx2x/bnx2x_link.c @@ -610,7 +610,7 @@ static u8 bnx2x_bmac_enable(struct link_params *params, /* reset and unreset the BigMac */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); - udelay(10); + msleep(1); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); @@ -3525,13 +3525,19 @@ static u8 
bnx2x_8073_config_init(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1); /* Enable CL37 BAM */ - bnx2x_cl45_read(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_8073_BAM, &val); - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, - MDIO_AN_REG_8073_BAM, val | 1); + if (REG_RD(bp, params->shmem_base + + offsetof(struct shmem_region, dev_info. + port_hw_config[params->port].default_cfg)) & + PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) { + bnx2x_cl45_read(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8073_BAM, &val); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, + MDIO_AN_REG_8073_BAM, val | 1); + DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n"); + } if (params->loopback_mode == LOOPBACK_EXT) { bnx2x_807x_force_10G(bp, phy); DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n"); @@ -5302,7 +5308,7 @@ static u8 bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy, { struct bnx2x *bp = params->bp; u16 autoneg_val, an_1000_val, an_10_100_val; - bnx2x_wait_reset_complete(bp, phy); + bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4, 1 << NIG_LATCH_BC_ENABLE_MI_INT); @@ -5431,6 +5437,7 @@ static u8 bnx2x_8481_config_init(struct bnx2x_phy *phy, /* HW reset */ bnx2x_ext_phy_hw_reset(bp, params->port); + bnx2x_wait_reset_complete(bp, phy); bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15); return bnx2x_848xx_cmn_config_init(phy, params, vars); @@ -5441,7 +5448,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy, struct link_vars *vars) { struct bnx2x *bp = params->bp; - u8 port = params->port, initialize = 1; + u8 port, initialize = 1; u16 val; u16 temp; u32 actual_phy_selection; @@ -5450,11 +5457,16 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy, /* This is just for MDIO_CTL_REG_84823_MEDIA register. */ msleep(1); + if (CHIP_IS_E2(bp)) + port = BP_PATH(bp); + else + port = params->port; bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); - msleep(200); /* 100 is not enough */ - + bnx2x_wait_reset_complete(bp, phy); + /* Wait for GPHY to come out of reset */ + msleep(50); /* BCM84823 requires that XGXS links up first @ 10G for normal behavior */ temp = vars->line_speed; @@ -5625,7 +5637,11 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy, struct link_params *params) { struct bnx2x *bp = params->bp; - u8 port = params->port; + u8 port; + if (CHIP_IS_E2(bp)) + port = BP_PATH(bp); + else + port = params->port; bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3, MISC_REGISTERS_GPIO_OUTPUT_LOW, port); @@ -6928,7 +6944,7 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars, u8 reset_ext_phy) { struct bnx2x *bp = params->bp; - u8 phy_index, port = params->port; + u8 phy_index, port = params->port, clear_latch_ind = 0; DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port); /* disable attentions */ vars->link_status = 0; @@ -6966,9 +6982,18 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars, params->phy[phy_index].link_reset( ¶ms->phy[phy_index], params); + if (params->phy[phy_index].flags & + FLAGS_REARM_LATCH_SIGNAL) + clear_latch_ind = 1; } } + if (clear_latch_ind) { + /* Clear latching indication */ + bnx2x_rearm_latch_signal(bp, port, 0); + bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4, + 1 << NIG_LATCH_BC_ENABLE_MI_INT); + } if (params->phy[INT_PHY].link_reset) params->phy[INT_PHY].link_reset( ¶ms->phy[INT_PHY], params); @@ -6999,6 +7024,7 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, s8 port; s8 port_of_path = 0; + bnx2x_ext_phy_hw_reset(bp, 0); /* PART1 - Reset both 
phys */ for (port = PORT_MAX - 1; port >= PORT_0; port--) { u32 shmem_base, shmem2_base; @@ -7021,7 +7047,8 @@ static u8 bnx2x_8073_common_init_phy(struct bnx2x *bp, return -EINVAL; } /* disable attentions */ - bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, + bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + + port_of_path*4, (NIG_MASK_XGXS0_LINK_STATUS | NIG_MASK_XGXS0_LINK10G | NIG_MASK_SERDES0_LINK_STATUS | @@ -7132,7 +7159,7 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, (1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT))); REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val); - bnx2x_ext_phy_hw_reset(bp, 1); + bnx2x_ext_phy_hw_reset(bp, 0); msleep(5); for (port = 0; port < PORT_MAX; port++) { u32 shmem_base, shmem2_base; diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index e9ad16f00b5..9709b856966 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c @@ -9064,7 +9064,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, default: pr_err("Unknown board_type (%ld), aborting\n", ent->driver_data); - return ENODEV; + return -ENODEV; } cid_count += CNIC_CONTEXT_USE; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index bdb68a60038..d0ea760ce41 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -171,7 +171,7 @@ MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on link /*----------------------------- Global variables ----------------------------*/ #ifdef CONFIG_NET_POLL_CONTROLLER -cpumask_var_t netpoll_block_tx; +atomic_t netpoll_block_tx = ATOMIC_INIT(0); #endif static const char * const version = @@ -878,8 +878,10 @@ static void __bond_resend_igmp_join_requests(struct net_device *dev) rcu_read_lock(); in_dev = __in_dev_get_rcu(dev); if (in_dev) { + read_lock(&in_dev->mc_list_lock); for (im = in_dev->mc_list; im; im = im->next) ip_mc_rejoin_group(im); + read_unlock(&in_dev->mc_list_lock); } rcu_read_unlock(); @@ -1574,7 +1576,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) /* If this is the first slave, then we need to set the master's hardware * address to be the same as the slave's. 
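One more note on the bonding netpoll change above: with the cpumask gone, block_netpoll_tx()/unblock_netpoll_tx() (redefined in the bonding.h hunk below) become a simple reference count, so blocking may nest and the blocked section may migrate between CPUs without tripping a BUG_ON. The call pattern around slave manipulation stays the same; a hedged sketch, with the actual work shown only as a placeholder:

        block_netpoll_tx();             /* atomic_inc(&netpoll_block_tx) */
        /* ... add or remove a slave, reprogram addresses, etc. ... */
        unblock_netpoll_tx();           /* atomic_dec(&netpoll_block_tx) */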
*/ - if (bond->slave_cnt == 0) + if (is_zero_ether_addr(bond->dev->dev_addr)) memcpy(bond->dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len); @@ -5297,13 +5299,6 @@ static int __init bonding_init(void) if (res) goto out; -#ifdef CONFIG_NET_POLL_CONTROLLER - if (!alloc_cpumask_var(&netpoll_block_tx, GFP_KERNEL)) { - res = -ENOMEM; - goto out; - } -#endif - res = register_pernet_subsys(&bond_net_ops); if (res) goto out; @@ -5332,9 +5327,6 @@ err: rtnl_link_unregister(&bond_link_ops); err_link: unregister_pernet_subsys(&bond_net_ops); -#ifdef CONFIG_NET_POLL_CONTROLLER - free_cpumask_var(netpoll_block_tx); -#endif goto out; } @@ -5351,7 +5343,10 @@ static void __exit bonding_exit(void) unregister_pernet_subsys(&bond_net_ops); #ifdef CONFIG_NET_POLL_CONTROLLER - free_cpumask_var(netpoll_block_tx); + /* + * Make sure we don't have an imbalance on our netpoll blocking + */ + WARN_ON(atomic_read(&netpoll_block_tx)); #endif } diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index 4eedb12df6c..c2f081352a0 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -119,26 +119,22 @@ #ifdef CONFIG_NET_POLL_CONTROLLER -extern cpumask_var_t netpoll_block_tx; +extern atomic_t netpoll_block_tx; static inline void block_netpoll_tx(void) { - preempt_disable(); - BUG_ON(cpumask_test_and_set_cpu(smp_processor_id(), - netpoll_block_tx)); + atomic_inc(&netpoll_block_tx); } static inline void unblock_netpoll_tx(void) { - BUG_ON(!cpumask_test_and_clear_cpu(smp_processor_id(), - netpoll_block_tx)); - preempt_enable(); + atomic_dec(&netpoll_block_tx); } static inline int is_netpoll_tx_blocked(struct net_device *dev) { if (unlikely(dev->priv_flags & IFF_IN_NETPOLL)) - return cpumask_test_cpu(smp_processor_id(), netpoll_block_tx); + return atomic_read(&netpoll_block_tx); return 0; } #else diff --git a/drivers/net/caif/caif_shm_u5500.c b/drivers/net/caif/caif_shm_u5500.c index 1cd90da86f1..32b1c6fb2de 100644 --- a/drivers/net/caif/caif_shm_u5500.c +++ b/drivers/net/caif/caif_shm_u5500.c @@ -5,7 +5,7 @@ * License terms: GNU General Public License (GPL) version 2 */ -#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt +#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt #include <linux/version.h> #include <linux/init.h> diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c index 19f9c065666..80511167f35 100644 --- a/drivers/net/caif/caif_shmcore.c +++ b/drivers/net/caif/caif_shmcore.c @@ -6,7 +6,7 @@ * License terms: GNU General Public License (GPL) version 2 */ -#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt +#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt #include <linux/spinlock.h> #include <linux/sched.h> diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c index 8427533fe31..20da1996d35 100644 --- a/drivers/net/caif/caif_spi.c +++ b/drivers/net/caif/caif_spi.c @@ -33,6 +33,9 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>"); MODULE_DESCRIPTION("CAIF SPI driver"); +/* Returns the number of padding bytes for alignment. */ +#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1))))) + static int spi_loop; module_param(spi_loop, bool, S_IRUGO); MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode."); @@ -41,7 +44,10 @@ MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode."); module_param(spi_frm_align, int, S_IRUGO); MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment."); -/* SPI padding options. */ +/* + * SPI padding options. 
+ * Warning: must be a base of 2 (& operation used) and can not be zero ! + */ module_param(spi_up_head_align, int, S_IRUGO); MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment."); @@ -240,15 +246,13 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf, static const struct file_operations dbgfs_state_fops = { .open = dbgfs_open, .read = dbgfs_state, - .owner = THIS_MODULE, - .llseek = default_llseek, + .owner = THIS_MODULE }; static const struct file_operations dbgfs_frame_fops = { .open = dbgfs_open, .read = dbgfs_frame, - .owner = THIS_MODULE, - .llseek = default_llseek, + .owner = THIS_MODULE }; static inline void dev_debugfs_add(struct cfspi *cfspi) @@ -337,6 +341,9 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len) u8 *dst = buf; caif_assert(buf); + if (cfspi->slave && !cfspi->slave_talked) + cfspi->slave_talked = true; + do { struct sk_buff *skb; struct caif_payload_info *info; @@ -357,8 +364,8 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len) * Compute head offset i.e. number of bytes to add to * get the start of the payload aligned. */ - if (spi_up_head_align) { - spad = 1 + ((info->hdr_len + 1) & spi_up_head_align); + if (spi_up_head_align > 1) { + spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align); *dst = (u8)(spad - 1); dst += spad; } @@ -373,7 +380,7 @@ int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len) * Compute tail offset i.e. number of bytes to add to * get the complete CAIF frame aligned. */ - epad = (skb->len + spad) & spi_up_tail_align; + epad = PAD_POW2((skb->len + spad), spi_up_tail_align); dst += epad; dev_kfree_skb(skb); @@ -417,14 +424,14 @@ int cfspi_xmitlen(struct cfspi *cfspi) * Compute head offset i.e. number of bytes to add to * get the start of the payload aligned. */ - if (spi_up_head_align) - spad = 1 + ((info->hdr_len + 1) & spi_up_head_align); + if (spi_up_head_align > 1) + spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align); /* * Compute tail offset i.e. number of bytes to add to * get the complete CAIF frame aligned. */ - epad = (skb->len + spad) & spi_up_tail_align; + epad = PAD_POW2((skb->len + spad), spi_up_tail_align); if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) { skb_queue_tail(&cfspi->chead, skb); @@ -433,6 +440,7 @@ int cfspi_xmitlen(struct cfspi *cfspi) } else { /* Put back packet. */ skb_queue_head(&cfspi->qhead, skb); + break; } } while (pkts <= CAIF_MAX_SPI_PKTS); @@ -453,6 +461,15 @@ static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc) { struct cfspi *cfspi = (struct cfspi *)ifc->priv; + /* + * The slave device is the master on the link. Interrupts before the + * slave has transmitted are considered spurious. + */ + if (cfspi->slave && !cfspi->slave_talked) { + printk(KERN_WARNING "CFSPI: Spurious SS interrupt.\n"); + return; + } + if (!in_interrupt()) spin_lock(&cfspi->lock); if (assert) { @@ -465,7 +482,8 @@ static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc) spin_unlock(&cfspi->lock); /* Wake up the xfer thread. */ - wake_up_interruptible(&cfspi->wait); + if (assert) + wake_up_interruptible(&cfspi->wait); } static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc) @@ -523,7 +541,7 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len) * Compute head offset i.e. number of bytes added to * get the start of the payload aligned. */ - if (spi_down_head_align) { + if (spi_down_head_align > 1) { spad = 1 + *src; src += spad; } @@ -564,7 +582,7 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len) * Compute tail offset i.e. 
number of bytes added to * get the complete CAIF frame aligned. */ - epad = (pkt_len + spad) & spi_down_tail_align; + epad = PAD_POW2((pkt_len + spad), spi_down_tail_align); src += epad; } while ((src - buf) < len); @@ -617,19 +635,28 @@ int cfspi_spi_probe(struct platform_device *pdev) ndev = alloc_netdev(sizeof(struct cfspi), "cfspi%d", cfspi_setup); - if (!dev) - return -ENODEV; + if (!ndev) + return -ENOMEM; cfspi = netdev_priv(ndev); netif_stop_queue(ndev); cfspi->ndev = ndev; cfspi->pdev = pdev; - /* Set flow info */ + /* Set flow info. */ cfspi->flow_off_sent = 0; cfspi->qd_low_mark = LOW_WATER_MARK; cfspi->qd_high_mark = HIGH_WATER_MARK; + /* Set slave info. */ + if (!strncmp(cfspi_spi_driver.driver.name, "cfspi_sspi", 10)) { + cfspi->slave = true; + cfspi->slave_talked = false; + } else { + cfspi->slave = false; + cfspi->slave_talked = false; + } + /* Assign the SPI device. */ cfspi->dev = dev; /* Assign the device ifc to this SPI interface. */ diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c index 2111dbfea6f..1b9943a4eda 100644 --- a/drivers/net/caif/caif_spi_slave.c +++ b/drivers/net/caif/caif_spi_slave.c @@ -36,10 +36,15 @@ static inline int forward_to_spi_cmd(struct cfspi *cfspi) #endif int spi_frm_align = 2; -int spi_up_head_align = 1; -int spi_up_tail_align; -int spi_down_head_align = 3; -int spi_down_tail_align = 1; + +/* + * SPI padding options. + * Warning: must be a base of 2 (& operation used) and can not be zero ! + */ +int spi_up_head_align = 1 << 1; +int spi_up_tail_align = 1 << 0; +int spi_down_head_align = 1 << 2; +int spi_down_tail_align = 1 << 1; #ifdef CONFIG_DEBUG_FS static inline void debugfs_store_prev(struct cfspi *cfspi) diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c index 407d4e27207..046d846c652 100644 --- a/drivers/net/cxgb3/cxgb3_main.c +++ b/drivers/net/cxgb3/cxgb3_main.c @@ -3341,7 +3341,6 @@ static int __devinit init_one(struct pci_dev *pdev, adapter->name = adapter->port[i]->name; __set_bit(i, &adapter->registered_device_map); - netif_tx_stop_all_queues(adapter->port[i]); } } if (!adapter->registered_device_map) { diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c index f17703f410b..f50bc98310f 100644 --- a/drivers/net/cxgb4/cxgb4_main.c +++ b/drivers/net/cxgb4/cxgb4_main.c @@ -3736,7 +3736,6 @@ static int __devinit init_one(struct pci_dev *pdev, __set_bit(i, &adapter->registered_device_map); adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i; - netif_tx_stop_all_queues(adapter->port[i]); } } if (!adapter->registered_device_map) { diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c index bb813d94aea..e97521c801e 100644 --- a/drivers/net/cxgb4/t4_hw.c +++ b/drivers/net/cxgb4/t4_hw.c @@ -2408,7 +2408,7 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, if (index < NEXACT_MAC) ret++; else if (hash) - *hash |= (1 << hash_mac_addr(addr[i])); + *hash |= (1ULL << hash_mac_addr(addr[i])); } return ret; } diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c index 555ecc5a2e9..6bf464afa90 100644 --- a/drivers/net/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/cxgb4vf/cxgb4vf_main.c @@ -753,7 +753,9 @@ static int cxgb4vf_open(struct net_device *dev) if (err) return err; set_bit(pi->port_id, &adapter->open_device_map); - link_start(dev); + err = link_start(dev); + if (err) + return err; netif_tx_start_all_queues(dev); return 0; } @@ -814,40 +816,48 @@ static struct net_device_stats *cxgb4vf_get_stats(struct 
net_device *dev) } /* - * Collect up to maxaddrs worth of a netdevice's unicast addresses into an - * array of addrss pointers and return the number collected. + * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting + * at a specified offset within the list, into an array of addrss pointers and + * return the number collected. */ -static inline int collect_netdev_uc_list_addrs(const struct net_device *dev, - const u8 **addr, - unsigned int maxaddrs) +static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev, + const u8 **addr, + unsigned int offset, + unsigned int maxaddrs) { + unsigned int index = 0; unsigned int naddr = 0; const struct netdev_hw_addr *ha; - for_each_dev_addr(dev, ha) { - addr[naddr++] = ha->addr; - if (naddr >= maxaddrs) - break; - } + for_each_dev_addr(dev, ha) + if (index++ >= offset) { + addr[naddr++] = ha->addr; + if (naddr >= maxaddrs) + break; + } return naddr; } /* - * Collect up to maxaddrs worth of a netdevice's multicast addresses into an - * array of addrss pointers and return the number collected. + * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting + * at a specified offset within the list, into an array of addrss pointers and + * return the number collected. */ -static inline int collect_netdev_mc_list_addrs(const struct net_device *dev, - const u8 **addr, - unsigned int maxaddrs) +static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev, + const u8 **addr, + unsigned int offset, + unsigned int maxaddrs) { + unsigned int index = 0; unsigned int naddr = 0; const struct netdev_hw_addr *ha; - netdev_for_each_mc_addr(ha, dev) { - addr[naddr++] = ha->addr; - if (naddr >= maxaddrs) - break; - } + netdev_for_each_mc_addr(ha, dev) + if (index++ >= offset) { + addr[naddr++] = ha->addr; + if (naddr >= maxaddrs) + break; + } return naddr; } @@ -860,16 +870,20 @@ static int set_addr_filters(const struct net_device *dev, bool sleep) u64 mhash = 0; u64 uhash = 0; bool free = true; - u16 filt_idx[7]; + unsigned int offset, naddr; const u8 *addr[7]; - int ret, naddr = 0; + int ret; const struct port_info *pi = netdev_priv(dev); /* first do the secondary unicast addresses */ - naddr = collect_netdev_uc_list_addrs(dev, addr, ARRAY_SIZE(addr)); - if (naddr > 0) { + for (offset = 0; ; offset += naddr) { + naddr = collect_netdev_uc_list_addrs(dev, addr, offset, + ARRAY_SIZE(addr)); + if (naddr == 0) + break; + ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free, - naddr, addr, filt_idx, &uhash, sleep); + naddr, addr, NULL, &uhash, sleep); if (ret < 0) return ret; @@ -877,12 +891,17 @@ static int set_addr_filters(const struct net_device *dev, bool sleep) } /* next set up the multicast addresses */ - naddr = collect_netdev_mc_list_addrs(dev, addr, ARRAY_SIZE(addr)); - if (naddr > 0) { + for (offset = 0; ; offset += naddr) { + naddr = collect_netdev_mc_list_addrs(dev, addr, offset, + ARRAY_SIZE(addr)); + if (naddr == 0) + break; + ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free, - naddr, addr, filt_idx, &mhash, sleep); + naddr, addr, NULL, &mhash, sleep); if (ret < 0) return ret; + free = false; } return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0, @@ -1103,18 +1122,6 @@ static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr) return 0; } -/* - * Return a TX Queue on which to send the specified skb. 
- */ -static u16 cxgb4vf_select_queue(struct net_device *dev, struct sk_buff *skb) -{ - /* - * XXX For now just use the default hash but we probably want to - * XXX look at other possibilities ... - */ - return skb_tx_hash(dev, skb); -} - #ifdef CONFIG_NET_POLL_CONTROLLER /* * Poll all of our receive queues. This is called outside of normal interrupt @@ -2075,6 +2082,22 @@ static int adap_init0(struct adapter *adapter) } /* + * Some environments do not properly handle PCIE FLRs -- e.g. in Linux + * 2.6.31 and later we can't call pci_reset_function() in order to + * issue an FLR because of a self- deadlock on the device semaphore. + * Meanwhile, the OS infrastructure doesn't issue FLRs in all the + * cases where they're needed -- for instance, some versions of KVM + * fail to reset "Assigned Devices" when the VM reboots. Therefore we + * use the firmware based reset in order to reset any per function + * state. + */ + err = t4vf_fw_reset(adapter); + if (err < 0) { + dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err); + return err; + } + + /* * Grab basic operational parameters. These will predominantly have * been set up by the Physical Function Driver or will be hard coded * into the adapter. We just have to live with them ... Note that @@ -2246,6 +2269,7 @@ static void __devinit cfg_queues(struct adapter *adapter) { struct sge *s = &adapter->sge; int q10g, n10g, qidx, pidx, qs; + size_t iqe_size; /* * We should not be called till we know how many Queue Sets we can @@ -2290,6 +2314,13 @@ static void __devinit cfg_queues(struct adapter *adapter) s->ethqsets = qidx; /* + * The Ingress Queue Entry Size for our various Response Queues needs + * to be big enough to accommodate the largest message we can receive + * from the chip/firmware; which is 64 bytes ... + */ + iqe_size = 64; + + /* * Set up default Queue Set parameters ... Start off with the * shortest interrupt holdoff timer. */ @@ -2297,7 +2328,7 @@ static void __devinit cfg_queues(struct adapter *adapter) struct sge_eth_rxq *rxq = &s->ethrxq[qs]; struct sge_eth_txq *txq = &s->ethtxq[qs]; - init_rspq(&rxq->rspq, 0, 0, 1024, L1_CACHE_BYTES); + init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size); rxq->fl.size = 72; txq->q.size = 1024; } @@ -2306,8 +2337,7 @@ static void __devinit cfg_queues(struct adapter *adapter) * The firmware event queue is used for link state changes and * notifications of TX DMA completions. */ - init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, - L1_CACHE_BYTES); + init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size); /* * The forwarded interrupt queue is used when we're in MSI interrupt @@ -2323,7 +2353,7 @@ static void __devinit cfg_queues(struct adapter *adapter) * any time ... 
*/ init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1, - L1_CACHE_BYTES); + iqe_size); } /* @@ -2417,7 +2447,6 @@ static const struct net_device_ops cxgb4vf_netdev_ops = { .ndo_get_stats = cxgb4vf_get_stats, .ndo_set_rx_mode = cxgb4vf_set_rxmode, .ndo_set_mac_address = cxgb4vf_set_mac_addr, - .ndo_select_queue = cxgb4vf_select_queue, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = cxgb4vf_do_ioctl, .ndo_change_mtu = cxgb4vf_change_mtu, @@ -2600,7 +2629,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, pi->xact_addr_filt = -1; pi->rx_offload = RX_CSO; netif_carrier_off(netdev); - netif_tx_stop_all_queues(netdev); netdev->irq = pdev->irq; netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | @@ -2625,7 +2653,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, netdev->do_ioctl = cxgb4vf_do_ioctl; netdev->change_mtu = cxgb4vf_change_mtu; netdev->set_mac_address = cxgb4vf_set_mac_addr; - netdev->select_queue = cxgb4vf_select_queue; #ifdef CONFIG_NET_POLL_CONTROLLER netdev->poll_controller = cxgb4vf_poll_controller; #endif @@ -2844,6 +2871,14 @@ static struct pci_device_id cxgb4vf_pci_tbl[] = { CH_DEVICE(0x4800, 0), /* T440-dbg */ CH_DEVICE(0x4801, 0), /* T420-cr */ CH_DEVICE(0x4802, 0), /* T422-cr */ + CH_DEVICE(0x4803, 0), /* T440-cr */ + CH_DEVICE(0x4804, 0), /* T420-bch */ + CH_DEVICE(0x4805, 0), /* T440-bch */ + CH_DEVICE(0x4806, 0), /* T460-ch */ + CH_DEVICE(0x4807, 0), /* T420-so */ + CH_DEVICE(0x4808, 0), /* T420-cx */ + CH_DEVICE(0x4809, 0), /* T420-bt */ + CH_DEVICE(0x480a, 0), /* T404-bt */ { 0, } }; diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c index f10864ddafb..ecf0770bf0f 100644 --- a/drivers/net/cxgb4vf/sge.c +++ b/drivers/net/cxgb4vf/sge.c @@ -154,13 +154,14 @@ enum { */ RX_COPY_THRES = 256, RX_PULL_LEN = 128, -}; -/* - * Can't define this in the above enum because PKTSHIFT isn't a constant in - * the VF Driver ... - */ -#define RX_PKT_PULL_LEN (RX_PULL_LEN + PKTSHIFT) + /* + * Main body length for sk_buffs used for RX Ethernet packets with + * fragments. Should be >= RX_PULL_LEN but possibly bigger to give + * pskb_may_pull() some room. + */ + RX_SKB_LEN = 512, +}; /* * Software state per TX descriptor. @@ -1355,6 +1356,67 @@ out_free: } /** + * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list + * @gl: the gather list + * @skb_len: size of sk_buff main body if it carries fragments + * @pull_len: amount of data to move to the sk_buff's main body + * + * Builds an sk_buff from the given packet gather list. Returns the + * sk_buff or %NULL if sk_buff allocation failed. + */ +struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl, + unsigned int skb_len, unsigned int pull_len) +{ + struct sk_buff *skb; + struct skb_shared_info *ssi; + + /* + * If the ingress packet is small enough, allocate an skb large enough + * for all of the data and copy it inline. Otherwise, allocate an skb + * with enough room to pull in the header and reference the rest of + * the data via the skb fragment list. + * + * Below we rely on RX_COPY_THRES being less than the smallest Rx + * buff! size, which is expected since buffers are at least + * PAGE_SIZEd. In this case packets up to RX_COPY_THRES have only one + * fragment. 
+ */ + if (gl->tot_len <= RX_COPY_THRES) { + /* small packets have only one fragment */ + skb = alloc_skb(gl->tot_len, GFP_ATOMIC); + if (unlikely(!skb)) + goto out; + __skb_put(skb, gl->tot_len); + skb_copy_to_linear_data(skb, gl->va, gl->tot_len); + } else { + skb = alloc_skb(skb_len, GFP_ATOMIC); + if (unlikely(!skb)) + goto out; + __skb_put(skb, pull_len); + skb_copy_to_linear_data(skb, gl->va, pull_len); + + ssi = skb_shinfo(skb); + ssi->frags[0].page = gl->frags[0].page; + ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len; + ssi->frags[0].size = gl->frags[0].size - pull_len; + if (gl->nfrags > 1) + memcpy(&ssi->frags[1], &gl->frags[1], + (gl->nfrags-1) * sizeof(skb_frag_t)); + ssi->nr_frags = gl->nfrags; + + skb->len = gl->tot_len; + skb->data_len = skb->len - pull_len; + skb->truesize += skb->data_len; + + /* Get a reference for the last page, we don't own it */ + get_page(gl->frags[gl->nfrags - 1].page); + } + +out: + return skb; +} + +/** * t4vf_pktgl_free - free a packet gather list * @gl: the gather list * @@ -1463,10 +1525,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, { struct sk_buff *skb; struct port_info *pi; - struct skb_shared_info *ssi; const struct cpl_rx_pkt *pkt = (void *)&rsp[1]; bool csum_ok = pkt->csum_calc && !pkt->err_vec; - unsigned int len = be16_to_cpu(pkt->len); struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); /* @@ -1481,42 +1541,14 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, } /* - * If the ingress packet is small enough, allocate an skb large enough - * for all of the data and copy it inline. Otherwise, allocate an skb - * with enough room to pull in the header and reference the rest of - * the data via the skb fragment list. + * Convert the Packet Gather List into an skb. */ - if (len <= RX_COPY_THRES) { - /* small packets have only one fragment */ - skb = alloc_skb(gl->frags[0].size, GFP_ATOMIC); - if (!skb) - goto nomem; - __skb_put(skb, gl->frags[0].size); - skb_copy_to_linear_data(skb, gl->va, gl->frags[0].size); - } else { - skb = alloc_skb(RX_PKT_PULL_LEN, GFP_ATOMIC); - if (!skb) - goto nomem; - __skb_put(skb, RX_PKT_PULL_LEN); - skb_copy_to_linear_data(skb, gl->va, RX_PKT_PULL_LEN); - - ssi = skb_shinfo(skb); - ssi->frags[0].page = gl->frags[0].page; - ssi->frags[0].page_offset = (gl->frags[0].page_offset + - RX_PKT_PULL_LEN); - ssi->frags[0].size = gl->frags[0].size - RX_PKT_PULL_LEN; - if (gl->nfrags > 1) - memcpy(&ssi->frags[1], &gl->frags[1], - (gl->nfrags-1) * sizeof(skb_frag_t)); - ssi->nr_frags = gl->nfrags; - skb->len = len + PKTSHIFT; - skb->data_len = skb->len - RX_PKT_PULL_LEN; - skb->truesize += skb->data_len; - - /* Get a reference for the last page, we don't own it */ - get_page(gl->frags[gl->nfrags - 1].page); + skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN); + if (unlikely(!skb)) { + t4vf_pktgl_free(gl); + rxq->stats.rx_drops++; + return 0; } - __skb_pull(skb, PKTSHIFT); skb->protocol = eth_type_trans(skb, rspq->netdev); skb_record_rx_queue(skb, rspq->idx); @@ -1549,11 +1581,6 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, netif_receive_skb(skb); return 0; - -nomem: - t4vf_pktgl_free(gl); - rxq->stats.rx_drops++; - return 0; } /** @@ -1679,6 +1706,7 @@ int process_responses(struct sge_rspq *rspq, int budget) } len = RSPD_LEN(len); } + gl.tot_len = len; /* * Gather packet fragments. 
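A quick worked pass over the chunking used in set_addr_filters() above and in the t4vf_alloc_mac_filt() rewrite below (the address count here is invented for illustration): with addr[] able to hold 7 pointers and, say, 20 secondary unicast addresses configured, the collection loop walks the list in batches,

        /*
         * offset = 0   -> collects addresses  0..6    (naddr = 7)
         * offset = 7   -> collects addresses  7..13   (naddr = 7)
         * offset = 14  -> collects addresses 14..19   (naddr = 6)
         * offset = 20  -> nothing left                (naddr = 0), loop exits
         */

issuing one mailbox command per batch. As the t4vf_hw.c hunk below shows, the free flag is cleared after the first batch so existing exact-match filters are released only once, and a final -ENOMEM is translated into the number of filters actually written.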
diff --git a/drivers/net/cxgb4vf/t4vf_common.h b/drivers/net/cxgb4vf/t4vf_common.h index 873cb7d86c5..a65c80aed1f 100644 --- a/drivers/net/cxgb4vf/t4vf_common.h +++ b/drivers/net/cxgb4vf/t4vf_common.h @@ -235,6 +235,7 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd, int __devinit t4vf_wait_dev_ready(struct adapter *); int __devinit t4vf_port_init(struct adapter *, int); +int t4vf_fw_reset(struct adapter *); int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *); int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *); diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c index ea1c123f0cb..19520afe1a1 100644 --- a/drivers/net/cxgb4vf/t4vf_hw.c +++ b/drivers/net/cxgb4vf/t4vf_hw.c @@ -326,6 +326,25 @@ int __devinit t4vf_port_init(struct adapter *adapter, int pidx) } /** + * t4vf_fw_reset - issue a reset to FW + * @adapter: the adapter + * + * Issues a reset command to FW. For a Physical Function this would + * result in the Firmware reseting all of its state. For a Virtual + * Function this just resets the state associated with the VF. + */ +int t4vf_fw_reset(struct adapter *adapter) +{ + struct fw_reset_cmd cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) | + FW_CMD_WRITE); + cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd)); + return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); +} + +/** * t4vf_query_params - query FW or device parameters * @adapter: the adapter * @nparams: the number of parameters @@ -995,48 +1014,72 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free, unsigned int naddr, const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok) { - int i, ret; + int offset, ret = 0; + unsigned nfilters = 0; + unsigned int rem = naddr; struct fw_vi_mac_cmd cmd, rpl; - struct fw_vi_mac_exact *p; - size_t len16; - if (naddr > ARRAY_SIZE(cmd.u.exact)) + if (naddr > FW_CLS_TCAM_NUM_ENTRIES) return -EINVAL; - len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, - u.exact[naddr]), 16); - memset(&cmd, 0, sizeof(cmd)); - cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) | - FW_CMD_REQUEST | - FW_CMD_WRITE | - (free ? FW_CMD_EXEC : 0) | - FW_VI_MAC_CMD_VIID(viid)); - cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) | - FW_CMD_LEN16(len16)); + for (offset = 0; offset < naddr; /**/) { + unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact) + ? rem + : ARRAY_SIZE(cmd.u.exact)); + size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, + u.exact[fw_naddr]), 16); + struct fw_vi_mac_exact *p; + int i; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) | + FW_CMD_REQUEST | + FW_CMD_WRITE | + (free ? 
FW_CMD_EXEC : 0) | + FW_VI_MAC_CMD_VIID(viid)); + cmd.freemacs_to_len16 = + cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) | + FW_CMD_LEN16(len16)); + + for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) { + p->valid_to_idx = cpu_to_be16( + FW_VI_MAC_CMD_VALID | + FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); + memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); + } - for (i = 0, p = cmd.u.exact; i < naddr; i++, p++) { - p->valid_to_idx = - cpu_to_be16(FW_VI_MAC_CMD_VALID | - FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); - memcpy(p->macaddr, addr[i], sizeof(p->macaddr)); - } - ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, sleep_ok); - if (ret) - return ret; - - for (i = 0, p = rpl.u.exact; i < naddr; i++, p++) { - u16 index = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx)); - - if (idx) - idx[i] = (index >= FW_CLS_TCAM_NUM_ENTRIES - ? 0xffff - : index); - if (index < FW_CLS_TCAM_NUM_ENTRIES) - ret++; - else if (hash) - *hash |= (1 << hash_mac_addr(addr[i])); + ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, + sleep_ok); + if (ret && ret != -ENOMEM) + break; + + for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) { + u16 index = FW_VI_MAC_CMD_IDX_GET( + be16_to_cpu(p->valid_to_idx)); + + if (idx) + idx[offset+i] = + (index >= FW_CLS_TCAM_NUM_ENTRIES + ? 0xffff + : index); + if (index < FW_CLS_TCAM_NUM_ENTRIES) + nfilters++; + else if (hash) + *hash |= (1ULL << hash_mac_addr(addr[offset+i])); + } + + free = false; + offset += fw_naddr; + rem -= fw_naddr; } + + /* + * If there were no errors or we merely ran out of room in our MAC + * address arena, return the number of filters actually written. + */ + if (ret == 0 || ret == -ENOMEM) + ret = nfilters; return ret; } diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index 4686c3983fc..4d62f7bfa03 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -31,7 +31,7 @@ char e1000_driver_name[] = "e1000"; static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; -#define DRV_VERSION "7.3.21-k6-NAPI" +#define DRV_VERSION "7.3.21-k8-NAPI" const char e1000_driver_version[] = DRV_VERSION; static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; @@ -485,9 +485,6 @@ void e1000_down(struct e1000_adapter *adapter) struct net_device *netdev = adapter->netdev; u32 rctl, tctl; - /* signal that we're down so the interrupt handler does not - * reschedule our watchdog timer */ - set_bit(__E1000_DOWN, &adapter->flags); /* disable receives in the hardware */ rctl = er32(RCTL); @@ -508,6 +505,13 @@ void e1000_down(struct e1000_adapter *adapter) e1000_irq_disable(adapter); + /* + * Setting DOWN must be after irq_disable to prevent + * a screaming interrupt. Setting DOWN also prevents + * timers and tasks from rescheduling. 
+ */ + set_bit(__E1000_DOWN, &adapter->flags); + del_timer_sync(&adapter->tx_fifo_stall_timer); del_timer_sync(&adapter->watchdog_timer); del_timer_sync(&adapter->phy_info_timer); diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c index 75b099ce49c..1f37ee6b2a2 100644 --- a/drivers/net/ehea/ehea_ethtool.c +++ b/drivers/net/ehea/ehea_ethtool.c @@ -261,6 +261,13 @@ static void ehea_get_ethtool_stats(struct net_device *dev, } +static int ehea_set_flags(struct net_device *dev, u32 data) +{ + return ethtool_op_set_flags(dev, data, ETH_FLAG_LRO + | ETH_FLAG_TXVLAN + | ETH_FLAG_RXVLAN); +} + const struct ethtool_ops ehea_ethtool_ops = { .get_settings = ehea_get_settings, .get_drvinfo = ehea_get_drvinfo, @@ -273,6 +280,8 @@ const struct ethtool_ops ehea_ethtool_ops = { .get_ethtool_stats = ehea_get_ethtool_stats, .get_rx_csum = ehea_get_rx_csum, .set_settings = ehea_set_settings, + .get_flags = ethtool_op_get_flags, + .set_flags = ehea_set_flags, .nway_reset = ehea_nway_reset, /* Restart autonegotiation */ }; diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 182b2a7be8d..b95f087cd5a 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -400,6 +400,7 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes) skb_arr_rq1[index] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); if (!skb_arr_rq1[index]) { + ehea_info("Unable to allocate enough skb in the array\n"); pr->rq1_skba.os_skbs = fill_wqes - i; break; } @@ -422,13 +423,20 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a) struct net_device *dev = pr->port->netdev; int i; - for (i = 0; i < pr->rq1_skba.len; i++) { + if (nr_rq1a > pr->rq1_skba.len) { + ehea_error("NR_RQ1A bigger than skb array len\n"); + return; + } + + for (i = 0; i < nr_rq1a; i++) { skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); - if (!skb_arr_rq1[i]) + if (!skb_arr_rq1[i]) { + ehea_info("No enough memory to allocate skb array\n"); break; + } } /* Ring doorbell */ - ehea_update_rq1a(pr->qp, nr_rq1a); + ehea_update_rq1a(pr->qp, i); } static int ehea_refill_rq_def(struct ehea_port_res *pr, @@ -675,7 +683,7 @@ static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe, int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) && pr->port->vgrp); - if (use_lro) { + if (skb->dev->features & NETIF_F_LRO) { if (vlan_extracted) lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb, pr->port->vgrp, @@ -735,8 +743,10 @@ static int ehea_proc_rwqes(struct net_device *dev, skb = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); - if (!skb) + if (!skb) { + ehea_info("Not enough memory to allocate skb\n"); break; + } } skb_copy_to_linear_data(skb, ((char *)cqe) + 64, cqe->num_bytes_transfered - 4); @@ -777,7 +787,7 @@ static int ehea_proc_rwqes(struct net_device *dev, } cqe = ehea_poll_rq1(qp, &wqe_index); } - if (use_lro) + if (dev->features & NETIF_F_LRO) lro_flush_all(&pr->lro_mgr); pr->rx_packets += processed; @@ -3268,6 +3278,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, | NETIF_F_LLTX; dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT; + if (use_lro) + dev->features |= NETIF_F_LRO; + INIT_WORK(&port->reset_task, ehea_reset_port); ret = register_netdev(dev); diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c index a466ef91dd4..aa28b270c04 100644 --- a/drivers/net/enic/enic_main.c +++ b/drivers/net/enic/enic_main.c @@ -1962,7 +1962,8 @@ static void enic_poll_controller(struct net_device *netdev) case 
VNIC_DEV_INTR_MODE_MSIX: for (i = 0; i < enic->rq_count; i++) { intr = enic_msix_rq_intr(enic, i); - enic_isr_msix_rq(enic->msix_entry[intr].vector, enic); + enic_isr_msix_rq(enic->msix_entry[intr].vector, + &enic->napi[i]); } intr = enic_msix_wq_intr(enic, i); enic_isr_msix_wq(enic->msix_entry[intr].vector, enic); diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index f860072e2f6..44be037705a 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -578,11 +578,10 @@ static int gfar_parse_group(struct device_node *np, irq_of_parse_and_map(np, 1); priv->gfargrp[priv->num_grps].interruptError = irq_of_parse_and_map(np,2); - if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 || - priv->gfargrp[priv->num_grps].interruptReceive < 0 || - priv->gfargrp[priv->num_grps].interruptError < 0) { + if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ || + priv->gfargrp[priv->num_grps].interruptReceive == NO_IRQ || + priv->gfargrp[priv->num_grps].interruptError == NO_IRQ) return -EINVAL; - } } priv->gfargrp[priv->num_grps].grp_id = priv->num_grps; diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c index 5c566ebc54b..3bc8e276ba4 100644 --- a/drivers/net/gianfar_ethtool.c +++ b/drivers/net/gianfar_ethtool.c @@ -635,9 +635,10 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) if (wol->wolopts & ~WAKE_MAGIC) return -EINVAL; + device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC); + spin_lock_irqsave(&priv->bflock, flags); - priv->wol_en = wol->wolopts & WAKE_MAGIC ? 1 : 0; - device_set_wakeup_enable(&dev->dev, priv->wol_en); + priv->wol_en = !!device_may_wakeup(&dev->dev); spin_unlock_irqrestore(&priv->bflock, flags); return 0; diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c index 385dc3204cb..06bb9b79945 100644 --- a/drivers/net/ibm_newemac/core.c +++ b/drivers/net/ibm_newemac/core.c @@ -2871,7 +2871,6 @@ static int __devinit emac_probe(struct platform_device *ofdev, SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops); netif_carrier_off(ndev); - netif_stop_queue(ndev); err = register_netdev(ndev); if (err) { diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index ab9f675c5b8..fe337bd121a 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -104,6 +104,8 @@ static void ri_tasklet(unsigned long dev) rcu_read_unlock(); dev_kfree_skb(skb); stats->tx_dropped++; + if (skb_queue_len(&dp->tq) != 0) + goto resched; break; } rcu_read_unlock(); diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c index dc019809234..aa93655c3aa 100644 --- a/drivers/net/ipg.c +++ b/drivers/net/ipg.c @@ -88,16 +88,14 @@ static const char *ipg_brand_name[] = { "IC PLUS IP1000 1000/100/10 based NIC", "Sundance Technology ST2021 based NIC", "Tamarack Microelectronics TC9020/9021 based NIC", - "Tamarack Microelectronics TC9020/9021 based NIC", "D-Link NIC IP1000A" }; static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = { { PCI_VDEVICE(SUNDANCE, 0x1023), 0 }, { PCI_VDEVICE(SUNDANCE, 0x2021), 1 }, - { PCI_VDEVICE(SUNDANCE, 0x1021), 2 }, - { PCI_VDEVICE(DLINK, 0x9021), 3 }, - { PCI_VDEVICE(DLINK, 0x4020), 4 }, + { PCI_VDEVICE(DLINK, 0x9021), 2 }, + { PCI_VDEVICE(DLINK, 0x4020), 3 }, { 0, } }; diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c index 00b38bccd6d..52a7c86af66 100644 --- a/drivers/net/irda/sh_sir.c +++ b/drivers/net/irda/sh_sir.c @@ -258,7 +258,7 @@ static int sh_sir_set_baudrate(struct sh_sir_self *self, u32 baudrate) /* Baud Rate Error Correction x 10000 */ u32 rate_err_array[] = { - 0000, 0625, 1250, 
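The baud-rate correction table entries in this sh_sir hunk are a classic C pitfall: a leading zero makes an integer literal octal, so 0625 parses as 405 rather than 625. A tiny stand-alone program (nothing here comes from the sh_sir driver) makes the difference visible:

    #include <stdio.h>

    int main(void)
    {
        /* A leading zero switches the base of the literal to octal. */
        unsigned int octal_literal   = 0625;  /* 6*64 + 2*8 + 5 = 405 */
        unsigned int decimal_literal = 625;

        printf("0625 parses as %u, 625 parses as %u\n",
               octal_literal, decimal_literal);

        /* 1250, 1875, ... have no leading zero, so they were already decimal;
         * only the zero-prefixed entries of such a table are silently wrong. */
        return 0;
    }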
1875, + 0, 625, 1250, 1875, 2500, 3125, 3750, 4375, 5000, 5625, 6250, 6875, 7500, 8125, 8750, 9375, diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index 2bd3eb4ee5a..eee0b298bd3 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c @@ -764,8 +764,9 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, #ifdef IXGBE_FCOE /* adjust for FCoE Sequence Offload */ if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) - && (skb->protocol == htons(ETH_P_FCOE)) && - skb_is_gso(skb)) { + && skb_is_gso(skb) + && vlan_get_protocol(skb) == + htons(ETH_P_FCOE)) { hlen = skb_transport_offset(skb) + sizeof(struct fc_frame_header) + sizeof(struct fcoe_crc_eof); @@ -4770,6 +4771,9 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) adapter->rx_ring[i] = NULL; } + adapter->num_tx_queues = 0; + adapter->num_rx_queues = 0; + ixgbe_free_q_vectors(adapter); ixgbe_reset_interrupt_capability(adapter); } @@ -5823,7 +5827,7 @@ static void ixgbe_watchdog_task(struct work_struct *work) static int ixgbe_tso(struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring, struct sk_buff *skb, - u32 tx_flags, u8 *hdr_len) + u32 tx_flags, u8 *hdr_len, __be16 protocol) { struct ixgbe_adv_tx_context_desc *context_desc; unsigned int i; @@ -5841,7 +5845,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter, l4len = tcp_hdrlen(skb); *hdr_len += l4len; - if (skb->protocol == htons(ETH_P_IP)) { + if (protocol == htons(ETH_P_IP)) { struct iphdr *iph = ip_hdr(skb); iph->tot_len = 0; iph->check = 0; @@ -5880,7 +5884,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter, type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT); - if (skb->protocol == htons(ETH_P_IP)) + if (protocol == htons(ETH_P_IP)) type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); @@ -5906,16 +5910,10 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter, return false; } -static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb) +static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb, + __be16 protocol) { u32 rtn = 0; - __be16 protocol; - - if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) - protocol = ((const struct vlan_ethhdr *)skb->data)-> - h_vlan_encapsulated_proto; - else - protocol = skb->protocol; switch (protocol) { case cpu_to_be16(ETH_P_IP): @@ -5943,7 +5941,7 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb) default: if (unlikely(net_ratelimit())) e_warn(probe, "partial checksum but proto=%x!\n", - skb->protocol); + protocol); break; } @@ -5952,7 +5950,8 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb) static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags) + struct sk_buff *skb, u32 tx_flags, + __be16 protocol) { struct ixgbe_adv_tx_context_desc *context_desc; unsigned int i; @@ -5981,7 +5980,7 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, IXGBE_ADVTXD_DTYP_CTXT); if (skb->ip_summed == CHECKSUM_PARTIAL) - type_tucmd_mlhl |= ixgbe_psum(adapter, skb); + type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol); context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); /* use index zero for tx checksum offload */ @@ -6179,7 +6178,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, } static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, - int queue, u32 tx_flags) + int queue, 
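The ixgbe hunks in this file stop reading skb->protocol directly and instead derive the protocol once with vlan_get_protocol() and pass it down, so VLAN-tagged FCoE and IP frames are still classified correctly. The sketch below shows the underlying idea on a raw frame buffer: if the outer EtherType is 802.1Q, the real protocol sits after the 4-byte tag. The frame contents and helper names are invented for illustration.

    #include <stdint.h>
    #include <stdio.h>

    #define ETH_P_8021Q 0x8100
    #define ETH_P_IP    0x0800

    /* Read a 16-bit big-endian field from a frame. */
    static uint16_t be16_at(const uint8_t *p)
    {
        return (uint16_t)((p[0] << 8) | p[1]);
    }

    /*
     * Return the "inner" EtherType of an Ethernet frame: for an 802.1Q
     * frame the tag adds 4 bytes and the encapsulated type follows it.
     * (vlan_get_protocol() plays a similar role for an skb.)
     */
    static uint16_t frame_protocol(const uint8_t *frame)
    {
        uint16_t type = be16_at(frame + 12);   /* after dst + src MAC */

        if (type == ETH_P_8021Q)
            type = be16_at(frame + 16);        /* after the VLAN tag */
        return type;
    }

    int main(void)
    {
        uint8_t tagged[18] = {
            [12] = 0x81, [13] = 0x00,          /* 802.1Q TPID */
            [14] = 0x00, [15] = 0x05,          /* VLAN 5 */
            [16] = 0x08, [17] = 0x00,          /* inner type: IPv4 */
        };

        printf("inner EtherType: 0x%04x (IPv4 is 0x%04x)\n",
               frame_protocol(tagged), ETH_P_IP);
        return 0;
    }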
u32 tx_flags, __be16 protocol) { struct ixgbe_atr_input atr_input; struct tcphdr *th; @@ -6190,7 +6189,7 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, u8 l4type = 0; /* Right now, we support IPv4 only */ - if (skb->protocol != htons(ETH_P_IP)) + if (protocol != htons(ETH_P_IP)) return; /* check if we're UDP or TCP */ if (iph->protocol == IPPROTO_TCP) { @@ -6257,10 +6256,13 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) { struct ixgbe_adapter *adapter = netdev_priv(dev); int txq = smp_processor_id(); - #ifdef IXGBE_FCOE - if ((skb->protocol == htons(ETH_P_FCOE)) || - (skb->protocol == htons(ETH_P_FIP))) { + __be16 protocol; + + protocol = vlan_get_protocol(skb); + + if ((protocol == htons(ETH_P_FCOE)) || + (protocol == htons(ETH_P_FIP))) { if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1); txq += adapter->ring_feature[RING_F_FCOE].mask; @@ -6303,6 +6305,9 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev int tso; int count = 0; unsigned int f; + __be16 protocol; + + protocol = vlan_get_protocol(skb); if (vlan_tx_tag_present(skb)) { tx_flags |= vlan_tx_tag_get(skb); @@ -6323,8 +6328,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev /* for FCoE with DCB, we force the priority to what * was specified by the switch */ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED && - (skb->protocol == htons(ETH_P_FCOE) || - skb->protocol == htons(ETH_P_FIP))) { + (protocol == htons(ETH_P_FCOE) || + protocol == htons(ETH_P_FIP))) { #ifdef CONFIG_IXGBE_DCB if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK @@ -6334,7 +6339,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev } #endif /* flag for FCoE offloads */ - if (skb->protocol == htons(ETH_P_FCOE)) + if (protocol == htons(ETH_P_FCOE)) tx_flags |= IXGBE_TX_FLAGS_FCOE; } #endif @@ -6368,9 +6373,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev tx_flags |= IXGBE_TX_FLAGS_FSO; #endif /* IXGBE_FCOE */ } else { - if (skb->protocol == htons(ETH_P_IP)) + if (protocol == htons(ETH_P_IP)) tx_flags |= IXGBE_TX_FLAGS_IPV4; - tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len); + tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, + protocol); if (tso < 0) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -6378,7 +6384,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev if (tso) tx_flags |= IXGBE_TX_FLAGS_TSO; - else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) && + else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags, + protocol) && (skb->ip_summed == CHECKSUM_PARTIAL)) tx_flags |= IXGBE_TX_FLAGS_CSUM; } @@ -6392,7 +6399,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev test_bit(__IXGBE_FDIR_INIT_DONE, &tx_ring->reinit_state)) { ixgbe_atr(adapter, skb, tx_ring->queue_index, - tx_flags); + tx_flags, protocol); tx_ring->atr_count = 0; } } diff --git a/drivers/net/jme.c b/drivers/net/jme.c index d85edf3119c..c57d9a43cec 100644 --- a/drivers/net/jme.c +++ b/drivers/net/jme.c @@ -2955,11 +2955,7 @@ jme_init_one(struct pci_dev *pdev, * Tell stack that we are not ready to work until open() */ netif_carrier_off(netdev); - netif_stop_queue(netdev); - /* - * Register netdev - */ rc = register_netdev(netdev); if (rc) { pr_err("Cannot register net device\n"); diff --git a/drivers/net/mlx4/fw.c 
b/drivers/net/mlx4/fw.c index b68eee2414c..7a7e18ba278 100644 --- a/drivers/net/mlx4/fw.c +++ b/drivers/net/mlx4/fw.c @@ -289,6 +289,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET); dev_cap->bf_reg_size = 1 << (field & 0x1f); MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET); + if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) { + mlx4_warn(dev, "firmware bug: log2 # of blue flame regs is invalid (%d), forcing 3\n", field & 0x1f); + field = 3; + } dev_cap->bf_regs_per_page = 1 << (field & 0x3f); mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n", dev_cap->bf_reg_size, dev_cap->bf_regs_per_page); diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index a75ba951740..e1d30d7f207 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -41,9 +41,6 @@ MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID); -MODULE_FIRMWARE(NX_P2_MN_ROMIMAGE_NAME); -MODULE_FIRMWARE(NX_P3_CT_ROMIMAGE_NAME); -MODULE_FIRMWARE(NX_P3_MN_ROMIMAGE_NAME); MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME); char netxen_nic_driver_name[] = "netxen_nic"; diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index 472056b4744..03a1d280105 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c @@ -1,6 +1,6 @@ /* * Copyright (C) 1999 - 2010 Intel Corporation. - * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. + * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD. * * This code was derived from the Intel e1000e Linux driver. * @@ -2464,8 +2464,8 @@ static void __exit pch_gbe_exit_module(void) module_init(pch_gbe_init_module); module_exit(pch_gbe_exit_module); -MODULE_DESCRIPTION("OKI semiconductor PCH Gigabit ethernet Driver"); -MODULE_AUTHOR("OKI semiconductor, <masa-korg@dsn.okisemi.com>"); +MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver"); +MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id); diff --git a/drivers/net/pch_gbe/pch_gbe_param.c b/drivers/net/pch_gbe/pch_gbe_param.c index 2510146fc56..ef0996a0eaa 100644 --- a/drivers/net/pch_gbe/pch_gbe_param.c +++ b/drivers/net/pch_gbe/pch_gbe_param.c @@ -434,8 +434,8 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter) .err = "using default of " __MODULE_STRING(PCH_GBE_DEFAULT_TXD), .def = PCH_GBE_DEFAULT_TXD, - .arg = { .r = { .min = PCH_GBE_MIN_TXD } }, - .arg = { .r = { .max = PCH_GBE_MAX_TXD } } + .arg = { .r = { .min = PCH_GBE_MIN_TXD, + .max = PCH_GBE_MAX_TXD } } }; struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring; tx_ring->count = TxDescriptors; @@ -450,8 +450,8 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter) .err = "using default of " __MODULE_STRING(PCH_GBE_DEFAULT_RXD), .def = PCH_GBE_DEFAULT_RXD, - .arg = { .r = { .min = PCH_GBE_MIN_RXD } }, - .arg = { .r = { .max = PCH_GBE_MAX_RXD } } + .arg = { .r = { .min = PCH_GBE_MIN_RXD, + .max = PCH_GBE_MAX_RXD } } }; struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring; rx_ring->count = RxDescriptors; diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c index d2e166e29dd..8a4d19e5de0 100644 --- a/drivers/net/pcmcia/axnet_cs.c +++ b/drivers/net/pcmcia/axnet_cs.c @@ -111,13 +111,14 @@ static irqreturn_t 
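The pch_gbe_param change above is worth dwelling on: writing .arg = { .r = { .min = ... } }, .arg = { .r = { .max = ... } } initializes the same member twice, and in C the later designated initializer wins, so the .min bound was silently lost. A small stand-alone example (hypothetical struct names) demonstrates the rule the fix relies on:

    #include <stdio.h>

    struct range { int min, max; };
    struct opt   { struct range r; };

    int main(void)
    {
        /* Duplicate designated initializers: the second one overrides the
         * first, so .min silently ends up 0 (GCC warns with -Woverride-init). */
        struct opt broken = {
            .r = { .min = 80 },
            .r = { .max = 4096 },
        };

        /* The fixed form sets both bounds in one initializer. */
        struct opt fixed = {
            .r = { .min = 80, .max = 4096 },
        };

        printf("broken: min=%d max=%d\n", broken.r.min, broken.r.max);
        printf("fixed:  min=%d max=%d\n", fixed.r.min, fixed.r.max);
        return 0;
    }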
ax_interrupt(int irq, void *dev_id); typedef struct axnet_dev_t { struct pcmcia_device *p_dev; - caddr_t base; - struct timer_list watchdog; - int stale, fast_poll; - u_short link_status; - u_char duplex_flag; - int phy_id; - int flags; + caddr_t base; + struct timer_list watchdog; + int stale, fast_poll; + u_short link_status; + u_char duplex_flag; + int phy_id; + int flags; + int active_low; } axnet_dev_t; static inline axnet_dev_t *PRIV(struct net_device *dev) @@ -322,6 +323,8 @@ static int axnet_config(struct pcmcia_device *link) if (info->flags & IS_AX88790) outb(0x10, dev->base_addr + AXNET_GPIO); /* select Internal PHY */ + info->active_low = 0; + for (i = 0; i < 32; i++) { j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1); j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2); @@ -329,15 +332,18 @@ static int axnet_config(struct pcmcia_device *link) if ((j != 0) && (j != 0xffff)) break; } - /* Maybe PHY is in power down mode. (PPD_SET = 1) - Bit 2 of CCSR is active low. */ if (i == 32) { + /* Maybe PHY is in power down mode. (PPD_SET = 1) + Bit 2 of CCSR is active low. */ pcmcia_write_config_byte(link, CISREG_CCSR, 0x04); for (i = 0; i < 32; i++) { j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1); j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2); if (j == j2) continue; - if ((j != 0) && (j != 0xffff)) break; + if ((j != 0) && (j != 0xffff)) { + info->active_low = 1; + break; + } } } @@ -383,8 +389,12 @@ static int axnet_suspend(struct pcmcia_device *link) static int axnet_resume(struct pcmcia_device *link) { struct net_device *dev = link->priv; + axnet_dev_t *info = PRIV(dev); if (link->open) { + if (info->active_low == 1) + pcmcia_write_config_byte(link, CISREG_CCSR, 0x04); + axnet_reset_8390(dev); AX88190_init(dev, 1); netif_device_attach(dev); diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index cb3d13e4e07..35fda5ac812 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -64,7 +64,7 @@ config BCM63XX_PHY config ICPLUS_PHY tristate "Drivers for ICPlus PHYs" ---help--- - Currently supports the IP175C PHY. + Currently supports the IP175C and IP1001 PHYs. 
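For context on why the Kconfig help now lists two parts: the PHY core picks a phy_driver by comparing masked 32-bit PHY IDs, so IP175C (0x02430d80) and IP1001 (0x02430d90) need separate driver entries even though they share a vendor prefix. A minimal sketch of that matching rule, outside the kernel and with a hand-rolled table:

    #include <stdint.h>
    #include <stdio.h>

    struct phy_entry {
        uint32_t phy_id;
        uint32_t phy_id_mask;
        const char *name;
    };

    /* IDs and masks as used in the icplus driver below. */
    static const struct phy_entry drivers[] = {
        { 0x02430d80, 0x0ffffff0, "ICPlus IP175C" },
        { 0x02430d90, 0x0ffffff0, "ICPlus IP1001" },
    };

    /* A driver matches when the masked IDs are equal. */
    static const char *match(uint32_t hw_id)
    {
        for (unsigned i = 0; i < sizeof(drivers) / sizeof(drivers[0]); i++)
            if ((hw_id & drivers[i].phy_id_mask) ==
                (drivers[i].phy_id & drivers[i].phy_id_mask))
                return drivers[i].name;
        return "no driver";
    }

    int main(void)
    {
        printf("0x02430d81 -> %s\n", match(0x02430d81)); /* IP175C, revision bits ignored */
        printf("0x02430d94 -> %s\n", match(0x02430d94)); /* IP1001, revision bits ignored */
        return 0;
    }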
config REALTEK_PHY tristate "Drivers for Realtek PHYs" diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c index c1d2d251fe8..9a09e24c30b 100644 --- a/drivers/net/phy/icplus.c +++ b/drivers/net/phy/icplus.c @@ -30,7 +30,7 @@ #include <asm/irq.h> #include <asm/uaccess.h> -MODULE_DESCRIPTION("ICPlus IP175C PHY driver"); +MODULE_DESCRIPTION("ICPlus IP175C/IC1001 PHY drivers"); MODULE_AUTHOR("Michael Barkowski"); MODULE_LICENSE("GPL"); @@ -89,6 +89,33 @@ static int ip175c_config_init(struct phy_device *phydev) return 0; } +static int ip1001_config_init(struct phy_device *phydev) +{ + int err, value; + + /* Software Reset PHY */ + value = phy_read(phydev, MII_BMCR); + value |= BMCR_RESET; + err = phy_write(phydev, MII_BMCR, value); + if (err < 0) + return err; + + do { + value = phy_read(phydev, MII_BMCR); + } while (value & BMCR_RESET); + + /* Additional delay (2ns) used to adjust RX clock phase + * at GMII/ RGMII interface */ + value = phy_read(phydev, 16); + value |= 0x3; + + err = phy_write(phydev, 16, value); + if (err < 0) + return err; + + return err; +} + static int ip175c_read_status(struct phy_device *phydev) { if (phydev->addr == 4) /* WAN port */ @@ -121,21 +148,43 @@ static struct phy_driver ip175c_driver = { .driver = { .owner = THIS_MODULE,}, }; -static int __init ip175c_init(void) +static struct phy_driver ip1001_driver = { + .phy_id = 0x02430d90, + .name = "ICPlus IP1001", + .phy_id_mask = 0x0ffffff0, + .features = PHY_GBIT_FEATURES | SUPPORTED_Pause | + SUPPORTED_Asym_Pause, + .config_init = &ip1001_config_init, + .config_aneg = &genphy_config_aneg, + .read_status = &genphy_read_status, + .suspend = genphy_suspend, + .resume = genphy_resume, + .driver = { .owner = THIS_MODULE,}, +}; + +static int __init icplus_init(void) { + int ret = 0; + + ret = phy_driver_register(&ip1001_driver); + if (ret < 0) + return -ENODEV; + return phy_driver_register(&ip175c_driver); } -static void __exit ip175c_exit(void) +static void __exit icplus_exit(void) { + phy_driver_unregister(&ip1001_driver); phy_driver_unregister(&ip175c_driver); } -module_init(ip175c_init); -module_exit(ip175c_exit); +module_init(icplus_init); +module_exit(icplus_exit); static struct mdio_device_id __maybe_unused icplus_tbl[] = { { 0x02430d80, 0x0ffffff0 }, + { 0x02430d90, 0x0ffffff0 }, { } }; diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index f0bd1a1aba3..e8b9c53c304 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -30,11 +30,14 @@ #include <linux/ethtool.h> #include <linux/phy.h> #include <linux/marvell_phy.h> +#include <linux/of.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/uaccess.h> +#define MII_MARVELL_PHY_PAGE 22 + #define MII_M1011_IEVENT 0x13 #define MII_M1011_IEVENT_CLEAR 0x0000 @@ -80,7 +83,6 @@ #define MII_88E1121_PHY_LED_CTRL 16 #define MII_88E1121_PHY_LED_PAGE 3 #define MII_88E1121_PHY_LED_DEF 0x0030 -#define MII_88E1121_PHY_PAGE 22 #define MII_M1011_PHY_STATUS 0x11 #define MII_M1011_PHY_STATUS_1000 0x8000 @@ -186,13 +188,94 @@ static int marvell_config_aneg(struct phy_device *phydev) return 0; } +#ifdef CONFIG_OF_MDIO +/* + * Set and/or override some configuration registers based on the + * marvell,reg-init property stored in the of_node for the phydev. + * + * marvell,reg-init = <reg-page reg mask value>,...; + * + * There may be one or more sets of <reg-page reg mask value>: + * + * reg-page: which register bank to use. + * reg: the register. + * mask: if non-zero, ANDed with existing register value. 
+ * value: ORed with the masked value and written to the regiser. + * + */ +static int marvell_of_reg_init(struct phy_device *phydev) +{ + const __be32 *paddr; + int len, i, saved_page, current_page, page_changed, ret; + + if (!phydev->dev.of_node) + return 0; + + paddr = of_get_property(phydev->dev.of_node, "marvell,reg-init", &len); + if (!paddr || len < (4 * sizeof(*paddr))) + return 0; + + saved_page = phy_read(phydev, MII_MARVELL_PHY_PAGE); + if (saved_page < 0) + return saved_page; + page_changed = 0; + current_page = saved_page; + + ret = 0; + len /= sizeof(*paddr); + for (i = 0; i < len - 3; i += 4) { + u16 reg_page = be32_to_cpup(paddr + i); + u16 reg = be32_to_cpup(paddr + i + 1); + u16 mask = be32_to_cpup(paddr + i + 2); + u16 val_bits = be32_to_cpup(paddr + i + 3); + int val; + + if (reg_page != current_page) { + current_page = reg_page; + page_changed = 1; + ret = phy_write(phydev, MII_MARVELL_PHY_PAGE, reg_page); + if (ret < 0) + goto err; + } + + val = 0; + if (mask) { + val = phy_read(phydev, reg); + if (val < 0) { + ret = val; + goto err; + } + val &= mask; + } + val |= val_bits; + + ret = phy_write(phydev, reg, val); + if (ret < 0) + goto err; + + } +err: + if (page_changed) { + i = phy_write(phydev, MII_MARVELL_PHY_PAGE, saved_page); + if (ret == 0) + ret = i; + } + return ret; +} +#else +static int marvell_of_reg_init(struct phy_device *phydev) +{ + return 0; +} +#endif /* CONFIG_OF_MDIO */ + static int m88e1121_config_aneg(struct phy_device *phydev) { int err, oldpage, mscr; - oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE); + oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE); - err = phy_write(phydev, MII_88E1121_PHY_PAGE, + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_MSCR_PAGE); if (err < 0) return err; @@ -218,7 +301,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev) return err; } - phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage); + phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage); err = phy_write(phydev, MII_BMCR, BMCR_RESET); if (err < 0) @@ -229,11 +312,11 @@ static int m88e1121_config_aneg(struct phy_device *phydev) if (err < 0) return err; - oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE); + oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE); - phy_write(phydev, MII_88E1121_PHY_PAGE, MII_88E1121_PHY_LED_PAGE); + phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE); phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF); - phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage); + phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage); err = genphy_config_aneg(phydev); @@ -244,9 +327,9 @@ static int m88e1318_config_aneg(struct phy_device *phydev) { int err, oldpage, mscr; - oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE); + oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE); - err = phy_write(phydev, MII_88E1121_PHY_PAGE, + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_MSCR_PAGE); if (err < 0) return err; @@ -258,7 +341,7 @@ static int m88e1318_config_aneg(struct phy_device *phydev) if (err < 0) return err; - err = phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage); + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage); if (err < 0) return err; @@ -368,6 +451,9 @@ static int m88e1111_config_init(struct phy_device *phydev) return err; } + err = marvell_of_reg_init(phydev); + if (err < 0) + return err; err = phy_write(phydev, MII_BMCR, BMCR_RESET); if (err < 0) @@ -398,7 +484,7 @@ static int m88e1118_config_init(struct phy_device *phydev) int err; /* Change address */ - err = phy_write(phydev, 0x16, 
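To make the <reg-page reg mask value> format documented above concrete, here is a small user-space rendering of one quad being applied. Only the arithmetic mirrors marvell_of_reg_init(): select the page, optionally read and mask the current value, OR in the new bits, then restore the page. The register numbers and values are made up for illustration.

    #include <stdint.h>
    #include <stdio.h>

    #define PHY_PAGE_REG 22          /* page-select register, as in the driver */

    /* Fake PHY with 32 registers on each of four pages. */
    static uint16_t regs[4][32];
    static int cur_page;

    static uint16_t phy_read(int reg)
    {
        return reg == PHY_PAGE_REG ? (uint16_t)cur_page : regs[cur_page][reg];
    }

    static void phy_write(int reg, uint16_t v)
    {
        if (reg == PHY_PAGE_REG)
            cur_page = v;
        else
            regs[cur_page][reg] = v;
    }

    /* One "marvell,reg-init" style entry: <page reg mask value>. */
    struct reg_init { uint16_t page, reg, mask, value; };

    static void apply(const struct reg_init *e)
    {
        uint16_t saved_page = phy_read(PHY_PAGE_REG);
        uint16_t val = 0;

        phy_write(PHY_PAGE_REG, e->page);
        if (e->mask)                 /* non-zero mask: read-modify-write */
            val = phy_read(e->reg) & e->mask;
        phy_write(e->reg, val | e->value);
        phy_write(PHY_PAGE_REG, saved_page);
    }

    int main(void)
    {
        /* Hypothetical entry: page 2, register 21, keep bits 0xff00, set 0x0040. */
        struct reg_init e = { 2, 21, 0xff00, 0x0040 };

        regs[2][21] = 0x1234;
        apply(&e);
        printf("page 2 reg 21 = 0x%04x\n", regs[2][21]); /* prints 0x1240 */
        return 0;
    }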
0x0002); + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002); if (err < 0) return err; @@ -408,7 +494,7 @@ static int m88e1118_config_init(struct phy_device *phydev) return err; /* Change address */ - err = phy_write(phydev, 0x16, 0x0003); + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0003); if (err < 0) return err; @@ -420,8 +506,42 @@ static int m88e1118_config_init(struct phy_device *phydev) if (err < 0) return err; + err = marvell_of_reg_init(phydev); + if (err < 0) + return err; + /* Reset address */ - err = phy_write(phydev, 0x16, 0x0); + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0); + if (err < 0) + return err; + + err = phy_write(phydev, MII_BMCR, BMCR_RESET); + if (err < 0) + return err; + + return 0; +} + +static int m88e1149_config_init(struct phy_device *phydev) +{ + int err; + + /* Change address */ + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002); + if (err < 0) + return err; + + /* Enable 1000 Mbit */ + err = phy_write(phydev, 0x15, 0x1048); + if (err < 0) + return err; + + err = marvell_of_reg_init(phydev); + if (err < 0) + return err; + + /* Reset address */ + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0); if (err < 0) return err; @@ -491,6 +611,10 @@ static int m88e1145_config_init(struct phy_device *phydev) } } + err = marvell_of_reg_init(phydev); + if (err < 0) + return err; + return 0; } @@ -685,6 +809,19 @@ static struct phy_driver marvell_drivers[] = { .driver = { .owner = THIS_MODULE }, }, { + .phy_id = MARVELL_PHY_ID_88E1149R, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E1149R", + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_init = &m88e1149_config_init, + .config_aneg = &m88e1118_config_aneg, + .read_status = &genphy_read_status, + .ack_interrupt = &marvell_ack_interrupt, + .config_intr = &marvell_config_intr, + .driver = { .owner = THIS_MODULE }, + }, + { .phy_id = MARVELL_PHY_ID_88E1240, .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1240", @@ -735,6 +872,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = { { 0x01410e10, 0xfffffff0 }, { 0x01410cb0, 0xfffffff0 }, { 0x01410cd0, 0xfffffff0 }, + { 0x01410e50, 0xfffffff0 }, { 0x01410e30, 0xfffffff0 }, { 0x01410e90, 0xfffffff0 }, { } diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 09cf56d0416..39659976a1a 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c @@ -2584,16 +2584,16 @@ ppp_create_interface(struct net *net, int unit, int *retp) */ dev_net_set(dev, net); - ret = -EEXIST; mutex_lock(&pn->all_ppp_mutex); if (unit < 0) { unit = unit_get(&pn->units_idr, ppp); if (unit < 0) { - *retp = unit; + ret = unit; goto out2; } } else { + ret = -EEXIST; if (unit_find(&pn->units_idr, unit)) goto out2; /* unit already exists */ /* @@ -2668,10 +2668,10 @@ static void ppp_shutdown_interface(struct ppp *ppp) ppp->closing = 1; ppp_unlock(ppp); unregister_netdev(ppp->dev); + unit_put(&pn->units_idr, ppp->file.index); } else ppp_unlock(ppp); - unit_put(&pn->units_idr, ppp->file.index); ppp->file.dead = 1; ppp->owner = NULL; wake_up_interruptible(&ppp->file.rwait); @@ -2859,8 +2859,7 @@ static void __exit ppp_cleanup(void) * by holding all_ppp_mutex */ -/* associate pointer with specified number */ -static int unit_set(struct idr *p, void *ptr, int n) +static int __unit_alloc(struct idr *p, void *ptr, int n) { int unit, err; @@ -2871,10 +2870,24 @@ again: } err = idr_get_new_above(p, ptr, n, &unit); - if (err == -EAGAIN) - goto again; + if (err < 0) { + if (err == -EAGAIN) + goto again; + return err; 
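The ppp_generic refactor in progress here folds both idr call sites into a single __unit_alloc() helper: unit_get() takes any free unit at or above zero, while unit_set() asks for a specific unit and backs out if the allocator returns a different one. A self-contained bitmap allocator (not the kernel idr API) with the same two entry points shows that contract:

    #include <stdio.h>

    #define MAX_UNITS 64

    static unsigned long long used;   /* bit n set means unit n is taken */

    /* Allocate the lowest free unit >= n, or -1 if the table is full. */
    static int unit_alloc(int n)
    {
        for (int i = n; i < MAX_UNITS; i++) {
            if (!(used & (1ULL << i))) {
                used |= 1ULL << i;
                return i;
            }
        }
        return -1;
    }

    /* Claim a specific unit number; fail if something else comes back. */
    static int unit_set(int n)
    {
        int unit = unit_alloc(n);

        if (unit < 0)
            return unit;
        if (unit != n) {              /* n itself was already in use */
            used &= ~(1ULL << unit);
            return -1;
        }
        return unit;
    }

    /* Take any free unit. */
    static int unit_get(void)
    {
        return unit_alloc(0);
    }

    int main(void)
    {
        printf("first free unit: %d\n", unit_get());   /* 0 */
        printf("claim unit 5:    %d\n", unit_set(5));  /* 5 */
        printf("claim unit 5:    %d\n", unit_set(5));  /* -1, already taken */
        return 0;
    }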
+ } + + return unit; +} + +/* associate pointer with specified number */ +static int unit_set(struct idr *p, void *ptr, int n) +{ + int unit; - if (unit != n) { + unit = __unit_alloc(p, ptr, n); + if (unit < 0) + return unit; + else if (unit != n) { idr_remove(p, unit); return -EINVAL; } @@ -2885,19 +2898,7 @@ again: /* get new free unit number and associate pointer with it */ static int unit_get(struct idr *p, void *ptr) { - int unit, err; - -again: - if (!idr_pre_get(p, GFP_KERNEL)) { - printk(KERN_ERR "PPP: No free memory for idr\n"); - return -ENOMEM; - } - - err = idr_get_new_above(p, ptr, 0, &unit); - if (err == -EAGAIN) - goto again; - - return unit; + return __unit_alloc(p, ptr, 0); } /* put unit number back to a pool */ diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index d72fb0519a2..78c0e3c9b2b 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c @@ -948,7 +948,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb) abort: kfree_skb(skb); - return 0; + return 1; } /************************************************************************ diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c index 7a298cdf9ab..a3dcd04be22 100644 --- a/drivers/net/qlcnic/qlcnic_main.c +++ b/drivers/net/qlcnic/qlcnic_main.c @@ -1450,7 +1450,6 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, netdev->irq = adapter->msix_entries[0].vector; netif_carrier_off(netdev); - netif_stop_queue(netdev); err = register_netdev(netdev); if (err) { diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h index 22821398fc6..9787dff90d3 100644 --- a/drivers/net/qlge/qlge.h +++ b/drivers/net/qlge/qlge.h @@ -2083,6 +2083,7 @@ struct ql_adapter { u32 mailbox_in; u32 mailbox_out; struct mbox_params idc_mbc; + struct mutex mpi_mutex; int tx_ring_size; int rx_ring_size; diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index c30e0fe55a3..2555b1d34f3 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c @@ -62,15 +62,15 @@ static const u32 default_msg = /* NETIF_MSG_PKTDATA | */ NETIF_MSG_HW | NETIF_MSG_WOL | 0; -static int debug = 0x00007fff; /* defaults above */ -module_param(debug, int, 0); +static int debug = -1; /* defaults above */ +module_param(debug, int, 0664); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); #define MSIX_IRQ 0 #define MSI_IRQ 1 #define LEG_IRQ 2 static int qlge_irq_type = MSIX_IRQ; -module_param(qlge_irq_type, int, MSIX_IRQ); +module_param(qlge_irq_type, int, 0664); MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); static int qlge_mpi_coredump; @@ -4629,6 +4629,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev, INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log); init_completion(&qdev->ide_completion); + mutex_init(&qdev->mpi_mutex); if (!cards_found) { dev_info(&pdev->dev, "%s\n", DRV_STRING); diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c index 0e7c7c7ee16..a2e919bcb3c 100644 --- a/drivers/net/qlge/qlge_mpi.c +++ b/drivers/net/qlge/qlge_mpi.c @@ -534,6 +534,7 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp) int status; unsigned long count; + mutex_lock(&qdev->mpi_mutex); /* Begin polled mode for MPI */ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); @@ -603,6 +604,7 @@ done: end: /* End polled mode for MPI */ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); + mutex_unlock(&qdev->mpi_mutex); return status; 
} @@ -1099,9 +1101,7 @@ int ql_wait_fifo_empty(struct ql_adapter *qdev) static int ql_set_port_cfg(struct ql_adapter *qdev) { int status; - rtnl_lock(); status = ql_mb_set_port_cfg(qdev); - rtnl_unlock(); if (status) return status; status = ql_idc_wait(qdev); @@ -1122,9 +1122,7 @@ void ql_mpi_port_cfg_work(struct work_struct *work) container_of(work, struct ql_adapter, mpi_port_cfg_work.work); int status; - rtnl_lock(); status = ql_mb_get_port_cfg(qdev); - rtnl_unlock(); if (status) { netif_err(qdev, drv, qdev->ndev, "Bug: Failed to get port config data.\n"); @@ -1167,7 +1165,6 @@ void ql_mpi_idc_work(struct work_struct *work) u32 aen; int timeout; - rtnl_lock(); aen = mbcp->mbox_out[1] >> 16; timeout = (mbcp->mbox_out[1] >> 8) & 0xf; @@ -1231,7 +1228,6 @@ void ql_mpi_idc_work(struct work_struct *work) } break; } - rtnl_unlock(); } void ql_mpi_work(struct work_struct *work) @@ -1242,7 +1238,7 @@ void ql_mpi_work(struct work_struct *work) struct mbox_params *mbcp = &mbc; int err = 0; - rtnl_lock(); + mutex_lock(&qdev->mpi_mutex); /* Begin polled mode for MPI */ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); @@ -1259,7 +1255,7 @@ void ql_mpi_work(struct work_struct *work) /* End polled mode for MPI */ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); - rtnl_unlock(); + mutex_unlock(&qdev->mpi_mutex); ql_enable_completion_interrupt(qdev, 0); } diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index d88ce9fb1cb..53b13deade9 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -744,26 +744,36 @@ static void rtl8169_xmii_reset_enable(void __iomem *ioaddr) mdio_write(ioaddr, MII_BMCR, val & 0xffff); } -static void rtl8169_check_link_status(struct net_device *dev, +static void __rtl8169_check_link_status(struct net_device *dev, struct rtl8169_private *tp, - void __iomem *ioaddr) + void __iomem *ioaddr, + bool pm) { unsigned long flags; spin_lock_irqsave(&tp->lock, flags); if (tp->link_ok(ioaddr)) { /* This is to cancel a scheduled suspend if there's one. 
*/ - pm_request_resume(&tp->pci_dev->dev); + if (pm) + pm_request_resume(&tp->pci_dev->dev); netif_carrier_on(dev); netif_info(tp, ifup, dev, "link up\n"); } else { netif_carrier_off(dev); netif_info(tp, ifdown, dev, "link down\n"); - pm_schedule_suspend(&tp->pci_dev->dev, 100); + if (pm) + pm_schedule_suspend(&tp->pci_dev->dev, 100); } spin_unlock_irqrestore(&tp->lock, flags); } +static void rtl8169_check_link_status(struct net_device *dev, + struct rtl8169_private *tp, + void __iomem *ioaddr) +{ + __rtl8169_check_link_status(dev, tp, ioaddr, false); +} + #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) static u32 __rtl8169_get_wol(struct rtl8169_private *tp) @@ -846,10 +856,10 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) else tp->features &= ~RTL_FEATURE_WOL; __rtl8169_set_wol(tp, wol->wolopts); - device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); - spin_unlock_irq(&tp->lock); + device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); + return 0; } @@ -2931,7 +2941,7 @@ static const struct rtl_cfg_info { .hw_start = rtl_hw_start_8168, .region = 2, .align = 8, - .intr_event = SYSErr | RxFIFOOver | LinkChg | RxOverflow | + .intr_event = SYSErr | LinkChg | RxOverflow | TxErr | TxOK | RxOK | RxErr, .napi_event = TxErr | TxOK | RxOK | RxOverflow, .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI, @@ -4440,8 +4450,7 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1) u32 status = opts1 & RxProtoMask; if (((status == RxProtoTCP) && !(opts1 & TCPFail)) || - ((status == RxProtoUDP) && !(opts1 & UDPFail)) || - ((status == RxProtoIP) && !(opts1 & IPFail))) + ((status == RxProtoUDP) && !(opts1 & UDPFail))) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb_checksum_none_assert(skb); @@ -4588,7 +4597,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) } /* Work around for rx fifo overflow */ - if (unlikely(status & RxFIFOOver)) { + if (unlikely(status & RxFIFOOver) && + (tp->mac_version == RTL_GIGA_MAC_VER_11)) { netif_stop_queue(dev); rtl8169_tx_timeout(dev); break; @@ -4600,7 +4610,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) } if (status & LinkChg) - rtl8169_check_link_status(dev, tp, ioaddr); + __rtl8169_check_link_status(dev, tp, ioaddr, true); /* We need to see the lastest version of tp->intr_mask to * avoid ignoring an MSI interrupt and having to wait for @@ -4890,11 +4900,7 @@ static int rtl8169_runtime_idle(struct device *device) struct net_device *dev = pci_get_drvdata(pdev); struct rtl8169_private *tp = netdev_priv(dev); - if (!tp->TxDescArray) - return 0; - - rtl8169_check_link_status(dev, tp, tp->mmio_addr); - return -EBUSY; + return tp->TxDescArray ? 
-EBUSY : 0; } static const struct dev_pm_ops rtl8169_pm_ops = { diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 05df20e4797..fb83cdd9464 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c @@ -197,7 +197,9 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value"); static void efx_remove_channels(struct efx_nic *efx); static void efx_remove_port(struct efx_nic *efx); +static void efx_init_napi(struct efx_nic *efx); static void efx_fini_napi(struct efx_nic *efx); +static void efx_fini_napi_channel(struct efx_channel *channel); static void efx_fini_struct(struct efx_nic *efx); static void efx_start_all(struct efx_nic *efx); static void efx_stop_all(struct efx_nic *efx); @@ -335,8 +337,10 @@ void efx_process_channel_now(struct efx_channel *channel) /* Disable interrupts and wait for ISRs to complete */ efx_nic_disable_interrupts(efx); - if (efx->legacy_irq) + if (efx->legacy_irq) { synchronize_irq(efx->legacy_irq); + efx->legacy_irq_enabled = false; + } if (channel->irq) synchronize_irq(channel->irq); @@ -351,6 +355,8 @@ void efx_process_channel_now(struct efx_channel *channel) efx_channel_processed(channel); napi_enable(&channel->napi_str); + if (efx->legacy_irq) + efx->legacy_irq_enabled = true; efx_nic_enable_interrupts(efx); } @@ -426,6 +432,7 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel) *channel = *old_channel; + channel->napi_dev = NULL; memset(&channel->eventq, 0, sizeof(channel->eventq)); rx_queue = &channel->rx_queue; @@ -736,9 +743,13 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) if (rc) goto rollback; + efx_init_napi(efx); + /* Destroy old channels */ - for (i = 0; i < efx->n_channels; i++) + for (i = 0; i < efx->n_channels; i++) { + efx_fini_napi_channel(other_channel[i]); efx_remove_channel(other_channel[i]); + } out: /* Free unused channel structures */ for (i = 0; i < efx->n_channels; i++) @@ -1400,6 +1411,8 @@ static void efx_start_all(struct efx_nic *efx) efx_start_channel(channel); } + if (efx->legacy_irq) + efx->legacy_irq_enabled = true; efx_nic_enable_interrupts(efx); /* Switch to event based MCDI completions after enabling interrupts. 
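The legacy_irq_enabled flag being threaded through efx.c here follows a common pattern for shared legacy interrupt lines: the handler can run at any time on behalf of another device sharing the line, so it checks a driver-maintained enable flag before trusting per-channel state. A compressed sketch of the pattern, with invented names and no real interrupt plumbing:

    #include <stdbool.h>
    #include <stdio.h>

    /* Invented device state; in the driver this lives in struct efx_nic. */
    struct nic {
        bool legacy_irq_enabled;
        int  events_pending;
    };

    enum irqreturn { IRQ_NONE, IRQ_HANDLED };

    /*
     * Handler for a shared legacy IRQ line: it may be invoked because some
     * other device on the line raised an interrupt, including while this
     * driver has its own interrupts disabled and its channels half torn
     * down.  Checking the flag first keeps it off invalid state.
     */
    static enum irqreturn legacy_irq_handler(struct nic *nic)
    {
        if (!nic->legacy_irq_enabled)
            return IRQ_NONE;              /* not ours, or not safe yet */

        if (!nic->events_pending)
            return IRQ_NONE;

        nic->events_pending = 0;          /* "process" the events */
        return IRQ_HANDLED;
    }

    int main(void)
    {
        struct nic nic = { .legacy_irq_enabled = false, .events_pending = 1 };

        printf("while disabled: %s\n",
               legacy_irq_handler(&nic) == IRQ_HANDLED ? "handled" : "ignored");

        nic.legacy_irq_enabled = true;    /* set just before enabling the NIC */
        printf("after enable:   %s\n",
               legacy_irq_handler(&nic) == IRQ_HANDLED ? "handled" : "ignored");
        return 0;
    }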
@@ -1460,8 +1473,10 @@ static void efx_stop_all(struct efx_nic *efx) /* Disable interrupts and wait for ISR to complete */ efx_nic_disable_interrupts(efx); - if (efx->legacy_irq) + if (efx->legacy_irq) { synchronize_irq(efx->legacy_irq); + efx->legacy_irq_enabled = false; + } efx_for_each_channel(channel, efx) { if (channel->irq) synchronize_irq(channel->irq); @@ -1593,7 +1608,7 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) * **************************************************************************/ -static int efx_init_napi(struct efx_nic *efx) +static void efx_init_napi(struct efx_nic *efx) { struct efx_channel *channel; @@ -1602,18 +1617,21 @@ static int efx_init_napi(struct efx_nic *efx) netif_napi_add(channel->napi_dev, &channel->napi_str, efx_poll, napi_weight); } - return 0; +} + +static void efx_fini_napi_channel(struct efx_channel *channel) +{ + if (channel->napi_dev) + netif_napi_del(&channel->napi_str); + channel->napi_dev = NULL; } static void efx_fini_napi(struct efx_nic *efx) { struct efx_channel *channel; - efx_for_each_channel(channel, efx) { - if (channel->napi_dev) - netif_napi_del(&channel->napi_str); - channel->napi_dev = NULL; - } + efx_for_each_channel(channel, efx) + efx_fini_napi_channel(channel); } /************************************************************************** @@ -2335,9 +2353,7 @@ static int efx_pci_probe_main(struct efx_nic *efx) if (rc) goto fail1; - rc = efx_init_napi(efx); - if (rc) - goto fail2; + efx_init_napi(efx); rc = efx->type->init(efx); if (rc) { @@ -2368,7 +2384,6 @@ static int efx_pci_probe_main(struct efx_nic *efx) efx->type->fini(efx); fail3: efx_fini_napi(efx); - fail2: efx_remove_all(efx); fail1: return rc; diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h index 0a7e26d73b5..b137c889152 100644 --- a/drivers/net/sfc/net_driver.h +++ b/drivers/net/sfc/net_driver.h @@ -621,6 +621,7 @@ struct efx_filter_state; * @pci_dev: The PCI device * @type: Controller type attributes * @legacy_irq: IRQ number + * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)? * @workqueue: Workqueue for port reconfigures and the HW monitor. * Work items do not hold and must not acquire RTNL. * @workqueue_name: Name of workqueue @@ -709,6 +710,7 @@ struct efx_nic { struct pci_dev *pci_dev; const struct efx_nic_type *type; int legacy_irq; + bool legacy_irq_enabled; struct workqueue_struct *workqueue; char workqueue_name[16]; struct work_struct reset_work; diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c index 41c36b9a424..67cb0c96838 100644 --- a/drivers/net/sfc/nic.c +++ b/drivers/net/sfc/nic.c @@ -1418,6 +1418,12 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) u32 queues; int syserr; + /* Could this be ours? If interrupts are disabled then the + * channel state may not be valid. 
+ */ + if (!efx->legacy_irq_enabled) + return result; + /* Read the ISR which also ACKs the interrupts */ + efx_readd(efx, &reg, FR_BZ_INT_ISR0); + queues = EFX_EXTRACT_DWORD(reg, 0, 31); diff --git a/drivers/net/skge.c b/drivers/net/skge.c index bfec2e0f527..220e0398f1d 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -3858,7 +3858,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, /* device is off until link detection */ netif_carrier_off(dev); - netif_stop_queue(dev); return dev; } diff --git a/drivers/net/smsc911x.h b/drivers/net/smsc911x.h index 52f38e12a87..50f712e99e9 100644 --- a/drivers/net/smsc911x.h +++ b/drivers/net/smsc911x.h @@ -22,7 +22,7 @@ #define __SMSC911X_H__ #define TX_FIFO_LOW_THRESHOLD ((u32)1600) -#define SMSC911X_EEPROM_SIZE ((u32)7) +#define SMSC911X_EEPROM_SIZE ((u32)128) #define USE_DEBUG 0 /* This is the maximum number of packets to be received every diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c index 06bc6034ce8..2114837809e 100644 --- a/drivers/net/stmmac/stmmac_main.c +++ b/drivers/net/stmmac/stmmac_main.c @@ -1509,6 +1509,8 @@ static int stmmac_probe(struct net_device *dev) pr_warning("\tno valid MAC address;" "please, use ifconfig or nwhwconfig!\n"); + spin_lock_init(&priv->lock); + ret = register_netdev(dev); if (ret) { pr_err("%s: ERROR %i registering the device\n", @@ -1520,8 +1522,6 @@ static int stmmac_probe(struct net_device *dev) dev->name, (dev->features & NETIF_F_SG) ? "on" : "off", (dev->features & NETIF_F_HW_CSUM) ? "on" : "off"); - spin_lock_init(&priv->lock); - return ret; } diff --git a/drivers/net/tile/Makefile b/drivers/net/tile/Makefile new file mode 100644 index 00000000000..f634f142cab --- /dev/null +++ b/drivers/net/tile/Makefile @@ -0,0 +1,10 @@ +# +# Makefile for the TILE on-chip networking support. +# + +obj-$(CONFIG_TILE_NET) += tile_net.o +ifdef CONFIG_TILEGX +tile_net-objs := tilegx.o mpipe.o iorpc_mpipe.o dma_queue.o +else +tile_net-objs := tilepro.o +endif diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c new file mode 100644 index 00000000000..0e6bac5ec65 --- /dev/null +++ b/drivers/net/tile/tilepro.c @@ -0,0 +1,2406 @@ +/* + * Copyright 2010 Tilera Corporation. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. 
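One ordering rule worth noting from the stmmac hunk above: register_netdev() makes the interface visible immediately, so any lock that the netdev or ethtool callbacks may take (priv->lock here) has to be initialized before registration rather than after it. A minimal sketch of that rule, with pthreads standing in for the kernel primitives and all names invented:

    #include <pthread.h>
    #include <stdio.h>

    /* Invented driver-private state; priv->lock plays this role in stmmac. */
    struct priv {
        pthread_mutex_t lock;
        int stats;
    };

    /* Stand-in for a callback that can run as soon as the device is
     * registered and therefore needs the lock to be valid. */
    static void ndo_get_stats(struct priv *p)
    {
        pthread_mutex_lock(&p->lock);
        p->stats++;
        pthread_mutex_unlock(&p->lock);
    }

    /* Stand-in for register_netdev(): once this returns, callbacks may
     * fire at any time (here one is simply invoked directly). */
    static int register_device(struct priv *p)
    {
        ndo_get_stats(p);
        return 0;
    }

    int main(void)
    {
        struct priv p = { .stats = 0 };

        pthread_mutex_init(&p.lock, NULL);   /* must happen before registration */
        register_device(&p);
        printf("stats after registration: %d\n", p.stats);
        pthread_mutex_destroy(&p.lock);
        return 0;
    }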
+ */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/moduleparam.h> +#include <linux/sched.h> +#include <linux/kernel.h> /* printk() */ +#include <linux/slab.h> /* kmalloc() */ +#include <linux/errno.h> /* error codes */ +#include <linux/types.h> /* size_t */ +#include <linux/interrupt.h> +#include <linux/in.h> +#include <linux/netdevice.h> /* struct device, and other headers */ +#include <linux/etherdevice.h> /* eth_type_trans */ +#include <linux/skbuff.h> +#include <linux/ioctl.h> +#include <linux/cdev.h> +#include <linux/hugetlb.h> +#include <linux/in6.h> +#include <linux/timer.h> +#include <linux/io.h> +#include <asm/checksum.h> +#include <asm/homecache.h> + +#include <hv/drv_xgbe_intf.h> +#include <hv/drv_xgbe_impl.h> +#include <hv/hypervisor.h> +#include <hv/netio_intf.h> + +/* For TSO */ +#include <linux/ip.h> +#include <linux/tcp.h> + + +/* There is no singlethread_cpu, so schedule work on the current cpu. */ +#define singlethread_cpu -1 + + +/* + * First, "tile_net_init_module()" initializes all four "devices" which + * can be used by linux. + * + * Then, "ifconfig DEVICE up" calls "tile_net_open()", which analyzes + * the network cpus, then uses "tile_net_open_aux()" to initialize + * LIPP/LEPP, and then uses "tile_net_open_inner()" to register all + * the tiles, provide buffers to LIPP, allow ingress to start, and + * turn on hypervisor interrupt handling (and NAPI) on all tiles. + * + * If registration fails due to the link being down, then "retry_work" + * is used to keep calling "tile_net_open_inner()" until it succeeds. + * + * If "ifconfig DEVICE down" is called, it uses "tile_net_stop()" to + * stop egress, drain the LIPP buffers, unregister all the tiles, stop + * LIPP/LEPP, and wipe the LEPP queue. + * + * We start out with the ingress interrupt enabled on each CPU. When + * this interrupt fires, we disable it, and call "napi_schedule()". + * This will cause "tile_net_poll()" to be called, which will pull + * packets from the netio queue, filtering them out, or passing them + * to "netif_receive_skb()". If our budget is exhausted, we will + * return, knowing we will be called again later. Otherwise, we + * reenable the ingress interrupt, and call "napi_complete()". + * + * + * NOTE: The use of "native_driver" ensures that EPP exists, and that + * "epp_sendv" is legal, and that "LIPP" is being used. + * + * NOTE: Failing to free completions for an arbitrarily long time + * (which is defined to be illegal) does in fact cause bizarre + * problems. The "egress_timer" helps prevent this from happening. + * + * NOTE: The egress code can be interrupted by the interrupt handler. + */ + + +/* HACK: Allow use of "jumbo" packets. */ +/* This should be 1500 if "jumbo" is not set in LIPP. */ +/* This should be at most 10226 (10240 - 14) if "jumbo" is set in LIPP. */ +/* ISSUE: This has not been thoroughly tested (except at 1500). */ +#define TILE_NET_MTU 1500 + +/* HACK: Define to support GSO. */ +/* ISSUE: This may actually hurt performance of the TCP blaster. */ +/* #define TILE_NET_GSO */ + +/* Define this to collapse "duplicate" acks. */ +/* #define IGNORE_DUP_ACKS */ + +/* HACK: Define this to verify incoming packets. */ +/* #define TILE_NET_VERIFY_INGRESS */ + +/* Use 3000 to enable the Linux Traffic Control (QoS) layer, else 0. */ +#define TILE_NET_TX_QUEUE_LEN 0 + +/* Define to dump packets (prints out the whole packet on tx and rx). */ +/* #define TILE_NET_DUMP_PACKETS */ + +/* Define to enable debug spew (all PDEBUG's are enabled). 
*/ +/* #define TILE_NET_DEBUG */ + + +/* Define to activate paranoia checks. */ +/* #define TILE_NET_PARANOIA */ + +/* Default transmit lockup timeout period, in jiffies. */ +#define TILE_NET_TIMEOUT (5 * HZ) + +/* Default retry interval for bringing up the NetIO interface, in jiffies. */ +#define TILE_NET_RETRY_INTERVAL (5 * HZ) + +/* Number of ports (xgbe0, xgbe1, gbe0, gbe1). */ +#define TILE_NET_DEVS 4 + + + +/* Paranoia. */ +#if NET_IP_ALIGN != LIPP_PACKET_PADDING +#error "NET_IP_ALIGN must match LIPP_PACKET_PADDING." +#endif + + +/* Debug print. */ +#ifdef TILE_NET_DEBUG +#define PDEBUG(fmt, args...) net_printk(fmt, ## args) +#else +#define PDEBUG(fmt, args...) +#endif + + +MODULE_AUTHOR("Tilera"); +MODULE_LICENSE("GPL"); + + +#define IS_MULTICAST(mac_addr) \ + (((u8 *)(mac_addr))[0] & 0x01) + +#define IS_BROADCAST(mac_addr) \ + (((u16 *)(mac_addr))[0] == 0xffff) + + +/* + * Queue of incoming packets for a specific cpu and device. + * + * Includes a pointer to the "system" data, and the actual "user" data. + */ +struct tile_netio_queue { + netio_queue_impl_t *__system_part; + netio_queue_user_impl_t __user_part; + +}; + + +/* + * Statistics counters for a specific cpu and device. + */ +struct tile_net_stats_t { + u32 rx_packets; + u32 rx_bytes; + u32 tx_packets; + u32 tx_bytes; +}; + + +/* + * Info for a specific cpu and device. + * + * ISSUE: There is a "dev" pointer in "napi" as well. + */ +struct tile_net_cpu { + /* The NAPI struct. */ + struct napi_struct napi; + /* Packet queue. */ + struct tile_netio_queue queue; + /* Statistics. */ + struct tile_net_stats_t stats; + /* ISSUE: Is this needed? */ + bool napi_enabled; + /* True if this tile has succcessfully registered with the IPP. */ + bool registered; + /* True if the link was down last time we tried to register. */ + bool link_down; + /* True if "egress_timer" is scheduled. */ + bool egress_timer_scheduled; + /* Number of small sk_buffs which must still be provided. */ + unsigned int num_needed_small_buffers; + /* Number of large sk_buffs which must still be provided. */ + unsigned int num_needed_large_buffers; + /* A timer for handling egress completions. */ + struct timer_list egress_timer; +}; + + +/* + * Info for a specific device. + */ +struct tile_net_priv { + /* Our network device. */ + struct net_device *dev; + /* The actual egress queue. */ + lepp_queue_t *epp_queue; + /* Protects "epp_queue->cmd_tail" and "epp_queue->comp_tail" */ + spinlock_t cmd_lock; + /* Protects "epp_queue->comp_head". */ + spinlock_t comp_lock; + /* The hypervisor handle for this interface. */ + int hv_devhdl; + /* The intr bit mask that IDs this device. */ + u32 intr_id; + /* True iff "tile_net_open_aux()" has succeeded. */ + int partly_opened; + /* True iff "tile_net_open_inner()" has succeeded. */ + int fully_opened; + /* Effective network cpus. */ + struct cpumask network_cpus_map; + /* Number of network cpus. */ + int network_cpus_count; + /* Credits per network cpu. */ + int network_cpus_credits; + /* Network stats. */ + struct net_device_stats stats; + /* For NetIO bringup retries. */ + struct delayed_work retry_work; + /* Quick access to per cpu data. */ + struct tile_net_cpu *cpu[NR_CPUS]; +}; + + +/* + * The actual devices (xgbe0, xgbe1, gbe0, gbe1). + */ +static struct net_device *tile_net_devs[TILE_NET_DEVS]; + +/* + * The "tile_net_cpu" structures for each device. 
+ */ +static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe0); +static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe1); +static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe0); +static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe1); + + +/* + * True if "network_cpus" was specified. + */ +static bool network_cpus_used; + +/* + * The actual cpus in "network_cpus". + */ +static struct cpumask network_cpus_map; + + + +#ifdef TILE_NET_DEBUG +/* + * printk with extra stuff. + * + * We print the CPU we're running in brackets. + */ +static void net_printk(char *fmt, ...) +{ + int i; + int len; + va_list args; + static char buf[256]; + + len = sprintf(buf, "tile_net[%2.2d]: ", smp_processor_id()); + va_start(args, fmt); + i = vscnprintf(buf + len, sizeof(buf) - len - 1, fmt, args); + va_end(args); + buf[255] = '\0'; + pr_notice(buf); +} +#endif + + +#ifdef TILE_NET_DUMP_PACKETS +/* + * Dump a packet. + */ +static void dump_packet(unsigned char *data, unsigned long length, char *s) +{ + unsigned long i; + static unsigned int count; + + pr_info("dump_packet(data %p, length 0x%lx s %s count 0x%x)\n", + data, length, s, count++); + + pr_info("\n"); + + for (i = 0; i < length; i++) { + if ((i & 0xf) == 0) + sprintf(buf, "%8.8lx:", i); + sprintf(buf + strlen(buf), " %2.2x", data[i]); + if ((i & 0xf) == 0xf || i == length - 1) + pr_info("%s\n", buf); + } +} +#endif + + +/* + * Provide support for the __netio_fastio1() swint + * (see <hv/drv_xgbe_intf.h> for how it is used). + * + * The fastio swint2 call may clobber all the caller-saved registers. + * It rarely clobbers memory, but we allow for the possibility in + * the signature just to be on the safe side. + * + * Also, gcc doesn't seem to allow an input operand to be + * clobbered, so we fake it with dummy outputs. + * + * This function can't be static because of the way it is declared + * in the netio header. + */ +inline int __netio_fastio1(u32 fastio_index, u32 arg0) +{ + long result, clobber_r1, clobber_r10; + asm volatile("swint2" + : "=R00" (result), + "=R01" (clobber_r1), "=R10" (clobber_r10) + : "R10" (fastio_index), "R01" (arg0) + : "memory", "r2", "r3", "r4", + "r5", "r6", "r7", "r8", "r9", + "r11", "r12", "r13", "r14", + "r15", "r16", "r17", "r18", "r19", + "r20", "r21", "r22", "r23", "r24", + "r25", "r26", "r27", "r28", "r29"); + return result; +} + + +/* + * Provide a linux buffer to LIPP. + */ +static void tile_net_provide_linux_buffer(struct tile_net_cpu *info, + void *va, bool small) +{ + struct tile_netio_queue *queue = &info->queue; + + /* Convert "va" and "small" to "linux_buffer_t". */ + unsigned int buffer = ((unsigned int)(__pa(va) >> 7) << 1) + small; + + __netio_fastio_free_buffer(queue->__user_part.__fastio_index, buffer); +} + + +/* + * Provide a linux buffer for LIPP. + */ +static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info, + bool small) +{ + /* ISSUE: What should we use here? */ + unsigned int large_size = NET_IP_ALIGN + TILE_NET_MTU + 100; + + /* Round up to ensure to avoid "false sharing" with last cache line. */ + unsigned int buffer_size = + (((small ? LIPP_SMALL_PACKET_SIZE : large_size) + + CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE()); + + /* + * ISSUE: Since CPAs are 38 bits, and we can only encode the + * high 31 bits in a "linux_buffer_t", the low 7 bits must be + * zero, and thus, we must align the actual "va" mod 128. 
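The handle format described in this comment packs a buffer reference into 32 bits: the physical address is shifted right by 7, which is why the address must be 128-byte aligned, and the freed-up low bit records whether the buffer is small. A stand-alone round trip of that arithmetic, using plain integers instead of real physical addresses:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Pack (pa, small) into one 32-bit word, mirroring
     * ((__pa(va) >> 7) << 1) + small.  pa must be 128-byte aligned so the
     * seven discarded low bits are zero; that leaves 31 address bits plus
     * one flag bit, enough for the 38-bit physical address space the
     * comment above mentions.
     */
    static uint32_t encode_buffer(uint64_t pa, int small)
    {
        return (uint32_t)((pa >> 7) << 1) | (small ? 1u : 0u);
    }

    static uint64_t decode_pa(uint32_t handle)
    {
        return ((uint64_t)(handle >> 1)) << 7;
    }

    static int decode_small(uint32_t handle)
    {
        return handle & 1;
    }

    int main(void)
    {
        uint64_t pa = 0x3f0012380ull & ~0x7full;   /* force 128-byte alignment */
        uint32_t h = encode_buffer(pa, 1);

        printf("pa=0x%llx -> handle=0x%x -> pa=0x%llx small=%d\n",
               (unsigned long long)pa, (unsigned)h,
               (unsigned long long)decode_pa(h), decode_small(h));
        return 0;
    }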
+ */ + const unsigned long align = 128; + + struct sk_buff *skb; + void *va; + + struct sk_buff **skb_ptr; + + /* Note that "dev_alloc_skb()" adds NET_SKB_PAD more bytes, */ + /* and also "reserves" that many bytes. */ + /* ISSUE: Can we "share" the NET_SKB_PAD bytes with "skb_ptr"? */ + int len = sizeof(*skb_ptr) + align + buffer_size; + + while (1) { + + /* Allocate (or fail). */ + skb = dev_alloc_skb(len); + if (skb == NULL) + return false; + + /* Make room for a back-pointer to 'skb'. */ + skb_reserve(skb, sizeof(*skb_ptr)); + + /* Make sure we are aligned. */ + skb_reserve(skb, -(long)skb->data & (align - 1)); + + /* This address is given to IPP. */ + va = skb->data; + + if (small) + break; + + /* ISSUE: This has never been observed! */ + /* Large buffers must not span a huge page. */ + if (((((long)va & ~HPAGE_MASK) + 1535) & HPAGE_MASK) == 0) + break; + pr_err("Leaking unaligned linux buffer at %p.\n", va); + } + + /* Skip two bytes to satisfy LIPP assumptions. */ + /* Note that this aligns IP on a 16 byte boundary. */ + /* ISSUE: Do this when the packet arrives? */ + skb_reserve(skb, NET_IP_ALIGN); + + /* Save a back-pointer to 'skb'. */ + skb_ptr = va - sizeof(*skb_ptr); + *skb_ptr = skb; + + /* Invalidate the packet buffer. */ + if (!hash_default) + __inv_buffer(skb->data, buffer_size); + + /* Make sure "skb_ptr" has been flushed. */ + __insn_mf(); + +#ifdef TILE_NET_PARANOIA +#if CHIP_HAS_CBOX_HOME_MAP() + if (hash_default) { + HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va); + if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3) + panic("Non-coherent ingress buffer!"); + } +#endif +#endif + + /* Provide the new buffer. */ + tile_net_provide_linux_buffer(info, va, small); + + return true; +} + + +/* + * Provide linux buffers for LIPP. + */ +static void tile_net_provide_needed_buffers(struct tile_net_cpu *info) +{ + while (info->num_needed_small_buffers != 0) { + if (!tile_net_provide_needed_buffer(info, true)) + goto oops; + info->num_needed_small_buffers--; + } + + while (info->num_needed_large_buffers != 0) { + if (!tile_net_provide_needed_buffer(info, false)) + goto oops; + info->num_needed_large_buffers--; + } + + return; + +oops: + + /* Add a description to the page allocation failure dump. */ + pr_notice("Could not provide a linux buffer to LIPP.\n"); +} + + +/* + * Grab some LEPP completions, and store them in "comps", of size + * "comps_size", and return the number of completions which were + * stored, so the caller can free them. + * + * If "pending" is not NULL, it will be set to true if there might + * still be some pending completions caused by this tile, else false. + */ +static unsigned int tile_net_lepp_grab_comps(struct net_device *dev, + struct sk_buff *comps[], + unsigned int comps_size, + bool *pending) +{ + struct tile_net_priv *priv = netdev_priv(dev); + + lepp_queue_t *eq = priv->epp_queue; + + unsigned int n = 0; + + unsigned int comp_head; + unsigned int comp_busy; + unsigned int comp_tail; + + spin_lock(&priv->comp_lock); + + comp_head = eq->comp_head; + comp_busy = eq->comp_busy; + comp_tail = eq->comp_tail; + + while (comp_head != comp_busy && n < comps_size) { + comps[n++] = eq->comps[comp_head]; + LEPP_QINC(comp_head); + } + + if (pending != NULL) + *pending = (comp_head != comp_tail); + + eq->comp_head = comp_head; + + spin_unlock(&priv->comp_lock); + + return n; +} + + +/* + * Make sure the egress timer is scheduled. 
+ * + * Note that we use "schedule if not scheduled" logic instead of the more + * obvious "reschedule" logic, because "reschedule" is fairly expensive. + */ +static void tile_net_schedule_egress_timer(struct tile_net_cpu *info) +{ + if (!info->egress_timer_scheduled) { + mod_timer_pinned(&info->egress_timer, jiffies + 1); + info->egress_timer_scheduled = true; + } +} + + +/* + * The "function" for "info->egress_timer". + * + * This timer will reschedule itself as long as there are any pending + * completions expected (on behalf of any tile). + * + * ISSUE: Realistically, will the timer ever stop scheduling itself? + * + * ISSUE: This timer is almost never actually needed, so just use a global + * timer that can run on any tile. + * + * ISSUE: Maybe instead track number of expected completions, and free + * only that many, resetting to zero if "pending" is ever false. + */ +static void tile_net_handle_egress_timer(unsigned long arg) +{ + struct tile_net_cpu *info = (struct tile_net_cpu *)arg; + struct net_device *dev = info->napi.dev; + + struct sk_buff *olds[32]; + unsigned int wanted = 32; + unsigned int i, nolds = 0; + bool pending; + + /* The timer is no longer scheduled. */ + info->egress_timer_scheduled = false; + + nolds = tile_net_lepp_grab_comps(dev, olds, wanted, &pending); + + for (i = 0; i < nolds; i++) + kfree_skb(olds[i]); + + /* Reschedule timer if needed. */ + if (pending) + tile_net_schedule_egress_timer(info); +} + + +#ifdef IGNORE_DUP_ACKS + +/* + * Help detect "duplicate" ACKs. These are sequential packets (for a + * given flow) which are exactly 66 bytes long, sharing everything but + * ID=2@0x12, Hsum=2@0x18, Ack=4@0x2a, WinSize=2@0x30, Csum=2@0x32, + * Tstamps=10@0x38. The ID's are +1, the Hsum's are -1, the Ack's are + * +N, and the Tstamps are usually identical. + * + * NOTE: Apparently truly duplicate acks (with identical "ack" values), + * should not be collapsed, as they are used for some kind of flow control. + */ +static bool is_dup_ack(char *s1, char *s2, unsigned int len) +{ + int i; + + unsigned long long ignorable = 0; + + /* Identification. */ + ignorable |= (1ULL << 0x12); + ignorable |= (1ULL << 0x13); + + /* Header checksum. */ + ignorable |= (1ULL << 0x18); + ignorable |= (1ULL << 0x19); + + /* ACK. */ + ignorable |= (1ULL << 0x2a); + ignorable |= (1ULL << 0x2b); + ignorable |= (1ULL << 0x2c); + ignorable |= (1ULL << 0x2d); + + /* WinSize. */ + ignorable |= (1ULL << 0x30); + ignorable |= (1ULL << 0x31); + + /* Checksum. */ + ignorable |= (1ULL << 0x32); + ignorable |= (1ULL << 0x33); + + for (i = 0; i < len; i++, ignorable >>= 1) { + + if ((ignorable & 1) || (s1[i] == s2[i])) + continue; + +#ifdef TILE_NET_DEBUG + /* HACK: Mention non-timestamp diffs. */ + if (i < 0x38 && i != 0x2f && + net_ratelimit()) + pr_info("Diff at 0x%x\n", i); +#endif + + return false; + } + +#ifdef TILE_NET_NO_SUPPRESS_DUP_ACKS + /* HACK: Do not suppress truly duplicate ACKs. */ + /* ISSUE: Is this actually necessary or helpful? */ + if (s1[0x2a] == s2[0x2a] && + s1[0x2b] == s2[0x2b] && + s1[0x2c] == s2[0x2c] && + s1[0x2d] == s2[0x2d]) { + return false; + } +#endif + + return true; +} + +#endif + + + +/* + * Like "tile_net_handle_packets()", but just discard packets. 
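The is_dup_ack() helper above compares two 66-byte frames while skipping the byte offsets that legitimately differ between back-to-back ACKs (IP ID, checksums, ACK number, window, timestamps), using a 64-bit mask consumed one bit per byte offset. The same technique on plain buffers, detached from any packet format:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /*
     * Compare len bytes of a and b, ignoring any offset whose bit is set
     * in 'ignorable'.  Only offsets 0..63 can be masked, which is enough
     * for the header fields the driver wants to skip.
     */
    static bool equal_except(const unsigned char *a, const unsigned char *b,
                             unsigned int len, unsigned long long ignorable)
    {
        for (unsigned int i = 0; i < len; i++, ignorable >>= 1) {
            if (ignorable & 1)
                continue;
            if (a[i] != b[i])
                return false;
        }
        return true;
    }

    int main(void)
    {
        unsigned char p1[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
        unsigned char p2[8];
        unsigned long long ignore = 0;

        memcpy(p2, p1, sizeof(p2));
        p2[3] ^= 0xff;                       /* differs at offset 3 */

        ignore |= 1ULL << 3;                 /* ...but offset 3 is ignorable */

        printf("match ignoring offset 3: %s\n",
               equal_except(p1, p2, sizeof(p1), ignore) ? "yes" : "no");
        printf("match with empty mask:   %s\n",
               equal_except(p1, p2, sizeof(p1), 0) ? "yes" : "no");
        return 0;
    }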
+ */ +static void tile_net_discard_packets(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info = priv->cpu[my_cpu]; + struct tile_netio_queue *queue = &info->queue; + netio_queue_impl_t *qsp = queue->__system_part; + netio_queue_user_impl_t *qup = &queue->__user_part; + + while (qup->__packet_receive_read != + qsp->__packet_receive_queue.__packet_write) { + + int index = qup->__packet_receive_read; + + int index2_aux = index + sizeof(netio_pkt_t); + int index2 = + ((index2_aux == + qsp->__packet_receive_queue.__last_packet_plus_one) ? + 0 : index2_aux); + + netio_pkt_t *pkt = (netio_pkt_t *) + ((unsigned long) &qsp[1] + index); + + /* Extract the "linux_buffer_t". */ + unsigned int buffer = pkt->__packet.word; + + /* Convert "linux_buffer_t" to "va". */ + void *va = __va((phys_addr_t)(buffer >> 1) << 7); + + /* Acquire the associated "skb". */ + struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); + struct sk_buff *skb = *skb_ptr; + + kfree_skb(skb); + + /* Consume this packet. */ + qup->__packet_receive_read = index2; + } +} + + +/* + * Handle the next packet. Return true if "processed", false if "filtered". + */ +static bool tile_net_poll_aux(struct tile_net_cpu *info, int index) +{ + struct net_device *dev = info->napi.dev; + + struct tile_netio_queue *queue = &info->queue; + netio_queue_impl_t *qsp = queue->__system_part; + netio_queue_user_impl_t *qup = &queue->__user_part; + struct tile_net_stats_t *stats = &info->stats; + + int filter; + + int index2_aux = index + sizeof(netio_pkt_t); + int index2 = + ((index2_aux == + qsp->__packet_receive_queue.__last_packet_plus_one) ? + 0 : index2_aux); + + netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index); + + netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt); + + /* Extract the packet size. */ + unsigned long len = + (NETIO_PKT_CUSTOM_LENGTH(pkt) + + NET_IP_ALIGN - NETIO_PACKET_PADDING); + + /* Extract the "linux_buffer_t". */ + unsigned int buffer = pkt->__packet.word; + + /* Extract "small" (vs "large"). */ + bool small = ((buffer & 1) != 0); + + /* Convert "linux_buffer_t" to "va". */ + void *va = __va((phys_addr_t)(buffer >> 1) << 7); + + /* Extract the packet data pointer. */ + /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */ + unsigned char *buf = va + NET_IP_ALIGN; + +#ifdef IGNORE_DUP_ACKS + + static int other; + static int final; + static int keep; + static int skip; + +#endif + + /* Invalidate the packet buffer. */ + if (!hash_default) + __inv_buffer(buf, len); + + /* ISSUE: Is this needed? */ + dev->last_rx = jiffies; + +#ifdef TILE_NET_DUMP_PACKETS + dump_packet(buf, len, "rx"); +#endif /* TILE_NET_DUMP_PACKETS */ + +#ifdef TILE_NET_VERIFY_INGRESS + if (!NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt) && + NETIO_PKT_L4_CSUM_CALCULATED_M(metadata, pkt)) { + /* + * FIXME: This complains about UDP packets + * with a "zero" checksum (bug 6624). 
+ */ +#ifdef TILE_NET_PANIC_ON_BAD + dump_packet(buf, len, "rx"); + panic("Bad L4 checksum."); +#else + pr_warning("Bad L4 checksum on %d byte packet.\n", len); +#endif + } + if (!NETIO_PKT_L3_CSUM_CORRECT_M(metadata, pkt) && + NETIO_PKT_L3_CSUM_CALCULATED_M(metadata, pkt)) { + dump_packet(buf, len, "rx"); + panic("Bad L3 checksum."); + } + switch (NETIO_PKT_STATUS_M(metadata, pkt)) { + case NETIO_PKT_STATUS_OVERSIZE: + if (len >= 64) { + dump_packet(buf, len, "rx"); + panic("Unexpected OVERSIZE."); + } + break; + case NETIO_PKT_STATUS_BAD: +#ifdef TILE_NET_PANIC_ON_BAD + dump_packet(buf, len, "rx"); + panic("Unexpected BAD packet."); +#else + pr_warning("Unexpected BAD %d byte packet.\n", len); +#endif + } +#endif + + filter = 0; + + if (!(dev->flags & IFF_UP)) { + /* Filter packets received before we're up. */ + filter = 1; + } else if (!(dev->flags & IFF_PROMISC)) { + /* + * FIXME: Implement HW multicast filter. + */ + if (!IS_MULTICAST(buf) && !IS_BROADCAST(buf)) { + /* Filter packets not for our address. */ + const u8 *mine = dev->dev_addr; + filter = compare_ether_addr(mine, buf); + } + } + +#ifdef IGNORE_DUP_ACKS + + if (len != 66) { + /* FIXME: Must check "is_tcp_ack(buf, len)" somehow. */ + + other++; + + } else if (index2 == + qsp->__packet_receive_queue.__packet_write) { + + final++; + + } else { + + netio_pkt_t *pkt2 = (netio_pkt_t *) + ((unsigned long) &qsp[1] + index2); + + netio_pkt_metadata_t *metadata2 = + NETIO_PKT_METADATA(pkt2); + + /* Extract the packet size. */ + unsigned long len2 = + (NETIO_PKT_CUSTOM_LENGTH(pkt2) + + NET_IP_ALIGN - NETIO_PACKET_PADDING); + + if (len2 == 66 && + NETIO_PKT_FLOW_HASH_M(metadata, pkt) == + NETIO_PKT_FLOW_HASH_M(metadata2, pkt2)) { + + /* Extract the "linux_buffer_t". */ + unsigned int buffer2 = pkt2->__packet.word; + + /* Convert "linux_buffer_t" to "va". */ + void *va2 = + __va((phys_addr_t)(buffer2 >> 1) << 7); + + /* Extract the packet data pointer. */ + /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */ + unsigned char *buf2 = va2 + NET_IP_ALIGN; + + /* Invalidate the packet buffer. */ + if (!hash_default) + __inv_buffer(buf2, len2); + + if (is_dup_ack(buf, buf2, len)) { + skip++; + filter = 1; + } else { + keep++; + } + } + } + + if (net_ratelimit()) + pr_info("Other %d Final %d Keep %d Skip %d.\n", + other, final, keep, skip); + +#endif + + if (filter) { + + /* ISSUE: Update "drop" statistics? */ + + tile_net_provide_linux_buffer(info, va, small); + + } else { + + /* Acquire the associated "skb". */ + struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); + struct sk_buff *skb = *skb_ptr; + + /* Paranoia. */ + if (skb->data != buf) + panic("Corrupt linux buffer from LIPP! " + "VA=%p, skb=%p, skb->data=%p\n", + va, skb, skb->data); + + /* Encode the actual packet length. */ + skb_put(skb, len); + + /* NOTE: This call also sets "skb->dev = dev". */ + skb->protocol = eth_type_trans(skb, dev); + + /* ISSUE: Discard corrupt packets? */ + /* ISSUE: Discard packets with bad checksums? */ + + /* Avoid recomputing TCP/UDP checksums. */ + if (NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt)) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + netif_receive_skb(skb); + + stats->rx_packets++; + stats->rx_bytes += len; + + if (small) + info->num_needed_small_buffers++; + else + info->num_needed_large_buffers++; + } + + /* Return four credits after every fourth packet. 
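	 * (The credit interval is set to 4 in tile_net_register(), so
	 * credits are returned in batches rather than one per packet.)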
*/ + if (--qup->__receive_credit_remaining == 0) { + u32 interval = qup->__receive_credit_interval; + qup->__receive_credit_remaining = interval; + __netio_fastio_return_credits(qup->__fastio_index, interval); + } + + /* Consume this packet. */ + qup->__packet_receive_read = index2; + + return !filter; +} + + +/* + * Handle some packets for the given device on the current CPU. + * + * ISSUE: The "rotting packet" race condition occurs if a packet + * arrives after the queue appears to be empty, and before the + * hypervisor interrupt is re-enabled. + */ +static int tile_net_poll(struct napi_struct *napi, int budget) +{ + struct net_device *dev = napi->dev; + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info = priv->cpu[my_cpu]; + struct tile_netio_queue *queue = &info->queue; + netio_queue_impl_t *qsp = queue->__system_part; + netio_queue_user_impl_t *qup = &queue->__user_part; + + unsigned int work = 0; + + while (1) { + int index = qup->__packet_receive_read; + if (index == qsp->__packet_receive_queue.__packet_write) + break; + + if (tile_net_poll_aux(info, index)) { + if (++work >= budget) + goto done; + } + } + + napi_complete(&info->napi); + + /* Re-enable hypervisor interrupts. */ + enable_percpu_irq(priv->intr_id); + + /* HACK: Avoid the "rotting packet" problem. */ + if (qup->__packet_receive_read != + qsp->__packet_receive_queue.__packet_write) + napi_schedule(&info->napi); + + /* ISSUE: Handle completions? */ + +done: + + tile_net_provide_needed_buffers(info); + + return work; +} + + +/* + * Handle an ingress interrupt for the given device on the current cpu. + */ +static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr) +{ + struct net_device *dev = (struct net_device *)dev_ptr; + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info = priv->cpu[my_cpu]; + + /* Disable hypervisor interrupt. */ + disable_percpu_irq(priv->intr_id); + + napi_schedule(&info->napi); + + return IRQ_HANDLED; +} + + +/* + * One time initialization per interface. + */ +static int tile_net_open_aux(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + + int ret; + int dummy; + unsigned int epp_lotar; + + /* + * Find out where EPP memory should be homed. + */ + ret = hv_dev_pread(priv->hv_devhdl, 0, + (HV_VirtAddr)&epp_lotar, sizeof(epp_lotar), + NETIO_EPP_SHM_OFF); + if (ret < 0) { + pr_err("could not read epp_shm_queue lotar.\n"); + return -EIO; + } + + /* + * Home the page on the EPP. + */ + { + int epp_home = hv_lotar_to_cpu(epp_lotar); + struct page *page = virt_to_page(priv->epp_queue); + homecache_change_page_home(page, 0, epp_home); + } + + /* + * Register the EPP shared memory queue. + */ + { + netio_ipp_address_t ea = { + .va = 0, + .pa = __pa(priv->epp_queue), + .pte = hv_pte(0), + .size = PAGE_SIZE, + }; + ea.pte = hv_pte_set_lotar(ea.pte, epp_lotar); + ea.pte = hv_pte_set_mode(ea.pte, HV_PTE_MODE_CACHE_TILE_L3); + ret = hv_dev_pwrite(priv->hv_devhdl, 0, + (HV_VirtAddr)&ea, + sizeof(ea), + NETIO_EPP_SHM_OFF); + if (ret < 0) + return -EIO; + } + + /* + * Start LIPP/LEPP. + */ + if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, + sizeof(dummy), NETIO_IPP_START_SHIM_OFF) < 0) { + pr_warning("Failed to start LIPP/LEPP.\n"); + return -EIO; + } + + return 0; +} + + +/* + * Register with hypervisor on each CPU. 
+ * + * Strangely, this function does important things even if it "fails", + * which is especially common if the link is not up yet. Hopefully + * these things are all "harmless" if done twice! + */ +static void tile_net_register(void *dev_ptr) +{ + struct net_device *dev = (struct net_device *)dev_ptr; + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info; + + struct tile_netio_queue *queue; + + /* Only network cpus can receive packets. */ + int queue_id = + cpumask_test_cpu(my_cpu, &priv->network_cpus_map) ? 0 : 255; + + netio_input_config_t config = { + .flags = 0, + .num_receive_packets = priv->network_cpus_credits, + .queue_id = queue_id + }; + + int ret = 0; + netio_queue_impl_t *queuep; + + PDEBUG("tile_net_register(queue_id %d)\n", queue_id); + + if (!strcmp(dev->name, "xgbe0")) + info = &__get_cpu_var(hv_xgbe0); + else if (!strcmp(dev->name, "xgbe1")) + info = &__get_cpu_var(hv_xgbe1); + else if (!strcmp(dev->name, "gbe0")) + info = &__get_cpu_var(hv_gbe0); + else if (!strcmp(dev->name, "gbe1")) + info = &__get_cpu_var(hv_gbe1); + else + BUG(); + + /* Initialize the egress timer. */ + init_timer(&info->egress_timer); + info->egress_timer.data = (long)info; + info->egress_timer.function = tile_net_handle_egress_timer; + + priv->cpu[my_cpu] = info; + + /* + * Register ourselves with the IPP. + */ + ret = hv_dev_pwrite(priv->hv_devhdl, 0, + (HV_VirtAddr)&config, + sizeof(netio_input_config_t), + NETIO_IPP_INPUT_REGISTER_OFF); + PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n", + ret); + if (ret < 0) { + printk(KERN_DEBUG "hv_dev_pwrite NETIO_IPP_INPUT_REGISTER_OFF" + " failure %d\n", ret); + info->link_down = (ret == NETIO_LINK_DOWN); + return; + } + + /* + * Get the pointer to our queue's system part. + */ + + ret = hv_dev_pread(priv->hv_devhdl, 0, + (HV_VirtAddr)&queuep, + sizeof(netio_queue_impl_t *), + NETIO_IPP_INPUT_REGISTER_OFF); + PDEBUG("hv_dev_pread(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n", + ret); + PDEBUG("queuep %p\n", queuep); + if (ret <= 0) { + /* ISSUE: Shouldn't this be a fatal error? */ + pr_err("hv_dev_pread NETIO_IPP_INPUT_REGISTER_OFF failure\n"); + return; + } + + queue = &info->queue; + + queue->__system_part = queuep; + + memset(&queue->__user_part, 0, sizeof(netio_queue_user_impl_t)); + + /* This is traditionally "config.num_receive_packets / 2". */ + queue->__user_part.__receive_credit_interval = 4; + queue->__user_part.__receive_credit_remaining = + queue->__user_part.__receive_credit_interval; + + /* + * Get a fastio index from the hypervisor. + * ISSUE: Shouldn't this check the result? + */ + ret = hv_dev_pread(priv->hv_devhdl, 0, + (HV_VirtAddr)&queue->__user_part.__fastio_index, + sizeof(queue->__user_part.__fastio_index), + NETIO_IPP_GET_FASTIO_OFF); + PDEBUG("hv_dev_pread(NETIO_IPP_GET_FASTIO_OFF) returned %d\n", ret); + + netif_napi_add(dev, &info->napi, tile_net_poll, 64); + + /* Now we are registered. */ + info->registered = true; +} + + +/* + * Unregister with hypervisor on each CPU. + */ +static void tile_net_unregister(void *dev_ptr) +{ + struct net_device *dev = (struct net_device *)dev_ptr; + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info = priv->cpu[my_cpu]; + + int ret = 0; + int dummy = 0; + + /* Do nothing if never registered. */ + if (info == NULL) + return; + + /* Do nothing if already unregistered. */ + if (!info->registered) + return; + + /* + * Unregister ourselves with LIPP. 
+ */ + ret = hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, + sizeof(dummy), NETIO_IPP_INPUT_UNREGISTER_OFF); + PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_UNREGISTER_OFF) returned %d\n", + ret); + if (ret < 0) { + /* FIXME: Just panic? */ + pr_err("hv_dev_pwrite NETIO_IPP_INPUT_UNREGISTER_OFF" + " failure %d\n", ret); + } + + /* + * Discard all packets still in our NetIO queue. Hopefully, + * once the unregister call is complete, there will be no + * packets still in flight on the IDN. + */ + tile_net_discard_packets(dev); + + /* Reset state. */ + info->num_needed_small_buffers = 0; + info->num_needed_large_buffers = 0; + + /* Cancel egress timer. */ + del_timer(&info->egress_timer); + info->egress_timer_scheduled = false; + + netif_napi_del(&info->napi); + + /* Now we are unregistered. */ + info->registered = false; +} + + +/* + * Helper function for "tile_net_stop()". + * + * Also used to handle registration failure in "tile_net_open_inner()", + * when "fully_opened" is known to be false, and the various extra + * steps in "tile_net_stop()" are not necessary. ISSUE: It might be + * simpler if we could just call "tile_net_stop()" anyway. + */ +static void tile_net_stop_aux(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + + int dummy = 0; + + /* Unregister all tiles, so LIPP will stop delivering packets. */ + on_each_cpu(tile_net_unregister, (void *)dev, 1); + + /* Stop LIPP/LEPP. */ + if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, + sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0) + panic("Failed to stop LIPP/LEPP!\n"); + + priv->partly_opened = 0; +} + + +/* + * Disable ingress interrupts for the given device on the current cpu. + */ +static void tile_net_disable_intr(void *dev_ptr) +{ + struct net_device *dev = (struct net_device *)dev_ptr; + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info = priv->cpu[my_cpu]; + + /* Disable hypervisor interrupt. */ + disable_percpu_irq(priv->intr_id); + + /* Disable NAPI if needed. */ + if (info != NULL && info->napi_enabled) { + napi_disable(&info->napi); + info->napi_enabled = false; + } +} + + +/* + * Enable ingress interrupts for the given device on the current cpu. + */ +static void tile_net_enable_intr(void *dev_ptr) +{ + struct net_device *dev = (struct net_device *)dev_ptr; + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info = priv->cpu[my_cpu]; + + /* Enable hypervisor interrupt. */ + enable_percpu_irq(priv->intr_id); + + /* Enable NAPI. */ + napi_enable(&info->napi); + info->napi_enabled = true; +} + + +/* + * tile_net_open_inner does most of the work of bringing up the interface. + * It's called from tile_net_open(), and also from tile_net_retry_open(). + * The return value is 0 if the interface was brought up, < 0 if + * tile_net_open() should return the return value as an error, and > 0 if + * tile_net_open() should return success and schedule a work item to + * periodically retry the bringup. + */ +static int tile_net_open_inner(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info; + struct tile_netio_queue *queue; + unsigned int irq; + int i; + + /* + * First try to register just on the local CPU, and handle any + * semi-expected "link down" failure specially. Note that we + * do NOT call "tile_net_stop_aux()", unlike below. 
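	 * A "link down" failure here is reported back as a positive return
	 * value, which makes tile_net_open() fall back to the delayed retry
	 * path instead of failing outright.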
+ */ + tile_net_register(dev); + info = priv->cpu[my_cpu]; + if (!info->registered) { + if (info->link_down) + return 1; + return -EAGAIN; + } + + /* + * Now register everywhere else. If any registration fails, + * even for "link down" (which might not be possible), we + * clean up using "tile_net_stop_aux()". + */ + smp_call_function(tile_net_register, (void *)dev, 1); + for_each_online_cpu(i) { + if (!priv->cpu[i]->registered) { + tile_net_stop_aux(dev); + return -EAGAIN; + } + } + + queue = &info->queue; + + /* + * Set the device intr bit mask. + * The tile_net_register above sets per tile __intr_id. + */ + priv->intr_id = queue->__system_part->__intr_id; + BUG_ON(!priv->intr_id); + + /* + * Register the device interrupt handler. + * The __ffs() function returns the index into the interrupt handler + * table from the interrupt bit mask which should have one bit + * and one bit only set. + */ + irq = __ffs(priv->intr_id); + tile_irq_activate(irq, TILE_IRQ_PERCPU); + BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt, + 0, dev->name, (void *)dev) != 0); + + /* ISSUE: How could "priv->fully_opened" ever be "true" here? */ + + if (!priv->fully_opened) { + + int dummy = 0; + + /* Allocate initial buffers. */ + + int max_buffers = + priv->network_cpus_count * priv->network_cpus_credits; + + info->num_needed_small_buffers = + min(LIPP_SMALL_BUFFERS, max_buffers); + + info->num_needed_large_buffers = + min(LIPP_LARGE_BUFFERS, max_buffers); + + tile_net_provide_needed_buffers(info); + + if (info->num_needed_small_buffers != 0 || + info->num_needed_large_buffers != 0) + panic("Insufficient memory for buffer stack!"); + + /* Start LIPP/LEPP and activate "ingress" at the shim. */ + if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy, + sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0) + panic("Failed to activate the LIPP Shim!\n"); + + priv->fully_opened = 1; + } + + /* On each tile, enable the hypervisor to trigger interrupts. */ + /* ISSUE: Do this before starting LIPP/LEPP? */ + on_each_cpu(tile_net_enable_intr, (void *)dev, 1); + + /* Start our transmit queue. */ + netif_start_queue(dev); + + return 0; +} + + +/* + * Called periodically to retry bringing up the NetIO interface, + * if it doesn't come up cleanly during tile_net_open(). + */ +static void tile_net_open_retry(struct work_struct *w) +{ + struct delayed_work *dw = + container_of(w, struct delayed_work, work); + + struct tile_net_priv *priv = + container_of(dw, struct tile_net_priv, retry_work); + + /* + * Try to bring the NetIO interface up. If it fails, reschedule + * ourselves to try again later; otherwise, tell Linux we now have + * a working link. ISSUE: What if the return value is negative? + */ + if (tile_net_open_inner(priv->dev)) + schedule_delayed_work_on(singlethread_cpu, &priv->retry_work, + TILE_NET_RETRY_INTERVAL); + else + netif_carrier_on(priv->dev); +} + + +/* + * Called when a network interface is made active. + * + * Returns 0 on success, negative value on failure. + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + * + * If the actual link is not available yet, then we tell Linux that + * we have no carrier, and we keep checking until the link comes up. 
+ */ +static int tile_net_open(struct net_device *dev) +{ + int ret = 0; + struct tile_net_priv *priv = netdev_priv(dev); + + /* + * We rely on priv->partly_opened to tell us if this is the + * first time this interface is being brought up. If it is + * set, the IPP was already initialized and should not be + * initialized again. + */ + if (!priv->partly_opened) { + + int count; + int credits; + + /* Initialize LIPP/LEPP, and start the Shim. */ + ret = tile_net_open_aux(dev); + if (ret < 0) { + pr_err("tile_net_open_aux failed: %d\n", ret); + return ret; + } + + /* Analyze the network cpus. */ + + if (network_cpus_used) + cpumask_copy(&priv->network_cpus_map, + &network_cpus_map); + else + cpumask_copy(&priv->network_cpus_map, cpu_online_mask); + + + count = cpumask_weight(&priv->network_cpus_map); + + /* Limit credits to available buffers, and apply min. */ + credits = max(16, (LIPP_LARGE_BUFFERS / count) & ~1); + + /* Apply "GBE" max limit. */ + /* ISSUE: Use higher limit for XGBE? */ + credits = min(NETIO_MAX_RECEIVE_PKTS, credits); + + priv->network_cpus_count = count; + priv->network_cpus_credits = credits; + +#ifdef TILE_NET_DEBUG + pr_info("Using %d network cpus, with %d credits each\n", + priv->network_cpus_count, priv->network_cpus_credits); +#endif + + priv->partly_opened = 1; + } + + /* + * Attempt to bring up the link. + */ + ret = tile_net_open_inner(dev); + if (ret <= 0) { + if (ret == 0) + netif_carrier_on(dev); + return ret; + } + + /* + * We were unable to bring up the NetIO interface, but we want to + * try again in a little bit. Tell Linux that we have no carrier + * so it doesn't try to use the interface before the link comes up + * and then remember to try again later. + */ + netif_carrier_off(dev); + schedule_delayed_work_on(singlethread_cpu, &priv->retry_work, + TILE_NET_RETRY_INTERVAL); + + return 0; +} + + +/* + * Disables a network interface. + * + * Returns 0, this is not allowed to fail. + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + * + * ISSUE: Can this can be called while "tile_net_poll()" is running? + */ +static int tile_net_stop(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + + bool pending = true; + + PDEBUG("tile_net_stop()\n"); + + /* ISSUE: Only needed if not yet fully open. */ + cancel_delayed_work_sync(&priv->retry_work); + + /* Can't transmit any more. */ + netif_stop_queue(dev); + + /* + * Disable hypervisor interrupts on each tile. + */ + on_each_cpu(tile_net_disable_intr, (void *)dev, 1); + + /* + * Unregister the interrupt handler. + * The __ffs() function returns the index into the interrupt handler + * table from the interrupt bit mask which should have one bit + * and one bit only set. + */ + if (priv->intr_id) + free_irq(__ffs(priv->intr_id), dev); + + /* + * Drain all the LIPP buffers. + */ + + while (true) { + int buffer; + + /* NOTE: This should never fail. */ + if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer, + sizeof(buffer), NETIO_IPP_DRAIN_OFF) < 0) + break; + + /* Stop when done. */ + if (buffer == 0) + break; + + { + /* Convert "linux_buffer_t" to "va". */ + void *va = __va((phys_addr_t)(buffer >> 1) << 7); + + /* Acquire the associated "skb". 
*/ + struct sk_buff **skb_ptr = va - sizeof(*skb_ptr); + struct sk_buff *skb = *skb_ptr; + + kfree_skb(skb); + } + } + + /* Stop LIPP/LEPP. */ + tile_net_stop_aux(dev); + + + priv->fully_opened = 0; + + + /* + * XXX: ISSUE: It appears that, in practice anyway, by the + * time we get here, there are no pending completions. + */ + while (pending) { + + struct sk_buff *olds[32]; + unsigned int wanted = 32; + unsigned int i, nolds = 0; + + nolds = tile_net_lepp_grab_comps(dev, olds, + wanted, &pending); + + /* ISSUE: We have never actually seen this debug spew. */ + if (nolds != 0) + pr_info("During tile_net_stop(), grabbed %d comps.\n", + nolds); + + for (i = 0; i < nolds; i++) + kfree_skb(olds[i]); + } + + + /* Wipe the EPP queue. */ + memset(priv->epp_queue, 0, sizeof(lepp_queue_t)); + + /* Evict the EPP queue. */ + finv_buffer(priv->epp_queue, PAGE_SIZE); + + return 0; +} + + +/* + * Prepare the "frags" info for the resulting LEPP command. + * + * If needed, flush the memory used by the frags. + */ +static unsigned int tile_net_tx_frags(lepp_frag_t *frags, + struct sk_buff *skb, + void *b_data, unsigned int b_len) +{ + unsigned int i, n = 0; + + struct skb_shared_info *sh = skb_shinfo(skb); + + phys_addr_t cpa; + + if (b_len != 0) { + + if (!hash_default) + finv_buffer_remote(b_data, b_len); + + cpa = __pa(b_data); + frags[n].cpa_lo = cpa; + frags[n].cpa_hi = cpa >> 32; + frags[n].length = b_len; + frags[n].hash_for_home = hash_default; + n++; + } + + for (i = 0; i < sh->nr_frags; i++) { + + skb_frag_t *f = &sh->frags[i]; + unsigned long pfn = page_to_pfn(f->page); + + /* FIXME: Compute "hash_for_home" properly. */ + /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */ + int hash_for_home = hash_default; + + /* FIXME: Hmmm. */ + if (!hash_default) { + void *va = pfn_to_kaddr(pfn) + f->page_offset; + BUG_ON(PageHighMem(f->page)); + finv_buffer_remote(va, f->size); + } + + cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset; + frags[n].cpa_lo = cpa; + frags[n].cpa_hi = cpa >> 32; + frags[n].length = f->size; + frags[n].hash_for_home = hash_for_home; + n++; + } + + return n; +} + + +/* + * This function takes "skb", consisting of a header template and a + * payload, and hands it to LEPP, to emit as one or more segments, + * each consisting of a possibly modified header, plus a piece of the + * payload, via a process known as "tcp segmentation offload". + * + * Usually, "data" will contain the header template, of size "sh_len", + * and "sh->frags" will contain "skb->data_len" bytes of payload, and + * there will be "sh->gso_segs" segments. + * + * Sometimes, if "sendfile()" requires copying, we will be called with + * "data" containing the header and payload, with "frags" being empty. + * + * In theory, "sh->nr_frags" could be 3, but in practice, it seems + * that this will never actually happen. + * + * See "emulate_large_send_offload()" for some reference code, which + * does not handle checksumming. + * + * ISSUE: How do we make sure that high memory DMA does not migrate? + */ +static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info = priv->cpu[my_cpu]; + struct tile_net_stats_t *stats = &info->stats; + + struct skb_shared_info *sh = skb_shinfo(skb); + + unsigned char *data = skb->data; + + /* The ip header follows the ethernet header. 
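	 * (This TSO path assumes an IPv4 TCP packet with a partial checksum;
	 * the BUG_ON() checks further down enforce exactly that.)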
*/ + struct iphdr *ih = ip_hdr(skb); + unsigned int ih_len = ih->ihl * 4; + + /* Note that "nh == ih", by definition. */ + unsigned char *nh = skb_network_header(skb); + unsigned int eh_len = nh - data; + + /* The tcp header follows the ip header. */ + struct tcphdr *th = (struct tcphdr *)(nh + ih_len); + unsigned int th_len = th->doff * 4; + + /* The total number of header bytes. */ + /* NOTE: This may be less than skb_headlen(skb). */ + unsigned int sh_len = eh_len + ih_len + th_len; + + /* The number of payload bytes at "skb->data + sh_len". */ + /* This is non-zero for sendfile() without HIGHDMA. */ + unsigned int b_len = skb_headlen(skb) - sh_len; + + /* The total number of payload bytes. */ + unsigned int d_len = b_len + skb->data_len; + + /* The maximum payload size. */ + unsigned int p_len = sh->gso_size; + + /* The total number of segments. */ + unsigned int num_segs = sh->gso_segs; + + /* The temporary copy of the command. */ + u32 cmd_body[(LEPP_MAX_CMD_SIZE + 3) / 4]; + lepp_tso_cmd_t *cmd = (lepp_tso_cmd_t *)cmd_body; + + /* Analyze the "frags". */ + unsigned int num_frags = + tile_net_tx_frags(cmd->frags, skb, data + sh_len, b_len); + + /* The size of the command, including frags and header. */ + size_t cmd_size = LEPP_TSO_CMD_SIZE(num_frags, sh_len); + + /* The command header. */ + lepp_tso_cmd_t cmd_init = { + .tso = true, + .header_size = sh_len, + .ip_offset = eh_len, + .tcp_offset = eh_len + ih_len, + .payload_size = p_len, + .num_frags = num_frags, + }; + + unsigned long irqflags; + + lepp_queue_t *eq = priv->epp_queue; + + struct sk_buff *olds[4]; + unsigned int wanted = 4; + unsigned int i, nolds = 0; + + unsigned int cmd_head, cmd_tail, cmd_next; + unsigned int comp_tail; + + unsigned int free_slots; + + + /* Paranoia. */ + BUG_ON(skb->protocol != htons(ETH_P_IP)); + BUG_ON(ih->protocol != IPPROTO_TCP); + BUG_ON(skb->ip_summed != CHECKSUM_PARTIAL); + BUG_ON(num_frags > LEPP_MAX_FRAGS); + /*--BUG_ON(num_segs != (d_len + (p_len - 1)) / p_len); */ + BUG_ON(num_segs <= 1); + + + /* Finish preparing the command. */ + + /* Copy the command header. */ + *cmd = cmd_init; + + /* Copy the "header". */ + memcpy(&cmd->frags[num_frags], data, sh_len); + + + /* Prefetch and wait, to minimize time spent holding the spinlock. */ + prefetch_L1(&eq->comp_tail); + prefetch_L1(&eq->cmd_tail); + mb(); + + + /* Enqueue the command. */ + + spin_lock_irqsave(&priv->cmd_lock, irqflags); + + /* + * Handle completions if needed to make room. + * HACK: Spin until there is sufficient room. + */ + free_slots = lepp_num_free_comp_slots(eq); + if (free_slots < 1) { +spin: + nolds += tile_net_lepp_grab_comps(dev, olds + nolds, + wanted - nolds, NULL); + if (lepp_num_free_comp_slots(eq) < 1) + goto spin; + } + + cmd_head = eq->cmd_head; + cmd_tail = eq->cmd_tail; + + /* NOTE: The "gotos" below are untested. */ + + /* Prepare to advance, detecting full queue. */ + cmd_next = cmd_tail + cmd_size; + if (cmd_tail < cmd_head && cmd_next >= cmd_head) + goto spin; + if (cmd_next > LEPP_CMD_LIMIT) { + cmd_next = 0; + if (cmd_next == cmd_head) + goto spin; + } + + /* Copy the command. */ + memcpy(&eq->cmds[cmd_tail], cmd, cmd_size); + + /* Advance. */ + cmd_tail = cmd_next; + + /* Record "skb" for eventual freeing. */ + comp_tail = eq->comp_tail; + eq->comps[comp_tail] = skb; + LEPP_QINC(comp_tail); + eq->comp_tail = comp_tail; + + /* Flush before allowing LEPP to handle the command. 
*/ + __insn_mf(); + + eq->cmd_tail = cmd_tail; + + spin_unlock_irqrestore(&priv->cmd_lock, irqflags); + + if (nolds == 0) + nolds = tile_net_lepp_grab_comps(dev, olds, wanted, NULL); + + /* Handle completions. */ + for (i = 0; i < nolds; i++) + kfree_skb(olds[i]); + + /* Update stats. */ + stats->tx_packets += num_segs; + stats->tx_bytes += (num_segs * sh_len) + d_len; + + /* Make sure the egress timer is scheduled. */ + tile_net_schedule_egress_timer(info); + + return NETDEV_TX_OK; +} + + +/* + * Transmit a packet (called by the kernel via "hard_start_xmit" hook). + */ +static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + int my_cpu = smp_processor_id(); + struct tile_net_cpu *info = priv->cpu[my_cpu]; + struct tile_net_stats_t *stats = &info->stats; + + unsigned long irqflags; + + struct skb_shared_info *sh = skb_shinfo(skb); + + unsigned int len = skb->len; + unsigned char *data = skb->data; + + unsigned int csum_start = skb->csum_start - skb_headroom(skb); + + lepp_frag_t frags[LEPP_MAX_FRAGS]; + + unsigned int num_frags; + + lepp_queue_t *eq = priv->epp_queue; + + struct sk_buff *olds[4]; + unsigned int wanted = 4; + unsigned int i, nolds = 0; + + unsigned int cmd_size = sizeof(lepp_cmd_t); + + unsigned int cmd_head, cmd_tail, cmd_next; + unsigned int comp_tail; + + lepp_cmd_t cmds[LEPP_MAX_FRAGS]; + + unsigned int free_slots; + + + /* + * This is paranoia, since we think that if the link doesn't come + * up, telling Linux we have no carrier will keep it from trying + * to transmit. If it does, though, we can't execute this routine, + * since data structures we depend on aren't set up yet. + */ + if (!info->registered) + return NETDEV_TX_BUSY; + + + /* Save the timestamp. */ + dev->trans_start = jiffies; + + +#ifdef TILE_NET_PARANOIA +#if CHIP_HAS_CBOX_HOME_MAP() + if (hash_default) { + HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)data); + if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3) + panic("Non-coherent egress buffer!"); + } +#endif +#endif + + +#ifdef TILE_NET_DUMP_PACKETS + /* ISSUE: Does not dump the "frags". */ + dump_packet(data, skb_headlen(skb), "tx"); +#endif /* TILE_NET_DUMP_PACKETS */ + + + if (sh->gso_size != 0) + return tile_net_tx_tso(skb, dev); + + + /* Prepare the commands. */ + + num_frags = tile_net_tx_frags(frags, skb, data, skb_headlen(skb)); + + for (i = 0; i < num_frags; i++) { + + bool final = (i == num_frags - 1); + + lepp_cmd_t cmd = { + .cpa_lo = frags[i].cpa_lo, + .cpa_hi = frags[i].cpa_hi, + .length = frags[i].length, + .hash_for_home = frags[i].hash_for_home, + .send_completion = final, + .end_of_packet = final + }; + + if (i == 0 && skb->ip_summed == CHECKSUM_PARTIAL) { + cmd.compute_checksum = 1; + cmd.checksum_data.bits.start_byte = csum_start; + cmd.checksum_data.bits.count = len - csum_start; + cmd.checksum_data.bits.destination_byte = + csum_start + skb->csum_offset; + } + + cmds[i] = cmd; + } + + + /* Prefetch and wait, to minimize time spent holding the spinlock. */ + prefetch_L1(&eq->comp_tail); + prefetch_L1(&eq->cmd_tail); + mb(); + + + /* Enqueue the commands. */ + + spin_lock_irqsave(&priv->cmd_lock, irqflags); + + /* + * Handle completions if needed to make room. + * HACK: Spin until there is sufficient room. 
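	 * The completions grabbed here are skbs from earlier commands; they
	 * are freed with kfree_skb() once the lock has been dropped.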
+ */ + free_slots = lepp_num_free_comp_slots(eq); + if (free_slots < 1) { +spin: + nolds += tile_net_lepp_grab_comps(dev, olds + nolds, + wanted - nolds, NULL); + if (lepp_num_free_comp_slots(eq) < 1) + goto spin; + } + + cmd_head = eq->cmd_head; + cmd_tail = eq->cmd_tail; + + /* NOTE: The "gotos" below are untested. */ + + /* Copy the commands, or fail. */ + for (i = 0; i < num_frags; i++) { + + /* Prepare to advance, detecting full queue. */ + cmd_next = cmd_tail + cmd_size; + if (cmd_tail < cmd_head && cmd_next >= cmd_head) + goto spin; + if (cmd_next > LEPP_CMD_LIMIT) { + cmd_next = 0; + if (cmd_next == cmd_head) + goto spin; + } + + /* Copy the command. */ + *(lepp_cmd_t *)&eq->cmds[cmd_tail] = cmds[i]; + + /* Advance. */ + cmd_tail = cmd_next; + } + + /* Record "skb" for eventual freeing. */ + comp_tail = eq->comp_tail; + eq->comps[comp_tail] = skb; + LEPP_QINC(comp_tail); + eq->comp_tail = comp_tail; + + /* Flush before allowing LEPP to handle the command. */ + __insn_mf(); + + eq->cmd_tail = cmd_tail; + + spin_unlock_irqrestore(&priv->cmd_lock, irqflags); + + if (nolds == 0) + nolds = tile_net_lepp_grab_comps(dev, olds, wanted, NULL); + + /* Handle completions. */ + for (i = 0; i < nolds; i++) + kfree_skb(olds[i]); + + /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */ + stats->tx_packets++; + stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN); + + /* Make sure the egress timer is scheduled. */ + tile_net_schedule_egress_timer(info); + + return NETDEV_TX_OK; +} + + +/* + * Deal with a transmit timeout. + */ +static void tile_net_tx_timeout(struct net_device *dev) +{ + PDEBUG("tile_net_tx_timeout()\n"); + PDEBUG("Transmit timeout at %ld, latency %ld\n", jiffies, + jiffies - dev->trans_start); + + /* XXX: ISSUE: This doesn't seem useful for us. */ + netif_wake_queue(dev); +} + + +/* + * Ioctl commands. + */ +static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + return -EOPNOTSUPP; +} + + +/* + * Get System Network Statistics. + * + * Returns the address of the device statistics structure. + */ +static struct net_device_stats *tile_net_get_stats(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + u32 rx_packets = 0; + u32 tx_packets = 0; + u32 rx_bytes = 0; + u32 tx_bytes = 0; + int i; + + for_each_online_cpu(i) { + if (priv->cpu[i]) { + rx_packets += priv->cpu[i]->stats.rx_packets; + rx_bytes += priv->cpu[i]->stats.rx_bytes; + tx_packets += priv->cpu[i]->stats.tx_packets; + tx_bytes += priv->cpu[i]->stats.tx_bytes; + } + } + + priv->stats.rx_packets = rx_packets; + priv->stats.rx_bytes = rx_bytes; + priv->stats.tx_packets = tx_packets; + priv->stats.tx_bytes = tx_bytes; + + return &priv->stats; +} + + +/* + * Change the "mtu". + * + * The "change_mtu" method is usually not needed. + * If you need it, it must be like this. + */ +static int tile_net_change_mtu(struct net_device *dev, int new_mtu) +{ + PDEBUG("tile_net_change_mtu()\n"); + + /* Check ranges. */ + if ((new_mtu < 68) || (new_mtu > 1500)) + return -EINVAL; + + /* Accept the value. */ + dev->mtu = new_mtu; + + return 0; +} + + +/* + * Change the Ethernet Address of the NIC. + * + * The hypervisor driver does not support changing MAC address. However, + * the IPP does not do anything with the MAC address, so the address which + * gets used on outgoing packets, and which is accepted on incoming packets, + * is completely up to the NetIO program or kernel driver which is actually + * handling them. + * + * Returns 0 on success, negative on failure. 
+ */ +static int tile_net_set_mac_address(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + /* ISSUE: Note that "dev_addr" is now a pointer. */ + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + return 0; +} + + +/* + * Obtain the MAC address from the hypervisor. + * This must be done before opening the device. + */ +static int tile_net_get_mac(struct net_device *dev) +{ + struct tile_net_priv *priv = netdev_priv(dev); + + char hv_dev_name[32]; + int len; + + __netio_getset_offset_t offset = { .word = NETIO_IPP_PARAM_OFF }; + + int ret; + + /* For example, "xgbe0". */ + strcpy(hv_dev_name, dev->name); + len = strlen(hv_dev_name); + + /* For example, "xgbe/0". */ + hv_dev_name[len] = hv_dev_name[len - 1]; + hv_dev_name[len - 1] = '/'; + len++; + + /* For example, "xgbe/0/native_hash". */ + strcpy(hv_dev_name + len, hash_default ? "/native_hash" : "/native"); + + /* Get the hypervisor handle for this device. */ + priv->hv_devhdl = hv_dev_open((HV_VirtAddr)hv_dev_name, 0); + PDEBUG("hv_dev_open(%s) returned %d %p\n", + hv_dev_name, priv->hv_devhdl, &priv->hv_devhdl); + if (priv->hv_devhdl < 0) { + if (priv->hv_devhdl == HV_ENODEV) + printk(KERN_DEBUG "Ignoring unconfigured device %s\n", + hv_dev_name); + else + printk(KERN_DEBUG "hv_dev_open(%s) returned %d\n", + hv_dev_name, priv->hv_devhdl); + return -1; + } + + /* + * Read the hardware address from the hypervisor. + * ISSUE: Note that "dev_addr" is now a pointer. + */ + offset.bits.class = NETIO_PARAM; + offset.bits.addr = NETIO_PARAM_MAC; + ret = hv_dev_pread(priv->hv_devhdl, 0, + (HV_VirtAddr)dev->dev_addr, dev->addr_len, + offset.word); + PDEBUG("hv_dev_pread(NETIO_PARAM_MAC) returned %d\n", ret); + if (ret <= 0) { + printk(KERN_DEBUG "hv_dev_pread(NETIO_PARAM_MAC) %s failed\n", + dev->name); + /* + * Since the device is configured by the hypervisor but we + * can't get its MAC address, we are most likely running + * the simulator, so let's generate a random MAC address. + */ + random_ether_addr(dev->dev_addr); + } + + return 0; +} + + +static struct net_device_ops tile_net_ops = { + .ndo_open = tile_net_open, + .ndo_stop = tile_net_stop, + .ndo_start_xmit = tile_net_tx, + .ndo_do_ioctl = tile_net_ioctl, + .ndo_get_stats = tile_net_get_stats, + .ndo_change_mtu = tile_net_change_mtu, + .ndo_tx_timeout = tile_net_tx_timeout, + .ndo_set_mac_address = tile_net_set_mac_address +}; + + +/* + * The setup function. + * + * This uses ether_setup() to assign various fields in dev, including + * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields. + */ +static void tile_net_setup(struct net_device *dev) +{ + PDEBUG("tile_net_setup()\n"); + + ether_setup(dev); + + dev->netdev_ops = &tile_net_ops; + + dev->watchdog_timeo = TILE_NET_TIMEOUT; + + /* We want lockless xmit. */ + dev->features |= NETIF_F_LLTX; + + /* We support hardware tx checksums. */ + dev->features |= NETIF_F_HW_CSUM; + + /* We support scatter/gather. */ + dev->features |= NETIF_F_SG; + + /* We support TSO. */ + dev->features |= NETIF_F_TSO; + +#ifdef TILE_NET_GSO + /* We support GSO. */ + dev->features |= NETIF_F_GSO; +#endif + + if (hash_default) + dev->features |= NETIF_F_HIGHDMA; + + /* ISSUE: We should support NETIF_F_UFO. */ + + dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN; + + dev->mtu = TILE_NET_MTU; +} + + +/* + * Allocate the device structure, register the device, and obtain the + * MAC address from the hypervisor. 
+ */ +static struct net_device *tile_net_dev_init(const char *name) +{ + int ret; + struct net_device *dev; + struct tile_net_priv *priv; + struct page *page; + + /* + * Allocate the device structure. This allocates "priv", calls + * tile_net_setup(), and saves "name". Normally, "name" is a + * template, instantiated by register_netdev(), but not for us. + */ + dev = alloc_netdev(sizeof(*priv), name, tile_net_setup); + if (!dev) { + pr_err("alloc_netdev(%s) failed\n", name); + return NULL; + } + + priv = netdev_priv(dev); + + /* Initialize "priv". */ + + memset(priv, 0, sizeof(*priv)); + + /* Save "dev" for "tile_net_open_retry()". */ + priv->dev = dev; + + INIT_DELAYED_WORK(&priv->retry_work, tile_net_open_retry); + + spin_lock_init(&priv->cmd_lock); + spin_lock_init(&priv->comp_lock); + + /* Allocate "epp_queue". */ + BUG_ON(get_order(sizeof(lepp_queue_t)) != 0); + page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0); + if (!page) { + free_netdev(dev); + return NULL; + } + priv->epp_queue = page_address(page); + + /* Register the network device. */ + ret = register_netdev(dev); + if (ret) { + pr_err("register_netdev %s failed %d\n", dev->name, ret); + free_page((unsigned long)priv->epp_queue); + free_netdev(dev); + return NULL; + } + + /* Get the MAC address. */ + ret = tile_net_get_mac(dev); + if (ret < 0) { + unregister_netdev(dev); + free_page((unsigned long)priv->epp_queue); + free_netdev(dev); + return NULL; + } + + return dev; +} + + +/* + * Module cleanup. + */ +static void tile_net_cleanup(void) +{ + int i; + + for (i = 0; i < TILE_NET_DEVS; i++) { + if (tile_net_devs[i]) { + struct net_device *dev = tile_net_devs[i]; + struct tile_net_priv *priv = netdev_priv(dev); + unregister_netdev(dev); + finv_buffer(priv->epp_queue, PAGE_SIZE); + free_page((unsigned long)priv->epp_queue); + free_netdev(dev); + } + } +} + + +/* + * Module initialization. + */ +static int tile_net_init_module(void) +{ + pr_info("Tilera IPP Net Driver\n"); + + tile_net_devs[0] = tile_net_dev_init("xgbe0"); + tile_net_devs[1] = tile_net_dev_init("xgbe1"); + tile_net_devs[2] = tile_net_dev_init("gbe0"); + tile_net_devs[3] = tile_net_dev_init("gbe1"); + + return 0; +} + + +#ifndef MODULE +/* + * The "network_cpus" boot argument specifies the cpus that are dedicated + * to handle ingress packets. + * + * The parameter should be in the form "network_cpus=m-n[,x-y]", where + * m, n, x, y are integer numbers that represent the cpus that can be + * neither a dedicated cpu nor a dataplane cpu. + */ +static int __init network_cpus_setup(char *str) +{ + int rc = cpulist_parse_crop(str, &network_cpus_map); + if (rc != 0) { + pr_warning("network_cpus=%s: malformed cpu list\n", + str); + } else { + + /* Remove dedicated cpus. 
*/ + cpumask_and(&network_cpus_map, &network_cpus_map, + cpu_possible_mask); + + + if (cpumask_empty(&network_cpus_map)) { + pr_warning("Ignoring network_cpus='%s'.\n", + str); + } else { + char buf[1024]; + cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map); + pr_info("Linux network CPUs: %s\n", buf); + network_cpus_used = true; + } + } + + return 0; +} +__setup("network_cpus=", network_cpus_setup); +#endif + + +module_init(tile_net_init_module); +module_exit(tile_net_cleanup); diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c index 28e1ffb13db..c78a50586c1 100644 --- a/drivers/net/tulip/de2104x.c +++ b/drivers/net/tulip/de2104x.c @@ -2021,7 +2021,6 @@ static int __devinit de_init_one (struct pci_dev *pdev, de->media_timer.data = (unsigned long) de; netif_carrier_off(dev); - netif_stop_queue(dev); /* wake up device, assign resources */ rc = pci_enable_device(pdev); diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c index a9f7d5d1a26..7064e035757 100644 --- a/drivers/net/tulip/dmfe.c +++ b/drivers/net/tulip/dmfe.c @@ -688,9 +688,6 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb, DMFE_DBUG(0, "dmfe_start_xmit", 0); - /* Resource flag check */ - netif_stop_queue(dev); - /* Too large packet check */ if (skb->len > MAX_PACKET_SIZE) { pr_err("big packet = %d\n", (u16)skb->len); @@ -698,6 +695,9 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } + /* Resource flag check */ + netif_stop_queue(dev); + spin_lock_irqsave(&db->lock, flags); /* No Tx resource check, it never happen nromally */ diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index f7e370fd8dd..73a3e0d9323 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c @@ -2051,12 +2051,16 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth) ugeth_vdbg("%s: IN", __func__); + /* + * Tell the kernel the link is down. + * Must be done before disabling the controller + * or deadlock may happen. + */ + phy_stop(phydev); + /* Disable the controller */ ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); - /* Tell the kernel the link is down */ - phy_stop(phydev); - /* Mask all interrupts */ out_be32(ugeth->uccf->p_uccm, 0x00000000); @@ -2066,9 +2070,6 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth) /* Disable Rx and Tx */ clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); - phy_disconnect(ugeth->phydev); - ugeth->phydev = NULL; - ucc_geth_memclean(ugeth); } @@ -3551,7 +3552,10 @@ static int ucc_geth_close(struct net_device *dev) napi_disable(&ugeth->napi); + cancel_work_sync(&ugeth->timeout_work); ucc_geth_stop(ugeth); + phy_disconnect(ugeth->phydev); + ugeth->phydev = NULL; free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev); @@ -3580,8 +3584,12 @@ static void ucc_geth_timeout_work(struct work_struct *work) * Must reset MAC *and* PHY. This is done by reopening * the device. 
*/ - ucc_geth_close(dev); - ucc_geth_open(dev); + netif_tx_stop_all_queues(dev); + ucc_geth_stop(ugeth); + ucc_geth_init_mac(ugeth); + /* Must start PHY here */ + phy_start(ugeth->phydev); + netif_tx_start_all_queues(dev); } netif_tx_schedule_all(dev); @@ -3595,7 +3603,6 @@ static void ucc_geth_timeout(struct net_device *dev) { struct ucc_geth_private *ugeth = netdev_priv(dev); - netif_carrier_off(dev); schedule_work(&ugeth->timeout_work); } diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h index 05a95586f3c..055b87ab4f0 100644 --- a/drivers/net/ucc_geth.h +++ b/drivers/net/ucc_geth.h @@ -899,7 +899,8 @@ struct ucc_geth_hardware_statistics { #define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size */ #define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */ -#define UCC_GETH_UTFTT_INIT 512 +#define UCC_GETH_UTFTT_INIT 256 /* 1/2 utfs + due to errata */ /* Gigabit Ethernet (1000 Mbps) */ #define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual FIFO size */ diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index b154a94de03..812edf85d6d 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -958,10 +958,6 @@ static void packetizeRx(struct hso_net *odev, unsigned char *ip_pkt, /* Packet is complete. Inject into stack. */ /* We have IP packet here */ odev->skb_rx_buf->protocol = cpu_to_be16(ETH_P_IP); - /* don't check it */ - odev->skb_rx_buf->ip_summed = - CHECKSUM_UNNECESSARY; - skb_reset_mac_header(odev->skb_rx_buf); /* Ship it off to the kernel */ @@ -2994,12 +2990,14 @@ static int hso_probe(struct usb_interface *interface, case HSO_INTF_BULK: /* It's a regular bulk interface */ - if (((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) && - !disable_net) - hso_dev = hso_create_net_device(interface, port_spec); - else + if ((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) { + if (!disable_net) + hso_dev = + hso_create_net_device(interface, port_spec); + } else { hso_dev = hso_create_bulk_serial_device(interface, port_spec); + } if (!hso_dev) goto exit; break; diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index ca7fc9df1cc..c04d49e31f8 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -45,6 +45,7 @@ #include <linux/usb/usbnet.h> #include <linux/slab.h> #include <linux/kernel.h> +#include <linux/pm_runtime.h> #define DRIVER_VERSION "22-Aug-2005" @@ -1273,6 +1274,16 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) struct usb_device *xdev; int status; const char *name; + struct usb_driver *driver = to_usb_driver(udev->dev.driver); + + /* usbnet already took usb runtime pm, so have to enable the feature + * for usb interface, otherwise usb_autopm_get_interface may return + * failure if USB_SUSPEND(RUNTIME_PM) is enabled. + */ + if (!driver->supports_autosuspend) { + driver->supports_autosuspend = 1; + pm_runtime_enable(&udev->dev); + } name = udev->dev.driver->name; info = (struct driver_info *) prod->driver_info; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index bb6b67f6b0c..b6d402806ae 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -986,9 +986,15 @@ static int virtnet_probe(struct virtio_device *vdev) goto unregister; } - vi->status = VIRTIO_NET_S_LINK_UP; - virtnet_update_status(vi); - netif_carrier_on(dev); + /* Assume link up if device can't report link status, + otherwise get link status from config. 
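	   (VIRTIO_NET_F_STATUS is the feature bit that indicates the device
	   can report link status; it is checked just below.)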
*/ + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { + netif_carrier_off(dev); + virtnet_update_status(vi); + } else { + vi->status = VIRTIO_NET_S_LINK_UP; + netif_carrier_on(dev); + } pr_debug("virtnet: registered device %s\n", dev->name); return 0; diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c index ea476cbd38b..e305274f83f 100644 --- a/drivers/net/wan/hd64572.c +++ b/drivers/net/wan/hd64572.c @@ -293,6 +293,7 @@ static inline void sca_tx_done(port_t *port) struct net_device *dev = port->netdev; card_t* card = port->card; u8 stat; + unsigned count = 0; spin_lock(&port->lock); @@ -316,10 +317,12 @@ static inline void sca_tx_done(port_t *port) dev->stats.tx_bytes += readw(&desc->len); } writeb(0, &desc->stat); /* Free descriptor */ + count++; port->txlast = (port->txlast + 1) % card->tx_ring_buffers; } - netif_wake_queue(dev); + if (count) + netif_wake_queue(dev); spin_unlock(&port->lock); } diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c index d81ad839788..24297b274cd 100644 --- a/drivers/net/wan/x25_asy.c +++ b/drivers/net/wan/x25_asy.c @@ -498,7 +498,6 @@ norbuff: static int x25_asy_close(struct net_device *dev) { struct x25_asy *sl = netdev_priv(dev); - int err; spin_lock(&sl->lock); if (sl->tty) @@ -507,10 +506,6 @@ static int x25_asy_close(struct net_device *dev) netif_stop_queue(dev); sl->rcount = 0; sl->xleft = 0; - err = lapb_unregister(dev); - if (err != LAPB_OK) - printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n", - err); spin_unlock(&sl->lock); return 0; } @@ -582,7 +577,7 @@ static int x25_asy_open_tty(struct tty_struct *tty) if (err) return err; /* Done. We have linked the TTY line to a channel. */ - return sl->dev->base_addr; + return 0; } @@ -595,6 +590,7 @@ static int x25_asy_open_tty(struct tty_struct *tty) static void x25_asy_close_tty(struct tty_struct *tty) { struct x25_asy *sl = tty->disc_data; + int err; /* First make sure we're connected. */ if (!sl || sl->magic != X25_ASY_MAGIC) @@ -605,6 +601,11 @@ static void x25_asy_close_tty(struct tty_struct *tty) dev_close(sl->dev); rtnl_unlock(); + err = lapb_unregister(sl->dev); + if (err != LAPB_OK) + printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n", + err); + tty->disc_data = NULL; sl->tty = NULL; x25_asy_free(sl); diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 8251946842e..42ed923cdb1 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -1917,7 +1917,8 @@ ath5k_beacon_send(struct ath5k_softc *sc) sc->bmisscount = 0; } - if (sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) { + if ((sc->opmode == NL80211_IFTYPE_AP && sc->num_ap_vifs > 1) || + sc->opmode == NL80211_IFTYPE_MESH_POINT) { u64 tsf = ath5k_hw_get_tsf64(ah); u32 tsftu = TSF_TO_TU(tsf); int slot = ((tsftu % sc->bintval) * ATH_BCBUF) / sc->bintval; @@ -1949,8 +1950,9 @@ ath5k_beacon_send(struct ath5k_softc *sc) /* NB: hw still stops DMA, so proceed */ } - /* refresh the beacon for AP mode */ - if (sc->opmode == NL80211_IFTYPE_AP) + /* refresh the beacon for AP or MESH mode */ + if (sc->opmode == NL80211_IFTYPE_AP || + sc->opmode == NL80211_IFTYPE_MESH_POINT) ath5k_beacon_update(sc->hw, vif); ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr); @@ -2851,7 +2853,8 @@ static int ath5k_add_interface(struct ieee80211_hw *hw, /* Assign the vap/adhoc to a beacon xmit slot. 
*/ if ((avf->opmode == NL80211_IFTYPE_AP) || - (avf->opmode == NL80211_IFTYPE_ADHOC)) { + (avf->opmode == NL80211_IFTYPE_ADHOC) || + (avf->opmode == NL80211_IFTYPE_MESH_POINT)) { int slot; WARN_ON(list_empty(&sc->bcbuf)); @@ -2870,7 +2873,7 @@ static int ath5k_add_interface(struct ieee80211_hw *hw, sc->bslot[avf->bslot] = vif; if (avf->opmode == NL80211_IFTYPE_AP) sc->num_ap_vifs++; - else + else if (avf->opmode == NL80211_IFTYPE_ADHOC) sc->num_adhoc_vifs++; } diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c index a0471f2e1c7..48261b7252d 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c @@ -410,6 +410,9 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah, val &= ~(AR_WA_BIT6 | AR_WA_BIT7); } + if (AR_SREV_9280(ah)) + val |= AR_WA_BIT22; + if (AR_SREV_9285E_20(ah)) val |= AR_WA_BIT23; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index c4182359bee..a7b82f0085d 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -55,6 +55,8 @@ #define SUB_NUM_CTL_MODES_AT_5G_40 2 /* excluding HT40, EXT-OFDM */ #define SUB_NUM_CTL_MODES_AT_2G_40 3 /* excluding HT40, EXT-OFDM, EXT-CCK */ +#define CTL(_tpower, _flag) ((_tpower) | ((_flag) << 6)) + static const struct ar9300_eeprom ar9300_default = { .eepromVersion = 2, .templateVersion = 2, @@ -290,20 +292,21 @@ static const struct ar9300_eeprom ar9300_default = { } }, .ctlPowerData_2G = { - { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } }, - { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } }, - { { {60, 1}, {60, 0}, {60, 0}, {60, 1} } }, + { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, + { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, + { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } }, - { { {60, 1}, {60, 0}, {0, 0}, {0, 0} } }, - { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } }, - { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } }, + { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } }, + { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, + { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, - { { {60, 0}, {60, 1}, {60, 1}, {60, 0} } }, - { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } }, - { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } }, + { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } }, + { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, + { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, - { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } }, - { { {60, 0}, {60, 1}, {60, 1}, {60, 1} } }, + { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } }, + { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } }, + { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } }, }, .modalHeader5G = { /* 4 idle,t1,t2,b (4 bits per setting) */ @@ -568,56 +571,56 @@ static const struct ar9300_eeprom ar9300_default = { .ctlPowerData_5G = { { { - {60, 1}, {60, 1}, {60, 1}, {60, 1}, - {60, 1}, {60, 1}, {60, 1}, {60, 0}, + CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), + CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { - {60, 1}, {60, 1}, {60, 1}, {60, 1}, - {60, 1}, {60, 1}, {60, 1}, {60, 0}, + CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), + CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { - {60, 0}, {60, 1}, {60, 0}, {60, 1}, - {60, 1}, {60, 1}, {60, 1}, {60, 1}, + CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1), + CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), } }, { { - {60, 0}, {60, 1}, {60, 1}, {60, 0}, - {60, 1}, 
{60, 0}, {60, 0}, {60, 0}, + CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0), + CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { - {60, 1}, {60, 1}, {60, 1}, {60, 0}, - {60, 0}, {60, 0}, {60, 0}, {60, 0}, + CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), + CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { - {60, 1}, {60, 1}, {60, 1}, {60, 1}, - {60, 1}, {60, 0}, {60, 0}, {60, 0}, + CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), + CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0), } }, { { - {60, 1}, {60, 1}, {60, 1}, {60, 1}, - {60, 1}, {60, 1}, {60, 1}, {60, 1}, + CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), + CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1), } }, { { - {60, 1}, {60, 1}, {60, 0}, {60, 1}, - {60, 1}, {60, 1}, {60, 1}, {60, 0}, + CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), + CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0), } }, { { - {60, 1}, {60, 0}, {60, 1}, {60, 1}, - {60, 1}, {60, 1}, {60, 0}, {60, 1}, + CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1), + CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1), } }, } @@ -1827,9 +1830,9 @@ static u16 ar9003_hw_get_direct_edge_power(struct ar9300_eeprom *eep, struct cal_ctl_data_5g *ctl_5g = eep->ctlPowerData_5G; if (is2GHz) - return ctl_2g[idx].ctlEdges[edge].tPower; + return CTL_EDGE_TPOWER(ctl_2g[idx].ctlEdges[edge]); else - return ctl_5g[idx].ctlEdges[edge].tPower; + return CTL_EDGE_TPOWER(ctl_5g[idx].ctlEdges[edge]); } static u16 ar9003_hw_get_indirect_edge_power(struct ar9300_eeprom *eep, @@ -1847,12 +1850,12 @@ static u16 ar9003_hw_get_indirect_edge_power(struct ar9300_eeprom *eep, if (is2GHz) { if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 1) < freq && - ctl_2g[idx].ctlEdges[edge - 1].flag) - return ctl_2g[idx].ctlEdges[edge - 1].tPower; + CTL_EDGE_FLAGS(ctl_2g[idx].ctlEdges[edge - 1])) + return CTL_EDGE_TPOWER(ctl_2g[idx].ctlEdges[edge - 1]); } else { if (ath9k_hw_fbin2freq(ctl_freqbin[edge - 1], 0) < freq && - ctl_5g[idx].ctlEdges[edge - 1].flag) - return ctl_5g[idx].ctlEdges[edge - 1].tPower; + CTL_EDGE_FLAGS(ctl_5g[idx].ctlEdges[edge - 1])) + return CTL_EDGE_TPOWER(ctl_5g[idx].ctlEdges[edge - 1]); } return AR9300_MAX_RATE_POWER; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h index 3c533bb983c..655b3033396 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h @@ -261,17 +261,12 @@ struct cal_tgt_pow_ht { u8 tPow2x[14]; } __packed; -struct cal_ctl_edge_pwr { - u8 tPower:6, - flag:2; -} __packed; - struct cal_ctl_data_2g { - struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_2G]; + u8 ctlEdges[AR9300_NUM_BAND_EDGES_2G]; } __packed; struct cal_ctl_data_5g { - struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_5G]; + u8 ctlEdges[AR9300_NUM_BAND_EDGES_5G]; } __packed; struct ar9300_eeprom { diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 9b8e7e3fceb..0963071e8f9 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -21,6 +21,7 @@ #include <linux/device.h> #include <linux/leds.h> #include <linux/completion.h> +#include <linux/pm_qos_params.h> #include "debug.h" #include "common.h" @@ -328,7 +329,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp); struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype); void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq); int ath_tx_setup(struct ath_softc *sc, int haltype); -void ath_drain_all_txq(struct 
ath_softc *sc, bool retry_tx); +bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx); void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx); void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an); @@ -646,6 +647,8 @@ struct ath_softc { struct ath_descdma txsdma; struct ath_ant_comb ant_comb; + + struct pm_qos_request_list pm_qos_req; }; struct ath_wiphy { diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c index 1266333f586..2bbf94d0191 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.c +++ b/drivers/net/wireless/ath/ath9k/eeprom.c @@ -240,16 +240,16 @@ u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower, for (i = 0; (i < num_band_edges) && (pRdEdgesPower[i].bChannel != AR5416_BCHAN_UNUSED); i++) { if (freq == ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel, is2GHz)) { - twiceMaxEdgePower = pRdEdgesPower[i].tPower; + twiceMaxEdgePower = CTL_EDGE_TPOWER(pRdEdgesPower[i].ctl); break; } else if ((i > 0) && (freq < ath9k_hw_fbin2freq(pRdEdgesPower[i].bChannel, is2GHz))) { if (ath9k_hw_fbin2freq(pRdEdgesPower[i - 1].bChannel, is2GHz) < freq && - pRdEdgesPower[i - 1].flag) { + CTL_EDGE_FLAGS(pRdEdgesPower[i - 1].ctl)) { twiceMaxEdgePower = - pRdEdgesPower[i - 1].tPower; + CTL_EDGE_TPOWER(pRdEdgesPower[i - 1].ctl); } break; } diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h index dacb45e1b90..dd59f09441a 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.h +++ b/drivers/net/wireless/ath/ath9k/eeprom.h @@ -233,6 +233,18 @@ #define AR9287_CHECKSUM_LOCATION (AR9287_EEP_START_LOC + 1) +#define CTL_EDGE_TPOWER(_ctl) ((_ctl) & 0x3f) +#define CTL_EDGE_FLAGS(_ctl) (((_ctl) >> 6) & 0x03) + +#define LNA_CTL_BUF_MODE BIT(0) +#define LNA_CTL_ISEL_LO BIT(1) +#define LNA_CTL_ISEL_HI BIT(2) +#define LNA_CTL_BUF_IN BIT(3) +#define LNA_CTL_FEM_BAND BIT(4) +#define LNA_CTL_LOCAL_BIAS BIT(5) +#define LNA_CTL_FORCE_XPA BIT(6) +#define LNA_CTL_USE_ANT1 BIT(7) + enum eeprom_param { EEP_NFTHRESH_5, EEP_NFTHRESH_2, @@ -378,10 +390,7 @@ struct modal_eep_header { u8 xatten2Margin[AR5416_MAX_CHAINS]; u8 ob_ch1; u8 db_ch1; - u8 useAnt1:1, - force_xpaon:1, - local_bias:1, - femBandSelectUsed:1, xlnabufin:1, xlnaisel:2, xlnabufmode:1; + u8 lna_ctl; u8 miscBits; u16 xpaBiasLvlFreq[3]; u8 futureModal[6]; @@ -535,18 +544,10 @@ struct cal_target_power_ht { u8 tPow2x[8]; } __packed; - -#ifdef __BIG_ENDIAN_BITFIELD -struct cal_ctl_edges { - u8 bChannel; - u8 flag:2, tPower:6; -} __packed; -#else struct cal_ctl_edges { u8 bChannel; - u8 tPower:6, flag:2; + u8 ctl; } __packed; -#endif struct cal_data_op_loop_ar9287 { u8 pwrPdg[2][5]; diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c index 966b9496a9d..195406db3bd 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c @@ -37,7 +37,7 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah) int addr, eep_start_loc; eep_data = (u16 *)eep; - if (ah->hw_version.devid == 0x7015) + if (AR9287_HTC_DEVID(ah)) eep_start_loc = AR9287_HTC_EEP_START_LOC; else eep_start_loc = AR9287_EEP_START_LOC; diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c index 76b4d65472d..a3ccb1b9638 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_def.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c @@ -451,9 +451,10 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah, 
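As an aside to the ar9003_eeprom.c/eeprom.h hunks above: they replace the cal_ctl_edge_pwr bitfield struct with a plain u8 plus the CTL()/CTL_EDGE_TPOWER()/CTL_EDGE_FLAGS() macros (and likewise collapse the modal-header bitfields into a single lna_ctl byte with BIT() masks). Explicit shifts and masks give a byte layout that does not depend on the compiler's bitfield ordering, which differs between big- and little-endian builds. Below is a minimal user-space sketch of the same 6+2 bit packing; it is an illustration only, not taken from the kernel sources.

#include <stdint.h>
#include <stdio.h>

#define CTL(_tpower, _flag)   ((uint8_t)((_tpower) | ((_flag) << 6)))
#define CTL_EDGE_TPOWER(_ctl) ((_ctl) & 0x3f)
#define CTL_EDGE_FLAGS(_ctl)  (((_ctl) >> 6) & 0x03)

int main(void)
{
	uint8_t ctl = CTL(60, 1);	/* tPower = 60 (low 6 bits), flag = 1 (top 2 bits) */

	/* prints "raw=0x7c tpower=60 flag=1" */
	printf("raw=0x%02x tpower=%u flag=%u\n",
	       (unsigned)ctl, (unsigned)CTL_EDGE_TPOWER(ctl),
	       (unsigned)CTL_EDGE_FLAGS(ctl));
	return 0;
}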
ath9k_hw_analog_shift_rmw(ah, AR_AN_TOP2, AR_AN_TOP2_LOCALBIAS, AR_AN_TOP2_LOCALBIAS_S, - pModal->local_bias); + !!(pModal->lna_ctl & + LNA_CTL_LOCAL_BIAS)); REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG, - pModal->force_xpaon); + !!(pModal->lna_ctl & LNA_CTL_FORCE_XPA)); } REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH, @@ -1062,15 +1063,19 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah, case 1: break; case 2: - scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN; + if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN) + scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN; + else + scaledPower = 0; break; case 3: - scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN; + if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN) + scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN; + else + scaledPower = 0; break; } - scaledPower = max((u16)0, scaledPower); - if (IS_CHAN_2GHZ(chan)) { numCtlModes = ARRAY_SIZE(ctlModesFor11g) - SUB_NUM_CTL_MODES_AT_2G_40; @@ -1428,9 +1433,9 @@ static u8 ath9k_hw_def_get_num_ant_config(struct ath_hw *ah, num_ant_config = 1; - if (pBase->version >= 0x0E0D) - if (pModal->useAnt1) - num_ant_config += 1; + if (pBase->version >= 0x0E0D && + (pModal->lna_ctl & LNA_CTL_USE_ANT1)) + num_ant_config += 1; return num_ant_config; } diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 6576f683dba..0de3c3d3c24 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -35,8 +35,14 @@ static struct usb_device_id ath9k_hif_usb_ids[] = { { USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */ { USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */ { USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */ + { USB_DEVICE(0x13D3, 0x3346) }, /* IMC Networks */ + { USB_DEVICE(0x13D3, 0x3348) }, /* Azurewave */ + { USB_DEVICE(0x13D3, 0x3349) }, /* Azurewave */ + { USB_DEVICE(0x13D3, 0x3350) }, /* Azurewave */ { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */ { USB_DEVICE(0x083A, 0xA704) }, /* SMC Networks */ + { USB_DEVICE(0x040D, 0x3801) }, /* VIA */ + { USB_DEVICE(0x1668, 0x1200) }, /* Verizon */ { }, }; @@ -540,11 +546,11 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb) return; } - usb_fill_int_urb(urb, hif_dev->udev, + usb_fill_bulk_urb(urb, hif_dev->udev, usb_rcvbulkpipe(hif_dev->udev, USB_REG_IN_PIPE), nskb->data, MAX_REG_IN_BUF_SIZE, - ath9k_hif_usb_reg_in_cb, nskb, 1); + ath9k_hif_usb_reg_in_cb, nskb); ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret) { @@ -720,11 +726,11 @@ static int ath9k_hif_usb_alloc_reg_in_urb(struct hif_device_usb *hif_dev) if (!skb) goto err; - usb_fill_int_urb(hif_dev->reg_in_urb, hif_dev->udev, + usb_fill_bulk_urb(hif_dev->reg_in_urb, hif_dev->udev, usb_rcvbulkpipe(hif_dev->udev, USB_REG_IN_PIPE), skb->data, MAX_REG_IN_BUF_SIZE, - ath9k_hif_usb_reg_in_cb, skb, 1); + ath9k_hif_usb_reg_in_cb, skb); if (usb_submit_urb(hif_dev->reg_in_urb, GFP_KERNEL) != 0) goto err; @@ -805,6 +811,8 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev) case 0x7010: case 0x7015: case 0x9018: + case 0xA704: + case 0x1200: firm_offset = AR7010_FIRMWARE_TEXT; break; default: @@ -843,14 +851,6 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev) goto err_fw_req; } - /* Alloc URBs */ - ret = ath9k_hif_usb_alloc_urbs(hif_dev); - if (ret) { - dev_err(&hif_dev->udev->dev, - "ath9k_htc: Unable to allocate URBs\n"); - goto err_urb; - } - /* Download firmware */ ret = ath9k_hif_usb_download_fw(hif_dev); if (ret) { @@ -866,16 +866,22 @@ static 
int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev) */ for (idx = 0; idx < alt->desc.bNumEndpoints; idx++) { endp = &alt->endpoint[idx].desc; - if (((endp->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) - == 0x04) && - ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) - == USB_ENDPOINT_XFER_INT)) { + if ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) + == USB_ENDPOINT_XFER_INT) { endp->bmAttributes &= ~USB_ENDPOINT_XFERTYPE_MASK; endp->bmAttributes |= USB_ENDPOINT_XFER_BULK; endp->bInterval = 0; } } + /* Alloc URBs */ + ret = ath9k_hif_usb_alloc_urbs(hif_dev); + if (ret) { + dev_err(&hif_dev->udev->dev, + "ath9k_htc: Unable to allocate URBs\n"); + goto err_urb; + } + return 0; err_fw_download: @@ -929,6 +935,8 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface, case 0x7010: case 0x7015: case 0x9018: + case 0xA704: + case 0x1200: if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x0202) hif_dev->fw_name = FIRMWARE_AR7010_1_1; else @@ -1016,6 +1024,13 @@ static int ath9k_hif_usb_suspend(struct usb_interface *interface, struct hif_device_usb *hif_dev = (struct hif_device_usb *) usb_get_intfdata(interface); + /* + * The device has to be set to FULLSLEEP mode in case no + * interface is up. + */ + if (!(hif_dev->flags & HIF_USB_START)) + ath9k_htc_suspend(hif_dev->htc_handle); + ath9k_hif_usb_dealloc_urbs(hif_dev); return 0; diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h index 75ecf6a30d2..c3b561daa6c 100644 --- a/drivers/net/wireless/ath/ath9k/htc.h +++ b/drivers/net/wireless/ath/ath9k/htc.h @@ -455,6 +455,8 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv); void ath9k_htc_ps_wakeup(struct ath9k_htc_priv *priv); void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv); void ath9k_ps_work(struct work_struct *work); +bool ath9k_htc_setpower(struct ath9k_htc_priv *priv, + enum ath9k_power_mode mode); void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv); void ath9k_init_leds(struct ath9k_htc_priv *priv); @@ -464,6 +466,7 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, u16 devid, char *product); void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug); #ifdef CONFIG_PM +void ath9k_htc_suspend(struct htc_target *htc_handle); int ath9k_htc_resume(struct htc_target *htc_handle); #endif #ifdef CONFIG_ATH9K_HTC_DEBUGFS diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 3d7b97f1b3a..8776f49ffd4 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -249,6 +249,8 @@ static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid) case 0x7010: case 0x7015: case 0x9018: + case 0xA704: + case 0x1200: priv->htc->credits = 45; break; default: @@ -889,6 +891,12 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug) } #ifdef CONFIG_PM + +void ath9k_htc_suspend(struct htc_target *htc_handle) +{ + ath9k_htc_setpower(htc_handle->drv_priv, ATH9K_PM_FULL_SLEEP); +} + int ath9k_htc_resume(struct htc_target *htc_handle) { int ret; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 9a3be8da755..51977caca47 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -63,8 +63,8 @@ static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv, return mode; } -static bool ath9k_htc_setpower(struct ath9k_htc_priv 
*priv, - enum ath9k_power_mode mode) +bool ath9k_htc_setpower(struct ath9k_htc_priv *priv, + enum ath9k_power_mode mode) { bool ret; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 3d19b5bc937..29d80ca7839 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -121,7 +121,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb) tx_hdr.data_type = ATH9K_HTC_NORMAL; } - if (ieee80211_is_data(fc)) { + if (ieee80211_is_data_qos(fc)) { qc = ieee80211_get_qos_ctl(hdr); tx_hdr.tidno = qc[0] & IEEE80211_QOS_CTL_TID_MASK; } diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index cc13ee11782..c7fbe25cc12 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -484,6 +484,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah) ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, "Failed allocating banks for " "external radio\n"); + ath9k_hw_rf_free_ext_banks(ah); return ecode; } @@ -952,9 +953,12 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode) REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION); break; case NL80211_IFTYPE_STATION: - case NL80211_IFTYPE_MONITOR: REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE); break; + default: + if (ah->is_monitoring) + REG_WRITE(ah, AR_STA_ID1, val | AR_STA_ID1_KSRCH_MODE); + break; } } @@ -1634,7 +1638,6 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period) switch (ah->opmode) { case NL80211_IFTYPE_STATION: - case NL80211_IFTYPE_MONITOR: REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon)); REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff); REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff); @@ -1663,6 +1666,14 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period) AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN; break; default: + if (ah->is_monitoring) { + REG_WRITE(ah, AR_NEXT_TBTT_TIMER, + TU_TO_USEC(next_beacon)); + REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff); + REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff); + flags |= AR_TBTT_TIMER_EN; + break; + } ath_print(ath9k_hw_common(ah), ATH_DBG_BEACON, "%s: unsupported opmode: %d\n", __func__, ah->opmode); @@ -2033,7 +2044,8 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio) val = REG_READ(ah, AR7010_GPIO_IN); return (MS(val, AR7010_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) == 0; } else if (AR_SREV_9300_20_OR_LATER(ah)) - return MS_REG_READ(AR9300, gpio) != 0; + return (MS(REG_READ(ah, AR_GPIO_IN), AR9300_GPIO_IN_VAL) & + AR_GPIO_BIT(gpio)) != 0; else if (AR_SREV_9271(ah)) return MS_REG_READ(AR9271, gpio) != 0; else if (AR_SREV_9287_11_OR_LATER(ah)) diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index d032939768b..d47d1b4b600 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -622,6 +622,7 @@ struct ath_hw { bool sw_mgmt_crypto; bool is_pciexpress; + bool is_monitoring; bool need_an_top2_fixup; u16 tx_trig_level; diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 95b41db0d86..14b8ab386da 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -661,6 +661,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) hw->flags |= IEEE80211_HW_MFP_CAPABLE; hw->wiphy->interface_modes = + BIT(NL80211_IFTYPE_P2P_GO) | + BIT(NL80211_IFTYPE_P2P_CLIENT) | 
BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_WDS) | BIT(NL80211_IFTYPE_STATION) | @@ -756,6 +758,9 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid, ath_init_leds(sc); ath_start_rfkill_poll(sc); + pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); + return 0; error_world: @@ -824,6 +829,7 @@ void ath9k_deinit_device(struct ath_softc *sc) } ieee80211_unregister_hw(hw); + pm_qos_remove_request(&sc->pm_qos_req); ath_rx_cleanup(sc); ath_tx_cleanup(sc); ath9k_deinit_softc(sc); diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index 8c13479b17c..c996963ab33 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -703,8 +703,7 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, rs->rs_phyerr = phyerr; } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr) rs->rs_status |= ATH9K_RXERR_DECRYPT; - else if ((ads.ds_rxstatus8 & AR_MichaelErr) && - rs->rs_keyix != ATH9K_RXKEYIX_INVALID) + else if (ads.ds_rxstatus8 & AR_MichaelErr) rs->rs_status |= ATH9K_RXERR_MIC; else if (ads.ds_rxstatus8 & AR_KeyMiss) rs->rs_status |= ATH9K_RXERR_DECRYPT; diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index b52f1cf8a60..c0c3464d3a8 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -93,11 +93,13 @@ void ath9k_ps_wakeup(struct ath_softc *sc) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); unsigned long flags; + enum ath9k_power_mode power_mode; spin_lock_irqsave(&sc->sc_pm_lock, flags); if (++sc->ps_usecount != 1) goto unlock; + power_mode = sc->sc_ah->power_mode; ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); /* @@ -105,10 +107,12 @@ void ath9k_ps_wakeup(struct ath_softc *sc) * useful data. Better clear them now so that they don't mess up * survey data results. */ - spin_lock(&common->cc_lock); - ath_hw_cycle_counters_update(common); - memset(&common->cc_survey, 0, sizeof(common->cc_survey)); - spin_unlock(&common->cc_lock); + if (power_mode != ATH9K_PM_AWAKE) { + spin_lock(&common->cc_lock); + ath_hw_cycle_counters_update(common); + memset(&common->cc_survey, 0, sizeof(common->cc_survey)); + spin_unlock(&common->cc_lock); + } unlock: spin_unlock_irqrestore(&sc->sc_pm_lock, flags); @@ -240,11 +244,12 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, * the relevant bits of the h/w. */ ath9k_hw_set_interrupts(ah, 0); - ath_drain_all_txq(sc, false); + stopped = ath_drain_all_txq(sc, false); spin_lock_bh(&sc->rx.pcu_lock); - stopped = ath_stoprecv(sc); + if (!ath_stoprecv(sc)) + stopped = false; /* XXX: do not flush receive queue here. 
We don't want * to flush data frames already in queue because of @@ -1217,6 +1222,7 @@ static int ath9k_start(struct ieee80211_hw *hw) ah->imask |= ATH9K_INT_CST; sc->sc_flags &= ~SC_OP_INVALID; + sc->sc_ah->is_monitoring = false; /* Disable BMISS interrupt when we're not associated */ ah->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS); @@ -1238,6 +1244,8 @@ static int ath9k_start(struct ieee80211_hw *hw) ath9k_btcoex_timer_resume(sc); } + pm_qos_update_request(&sc->pm_qos_req, 55); + mutex_unlock: mutex_unlock(&sc->mutex); @@ -1415,6 +1423,8 @@ static void ath9k_stop(struct ieee80211_hw *hw) sc->sc_flags |= SC_OP_INVALID; + pm_qos_update_request(&sc->pm_qos_req, PM_QOS_DEFAULT_VALUE); + mutex_unlock(&sc->mutex); ath_print(common, ATH_DBG_CONFIG, "Driver halt\n"); @@ -1493,8 +1503,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, ath9k_hw_set_interrupts(ah, ah->imask); if (vif->type == NL80211_IFTYPE_AP || - vif->type == NL80211_IFTYPE_ADHOC || - vif->type == NL80211_IFTYPE_MONITOR) { + vif->type == NL80211_IFTYPE_ADHOC) { sc->sc_flags |= SC_OP_ANI_RUN; ath_start_ani(common); } @@ -1511,7 +1520,6 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw, struct ath_softc *sc = aphy->sc; struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_vif *avp = (void *)vif->drv_priv; - int i; ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n"); @@ -1525,21 +1533,24 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw, if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) || (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) || (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) { + /* Disable SWBA interrupt */ + sc->sc_ah->imask &= ~ATH9K_INT_SWBA; ath9k_ps_wakeup(sc); + ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask); ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); ath9k_ps_restore(sc); + tasklet_kill(&sc->bcon_tasklet); } ath_beacon_return(sc, avp); sc->sc_flags &= ~SC_OP_BEACONS; - for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) { - if (sc->beacon.bslot[i] == vif) { - printk(KERN_DEBUG "%s: vif had allocated beacon " - "slot\n", __func__); - sc->beacon.bslot[i] = NULL; - sc->beacon.bslot_aphy[i] = NULL; - } + if (sc->nbcnvifs) { + /* Re-enable SWBA interrupt */ + sc->sc_ah->imask |= ATH9K_INT_SWBA; + ath9k_ps_wakeup(sc); + ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask); + ath9k_ps_restore(sc); } sc->nvifs--; @@ -1644,8 +1655,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) if (changed & IEEE80211_CONF_CHANGE_MONITOR) { if (conf->flags & IEEE80211_CONF_MONITOR) { ath_print(common, ATH_DBG_CONFIG, - "HW opmode set to Monitor mode\n"); - sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR; + "Monitor mode is enabled\n"); + sc->sc_ah->is_monitoring = true; + } else { + ath_print(common, ATH_DBG_CONFIG, + "Monitor mode is disabled\n"); + sc->sc_ah->is_monitoring = false; } } diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index fddb0129bb5..fdc2ec52b42 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -441,7 +441,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc) */ if (((sc->sc_ah->opmode != NL80211_IFTYPE_AP) && (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) || - (sc->sc_ah->opmode == NL80211_IFTYPE_MONITOR)) + (sc->sc_ah->is_monitoring)) rfilt |= ATH9K_RX_FILTER_PROM; if (sc->rx.rxfilter & FIF_CONTROL) @@ -518,7 +518,7 @@ bool ath_stoprecv(struct ath_softc *sc) bool stopped; spin_lock_bh(&sc->rx.rxbuflock); - ath9k_hw_stoppcurecv(ah); + ath9k_hw_abortpcurecv(ah); 
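As an aside to the ath9k pm_qos hunks above (init.c and main.c): the driver registers a CPU-DMA-latency request at init time, tightens it to 55 microseconds while the interface is running, relaxes it back to the default on stop, and removes it at teardown. The following is a hedged sketch of that lifecycle against the 2.6.37-era pm_qos_params API; the example_* names are placeholders, not kernel symbols.

#include <linux/pm_qos_params.h>

static struct pm_qos_request_list example_pm_qos_req;

static void example_init(void)
{
	/* register the request with no constraint yet */
	pm_qos_add_request(&example_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);
}

static void example_start(void)
{
	/* keep CPU DMA latency at or below 55 us while the NIC is active */
	pm_qos_update_request(&example_pm_qos_req, 55);
}

static void example_stop(void)
{
	/* drop the constraint when the interface goes down */
	pm_qos_update_request(&example_pm_qos_req, PM_QOS_DEFAULT_VALUE);
}

static void example_deinit(void)
{
	pm_qos_remove_request(&example_pm_qos_req);
}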
ath9k_hw_setrxfilter(ah, 0); stopped = ath9k_hw_stopdmarecv(ah); @@ -838,6 +838,10 @@ static bool ath9k_rx_accept(struct ath_common *common, struct ath_rx_status *rx_stats, bool *decrypt_error) { +#define is_mc_or_valid_tkip_keyix ((is_mc || \ + (rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID && \ + test_bit(rx_stats->rs_keyix, common->tkip_keymap)))) + struct ath_hw *ah = common->ah; __le16 fc; u8 rx_status_len = ah->caps.rx_status_len; @@ -879,15 +883,18 @@ static bool ath9k_rx_accept(struct ath_common *common, if (rx_stats->rs_status & ATH9K_RXERR_DECRYPT) { *decrypt_error = true; } else if (rx_stats->rs_status & ATH9K_RXERR_MIC) { + bool is_mc; /* * The MIC error bit is only valid if the frame * is not a control frame or fragment, and it was * decrypted using a valid TKIP key. */ + is_mc = !!is_multicast_ether_addr(hdr->addr1); + if (!ieee80211_is_ctl(fc) && !ieee80211_has_morefrags(fc) && !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) && - test_bit(rx_stats->rs_keyix, common->tkip_keymap)) + is_mc_or_valid_tkip_keyix) rxs->flag |= RX_FLAG_MMIC_ERROR; else rx_stats->rs_status &= ~ATH9K_RXERR_MIC; @@ -897,7 +904,7 @@ static bool ath9k_rx_accept(struct ath_common *common, * decryption and MIC failures. For monitor mode, * we also ignore the CRC error. */ - if (ah->opmode == NL80211_IFTYPE_MONITOR) { + if (ah->is_monitoring) { if (rx_stats->rs_status & ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC | ATH9K_RXERR_CRC)) diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h index 42976b0a01c..2c6a22fbb0f 100644 --- a/drivers/net/wireless/ath/ath9k/reg.h +++ b/drivers/net/wireless/ath/ath9k/reg.h @@ -703,6 +703,7 @@ #define AR_WA_RESET_EN (1 << 18) /* Sw Control to enable PCI-Reset to POR (bit 15) */ #define AR_WA_ANALOG_SHIFT (1 << 20) #define AR_WA_POR_SHORT (1 << 21) /* PCI-E Phy reset control */ +#define AR_WA_BIT22 (1 << 22) #define AR9285_WA_DEFAULT 0x004a050b #define AR9280_WA_DEFAULT 0x0040073b #define AR_WA_DEFAULT 0x0000073f @@ -865,7 +866,13 @@ #define AR_DEVID_7010(_ah) \ (((_ah)->hw_version.devid == 0x7010) || \ ((_ah)->hw_version.devid == 0x7015) || \ - ((_ah)->hw_version.devid == 0x9018)) + ((_ah)->hw_version.devid == 0x9018) || \ + ((_ah)->hw_version.devid == 0xA704) || \ + ((_ah)->hw_version.devid == 0x1200)) + +#define AR9287_HTC_DEVID(_ah) \ + (((_ah)->hw_version.devid == 0x7015) || \ + ((_ah)->hw_version.devid == 0x1200)) #define AR_RADIO_SREV_MAJOR 0xf0 #define AR_RAD5133_SREV_MAJOR 0xc0 @@ -977,11 +984,13 @@ enum { #define AR9287_GPIO_IN_VAL_S 11 #define AR9271_GPIO_IN_VAL 0xFFFF0000 #define AR9271_GPIO_IN_VAL_S 16 -#define AR9300_GPIO_IN_VAL 0x0001FFFF -#define AR9300_GPIO_IN_VAL_S 0 #define AR7010_GPIO_IN_VAL 0x0000FFFF #define AR7010_GPIO_IN_VAL_S 0 +#define AR_GPIO_IN 0x404c +#define AR9300_GPIO_IN_VAL 0x0001FFFF +#define AR9300_GPIO_IN_VAL_S 0 + #define AR_GPIO_OE_OUT (AR_SREV_9300_20_OR_LATER(ah) ? 
0x4050 : 0x404c) #define AR_GPIO_OE_OUT_DRV 0x3 #define AR_GPIO_OE_OUT_DRV_NO 0x0 diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index f2ade2402ce..aff04789f79 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1120,7 +1120,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx) } } -void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) +bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) { struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(sc->sc_ah); @@ -1128,7 +1128,7 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) int i, npend = 0; if (sc->sc_flags & SC_OP_INVALID) - return; + return true; /* Stop beacon queue */ ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); @@ -1142,25 +1142,15 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) } } - if (npend) { - int r; - - ath_print(common, ATH_DBG_FATAL, - "Failed to stop TX DMA. Resetting hardware!\n"); - - spin_lock_bh(&sc->sc_resetlock); - r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false); - if (r) - ath_print(common, ATH_DBG_FATAL, - "Unable to reset hardware; reset status %d\n", - r); - spin_unlock_bh(&sc->sc_resetlock); - } + if (npend) + ath_print(common, ATH_DBG_FATAL, "Failed to stop TX DMA!\n"); for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { if (ATH_TXQ_SETUP(sc, i)) ath_draintxq(sc, &sc->tx.txq[i], retry_tx); } + + return !npend; } void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c index ae6c006bbc5..546b4e4ec5e 100644 --- a/drivers/net/wireless/ath/carl9170/fw.c +++ b/drivers/net/wireless/ath/carl9170/fw.c @@ -291,7 +291,8 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) if (SUPP(CARL9170FW_WLANTX_CAB)) { ar->hw->wiphy->interface_modes |= - BIT(NL80211_IFTYPE_AP); + BIT(NL80211_IFTYPE_AP) | + BIT(NL80211_IFTYPE_P2P_GO); } } diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index 980ae70ea42..dc7b30b170d 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c @@ -647,7 +647,7 @@ init: } unlock: - if (err && (vif_id != -1)) { + if (err && (vif_id >= 0)) { vif_priv->active = false; bitmap_release_region(&ar->vif_bitmap, vif_id, 0); ar->vifs--; @@ -1631,7 +1631,8 @@ void *carl9170_alloc(size_t priv_size) * supports these modes. The code which will add the * additional interface_modes is in fw.c. 
*/ - hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); + hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_P2P_CLIENT); hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS | IEEE80211_HW_REPORTS_TX_ACK_STATUS | diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c index b575c865142..7e6506a77bb 100644 --- a/drivers/net/wireless/ath/carl9170/tx.c +++ b/drivers/net/wireless/ath/carl9170/tx.c @@ -810,7 +810,7 @@ static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb) mac_tmp = cpu_to_le16(AR9170_TX_MAC_HW_DURATION | AR9170_TX_MAC_BACKOFF); - mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) && + mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) & AR9170_TX_MAC_QOS); no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK); diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c index d8607f4c144..7504ed14c72 100644 --- a/drivers/net/wireless/ath/carl9170/usb.c +++ b/drivers/net/wireless/ath/carl9170/usb.c @@ -82,9 +82,11 @@ static struct usb_device_id carl9170_usb_ids[] = { { USB_DEVICE(0x07d1, 0x3c10) }, /* D-Link DWA 160 A2 */ { USB_DEVICE(0x07d1, 0x3a09) }, + /* D-Link DWA 130 D */ + { USB_DEVICE(0x07d1, 0x3a0f) }, /* Netgear WNA1000 */ { USB_DEVICE(0x0846, 0x9040) }, - /* Netgear WNDA3100 */ + /* Netgear WNDA3100 (v1) */ { USB_DEVICE(0x0846, 0x9010) }, /* Netgear WN111 v2 */ { USB_DEVICE(0x0846, 0x9001), .driver_info = CARL9170_ONE_LED }, @@ -551,12 +553,12 @@ static int carl9170_usb_flush(struct ar9170 *ar) usb_free_urb(urb); } - ret = usb_wait_anchor_empty_timeout(&ar->tx_cmd, HZ); + ret = usb_wait_anchor_empty_timeout(&ar->tx_cmd, 1000); if (ret == 0) err = -ETIMEDOUT; /* lets wait a while until the tx - queues are dried out */ - ret = usb_wait_anchor_empty_timeout(&ar->tx_anch, HZ); + ret = usb_wait_anchor_empty_timeout(&ar->tx_anch, 1000); if (ret == 0) err = -ETIMEDOUT; diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c index 9a55338d957..09e2dfd7b17 100644 --- a/drivers/net/wireless/b43/sdio.c +++ b/drivers/net/wireless/b43/sdio.c @@ -163,6 +163,7 @@ static int b43_sdio_probe(struct sdio_func *func, err_free_ssb: kfree(sdio); err_disable_func: + sdio_claim_host(func); sdio_disable_func(func); err_release_host: sdio_release_host(func); diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c index 32dee2ce5d3..d5ef696298e 100644 --- a/drivers/net/wireless/ipw2x00/libipw_module.c +++ b/drivers/net/wireless/ipw2x00/libipw_module.c @@ -54,6 +54,7 @@ #define DRV_DESCRIPTION "802.11 data/management/control stack" #define DRV_NAME "libipw" +#define DRV_PROCNAME "ieee80211" #define DRV_VERSION LIBIPW_VERSION #define DRV_COPYRIGHT "Copyright (C) 2004-2005 Intel Corporation <jketreno@linux.intel.com>" @@ -293,16 +294,16 @@ static int __init libipw_init(void) struct proc_dir_entry *e; libipw_debug_level = debug; - libipw_proc = proc_mkdir("ieee80211", init_net.proc_net); + libipw_proc = proc_mkdir(DRV_PROCNAME, init_net.proc_net); if (libipw_proc == NULL) { - LIBIPW_ERROR("Unable to create " DRV_NAME + LIBIPW_ERROR("Unable to create " DRV_PROCNAME " proc directory\n"); return -EIO; } e = proc_create("debug_level", S_IRUGO | S_IWUSR, libipw_proc, &debug_level_proc_fops); if (!e) { - remove_proc_entry(DRV_NAME, init_net.proc_net); + remove_proc_entry(DRV_PROCNAME, init_net.proc_net); libipw_proc = NULL; return -EIO; } @@ -319,7 +320,7 @@ static void __exit libipw_exit(void) #ifdef 
CONFIG_LIBIPW_DEBUG if (libipw_proc) { remove_proc_entry("debug_level", libipw_proc); - remove_proc_entry(DRV_NAME, init_net.proc_net); + remove_proc_entry(DRV_PROCNAME, init_net.proc_net); libipw_proc = NULL; } #endif /* CONFIG_LIBIPW_DEBUG */ diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index 8f8c4b73f8b..7edf8c2fb8c 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -4000,7 +4000,8 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e * "the hard way", rather than using device's scan. */ if (iwl3945_mod_params.disable_hw_scan) { - IWL_ERR(priv, "sw scan support is deprecated\n"); + dev_printk(KERN_DEBUG, &(pdev->dev), + "sw scan support is deprecated\n"); iwl3945_hw_ops.hw_scan = NULL; } diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c index 5046a000503..373930afc26 100644 --- a/drivers/net/wireless/libertas/cfg.c +++ b/drivers/net/wireless/libertas/cfg.c @@ -700,8 +700,9 @@ static void lbs_scan_worker(struct work_struct *work) if (priv->scan_channel < priv->scan_req->n_channels) { cancel_delayed_work(&priv->scan_work); - queue_delayed_work(priv->work_thread, &priv->scan_work, - msecs_to_jiffies(300)); + if (!priv->stopping) + queue_delayed_work(priv->work_thread, &priv->scan_work, + msecs_to_jiffies(300)); } /* This is the final data we are about to send */ diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h index f062ed58390..cb14c38caf3 100644 --- a/drivers/net/wireless/libertas/dev.h +++ b/drivers/net/wireless/libertas/dev.h @@ -36,6 +36,7 @@ struct lbs_private { /* CFG80211 */ struct wireless_dev *wdev; bool wiphy_registered; + bool stopping; struct cfg80211_scan_request *scan_req; u8 assoc_bss[ETH_ALEN]; u8 disassoc_reason; diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c index e5685dc317a..b4de0ca10fe 100644 --- a/drivers/net/wireless/libertas/if_sdio.c +++ b/drivers/net/wireless/libertas/if_sdio.c @@ -1170,7 +1170,6 @@ static void if_sdio_remove(struct sdio_func *func) lbs_deb_sdio("call remove card\n"); lbs_stop_card(card->priv); lbs_remove_card(card->priv); - card->priv->surpriseremoved = 1; flush_workqueue(card->workqueue); destroy_workqueue(card->workqueue); diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c index 79bcb4e5d2c..ecd4d04b2c3 100644 --- a/drivers/net/wireless/libertas/if_spi.c +++ b/drivers/net/wireless/libertas/if_spi.c @@ -1055,7 +1055,6 @@ static int __devexit libertas_spi_remove(struct spi_device *spi) lbs_stop_card(priv); lbs_remove_card(priv); /* will call free_netdev */ - priv->surpriseremoved = 1; free_irq(spi->irq, card); if_spi_terminate_spi_thread(card); if (card->pdata->teardown) diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 47ce5a6ba12..fcd1bbfc632 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c @@ -104,6 +104,7 @@ static int lbs_dev_open(struct net_device *dev) lbs_deb_enter(LBS_DEB_NET); spin_lock_irq(&priv->driver_lock); + priv->stopping = false; if (priv->connect_status == LBS_CONNECTED) netif_carrier_on(dev); @@ -131,10 +132,16 @@ static int lbs_eth_stop(struct net_device *dev) lbs_deb_enter(LBS_DEB_NET); spin_lock_irq(&priv->driver_lock); + priv->stopping = true; netif_stop_queue(dev); spin_unlock_irq(&priv->driver_lock); 
schedule_work(&priv->mcast_work); + cancel_delayed_work_sync(&priv->scan_work); + if (priv->scan_req) { + cfg80211_scan_done(priv->scan_req, false); + priv->scan_req = NULL; + } lbs_deb_leave(LBS_DEB_NET); return 0; @@ -908,8 +915,6 @@ void lbs_remove_card(struct lbs_private *priv) lbs_free_adapter(priv); lbs_cfg_free(priv); - - priv->dev = NULL; free_netdev(dev); lbs_deb_leave(LBS_DEB_MAIN); diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c index e8e2d0f4763..f3d396e7544 100644 --- a/drivers/net/wireless/orinoco/main.c +++ b/drivers/net/wireless/orinoco/main.c @@ -1392,10 +1392,9 @@ static void orinoco_process_scan_results(struct work_struct *work) orinoco_add_hostscan_results(priv, buf, len); kfree(buf); - } else if (priv->scan_request) { + } else { /* Either abort or complete the scan */ - cfg80211_scan_done(priv->scan_request, (len < 0)); - priv->scan_request = NULL; + orinoco_scan_done(priv, (len < 0)); } spin_lock_irqsave(&priv->scan_lock, flags); @@ -1684,6 +1683,8 @@ static int __orinoco_down(struct orinoco_private *priv) hermes_write_regn(hw, EVACK, 0xffff); } + orinoco_scan_done(priv, true); + /* firmware will have to reassociate */ netif_carrier_off(dev); priv->last_linkstatus = 0xffff; @@ -1762,10 +1763,7 @@ void orinoco_reset(struct work_struct *work) orinoco_unlock(priv, &flags); /* Scanning support: Notify scan cancellation */ - if (priv->scan_request) { - cfg80211_scan_done(priv->scan_request, 1); - priv->scan_request = NULL; - } + orinoco_scan_done(priv, true); if (priv->hard_reset) { err = (*priv->hard_reset)(priv); @@ -1813,6 +1811,12 @@ static int __orinoco_commit(struct orinoco_private *priv) struct net_device *dev = priv->ndev; int err = 0; + /* If we've called commit, we are reconfiguring or bringing the + * interface up. Maintaining countermeasures across this would + * be confusing, so note that we've disabled them. The port will + * be enabled later in orinoco_commit or __orinoco_up. */ + priv->tkip_cm_active = 0; + err = orinoco_hw_program_rids(priv); /* FIXME: what about netif_tx_lock */ diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c index 71b3d68b940..32954c4b243 100644 --- a/drivers/net/wireless/orinoco/orinoco_cs.c +++ b/drivers/net/wireless/orinoco/orinoco_cs.c @@ -151,20 +151,20 @@ orinoco_cs_config(struct pcmcia_device *link) goto failed; } - ret = pcmcia_request_irq(link, orinoco_interrupt); - if (ret) - goto failed; - - /* We initialize the hermes structure before completing PCMCIA - * configuration just in case the interrupt handler gets - * called. */ mem = ioport_map(link->resource[0]->start, resource_size(link->resource[0])); if (!mem) goto failed; + /* We initialize the hermes structure before completing PCMCIA + * configuration just in case the interrupt handler gets + * called. 
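As an aside to the libertas hunks above (dev.h, main.c, cfg.c): the new stopping flag is set under driver_lock so lbs_scan_worker stops re-queueing itself, after which the pending delayed work can be cancelled synchronously and any outstanding cfg80211 scan request completed. A condensed sketch of that ordering, reusing the driver's field names but with a placeholder function name:

static int example_eth_stop(struct net_device *dev)
{
	struct lbs_private *priv = netdev_priv(dev);

	spin_lock_irq(&priv->driver_lock);
	priv->stopping = true;		/* checked by the scan worker before re-queueing */
	netif_stop_queue(dev);
	spin_unlock_irq(&priv->driver_lock);

	/* no new scan work can be queued now; flush what is pending */
	cancel_delayed_work_sync(&priv->scan_work);
	if (priv->scan_req) {
		cfg80211_scan_done(priv->scan_req, false);
		priv->scan_req = NULL;
	}
	return 0;
}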
*/ hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING); + ret = pcmcia_request_irq(link, orinoco_interrupt); + if (ret) + goto failed; + ret = pcmcia_enable_device(link); if (ret) goto failed; diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c index a38a7bd25f1..b9aedf18a04 100644 --- a/drivers/net/wireless/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/orinoco/orinoco_usb.c @@ -57,7 +57,6 @@ #include <linux/fcntl.h> #include <linux/spinlock.h> #include <linux/list.h> -#include <linux/smp_lock.h> #include <linux/usb.h> #include <linux/timer.h> diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c index 4300d9db7d8..86cb54c842e 100644 --- a/drivers/net/wireless/orinoco/scan.c +++ b/drivers/net/wireless/orinoco/scan.c @@ -229,3 +229,11 @@ void orinoco_add_hostscan_results(struct orinoco_private *priv, priv->scan_request = NULL; } } + +void orinoco_scan_done(struct orinoco_private *priv, bool abort) +{ + if (priv->scan_request) { + cfg80211_scan_done(priv->scan_request, abort); + priv->scan_request = NULL; + } +} diff --git a/drivers/net/wireless/orinoco/scan.h b/drivers/net/wireless/orinoco/scan.h index 2dc4e046dbd..27281fb0a6d 100644 --- a/drivers/net/wireless/orinoco/scan.h +++ b/drivers/net/wireless/orinoco/scan.h @@ -16,5 +16,6 @@ void orinoco_add_extscan_result(struct orinoco_private *priv, void orinoco_add_hostscan_results(struct orinoco_private *dev, unsigned char *buf, size_t len); +void orinoco_scan_done(struct orinoco_private *priv, bool abort); #endif /* _ORINOCO_SCAN_H_ */ diff --git a/drivers/net/wireless/orinoco/spectrum_cs.c b/drivers/net/wireless/orinoco/spectrum_cs.c index fb859a5ad2e..db34c282e59 100644 --- a/drivers/net/wireless/orinoco/spectrum_cs.c +++ b/drivers/net/wireless/orinoco/spectrum_cs.c @@ -214,21 +214,21 @@ spectrum_cs_config(struct pcmcia_device *link) goto failed; } - ret = pcmcia_request_irq(link, orinoco_interrupt); - if (ret) - goto failed; - - /* We initialize the hermes structure before completing PCMCIA - * configuration just in case the interrupt handler gets - * called. */ mem = ioport_map(link->resource[0]->start, resource_size(link->resource[0])); if (!mem) goto failed; + /* We initialize the hermes structure before completing PCMCIA + * configuration just in case the interrupt handler gets + * called. 
*/ hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING); hw->eeprom_pda = true; + ret = pcmcia_request_irq(link, orinoco_interrupt); + if (ret) + goto failed; + ret = pcmcia_enable_device(link); if (ret) goto failed; diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c index 93505f93bf9..e5afabee60d 100644 --- a/drivers/net/wireless/orinoco/wext.c +++ b/drivers/net/wireless/orinoco/wext.c @@ -911,10 +911,10 @@ static int orinoco_ioctl_set_auth(struct net_device *dev, */ if (param->value) { priv->tkip_cm_active = 1; - ret = hermes_enable_port(hw, 0); + ret = hermes_disable_port(hw, 0); } else { priv->tkip_cm_active = 0; - ret = hermes_disable_port(hw, 0); + ret = hermes_enable_port(hw, 0); } break; diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig index eea1ef2f502..4396d4b9bfb 100644 --- a/drivers/net/wireless/rt2x00/Kconfig +++ b/drivers/net/wireless/rt2x00/Kconfig @@ -221,9 +221,6 @@ config RT2X00_LIB_LEDS boolean default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n) -comment "rt2x00 leds support disabled due to modularized LEDS_CLASS and built-in rt2x00" - depends on RT2X00_LIB=y && LEDS_CLASS=m - config RT2X00_LIB_DEBUGFS bool "Ralink debugfs support" depends on RT2X00_LIB && MAC80211_DEBUGFS diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 458bb57914a..cdbeec9f83e 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -66,8 +66,8 @@ struct netfront_cb { #define GRANT_INVALID_REF 0 -#define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE) -#define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE) +#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE) +#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE) #define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) struct netfront_info { diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index d9f51485bee..9383063d2b1 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c @@ -349,7 +349,6 @@ static struct irq_chip dino_interrupt_type = { .name = "GSC-PCI", .unmask = dino_unmask_irq, .mask = dino_mask_irq, - .ack = no_ack_irq, }; diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c index 1211974f55a..e860038b0b8 100644 --- a/drivers/parisc/eisa.c +++ b/drivers/parisc/eisa.c @@ -186,7 +186,6 @@ static struct irq_chip eisa_interrupt_type = { .name = "EISA", .unmask = eisa_unmask_irq, .mask = eisa_mask_irq, - .ack = no_ack_irq, }; static irqreturn_t eisa_irq(int wax_irq, void *intr_dev) @@ -340,7 +339,7 @@ static int __init eisa_probe(struct parisc_device *dev) setup_irq(2, &irq2_action); for (i = 0; i < 16; i++) { set_irq_chip_and_handler(i, &eisa_interrupt_type, - handle_level_irq); + handle_simple_irq); } EISA_bus = 1; diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c index cce00ed81f3..af212c6a615 100644 --- a/drivers/parisc/eisa_eeprom.c +++ b/drivers/parisc/eisa_eeprom.c @@ -24,7 +24,6 @@ #include <linux/kernel.h> #include <linux/miscdevice.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/fs.h> #include <asm/io.h> #include <asm/uaccess.h> diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c index e605298e3ae..772b1939ac2 100644 --- a/drivers/parisc/gsc.c +++ b/drivers/parisc/gsc.c @@ -143,7 +143,6 @@ static struct irq_chip gsc_asic_interrupt_type = { .name = "GSC-ASIC", .unmask = gsc_asic_unmask_irq, .mask = gsc_asic_mask_irq, - .ack = no_ack_irq, }; int 
gsc_assign_irq(struct irq_chip *type, void *data) @@ -153,7 +152,7 @@ int gsc_assign_irq(struct irq_chip *type, void *data) if (irq > GSC_IRQ_MAX) return NO_IRQ; - set_irq_chip_and_handler(irq, type, handle_level_irq); + set_irq_chip_and_handler(irq, type, handle_simple_irq); set_irq_chip_data(irq, data); return irq++; diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c index a3120a09c43..0327894bf23 100644 --- a/drivers/parisc/iosapic.c +++ b/drivers/parisc/iosapic.c @@ -669,6 +669,13 @@ printk("\n"); DBG(KERN_DEBUG "enable_irq(%d): eoi(%p, 0x%x)\n", irq, vi->eoi_addr, vi->eoi_data); iosapic_eoi(vi->eoi_addr, vi->eoi_data); +} + +static void iosapic_eoi_irq(unsigned int irq) +{ + struct vector_info *vi = get_irq_chip_data(irq); + + iosapic_eoi(vi->eoi_addr, vi->eoi_data); cpu_eoi_irq(irq); } @@ -705,6 +712,7 @@ static struct irq_chip iosapic_interrupt_type = { .unmask = iosapic_unmask_irq, .mask = iosapic_mask_irq, .ack = cpu_ack_irq, + .eoi = iosapic_eoi_irq, #ifdef CONFIG_SMP .set_affinity = iosapic_set_affinity_irq, #endif diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c index 2350e8a86ee..f2f501e5b6a 100644 --- a/drivers/parisc/led.c +++ b/drivers/parisc/led.c @@ -64,6 +64,7 @@ static unsigned int led_diskio __read_mostly = 1; static unsigned int led_lanrxtx __read_mostly = 1; static char lcd_text[32] __read_mostly; static char lcd_text_default[32] __read_mostly; +static int lcd_no_led_support __read_mostly = 0; /* KittyHawk doesn't support LED on its LCD */ static struct workqueue_struct *led_wq; @@ -115,7 +116,7 @@ lcd_info __attribute__((aligned(8))) __read_mostly = .lcd_width = 16, .lcd_cmd_reg_addr = KITTYHAWK_LCD_CMD, .lcd_data_reg_addr = KITTYHAWK_LCD_DATA, - .min_cmd_delay = 40, + .min_cmd_delay = 80, .reset_cmd1 = 0x80, .reset_cmd2 = 0xc0, }; @@ -135,6 +136,9 @@ static int start_task(void) /* Display the default text now */ if (led_type == LED_HASLCD) lcd_print( lcd_text_default ); + /* KittyHawk has no LED support on its LCD */ + if (lcd_no_led_support) return 0; + /* Create the work queue and queue the LED task */ led_wq = create_singlethread_workqueue("led_wq"); queue_delayed_work(led_wq, &led_task, 0); @@ -248,9 +252,13 @@ static int __init led_create_procfs(void) proc_pdc_root = proc_mkdir("pdc", 0); if (!proc_pdc_root) return -1; - ent = proc_create_data("led", S_IRUGO|S_IWUSR, proc_pdc_root, - &led_proc_fops, (void *)LED_NOLCD); /* LED */ - if (!ent) return -1; + + if (!lcd_no_led_support) + { + ent = proc_create_data("led", S_IRUGO|S_IWUSR, proc_pdc_root, + &led_proc_fops, (void *)LED_NOLCD); /* LED */ + if (!ent) return -1; + } if (led_type == LED_HASLCD) { @@ -692,6 +700,7 @@ int __init led_init(void) case 0x58B: /* KittyHawk DC2 100 (K200) */ printk(KERN_INFO "%s: KittyHawk-Machine (hversion 0x%x) found, " "LED detection skipped.\n", __FILE__, CPU_HVERSION); + lcd_no_led_support = 1; goto found; /* use the preinitialized values of lcd_info */ } diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c index 0846dafdfff..28241532c0f 100644 --- a/drivers/parisc/superio.c +++ b/drivers/parisc/superio.c @@ -323,7 +323,6 @@ static struct irq_chip superio_interrupt_type = { .name = SUPERIO, .unmask = superio_unmask_irq, .mask = superio_mask_irq, - .ack = no_ack_irq, }; #ifdef DEBUG_SUPERIO_INIT @@ -354,7 +353,7 @@ int superio_fixup_irq(struct pci_dev *pcidev) #endif for (i = 0; i < 16; i++) { - set_irq_chip_and_handler(i, &superio_interrupt_type, handle_level_irq); + set_irq_chip_and_handler(i, &superio_interrupt_type, handle_simple_irq); } /* 
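As an aside to the parisc hunks above (dino, eisa, gsc, superio): those irq_chips implement only mask/unmask, so the dummy .ack = no_ack_irq entries are dropped and the lines are switched from handle_level_irq to handle_simple_irq, which never calls ->ack; iosapic, which genuinely needs an end-of-interrupt, instead gains a real .eoi callback. A minimal sketch of the resulting registration pattern with the 2.6.37-era genirq API follows; the example_* names are placeholders.

#include <linux/irq.h>

static void example_mask_irq(unsigned int irq)   { /* set the mask bit in hardware */ }
static void example_unmask_irq(unsigned int irq) { /* clear the mask bit in hardware */ }

static struct irq_chip example_chip = {
	.name   = "EXAMPLE",
	.unmask = example_unmask_irq,
	.mask   = example_mask_irq,
	/* no .ack -- hence handle_simple_irq below, not handle_level_irq */
};

static void example_assign_irq(unsigned int irq, void *data)
{
	set_irq_chip_and_handler(irq, &example_chip, handle_simple_irq);
	set_irq_chip_data(irq, data);
}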
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index f01e344cf4b..98e6fdf34d3 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile @@ -49,6 +49,7 @@ obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o obj-$(CONFIG_X86_VISWS) += setup-irq.o obj-$(CONFIG_MN10300) += setup-bus.o obj-$(CONFIG_MICROBLAZE) += setup-bus.o +obj-$(CONFIG_TILE) += setup-bus.o setup-irq.o # # ACPI Related PCI FW Functions diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 5624db8c9ad..69546e9213d 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -64,49 +64,6 @@ void pci_bus_remove_resources(struct pci_bus *bus) } } -/* - * Find the highest-address bus resource below the cursor "res". If the - * cursor is NULL, return the highest resource. - */ -static struct resource *pci_bus_find_resource_prev(struct pci_bus *bus, - unsigned int type, - struct resource *res) -{ - struct resource *r, *prev = NULL; - int i; - - pci_bus_for_each_resource(bus, r, i) { - if (!r) - continue; - - if ((r->flags & IORESOURCE_TYPE_BITS) != type) - continue; - - /* If this resource is at or past the cursor, skip it */ - if (res) { - if (r == res) - continue; - if (r->end > res->end) - continue; - if (r->end == res->end && r->start > res->start) - continue; - } - - if (!prev) - prev = r; - - /* - * A small resource is higher than a large one that ends at - * the same address. - */ - if (r->end > prev->end || - (r->end == prev->end && r->start > prev->start)) - prev = r; - } - - return prev; -} - /** * pci_bus_alloc_resource - allocate a resource from a parent bus * @bus: PCI bus @@ -132,10 +89,9 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, resource_size_t), void *alignf_data) { - int ret = -ENOMEM; + int i, ret = -ENOMEM; struct resource *r; resource_size_t max = -1; - unsigned int type = res->flags & IORESOURCE_TYPE_BITS; type_mask |= IORESOURCE_IO | IORESOURCE_MEM; @@ -143,9 +99,10 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, if (!(res->flags & IORESOURCE_MEM_64)) max = PCIBIOS_MAX_MEM_32; - /* Look for space at highest addresses first */ - r = pci_bus_find_resource_prev(bus, type, NULL); - for ( ; r; r = pci_bus_find_resource_prev(bus, type, r)) { + pci_bus_for_each_resource(bus, r, i) { + if (!r) + continue; + /* type_mask must match */ if ((res->flags ^ r->flags) & type_mask) continue; diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index 0157708d474..09933eb9126 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c @@ -1417,6 +1417,11 @@ int __init enable_drhd_fault_handling(void) (unsigned long long)drhd->reg_base_addr, ret); return -1; } + + /* + * Clear any previous faults. 
+ */ + dmar_fault(iommu->irq, iommu); } return 0; diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c index 5becbdee402..2850e64deda 100644 --- a/drivers/pci/hotplug/ibmphp_ebda.c +++ b/drivers/pci/hotplug/ibmphp_ebda.c @@ -276,6 +276,12 @@ int __init ibmphp_access_ebda (void) for (;;) { offset = next_offset; + + /* Make sure what we read is still in the mapped section */ + if (WARN(offset > (ebda_sz * 1024 - 4), + "ibmphp_ebda: next read is beyond ebda_sz\n")) + break; + next_offset = readw (io_mem + offset); /* offset of next blk */ offset += 2; diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index b5a7d9bfcb2..63d5042f207 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -705,17 +705,21 @@ void pci_remove_legacy_files(struct pci_bus *b) #ifdef HAVE_PCI_MMAP -int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma) +int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma, + enum pci_mmap_api mmap_api) { - unsigned long nr, start, size; + unsigned long nr, start, size, pci_start; + if (pci_resource_len(pdev, resno) == 0) + return 0; nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; start = vma->vm_pgoff; size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1; - if (start < size && size - start >= nr) + pci_start = (mmap_api == PCI_MMAP_PROCFS) ? + pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0; + if (start >= pci_start && start < pci_start + size && + start + nr <= pci_start + size) return 1; - WARN(1, "process \"%s\" tried to map 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n", - current->comm, start, start+nr, pci_name(pdev), resno, size); return 0; } @@ -745,8 +749,15 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, if (i >= PCI_ROM_RESOURCE) return -ENODEV; - if (!pci_mmap_fits(pdev, i, vma)) + if (!pci_mmap_fits(pdev, i, vma, PCI_MMAP_SYSFS)) { + WARN(1, "process \"%s\" tried to map 0x%08lx bytes " + "at page 0x%08lx on %s BAR %d (start 0x%16Lx, size 0x%16Lx)\n", + current->comm, vma->vm_end-vma->vm_start, vma->vm_pgoff, + pci_name(pdev), i, + (u64)pci_resource_start(pdev, i), + (u64)pci_resource_len(pdev, i)); return -EINVAL; + } /* pci_mmap_page_range() expects the same kind of entry as coming * from /proc/bus/pci/ which is a "user visible" value. If this is diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index e98c8104297..710c8a29be0 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1007,6 +1007,18 @@ static int __pci_enable_device_flags(struct pci_dev *dev, int err; int i, bars = 0; + /* + * Power state could be unknown at this point, either due to a fresh + * boot or a device removal call. So get the current power state + * so that things like MSI message writing will behave as expected + * (e.g. if the device really is in D0 at enable time). 
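As an aside to the pci-sysfs.c/pci.h hunks above: pci_mmap_fits() now takes an enum pci_mmap_api because the two mmap entry points use different offset conventions -- sysfs resource files take a page offset relative to the start of the BAR, while /proc/bus/pci passes the BAR's absolute address -- and the request is accepted only when [start, start + nr) lies inside [pci_start, pci_start + size). A standalone sketch of that interval check, with simplified types and no kernel dependencies:

#include <stdbool.h>

/* All quantities are in pages, mirroring the shifts in pci_mmap_fits(). */
static bool example_mmap_fits(unsigned long start, unsigned long nr,
			      unsigned long pci_start, unsigned long size)
{
	/* the requested window must sit entirely inside the BAR */
	return start >= pci_start &&
	       start < pci_start + size &&
	       start + nr <= pci_start + size;
}

/* sysfs:  offsets are BAR-relative, so pci_start is 0.
 * procfs: offsets are absolute, so pci_start is the BAR start in pages. */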
+ */ + if (dev->pm_cap) { + u16 pmcsr; + pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); + dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK); + } + if (atomic_add_return(1, &dev->enable_cnt) > 1) return 0; /* already enabled */ diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index f5c7c382765..7d33f667386 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -22,8 +22,13 @@ extern void pci_remove_firmware_label_files(struct pci_dev *pdev); #endif extern void pci_cleanup_rom(struct pci_dev *dev); #ifdef HAVE_PCI_MMAP +enum pci_mmap_api { + PCI_MMAP_SYSFS, /* mmap on /sys/bus/pci/devices/<BDF>/resource<N> */ + PCI_MMAP_PROCFS /* mmap on /proc/bus/pci/<BDF> */ +}; extern int pci_mmap_fits(struct pci_dev *pdev, int resno, - struct vm_area_struct *vma); + struct vm_area_struct *vmai, + enum pci_mmap_api mmap_api); #endif int pci_probe_reset_function(struct pci_dev *dev); diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index 297b72c880a..27911b55c2a 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c @@ -10,7 +10,6 @@ #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> -#include <linux/smp_lock.h> #include <linux/capability.h> #include <asm/uaccess.h> #include <asm/byteorder.h> @@ -257,7 +256,7 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma) /* Make sure the caller is mapping a real resource for this device */ for (i = 0; i < PCI_ROM_RESOURCE; i++) { - if (pci_mmap_fits(dev, i, vma)) + if (pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS)) break; } diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index f5c63fe9db5..53a786fd0d4 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -2136,6 +2136,24 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, quirk_unhide_mch_dev6); +#ifdef CONFIG_TILE +/* + * The Tilera TILEmpower platform needs to set the link speed + * to 2.5GT(Giga-Transfers)/s (Gen 1). The default link speed + * setting is 5GT/s (Gen 2). 0x98 is the Link Control2 PCIe + * capability register of the PEX8624 PCIe switch. The switch + * supports link speed auto negotiation, but falsely sets + * the link speed to 5GT/s. + */ +static void __devinit quirk_tile_plx_gen1(struct pci_dev *dev) +{ + if (tile_plx_gen1) { + pci_write_config_dword(dev, 0x98, 0x1); + mdelay(50); + } +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8624, quirk_tile_plx_gen1); +#endif /* CONFIG_TILE */ #ifdef CONFIG_PCI_MSI /* Some chipsets do not support MSI. We cannot easily rely on setting @@ -2311,6 +2329,9 @@ static void __devinit nvbridge_check_legacy_irq_routing(struct pci_dev *dev) { u32 cfg; + if (!pci_find_capability(dev, PCI_CAP_ID_HT)) + return; + pci_read_config_dword(dev, 0x74, &cfg); if (cfg & ((1 << 2) | (1 << 15))) { @@ -2746,6 +2767,29 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_m DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); #endif /*CONFIG_MMC_RICOH_MMC*/ +#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP) +#define VTUNCERRMSK_REG 0x1ac +#define VTD_MSK_SPEC_ERRORS (1 << 31) +/* + * This is a quirk for masking vt-d spec defined errors to platform error + * handling logic. With out this, platforms using Intel 7500, 5500 chipsets + * (and the derivative chipsets like X58 etc) seem to generate NMI/SMI (based + * on the RAS config settings of the platform) when a vt-d fault happens. 
+ * The resulting SMI caused the system to hang. + * + * VT-d spec related errors are already handled by the VT-d OS code, so no + * need to report the same error through other channels. + */ +static void vtd_mask_spec_errors(struct pci_dev *dev) +{ + u32 word; + + pci_read_config_dword(dev, VTUNCERRMSK_REG, &word); + pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS); +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors); +#endif static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end) diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c index a87c4985326..3a5a6fcc0ea 100644 --- a/drivers/pci/xen-pcifront.c +++ b/drivers/pci/xen-pcifront.c @@ -13,7 +13,6 @@ #include <linux/spinlock.h> #include <linux/pci.h> #include <linux/msi.h> -#include <xen/xenbus.h> #include <xen/interface/io/pciif.h> #include <asm/xen/pci.h> #include <linux/interrupt.h> @@ -576,8 +575,9 @@ static pci_ers_result_t pcifront_common_process(int cmd, pcidev = pci_get_bus_and_slot(bus, devfn); if (!pcidev || !pcidev->driver) { - dev_err(&pcidev->dev, - "device or driver is NULL\n"); + dev_err(&pdev->xdev->dev, "device or AER driver is NULL\n"); + if (pcidev) + pci_dev_put(pcidev); return result; } pdrv = pcidev->driver; diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c index 8cbfa067171..96c72e90b79 100644 --- a/drivers/pcmcia/pd6729.c +++ b/drivers/pcmcia/pd6729.c @@ -725,17 +725,17 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev, return 0; - err_out_free_res2: +err_out_free_res2: if (irq_mode == 1) free_irq(dev->irq, socket); else del_timer_sync(&socket->poll_timer); - err_out_free_res: +err_out_free_res: pci_release_regions(dev); - err_out_disable: +err_out_disable: pci_disable_device(dev); - err_out_free_mem: +err_out_free_mem: kfree(socket); return ret; } diff --git a/drivers/pcmcia/pd6729.h b/drivers/pcmcia/pd6729.h index 41418d394c5..c8e84bdece3 100644 --- a/drivers/pcmcia/pd6729.h +++ b/drivers/pcmcia/pd6729.h @@ -15,7 +15,7 @@ struct pd6729_socket { int number; int card_irq; - unsigned long io_base; /* base io address of the socket */ + unsigned long io_base; /* base io address of the socket */ struct pcmcia_socket socket; struct timer_list poll_timer; }; diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c index 0ea3b29440e..81af2b3bcc0 100644 --- a/drivers/pcmcia/pxa2xx_sharpsl.c +++ b/drivers/pcmcia/pxa2xx_sharpsl.c @@ -237,7 +237,7 @@ static struct pcmcia_low_level sharpsl_pcmcia_ops __initdata = { #ifdef CONFIG_SA1100_COLLIE #include "sa11xx_base.h" -int __init pcmcia_collie_init(struct device *dev) +int __devinit pcmcia_collie_init(struct device *dev) { int ret = -ENODEV; diff --git a/drivers/pcmcia/sa1100_assabet.c b/drivers/pcmcia/sa1100_assabet.c index fd013a1ef47..f1e882272ab 100644 --- a/drivers/pcmcia/sa1100_assabet.c +++ b/drivers/pcmcia/sa1100_assabet.c @@ -130,7 +130,7 @@ static struct pcmcia_low_level assabet_pcmcia_ops = { .socket_suspend = assabet_pcmcia_socket_suspend, }; -int pcmcia_assabet_init(struct device *dev) +int __devinit pcmcia_assabet_init(struct device *dev) { int ret = -ENODEV; diff --git a/drivers/pcmcia/sa1100_cerf.c b/drivers/pcmcia/sa1100_cerf.c index 9bf088b1727..30560df8c76 100644 --- a/drivers/pcmcia/sa1100_cerf.c +++ b/drivers/pcmcia/sa1100_cerf.c @@ -97,7 +97,7 @@ static struct pcmcia_low_level cerf_pcmcia_ops = { .socket_suspend = 
cerf_pcmcia_socket_suspend, }; -int __init pcmcia_cerf_init(struct device *dev) +int __devinit pcmcia_cerf_init(struct device *dev) { int ret = -ENODEV; diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c index 945857f8c28..6b228590b3f 100644 --- a/drivers/pcmcia/sa1100_generic.c +++ b/drivers/pcmcia/sa1100_generic.c @@ -64,7 +64,7 @@ static int (*sa11x0_pcmcia_hw_init[])(struct device *dev) = { #endif }; -static int sa11x0_drv_pcmcia_probe(struct platform_device *dev) +static int __devinit sa11x0_drv_pcmcia_probe(struct platform_device *dev) { int i, ret = -ENODEV; diff --git a/drivers/pcmcia/sa1100_h3600.c b/drivers/pcmcia/sa1100_h3600.c index 56329ad575a..edf8f002889 100644 --- a/drivers/pcmcia/sa1100_h3600.c +++ b/drivers/pcmcia/sa1100_h3600.c @@ -219,7 +219,7 @@ struct pcmcia_low_level h3600_pcmcia_ops = { .socket_suspend = h3600_pcmcia_socket_suspend, }; -int __init pcmcia_h3600_init(struct device *dev) +int __devinit pcmcia_h3600_init(struct device *dev) { int ret = -ENODEV; diff --git a/drivers/pcmcia/sa1100_shannon.c b/drivers/pcmcia/sa1100_shannon.c index c4d51867a05..7ff1b43540b 100644 --- a/drivers/pcmcia/sa1100_shannon.c +++ b/drivers/pcmcia/sa1100_shannon.c @@ -113,7 +113,7 @@ static struct pcmcia_low_level shannon_pcmcia_ops = { .socket_suspend = shannon_pcmcia_socket_suspend, }; -int __init pcmcia_shannon_init(struct device *dev) +int __devinit pcmcia_shannon_init(struct device *dev) { int ret = -ENODEV; diff --git a/drivers/pcmcia/sa1100_simpad.c b/drivers/pcmcia/sa1100_simpad.c index 05bd504e6f1..c998f7aaadb 100644 --- a/drivers/pcmcia/sa1100_simpad.c +++ b/drivers/pcmcia/sa1100_simpad.c @@ -123,7 +123,7 @@ static struct pcmcia_low_level simpad_pcmcia_ops = { .socket_suspend = simpad_pcmcia_socket_suspend, }; -int __init pcmcia_simpad_init(struct device *dev) +int __devinit pcmcia_simpad_init(struct device *dev) { int ret = -ENODEV; diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c index 689e3c02edb..2fe8cb8e95c 100644 --- a/drivers/pcmcia/soc_common.c +++ b/drivers/pcmcia/soc_common.c @@ -57,14 +57,20 @@ module_param(pc_debug, int, 0644); void soc_pcmcia_debug(struct soc_pcmcia_socket *skt, const char *func, int lvl, const char *fmt, ...) { + struct va_format vaf; va_list args; if (pc_debug > lvl) { - printk(KERN_DEBUG "skt%u: %s: ", skt->nr, func); va_start(args, fmt); - vprintk(fmt, args); + + vaf.fmt = fmt; + vaf.va = &args; + + printk(KERN_DEBUG "skt%u: %s: %pV", skt->nr, func, &vaf); + va_end(args); } } +EXPORT_SYMBOL(soc_pcmcia_debug); #endif diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index 60a5a5c6b50..d235f44fd7a 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c @@ -81,6 +81,8 @@ MODULE_PARM_DESC(wapf, "WAPF value"); static int wlan_status = 1; static int bluetooth_status = 1; +static int wimax_status = -1; +static int wwan_status = -1; module_param(wlan_status, int, 0444); MODULE_PARM_DESC(wlan_status, "Set the wireless status on boot " @@ -92,6 +94,16 @@ MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot " "(0 = disabled, 1 = enabled, -1 = don't do anything). " "default is 1"); +module_param(wimax_status, int, 0444); +MODULE_PARM_DESC(wimax_status, "Set the wireless status on boot " + "(0 = disabled, 1 = enabled, -1 = don't do anything). " + "default is 1"); + +module_param(wwan_status, int, 0444); +MODULE_PARM_DESC(wwan_status, "Set the wireless status on boot " + "(0 = disabled, 1 = enabled, -1 = don't do anything). 
" + "default is 1"); + /* * Some events we use, same for all Asus */ @@ -114,6 +126,8 @@ MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot " */ #define WL_RSTS 0x01 /* internal Wifi */ #define BT_RSTS 0x02 /* internal Bluetooth */ +#define WM_RSTS 0x08 /* internal wimax */ +#define WW_RSTS 0x20 /* internal wwan */ /* LED */ #define METHOD_MLED "MLED" @@ -132,6 +146,11 @@ MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot " */ #define METHOD_WLAN "WLED" #define METHOD_BLUETOOTH "BLED" + +/* WWAN and WIMAX */ +#define METHOD_WWAN "GSMC" +#define METHOD_WIMAX "WMXC" + #define METHOD_WL_STATUS "RSTS" /* Brightness */ @@ -883,6 +902,64 @@ static ssize_t store_bluetooth(struct device *dev, } /* + * Wimax + */ +static int asus_wimax_set(struct asus_laptop *asus, int status) +{ + if (write_acpi_int(asus->handle, METHOD_WIMAX, !!status)) { + pr_warning("Error setting wimax status to %d", status); + return -EIO; + } + return 0; +} + +static ssize_t show_wimax(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct asus_laptop *asus = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", asus_wireless_status(asus, WM_RSTS)); +} + +static ssize_t store_wimax(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct asus_laptop *asus = dev_get_drvdata(dev); + + return sysfs_acpi_set(asus, buf, count, METHOD_WIMAX); +} + +/* + * Wwan + */ +static int asus_wwan_set(struct asus_laptop *asus, int status) +{ + if (write_acpi_int(asus->handle, METHOD_WWAN, !!status)) { + pr_warning("Error setting wwan status to %d", status); + return -EIO; + } + return 0; +} + +static ssize_t show_wwan(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct asus_laptop *asus = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", asus_wireless_status(asus, WW_RSTS)); +} + +static ssize_t store_wwan(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct asus_laptop *asus = dev_get_drvdata(dev); + + return sysfs_acpi_set(asus, buf, count, METHOD_WWAN); +} + +/* * Display */ static void asus_set_display(struct asus_laptop *asus, int value) @@ -1202,6 +1279,8 @@ static DEVICE_ATTR(infos, S_IRUGO, show_infos, NULL); static DEVICE_ATTR(wlan, S_IRUGO | S_IWUSR, show_wlan, store_wlan); static DEVICE_ATTR(bluetooth, S_IRUGO | S_IWUSR, show_bluetooth, store_bluetooth); +static DEVICE_ATTR(wimax, S_IRUGO | S_IWUSR, show_wimax, store_wimax); +static DEVICE_ATTR(wwan, S_IRUGO | S_IWUSR, show_wwan, store_wwan); static DEVICE_ATTR(display, S_IRUGO | S_IWUSR, show_disp, store_disp); static DEVICE_ATTR(ledd, S_IRUGO | S_IWUSR, show_ledd, store_ledd); static DEVICE_ATTR(ls_level, S_IRUGO | S_IWUSR, show_lslvl, store_lslvl); @@ -1212,6 +1291,8 @@ static struct attribute *asus_attributes[] = { &dev_attr_infos.attr, &dev_attr_wlan.attr, &dev_attr_bluetooth.attr, + &dev_attr_wimax.attr, + &dev_attr_wwan.attr, &dev_attr_display.attr, &dev_attr_ledd.attr, &dev_attr_ls_level.attr, @@ -1239,6 +1320,13 @@ static mode_t asus_sysfs_is_visible(struct kobject *kobj, } else if (attr == &dev_attr_display.attr) { supported = !acpi_check_handle(handle, METHOD_SWITCH_DISPLAY, NULL); + } else if (attr == &dev_attr_wimax.attr) { + supported = + !acpi_check_handle(asus->handle, METHOD_WIMAX, NULL); + + } else if (attr == &dev_attr_wwan.attr) { + supported = !acpi_check_handle(asus->handle, METHOD_WWAN, NULL); + } else if (attr == &dev_attr_ledd.attr) { supported = !acpi_check_handle(handle, METHOD_LEDD, 
NULL); @@ -1397,7 +1485,8 @@ static int asus_laptop_get_info(struct asus_laptop *asus) /* * The HWRS method return informations about the hardware. - * 0x80 bit is for WLAN, 0x100 for Bluetooth. + * 0x80 bit is for WLAN, 0x100 for Bluetooth, + * 0x40 for WWAN, 0x10 for WIMAX. * The significance of others is yet to be found. */ status = @@ -1440,6 +1529,12 @@ static int __devinit asus_acpi_init(struct asus_laptop *asus) if (wlan_status >= 0) asus_wlan_set(asus, !!wlan_status); + if (wimax_status >= 0) + asus_wimax_set(asus, !!wimax_status); + + if (wwan_status >= 0) + asus_wwan_set(asus, !!wwan_status); + /* Keyboard Backlight is on by default */ if (!acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_SET, NULL)) asus_kled_set(asus, 1); diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c index 462ceab93f8..0d50fbbe247 100644 --- a/drivers/platform/x86/eeepc-wmi.c +++ b/drivers/platform/x86/eeepc-wmi.c @@ -298,8 +298,8 @@ static void eeepc_wmi_notify(u32 value, void *context) kfree(obj); } -static int store_cpufv(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { int value; struct acpi_buffer input = { (acpi_size)sizeof(value), &value }; diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 1dac659b5e0..9e05af9c41c 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -172,6 +172,8 @@ static int hp_wmi_perform_query(int query, int write, u32 *buffer, bios_return = *((struct bios_return *)obj->buffer.pointer); memcpy(buffer, &bios_return.value, sizeof(bios_return.value)); + + kfree(obj); return 0; } diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c index 3c2c6b91ecb..94a114aa8e2 100644 --- a/drivers/platform/x86/ibm_rtl.c +++ b/drivers/platform/x86/ibm_rtl.c @@ -28,6 +28,7 @@ #include <linux/io.h> #include <linux/sysdev.h> #include <linux/dmi.h> +#include <linux/efi.h> #include <linux/mutex.h> #include <asm/bios_ebda.h> @@ -220,32 +221,13 @@ static void rtl_teardown_sysfs(void) { sysdev_class_unregister(&class_rtl); } -static int dmi_check_cb(const struct dmi_system_id *id) -{ - RTL_DEBUG("found IBM server '%s'\n", id->ident); - return 0; -} - -#define ibm_dmi_entry(NAME, TYPE) \ -{ \ - .ident = NAME, \ - .matches = { \ - DMI_MATCH(DMI_SYS_VENDOR, "IBM"), \ - DMI_MATCH(DMI_PRODUCT_NAME, TYPE), \ - }, \ - .callback = dmi_check_cb \ -} static struct dmi_system_id __initdata ibm_rtl_dmi_table[] = { - ibm_dmi_entry("BladeCenter LS21", "7971"), - ibm_dmi_entry("BladeCenter LS22", "7901"), - ibm_dmi_entry("BladeCenter HS21 XM", "7995"), - ibm_dmi_entry("BladeCenter HS22", "7870"), - ibm_dmi_entry("BladeCenter HS22V", "7871"), - ibm_dmi_entry("System x3550 M2", "7946"), - ibm_dmi_entry("System x3650 M2", "7947"), - ibm_dmi_entry("System x3550 M3", "7944"), - ibm_dmi_entry("System x3650 M3", "7945"), + { \ + .matches = { \ + DMI_MATCH(DMI_SYS_VENDOR, "IBM"), \ + }, \ + }, { } }; @@ -257,7 +239,7 @@ static int __init ibm_rtl_init(void) { if (force) pr_warning("ibm-rtl: module loaded by force\n"); /* first ensure that we are running on IBM HW */ - else if (!dmi_check_system(ibm_rtl_dmi_table)) + else if (efi_enabled || !dmi_check_system(ibm_rtl_dmi_table)) return -ENODEV; /* Get the address for the Extended BIOS Data Area */ @@ -302,7 +284,7 @@ static int __init ibm_rtl_init(void) { RTL_DEBUG("rtl_cmd_width = %u, rtl_cmd_type = %u\n", rtl_cmd_width, 
rtl_cmd_type); addr = ioread32(&rtl_table->cmd_port_address); - RTL_DEBUG("addr = %#llx\n", addr); + RTL_DEBUG("addr = %#llx\n", (unsigned long long)addr); plen = rtl_cmd_width/sizeof(char); rtl_cmd_addr = rtl_port_map(addr, plen); RTL_DEBUG("rtl_cmd_addr = %#llx\n", (u64)rtl_cmd_addr); diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c index 42a5469a245..35278ad7e62 100644 --- a/drivers/platform/x86/msi-wmi.c +++ b/drivers/platform/x86/msi-wmi.c @@ -43,16 +43,18 @@ MODULE_ALIAS("wmi:B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2"); #define dprintk(msg...) pr_debug(DRV_PFX msg) -#define KEYCODE_BASE 0xD0 -#define MSI_WMI_BRIGHTNESSUP KEYCODE_BASE -#define MSI_WMI_BRIGHTNESSDOWN (KEYCODE_BASE + 1) -#define MSI_WMI_VOLUMEUP (KEYCODE_BASE + 2) -#define MSI_WMI_VOLUMEDOWN (KEYCODE_BASE + 3) +#define SCANCODE_BASE 0xD0 +#define MSI_WMI_BRIGHTNESSUP SCANCODE_BASE +#define MSI_WMI_BRIGHTNESSDOWN (SCANCODE_BASE + 1) +#define MSI_WMI_VOLUMEUP (SCANCODE_BASE + 2) +#define MSI_WMI_VOLUMEDOWN (SCANCODE_BASE + 3) +#define MSI_WMI_MUTE (SCANCODE_BASE + 4) static struct key_entry msi_wmi_keymap[] = { { KE_KEY, MSI_WMI_BRIGHTNESSUP, {KEY_BRIGHTNESSUP} }, { KE_KEY, MSI_WMI_BRIGHTNESSDOWN, {KEY_BRIGHTNESSDOWN} }, { KE_KEY, MSI_WMI_VOLUMEUP, {KEY_VOLUMEUP} }, { KE_KEY, MSI_WMI_VOLUMEDOWN, {KEY_VOLUMEDOWN} }, + { KE_KEY, MSI_WMI_MUTE, {KEY_MUTE} }, { KE_END, 0} }; static ktime_t last_pressed[ARRAY_SIZE(msi_wmi_keymap) - 1]; @@ -169,7 +171,7 @@ static void msi_wmi_notify(u32 value, void *context) ktime_t diff; cur = ktime_get_real(); diff = ktime_sub(cur, last_pressed[key->code - - KEYCODE_BASE]); + SCANCODE_BASE]); /* Ignore event if the same event happened in a 50 ms timeframe -> Key press may result in 10-20 GPEs */ if (ktime_to_us(diff) < 1000 * 50) { @@ -178,7 +180,7 @@ static void msi_wmi_notify(u32 value, void *context) key->code, ktime_to_us(diff)); return; } - last_pressed[key->code - KEYCODE_BASE] = cur; + last_pressed[key->code - SCANCODE_BASE] = cur; if (key->type == KE_KEY && /* Brightness is served via acpi video driver */ diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 2d61186ad5a..e8c21994b36 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -8497,7 +8497,6 @@ static void ibm_exit(struct ibm_struct *ibm) ibm->acpi->type, dispatch_acpi_notify); ibm->flags.acpi_notify_installed = 0; - ibm->flags.acpi_notify_installed = 0; } if (ibm->flags.proc_created) { diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 06f304f46e0..4276da7291b 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -135,6 +135,7 @@ static const struct key_entry toshiba_acpi_keymap[] __initconst = { { KE_KEY, 0x141, { KEY_BRIGHTNESSUP } }, { KE_KEY, 0x142, { KEY_WLAN } }, { KE_KEY, 0x143, { KEY_PROG1 } }, + { KE_KEY, 0x17f, { KEY_FN } }, { KE_KEY, 0xb05, { KEY_PROG2 } }, { KE_KEY, 0xb06, { KEY_WWW } }, { KE_KEY, 0xb07, { KEY_MAIL } }, diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index 104b77c87ef..aecd9a9b549 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -755,7 +755,7 @@ static bool guid_already_parsed(const char *guid_string) struct wmi_block *wblock; list_for_each_entry(wblock, &wmi_block_list, list) - if (strncmp(wblock->gblock.guid, guid_string, 16) == 0) + if (memcmp(wblock->gblock.guid, guid_string, 16) == 0) return true; return false; diff --git a/drivers/pnp/isapnp/proc.c 
b/drivers/pnp/isapnp/proc.c index e73ebefdf3e..315b3112aca 100644 --- a/drivers/pnp/isapnp/proc.c +++ b/drivers/pnp/isapnp/proc.c @@ -21,7 +21,6 @@ #include <linux/isapnp.h> #include <linux/proc_fs.h> #include <linux/init.h> -#include <linux/smp_lock.h> #include <asm/uaccess.h> extern struct pnp_protocol isapnp_protocol; diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index 2d73dfcecdb..57313f4658b 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c @@ -180,7 +180,7 @@ struct pnp_protocol pnpacpi_protocol = { }; EXPORT_SYMBOL(pnpacpi_protocol); -static char *pnpacpi_get_id(struct acpi_device *device) +static char *__init pnpacpi_get_id(struct acpi_device *device) { struct acpi_hardware_id *id; diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c index 68cf0c99138..7b5080c4556 100644 --- a/drivers/rapidio/rio.c +++ b/drivers/rapidio/rio.c @@ -1159,11 +1159,11 @@ int __devinit rio_init_mports(void) list_for_each_entry(port, &rio_mports, node) { if (!request_mem_region(port->iores.start, - port->iores.end - port->iores.start, + resource_size(&port->iores), port->name)) { printk(KERN_ERR "RIO: Error requesting master port region 0x%016llx-0x%016llx\n", - (u64)port->iores.start, (u64)port->iores.end - 1); + (u64)port->iores.start, (u64)port->iores.end); rc = -ENOMEM; goto out; } diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index f1d10c974cd..ba521f0f0fa 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -911,7 +911,7 @@ out: } /** - * set_consumer_device_supply: Bind a regulator to a symbolic supply + * set_consumer_device_supply - Bind a regulator to a symbolic supply * @rdev: regulator source * @consumer_dev: device the supply applies to * @consumer_dev_name: dev_name() string for device supply applies to @@ -1052,7 +1052,6 @@ static struct regulator *create_regulator(struct regulator_dev *rdev, printk(KERN_WARNING "%s: could not add device link %s err %d\n", __func__, dev->kobj.name, err); - device_remove_file(dev, ®ulator->dev_attr); goto link_name_err; } } @@ -1268,13 +1267,17 @@ static int _regulator_enable(struct regulator_dev *rdev) { int ret, delay; - /* do we need to enable the supply regulator first */ - if (rdev->supply) { - ret = _regulator_enable(rdev->supply); - if (ret < 0) { - printk(KERN_ERR "%s: failed to enable %s: %d\n", - __func__, rdev_get_name(rdev), ret); - return ret; + if (rdev->use_count == 0) { + /* do we need to enable the supply regulator first */ + if (rdev->supply) { + mutex_lock(&rdev->supply->mutex); + ret = _regulator_enable(rdev->supply); + mutex_unlock(&rdev->supply->mutex); + if (ret < 0) { + printk(KERN_ERR "%s: failed to enable %s: %d\n", + __func__, rdev_get_name(rdev), ret); + return ret; + } } } @@ -1313,10 +1316,12 @@ static int _regulator_enable(struct regulator_dev *rdev) if (ret < 0) return ret; - if (delay >= 1000) + if (delay >= 1000) { mdelay(delay / 1000); - else if (delay) + udelay(delay % 1000); + } else if (delay) { udelay(delay); + } } else if (ret < 0) { printk(KERN_ERR "%s: is_enabled() failed for %s: %d\n", @@ -1359,6 +1364,7 @@ static int _regulator_disable(struct regulator_dev *rdev, struct regulator_dev **supply_rdev_ptr) { int ret = 0; + *supply_rdev_ptr = NULL; if (WARN(rdev->use_count <= 0, "unbalanced disables for %s\n", @@ -2346,6 +2352,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, if (init_data->supply_regulator && init_data->supply_regulator_dev) { dev_err(dev, "Supply regulator specified by both name 
and dev\n"); + ret = -EINVAL; goto scrub; } @@ -2364,6 +2371,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, if (!found) { dev_err(dev, "Failed to find supply %s\n", init_data->supply_regulator); + ret = -ENODEV; goto scrub; } diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c index 4597d508a22..ecd99f59dba 100644 --- a/drivers/regulator/mc13783-regulator.c +++ b/drivers/regulator/mc13783-regulator.c @@ -465,8 +465,8 @@ static struct regulator_ops mc13783_fixed_regulator_ops = { .get_voltage = mc13783_fixed_regulator_get_voltage, }; -int mc13783_powermisc_rmw(struct mc13783_regulator_priv *priv, u32 mask, - u32 val) +static int mc13783_powermisc_rmw(struct mc13783_regulator_priv *priv, u32 mask, + u32 val) { struct mc13783 *mc13783 = priv->mc13783; int ret; diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c index 51237fbb1bb..6d20b0454a1 100644 --- a/drivers/regulator/tps6586x-regulator.c +++ b/drivers/regulator/tps6586x-regulator.c @@ -231,8 +231,7 @@ static int tps6586x_dvm_voltages[] = { }; #define TPS6586X_REGULATOR(_id, vdata, _ops, vreg, shift, nbits, \ - ereg0, ebit0, ereg1, ebit1, goreg, gobit) \ -{ \ + ereg0, ebit0, ereg1, ebit1) \ .desc = { \ .name = "REG-" #_id, \ .ops = &tps6586x_regulator_##_ops, \ @@ -248,18 +247,26 @@ static int tps6586x_dvm_voltages[] = { .enable_bit[0] = (ebit0), \ .enable_reg[1] = TPS6586X_SUPPLY##ereg1, \ .enable_bit[1] = (ebit1), \ - .voltages = tps6586x_##vdata##_voltages, \ -} + .voltages = tps6586x_##vdata##_voltages, + +#define TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit) \ + .go_reg = TPS6586X_##goreg, \ + .go_bit = (gobit), #define TPS6586X_LDO(_id, vdata, vreg, shift, nbits, \ ereg0, ebit0, ereg1, ebit1) \ +{ \ TPS6586X_REGULATOR(_id, vdata, ldo_ops, vreg, shift, nbits, \ - ereg0, ebit0, ereg1, ebit1, 0, 0) + ereg0, ebit0, ereg1, ebit1) \ +} #define TPS6586X_DVM(_id, vdata, vreg, shift, nbits, \ ereg0, ebit0, ereg1, ebit1, goreg, gobit) \ +{ \ TPS6586X_REGULATOR(_id, vdata, dvm_ops, vreg, shift, nbits, \ - ereg0, ebit0, ereg1, ebit1, goreg, gobit) + ereg0, ebit0, ereg1, ebit1) \ + TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit) \ +} static struct tps6586x_regulator tps6586x_regulator[] = { TPS6586X_LDO(LDO_0, ldo, SUPPLYV1, 5, 3, ENC, 0, END, 0), @@ -267,11 +274,11 @@ static struct tps6586x_regulator tps6586x_regulator[] = { TPS6586X_LDO(LDO_5, ldo, SUPPLYV6, 0, 3, ENE, 6, ENE, 6), TPS6586X_LDO(LDO_6, ldo, SUPPLYV3, 0, 3, ENC, 4, END, 4), TPS6586X_LDO(LDO_7, ldo, SUPPLYV3, 3, 3, ENC, 5, END, 5), - TPS6586X_LDO(LDO_8, ldo, SUPPLYV1, 5, 3, ENC, 6, END, 6), + TPS6586X_LDO(LDO_8, ldo, SUPPLYV2, 5, 3, ENC, 6, END, 6), TPS6586X_LDO(LDO_9, ldo, SUPPLYV6, 3, 3, ENE, 7, ENE, 7), - TPS6586X_LDO(LDO_RTC, ldo, SUPPLYV4, 3, 3, ENE, 7, ENE, 7), + TPS6586X_LDO(LDO_RTC, ldo, SUPPLYV4, 3, 3, V4, 7, V4, 7), TPS6586X_LDO(LDO_1, dvm, SUPPLYV1, 0, 5, ENC, 1, END, 1), - TPS6586X_LDO(SM_2, sm2, SUPPLYV2, 0, 5, ENC, 1, END, 1), + TPS6586X_LDO(SM_2, sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7), TPS6586X_DVM(LDO_2, dvm, LDO2BV1, 0, 5, ENA, 3, ENB, 3, VCC2, 6), TPS6586X_DVM(LDO_4, ldo4, LDO4V1, 0, 5, ENC, 3, END, 3, VCC1, 6), @@ -290,6 +297,10 @@ static inline int tps6586x_regulator_preinit(struct device *parent, uint8_t val1, val2; int ret; + if (ri->enable_reg[0] == ri->enable_reg[1] && + ri->enable_bit[0] == ri->enable_bit[1]) + return 0; + ret = tps6586x_read(parent, ri->enable_reg[0], &val1); if (ret) return ret; @@ -298,14 +309,14 @@ static inline int 
tps6586x_regulator_preinit(struct device *parent, if (ret) return ret; - if (!(val2 & ri->enable_bit[1])) + if (!(val2 & (1 << ri->enable_bit[1]))) return 0; /* * The regulator is on, but it's enabled with the bit we don't * want to use, so we switch the enable bits */ - if (!(val1 & ri->enable_bit[0])) { + if (!(val1 & (1 << ri->enable_bit[0]))) { ret = tps6586x_set_bits(parent, ri->enable_reg[0], 1 << ri->enable_bit[0]); if (ret) diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c index 7e5892efc43..a57262a4fa6 100644 --- a/drivers/regulator/twl-regulator.c +++ b/drivers/regulator/twl-regulator.c @@ -219,12 +219,12 @@ static int twlreg_set_mode(struct regulator_dev *rdev, unsigned mode) return -EACCES; status = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, - message >> 8, 0x15 /* PB_WORD_MSB */ ); - if (status >= 0) + message >> 8, TWL4030_PM_MASTER_PB_WORD_MSB); + if (status < 0) return status; return twl_i2c_write_u8(TWL_MODULE_PM_MASTER, - message, 0x16 /* PB_WORD_LSB */ ); + message & 0xff, TWL4030_PM_MASTER_PB_WORD_LSB); } /*----------------------------------------------------------------------*/ diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c index 359d1e04626..f0d63892264 100644 --- a/drivers/rtc/rtc-ds1302.c +++ b/drivers/rtc/rtc-ds1302.c @@ -35,7 +35,7 @@ #ifdef CONFIG_SH_SECUREEDGE5410 #include <asm/rtc.h> -#include <mach/snapgear.h> +#include <mach/secureedge5410.h> #define RTC_RESET 0x1000 #define RTC_IODATA 0x0800 diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index 5efbd5990ff..06e41ed9323 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c @@ -761,7 +761,7 @@ err_unmap: clk_put(rtc->clk); iounmap(rtc->regbase); err_badmap: - release_resource(rtc->res); + release_mem_region(rtc->res->start, rtc->regsize); err_badres: kfree(rtc); @@ -786,7 +786,7 @@ static int __exit sh_rtc_remove(struct platform_device *pdev) } iounmap(rtc->regbase); - release_resource(rtc->res); + release_mem_region(rtc->res->start, rtc->regsize); clk_disable(rtc->clk); clk_put(rtc->clk); diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index c71d89dba30..83b4615a3b6 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c @@ -17,7 +17,6 @@ #include <linux/device.h> #include <linux/poll.h> #include <linux/mutex.h> -#include <linux/smp_lock.h> #include <linux/err.h> #include <linux/slab.h> diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index eb28fb01a38..f6489eb7e97 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c @@ -14,7 +14,6 @@ #include <linux/list.h> #include <linux/slab.h> #include <linux/types.h> -#include <linux/smp_lock.h> #include <asm/compat.h> #include <asm/ccwdev.h> diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c index 883e2db02bd..e090a307fde 100644 --- a/drivers/s390/char/tape_char.c +++ b/drivers/s390/char/tape_char.c @@ -17,7 +17,6 @@ #include <linux/types.h> #include <linux/proc_fs.h> #include <linux/mtio.h> -#include <linux/smp_lock.h> #include <linux/compat.h> #include <asm/uaccess.h> diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c index 6c408670e08..b3a3e8e8656 100644 --- a/drivers/s390/char/tape_core.c +++ b/drivers/s390/char/tape_core.c @@ -209,29 +209,79 @@ tape_state_set(struct tape_device *device, enum tape_state newstate) wake_up(&device->state_change_wq); } +struct tape_med_state_work_data { + struct tape_device *device; + enum tape_medium_state state; + struct work_struct 
work; +}; + +static void +tape_med_state_work_handler(struct work_struct *work) +{ + static char env_state_loaded[] = "MEDIUM_STATE=LOADED"; + static char env_state_unloaded[] = "MEDIUM_STATE=UNLOADED"; + struct tape_med_state_work_data *p = + container_of(work, struct tape_med_state_work_data, work); + struct tape_device *device = p->device; + char *envp[] = { NULL, NULL }; + + switch (p->state) { + case MS_UNLOADED: + pr_info("%s: The tape cartridge has been successfully " + "unloaded\n", dev_name(&device->cdev->dev)); + envp[0] = env_state_unloaded; + kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp); + break; + case MS_LOADED: + pr_info("%s: A tape cartridge has been mounted\n", + dev_name(&device->cdev->dev)); + envp[0] = env_state_loaded; + kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp); + break; + default: + break; + } + tape_put_device(device); + kfree(p); +} + +static void +tape_med_state_work(struct tape_device *device, enum tape_medium_state state) +{ + struct tape_med_state_work_data *p; + + p = kzalloc(sizeof(*p), GFP_ATOMIC); + if (p) { + INIT_WORK(&p->work, tape_med_state_work_handler); + p->device = tape_get_device(device); + p->state = state; + schedule_work(&p->work); + } +} + void tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate) { - if (device->medium_state == newstate) + enum tape_medium_state oldstate; + + oldstate = device->medium_state; + if (oldstate == newstate) return; + device->medium_state = newstate; switch(newstate){ case MS_UNLOADED: device->tape_generic_status |= GMT_DR_OPEN(~0); - if (device->medium_state == MS_LOADED) - pr_info("%s: The tape cartridge has been successfully " - "unloaded\n", dev_name(&device->cdev->dev)); + if (oldstate == MS_LOADED) + tape_med_state_work(device, MS_UNLOADED); break; case MS_LOADED: device->tape_generic_status &= ~GMT_DR_OPEN(~0); - if (device->medium_state == MS_UNLOADED) - pr_info("%s: A tape cartridge has been mounted\n", - dev_name(&device->cdev->dev)); + if (oldstate == MS_UNLOADED) + tape_med_state_work(device, MS_LOADED); break; default: - // print nothing break; } - device->medium_state = newstate; wake_up(&device->state_change_wq); } diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index 9f661426e4a..c837d7419a6 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c @@ -30,7 +30,6 @@ #include <linux/kmod.h> #include <linux/cdev.h> #include <linux/device.h> -#include <linux/smp_lock.h> #include <linux/string.h> MODULE_AUTHOR @@ -249,27 +248,25 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, char cp_command[80]; char cp_response[160]; char *onoff, *qid_string; + int rc; - memset(cp_command, 0x00, sizeof(cp_command)); - memset(cp_response, 0x00, sizeof(cp_response)); - - onoff = ((action == 1) ? "ON" : "OFF"); + onoff = ((action == 1) ? "ON" : "OFF"); qid_string = ((recording_class_AB == 1) ? " QID * " : ""); - /* + /* * The recording commands needs to be called with option QID * for guests that have previlege classes A or B. * Purging has to be done as separate step, because recording * can't be switched on as long as records are on the queue. * Doing both at the same time doesn't work. 
*/ - - if (purge) { + if (purge && (action == 1)) { + memset(cp_command, 0x00, sizeof(cp_command)); + memset(cp_response, 0x00, sizeof(cp_response)); snprintf(cp_command, sizeof(cp_command), "RECORDING %s PURGE %s", logptr->recording_name, qid_string); - cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); } @@ -279,19 +276,33 @@ static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, logptr->recording_name, onoff, qid_string); - cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); /* The recording command will usually answer with 'Command complete' * on success, but when the specific service was never connected * before then there might be an additional informational message * 'HCPCRC8072I Recording entry not found' before the - * 'Command complete'. So I use strstr rather then the strncmp. + * 'Command complete'. So I use strstr rather then the strncmp. */ if (strstr(cp_response,"Command complete")) - return 0; + rc = 0; else - return -EIO; + rc = -EIO; + /* + * If we turn recording off, we have to purge any remaining records + * afterwards, as a large number of queued records may impact z/VM + * performance. + */ + if (purge && (action == 0)) { + memset(cp_command, 0x00, sizeof(cp_command)); + memset(cp_response, 0x00, sizeof(cp_response)); + snprintf(cp_command, sizeof(cp_command), + "RECORDING %s PURGE %s", + logptr->recording_name, + qid_string); + cpcmd(cp_command, cp_response, sizeof(cp_response), NULL); + } + return rc; } diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index 1de672f2103..f7e4ae6bf15 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c @@ -13,7 +13,6 @@ #include <linux/cdev.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <asm/uaccess.h> #include <asm/cio.h> diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index a5050e21715..825951b6b83 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -635,7 +635,7 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow) init_subchannel_id(&mchk_schid); mchk_schid.sch_no = crw0->rsid; if (crw1) - mchk_schid.ssid = (crw1->rsid >> 8) & 3; + mchk_schid.ssid = (crw1->rsid >> 4) & 3; /* * Since we are always presented with IPI in the CRW, we have to diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 2ff8a22d425..e8391b89eff 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c @@ -1455,7 +1455,16 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) break; case IO_SCH_UNREG_ATTACH: case IO_SCH_UNREG: - if (cdev) + if (!cdev) + break; + if (cdev->private->state == DEV_STATE_SENSE_ID) { + /* + * Note: delayed work triggered by this event + * and repeated calls to sch_event are synchronized + * by the above check for work_pending(cdev). 
+ */ + dev_fsm_event(cdev, DEV_EVENT_NOTOPER); + } else ccw_device_set_notoper(cdev); break; case IO_SCH_NOP: diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index 752dbee06af..5d9c66627b6 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c @@ -292,8 +292,8 @@ void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) return; /* reset adapter interrupt indicators */ - put_indicator(irq_ptr->dsci); set_subchannel_ind(irq_ptr, 1); + put_indicator(irq_ptr->dsci); } void __exit tiqdio_unregister_thinints(void) diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index f5221749d18..7fca9c10ffc 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -35,7 +35,6 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/compat.h> -#include <linux/smp_lock.h> #include <linux/slab.h> #include <asm/atomic.h> #include <asm/uaccess.h> diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 6be43eb126b..f47a714538d 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -440,7 +440,6 @@ struct qeth_qdio_out_q { * index of buffer to be filled by driver; state EMPTY or PACKING */ int next_buf_to_fill; - int sync_iqdio_error; /* * number of buffers that are currently filled (PRIMED) * -> these buffers are hardware-owned @@ -695,14 +694,6 @@ struct qeth_mc_mac { int is_vmac; }; -struct qeth_skb_data { - __u32 magic; - int count; -}; - -#define QETH_SKB_MAGIC 0x71657468 -#define QETH_SIGA_CC2_RETRIES 3 - struct qeth_rx { int b_count; int b_index; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 76426706260..e6b2df0e73f 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -877,8 +877,8 @@ out: return; } -static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, - struct qeth_qdio_out_buffer *buf, unsigned int qeth_skip_skb) +static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, + struct qeth_qdio_out_buffer *buf) { int i; struct sk_buff *skb; @@ -887,13 +887,11 @@ static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, if (buf->buffer->element[0].flags & 0x40) atomic_dec(&queue->set_pci_flags_count); - if (!qeth_skip_skb) { + skb = skb_dequeue(&buf->skb_list); + while (skb) { + atomic_dec(&skb->users); + dev_kfree_skb_any(skb); skb = skb_dequeue(&buf->skb_list); - while (skb) { - atomic_dec(&skb->users); - dev_kfree_skb_any(skb); - skb = skb_dequeue(&buf->skb_list); - } } for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) { if (buf->buffer->element[i].addr && buf->is_header[i]) @@ -909,12 +907,6 @@ static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); } -static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, - struct qeth_qdio_out_buffer *buf) -{ - __qeth_clear_output_buffer(queue, buf, 0); -} - void qeth_clear_qdio_buffers(struct qeth_card *card) { int i, j; @@ -2833,7 +2825,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, } } - queue->sync_iqdio_error = 0; queue->card->dev->trans_start = jiffies; if (queue->card->options.performance_stats) { queue->card->perf_stats.outbound_do_qdio_cnt++; @@ -2849,10 +2840,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, queue->card->perf_stats.outbound_do_qdio_time += qeth_get_micros() - queue->card->perf_stats.outbound_do_qdio_start_time; - 
if (rc > 0) { - if (!(rc & QDIO_ERROR_SIGA_BUSY)) - queue->sync_iqdio_error = rc & 3; - } if (rc) { queue->card->stats.tx_errors += count; /* ignore temporary SIGA errors without busy condition */ @@ -2916,7 +2903,7 @@ void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue, { struct qeth_card *card = (struct qeth_card *)card_ptr; - if (card->dev) + if (card->dev && (card->dev->flags & IFF_UP)) napi_schedule(&card->napi); } EXPORT_SYMBOL_GPL(qeth_qdio_start_poll); @@ -2940,7 +2927,6 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; struct qeth_qdio_out_buffer *buffer; int i; - unsigned qeth_send_err; QETH_CARD_TEXT(card, 6, "qdouhdl"); if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) { @@ -2956,9 +2942,8 @@ void qeth_qdio_output_handler(struct ccw_device *ccwdev, } for (i = first_element; i < (first_element + count); ++i) { buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; - qeth_send_err = qeth_handle_send_error(card, buffer, qdio_error); - __qeth_clear_output_buffer(queue, buffer, - (qeth_send_err == QETH_SEND_ERROR_RETRY) ? 1 : 0); + qeth_handle_send_error(card, buffer, qdio_error); + qeth_clear_output_buffer(queue, buffer); } atomic_sub(count, &queue->used_buffers); /* check if we need to do something on this outbound queue */ @@ -3183,10 +3168,7 @@ int qeth_do_send_packet_fast(struct qeth_card *card, int offset, int hd_len) { struct qeth_qdio_out_buffer *buffer; - struct sk_buff *skb1; - struct qeth_skb_data *retry_ctrl; int index; - int rc; /* spin until we get the queue ... */ while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, @@ -3205,25 +3187,6 @@ int qeth_do_send_packet_fast(struct qeth_card *card, atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len); qeth_flush_buffers(queue, index, 1); - if (queue->sync_iqdio_error == 2) { - skb1 = skb_dequeue(&buffer->skb_list); - while (skb1) { - atomic_dec(&skb1->users); - skb1 = skb_dequeue(&buffer->skb_list); - } - retry_ctrl = (struct qeth_skb_data *) &skb->cb[16]; - if (retry_ctrl->magic != QETH_SKB_MAGIC) { - retry_ctrl->magic = QETH_SKB_MAGIC; - retry_ctrl->count = 0; - } - if (retry_ctrl->count < QETH_SIGA_CC2_RETRIES) { - retry_ctrl->count++; - rc = dev_queue_xmit(skb); - } else { - dev_kfree_skb_any(skb); - QETH_CARD_TEXT(card, 2, "qrdrop"); - } - } return 0; out: atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index d37c7331f24..0bcd5806bd9 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -156,6 +156,8 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter, if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) || a_status & ZFCP_STATUS_COMMON_ERP_FAILED) return 0; + if (p_status & ZFCP_STATUS_COMMON_NOESC) + return need; if (!(a_status & ZFCP_STATUS_COMMON_UNBLOCKED)) need = ZFCP_ERP_ACTION_REOPEN_ADAPTER; /* fall through */ @@ -188,6 +190,9 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &zfcp_sdev->status); erp_action = &zfcp_sdev->erp_action; + memset(erp_action, 0, sizeof(struct zfcp_erp_action)); + erp_action->port = port; + erp_action->sdev = sdev; if (!(atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_RUNNING)) act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; @@ -200,6 +205,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, zfcp_erp_action_dismiss_port(port); 
atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); erp_action = &port->erp_action; + memset(erp_action, 0, sizeof(struct zfcp_erp_action)); + erp_action->port = port; if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING)) act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; break; @@ -209,6 +216,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, zfcp_erp_action_dismiss_adapter(adapter); atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status); erp_action = &adapter->erp_action; + memset(erp_action, 0, sizeof(struct zfcp_erp_action)); if (!(atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_RUNNING)) act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; @@ -218,10 +226,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, return NULL; } - memset(erp_action, 0, sizeof(struct zfcp_erp_action)); erp_action->adapter = adapter; - erp_action->port = port; - erp_action->sdev = sdev; erp_action->action = need; erp_action->status = act_status; diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h index 938d5036016..b464ae01086 100644 --- a/drivers/s390/scsi/zfcp_fc.h +++ b/drivers/s390/scsi/zfcp_fc.h @@ -270,7 +270,7 @@ void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp, if (unlikely(rsp_flags & FCP_SNS_LEN_VAL)) { sense = (char *) &fcp_rsp[1]; if (rsp_flags & FCP_RSP_LEN_VAL) - sense += fcp_rsp->ext.fr_sns_len; + sense += fcp_rsp->ext.fr_rsp_len; sense_len = min(fcp_rsp->ext.fr_sns_len, (u32) SCSI_SENSE_BUFFERSIZE); memcpy(scsi->sense_buffer, sense, sense_len); diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index beaf0916cea..2eb7dd56ab8 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -532,9 +532,6 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req) fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; adapter->hydra_version = 0; - atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, - &adapter->status); - zfcp_fsf_link_down_info_eval(req, &qtcb->header.fsf_status_qual.link_down_info); break; @@ -854,7 +851,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd) zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); - req->data = zfcp_sdev; + req->data = sdev; req->handler = zfcp_fsf_abort_fcp_command_handler; req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; req->qtcb->header.port_handle = zfcp_sdev->port->handle; @@ -2072,8 +2069,6 @@ static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req) struct fcp_resp_with_ext *fcp_rsp; unsigned long flags; - zfcp_fsf_fcp_handler_common(req); - read_lock_irqsave(&req->adapter->abort_lock, flags); scpnt = req->data; @@ -2082,6 +2077,8 @@ static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req) return; } + zfcp_fsf_fcp_handler_common(req); + if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED); goto skip_fsfstatus; @@ -2173,12 +2170,13 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd) struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; struct zfcp_qdio *qdio = adapter->qdio; struct fsf_qtcb_bottom_io *io; + unsigned long flags; if (unlikely(!(atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_UNBLOCKED))) return -EBUSY; - spin_lock(&qdio->req_q_lock); + spin_lock_irqsave(&qdio->req_q_lock, flags); if (atomic_read(&qdio->req_q_free) <= 0) { atomic_inc(&qdio->req_q_full); goto out; @@ -2242,7 +2240,7 @@ failed_scsi_cmnd: zfcp_fsf_req_free(req); scsi_cmnd->host_scribble = NULL; out: - spin_unlock(&qdio->req_q_lock); 
+ spin_unlock_irqrestore(&qdio->req_q_lock, flags); return retval; } diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 50286d8707f..63529ed801e 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -76,8 +76,8 @@ static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result) scpnt->scsi_done(scpnt); } -static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, - void (*done) (struct scsi_cmnd *)) +static +int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt) { struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; @@ -87,7 +87,6 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt, /* reset the status for this request */ scpnt->result = 0; scpnt->host_scribble = NULL; - scpnt->scsi_done = done; scsi_result = fc_remote_port_chkready(rport); if (unlikely(scsi_result)) { diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c index 1119c535a66..20796ebc33c 100644 --- a/drivers/s390/scsi/zfcp_unit.c +++ b/drivers/s390/scsi/zfcp_unit.c @@ -142,6 +142,8 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun) return -ENOMEM; } + get_device(&port->dev); + if (device_register(&unit->dev)) { put_device(&unit->dev); return -ENOMEM; @@ -152,8 +154,6 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun) return -EINVAL; } - get_device(&port->dev); - write_lock_irq(&port->unit_list_lock); list_add_tail(&unit->list, &port->unit_list); write_unlock_irq(&port->unit_list_lock); diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index fcf08b3f52c..b7bd5b0cc7a 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c @@ -1765,7 +1765,7 @@ out: } /* End twa_scsi_eh_reset() */ /* This is the main scsi queue function to handle scsi opcodes */ -static int twa_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) +static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { int request_id, retval; TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; @@ -1812,6 +1812,8 @@ out: return retval; } /* End twa_scsi_queue() */ +static DEF_SCSI_QCMD(twa_scsi_queue) + /* This function hands scsi cdb's to the firmware */ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg) { diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c index 6a95d111d20..13e39e1fdfe 100644 --- a/drivers/scsi/3w-sas.c +++ b/drivers/scsi/3w-sas.c @@ -1501,7 +1501,7 @@ out: } /* End twl_scsi_eh_reset() */ /* This is the main scsi queue function to handle scsi opcodes */ -static int twl_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) +static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { int request_id, retval; TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata; @@ -1536,6 +1536,8 @@ out: return retval; } /* End twl_scsi_queue() */ +static DEF_SCSI_QCMD(twl_scsi_queue) + /* This function tells the controller to shut down */ static void __twl_shutdown(TW_Device_Extension *tw_dev) { diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c index b1125341f4c..7fe96ff60c5 100644 --- a/drivers/scsi/3w-xxxx.c +++ b/drivers/scsi/3w-xxxx.c @@ -1947,7 +1947,7 @@ static int tw_scsiop_test_unit_ready_complete(TW_Device_Extension *tw_dev, int r } /* End tw_scsiop_test_unit_ready_complete() */ /* This is the main scsi 
queue function to handle scsi opcodes */ -static int tw_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) +static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { unsigned char *command = SCpnt->cmnd; int request_id = 0; @@ -2023,6 +2023,8 @@ static int tw_scsi_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd return retval; } /* End tw_scsi_queue() */ +static DEF_SCSI_QCMD(tw_scsi_queue) + /* This function is the interrupt service routine */ static irqreturn_t tw_interrupt(int irq, void *dev_instance) { diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index 89fc1c8af86..f672491774e 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -167,7 +167,7 @@ MODULE_LICENSE("GPL"); #include "53c700_d.h" -STATIC int NCR_700_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *)); +STATIC int NCR_700_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *); STATIC int NCR_700_abort(struct scsi_cmnd * SCpnt); STATIC int NCR_700_bus_reset(struct scsi_cmnd * SCpnt); STATIC int NCR_700_host_reset(struct scsi_cmnd * SCpnt); @@ -1749,8 +1749,8 @@ NCR_700_intr(int irq, void *dev_id) return IRQ_RETVAL(handled); } -STATIC int -NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)) +static int +NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)) { struct NCR_700_Host_Parameters *hostdata = (struct NCR_700_Host_Parameters *)SCp->device->host->hostdata[0]; @@ -1904,6 +1904,8 @@ NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)) return 0; } +STATIC DEF_SCSI_QCMD(NCR_700_queuecommand) + STATIC int NCR_700_abort(struct scsi_cmnd * SCp) { diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c index fc0b4b81d55..f66c33b9ab4 100644 --- a/drivers/scsi/BusLogic.c +++ b/drivers/scsi/BusLogic.c @@ -2807,7 +2807,7 @@ static int BusLogic_host_reset(struct scsi_cmnd * SCpnt) Outgoing Mailbox for execution by the associated Host Adapter. 
*/ -static int BusLogic_QueueCommand(struct scsi_cmnd *Command, void (*CompletionRoutine) (struct scsi_cmnd *)) +static int BusLogic_QueueCommand_lck(struct scsi_cmnd *Command, void (*CompletionRoutine) (struct scsi_cmnd *)) { struct BusLogic_HostAdapter *HostAdapter = (struct BusLogic_HostAdapter *) Command->device->host->hostdata; struct BusLogic_TargetFlags *TargetFlags = &HostAdapter->TargetFlags[Command->device->id]; @@ -2994,6 +2994,7 @@ static int BusLogic_QueueCommand(struct scsi_cmnd *Command, void (*CompletionRou return 0; } +static DEF_SCSI_QCMD(BusLogic_QueueCommand) #if 0 /* diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h index 73f237a1ed9..649fcb31f26 100644 --- a/drivers/scsi/BusLogic.h +++ b/drivers/scsi/BusLogic.h @@ -1319,7 +1319,7 @@ static inline void BusLogic_IncrementSizeBucket(BusLogic_CommandSizeBuckets_T Co */ static const char *BusLogic_DriverInfo(struct Scsi_Host *); -static int BusLogic_QueueCommand(struct scsi_cmnd *, void (*CompletionRoutine) (struct scsi_cmnd *)); +static int BusLogic_QueueCommand(struct Scsi_Host *h, struct scsi_cmnd *); static int BusLogic_BIOSDiskParameters(struct scsi_device *, struct block_device *, sector_t, int *); static int BusLogic_ProcDirectoryInfo(struct Scsi_Host *, char *, char **, off_t, int, int); static int BusLogic_SlaveConfigure(struct scsi_device *); diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c index 5d2f148889a..9a5629f94f9 100644 --- a/drivers/scsi/NCR5380.c +++ b/drivers/scsi/NCR5380.c @@ -952,7 +952,7 @@ static void NCR5380_exit(struct Scsi_Host *instance) * Locks: host lock taken by caller */ -static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) +static int NCR5380_queue_command_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) { struct Scsi_Host *instance = cmd->device->host; struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata; @@ -1021,6 +1021,7 @@ static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) return 0; } +static DEF_SCSI_QCMD(NCR5380_queue_command) /** * NCR5380_main - NCR state machines diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h index bdc468c9e1d..fd40a32b1f6 100644 --- a/drivers/scsi/NCR5380.h +++ b/drivers/scsi/NCR5380.h @@ -313,7 +313,7 @@ static void NCR5380_print(struct Scsi_Host *instance); #endif static int NCR5380_abort(Scsi_Cmnd * cmd); static int NCR5380_bus_reset(Scsi_Cmnd * cmd); -static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)); +static int NCR5380_queue_command(struct Scsi_Host *, struct scsi_cmnd *); static int __maybe_unused NCR5380_proc_info(struct Scsi_Host *instance, char *buffer, char **start, off_t offset, int length, int inout); diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c index 6961f78742a..c91888a0a23 100644 --- a/drivers/scsi/NCR53c406a.c +++ b/drivers/scsi/NCR53c406a.c @@ -693,7 +693,7 @@ static void wait_intr(void) } #endif -static int NCR53c406a_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) +static int NCR53c406a_queue_lck(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) { int i; @@ -726,6 +726,8 @@ static int NCR53c406a_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) return 0; } +static DEF_SCSI_QCMD(NCR53c406a_queue) + static int NCR53c406a_host_reset(Scsi_Cmnd * SCpnt) { DEB(printk("NCR53c406a_reset called\n")); diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c index dbbc601948e..dc5ac6e528c 100644 --- a/drivers/scsi/a100u2w.c +++ b/drivers/scsi/a100u2w.c @@ -911,7 +911,7 @@ 
static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struc * queue the command down to the controller */ -static int inia100_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *)) +static int inia100_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *)) { struct orc_scb *scb; struct orc_host *host; /* Point to Host adapter control block */ @@ -930,6 +930,8 @@ static int inia100_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd return 0; } +static DEF_SCSI_QCMD(inia100_queue) + /***************************************************************************** Function name : inia100_abort Description : Abort a queued command. diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 29c0ed1cf50..2c93d9496d6 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -248,7 +248,7 @@ static struct aac_driver_ident aac_drivers[] = { * TODO: unify with aac_scsi_cmd(). */ -static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) +static int aac_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { struct Scsi_Host *host = cmd->device->host; struct aac_dev *dev = (struct aac_dev *)host->hostdata; @@ -267,6 +267,8 @@ static int aac_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd return (aac_scsi_cmd(cmd) ? FAILED : 0); } +static DEF_SCSI_QCMD(aac_queuecommand) + /** * aac_info - Returns the host adapter name * @shost: Scsi host to report on diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c index 0ec3da6f3e1..081c6de92bc 100644 --- a/drivers/scsi/advansys.c +++ b/drivers/scsi/advansys.c @@ -9500,7 +9500,7 @@ static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp) * in the 'scp' result field. 
*/ static int -advansys_queuecommand(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) +advansys_queuecommand_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) { struct Scsi_Host *shost = scp->device->host; int asc_res, result = 0; @@ -9525,6 +9525,8 @@ advansys_queuecommand(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) return result; } +static DEF_SCSI_QCMD(advansys_queuecommand) + static ushort __devinit AscGetEisaChipCfg(PortAddr iop_base) { PortAddr eisa_cfg_iop = (PortAddr) ASC_GET_EISA_SLOT(iop_base) | diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c index 8eab8587ff2..c5169f01c1c 100644 --- a/drivers/scsi/aha152x.c +++ b/drivers/scsi/aha152x.c @@ -1056,7 +1056,7 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete, * queue a command * */ -static int aha152x_queue(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) +static int aha152x_queue_lck(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) { #if 0 if(*SCpnt->cmnd == REQUEST_SENSE) { @@ -1070,6 +1070,8 @@ static int aha152x_queue(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) return aha152x_internal_queue(SCpnt, NULL, 0, done); } +static DEF_SCSI_QCMD(aha152x_queue) + /* * diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c index 4f785f254c1..195823a51aa 100644 --- a/drivers/scsi/aha1542.c +++ b/drivers/scsi/aha1542.c @@ -558,7 +558,7 @@ static void aha1542_intr_handle(struct Scsi_Host *shost) }; } -static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) +static int aha1542_queuecommand_lck(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) { unchar ahacmd = CMD_START_SCSI; unchar direction; @@ -718,6 +718,8 @@ static int aha1542_queuecommand(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) return 0; } +static DEF_SCSI_QCMD(aha1542_queuecommand) + /* Initialize mailboxes */ static void setup_mailboxes(int bse, struct Scsi_Host *shpnt) { diff --git a/drivers/scsi/aha1542.h b/drivers/scsi/aha1542.h index 1db538552d5..b871d2b57f9 100644 --- a/drivers/scsi/aha1542.h +++ b/drivers/scsi/aha1542.h @@ -132,7 +132,7 @@ struct ccb { /* Command Control Block 5.3 */ }; static int aha1542_detect(struct scsi_host_template *); -static int aha1542_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); +static int aha1542_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); static int aha1542_bus_reset(Scsi_Cmnd * SCpnt); static int aha1542_dev_reset(Scsi_Cmnd * SCpnt); static int aha1542_host_reset(Scsi_Cmnd * SCpnt); diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c index 0107a4cc333..d058f1ab82b 100644 --- a/drivers/scsi/aha1740.c +++ b/drivers/scsi/aha1740.c @@ -331,7 +331,7 @@ static irqreturn_t aha1740_intr_handle(int irq, void *dev_id) return IRQ_RETVAL(handled); } -static int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *)) +static int aha1740_queuecommand_lck(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *)) { unchar direction; unchar *cmd = (unchar *) SCpnt->cmnd; @@ -503,6 +503,8 @@ static int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *)) return 0; } +static DEF_SCSI_QCMD(aha1740_queuecommand) + /* Query the board for its irq_level and irq_type. Nothing else matters in enhanced mode on an EISA bus. */ diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c index 88ad8482ef5..25d06662447 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm.c +++ b/drivers/scsi/aic7xxx/aic79xx_osm.c @@ -573,7 +573,7 @@ ahd_linux_info(struct Scsi_Host *host) * Queue an SCB to the controller. 
*/ static int -ahd_linux_queue(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *)) +ahd_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *)) { struct ahd_softc *ahd; struct ahd_linux_device *dev = scsi_transport_device_data(cmd->device); @@ -588,6 +588,8 @@ ahd_linux_queue(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *)) return rtn; } +static DEF_SCSI_QCMD(ahd_linux_queue) + static struct scsi_target ** ahd_linux_target_in_softc(struct scsi_target *starget) { diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c index aeea7a61478..4a359bb307c 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_osm.c +++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c @@ -528,7 +528,7 @@ ahc_linux_info(struct Scsi_Host *host) * Queue an SCB to the controller. */ static int -ahc_linux_queue(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *)) +ahc_linux_queue_lck(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *)) { struct ahc_softc *ahc; struct ahc_linux_device *dev = scsi_transport_device_data(cmd->device); @@ -548,6 +548,8 @@ ahc_linux_queue(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *)) return rtn; } +static DEF_SCSI_QCMD(ahc_linux_queue) + static inline struct scsi_target ** ahc_linux_target_in_softc(struct scsi_target *starget) { diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c index aee73fafccc..4ff60a08df0 100644 --- a/drivers/scsi/aic7xxx_old.c +++ b/drivers/scsi/aic7xxx_old.c @@ -10234,7 +10234,7 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd, * Description: * Queue a SCB to the controller. *-F*************************************************************************/ -static int aic7xxx_queue(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) +static int aic7xxx_queue_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) { struct aic7xxx_host *p; struct aic7xxx_scb *scb; @@ -10292,6 +10292,8 @@ static int aic7xxx_queue(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) return (0); } +static DEF_SCSI_QCMD(aic7xxx_queue) + /*+F************************************************************************* * Function: * aic7xxx_bus_device_reset diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 05a78e515a2..17e3df4f016 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -85,8 +85,7 @@ static int arcmsr_abort(struct scsi_cmnd *); static int arcmsr_bus_reset(struct scsi_cmnd *); static int arcmsr_bios_param(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int *info); -static int arcmsr_queue_command(struct scsi_cmnd *cmd, - void (*done) (struct scsi_cmnd *)); +static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id); static void arcmsr_remove(struct pci_dev *pdev); @@ -2081,7 +2080,7 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb, } } -static int arcmsr_queue_command(struct scsi_cmnd *cmd, +static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) { struct Scsi_Host *host = cmd->device->host; @@ -2124,6 +2123,8 @@ static int arcmsr_queue_command(struct scsi_cmnd *cmd, return 0; } +static DEF_SCSI_QCMD(arcmsr_queue_command) + static bool arcmsr_get_hba_config(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; diff --git a/drivers/scsi/arm/acornscsi.c 
b/drivers/scsi/arm/acornscsi.c index 918ccf81875..ec166726b31 100644 --- a/drivers/scsi/arm/acornscsi.c +++ b/drivers/scsi/arm/acornscsi.c @@ -2511,7 +2511,7 @@ acornscsi_intr(int irq, void *dev_id) * done - function called on completion, with pointer to command descriptor * Returns : 0, or < 0 on error. */ -int acornscsi_queuecmd(struct scsi_cmnd *SCpnt, +static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata; @@ -2561,6 +2561,8 @@ int acornscsi_queuecmd(struct scsi_cmnd *SCpnt, return 0; } +DEF_SCSI_QCMD(acornscsi_queuecmd) + /* * Prototype: void acornscsi_reportstatus(struct scsi_cmnd **SCpntp1, struct scsi_cmnd **SCpntp2, int result) * Purpose : pass a result to *SCpntp1, and check if *SCpntp1 = *SCpntp2 diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index 9e71ac61114..2b2ce21e227 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c @@ -2198,7 +2198,7 @@ no_command: * Returns: 0 on success, else error. * Notes: io_request_lock is held, interrupts are disabled. */ -int fas216_queue_command(struct scsi_cmnd *SCpnt, +static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; @@ -2240,6 +2240,8 @@ int fas216_queue_command(struct scsi_cmnd *SCpnt, return result; } +DEF_SCSI_QCMD(fas216_queue_command) + /** * fas216_internal_done - trigger restart of a waiting thread in fas216_noqueue_command * @SCpnt: Command to wake @@ -2263,7 +2265,7 @@ static void fas216_internal_done(struct scsi_cmnd *SCpnt) * Returns: scsi result code. * Notes: io_request_lock is held, interrupts are disabled. */ -int fas216_noqueue_command(struct scsi_cmnd *SCpnt, +static int fas216_noqueue_command_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { FAS216_Info *info = (FAS216_Info *)SCpnt->device->host->hostdata; @@ -2277,7 +2279,7 @@ int fas216_noqueue_command(struct scsi_cmnd *SCpnt, BUG_ON(info->scsi.irq != NO_IRQ); info->internal_done = 0; - fas216_queue_command(SCpnt, fas216_internal_done); + fas216_queue_command_lck(SCpnt, fas216_internal_done); /* * This wastes time, since we can't return until the command is @@ -2310,6 +2312,8 @@ int fas216_noqueue_command(struct scsi_cmnd *SCpnt, return 0; } +DEF_SCSI_QCMD(fas216_noqueue_command) + /* * Error handler timeout function. Indicate that we timed out, * and wake up any error handler process so it can continue. diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h index b65f4cf0eec..f30f8d659dc 100644 --- a/drivers/scsi/arm/fas216.h +++ b/drivers/scsi/arm/fas216.h @@ -331,23 +331,21 @@ extern int fas216_init (struct Scsi_Host *instance); */ extern int fas216_add (struct Scsi_Host *instance, struct device *dev); -/* Function: int fas216_queue_command(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) +/* Function: int fas216_queue_command(struct Scsi_Host *h, struct scsi_cmnd *SCpnt) * Purpose : queue a command for adapter to process. 
- * Params : SCpnt - Command to queue - * done - done function to call once command is complete + * Params : h - host adapter + * : SCpnt - Command to queue * Returns : 0 - success, else error */ -extern int fas216_queue_command(struct scsi_cmnd *, - void (*done)(struct scsi_cmnd *)); +extern int fas216_queue_command(struct Scsi_Host *h, struct scsi_cmnd *SCpnt); -/* Function: int fas216_noqueue_command(istruct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) +/* Function: int fas216_noqueue_command(struct Scsi_Host *h, struct scsi_cmnd *SCpnt) * Purpose : queue a command for adapter to process, and process it to completion. - * Params : SCpnt - Command to queue - * done - done function to call once command is complete + * Params : h - host adapter + * : SCpnt - Command to queue * Returns : 0 - success, else error */ -extern int fas216_noqueue_command(struct scsi_cmnd *, - void (*done)(struct scsi_cmnd *)); +extern int fas216_noqueue_command(struct Scsi_Host *, struct scsi_cmnd *); /* Function: irqreturn_t fas216_intr (FAS216_Info *info) * Purpose : handle interrupts from the interface to progress a command diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c index 158ebc3644d..88b2928b4d3 100644 --- a/drivers/scsi/atari_NCR5380.c +++ b/drivers/scsi/atari_NCR5380.c @@ -910,7 +910,7 @@ static int __init NCR5380_init(struct Scsi_Host *instance, int flags) * */ -static int NCR5380_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) +static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) { SETUP_HOSTDATA(cmd->device->host); Scsi_Cmnd *tmp; @@ -1022,6 +1022,8 @@ static int NCR5380_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) return 0; } +static DEF_SCSI_QCMD(NCR5380_queue_command) + /* * Function : NCR5380_main (void) * diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index ad7a23aef0e..3e8658e2f15 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c @@ -572,23 +572,6 @@ static void falcon_get_lock(void) } -/* This is the wrapper function for NCR5380_queue_command(). It just - * tries to get the lock on the ST-DMA (see above) and then calls the - * original function. - */ - -#if 0 -int atari_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) -{ - /* falcon_get_lock(); - * ++guenther: moved to NCR5380_queue_command() to prevent - * race condition, see there for an explanation. - */ - return NCR5380_queue_command(cmd, done); -} -#endif - - int __init atari_scsi_detect(struct scsi_host_template *host) { static int called = 0; diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c index ab5bdda6903..76029d570be 100644 --- a/drivers/scsi/atp870u.c +++ b/drivers/scsi/atp870u.c @@ -605,7 +605,7 @@ handled: * * Queue a command to the ATP queue. Called with the host lock held. 
*/ -static int atp870u_queuecommand(struct scsi_cmnd * req_p, +static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p, void (*done) (struct scsi_cmnd *)) { unsigned char c; @@ -694,6 +694,8 @@ static int atp870u_queuecommand(struct scsi_cmnd * req_p, return 0; } +static DEF_SCSI_QCMD(atp870u_queuecommand) + /** * send_s870 - send a command to the controller * @host: host diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h index ceaac65a91f..ff2bd07161f 100644 --- a/drivers/scsi/bfa/bfa.h +++ b/drivers/scsi/bfa/bfa.h @@ -29,13 +29,13 @@ struct bfa_s; typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m); typedef void (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete); -/** +/* * Interrupt message handlers */ void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m); void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func); -/** +/* * Request and response queue related defines */ #define BFA_REQQ_NELEMS_MIN (4) @@ -58,9 +58,9 @@ void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func); #define bfa_reqq_produce(__bfa, __reqq) do { \ (__bfa)->iocfc.req_cq_pi[__reqq]++; \ (__bfa)->iocfc.req_cq_pi[__reqq] &= \ - ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \ - bfa_reg_write((__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq], \ - (__bfa)->iocfc.req_cq_pi[__reqq]); \ + ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \ + writel((__bfa)->iocfc.req_cq_pi[__reqq], \ + (__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq]); \ mmiowb(); \ } while (0) @@ -76,7 +76,7 @@ void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func); (__index) &= ((__size) - 1); \ } while (0) -/** +/* * Queue element to wait for room in request queue. FIFO order is * maintained when fullfilling requests. */ @@ -86,7 +86,7 @@ struct bfa_reqq_wait_s { void *cbarg; }; -/** +/* * Circular queue usage assignments */ enum { @@ -113,7 +113,7 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg), #define bfa_reqq(__bfa, __reqq) (&(__bfa)->reqq_waitq[__reqq]) -/** +/* * static inline void * bfa_reqq_wait(struct bfa_s *bfa, int reqq, struct bfa_reqq_wait_s *wqe) */ @@ -130,7 +130,7 @@ bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg), #define bfa_reqq_wcancel(__wqe) list_del(&(__wqe)->qe) -/** +/* * Generic BFA callback element. 
*/ struct bfa_cb_qe_s { @@ -163,7 +163,7 @@ struct bfa_cb_qe_s { } while (0) -/** +/* * PCI devices supported by the current BFA */ struct bfa_pciid_s { @@ -173,7 +173,7 @@ struct bfa_pciid_s { extern char bfa_version[]; -/** +/* * BFA memory resources */ enum bfa_mem_type { @@ -202,19 +202,19 @@ struct bfa_meminfo_s { ((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp) struct bfa_iocfc_regs_s { - bfa_os_addr_t intr_status; - bfa_os_addr_t intr_mask; - bfa_os_addr_t cpe_q_pi[BFI_IOC_MAX_CQS]; - bfa_os_addr_t cpe_q_ci[BFI_IOC_MAX_CQS]; - bfa_os_addr_t cpe_q_depth[BFI_IOC_MAX_CQS]; - bfa_os_addr_t cpe_q_ctrl[BFI_IOC_MAX_CQS]; - bfa_os_addr_t rme_q_ci[BFI_IOC_MAX_CQS]; - bfa_os_addr_t rme_q_pi[BFI_IOC_MAX_CQS]; - bfa_os_addr_t rme_q_depth[BFI_IOC_MAX_CQS]; - bfa_os_addr_t rme_q_ctrl[BFI_IOC_MAX_CQS]; + void __iomem *intr_status; + void __iomem *intr_mask; + void __iomem *cpe_q_pi[BFI_IOC_MAX_CQS]; + void __iomem *cpe_q_ci[BFI_IOC_MAX_CQS]; + void __iomem *cpe_q_depth[BFI_IOC_MAX_CQS]; + void __iomem *cpe_q_ctrl[BFI_IOC_MAX_CQS]; + void __iomem *rme_q_ci[BFI_IOC_MAX_CQS]; + void __iomem *rme_q_pi[BFI_IOC_MAX_CQS]; + void __iomem *rme_q_depth[BFI_IOC_MAX_CQS]; + void __iomem *rme_q_ctrl[BFI_IOC_MAX_CQS]; }; -/** +/* * MSIX vector handlers */ #define BFA_MSIX_MAX_VECTORS 22 @@ -224,7 +224,7 @@ struct bfa_msix_s { bfa_msix_handler_t handler[BFA_MSIX_MAX_VECTORS]; }; -/** +/* * Chip specific interfaces */ struct bfa_hwif_s { @@ -343,7 +343,7 @@ int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport); -/** +/* *---------------------------------------------------------------------- * BFA public interfaces *---------------------------------------------------------------------- diff --git a/drivers/scsi/bfa/bfa_cb_ioim.h b/drivers/scsi/bfa/bfa_cb_ioim.h index a989a94c38d..6f021015f1f 100644 --- a/drivers/scsi/bfa/bfa_cb_ioim.h +++ b/drivers/scsi/bfa/bfa_cb_ioim.h @@ -37,18 +37,18 @@ bfad_int_to_lun(u32 luno) } lun; lun.bfa_lun = 0; - lun.scsi_lun[0] = bfa_os_htons(luno); + lun.scsi_lun[0] = cpu_to_be16(luno); return lun.bfa_lun; } -/** +/* * Get LUN for the I/O request */ #define bfa_cb_ioim_get_lun(__dio) \ bfad_int_to_lun(((struct scsi_cmnd *)__dio)->device->lun) -/** +/* * Get CDB for the I/O request */ static inline u8 * @@ -59,7 +59,7 @@ bfa_cb_ioim_get_cdb(struct bfad_ioim_s *dio) return (u8 *) cmnd->cmnd; } -/** +/* * Get I/O direction (read/write) for the I/O request */ static inline enum fcp_iodir @@ -77,7 +77,7 @@ bfa_cb_ioim_get_iodir(struct bfad_ioim_s *dio) return FCP_IODIR_NONE; } -/** +/* * Get IO size in bytes for the I/O request */ static inline u32 @@ -88,7 +88,7 @@ bfa_cb_ioim_get_size(struct bfad_ioim_s *dio) return scsi_bufflen(cmnd); } -/** +/* * Get timeout for the I/O request */ static inline u8 @@ -104,7 +104,7 @@ bfa_cb_ioim_get_timeout(struct bfad_ioim_s *dio) return 0; } -/** +/* * Get Command Reference Number for the I/O request. 0 if none. */ static inline u8 @@ -113,7 +113,7 @@ bfa_cb_ioim_get_crn(struct bfad_ioim_s *dio) return 0; } -/** +/* * Get SAM-3 priority for the I/O request. 0 is default. */ static inline u8 @@ -122,7 +122,7 @@ bfa_cb_ioim_get_priority(struct bfad_ioim_s *dio) return 0; } -/** +/* * Get task attributes for the I/O request. Default is FCP_TASK_ATTR_SIMPLE(0). */ static inline u8 @@ -148,7 +148,7 @@ bfa_cb_ioim_get_taskattr(struct bfad_ioim_s *dio) return task_attr; } -/** +/* * Get CDB length in bytes for the I/O request. Default is FCP_CMND_CDB_LEN(16). 
*/ static inline u8 @@ -159,7 +159,7 @@ bfa_cb_ioim_get_cdblen(struct bfad_ioim_s *dio) return cmnd->cmd_len; } -/** +/* * Assign queue to be used for the I/O request. This value depends on whether * the driver wants to use the queues via any specific algorithm. Currently, * this is not supported. diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c index c2fa07f2485..2345f48dc57 100644 --- a/drivers/scsi/bfa/bfa_core.c +++ b/drivers/scsi/bfa/bfa_core.c @@ -21,11 +21,11 @@ BFA_TRC_FILE(HAL, CORE); -/** +/* * BFA IOC FC related definitions */ -/** +/* * IOC local definitions */ #define BFA_IOCFC_TOV 5000 /* msecs */ @@ -54,7 +54,7 @@ enum { #define DEF_CFG_NUM_SBOOT_TGTS 16 #define DEF_CFG_NUM_SBOOT_LUNS 16 -/** +/* * forward declaration for IOC FC functions */ static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status); @@ -63,7 +63,7 @@ static void bfa_iocfc_hbfail_cbfn(void *bfa_arg); static void bfa_iocfc_reset_cbfn(void *bfa_arg); static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn; -/** +/* * BFA Interrupt handling functions */ static void @@ -86,7 +86,7 @@ bfa_reqq_resume(struct bfa_s *bfa, int qid) waitq = bfa_reqq(bfa, qid); list_for_each_safe(qe, qen, waitq) { - /** + /* * Callback only as long as there is room in request queue */ if (bfa_reqq_full(bfa, qid)) @@ -104,7 +104,7 @@ bfa_msix_all(struct bfa_s *bfa, int vec) bfa_intx(bfa); } -/** +/* * hal_intr_api */ bfa_boolean_t @@ -113,15 +113,15 @@ bfa_intx(struct bfa_s *bfa) u32 intr, qintr; int queue; - intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status); + intr = readl(bfa->iocfc.bfa_regs.intr_status); if (!intr) return BFA_FALSE; - /** + /* * RME completion queue interrupt */ qintr = intr & __HFN_INT_RME_MASK; - bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr); + writel(qintr, bfa->iocfc.bfa_regs.intr_status); for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) { if (intr & (__HFN_INT_RME_Q0 << queue)) @@ -131,11 +131,11 @@ bfa_intx(struct bfa_s *bfa) if (!intr) return BFA_TRUE; - /** + /* * CPE completion queue interrupt */ qintr = intr & __HFN_INT_CPE_MASK; - bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr); + writel(qintr, bfa->iocfc.bfa_regs.intr_status); for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) { if (intr & (__HFN_INT_CPE_Q0 << queue)) @@ -153,13 +153,13 @@ bfa_intx(struct bfa_s *bfa) void bfa_intx_enable(struct bfa_s *bfa) { - bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, bfa->iocfc.intr_mask); + writel(bfa->iocfc.intr_mask, bfa->iocfc.bfa_regs.intr_mask); } void bfa_intx_disable(struct bfa_s *bfa) { - bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L); + writel(-1L, bfa->iocfc.bfa_regs.intr_mask); } void @@ -188,8 +188,8 @@ bfa_isr_enable(struct bfa_s *bfa) __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1); - bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask); - bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask); + writel(intr_unmask, bfa->iocfc.bfa_regs.intr_status); + writel(~intr_unmask, bfa->iocfc.bfa_regs.intr_mask); bfa->iocfc.intr_mask = ~intr_unmask; bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0); } @@ -198,7 +198,7 @@ void bfa_isr_disable(struct bfa_s *bfa) { bfa_isr_mode_set(bfa, BFA_FALSE); - bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L); + writel(-1L, bfa->iocfc.bfa_regs.intr_mask); bfa_msix_uninstall(bfa); } @@ -211,7 +211,7 @@ bfa_msix_reqq(struct bfa_s *bfa, int qid) bfa->iocfc.hwif.hw_reqq_ack(bfa, qid); - /** + /* * Resume any pending requests in the corresponding reqq. 
*/ waitq = bfa_reqq(bfa, qid); @@ -259,14 +259,14 @@ bfa_msix_rspq(struct bfa_s *bfa, int qid) } } - /** + /* * update CI */ bfa_rspq_ci(bfa, qid) = pi; - bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi); + writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]); mmiowb(); - /** + /* * Resume any pending requests in the corresponding reqq. */ waitq = bfa_reqq(bfa, qid); @@ -279,7 +279,7 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec) { u32 intr, curr_value; - intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status); + intr = readl(bfa->iocfc.bfa_regs.intr_status); if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1)) bfa_msix_lpu(bfa); @@ -289,30 +289,30 @@ bfa_msix_lpu_err(struct bfa_s *bfa, int vec) if (intr) { if (intr & __HFN_INT_LL_HALT) { - /** + /* * If LL_HALT bit is set then FW Init Halt LL Port * Register needs to be cleared as well so Interrupt * Status Register will be cleared. */ - curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt); + curr_value = readl(bfa->ioc.ioc_regs.ll_halt); curr_value &= ~__FW_INIT_HALT_P; - bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value); + writel(curr_value, bfa->ioc.ioc_regs.ll_halt); } if (intr & __HFN_INT_ERR_PSS) { - /** + /* * ERR_PSS bit needs to be cleared as well in case * interrups are shared so driver's interrupt handler is * still called eventhough it is already masked out. */ - curr_value = bfa_reg_read( + curr_value = readl( bfa->ioc.ioc_regs.pss_err_status_reg); curr_value &= __PSS_ERR_STATUS_SET; - bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg, - curr_value); + writel(curr_value, + bfa->ioc.ioc_regs.pss_err_status_reg); } - bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr); + writel(intr, bfa->iocfc.bfa_regs.intr_status); bfa_msix_errint(bfa, intr); } } @@ -323,11 +323,11 @@ bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func) bfa_isrs[mc] = isr_func; } -/** +/* * BFA IOC FC related functions */ -/** +/* * hal_ioc_pvt BFA IOC private functions */ @@ -366,7 +366,7 @@ bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len) BFA_CACHELINE_SZ); } -/** +/* * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ */ static void @@ -384,14 +384,14 @@ bfa_iocfc_send_cfg(void *bfa_arg) bfa_iocfc_reset_queues(bfa); - /** + /* * initialize IOC configuration info */ cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG; cfg_info->num_cqs = cfg->fwcfg.num_cqs; bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa); - /** + /* * dma map REQ and RSP circular queues and shadow pointers */ for (i = 0; i < cfg->fwcfg.num_cqs; i++) { @@ -400,17 +400,17 @@ bfa_iocfc_send_cfg(void *bfa_arg) bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i], iocfc->req_cq_shadow_ci[i].pa); cfg_info->req_cq_elems[i] = - bfa_os_htons(cfg->drvcfg.num_reqq_elems); + cpu_to_be16(cfg->drvcfg.num_reqq_elems); bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i], iocfc->rsp_cq_ba[i].pa); bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i], iocfc->rsp_cq_shadow_pi[i].pa); cfg_info->rsp_cq_elems[i] = - bfa_os_htons(cfg->drvcfg.num_rspq_elems); + cpu_to_be16(cfg->drvcfg.num_rspq_elems); } - /** + /* * Enable interrupt coalescing if it is driver init path * and not ioc disable/enable path. 
*/ @@ -419,7 +419,7 @@ bfa_iocfc_send_cfg(void *bfa_arg) iocfc->cfgdone = BFA_FALSE; - /** + /* * dma map IOC configuration itself */ bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ, @@ -440,9 +440,9 @@ bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, iocfc->bfa = bfa; iocfc->action = BFA_IOCFC_ACT_NONE; - bfa_os_assign(iocfc->cfg, *cfg); + iocfc->cfg = *cfg; - /** + /* * Initialize chip specific handlers. */ if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) { @@ -503,13 +503,13 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg, for (i = 0; i < cfg->fwcfg.num_cqs; i++) { iocfc->req_cq_ba[i].kva = dm_kva; iocfc->req_cq_ba[i].pa = dm_pa; - bfa_os_memset(dm_kva, 0, per_reqq_sz); + memset(dm_kva, 0, per_reqq_sz); dm_kva += per_reqq_sz; dm_pa += per_reqq_sz; iocfc->rsp_cq_ba[i].kva = dm_kva; iocfc->rsp_cq_ba[i].pa = dm_pa; - bfa_os_memset(dm_kva, 0, per_rspq_sz); + memset(dm_kva, 0, per_rspq_sz); dm_kva += per_rspq_sz; dm_pa += per_rspq_sz; } @@ -559,7 +559,7 @@ bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg, } } -/** +/* * Start BFA submodules. */ static void @@ -573,7 +573,7 @@ bfa_iocfc_start_submod(struct bfa_s *bfa) hal_mods[i]->start(bfa); } -/** +/* * Disable BFA submodules. */ static void @@ -623,7 +623,7 @@ bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl) complete(&bfad->disable_comp); } -/** +/* * Update BFA configuration from firmware configuration. */ static void @@ -634,15 +634,15 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa) struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg; fwcfg->num_cqs = fwcfg->num_cqs; - fwcfg->num_ioim_reqs = bfa_os_ntohs(fwcfg->num_ioim_reqs); - fwcfg->num_tskim_reqs = bfa_os_ntohs(fwcfg->num_tskim_reqs); - fwcfg->num_fcxp_reqs = bfa_os_ntohs(fwcfg->num_fcxp_reqs); - fwcfg->num_uf_bufs = bfa_os_ntohs(fwcfg->num_uf_bufs); - fwcfg->num_rports = bfa_os_ntohs(fwcfg->num_rports); + fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs); + fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs); + fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs); + fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs); + fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports); iocfc->cfgdone = BFA_TRUE; - /** + /* * Configuration is complete - initialize/start submodules */ bfa_fcport_init(bfa); @@ -665,7 +665,7 @@ bfa_iocfc_reset_queues(struct bfa_s *bfa) } } -/** +/* * IOC enable request is complete */ static void @@ -684,7 +684,7 @@ bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status) bfa_iocfc_send_cfg(bfa); } -/** +/* * IOC disable request is complete */ static void @@ -705,7 +705,7 @@ bfa_iocfc_disable_cbfn(void *bfa_arg) } } -/** +/* * Notify sub-modules of hardware failure. */ static void @@ -723,7 +723,7 @@ bfa_iocfc_hbfail_cbfn(void *bfa_arg) bfa); } -/** +/* * Actions on chip-reset completion. */ static void @@ -735,11 +735,11 @@ bfa_iocfc_reset_cbfn(void *bfa_arg) bfa_isr_enable(bfa); } -/** +/* * hal_ioc_public */ -/** +/* * Query IOC memory requirement information. */ void @@ -754,7 +754,7 @@ bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, *km_len += bfa_ioc_debug_trcsz(bfa_auto_recover); } -/** +/* * Query IOC memory requirement information. */ void @@ -772,7 +772,7 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, ioc->trcmod = bfa->trcmod; bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod); - /** + /* * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC. 
*/ if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC) @@ -790,7 +790,7 @@ bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, INIT_LIST_HEAD(&bfa->reqq_waitq[i]); } -/** +/* * Query IOC memory requirement information. */ void @@ -799,7 +799,7 @@ bfa_iocfc_detach(struct bfa_s *bfa) bfa_ioc_detach(&bfa->ioc); } -/** +/* * Query IOC memory requirement information. */ void @@ -809,7 +809,7 @@ bfa_iocfc_init(struct bfa_s *bfa) bfa_ioc_enable(&bfa->ioc); } -/** +/* * IOC start called from bfa_start(). Called to start IOC operations * at driver instantiation for this instance. */ @@ -820,7 +820,7 @@ bfa_iocfc_start(struct bfa_s *bfa) bfa_iocfc_start_submod(bfa); } -/** +/* * IOC stop called from bfa_stop(). Called only when driver is unloaded * for this instance. */ @@ -876,12 +876,12 @@ bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr) attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce; attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ? - bfa_os_ntohs(iocfc->cfginfo->intr_attr.delay) : - bfa_os_ntohs(iocfc->cfgrsp->intr_attr.delay); + be16_to_cpu(iocfc->cfginfo->intr_attr.delay) : + be16_to_cpu(iocfc->cfgrsp->intr_attr.delay); attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ? - bfa_os_ntohs(iocfc->cfginfo->intr_attr.latency) : - bfa_os_ntohs(iocfc->cfgrsp->intr_attr.latency); + be16_to_cpu(iocfc->cfginfo->intr_attr.latency) : + be16_to_cpu(iocfc->cfgrsp->intr_attr.latency); attr->config = iocfc->cfg; } @@ -893,8 +893,8 @@ bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr) struct bfi_iocfc_set_intr_req_s *m; iocfc->cfginfo->intr_attr.coalesce = attr->coalesce; - iocfc->cfginfo->intr_attr.delay = bfa_os_htons(attr->delay); - iocfc->cfginfo->intr_attr.latency = bfa_os_htons(attr->latency); + iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay); + iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency); if (!bfa_iocfc_is_operational(bfa)) return BFA_STATUS_OK; @@ -924,7 +924,7 @@ bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa) iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1); bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa); } -/** +/* * Enable IOC after it is disabled. */ void @@ -953,7 +953,7 @@ bfa_iocfc_is_operational(struct bfa_s *bfa) return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone; } -/** +/* * Return boot target port wwns -- read from boot information in flash. */ void @@ -998,11 +998,11 @@ bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport) return cfgrsp->pbc_cfg.nvports; } -/** +/* * hal_api */ -/** +/* * Use this function query the memory requirement of the BFA library. * This function needs to be called before bfa_attach() to get the * memory required of the BFA layer for a given driver configuration. @@ -1038,7 +1038,7 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo) bfa_assert((cfg != NULL) && (meminfo != NULL)); - bfa_os_memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s)); + memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s)); meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type = BFA_MEM_TYPE_KVA; meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type = @@ -1055,7 +1055,7 @@ bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo) meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len; } -/** +/* * Use this function to do attach the driver instance with the BFA * library. 
This function will not trigger any HW initialization * process (which will be done in bfa_init() call) @@ -1092,7 +1092,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, bfa_assert((cfg != NULL) && (meminfo != NULL)); - /** + /* * initialize all memory pointers for iterative allocation */ for (i = 0; i < BFA_MEM_TYPE_MAX; i++) { @@ -1109,7 +1109,7 @@ bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, bfa_com_port_attach(bfa, meminfo); } -/** +/* * Use this function to delete a BFA IOC. IOC should be stopped (by * calling bfa_stop()) before this function call. * @@ -1146,7 +1146,7 @@ bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog) bfa->plog = plog; } -/** +/* * Initialize IOC. * * This function will return immediately, when the IOC initialization is @@ -1169,7 +1169,7 @@ bfa_init(struct bfa_s *bfa) bfa_iocfc_init(bfa); } -/** +/* * Use this function initiate the IOC configuration setup. This function * will return immediately. * @@ -1183,7 +1183,7 @@ bfa_start(struct bfa_s *bfa) bfa_iocfc_start(bfa); } -/** +/* * Use this function quiese the IOC. This function will return immediately, * when the IOC is actually stopped, the bfad->comp will be set. * @@ -1243,7 +1243,7 @@ bfa_attach_fcs(struct bfa_s *bfa) bfa->fcs = BFA_TRUE; } -/** +/* * Periodic timer heart beat from driver */ void @@ -1252,7 +1252,7 @@ bfa_timer_tick(struct bfa_s *bfa) bfa_timer_beat(&bfa->timer_mod); } -/** +/* * Return the list of PCI vendor/device id lists supported by this * BFA instance. */ @@ -1270,7 +1270,7 @@ bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids) *pciids = __pciids; } -/** +/* * Use this function query the default struct bfa_iocfc_cfg_s value (compiled * into BFA layer). The OS driver can then turn back and overwrite entries that * have been configured by the user. @@ -1328,7 +1328,7 @@ bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr) bfa_ioc_get_attr(&bfa->ioc, ioc_attr); } -/** +/* * Retrieve firmware trace information on IOC failure. */ bfa_status_t @@ -1337,7 +1337,7 @@ bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen) return bfa_ioc_debug_fwsave(&bfa->ioc, trcdata, trclen); } -/** +/* * Clear the saved firmware trace information of an IOC. */ void @@ -1346,7 +1346,7 @@ bfa_debug_fwsave_clear(struct bfa_s *bfa) bfa_ioc_debug_fwsave_clear(&bfa->ioc); } -/** +/* * Fetch firmware trace data. * * @param[in] bfa BFA instance @@ -1362,7 +1362,7 @@ bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen) return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen); } -/** +/* * Dump firmware memory. * * @param[in] bfa BFA instance @@ -1378,7 +1378,7 @@ bfa_debug_fwcore(struct bfa_s *bfa, void *buf, u32 *offset, int *buflen) { return bfa_ioc_debug_fwcore(&bfa->ioc, buf, offset, buflen); } -/** +/* * Reset hw semaphore & usage cnt regs and initialize. */ void @@ -1388,7 +1388,7 @@ bfa_chip_reset(struct bfa_s *bfa) bfa_ioc_pll_init(&bfa->ioc); } -/** +/* * Fetch firmware statistics data. * * @param[in] bfa BFA instance diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h index 7260c74620f..99f242b9aa3 100644 --- a/drivers/scsi/bfa/bfa_cs.h +++ b/drivers/scsi/bfa/bfa_cs.h @@ -15,7 +15,7 @@ * General Public License for more details. */ -/** +/* * bfa_cs.h BFA common services */ @@ -24,7 +24,7 @@ #include "bfa_os_inc.h" -/** +/* * BFA TRC */ @@ -73,7 +73,7 @@ enum { #define BFA_TRC_MOD_SH 10 #define BFA_TRC_MOD(__mod) ((BFA_TRC_ ## __mod) << BFA_TRC_MOD_SH) -/** +/* * Define a new tracing file (module). 
Module should match one defined above. */ #define BFA_TRC_FILE(__mod, __submod) \ @@ -155,7 +155,7 @@ __bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data) #define bfa_trc_fp(_trcp, _data) #endif -/** +/* * @ BFA LOG interfaces */ #define bfa_assert(__cond) do { \ @@ -249,13 +249,13 @@ bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe) #define bfa_q_is_on_q(_q, _qe) \ bfa_q_is_on_q_func(_q, (struct list_head *)(_qe)) -/** +/* * @ BFA state machine interfaces */ typedef void (*bfa_sm_t)(void *sm, int event); -/** +/* * oc - object class eg. bfa_ioc * st - state, eg. reset * otype - object type, eg. struct bfa_ioc_s @@ -269,7 +269,7 @@ typedef void (*bfa_sm_t)(void *sm, int event); #define bfa_sm_get_state(_sm) ((_sm)->sm) #define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state)) -/** +/* * For converting from state machine function to state encoding. */ struct bfa_sm_table_s { @@ -279,12 +279,12 @@ struct bfa_sm_table_s { }; #define BFA_SM(_sm) ((bfa_sm_t)(_sm)) -/** +/* * State machine with entry actions. */ typedef void (*bfa_fsm_t)(void *fsm, int event); -/** +/* * oc - object class eg. bfa_ioc * st - state, eg. reset * otype - object type, eg. struct bfa_ioc_s @@ -314,7 +314,7 @@ bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm) return smt[i].state; } -/** +/* * @ Generic wait counter. */ @@ -340,7 +340,7 @@ bfa_wc_down(struct bfa_wc_s *wc) wc->wc_resume(wc->wc_cbarg); } -/** +/* * Initialize a waiting counter. */ static inline void @@ -352,7 +352,7 @@ bfa_wc_init(struct bfa_wc_s *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg) bfa_wc_up(wc); } -/** +/* * Wait for counter to reach zero */ static inline void diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h index d49877ff514..4b5b9e35abb 100644 --- a/drivers/scsi/bfa/bfa_defs.h +++ b/drivers/scsi/bfa/bfa_defs.h @@ -24,7 +24,7 @@ #define BFA_MFG_SERIALNUM_SIZE 11 #define STRSZ(_n) (((_n) + 4) & ~3) -/** +/* * Manufacturing card type */ enum { @@ -45,7 +45,7 @@ enum { #pragma pack(1) -/** +/* * Check if Mezz card */ #define bfa_mfg_is_mezz(type) (( \ @@ -55,7 +55,7 @@ enum { (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \ (type) == BFA_MFG_TYPE_LIGHTNING)) -/** +/* * Check if the card having old wwn/mac handling */ #define bfa_mfg_is_old_wwn_mac_model(type) (( \ @@ -78,12 +78,12 @@ do { \ (m)[2] = t & 0xFF; \ } while (0) -/** +/* * VPD data length */ #define BFA_MFG_VPD_LEN 512 -/** +/* * VPD vendor tag */ enum { @@ -97,7 +97,7 @@ enum { BFA_MFG_VPD_PCI_BRCD = 0xf8, /* PCI VPD Brocade */ }; -/** +/* * All numerical fields are in big-endian format. */ struct bfa_mfg_vpd_s { @@ -112,7 +112,7 @@ struct bfa_mfg_vpd_s { #pragma pack() -/** +/* * Status return values */ enum bfa_status { @@ -167,11 +167,11 @@ enum bfa_boolean { #define BFA_STRING_32 32 #define BFA_VERSION_LEN 64 -/** +/* * ---------------------- adapter definitions ------------ */ -/** +/* * BFA adapter level attributes. */ enum { @@ -215,7 +215,7 @@ struct bfa_adapter_attr_s { u8 trunk_capable; }; -/** +/* * ---------------------- IOC definitions ------------ */ @@ -224,7 +224,7 @@ enum { BFA_IOC_CHIP_REV_LEN = 8, }; -/** +/* * Driver and firmware versions. 
*/ struct bfa_ioc_driver_attr_s { @@ -236,7 +236,7 @@ struct bfa_ioc_driver_attr_s { char ob_ver[BFA_VERSION_LEN]; /* openboot version */ }; -/** +/* * IOC PCI device attributes */ struct bfa_ioc_pci_attr_s { @@ -249,7 +249,7 @@ struct bfa_ioc_pci_attr_s { char chip_rev[BFA_IOC_CHIP_REV_LEN]; /* chip revision */ }; -/** +/* * IOC states */ enum bfa_ioc_state { @@ -267,7 +267,7 @@ enum bfa_ioc_state { BFA_IOC_ENABLING = 12, /* IOC is being enabled */ }; -/** +/* * IOC firmware stats */ struct bfa_fw_ioc_stats_s { @@ -279,7 +279,7 @@ struct bfa_fw_ioc_stats_s { u32 unknown_reqs; }; -/** +/* * IOC driver stats */ struct bfa_ioc_drv_stats_s { @@ -296,7 +296,7 @@ struct bfa_ioc_drv_stats_s { u32 enable_replies; }; -/** +/* * IOC statistics */ struct bfa_ioc_stats_s { @@ -310,7 +310,7 @@ enum bfa_ioc_type_e { BFA_IOC_TYPE_LL = 3, }; -/** +/* * IOC attributes returned in queries */ struct bfa_ioc_attr_s { @@ -323,11 +323,11 @@ struct bfa_ioc_attr_s { u8 rsvd[7]; /* 64bit align */ }; -/** +/* * ---------------------- mfg definitions ------------ */ -/** +/* * Checksum size */ #define BFA_MFG_CHKSUM_SIZE 16 @@ -340,7 +340,7 @@ struct bfa_ioc_attr_s { #pragma pack(1) -/** +/* * All numerical fields are in big-endian format. */ struct bfa_mfg_block_s { @@ -373,11 +373,11 @@ struct bfa_mfg_block_s { #pragma pack() -/** +/* * ---------------------- pci definitions ------------ */ -/** +/* * PCI device and vendor ID information */ enum { @@ -392,14 +392,14 @@ enum { ((devid) == BFA_PCI_DEVICE_ID_CT || \ (devid) == BFA_PCI_DEVICE_ID_CT_FC) -/** +/* * PCI sub-system device and vendor ID information */ enum { BFA_PCI_FCOE_SSDEVICE_ID = 0x14, }; -/** +/* * Maximum number of device address ranges mapped through different BAR(s) */ #define BFA_PCI_ACCESS_RANGES 1 @@ -430,7 +430,7 @@ enum { #define BOOT_CFG_REV1 1 #define BOOT_CFG_VLAN 1 -/** +/* * Boot options setting. Boot options setting determines from where * to get the boot lun information */ @@ -442,7 +442,7 @@ enum bfa_boot_bootopt { }; #pragma pack(1) -/** +/* * Boot lun information. */ struct bfa_boot_bootlun_s { @@ -451,7 +451,7 @@ struct bfa_boot_bootlun_s { }; #pragma pack() -/** +/* * BOOT boot configuraton */ struct bfa_boot_pbc_s { diff --git a/drivers/scsi/bfa/bfa_defs_fcs.h b/drivers/scsi/bfa/bfa_defs_fcs.h index 96905d30182..191d34a58b9 100644 --- a/drivers/scsi/bfa/bfa_defs_fcs.h +++ b/drivers/scsi/bfa/bfa_defs_fcs.h @@ -21,7 +21,7 @@ #include "bfa_fc.h" #include "bfa_defs_svc.h" -/** +/* * VF states */ enum bfa_vf_state { @@ -35,7 +35,7 @@ enum bfa_vf_state { BFA_VF_ISOLATED = 7, /* port isolated due to vf_id mismatch */ }; -/** +/* * VF statistics */ struct bfa_vf_stats_s { @@ -55,7 +55,7 @@ struct bfa_vf_stats_s { u32 resvd; /* padding for 64 bit alignment */ }; -/** +/* * VF attributes returned in queries */ struct bfa_vf_attr_s { @@ -67,7 +67,7 @@ struct bfa_vf_attr_s { #define BFA_FCS_MAX_LPORTS 256 #define BFA_FCS_FABRIC_IPADDR_SZ 16 -/** +/* * symbolic names for base port/virtual port */ #define BFA_SYMNAME_MAXLEN 128 /* 128 bytes */ @@ -75,7 +75,7 @@ struct bfa_lport_symname_s { char symname[BFA_SYMNAME_MAXLEN]; }; -/** +/* * Roles of FCS port: * - FCP IM and FCP TM roles cannot be enabled together for a FCS port * - Create multiple ports if both IM and TM functions required. @@ -86,19 +86,19 @@ enum bfa_lport_role { BFA_LPORT_ROLE_FCP_MAX = BFA_LPORT_ROLE_FCP_IM, }; -/** +/* * FCS port configuration. 
*/ struct bfa_lport_cfg_s { wwn_t pwwn; /* port wwn */ wwn_t nwwn; /* node wwn */ struct bfa_lport_symname_s sym_name; /* vm port symbolic name */ - bfa_boolean_t preboot_vp; /* vport created from PBC */ + bfa_boolean_t preboot_vp; /* vport created from PBC */ enum bfa_lport_role roles; /* FCS port roles */ u8 tag[16]; /* opaque tag from application */ }; -/** +/* * FCS port states */ enum bfa_lport_state { @@ -108,7 +108,7 @@ enum bfa_lport_state { BFA_LPORT_OFFLINE = 3, /* No login to fabric */ }; -/** +/* * FCS port type. */ enum bfa_lport_type { @@ -116,7 +116,7 @@ enum bfa_lport_type { BFA_LPORT_TYPE_VIRTUAL, }; -/** +/* * FCS port offline reason. */ enum bfa_lport_offline_reason { @@ -128,7 +128,7 @@ enum bfa_lport_offline_reason { BFA_LPORT_OFFLINE_FAB_LOGOUT, }; -/** +/* * FCS lport info. */ struct bfa_lport_info_s { @@ -150,7 +150,7 @@ struct bfa_lport_info_s { }; -/** +/* * FCS port statistics */ struct bfa_lport_stats_s { @@ -222,7 +222,7 @@ struct bfa_lport_stats_s { * (max retry of plogi) */ }; -/** +/* * BFA port attribute returned in queries */ struct bfa_lport_attr_s { @@ -239,7 +239,7 @@ struct bfa_lport_attr_s { }; -/** +/* * VPORT states */ enum bfa_vport_state { @@ -258,7 +258,7 @@ enum bfa_vport_state { BFA_FCS_VPORT_MAX_STATE, }; -/** +/* * vport statistics */ struct bfa_vport_stats_s { @@ -296,7 +296,7 @@ struct bfa_vport_stats_s { u32 rsvd; }; -/** +/* * BFA vport attribute returned in queries */ struct bfa_vport_attr_s { @@ -305,7 +305,7 @@ struct bfa_vport_attr_s { u32 rsvd; }; -/** +/* * FCS remote port states */ enum bfa_rport_state { @@ -321,7 +321,7 @@ enum bfa_rport_state { BFA_RPORT_NSDISC = 9, /* re-discover rport */ }; -/** +/* * Rport Scsi Function : Initiator/Target. */ enum bfa_rport_function { @@ -329,7 +329,7 @@ enum bfa_rport_function { BFA_RPORT_TARGET = 0x02, /* SCSI Target */ }; -/** +/* * port/node symbolic names for rport */ #define BFA_RPORT_SYMNAME_MAXLEN 255 @@ -337,7 +337,7 @@ struct bfa_rport_symname_s { char symname[BFA_RPORT_SYMNAME_MAXLEN]; }; -/** +/* * FCS remote port statistics */ struct bfa_rport_stats_s { @@ -374,7 +374,7 @@ struct bfa_rport_stats_s { struct bfa_rport_hal_stats_s hal_stats; /* BFA rport stats */ }; -/** +/* * FCS remote port attributes returned in queries */ struct bfa_rport_attr_s { @@ -411,7 +411,7 @@ struct bfa_rport_remote_link_stats_s { #define BFA_MAX_IO_INDEX 7 #define BFA_NO_IO_INDEX 9 -/** +/* * FCS itnim states */ enum bfa_itnim_state { @@ -425,7 +425,7 @@ enum bfa_itnim_state { BFA_ITNIM_INITIATIOR = 7, /* initiator */ }; -/** +/* * FCS remote port statistics */ struct bfa_itnim_stats_s { @@ -443,7 +443,7 @@ struct bfa_itnim_stats_s { u32 rsvd; /* padding for 64 bit alignment */ }; -/** +/* * FCS itnim attributes returned in queries */ struct bfa_itnim_attr_s { diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h index 56226fcf947..e24e9f7ca81 100644 --- a/drivers/scsi/bfa/bfa_defs_svc.h +++ b/drivers/scsi/bfa/bfa_defs_svc.h @@ -27,7 +27,7 @@ #define BFA_IOCFCOE_INTR_DELAY 25 #define BFA_IOCFCOE_INTR_LATENCY 5 -/** +/* * Interrupt coalescing configuration. 
*/ #pragma pack(1) @@ -38,7 +38,7 @@ struct bfa_iocfc_intr_attr_s { u16 delay; /* delay in microseconds */ }; -/** +/* * IOC firmware configuraton */ struct bfa_iocfc_fwcfg_s { @@ -71,7 +71,7 @@ struct bfa_iocfc_drvcfg_s { u32 rsvd; }; -/** +/* * IOC configuration */ struct bfa_iocfc_cfg_s { @@ -79,7 +79,7 @@ struct bfa_iocfc_cfg_s { struct bfa_iocfc_drvcfg_s drvcfg; /* driver side config */ }; -/** +/* * IOC firmware IO stats */ struct bfa_fw_io_stats_s { @@ -152,7 +152,7 @@ struct bfa_fw_io_stats_s { */ }; -/** +/* * IOC port firmware stats */ @@ -262,7 +262,7 @@ struct bfa_fw_fcoe_stats_s { u32 mac_invalids; /* Invalid mac assigned */ }; -/** +/* * IOC firmware FCoE port stats */ struct bfa_fw_fcoe_port_stats_s { @@ -270,7 +270,7 @@ struct bfa_fw_fcoe_port_stats_s { struct bfa_fw_fip_stats_s fip_stats; }; -/** +/* * IOC firmware FC uport stats */ struct bfa_fw_fc_uport_stats_s { @@ -278,7 +278,7 @@ struct bfa_fw_fc_uport_stats_s { struct bfa_fw_port_lksm_stats_s lksm_stats; }; -/** +/* * IOC firmware FC port stats */ union bfa_fw_fc_port_stats_s { @@ -286,7 +286,7 @@ union bfa_fw_fc_port_stats_s { struct bfa_fw_fcoe_port_stats_s fcoe_stats; }; -/** +/* * IOC firmware port stats */ struct bfa_fw_port_stats_s { @@ -295,7 +295,7 @@ struct bfa_fw_port_stats_s { union bfa_fw_fc_port_stats_s fc_port; }; -/** +/* * fcxchg module statistics */ struct bfa_fw_fcxchg_stats_s { @@ -308,7 +308,7 @@ struct bfa_fw_lpsm_stats_s { u32 cls_tx; }; -/** +/* * Trunk statistics */ struct bfa_fw_trunk_stats_s { @@ -334,7 +334,7 @@ struct bfa_fw_advsm_stats_s { u32 elp_dropped; /* ELP dropped */ }; -/** +/* * IOCFC firmware stats */ struct bfa_fw_iocfc_stats_s { @@ -345,7 +345,7 @@ struct bfa_fw_iocfc_stats_s { u32 set_intr_reqs; /* set interrupt reqs */ }; -/** +/* * IOC attributes returned in queries */ struct bfa_iocfc_attr_s { @@ -353,7 +353,7 @@ struct bfa_iocfc_attr_s { struct bfa_iocfc_intr_attr_s intr_attr; /* interrupt attr */ }; -/** +/* * Eth_sndrcv mod stats */ struct bfa_fw_eth_sndrcv_stats_s { @@ -361,7 +361,7 @@ struct bfa_fw_eth_sndrcv_stats_s { u32 rsvd; /* 64bit align */ }; -/** +/* * CT MAC mod stats */ struct bfa_fw_mac_mod_stats_s { @@ -379,7 +379,7 @@ struct bfa_fw_mac_mod_stats_s { u32 rsvd; /* 64bit align */ }; -/** +/* * CT MOD stats */ struct bfa_fw_ct_mod_stats_s { @@ -391,7 +391,7 @@ struct bfa_fw_ct_mod_stats_s { u32 rsvd; /* 64bit align */ }; -/** +/* * IOC firmware stats */ struct bfa_fw_stats_s { @@ -412,7 +412,7 @@ struct bfa_fw_stats_s { #define BFA_IOCFC_PATHTOV_MAX 60 #define BFA_IOCFC_QDEPTH_MAX 2000 -/** +/* * QoS states */ enum bfa_qos_state { @@ -420,7 +420,7 @@ enum bfa_qos_state { BFA_QOS_OFFLINE = 2, /* QoS is offline */ }; -/** +/* * QoS Priority levels. */ enum bfa_qos_priority { @@ -430,7 +430,7 @@ enum bfa_qos_priority { BFA_QOS_LOW = 3, /* QoS Priority Level Low */ }; -/** +/* * QoS bandwidth allocation for each priority level */ enum bfa_qos_bw_alloc { @@ -439,7 +439,7 @@ enum bfa_qos_bw_alloc { BFA_QOS_BW_LOW = 10, /* bandwidth allocation for Low */ }; #pragma pack(1) -/** +/* * QoS attribute returned in QoS Query */ struct bfa_qos_attr_s { @@ -448,7 +448,7 @@ struct bfa_qos_attr_s { u32 total_bb_cr; /* Total BB Credits */ }; -/** +/* * These fields should be displayed only from the CLI. * There will be a separate BFAL API (get_qos_vc_attr ?) * to retrieve this. 
@@ -471,7 +471,7 @@ struct bfa_qos_vc_attr_s { * total_vc_count */ }; -/** +/* * QoS statistics */ struct bfa_qos_stats_s { @@ -489,7 +489,7 @@ struct bfa_qos_stats_s { u32 rsvd; /* padding for 64 bit alignment */ }; -/** +/* * FCoE statistics */ struct bfa_fcoe_stats_s { @@ -540,7 +540,7 @@ struct bfa_fcoe_stats_s { u64 rxf_bcast_vlan; /* Rx FCoE broadcast vlan frames */ }; -/** +/* * QoS or FCoE stats (fcport stats excluding physical FC port stats) */ union bfa_fcport_stats_u { @@ -639,7 +639,7 @@ enum bfa_port_states { BFA_PORT_ST_MAX_STATE, }; -/** +/* * Port operational type (in sync with SNIA port type). */ enum bfa_port_type { @@ -651,7 +651,7 @@ enum bfa_port_type { BFA_PORT_TYPE_VPORT = 22, /* NPIV - virtual port */ }; -/** +/* * Port topology setting. A port's topology and fabric login status * determine its operational type. */ @@ -662,7 +662,7 @@ enum bfa_port_topology { BFA_PORT_TOPOLOGY_AUTO = 3, /* auto topology selection */ }; -/** +/* * Physical port loopback types. */ enum bfa_port_opmode { @@ -679,7 +679,7 @@ enum bfa_port_opmode { (_mode == BFA_PORT_OPMODE_LB_SLW) || \ (_mode == BFA_PORT_OPMODE_LB_EXT)) -/** +/* * Port link state */ enum bfa_port_linkstate { @@ -687,7 +687,7 @@ enum bfa_port_linkstate { BFA_PORT_LINKDOWN = 2, /* Physical port/Trunk link down */ }; -/** +/* * Port link state reason code */ enum bfa_port_linkstate_rsn { @@ -733,7 +733,7 @@ enum bfa_port_linkstate_rsn { CEE_ISCSI_PRI_OVERLAP_FCOE_PRI = 43 }; #pragma pack(1) -/** +/* * Physical port configuration */ struct bfa_port_cfg_s { @@ -753,7 +753,7 @@ struct bfa_port_cfg_s { }; #pragma pack() -/** +/* * Port attribute values. */ struct bfa_port_attr_s { @@ -800,7 +800,7 @@ struct bfa_port_attr_s { u8 rsvd1[6]; }; -/** +/* * Port FCP mappings. */ struct bfa_port_fcpmap_s { @@ -815,7 +815,7 @@ struct bfa_port_fcpmap_s { char luid[256]; }; -/** +/* * Port RNID info. */ struct bfa_port_rnid_s { @@ -848,7 +848,7 @@ struct bfa_fcport_fcf_s { mac_t mac; /* FCF mac */ }; -/** +/* * Trunk states for BCU/BFAL */ enum bfa_trunk_state { @@ -857,7 +857,7 @@ enum bfa_trunk_state { BFA_TRUNK_OFFLINE = 2, /* Trunk is offline */ }; -/** +/* * VC attributes for trunked link */ struct bfa_trunk_vc_attr_s { @@ -867,7 +867,7 @@ struct bfa_trunk_vc_attr_s { u16 vc_credits[8]; }; -/** +/* * Link state information */ struct bfa_port_link_s { @@ -959,7 +959,7 @@ struct bfa_rport_hal_stats_s { u32 rsvd; }; #pragma pack(1) -/** +/* * Rport's QoS attributes */ struct bfa_rport_qos_attr_s { @@ -987,7 +987,7 @@ struct bfa_itnim_ioprofile_s { struct bfa_itnim_latency_s io_latency; }; -/** +/* * FC physical port statistics. */ struct bfa_port_fc_stats_s { @@ -1022,7 +1022,7 @@ struct bfa_port_fc_stats_s { u64 err_enc; /* Encoding err frame_8b10b */ }; -/** +/* * Eth Physical Port statistics. */ struct bfa_port_eth_stats_s { @@ -1070,7 +1070,7 @@ struct bfa_port_eth_stats_s { u64 tx_iscsi_zero_pause; /* Tx iSCSI zero pause */ }; -/** +/* * Port statistics. */ union bfa_port_stats_u { diff --git a/drivers/scsi/bfa/bfa_drv.c b/drivers/scsi/bfa/bfa_drv.c index 14127646dc5..0222d7c88a9 100644 --- a/drivers/scsi/bfa/bfa_drv.c +++ b/drivers/scsi/bfa/bfa_drv.c @@ -17,7 +17,7 @@ #include "bfa_modules.h" -/** +/* * BFA module list terminated by NULL */ struct bfa_module_s *hal_mods[] = { @@ -31,7 +31,7 @@ struct bfa_module_s *hal_mods[] = { NULL }; -/** +/* * Message handlers for various modules. 
*/ bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = { @@ -70,7 +70,7 @@ bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = { }; -/** +/* * Message handlers for mailbox command classes */ bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = { diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h index 6eff705564e..e929d25b09e 100644 --- a/drivers/scsi/bfa/bfa_fc.h +++ b/drivers/scsi/bfa/bfa_fc.h @@ -1029,7 +1029,7 @@ struct link_e2e_beacon_req_s { struct link_e2e_beacon_param_s beacon_parm; }; -/** +/* * If RPSC request is sent to the Domain Controller, the request is for * all the ports within that domain (TODO - I don't think FOS implements * this...). @@ -1049,7 +1049,7 @@ struct fc_rpsc_acc_s { struct fc_rpsc_speed_info_s speed_info[1]; }; -/** +/* * If RPSC2 request is sent to the Domain Controller, */ #define FC_BRCD_TOKEN 0x42524344 @@ -1094,7 +1094,7 @@ struct fc_rpsc2_acc_s { struct fc_rpsc2_port_info_s port_info[1]; /* port information */ }; -/** +/* * bit fields so that multiple classes can be specified */ enum fc_cos { @@ -1131,7 +1131,7 @@ struct fc_alpabm_s { #define FC_VF_ID_MAX 0xEFF #define FC_VF_ID_CTL 0xFEF /* control VF_ID */ -/** +/* * Virtual Fabric Tagging header format * @caution This is defined only in BIG ENDIAN format. */ @@ -1463,7 +1463,7 @@ struct fcgs_gidpn_resp_s { u32 dap:24; /* port identifier */ }; -/** +/* * RFT_ID */ struct fcgs_rftid_req_s { @@ -1472,7 +1472,7 @@ struct fcgs_rftid_req_s { u32 fc4_type[8]; /* fc4 types */ }; -/** +/* * RFF_ID : Register FC4 features. */ @@ -1487,7 +1487,7 @@ struct fcgs_rffid_req_s { u32 fc4_type:8; /* corresponding FC4 Type */ }; -/** +/* * GID_FT Request */ struct fcgs_gidft_req_s { @@ -1497,7 +1497,7 @@ struct fcgs_gidft_req_s { u8 fc4_type; /* FC_TYPE_FCP for SCSI devices */ }; /* GID_FT Request */ -/** +/* * GID_FT Response */ struct fcgs_gidft_resp_s { @@ -1506,7 +1506,7 @@ struct fcgs_gidft_resp_s { u32 pid:24; /* port identifier */ }; /* GID_FT Response */ -/** +/* * RSPN_ID */ struct fcgs_rspnid_req_s { @@ -1516,7 +1516,7 @@ struct fcgs_rspnid_req_s { u8 spn[256]; /* symbolic port name */ }; -/** +/* * RPN_ID */ struct fcgs_rpnid_req_s { @@ -1525,7 +1525,7 @@ struct fcgs_rpnid_req_s { wwn_t port_name; }; -/** +/* * RNN_ID */ struct fcgs_rnnid_req_s { @@ -1534,7 +1534,7 @@ struct fcgs_rnnid_req_s { wwn_t node_name; }; -/** +/* * RCS_ID */ struct fcgs_rcsid_req_s { @@ -1543,7 +1543,7 @@ struct fcgs_rcsid_req_s { u32 cos; }; -/** +/* * RPT_ID */ struct fcgs_rptid_req_s { @@ -1553,7 +1553,7 @@ struct fcgs_rptid_req_s { u32 rsvd1:24; }; -/** +/* * GA_NXT Request */ struct fcgs_ganxt_req_s { @@ -1561,7 +1561,7 @@ struct fcgs_ganxt_req_s { u32 port_id:24; }; -/** +/* * GA_NXT Response */ struct fcgs_ganxt_rsp_s { diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c index b7d2657ca82..9c725314b51 100644 --- a/drivers/scsi/bfa/bfa_fcbuild.c +++ b/drivers/scsi/bfa/bfa_fcbuild.c @@ -94,13 +94,13 @@ fcbuild_init(void) */ plogi_tmpl.csp.verhi = FC_PH_VER_PH_3; plogi_tmpl.csp.verlo = FC_PH_VER_4_3; - plogi_tmpl.csp.bbcred = bfa_os_htons(0x0004); + plogi_tmpl.csp.bbcred = cpu_to_be16(0x0004); plogi_tmpl.csp.ciro = 0x1; plogi_tmpl.csp.cisc = 0x0; plogi_tmpl.csp.altbbcred = 0x0; - plogi_tmpl.csp.conseq = bfa_os_htons(0x00FF); - plogi_tmpl.csp.ro_bitmap = bfa_os_htons(0x0002); - plogi_tmpl.csp.e_d_tov = bfa_os_htonl(2000); + plogi_tmpl.csp.conseq = cpu_to_be16(0x00FF); + plogi_tmpl.csp.ro_bitmap = cpu_to_be16(0x0002); + plogi_tmpl.csp.e_d_tov = cpu_to_be32(2000); plogi_tmpl.class3.class_valid = 1; 
plogi_tmpl.class3.sequential = 1; @@ -112,7 +112,7 @@ fcbuild_init(void) */ prli_tmpl.command = FC_ELS_PRLI; prli_tmpl.pglen = 0x10; - prli_tmpl.pagebytes = bfa_os_htons(0x0014); + prli_tmpl.pagebytes = cpu_to_be16(0x0014); prli_tmpl.parampage.type = FC_TYPE_FCP; prli_tmpl.parampage.imagepair = 1; prli_tmpl.parampage.servparams.rxrdisab = 1; @@ -137,7 +137,7 @@ fcbuild_init(void) static void fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id) { - bfa_os_memset(fchs, 0, sizeof(struct fchs_s)); + memset(fchs, 0, sizeof(struct fchs_s)); fchs->routing = FC_RTG_FC4_DEV_DATA; fchs->cat_info = FC_CAT_UNSOLICIT_CTRL; @@ -148,9 +148,9 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id) fchs->rx_id = FC_RXID_ANY; fchs->d_id = (d_id); fchs->s_id = (s_id); - fchs->ox_id = bfa_os_htons(ox_id); + fchs->ox_id = cpu_to_be16(ox_id); - /** + /* * @todo no need to set ox_id for request * no need to set rx_id for response */ @@ -159,16 +159,16 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id) void fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) { - bfa_os_memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s)); + memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s)); fchs->d_id = (d_id); fchs->s_id = (s_id); - fchs->ox_id = bfa_os_htons(ox_id); + fchs->ox_id = cpu_to_be16(ox_id); } static void fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) { - bfa_os_memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s)); + memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s)); fchs->d_id = d_id; fchs->s_id = s_id; fchs->ox_id = ox_id; @@ -198,7 +198,7 @@ fc_els_rsp_parse(struct fchs_s *fchs, int len) static void fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) { - bfa_os_memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s)); + memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s)); fchs->d_id = d_id; fchs->s_id = s_id; fchs->ox_id = ox_id; @@ -211,7 +211,7 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, { struct fc_logi_s *plogi = (struct fc_logi_s *) (pld); - bfa_os_memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s)); + memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s)); plogi->els_cmd.els_code = els_code; if (els_code == FC_ELS_PLOGI) @@ -219,10 +219,10 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, else fc_els_rsp_build(fchs, d_id, s_id, ox_id); - plogi->csp.rxsz = plogi->class3.rxsz = bfa_os_htons(pdu_size); + plogi->csp.rxsz = plogi->class3.rxsz = cpu_to_be16(pdu_size); - bfa_os_memcpy(&plogi->port_name, &port_name, sizeof(wwn_t)); - bfa_os_memcpy(&plogi->node_name, &node_name, sizeof(wwn_t)); + memcpy(&plogi->port_name, &port_name, sizeof(wwn_t)); + memcpy(&plogi->node_name, &node_name, sizeof(wwn_t)); return sizeof(struct fc_logi_s); } @@ -235,12 +235,12 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT); u32 *vvl_info; - bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); + memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); flogi->els_cmd.els_code = FC_ELS_FLOGI; fc_els_req_build(fchs, d_id, s_id, ox_id); - flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size); + flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size); flogi->port_name = port_name; flogi->node_name = node_name; @@ -253,14 +253,14 @@ fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, /* set AUTH capability */ flogi->csp.security = set_auth; - flogi->csp.bbcred = 
bfa_os_htons(local_bb_credits); + flogi->csp.bbcred = cpu_to_be16(local_bb_credits); /* Set brcd token in VVL */ vvl_info = (u32 *)&flogi->vvl[0]; /* set the flag to indicate the presence of VVL */ flogi->csp.npiv_supp = 1; /* @todo. field name is not correct */ - vvl_info[0] = bfa_os_htonl(FLOGI_VVL_BRCD); + vvl_info[0] = cpu_to_be32(FLOGI_VVL_BRCD); return sizeof(struct fc_logi_s); } @@ -272,15 +272,15 @@ fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, { u32 d_id = 0; - bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); + memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); fc_els_rsp_build(fchs, d_id, s_id, ox_id); flogi->els_cmd.els_code = FC_ELS_ACC; - flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size); + flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size); flogi->port_name = port_name; flogi->node_name = node_name; - flogi->csp.bbcred = bfa_os_htons(local_bb_credits); + flogi->csp.bbcred = cpu_to_be16(local_bb_credits); return sizeof(struct fc_logi_s); } @@ -291,12 +291,12 @@ fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id, { u32 d_id = bfa_os_hton3b(FC_FABRIC_PORT); - bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); + memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s)); flogi->els_cmd.els_code = FC_ELS_FDISC; fc_els_req_build(fchs, d_id, s_id, ox_id); - flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size); + flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size); flogi->port_name = port_name; flogi->node_name = node_name; @@ -346,7 +346,7 @@ fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name) if (!plogi->class3.class_valid) return FC_PARSE_FAILURE; - if (bfa_os_ntohs(plogi->class3.rxsz) < (FC_MIN_PDUSZ)) + if (be16_to_cpu(plogi->class3.rxsz) < (FC_MIN_PDUSZ)) return FC_PARSE_FAILURE; return FC_PARSE_OK; @@ -363,8 +363,8 @@ fc_plogi_parse(struct fchs_s *fchs) if (plogi->class3.class_valid != 1) return FC_PARSE_FAILURE; - if ((bfa_os_ntohs(plogi->class3.rxsz) < FC_MIN_PDUSZ) - || (bfa_os_ntohs(plogi->class3.rxsz) > FC_MAX_PDUSZ) + if ((be16_to_cpu(plogi->class3.rxsz) < FC_MIN_PDUSZ) + || (be16_to_cpu(plogi->class3.rxsz) > FC_MAX_PDUSZ) || (plogi->class3.rxsz == 0)) return FC_PARSE_FAILURE; @@ -378,7 +378,7 @@ fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, struct fc_prli_s *prli = (struct fc_prli_s *) (pld); fc_els_req_build(fchs, d_id, s_id, ox_id); - bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s)); + memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s)); prli->command = FC_ELS_PRLI; prli->parampage.servparams.initiator = 1; @@ -397,7 +397,7 @@ fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, struct fc_prli_s *prli = (struct fc_prli_s *) (pld); fc_els_rsp_build(fchs, d_id, s_id, ox_id); - bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s)); + memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s)); prli->command = FC_ELS_ACC; @@ -448,7 +448,7 @@ fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, u32 s_id, { fc_els_req_build(fchs, d_id, s_id, ox_id); - bfa_os_memset(logo, '\0', sizeof(struct fc_logo_s)); + memset(logo, '\0', sizeof(struct fc_logo_s)); logo->els_cmd.els_code = FC_ELS_LOGO; logo->nport_id = (s_id); logo->orig_port_name = port_name; @@ -461,7 +461,7 @@ fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id, u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name, u8 els_code) { - bfa_os_memset(adisc, '\0', sizeof(struct fc_adisc_s)); + memset(adisc, '\0', sizeof(struct 
fc_adisc_s)); adisc->els_cmd.els_code = els_code; @@ -537,7 +537,7 @@ fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name) if (pdisc->class3.class_valid != 1) return FC_PARSE_FAILURE; - if ((bfa_os_ntohs(pdisc->class3.rxsz) < + if ((be16_to_cpu(pdisc->class3.rxsz) < (FC_MIN_PDUSZ - sizeof(struct fchs_s))) || (pdisc->class3.rxsz == 0)) return FC_PARSE_FAILURE; @@ -554,11 +554,11 @@ fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name) u16 fc_abts_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id) { - bfa_os_memcpy(fchs, &fc_bls_req_tmpl, sizeof(struct fchs_s)); + memcpy(fchs, &fc_bls_req_tmpl, sizeof(struct fchs_s)); fchs->cat_info = FC_CAT_ABTS; fchs->d_id = (d_id); fchs->s_id = (s_id); - fchs->ox_id = bfa_os_htons(ox_id); + fchs->ox_id = cpu_to_be16(ox_id); return sizeof(struct fchs_s); } @@ -582,9 +582,9 @@ fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id, u32 s_id, /* * build rrq payload */ - bfa_os_memcpy(rrq, &rrq_tmpl, sizeof(struct fc_rrq_s)); + memcpy(rrq, &rrq_tmpl, sizeof(struct fc_rrq_s)); rrq->s_id = (s_id); - rrq->ox_id = bfa_os_htons(rrq_oxid); + rrq->ox_id = cpu_to_be16(rrq_oxid); rrq->rx_id = FC_RXID_ANY; return sizeof(struct fc_rrq_s); @@ -598,7 +598,7 @@ fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id, fc_els_rsp_build(fchs, d_id, s_id, ox_id); - bfa_os_memset(acc, 0, sizeof(struct fc_els_cmd_s)); + memset(acc, 0, sizeof(struct fc_els_cmd_s)); acc->els_code = FC_ELS_ACC; return sizeof(struct fc_els_cmd_s); @@ -610,7 +610,7 @@ fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id, u8 reason_code_expl) { fc_els_rsp_build(fchs, d_id, s_id, ox_id); - bfa_os_memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s)); + memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s)); ls_rjt->els_cmd.els_code = FC_ELS_LS_RJT; ls_rjt->reason_code = reason_code; @@ -626,7 +626,7 @@ fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id, { fc_bls_rsp_build(fchs, d_id, s_id, ox_id); - bfa_os_memcpy(ba_acc, &ba_acc_tmpl, sizeof(struct fc_ba_acc_s)); + memcpy(ba_acc, &ba_acc_tmpl, sizeof(struct fc_ba_acc_s)); fchs->rx_id = rx_id; @@ -641,7 +641,7 @@ fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, u32 d_id, u32 s_id, u16 ox_id) { fc_els_rsp_build(fchs, d_id, s_id, ox_id); - bfa_os_memset(els_cmd, 0, sizeof(struct fc_els_cmd_s)); + memset(els_cmd, 0, sizeof(struct fc_els_cmd_s)); els_cmd->els_code = FC_ELS_ACC; return sizeof(struct fc_els_cmd_s); @@ -656,10 +656,10 @@ fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code) if (els_code == FC_ELS_PRLO) { prlo = (struct fc_prlo_s *) (fc_frame + 1); - num_pages = (bfa_os_ntohs(prlo->payload_len) - 4) / 16; + num_pages = (be16_to_cpu(prlo->payload_len) - 4) / 16; } else { tprlo = (struct fc_tprlo_s *) (fc_frame + 1); - num_pages = (bfa_os_ntohs(tprlo->payload_len) - 4) / 16; + num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16; } return num_pages; } @@ -672,11 +672,11 @@ fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc, fc_els_rsp_build(fchs, d_id, s_id, ox_id); - bfa_os_memset(tprlo_acc, 0, (num_pages * 16) + 4); + memset(tprlo_acc, 0, (num_pages * 16) + 4); tprlo_acc->command = FC_ELS_ACC; tprlo_acc->page_len = 0x10; - tprlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4); + tprlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4); for (page = 0; page < num_pages; page++) { tprlo_acc->tprlo_acc_params[page].opa_valid = 0; @@ -685,7 +685,7 @@ fc_tprlo_acc_build(struct fchs_s *fchs, struct 
fc_tprlo_acc_s *tprlo_acc, tprlo_acc->tprlo_acc_params[page].orig_process_assc = 0; tprlo_acc->tprlo_acc_params[page].resp_process_assc = 0; } - return bfa_os_ntohs(tprlo_acc->payload_len); + return be16_to_cpu(tprlo_acc->payload_len); } u16 @@ -696,10 +696,10 @@ fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id, fc_els_rsp_build(fchs, d_id, s_id, ox_id); - bfa_os_memset(prlo_acc, 0, (num_pages * 16) + 4); + memset(prlo_acc, 0, (num_pages * 16) + 4); prlo_acc->command = FC_ELS_ACC; prlo_acc->page_len = 0x10; - prlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4); + prlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4); for (page = 0; page < num_pages; page++) { prlo_acc->prlo_acc_params[page].opa_valid = 0; @@ -709,7 +709,7 @@ fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id, prlo_acc->prlo_acc_params[page].resp_process_assc = 0; } - return bfa_os_ntohs(prlo_acc->payload_len); + return be16_to_cpu(prlo_acc->payload_len); } u16 @@ -718,7 +718,7 @@ fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id, { fc_els_req_build(fchs, d_id, s_id, ox_id); - bfa_os_memset(rnid, 0, sizeof(struct fc_rnid_cmd_s)); + memset(rnid, 0, sizeof(struct fc_rnid_cmd_s)); rnid->els_cmd.els_code = FC_ELS_RNID; rnid->node_id_data_format = data_format; @@ -732,7 +732,7 @@ fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id, struct fc_rnid_common_id_data_s *common_id_data, struct fc_rnid_general_topology_data_s *gen_topo_data) { - bfa_os_memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s)); + memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s)); fc_els_rsp_build(fchs, d_id, s_id, ox_id); @@ -745,7 +745,7 @@ fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id, if (data_format == RNID_NODEID_DATA_FORMAT_DISCOVERY) { rnid_acc->specific_id_data_length = sizeof(struct fc_rnid_general_topology_data_s); - bfa_os_assign(rnid_acc->gen_topology_data, *gen_topo_data); + rnid_acc->gen_topology_data = *gen_topo_data; return sizeof(struct fc_rnid_acc_s); } else { return sizeof(struct fc_rnid_acc_s) - @@ -760,7 +760,7 @@ fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, u32 d_id, { fc_els_req_build(fchs, d_id, s_id, ox_id); - bfa_os_memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s)); + memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s)); rpsc->els_cmd.els_code = FC_ELS_RPSC; return sizeof(struct fc_rpsc_cmd_s); @@ -775,11 +775,11 @@ fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id, fc_els_req_build(fchs, bfa_os_hton3b(dctlr_id), s_id, 0); - bfa_os_memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s)); + memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s)); rpsc2->els_cmd.els_code = FC_ELS_RPSC; - rpsc2->token = bfa_os_htonl(FC_BRCD_TOKEN); - rpsc2->num_pids = bfa_os_htons(npids); + rpsc2->token = cpu_to_be32(FC_BRCD_TOKEN); + rpsc2->num_pids = cpu_to_be16(npids); for (i = 0; i < npids; i++) rpsc2->pid_list[i].pid = pid_list[i]; @@ -791,18 +791,18 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc, u32 d_id, u32 s_id, u16 ox_id, struct fc_rpsc_speed_info_s *oper_speed) { - bfa_os_memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s)); + memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s)); fc_els_rsp_build(fchs, d_id, s_id, ox_id); rpsc_acc->command = FC_ELS_ACC; - rpsc_acc->num_entries = bfa_os_htons(1); + rpsc_acc->num_entries = cpu_to_be16(1); rpsc_acc->speed_info[0].port_speed_cap = - bfa_os_htons(oper_speed->port_speed_cap); + 
cpu_to_be16(oper_speed->port_speed_cap); rpsc_acc->speed_info[0].port_op_speed = - bfa_os_htons(oper_speed->port_op_speed); + cpu_to_be16(oper_speed->port_op_speed); return sizeof(struct fc_rpsc_acc_s); } @@ -830,12 +830,12 @@ fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, { struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1); - bfa_os_memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s)); + memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s)); pdisc->els_cmd.els_code = FC_ELS_PDISC; fc_els_req_build(fchs, d_id, s_id, ox_id); - pdisc->csp.rxsz = pdisc->class3.rxsz = bfa_os_htons(pdu_size); + pdisc->csp.rxsz = pdisc->class3.rxsz = cpu_to_be16(pdu_size); pdisc->port_name = port_name; pdisc->node_name = node_name; @@ -859,7 +859,7 @@ fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name) if (!pdisc->class3.class_valid) return FC_PARSE_NWWN_NOT_EQUAL; - if (bfa_os_ntohs(pdisc->class3.rxsz) < (FC_MIN_PDUSZ)) + if (be16_to_cpu(pdisc->class3.rxsz) < (FC_MIN_PDUSZ)) return FC_PARSE_RXSZ_INVAL; return FC_PARSE_OK; @@ -873,10 +873,10 @@ fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, int page; fc_els_req_build(fchs, d_id, s_id, ox_id); - bfa_os_memset(prlo, 0, (num_pages * 16) + 4); + memset(prlo, 0, (num_pages * 16) + 4); prlo->command = FC_ELS_PRLO; prlo->page_len = 0x10; - prlo->payload_len = bfa_os_htons((num_pages * 16) + 4); + prlo->payload_len = cpu_to_be16((num_pages * 16) + 4); for (page = 0; page < num_pages; page++) { prlo->prlo_params[page].type = FC_TYPE_FCP; @@ -886,7 +886,7 @@ fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, prlo->prlo_params[page].resp_process_assc = 0; } - return bfa_os_ntohs(prlo->payload_len); + return be16_to_cpu(prlo->payload_len); } u16 @@ -901,7 +901,7 @@ fc_prlo_rsp_parse(struct fchs_s *fchs, int len) if (prlo->command != FC_ELS_ACC) return FC_PARSE_FAILURE; - num_pages = ((bfa_os_ntohs(prlo->payload_len)) - 4) / 16; + num_pages = ((be16_to_cpu(prlo->payload_len)) - 4) / 16; for (page = 0; page < num_pages; page++) { if (prlo->prlo_acc_params[page].type != FC_TYPE_FCP) @@ -931,10 +931,10 @@ fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, int page; fc_els_req_build(fchs, d_id, s_id, ox_id); - bfa_os_memset(tprlo, 0, (num_pages * 16) + 4); + memset(tprlo, 0, (num_pages * 16) + 4); tprlo->command = FC_ELS_TPRLO; tprlo->page_len = 0x10; - tprlo->payload_len = bfa_os_htons((num_pages * 16) + 4); + tprlo->payload_len = cpu_to_be16((num_pages * 16) + 4); for (page = 0; page < num_pages; page++) { tprlo->tprlo_params[page].type = FC_TYPE_FCP; @@ -950,7 +950,7 @@ fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, } } - return bfa_os_ntohs(tprlo->payload_len); + return be16_to_cpu(tprlo->payload_len); } u16 @@ -965,7 +965,7 @@ fc_tprlo_rsp_parse(struct fchs_s *fchs, int len) if (tprlo->command != FC_ELS_ACC) return FC_PARSE_ACC_INVAL; - num_pages = (bfa_os_ntohs(tprlo->payload_len) - 4) / 16; + num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16; for (page = 0; page < num_pages; page++) { if (tprlo->tprlo_acc_params[page].type != FC_TYPE_FCP) @@ -1011,32 +1011,32 @@ fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id, static void fc_gs_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code) { - bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s)); + memset(cthdr, 0, sizeof(struct ct_hdr_s)); cthdr->rev_id = CT_GS3_REVISION; cthdr->gs_type = CT_GSTYPE_DIRSERVICE; cthdr->gs_sub_type = CT_GSSUBTYPE_NAMESERVER; - cthdr->cmd_rsp_code = 
bfa_os_htons(cmd_code); + cthdr->cmd_rsp_code = cpu_to_be16(cmd_code); } static void fc_gs_fdmi_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code) { - bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s)); + memset(cthdr, 0, sizeof(struct ct_hdr_s)); cthdr->rev_id = CT_GS3_REVISION; cthdr->gs_type = CT_GSTYPE_MGMTSERVICE; cthdr->gs_sub_type = CT_GSSUBTYPE_HBA_MGMTSERVER; - cthdr->cmd_rsp_code = bfa_os_htons(cmd_code); + cthdr->cmd_rsp_code = cpu_to_be16(cmd_code); } static void fc_gs_ms_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code, u8 sub_type) { - bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s)); + memset(cthdr, 0, sizeof(struct ct_hdr_s)); cthdr->rev_id = CT_GS3_REVISION; cthdr->gs_type = CT_GSTYPE_MGMTSERVICE; cthdr->gs_sub_type = sub_type; - cthdr->cmd_rsp_code = bfa_os_htons(cmd_code); + cthdr->cmd_rsp_code = cpu_to_be16(cmd_code); } u16 @@ -1050,7 +1050,7 @@ fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); fc_gs_cthdr_build(cthdr, s_id, GS_GID_PN); - bfa_os_memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s)); + memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s)); gidpn->port_name = port_name; return sizeof(struct fcgs_gidpn_req_s) + sizeof(struct ct_hdr_s); } @@ -1066,7 +1066,7 @@ fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); fc_gs_cthdr_build(cthdr, s_id, GS_GPN_ID); - bfa_os_memset(gpnid, 0, sizeof(fcgs_gpnid_req_t)); + memset(gpnid, 0, sizeof(fcgs_gpnid_req_t)); gpnid->dap = port_id; return sizeof(fcgs_gpnid_req_t) + sizeof(struct ct_hdr_s); } @@ -1082,7 +1082,7 @@ fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); fc_gs_cthdr_build(cthdr, s_id, GS_GNN_ID); - bfa_os_memset(gnnid, 0, sizeof(fcgs_gnnid_req_t)); + memset(gnnid, 0, sizeof(fcgs_gnnid_req_t)); gnnid->dap = port_id; return sizeof(fcgs_gnnid_req_t) + sizeof(struct ct_hdr_s); } @@ -1090,7 +1090,7 @@ fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, u16 fc_ct_rsp_parse(struct ct_hdr_s *cthdr) { - if (bfa_os_ntohs(cthdr->cmd_rsp_code) != CT_RSP_ACCEPT) { + if (be16_to_cpu(cthdr->cmd_rsp_code) != CT_RSP_ACCEPT) { if (cthdr->reason_code == CT_RSN_LOGICAL_BUSY) return FC_PARSE_BUSY; else @@ -1108,7 +1108,7 @@ fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, fc_els_req_build(fchs, d_id, s_id, ox_id); - bfa_os_memset(scr, 0, sizeof(struct fc_scr_s)); + memset(scr, 0, sizeof(struct fc_scr_s)); scr->command = FC_ELS_SCR; scr->reg_func = FC_SCR_REG_FUNC_FULL; if (set_br_reg) @@ -1129,7 +1129,7 @@ fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, rscn->pagelen = sizeof(rscn->event[0]); payldlen = sizeof(u32) + rscn->pagelen; - rscn->payldlen = bfa_os_htons(payldlen); + rscn->payldlen = cpu_to_be16(payldlen); rscn->event[0].format = FC_RSCN_FORMAT_PORTID; rscn->event[0].portid = s_id; @@ -1149,14 +1149,14 @@ fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID); - bfa_os_memset(rftid, 0, sizeof(struct fcgs_rftid_req_s)); + memset(rftid, 0, sizeof(struct fcgs_rftid_req_s)); rftid->dap = s_id; /* By default, FCP FC4 Type is registered */ index = FC_TYPE_FCP >> 5; type_value = 1 << (FC_TYPE_FCP % 32); - rftid->fc4_type[index] = bfa_os_htonl(type_value); + rftid->fc4_type[index] = cpu_to_be32(type_value); return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s); } @@ -1172,10 +1172,10 @@ 
fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID); - bfa_os_memset(rftid, 0, sizeof(struct fcgs_rftid_req_s)); + memset(rftid, 0, sizeof(struct fcgs_rftid_req_s)); rftid->dap = s_id; - bfa_os_memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap, + memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap, (bitmap_size < 32 ? bitmap_size : 32)); return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s); @@ -1192,7 +1192,7 @@ fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); fc_gs_cthdr_build(cthdr, s_id, GS_RFF_ID); - bfa_os_memset(rffid, 0, sizeof(struct fcgs_rffid_req_s)); + memset(rffid, 0, sizeof(struct fcgs_rffid_req_s)); rffid->dap = s_id; rffid->fc4ftr_bits = fc4_ftrs; @@ -1214,7 +1214,7 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id, fc_gs_fchdr_build(fchs, d_id, s_id, ox_id); fc_gs_cthdr_build(cthdr, s_id, GS_RSPN_ID); - bfa_os_memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s)); + memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s)); rspnid->dap = s_id; rspnid->spn_len = (u8) strlen((char *)name); @@ -1235,7 +1235,7 @@ fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, u8 fc4_type) fc_gs_cthdr_build(cthdr, s_id, GS_GID_FT); - bfa_os_memset(gidft, 0, sizeof(struct fcgs_gidft_req_s)); + memset(gidft, 0, sizeof(struct fcgs_gidft_req_s)); gidft->fc4_type = fc4_type; gidft->domain_id = 0; gidft->area_id = 0; @@ -1254,7 +1254,7 @@ fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, fc_gs_fchdr_build(fchs, d_id, s_id, 0); fc_gs_cthdr_build(cthdr, s_id, GS_RPN_ID); - bfa_os_memset(rpnid, 0, sizeof(struct fcgs_rpnid_req_s)); + memset(rpnid, 0, sizeof(struct fcgs_rpnid_req_s)); rpnid->port_id = port_id; rpnid->port_name = port_name; @@ -1272,7 +1272,7 @@ fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, fc_gs_fchdr_build(fchs, d_id, s_id, 0); fc_gs_cthdr_build(cthdr, s_id, GS_RNN_ID); - bfa_os_memset(rnnid, 0, sizeof(struct fcgs_rnnid_req_s)); + memset(rnnid, 0, sizeof(struct fcgs_rnnid_req_s)); rnnid->port_id = port_id; rnnid->node_name = node_name; @@ -1291,7 +1291,7 @@ fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, fc_gs_fchdr_build(fchs, d_id, s_id, 0); fc_gs_cthdr_build(cthdr, s_id, GS_RCS_ID); - bfa_os_memset(rcsid, 0, sizeof(struct fcgs_rcsid_req_s)); + memset(rcsid, 0, sizeof(struct fcgs_rcsid_req_s)); rcsid->port_id = port_id; rcsid->cos = cos; @@ -1309,7 +1309,7 @@ fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id, fc_gs_fchdr_build(fchs, d_id, s_id, 0); fc_gs_cthdr_build(cthdr, s_id, GS_RPT_ID); - bfa_os_memset(rptid, 0, sizeof(struct fcgs_rptid_req_s)); + memset(rptid, 0, sizeof(struct fcgs_rptid_req_s)); rptid->port_id = port_id; rptid->port_type = port_type; @@ -1326,7 +1326,7 @@ fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id) fc_gs_fchdr_build(fchs, d_id, s_id, 0); fc_gs_cthdr_build(cthdr, s_id, GS_GA_NXT); - bfa_os_memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s)); + memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s)); ganxt->port_id = port_id; return sizeof(struct ct_hdr_s) + sizeof(struct fcgs_ganxt_req_s); @@ -1365,7 +1365,7 @@ fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask) index = fc4_type >> 5; type_value = 1 << (fc4_type % 32); - ptr[index] = bfa_os_htonl(type_value); + ptr[index] = cpu_to_be32(type_value); } @@ -1383,7 +1383,7 @@ fc_gmal_req_build(struct fchs_s 
*fchs, void *pyld, u32 s_id, wwn_t wwn) fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GMAL_CMD, CT_GSSUBTYPE_CFGSERVER); - bfa_os_memset(gmal, 0, sizeof(fcgs_gmal_req_t)); + memset(gmal, 0, sizeof(fcgs_gmal_req_t)); gmal->wwn = wwn; return sizeof(struct ct_hdr_s) + sizeof(fcgs_gmal_req_t); @@ -1403,7 +1403,7 @@ fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn) fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GFN_CMD, CT_GSSUBTYPE_CFGSERVER); - bfa_os_memset(gfn, 0, sizeof(fcgs_gfn_req_t)); + memset(gfn, 0, sizeof(fcgs_gfn_req_t)); gfn->wwn = wwn; return sizeof(struct ct_hdr_s) + sizeof(fcgs_gfn_req_t); diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c index 33c8dd51f47..135c4427801 100644 --- a/drivers/scsi/bfa/bfa_fcpim.c +++ b/drivers/scsi/bfa/bfa_fcpim.c @@ -26,7 +26,7 @@ BFA_MODULE(fcpim); (__l->__stats += __r->__stats) -/** +/* * BFA ITNIM Related definitions */ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim); @@ -72,7 +72,7 @@ static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim); } \ } while (0) -/** +/* * bfa_itnim_sm BFA itnim state machine */ @@ -89,7 +89,7 @@ enum bfa_itnim_event { BFA_ITNIM_SM_QRESUME = 9, /* queue space available */ }; -/** +/* * BFA IOIM related definitions */ #define bfa_ioim_move_to_comp_q(__ioim) do { \ @@ -107,11 +107,11 @@ enum bfa_itnim_event { if ((__fcpim)->profile_start) \ (__fcpim)->profile_start(__ioim); \ } while (0) -/** +/* * hal_ioim_sm */ -/** +/* * IO state machine events */ enum bfa_ioim_event { @@ -136,11 +136,11 @@ enum bfa_ioim_event { }; -/** +/* * BFA TSKIM related definitions */ -/** +/* * task management completion handling */ #define bfa_tskim_qcomp(__tskim, __cbfn) do { \ @@ -165,7 +165,7 @@ enum bfa_tskim_event { BFA_TSKIM_SM_CLEANUP_DONE = 9, /* TM abort completion */ }; -/** +/* * forward declaration for BFA ITNIM functions */ static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim); @@ -183,7 +183,7 @@ static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim); static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim); static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim); -/** +/* * forward declaration of ITNIM state machine */ static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, @@ -217,7 +217,7 @@ static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim, static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim, enum bfa_itnim_event event); -/** +/* * forward declaration for BFA IOIM functions */ static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim); @@ -233,7 +233,7 @@ static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete); static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim); -/** +/* * forward declaration of BFA IO state machine */ static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, @@ -261,7 +261,7 @@ static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event); -/** +/* * forward declaration for BFA TSKIM functions */ static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete); @@ -276,7 +276,7 @@ static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim); static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim); -/** +/* * forward declaration of BFA TSKIM state machine */ static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, @@ -294,11 +294,11 @@ static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, static void bfa_tskim_sm_hcb(struct 
bfa_tskim_s *tskim, enum bfa_tskim_event event); -/** +/* * hal_fcpim_mod BFA FCP Initiator Mode module */ -/** +/* * Compute and return memory needed by FCP(im) module. */ static void @@ -307,7 +307,7 @@ bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, { bfa_itnim_meminfo(cfg, km_len, dm_len); - /** + /* * IO memory */ if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN) @@ -320,7 +320,7 @@ bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, *dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN; - /** + /* * task management command memory */ if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN) @@ -463,7 +463,7 @@ bfa_fcpim_port_iostats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *stats, struct bfa_itnim_s *itnim; /* accumulate IO stats from itnim */ - bfa_os_memset(stats, 0, sizeof(struct bfa_itnim_iostats_s)); + memset(stats, 0, sizeof(struct bfa_itnim_iostats_s)); list_for_each_safe(qe, qen, &fcpim->itnim_q) { itnim = (struct bfa_itnim_s *) qe; if (itnim->rport->rport_info.lp_tag != lp_tag) @@ -480,7 +480,7 @@ bfa_fcpim_get_modstats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *modstats) struct bfa_itnim_s *itnim; /* accumulate IO stats from itnim */ - bfa_os_memset(modstats, 0, sizeof(struct bfa_itnim_iostats_s)); + memset(modstats, 0, sizeof(struct bfa_itnim_iostats_s)); list_for_each_safe(qe, qen, &fcpim->itnim_q) { itnim = (struct bfa_itnim_s *) qe; bfa_fcpim_add_stats(modstats, &(itnim->stats)); @@ -560,7 +560,7 @@ bfa_fcpim_clr_modstats(struct bfa_s *bfa) itnim = (struct bfa_itnim_s *) qe; bfa_itnim_clear_stats(itnim); } - bfa_os_memset(&fcpim->del_itn_stats, 0, + memset(&fcpim->del_itn_stats, 0, sizeof(struct bfa_fcpim_del_itn_stats_s)); return BFA_STATUS_OK; @@ -604,11 +604,11 @@ bfa_fcpim_set_ioredirect(struct bfa_s *bfa, bfa_boolean_t state) -/** +/* * BFA ITNIM module state machine functions */ -/** +/* * Beginning/unallocated state - no events expected. */ static void @@ -629,7 +629,7 @@ bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) } } -/** +/* * Beginning state, only online event expected. */ static void @@ -660,7 +660,7 @@ bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) } } -/** +/* * Waiting for itnim create response from firmware. */ static void @@ -732,7 +732,7 @@ bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim, } } -/** +/* * Waiting for itnim create response from firmware, a delete is pending. */ static void @@ -760,7 +760,7 @@ bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim, } } -/** +/* * Online state - normal parking state. */ static void @@ -802,7 +802,7 @@ bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) } } -/** +/* * Second level error recovery need. */ static void @@ -833,7 +833,7 @@ bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) } } -/** +/* * Going offline. Waiting for active IO cleanup. */ static void @@ -870,7 +870,7 @@ bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim, } } -/** +/* * Deleting itnim. Waiting for active IO cleanup. */ static void @@ -898,7 +898,7 @@ bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim, } } -/** +/* * Rport offline. Fimrware itnim is being deleted - awaiting f/w response. */ static void @@ -955,7 +955,7 @@ bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim, } } -/** +/* * Offline state. */ static void @@ -987,7 +987,7 @@ bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event) } } -/** +/* * IOC h/w failed state. 
*/ static void @@ -1023,7 +1023,7 @@ bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim, } } -/** +/* * Itnim is deleted, waiting for firmware response to delete. */ static void @@ -1068,7 +1068,7 @@ bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim, } } -/** +/* * Initiate cleanup of all IOs on an IOC failure. */ static void @@ -1088,7 +1088,7 @@ bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim) bfa_ioim_iocdisable(ioim); } - /** + /* * For IO request in pending queue, we pretend an early timeout. */ list_for_each_safe(qe, qen, &itnim->pending_q) { @@ -1102,7 +1102,7 @@ bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim) } } -/** +/* * IO cleanup completion */ static void @@ -1114,7 +1114,7 @@ bfa_itnim_cleanp_comp(void *itnim_cbarg) bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP); } -/** +/* * Initiate cleanup of all IOs. */ static void @@ -1129,7 +1129,7 @@ bfa_itnim_cleanup(struct bfa_itnim_s *itnim) list_for_each_safe(qe, qen, &itnim->io_q) { ioim = (struct bfa_ioim_s *) qe; - /** + /* * Move IO to a cleanup queue from active queue so that a later * TM will not pickup this IO. */ @@ -1176,7 +1176,7 @@ __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete) bfa_cb_itnim_sler(itnim->ditn); } -/** +/* * Call to resume any I/O requests waiting for room in request queue. */ static void @@ -1190,7 +1190,7 @@ bfa_itnim_qresume(void *cbarg) -/** +/* * bfa_itnim_public */ @@ -1210,7 +1210,7 @@ void bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len, u32 *dm_len) { - /** + /* * ITN memory */ *km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s); @@ -1229,7 +1229,7 @@ bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) fcpim->itnim_arr = itnim; for (i = 0; i < fcpim->num_itnims; i++, itnim++) { - bfa_os_memset(itnim, 0, sizeof(struct bfa_itnim_s)); + memset(itnim, 0, sizeof(struct bfa_itnim_s)); itnim->bfa = bfa; itnim->fcpim = fcpim; itnim->reqq = BFA_REQQ_QOS_LO; @@ -1264,7 +1264,7 @@ bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim) itnim->msg_no++; - /** + /* * check for room in queue to send request now */ m = bfa_reqq_next(itnim->bfa, itnim->reqq); @@ -1281,7 +1281,7 @@ bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim) m->msg_no = itnim->msg_no; bfa_stats(itnim, fw_create); - /** + /* * queue I/O message to firmware */ bfa_reqq_produce(itnim->bfa, itnim->reqq); @@ -1293,7 +1293,7 @@ bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim) { struct bfi_itnim_delete_req_s *m; - /** + /* * check for room in queue to send request now */ m = bfa_reqq_next(itnim->bfa, itnim->reqq); @@ -1307,14 +1307,14 @@ bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim) m->fw_handle = itnim->rport->fw_handle; bfa_stats(itnim, fw_delete); - /** + /* * queue I/O message to firmware */ bfa_reqq_produce(itnim->bfa, itnim->reqq); return BFA_TRUE; } -/** +/* * Cleanup all pending failed inflight requests. */ static void @@ -1329,7 +1329,7 @@ bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov) } } -/** +/* * Start all pending IO requests. */ static void @@ -1339,12 +1339,12 @@ bfa_itnim_iotov_online(struct bfa_itnim_s *itnim) bfa_itnim_iotov_stop(itnim); - /** + /* * Abort all inflight IO requests in the queue */ bfa_itnim_delayed_comp(itnim, BFA_FALSE); - /** + /* * Start all pending IO requests. 
*/ while (!list_empty(&itnim->pending_q)) { @@ -1354,7 +1354,7 @@ bfa_itnim_iotov_online(struct bfa_itnim_s *itnim) } } -/** +/* * Fail all pending IO requests */ static void @@ -1362,12 +1362,12 @@ bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim) { struct bfa_ioim_s *ioim; - /** + /* * Fail all inflight IO requests in the queue */ bfa_itnim_delayed_comp(itnim, BFA_TRUE); - /** + /* * Fail any pending IO requests. */ while (!list_empty(&itnim->pending_q)) { @@ -1377,7 +1377,7 @@ bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim) } } -/** +/* * IO TOV timer callback. Fail any pending IO requests. */ static void @@ -1392,7 +1392,7 @@ bfa_itnim_iotov(void *itnim_arg) bfa_cb_itnim_tov(itnim->ditn); } -/** +/* * Start IO TOV timer for failing back pending IO requests in offline state. */ static void @@ -1407,7 +1407,7 @@ bfa_itnim_iotov_start(struct bfa_itnim_s *itnim) } } -/** +/* * Stop IO TOV timer. */ static void @@ -1419,7 +1419,7 @@ bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim) } } -/** +/* * Stop IO TOV timer. */ static void @@ -1459,11 +1459,11 @@ bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim) -/** +/* * bfa_itnim_public */ -/** +/* * Itnim interrupt processing. */ void @@ -1509,7 +1509,7 @@ bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) -/** +/* * bfa_itnim_api */ @@ -1552,7 +1552,7 @@ bfa_itnim_offline(struct bfa_itnim_s *itnim) bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE); } -/** +/* * Return true if itnim is considered offline for holding off IO request. * IO is not held if itnim is being deleted. */ @@ -1597,17 +1597,17 @@ void bfa_itnim_clear_stats(struct bfa_itnim_s *itnim) { int j; - bfa_os_memset(&itnim->stats, 0, sizeof(itnim->stats)); - bfa_os_memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile)); + memset(&itnim->stats, 0, sizeof(itnim->stats)); + memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile)); for (j = 0; j < BFA_IOBUCKET_MAX; j++) itnim->ioprofile.io_latency.min[j] = ~0; } -/** +/* * BFA IO module state machine functions */ -/** +/* * IO is not started (unallocated). */ static void @@ -1657,7 +1657,7 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) break; case BFA_IOIM_SM_ABORT: - /** + /* * IO in pending queue can get abort requests. Complete abort * requests immediately. */ @@ -1672,7 +1672,7 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) } } -/** +/* * IO is waiting for SG pages. */ static void @@ -1719,7 +1719,7 @@ bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) } } -/** +/* * IO is active. */ static void @@ -1803,7 +1803,7 @@ bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) } } -/** +/* * IO is retried with new tag. */ static void @@ -1844,7 +1844,7 @@ bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) break; case BFA_IOIM_SM_ABORT: - /** in this state IO abort is done. + /* in this state IO abort is done. * Waiting for IO tag resource free. */ bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free); @@ -1857,7 +1857,7 @@ bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) } } -/** +/* * IO is being aborted, waiting for completion from firmware. */ static void @@ -1919,7 +1919,7 @@ bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) } } -/** +/* * IO is being cleaned up (implicit abort), waiting for completion from * firmware. 
*/ @@ -1937,7 +1937,7 @@ bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) break; case BFA_IOIM_SM_ABORT: - /** + /* * IO is already being aborted implicitly */ ioim->io_cbfn = __bfa_cb_ioim_abort; @@ -1969,7 +1969,7 @@ bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) break; case BFA_IOIM_SM_CLEANUP: - /** + /* * IO can be in cleanup state already due to TM command. * 2nd cleanup request comes from ITN offline event. */ @@ -1980,7 +1980,7 @@ bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) } } -/** +/* * IO is waiting for room in request CQ */ static void @@ -2024,7 +2024,7 @@ bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) } } -/** +/* * Active IO is being aborted, waiting for room in request CQ. */ static void @@ -2075,7 +2075,7 @@ bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) } } -/** +/* * Active IO is being cleaned up, waiting for room in request CQ. */ static void @@ -2091,7 +2091,7 @@ bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) break; case BFA_IOIM_SM_ABORT: - /** + /* * IO is alraedy being cleaned up implicitly */ ioim->io_cbfn = __bfa_cb_ioim_abort; @@ -2125,7 +2125,7 @@ bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) } } -/** +/* * IO bfa callback is pending. */ static void @@ -2152,7 +2152,7 @@ bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) } } -/** +/* * IO bfa callback is pending. IO resource cannot be freed. */ static void @@ -2185,7 +2185,7 @@ bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) } } -/** +/* * IO is completed, waiting resource free from firmware. */ static void @@ -2214,7 +2214,7 @@ bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event) -/** +/* * hal_ioim_private */ @@ -2247,7 +2247,7 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete) m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg; if (m->io_status == BFI_IOIM_STS_OK) { - /** + /* * setup sense information, if present */ if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) && @@ -2256,15 +2256,15 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete) snsinfo = ioim->iosp->snsinfo; } - /** + /* * setup residue value correctly for normal completions */ if (m->resid_flags == FCP_RESID_UNDER) { - residue = bfa_os_ntohl(m->residue); + residue = be32_to_cpu(m->residue); bfa_stats(ioim->itnim, iocomp_underrun); } if (m->resid_flags == FCP_RESID_OVER) { - residue = bfa_os_ntohl(m->residue); + residue = be32_to_cpu(m->residue); residue = -residue; bfa_stats(ioim->itnim, iocomp_overrun); } @@ -2327,7 +2327,7 @@ bfa_ioim_sgpg_alloced(void *cbarg) bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED); } -/** +/* * Send I/O request to firmware. 
*/ static bfa_boolean_t @@ -2343,7 +2343,7 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim) struct scatterlist *sg; struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio; - /** + /* * check for room in queue to send request now */ m = bfa_reqq_next(ioim->bfa, ioim->reqq); @@ -2354,14 +2354,14 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim) return BFA_FALSE; } - /** + /* * build i/o request message next */ - m->io_tag = bfa_os_htons(ioim->iotag); + m->io_tag = cpu_to_be16(ioim->iotag); m->rport_hdl = ioim->itnim->rport->fw_handle; m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio); - /** + /* * build inline IO SG element here */ sge = &m->sges[0]; @@ -2387,18 +2387,17 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim) sge->flags = BFI_SGE_PGDLEN; bfa_sge_to_be(sge); - /** + /* * set up I/O command parameters */ - bfa_os_assign(m->cmnd, cmnd_z0); + m->cmnd = cmnd_z0; m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio); m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio); - bfa_os_assign(m->cmnd.cdb, - *(scsi_cdb_t *)bfa_cb_ioim_get_cdb(ioim->dio)); + m->cmnd.cdb = *(scsi_cdb_t *)bfa_cb_ioim_get_cdb(ioim->dio); fcp_dl = bfa_cb_ioim_get_size(ioim->dio); - m->cmnd.fcp_dl = bfa_os_htonl(fcp_dl); + m->cmnd.fcp_dl = cpu_to_be32(fcp_dl); - /** + /* * set up I/O message header */ switch (m->cmnd.iodir) { @@ -2427,28 +2426,28 @@ bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim) m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio); m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio); - /** + /* * Handle large CDB (>16 bytes). */ m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) - FCP_CMND_CDB_LEN) / sizeof(u32); if (m->cmnd.addl_cdb_len) { - bfa_os_memcpy(&m->cmnd.cdb + 1, (scsi_cdb_t *) + memcpy(&m->cmnd.cdb + 1, (scsi_cdb_t *) bfa_cb_ioim_get_cdb(ioim->dio) + 1, m->cmnd.addl_cdb_len * sizeof(u32)); fcp_cmnd_fcpdl(&m->cmnd) = - bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio)); + cpu_to_be32(bfa_cb_ioim_get_size(ioim->dio)); } #endif - /** + /* * queue I/O message to firmware */ bfa_reqq_produce(ioim->bfa, ioim->reqq); return BFA_TRUE; } -/** +/* * Setup any additional SG pages needed.Inline SG element is setup * at queuing time. */ @@ -2459,7 +2458,7 @@ bfa_ioim_sge_setup(struct bfa_ioim_s *ioim) bfa_assert(ioim->nsges > BFI_SGE_INLINE); - /** + /* * allocate SG pages needed */ nsgpgs = BFA_SGPG_NPAGE(ioim->nsges); @@ -2508,7 +2507,7 @@ bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim) sge->sg_len = sg_dma_len(sg); pgcumsz += sge->sg_len; - /** + /* * set flags */ if (i < (nsges - 1)) @@ -2523,7 +2522,7 @@ bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim) sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg); - /** + /* * set the link element of each page */ if (sgeid == ioim->nsges) { @@ -2540,7 +2539,7 @@ bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim) } while (sgeid < ioim->nsges); } -/** +/* * Send I/O abort request to firmware. 
*/ static bfa_boolean_t @@ -2549,14 +2548,14 @@ bfa_ioim_send_abort(struct bfa_ioim_s *ioim) struct bfi_ioim_abort_req_s *m; enum bfi_ioim_h2i msgop; - /** + /* * check for room in queue to send request now */ m = bfa_reqq_next(ioim->bfa, ioim->reqq); if (!m) return BFA_FALSE; - /** + /* * build i/o request message next */ if (ioim->iosp->abort_explicit) @@ -2565,17 +2564,17 @@ bfa_ioim_send_abort(struct bfa_ioim_s *ioim) msgop = BFI_IOIM_H2I_IOCLEANUP_REQ; bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa)); - m->io_tag = bfa_os_htons(ioim->iotag); + m->io_tag = cpu_to_be16(ioim->iotag); m->abort_tag = ++ioim->abort_tag; - /** + /* * queue I/O message to firmware */ bfa_reqq_produce(ioim->bfa, ioim->reqq); return BFA_TRUE; } -/** +/* * Call to resume any I/O requests waiting for room in request queue. */ static void @@ -2591,7 +2590,7 @@ bfa_ioim_qresume(void *cbarg) static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim) { - /** + /* * Move IO from itnim queue to fcpim global queue since itnim will be * freed. */ @@ -2624,13 +2623,13 @@ bfa_ioim_is_abortable(struct bfa_ioim_s *ioim) return BFA_TRUE; } -/** +/* * or after the link comes back. */ void bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov) { - /** + /* * If path tov timer expired, failback with PATHTOV status - these * IO requests are not normally retried by IO stack. * @@ -2645,7 +2644,7 @@ bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov) } bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim); - /** + /* * Move IO to fcpim global queue since itnim will be * freed. */ @@ -2655,11 +2654,11 @@ bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov) -/** +/* * hal_ioim_friend */ -/** +/* * Memory allocation and initialization. */ void @@ -2671,7 +2670,7 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) u8 *snsinfo; u32 snsbufsz; - /** + /* * claim memory first */ ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo); @@ -2682,7 +2681,7 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) fcpim->ioim_sp_arr = iosp; bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs); - /** + /* * Claim DMA memory for per IO sense data. */ snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN; @@ -2694,7 +2693,7 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) snsinfo = fcpim->snsbase.kva; bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa); - /** + /* * Initialize ioim free queues */ INIT_LIST_HEAD(&fcpim->ioim_free_q); @@ -2706,7 +2705,7 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) /* * initialize IOIM */ - bfa_os_memset(ioim, 0, sizeof(struct bfa_ioim_s)); + memset(ioim, 0, sizeof(struct bfa_ioim_s)); ioim->iotag = i; ioim->bfa = fcpim->bfa; ioim->fcpim = fcpim; @@ -2723,7 +2722,7 @@ bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) } } -/** +/* * Driver detach time call. 
*/ void @@ -2740,7 +2739,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) u16 iotag; enum bfa_ioim_event evt = BFA_IOIM_SM_COMP; - iotag = bfa_os_ntohs(rsp->io_tag); + iotag = be16_to_cpu(rsp->io_tag); ioim = BFA_IOIM_FROM_TAG(fcpim, iotag); bfa_assert(ioim->iotag == iotag); @@ -2750,7 +2749,7 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) bfa_trc(ioim->bfa, rsp->reuse_io_tag); if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active)) - bfa_os_assign(ioim->iosp->comp_rspmsg, *m); + ioim->iosp->comp_rspmsg = *m; switch (rsp->io_status) { case BFI_IOIM_STS_OK: @@ -2823,7 +2822,7 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m) struct bfa_ioim_s *ioim; u16 iotag; - iotag = bfa_os_ntohs(rsp->io_tag); + iotag = be16_to_cpu(rsp->io_tag); ioim = BFA_IOIM_FROM_TAG(fcpim, iotag); bfa_assert(ioim->iotag == iotag); @@ -2837,7 +2836,7 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m) void bfa_ioim_profile_start(struct bfa_ioim_s *ioim) { - ioim->start_time = bfa_os_get_clock(); + ioim->start_time = jiffies; } void @@ -2845,7 +2844,7 @@ bfa_ioim_profile_comp(struct bfa_ioim_s *ioim) { u32 fcp_dl = bfa_cb_ioim_get_size(ioim->dio); u32 index = bfa_ioim_get_index(fcp_dl); - u64 end_time = bfa_os_get_clock(); + u64 end_time = jiffies; struct bfa_itnim_latency_s *io_lat = &(ioim->itnim->ioprofile.io_latency); u32 val = (u32)(end_time - ioim->start_time); @@ -2859,7 +2858,7 @@ bfa_ioim_profile_comp(struct bfa_ioim_s *ioim) io_lat->max[index] : val; io_lat->avg[index] += val; } -/** +/* * Called by itnim to clean up IO while going offline. */ void @@ -2882,7 +2881,7 @@ bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim) bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP); } -/** +/* * IOC failure handling. */ void @@ -2893,7 +2892,7 @@ bfa_ioim_iocdisable(struct bfa_ioim_s *ioim) bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL); } -/** +/* * IO offline TOV popped. Fail the pending IO. */ void @@ -2905,11 +2904,11 @@ bfa_ioim_tov(struct bfa_ioim_s *ioim) -/** +/* * hal_ioim_api */ -/** +/* * Allocate IOIM resource for initiator mode I/O request. */ struct bfa_ioim_s * @@ -2919,7 +2918,7 @@ bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio, struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); struct bfa_ioim_s *ioim; - /** + /* * alocate IOIM resource */ bfa_q_deq(&fcpim->ioim_free_q, &ioim); @@ -2970,7 +2969,7 @@ bfa_ioim_start(struct bfa_ioim_s *ioim) bfa_ioim_cb_profile_start(ioim->fcpim, ioim); - /** + /* * Obtain the queue over which this request has to be issued */ ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ? @@ -2980,7 +2979,7 @@ bfa_ioim_start(struct bfa_ioim_s *ioim) bfa_sm_send_event(ioim, BFA_IOIM_SM_START); } -/** +/* * Driver I/O abort request. */ bfa_status_t @@ -2999,11 +2998,11 @@ bfa_ioim_abort(struct bfa_ioim_s *ioim) } -/** +/* * BFA TSKIM state machine functions */ -/** +/* * Task management command beginning state. */ static void @@ -3016,7 +3015,7 @@ bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) bfa_sm_set_state(tskim, bfa_tskim_sm_active); bfa_tskim_gather_ios(tskim); - /** + /* * If device is offline, do not send TM on wire. Just cleanup * any pending IO requests and complete TM request. */ @@ -3040,7 +3039,7 @@ bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) } } -/** +/* * brief * TM command is active, awaiting completion from firmware to * cleanup IO requests in TM scope. 
@@ -3077,7 +3076,7 @@ bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) } } -/** +/* * An active TM is being cleaned up since ITN is offline. Awaiting cleanup * completion event from firmware. */ @@ -3088,7 +3087,7 @@ bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) switch (event) { case BFA_TSKIM_SM_DONE: - /** + /* * Ignore and wait for ABORT completion from firmware. */ break; @@ -3121,7 +3120,7 @@ bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) break; case BFA_TSKIM_SM_CLEANUP: - /** + /* * Ignore, TM command completed on wire. * Notify TM conmpletion on IO cleanup completion. */ @@ -3138,7 +3137,7 @@ bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) } } -/** +/* * Task management command is waiting for room in request CQ */ static void @@ -3153,7 +3152,7 @@ bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) break; case BFA_TSKIM_SM_CLEANUP: - /** + /* * No need to send TM on wire since ITN is offline. */ bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup); @@ -3173,7 +3172,7 @@ bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) } } -/** +/* * Task management command is active, awaiting for room in request CQ * to send clean up request. */ @@ -3186,7 +3185,7 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, switch (event) { case BFA_TSKIM_SM_DONE: bfa_reqq_wcancel(&tskim->reqq_wait); - /** + /* * * Fall through !!! */ @@ -3208,7 +3207,7 @@ bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim, } } -/** +/* * BFA callback is pending */ static void @@ -3236,7 +3235,7 @@ bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event) -/** +/* * hal_tskim_private */ @@ -3289,7 +3288,7 @@ bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun) return BFA_FALSE; } -/** +/* * Gather affected IO requests and task management commands. */ static void @@ -3301,7 +3300,7 @@ bfa_tskim_gather_ios(struct bfa_tskim_s *tskim) INIT_LIST_HEAD(&tskim->io_q); - /** + /* * Gather any active IO requests first. */ list_for_each_safe(qe, qen, &itnim->io_q) { @@ -3313,7 +3312,7 @@ bfa_tskim_gather_ios(struct bfa_tskim_s *tskim) } } - /** + /* * Failback any pending IO requests immediately. */ list_for_each_safe(qe, qen, &itnim->pending_q) { @@ -3327,7 +3326,7 @@ bfa_tskim_gather_ios(struct bfa_tskim_s *tskim) } } -/** +/* * IO cleanup completion */ static void @@ -3339,7 +3338,7 @@ bfa_tskim_cleanp_comp(void *tskim_cbarg) bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE); } -/** +/* * Gather affected IO requests and task management commands. */ static void @@ -3359,7 +3358,7 @@ bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim) bfa_wc_wait(&tskim->wc); } -/** +/* * Send task management request to firmware. 
*/ static bfa_boolean_t @@ -3368,33 +3367,33 @@ bfa_tskim_send(struct bfa_tskim_s *tskim) struct bfa_itnim_s *itnim = tskim->itnim; struct bfi_tskim_req_s *m; - /** + /* * check for room in queue to send request now */ m = bfa_reqq_next(tskim->bfa, itnim->reqq); if (!m) return BFA_FALSE; - /** + /* * build i/o request message next */ bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ, bfa_lpuid(tskim->bfa)); - m->tsk_tag = bfa_os_htons(tskim->tsk_tag); + m->tsk_tag = cpu_to_be16(tskim->tsk_tag); m->itn_fhdl = tskim->itnim->rport->fw_handle; m->t_secs = tskim->tsecs; m->lun = tskim->lun; m->tm_flags = tskim->tm_cmnd; - /** + /* * queue I/O message to firmware */ bfa_reqq_produce(tskim->bfa, itnim->reqq); return BFA_TRUE; } -/** +/* * Send abort request to cleanup an active TM to firmware. */ static bfa_boolean_t @@ -3403,29 +3402,29 @@ bfa_tskim_send_abort(struct bfa_tskim_s *tskim) struct bfa_itnim_s *itnim = tskim->itnim; struct bfi_tskim_abortreq_s *m; - /** + /* * check for room in queue to send request now */ m = bfa_reqq_next(tskim->bfa, itnim->reqq); if (!m) return BFA_FALSE; - /** + /* * build i/o request message next */ bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ, bfa_lpuid(tskim->bfa)); - m->tsk_tag = bfa_os_htons(tskim->tsk_tag); + m->tsk_tag = cpu_to_be16(tskim->tsk_tag); - /** + /* * queue I/O message to firmware */ bfa_reqq_produce(tskim->bfa, itnim->reqq); return BFA_TRUE; } -/** +/* * Call to resume task management cmnd waiting for room in request queue. */ static void @@ -3437,7 +3436,7 @@ bfa_tskim_qresume(void *cbarg) bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME); } -/** +/* * Cleanup IOs associated with a task mangement command on IOC failures. */ static void @@ -3454,11 +3453,11 @@ bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim) -/** +/* * hal_tskim_friend */ -/** +/* * Notification on completions from related ioim. */ void @@ -3467,7 +3466,7 @@ bfa_tskim_iodone(struct bfa_tskim_s *tskim) bfa_wc_down(&tskim->wc); } -/** +/* * Handle IOC h/w failure notification from itnim. */ void @@ -3478,7 +3477,7 @@ bfa_tskim_iocdisable(struct bfa_tskim_s *tskim) bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL); } -/** +/* * Cleanup TM command and associated IOs as part of ITNIM offline. */ void @@ -3489,7 +3488,7 @@ bfa_tskim_cleanup(struct bfa_tskim_s *tskim) bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP); } -/** +/* * Memory allocation and initialization. */ void @@ -3507,7 +3506,7 @@ bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) /* * initialize TSKIM */ - bfa_os_memset(tskim, 0, sizeof(struct bfa_tskim_s)); + memset(tskim, 0, sizeof(struct bfa_tskim_s)); tskim->tsk_tag = i; tskim->bfa = fcpim->bfa; tskim->fcpim = fcpim; @@ -3525,7 +3524,7 @@ bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo) void bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim) { - /** + /* * @todo */ } @@ -3536,14 +3535,14 @@ bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa); struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m; struct bfa_tskim_s *tskim; - u16 tsk_tag = bfa_os_ntohs(rsp->tsk_tag); + u16 tsk_tag = be16_to_cpu(rsp->tsk_tag); tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag); bfa_assert(tskim->tsk_tag == tsk_tag); tskim->tsk_status = rsp->tsk_status; - /** + /* * Firmware sends BFI_TSKIM_STS_ABORTED status for abort * requests. All other statuses are for normal completions. 
*/ @@ -3558,7 +3557,7 @@ bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m) -/** +/* * hal_tskim_api */ @@ -3585,7 +3584,7 @@ bfa_tskim_free(struct bfa_tskim_s *tskim) list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q); } -/** +/* * Start a task management command. * * @param[in] tskim BFA task management command instance diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h index 3bf343160aa..db53717eeb4 100644 --- a/drivers/scsi/bfa/bfa_fcpim.h +++ b/drivers/scsi/bfa/bfa_fcpim.h @@ -104,7 +104,7 @@ struct bfa_fcpim_mod_s { bfa_fcpim_profile_t profile_start; }; -/** +/* * BFA IO (initiator mode) */ struct bfa_ioim_s { @@ -137,7 +137,7 @@ struct bfa_ioim_sp_s { struct bfa_tskim_s *tskim; /* Relevant TM cmd */ }; -/** +/* * BFA Task management command (initiator mode) */ struct bfa_tskim_s { @@ -160,7 +160,7 @@ struct bfa_tskim_s { }; -/** +/* * BFA i-t-n (initiator mode) */ struct bfa_itnim_s { @@ -303,7 +303,7 @@ bfa_status_t bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim, struct bfa_itnim_ioprofile_s *ioprofile); #define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq) -/** +/* * BFA completion callback for bfa_itnim_online(). * * @param[in] itnim FCS or driver itnim instance @@ -312,7 +312,7 @@ bfa_status_t bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim, */ void bfa_cb_itnim_online(void *itnim); -/** +/* * BFA completion callback for bfa_itnim_offline(). * * @param[in] itnim FCS or driver itnim instance @@ -323,7 +323,7 @@ void bfa_cb_itnim_offline(void *itnim); void bfa_cb_itnim_tov_begin(void *itnim); void bfa_cb_itnim_tov(void *itnim); -/** +/* * BFA notification to FCS/driver for second level error recovery. * * Atleast one I/O request has timedout and target is unresponsive to @@ -351,7 +351,7 @@ void bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov); -/** +/* * I/O completion notification. * * @param[in] dio driver IO structure @@ -368,7 +368,7 @@ void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio, u8 scsi_status, int sns_len, u8 *sns_info, s32 residue); -/** +/* * I/O good completion notification. * * @param[in] dio driver IO structure @@ -377,7 +377,7 @@ void bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio, */ void bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio); -/** +/* * I/O abort completion notification * * @param[in] dio driver IO that was aborted diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c index 9cebbe30a67..c94502dfac6 100644 --- a/drivers/scsi/bfa/bfa_fcs.c +++ b/drivers/scsi/bfa/bfa_fcs.c @@ -15,7 +15,7 @@ * General Public License for more details. */ -/** +/* * bfa_fcs.c BFA FCS main */ @@ -25,7 +25,7 @@ BFA_TRC_FILE(FCS, FCS); -/** +/* * FCS sub-modules */ struct bfa_fcs_mod_s { @@ -43,7 +43,7 @@ static struct bfa_fcs_mod_s fcs_modules[] = { bfa_fcs_fabric_modexit }, }; -/** +/* * fcs_api BFA FCS API */ @@ -58,11 +58,11 @@ bfa_fcs_exit_comp(void *fcs_cbarg) -/** +/* * fcs_api BFA FCS API */ -/** +/* * fcs attach -- called once to initialize data structures at driver attach time */ void @@ -86,7 +86,7 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad, } } -/** +/* * fcs initialization, called once after bfa initialization is complete */ void @@ -110,7 +110,7 @@ bfa_fcs_init(struct bfa_fcs_s *fcs) } } -/** +/* * Start FCS operations. */ void @@ -119,7 +119,7 @@ bfa_fcs_start(struct bfa_fcs_s *fcs) bfa_fcs_fabric_modstart(fcs); } -/** +/* * brief * FCS driver details initialization. 
* @@ -138,7 +138,7 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs, bfa_fcs_fabric_psymb_init(&fcs->fabric); } -/** +/* * brief * FCS FDMI Driver Parameter Initialization * @@ -154,7 +154,7 @@ bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable) fcs->fdmi_enabled = fdmi_enable; } -/** +/* * brief * FCS instance cleanup and exit. * @@ -196,7 +196,7 @@ bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs) bfa_wc_down(&fcs->wc); } -/** +/* * Fabric module implementation. */ @@ -232,11 +232,11 @@ static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg, u32 rsp_len, u32 resid_len, struct fchs_s *rspfchs); -/** +/* * fcs_fabric_sm fabric state machine functions */ -/** +/* * Fabric state machine events */ enum bfa_fcs_fabric_event { @@ -286,7 +286,7 @@ static void bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); static void bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric, enum bfa_fcs_fabric_event event); -/** +/* * Beginning state before fabric creation. */ static void @@ -312,7 +312,7 @@ bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric, } } -/** +/* * Beginning state before fabric creation. */ static void @@ -345,7 +345,7 @@ bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric, } } -/** +/* * Link is down, awaiting LINK UP event from port. This is also the * first state at fabric creation. */ @@ -375,7 +375,7 @@ bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric, } } -/** +/* * FLOGI is in progress, awaiting FLOGI reply. */ static void @@ -468,7 +468,7 @@ bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric, } } -/** +/* * Authentication is in progress, awaiting authentication results. */ static void @@ -508,7 +508,7 @@ bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric, } } -/** +/* * Authentication failed */ static void @@ -534,7 +534,7 @@ bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric, } } -/** +/* * Port is in loopback mode. */ static void @@ -560,7 +560,7 @@ bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric, } } -/** +/* * There is no attached fabric - private loop or NPort-to-NPort topology. */ static void @@ -593,7 +593,7 @@ bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric, } } -/** +/* * Fabric is online - normal operating state. */ static void @@ -628,7 +628,7 @@ bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric, } } -/** +/* * Exchanging virtual fabric parameters. */ static void @@ -652,7 +652,7 @@ bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric, } } -/** +/* * EVFP exchange complete and VFT tagging is enabled. */ static void @@ -663,7 +663,7 @@ bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric, bfa_trc(fabric->fcs, event); } -/** +/* * Port is isolated after EVFP exchange due to VF_ID mismatch (N and F). */ static void @@ -684,7 +684,7 @@ bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric, fabric->event_arg.swp_vfid); } -/** +/* * Fabric is being deleted, awaiting vport delete completions. */ static void @@ -714,7 +714,7 @@ bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric, -/** +/* * fcs_fabric_private fabric private functions */ @@ -728,7 +728,7 @@ bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric) port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc); } -/** +/* * Port Symbolic Name Creation for base port. 
*/ void @@ -789,7 +789,7 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric) port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0; } -/** +/* * bfa lps login completion callback */ void @@ -867,7 +867,7 @@ bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status) bfa_trc(fabric->fcs, fabric->is_npiv); bfa_trc(fabric->fcs, fabric->is_auth); } -/** +/* * Allocate and send FLOGI. */ static void @@ -897,7 +897,7 @@ bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric) bfa_fcs_fabric_set_opertype(fabric); fabric->stats.fabric_onlines++; - /** + /* * notify online event to base and then virtual ports */ bfa_fcs_lport_online(&fabric->bport); @@ -917,7 +917,7 @@ bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric) bfa_trc(fabric->fcs, fabric->fabric_name); fabric->stats.fabric_offlines++; - /** + /* * notify offline event first to vports and then base port. */ list_for_each_safe(qe, qen, &fabric->vport_q) { @@ -939,7 +939,7 @@ bfa_fcs_fabric_delay(void *cbarg) bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED); } -/** +/* * Delete all vports and wait for vport delete completions. */ static void @@ -965,11 +965,11 @@ bfa_fcs_fabric_delete_comp(void *cbarg) bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP); } -/** +/* * fcs_fabric_public fabric public functions */ -/** +/* * Attach time initialization. */ void @@ -978,9 +978,9 @@ bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs) struct bfa_fcs_fabric_s *fabric; fabric = &fcs->fabric; - bfa_os_memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s)); + memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s)); - /** + /* * Initialize base fabric. */ fabric->fcs = fcs; @@ -989,7 +989,7 @@ bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs) fabric->lps = bfa_lps_alloc(fcs->bfa); bfa_assert(fabric->lps); - /** + /* * Initialize fabric delete completion handler. Fabric deletion is * complete when the last vport delete is complete. */ @@ -1007,7 +1007,7 @@ bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs) bfa_trc(fcs, 0); } -/** +/* * Module cleanup */ void @@ -1017,7 +1017,7 @@ bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs) bfa_trc(fcs, 0); - /** + /* * Cleanup base fabric. */ fabric = &fcs->fabric; @@ -1025,7 +1025,7 @@ bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs) bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE); } -/** +/* * Fabric module start -- kick starts FCS actions */ void @@ -1038,7 +1038,7 @@ bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs) bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START); } -/** +/* * Suspend fabric activity as part of driver suspend. */ void @@ -1064,7 +1064,7 @@ bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric) return fabric->oper_type; } -/** +/* * Link up notification from BFA physical port module. */ void @@ -1074,7 +1074,7 @@ bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric) bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP); } -/** +/* * Link down notification from BFA physical port module. */ void @@ -1084,7 +1084,7 @@ bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric) bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN); } -/** +/* * A child vport is being created in the fabric. * * Call from vport module at vport creation. 
A list of base port and vports @@ -1099,7 +1099,7 @@ void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric, struct bfa_fcs_vport_s *vport) { - /** + /* * - add vport to fabric's vport_q */ bfa_trc(fabric->fcs, fabric->vf_id); @@ -1109,7 +1109,7 @@ bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric, bfa_wc_up(&fabric->wc); } -/** +/* * A child vport is being deleted from fabric. * * Vport is being deleted. @@ -1123,7 +1123,7 @@ bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric, bfa_wc_down(&fabric->wc); } -/** +/* * Base port is deleted. */ void @@ -1133,7 +1133,7 @@ bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric) } -/** +/* * Check if fabric is online. * * param[in] fabric - Fabric instance. This can be a base fabric or vf. @@ -1146,7 +1146,7 @@ bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric) return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online); } -/** +/* * brief * */ @@ -1158,7 +1158,7 @@ bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf, struct bfa_fcs_s *fcs, return BFA_STATUS_OK; } -/** +/* * Lookup for a vport withing a fabric given its pwwn */ struct bfa_fcs_vport_s * @@ -1176,7 +1176,7 @@ bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn) return NULL; } -/** +/* * In a given fabric, return the number of lports. * * param[in] fabric - Fabric instance. This can be a base fabric or vf. @@ -1214,7 +1214,7 @@ bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric) return oui; } -/** +/* * Unsolicited frame receive handling. */ void @@ -1230,7 +1230,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, bfa_trc(fabric->fcs, len); bfa_trc(fabric->fcs, pid); - /** + /* * Look for our own FLOGI frames being looped back. This means an * external loopback cable is in place. Our own FLOGI frames are * sometimes looped back when switch port gets temporarily bypassed. @@ -1242,7 +1242,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, return; } - /** + /* * FLOGI/EVFP exchanges should be consumed by base fabric. */ if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) { @@ -1252,7 +1252,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, } if (fabric->bport.pid == pid) { - /** + /* * All authentication frames should be routed to auth */ bfa_trc(fabric->fcs, els_cmd->els_code); @@ -1266,7 +1266,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, return; } - /** + /* * look for a matching local port ID */ list_for_each(qe, &fabric->vport_q) { @@ -1280,7 +1280,7 @@ bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len); } -/** +/* * Unsolicited frames to be processed by fabric. 
*/ static void @@ -1304,7 +1304,7 @@ bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs, } } -/** +/* * Process incoming FLOGI */ static void @@ -1329,7 +1329,7 @@ bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric, return; } - fabric->bb_credit = bfa_os_ntohs(flogi->csp.bbcred); + fabric->bb_credit = be16_to_cpu(flogi->csp.bbcred); bport->port_topo.pn2n.rem_port_wwn = flogi->port_name; bport->port_topo.pn2n.reply_oxid = fchs->ox_id; @@ -1351,7 +1351,7 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric) struct fchs_s fchs; fcxp = bfa_fcs_fcxp_alloc(fabric->fcs); - /** + /* * Do not expect this failure -- expect remote node to retry */ if (!fcxp) @@ -1370,7 +1370,7 @@ bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric) FC_MAX_PDUSZ, 0); } -/** +/* * Flogi Acc completion callback. */ static void @@ -1417,130 +1417,7 @@ bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric, } } -/** - * fcs_vf_api virtual fabrics API - */ - -/** - * Enable VF mode. - * - * @param[in] fcs fcs module instance - * @param[in] vf_id default vf_id of port, FC_VF_ID_NULL - * to use standard default vf_id of 1. - * - * @retval BFA_STATUS_OK vf mode is enabled - * @retval BFA_STATUS_BUSY Port is active. Port must be disabled - * before VF mode can be enabled. - */ -bfa_status_t -bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id) -{ - return BFA_STATUS_OK; -} - -/** - * Disable VF mode. - * - * @param[in] fcs fcs module instance - * - * @retval BFA_STATUS_OK vf mode is disabled - * @retval BFA_STATUS_BUSY VFs are present and being used. All - * VFs must be deleted before disabling - * VF mode. - */ -bfa_status_t -bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs) -{ - return BFA_STATUS_OK; -} - -/** - * Create a new VF instance. - * - * A new VF is created using the given VF configuration. A VF is identified - * by VF id. No duplicate VF creation is allowed with the same VF id. Once - * a VF is created, VF is automatically started after link initialization - * and EVFP exchange is completed. - * - * param[in] vf - FCS vf data structure. Memory is - * allocated by caller (driver) - * param[in] fcs - FCS module - * param[in] vf_cfg - VF configuration - * param[in] vf_drv - Opaque handle back to the driver's - * virtual vf structure - * - * retval BFA_STATUS_OK VF creation is successful - * retval BFA_STATUS_FAILED VF creation failed - * retval BFA_STATUS_EEXIST A VF exists with the given vf_id - */ -bfa_status_t -bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs, u16 vf_id, - struct bfa_lport_cfg_s *port_cfg, struct bfad_vf_s *vf_drv) -{ - bfa_trc(fcs, vf_id); - return BFA_STATUS_OK; -} - -/** - * Use this function to delete a BFA VF object. VF object should - * be stopped before this function call. - * - * param[in] vf - pointer to bfa_vf_t. - * - * retval BFA_STATUS_OK On vf deletion success - * retval BFA_STATUS_BUSY VF is not in a stopped state - * retval BFA_STATUS_INPROGRESS VF deletion in in progress - */ -bfa_status_t -bfa_fcs_vf_delete(bfa_fcs_vf_t *vf) -{ - bfa_trc(vf->fcs, vf->vf_id); - return BFA_STATUS_OK; -} - - -/** - * Returns attributes of the given VF. - * - * param[in] vf pointer to bfa_vf_t. - * param[out] vf_attr vf attributes returned - * - * return None - */ -void -bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr) -{ - bfa_trc(vf->fcs, vf->vf_id); -} - -/** - * Return statistics associated with the given vf. - * - * param[in] vf pointer to bfa_vf_t. 
- * param[out] vf_stats vf statistics returned - * - * @return None - */ -void -bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf, struct bfa_vf_stats_s *vf_stats) -{ - bfa_os_memcpy(vf_stats, &vf->stats, sizeof(struct bfa_vf_stats_s)); -} - -/** - * clear statistics associated with the given vf. - * - * param[in] vf pointer to bfa_vf_t. - * - * @return None - */ -void -bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf) -{ - bfa_os_memset(&vf->stats, 0, sizeof(struct bfa_vf_stats_s)); -} - -/** +/* * Returns FCS vf structure for a given vf_id. * * param[in] vf_id - VF_ID @@ -1558,81 +1435,7 @@ bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id) return NULL; } -/** - * Return the list of VFs configured. - * - * param[in] fcs fcs module instance - * param[out] vf_ids returned list of vf_ids - * param[in,out] nvfs in:size of vf_ids array, - * out:total elements present, - * actual elements returned is limited by the size - * - * return Driver VF structure - */ -void -bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs) -{ - bfa_trc(fcs, *nvfs); -} - -/** - * Return the list of all VFs visible from fabric. - * - * param[in] fcs fcs module instance - * param[out] vf_ids returned list of vf_ids - * param[in,out] nvfs in:size of vf_ids array, - * out:total elements present, - * actual elements returned is limited by the size - * - * return Driver VF structure - */ -void -bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs) -{ - bfa_trc(fcs, *nvfs); -} - -/** - * Return the list of local logical ports present in the given VF. - * - * param[in] vf vf for which logical ports are returned - * param[out] lpwwn returned logical port wwn list - * param[in,out] nlports in:size of lpwwn list; - * out:total elements present, - * actual elements returned is limited by the size - */ -void -bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t lpwwn[], int *nlports) -{ - struct list_head *qe; - struct bfa_fcs_vport_s *vport; - int i; - struct bfa_fcs_s *fcs; - - if (vf == NULL || lpwwn == NULL || *nlports == 0) - return; - - fcs = vf->fcs; - - bfa_trc(fcs, vf->vf_id); - bfa_trc(fcs, (u32) *nlports); - - i = 0; - lpwwn[i++] = vf->bport.port_cfg.pwwn; - - list_for_each(qe, &vf->vport_q) { - if (i >= *nlports) - break; - - vport = (struct bfa_fcs_vport_s *) qe; - lpwwn[i++] = vport->lport.port_cfg.pwwn; - } - - bfa_trc(fcs, i); - *nlports = i; -} - -/** +/* * BFA FCS PPORT ( physical port) */ static void @@ -1662,11 +1465,11 @@ bfa_fcs_port_attach(struct bfa_fcs_s *fcs) bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs); } -/** +/* * BFA FCS UF ( Unsolicited Frames) */ -/** +/* * BFA callback for unsolicited frame receive handler. 
* * @param[in] cbarg callback arg for receive handler @@ -1683,7 +1486,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf) struct fc_vft_s *vft; struct bfa_fcs_fabric_s *fabric; - /** + /* * check for VFT header */ if (fchs->routing == FC_RTG_EXT_HDR && @@ -1695,7 +1498,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf) else fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id); - /** + /* * drop frame if vfid is unknown */ if (!fabric) { @@ -1705,7 +1508,7 @@ bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf) return; } - /** + /* * skip vft header */ fchs = (struct fchs_s *) (vft + 1); diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h index d75045df1e7..9cb6a55977c 100644 --- a/drivers/scsi/bfa/bfa_fcs.h +++ b/drivers/scsi/bfa/bfa_fcs.h @@ -196,7 +196,7 @@ struct bfa_fcs_fabric_s { #define bfa_fcs_fabric_is_switched(__f) \ ((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED) -/** +/* * The design calls for a single implementation of base fabric and vf. */ #define bfa_fcs_vf_t struct bfa_fcs_fabric_s @@ -216,7 +216,7 @@ struct bfa_fcs_fabric_s; #define bfa_fcs_lport_t struct bfa_fcs_lport_s -/** +/* * Symbolic Name related defines * Total bytes 255. * Physical Port's symbolic name 128 bytes. @@ -239,7 +239,7 @@ struct bfa_fcs_fabric_s; #define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ 48 #define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ 16 -/** +/* * Get FC port ID for a logical port. */ #define bfa_fcs_lport_get_fcid(_lport) ((_lport)->pid) @@ -262,7 +262,7 @@ bfa_fcs_lport_get_drvport(struct bfa_fcs_lport_s *port) #define bfa_fcs_lport_get_fabric_ipaddr(_lport) \ ((_lport)->fabric->fabric_ip_addr) -/** +/* * bfa fcs port public functions */ @@ -342,7 +342,7 @@ struct bfa_fcs_vport_s { #define bfa_fcs_vport_get_port(vport) \ ((struct bfa_fcs_lport_s *)(&vport->port)) -/** +/* * bfa fcs vport public functions */ bfa_status_t bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, @@ -393,7 +393,7 @@ struct bfa_fcs_rpf_s { enum bfa_port_speed rpsc_speed; /* Current Speed from RPSC. O if RPSC fails */ enum bfa_port_speed assigned_speed; - /** + /* * Speed assigned by the user. will be used if RPSC is * not supported by the rport. 
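
With the stubbed virtual-fabric calls removed, bfa_fcs_vf_lookup() is the one VF entry point that survives, and the unsolicited-frame receive path above is its main user: a frame carrying a VFT extended routing header is routed to the fabric whose vf_id matches, and dropped if no such fabric exists. A condensed sketch of that routing decision, assuming the surrounding file already includes bfa_fcs.h; how vf_id is pulled out of the VFT header is left out here:

/* Sketch only: pick the fabric an incoming frame belongs to. */
static struct bfa_fcs_fabric_s *
example_route_by_vf(struct bfa_fcs_s *fcs, bfa_boolean_t vft_tagged, u16 vf_id)
{
	if (!vft_tagged)
		return &fcs->fabric;		/* untagged frame: base fabric */

	return bfa_fcs_vf_lookup(fcs, vf_id);	/* NULL tells the caller to drop */
}

Because bfa_fcs_vf_t is an alias for the fabric structure, the returned handle is used exactly like the base fabric from this point on.
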
*/ @@ -434,7 +434,7 @@ bfa_fcs_rport_get_halrport(struct bfa_fcs_rport_s *rport) return rport->bfa_rport; } -/** +/* * bfa fcs rport API functions */ bfa_status_t bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn, @@ -573,7 +573,7 @@ bfa_fcs_itnim_get_halitn(struct bfa_fcs_itnim_s *itnim) return itnim->bfa_itnim; } -/** +/* * bfa fcs FCP Initiator mode API functions */ void bfa_fcs_itnim_get_attr(struct bfa_fcs_itnim_s *itnim, @@ -677,22 +677,9 @@ void bfa_fcs_exit(struct bfa_fcs_s *fcs); void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod); void bfa_fcs_start(struct bfa_fcs_s *fcs); -/** +/* * bfa fcs vf public functions */ -bfa_status_t bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id); -bfa_status_t bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs); -bfa_status_t bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs, - u16 vf_id, struct bfa_lport_cfg_s *port_cfg, - struct bfad_vf_s *vf_drv); -bfa_status_t bfa_fcs_vf_delete(bfa_fcs_vf_t *vf); -void bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs); -void bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs); -void bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr); -void bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf, - struct bfa_vf_stats_s *vf_stats); -void bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf); -void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports); bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id); u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric); @@ -729,11 +716,11 @@ u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric); void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs); void bfa_fcs_port_attach(struct bfa_fcs_s *fcs); -/** +/* * BFA FCS callback interfaces */ -/** +/* * fcb Main fcs callbacks */ @@ -742,7 +729,7 @@ struct bfad_vf_s; struct bfad_vport_s; struct bfad_rport_s; -/** +/* * lport callbacks */ struct bfad_port_s *bfa_fcb_lport_new(struct bfad_s *bfad, @@ -754,19 +741,19 @@ void bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles, struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv); -/** +/* * vport callbacks */ void bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s); -/** +/* * rport callbacks */ bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport, struct bfad_rport_s **rport_drv); -/** +/* * itnim callbacks */ void bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim, diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c index 569dfefab70..9662bcdeb41 100644 --- a/drivers/scsi/bfa/bfa_fcs_fcpim.c +++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c @@ -15,7 +15,7 @@ * General Public License for more details. */ -/** +/* * fcpim.c - FCP initiator mode i-t nexus state machine */ @@ -38,7 +38,7 @@ static void bfa_fcs_itnim_prli_response(void *fcsarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs); -/** +/* * fcs_itnim_sm FCS itnim state machine events */ @@ -84,7 +84,7 @@ static struct bfa_sm_table_s itnim_sm_table[] = { {BFA_SM(bfa_fcs_itnim_sm_initiator), BFA_ITNIM_INITIATIOR}, }; -/** +/* * fcs_itnim_sm FCS itnim state machine */ @@ -494,11 +494,11 @@ bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim) -/** +/* * itnim_public FCS ITNIM public interfaces */ -/** +/* * Called by rport when a new rport is created. * * @param[in] rport - remote port. 
@@ -554,7 +554,7 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport) return itnim; } -/** +/* * Called by rport to delete the instance of FCPIM. * * @param[in] rport - remote port. @@ -566,7 +566,7 @@ bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim) bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_DELETE); } -/** +/* * Notification from rport that PLOGI is complete to initiate FC-4 session. */ void @@ -586,7 +586,7 @@ bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim) } } -/** +/* * Called by rport to handle a remote device offline. */ void @@ -596,7 +596,7 @@ bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim) bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_OFFLINE); } -/** +/* * Called by rport when remote port is known to be an initiator from * PRLI received. */ @@ -608,7 +608,7 @@ bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim) bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR); } -/** +/* * Called by rport to check if the itnim is online. */ bfa_status_t @@ -625,7 +625,7 @@ bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim) } } -/** +/* * BFA completion callback for bfa_itnim_online(). */ void @@ -637,7 +637,7 @@ bfa_cb_itnim_online(void *cbarg) bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE); } -/** +/* * BFA completion callback for bfa_itnim_offline(). */ void @@ -649,7 +649,7 @@ bfa_cb_itnim_offline(void *cb_arg) bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE); } -/** +/* * Mark the beginning of PATH TOV handling. IO completion callbacks * are still pending. */ @@ -661,7 +661,7 @@ bfa_cb_itnim_tov_begin(void *cb_arg) bfa_trc(itnim->fcs, itnim->rport->pwwn); } -/** +/* * Mark the end of PATH TOV handling. All pending IOs are already cleaned up. */ void @@ -674,7 +674,7 @@ bfa_cb_itnim_tov(void *cb_arg) itnim_drv->state = ITNIM_STATE_TIMEOUT; } -/** +/* * BFA notification to FCS/driver for second level error recovery. * * Atleast one I/O request has timedout and target is unresponsive to @@ -736,7 +736,7 @@ bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn, if (itnim == NULL) return BFA_STATUS_NO_FCPIM_NEXUS; - bfa_os_memcpy(stats, &itnim->stats, sizeof(struct bfa_itnim_stats_s)); + memcpy(stats, &itnim->stats, sizeof(struct bfa_itnim_stats_s)); return BFA_STATUS_OK; } @@ -753,7 +753,7 @@ bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port, wwn_t rpwwn) if (itnim == NULL) return BFA_STATUS_NO_FCPIM_NEXUS; - bfa_os_memset(&itnim->stats, 0, sizeof(struct bfa_itnim_stats_s)); + memset(&itnim->stats, 0, sizeof(struct bfa_itnim_stats_s)); return BFA_STATUS_OK; } diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c index b522bf30247..377cbfff6f2 100644 --- a/drivers/scsi/bfa/bfa_fcs_lport.c +++ b/drivers/scsi/bfa/bfa_fcs_lport.c @@ -15,10 +15,6 @@ * General Public License for more details. */ -/** - * bfa_fcs_lport.c BFA FCS port - */ - #include "bfa_fcs.h" #include "bfa_fcbuild.h" #include "bfa_fc.h" @@ -26,10 +22,6 @@ BFA_TRC_FILE(FCS, PORT); -/** - * Forward declarations - */ - static void bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, u8 reason_code, u8 reason_code_expl); @@ -72,7 +64,7 @@ static struct { bfa_fcs_lport_n2n_offline}, }; -/** +/* * fcs_port_sm FCS logical port state machine */ @@ -240,7 +232,7 @@ bfa_fcs_lport_sm_deleting( } } -/** +/* * fcs_port_pvt */ @@ -272,7 +264,7 @@ bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs, FC_MAX_PDUSZ, 0); } -/** +/* * Process incoming plogi from a remote port. 
*/ static void @@ -303,7 +295,7 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port, return; } - /** + /* * Direct Attach P2P mode : verify address assigned by the r-port. */ if ((!bfa_fcs_fabric_is_switched(port->fabric)) && @@ -319,12 +311,12 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port, port->pid = rx_fchs->d_id; } - /** + /* * First, check if we know the device by pwwn. */ rport = bfa_fcs_lport_get_rport_by_pwwn(port, plogi->port_name); if (rport) { - /** + /* * Direct Attach P2P mode : handle address assigned by r-port. */ if ((!bfa_fcs_fabric_is_switched(port->fabric)) && @@ -337,37 +329,37 @@ bfa_fcs_lport_plogi(struct bfa_fcs_lport_s *port, return; } - /** + /* * Next, lookup rport by PID. */ rport = bfa_fcs_lport_get_rport_by_pid(port, rx_fchs->s_id); if (!rport) { - /** + /* * Inbound PLOGI from a new device. */ bfa_fcs_rport_plogi_create(port, rx_fchs, plogi); return; } - /** + /* * Rport is known only by PID. */ if (rport->pwwn) { - /** + /* * This is a different device with the same pid. Old device * disappeared. Send implicit LOGO to old device. */ bfa_assert(rport->pwwn != plogi->port_name); bfa_fcs_rport_logo_imp(rport); - /** + /* * Inbound PLOGI from a new device (with old PID). */ bfa_fcs_rport_plogi_create(port, rx_fchs, plogi); return; } - /** + /* * PLOGI crossing each other. */ bfa_assert(rport->pwwn == WWN_NULL); @@ -479,12 +471,12 @@ static void bfa_fs_port_get_gen_topo_data(struct bfa_fcs_lport_s *port, struct fc_rnid_general_topology_data_s *gen_topo_data) { - bfa_os_memset(gen_topo_data, 0, + memset(gen_topo_data, 0, sizeof(struct fc_rnid_general_topology_data_s)); - gen_topo_data->asso_type = bfa_os_htonl(RNID_ASSOCIATED_TYPE_HOST); + gen_topo_data->asso_type = cpu_to_be32(RNID_ASSOCIATED_TYPE_HOST); gen_topo_data->phy_port_num = 0; /* @todo */ - gen_topo_data->num_attached_nodes = bfa_os_htonl(1); + gen_topo_data->num_attached_nodes = cpu_to_be32(1); } static void @@ -598,10 +590,10 @@ bfa_fcs_lport_deleted(struct bfa_fcs_lport_s *port) -/** +/* * fcs_lport_api BFA FCS port API */ -/** +/* * Module initialization */ void @@ -610,7 +602,7 @@ bfa_fcs_lport_modinit(struct bfa_fcs_s *fcs) } -/** +/* * Module cleanup */ void @@ -619,7 +611,7 @@ bfa_fcs_lport_modexit(struct bfa_fcs_s *fcs) bfa_fcs_modexit_comp(fcs); } -/** +/* * Unsolicited frame receive handling. */ void @@ -637,7 +629,7 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport, return; } - /** + /* * First, handle ELSs that donot require a login. */ /* @@ -673,7 +665,7 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport, bfa_fcs_lport_abts_acc(lport, fchs); return; } - /** + /* * look for a matching remote port ID */ rport = bfa_fcs_lport_get_rport_by_pid(lport, pid); @@ -686,7 +678,7 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport, return; } - /** + /* * Only handles ELS frames for now. */ if (fchs->type != FC_TYPE_ELS) { @@ -702,20 +694,20 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport, } if (els_cmd->els_code == FC_ELS_LOGO) { - /** + /* * @todo Handle LOGO frames received. */ return; } if (els_cmd->els_code == FC_ELS_PRLI) { - /** + /* * @todo Handle PRLI frames received. */ return; } - /** + /* * Unhandled ELS frames. Send a LS_RJT. 
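
The incoming-PLOGI path above resolves the sender in a fixed order: first by port WWN, then by FC address (PID), and only then creates a new rport; a known PID whose WWN no longer matches is treated as a new device behind a reused address, so the stale rport gets an implicit LOGO first. A condensed sketch of that ordering, with the P2P address handling and error paths omitted; the payload type and the hand-off call to an existing rport are assumptions of this sketch, not taken from the hunks above:

/* Sketch of the lookup order in bfa_fcs_lport_plogi(); not the full logic. */
static void
example_handle_plogi(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
		     struct fc_logi_s *plogi)
{
	struct bfa_fcs_rport_s *rport;

	rport = bfa_fcs_lport_get_rport_by_pwwn(port, plogi->port_name);
	if (rport) {
		bfa_fcs_rport_plogi(rport, rx_fchs, plogi);	/* known device */
		return;
	}

	rport = bfa_fcs_lport_get_rport_by_pid(port, rx_fchs->s_id);
	if (!rport) {
		bfa_fcs_rport_plogi_create(port, rx_fchs, plogi);
		return;						/* brand new device */
	}

	if (rport->pwwn) {			/* same PID, different WWN */
		bfa_fcs_rport_logo_imp(rport);
		bfa_fcs_rport_plogi_create(port, rx_fchs, plogi);
		return;
	}

	bfa_fcs_rport_plogi(rport, rx_fchs, plogi);	/* PLOGIs crossed */
}
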
*/ bfa_fcs_lport_send_ls_rjt(lport, fchs, FC_LS_RJT_RSN_CMD_NOT_SUPP, @@ -723,7 +715,7 @@ bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport, } -/** +/* * PID based Lookup for a R-Port in the Port R-Port Queue */ struct bfa_fcs_rport_s * @@ -742,7 +734,7 @@ bfa_fcs_lport_get_rport_by_pid(struct bfa_fcs_lport_s *port, u32 pid) return NULL; } -/** +/* * PWWN based Lookup for a R-Port in the Port R-Port Queue */ struct bfa_fcs_rport_s * @@ -761,7 +753,7 @@ bfa_fcs_lport_get_rport_by_pwwn(struct bfa_fcs_lport_s *port, wwn_t pwwn) return NULL; } -/** +/* * NWWN based Lookup for a R-Port in the Port R-Port Queue */ struct bfa_fcs_rport_s * @@ -780,7 +772,7 @@ bfa_fcs_lport_get_rport_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t nwwn) return NULL; } -/** +/* * Called by rport module when new rports are discovered. */ void @@ -792,7 +784,7 @@ bfa_fcs_lport_add_rport( port->num_rports++; } -/** +/* * Called by rport module to when rports are deleted. */ void @@ -807,7 +799,7 @@ bfa_fcs_lport_del_rport( bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELRPORT); } -/** +/* * Called by fabric for base port when fabric login is complete. * Called by vport for virtual ports when FDISC is complete. */ @@ -817,7 +809,7 @@ bfa_fcs_lport_online(struct bfa_fcs_lport_s *port) bfa_sm_send_event(port, BFA_FCS_PORT_SM_ONLINE); } -/** +/* * Called by fabric for base port when fabric goes offline. * Called by vport for virtual ports when virtual port becomes offline. */ @@ -827,7 +819,7 @@ bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port) bfa_sm_send_event(port, BFA_FCS_PORT_SM_OFFLINE); } -/** +/* * Called by fabric to delete base lport and associated resources. * * Called by vport to delete lport and associated resources. Should call @@ -839,7 +831,7 @@ bfa_fcs_lport_delete(struct bfa_fcs_lport_s *port) bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELETE); } -/** +/* * Return TRUE if port is online, else return FALSE */ bfa_boolean_t @@ -848,7 +840,7 @@ bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port) return bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online); } -/** +/* * Attach time initialization of logical ports. */ void @@ -865,7 +857,7 @@ bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs, lport->num_rports = 0; } -/** +/* * Logical port initialization of base or virtual port. * Called by fabric for base port or by vport for virtual ports. */ @@ -878,7 +870,7 @@ bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport, struct bfad_s *bfad = (struct bfad_s *)lport->fcs->bfad; char lpwwn_buf[BFA_STRING_32]; - bfa_os_assign(lport->port_cfg, *port_cfg); + lport->port_cfg = *port_cfg; lport->bfad_port = bfa_fcb_lport_new(lport->fcs->bfad, lport, lport->port_cfg.roles, @@ -894,7 +886,7 @@ bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport, bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE); } -/** +/* * fcs_lport_api */ @@ -934,11 +926,11 @@ bfa_fcs_lport_get_attr( } } -/** +/* * bfa_fcs_lport_fab port fab functions */ -/** +/* * Called by port to initialize fabric services of the base port. */ static void @@ -949,7 +941,7 @@ bfa_fcs_lport_fab_init(struct bfa_fcs_lport_s *port) bfa_fcs_lport_ms_init(port); } -/** +/* * Called by port to notify transition to online state. */ static void @@ -959,7 +951,7 @@ bfa_fcs_lport_fab_online(struct bfa_fcs_lport_s *port) bfa_fcs_lport_scn_online(port); } -/** +/* * Called by port to notify transition to offline state. 
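
bfa_fcs_lport_init() above also drops the bfa_os_assign() wrapper in favor of a plain structure assignment, the same way bfa_os_memset()/bfa_os_memcpy() become memset()/memcpy() throughout these files. A small illustration with a made-up structure name, just to show the two replacements side by side:

#include <linux/string.h>

struct example_port_cfg {			/* hypothetical stand-in */
	int	roles;
	char	sym_name[64];
};

static void example_clear_cfg(struct example_port_cfg *cfg)
{
	memset(cfg, 0, sizeof(*cfg));		/* was: bfa_os_memset(cfg, 0, ...) */
}

static void example_copy_cfg(struct example_port_cfg *dst,
			     const struct example_port_cfg *src)
{
	*dst = *src;				/* was: bfa_os_assign(*dst, *src) */
}
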
*/ static void @@ -970,11 +962,11 @@ bfa_fcs_lport_fab_offline(struct bfa_fcs_lport_s *port) bfa_fcs_lport_ms_offline(port); } -/** +/* * bfa_fcs_lport_n2n functions */ -/** +/* * Called by fcs/port to initialize N2N topology. */ static void @@ -982,7 +974,7 @@ bfa_fcs_lport_n2n_init(struct bfa_fcs_lport_s *port) { } -/** +/* * Called by fcs/port to notify transition to online state. */ static void @@ -1006,7 +998,7 @@ bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port) ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn, sizeof(wwn_t)) > 0) { port->pid = N2N_LOCAL_PID; - /** + /* * First, check if we know the device by pwwn. */ rport = bfa_fcs_lport_get_rport_by_pwwn(port, @@ -1035,7 +1027,7 @@ bfa_fcs_lport_n2n_online(struct bfa_fcs_lport_s *port) } } -/** +/* * Called by fcs/port to notify transition to offline state. */ static void @@ -1094,11 +1086,11 @@ static void bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi, struct bfa_fcs_fdmi_hba_attr_s *hba_attr); static void bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi, struct bfa_fcs_fdmi_port_attr_s *port_attr); -/** +/* * fcs_fdmi_sm FCS FDMI state machine */ -/** +/* * FDMI State Machine events */ enum port_fdmi_event { @@ -1143,7 +1135,7 @@ static void bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi, static void bfa_fcs_lport_fdmi_sm_disabled( struct bfa_fcs_lport_fdmi_s *fdmi, enum port_fdmi_event event); -/** +/* * Start in offline state - awaiting MS to send start. */ static void @@ -1510,7 +1502,7 @@ bfa_fcs_lport_fdmi_sm_online(struct bfa_fcs_lport_fdmi_s *fdmi, bfa_sm_fault(port->fcs, event); } } -/** +/* * FDMI is disabled state. */ static void @@ -1525,7 +1517,7 @@ bfa_fcs_lport_fdmi_sm_disabled(struct bfa_fcs_lport_fdmi_s *fdmi, /* No op State. It can only be enabled at Driver Init. */ } -/** +/* * RHBA : Register HBA Attributes. 
*/ static void @@ -1549,7 +1541,7 @@ bfa_fcs_lport_fdmi_send_rhba(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced) fdmi->fcxp = fcxp; pyld = bfa_fcxp_get_reqbuf(fcxp); - bfa_os_memset(pyld, 0, FC_MAX_PDUSZ); + memset(pyld, 0, FC_MAX_PDUSZ); len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port), FDMI_RHBA); @@ -1584,7 +1576,7 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) bfa_fcs_fdmi_get_hbaattr(fdmi, fcs_hba_attr); rhba->hba_id = bfa_fcs_lport_get_pwwn(port); - rhba->port_list.num_ports = bfa_os_htonl(1); + rhba->port_list.num_ports = cpu_to_be32(1); rhba->port_list.port_entry = bfa_fcs_lport_get_pwwn(port); len = sizeof(rhba->hba_id) + sizeof(rhba->port_list); @@ -1601,86 +1593,69 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) * Node Name */ attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_NODENAME); + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NODENAME); attr->len = sizeof(wwn_t); memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), attr->len); curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; len += attr->len; count++; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + sizeof(attr->len)); /* * Manufacturer */ attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MANUFACTURER); + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MANUFACTURER); attr->len = (u16) strlen(fcs_hba_attr->manufacturer); memcpy(attr->value, fcs_hba_attr->manufacturer, attr->len); - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable - *fields need - *to be 4 byte - *aligned */ + attr->len = fc_roundup(attr->len, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; len += attr->len; count++; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + sizeof(attr->len)); /* * Serial Number */ attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_SERIALNUM); + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_SERIALNUM); attr->len = (u16) strlen(fcs_hba_attr->serial_num); memcpy(attr->value, fcs_hba_attr->serial_num, attr->len); - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable - *fields need - *to be 4 byte - *aligned */ + attr->len = fc_roundup(attr->len, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; len += attr->len; count++; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + sizeof(attr->len)); /* * Model */ attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL); + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL); attr->len = (u16) strlen(fcs_hba_attr->model); memcpy(attr->value, fcs_hba_attr->model, attr->len); - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable - *fields need - *to be 4 byte - *aligned */ + attr->len = fc_roundup(attr->len, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; len += attr->len; count++; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + sizeof(attr->len)); /* * Model Desc */ attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL_DESC); + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL_DESC); attr->len = (u16) strlen(fcs_hba_attr->model_desc); 
memcpy(attr->value, fcs_hba_attr->model_desc, attr->len); - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable - *fields need - *to be 4 byte - *aligned */ + attr->len = fc_roundup(attr->len, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; len += attr->len; count++; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + sizeof(attr->len)); /* @@ -1688,18 +1663,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) */ if (fcs_hba_attr->hw_version[0] != '\0') { attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_HW_VERSION); + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_HW_VERSION); attr->len = (u16) strlen(fcs_hba_attr->hw_version); memcpy(attr->value, fcs_hba_attr->hw_version, attr->len); - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable - *fields need - *to be 4 byte - *aligned */ + attr->len = fc_roundup(attr->len, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; len += attr->len; count++; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + sizeof(attr->len)); } @@ -1707,18 +1678,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) * Driver Version */ attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_DRIVER_VERSION); + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_DRIVER_VERSION); attr->len = (u16) strlen(fcs_hba_attr->driver_version); memcpy(attr->value, fcs_hba_attr->driver_version, attr->len); - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable - *fields need - *to be 4 byte - *aligned */ + attr->len = fc_roundup(attr->len, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; len += attr->len;; count++; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + sizeof(attr->len)); /* @@ -1726,18 +1693,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) */ if (fcs_hba_attr->option_rom_ver[0] != '\0') { attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_ROM_VERSION); + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_ROM_VERSION); attr->len = (u16) strlen(fcs_hba_attr->option_rom_ver); memcpy(attr->value, fcs_hba_attr->option_rom_ver, attr->len); - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable - *fields need - *to be 4 byte - *aligned */ + attr->len = fc_roundup(attr->len, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; len += attr->len; count++; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + sizeof(attr->len)); } @@ -1745,18 +1708,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) * f/w Version = driver version */ attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_FW_VERSION); + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION); attr->len = (u16) strlen(fcs_hba_attr->driver_version); memcpy(attr->value, fcs_hba_attr->driver_version, attr->len); - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable - *fields need - *to be 4 byte - *aligned */ + attr->len = fc_roundup(attr->len, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; len += attr->len; count++; - attr->len = - 
bfa_os_htons(attr->len + sizeof(attr->type) + + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + sizeof(attr->len)); /* @@ -1764,18 +1723,14 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) */ if (fcs_hba_attr->os_name[0] != '\0') { attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_OS_NAME); + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_OS_NAME); attr->len = (u16) strlen(fcs_hba_attr->os_name); memcpy(attr->value, fcs_hba_attr->os_name, attr->len); - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable - *fields need - *to be 4 byte - *aligned */ + attr->len = fc_roundup(attr->len, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; len += attr->len; count++; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + sizeof(attr->len)); } @@ -1783,22 +1738,20 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struct bfa_fcs_lport_fdmi_s *fdmi, u8 *pyld) * MAX_CT_PAYLOAD */ attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MAX_CT); + attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MAX_CT); attr->len = sizeof(fcs_hba_attr->max_ct_pyld); memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, attr->len); len += attr->len; count++; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + sizeof(attr->len)); /* * Update size of payload */ - len += ((sizeof(attr->type) + - sizeof(attr->len)) * count); + len += ((sizeof(attr->type) + sizeof(attr->len)) * count); - rhba->hba_attr_blk.attr_count = bfa_os_htonl(count); + rhba->hba_attr_blk.attr_count = cpu_to_be32(count); return len; } @@ -1825,7 +1778,7 @@ bfa_fcs_lport_fdmi_rhba_response(void *fcsarg, struct bfa_fcxp_s *fcxp, } cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK); @@ -1837,7 +1790,7 @@ bfa_fcs_lport_fdmi_rhba_response(void *fcsarg, struct bfa_fcxp_s *fcxp, bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); } -/** +/* * RPRT : Register Port */ static void @@ -1861,7 +1814,7 @@ bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced) fdmi->fcxp = fcxp; pyld = bfa_fcxp_get_reqbuf(fcxp); - bfa_os_memset(pyld, 0, FC_MAX_PDUSZ); + memset(pyld, 0, FC_MAX_PDUSZ); len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port), FDMI_RPRT); @@ -1879,7 +1832,7 @@ bfa_fcs_lport_fdmi_send_rprt(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced) bfa_sm_send_event(fdmi, FDMISM_EVENT_RPRT_SENT); } -/** +/* * This routine builds Port Attribute Block that used in RPA, RPRT commands. 
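
The RHBA/RPRT/RPA payload builders above all append attributes in the same type/length/value shape: a string value is padded to a 4-byte multiple (fc_roundup), and the length stored on the wire in big-endian order covers the type and length fields as well as the padded value. A self-contained sketch of one such append, with an assumed attribute layout mirroring fdmi_attr_s:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

struct example_fdmi_attr {		/* assumed layout, for illustration */
	__be16	type;
	__be16	len;
	u8	value[];
};

/*
 * Append one string attribute at curr_ptr and return the bytes consumed.
 * The caller is assumed to have zeroed the payload buffer beforehand, as
 * the send routines above do, so the alignment padding stays zero.
 */
static u16 example_add_string_attr(u8 *curr_ptr, u16 type, const char *str)
{
	struct example_fdmi_attr *attr = (struct example_fdmi_attr *)curr_ptr;
	u16 vlen = (u16) strlen(str);

	attr->type = cpu_to_be16(type);
	memcpy(attr->value, str, vlen);
	vlen = (vlen + 3) & ~3;		/* pad value to a 4-byte multiple */
	attr->len = cpu_to_be16(vlen + sizeof(attr->type) + sizeof(attr->len));

	return vlen + sizeof(attr->type) + sizeof(attr->len);
}

The running length and count kept by the real builders simply accumulate this per-attribute size, and the final attribute count is stored with cpu_to_be32() before the payload is sent.
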
*/ static u16 @@ -1909,56 +1862,54 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi, * FC4 Types */ attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FC4_TYPES); + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FC4_TYPES); attr->len = sizeof(fcs_port_attr.supp_fc4_types); memcpy(attr->value, fcs_port_attr.supp_fc4_types, attr->len); curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; len += attr->len; ++count; attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + + cpu_to_be16(attr->len + sizeof(attr->type) + sizeof(attr->len)); /* * Supported Speed */ attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_SUPP_SPEED); + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_SUPP_SPEED); attr->len = sizeof(fcs_port_attr.supp_speed); memcpy(attr->value, &fcs_port_attr.supp_speed, attr->len); curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; len += attr->len; ++count; attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + + cpu_to_be16(attr->len + sizeof(attr->type) + sizeof(attr->len)); /* * current Port Speed */ attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_PORT_SPEED); + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_SPEED); attr->len = sizeof(fcs_port_attr.curr_speed); memcpy(attr->value, &fcs_port_attr.curr_speed, attr->len); curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; len += attr->len; ++count; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + sizeof(attr->len)); /* * max frame size */ attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FRAME_SIZE); + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FRAME_SIZE); attr->len = sizeof(fcs_port_attr.max_frm_size); memcpy(attr->value, &fcs_port_attr.max_frm_size, attr->len); curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; len += attr->len; ++count; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + sizeof(attr->len)); /* @@ -1966,18 +1917,14 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi, */ if (fcs_port_attr.os_device_name[0] != '\0') { attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_DEV_NAME); + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_DEV_NAME); attr->len = (u16) strlen(fcs_port_attr.os_device_name); memcpy(attr->value, fcs_port_attr.os_device_name, attr->len); - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable - *fields need - *to be 4 byte - *aligned */ + attr->len = fc_roundup(attr->len, sizeof(u32)); curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len; len += attr->len; ++count; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + sizeof(attr->len)); } /* @@ -1985,27 +1932,22 @@ bfa_fcs_lport_fdmi_build_portattr_block(struct bfa_fcs_lport_fdmi_s *fdmi, */ if (fcs_port_attr.host_name[0] != '\0') { attr = (struct fdmi_attr_s *) curr_ptr; - attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_HOST_NAME); + attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_HOST_NAME); attr->len = (u16) strlen(fcs_port_attr.host_name); memcpy(attr->value, fcs_port_attr.host_name, attr->len); - attr->len = fc_roundup(attr->len, sizeof(u32)); /* variable - *fields need - *to be 4 byte - *aligned */ + attr->len = fc_roundup(attr->len, sizeof(u32)); curr_ptr += 
sizeof(attr->type) + sizeof(attr->len) + attr->len; len += attr->len; ++count; - attr->len = - bfa_os_htons(attr->len + sizeof(attr->type) + + attr->len = cpu_to_be16(attr->len + sizeof(attr->type) + sizeof(attr->len)); } /* * Update size of payload */ - port_attrib->attr_count = bfa_os_htonl(count); - len += ((sizeof(attr->type) + - sizeof(attr->len)) * count); + port_attrib->attr_count = cpu_to_be32(count); + len += ((sizeof(attr->type) + sizeof(attr->len)) * count); return len; } @@ -2050,7 +1992,7 @@ bfa_fcs_lport_fdmi_rprt_response(void *fcsarg, struct bfa_fcxp_s *fcxp, } cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK); @@ -2062,7 +2004,7 @@ bfa_fcs_lport_fdmi_rprt_response(void *fcsarg, struct bfa_fcxp_s *fcxp, bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR); } -/** +/* * RPA : Register Port Attributes. */ static void @@ -2086,15 +2028,13 @@ bfa_fcs_lport_fdmi_send_rpa(void *fdmi_cbarg, struct bfa_fcxp_s *fcxp_alloced) fdmi->fcxp = fcxp; pyld = bfa_fcxp_get_reqbuf(fcxp); - bfa_os_memset(pyld, 0, FC_MAX_PDUSZ); + memset(pyld, 0, FC_MAX_PDUSZ); len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port), FDMI_RPA); - attr_len = - bfa_fcs_lport_fdmi_build_rpa_pyld(fdmi, - (u8 *) ((struct ct_hdr_s *) pyld - + 1)); + attr_len = bfa_fcs_lport_fdmi_build_rpa_pyld(fdmi, + (u8 *) ((struct ct_hdr_s *) pyld + 1)); bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len + attr_len, &fchs, @@ -2143,7 +2083,7 @@ bfa_fcs_lport_fdmi_rpa_response(void *fcsarg, struct bfa_fcxp_s *fcxp, } cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK); @@ -2170,7 +2110,7 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi, struct bfa_fcs_lport_s *port = fdmi->ms->port; struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info; - bfa_os_memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s)); + memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s)); bfa_ioc_get_adapter_manufacturer(&port->fcs->bfa->ioc, hba_attr->manufacturer); @@ -2204,7 +2144,7 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi, sizeof(driver_info->host_os_patch)); } - hba_attr->max_ct_pyld = bfa_os_htonl(FC_MAX_PDUSZ); + hba_attr->max_ct_pyld = cpu_to_be32(FC_MAX_PDUSZ); } void @@ -2215,7 +2155,7 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi, struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info; struct bfa_port_attr_s pport_attr; - bfa_os_memset(port_attr, 0, sizeof(struct bfa_fcs_fdmi_port_attr_s)); + memset(port_attr, 0, sizeof(struct bfa_fcs_fdmi_port_attr_s)); /* * get pport attributes from hal @@ -2230,17 +2170,17 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi, /* * Supported Speeds */ - port_attr->supp_speed = bfa_os_htonl(BFA_FCS_FDMI_SUPORTED_SPEEDS); + port_attr->supp_speed = cpu_to_be32(BFA_FCS_FDMI_SUPORTED_SPEEDS); /* * Current Speed */ - port_attr->curr_speed = bfa_os_htonl(pport_attr.speed); + port_attr->curr_speed = cpu_to_be32(pport_attr.speed); /* * Max PDU Size. 
*/ - port_attr->max_frm_size = bfa_os_htonl(FC_MAX_PDUSZ); + port_attr->max_frm_size = cpu_to_be32(FC_MAX_PDUSZ); /* * OS device Name @@ -2321,11 +2261,11 @@ static void bfa_fcs_lport_ms_gfn_response(void *fcsarg, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs); -/** +/* * fcs_ms_sm FCS MS state machine */ -/** +/* * MS State Machine events */ enum port_ms_event { @@ -2360,7 +2300,7 @@ static void bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event); static void bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms, enum port_ms_event event); -/** +/* * Start in offline state - awaiting NS to send start. */ static void @@ -2432,7 +2372,7 @@ bfa_fcs_lport_ms_sm_plogi(struct bfa_fcs_lport_ms_s *ms, */ bfa_fcs_lport_fdmi_online(ms); - /** + /* * if this is a Vport, go to online state. */ if (ms->port->vport) { @@ -2595,7 +2535,7 @@ bfa_fcs_lport_ms_sm_gmal_retry(struct bfa_fcs_lport_ms_s *ms, bfa_sm_fault(ms->port->fcs, event); } } -/** +/* * ms_pvt MS local functions */ @@ -2657,12 +2597,12 @@ bfa_fcs_lport_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp, } cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { gmal_resp = (struct fcgs_gmal_resp_s *)(cthdr + 1); - num_entries = bfa_os_ntohl(gmal_resp->ms_len); + num_entries = be32_to_cpu(gmal_resp->ms_len); if (num_entries == 0) { bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); return; @@ -2795,7 +2735,7 @@ bfa_fcs_lport_ms_sm_gfn_retry(struct bfa_fcs_lport_ms_s *ms, bfa_sm_fault(ms->port->fcs, event); } } -/** +/* * ms_pvt MS local functions */ @@ -2853,7 +2793,7 @@ bfa_fcs_lport_ms_gfn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, } cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { gfn_resp = (wwn_t *)(cthdr + 1); @@ -2871,7 +2811,7 @@ bfa_fcs_lport_ms_gfn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR); } -/** +/* * ms_pvt MS local functions */ @@ -3017,7 +2957,7 @@ bfa_fcs_lport_ms_fabric_rscn(struct bfa_fcs_lport_s *port) bfa_sm_send_event(ms, MSSM_EVENT_PORT_FABRIC_RSCN); } -/** +/* * @page ns_sm_info VPORT NS State Machine * * @section ns_sm_interactions VPORT NS State Machine Interactions @@ -3080,11 +3020,11 @@ static void bfa_fcs_lport_ns_process_gidft_pids( u32 *pid_buf, u32 n_pids); static void bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port); -/** +/* * fcs_ns_sm FCS nameserver interface state machine */ -/** +/* * VPort NS State Machine events */ enum vport_ns_event { @@ -3139,7 +3079,7 @@ static void bfa_fcs_lport_ns_sm_gid_ft_retry(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event); static void bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns, enum vport_ns_event event); -/** +/* * Start in offline state - awaiting linkup */ static void @@ -3628,7 +3568,7 @@ bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns, -/** +/* * ns_pvt Nameserver local functions */ @@ -3724,7 +3664,7 @@ bfa_fcs_lport_ns_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, } } -/** +/* * Register the symbolic port name. 
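
Every CT response handler in these hunks follows the same shape: pull the CT header out of the FCXP response payload, convert cmd_rsp_code from wire order in place, and turn accept/reject into a state-machine event. A compact sketch of that pattern, reusing the FDMI event names from the code above and assuming the driver headers are in scope:

/* Common CT response check, as used by the FDMI/MS/NS handlers above. */
static void example_ct_response(struct bfa_fcs_lport_fdmi_s *fdmi,
				struct bfa_fcxp_s *fcxp)
{
	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);

	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);

	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT)
		bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
	else
		bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
}
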
*/ static void @@ -3738,7 +3678,7 @@ bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) u8 symbl[256]; u8 *psymbl = &symbl[0]; - bfa_os_memset(symbl, 0, sizeof(symbl)); + memset(symbl, 0, sizeof(symbl)); bfa_trc(port->fcs, port->port_cfg.pwwn); @@ -3755,7 +3695,7 @@ bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced) * for V-Port, form a Port Symbolic Name */ if (port->vport) { - /** + /* * For Vports, we append the vport's port symbolic name * to that of the base port. */ @@ -3815,7 +3755,7 @@ bfa_fcs_lport_ns_rspn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp, } cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { port->stats.ns_rspnid_accepts++; @@ -3829,7 +3769,7 @@ bfa_fcs_lport_ns_rspn_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp, bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); } -/** +/* * Register FC4-Types */ static void @@ -3887,7 +3827,7 @@ bfa_fcs_lport_ns_rft_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp, } cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { port->stats.ns_rftid_accepts++; @@ -3901,7 +3841,7 @@ bfa_fcs_lport_ns_rft_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp, bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); } -/** +/* * Register FC4-Features : Should be done after RFT_ID */ static void @@ -3964,7 +3904,7 @@ bfa_fcs_lport_ns_rff_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp, } cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { port->stats.ns_rffid_accepts++; @@ -3982,7 +3922,7 @@ bfa_fcs_lport_ns_rff_id_response(void *fcsarg, struct bfa_fcxp_s *fcxp, } else bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR); } -/** +/* * Query Fabric for FC4-Types Devices. * * TBD : Need to use a local (FCS private) response buffer, since the response @@ -4058,7 +3998,7 @@ bfa_fcs_lport_ns_gid_ft_response(void *fcsarg, struct bfa_fcxp_s *fcxp, } cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); switch (cthdr->cmd_rsp_code) { @@ -4102,7 +4042,7 @@ bfa_fcs_lport_ns_gid_ft_response(void *fcsarg, struct bfa_fcxp_s *fcxp, } } -/** +/* * This routine will be called by bfa_timer on timer timeouts. * * param[in] port - pointer to bfa_fcs_lport_t. @@ -4166,7 +4106,7 @@ bfa_fcs_lport_ns_process_gidft_pids(struct bfa_fcs_lport_s *port, u32 *pid_buf, } } -/** +/* * fcs_ns_public FCS nameserver public interfaces */ @@ -4227,7 +4167,7 @@ bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port) } } -/** +/* * FCS SCN */ @@ -4250,11 +4190,11 @@ static void bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs); static void bfa_fcs_lport_scn_timeout(void *arg); -/** +/* * fcs_scm_sm FCS SCN state machine */ -/** +/* * VPort SCN State Machine events */ enum port_scn_event { @@ -4278,7 +4218,7 @@ static void bfa_fcs_lport_scn_sm_scr_retry(struct bfa_fcs_lport_scn_s *scn, static void bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn, enum port_scn_event event); -/** +/* * Starting state - awaiting link up. 
*/ static void @@ -4382,11 +4322,11 @@ bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn, -/** +/* * fcs_scn_private FCS SCN private functions */ -/** +/* * This routine will be called to send a SCR command. */ static void @@ -4499,7 +4439,7 @@ bfa_fcs_lport_scn_send_ls_acc(struct bfa_fcs_lport_s *port, FC_MAX_PDUSZ, 0); } -/** +/* * This routine will be called by bfa_timer on timer timeouts. * * param[in] vport - pointer to bfa_fcs_lport_t. @@ -4522,7 +4462,7 @@ bfa_fcs_lport_scn_timeout(void *arg) -/** +/* * fcs_scn_public FCS state change notification public interfaces */ @@ -4563,7 +4503,7 @@ bfa_fcs_lport_scn_portid_rscn(struct bfa_fcs_lport_s *port, u32 rpid) bfa_trc(port->fcs, rpid); - /** + /* * If this is an unknown device, then it just came online. * Otherwise let rport handle the RSCN event. */ @@ -4579,7 +4519,7 @@ bfa_fcs_lport_scn_portid_rscn(struct bfa_fcs_lport_s *port, u32 rpid) bfa_fcs_rport_scn(rport); } -/** +/* * rscn format based PID comparison */ #define __fc_pid_match(__c0, __c1, __fmt) \ @@ -4624,7 +4564,7 @@ bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port, int i = 0, j; num_entries = - (bfa_os_ntohs(rscn->payldlen) - + (be16_to_cpu(rscn->payldlen) - sizeof(u32)) / sizeof(rscn->event[0]); bfa_trc(port->fcs, num_entries); @@ -4691,18 +4631,18 @@ bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port, } } - /** - * If any of area, domain or fabric RSCN is received, do a fresh discovery - * to find new devices. + /* + * If any of area, domain or fabric RSCN is received, do a fresh + * discovery to find new devices. */ if (nsquery) bfa_fcs_lport_ns_query(port); } -/** +/* * BFA FCS port */ -/** +/* * fcs_port_api BFA FCS port API */ struct bfa_fcs_lport_s * @@ -4943,10 +4883,10 @@ bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port, void bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port) { - bfa_os_memset(&fcs_port->stats, 0, sizeof(struct bfa_lport_stats_s)); + memset(&fcs_port->stats, 0, sizeof(struct bfa_lport_stats_s)); } -/** +/* * FCS virtual port state machine */ @@ -4967,11 +4907,11 @@ static void bfa_fcs_vport_timeout(void *vport_arg); static void bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport); static void bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport); -/** +/* * fcs_vport_sm FCS virtual port state machine */ -/** +/* * VPort State Machine events */ enum bfa_fcs_vport_event { @@ -5024,7 +4964,7 @@ static struct bfa_sm_table_s vport_sm_table[] = { {BFA_SM(bfa_fcs_vport_sm_error), BFA_FCS_VPORT_ERROR} }; -/** +/* * Beginning state. */ static void @@ -5045,7 +4985,7 @@ bfa_fcs_vport_sm_uninit(struct bfa_fcs_vport_s *vport, } } -/** +/* * Created state - a start event is required to start up the state machine. */ static void @@ -5062,7 +5002,7 @@ bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport, bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc); bfa_fcs_vport_do_fdisc(vport); } else { - /** + /* * Fabric is offline or not NPIV capable, stay in * offline state. */ @@ -5078,7 +5018,7 @@ bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport, case BFA_FCS_VPORT_SM_ONLINE: case BFA_FCS_VPORT_SM_OFFLINE: - /** + /* * Ignore ONLINE/OFFLINE events from fabric * till vport is started. */ @@ -5089,7 +5029,7 @@ bfa_fcs_vport_sm_created(struct bfa_fcs_vport_s *vport, } } -/** +/* * Offline state - awaiting ONLINE event from fabric SM. */ static void @@ -5127,7 +5067,7 @@ bfa_fcs_vport_sm_offline(struct bfa_fcs_vport_s *vport, } -/** +/* * FDISC is sent and awaiting reply from fabric. 
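
bfa_fcs_lport_scn_process_rscn() above sizes the event list directly from the RSCN payload: the leading 4-byte word is subtracted from the payload length (now read with be16_to_cpu) and the remainder divided by the per-entry size, after which each entry's address-format field selects a per-port, per-area, or per-domain match via __fc_pid_match(). A small worked illustration of the entry-count arithmetic, with an assumed payload layout:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_rscn_pl {		/* assumed shape of the RSCN payload */
	u8	command;
	u8	pagelen;
	__be16	payldlen;		/* total payload length, wire order */
	u32	event[1];		/* 4-byte affected-address pages */
};

static int example_rscn_entries(const struct example_rscn_pl *rscn)
{
	/* e.g. payldlen == 20 -> (20 - 4) / 4 == 4 affected-address pages */
	return (be16_to_cpu(rscn->payldlen) - sizeof(u32)) /
		sizeof(rscn->event[0]);
}
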
*/ static void @@ -5174,7 +5114,7 @@ bfa_fcs_vport_sm_fdisc(struct bfa_fcs_vport_s *vport, } } -/** +/* * FDISC attempt failed - a timer is active to retry FDISC. */ static void @@ -5208,7 +5148,7 @@ bfa_fcs_vport_sm_fdisc_retry(struct bfa_fcs_vport_s *vport, } } -/** +/* * Vport is online (FDISC is complete). */ static void @@ -5235,7 +5175,7 @@ bfa_fcs_vport_sm_online(struct bfa_fcs_vport_s *vport, } } -/** +/* * Vport is being deleted - awaiting lport delete completion to send * LOGO to fabric. */ @@ -5264,7 +5204,7 @@ bfa_fcs_vport_sm_deleting(struct bfa_fcs_vport_s *vport, } } -/** +/* * Error State. * This state will be set when the Vport Creation fails due * to errors like Dup WWN. In this state only operation allowed @@ -5288,7 +5228,7 @@ bfa_fcs_vport_sm_error(struct bfa_fcs_vport_s *vport, } } -/** +/* * Lport cleanup is in progress since vport is being deleted. Fabric is * offline, so no LOGO is needed to complete vport deletion. */ @@ -5313,7 +5253,7 @@ bfa_fcs_vport_sm_cleanup(struct bfa_fcs_vport_s *vport, } } -/** +/* * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup * is done. */ @@ -5347,10 +5287,10 @@ bfa_fcs_vport_sm_logo(struct bfa_fcs_vport_s *vport, -/** +/* * fcs_vport_private FCS virtual port private functions */ -/** +/* * This routine will be called to send a FDISC command. */ static void @@ -5397,7 +5337,7 @@ bfa_fcs_vport_fdisc_rejected(struct bfa_fcs_vport_s *vport) } } -/** +/* * Called to send a logout to the fabric. Used when a V-Port is * deleted/stopped. */ @@ -5411,7 +5351,7 @@ bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport) } -/** +/* * This routine will be called by bfa_timer on timer timeouts. * * param[in] vport - pointer to bfa_fcs_vport_t. @@ -5449,11 +5389,11 @@ bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport) -/** +/* * fcs_vport_public FCS virtual port public interfaces */ -/** +/* * Online notification from fabric SM. */ void @@ -5463,7 +5403,7 @@ bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport) bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE); } -/** +/* * Offline notification from fabric SM. */ void @@ -5473,7 +5413,7 @@ bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport) bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE); } -/** +/* * Cleanup notification from fabric SM on link timer expiry. */ void @@ -5481,7 +5421,7 @@ bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport) { vport->vport_stats.fab_cleanup++; } -/** +/* * delete notification from fabric SM. To be invoked from within FCS. */ void @@ -5490,7 +5430,7 @@ bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport) bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE); } -/** +/* * Delete completion callback from associated lport */ void @@ -5501,11 +5441,11 @@ bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport) -/** +/* * fcs_vport_api Virtual port API */ -/** +/* * Use this function to instantiate a new FCS vport object. This * function will not trigger any HW initialization process (which will be * done in vport_start() call) @@ -5555,7 +5495,7 @@ bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs, return BFA_STATUS_OK; } -/** +/* * Use this function to instantiate a new FCS PBC vport object. This * function will not trigger any HW initialization process (which will be * done in vport_start() call) @@ -5585,7 +5525,7 @@ bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport, struct bfa_fcs_s *fcs, return rc; } -/** +/* * Use this function to findout if this is a pbc vport or not. 
* * @param[in] vport - pointer to bfa_fcs_vport_t. @@ -5603,7 +5543,7 @@ bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport) } -/** +/* * Use this function initialize the vport. * * @param[in] vport - pointer to bfa_fcs_vport_t. @@ -5618,7 +5558,7 @@ bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport) return BFA_STATUS_OK; } -/** +/* * Use this function quiese the vport object. This function will return * immediately, when the vport is actually stopped, the * bfa_drv_vport_stop_cb() will be called. @@ -5635,7 +5575,7 @@ bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport) return BFA_STATUS_OK; } -/** +/* * Use this function to delete a vport object. Fabric object should * be stopped before this function call. * @@ -5657,7 +5597,7 @@ bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport) return BFA_STATUS_OK; } -/** +/* * Use this function to get vport's current status info. * * param[in] vport pointer to bfa_fcs_vport_t. @@ -5672,13 +5612,13 @@ bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport, if (vport == NULL || attr == NULL) return; - bfa_os_memset(attr, 0, sizeof(struct bfa_vport_attr_s)); + memset(attr, 0, sizeof(struct bfa_vport_attr_s)); bfa_fcs_lport_get_attr(&vport->lport, &attr->port_attr); attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm); } -/** +/* * Use this function to get vport's statistics. * * param[in] vport pointer to bfa_fcs_vport_t. @@ -5693,7 +5633,7 @@ bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport, *stats = vport->vport_stats; } -/** +/* * Use this function to clear vport's statistics. * * param[in] vport pointer to bfa_fcs_vport_t. @@ -5703,10 +5643,10 @@ bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport, void bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport) { - bfa_os_memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s)); + memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s)); } -/** +/* * Lookup a virtual port. Excludes base port from lookup. */ struct bfa_fcs_vport_s * @@ -5728,7 +5668,7 @@ bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs, u16 vf_id, wwn_t vpwwn) return vport; } -/** +/* * FDISC Response */ void @@ -5784,7 +5724,7 @@ bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status) } } -/** +/* * LOGO response */ void @@ -5794,7 +5734,7 @@ bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg) bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK); } -/** +/* * Received clear virtual link */ void diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c index 635f0cd8871..47f35c0ef29 100644 --- a/drivers/scsi/bfa/bfa_fcs_rport.c +++ b/drivers/scsi/bfa/bfa_fcs_rport.c @@ -15,7 +15,7 @@ * General Public License for more details. */ -/** +/* * rport.c Remote port implementation. */ @@ -75,7 +75,7 @@ static void bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, u16 len); static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport); -/** +/* * fcs_rport_sm FCS rport state machine events */ @@ -172,7 +172,7 @@ static struct bfa_sm_table_s rport_sm_table[] = { {BFA_SM(bfa_fcs_rport_sm_nsdisc_sent), BFA_RPORT_NSDISC}, }; -/** +/* * Beginning state. */ static void @@ -210,7 +210,7 @@ bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event) } } -/** +/* * PLOGI is being sent. */ static void @@ -262,7 +262,7 @@ bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport, } } -/** +/* * PLOGI is being sent. 
*/ static void @@ -287,7 +287,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport, case RPSM_EVENT_PLOGI_RCVD: case RPSM_EVENT_SCN: - /** + /* * Ignore, SCN is possibly online notification. */ break; @@ -309,7 +309,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport, break; case RPSM_EVENT_HCB_OFFLINE: - /** + /* * Ignore BFA callback, on a PLOGI receive we call bfa offline. */ break; @@ -319,7 +319,7 @@ bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport, } } -/** +/* * PLOGI is sent. */ static void @@ -380,7 +380,7 @@ bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport, } } -/** +/* * PLOGI is sent. */ static void @@ -475,7 +475,7 @@ bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event) } } -/** +/* * PLOGI is complete. Awaiting BFA rport online callback. FC-4s * are offline. */ @@ -519,7 +519,7 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport, break; case RPSM_EVENT_SCN: - /** + /* * @todo * Ignore SCN - PLOGI just completed, FC-4 login should detect * device failures. @@ -531,7 +531,7 @@ bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport, } } -/** +/* * Rport is ONLINE. FC-4s active. */ static void @@ -580,7 +580,7 @@ bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event) } } -/** +/* * An SCN event is received in ONLINE state. NS query is being sent * prior to ADISC authentication with rport. FC-4s are paused. */ @@ -604,7 +604,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport, break; case RPSM_EVENT_SCN: - /** + /* * ignore SCN, wait for response to query itself */ break; @@ -638,7 +638,7 @@ bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport, } } -/** +/* * An SCN event is received in ONLINE state. NS query is sent to rport. * FC-4s are paused. */ @@ -697,7 +697,7 @@ bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event) } } -/** +/* * An SCN event is received in ONLINE state. ADISC is being sent for * authenticating with rport. FC-4s are paused. */ @@ -748,7 +748,7 @@ bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport, } } -/** +/* * An SCN event is received in ONLINE state. ADISC is to rport. * FC-4s are paused. */ @@ -765,7 +765,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event) break; case RPSM_EVENT_PLOGI_RCVD: - /** + /* * Too complex to cleanup FC-4 & rport and then acc to PLOGI. * At least go offline when a PLOGI is received. */ @@ -787,7 +787,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event) break; case RPSM_EVENT_SCN: - /** + /* * already processing RSCN */ break; @@ -810,7 +810,7 @@ bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event) } } -/** +/* * Rport has sent LOGO. Awaiting FC-4 offline completion callback. */ static void @@ -841,7 +841,7 @@ bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport, } } -/** +/* * LOGO needs to be sent to rport. Awaiting FC-4 offline completion * callback. */ @@ -864,7 +864,7 @@ bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport, } } -/** +/* * Rport is going offline. Awaiting FC-4 offline completion callback. */ static void @@ -886,7 +886,7 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport, case RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: case RPSM_EVENT_ADDRESS_CHANGE: - /** + /* * rport is already going offline. 
* SCN - ignore and wait till transitioning to offline state */ @@ -901,7 +901,7 @@ bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport, } } -/** +/* * Rport is offline. FC-4s are offline. Awaiting BFA rport offline * callback. */ @@ -945,7 +945,7 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport, case RPSM_EVENT_SCN: case RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: - /** + /* * Ignore, already offline. */ break; @@ -955,7 +955,7 @@ bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport, } } -/** +/* * Rport is offline. FC-4s are offline. Awaiting BFA rport offline * callback to send LOGO accept. */ @@ -1009,7 +1009,7 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport, case RPSM_EVENT_LOGO_RCVD: case RPSM_EVENT_PRLO_RCVD: - /** + /* * Ignore - already processing a LOGO. */ break; @@ -1019,7 +1019,7 @@ bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport, } } -/** +/* * Rport is being deleted. FC-4s are offline. * Awaiting BFA rport offline * callback to send LOGO. @@ -1048,7 +1048,7 @@ bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport, } } -/** +/* * Rport is being deleted. FC-4s are offline. LOGO is being sent. */ static void @@ -1082,7 +1082,7 @@ bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport, } } -/** +/* * Rport is offline. FC-4s are offline. BFA rport is offline. * Timer active to delete stale rport. */ @@ -1142,7 +1142,7 @@ bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event) } } -/** +/* * Rport address has changed. Nameserver discovery request is being sent. */ static void @@ -1199,7 +1199,7 @@ bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport, } } -/** +/* * Nameserver discovery failed. Waiting for timeout to retry. */ static void @@ -1263,7 +1263,7 @@ bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport, } } -/** +/* * Rport address has changed. Nameserver discovery request is sent. */ static void @@ -1329,13 +1329,13 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport, bfa_fcs_rport_send_prlo_acc(rport); break; case RPSM_EVENT_SCN: - /** + /* * ignore, wait for NS query response */ break; case RPSM_EVENT_LOGO_RCVD: - /** + /* * Not logged-in yet. Accept LOGO. */ bfa_fcs_rport_send_logo_acc(rport); @@ -1354,7 +1354,7 @@ bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport, -/** +/* * fcs_rport_private FCS RPORT provate functions */ @@ -1415,7 +1415,7 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, plogi_rsp = (struct fc_logi_s *) BFA_FCXP_RSP_PLD(fcxp); - /** + /* * Check for failure first. */ if (plogi_rsp->els_cmd.els_code != FC_ELS_ACC) { @@ -1436,7 +1436,7 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, return; } - /** + /* * PLOGI is complete. Make sure this device is not one of the known * device with a new FC port address. */ @@ -1468,7 +1468,7 @@ bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, } } - /** + /* * Normal login path -- no evil twins. */ rport->stats.plogi_accs++; @@ -1621,7 +1621,7 @@ bfa_fcs_rport_gidpn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_trc(rport->fcs, rport->pwwn); cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { /* Check if the pid is the same as before. 
*/ @@ -1691,7 +1691,7 @@ bfa_fcs_rport_gpnid_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_trc(rport->fcs, rport->pwwn); cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp); - cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code); + cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code); if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) { bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED); @@ -1722,7 +1722,7 @@ bfa_fcs_rport_gpnid_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, } } -/** +/* * Called to send a logout to the rport. */ static void @@ -1759,7 +1759,7 @@ bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced) bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT); } -/** +/* * Send ACC for a LOGO received. */ static void @@ -1788,7 +1788,7 @@ bfa_fcs_rport_send_logo_acc(void *rport_cbarg) FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0); } -/** +/* * brief * This routine will be called by bfa_timer on timer timeouts. * @@ -1961,7 +1961,7 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid) struct bfa_fcs_rport_s *rport; struct bfad_rport_s *rport_drv; - /** + /* * allocate rport */ if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv) @@ -1979,7 +1979,7 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid) rport->pid = rpid; rport->pwwn = pwwn; - /** + /* * allocate BFA rport */ rport->bfa_rport = bfa_rport_create(port->fcs->bfa, rport); @@ -1989,7 +1989,7 @@ bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid) return NULL; } - /** + /* * allocate FC-4s */ bfa_assert(bfa_fcs_lport_is_initiator(port)); @@ -2021,7 +2021,7 @@ bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport) { struct bfa_fcs_lport_s *port = rport->port; - /** + /* * - delete FC-4s * - delete BFA rport * - remove from queue of rports @@ -2093,7 +2093,7 @@ bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport) } } -/** +/* * Update rport parameters from PLOGI or PLOGI accept. */ static void @@ -2101,14 +2101,14 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi) { bfa_fcs_lport_t *port = rport->port; - /** + /* * - port name * - node name */ rport->pwwn = plogi->port_name; rport->nwwn = plogi->node_name; - /** + /* * - class of service */ rport->fc_cos = 0; @@ -2118,16 +2118,16 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi) if (plogi->class2.class_valid) rport->fc_cos |= FC_CLASS_2; - /** + /* * - CISC * - MAX receive frame size */ rport->cisc = plogi->csp.cisc; - rport->maxfrsize = bfa_os_ntohs(plogi->class3.rxsz); + rport->maxfrsize = be16_to_cpu(plogi->class3.rxsz); - bfa_trc(port->fcs, bfa_os_ntohs(plogi->csp.bbcred)); + bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred)); bfa_trc(port->fcs, port->fabric->bb_credit); - /** + /* * Direct Attach P2P mode : * This is to handle a bug (233476) in IBM targets in Direct Attach * Mode. Basically, in FLOGI Accept the target would have @@ -2136,19 +2136,19 @@ bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi) * in PLOGI. 
*/ if ((!bfa_fcs_fabric_is_switched(port->fabric)) && - (bfa_os_ntohs(plogi->csp.bbcred) < port->fabric->bb_credit)) { + (be16_to_cpu(plogi->csp.bbcred) < port->fabric->bb_credit)) { - bfa_trc(port->fcs, bfa_os_ntohs(plogi->csp.bbcred)); + bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred)); bfa_trc(port->fcs, port->fabric->bb_credit); - port->fabric->bb_credit = bfa_os_ntohs(plogi->csp.bbcred); + port->fabric->bb_credit = be16_to_cpu(plogi->csp.bbcred); bfa_fcport_set_tx_bbcredit(port->fcs->bfa, port->fabric->bb_credit); } } -/** +/* * Called to handle LOGO received from an existing remote port. */ static void @@ -2164,11 +2164,11 @@ bfa_fcs_rport_process_logo(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs) -/** +/* * fcs_rport_public FCS rport public interfaces */ -/** +/* * Called by bport/vport to create a remote port instance for a discovered * remote device. * @@ -2191,7 +2191,7 @@ bfa_fcs_rport_create(struct bfa_fcs_lport_s *port, u32 rpid) return rport; } -/** +/* * Called to create a rport for which only the wwn is known. * * @param[in] port - base port @@ -2211,7 +2211,7 @@ bfa_fcs_rport_create_by_wwn(struct bfa_fcs_lport_s *port, wwn_t rpwwn) bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC); return rport; } -/** +/* * Called by bport in private loop topology to indicate that a * rport has been discovered and plogi has been completed. * @@ -2233,7 +2233,7 @@ bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *fchs, bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_COMP); } -/** +/* * Called by bport/vport to handle PLOGI received from a new remote port. * If an existing rport does a plogi, it will be handled separately. */ @@ -2272,7 +2272,7 @@ wwn_compare(wwn_t wwn1, wwn_t wwn2) return 0; } -/** +/* * Called by bport/vport to handle PLOGI received from an existing * remote port. */ @@ -2280,7 +2280,7 @@ void bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, struct fc_logi_s *plogi) { - /** + /* * @todo Handle P2P and initiator-initiator. */ @@ -2289,7 +2289,7 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, rport->reply_oxid = rx_fchs->ox_id; bfa_trc(rport->fcs, rport->reply_oxid); - /** + /* * In Switched fabric topology, * PLOGI to each other. If our pwwn is smaller, ignore it, * if it is not a well known address. @@ -2307,7 +2307,7 @@ bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD); } -/** +/* * Called by bport/vport to delete a remote port instance. * * Rport delete is called under the following conditions: @@ -2321,7 +2321,7 @@ bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport) bfa_sm_send_event(rport, RPSM_EVENT_DELETE); } -/** +/* * Called by bport/vport to when a target goes offline. * */ @@ -2331,7 +2331,7 @@ bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport) bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP); } -/** +/* * Called by bport in n2n when a target (attached port) becomes online. * */ @@ -2340,7 +2340,7 @@ bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport) { bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND); } -/** +/* * Called by bport/vport to notify SCN for the remote port */ void @@ -2350,7 +2350,7 @@ bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport) bfa_sm_send_event(rport, RPSM_EVENT_SCN); } -/** +/* * Called by fcpim to notify that the ITN cleanup is done. 
*/ void @@ -2359,7 +2359,7 @@ bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport) bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE); } -/** +/* * Called by fcptm to notify that the ITN cleanup is done. */ void @@ -2368,7 +2368,7 @@ bfa_fcs_rport_tin_ack(struct bfa_fcs_rport_s *rport) bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE); } -/** +/* * brief * This routine BFA callback for bfa_rport_online() call. * @@ -2391,7 +2391,7 @@ bfa_cb_rport_online(void *cbarg) bfa_sm_send_event(rport, RPSM_EVENT_HCB_ONLINE); } -/** +/* * brief * This routine BFA callback for bfa_rport_offline() call. * @@ -2413,7 +2413,7 @@ bfa_cb_rport_offline(void *cbarg) bfa_sm_send_event(rport, RPSM_EVENT_HCB_OFFLINE); } -/** +/* * brief * This routine is a static BFA callback when there is a QoS flow_id * change notification @@ -2437,7 +2437,7 @@ bfa_cb_rport_qos_scn_flowid(void *cbarg, bfa_trc(rport->fcs, rport->pwwn); } -/** +/* * brief * This routine is a static BFA callback when there is a QoS priority * change notification @@ -2461,7 +2461,7 @@ bfa_cb_rport_qos_scn_prio(void *cbarg, bfa_trc(rport->fcs, rport->pwwn); } -/** +/* * Called to process any unsolicted frames from this remote port */ void @@ -2470,7 +2470,7 @@ bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport) bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP); } -/** +/* * Called to process any unsolicted frames from this remote port */ void @@ -2577,7 +2577,7 @@ bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs, FC_MAX_PDUSZ, 0); } -/** +/* * Return state of rport. */ int @@ -2586,7 +2586,7 @@ bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport) return bfa_sm_to_state(rport_sm_table, rport->sm); } -/** +/* * brief * Called by the Driver to set rport delete/ageout timeout * @@ -2613,15 +2613,15 @@ bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id) -/** +/* * Remote port implementation. */ -/** +/* * fcs_rport_api FCS rport API. */ -/** +/* * Direct API to add a target by port wwn. This interface is used, for * example, by bios when target pwwn is known from boot lun configuration. */ @@ -2634,7 +2634,7 @@ bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn, return BFA_STATUS_OK; } -/** +/* * Direct API to remove a target and its associated resources. This * interface is used, for example, by driver to remove target * ports from the target list for a VM. @@ -2663,7 +2663,7 @@ bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport_in) } -/** +/* * Remote device status for display/debug. */ void @@ -2674,7 +2674,7 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport, bfa_fcs_lport_t *port = rport->port; bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed; - bfa_os_memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s)); + memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s)); rport_attr->pid = rport->pid; rport_attr->pwwn = rport->pwwn; @@ -2704,7 +2704,7 @@ bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport, } } -/** +/* * Per remote device statistics. */ void @@ -2717,7 +2717,7 @@ bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport, void bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport) { - bfa_os_memset((char *)&rport->stats, 0, + memset((char *)&rport->stats, 0, sizeof(struct bfa_rport_stats_s)); } @@ -2767,7 +2767,7 @@ bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport, bfa_port_speed_t speed) -/** +/* * Remote port features (RPF) implementation. 
*/ @@ -2786,7 +2786,7 @@ static void bfa_fcs_rpf_rpsc2_response(void *fcsarg, static void bfa_fcs_rpf_timeout(void *arg); -/** +/* * fcs_rport_ftrs_sm FCS rport state machine events */ @@ -2981,7 +2981,7 @@ bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event) bfa_sm_fault(rport->fcs, event); } } -/** +/* * Called when Rport is created. */ void @@ -2995,7 +2995,7 @@ bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport) bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_uninit); } -/** +/* * Called when Rport becomes online */ void @@ -3010,7 +3010,7 @@ bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport) bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE); } -/** +/* * Called when Rport becomes offline */ void @@ -3090,16 +3090,16 @@ bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, rpsc2_acc = (struct fc_rpsc2_acc_s *) BFA_FCXP_RSP_PLD(fcxp); if (rpsc2_acc->els_cmd == FC_ELS_ACC) { rport->stats.rpsc_accs++; - num_ents = bfa_os_ntohs(rpsc2_acc->num_pids); + num_ents = be16_to_cpu(rpsc2_acc->num_pids); bfa_trc(rport->fcs, num_ents); if (num_ents > 0) { bfa_assert(rpsc2_acc->port_info[0].pid != rport->pid); bfa_trc(rport->fcs, - bfa_os_ntohs(rpsc2_acc->port_info[0].pid)); + be16_to_cpu(rpsc2_acc->port_info[0].pid)); bfa_trc(rport->fcs, - bfa_os_ntohs(rpsc2_acc->port_info[0].speed)); + be16_to_cpu(rpsc2_acc->port_info[0].speed)); bfa_trc(rport->fcs, - bfa_os_ntohs(rpsc2_acc->port_info[0].index)); + be16_to_cpu(rpsc2_acc->port_info[0].index)); bfa_trc(rport->fcs, rpsc2_acc->port_info[0].type); @@ -3109,7 +3109,7 @@ bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, } rpf->rpsc_speed = fc_rpsc_operspeed_to_bfa_speed( - bfa_os_ntohs(rpsc2_acc->port_info[0].speed)); + be16_to_cpu(rpsc2_acc->port_info[0].speed)); bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_COMP); } diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c index c787d3af088..d8464ae6007 100644 --- a/drivers/scsi/bfa/bfa_hw_cb.c +++ b/drivers/scsi/bfa/bfa_hw_cb.c @@ -22,7 +22,7 @@ void bfa_hwcb_reginit(struct bfa_s *bfa) { struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs; - bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc); + void __iomem *kva = bfa_ioc_bar0(&bfa->ioc); int i, q, fn = bfa_ioc_pcifn(&bfa->ioc); if (fn == 0) { @@ -60,8 +60,8 @@ bfa_hwcb_reqq_ack(struct bfa_s *bfa, int reqq) static void bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq) { - bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, - __HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq)); + writel(__HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq), + bfa->iocfc.bfa_regs.intr_status); } void @@ -72,8 +72,8 @@ bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq) static void bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq) { - bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, - __HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq)); + writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq), + bfa->iocfc.bfa_regs.intr_status); } void @@ -102,7 +102,7 @@ bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap, *num_vecs = __HFN_NUMINTS; } -/** +/* * No special setup required for crossbow -- vector assignments are implicit. */ void @@ -129,7 +129,7 @@ bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs) bfa->msix.handler[i] = bfa_msix_lpu_err; } -/** +/* * Crossbow -- dummy, interrupts are masked */ void @@ -142,7 +142,7 @@ bfa_hwcb_msix_uninstall(struct bfa_s *bfa) { } -/** +/* * No special enable/disable -- vector assignments are implicit. 
*/ void diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c index c97ebafec5e..b0efbc713ff 100644 --- a/drivers/scsi/bfa/bfa_hw_ct.c +++ b/drivers/scsi/bfa/bfa_hw_ct.c @@ -31,15 +31,15 @@ static void bfa_hwct_msix_lpu_err_set(struct bfa_s *bfa, bfa_boolean_t msix, int vec) { int fn = bfa_ioc_pcifn(&bfa->ioc); - bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc); + void __iomem *kva = bfa_ioc_bar0(&bfa->ioc); if (msix) - bfa_reg_write(kva + __ct_msix_err_vec_reg[fn], vec); + writel(vec, kva + __ct_msix_err_vec_reg[fn]); else - bfa_reg_write(kva + __ct_msix_err_vec_reg[fn], 0); + writel(0, kva + __ct_msix_err_vec_reg[fn]); } -/** +/* * Dummy interrupt handler for handling spurious interrupt during chip-reinit. */ static void @@ -51,7 +51,7 @@ void bfa_hwct_reginit(struct bfa_s *bfa) { struct bfa_iocfc_regs_s *bfa_regs = &bfa->iocfc.bfa_regs; - bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc); + void __iomem *kva = bfa_ioc_bar0(&bfa->ioc); int i, q, fn = bfa_ioc_pcifn(&bfa->ioc); if (fn == 0) { @@ -88,8 +88,8 @@ bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq) { u32 r32; - r32 = bfa_reg_read(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]); - bfa_reg_write(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq], r32); + r32 = readl(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]); + writel(r32, bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]); } void @@ -97,8 +97,8 @@ bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq) { u32 r32; - r32 = bfa_reg_read(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]); - bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq], r32); + r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]); + writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]); } void @@ -110,7 +110,7 @@ bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *msix_vecs_bmap, *num_vecs = BFA_MSIX_CT_MAX; } -/** +/* * Setup MSI-X vector for catapult */ void @@ -156,7 +156,7 @@ bfa_hwct_msix_uninstall(struct bfa_s *bfa) bfa->msix.handler[i] = bfa_hwct_msix_dummy; } -/** +/* * Enable MSI-X vectors */ void diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c index 6795b247791..54475b53a5a 100644 --- a/drivers/scsi/bfa/bfa_ioc.c +++ b/drivers/scsi/bfa/bfa_ioc.c @@ -23,7 +23,7 @@ BFA_TRC_FILE(CNA, IOC); -/** +/* * IOC local definitions */ #define BFA_IOC_TOV 3000 /* msecs */ @@ -49,7 +49,7 @@ BFA_TRC_FILE(CNA, IOC); BFA_TRC_MAX * sizeof(struct bfa_trc_s))) #define BFA_DBG_FWTRC_OFF(_fn) (BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn)) -/** +/* * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. 
*/ @@ -73,7 +73,7 @@ BFA_TRC_FILE(CNA, IOC); #define bfa_ioc_mbox_cmd_pending(__ioc) \ (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \ - bfa_reg_read((__ioc)->ioc_regs.hfn_mbox_cmd)) + readl((__ioc)->ioc_regs.hfn_mbox_cmd)) bfa_boolean_t bfa_auto_recover = BFA_TRUE; @@ -101,11 +101,11 @@ static void bfa_ioc_pf_disabled(struct bfa_ioc_s *ioc); static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc); static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc); -/** +/* * hal_ioc_sm */ -/** +/* * IOC state machine definitions/declarations */ enum ioc_event { @@ -144,7 +144,7 @@ static struct bfa_sm_table_s ioc_sm_table[] = { {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, }; -/** +/* * IOCPF state machine definitions/declarations */ @@ -174,7 +174,7 @@ static void bfa_iocpf_stop(struct bfa_ioc_s *ioc); static void bfa_iocpf_timeout(void *ioc_arg); static void bfa_iocpf_sem_timeout(void *ioc_arg); -/** +/* * IOCPF state machine events */ enum iocpf_event { @@ -191,7 +191,7 @@ enum iocpf_event { IOCPF_E_TIMEOUT = 11, /* f/w response timeout */ }; -/** +/* * IOCPF states */ enum bfa_iocpf_state { @@ -232,11 +232,11 @@ static struct bfa_sm_table_s iocpf_sm_table[] = { {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED}, }; -/** +/* * IOC State Machine */ -/** +/* * Beginning state. IOC uninit state. */ @@ -245,7 +245,7 @@ bfa_ioc_sm_uninit_entry(struct bfa_ioc_s *ioc) { } -/** +/* * IOC is in uninit state. */ static void @@ -262,7 +262,7 @@ bfa_ioc_sm_uninit(struct bfa_ioc_s *ioc, enum ioc_event event) bfa_sm_fault(ioc, event); } } -/** +/* * Reset entry actions -- initialize state machine */ static void @@ -271,7 +271,7 @@ bfa_ioc_sm_reset_entry(struct bfa_ioc_s *ioc) bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset); } -/** +/* * IOC is in reset state. */ static void @@ -304,7 +304,7 @@ bfa_ioc_sm_enabling_entry(struct bfa_ioc_s *ioc) bfa_iocpf_enable(ioc); } -/** +/* * Host IOC function is being enabled, awaiting response from firmware. * Semaphore is acquired. */ @@ -352,7 +352,7 @@ bfa_ioc_sm_getattr_entry(struct bfa_ioc_s *ioc) bfa_ioc_send_getattr(ioc); } -/** +/* * IOC configuration in progress. Timer is active. */ static void @@ -447,7 +447,7 @@ bfa_ioc_sm_disabling_entry(struct bfa_ioc_s *ioc) BFA_LOG(KERN_INFO, bfad, log_level, "IOC disabled\n"); } -/** +/* * IOC is being disabled */ static void @@ -474,7 +474,7 @@ bfa_ioc_sm_disabling(struct bfa_ioc_s *ioc, enum ioc_event event) } } -/** +/* * IOC disable completion entry. */ static void @@ -514,7 +514,7 @@ bfa_ioc_sm_initfail_entry(struct bfa_ioc_s *ioc) ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); } -/** +/* * Hardware initialization failed. */ static void @@ -528,7 +528,7 @@ bfa_ioc_sm_initfail(struct bfa_ioc_s *ioc, enum ioc_event event) break; case IOC_E_FAILED: - /** + /* * Initialization failure during iocpf init retry. */ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); @@ -556,7 +556,7 @@ bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc) struct bfa_ioc_hbfail_notify_s *notify; struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; - /** + /* * Notify driver and common modules registered for notification. */ ioc->cbfn->hbfail_cbfn(ioc->bfa); @@ -569,7 +569,7 @@ bfa_ioc_sm_fail_entry(struct bfa_ioc_s *ioc) "Heart Beat of IOC has failed\n"); } -/** +/* * IOC failure. */ static void @@ -580,7 +580,7 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event) switch (event) { case IOC_E_FAILED: - /** + /* * Initialization failure during iocpf recovery. * !!! Fall through !!! 
*/ @@ -608,12 +608,12 @@ bfa_ioc_sm_fail(struct bfa_ioc_s *ioc, enum ioc_event event) -/** +/* * IOCPF State Machine */ -/** +/* * Reset entry actions -- initialize state machine */ static void @@ -623,7 +623,7 @@ bfa_iocpf_sm_reset_entry(struct bfa_iocpf_s *iocpf) iocpf->auto_recover = bfa_auto_recover; } -/** +/* * Beginning state. IOC is in reset state. */ static void @@ -646,7 +646,7 @@ bfa_iocpf_sm_reset(struct bfa_iocpf_s *iocpf, enum iocpf_event event) } } -/** +/* * Semaphore should be acquired for version check. */ static void @@ -655,7 +655,7 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf_s *iocpf) bfa_ioc_hw_sem_get(iocpf->ioc); } -/** +/* * Awaiting h/w semaphore to continue with version check. */ static void @@ -692,7 +692,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf_s *iocpf, enum iocpf_event event) } } -/** +/* * Notify enable completion callback. */ static void @@ -708,7 +708,7 @@ bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf_s *iocpf) bfa_iocpf_timer_start(iocpf->ioc); } -/** +/* * Awaiting firmware version match. */ static void @@ -739,7 +739,7 @@ bfa_iocpf_sm_mismatch(struct bfa_iocpf_s *iocpf, enum iocpf_event event) } } -/** +/* * Request for semaphore. */ static void @@ -748,7 +748,7 @@ bfa_iocpf_sm_semwait_entry(struct bfa_iocpf_s *iocpf) bfa_ioc_hw_sem_get(iocpf->ioc); } -/** +/* * Awaiting semaphore for h/w initialzation. */ static void @@ -782,7 +782,7 @@ bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf_s *iocpf) bfa_ioc_reset(iocpf->ioc, BFA_FALSE); } -/** +/* * Hardware is being initialized. Interrupts are enabled. * Holding hardware semaphore lock. */ @@ -839,7 +839,7 @@ bfa_iocpf_sm_enabling_entry(struct bfa_iocpf_s *iocpf) bfa_ioc_send_enable(iocpf->ioc); } -/** +/* * Host IOC function is being enabled, awaiting response from firmware. * Semaphore is acquired. */ @@ -866,8 +866,7 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) case IOCPF_E_TIMEOUT: iocpf->retry_count++; if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) { - bfa_reg_write(ioc->ioc_regs.ioc_fwstate, - BFI_IOC_UNINIT); + writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); break; } @@ -944,7 +943,7 @@ bfa_iocpf_sm_disabling_entry(struct bfa_iocpf_s *iocpf) bfa_ioc_send_disable(iocpf->ioc); } -/** +/* * IOC is being disabled */ static void @@ -968,7 +967,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) */ case IOCPF_E_TIMEOUT: - bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); + writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); break; @@ -980,7 +979,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf_s *iocpf, enum iocpf_event event) } } -/** +/* * IOC disable completion entry. */ static void @@ -1018,7 +1017,7 @@ bfa_iocpf_sm_initfail_entry(struct bfa_iocpf_s *iocpf) bfa_iocpf_timer_start(iocpf->ioc); } -/** +/* * Hardware initialization failed. */ static void @@ -1053,18 +1052,18 @@ bfa_iocpf_sm_initfail(struct bfa_iocpf_s *iocpf, enum iocpf_event event) static void bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf) { - /** + /* * Mark IOC as failed in hardware and stop firmware. */ bfa_ioc_lpu_stop(iocpf->ioc); - bfa_reg_write(iocpf->ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL); + writel(BFI_IOC_FAIL, iocpf->ioc->ioc_regs.ioc_fwstate); - /** + /* * Notify other functions on HB failure. */ bfa_ioc_notify_hbfail(iocpf->ioc); - /** + /* * Flush any queued up mailbox requests. 
*/ bfa_ioc_mbox_hbfail(iocpf->ioc); @@ -1073,7 +1072,7 @@ bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf) bfa_iocpf_recovery_timer_start(iocpf->ioc); } -/** +/* * IOC is in failed state. */ static void @@ -1101,7 +1100,7 @@ bfa_iocpf_sm_fail(struct bfa_iocpf_s *iocpf, enum iocpf_event event) -/** +/* * hal_ioc_pvt BFA IOC private functions */ @@ -1113,7 +1112,7 @@ bfa_ioc_disable_comp(struct bfa_ioc_s *ioc) ioc->cbfn->disable_cbfn(ioc->bfa); - /** + /* * Notify common modules registered for notification. */ list_for_each(qe, &ioc->hb_notify_q) { @@ -1123,18 +1122,18 @@ bfa_ioc_disable_comp(struct bfa_ioc_s *ioc) } bfa_boolean_t -bfa_ioc_sem_get(bfa_os_addr_t sem_reg) +bfa_ioc_sem_get(void __iomem *sem_reg) { u32 r32; int cnt = 0; #define BFA_SEM_SPINCNT 3000 - r32 = bfa_reg_read(sem_reg); + r32 = readl(sem_reg); while (r32 && (cnt < BFA_SEM_SPINCNT)) { cnt++; - bfa_os_udelay(2); - r32 = bfa_reg_read(sem_reg); + udelay(2); + r32 = readl(sem_reg); } if (r32 == 0) @@ -1145,9 +1144,9 @@ bfa_ioc_sem_get(bfa_os_addr_t sem_reg) } void -bfa_ioc_sem_release(bfa_os_addr_t sem_reg) +bfa_ioc_sem_release(void __iomem *sem_reg) { - bfa_reg_write(sem_reg, 1); + writel(1, sem_reg); } static void @@ -1155,11 +1154,11 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc) { u32 r32; - /** + /* * First read to the semaphore register will return 0, subsequent reads * will return 1. Semaphore is released by writing 1 to the register */ - r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); + r32 = readl(ioc->ioc_regs.ioc_sem_reg); if (r32 == 0) { bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED); return; @@ -1171,7 +1170,7 @@ bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc) void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc) { - bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1); + writel(1, ioc->ioc_regs.ioc_sem_reg); } static void @@ -1180,7 +1179,7 @@ bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc) bfa_sem_timer_stop(ioc); } -/** +/* * Initialize LPU local memory (aka secondary memory / SRAM) */ static void @@ -1190,7 +1189,7 @@ bfa_ioc_lmem_init(struct bfa_ioc_s *ioc) int i; #define PSS_LMEM_INIT_TIME 10000 - pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg); + pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); pss_ctl &= ~__PSS_LMEM_RESET; pss_ctl |= __PSS_LMEM_INIT_EN; @@ -1198,18 +1197,18 @@ bfa_ioc_lmem_init(struct bfa_ioc_s *ioc) * i2c workaround 12.5khz clock */ pss_ctl |= __PSS_I2C_CLK_DIV(3UL); - bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl); + writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); - /** + /* * wait for memory initialization to be complete */ i = 0; do { - pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg); + pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); i++; } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME)); - /** + /* * If memory initialization is not successful, IOC timeout will catch * such failures. */ @@ -1217,7 +1216,7 @@ bfa_ioc_lmem_init(struct bfa_ioc_s *ioc) bfa_trc(ioc, pss_ctl); pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN); - bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl); + writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); } static void @@ -1225,13 +1224,13 @@ bfa_ioc_lpu_start(struct bfa_ioc_s *ioc) { u32 pss_ctl; - /** + /* * Take processor out of reset. 
*/ - pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg); + pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); pss_ctl &= ~__PSS_LPU0_RESET; - bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl); + writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); } static void @@ -1239,16 +1238,16 @@ bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc) { u32 pss_ctl; - /** + /* * Put processors in reset. */ - pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg); + pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET); - bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl); + writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); } -/** +/* * Get driver and firmware versions. */ void @@ -1261,7 +1260,7 @@ bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) pgnum = bfa_ioc_smem_pgnum(ioc, loff); pgoff = bfa_ioc_smem_pgoff(ioc, loff); - bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); + writel(pgnum, ioc->ioc_regs.host_page_num_fn); for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32)); i++) { @@ -1271,7 +1270,7 @@ bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) } } -/** +/* * Returns TRUE if same. */ bfa_boolean_t @@ -1296,7 +1295,7 @@ bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr) return BFA_TRUE; } -/** +/* * Return true if current running version is valid. Firmware signature and * execution context (driver/bios) must match. */ @@ -1305,7 +1304,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env) { struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr; - /** + /* * If bios/efi boot (flash based) -- return true */ if (bfa_ioc_is_bios_optrom(ioc)) @@ -1321,7 +1320,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env) return BFA_FALSE; } - if (bfa_os_swap32(fwhdr.param) != boot_env) { + if (swab32(fwhdr.param) != boot_env) { bfa_trc(ioc, fwhdr.param); bfa_trc(ioc, boot_env); return BFA_FALSE; @@ -1330,7 +1329,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc_s *ioc, u32 boot_env) return bfa_ioc_fwver_cmp(ioc, &fwhdr); } -/** +/* * Conditionally flush any pending message from firmware at start. */ static void @@ -1338,9 +1337,9 @@ bfa_ioc_msgflush(struct bfa_ioc_s *ioc) { u32 r32; - r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd); + r32 = readl(ioc->ioc_regs.lpu_mbox_cmd); if (r32) - bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1); + writel(1, ioc->ioc_regs.lpu_mbox_cmd); } @@ -1352,7 +1351,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) u32 boot_type; u32 boot_env; - ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate); + ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); if (force) ioc_fwstate = BFI_IOC_UNINIT; @@ -1362,7 +1361,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) boot_type = BFI_BOOT_TYPE_NORMAL; boot_env = BFI_BOOT_LOADER_OS; - /** + /* * Flash based firmware boot BIOS env. */ if (bfa_ioc_is_bios_optrom(ioc)) { @@ -1370,7 +1369,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) boot_env = BFI_BOOT_LOADER_BIOS; } - /** + /* * Flash based firmware boot UEFI env. */ if (bfa_ioc_is_uefi(ioc)) { @@ -1378,7 +1377,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) boot_env = BFI_BOOT_LOADER_UEFI; } - /** + /* * check if firmware is valid */ fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ? @@ -1389,7 +1388,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) return; } - /** + /* * If hardware initialization is in progress (initialized by other IOC), * just wait for an initialization completion interrupt. 
*/ @@ -1398,7 +1397,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) return; } - /** + /* * If IOC function is disabled and firmware version is same, * just re-enable IOC. * @@ -1409,7 +1408,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) if (ioc_fwstate == BFI_IOC_DISABLED || (!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) { - /** + /* * When using MSI-X any pending firmware ready event should * be flushed. Otherwise MSI-X interrupts are not delivered. */ @@ -1419,7 +1418,7 @@ bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force) return; } - /** + /* * Initialize the h/w for any other states. */ bfa_ioc_boot(ioc, boot_type, boot_env); @@ -1449,17 +1448,17 @@ bfa_ioc_mbox_send(struct bfa_ioc_s *ioc, void *ioc_msg, int len) * first write msg to mailbox registers */ for (i = 0; i < len / sizeof(u32); i++) - bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), - bfa_os_wtole(msgp[i])); + writel(cpu_to_le32(msgp[i]), + ioc->ioc_regs.hfn_mbox + i * sizeof(u32)); for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++) - bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), 0); + writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32)); /* * write 1 to mailbox CMD to trigger LPU event */ - bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1); - (void) bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd); + writel(1, ioc->ioc_regs.hfn_mbox_cmd); + (void) readl(ioc->ioc_regs.hfn_mbox_cmd); } static void @@ -1472,7 +1471,7 @@ bfa_ioc_send_enable(struct bfa_ioc_s *ioc) bfa_ioc_portid(ioc)); enable_req.ioc_class = ioc->ioc_mc; bfa_os_gettimeofday(&tv); - enable_req.tv_sec = bfa_os_ntohl(tv.tv_sec); + enable_req.tv_sec = be32_to_cpu(tv.tv_sec); bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s)); } @@ -1503,7 +1502,7 @@ bfa_ioc_hb_check(void *cbarg) struct bfa_ioc_s *ioc = cbarg; u32 hb_count; - hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); + hb_count = readl(ioc->ioc_regs.heartbeat); if (ioc->hb_count == hb_count) { printk(KERN_CRIT "Firmware heartbeat failure at %d", hb_count); bfa_ioc_recover(ioc); @@ -1519,7 +1518,7 @@ bfa_ioc_hb_check(void *cbarg) static void bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc) { - ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat); + ioc->hb_count = readl(ioc->ioc_regs.heartbeat); bfa_hb_timer_start(ioc); } @@ -1530,7 +1529,7 @@ bfa_ioc_hb_stop(struct bfa_ioc_s *ioc) } -/** +/* * Initiate a full firmware download. 
*/ static void @@ -1543,7 +1542,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, u32 chunkno = 0; u32 i; - /** + /* * Initialize LMEM first before code download */ bfa_ioc_lmem_init(ioc); @@ -1554,7 +1553,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, pgnum = bfa_ioc_smem_pgnum(ioc, loff); pgoff = bfa_ioc_smem_pgoff(ioc, loff); - bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); + writel(pgnum, ioc->ioc_regs.host_page_num_fn); for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) { @@ -1564,7 +1563,7 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); } - /** + /* * write smem */ bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, @@ -1572,27 +1571,25 @@ bfa_ioc_download_fw(struct bfa_ioc_s *ioc, u32 boot_type, loff += sizeof(u32); - /** + /* * handle page offset wrap around */ loff = PSS_SMEM_PGOFF(loff); if (loff == 0) { pgnum++; - bfa_reg_write(ioc->ioc_regs.host_page_num_fn, - pgnum); + writel(pgnum, ioc->ioc_regs.host_page_num_fn); } } - bfa_reg_write(ioc->ioc_regs.host_page_num_fn, - bfa_ioc_smem_pgnum(ioc, 0)); + writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn); /* * Set boot type and boot param at the end. */ bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF, - bfa_os_swap32(boot_type)); + swab32(boot_type)); bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF, - bfa_os_swap32(boot_env)); + swab32(boot_env)); } static void @@ -1601,7 +1598,7 @@ bfa_ioc_reset(struct bfa_ioc_s *ioc, bfa_boolean_t force) bfa_ioc_hwinit(ioc, force); } -/** +/* * Update BFA configuration from firmware configuration. */ static void @@ -1609,14 +1606,14 @@ bfa_ioc_getattr_reply(struct bfa_ioc_s *ioc) { struct bfi_ioc_attr_s *attr = ioc->attr; - attr->adapter_prop = bfa_os_ntohl(attr->adapter_prop); - attr->card_type = bfa_os_ntohl(attr->card_type); - attr->maxfrsize = bfa_os_ntohs(attr->maxfrsize); + attr->adapter_prop = be32_to_cpu(attr->adapter_prop); + attr->card_type = be32_to_cpu(attr->card_type); + attr->maxfrsize = be16_to_cpu(attr->maxfrsize); bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); } -/** +/* * Attach time initialization of mbox logic. */ static void @@ -1632,7 +1629,7 @@ bfa_ioc_mbox_attach(struct bfa_ioc_s *ioc) } } -/** +/* * Mbox poll timer -- restarts any pending mailbox requests. */ static void @@ -1642,27 +1639,27 @@ bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc) struct bfa_mbox_cmd_s *cmd; u32 stat; - /** + /* * If no command pending, do nothing */ if (list_empty(&mod->cmd_q)) return; - /** + /* * If previous command is not yet fetched by firmware, do nothing */ - stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd); + stat = readl(ioc->ioc_regs.hfn_mbox_cmd); if (stat) return; - /** + /* * Enqueue command to firmware. */ bfa_q_deq(&mod->cmd_q, &cmd); bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); } -/** +/* * Cleanup any pending requests. 
*/ static void @@ -1675,7 +1672,7 @@ bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc) bfa_q_deq(&mod->cmd_q, &cmd); } -/** +/* * Read data from SMEM to host through PCI memmap * * @param[in] ioc memory for IOC @@ -1704,26 +1701,25 @@ bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz) return BFA_STATUS_FAILED; } - bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); + writel(pgnum, ioc->ioc_regs.host_page_num_fn); len = sz/sizeof(u32); bfa_trc(ioc, len); for (i = 0; i < len; i++) { r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff); - buf[i] = bfa_os_ntohl(r32); + buf[i] = be32_to_cpu(r32); loff += sizeof(u32); - /** + /* * handle page offset wrap around */ loff = PSS_SMEM_PGOFF(loff); if (loff == 0) { pgnum++; - bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); + writel(pgnum, ioc->ioc_regs.host_page_num_fn); } } - bfa_reg_write(ioc->ioc_regs.host_page_num_fn, - bfa_ioc_smem_pgnum(ioc, 0)); + writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn); /* * release semaphore. */ @@ -1733,7 +1729,7 @@ bfa_ioc_smem_read(struct bfa_ioc_s *ioc, void *tbuf, u32 soff, u32 sz) return BFA_STATUS_OK; } -/** +/* * Clear SMEM data from host through PCI memmap * * @param[in] ioc memory for IOC @@ -1760,7 +1756,7 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz) return BFA_STATUS_FAILED; } - bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); + writel(pgnum, ioc->ioc_regs.host_page_num_fn); len = sz/sizeof(u32); /* len in words */ bfa_trc(ioc, len); @@ -1768,17 +1764,16 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz) bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0); loff += sizeof(u32); - /** + /* * handle page offset wrap around */ loff = PSS_SMEM_PGOFF(loff); if (loff == 0) { pgnum++; - bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum); + writel(pgnum, ioc->ioc_regs.host_page_num_fn); } } - bfa_reg_write(ioc->ioc_regs.host_page_num_fn, - bfa_ioc_smem_pgnum(ioc, 0)); + writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn); /* * release semaphore. @@ -1788,7 +1783,7 @@ bfa_ioc_smem_clr(struct bfa_ioc_s *ioc, u32 soff, u32 sz) return BFA_STATUS_OK; } -/** +/* * hal iocpf to ioc interface */ static void @@ -1813,7 +1808,7 @@ static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc) { struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad; - /** + /* * Provide enable completion callback. */ ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); @@ -1824,7 +1819,7 @@ bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc) -/** +/* * hal_ioc_public */ @@ -1848,43 +1843,43 @@ bfa_ioc_pll_init(struct bfa_ioc_s *ioc) return BFA_STATUS_OK; } -/** +/* * Interface used by diag module to do firmware boot with memory test * as the entry vector. */ void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env) { - bfa_os_addr_t rb; + void __iomem *rb; bfa_ioc_stats(ioc, ioc_boots); if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK) return; - /** + /* * Initialize IOC state of all functions on a chip reset. 
*/ rb = ioc->pcidev.pci_bar_kva; if (boot_type == BFI_BOOT_TYPE_MEMTEST) { - bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST); - bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST); + writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG)); + writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG)); } else { - bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_INITING); - bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_INITING); + writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG)); + writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG)); } bfa_ioc_msgflush(ioc); bfa_ioc_download_fw(ioc, boot_type, boot_env); - /** + /* * Enable interrupts just before starting LPU */ ioc->cbfn->reset_cbfn(ioc->bfa); bfa_ioc_lpu_start(ioc); } -/** +/* * Enable/disable IOC failure auto recovery. */ void @@ -1904,7 +1899,7 @@ bfa_ioc_is_operational(struct bfa_ioc_s *ioc) bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc) { - u32 r32 = bfa_reg_read(ioc->ioc_regs.ioc_fwstate); + u32 r32 = readl(ioc->ioc_regs.ioc_fwstate); return ((r32 != BFI_IOC_UNINIT) && (r32 != BFI_IOC_INITING) && @@ -1918,21 +1913,21 @@ bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg) u32 r32; int i; - /** + /* * read the MBOX msg */ for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32)); i++) { - r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox + + r32 = readl(ioc->ioc_regs.lpu_mbox + i * sizeof(u32)); - msgp[i] = bfa_os_htonl(r32); + msgp[i] = cpu_to_be32(r32); } - /** + /* * turn off mailbox interrupt by clearing mailbox status */ - bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1); - bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd); + writel(1, ioc->ioc_regs.lpu_mbox_cmd); + readl(ioc->ioc_regs.lpu_mbox_cmd); } void @@ -1971,7 +1966,7 @@ bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *m) } } -/** +/* * IOC attach time initialization and setup. * * @param[in] ioc memory for IOC @@ -1996,7 +1991,7 @@ bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa, struct bfa_ioc_cbfn_s *cbfn, bfa_fsm_send_event(ioc, IOC_E_RESET); } -/** +/* * Driver detach time IOC cleanup. */ void @@ -2005,7 +2000,7 @@ bfa_ioc_detach(struct bfa_ioc_s *ioc) bfa_fsm_send_event(ioc, IOC_E_DETACH); } -/** +/* * Setup IOC PCI properties. * * @param[in] pcidev PCI device information for this IOC @@ -2019,7 +2014,7 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id); ioc->cna = ioc->ctdev && !ioc->fcmode; - /** + /* * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c */ if (ioc->ctdev) @@ -2031,7 +2026,7 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, bfa_ioc_reg_init(ioc); } -/** +/* * Initialize IOC dma memory * * @param[in] dm_kva kernel virtual address of IOC dma memory @@ -2040,7 +2035,7 @@ bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev, void bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa) { - /** + /* * dma memory for firmware attribute */ ioc->attr_dma.kva = dm_kva; @@ -2048,7 +2043,7 @@ bfa_ioc_mem_claim(struct bfa_ioc_s *ioc, u8 *dm_kva, u64 dm_pa) ioc->attr = (struct bfi_ioc_attr_s *) dm_kva; } -/** +/* * Return size of dma memory required. */ u32 @@ -2073,7 +2068,7 @@ bfa_ioc_disable(struct bfa_ioc_s *ioc) bfa_fsm_send_event(ioc, IOC_E_DISABLE); } -/** +/* * Returns memory required for saving firmware trace in case of crash. * Driver must call this interface to allocate memory required for * automatic saving of firmware trace. 
Driver should call @@ -2086,7 +2081,7 @@ bfa_ioc_debug_trcsz(bfa_boolean_t auto_recover) return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0; } -/** +/* * Initialize memory for saving firmware trace. Driver must initialize * trace memory before call bfa_ioc_enable(). */ @@ -2109,7 +2104,7 @@ bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr) return PSS_SMEM_PGOFF(fmaddr); } -/** +/* * Register mailbox message handler functions * * @param[in] ioc IOC instance @@ -2125,7 +2120,7 @@ bfa_ioc_mbox_register(struct bfa_ioc_s *ioc, bfa_ioc_mbox_mcfunc_t *mcfuncs) mod->mbhdlr[mc].cbfn = mcfuncs[mc]; } -/** +/* * Register mailbox message handler function, to be called by common modules */ void @@ -2138,7 +2133,7 @@ bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc, mod->mbhdlr[mc].cbarg = cbarg; } -/** +/* * Queue a mailbox command request to firmware. Waits if mailbox is busy. * Responsibility of caller to serialize * @@ -2151,7 +2146,7 @@ bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd) struct bfa_ioc_mbox_mod_s *mod = &ioc->mbox_mod; u32 stat; - /** + /* * If a previous command is pending, queue new command */ if (!list_empty(&mod->cmd_q)) { @@ -2159,22 +2154,22 @@ bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd) return; } - /** + /* * If mailbox is busy, queue command for poll timer */ - stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd); + stat = readl(ioc->ioc_regs.hfn_mbox_cmd); if (stat) { list_add_tail(&cmd->qe, &mod->cmd_q); return; } - /** + /* * mailbox is free -- queue command to firmware */ bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); } -/** +/* * Handle mailbox interrupts */ void @@ -2186,7 +2181,7 @@ bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc) bfa_ioc_msgget(ioc, &m); - /** + /* * Treat IOC message class as special. */ mc = m.mh.msg_class; @@ -2214,7 +2209,7 @@ bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc) ioc->port_id = bfa_ioc_pcifn(ioc); } -/** +/* * return true if IOC is disabled */ bfa_boolean_t @@ -2224,7 +2219,7 @@ bfa_ioc_is_disabled(struct bfa_ioc_s *ioc) bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); } -/** +/* * return true if IOC firmware is different. */ bfa_boolean_t @@ -2243,7 +2238,7 @@ bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc) ((__sm) == BFI_IOC_FAIL) || \ ((__sm) == BFI_IOC_CFG_DISABLED)) -/** +/* * Check if adapter is disabled -- both IOCs should be in a disabled * state. */ @@ -2251,17 +2246,17 @@ bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc) { u32 ioc_state; - bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; + void __iomem *rb = ioc->pcidev.pci_bar_kva; if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled)) return BFA_FALSE; - ioc_state = bfa_reg_read(rb + BFA_IOC0_STATE_REG); + ioc_state = readl(rb + BFA_IOC0_STATE_REG); if (!bfa_ioc_state_disabled(ioc_state)) return BFA_FALSE; if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) { - ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG); + ioc_state = readl(rb + BFA_IOC1_STATE_REG); if (!bfa_ioc_state_disabled(ioc_state)) return BFA_FALSE; } @@ -2269,7 +2264,7 @@ bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc) return BFA_TRUE; } -/** +/* * Add to IOC heartbeat failure notification queue. To be used by common * modules such as cee, port, diag. 
*/ @@ -2293,7 +2288,7 @@ bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc, bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver); bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver); bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer); - bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd, + memcpy(&ad_attr->vpd, &ioc_attr->vpd, sizeof(struct bfa_mfg_vpd_s)); ad_attr->nports = bfa_ioc_get_nports(ioc); @@ -2343,8 +2338,8 @@ bfa_ioc_get_type(struct bfa_ioc_s *ioc) void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num) { - bfa_os_memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN); - bfa_os_memcpy((void *)serial_num, + memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN); + memcpy((void *)serial_num, (void *)ioc->attr->brcd_serialnum, BFA_ADAPTER_SERIAL_NUM_LEN); } @@ -2352,8 +2347,8 @@ bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num) void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver) { - bfa_os_memset((void *)fw_ver, 0, BFA_VERSION_LEN); - bfa_os_memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN); + memset((void *)fw_ver, 0, BFA_VERSION_LEN); + memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN); } void @@ -2361,7 +2356,7 @@ bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev) { bfa_assert(chip_rev); - bfa_os_memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN); + memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN); chip_rev[0] = 'R'; chip_rev[1] = 'e'; @@ -2374,16 +2369,16 @@ bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev) void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver) { - bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN); - bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version, + memset((void *)optrom_ver, 0, BFA_VERSION_LEN); + memcpy(optrom_ver, ioc->attr->optrom_version, BFA_VERSION_LEN); } void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer) { - bfa_os_memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN); - bfa_os_memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); + memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN); + memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); } void @@ -2392,14 +2387,14 @@ bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model) struct bfi_ioc_attr_s *ioc_attr; bfa_assert(model); - bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN); + memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN); ioc_attr = ioc->attr; - /** + /* * model name */ - bfa_os_snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", + snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", BFA_MFG_NAME, ioc_attr->card_type); } @@ -2446,7 +2441,7 @@ bfa_ioc_get_state(struct bfa_ioc_s *ioc) void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr) { - bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s)); + memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s)); ioc_attr->state = bfa_ioc_get_state(ioc); ioc_attr->port_id = ioc->port_id; @@ -2460,7 +2455,7 @@ bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr) bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); } -/** +/* * hal_wwn_public */ wwn_t @@ -2526,7 +2521,7 @@ bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc) return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id); } -/** +/* * Retrieve saved firmware trace from a prior IOC failure. 
*/ bfa_status_t @@ -2541,12 +2536,12 @@ bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata, int *trclen) if (tlen > ioc->dbg_fwsave_len) tlen = ioc->dbg_fwsave_len; - bfa_os_memcpy(trcdata, ioc->dbg_fwsave, tlen); + memcpy(trcdata, ioc->dbg_fwsave, tlen); *trclen = tlen; return BFA_STATUS_OK; } -/** +/* * Clear saved firmware trace */ void @@ -2555,7 +2550,7 @@ bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc) ioc->dbg_fwsave_once = BFA_TRUE; } -/** +/* * Retrieve saved firmware trace from a prior IOC failure. */ bfa_status_t @@ -2595,7 +2590,7 @@ bfa_ioc_fwsync(struct bfa_ioc_s *ioc) bfa_ioc_send_fwsync(ioc); - /** + /* * After sending a fw sync mbox command wait for it to * take effect. We will not wait for a response because * 1. fw_sync mbox cmd doesn't have a response. @@ -2610,7 +2605,7 @@ bfa_ioc_fwsync(struct bfa_ioc_s *ioc) fwsync_iter--; } -/** +/* * Dump firmware smem */ bfa_status_t @@ -2630,7 +2625,7 @@ bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf, loff = *offset; dlen = *buflen; - /** + /* * First smem read, sync smem before proceeding * No need to sync before reading every chunk. */ @@ -2657,7 +2652,7 @@ bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf, return status; } -/** +/* * Firmware statistics */ bfa_status_t @@ -2702,7 +2697,7 @@ bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc) return status; } -/** +/* * Save firmware trace if configured. */ static void @@ -2716,7 +2711,7 @@ bfa_ioc_debug_save(struct bfa_ioc_s *ioc) } } -/** +/* * Firmware failure detected. Start recovery actions. */ static void @@ -2738,7 +2733,7 @@ bfa_ioc_check_attr_wwns(struct bfa_ioc_s *ioc) return; } -/** +/* * hal_iocpf_pvt BFA IOC PF private functions */ @@ -2795,7 +2790,7 @@ bfa_iocpf_sem_timeout(void *ioc_arg) bfa_ioc_hw_sem_get(ioc); } -/** +/* * bfa timer function */ void @@ -2840,7 +2835,7 @@ bfa_timer_beat(struct bfa_timer_mod_s *mod) } } -/** +/* * Should be called with lock protection */ void @@ -2858,7 +2853,7 @@ bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer, list_add_tail(&timer->qe, &mod->timer_q); } -/** +/* * Should be called with lock protection */ void diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h index 288c5801aac..9c407a87a1a 100644 --- a/drivers/scsi/bfa/bfa_ioc.h +++ b/drivers/scsi/bfa/bfa_ioc.h @@ -22,29 +22,29 @@ #include "bfa_cs.h" #include "bfi.h" -/** +/* * BFA timer declarations */ typedef void (*bfa_timer_cbfn_t)(void *); -/** +/* * BFA timer data structure */ struct bfa_timer_s { struct list_head qe; bfa_timer_cbfn_t timercb; void *arg; - int timeout; /**< in millisecs. 
*/ + int timeout; /* in millisecs */ }; -/** +/* * Timer module structure */ struct bfa_timer_mod_s { struct list_head timer_q; }; -#define BFA_TIMER_FREQ 200 /**< specified in millisecs */ +#define BFA_TIMER_FREQ 200 /* specified in millisecs */ void bfa_timer_beat(struct bfa_timer_mod_s *mod); void bfa_timer_init(struct bfa_timer_mod_s *mod); @@ -53,7 +53,7 @@ void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer, unsigned int timeout); void bfa_timer_stop(struct bfa_timer_s *timer); -/** +/* * Generic Scatter Gather Element used by driver */ struct bfa_sge_s { @@ -62,9 +62,9 @@ struct bfa_sge_s { }; #define bfa_sge_word_swap(__sge) do { \ - ((u32 *)(__sge))[0] = bfa_os_swap32(((u32 *)(__sge))[0]); \ - ((u32 *)(__sge))[1] = bfa_os_swap32(((u32 *)(__sge))[1]); \ - ((u32 *)(__sge))[2] = bfa_os_swap32(((u32 *)(__sge))[2]); \ + ((u32 *)(__sge))[0] = swab32(((u32 *)(__sge))[0]); \ + ((u32 *)(__sge))[1] = swab32(((u32 *)(__sge))[1]); \ + ((u32 *)(__sge))[2] = swab32(((u32 *)(__sge))[2]); \ } while (0) #define bfa_swap_words(_x) ( \ @@ -80,17 +80,17 @@ struct bfa_sge_s { #define bfa_sgaddr_le(_x) (_x) #endif -/** +/* * PCI device information required by IOC */ struct bfa_pcidev_s { int pci_slot; u8 pci_func; - u16 device_id; - bfa_os_addr_t pci_bar_kva; + u16 device_id; + void __iomem *pci_bar_kva; }; -/** +/* * Structure used to remember the DMA-able memory block's KVA and Physical * Address */ @@ -102,7 +102,7 @@ struct bfa_dma_s { #define BFA_DMA_ALIGN_SZ 256 #define BFA_ROUNDUP(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1)) -/** +/* * smem size for Crossbow and Catapult */ #define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */ @@ -125,40 +125,38 @@ __bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa) static inline void __bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa) { - dma_addr->a32.addr_lo = (u32) bfa_os_htonl(pa); - dma_addr->a32.addr_hi = (u32) bfa_os_htonl(bfa_os_u32(pa)); + dma_addr->a32.addr_lo = (u32) cpu_to_be32(pa); + dma_addr->a32.addr_hi = (u32) cpu_to_be32(bfa_os_u32(pa)); } struct bfa_ioc_regs_s { - bfa_os_addr_t hfn_mbox_cmd; - bfa_os_addr_t hfn_mbox; - bfa_os_addr_t lpu_mbox_cmd; - bfa_os_addr_t lpu_mbox; - bfa_os_addr_t pss_ctl_reg; - bfa_os_addr_t pss_err_status_reg; - bfa_os_addr_t app_pll_fast_ctl_reg; - bfa_os_addr_t app_pll_slow_ctl_reg; - bfa_os_addr_t ioc_sem_reg; - bfa_os_addr_t ioc_usage_sem_reg; - bfa_os_addr_t ioc_init_sem_reg; - bfa_os_addr_t ioc_usage_reg; - bfa_os_addr_t host_page_num_fn; - bfa_os_addr_t heartbeat; - bfa_os_addr_t ioc_fwstate; - bfa_os_addr_t ll_halt; - bfa_os_addr_t err_set; - bfa_os_addr_t shirq_isr_next; - bfa_os_addr_t shirq_msk_next; - bfa_os_addr_t smem_page_start; + void __iomem *hfn_mbox_cmd; + void __iomem *hfn_mbox; + void __iomem *lpu_mbox_cmd; + void __iomem *lpu_mbox; + void __iomem *pss_ctl_reg; + void __iomem *pss_err_status_reg; + void __iomem *app_pll_fast_ctl_reg; + void __iomem *app_pll_slow_ctl_reg; + void __iomem *ioc_sem_reg; + void __iomem *ioc_usage_sem_reg; + void __iomem *ioc_init_sem_reg; + void __iomem *ioc_usage_reg; + void __iomem *host_page_num_fn; + void __iomem *heartbeat; + void __iomem *ioc_fwstate; + void __iomem *ll_halt; + void __iomem *err_set; + void __iomem *shirq_isr_next; + void __iomem *shirq_msk_next; + void __iomem *smem_page_start; u32 smem_pg0; }; -#define bfa_reg_read(_raddr) bfa_os_reg_read(_raddr) -#define bfa_reg_write(_raddr, _val) bfa_os_reg_write(_raddr, _val) -#define bfa_mem_read(_raddr, _off) bfa_os_mem_read(_raddr, _off) +#define bfa_mem_read(_raddr, 
_off) swab32(readl(((_raddr) + (_off)))) #define bfa_mem_write(_raddr, _off, _val) \ - bfa_os_mem_write(_raddr, _off, _val) -/** + writel(swab32((_val)), ((_raddr) + (_off))) +/* * IOC Mailbox structures */ struct bfa_mbox_cmd_s { @@ -166,7 +164,7 @@ struct bfa_mbox_cmd_s { u32 msg[BFI_IOC_MSGSZ]; }; -/** +/* * IOC mailbox module */ typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg_s *m); @@ -179,7 +177,7 @@ struct bfa_ioc_mbox_mod_s { } mbhdlr[BFI_MC_MAX]; }; -/** +/* * IOC callback function interfaces */ typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status); @@ -193,7 +191,7 @@ struct bfa_ioc_cbfn_s { bfa_ioc_reset_cbfn_t reset_cbfn; }; -/** +/* * Heartbeat failure notification queue element. */ struct bfa_ioc_hbfail_notify_s { @@ -202,7 +200,7 @@ struct bfa_ioc_hbfail_notify_s { void *cbarg; }; -/** +/* * Initialize a heartbeat failure notification structure */ #define bfa_ioc_hbfail_init(__notify, __cbfn, __cbarg) do { \ @@ -249,7 +247,7 @@ struct bfa_ioc_s { }; struct bfa_ioc_hwif_s { - bfa_status_t (*ioc_pll_init) (bfa_os_addr_t rb, bfa_boolean_t fcmode); + bfa_status_t (*ioc_pll_init) (void __iomem *rb, bfa_boolean_t fcmode); bfa_boolean_t (*ioc_firmware_lock) (struct bfa_ioc_s *ioc); void (*ioc_firmware_unlock) (struct bfa_ioc_s *ioc); void (*ioc_reg_init) (struct bfa_ioc_s *ioc); @@ -267,7 +265,7 @@ struct bfa_ioc_hwif_s { #define bfa_ioc_fetch_stats(__ioc, __stats) \ (((__stats)->drv_stats) = (__ioc)->stats) #define bfa_ioc_clr_stats(__ioc) \ - bfa_os_memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats)) + memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats)) #define bfa_ioc_maxfrsize(__ioc) ((__ioc)->attr->maxfrsize) #define bfa_ioc_rx_bbcredit(__ioc) ((__ioc)->attr->rx_bbcredit) #define bfa_ioc_speed_sup(__ioc) \ @@ -287,7 +285,7 @@ struct bfa_ioc_hwif_s { #define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) #define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS) -/** +/* * IOC mailbox interface */ void bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd); @@ -299,7 +297,7 @@ void bfa_ioc_msgget(struct bfa_ioc_s *ioc, void *mbmsg); void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc, bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg); -/** +/* * IOC interfaces */ @@ -308,9 +306,9 @@ void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc, (__ioc)->fcmode)) bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc); -bfa_status_t bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode); -bfa_boolean_t bfa_ioc_ct_pll_init_complete(bfa_os_addr_t rb); -bfa_status_t bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode); +bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode); +bfa_boolean_t bfa_ioc_ct_pll_init_complete(void __iomem *rb); +bfa_status_t bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode); #define bfa_ioc_isr_mode_set(__ioc, __msix) \ ((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix)) @@ -370,8 +368,8 @@ void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc); bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc); void bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc, struct bfa_ioc_hbfail_notify_s *notify); -bfa_boolean_t bfa_ioc_sem_get(bfa_os_addr_t sem_reg); -void bfa_ioc_sem_release(bfa_os_addr_t sem_reg); +bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg); +void bfa_ioc_sem_release(void __iomem *sem_reg); void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc); void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s 
*fwhdr); @@ -441,7 +439,7 @@ bfa_cb_image_get_size(int type) } } -/** +/* * CNA TRCMOD declaration */ /* diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c index d7ac864d853..90994504385 100644 --- a/drivers/scsi/bfa/bfa_ioc_cb.c +++ b/drivers/scsi/bfa/bfa_ioc_cb.c @@ -34,7 +34,7 @@ static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc); struct bfa_ioc_hwif_s hwif_cb; -/** +/* * Called from bfa_ioc_attach() to map asic specific calls. */ void @@ -52,7 +52,7 @@ bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc) ioc->ioc_hwif = &hwif_cb; } -/** +/* * Return true if firmware of current driver matches the running firmware. */ static bfa_boolean_t @@ -66,17 +66,17 @@ bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc) { } -/** +/* * Notify other functions on HB failure. */ static void bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc) { - bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET); - bfa_reg_read(ioc->ioc_regs.err_set); + writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set); + readl(ioc->ioc_regs.err_set); } -/** +/* * Host to LPU mailbox message addresses */ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { @@ -84,7 +84,7 @@ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 } }; -/** +/* * Host <-> LPU mailbox command/status registers */ static struct { u32 hfn, lpu; } iocreg_mbcmd[] = { @@ -96,7 +96,7 @@ static struct { u32 hfn, lpu; } iocreg_mbcmd[] = { static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc) { - bfa_os_addr_t rb; + void __iomem *rb; int pcifn = bfa_ioc_pcifn(ioc); rb = bfa_ioc_bar0(ioc); @@ -113,7 +113,7 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc) ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); } - /** + /* * Host <-> LPU mailbox command/status registers */ ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn; @@ -133,7 +133,7 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc) ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG); ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); - /** + /* * sram memory access */ ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); @@ -145,14 +145,14 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc) ioc->ioc_regs.err_set = (rb + ERR_SET_REG); } -/** +/* * Initialize IOC to port mapping. */ static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc) { - /** + /* * For crossbow, port id is same as pci function. */ ioc->port_id = bfa_ioc_pcifn(ioc); @@ -160,7 +160,7 @@ bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc) bfa_trc(ioc, ioc->port_id); } -/** +/* * Set interrupt mode for a function: INTX or MSIX */ static void @@ -168,7 +168,7 @@ bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix) { } -/** +/* * Cleanup hw semaphore and usecnt registers */ static void @@ -180,14 +180,14 @@ bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc) * before we clear it. If it is not locked, writing 1 * will lock it instead of clearing it. 
*/ - bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); + readl(ioc->ioc_regs.ioc_sem_reg); bfa_ioc_hw_sem_release(ioc); } bfa_status_t -bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode) +bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode) { u32 pll_sclk, pll_fclk; @@ -199,38 +199,32 @@ bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode) __APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) | __APP_PLL_400_JITLMT0_1(3U) | __APP_PLL_400_CNTLMT0_1(3U); - bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT); - bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT); - bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); - bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); - bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); - bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); - bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); - bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); - bfa_reg_write(rb + APP_PLL_212_CTL_REG, - __APP_PLL_212_LOGIC_SOFT_RESET); - bfa_reg_write(rb + APP_PLL_212_CTL_REG, - __APP_PLL_212_BYPASS | - __APP_PLL_212_LOGIC_SOFT_RESET); - bfa_reg_write(rb + APP_PLL_400_CTL_REG, - __APP_PLL_400_LOGIC_SOFT_RESET); - bfa_reg_write(rb + APP_PLL_400_CTL_REG, - __APP_PLL_400_BYPASS | - __APP_PLL_400_LOGIC_SOFT_RESET); - bfa_os_udelay(2); - bfa_reg_write(rb + APP_PLL_212_CTL_REG, - __APP_PLL_212_LOGIC_SOFT_RESET); - bfa_reg_write(rb + APP_PLL_400_CTL_REG, - __APP_PLL_400_LOGIC_SOFT_RESET); - bfa_reg_write(rb + APP_PLL_212_CTL_REG, - pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET); - bfa_reg_write(rb + APP_PLL_400_CTL_REG, - pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET); - bfa_os_udelay(2000); - bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); - bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); - bfa_reg_write((rb + APP_PLL_212_CTL_REG), pll_sclk); - bfa_reg_write((rb + APP_PLL_400_CTL_REG), pll_fclk); + writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG)); + writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG)); + writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); + writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); + writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); + writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); + writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); + writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); + writel(__APP_PLL_212_LOGIC_SOFT_RESET, rb + APP_PLL_212_CTL_REG); + writel(__APP_PLL_212_BYPASS | __APP_PLL_212_LOGIC_SOFT_RESET, + rb + APP_PLL_212_CTL_REG); + writel(__APP_PLL_400_LOGIC_SOFT_RESET, rb + APP_PLL_400_CTL_REG); + writel(__APP_PLL_400_BYPASS | __APP_PLL_400_LOGIC_SOFT_RESET, + rb + APP_PLL_400_CTL_REG); + udelay(2); + writel(__APP_PLL_212_LOGIC_SOFT_RESET, rb + APP_PLL_212_CTL_REG); + writel(__APP_PLL_400_LOGIC_SOFT_RESET, rb + APP_PLL_400_CTL_REG); + writel(pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET, + rb + APP_PLL_212_CTL_REG); + writel(pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET, + rb + APP_PLL_400_CTL_REG); + udelay(2000); + writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); + writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); + writel(pll_sclk, (rb + APP_PLL_212_CTL_REG)); + writel(pll_fclk, (rb + APP_PLL_400_CTL_REG)); return BFA_STATUS_OK; } diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c index f21b82c5f64..115730c0aa7 100644 --- a/drivers/scsi/bfa/bfa_ioc_ct.c +++ b/drivers/scsi/bfa/bfa_ioc_ct.c @@ -34,7 +34,7 @@ static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc); struct bfa_ioc_hwif_s hwif_ct; -/** +/* * Called from bfa_ioc_attach() to map asic specific calls. 
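Just above, bfa_ioc_cb_notify_hbfail() follows its writel() with a readl() of the same register, and the PLL bring-up relies on the same writel()/udelay() sequencing. The read-back matters because a writel() may still be sitting in a posted PCI write buffer; a generic sketch of the idiom, with an invented register offset:

#include <linux/io.h>
#include <linux/delay.h>

#define DEMO_SOFT_RESET_REG	0x100	/* invented offset */
#define DEMO_SOFT_RESET_BIT	0x1

static void demo_soft_reset(void __iomem *rb)
{
	writel(DEMO_SOFT_RESET_BIT, rb + DEMO_SOFT_RESET_REG);
	readl(rb + DEMO_SOFT_RESET_REG);	/* flush the posted write */
	udelay(2);				/* let the reset settle */
	writel(0, rb + DEMO_SOFT_RESET_REG);
}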
*/ void @@ -52,7 +52,7 @@ bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc) ioc->ioc_hwif = &hwif_ct; } -/** +/* * Return true if firmware of current driver matches the running firmware. */ static bfa_boolean_t @@ -62,13 +62,13 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc) u32 usecnt; struct bfi_ioc_image_hdr_s fwhdr; - /** + /* * Firmware match check is relevant only for CNA. */ if (!ioc->cna) return BFA_TRUE; - /** + /* * If bios boot (flash based) -- do not increment usage count */ if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < @@ -76,27 +76,27 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc) return BFA_TRUE; bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); - usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg); + usecnt = readl(ioc->ioc_regs.ioc_usage_reg); - /** + /* * If usage count is 0, always return TRUE. */ if (usecnt == 0) { - bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1); + writel(1, ioc->ioc_regs.ioc_usage_reg); bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); bfa_trc(ioc, usecnt); return BFA_TRUE; } - ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate); + ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); bfa_trc(ioc, ioc_fwstate); - /** + /* * Use count cannot be non-zero and chip in uninitialized state. */ bfa_assert(ioc_fwstate != BFI_IOC_UNINIT); - /** + /* * Check if another driver with a different firmware is active */ bfa_ioc_fwver_get(ioc, &fwhdr); @@ -106,11 +106,11 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc) return BFA_FALSE; } - /** + /* * Same firmware version. Increment the reference count. */ usecnt++; - bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt); + writel(usecnt, ioc->ioc_regs.ioc_usage_reg); bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); bfa_trc(ioc, usecnt); return BFA_TRUE; @@ -121,50 +121,50 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc) { u32 usecnt; - /** + /* * Firmware lock is relevant only for CNA. */ if (!ioc->cna) return; - /** + /* * If bios boot (flash based) -- do not decrement usage count */ if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < BFA_IOC_FWIMG_MINSZ) return; - /** + /* * decrement usage count */ bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); - usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg); + usecnt = readl(ioc->ioc_regs.ioc_usage_reg); bfa_assert(usecnt > 0); usecnt--; - bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt); + writel(usecnt, ioc->ioc_regs.ioc_usage_reg); bfa_trc(ioc, usecnt); bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); } -/** +/* * Notify other functions on HB failure. 
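bfa_ioc_ct_firmware_lock()/_unlock() above maintain a firmware use count in a device register, bracketed by a hardware semaphore. The sketch below shows only that shape; demo_sem_get()/demo_sem_put() are placeholders and do not model the BFA semaphore register's actual read/write semantics.

#include <linux/io.h>
#include <linux/types.h>

/* Placeholder semaphore helpers; the real register semantics differ. */
static bool demo_sem_get(void __iomem *sem_reg)
{
	return readl(sem_reg) == 0;	/* 0 means we now own the semaphore */
}

static void demo_sem_put(void __iomem *sem_reg)
{
	writel(1, sem_reg);
}

static bool demo_fw_get(void __iomem *sem_reg, void __iomem *usecnt_reg)
{
	if (!demo_sem_get(sem_reg))
		return false;		/* caller retries later */

	writel(readl(usecnt_reg) + 1, usecnt_reg);	/* bump shared use count */
	demo_sem_put(sem_reg);
	return true;
}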
*/ static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc) { if (ioc->cna) { - bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P); + writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt); /* Wait for halt to take effect */ - bfa_reg_read(ioc->ioc_regs.ll_halt); + readl(ioc->ioc_regs.ll_halt); } else { - bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET); - bfa_reg_read(ioc->ioc_regs.err_set); + writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set); + readl(ioc->ioc_regs.err_set); } } -/** +/* * Host to LPU mailbox message addresses */ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { @@ -174,7 +174,7 @@ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = { { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 } }; -/** +/* * Host <-> LPU mailbox command/status registers - port 0 */ static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = { @@ -184,7 +184,7 @@ static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = { { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT } }; -/** +/* * Host <-> LPU mailbox command/status registers - port 1 */ static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = { @@ -197,7 +197,7 @@ static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = { static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc) { - bfa_os_addr_t rb; + void __iomem *rb; int pcifn = bfa_ioc_pcifn(ioc); rb = bfa_ioc_bar0(ioc); @@ -236,7 +236,7 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc) ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT); - /** + /* * sram memory access */ ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START); @@ -248,7 +248,7 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc) ioc->ioc_regs.err_set = (rb + ERR_SET_REG); } -/** +/* * Initialize IOC to port mapping. 
*/ @@ -256,13 +256,13 @@ bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc) static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc) { - bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; + void __iomem *rb = ioc->pcidev.pci_bar_kva; u32 r32; - /** + /* * For catapult, base port id on personality register and IOC type */ - r32 = bfa_reg_read(rb + FNC_PERS_REG); + r32 = readl(rb + FNC_PERS_REG); r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)); ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH; @@ -270,22 +270,22 @@ bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc) bfa_trc(ioc, ioc->port_id); } -/** +/* * Set interrupt mode for a function: INTX or MSIX */ static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix) { - bfa_os_addr_t rb = ioc->pcidev.pci_bar_kva; + void __iomem *rb = ioc->pcidev.pci_bar_kva; u32 r32, mode; - r32 = bfa_reg_read(rb + FNC_PERS_REG); + r32 = readl(rb + FNC_PERS_REG); bfa_trc(ioc, r32); mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) & __F0_INTX_STATUS; - /** + /* * If already in desired mode, do not change anything */ if (!msix && mode) @@ -300,10 +300,10 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix) r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); bfa_trc(ioc, r32); - bfa_reg_write(rb + FNC_PERS_REG, r32); + writel(r32, rb + FNC_PERS_REG); } -/** +/* * Cleanup hw semaphore and usecnt registers */ static void @@ -312,7 +312,7 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc) if (ioc->cna) { bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); - bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 0); + writel(0, ioc->ioc_regs.ioc_usage_reg); bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); } @@ -321,7 +321,7 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc) * before we clear it. If it is not locked, writing 1 * will lock it instead of clearing it. 
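bfa_ioc_ct_isr_mode_set() above is a read-modify-write of a personality register shared by the PCI functions, now done with readl()/writel(). A generic sketch with a simplified bit layout; the real field macros live in the BFA register headers.

#include <linux/io.h>
#include <linux/types.h>

#define DEMO_PERS_REG		0x1c0		/* invented offset */
#define DEMO_FN_SHIFT(fn)	((fn) * 8)	/* simplified per-function field */
#define DEMO_INTX_STATUS	0x1

static void demo_isr_mode_set(void __iomem *rb, int pcifn, bool msix)
{
	u32 r32 = readl(rb + DEMO_PERS_REG);
	u32 bit = DEMO_INTX_STATUS << DEMO_FN_SHIFT(pcifn);
	bool intx = r32 & bit;		/* bit set: function is in INTx mode */

	/* Nothing to do if this function is already in the requested mode. */
	if (msix == !intx)
		return;

	if (msix)
		r32 &= ~bit;
	else
		r32 |= bit;
	writel(r32, rb + DEMO_PERS_REG);
}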
*/ - bfa_reg_read(ioc->ioc_regs.ioc_sem_reg); + readl(ioc->ioc_regs.ioc_sem_reg); bfa_ioc_hw_sem_release(ioc); } @@ -331,17 +331,17 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc) * Check the firmware state to know if pll_init has been completed already */ bfa_boolean_t -bfa_ioc_ct_pll_init_complete(bfa_os_addr_t rb) +bfa_ioc_ct_pll_init_complete(void __iomem *rb) { - if ((bfa_reg_read(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) || - (bfa_reg_read(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP)) + if ((readl(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) || + (readl(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP)) return BFA_TRUE; return BFA_FALSE; } bfa_status_t -bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode) +bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode) { u32 pll_sclk, pll_fclk, r32; @@ -354,56 +354,51 @@ bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode) __APP_PLL_425_JITLMT0_1(3U) | __APP_PLL_425_CNTLMT0_1(1U); if (fcmode) { - bfa_reg_write((rb + OP_MODE), 0); - bfa_reg_write((rb + ETH_MAC_SER_REG), - __APP_EMS_CMLCKSEL | - __APP_EMS_REFCKBUFEN2 | - __APP_EMS_CHANNEL_SEL); + writel(0, (rb + OP_MODE)); + writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 | + __APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG)); } else { - bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE); - bfa_reg_write((rb + ETH_MAC_SER_REG), - __APP_EMS_REFCKBUFEN1); + writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE)); + writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG)); } - bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT); - bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT); - bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); - bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); - bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); - bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); - bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU); - bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU); - bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk | - __APP_PLL_312_LOGIC_SOFT_RESET); - bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk | - __APP_PLL_425_LOGIC_SOFT_RESET); - bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk | - __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE); - bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk | - __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE); - bfa_reg_read(rb + HOSTFN0_INT_MSK); - bfa_os_udelay(2000); - bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU); - bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU); - bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk | - __APP_PLL_312_ENABLE); - bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk | - __APP_PLL_425_ENABLE); + writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG)); + writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG)); + writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); + writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); + writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); + writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); + writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); + writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); + writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET, + rb + APP_PLL_312_CTL_REG); + writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET, + rb + APP_PLL_425_CTL_REG); + writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE, + rb + APP_PLL_312_CTL_REG); + writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE, + rb + APP_PLL_425_CTL_REG); + readl(rb + HOSTFN0_INT_MSK); + udelay(2000); + writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); + writel(0xffffffffU, (rb + 
HOSTFN1_INT_STATUS)); + writel(pll_sclk | __APP_PLL_312_ENABLE, rb + APP_PLL_312_CTL_REG); + writel(pll_fclk | __APP_PLL_425_ENABLE, rb + APP_PLL_425_CTL_REG); if (!fcmode) { - bfa_reg_write((rb + PMM_1T_RESET_REG_P0), __PMM_1T_RESET_P); - bfa_reg_write((rb + PMM_1T_RESET_REG_P1), __PMM_1T_RESET_P); + writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0)); + writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1)); } - r32 = bfa_reg_read((rb + PSS_CTL_REG)); + r32 = readl((rb + PSS_CTL_REG)); r32 &= ~__PSS_LMEM_RESET; - bfa_reg_write((rb + PSS_CTL_REG), r32); - bfa_os_udelay(1000); + writel(r32, (rb + PSS_CTL_REG)); + udelay(1000); if (!fcmode) { - bfa_reg_write((rb + PMM_1T_RESET_REG_P0), 0); - bfa_reg_write((rb + PMM_1T_RESET_REG_P1), 0); + writel(0, (rb + PMM_1T_RESET_REG_P0)); + writel(0, (rb + PMM_1T_RESET_REG_P1)); } - bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START); - bfa_os_udelay(1000); - r32 = bfa_reg_read((rb + MBIST_STAT_REG)); - bfa_reg_write((rb + MBIST_CTL_REG), 0); + writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG)); + udelay(1000); + r32 = readl((rb + MBIST_STAT_REG)); + writel(0, (rb + MBIST_CTL_REG)); return BFA_STATUS_OK; } diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h index 2cd52733867..15407ab39e7 100644 --- a/drivers/scsi/bfa/bfa_modules.h +++ b/drivers/scsi/bfa/bfa_modules.h @@ -15,7 +15,7 @@ * General Public License for more details. */ -/** +/* * bfa_modules.h BFA modules */ @@ -52,7 +52,7 @@ enum { }; -/** +/* * Macro to define a new BFA module */ #define BFA_MODULE(__mod) \ @@ -80,7 +80,7 @@ enum { #define BFA_CACHELINE_SZ (256) -/** +/* * Structure used to interact between different BFA sub modules * * Each sub module needs to implement only the entry points relevant to it (and diff --git a/drivers/scsi/bfa/bfa_os_inc.h b/drivers/scsi/bfa/bfa_os_inc.h index 788a250ffb8..65df62ef437 100644 --- a/drivers/scsi/bfa/bfa_os_inc.h +++ b/drivers/scsi/bfa/bfa_os_inc.h @@ -15,10 +15,6 @@ * General Public License for more details. */ -/** - * Contains declarations all OS Specific files needed for BFA layer - */ - #ifndef __BFA_OS_INC_H__ #define __BFA_OS_INC_H__ @@ -44,11 +40,6 @@ #define __BIGENDIAN #endif -static inline u64 bfa_os_get_clock(void) -{ - return jiffies; -} - static inline u64 bfa_os_get_log_time(void) { u64 system_time = 0; @@ -63,13 +54,6 @@ static inline u64 bfa_os_get_log_time(void) #define bfa_io_lat_clock_res_div HZ #define bfa_io_lat_clock_res_mul 1000 -#define BFA_ASSERT(p) do { \ - if (!(p)) { \ - printk(KERN_ERR "assert(%s) failed at %s:%d\n", \ - #p, __FILE__, __LINE__); \ - } \ -} while (0) - #define BFA_LOG(level, bfad, mask, fmt, arg...) 
\ do { \ if (((mask) == 4) || (level[1] <= '4')) \ @@ -81,22 +65,6 @@ do { \ ((_x) & 0x00ff00) | \ (((_x) & 0xff0000) >> 16)) -#define bfa_swap_8b(_x) \ - ((((_x) & 0xff00000000000000ull) >> 56) \ - | (((_x) & 0x00ff000000000000ull) >> 40) \ - | (((_x) & 0x0000ff0000000000ull) >> 24) \ - | (((_x) & 0x000000ff00000000ull) >> 8) \ - | (((_x) & 0x00000000ff000000ull) << 8) \ - | (((_x) & 0x0000000000ff0000ull) << 24) \ - | (((_x) & 0x000000000000ff00ull) << 40) \ - | (((_x) & 0x00000000000000ffull) << 56)) - -#define bfa_os_swap32(_x) \ - ((((_x) & 0xff) << 24) | \ - (((_x) & 0x0000ff00) << 8) | \ - (((_x) & 0x00ff0000) >> 8) | \ - (((_x) & 0xff000000) >> 24)) - #define bfa_os_swap_sgaddr(_x) ((u64)( \ (((u64)(_x) & (u64)0x00000000000000ffull) << 32) | \ (((u64)(_x) & (u64)0x000000000000ff00ull) << 32) | \ @@ -108,59 +76,27 @@ do { \ (((u64)(_x) & (u64)0xff00000000000000ull) >> 32))) #ifndef __BIGENDIAN -#define bfa_os_htons(_x) ((u16)((((_x) & 0xff00) >> 8) | \ - (((_x) & 0x00ff) << 8))) -#define bfa_os_htonl(_x) bfa_os_swap32(_x) -#define bfa_os_htonll(_x) bfa_swap_8b(_x) -#define bfa_os_hton3b(_x) bfa_swap_3b(_x) -#define bfa_os_wtole(_x) (_x) +#define bfa_os_hton3b(_x) bfa_swap_3b(_x) #define bfa_os_sgaddr(_x) (_x) - #else - -#define bfa_os_htons(_x) (_x) -#define bfa_os_htonl(_x) (_x) #define bfa_os_hton3b(_x) (_x) -#define bfa_os_htonll(_x) (_x) -#define bfa_os_wtole(_x) bfa_os_swap32(_x) #define bfa_os_sgaddr(_x) bfa_os_swap_sgaddr(_x) - #endif -#define bfa_os_ntohs(_x) bfa_os_htons(_x) -#define bfa_os_ntohl(_x) bfa_os_htonl(_x) -#define bfa_os_ntohll(_x) bfa_os_htonll(_x) #define bfa_os_ntoh3b(_x) bfa_os_hton3b(_x) - #define bfa_os_u32(__pa64) ((__pa64) >> 32) -#define bfa_os_memset memset -#define bfa_os_memcpy memcpy -#define bfa_os_udelay udelay -#define bfa_os_vsprintf vsprintf -#define bfa_os_snprintf snprintf - -#define bfa_os_assign(__t, __s) __t = __s -#define bfa_os_addr_t void __iomem * - -#define bfa_os_reg_read(_raddr) readl(_raddr) -#define bfa_os_reg_write(_raddr, _val) writel((_val), (_raddr)) -#define bfa_os_mem_read(_raddr, _off) \ - bfa_os_swap32(readl(((_raddr) + (_off)))) -#define bfa_os_mem_write(_raddr, _off, _val) \ - writel(bfa_os_swap32((_val)), ((_raddr) + (_off))) - -#define BFA_TRC_TS(_trcm) \ - ({ \ - struct timeval tv; \ - \ - do_gettimeofday(&tv); \ - (tv.tv_sec*1000000+tv.tv_usec); \ - }) +#define BFA_TRC_TS(_trcm) \ + ({ \ + struct timeval tv; \ + \ + do_gettimeofday(&tv); \ + (tv.tv_sec*1000000+tv.tv_usec); \ + }) #define boolean_t int -/** +/* * For current time stamp, OS API will fill-in */ struct bfa_timeval_s { diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c index b6d170a13be..fff96226a38 100644 --- a/drivers/scsi/bfa/bfa_port.c +++ b/drivers/scsi/bfa/bfa_port.c @@ -37,16 +37,16 @@ bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats) t0 = dip[i]; t1 = dip[i + 1]; #ifdef __BIGENDIAN - dip[i] = bfa_os_ntohl(t0); - dip[i + 1] = bfa_os_ntohl(t1); + dip[i] = be32_to_cpu(t0); + dip[i + 1] = be32_to_cpu(t1); #else - dip[i] = bfa_os_ntohl(t1); - dip[i + 1] = bfa_os_ntohl(t0); + dip[i] = be32_to_cpu(t1); + dip[i + 1] = be32_to_cpu(t0); #endif } } -/** +/* * bfa_port_enable_isr() * * @@ -63,7 +63,7 @@ bfa_port_enable_isr(struct bfa_port_s *port, bfa_status_t status) port->endis_cbfn(port->endis_cbarg, status); } -/** +/* * bfa_port_disable_isr() * * @@ -80,7 +80,7 @@ bfa_port_disable_isr(struct bfa_port_s *port, bfa_status_t status) port->endis_cbfn(port->endis_cbarg, status); } -/** +/* * 
bfa_port_get_stats_isr() * * @@ -112,7 +112,7 @@ bfa_port_get_stats_isr(struct bfa_port_s *port, bfa_status_t status) } } -/** +/* * bfa_port_clear_stats_isr() * * @@ -129,7 +129,7 @@ bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status) port->stats_status = status; port->stats_busy = BFA_FALSE; - /** + /* * re-initialize time stamp for stats reset */ bfa_os_gettimeofday(&tv); @@ -141,7 +141,7 @@ bfa_port_clear_stats_isr(struct bfa_port_s *port, bfa_status_t status) } } -/** +/* * bfa_port_isr() * * @@ -189,7 +189,7 @@ bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m) } } -/** +/* * bfa_port_meminfo() * * @@ -203,7 +203,7 @@ bfa_port_meminfo(void) return BFA_ROUNDUP(sizeof(union bfa_port_stats_u), BFA_DMA_ALIGN_SZ); } -/** +/* * bfa_port_mem_claim() * * @@ -220,7 +220,7 @@ bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa) port->stats_dma.pa = dma_pa; } -/** +/* * bfa_port_enable() * * Send the Port enable request to the f/w @@ -264,7 +264,7 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn, return BFA_STATUS_OK; } -/** +/* * bfa_port_disable() * * Send the Port disable request to the f/w @@ -308,7 +308,7 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn, return BFA_STATUS_OK; } -/** +/* * bfa_port_get_stats() * * Send the request to the f/w to fetch Port statistics. @@ -348,7 +348,7 @@ bfa_port_get_stats(struct bfa_port_s *port, union bfa_port_stats_u *stats, return BFA_STATUS_OK; } -/** +/* * bfa_port_clear_stats() * * @@ -385,7 +385,7 @@ bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn, return BFA_STATUS_OK; } -/** +/* * bfa_port_hbfail() * * @@ -415,7 +415,7 @@ bfa_port_hbfail(void *arg) } } -/** +/* * bfa_port_attach() * * @@ -449,7 +449,7 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port); bfa_ioc_hbfail_register(port->ioc, &port->hbfail); - /** + /* * initialize time stamp for stats reset */ bfa_os_gettimeofday(&tv); @@ -458,7 +458,7 @@ bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, bfa_trc(port, 0); } -/** +/* * bfa_port_detach() * * diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c index aa1dc749b28..c768143f480 100644 --- a/drivers/scsi/bfa/bfa_svc.c +++ b/drivers/scsi/bfa/bfa_svc.c @@ -29,7 +29,7 @@ BFA_MODULE(fcport); BFA_MODULE(rport); BFA_MODULE(uf); -/** +/* * LPS related definitions */ #define BFA_LPS_MIN_LPORTS (1) @@ -41,7 +41,7 @@ BFA_MODULE(uf); #define BFA_LPS_MAX_VPORTS_SUPP_CB 255 #define BFA_LPS_MAX_VPORTS_SUPP_CT 190 -/** +/* * lps_pvt BFA LPS private functions */ @@ -55,7 +55,7 @@ enum bfa_lps_event { BFA_LPS_SM_RX_CVL = 7, /* Rx clear virtual link */ }; -/** +/* * FC PORT related definitions */ /* @@ -67,7 +67,7 @@ enum bfa_lps_event { (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE)) -/** +/* * BFA port state machine events */ enum bfa_fcport_sm_event { @@ -82,7 +82,7 @@ enum bfa_fcport_sm_event { BFA_FCPORT_SM_HWFAIL = 9, /* IOC h/w failure */ }; -/** +/* * BFA port link notification state machine events */ @@ -92,7 +92,7 @@ enum bfa_fcport_ln_sm_event { BFA_FCPORT_LN_SM_NOTIFICATION = 3 /* done notification */ }; -/** +/* * RPORT related definitions */ #define bfa_rport_offline_cb(__rp) do { \ @@ -126,7 +126,7 @@ enum bfa_rport_event { BFA_RPORT_SM_QRESUME = 9, /* space in requeue queue */ }; -/** +/* * forward declarations FCXP related functions */ static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete); @@ -138,7 +138,7 @@ static void 
bfa_fcxp_qresume(void *cbarg); static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req); -/** +/* * forward declarations for LPS functions */ static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, @@ -163,7 +163,7 @@ static void bfa_lps_login_comp(struct bfa_lps_s *lps); static void bfa_lps_logout_comp(struct bfa_lps_s *lps); static void bfa_lps_cvl_event(struct bfa_lps_s *lps); -/** +/* * forward declaration for LPS state machine */ static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event); @@ -175,7 +175,7 @@ static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event); static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event); -/** +/* * forward declaration for FC Port functions */ static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport); @@ -193,7 +193,7 @@ static void bfa_fcport_stats_get_timeout(void *cbarg); static void bfa_fcport_stats_clr_timeout(void *cbarg); static void bfa_trunk_iocdisable(struct bfa_s *bfa); -/** +/* * forward declaration for FC PORT state machine */ static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport, @@ -252,7 +252,7 @@ static struct bfa_sm_table_s hal_port_sm_table[] = { }; -/** +/* * forward declaration for RPORT related functions */ static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod); @@ -265,7 +265,7 @@ static void __bfa_cb_rport_online(void *cbarg, static void __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete); -/** +/* * forward declaration for RPORT state machine */ static void bfa_rport_sm_uninit(struct bfa_rport_s *rp, @@ -295,7 +295,7 @@ static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event); -/** +/* * PLOG related definitions */ static int @@ -330,7 +330,7 @@ bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec) pl_recp = &(plog->plog_recs[tail]); - bfa_os_memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s)); + memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s)); pl_recp->tv = bfa_os_get_log_time(); BFA_PL_LOG_REC_INCR(plog->tail); @@ -342,9 +342,9 @@ bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec) void bfa_plog_init(struct bfa_plog_s *plog) { - bfa_os_memset((char *)plog, 0, sizeof(struct bfa_plog_s)); + memset((char *)plog, 0, sizeof(struct bfa_plog_s)); - bfa_os_memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN); + memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN); plog->head = plog->tail = 0; plog->plog_enabled = 1; } @@ -357,7 +357,7 @@ bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid, struct bfa_plog_rec_s lp; if (plog->plog_enabled) { - bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); + memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); lp.mid = mid; lp.eid = event; lp.log_type = BFA_PL_LOG_TYPE_STRING; @@ -381,15 +381,14 @@ bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid, num_ints = BFA_PL_INT_LOG_SZ; if (plog->plog_enabled) { - bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); + memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); lp.mid = mid; lp.eid = event; lp.log_type = BFA_PL_LOG_TYPE_INT; lp.misc = misc; for (i = 0; i < num_ints; i++) - bfa_os_assign(lp.log_entry.int_log[i], - intarr[i]); + lp.log_entry.int_log[i] = intarr[i]; lp.log_num_ints = (u8) num_ints; @@ -407,7 +406,7 @@ bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid, u32 ints[BFA_PL_INT_LOG_SZ]; if (plog->plog_enabled) { - 
bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); + memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); ints[0] = tmp_int[0]; ints[1] = tmp_int[1]; @@ -427,7 +426,7 @@ bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid, u32 ints[BFA_PL_INT_LOG_SZ]; if (plog->plog_enabled) { - bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); + memset(&lp, 0, sizeof(struct bfa_plog_rec_s)); ints[0] = tmp_int[0]; ints[1] = tmp_int[1]; @@ -462,7 +461,7 @@ bfa_plog_get_setting(struct bfa_plog_s *plog) return (bfa_boolean_t)plog->plog_enabled; } -/** +/* * fcxp_pvt BFA FCXP private functions */ @@ -485,7 +484,7 @@ claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi) mod->req_pld_list_pa = dm_pa; dm_kva += buf_pool_sz; dm_pa += buf_pool_sz; - bfa_os_memset(mod->req_pld_list_kva, 0, buf_pool_sz); + memset(mod->req_pld_list_kva, 0, buf_pool_sz); /* * Initialize the fcxp rsp payload list @@ -495,7 +494,7 @@ claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi) mod->rsp_pld_list_pa = dm_pa; dm_kva += buf_pool_sz; dm_pa += buf_pool_sz; - bfa_os_memset(mod->rsp_pld_list_kva, 0, buf_pool_sz); + memset(mod->rsp_pld_list_kva, 0, buf_pool_sz); bfa_meminfo_dma_virt(mi) = dm_kva; bfa_meminfo_dma_phys(mi) = dm_pa; @@ -508,7 +507,7 @@ claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi) struct bfa_fcxp_s *fcxp; fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi); - bfa_os_memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps); + memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps); INIT_LIST_HEAD(&mod->fcxp_free_q); INIT_LIST_HEAD(&mod->fcxp_active_q); @@ -559,11 +558,11 @@ bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, { struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); - bfa_os_memset(mod, 0, sizeof(struct bfa_fcxp_mod_s)); + memset(mod, 0, sizeof(struct bfa_fcxp_mod_s)); mod->bfa = bfa; mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs; - /** + /* * Initialize FCXP request and response payload sizes. */ mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ; @@ -741,20 +740,20 @@ hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp) { struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa); struct bfa_fcxp_s *fcxp; - u16 fcxp_tag = bfa_os_ntohs(fcxp_rsp->fcxp_tag); + u16 fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag); bfa_trc(bfa, fcxp_tag); - fcxp_rsp->rsp_len = bfa_os_ntohl(fcxp_rsp->rsp_len); + fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len); - /** + /* * @todo f/w should not set residue to non-0 when everything * is received. */ if (fcxp_rsp->req_status == BFA_STATUS_OK) fcxp_rsp->residue_len = 0; else - fcxp_rsp->residue_len = bfa_os_ntohl(fcxp_rsp->residue_len); + fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len); fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag); @@ -856,7 +855,7 @@ hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp, } } -/** +/* * Handler to resume sending fcxp when space in available in cpe queue. */ static void @@ -871,7 +870,7 @@ bfa_fcxp_qresume(void *cbarg) bfa_fcxp_queue(fcxp, send_req); } -/** +/* * Queue fcxp send request to foimrware. 
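The FCXP completion and send paths in this area switch from the driver-private bfa_os_ntohs()/bfa_os_htonl() macros to the standard be16_to_cpu()/cpu_to_be16() family. A minimal sketch of filling a big-endian wire structure; the struct and field names are made up here, and the __be16/__be32 types let sparse flag any missed conversion.

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_send_req {
	__be16 tag;
	__be16 max_frmsz;
	__be32 req_len;
};

static void demo_fill_req(struct demo_send_req *req, u16 tag, u32 len)
{
	req->tag = cpu_to_be16(tag);
	req->max_frmsz = cpu_to_be16(2112);	/* FC maximum payload, as an example */
	req->req_len = cpu_to_be32(len);
}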
*/ static void @@ -885,26 +884,26 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req) bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ, bfa_lpuid(bfa)); - send_req->fcxp_tag = bfa_os_htons(fcxp->fcxp_tag); + send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag); if (rport) { send_req->rport_fw_hndl = rport->fw_handle; - send_req->max_frmsz = bfa_os_htons(rport->rport_info.max_frmsz); + send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz); if (send_req->max_frmsz == 0) - send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ); + send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ); } else { send_req->rport_fw_hndl = 0; - send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ); + send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ); } - send_req->vf_id = bfa_os_htons(reqi->vf_id); + send_req->vf_id = cpu_to_be16(reqi->vf_id); send_req->lp_tag = reqi->lp_tag; send_req->class = reqi->class; send_req->rsp_timeout = rspi->rsp_timeout; send_req->cts = reqi->cts; send_req->fchs = reqi->fchs; - send_req->req_len = bfa_os_htonl(reqi->req_tot_len); - send_req->rsp_maxlen = bfa_os_htonl(rspi->rsp_maxlen); + send_req->req_len = cpu_to_be32(reqi->req_tot_len); + send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen); /* * setup req sgles @@ -955,11 +954,11 @@ bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req) bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP)); } -/** +/* * hal_fcxp_api BFA FCXP API */ -/** +/* * Allocate an FCXP instance to send a response or to send a request * that has a response. Request/response buffers are allocated by caller. * @@ -1005,7 +1004,7 @@ bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles, return fcxp; } -/** +/* * Get the internal request buffer pointer * * @param[in] fcxp BFA fcxp pointer @@ -1032,7 +1031,7 @@ bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp) return mod->req_pld_sz; } -/** +/* * Get the internal response buffer pointer * * @param[in] fcxp BFA fcxp pointer @@ -1052,7 +1051,7 @@ bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp) return rspbuf; } -/** +/* * Free the BFA FCXP * * @param[in] fcxp BFA fcxp pointer @@ -1069,7 +1068,7 @@ bfa_fcxp_free(struct bfa_fcxp_s *fcxp) bfa_fcxp_put(fcxp); } -/** +/* * Send a FCXP request * * @param[in] fcxp BFA fcxp pointer @@ -1103,7 +1102,7 @@ bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport, bfa_trc(bfa, fcxp->fcxp_tag); - /** + /* * setup request/response info */ reqi->bfa_rport = rport; @@ -1118,7 +1117,7 @@ bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport, fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp; fcxp->send_cbarg = cbarg; - /** + /* * If no room in CPE queue, wait for space in request queue */ send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP); @@ -1132,7 +1131,7 @@ bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport, bfa_fcxp_queue(fcxp, send_req); } -/** +/* * Abort a BFA FCXP * * @param[in] fcxp BFA fcxp pointer @@ -1186,7 +1185,7 @@ bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe) void bfa_fcxp_discard(struct bfa_fcxp_s *fcxp) { - /** + /* * If waiting for room in request queue, cancel reqq wait * and free fcxp. 
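bfa_fcxp_send() above and the LPS paths below share one flow-control pattern: if bfa_reqq_next() finds no room, the request is parked on a wait queue and re-driven from a resume callback once space frees up. A stripped-down sketch of that pattern with hypothetical types:

#include <linux/list.h>
#include <linux/types.h>

struct demo_req {
	struct list_head qe;
	void (*resume)(struct demo_req *req);	/* re-drives the request */
};

struct demo_reqq {
	struct list_head wait_q;	/* requests waiting for queue space */
	int free_slots;
};

static bool demo_reqq_post(struct demo_reqq *q, struct demo_req *req)
{
	if (!q->free_slots) {
		list_add_tail(&req->qe, &q->wait_q);	/* retried on resume */
		return false;
	}
	q->free_slots--;
	/* ... build the message in the queue slot and ring the doorbell ... */
	return true;
}

static void demo_reqq_resume(struct demo_reqq *q)
{
	while (q->free_slots && !list_empty(&q->wait_q)) {
		struct demo_req *req =
			list_first_entry(&q->wait_q, struct demo_req, qe);

		list_del(&req->qe);
		req->resume(req);	/* typically ends up in demo_reqq_post() again */
	}
}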
*/ @@ -1202,7 +1201,7 @@ bfa_fcxp_discard(struct bfa_fcxp_s *fcxp) -/** +/* * hal_fcxp_public BFA FCXP public functions */ @@ -1229,11 +1228,11 @@ bfa_fcxp_get_maxrsp(struct bfa_s *bfa) } -/** +/* * BFA LPS state machine functions */ -/** +/* * Init state -- no login */ static void @@ -1285,7 +1284,7 @@ bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event) } } -/** +/* * login is in progress -- awaiting response from firmware */ static void @@ -1327,7 +1326,7 @@ bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event) } } -/** +/* * login pending - awaiting space in request queue */ static void @@ -1359,7 +1358,7 @@ bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event) } } -/** +/* * login complete */ static void @@ -1400,7 +1399,7 @@ bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event) } } -/** +/* * logout in progress - awaiting firmware response */ static void @@ -1424,7 +1423,7 @@ bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event) } } -/** +/* * logout pending -- awaiting space in request queue */ static void @@ -1451,11 +1450,11 @@ bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event) -/** +/* * lps_pvt BFA LPS private functions */ -/** +/* * return memory requirement */ static void @@ -1468,7 +1467,7 @@ bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS; } -/** +/* * bfa module attach at initialization time */ static void @@ -1479,7 +1478,7 @@ bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_lps_s *lps; int i; - bfa_os_memset(mod, 0, sizeof(struct bfa_lps_mod_s)); + memset(mod, 0, sizeof(struct bfa_lps_mod_s)); mod->num_lps = BFA_LPS_MAX_LPORTS; if (cfg->drvcfg.min_cfg) mod->num_lps = BFA_LPS_MIN_LPORTS; @@ -1516,7 +1515,7 @@ bfa_lps_stop(struct bfa_s *bfa) { } -/** +/* * IOC in disabled state -- consider all lps offline */ static void @@ -1532,7 +1531,7 @@ bfa_lps_iocdisable(struct bfa_s *bfa) } } -/** +/* * Firmware login response */ static void @@ -1550,7 +1549,7 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp) lps->fport = rsp->f_port; lps->npiv_en = rsp->npiv_en; lps->lp_pid = rsp->lp_pid; - lps->pr_bbcred = bfa_os_ntohs(rsp->bb_credit); + lps->pr_bbcred = be16_to_cpu(rsp->bb_credit); lps->pr_pwwn = rsp->port_name; lps->pr_nwwn = rsp->node_name; lps->auth_req = rsp->auth_req; @@ -1579,7 +1578,7 @@ bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp) bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); } -/** +/* * Firmware logout response */ static void @@ -1594,7 +1593,7 @@ bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp) bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP); } -/** +/* * Firmware received a Clear virtual link request (for FCoE) */ static void @@ -1608,7 +1607,7 @@ bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl) bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL); } -/** +/* * Space is available in request queue, resume queueing request to firmware. 
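The LPS and fcport code in this file is built on one convention: the current state is a function pointer and events are delivered by calling it, wrapped by the driver's bfa_sm_set_state()/bfa_sm_send_event() macros. A stripped-down sketch of that convention with invented states and events:

enum demo_lps_event {
	DEMO_LPS_SM_LOGIN = 1,
	DEMO_LPS_SM_FWRSP,
	DEMO_LPS_SM_OFFLINE,
};

struct demo_lps;
typedef void (*demo_lps_sm_t)(struct demo_lps *lps, enum demo_lps_event event);

struct demo_lps {
	demo_lps_sm_t sm;	/* current state == current handler */
};

static void demo_lps_sm_online(struct demo_lps *lps, enum demo_lps_event event);

static void demo_lps_sm_login(struct demo_lps *lps, enum demo_lps_event event)
{
	switch (event) {
	case DEMO_LPS_SM_FWRSP:
		lps->sm = demo_lps_sm_online;	/* what bfa_sm_set_state() does */
		break;
	case DEMO_LPS_SM_OFFLINE:
		/* the real code falls back to the init state here */
		break;
	default:
		break;
	}
}

static void demo_lps_sm_online(struct demo_lps *lps, enum demo_lps_event event)
{
	/* logout, delete and offline events would be handled here */
}

static void demo_lps_send_event(struct demo_lps *lps, enum demo_lps_event event)
{
	lps->sm(lps, event);		/* what bfa_sm_send_event() does */
}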
*/ static void @@ -1619,7 +1618,7 @@ bfa_lps_reqq_resume(void *lps_arg) bfa_sm_send_event(lps, BFA_LPS_SM_RESUME); } -/** +/* * lps is freed -- triggered by vport delete */ static void @@ -1632,7 +1631,7 @@ bfa_lps_free(struct bfa_lps_s *lps) list_add_tail(&lps->qe, &mod->lps_free_q); } -/** +/* * send login request to firmware */ static void @@ -1648,7 +1647,7 @@ bfa_lps_send_login(struct bfa_lps_s *lps) m->lp_tag = lps->lp_tag; m->alpa = lps->alpa; - m->pdu_size = bfa_os_htons(lps->pdusz); + m->pdu_size = cpu_to_be16(lps->pdusz); m->pwwn = lps->pwwn; m->nwwn = lps->nwwn; m->fdisc = lps->fdisc; @@ -1657,7 +1656,7 @@ bfa_lps_send_login(struct bfa_lps_s *lps) bfa_reqq_produce(lps->bfa, lps->reqq); } -/** +/* * send logout request to firmware */ static void @@ -1676,7 +1675,7 @@ bfa_lps_send_logout(struct bfa_lps_s *lps) bfa_reqq_produce(lps->bfa, lps->reqq); } -/** +/* * Indirect login completion handler for non-fcs */ static void @@ -1693,7 +1692,7 @@ bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete) bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status); } -/** +/* * Login completion handler -- direct call for fcs, queue for others */ static void @@ -1711,7 +1710,7 @@ bfa_lps_login_comp(struct bfa_lps_s *lps) bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status); } -/** +/* * Indirect logout completion handler for non-fcs */ static void @@ -1726,7 +1725,7 @@ bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete) bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg); } -/** +/* * Logout completion handler -- direct call for fcs, queue for others */ static void @@ -1741,7 +1740,7 @@ bfa_lps_logout_comp(struct bfa_lps_s *lps) bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg); } -/** +/* * Clear virtual link completion handler for non-fcs */ static void @@ -1757,7 +1756,7 @@ bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete) bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg); } -/** +/* * Received Clear virtual link event --direct call for fcs, * queue for others */ @@ -1777,7 +1776,7 @@ bfa_lps_cvl_event(struct bfa_lps_s *lps) -/** +/* * lps_public BFA LPS public functions */ @@ -1790,7 +1789,7 @@ bfa_lps_get_max_vport(struct bfa_s *bfa) return BFA_LPS_MAX_VPORTS_SUPP_CB; } -/** +/* * Allocate a lport srvice tag. */ struct bfa_lps_s * @@ -1810,7 +1809,7 @@ bfa_lps_alloc(struct bfa_s *bfa) return lps; } -/** +/* * Free lport service tag. This can be called anytime after an alloc. * No need to wait for any pending login/logout completions. */ @@ -1820,7 +1819,7 @@ bfa_lps_delete(struct bfa_lps_s *lps) bfa_sm_send_event(lps, BFA_LPS_SM_DELETE); } -/** +/* * Initiate a lport login. */ void @@ -1837,7 +1836,7 @@ bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz, bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); } -/** +/* * Initiate a lport fdisc login. */ void @@ -1854,7 +1853,7 @@ bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn, bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN); } -/** +/* * Initiate a lport logout (flogi). */ void @@ -1863,7 +1862,7 @@ bfa_lps_flogo(struct bfa_lps_s *lps) bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT); } -/** +/* * Initiate a lport FDSIC logout. */ void @@ -1872,7 +1871,7 @@ bfa_lps_fdisclogo(struct bfa_lps_s *lps) bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT); } -/** +/* * Discard a pending login request -- should be called only for * link down handling. 
*/ @@ -1882,7 +1881,7 @@ bfa_lps_discard(struct bfa_lps_s *lps) bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE); } -/** +/* * Return lport services tag */ u8 @@ -1891,7 +1890,7 @@ bfa_lps_get_tag(struct bfa_lps_s *lps) return lps->lp_tag; } -/** +/* * Return lport services tag given the pid */ u8 @@ -1910,7 +1909,7 @@ bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid) return 0; } -/** +/* * return if fabric login indicates support for NPIV */ bfa_boolean_t @@ -1919,7 +1918,7 @@ bfa_lps_is_npiv_en(struct bfa_lps_s *lps) return lps->npiv_en; } -/** +/* * Return TRUE if attached to F-Port, else return FALSE */ bfa_boolean_t @@ -1928,7 +1927,7 @@ bfa_lps_is_fport(struct bfa_lps_s *lps) return lps->fport; } -/** +/* * Return TRUE if attached to a Brocade Fabric */ bfa_boolean_t @@ -1936,7 +1935,7 @@ bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps) { return lps->brcd_switch; } -/** +/* * return TRUE if authentication is required */ bfa_boolean_t @@ -1951,7 +1950,7 @@ bfa_lps_get_extstatus(struct bfa_lps_s *lps) return lps->ext_status; } -/** +/* * return port id assigned to the lport */ u32 @@ -1960,7 +1959,7 @@ bfa_lps_get_pid(struct bfa_lps_s *lps) return lps->lp_pid; } -/** +/* * return port id assigned to the base lport */ u32 @@ -1971,7 +1970,7 @@ bfa_lps_get_base_pid(struct bfa_s *bfa) return BFA_LPS_FROM_TAG(mod, 0)->lp_pid; } -/** +/* * Return bb_credit assigned in FLOGI response */ u16 @@ -1980,7 +1979,7 @@ bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps) return lps->pr_bbcred; } -/** +/* * Return peer port name */ wwn_t @@ -1989,7 +1988,7 @@ bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps) return lps->pr_pwwn; } -/** +/* * Return peer node name */ wwn_t @@ -1998,7 +1997,7 @@ bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps) return lps->pr_nwwn; } -/** +/* * return reason code if login request is rejected */ u8 @@ -2007,7 +2006,7 @@ bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps) return lps->lsrjt_rsn; } -/** +/* * return explanation code if login request is rejected */ u8 @@ -2016,7 +2015,7 @@ bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps) return lps->lsrjt_expl; } -/** +/* * Return fpma/spma MAC for lport */ mac_t @@ -2025,7 +2024,7 @@ bfa_lps_get_lp_mac(struct bfa_lps_s *lps) return lps->lp_mac; } -/** +/* * LPS firmware message class handler. */ void @@ -2055,7 +2054,7 @@ bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m) } } -/** +/* * FC PORT state machine functions */ static void @@ -2066,7 +2065,7 @@ bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport, switch (event) { case BFA_FCPORT_SM_START: - /** + /* * Start event after IOC is configured and BFA is started. */ if (bfa_fcport_send_enable(fcport)) { @@ -2080,7 +2079,7 @@ bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport, break; case BFA_FCPORT_SM_ENABLE: - /** + /* * Port is persistently configured to be in enabled state. Do * not change state. Port enabling is done when START event is * received. @@ -2088,7 +2087,7 @@ bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport, break; case BFA_FCPORT_SM_DISABLE: - /** + /* * If a port is persistently configured to be disabled, the * first event will a port disable request. */ @@ -2124,13 +2123,13 @@ bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport, break; case BFA_FCPORT_SM_ENABLE: - /** + /* * Already enable is in progress. */ break; case BFA_FCPORT_SM_DISABLE: - /** + /* * Just send disable request to firmware when room becomes * available in request queue. 
*/ @@ -2145,7 +2144,7 @@ bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport, case BFA_FCPORT_SM_LINKUP: case BFA_FCPORT_SM_LINKDOWN: - /** + /* * Possible to get link events when doing back-to-back * enable/disables. */ @@ -2184,7 +2183,7 @@ bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport, break; case BFA_FCPORT_SM_ENABLE: - /** + /* * Already being enabled. */ break; @@ -2257,13 +2256,13 @@ bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport, break; case BFA_FCPORT_SM_LINKDOWN: - /** + /* * Possible to get link down event. */ break; case BFA_FCPORT_SM_ENABLE: - /** + /* * Already enabled. */ break; @@ -2306,7 +2305,7 @@ bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport, switch (event) { case BFA_FCPORT_SM_ENABLE: - /** + /* * Already enabled. */ break; @@ -2399,14 +2398,14 @@ bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport, break; case BFA_FCPORT_SM_DISABLE: - /** + /* * Already being disabled. */ break; case BFA_FCPORT_SM_LINKUP: case BFA_FCPORT_SM_LINKDOWN: - /** + /* * Possible to get link events when doing back-to-back * enable/disables. */ @@ -2453,7 +2452,7 @@ bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport, case BFA_FCPORT_SM_LINKUP: case BFA_FCPORT_SM_LINKDOWN: - /** + /* * Possible to get link events when doing back-to-back * enable/disables. */ @@ -2483,7 +2482,7 @@ bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport, break; case BFA_FCPORT_SM_DISABLE: - /** + /* * Already being disabled. */ break; @@ -2508,7 +2507,7 @@ bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport, case BFA_FCPORT_SM_LINKUP: case BFA_FCPORT_SM_LINKDOWN: - /** + /* * Possible to get link events when doing back-to-back * enable/disables. */ @@ -2533,7 +2532,7 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport, switch (event) { case BFA_FCPORT_SM_START: - /** + /* * Ignore start event for a port that is disabled. */ break; @@ -2557,7 +2556,7 @@ bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport, break; case BFA_FCPORT_SM_DISABLE: - /** + /* * Already disabled. */ break; @@ -2587,14 +2586,14 @@ bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport, break; default: - /** + /* * Ignore all other events. */ ; } } -/** +/* * Port is enabled. IOC is down/failed. */ static void @@ -2613,14 +2612,14 @@ bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport, break; default: - /** + /* * Ignore all events. */ ; } } -/** +/* * Port is disabled. IOC is down/failed. */ static void @@ -2639,14 +2638,14 @@ bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport, break; default: - /** + /* * Ignore all events. 
*/ ; } } -/** +/* * Link state is down */ static void @@ -2666,7 +2665,7 @@ bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln, } } -/** +/* * Link state is waiting for down notification */ static void @@ -2689,7 +2688,7 @@ bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln, } } -/** +/* * Link state is waiting for down notification and there is a pending up */ static void @@ -2713,7 +2712,7 @@ bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln, } } -/** +/* * Link state is up */ static void @@ -2733,7 +2732,7 @@ bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln, } } -/** +/* * Link state is waiting for up notification */ static void @@ -2756,7 +2755,7 @@ bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln, } } -/** +/* * Link state is waiting for up notification and there is a pending down */ static void @@ -2780,7 +2779,7 @@ bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln, } } -/** +/* * Link state is waiting for up notification and there are pending down and up */ static void @@ -2806,7 +2805,7 @@ bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln, -/** +/* * hal_port_private */ @@ -2821,7 +2820,7 @@ __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete) bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION); } -/** +/* * Send SCN notification to upper layers. * trunk - false if caller is fcport to ignore fcport event in trunked mode */ @@ -2897,7 +2896,7 @@ bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo) bfa_meminfo_dma_phys(meminfo) = dm_pa; } -/** +/* * Memory initialization. */ static void @@ -2909,7 +2908,7 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, struct bfa_fcport_ln_s *ln = &fcport->ln; struct bfa_timeval_s tv; - bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s)); + memset(fcport, 0, sizeof(struct bfa_fcport_s)); fcport->bfa = bfa; ln->fcport = fcport; @@ -2918,13 +2917,13 @@ bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, bfa_sm_set_state(fcport, bfa_fcport_sm_uninit); bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn); - /** + /* * initialize time stamp for stats reset */ bfa_os_gettimeofday(&tv); fcport->stats_reset_time = tv.tv_sec; - /** + /* * initialize and set default configuration */ port_cfg->topology = BFA_PORT_TOPOLOGY_P2P; @@ -2942,7 +2941,7 @@ bfa_fcport_detach(struct bfa_s *bfa) { } -/** +/* * Called when IOC is ready. */ static void @@ -2951,7 +2950,7 @@ bfa_fcport_start(struct bfa_s *bfa) bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START); } -/** +/* * Called before IOC is stopped. */ static void @@ -2961,7 +2960,7 @@ bfa_fcport_stop(struct bfa_s *bfa) bfa_trunk_iocdisable(bfa); } -/** +/* * Called when IOC failure is detected. 
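The comment-marker changes that dominate these hunks (-/** becoming +/*) are cosmetic to the compiler but meaningful to the kernel's documentation tooling: /** opens a kernel-doc block that scripts/kernel-doc parses, while ordinary explanatory comments open with /*. A minimal sketch of the distinction, using a hypothetical function name that is not part of this patch:

/**
 * bfa_example_get_tag() - kernel-doc block, picked up by scripts/kernel-doc
 * @lps: logical port services instance
 *
 * Return: the lport services tag.
 */
u8 bfa_example_get_tag(struct bfa_lps_s *lps);

/*
 * Plain comment: informational only, so it opens with a single asterisk
 * and the documentation tooling ignores it.
 */
u8 bfa_example_get_tag_plain(struct bfa_lps_s *lps);

The BFA comments being demoted here were never written in the kernel-doc @param/Return format, which is presumably why the series downgrades the markers rather than fleshing the comments out.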
*/ static void @@ -2986,18 +2985,17 @@ bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport) fcport->myalpa = 0; /* QoS Details */ - bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr); - bfa_os_assign(fcport->qos_vc_attr, - pevent->link_state.vc_fcf.qos_vc_attr); + fcport->qos_attr = pevent->link_state.qos_attr; + fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr; - /** + /* * update trunk state if applicable */ if (!fcport->cfg.trunked) trunk->attr.state = BFA_TRUNK_DISABLED; /* update FCoE specific */ - fcport->fcoe_vlan = bfa_os_ntohs(pevent->link_state.vc_fcf.fcf.vlan); + fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan); bfa_trc(fcport->bfa, fcport->speed); bfa_trc(fcport->bfa, fcport->topology); @@ -3010,7 +3008,7 @@ bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport) fcport->topology = BFA_PORT_TOPOLOGY_NONE; } -/** +/* * Send port enable message to firmware. */ static bfa_boolean_t @@ -3018,13 +3016,13 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport) { struct bfi_fcport_enable_req_s *m; - /** + /* * Increment message tag before queue check, so that responses to old * requests are discarded. */ fcport->msgtag++; - /** + /* * check for room in queue to send request now */ m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); @@ -3040,19 +3038,19 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport) m->pwwn = fcport->pwwn; m->port_cfg = fcport->cfg; m->msgtag = fcport->msgtag; - m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize); + m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize); bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa); bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo); bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi); - /** + /* * queue I/O message to firmware */ bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); return BFA_TRUE; } -/** +/* * Send port disable message to firmware. */ static bfa_boolean_t @@ -3060,13 +3058,13 @@ bfa_fcport_send_disable(struct bfa_fcport_s *fcport) { struct bfi_fcport_req_s *m; - /** + /* * Increment message tag before queue check, so that responses to old * requests are discarded. 
*/ fcport->msgtag++; - /** + /* * check for room in queue to send request now */ m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); @@ -3080,7 +3078,7 @@ bfa_fcport_send_disable(struct bfa_fcport_s *fcport) bfa_lpuid(fcport->bfa)); m->msgtag = fcport->msgtag; - /** + /* * queue I/O message to firmware */ bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); @@ -3105,7 +3103,7 @@ bfa_fcport_send_txcredit(void *port_cbarg) struct bfa_fcport_s *fcport = port_cbarg; struct bfi_fcport_set_svc_params_req_s *m; - /** + /* * check for room in queue to send request now */ m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT); @@ -3116,9 +3114,9 @@ bfa_fcport_send_txcredit(void *port_cbarg) bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ, bfa_lpuid(fcport->bfa)); - m->tx_bbcredit = bfa_os_htons((u16)fcport->cfg.tx_bbcredit); + m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit); - /** + /* * queue I/O message to firmware */ bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); @@ -3134,7 +3132,7 @@ bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d, /* Now swap the 32 bit fields */ for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i) - dip[i] = bfa_os_ntohl(sip[i]); + dip[i] = be32_to_cpu(sip[i]); } static void @@ -3148,11 +3146,11 @@ bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d, for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32)); i = i + 2) { #ifdef __BIGENDIAN - dip[i] = bfa_os_ntohl(sip[i]); - dip[i + 1] = bfa_os_ntohl(sip[i + 1]); + dip[i] = be32_to_cpu(sip[i]); + dip[i + 1] = be32_to_cpu(sip[i + 1]); #else - dip[i] = bfa_os_ntohl(sip[i + 1]); - dip[i + 1] = bfa_os_ntohl(sip[i]); + dip[i] = be32_to_cpu(sip[i + 1]); + dip[i + 1] = be32_to_cpu(sip[i]); #endif } } @@ -3223,7 +3221,7 @@ bfa_fcport_send_stats_get(void *cbarg) } fcport->stats_qfull = BFA_FALSE; - bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s)); + memset(msg, 0, sizeof(struct bfi_fcport_req_s)); bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ, bfa_lpuid(fcport->bfa)); bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); @@ -3237,7 +3235,7 @@ __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete) if (complete) { struct bfa_timeval_s tv; - /** + /* * re-initialize time stamp for stats reset */ bfa_os_gettimeofday(&tv); @@ -3285,13 +3283,13 @@ bfa_fcport_send_stats_clear(void *cbarg) } fcport->stats_qfull = BFA_FALSE; - bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s)); + memset(msg, 0, sizeof(struct bfi_fcport_req_s)); bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ, bfa_lpuid(fcport->bfa)); bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT); } -/** +/* * Handle trunk SCN event from firmware. */ static void @@ -3312,7 +3310,7 @@ bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn) bfa_trc(fcport->bfa, scn->trunk_state); bfa_trc(fcport->bfa, scn->trunk_speed); - /** + /* * Save off new state for trunk attribute query */ state_prev = trunk->attr.state; @@ -3327,7 +3325,7 @@ bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn) lattr->trunk_wwn = tlink->trunk_wwn; lattr->fctl = tlink->fctl; lattr->speed = tlink->speed; - lattr->deskew = bfa_os_ntohl(tlink->deskew); + lattr->deskew = be32_to_cpu(tlink->deskew); if (tlink->state == BFA_TRUNK_LINK_STATE_UP) { fcport->speed = tlink->speed; @@ -3360,7 +3358,7 @@ bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn) BFA_PL_EID_TRUNK_SCN, 0, "Trunk down"); } - /** + /* * Notify upper layers if trunk state changed. 
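Alongside the comment cleanups, this part of the series drops the driver's private byte-order wrappers (bfa_os_ntohs, bfa_os_ntohl, bfa_os_htons, bfa_os_htonll) in favour of the kernel's standard helpers (be16_to_cpu, be32_to_cpu, cpu_to_be16, cpu_to_be64). A minimal sketch of the convention; the structure and function here are hypothetical, only the helpers are real:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical firmware-format block: fields arrive big-endian. */
struct example_wire_stats {
	__be16	frame_count;
	__be32	error_count;
};

static void example_stats_to_cpu(const struct example_wire_stats *src,
				 u16 *frames, u32 *errors)
{
	*frames = be16_to_cpu(src->frame_count);	/* wire -> host order */
	*errors = be32_to_cpu(src->error_count);
	/* cpu_to_be16()/cpu_to_be64() go the other way when building requests,
	 * as in m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize)
	 * above. */
}

The #ifdef __BIGENDIAN branch kept in bfa_fcport_fcoe_stats_swap() above swaps the order of each 32-bit word pair on little-endian hosts as well as byte-swapping the words, which is what is needed when 64-bit counters are carried as two big-endian 32-bit words.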
*/ if ((state_prev != trunk->attr.state) || @@ -3376,7 +3374,7 @@ bfa_trunk_iocdisable(struct bfa_s *bfa) struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); int i = 0; - /** + /* * In trunked mode, notify upper layers that link is down */ if (fcport->cfg.trunked) { @@ -3400,11 +3398,11 @@ bfa_trunk_iocdisable(struct bfa_s *bfa) -/** +/* * hal_port_public */ -/** +/* * Called to initialize port attributes */ void @@ -3412,7 +3410,7 @@ bfa_fcport_init(struct bfa_s *bfa) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - /** + /* * Initialize port attributes from IOC hardware data. */ bfa_fcport_set_wwns(fcport); @@ -3426,7 +3424,7 @@ bfa_fcport_init(struct bfa_s *bfa) bfa_assert(fcport->speed_sup); } -/** +/* * Firmware message handler. */ void @@ -3507,11 +3505,11 @@ bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg) -/** +/* * hal_port_api */ -/** +/* * Registered callback for port events. */ void @@ -3552,7 +3550,7 @@ bfa_fcport_disable(struct bfa_s *bfa) return BFA_STATUS_OK; } -/** +/* * Configure port speed. */ bfa_status_t @@ -3574,7 +3572,7 @@ bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed) return BFA_STATUS_OK; } -/** +/* * Get current speed. */ enum bfa_port_speed @@ -3585,7 +3583,7 @@ bfa_fcport_get_speed(struct bfa_s *bfa) return fcport->speed; } -/** +/* * Configure port topology. */ bfa_status_t @@ -3610,7 +3608,7 @@ bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology) return BFA_STATUS_OK; } -/** +/* * Get current topology. */ enum bfa_port_topology @@ -3710,7 +3708,7 @@ bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit) bfa_fcport_send_txcredit(fcport); } -/** +/* * Get port attributes. */ @@ -3729,7 +3727,7 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr) { struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); - bfa_os_memset(attr, 0, sizeof(struct bfa_port_attr_s)); + memset(attr, 0, sizeof(struct bfa_port_attr_s)); attr->nwwn = fcport->nwwn; attr->pwwn = fcport->pwwn; @@ -3737,7 +3735,7 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr) attr->factorypwwn = bfa_ioc_get_mfg_pwwn(&bfa->ioc); attr->factorynwwn = bfa_ioc_get_mfg_nwwn(&bfa->ioc); - bfa_os_memcpy(&attr->pport_cfg, &fcport->cfg, + memcpy(&attr->pport_cfg, &fcport->cfg, sizeof(struct bfa_port_cfg_s)); /* speed attributes */ attr->pport_cfg.speed = fcport->cfg.speed; @@ -3770,7 +3768,7 @@ bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr) #define BFA_FCPORT_STATS_TOV 1000 -/** +/* * Fetch port statistics (FCQoS or FCoE). */ bfa_status_t @@ -3796,7 +3794,7 @@ bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats, return BFA_STATUS_OK; } -/** +/* * Reset port statistics (FCQoS or FCoE). 
*/ bfa_status_t @@ -3820,7 +3818,7 @@ bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg) return BFA_STATUS_OK; } -/** +/* * Fetch FCQoS port statistics */ bfa_status_t @@ -3833,7 +3831,7 @@ bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats, return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg); } -/** +/* * Reset FCoE port statistics */ bfa_status_t @@ -3845,7 +3843,7 @@ bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg) return bfa_fcport_clear_stats(bfa, cbfn, cbarg); } -/** +/* * Fetch FCQoS port statistics */ bfa_status_t @@ -3858,7 +3856,7 @@ bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats, return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg); } -/** +/* * Reset FCoE port statistics */ bfa_status_t @@ -3876,7 +3874,7 @@ bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr) struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa); qos_attr->state = fcport->qos_attr.state; - qos_attr->total_bb_cr = bfa_os_ntohl(fcport->qos_attr.total_bb_cr); + qos_attr->total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr); } void @@ -3887,10 +3885,10 @@ bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa, struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr; u32 i = 0; - qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count); - qos_vc_attr->shared_credit = bfa_os_ntohs(bfa_vc_attr->shared_credit); + qos_vc_attr->total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count); + qos_vc_attr->shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit); qos_vc_attr->elp_opmode_flags = - bfa_os_ntohl(bfa_vc_attr->elp_opmode_flags); + be32_to_cpu(bfa_vc_attr->elp_opmode_flags); /* Individual VC info */ while (i < qos_vc_attr->total_vc_count) { @@ -3904,7 +3902,7 @@ bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa, } } -/** +/* * Fetch port attributes. */ bfa_boolean_t @@ -3939,7 +3937,7 @@ bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off) if (ioc_type == BFA_IOC_TYPE_FC) { fcport->cfg.qos_enabled = on_off; - /** + /* * Notify fcpim of the change in QoS state */ bfa_fcpim_update_ioredirect(bfa); @@ -3959,7 +3957,7 @@ bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off) fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS; } -/** +/* * Configure default minimum ratelim speed */ bfa_status_t @@ -3980,7 +3978,7 @@ bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_port_speed speed) return BFA_STATUS_OK; } -/** +/* * Get default minimum ratelim speed */ enum bfa_port_speed @@ -4095,10 +4093,10 @@ bfa_trunk_disable(struct bfa_s *bfa) } -/** +/* * Rport State machine functions */ -/** +/* * Beginning state, only online event expected. */ static void @@ -4151,7 +4149,7 @@ bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event) } } -/** +/* * Waiting for rport create response from firmware. */ static void @@ -4188,7 +4186,7 @@ bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event) } } -/** +/* * Request queue is full, awaiting queue resume to send create request. */ static void @@ -4229,7 +4227,7 @@ bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event) } } -/** +/* * Online state - normal parking state. 
*/ static void @@ -4275,9 +4273,9 @@ bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event) bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority); qos_scn->old_qos_attr.qos_flow_id = - bfa_os_ntohl(qos_scn->old_qos_attr.qos_flow_id); + be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id); qos_scn->new_qos_attr.qos_flow_id = - bfa_os_ntohl(qos_scn->new_qos_attr.qos_flow_id); + be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id); if (qos_scn->old_qos_attr.qos_flow_id != qos_scn->new_qos_attr.qos_flow_id) @@ -4297,7 +4295,7 @@ bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event) } } -/** +/* * Firmware rport is being deleted - awaiting f/w response. */ static void @@ -4360,7 +4358,7 @@ bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event) } } -/** +/* * Offline state. */ static void @@ -4395,7 +4393,7 @@ bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event) } } -/** +/* * Rport is deleted, waiting for firmware response to delete. */ static void @@ -4447,7 +4445,7 @@ bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event) } } -/** +/* * Waiting for rport create response from firmware. A delete is pending. */ static void @@ -4478,7 +4476,7 @@ bfa_rport_sm_delete_pending(struct bfa_rport_s *rp, } } -/** +/* * Waiting for rport create response from firmware. Rport offline is pending. */ static void @@ -4513,7 +4511,7 @@ bfa_rport_sm_offline_pending(struct bfa_rport_s *rp, } } -/** +/* * IOC h/w failed. */ static void @@ -4553,7 +4551,7 @@ bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event) -/** +/* * bfa_rport_private BFA rport private functions */ @@ -4612,12 +4610,12 @@ bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, !(mod->num_rports & (mod->num_rports - 1))); for (i = 0; i < mod->num_rports; i++, rp++) { - bfa_os_memset(rp, 0, sizeof(struct bfa_rport_s)); + memset(rp, 0, sizeof(struct bfa_rport_s)); rp->bfa = bfa; rp->rport_tag = i; bfa_sm_set_state(rp, bfa_rport_sm_uninit); - /** + /* * - is unused */ if (i) @@ -4626,7 +4624,7 @@ bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp); } - /** + /* * consume memory */ bfa_meminfo_kva(meminfo) = (u8 *) rp; @@ -4687,7 +4685,7 @@ bfa_rport_send_fwcreate(struct bfa_rport_s *rp) { struct bfi_rport_create_req_s *m; - /** + /* * check for room in queue to send request now */ m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); @@ -4699,7 +4697,7 @@ bfa_rport_send_fwcreate(struct bfa_rport_s *rp) bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ, bfa_lpuid(rp->bfa)); m->bfa_handle = rp->rport_tag; - m->max_frmsz = bfa_os_htons(rp->rport_info.max_frmsz); + m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz); m->pid = rp->rport_info.pid; m->lp_tag = rp->rport_info.lp_tag; m->local_pid = rp->rport_info.local_pid; @@ -4708,7 +4706,7 @@ bfa_rport_send_fwcreate(struct bfa_rport_s *rp) m->vf_id = rp->rport_info.vf_id; m->cisc = rp->rport_info.cisc; - /** + /* * queue I/O message to firmware */ bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); @@ -4720,7 +4718,7 @@ bfa_rport_send_fwdelete(struct bfa_rport_s *rp) { struct bfi_rport_delete_req_s *m; - /** + /* * check for room in queue to send request now */ m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); @@ -4733,7 +4731,7 @@ bfa_rport_send_fwdelete(struct bfa_rport_s *rp) bfa_lpuid(rp->bfa)); m->fw_handle = rp->fw_handle; - /** + /* * queue I/O message to firmware */ bfa_reqq_produce(rp->bfa, 
BFA_REQQ_RPORT); @@ -4745,7 +4743,7 @@ bfa_rport_send_fwspeed(struct bfa_rport_s *rp) { struct bfa_rport_speed_req_s *m; - /** + /* * check for room in queue to send request now */ m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT); @@ -4759,7 +4757,7 @@ bfa_rport_send_fwspeed(struct bfa_rport_s *rp) m->fw_handle = rp->fw_handle; m->speed = (u8)rp->rport_info.speed; - /** + /* * queue I/O message to firmware */ bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT); @@ -4768,11 +4766,11 @@ bfa_rport_send_fwspeed(struct bfa_rport_s *rp) -/** +/* * bfa_rport_public */ -/** +/* * Rport interrupt processing. */ void @@ -4814,7 +4812,7 @@ bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m) -/** +/* * bfa_rport_api */ @@ -4849,7 +4847,7 @@ bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info) { bfa_assert(rport_info->max_frmsz != 0); - /** + /* * Some JBODs are seen to be not setting PDU size correctly in PLOGI * responses. Default to minimum size. */ @@ -4858,7 +4856,7 @@ bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info) rport_info->max_frmsz = FC_MIN_PDUSZ; } - bfa_os_assign(rport->rport_info, *rport_info); + rport->rport_info = *rport_info; bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE); } @@ -4890,22 +4888,22 @@ bfa_rport_get_qos_attr(struct bfa_rport_s *rport, struct bfa_rport_qos_attr_s *qos_attr) { qos_attr->qos_priority = rport->qos_attr.qos_priority; - qos_attr->qos_flow_id = bfa_os_ntohl(rport->qos_attr.qos_flow_id); + qos_attr->qos_flow_id = be32_to_cpu(rport->qos_attr.qos_flow_id); } void bfa_rport_clear_stats(struct bfa_rport_s *rport) { - bfa_os_memset(&rport->stats, 0, sizeof(rport->stats)); + memset(&rport->stats, 0, sizeof(rport->stats)); } -/** +/* * SGPG related functions */ -/** +/* * Compute and return memory needed by FCP(im) module. 
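The same sweep removes the bfa_os_assign(), bfa_os_memset() and bfa_os_memcpy() wrappers in favour of plain C structure assignment and the standard memset()/memcpy(). A short sketch of the pattern; the structure and helpers below are hypothetical, not the real bfa_rport_info_s layout:

#include <linux/types.h>
#include <linux/string.h>

struct example_info {
	u32	pid;
	u16	max_frmsz;
};

static void example_store(struct example_info *dst,
			  const struct example_info *src)
{
	*dst = *src;			/* replaces bfa_os_assign(*dst, *src) */
}

static void example_clear(struct example_info *info)
{
	memset(info, 0, sizeof(*info));	/* replaces bfa_os_memset() */
}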
*/ static void @@ -4957,8 +4955,8 @@ bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1))); for (i = 0; i < mod->num_sgpgs; i++) { - bfa_os_memset(hsgpg, 0, sizeof(*hsgpg)); - bfa_os_memset(sgpg, 0, sizeof(*sgpg)); + memset(hsgpg, 0, sizeof(*hsgpg)); + memset(sgpg, 0, sizeof(*sgpg)); hsgpg->sgpg = sgpg; sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa); @@ -4997,7 +4995,7 @@ bfa_sgpg_iocdisable(struct bfa_s *bfa) -/** +/* * hal_sgpg_public BFA SGPG public functions */ @@ -5039,7 +5037,7 @@ bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg) if (list_empty(&mod->sgpg_wait_q)) return; - /** + /* * satisfy as many waiting requests as possible */ do { @@ -5067,11 +5065,11 @@ bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg) wqe->nsgpg_total = wqe->nsgpg = nsgpg; - /** + /* * allocate any left to this one first */ if (mod->free_sgpgs) { - /** + /* * no one else is waiting for SGPG */ bfa_assert(list_empty(&mod->sgpg_wait_q)); @@ -5105,7 +5103,7 @@ bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg), wqe->cbarg = cbarg; } -/** +/* * UF related functions */ /* @@ -5136,7 +5134,7 @@ claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz; bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz; - bfa_os_memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz); + memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz); } static void @@ -5153,11 +5151,11 @@ claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs; i++, uf_bp_msg++) { - bfa_os_memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s)); + memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s)); uf_bp_msg->buf_tag = i; buf_len = sizeof(struct bfa_uf_buf_s); - uf_bp_msg->buf_len = bfa_os_htons(buf_len); + uf_bp_msg->buf_len = cpu_to_be16(buf_len); bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST, bfa_lpuid(ufm->bfa)); @@ -5173,7 +5171,7 @@ claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) bfa_sge_to_be(&sge[1]); } - /** + /* * advance pointer beyond consumed memory */ bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg; @@ -5194,7 +5192,7 @@ claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) * Initialize UFs and queue it in UF free queue */ for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) { - bfa_os_memset(uf, 0, sizeof(struct bfa_uf_s)); + memset(uf, 0, sizeof(struct bfa_uf_s)); uf->bfa = ufm->bfa; uf->uf_tag = i; uf->pb_len = sizeof(struct bfa_uf_buf_s); @@ -5203,7 +5201,7 @@ claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi) list_add_tail(&uf->qe, &ufm->uf_free_q); } - /** + /* * advance memory pointer */ bfa_meminfo_kva(mi) = (u8 *) uf; @@ -5241,7 +5239,7 @@ bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg, { struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa); - bfa_os_memset(ufm, 0, sizeof(struct bfa_uf_mod_s)); + memset(ufm, 0, sizeof(struct bfa_uf_mod_s)); ufm->bfa = bfa; ufm->num_ufs = cfg->fwcfg.num_uf_bufs; INIT_LIST_HEAD(&ufm->uf_free_q); @@ -5279,7 +5277,7 @@ bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf) if (!uf_post_msg) return BFA_STATUS_FAILED; - bfa_os_memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag], + memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag], sizeof(struct bfi_uf_buf_post_s)); bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP); @@ -5310,8 +5308,8 @@ uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m) u8 *buf = 
&uf_buf->d[0]; struct fchs_s *fchs; - m->frm_len = bfa_os_ntohs(m->frm_len); - m->xfr_len = bfa_os_ntohs(m->xfr_len); + m->frm_len = be16_to_cpu(m->frm_len); + m->xfr_len = be16_to_cpu(m->xfr_len); fchs = (struct fchs_s *)uf_buf; @@ -5365,11 +5363,11 @@ bfa_uf_start(struct bfa_s *bfa) -/** +/* * hal_uf_api */ -/** +/* * Register handler for all unsolicted recieve frames. * * @param[in] bfa BFA instance @@ -5385,7 +5383,7 @@ bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg) ufm->cbarg = cbarg; } -/** +/* * Free an unsolicited frame back to BFA. * * @param[in] uf unsolicited frame to be freed @@ -5401,7 +5399,7 @@ bfa_uf_free(struct bfa_uf_s *uf) -/** +/* * uf_pub BFA uf module public functions */ void diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h index 9921dad0d03..e2349d5cdb9 100644 --- a/drivers/scsi/bfa/bfa_svc.h +++ b/drivers/scsi/bfa/bfa_svc.h @@ -22,12 +22,12 @@ #include "bfi_ms.h" -/** +/* * Scatter-gather DMA related defines */ #define BFA_SGPG_MIN (16) -/** +/* * Alignment macro for SG page allocation */ #define BFA_SGPG_ROUNDUP(_l) (((_l) + (sizeof(struct bfi_sgpg_s) - 1)) \ @@ -48,7 +48,7 @@ struct bfa_sgpg_s { union bfi_addr_u sgpg_pa; /* pa of SG page */ }; -/** +/* * Given number of SG elements, BFA_SGPG_NPAGE() returns the number of * SG pages required. */ @@ -75,7 +75,7 @@ void bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpgs); void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe); -/** +/* * FCXP related defines */ #define BFA_FCXP_MIN (1) @@ -115,12 +115,12 @@ typedef void (*bfa_fcxp_alloc_cbfn_t) (void *cbarg, struct bfa_fcxp_s *fcxp); -/** +/* * Information needed for a FCXP request */ struct bfa_fcxp_req_info_s { struct bfa_rport_s *bfa_rport; - /** Pointer to the bfa rport that was + /* Pointer to the bfa rport that was * returned from bfa_rport_create(). * This could be left NULL for WKA or * for FCXP interactions before the @@ -137,11 +137,10 @@ struct bfa_fcxp_req_info_s { struct bfa_fcxp_rsp_info_s { struct fchs_s rsp_fchs; - /** !< Response frame's FC header will + /* Response frame's FC header will * be sent back in this field */ u8 rsp_timeout; - /** !< timeout in seconds, 0-no response - */ + /* timeout in seconds, 0-no response */ u8 rsvd2[3]; u32 rsp_maxlen; /* max response length expected */ }; @@ -218,7 +217,7 @@ struct bfa_fcxp_wqe_s { void bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); -/** +/* * RPORT related defines */ #define BFA_RPORT_MIN 4 @@ -232,7 +231,7 @@ struct bfa_rport_mod_s { #define BFA_RPORT_MOD(__bfa) (&(__bfa)->modules.rport_mod) -/** +/* * Convert rport tag to RPORT */ #define BFA_RPORT_FROM_TAG(__bfa, _tag) \ @@ -244,7 +243,7 @@ struct bfa_rport_mod_s { */ void bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); -/** +/* * BFA rport information. */ struct bfa_rport_info_s { @@ -259,7 +258,7 @@ struct bfa_rport_info_s { enum bfa_port_speed speed; /* Rport's current speed */ }; -/** +/* * BFA rport data structure */ struct bfa_rport_s { @@ -282,7 +281,7 @@ struct bfa_rport_s { #define BFA_RPORT_FC_COS(_rport) ((_rport)->rport_info.fc_class) -/** +/* * UF - unsolicited receive related defines */ @@ -305,7 +304,7 @@ struct bfa_uf_s { struct bfa_sge_s sges[BFI_SGE_INLINE_MAX]; }; -/** +/* * Callback prototype for unsolicited frame receive handler. 
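One pattern worth spelling out, since its two comments ("check for room in queue to send request now" and "queue I/O message to firmware") recur through every bfa_*_send_* helper above: bfa_reqq_next() returns NULL when the request queue has no free slot, and bfa_reqq_produce() posts the message that was built in place. A condensed, hypothetical sketch of that flow; the hunks above truncate the queue-full path, which in the real helpers arranges a retry once space frees up:

static bfa_boolean_t example_send_req(struct bfa_rport_s *rp)
{
	struct bfi_rport_delete_req_s *m;

	/* Check for room in the request queue; NULL means it is full. */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m)
		return BFA_FALSE;	/* caller retries when space frees up */

	/* ... fill in *m: message header, firmware handle, ... */

	/* Queue the built message to firmware. */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}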
* * @param[in] cbarg callback arg for receive handler @@ -338,7 +337,7 @@ void bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); #define BFA_UF_BUFSZ (2 * 1024 + 256) -/** +/* * @todo private */ struct bfa_uf_buf_s { @@ -346,7 +345,7 @@ struct bfa_uf_buf_s { }; -/** +/* * LPS - bfa lport login/logout service interface */ struct bfa_lps_s { @@ -397,14 +396,14 @@ struct bfa_lps_mod_s { void bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg); -/** +/* * FCPORT related defines */ #define BFA_FCPORT(_bfa) (&((_bfa)->modules.port)) typedef void (*bfa_cb_port_t) (void *cbarg, enum bfa_status status); -/** +/* * Link notification data structure */ struct bfa_fcport_ln_s { @@ -418,7 +417,7 @@ struct bfa_fcport_trunk_s { struct bfa_trunk_attr_s attr; }; -/** +/* * BFA FC port data structure */ struct bfa_fcport_s { @@ -613,7 +612,7 @@ void bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg); void bfa_uf_free(struct bfa_uf_s *uf); -/** +/* * bfa lport service api */ diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c index 4d8784e06e1..1f938974b84 100644 --- a/drivers/scsi/bfa/bfad.c +++ b/drivers/scsi/bfa/bfad.c @@ -15,7 +15,7 @@ * General Public License for more details. */ -/** +/* * bfad.c Linux driver PCI interface module. */ #include <linux/module.h> @@ -151,7 +151,7 @@ bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event); static void bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event); -/** +/* * Beginning state for the driver instance, awaiting the pci_probe event */ static void @@ -181,7 +181,7 @@ bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event) } } -/** +/* * Driver Instance is created, awaiting event INIT to initialize the bfad */ static void @@ -364,7 +364,7 @@ bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event) } } -/** +/* * BFA callbacks */ void @@ -376,7 +376,7 @@ bfad_hcb_comp(void *arg, bfa_status_t status) complete(&fcomp->comp); } -/** +/* * bfa_init callback */ void @@ -401,7 +401,7 @@ bfa_cb_init(void *drv, bfa_status_t init_status) complete(&bfad->comp); } -/** +/* * BFA_FCS callbacks */ struct bfad_port_s * @@ -457,7 +457,7 @@ bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles, } } -/** +/* * FCS RPORT alloc callback, after successful PLOGI by FCS */ bfa_status_t @@ -478,7 +478,7 @@ ext: return rc; } -/** +/* * FCS PBC VPORT Create */ void @@ -663,7 +663,7 @@ ext: return rc; } -/** +/* * Create a vport under a vf. */ bfa_status_t @@ -716,30 +716,6 @@ ext: return rc; } -/** - * Create a vf and its base vport implicitely. 
- */ -bfa_status_t -bfad_vf_create(struct bfad_s *bfad, u16 vf_id, - struct bfa_lport_cfg_s *port_cfg) -{ - struct bfad_vf_s *vf; - int rc = BFA_STATUS_OK; - - vf = kzalloc(sizeof(struct bfad_vf_s), GFP_KERNEL); - if (!vf) { - rc = BFA_STATUS_FAILED; - goto ext; - } - - rc = bfa_fcs_vf_create(&vf->fcs_vf, &bfad->bfa_fcs, vf_id, port_cfg, - vf); - if (rc != BFA_STATUS_OK) - kfree(vf); -ext: - return rc; -} - void bfad_bfa_tmo(unsigned long data) { @@ -885,20 +861,6 @@ bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad) pci_set_drvdata(pdev, NULL); } -void -bfad_fcs_port_cfg(struct bfad_s *bfad) -{ - struct bfa_lport_cfg_s port_cfg; - struct bfa_port_attr_s attr; - char symname[BFA_SYMNAME_MAXLEN]; - - sprintf(symname, "%s-%d", BFAD_DRIVER_NAME, bfad->inst_no); - memcpy(port_cfg.sym_name.symname, symname, strlen(symname)); - bfa_fcport_get_attr(&bfad->bfa, &attr); - port_cfg.nwwn = attr.nwwn; - port_cfg.pwwn = attr.pwwn; -} - bfa_status_t bfad_drv_init(struct bfad_s *bfad) { @@ -1089,9 +1051,6 @@ bfad_start_ops(struct bfad_s *bfad) { bfa_fcs_init(&bfad->bfa_fcs); spin_unlock_irqrestore(&bfad->bfad_lock, flags); - /* PPORT FCS config */ - bfad_fcs_port_cfg(bfad); - retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM); if (retval != BFA_STATUS_OK) { if (bfa_sm_cmp_state(bfad, bfad_sm_initializing)) @@ -1181,7 +1140,7 @@ bfad_worker(void *ptr) return 0; } -/** +/* * BFA driver interrupt functions */ irqreturn_t @@ -1240,7 +1199,7 @@ bfad_msix(int irq, void *dev_id) return IRQ_HANDLED; } -/** +/* * Initialize the MSIX entry table. */ static void @@ -1293,7 +1252,7 @@ bfad_install_msix_handler(struct bfad_s *bfad) return 0; } -/** +/* * Setup MSIX based interrupt. */ int @@ -1374,7 +1333,7 @@ bfad_remove_intr(struct bfad_s *bfad) } } -/** +/* * PCI probe entry. */ int @@ -1460,7 +1419,7 @@ out: return error; } -/** +/* * PCI remove entry. */ void @@ -1541,7 +1500,7 @@ static struct pci_driver bfad_pci_driver = { .remove = __devexit_p(bfad_pci_remove), }; -/** +/* * Driver module init. */ static int __init @@ -1581,7 +1540,7 @@ ext: return error; } -/** +/* * Driver module exit. */ static void __exit diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c index d8843720eac..ed9fff440b5 100644 --- a/drivers/scsi/bfa/bfad_attr.c +++ b/drivers/scsi/bfa/bfad_attr.c @@ -15,14 +15,14 @@ * General Public License for more details. */ -/** +/* * bfa_attr.c Linux driver configuration interface module. */ #include "bfad_drv.h" #include "bfad_im.h" -/** +/* * FC transport template entry, get SCSI target port ID. */ void @@ -48,7 +48,7 @@ bfad_im_get_starget_port_id(struct scsi_target *starget) spin_unlock_irqrestore(&bfad->bfad_lock, flags); } -/** +/* * FC transport template entry, get SCSI target nwwn. */ void @@ -70,11 +70,11 @@ bfad_im_get_starget_node_name(struct scsi_target *starget) if (itnim) node_name = bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim); - fc_starget_node_name(starget) = bfa_os_htonll(node_name); + fc_starget_node_name(starget) = cpu_to_be64(node_name); spin_unlock_irqrestore(&bfad->bfad_lock, flags); } -/** +/* * FC transport template entry, get SCSI target pwwn. */ void @@ -96,11 +96,11 @@ bfad_im_get_starget_port_name(struct scsi_target *starget) if (itnim) port_name = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim); - fc_starget_port_name(starget) = bfa_os_htonll(port_name); + fc_starget_port_name(starget) = cpu_to_be64(port_name); spin_unlock_irqrestore(&bfad->bfad_lock, flags); } -/** +/* * FC transport template entry, get SCSI host port ID. 
*/ void @@ -114,7 +114,7 @@ bfad_im_get_host_port_id(struct Scsi_Host *shost) bfa_os_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port)); } -/** +/* * FC transport template entry, get SCSI host port type. */ static void @@ -146,7 +146,7 @@ bfad_im_get_host_port_type(struct Scsi_Host *shost) } } -/** +/* * FC transport template entry, get SCSI host port state. */ static void @@ -183,7 +183,7 @@ bfad_im_get_host_port_state(struct Scsi_Host *shost) } } -/** +/* * FC transport template entry, get SCSI host active fc4s. */ static void @@ -202,7 +202,7 @@ bfad_im_get_host_active_fc4s(struct Scsi_Host *shost) fc_host_active_fc4s(shost)[7] = 1; } -/** +/* * FC transport template entry, get SCSI host link speed. */ static void @@ -236,7 +236,7 @@ bfad_im_get_host_speed(struct Scsi_Host *shost) } } -/** +/* * FC transport template entry, get SCSI host port type. */ static void @@ -249,11 +249,11 @@ bfad_im_get_host_fabric_name(struct Scsi_Host *shost) fabric_nwwn = bfa_fcs_lport_get_fabric_name(port->fcs_port); - fc_host_fabric_name(shost) = bfa_os_htonll(fabric_nwwn); + fc_host_fabric_name(shost) = cpu_to_be64(fabric_nwwn); } -/** +/* * FC transport template entry, get BFAD statistics. */ static struct fc_host_statistics * @@ -304,7 +304,7 @@ bfad_im_get_stats(struct Scsi_Host *shost) return hstats; } -/** +/* * FC transport template entry, reset BFAD statistics. */ static void @@ -331,7 +331,7 @@ bfad_im_reset_stats(struct Scsi_Host *shost) return; } -/** +/* * FC transport template entry, get rport loss timeout. */ static void @@ -347,7 +347,7 @@ bfad_im_get_rport_loss_tmo(struct fc_rport *rport) spin_unlock_irqrestore(&bfad->bfad_lock, flags); } -/** +/* * FC transport template entry, set rport loss timeout. */ static void @@ -633,7 +633,7 @@ struct fc_function_template bfad_im_vport_fc_function_template = { .set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo, }; -/** +/* * Scsi_Host_attrs SCSI host attributes */ static ssize_t @@ -733,7 +733,7 @@ bfad_im_node_name_show(struct device *dev, struct device_attribute *attr, u64 nwwn; nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port); - return snprintf(buf, PAGE_SIZE, "0x%llx\n", bfa_os_htonll(nwwn)); + return snprintf(buf, PAGE_SIZE, "0x%llx\n", cpu_to_be64(nwwn)); } static ssize_t diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c index 69ed1c4a903..1fedeeb4ac1 100644 --- a/drivers/scsi/bfa/bfad_debugfs.c +++ b/drivers/scsi/bfa/bfad_debugfs.c @@ -318,7 +318,7 @@ bfad_debugfs_write_regrd(struct file *file, const char __user *buf, regbuf = (u32 *)bfad->regdata; spin_lock_irqsave(&bfad->bfad_lock, flags); for (i = 0; i < len; i++) { - *regbuf = bfa_reg_read(reg_addr); + *regbuf = readl(reg_addr); regbuf++; reg_addr += sizeof(u32); } @@ -361,7 +361,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf, reg_addr = (u32 *) ((u8 *) bfa_ioc_bar0(ioc) + addr); spin_lock_irqsave(&bfad->bfad_lock, flags); - bfa_reg_write(reg_addr, val); + writel(val, reg_addr); spin_unlock_irqrestore(&bfad->bfad_lock, flags); return nbytes; diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h index 98420bbb4f3..97f9b6c0937 100644 --- a/drivers/scsi/bfa/bfad_drv.h +++ b/drivers/scsi/bfa/bfad_drv.h @@ -15,11 +15,11 @@ * General Public License for more details. */ -/** +/* * Contains base driver definitions. */ -/** +/* * bfa_drv.h Linux driver data structures. 
*/ @@ -309,7 +309,6 @@ void bfad_bfa_tmo(unsigned long data); void bfad_init_timer(struct bfad_s *bfad); int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad); void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad); -void bfad_fcs_port_cfg(struct bfad_s *bfad); void bfad_drv_uninit(struct bfad_s *bfad); int bfad_worker(void *ptr); void bfad_debugfs_init(struct bfad_port_s *port); diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index d950ee44016..8ca967dee66 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c @@ -15,7 +15,7 @@ * General Public License for more details. */ -/** +/* * bfad_im.c Linux driver IM module. */ @@ -30,8 +30,7 @@ DEFINE_IDR(bfad_im_port_index); struct scsi_transport_template *bfad_im_scsi_transport_template; struct scsi_transport_template *bfad_im_scsi_vport_transport_template; static void bfad_im_itnim_work_handler(struct work_struct *work); -static int bfad_im_queuecommand(struct scsi_cmnd *cmnd, - void (*done)(struct scsi_cmnd *)); +static int bfad_im_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmnd); static int bfad_im_slave_alloc(struct scsi_device *sdev); static void bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim); @@ -164,10 +163,10 @@ bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk, wake_up(wq); } -/** +/* * Scsi_Host_template SCSI host template */ -/** +/* * Scsi_Host template entry, returns BFAD PCI info. */ static const char * @@ -196,7 +195,7 @@ bfad_im_info(struct Scsi_Host *shost) return bfa_buf; } -/** +/* * Scsi_Host template entry, aborts the specified SCSI command. * * Returns: SUCCESS or FAILED. @@ -280,7 +279,7 @@ out: return rc; } -/** +/* * Scsi_Host template entry, resets a LUN and abort its all commands. * * Returns: SUCCESS or FAILED. @@ -319,7 +318,7 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd) goto out; } - /** + /* * Set host_scribble to NULL to avoid aborting a task command * if happens. */ @@ -346,7 +345,7 @@ out: return rc; } -/** +/* * Scsi_Host template entry, resets the bus and abort all commands. */ static int @@ -396,7 +395,7 @@ bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd) return SUCCESS; } -/** +/* * Scsi_Host template entry slave_destroy. */ static void @@ -406,11 +405,11 @@ bfad_im_slave_destroy(struct scsi_device *sdev) return; } -/** +/* * BFA FCS itnim callbacks */ -/** +/* * BFA FCS itnim alloc callback, after successful PRLI * Context: Interrupt */ @@ -433,7 +432,7 @@ bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim, bfad->bfad_flags |= BFAD_RPORT_ONLINE; } -/** +/* * BFA FCS itnim free callback. * Context: Interrupt. bfad_lock is held */ @@ -471,7 +470,7 @@ bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv) queue_work(im->drv_workq, &itnim_drv->itnim_work); } -/** +/* * BFA FCS itnim online callback. * Context: Interrupt. bfad_lock is held */ @@ -492,7 +491,7 @@ bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv) queue_work(im->drv_workq, &itnim_drv->itnim_work); } -/** +/* * BFA FCS itnim offline callback. * Context: Interrupt. bfad_lock is held */ @@ -519,7 +518,7 @@ bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv) queue_work(im->drv_workq, &itnim_drv->itnim_work); } -/** +/* * Allocate a Scsi_Host for a port. */ int @@ -751,7 +750,7 @@ bfad_os_thread_workq(struct bfad_s *bfad) return BFA_STATUS_OK; } -/** +/* * Scsi_Host template entry. 
* * Description: @@ -896,7 +895,7 @@ bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id) return NULL; } -/** +/* * Scsi_Host template entry slave_alloc */ static int @@ -915,12 +914,16 @@ bfad_im_slave_alloc(struct scsi_device *sdev) static u32 bfad_im_supported_speeds(struct bfa_s *bfa) { - struct bfa_ioc_attr_s ioc_attr; + struct bfa_ioc_attr_s *ioc_attr; u32 supported_speed = 0; - bfa_get_attr(bfa, &ioc_attr); - if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) { - if (ioc_attr.adapter_attr.is_mezz) { + ioc_attr = kzalloc(sizeof(struct bfa_ioc_attr_s), GFP_KERNEL); + if (!ioc_attr) + return 0; + + bfa_get_attr(bfa, ioc_attr); + if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) { + if (ioc_attr->adapter_attr.is_mezz) { supported_speed |= FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; @@ -929,12 +932,13 @@ bfad_im_supported_speeds(struct bfa_s *bfa) FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT; } - } else if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) { + } else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) { supported_speed |= FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; - } else if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) { + } else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) { supported_speed |= FC_PORTSPEED_10GBIT; } + kfree(ioc_attr); return supported_speed; } @@ -944,14 +948,13 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port) struct Scsi_Host *host = im_port->shost; struct bfad_s *bfad = im_port->bfad; struct bfad_port_s *port = im_port->port; - struct bfa_port_attr_s pattr; - struct bfa_lport_attr_s port_attr; char symname[BFA_SYMNAME_MAXLEN]; + struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa); fc_host_node_name(host) = - bfa_os_htonll((bfa_fcs_lport_get_nwwn(port->fcs_port))); + cpu_to_be64((bfa_fcs_lport_get_nwwn(port->fcs_port))); fc_host_port_name(host) = - bfa_os_htonll((bfa_fcs_lport_get_pwwn(port->fcs_port))); + cpu_to_be64((bfa_fcs_lport_get_pwwn(port->fcs_port))); fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa); fc_host_supported_classes(host) = FC_COS_CLASS3; @@ -964,15 +967,12 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port) /* For fibre channel services type 0x20 */ fc_host_supported_fc4s(host)[7] = 1; - bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr); - strncpy(symname, port_attr.port_cfg.sym_name.symname, + strncpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname, BFA_SYMNAME_MAXLEN); sprintf(fc_host_symbolic_name(host), "%s", symname); fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa); - - bfa_fcport_get_attr(&bfad->bfa, &pattr); - fc_host_maxframe_size(host) = pattr.pport_cfg.maxfrsize; + fc_host_maxframe_size(host) = fcport->cfg.maxfrsize; } static void @@ -983,9 +983,9 @@ bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim) struct bfad_itnim_data_s *itnim_data; rport_ids.node_name = - bfa_os_htonll(bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim)); + cpu_to_be64(bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim)); rport_ids.port_name = - bfa_os_htonll(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim)); + cpu_to_be64(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim)); rport_ids.port_id = bfa_os_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim)); rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; @@ -1015,7 +1015,7 @@ bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim) return; } -/** +/* * Work queue handler using FC 
transport service * Context: kernel */ @@ -1115,11 +1115,11 @@ bfad_im_itnim_work_handler(struct work_struct *work) spin_unlock_irqrestore(&bfad->bfad_lock, flags); } -/** +/* * Scsi_Host template entry, queue a SCSI command to the BFAD. */ static int -bfad_im_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) +bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) { struct bfad_im_port_s *im_port = (struct bfad_im_port_s *) cmnd->device->host->hostdata[0]; @@ -1186,6 +1186,8 @@ out_fail_cmd: return 0; } +static DEF_SCSI_QCMD(bfad_im_queuecommand) + void bfad_os_rport_online_wait(struct bfad_s *bfad) { diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h index 85f2224a573..58796d1284b 100644 --- a/drivers/scsi/bfa/bfi.h +++ b/drivers/scsi/bfa/bfi.h @@ -23,7 +23,7 @@ #pragma pack(1) -/** +/* * BFI FW image type */ #define BFI_FLASH_CHUNK_SZ 256 /* Flash chunk size */ @@ -35,7 +35,7 @@ enum { BFI_IMAGE_MAX, }; -/** +/* * Msg header common to all msgs */ struct bfi_mhdr_s { @@ -68,7 +68,7 @@ struct bfi_mhdr_s { #define BFI_I2H_OPCODE_BASE 128 #define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE) -/** +/* **************************************************************************** * * Scatter Gather Element and Page definition @@ -79,7 +79,7 @@ struct bfi_mhdr_s { #define BFI_SGE_INLINE 1 #define BFI_SGE_INLINE_MAX (BFI_SGE_INLINE + 1) -/** +/* * SG Flags */ enum { @@ -90,7 +90,7 @@ enum { BFI_SGE_PGDLEN = 2, /* cumulative data length for page */ }; -/** +/* * DMA addresses */ union bfi_addr_u { @@ -100,7 +100,7 @@ union bfi_addr_u { } a32; }; -/** +/* * Scatter Gather Element */ struct bfi_sge_s { @@ -116,7 +116,7 @@ struct bfi_sge_s { union bfi_addr_u sga; }; -/** +/* * Scatter Gather Page */ #define BFI_SGPG_DATA_SGES 7 @@ -139,7 +139,7 @@ struct bfi_msg_s { u32 pl[BFI_LMSG_PL_WSZ]; }; -/** +/* * Mailbox message structure */ #define BFI_MBMSG_SZ 7 @@ -148,7 +148,7 @@ struct bfi_mbmsg_s { u32 pl[BFI_MBMSG_SZ]; }; -/** +/* * Message Classes */ enum bfi_mclass { @@ -186,7 +186,7 @@ enum bfi_mclass { #define BFI_BOOT_LOADER_BIOS 1 #define BFI_BOOT_LOADER_UEFI 2 -/** +/* *---------------------------------------------------------------------- * IOC *---------------------------------------------------------------------- @@ -208,7 +208,7 @@ enum bfi_ioc_i2h_msgs { BFI_IOC_I2H_HBEAT = BFA_I2HM(5), }; -/** +/* * BFI_IOC_H2I_GETATTR_REQ message */ struct bfi_ioc_getattr_req_s { @@ -242,7 +242,7 @@ struct bfi_ioc_attr_s { u32 card_type; /* card type */ }; -/** +/* * BFI_IOC_I2H_GETATTR_REPLY message */ struct bfi_ioc_getattr_reply_s { @@ -251,19 +251,19 @@ struct bfi_ioc_getattr_reply_s { u8 rsvd[3]; }; -/** +/* * Firmware memory page offsets */ #define BFI_IOC_SMEM_PG0_CB (0x40) #define BFI_IOC_SMEM_PG0_CT (0x180) -/** +/* * Firmware statistic offset */ #define BFI_IOC_FWSTATS_OFF (0x6B40) #define BFI_IOC_FWSTATS_SZ (4096) -/** +/* * Firmware trace offset */ #define BFI_IOC_TRC_OFF (0x4b00) @@ -280,7 +280,7 @@ struct bfi_ioc_image_hdr_s { u32 md5sum[BFI_IOC_MD5SUM_SZ]; }; -/** +/* * BFI_IOC_I2H_READY_EVENT message */ struct bfi_ioc_rdy_event_s { @@ -294,7 +294,7 @@ struct bfi_ioc_hbeat_s { u32 hb_count; /* current heart beat count */ }; -/** +/* * IOC hardware/firmware state */ enum bfi_ioc_state { @@ -340,7 +340,7 @@ enum { ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \ BFI_ADAPTER_UNSUPP)) -/** +/* * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages */ struct bfi_ioc_ctrl_req_s { @@ -352,7 +352,7 @@ struct bfi_ioc_ctrl_req_s { 
#define bfi_ioc_enable_req_t struct bfi_ioc_ctrl_req_s; #define bfi_ioc_disable_req_t struct bfi_ioc_ctrl_req_s; -/** +/* * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages */ struct bfi_ioc_ctrl_reply_s { @@ -364,7 +364,7 @@ struct bfi_ioc_ctrl_reply_s { #define bfi_ioc_disable_reply_t struct bfi_ioc_ctrl_reply_s; #define BFI_IOC_MSGSZ 8 -/** +/* * H2I Messages */ union bfi_ioc_h2i_msg_u { @@ -375,7 +375,7 @@ union bfi_ioc_h2i_msg_u { u32 mboxmsg[BFI_IOC_MSGSZ]; }; -/** +/* * I2H Messages */ union bfi_ioc_i2h_msg_u { @@ -385,7 +385,7 @@ union bfi_ioc_i2h_msg_u { }; -/** +/* *---------------------------------------------------------------------- * PBC *---------------------------------------------------------------------- @@ -394,7 +394,7 @@ union bfi_ioc_i2h_msg_u { #define BFI_PBC_MAX_BLUNS 8 #define BFI_PBC_MAX_VPORTS 16 -/** +/* * PBC boot lun configuration */ struct bfi_pbc_blun_s { @@ -402,7 +402,7 @@ struct bfi_pbc_blun_s { lun_t tgt_lun; }; -/** +/* * PBC virtual port configuration */ struct bfi_pbc_vport_s { @@ -410,7 +410,7 @@ struct bfi_pbc_vport_s { wwn_t vp_nwwn; }; -/** +/* * BFI pre-boot configuration information */ struct bfi_pbc_s { @@ -427,7 +427,7 @@ struct bfi_pbc_s { struct bfi_pbc_vport_s vport[BFI_PBC_MAX_VPORTS]; }; -/** +/* *---------------------------------------------------------------------- * MSGQ *---------------------------------------------------------------------- @@ -531,7 +531,7 @@ enum bfi_port_i2h { BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4), }; -/** +/* * Generic REQ type */ struct bfi_port_generic_req_s { @@ -540,7 +540,7 @@ struct bfi_port_generic_req_s { u32 rsvd; }; -/** +/* * Generic RSP type */ struct bfi_port_generic_rsp_s { @@ -550,7 +550,7 @@ struct bfi_port_generic_rsp_s { u32 msgtag; /* msgtag for reply */ }; -/** +/* * BFI_PORT_H2I_GET_STATS_REQ */ struct bfi_port_get_stats_req_s { diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h index 69ac85f9e93..fa9f6fb9d45 100644 --- a/drivers/scsi/bfa/bfi_ms.h +++ b/drivers/scsi/bfa/bfi_ms.h @@ -41,7 +41,7 @@ struct bfi_iocfc_cfg_s { u16 rsvd_1; u32 endian_sig; /* endian signature of host */ - /** + /* * Request and response circular queue base addresses, size and * shadow index pointers. */ @@ -58,7 +58,7 @@ struct bfi_iocfc_cfg_s { struct bfa_iocfc_intr_attr_s intr_attr; /* IOC interrupt attributes */ }; -/** +/* * Boot target wwn information for this port. This contains either the stored * or discovered boot target port wwns for the port. 
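Back in bfad_im.c a few hunks earlier, bfad_im_supported_speeds() stops declaring struct bfa_ioc_attr_s on the stack and kzalloc()s it instead, the usual cure for oversized kernel-stack frames. A sketch of that pattern with a hypothetical helper; it assumes the driver's own headers for struct bfa_s, struct bfa_ioc_attr_s and bfa_get_attr(), and on allocation failure it simply reports nothing:

#include <linux/slab.h>

static u32 example_max_speed(struct bfa_s *bfa)
{
	struct bfa_ioc_attr_s *ioc_attr;
	u32 speed = 0;

	/* Heap allocation keeps the large attribute block off the stack. */
	ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
	if (!ioc_attr)
		return 0;

	bfa_get_attr(bfa, ioc_attr);
	speed = ioc_attr->adapter_attr.max_speed;

	kfree(ioc_attr);
	return speed;
}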
*/ @@ -75,7 +75,7 @@ struct bfi_iocfc_cfgrsp_s { struct bfi_pbc_s pbc_cfg; }; -/** +/* * BFI_IOCFC_H2I_CFG_REQ message */ struct bfi_iocfc_cfg_req_s { @@ -84,7 +84,7 @@ struct bfi_iocfc_cfg_req_s { }; -/** +/* * BFI_IOCFC_I2H_CFG_REPLY message */ struct bfi_iocfc_cfg_reply_s { @@ -95,7 +95,7 @@ struct bfi_iocfc_cfg_reply_s { }; -/** +/* * BFI_IOCFC_H2I_SET_INTR_REQ message */ struct bfi_iocfc_set_intr_req_s { @@ -107,7 +107,7 @@ struct bfi_iocfc_set_intr_req_s { }; -/** +/* * BFI_IOCFC_H2I_UPDATEQ_REQ message */ struct bfi_iocfc_updateq_req_s { @@ -119,7 +119,7 @@ struct bfi_iocfc_updateq_req_s { }; -/** +/* * BFI_IOCFC_I2H_UPDATEQ_RSP message */ struct bfi_iocfc_updateq_rsp_s { @@ -129,7 +129,7 @@ struct bfi_iocfc_updateq_rsp_s { }; -/** +/* * H2I Messages */ union bfi_iocfc_h2i_msg_u { @@ -140,7 +140,7 @@ union bfi_iocfc_h2i_msg_u { }; -/** +/* * I2H Messages */ union bfi_iocfc_i2h_msg_u { @@ -173,7 +173,7 @@ enum bfi_fcport_i2h { }; -/** +/* * Generic REQ type */ struct bfi_fcport_req_s { @@ -181,7 +181,7 @@ struct bfi_fcport_req_s { u32 msgtag; /* msgtag for reply */ }; -/** +/* * Generic RSP type */ struct bfi_fcport_rsp_s { @@ -191,7 +191,7 @@ struct bfi_fcport_rsp_s { u32 msgtag; /* msgtag for reply */ }; -/** +/* * BFI_FCPORT_H2I_ENABLE_REQ */ struct bfi_fcport_enable_req_s { @@ -205,7 +205,7 @@ struct bfi_fcport_enable_req_s { u32 rsvd2; }; -/** +/* * BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ */ struct bfi_fcport_set_svc_params_req_s { @@ -214,7 +214,7 @@ struct bfi_fcport_set_svc_params_req_s { u16 rsvd; }; -/** +/* * BFI_FCPORT_I2H_EVENT */ struct bfi_fcport_event_s { @@ -222,7 +222,7 @@ struct bfi_fcport_event_s { struct bfa_port_link_s link_state; }; -/** +/* * BFI_FCPORT_I2H_TRUNK_SCN */ struct bfi_fcport_trunk_link_s { @@ -243,7 +243,7 @@ struct bfi_fcport_trunk_scn_s { struct bfi_fcport_trunk_link_s tlink[BFI_FCPORT_MAX_LINKS]; }; -/** +/* * fcport H2I message */ union bfi_fcport_h2i_msg_u { @@ -255,7 +255,7 @@ union bfi_fcport_h2i_msg_u { struct bfi_fcport_req_s *pstatsclear; }; -/** +/* * fcport I2H message */ union bfi_fcport_i2h_msg_u { @@ -279,7 +279,7 @@ enum bfi_fcxp_i2h { #define BFA_FCXP_MAX_SGES 2 -/** +/* * FCXP send request structure */ struct bfi_fcxp_send_req_s { @@ -299,7 +299,7 @@ struct bfi_fcxp_send_req_s { struct bfi_sge_s rsp_sge[BFA_FCXP_MAX_SGES]; /* response buf */ }; -/** +/* * FCXP send response structure */ struct bfi_fcxp_send_rsp_s { @@ -565,14 +565,14 @@ enum bfi_ioim_i2h { BFI_IOIM_I2H_IOABORT_RSP = BFA_I2HM(2), /* ABORT rsp */ }; -/** +/* * IO command DIF info */ struct bfi_ioim_dif_s { u32 dif_info[4]; }; -/** +/* * FCP IO messages overview * * @note @@ -587,7 +587,7 @@ struct bfi_ioim_req_s { u16 rport_hdl; /* itnim/rport firmware handle */ struct fcp_cmnd_s cmnd; /* IO request info */ - /** + /* * SG elements array within the IO request must be double word * aligned. This aligment is required to optimize SGM setup for the IO. */ @@ -598,7 +598,7 @@ struct bfi_ioim_req_s { struct bfi_ioim_dif_s dif; }; -/** +/* * This table shows various IO status codes from firmware and their * meaning. Host driver can use these status codes to further process * IO completions. 
@@ -684,7 +684,7 @@ enum bfi_ioim_status { }; #define BFI_IOIM_SNSLEN (256) -/** +/* * I/O response message */ struct bfi_ioim_rsp_s { @@ -746,7 +746,7 @@ enum bfi_tskim_status { BFI_TSKIM_STS_NOT_SUPP = 4, BFI_TSKIM_STS_FAILED = 5, - /** + /* * Defined by BFA */ BFI_TSKIM_STS_TIMEOUT = 10, /* TM request timedout */ diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 99f2b8c5dd6..8c04fada710 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -692,6 +692,9 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port), atid, tid, status, csk, csk->state, csk->flags); + if (status == CPL_ERR_RTX_NEG_ADVICE) + goto rel_skb; + if (status && status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST && status != CPL_ERR_ARP_MISS) diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index 54f50b07dac..8f1b5c8bf90 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c @@ -1080,7 +1080,7 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb, * and is expected to be held on return. * **/ -static int dc395x_queue_command(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) +static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { struct DeviceCtlBlk *dcb; struct ScsiReqBlk *srb; @@ -1154,6 +1154,7 @@ complete: return 0; } +static DEF_SCSI_QCMD(dc395x_queue_command) /* * Return the disk geometry for the given SCSI device. diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index b9bcfa4c7d2..5be3ae15cb7 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -773,6 +773,8 @@ static const struct scsi_dh_devlist rdac_dev_list[] = { {"ENGENIO", "INF-01-00"}, {"STK", "FLEXLINE 380"}, {"SUN", "CSM100_R_FC"}, + {"SUN", "STK6580_6780"}, + {"SUN", "SUN_6180"}, {NULL, NULL}, }; diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index 23dec006338..cffcb108ac9 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c @@ -423,7 +423,7 @@ static int adpt_slave_configure(struct scsi_device * device) return 0; } -static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *)) +static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *)) { adpt_hba* pHba = NULL; struct adpt_device* pDev = NULL; /* dpt per device information */ @@ -491,6 +491,8 @@ static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *)) return adpt_scsi_to_i2o(pHba, cmd, pDev); } +static DEF_SCSI_QCMD(adpt_queue) + static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev, sector_t capacity, int geom[]) { diff --git a/drivers/scsi/dpti.h b/drivers/scsi/dpti.h index 337746d4604..beded716f93 100644 --- a/drivers/scsi/dpti.h +++ b/drivers/scsi/dpti.h @@ -29,7 +29,7 @@ */ static int adpt_detect(struct scsi_host_template * sht); -static int adpt_queue(struct scsi_cmnd * cmd, void (*cmdcomplete) (struct scsi_cmnd *)); +static int adpt_queue(struct Scsi_Host *h, struct scsi_cmnd * cmd); static int adpt_abort(struct scsi_cmnd * cmd); static int adpt_reset(struct scsi_cmnd* cmd); static int adpt_release(struct Scsi_Host *host); diff --git a/drivers/scsi/dtc.h b/drivers/scsi/dtc.h index 0b205f8c732..cdc621204b6 100644 --- a/drivers/scsi/dtc.h +++ b/drivers/scsi/dtc.h @@ -36,7 +36,7 @@ static int dtc_abort(Scsi_Cmnd *); static int 
dtc_biosparam(struct scsi_device *, struct block_device *, sector_t, int*); static int dtc_detect(struct scsi_host_template *); -static int dtc_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); +static int dtc_queue_command(struct Scsi_Host *, struct scsi_cmnd *); static int dtc_bus_reset(Scsi_Cmnd *); #ifndef CMD_PER_LUN diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c index d1c31378f6d..53925ac178f 100644 --- a/drivers/scsi/eata.c +++ b/drivers/scsi/eata.c @@ -505,8 +505,7 @@ static int eata2x_detect(struct scsi_host_template *); static int eata2x_release(struct Scsi_Host *); -static int eata2x_queuecommand(struct scsi_cmnd *, - void (*done) (struct scsi_cmnd *)); +static int eata2x_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); static int eata2x_eh_abort(struct scsi_cmnd *); static int eata2x_eh_host_reset(struct scsi_cmnd *); static int eata2x_bios_param(struct scsi_device *, struct block_device *, @@ -1758,7 +1757,7 @@ static void scsi_to_dev_dir(unsigned int i, struct hostdata *ha) } -static int eata2x_queuecommand(struct scsi_cmnd *SCpnt, +static int eata2x_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done) (struct scsi_cmnd *)) { struct Scsi_Host *shost = SCpnt->device->host; @@ -1843,6 +1842,8 @@ static int eata2x_queuecommand(struct scsi_cmnd *SCpnt, return 0; } +static DEF_SCSI_QCMD(eata2x_queuecommand) + static int eata2x_eh_abort(struct scsi_cmnd *SCarg) { struct Scsi_Host *shost = SCarg->device->host; diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c index 60886c19065..4a9641e69f5 100644 --- a/drivers/scsi/eata_pio.c +++ b/drivers/scsi/eata_pio.c @@ -335,7 +335,7 @@ static inline unsigned int eata_pio_send_command(unsigned long base, unsigned ch return 0; } -static int eata_pio_queue(struct scsi_cmnd *cmd, +static int eata_pio_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { unsigned int x, y; @@ -438,6 +438,8 @@ static int eata_pio_queue(struct scsi_cmnd *cmd, return 0; } +static DEF_SCSI_QCMD(eata_pio_queue) + static int eata_pio_abort(struct scsi_cmnd *cmd) { unsigned int loop = 100; diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c index e2bc779f86c..57558523c1b 100644 --- a/drivers/scsi/esp_scsi.c +++ b/drivers/scsi/esp_scsi.c @@ -916,7 +916,7 @@ static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent) scsi_track_queue_full(dev, lp->num_tagged - 1); } -static int esp_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) +static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { struct scsi_device *dev = cmd->device; struct esp *esp = shost_priv(dev->host); @@ -941,6 +941,8 @@ static int esp_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd return 0; } +static DEF_SCSI_QCMD(esp_queuecommand) + static int esp_check_gross_error(struct esp *esp) { if (esp->sreg & ESP_STAT_SPAM) { diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 844d618b84b..d23a538a9df 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -117,7 +117,7 @@ static void fcoe_recv_frame(struct sk_buff *skb); static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *); -module_param_call(create, fcoe_create, NULL, (void *)FIP_MODE_AUTO, S_IWUSR); +module_param_call(create, fcoe_create, NULL, (void *)FIP_MODE_FABRIC, S_IWUSR); __MODULE_PARM_TYPE(create, "string"); MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface"); module_param_call(create_vn2vn, fcoe_create, NULL, @@ -1243,7 +1243,6 @@ int 
fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, struct fcoe_interface *fcoe; struct fc_frame_header *fh; struct fcoe_percpu_s *fps; - struct fcoe_port *port; struct ethhdr *eh; unsigned int cpu; @@ -1262,16 +1261,7 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, skb_tail_pointer(skb), skb_end_pointer(skb), skb->csum, skb->dev ? skb->dev->name : "<NULL>"); - /* check for mac addresses */ eh = eth_hdr(skb); - port = lport_priv(lport); - if (compare_ether_addr(eh->h_dest, port->data_src_addr) && - compare_ether_addr(eh->h_dest, fcoe->ctlr.ctl_src_addr) && - compare_ether_addr(eh->h_dest, (u8[6])FC_FCOE_FLOGI_MAC)) { - FCOE_NETDEV_DBG(netdev, "wrong destination mac address:%pM\n", - eh->h_dest); - goto err; - } if (is_fip_mode(&fcoe->ctlr) && compare_ether_addr(eh->h_source, fcoe->ctlr.dest_addr)) { @@ -1291,6 +1281,12 @@ int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev, skb_set_transport_header(skb, sizeof(struct fcoe_hdr)); fh = (struct fc_frame_header *) skb_transport_header(skb); + if (ntoh24(&eh->h_dest[3]) != ntoh24(fh->fh_d_id)) { + FCOE_NETDEV_DBG(netdev, "FC frame d_id mismatch with MAC:%pM\n", + eh->h_dest); + goto err; + } + fr = fcoe_dev_from_skb(skb); fr->fr_dev = lport; fr->ptype = ptype; diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c index aa503d83092..bc17c712320 100644 --- a/drivers/scsi/fcoe/libfcoe.c +++ b/drivers/scsi/fcoe/libfcoe.c @@ -2296,7 +2296,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb) { struct fip_header *fiph; enum fip_vn2vn_subcode sub; - union { + struct { struct fc_rport_priv rdata; struct fcoe_rport frport; } buf; diff --git a/drivers/scsi/fd_mcs.c b/drivers/scsi/fd_mcs.c index 2ad95aa8f58..a2c6135d337 100644 --- a/drivers/scsi/fd_mcs.c +++ b/drivers/scsi/fd_mcs.c @@ -1072,7 +1072,7 @@ static int fd_mcs_release(struct Scsi_Host *shpnt) return 0; } -static int fd_mcs_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) +static int fd_mcs_queue_lck(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) { struct Scsi_Host *shpnt = SCpnt->device->host; @@ -1122,6 +1122,8 @@ static int fd_mcs_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *)) return 0; } +static DEF_SCSI_QCMD(fd_mcs_queue) + #if DEBUG_ABORT || DEBUG_RESET static void fd_mcs_print_info(Scsi_Cmnd * SCpnt) { diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c index e296bcc57d5..69b7aa54f43 100644 --- a/drivers/scsi/fdomain.c +++ b/drivers/scsi/fdomain.c @@ -1419,7 +1419,7 @@ static irqreturn_t do_fdomain_16x0_intr(int irq, void *dev_id) return IRQ_HANDLED; } -static int fdomain_16x0_queue(struct scsi_cmnd *SCpnt, +static int fdomain_16x0_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { if (in_command) { @@ -1469,6 +1469,8 @@ static int fdomain_16x0_queue(struct scsi_cmnd *SCpnt, return 0; } +static DEF_SCSI_QCMD(fdomain_16x0_queue) + #if DEBUG_ABORT static void print_info(struct scsi_cmnd *SCpnt) { diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index cbb20b13b22..92f185081e6 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -246,7 +246,7 @@ void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *); void fnic_update_mac(struct fc_lport *, u8 *new); void fnic_update_mac_locked(struct fnic *, u8 *new); -int fnic_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *)); +int fnic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); int fnic_abort_cmd(struct scsi_cmnd *); int fnic_device_reset(struct scsi_cmnd *); int 
fnic_host_reset(struct scsi_cmnd *); diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 198cbab3e89..22d02404d15 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -349,7 +349,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, * Routine to send a scsi cdb * Called with host_lock held and interrupts disabled. */ -int fnic_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) +static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) { struct fc_lport *lp; struct fc_rport *rport; @@ -457,6 +457,8 @@ out: return ret; } +DEF_SCSI_QCMD(fnic_queuecommand) + /* * fnic_fcpio_fw_reset_cmpl_handler * Routine to handle fw reset completion diff --git a/drivers/scsi/g_NCR5380.h b/drivers/scsi/g_NCR5380.h index 921764c9ab2..1bcdb7beb77 100644 --- a/drivers/scsi/g_NCR5380.h +++ b/drivers/scsi/g_NCR5380.h @@ -46,7 +46,7 @@ static int generic_NCR5380_abort(Scsi_Cmnd *); static int generic_NCR5380_detect(struct scsi_host_template *); static int generic_NCR5380_release_resources(struct Scsi_Host *); -static int generic_NCR5380_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); +static int generic_NCR5380_queue_command(struct Scsi_Host *, struct scsi_cmnd *); static int generic_NCR5380_bus_reset(Scsi_Cmnd *); static const char* generic_NCR5380_info(struct Scsi_Host *); diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index 5a3f9310101..76365700e2d 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c @@ -185,7 +185,7 @@ static long gdth_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); static void gdth_flush(gdth_ha_str *ha); -static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *)); +static int gdth_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd); static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp, struct gdth_cmndinfo *cmndinfo); static void gdth_scsi_done(struct scsi_cmnd *scp); @@ -4004,7 +4004,7 @@ static int gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,se } -static int gdth_queuecommand(struct scsi_cmnd *scp, +static int gdth_queuecommand_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) { gdth_ha_str *ha = shost_priv(scp->device->host); @@ -4022,6 +4022,8 @@ static int gdth_queuecommand(struct scsi_cmnd *scp, return __gdth_queuecommand(ha, scp, cmndinfo); } +static DEF_SCSI_QCMD(gdth_queuecommand) + static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp, struct gdth_cmndinfo *cmndinfo) { @@ -4177,6 +4179,14 @@ static int ioc_general(void __user *arg, char *cmnd) ha = gdth_find_ha(gen.ionode); if (!ha) return -EFAULT; + + if (gen.data_len > INT_MAX) + return -EINVAL; + if (gen.sense_len > INT_MAX) + return -EINVAL; + if (gen.data_len + gen.sense_len > INT_MAX) + return -EINVAL; + if (gen.data_len + gen.sense_len != 0) { if (!(buf = gdth_ioctl_alloc(ha, gen.data_len + gen.sense_len, FALSE, &paddr))) diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index c5d0606ad09..a6dea08664f 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -31,7 +31,6 @@ #include <linux/seq_file.h> #include <linux/init.h> #include <linux/spinlock.h> -#include <linux/smp_lock.h> #include <linux/compat.h> #include <linux/blktrace_api.h> #include <linux/uaccess.h> @@ -91,11 +90,7 @@ static const struct pci_device_id hpsa_pci_device_id[] = { {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3252}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3253}, 
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3254}, -#define PCI_DEVICE_ID_HP_CISSF 0x333f - {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x333F}, - {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, - PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, - {PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, + {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, {0,} }; @@ -114,8 +109,6 @@ static struct board_type products[] = { {0x3249103C, "Smart Array P812", &SA5_access}, {0x324a103C, "Smart Array P712m", &SA5_access}, {0x324b103C, "Smart Array P711m", &SA5_access}, - {0x3233103C, "StorageWorks P1210m", &SA5_access}, - {0x333F103C, "StorageWorks P1210m", &SA5_access}, {0x3250103C, "Smart Array", &SA5_access}, {0x3250113C, "Smart Array", &SA5_access}, {0x3250123C, "Smart Array", &SA5_access}, @@ -143,8 +136,7 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, int cmd_type); -static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd, - void (*done)(struct scsi_cmnd *)); +static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); static void hpsa_scan_start(struct Scsi_Host *); static int hpsa_scan_finished(struct Scsi_Host *sh, unsigned long elapsed_time); @@ -1926,7 +1918,7 @@ sglist_finished: } -static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd, +static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { struct ctlr_info *h; @@ -2020,6 +2012,8 @@ static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd, return 0; } +static DEF_SCSI_QCMD(hpsa_scsi_queue_command) + static void hpsa_scan_start(struct Scsi_Host *sh) { struct ctlr_info *h = shost_to_hba(sh); diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c index 0729f150b33..10b65556937 100644 --- a/drivers/scsi/hptiop.c +++ b/drivers/scsi/hptiop.c @@ -751,7 +751,7 @@ static void hptiop_post_req_mv(struct hptiop_hba *hba, MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba); } -static int hptiop_queuecommand(struct scsi_cmnd *scp, +static int hptiop_queuecommand_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) { struct Scsi_Host *host = scp->device->host; @@ -819,6 +819,8 @@ cmd_done: return 0; } +static DEF_SCSI_QCMD(hptiop_queuecommand) + static const char *hptiop_info(struct Scsi_Host *host) { return driver_name_long; diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c index 9a4b69d4f4e..67fc8ffd52e 100644 --- a/drivers/scsi/ibmmca.c +++ b/drivers/scsi/ibmmca.c @@ -39,7 +39,7 @@ #include <scsi/scsi_host.h> /* Common forward declarations for all Linux-versions: */ -static int ibmmca_queuecommand (Scsi_Cmnd *, void (*done) (Scsi_Cmnd *)); +static int ibmmca_queuecommand (struct Scsi_Host *, struct scsi_cmnd *); static int ibmmca_abort (Scsi_Cmnd *); static int ibmmca_host_reset (Scsi_Cmnd *); static int ibmmca_biosparam (struct scsi_device *, struct block_device *, sector_t, int *); @@ -1691,7 +1691,7 @@ static int __devexit ibmmca_remove(struct device *dev) } /* The following routine is the SCSI command queue for the midlevel driver */ -static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) +static int ibmmca_queuecommand_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) { unsigned int ldn; unsigned int scsi_cmd; @@ -1996,6 +1996,8 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) return 0; } +static DEF_SCSI_QCMD(ibmmca_queuecommand) + static int 
__ibmmca_abort(Scsi_Cmnd * cmd) { /* Abort does not work, as the adapter never generates an interrupt on diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 00d08b25425..57cad7e20ca 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -1606,7 +1606,7 @@ static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost) * Returns: * 0 on success / other on failure **/ -static int ibmvfc_queuecommand(struct scsi_cmnd *cmnd, +static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) { struct ibmvfc_host *vhost = shost_priv(cmnd->device->host); @@ -1672,6 +1672,8 @@ static int ibmvfc_queuecommand(struct scsi_cmnd *cmnd, return 0; } +static DEF_SCSI_QCMD(ibmvfc_queuecommand) + /** * ibmvfc_sync_completion - Signal that a synchronous command has completed * @evt: ibmvfc event struct diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 67f78a470f5..041958453e2 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -713,7 +713,7 @@ static inline u16 lun_from_dev(struct scsi_device *dev) * @cmd: struct scsi_cmnd to be executed * @done: Callback function to be called when cmd is completed */ -static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd, +static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) { struct srp_cmd *srp_cmd; @@ -766,6 +766,8 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd, return ibmvscsi_send_srp_event(evt_struct, hostdata, 0); } +static DEF_SCSI_QCMD(ibmvscsi_queuecommand) + /* ------------------------------------------------------------ * Routines for driver initialization */ diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c index 4734ab0b3ff..99aa0e5699b 100644 --- a/drivers/scsi/imm.c +++ b/drivers/scsi/imm.c @@ -926,7 +926,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd) return 0; } -static int imm_queuecommand(struct scsi_cmnd *cmd, +static int imm_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { imm_struct *dev = imm_dev(cmd->device->host); @@ -949,6 +949,8 @@ static int imm_queuecommand(struct scsi_cmnd *cmd, return 0; } +static DEF_SCSI_QCMD(imm_queuecommand) + /* * Apparently the disk->capacity attribute is off by 1 sector * for all disk drives. 
We add the one here, but it should really diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c index 52bdc6df6b9..6568aab745a 100644 --- a/drivers/scsi/in2000.c +++ b/drivers/scsi/in2000.c @@ -334,7 +334,7 @@ static uchar calc_sync_xfer(unsigned int period, unsigned int offset) static void in2000_execute(struct Scsi_Host *instance); -static int in2000_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) +static int in2000_queuecommand_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) { struct Scsi_Host *instance; struct IN2000_hostdata *hostdata; @@ -431,6 +431,8 @@ static int in2000_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)) return 0; } +static DEF_SCSI_QCMD(in2000_queuecommand) + /* diff --git a/drivers/scsi/in2000.h b/drivers/scsi/in2000.h index 0fb8b06b839..5821e1fbce0 100644 --- a/drivers/scsi/in2000.h +++ b/drivers/scsi/in2000.h @@ -396,7 +396,7 @@ struct IN2000_hostdata { flags) static int in2000_detect(struct scsi_host_template *) in2000__INIT; -static int in2000_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); +static int in2000_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); static int in2000_abort(Scsi_Cmnd *); static void in2000_setup(char *, int *) in2000__INIT; static int in2000_biosparam(struct scsi_device *, struct block_device *, diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c index 108797761b9..9627d062e16 100644 --- a/drivers/scsi/initio.c +++ b/drivers/scsi/initio.c @@ -2639,7 +2639,7 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c * will cause the mid layer to call us again later with the command) */ -static int i91u_queuecommand(struct scsi_cmnd *cmd, +static int i91u_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata; @@ -2656,6 +2656,8 @@ static int i91u_queuecommand(struct scsi_cmnd *cmd, return 0; } +static DEF_SCSI_QCMD(i91u_queuecommand) + /** * i91u_bus_reset - reset the SCSI bus * @cmnd: Command block we want to trigger the reset for diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index df9a12c8b37..5bbaee597e8 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -5709,7 +5709,7 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd) * SCSI_MLQUEUE_DEVICE_BUSY if device is busy * SCSI_MLQUEUE_HOST_BUSY if host is busy **/ -static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd, +static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd, void (*done) (struct scsi_cmnd *)) { struct ipr_ioa_cfg *ioa_cfg; @@ -5792,6 +5792,8 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd, return 0; } +static DEF_SCSI_QCMD(ipr_queuecommand) + /** * ipr_ioctl - IOCTL handler * @sdev: scsi device struct @@ -9025,6 +9027,8 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = { PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 }, + { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, + PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 }, { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index aa8bb2f2c6e..b28a00f1082 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -82,6 +82,7 @@ #define IPR_SUBS_DEV_ID_57B4 0x033B #define IPR_SUBS_DEV_ID_57B2 0x035F +#define IPR_SUBS_DEV_ID_57C4 0x0354 #define 
IPR_SUBS_DEV_ID_57C6 0x0357 #define IPR_SUBS_DEV_ID_57CC 0x035C diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c index f83a116955f..b2511acd39b 100644 --- a/drivers/scsi/ips.c +++ b/drivers/scsi/ips.c @@ -232,7 +232,7 @@ static int ips_detect(struct scsi_host_template *); static int ips_release(struct Scsi_Host *); static int ips_eh_abort(struct scsi_cmnd *); static int ips_eh_reset(struct scsi_cmnd *); -static int ips_queue(struct scsi_cmnd *, void (*)(struct scsi_cmnd *)); +static int ips_queue(struct Scsi_Host *, struct scsi_cmnd *); static const char *ips_info(struct Scsi_Host *); static irqreturn_t do_ipsintr(int, void *); static int ips_hainit(ips_ha_t *); @@ -1046,7 +1046,7 @@ static int ips_eh_reset(struct scsi_cmnd *SC) /* Linux obtains io_request_lock before calling this function */ /* */ /****************************************************************************/ -static int ips_queue(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *)) +static int ips_queue_lck(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *)) { ips_ha_t *ha; ips_passthru_t *pt; @@ -1137,6 +1137,8 @@ static int ips_queue(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *)) return (0); } +static DEF_SCSI_QCMD(ips_queue) + /****************************************************************************/ /* */ /* Routine Name: ips_biosparam */ diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 32f67c4b03f..911b2736caf 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -684,10 +684,9 @@ void fc_disc_stop(struct fc_lport *lport) { struct fc_disc *disc = &lport->disc; - if (disc) { + if (disc->pending) cancel_delayed_work_sync(&disc->disc_work); - fc_disc_stop_rports(disc); - } + fc_disc_stop_rports(disc); } /** diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index c797f6b48f0..2924363d142 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -58,8 +58,7 @@ struct kmem_cache *scsi_pkt_cachep; #define FC_SRB_WRITE (1 << 0) /* - * The SCp.ptr should be tested and set under the host lock. NULL indicates - * that the command has been retruned to the scsi layer. + * The SCp.ptr should be tested and set under the scsi_pkt_queue lock */ #define CMD_SP(Cmnd) ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr) #define CMD_ENTRY_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in) @@ -1754,7 +1753,7 @@ static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport) * This is the i/o strategy routine, called by the SCSI layer. This routine * is called with the host_lock held. 
*/ -int fc_queuecommand(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) +static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *)) { struct fc_lport *lport; struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); @@ -1852,6 +1851,8 @@ out: spin_lock_irq(lport->host->host_lock); return rc; } + +DEF_SCSI_QCMD(fc_queuecommand) EXPORT_SYMBOL(fc_queuecommand); /** @@ -1880,8 +1881,6 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) lport = fsp->lp; si = fc_get_scsi_internal(lport); - if (!fsp->cmd) - return; /* * if can_queue ramp down is done then try can_queue ramp up @@ -1891,11 +1890,6 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) fc_fcp_can_queue_ramp_up(lport); sc_cmd = fsp->cmd; - fsp->cmd = NULL; - - if (!sc_cmd->SCp.ptr) - return; - CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status; switch (fsp->status_code) { case FC_COMPLETE: @@ -1971,15 +1965,13 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp) break; } - if (lport->state != LPORT_ST_READY && fsp->status_code != FC_COMPLETE) { - sc_cmd->result = (DID_REQUEUE << 16); - FC_FCP_DBG(fsp, "Returning DID_REQUEUE to scsi-ml\n"); - } + if (lport->state != LPORT_ST_READY && fsp->status_code != FC_COMPLETE) + sc_cmd->result = (DID_TRANSPORT_DISRUPTED << 16); spin_lock_irqsave(&si->scsi_queue_lock, flags); list_del(&fsp->list); - spin_unlock_irqrestore(&si->scsi_queue_lock, flags); sc_cmd->SCp.ptr = NULL; + spin_unlock_irqrestore(&si->scsi_queue_lock, flags); sc_cmd->scsi_done(sc_cmd); /* release ref from initial allocation in queue command */ @@ -1997,6 +1989,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd) { struct fc_fcp_pkt *fsp; struct fc_lport *lport; + struct fc_fcp_internal *si; int rc = FAILED; unsigned long flags; @@ -2006,7 +1999,8 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd) else if (!lport->link_up) return rc; - spin_lock_irqsave(lport->host->host_lock, flags); + si = fc_get_scsi_internal(lport); + spin_lock_irqsave(&si->scsi_queue_lock, flags); fsp = CMD_SP(sc_cmd); if (!fsp) { /* command completed while scsi eh was setting up */ @@ -2015,7 +2009,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd) } /* grab a ref so the fsp and sc_cmd cannot be relased from under us */ fc_fcp_pkt_hold(fsp); - spin_unlock_irqrestore(lport->host->host_lock, flags); + spin_unlock_irqrestore(&si->scsi_queue_lock, flags); if (fc_fcp_lock_pkt(fsp)) { /* completed while we were waiting for timer to be deleted */ diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index d9b6e11b0e8..9be63edbf8f 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -1447,13 +1447,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, } did = fc_frame_did(fp); - - if (!did) { - FC_LPORT_DBG(lport, "Bad FLOGI response\n"); - goto out; - } - - if (fc_frame_payload_op(fp) == ELS_LS_ACC) { + if (fc_frame_payload_op(fp) == ELS_LS_ACC && did) { flp = fc_frame_payload_get(fp, sizeof(*flp)); if (flp) { mfs = ntohs(flp->fl_csp.sp_bb_data) & @@ -1492,8 +1486,10 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, fc_lport_enter_dns(lport); } } - } else + } else { + FC_LPORT_DBG(lport, "FLOGI RJT or bad response\n"); fc_lport_error(lport, fp); + } out: fc_frame_free(fp); diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index b9f2286fe0c..a84ef13ed74 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c @@ -196,9 +196,9 @@ static const char *fc_rport_state(struct fc_rport_priv *rdata) void 
fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout) { if (timeout) - rport->dev_loss_tmo = timeout + 5; + rport->dev_loss_tmo = timeout; else - rport->dev_loss_tmo = 30; + rport->dev_loss_tmo = 1; } EXPORT_SYMBOL(fc_set_rport_loss_tmo); diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 633e0903635..c15fde808c3 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -1599,7 +1599,7 @@ enum { FAILURE_SESSION_NOT_READY, }; -int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) +static int iscsi_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *)) { struct iscsi_cls_session *cls_session; struct Scsi_Host *host; @@ -1736,6 +1736,8 @@ fault: spin_lock(host->host_lock); return 0; } + +DEF_SCSI_QCMD(iscsi_queuecommand) EXPORT_SYMBOL_GPL(iscsi_queuecommand); int iscsi_change_queue_depth(struct scsi_device *sdev, int depth, int reason) diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 55f09e92ab5..29251fabecc 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -189,7 +189,7 @@ int sas_queue_up(struct sas_task *task) * Note: XXX: Remove the host unlock/lock pair when SCSI Core can * call us without holding an IRQ spinlock... */ -int sas_queuecommand(struct scsi_cmnd *cmd, +static int sas_queuecommand_lck(struct scsi_cmnd *cmd, void (*scsi_done)(struct scsi_cmnd *)) __releases(host->host_lock) __acquires(dev->sata_dev.ap->lock) @@ -254,6 +254,8 @@ out: return res; } +DEF_SCSI_QCMD(sas_queuecommand) + static void sas_eh_finish_cmd(struct scsi_cmnd *cmd) { struct sas_task *task = TO_SAS_TASK(cmd); diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index a50aa03b8ac..196de40b906 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -202,9 +202,12 @@ struct lpfc_stats { uint32_t elsRcvPRLO; uint32_t elsRcvPRLI; uint32_t elsRcvLIRR; + uint32_t elsRcvRLS; uint32_t elsRcvRPS; uint32_t elsRcvRPL; uint32_t elsRcvRRQ; + uint32_t elsRcvRTV; + uint32_t elsRcvECHO; uint32_t elsXmitFLOGI; uint32_t elsXmitFDISC; uint32_t elsXmitPLOGI; @@ -549,9 +552,11 @@ struct lpfc_hba { #define ELS_XRI_ABORT_EVENT 0x40 #define ASYNC_EVENT 0x80 #define LINK_DISABLED 0x100 /* Link disabled by user */ -#define FCF_DISC_INPROGRESS 0x200 /* FCF discovery in progress */ -#define HBA_FIP_SUPPORT 0x400 /* FIP support in HBA */ -#define HBA_AER_ENABLED 0x800 /* AER enabled with HBA */ +#define FCF_TS_INPROG 0x200 /* FCF table scan in progress */ +#define FCF_RR_INPROG 0x400 /* FCF roundrobin flogi in progress */ +#define HBA_FIP_SUPPORT 0x800 /* FIP support in HBA */ +#define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */ +#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */ uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ struct lpfc_dmabuf slim2p; @@ -573,6 +578,7 @@ struct lpfc_hba { /* These fields used to be binfo */ uint32_t fc_pref_DID; /* preferred D_ID */ uint8_t fc_pref_ALPA; /* preferred AL_PA */ + uint32_t fc_edtovResol; /* E_D_TOV timer resolution */ uint32_t fc_edtov; /* E_D_TOV timer value */ uint32_t fc_arbtov; /* ARB_TOV timer value */ uint32_t fc_ratov; /* R_A_TOV timer value */ diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index f681eea5773..c1cbec01345 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -3789,8 +3789,13 @@ sysfs_mbox_read(struct file *filp, struct kobject *kobj, break; case MBX_SECURITY_MGMT: case MBX_AUTH_PORT: - 
if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) + if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) { + printk(KERN_WARNING "mbox_read:Command 0x%x " + "is not permitted\n", pmb->mbxCommand); + sysfs_mbox_idle(phba); + spin_unlock_irq(&phba->hbalock); return -EPERM; + } break; case MBX_READ_SPARM64: case MBX_READ_LA: diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index f5d60b55f53..7260c3af555 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -3142,12 +3142,12 @@ lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba, job = menlo->set_job; job->dd_data = NULL; /* so timeout handler does not reply */ - spin_lock_irqsave(&phba->hbalock, flags); + spin_lock(&phba->hbalock); cmdiocbq->iocb_flag |= LPFC_IO_WAKE; if (cmdiocbq->context2 && rspiocbq) memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb, &rspiocbq->iocb, sizeof(IOCB_t)); - spin_unlock_irqrestore(&phba->hbalock, flags); + spin_unlock(&phba->hbalock); bmp = menlo->bmp; rspiocbq = menlo->rspiocbq; diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 03f4ddc1857..a5f5a093a8a 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -44,6 +44,8 @@ int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *, void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t); void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *); +void lpfc_sli4_unreg_all_rpis(struct lpfc_vport *); + void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *); void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *, struct lpfc_nodelist *); @@ -229,6 +231,7 @@ void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *); uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *); int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t); void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t); +int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t); int lpfc_mem_alloc(struct lpfc_hba *, int align); void lpfc_mem_free(struct lpfc_hba *); @@ -271,6 +274,7 @@ int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t, void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t); void lpfc_sli_bemem_bcopy(void *, void *, uint32_t); void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); +void lpfc_sli_hba_iocb_abort(struct lpfc_hba *); void lpfc_sli_flush_fcp_rings(struct lpfc_hba *); int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *, struct lpfc_dmabuf *); diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index e6ca12f6c6c..884f4d32179 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -177,15 +177,18 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, (elscmd == ELS_CMD_LOGO))) switch (elscmd) { case ELS_CMD_FLOGI: - elsiocb->iocb_flag |= ((ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT) + elsiocb->iocb_flag |= + ((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT) & LPFC_FIP_ELS_ID_MASK); break; case ELS_CMD_FDISC: - elsiocb->iocb_flag |= ((ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT) + elsiocb->iocb_flag |= + ((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT) & LPFC_FIP_ELS_ID_MASK); break; case ELS_CMD_LOGO: - elsiocb->iocb_flag |= ((ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT) + elsiocb->iocb_flag |= + ((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT) & LPFC_FIP_ELS_ID_MASK); break; } @@ -517,18 +520,13 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if 
(sp->cmn.edtovResolution) /* E_D_TOV ticks are in nanoseconds */ phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000; + phba->fc_edtovResol = sp->cmn.edtovResolution; phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000; if (phba->fc_topology == TOPOLOGY_LOOP) { spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_PUBLIC_LOOP; spin_unlock_irq(shost->host_lock); - } else { - /* - * If we are a N-port connected to a Fabric, fixup sparam's so - * logins to devices on remote loops work. - */ - vport->fc_sparam.cmn.altBbCredit = 1; } vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID; @@ -585,6 +583,10 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_unreg_rpi(vport, np); } lpfc_cleanup_pending_mbox(vport); + + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_sli4_unreg_all_rpis(vport); + if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) { lpfc_mbx_unreg_vpi(vport); spin_lock_irq(shost->host_lock); @@ -800,7 +802,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (irsp->ulpStatus) { /* - * In case of FIP mode, perform round robin FCF failover + * In case of FIP mode, perform roundrobin FCF failover * due to new FCF discovery */ if ((phba->hba_flag & HBA_FIP_SUPPORT) && @@ -808,48 +810,16 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, (irsp->ulpStatus != IOSTAT_LOCAL_REJECT) && (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED)) { lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, - "2611 FLOGI failed on registered " - "FCF record fcf_index(%d), status: " - "x%x/x%x, tmo:x%x, trying to perform " - "round robin failover\n", + "2611 FLOGI failed on FCF (x%x), " + "status:x%x/x%x, tmo:x%x, perform " + "roundrobin FCF failover\n", phba->fcf.current_rec.fcf_indx, irsp->ulpStatus, irsp->un.ulpWord[4], irsp->ulpTimeout); fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); - if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { - /* - * Exhausted the eligible FCF record list, - * fail through to retry FLOGI on current - * FCF record. 
- */ - lpfc_printf_log(phba, KERN_WARNING, - LOG_FIP | LOG_ELS, - "2760 Completed one round " - "of FLOGI FCF round robin " - "failover list, retry FLOGI " - "on currently registered " - "FCF index:%d\n", - phba->fcf.current_rec.fcf_indx); - } else { - lpfc_printf_log(phba, KERN_INFO, - LOG_FIP | LOG_ELS, - "2794 FLOGI FCF round robin " - "failover to FCF index x%x\n", - fcf_index); - rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, - fcf_index); - if (rc) - lpfc_printf_log(phba, KERN_WARNING, - LOG_FIP | LOG_ELS, - "2761 FLOGI round " - "robin FCF failover " - "read FCF failed " - "rc:x%x, fcf_index:" - "%d\n", rc, - phba->fcf.current_rec.fcf_indx); - else - goto out; - } + rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index); + if (rc) + goto out; } /* FLOGI failure */ @@ -939,6 +909,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_nlp_put(ndlp); spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_DISCOVERY; + phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); spin_unlock_irq(&phba->hbalock); goto out; } @@ -947,13 +918,12 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (phba->hba_flag & HBA_FIP_SUPPORT) lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | LOG_ELS, - "2769 FLOGI successful on FCF " - "record: current_fcf_index:" - "x%x, terminate FCF round " - "robin failover process\n", + "2769 FLOGI to FCF (x%x) " + "completed successfully\n", phba->fcf.current_rec.fcf_indx); spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_DISCOVERY; + phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO); spin_unlock_irq(&phba->hbalock); goto out; } @@ -1175,12 +1145,13 @@ lpfc_initial_flogi(struct lpfc_vport *vport) return 0; } - if (lpfc_issue_els_flogi(vport, ndlp, 0)) + if (lpfc_issue_els_flogi(vport, ndlp, 0)) { /* This decrement of reference count to node shall kick off * the release of the node. */ lpfc_nlp_put(ndlp); - + return 0; + } return 1; } @@ -1645,6 +1616,13 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm)); sp = (struct serv_parm *) pcmd; + /* + * If we are a N-port connected to a Fabric, fix-up paramm's so logins + * to device on remote loops work. + */ + if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP)) + sp->cmn.altBbCredit = 1; + if (sp->cmn.fcphLow < FC_PH_4_3) sp->cmn.fcphLow = FC_PH_4_3; @@ -3926,6 +3904,64 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, } /** + * lpfc_els_rsp_echo_acc - Issue echo acc response + * @vport: pointer to a virtual N_Port data structure. + * @data: pointer to echo data to return in the accept. + * @oldiocb: pointer to the original lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. 
+ * + * Return code + * 0 - Successfully issued acc echo response + * 1 - Failed to issue acc echo response + **/ +static int +lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, + struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp) +{ + struct lpfc_hba *phba = vport->phba; + struct lpfc_iocbq *elsiocb; + struct lpfc_sli *psli; + uint8_t *pcmd; + uint16_t cmdsize; + int rc; + + psli = &phba->sli; + cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; + + elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, + ndlp->nlp_DID, ELS_CMD_ACC); + if (!elsiocb) + return 1; + + elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext; /* Xri */ + /* Xmit ECHO ACC response tag <ulpIoTag> */ + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "2876 Xmit ECHO ACC response tag x%x xri x%x\n", + elsiocb->iotag, elsiocb->iocb.ulpContext); + pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); + *((uint32_t *) (pcmd)) = ELS_CMD_ACC; + pcmd += sizeof(uint32_t); + memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); + + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, + "Issue ACC ECHO: did:x%x flg:x%x", + ndlp->nlp_DID, ndlp->nlp_flag, 0); + + phba->fc_stat.elsXmitACC++; + elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; + lpfc_nlp_put(ndlp); + elsiocb->context1 = NULL; /* Don't need ndlp for cmpl, + * it could be freed */ + + rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); + if (rc == IOCB_ERROR) { + lpfc_els_free_iocb(phba, elsiocb); + return 1; + } + return 0; +} + +/** * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport * @vport: pointer to a host virtual N_Port data structure. * @@ -4684,6 +4720,30 @@ lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, } /** + * lpfc_els_rcv_echo - Process an unsolicited echo iocb + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * Return code + * 0 - Successfully processed echo iocb (currently always return 0) + **/ +static int +lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + uint8_t *pcmd; + + pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt); + + /* skip over first word of echo command to find echo data */ + pcmd += sizeof(uint32_t); + + lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp); + return 0; +} + +/** * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb * @vport: pointer to a host virtual N_Port data structure. * @cmdiocb: pointer to lpfc command iocb data structure. @@ -4735,6 +4795,89 @@ lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, } /** + * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd + * @phba: pointer to lpfc hba data structure. + * @pmb: pointer to the driver internal queue element for mailbox command. + * + * This routine is the completion callback function for the MBX_READ_LNK_STAT + * mailbox command. This callback function is to actually send the Accept + * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It + * collects the link statistics from the completion of the MBX_READ_LNK_STAT + * mailbox command, constructs the RPS response with the link statistics + * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC + * response to the RPS. 
+ * + * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp + * will be incremented by 1 for holding the ndlp and the reference to ndlp + * will be stored into the context1 field of the IOCB for the completion + * callback function to the RPS Accept Response ELS IOCB command. + * + **/ +static void +lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + MAILBOX_t *mb; + IOCB_t *icmd; + struct RLS_RSP *rls_rsp; + uint8_t *pcmd; + struct lpfc_iocbq *elsiocb; + struct lpfc_nodelist *ndlp; + uint16_t xri; + uint32_t cmdsize; + + mb = &pmb->u.mb; + + ndlp = (struct lpfc_nodelist *) pmb->context2; + xri = (uint16_t) ((unsigned long)(pmb->context1)); + pmb->context1 = NULL; + pmb->context2 = NULL; + + if (mb->mbxStatus) { + mempool_free(pmb, phba->mbox_mem_pool); + return; + } + + cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t); + mempool_free(pmb, phba->mbox_mem_pool); + elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, + lpfc_max_els_tries, ndlp, + ndlp->nlp_DID, ELS_CMD_ACC); + + /* Decrement the ndlp reference count from previous mbox command */ + lpfc_nlp_put(ndlp); + + if (!elsiocb) + return; + + icmd = &elsiocb->iocb; + icmd->ulpContext = xri; + + pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); + *((uint32_t *) (pcmd)) = ELS_CMD_ACC; + pcmd += sizeof(uint32_t); /* Skip past command */ + rls_rsp = (struct RLS_RSP *)pcmd; + + rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt); + rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt); + rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt); + rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt); + rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord); + rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt); + + /* Xmit ELS RLS ACC response tag <ulpIoTag> */ + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, + "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " + "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", + elsiocb->iotag, elsiocb->iocb.ulpContext, + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, + ndlp->nlp_rpi); + elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; + phba->fc_stat.elsXmitACC++; + if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) + lpfc_els_free_iocb(phba, elsiocb); +} + +/** * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd * @phba: pointer to lpfc hba data structure. * @pmb: pointer to the driver internal queue element for mailbox command. @@ -4827,7 +4970,155 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) } /** - * lpfc_els_rcv_rps - Process an unsolicited rps iocb + * lpfc_els_rcv_rls - Process an unsolicited rls iocb + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * This routine processes Read Port Status (RPL) IOCB received as an + * ELS unsolicited event. It first checks the remote port state. If the + * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE + * state, it invokes the lpfc_els_rsl_reject() routine to send the reject + * response. Otherwise, it issue the MBX_READ_LNK_STAT mailbox command + * for reading the HBA link statistics. It is for the callback function, + * lpfc_els_rsp_rls_acc(), set to the MBX_READ_LNK_STAT mailbox command + * to actually sending out RPL Accept (ACC) response. 
+ * + * Return codes + * 0 - Successfully processed rls iocb (currently always return 0) + **/ +static int +lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_hba *phba = vport->phba; + LPFC_MBOXQ_t *mbox; + struct lpfc_dmabuf *pcmd; + struct ls_rjt stat; + + if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && + (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) + /* reject the unsolicited RPS request and done with it */ + goto reject_out; + + pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC); + if (mbox) { + lpfc_read_lnk_stat(phba, mbox); + mbox->context1 = + (void *)((unsigned long) cmdiocb->iocb.ulpContext); + mbox->context2 = lpfc_nlp_get(ndlp); + mbox->vport = vport; + mbox->mbox_cmpl = lpfc_els_rsp_rls_acc; + if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) + != MBX_NOT_FINISHED) + /* Mbox completion will send ELS Response */ + return 0; + /* Decrement reference count used for the failed mbox + * command. + */ + lpfc_nlp_put(ndlp); + mempool_free(mbox, phba->mbox_mem_pool); + } +reject_out: + /* issue rejection response */ + stat.un.b.lsRjtRsvd0 = 0; + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; + stat.un.b.vendorUnique = 0; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); + return 0; +} + +/** + * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb + * @vport: pointer to a host virtual N_Port data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @ndlp: pointer to a node-list data structure. + * + * This routine processes Read Timout Value (RTV) IOCB received as an + * ELS unsolicited event. It first checks the remote port state. If the + * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE + * state, it invokes the lpfc_els_rsl_reject() routine to send the reject + * response. Otherwise, it sends the Accept(ACC) response to a Read Timeout + * Value (RTV) unsolicited IOCB event. + * + * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp + * will be incremented by 1 for holding the ndlp and the reference to ndlp + * will be stored into the context1 field of the IOCB for the completion + * callback function to the RPS Accept Response ELS IOCB command. 
+ * + * Return codes + * 0 - Successfully processed rtv iocb (currently always return 0) + **/ +static int +lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, + struct lpfc_nodelist *ndlp) +{ + struct lpfc_hba *phba = vport->phba; + struct ls_rjt stat; + struct RTV_RSP *rtv_rsp; + uint8_t *pcmd; + struct lpfc_iocbq *elsiocb; + uint32_t cmdsize; + + + if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && + (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) + /* reject the unsolicited RPS request and done with it */ + goto reject_out; + + cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t); + elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize, + lpfc_max_els_tries, ndlp, + ndlp->nlp_DID, ELS_CMD_ACC); + + if (!elsiocb) + return 1; + + pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt); + *((uint32_t *) (pcmd)) = ELS_CMD_ACC; + pcmd += sizeof(uint32_t); /* Skip past command */ + + /* use the command's xri in the response */ + elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext; + + rtv_rsp = (struct RTV_RSP *)pcmd; + + /* populate RTV payload */ + rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */ + rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov); + bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0); + bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */ + rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov); + + /* Xmit ELS RLS ACC response tag <ulpIoTag> */ + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, + "2875 Xmit ELS RTV ACC response tag x%x xri x%x, " + "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, " + "Data: x%x x%x x%x\n", + elsiocb->iotag, elsiocb->iocb.ulpContext, + ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, + ndlp->nlp_rpi, + rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov); + elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; + phba->fc_stat.elsXmitACC++; + if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR) + lpfc_els_free_iocb(phba, elsiocb); + return 0; + +reject_out: + /* issue rejection response */ + stat.un.b.lsRjtRsvd0 = 0; + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; + stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA; + stat.un.b.vendorUnique = 0; + lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); + return 0; +} + +/* lpfc_els_rcv_rps - Process an unsolicited rps iocb * @vport: pointer to a host virtual N_Port data structure. * @cmdiocb: pointer to lpfc command iocb data structure. * @ndlp: pointer to a node-list data structure. 
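As an aside on the RTV handling added in the hunk above: the accept reuses the command's XRI and returns three words of timer data after the ELS_CMD_ACC word (R_A_TOV reported in milliseconds, E_D_TOV, and a timeout-qualifier word carrying the E_D_TOV resolution flag). The standalone userspace sketch below lays that payload out in plain C purely for illustration; the ACC command encoding and the qualifier bit position are assumptions, not values taken from the driver headers or FC-LS.

/*
 * Illustrative sketch only -- not the lpfc implementation.  Field order
 * mirrors the hunk above; ELS_ACC_WORD and QTOV_EDTOV_RES are assumed
 * encodings chosen for the example.
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>  /* htonl(): FC payloads are big-endian on the wire */

#define ELS_ACC_WORD    0x02000000u     /* assumed ELS ACC command word */
#define QTOV_EDTOV_RES  (1u << 26)      /* assumed E_D_TOV-resolution bit */

struct rtv_acc {
        uint32_t cmd;    /* ELS ACC */
        uint32_t ratov;  /* R_A_TOV in milliseconds */
        uint32_t edtov;  /* E_D_TOV, ns or ms depending on the flag */
        uint32_t qtov;   /* timeout qualifier */
};

static void build_rtv_acc(struct rtv_acc *acc, uint32_t ratov_s,
                          uint32_t edtov, int edtov_in_ns)
{
        memset(acc, 0, sizeof(*acc));
        acc->cmd   = htonl(ELS_ACC_WORD);
        acc->ratov = htonl(ratov_s * 1000);     /* report msecs, as above */
        acc->edtov = htonl(edtov);
        acc->qtov  = htonl(edtov_in_ns ? QTOV_EDTOV_RES : 0);
}

A caller would copy the resulting struct into the response buffer in place of the ELS command word and the RTV_RSP body that the driver builds with cpu_to_be32()/bf_set().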
@@ -5017,7 +5308,6 @@ lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; lp = (uint32_t *) pcmd->virt; rpl = (RPL *) (lp + 1); - maxsize = be32_to_cpu(rpl->maxsize); /* We support only one port */ @@ -5836,6 +6126,16 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, if (newnode) lpfc_nlp_put(ndlp); break; + case ELS_CMD_RLS: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV RLS: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + + phba->fc_stat.elsRcvRLS++; + lpfc_els_rcv_rls(vport, elsiocb, ndlp); + if (newnode) + lpfc_nlp_put(ndlp); + break; case ELS_CMD_RPS: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV RPS: did:x%x/ste:x%x flg:x%x", @@ -5866,6 +6166,15 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, if (newnode) lpfc_nlp_put(ndlp); break; + case ELS_CMD_RTV: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV RTV: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + phba->fc_stat.elsRcvRTV++; + lpfc_els_rcv_rtv(vport, elsiocb, ndlp); + if (newnode) + lpfc_nlp_put(ndlp); + break; case ELS_CMD_RRQ: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV RRQ: did:x%x/ste:x%x flg:x%x", @@ -5876,6 +6185,16 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, if (newnode) lpfc_nlp_put(ndlp); break; + case ELS_CMD_ECHO: + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, + "RCV ECHO: did:x%x/ste:x%x flg:x%x", + did, vport->port_state, ndlp->nlp_flag); + + phba->fc_stat.elsRcvECHO++; + lpfc_els_rcv_echo(vport, elsiocb, ndlp); + if (newnode) + lpfc_nlp_put(ndlp); + break; default: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, "RCV ELS cmd: cmd:x%x did:x%x/ste:x%x", @@ -6170,6 +6489,8 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) default: /* Try to recover from this error */ + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_sli4_unreg_all_rpis(vport); lpfc_mbx_unreg_vpi(vport); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; @@ -6437,6 +6758,10 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, lpfc_unreg_rpi(vport, np); } lpfc_cleanup_pending_mbox(vport); + + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_sli4_unreg_all_rpis(vport); + lpfc_mbx_unreg_vpi(vport); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; @@ -6452,7 +6777,7 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * to update the MAC address. 
*/ lpfc_register_new_vport(phba, vport, ndlp); - return ; + goto out; } if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI) diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index a345dde16c8..a5d1695dac3 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -20,6 +20,7 @@ *******************************************************************/ #include <linux/blkdev.h> +#include <linux/delay.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/kthread.h> @@ -63,6 +64,7 @@ static uint8_t lpfcAlpaArray[] = { static void lpfc_disc_timeout_handler(struct lpfc_vport *); static void lpfc_disc_flush_list(struct lpfc_vport *vport); static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); +static int lpfc_fcf_inuse(struct lpfc_hba *); void lpfc_terminate_rport_io(struct fc_rport *rport) @@ -160,11 +162,17 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) return; } -/* - * This function is called from the worker thread when dev_loss_tmo - * expire. - */ -static void +/** + * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler + * @ndlp: Pointer to remote node object. + * + * This function is called from the worker thread when devloss timeout timer + * expires. For SLI4 host, this routine shall return 1 when at lease one + * remote node, including this @ndlp, is still in use of FCF; otherwise, this + * routine shall return 0 when there is no remote node is still in use of FCF + * when devloss timeout happened to this @ndlp. + **/ +static int lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) { struct lpfc_rport_data *rdata; @@ -175,17 +183,21 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) int put_node; int put_rport; int warn_on = 0; + int fcf_inuse = 0; rport = ndlp->rport; if (!rport) - return; + return fcf_inuse; rdata = rport->dd_data; name = (uint8_t *) &ndlp->nlp_portname; vport = ndlp->vport; phba = vport->phba; + if (phba->sli_rev == LPFC_SLI_REV4) + fcf_inuse = lpfc_fcf_inuse(phba); + lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, "rport devlosstmo:did:x%x type:x%x id:x%x", ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id); @@ -209,7 +221,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) lpfc_nlp_put(ndlp); if (put_rport) put_device(&rport->dev); - return; + return fcf_inuse; } if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) { @@ -220,7 +232,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID); - return; + return fcf_inuse; } if (ndlp->nlp_type & NLP_FABRIC) { @@ -233,7 +245,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) lpfc_nlp_put(ndlp); if (put_rport) put_device(&rport->dev); - return; + return fcf_inuse; } if (ndlp->nlp_sid != NLP_NO_SID) { @@ -280,6 +292,74 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) (ndlp->nlp_state != NLP_STE_PRLI_ISSUE)) lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); + return fcf_inuse; +} + +/** + * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler + * @phba: Pointer to hba context object. + * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler. + * @nlp_did: remote node identifer with devloss timeout. + * + * This function is called from the worker thread after invoking devloss + * timeout handler and releasing the reference count for the ndlp with + * which the devloss timeout was handled for SLI4 host. 
For the devloss + * timeout of the last remote node which had been in use of FCF, when this + * routine is invoked, it shall be guaranteed that none of the remote are + * in-use of FCF. When devloss timeout to the last remote using the FCF, + * if the FIP engine is neither in FCF table scan process nor roundrobin + * failover process, the in-use FCF shall be unregistered. If the FIP + * engine is in FCF discovery process, the devloss timeout state shall + * be set for either the FCF table scan process or roundrobin failover + * process to unregister the in-use FCF. + **/ +static void +lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse, + uint32_t nlp_did) +{ + /* If devloss timeout happened to a remote node when FCF had no + * longer been in-use, do nothing. + */ + if (!fcf_inuse) + return; + + if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) { + spin_lock_irq(&phba->hbalock); + if (phba->fcf.fcf_flag & FCF_DISCOVERY) { + if (phba->hba_flag & HBA_DEVLOSS_TMO) { + spin_unlock_irq(&phba->hbalock); + return; + } + phba->hba_flag |= HBA_DEVLOSS_TMO; + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2847 Last remote node (x%x) using " + "FCF devloss tmo\n", nlp_did); + } + if (phba->fcf.fcf_flag & FCF_REDISC_PROG) { + spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2868 Devloss tmo to FCF rediscovery " + "in progress\n"); + return; + } + if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) { + spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2869 Devloss tmo to idle FIP engine, " + "unreg in-use FCF and rescan.\n"); + /* Unregister in-use FCF and rescan */ + lpfc_unregister_fcf_rescan(phba); + return; + } + spin_unlock_irq(&phba->hbalock); + if (phba->hba_flag & FCF_TS_INPROG) + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2870 FCF table scan in progress\n"); + if (phba->hba_flag & FCF_RR_INPROG) + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2871 FLOGI roundrobin FCF failover " + "in progress\n"); + } lpfc_unregister_unused_fcf(phba); } @@ -408,6 +488,8 @@ lpfc_work_list_done(struct lpfc_hba *phba) struct lpfc_work_evt *evtp = NULL; struct lpfc_nodelist *ndlp; int free_evt; + int fcf_inuse; + uint32_t nlp_did; spin_lock_irq(&phba->hbalock); while (!list_empty(&phba->work_list)) { @@ -427,12 +509,17 @@ lpfc_work_list_done(struct lpfc_hba *phba) break; case LPFC_EVT_DEV_LOSS: ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); - lpfc_dev_loss_tmo_handler(ndlp); + fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp); free_evt = 0; /* decrement the node reference count held for * this queued work */ + nlp_did = ndlp->nlp_DID; lpfc_nlp_put(ndlp); + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_sli4_post_dev_loss_tmo_handler(phba, + fcf_inuse, + nlp_did); break; case LPFC_EVT_ONLINE: if (phba->link_state < LPFC_LINK_DOWN) @@ -707,6 +794,8 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) : NLP_EVT_DEVICE_RECOVERY); } if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) { + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_sli4_unreg_all_rpis(vport); lpfc_mbx_unreg_vpi(vport); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; @@ -1021,8 +1110,7 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) "2017 REG_FCFI mbxStatus error x%x " "HBA state x%x\n", mboxq->u.mb.mbxStatus, vport->port_state); - mempool_free(mboxq, phba->mbox_mem_pool); - return; + goto fail_out; } /* Start FCoE discovery by sending a FLOGI. 
*/ @@ -1031,20 +1119,30 @@ lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag |= FCF_REGISTERED; spin_unlock_irq(&phba->hbalock); + /* If there is a pending FCoE event, restart FCF table scan. */ - if (lpfc_check_pending_fcoe_event(phba, 1)) { - mempool_free(mboxq, phba->mbox_mem_pool); - return; - } + if (lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF)) + goto fail_out; + + /* Mark successful completion of FCF table scan */ spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); - phba->hba_flag &= ~FCF_DISC_INPROGRESS; - spin_unlock_irq(&phba->hbalock); - if (vport->port_state != LPFC_FLOGI) + phba->hba_flag &= ~FCF_TS_INPROG; + if (vport->port_state != LPFC_FLOGI) { + phba->hba_flag |= FCF_RR_INPROG; + spin_unlock_irq(&phba->hbalock); lpfc_initial_flogi(vport); + goto out; + } + spin_unlock_irq(&phba->hbalock); + goto out; +fail_out: + spin_lock_irq(&phba->hbalock); + phba->hba_flag &= ~FCF_RR_INPROG; + spin_unlock_irq(&phba->hbalock); +out: mempool_free(mboxq, phba->mbox_mem_pool); - return; } /** @@ -1241,10 +1339,9 @@ lpfc_register_fcf(struct lpfc_hba *phba) int rc; spin_lock_irq(&phba->hbalock); - /* If the FCF is not availabe do nothing. */ if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) { - phba->hba_flag &= ~FCF_DISC_INPROGRESS; + phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); spin_unlock_irq(&phba->hbalock); return; } @@ -1252,19 +1349,22 @@ lpfc_register_fcf(struct lpfc_hba *phba) /* The FCF is already registered, start discovery */ if (phba->fcf.fcf_flag & FCF_REGISTERED) { phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); - phba->hba_flag &= ~FCF_DISC_INPROGRESS; - spin_unlock_irq(&phba->hbalock); - if (phba->pport->port_state != LPFC_FLOGI) + phba->hba_flag &= ~FCF_TS_INPROG; + if (phba->pport->port_state != LPFC_FLOGI) { + phba->hba_flag |= FCF_RR_INPROG; + spin_unlock_irq(&phba->hbalock); lpfc_initial_flogi(phba->pport); + return; + } + spin_unlock_irq(&phba->hbalock); return; } spin_unlock_irq(&phba->hbalock); - fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, - GFP_KERNEL); + fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!fcf_mbxq) { spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~FCF_DISC_INPROGRESS; + phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); spin_unlock_irq(&phba->hbalock); return; } @@ -1275,7 +1375,7 @@ lpfc_register_fcf(struct lpfc_hba *phba) rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~FCF_DISC_INPROGRESS; + phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); spin_unlock_irq(&phba->hbalock); mempool_free(fcf_mbxq, phba->mbox_mem_pool); } @@ -1493,7 +1593,7 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) * FCF discovery, no need to restart FCF discovery. 
*/ if ((phba->link_state >= LPFC_LINK_UP) && - (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) + (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) return 0; lpfc_printf_log(phba, KERN_INFO, LOG_FIP, @@ -1517,14 +1617,14 @@ lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); } else { /* - * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS + * Do not continue FCF discovery and clear FCF_TS_INPROG * flag */ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, "2833 Stop FCF discovery process due to link " "state change (x%x)\n", phba->link_state); spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~FCF_DISC_INPROGRESS; + phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY); spin_unlock_irq(&phba->hbalock); } @@ -1729,6 +1829,65 @@ lpfc_sli4_fcf_record_match(struct lpfc_hba *phba, } /** + * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf + * @vport: Pointer to vport object. + * @fcf_index: index to next fcf. + * + * This function processing the roundrobin fcf failover to next fcf index. + * When this function is invoked, there will be a current fcf registered + * for flogi. + * Return: 0 for continue retrying flogi on currently registered fcf; + * 1 for stop flogi on currently registered fcf; + */ +int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index) +{ + struct lpfc_hba *phba = vport->phba; + int rc; + + if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { + spin_lock_irq(&phba->hbalock); + if (phba->hba_flag & HBA_DEVLOSS_TMO) { + spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2872 Devloss tmo with no eligible " + "FCF, unregister in-use FCF (x%x) " + "and rescan FCF table\n", + phba->fcf.current_rec.fcf_indx); + lpfc_unregister_fcf_rescan(phba); + goto stop_flogi_current_fcf; + } + /* Mark the end to FLOGI roundrobin failover */ + phba->hba_flag &= ~FCF_RR_INPROG; + /* Allow action to new fcf asynchronous event */ + phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); + spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2865 No FCF available, stop roundrobin FCF " + "failover and change port state:x%x/x%x\n", + phba->pport->port_state, LPFC_VPORT_UNKNOWN); + phba->pport->port_state = LPFC_VPORT_UNKNOWN; + goto stop_flogi_current_fcf; + } else { + lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS, + "2794 Try FLOGI roundrobin FCF failover to " + "(x%x)\n", fcf_index); + rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index); + if (rc) + lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, + "2761 FLOGI roundrobin FCF failover " + "failed (rc:x%x) to read FCF (x%x)\n", + rc, phba->fcf.current_rec.fcf_indx); + else + goto stop_flogi_current_fcf; + } + return 0; + +stop_flogi_current_fcf: + lpfc_can_disctmo(vport); + return 1; +} + +/** * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. * @phba: pointer to lpfc hba data structure. * @mboxq: pointer to mailbox object. 
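The roundrobin failover path above walks the eligible-FCF bitmask (phba->fcf.fcf_rr_bmask) to pick the next candidate index, and hands back LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) when nothing eligible remains so the caller can either retry the current FCF or unregister it and rescan. A minimal, self-contained C sketch of that wrap-around bitmask scan follows; the table size, word width, and helper names here are assumptions made for illustration only, not the driver's actual definitions.

    #include <stdint.h>
    #include <stdio.h>

    #define FCF_TBL_MAX    64          /* assumed table size for the sketch */
    #define FCF_NEXT_NONE  0xFFFF      /* sentinel: no eligible FCF left    */

    /*
     * Return the index of the next set bit in the eligibility mask after
     * 'cur', wrapping around once; FCF_NEXT_NONE if the mask is empty.
     */
    static uint16_t fcf_rr_next_index(const uint64_t *bmask, uint16_t cur)
    {
            uint16_t i, idx;

            for (i = 1; i <= FCF_TBL_MAX; i++) {
                    idx = (cur + i) % FCF_TBL_MAX;
                    if (bmask[idx / 64] & (1ULL << (idx % 64)))
                            return idx;
            }
            return FCF_NEXT_NONE;
    }

    int main(void)
    {
            uint64_t bmask[(FCF_TBL_MAX + 63) / 64] = { 0 };

            bmask[0] |= (1ULL << 3) | (1ULL << 17);  /* FCFs 3 and 17 eligible */

            printf("next after 3  -> %u\n", fcf_rr_next_index(bmask, 3));   /* 17 */
            printf("next after 17 -> %u\n", fcf_rr_next_index(bmask, 17));  /*  3 */
            return 0;
    }

In the driver itself eligibility is recorded with set_bit()/clear_bit() on fcf_rr_bmask (see the lpfc_sli4_fcf_rr_index_set/clear hunks later in this diff), and the devloss-timeout flag (HBA_DEVLOSS_TMO) additionally decides between retrying FLOGI on the current FCF and unregistering it for a fresh table scan.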
@@ -1756,7 +1915,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) int rc; /* If there is pending FCoE event restart FCF table scan */ - if (lpfc_check_pending_fcoe_event(phba, 0)) { + if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) { lpfc_sli4_mbox_cmd_free(phba, mboxq); return; } @@ -1765,12 +1924,12 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, &next_fcf_index); if (!new_fcf_record) { - lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, "2765 Mailbox command READ_FCF_RECORD " "failed to retrieve a FCF record.\n"); /* Let next new FCF event trigger fast failover */ spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~FCF_DISC_INPROGRESS; + phba->hba_flag &= ~FCF_TS_INPROG; spin_unlock_irq(&phba->hbalock); lpfc_sli4_mbox_cmd_free(phba, mboxq); return; @@ -1787,13 +1946,12 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) /* * If the fcf record does not match with connect list entries * read the next entry; otherwise, this is an eligible FCF - * record for round robin FCF failover. + * record for roundrobin FCF failover. */ if (!rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, - "2781 FCF record (x%x) failed FCF " - "connection list check, fcf_avail:x%x, " - "fcf_valid:x%x\n", + "2781 FCF (x%x) failed connection " + "list check: (x%x/x%x)\n", bf_get(lpfc_fcf_record_fcf_index, new_fcf_record), bf_get(lpfc_fcf_record_fcf_avail, @@ -1803,6 +1961,16 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) if ((phba->fcf.fcf_flag & FCF_IN_USE) && lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, new_fcf_record, LPFC_FCOE_IGNORE_VID)) { + if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) != + phba->fcf.current_rec.fcf_indx) { + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, + "2862 FCF (x%x) matches property " + "of in-use FCF (x%x)\n", + bf_get(lpfc_fcf_record_fcf_index, + new_fcf_record), + phba->fcf.current_rec.fcf_indx); + goto read_next_fcf; + } /* * In case the current in-use FCF record becomes * invalid/unavailable during FCF discovery that @@ -1813,9 +1981,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, "2835 Invalid in-use FCF " - "record (x%x) reported, " - "entering fast FCF failover " - "mode scanning.\n", + "(x%x), enter FCF failover " + "table scan.\n", phba->fcf.current_rec.fcf_indx); spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag |= FCF_REDISC_FOV; @@ -1844,22 +2011,29 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) if (phba->fcf.fcf_flag & FCF_IN_USE) { if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, new_fcf_record, vlan_id)) { - phba->fcf.fcf_flag |= FCF_AVAILABLE; - if (phba->fcf.fcf_flag & FCF_REDISC_PEND) - /* Stop FCF redisc wait timer if pending */ - __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); - else if (phba->fcf.fcf_flag & FCF_REDISC_FOV) - /* If in fast failover, mark it's completed */ - phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; - spin_unlock_irq(&phba->hbalock); - lpfc_printf_log(phba, KERN_INFO, LOG_FIP, - "2836 The new FCF record (x%x) " - "matches the in-use FCF record " - "(x%x)\n", - phba->fcf.current_rec.fcf_indx, + if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) == + phba->fcf.current_rec.fcf_indx) { + phba->fcf.fcf_flag |= FCF_AVAILABLE; + if 
(phba->fcf.fcf_flag & FCF_REDISC_PEND) + /* Stop FCF redisc wait timer */ + __lpfc_sli4_stop_fcf_redisc_wait_timer( + phba); + else if (phba->fcf.fcf_flag & FCF_REDISC_FOV) + /* Fast failover, mark completed */ + phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; + spin_unlock_irq(&phba->hbalock); + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2836 New FCF matches in-use " + "FCF (x%x)\n", + phba->fcf.current_rec.fcf_indx); + goto out; + } else + lpfc_printf_log(phba, KERN_ERR, LOG_FIP, + "2863 New FCF (x%x) matches " + "property of in-use FCF (x%x)\n", bf_get(lpfc_fcf_record_fcf_index, - new_fcf_record)); - goto out; + new_fcf_record), + phba->fcf.current_rec.fcf_indx); } /* * Read next FCF record from HBA searching for the matching @@ -1953,8 +2127,8 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) */ if (fcf_rec) { lpfc_printf_log(phba, KERN_INFO, LOG_FIP, - "2840 Update current FCF record " - "with initial FCF record (x%x)\n", + "2840 Update initial FCF candidate " + "with FCF (x%x)\n", bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, @@ -1984,20 +2158,28 @@ read_next_fcf: */ if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, - "2782 No suitable FCF record " - "found during this round of " - "post FCF rediscovery scan: " - "fcf_evt_tag:x%x, fcf_index: " - "x%x\n", + "2782 No suitable FCF found: " + "(x%x/x%x)\n", phba->fcoe_eventtag_at_fcf_scan, bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); + spin_lock_irq(&phba->hbalock); + if (phba->hba_flag & HBA_DEVLOSS_TMO) { + phba->hba_flag &= ~FCF_TS_INPROG; + spin_unlock_irq(&phba->hbalock); + /* Unregister in-use FCF and rescan */ + lpfc_printf_log(phba, KERN_INFO, + LOG_FIP, + "2864 On devloss tmo " + "unreg in-use FCF and " + "rescan FCF table\n"); + lpfc_unregister_fcf_rescan(phba); + return; + } /* - * Let next new FCF event trigger fast - * failover + * Let next new FCF event trigger fast failover */ - spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~FCF_DISC_INPROGRESS; + phba->hba_flag &= ~FCF_TS_INPROG; spin_unlock_irq(&phba->hbalock); return; } @@ -2015,9 +2197,8 @@ read_next_fcf: /* Replace in-use record with the new record */ lpfc_printf_log(phba, KERN_INFO, LOG_FIP, - "2842 Replace the current in-use " - "FCF record (x%x) with failover FCF " - "record (x%x)\n", + "2842 Replace in-use FCF (x%x) " + "with failover FCF (x%x)\n", phba->fcf.current_rec.fcf_indx, phba->fcf.failover_rec.fcf_indx); memcpy(&phba->fcf.current_rec, @@ -2029,15 +2210,8 @@ read_next_fcf: * FCF failover. */ spin_lock_irq(&phba->hbalock); - phba->fcf.fcf_flag &= - ~(FCF_REDISC_FOV | FCF_REDISC_RRU); + phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; spin_unlock_irq(&phba->hbalock); - /* - * Set up the initial registered FCF index for FLOGI - * round robin FCF failover. - */ - phba->fcf.fcf_rr_init_indx = - phba->fcf.failover_rec.fcf_indx; /* Register to the new FCF record */ lpfc_register_fcf(phba); } else { @@ -2069,28 +2243,6 @@ read_next_fcf: LPFC_FCOE_FCF_GET_FIRST); return; } - - /* - * Otherwise, initial scan or post linkdown rescan, - * register with the best FCF record found so far - * through the FCF scanning process. - */ - - /* - * Mark the initial FCF discovery completed and - * the start of the first round of the roundrobin - * FCF failover. 
- */ - spin_lock_irq(&phba->hbalock); - phba->fcf.fcf_flag &= - ~(FCF_INIT_DISC | FCF_REDISC_RRU); - spin_unlock_irq(&phba->hbalock); - /* - * Set up the initial registered FCF index for FLOGI - * round robin FCF failover - */ - phba->fcf.fcf_rr_init_indx = - phba->fcf.current_rec.fcf_indx; /* Register to the new FCF record */ lpfc_register_fcf(phba); } @@ -2106,11 +2258,11 @@ out: } /** - * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler + * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler * @phba: pointer to lpfc hba data structure. * @mboxq: pointer to mailbox object. * - * This is the callback function for FLOGI failure round robin FCF failover + * This is the callback function for FLOGI failure roundrobin FCF failover * read FCF record mailbox command from the eligible FCF record bmask for * performing the failover. If the FCF read back is not valid/available, it * fails through to retrying FLOGI to the currently registered FCF again. @@ -2125,17 +2277,18 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { struct fcf_record *new_fcf_record; uint32_t boot_flag, addr_mode; - uint16_t next_fcf_index; + uint16_t next_fcf_index, fcf_index; uint16_t current_fcf_index; uint16_t vlan_id; + int rc; - /* If link state is not up, stop the round robin failover process */ + /* If link state is not up, stop the roundrobin failover process */ if (phba->link_state < LPFC_LINK_UP) { spin_lock_irq(&phba->hbalock); phba->fcf.fcf_flag &= ~FCF_DISCOVERY; + phba->hba_flag &= ~FCF_RR_INPROG; spin_unlock_irq(&phba->hbalock); - lpfc_sli4_mbox_cmd_free(phba, mboxq); - return; + goto out; } /* Parse the FCF record from the non-embedded mailbox command */ @@ -2145,23 +2298,47 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, "2766 Mailbox command READ_FCF_RECORD " "failed to retrieve a FCF record.\n"); - goto out; + goto error_out; } /* Get the needed parameters from FCF record */ - lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, - &addr_mode, &vlan_id); + rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, + &addr_mode, &vlan_id); /* Log the FCF record information if turned on */ lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, next_fcf_index); + fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); + if (!rc) { + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2848 Remove ineligible FCF (x%x) from " + "from roundrobin bmask\n", fcf_index); + /* Clear roundrobin bmask bit for ineligible FCF */ + lpfc_sli4_fcf_rr_index_clear(phba, fcf_index); + /* Perform next round of roundrobin FCF failover */ + fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); + rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index); + if (rc) + goto out; + goto error_out; + } + + if (fcf_index == phba->fcf.current_rec.fcf_indx) { + lpfc_printf_log(phba, KERN_INFO, LOG_FIP, + "2760 Perform FLOGI roundrobin FCF failover: " + "FCF (x%x) back to FCF (x%x)\n", + phba->fcf.current_rec.fcf_indx, fcf_index); + /* Wait 500 ms before retrying FLOGI to current FCF */ + msleep(500); + lpfc_initial_flogi(phba->pport); + goto out; + } + /* Upload new FCF record to the failover FCF record */ lpfc_printf_log(phba, KERN_INFO, LOG_FIP, - "2834 Update the current FCF record (x%x) " - "with the next FCF record (x%x)\n", - phba->fcf.failover_rec.fcf_indx, - bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); + "2834 Update current FCF (x%x) with new FCF 
(x%x)\n", + phba->fcf.failover_rec.fcf_indx, fcf_index); spin_lock_irq(&phba->hbalock); __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec, new_fcf_record, addr_mode, vlan_id, @@ -2178,14 +2355,13 @@ lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) sizeof(struct lpfc_fcf_rec)); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, - "2783 FLOGI round robin FCF failover from FCF " - "(x%x) to FCF (x%x).\n", - current_fcf_index, - bf_get(lpfc_fcf_record_fcf_index, new_fcf_record)); + "2783 Perform FLOGI roundrobin FCF failover: FCF " + "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index); +error_out: + lpfc_register_fcf(phba); out: lpfc_sli4_mbox_cmd_free(phba, mboxq); - lpfc_register_fcf(phba); } /** @@ -2194,10 +2370,10 @@ out: * @mboxq: pointer to mailbox object. * * This is the callback function of read FCF record mailbox command for - * updating the eligible FCF bmask for FLOGI failure round robin FCF + * updating the eligible FCF bmask for FLOGI failure roundrobin FCF * failover when a new FCF event happened. If the FCF read back is * valid/available and it passes the connection list check, it updates - * the bmask for the eligible FCF record for round robin failover. + * the bmask for the eligible FCF record for roundrobin failover. */ void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) @@ -2639,7 +2815,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la) * and get the FCF Table. */ spin_lock_irq(&phba->hbalock); - if (phba->hba_flag & FCF_DISC_INPROGRESS) { + if (phba->hba_flag & FCF_TS_INPROG) { spin_unlock_irq(&phba->hbalock); return; } @@ -3906,6 +4082,11 @@ lpfc_unreg_all_rpis(struct lpfc_vport *vport) LPFC_MBOXQ_t *mbox; int rc; + if (phba->sli_rev == LPFC_SLI_REV4) { + lpfc_sli4_unreg_all_rpis(vport); + return; + } + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (mbox) { lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox); @@ -3992,6 +4173,16 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) } spin_lock_irq(&phba->hbalock); + /* Cleanup REG_LOGIN completions which are not yet processed */ + list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) { + if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) || + (ndlp != (struct lpfc_nodelist *) mb->context2)) + continue; + + mb->context2 = NULL; + mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + } + list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == (struct lpfc_nodelist *) mb->context2)) { @@ -5170,6 +5361,8 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba) if (ndlp) lpfc_cancel_retry_delay_tmo(vports[i], ndlp); lpfc_cleanup_pending_mbox(vports[i]); + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_sli4_unreg_all_rpis(vports[i]); lpfc_mbx_unreg_vpi(vports[i]); shost = lpfc_shost_from_vport(vports[i]); spin_lock_irq(shost->host_lock); diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index a631647051d..9b833345646 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -861,6 +861,47 @@ typedef struct _RPS_RSP { /* Structure is in Big Endian format */ uint32_t crcCnt; } RPS_RSP; +struct RLS { /* Structure is in Big Endian format */ + uint32_t rls; +#define rls_rsvd_SHIFT 24 +#define rls_rsvd_MASK 0x000000ff +#define rls_rsvd_WORD rls +#define rls_did_SHIFT 0 +#define rls_did_MASK 0x00ffffff +#define rls_did_WORD rls +}; + +struct RLS_RSP { /* Structure is in Big Endian format */ + uint32_t linkFailureCnt; + uint32_t lossSyncCnt; + uint32_t 
lossSignalCnt; + uint32_t primSeqErrCnt; + uint32_t invalidXmitWord; + uint32_t crcCnt; +}; + +struct RTV_RSP { /* Structure is in Big Endian format */ + uint32_t ratov; + uint32_t edtov; + uint32_t qtov; +#define qtov_rsvd0_SHIFT 28 +#define qtov_rsvd0_MASK 0x0000000f +#define qtov_rsvd0_WORD qtov /* reserved */ +#define qtov_edtovres_SHIFT 27 +#define qtov_edtovres_MASK 0x00000001 +#define qtov_edtovres_WORD qtov /* E_D_TOV Resolution */ +#define qtov__rsvd1_SHIFT 19 +#define qtov_rsvd1_MASK 0x0000003f +#define qtov_rsvd1_WORD qtov /* reserved */ +#define qtov_rttov_SHIFT 18 +#define qtov_rttov_MASK 0x00000001 +#define qtov_rttov_WORD qtov /* R_T_TOV value */ +#define qtov_rsvd2_SHIFT 0 +#define qtov_rsvd2_MASK 0x0003ffff +#define qtov_rsvd2_WORD qtov /* reserved */ +}; + + typedef struct _RPL { /* Structure is in Big Endian format */ uint32_t maxsize; uint32_t index; diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index bbdcf96800f..6e4bc34e1d0 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -424,79 +424,6 @@ struct lpfc_rcqe { #define FCOE_SOFn3 0x36 }; -struct lpfc_wqe_generic{ - struct ulp_bde64 bde; - uint32_t word3; - uint32_t word4; - uint32_t word5; - uint32_t word6; -#define lpfc_wqe_gen_context_SHIFT 16 -#define lpfc_wqe_gen_context_MASK 0x0000FFFF -#define lpfc_wqe_gen_context_WORD word6 -#define lpfc_wqe_gen_xri_SHIFT 0 -#define lpfc_wqe_gen_xri_MASK 0x0000FFFF -#define lpfc_wqe_gen_xri_WORD word6 - uint32_t word7; -#define lpfc_wqe_gen_lnk_SHIFT 23 -#define lpfc_wqe_gen_lnk_MASK 0x00000001 -#define lpfc_wqe_gen_lnk_WORD word7 -#define lpfc_wqe_gen_erp_SHIFT 22 -#define lpfc_wqe_gen_erp_MASK 0x00000001 -#define lpfc_wqe_gen_erp_WORD word7 -#define lpfc_wqe_gen_pu_SHIFT 20 -#define lpfc_wqe_gen_pu_MASK 0x00000003 -#define lpfc_wqe_gen_pu_WORD word7 -#define lpfc_wqe_gen_class_SHIFT 16 -#define lpfc_wqe_gen_class_MASK 0x00000007 -#define lpfc_wqe_gen_class_WORD word7 -#define lpfc_wqe_gen_command_SHIFT 8 -#define lpfc_wqe_gen_command_MASK 0x000000FF -#define lpfc_wqe_gen_command_WORD word7 -#define lpfc_wqe_gen_status_SHIFT 4 -#define lpfc_wqe_gen_status_MASK 0x0000000F -#define lpfc_wqe_gen_status_WORD word7 -#define lpfc_wqe_gen_ct_SHIFT 2 -#define lpfc_wqe_gen_ct_MASK 0x00000003 -#define lpfc_wqe_gen_ct_WORD word7 - uint32_t abort_tag; - uint32_t word9; -#define lpfc_wqe_gen_request_tag_SHIFT 0 -#define lpfc_wqe_gen_request_tag_MASK 0x0000FFFF -#define lpfc_wqe_gen_request_tag_WORD word9 - uint32_t word10; -#define lpfc_wqe_gen_ccp_SHIFT 24 -#define lpfc_wqe_gen_ccp_MASK 0x000000FF -#define lpfc_wqe_gen_ccp_WORD word10 -#define lpfc_wqe_gen_ccpe_SHIFT 23 -#define lpfc_wqe_gen_ccpe_MASK 0x00000001 -#define lpfc_wqe_gen_ccpe_WORD word10 -#define lpfc_wqe_gen_pv_SHIFT 19 -#define lpfc_wqe_gen_pv_MASK 0x00000001 -#define lpfc_wqe_gen_pv_WORD word10 -#define lpfc_wqe_gen_pri_SHIFT 16 -#define lpfc_wqe_gen_pri_MASK 0x00000007 -#define lpfc_wqe_gen_pri_WORD word10 - uint32_t word11; -#define lpfc_wqe_gen_cq_id_SHIFT 16 -#define lpfc_wqe_gen_cq_id_MASK 0x0000FFFF -#define lpfc_wqe_gen_cq_id_WORD word11 -#define LPFC_WQE_CQ_ID_DEFAULT 0xffff -#define lpfc_wqe_gen_wqec_SHIFT 7 -#define lpfc_wqe_gen_wqec_MASK 0x00000001 -#define lpfc_wqe_gen_wqec_WORD word11 -#define ELS_ID_FLOGI 3 -#define ELS_ID_FDISC 2 -#define ELS_ID_LOGO 1 -#define ELS_ID_DEFAULT 0 -#define lpfc_wqe_gen_els_id_SHIFT 4 -#define lpfc_wqe_gen_els_id_MASK 0x00000003 -#define lpfc_wqe_gen_els_id_WORD word11 -#define lpfc_wqe_gen_cmd_type_SHIFT 0 -#define 
lpfc_wqe_gen_cmd_type_MASK 0x0000000F -#define lpfc_wqe_gen_cmd_type_WORD word11 - uint32_t payload[4]; -}; - struct lpfc_rqe { uint32_t address_hi; uint32_t address_lo; @@ -2279,9 +2206,36 @@ struct wqe_common { #define wqe_reqtag_MASK 0x0000FFFF #define wqe_reqtag_WORD word9 #define wqe_rcvoxid_SHIFT 16 -#define wqe_rcvoxid_MASK 0x0000FFFF -#define wqe_rcvoxid_WORD word9 +#define wqe_rcvoxid_MASK 0x0000FFFF +#define wqe_rcvoxid_WORD word9 uint32_t word10; +#define wqe_ebde_cnt_SHIFT 0 +#define wqe_ebde_cnt_MASK 0x00000007 +#define wqe_ebde_cnt_WORD word10 +#define wqe_lenloc_SHIFT 7 +#define wqe_lenloc_MASK 0x00000003 +#define wqe_lenloc_WORD word10 +#define LPFC_WQE_LENLOC_NONE 0 +#define LPFC_WQE_LENLOC_WORD3 1 +#define LPFC_WQE_LENLOC_WORD12 2 +#define LPFC_WQE_LENLOC_WORD4 3 +#define wqe_qosd_SHIFT 9 +#define wqe_qosd_MASK 0x00000001 +#define wqe_qosd_WORD word10 +#define wqe_xbl_SHIFT 11 +#define wqe_xbl_MASK 0x00000001 +#define wqe_xbl_WORD word10 +#define wqe_iod_SHIFT 13 +#define wqe_iod_MASK 0x00000001 +#define wqe_iod_WORD word10 +#define LPFC_WQE_IOD_WRITE 0 +#define LPFC_WQE_IOD_READ 1 +#define wqe_dbde_SHIFT 14 +#define wqe_dbde_MASK 0x00000001 +#define wqe_dbde_WORD word10 +#define wqe_wqes_SHIFT 15 +#define wqe_wqes_MASK 0x00000001 +#define wqe_wqes_WORD word10 #define wqe_pri_SHIFT 16 #define wqe_pri_MASK 0x00000007 #define wqe_pri_WORD word10 @@ -2295,18 +2249,26 @@ struct wqe_common { #define wqe_ccpe_MASK 0x00000001 #define wqe_ccpe_WORD word10 #define wqe_ccp_SHIFT 24 -#define wqe_ccp_MASK 0x000000ff -#define wqe_ccp_WORD word10 +#define wqe_ccp_MASK 0x000000ff +#define wqe_ccp_WORD word10 uint32_t word11; -#define wqe_cmd_type_SHIFT 0 -#define wqe_cmd_type_MASK 0x0000000f -#define wqe_cmd_type_WORD word11 -#define wqe_wqec_SHIFT 7 -#define wqe_wqec_MASK 0x00000001 -#define wqe_wqec_WORD word11 -#define wqe_cqid_SHIFT 16 -#define wqe_cqid_MASK 0x0000ffff -#define wqe_cqid_WORD word11 +#define wqe_cmd_type_SHIFT 0 +#define wqe_cmd_type_MASK 0x0000000f +#define wqe_cmd_type_WORD word11 +#define wqe_els_id_SHIFT 4 +#define wqe_els_id_MASK 0x00000003 +#define wqe_els_id_WORD word11 +#define LPFC_ELS_ID_FLOGI 3 +#define LPFC_ELS_ID_FDISC 2 +#define LPFC_ELS_ID_LOGO 1 +#define LPFC_ELS_ID_DEFAULT 0 +#define wqe_wqec_SHIFT 7 +#define wqe_wqec_MASK 0x00000001 +#define wqe_wqec_WORD word11 +#define wqe_cqid_SHIFT 16 +#define wqe_cqid_MASK 0x0000ffff +#define wqe_cqid_WORD word11 +#define LPFC_WQE_CQ_ID_DEFAULT 0xffff }; struct wqe_did { @@ -2325,6 +2287,15 @@ struct wqe_did { #define wqe_xmit_bls_xo_WORD word5 }; +struct lpfc_wqe_generic{ + struct ulp_bde64 bde; + uint32_t word3; + uint32_t word4; + uint32_t word5; + struct wqe_common wqe_com; + uint32_t payload[4]; +}; + struct els_request64_wqe { struct ulp_bde64 bde; uint32_t payload_len; @@ -2356,9 +2327,9 @@ struct els_request64_wqe { struct xmit_els_rsp64_wqe { struct ulp_bde64 bde; - uint32_t rsvd3; + uint32_t response_payload_len; uint32_t rsvd4; - struct wqe_did wqe_dest; + struct wqe_did wqe_dest; struct wqe_common wqe_com; /* words 6-11 */ uint32_t rsvd_12_15[4]; }; @@ -2427,7 +2398,7 @@ struct wqe_rctl_dfctl { struct xmit_seq64_wqe { struct ulp_bde64 bde; - uint32_t paylaod_offset; + uint32_t rsvd3; uint32_t relative_offset; struct wqe_rctl_dfctl wge_ctl; struct wqe_common wqe_com; /* words 6-11 */ @@ -2437,7 +2408,7 @@ struct xmit_seq64_wqe { }; struct xmit_bcast64_wqe { struct ulp_bde64 bde; - uint32_t paylaod_len; + uint32_t seq_payload_len; uint32_t rsvd4; struct wqe_rctl_dfctl wge_ctl; /* word 5 */ struct 
wqe_common wqe_com; /* words 6-11 */ @@ -2446,8 +2417,8 @@ struct xmit_bcast64_wqe { struct gen_req64_wqe { struct ulp_bde64 bde; - uint32_t command_len; - uint32_t payload_len; + uint32_t request_payload_len; + uint32_t relative_offset; struct wqe_rctl_dfctl wge_ctl; /* word 5 */ struct wqe_common wqe_com; /* words 6-11 */ uint32_t rsvd_12_15[4]; @@ -2480,7 +2451,7 @@ struct abort_cmd_wqe { struct fcp_iwrite64_wqe { struct ulp_bde64 bde; - uint32_t payload_len; + uint32_t payload_offset_len; uint32_t total_xfer_len; uint32_t initial_xfer_len; struct wqe_common wqe_com; /* words 6-11 */ @@ -2489,7 +2460,7 @@ struct fcp_iwrite64_wqe { struct fcp_iread64_wqe { struct ulp_bde64 bde; - uint32_t payload_len; /* word 3 */ + uint32_t payload_offset_len; /* word 3 */ uint32_t total_xfer_len; /* word 4 */ uint32_t rsrvd5; /* word 5 */ struct wqe_common wqe_com; /* words 6-11 */ @@ -2497,10 +2468,12 @@ struct fcp_iread64_wqe { }; struct fcp_icmnd64_wqe { - struct ulp_bde64 bde; /* words 0-2 */ - uint32_t rsrvd[3]; /* words 3-5 */ + struct ulp_bde64 bde; /* words 0-2 */ + uint32_t rsrvd3; /* word 3 */ + uint32_t rsrvd4; /* word 4 */ + uint32_t rsrvd5; /* word 5 */ struct wqe_common wqe_com; /* words 6-11 */ - uint32_t rsvd_12_15[4]; /* word 12-15 */ + uint32_t rsvd_12_15[4]; /* word 12-15 */ }; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 295c7ddb36c..b3065791f30 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -813,6 +813,7 @@ lpfc_hba_down_post_s3(struct lpfc_hba *phba) return 0; } + /** * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset * @phba: pointer to lpfc HBA data structure. @@ -2234,10 +2235,9 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport) void __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) { - /* Clear pending FCF rediscovery wait and failover in progress flags */ - phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND | - FCF_DEAD_DISC | - FCF_ACVL_DISC); + /* Clear pending FCF rediscovery wait flag */ + phba->fcf.fcf_flag &= ~FCF_REDISC_PEND; + /* Now, try to stop the timer */ del_timer(&phba->fcf.redisc_wait); } @@ -2261,6 +2261,8 @@ lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba) return; } __lpfc_sli4_stop_fcf_redisc_wait_timer(phba); + /* Clear failover in progress flags */ + phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC); spin_unlock_irq(&phba->hbalock); } @@ -2935,8 +2937,7 @@ lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr) phba->fcf.fcf_flag |= FCF_REDISC_EVT; spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, - "2776 FCF rediscover wait timer expired, post " - "a worker thread event for FCF table scan\n"); + "2776 FCF rediscover quiescent timer expired\n"); /* wake up worker thread */ lpfc_worker_wake_up(phba); } @@ -3311,35 +3312,34 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF) lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, - "2546 New FCF found event: " - "evt_tag:x%x, fcf_index:x%x\n", + "2546 New FCF event, evt_tag:x%x, " + "index:x%x\n", acqe_fcoe->event_tag, acqe_fcoe->index); else lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_DISCOVERY, - "2788 FCF parameter modified event: " - "evt_tag:x%x, fcf_index:x%x\n", + "2788 FCF param modified event, " + "evt_tag:x%x, index:x%x\n", acqe_fcoe->event_tag, acqe_fcoe->index); if (phba->fcf.fcf_flag & FCF_DISCOVERY) { /* * During period of FCF discovery, read the FCF * table record indexed by the event to update - * FCF round robin 
failover eligible FCF bmask. + * FCF roundrobin failover eligible FCF bmask. */ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, - "2779 Read new FCF record with " - "fcf_index:x%x for updating FCF " - "round robin failover bmask\n", + "2779 Read FCF (x%x) for updating " + "roundrobin FCF failover bmask\n", acqe_fcoe->index); rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index); } /* If the FCF discovery is in progress, do nothing. */ spin_lock_irq(&phba->hbalock); - if (phba->hba_flag & FCF_DISC_INPROGRESS) { + if (phba->hba_flag & FCF_TS_INPROG) { spin_unlock_irq(&phba->hbalock); break; } @@ -3358,15 +3358,15 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, /* Otherwise, scan the entire FCF table and re-discover SAN */ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, - "2770 Start FCF table scan due to new FCF " - "event: evt_tag:x%x, fcf_index:x%x\n", + "2770 Start FCF table scan per async FCF " + "event, evt_tag:x%x, index:x%x\n", acqe_fcoe->event_tag, acqe_fcoe->index); rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); if (rc) lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, "2547 Issue FCF scan read FCF mailbox " - "command failed 0x%x\n", rc); + "command failed (x%x)\n", rc); break; case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL: @@ -3378,9 +3378,8 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, case LPFC_FCOE_EVENT_TYPE_FCF_DEAD: lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, - "2549 FCF disconnected from network index 0x%x" - " tag 0x%x\n", acqe_fcoe->index, - acqe_fcoe->event_tag); + "2549 FCF (x%x) disconnected from network, " + "tag:x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag); /* * If we are in the middle of FCF failover process, clear * the corresponding FCF bit in the roundrobin bitmap. @@ -3494,9 +3493,8 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba, spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, - "2773 Start FCF fast failover due " - "to CVL event: evt_tag:x%x\n", - acqe_fcoe->event_tag); + "2773 Start FCF failover per CVL, " + "evt_tag:x%x\n", acqe_fcoe->event_tag); rc = lpfc_sli4_redisc_fcf_table(phba); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_FIP | @@ -3646,8 +3644,7 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba) /* Scan FCF table from the first entry to re-discover SAN */ lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, - "2777 Start FCF table scan after FCF " - "rediscovery quiescent period over\n"); + "2777 Start post-quiescent FCF table scan\n"); rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); if (rc) lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY, @@ -4165,7 +4162,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_free_active_sgl; } - /* Allocate eligible FCF bmask memory for FCF round robin failover */ + /* Allocate eligible FCF bmask memory for FCF roundrobin failover */ longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG; phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL); @@ -7271,6 +7268,51 @@ lpfc_sli4_unset_hba(struct lpfc_hba *phba) } /** + * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy + * @phba: Pointer to HBA context object. + * + * This function is called in the SLI4 code path to wait for completion + * of device's XRIs exchange busy. 
It will check the XRI exchange busy + * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after + * that, it will check the XRI exchange busy on outstanding FCP and ELS + * I/Os every 30 seconds, log error message, and wait forever. Only when + * all XRI exchange busy complete, the driver unload shall proceed with + * invoking the function reset ioctl mailbox command to the CNA and the + * the rest of the driver unload resource release. + **/ +static void +lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) +{ + int wait_time = 0; + int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); + int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); + + while (!fcp_xri_cmpl || !els_xri_cmpl) { + if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { + if (!fcp_xri_cmpl) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2877 FCP XRI exchange busy " + "wait time: %d seconds.\n", + wait_time/1000); + if (!els_xri_cmpl) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2878 ELS XRI exchange busy " + "wait time: %d seconds.\n", + wait_time/1000); + msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2); + wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2; + } else { + msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1); + wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1; + } + fcp_xri_cmpl = + list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list); + els_xri_cmpl = + list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); + } +} + +/** * lpfc_sli4_hba_unset - Unset the fcoe hba * @phba: Pointer to HBA context object. * @@ -7315,6 +7357,12 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba) spin_unlock_irq(&phba->hbalock); } + /* Abort all iocbs associated with the hba */ + lpfc_sli_hba_iocb_abort(phba); + + /* Wait for completion of device XRI exchange busy */ + lpfc_sli4_xri_exchange_busy_wait(phba); + /* Disable PCI subsystem interrupt */ lpfc_sli4_disable_intr(phba); diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 0dfa310cd60..62d0957e1d4 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -797,6 +797,34 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi, } /** + * lpfc_sli4_unreg_all_rpis - unregister all RPIs for a vport on SLI4 HBA. + * @vport: pointer to a vport object. + * + * This routine sends mailbox command to unregister all active RPIs for + * a vport. + **/ +void +lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport) +{ + struct lpfc_hba *phba = vport->phba; + LPFC_MBOXQ_t *mbox; + int rc; + + mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (mbox) { + lpfc_unreg_login(phba, vport->vpi, + vport->vpi + phba->vpi_base, mbox); + mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000 ; + mbox->vport = vport; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + mbox->context1 = NULL; + rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) + mempool_free(mbox, phba->mbox_mem_pool); + } +} + +/** * lpfc_reg_vpi - Prepare a mailbox command for registering vport identifier * @phba: pointer to lpfc hba data structure. * @vpi: virtual N_Port identifier. 
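The new lpfc_sli4_xri_exchange_busy_wait() shown above drains outstanding FCP and ELS XRIs before unload by polling the two abort lists: quickly (the T1 period, described as 10 ms) for a bounded window, then slowly (the T2 period, described as 30 s) with an error log, without ever giving up. The stand-alone C sketch below mirrors that escalating-poll shape; all names and interval values are illustrative assumptions taken from the routine's comment, not the driver's LPFC_XRI_EXCH_BUSY_WAIT_* constants.

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    #define POLL_FAST_MS   10      /* fast poll period (assumed)      */
    #define POLL_SLOW_MS   30000   /* slow poll period (assumed)      */
    #define FAST_PHASE_MS  10000   /* how long to keep polling fast   */

    static void sleep_ms(int ms)
    {
            struct timespec ts = { .tv_sec = ms / 1000,
                                   .tv_nsec = (long)(ms % 1000) * 1000000L };
            nanosleep(&ts, NULL);
    }

    /*
     * Poll 'quiesced' until it reports true: every POLL_FAST_MS for the
     * first FAST_PHASE_MS, then every POLL_SLOW_MS with a warning,
     * never giving up -- the escalating wait described above.
     */
    static void wait_for_quiesce(bool (*quiesced)(void))
    {
            int waited_ms = 0;

            while (!quiesced()) {
                    if (waited_ms > FAST_PHASE_MS) {
                            fprintf(stderr, "exchanges still busy after %d s\n",
                                    waited_ms / 1000);
                            sleep_ms(POLL_SLOW_MS);
                            waited_ms += POLL_SLOW_MS;
                    } else {
                            sleep_ms(POLL_FAST_MS);
                            waited_ms += POLL_FAST_MS;
                    }
            }
    }

    /* Toy predicate so the sketch runs to completion on its own. */
    static int countdown = 5;
    static bool demo_quiesced(void)
    {
            return --countdown <= 0;
    }

    int main(void)
    {
            wait_for_quiesce(demo_quiesced);
            puts("all exchanges drained");
            return 0;
    }

The same shape, cheap fast polling first and then infrequent polling with loud logging, keeps unload responsive in the common case while still surfacing an exchange that never completes.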
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 3a658953486..581837b3c71 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -169,6 +169,7 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) spin_lock_irqsave(shost->host_lock, flags); if (!vport->stat_data_enabled || vport->stat_data_blocked || + !pnode || !pnode->lat_data || (phba->bucket_type == LPFC_NO_BUCKET)) { spin_unlock_irqrestore(shost->host_lock, flags); @@ -2040,6 +2041,9 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport, struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode; unsigned long flags; + if (!pnode || !NLP_CHK_NODE_ACT(pnode)) + return; + /* If there is queuefull or busy condition send a scsi event */ if ((cmnd->result == SAM_STAT_TASK_SET_FULL) || (cmnd->result == SAM_STAT_BUSY)) { @@ -2895,7 +2899,7 @@ void lpfc_poll_timeout(unsigned long ptr) * SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily. **/ static int -lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) +lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) { struct Scsi_Host *shost = cmnd->device->host; struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; @@ -3056,6 +3060,8 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) return 0; } +static DEF_SCSI_QCMD(lpfc_queuecommand) + /** * lpfc_abort_handler - scsi_host_template eh_abort_handler entry point * @cmnd: Pointer to scsi_cmnd data structure. @@ -3226,10 +3232,11 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, struct lpfc_scsi_buf *lpfc_cmd; struct lpfc_iocbq *iocbq; struct lpfc_iocbq *iocbqrsp; + struct lpfc_nodelist *pnode = rdata->pnode; int ret; int status; - if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode)) + if (!pnode || !NLP_CHK_NODE_ACT(pnode)) return FAILED; lpfc_cmd = lpfc_get_scsi_buf(phba); @@ -3256,7 +3263,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, "0702 Issue %s to TGT %d LUN %d " "rpi x%x nlp_flag x%x\n", lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, - rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag); + pnode->nlp_rpi, pnode->nlp_flag); status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING, iocbq, iocbqrsp, lpfc_cmd->timeout); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 0d1e187b005..554efa6623f 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -95,7 +95,7 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe) return -ENOMEM; /* set consumption flag every once in a while */ if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL)) - bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1); + bf_set(wqe_wqec, &wqe->generic.wqe_com, 1); lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size); @@ -1735,6 +1735,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) struct lpfc_vport *vport = pmb->vport; struct lpfc_dmabuf *mp; struct lpfc_nodelist *ndlp; + struct Scsi_Host *shost; uint16_t rpi, vpi; int rc; @@ -1746,7 +1747,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) } if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) && - (phba->sli_rev == LPFC_SLI_REV4)) + (phba->sli_rev == LPFC_SLI_REV4) && + (pmb->u.mb.un.varUnregLogin.rsvd1 == 0x0)) lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi); /* @@ -1765,16 +1767,14 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 
return; } - /* Unreg VPI, if the REG_VPI succeed after VLink failure */ if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) && !(phba->pport->load_flag & FC_UNLOADING) && !pmb->u.mb.mbxStatus) { - lpfc_unreg_vpi(phba, pmb->u.mb.un.varRegVpi.vpi, pmb); - pmb->vport = vport; - pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); - if (rc != MBX_NOT_FINISHED) - return; + shost = lpfc_shost_from_vport(vport); + spin_lock_irq(shost->host_lock); + vport->vpi_state |= LPFC_VPI_REGISTERED; + vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; + spin_unlock_irq(shost->host_lock); } if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) { @@ -5921,7 +5921,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution * @phba: Pointer to HBA context object. * - * This routine performs a round robin SCSI command to SLI4 FCP WQ index + * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock * held. * @@ -5965,7 +5965,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, uint16_t abrt_iotag; struct lpfc_iocbq *abrtiocbq; struct ulp_bde64 *bpl = NULL; - uint32_t els_id = ELS_ID_DEFAULT; + uint32_t els_id = LPFC_ELS_ID_DEFAULT; int numBdes, i; struct ulp_bde64 bde; @@ -5982,7 +5982,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); abort_tag = (uint32_t) iocbq->iotag; xritag = iocbq->sli4_xritag; - wqe->words[7] = 0; /* The ct field has moved so reset */ + wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */ /* words0-2 bpl convert bde */ if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) { numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize / @@ -6033,109 +6033,117 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, * contains the FCFI and remote N_Port_ID is * in word 5. 
*/ - ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); - bf_set(lpfc_wqe_gen_context, &wqe->generic, - iocbq->iocb.ulpContext); - - bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct); - bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); + bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com, + iocbq->iocb.ulpContext); + bf_set(wqe_ct, &wqe->els_req.wqe_com, ct); + bf_set(wqe_pu, &wqe->els_req.wqe_com, 0); /* CCP CCPE PV PRI in word10 were set in the memcpy */ - if (command_type == ELS_COMMAND_FIP) { els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK) >> LPFC_FIP_ELS_ID_SHIFT); } - bf_set(lpfc_wqe_gen_els_id, &wqe->generic, els_id); - + bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id); + bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1); + bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ); + bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1); + bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE); + bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0); break; case CMD_XMIT_SEQUENCE64_CX: - bf_set(lpfc_wqe_gen_context, &wqe->generic, - iocbq->iocb.un.ulpWord[3]); - wqe->generic.word3 = 0; - bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext); + bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, + iocbq->iocb.un.ulpWord[3]); + bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, + iocbq->iocb.ulpContext); /* The entire sequence is transmitted for this IOCB */ xmit_len = total_len; cmnd = CMD_XMIT_SEQUENCE64_CR; case CMD_XMIT_SEQUENCE64_CR: - /* word3 iocb=io_tag32 wqe=payload_offset */ - /* payload offset used for multilpe outstanding - * sequences on the same exchange - */ - wqe->words[3] = 0; + /* word3 iocb=io_tag32 wqe=reserved */ + wqe->xmit_sequence.rsvd3 = 0; /* word4 relative_offset memcpy */ /* word5 r_ctl/df_ctl memcpy */ - bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); + bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0); + bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1); + bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, + LPFC_WQE_IOD_WRITE); + bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com, + LPFC_WQE_LENLOC_WORD12); + bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0); wqe->xmit_sequence.xmit_len = xmit_len; command_type = OTHER_COMMAND; break; case CMD_XMIT_BCAST64_CN: - /* word3 iocb=iotag32 wqe=payload_len */ - wqe->words[3] = 0; /* no definition for this in wqe */ + /* word3 iocb=iotag32 wqe=seq_payload_len */ + wqe->xmit_bcast64.seq_payload_len = xmit_len; /* word4 iocb=rsvd wqe=rsvd */ /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */ /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */ - bf_set(lpfc_wqe_gen_ct, &wqe->generic, + bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com, ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); + bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1); + bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE); + bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com, + LPFC_WQE_LENLOC_WORD3); + bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0); break; case CMD_FCP_IWRITE64_CR: command_type = FCP_COMMAND_DATA_OUT; - /* The struct for wqe fcp_iwrite has 3 fields that are somewhat - * confusing. - * word3 is payload_len: byte offset to the sgl entry for the - * fcp_command. - * word4 is total xfer len, same as the IOCB->ulpParameter. 
- * word5 is initial xfer len 0 = wait for xfer-ready - */ - - /* Always wait for xfer-ready before sending data */ - wqe->fcp_iwrite.initial_xfer_len = 0; - /* word 4 (xfer length) should have been set on the memcpy */ - - /* allow write to fall through to read */ + /* word3 iocb=iotag wqe=payload_offset_len */ + /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ + wqe->fcp_iwrite.payload_offset_len = + xmit_len + sizeof(struct fcp_rsp); + /* word4 iocb=parameter wqe=total_xfer_length memcpy */ + /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ + bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com, + iocbq->iocb.ulpFCP2Rcvy); + bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS); + /* Always open the exchange */ + bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0); + bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1); + bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE); + bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, + LPFC_WQE_LENLOC_WORD4); + bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0); + bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU); + break; case CMD_FCP_IREAD64_CR: - /* FCP_CMD is always the 1st sgl entry */ - wqe->fcp_iread.payload_len = + /* word3 iocb=iotag wqe=payload_offset_len */ + /* Add the FCP_CMD and FCP_RSP sizes to get the offset */ + wqe->fcp_iread.payload_offset_len = xmit_len + sizeof(struct fcp_rsp); - - /* word 4 (xfer length) should have been set on the memcpy */ - - bf_set(lpfc_wqe_gen_erp, &wqe->generic, - iocbq->iocb.ulpFCP2Rcvy); - bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS); - /* The XC bit and the XS bit are similar. The driver never - * tracked whether or not the exchange was previouslly open. - * XC = Exchange create, 0 is create. 1 is already open. - * XS = link cmd: 1 do not close the exchange after command. - * XS = 0 close exchange when command completes. - * The only time we would not set the XC bit is when the XS bit - * is set and we are sending our 2nd or greater command on - * this exchange. 
- */ + /* word4 iocb=parameter wqe=total_xfer_length memcpy */ + /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */ + bf_set(wqe_erp, &wqe->fcp_iread.wqe_com, + iocbq->iocb.ulpFCP2Rcvy); + bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS); /* Always open the exchange */ bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); - - wqe->words[10] &= 0xffff0000; /* zero out ebde count */ - bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); - break; + bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1); + bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ); + bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, + LPFC_WQE_LENLOC_WORD4); + bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0); + bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU); + break; case CMD_FCP_ICMND64_CR: + /* word3 iocb=IO_TAG wqe=reserved */ + wqe->fcp_icmd.rsrvd3 = 0; + bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0); /* Always open the exchange */ - bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0); - - wqe->words[4] = 0; - wqe->words[10] &= 0xffff0000; /* zero out ebde count */ - bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0); + bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0); + bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1); + bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE); + bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1); + bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, + LPFC_WQE_LENLOC_NONE); + bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0); break; case CMD_GEN_REQUEST64_CR: - /* word3 command length is described as byte offset to the - * rsp_data. Would always be 16, sizeof(struct sli4_sge) - * sgl[0] = cmnd - * sgl[1] = rsp. - * - */ - wqe->gen_req.command_len = xmit_len; - /* Word4 parameter copied in the memcpy */ - /* Word5 [rctl, type, df_ctl, la] copied in memcpy */ + /* word3 iocb=IO_TAG wqe=request_payload_len */ + wqe->gen_req.request_payload_len = xmit_len; + /* word4 iocb=parameter wqe=relative_offset memcpy */ + /* word5 [rctl, type, df_ctl, la] copied in memcpy */ /* word6 context tag copied in memcpy */ if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) { ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l); @@ -6144,31 +6152,39 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, ct, iocbq->iocb.ulpCommand); return IOCB_ERROR; } - bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0); - bf_set(wqe_tmo, &wqe->gen_req.wqe_com, - iocbq->iocb.ulpTimeout); - - bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); + bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0); + bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout); + bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU); + bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1); + bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ); + bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1); + bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE); + bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0); command_type = OTHER_COMMAND; break; case CMD_XMIT_ELS_RSP64_CX: /* words0-2 BDE memcpy */ - /* word3 iocb=iotag32 wqe=rsvd */ - wqe->words[3] = 0; + /* word3 iocb=iotag32 wqe=response_payload_len */ + wqe->xmit_els_rsp.response_payload_len = xmit_len; /* word4 iocb=did wge=rsvd. 
*/ - wqe->words[4] = 0; + wqe->xmit_els_rsp.rsvd4 = 0; /* word5 iocb=rsvd wge=did */ bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, iocbq->iocb.un.elsreq64.remoteID); - - bf_set(lpfc_wqe_gen_ct, &wqe->generic, - ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); - - bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU); - bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext); + bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, + ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); + bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU); + bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com, + iocbq->iocb.ulpContext); if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l) - bf_set(lpfc_wqe_gen_context, &wqe->generic, + bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, iocbq->vport->vpi + phba->vpi_base); + bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1); + bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE); + bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1); + bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com, + LPFC_WQE_LENLOC_WORD3); + bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0); command_type = OTHER_COMMAND; break; case CMD_CLOSE_XRI_CN: @@ -6193,15 +6209,19 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, else bf_set(abort_cmd_ia, &wqe->abort_cmd, 0); bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); - wqe->words[5] = 0; - bf_set(lpfc_wqe_gen_ct, &wqe->generic, + /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */ + wqe->abort_cmd.rsrvd5 = 0; + bf_set(wqe_ct, &wqe->abort_cmd.wqe_com, ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l)); abort_tag = iocbq->iocb.un.acxri.abortIoTag; /* * The abort handler will send us CMD_ABORT_XRI_CN or * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX */ - bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX); + bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); + bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); + bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, + LPFC_WQE_LENLOC_NONE); cmnd = CMD_ABORT_XRI_CX; command_type = OTHER_COMMAND; xritag = 0; @@ -6235,18 +6255,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1); bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com, iocbq->iocb.ulpContext); + bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1); + bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com, + LPFC_WQE_LENLOC_NONE); /* Overwrite the pre-set comnd type with OTHER_COMMAND */ command_type = OTHER_COMMAND; break; case CMD_XRI_ABORTED_CX: case CMD_CREATE_XRI_CR: /* Do we expect to use this? */ - /* words0-2 are all 0's no bde */ - /* word3 and word4 are rsvrd */ - wqe->words[3] = 0; - wqe->words[4] = 0; - /* word5 iocb=rsvd wge=did */ - /* There is no remote port id in the IOCB? 
*/ - /* Let this fall through and fail */ case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */ case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */ case CMD_FCP_TRSP64_CX: /* Target mode rcv */ @@ -6257,16 +6273,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, iocbq->iocb.ulpCommand); return IOCB_ERROR; break; - } - bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag); - bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag); - wqe->generic.abort_tag = abort_tag; - bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type); - bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd); - bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass); - bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT); - + bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); + bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); + wqe->generic.wqe_com.abort_tag = abort_tag; + bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type); + bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd); + bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass); + bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); return 0; } @@ -7257,25 +7271,26 @@ lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } /** - * lpfc_sli_issue_abort_iotag - Abort function for a command iocb + * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb * @phba: Pointer to HBA context object. * @pring: Pointer to driver SLI ring object. * @cmdiocb: Pointer to driver command iocb object. * - * This function issues an abort iocb for the provided command - * iocb. This function is called with hbalock held. - * The function returns 0 when it fails due to memory allocation - * failure or when the command iocb is an abort request. + * This function issues an abort iocb for the provided command iocb down to + * the port. Other than the case the outstanding command iocb is an abort + * request, this function issues abort out unconditionally. This function is + * called with hbalock held. The function returns 0 when it fails due to + * memory allocation failure or when the command iocb is an abort request. **/ -int -lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, +static int +lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *cmdiocb) { struct lpfc_vport *vport = cmdiocb->vport; struct lpfc_iocbq *abtsiocbp; IOCB_t *icmd = NULL; IOCB_t *iabt = NULL; - int retval = IOCB_ERROR; + int retval; /* * There are certain command types we don't want to abort. And we @@ -7288,18 +7303,6 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) return 0; - /* If we're unloading, don't abort iocb on the ELS ring, but change the - * callback so that nothing happens when it finishes. - */ - if ((vport->load_flag & FC_UNLOADING) && - (pring->ringno == LPFC_ELS_RING)) { - if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) - cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; - else - cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; - goto abort_iotag_exit; - } - /* issue ABTS for this IOCB based on iotag */ abtsiocbp = __lpfc_sli_get_iocbq(phba); if (abtsiocbp == NULL) @@ -7344,6 +7347,63 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, if (retval) __lpfc_sli_release_iocbq(phba, abtsiocbp); + + /* + * Caller to this routine should check for IOCB_ERROR + * and handle it properly. 
This routine no longer removes + * iocb off txcmplq and call compl in case of IOCB_ERROR. + */ + return retval; +} + +/** + * lpfc_sli_issue_abort_iotag - Abort function for a command iocb + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * @cmdiocb: Pointer to driver command iocb object. + * + * This function issues an abort iocb for the provided command iocb. In case + * of unloading, the abort iocb will not be issued to commands on the ELS + * ring. Instead, the callback function shall be changed to those commands + * so that nothing happens when them finishes. This function is called with + * hbalock held. The function returns 0 when the command iocb is an abort + * request. + **/ +int +lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, + struct lpfc_iocbq *cmdiocb) +{ + struct lpfc_vport *vport = cmdiocb->vport; + int retval = IOCB_ERROR; + IOCB_t *icmd = NULL; + + /* + * There are certain command types we don't want to abort. And we + * don't want to abort commands that are already in the process of + * being aborted. + */ + icmd = &cmdiocb->iocb; + if (icmd->ulpCommand == CMD_ABORT_XRI_CN || + icmd->ulpCommand == CMD_CLOSE_XRI_CN || + (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0) + return 0; + + /* + * If we're unloading, don't abort iocb on the ELS ring, but change + * the callback so that nothing happens when it finishes. + */ + if ((vport->load_flag & FC_UNLOADING) && + (pring->ringno == LPFC_ELS_RING)) { + if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) + cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl; + else + cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl; + goto abort_iotag_exit; + } + + /* Now, we try to issue the abort to the cmdiocb out */ + retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb); + abort_iotag_exit: /* * Caller to this routine should check for IOCB_ERROR @@ -7354,6 +7414,62 @@ abort_iotag_exit: } /** + * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring + * @phba: Pointer to HBA context object. + * @pring: Pointer to driver SLI ring object. + * + * This function aborts all iocbs in the given ring and frees all the iocb + * objects in txq. This function issues abort iocbs unconditionally for all + * the iocb commands in txcmplq. The iocbs in the txcmplq is not guaranteed + * to complete before the return of this function. The caller is not required + * to hold any locks. + **/ +static void +lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) +{ + LIST_HEAD(completions); + struct lpfc_iocbq *iocb, *next_iocb; + + if (pring->ringno == LPFC_ELS_RING) + lpfc_fabric_abort_hba(phba); + + spin_lock_irq(&phba->hbalock); + + /* Take off all the iocbs on txq for cancelling */ + list_splice_init(&pring->txq, &completions); + pring->txq_cnt = 0; + + /* Next issue ABTS for everything on the txcmplq */ + list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) + lpfc_sli_abort_iotag_issue(phba, pring, iocb); + + spin_unlock_irq(&phba->hbalock); + + /* Cancel all the IOCBs from the completions list */ + lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT, + IOERR_SLI_ABORTED); +} + +/** + * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba. + * @phba: pointer to lpfc HBA data structure. + * + * This routine will abort all pending and outstanding iocbs to an HBA. 
+ **/ +void +lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba) +{ + struct lpfc_sli *psli = &phba->sli; + struct lpfc_sli_ring *pring; + int i; + + for (i = 0; i < psli->num_rings; i++) { + pring = &psli->ring[i]; + lpfc_sli_iocb_ring_abort(phba, pring); + } +} + +/** * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN * @iocbq: Pointer to driver iocb object. * @vport: Pointer to driver virtual port object. @@ -12242,13 +12358,15 @@ lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index) /* Issue the mailbox command asynchronously */ mboxq->vport = phba->pport; mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec; + + spin_lock_irq(&phba->hbalock); + phba->hba_flag |= FCF_TS_INPROG; + spin_unlock_irq(&phba->hbalock); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) error = -EIO; else { - spin_lock_irq(&phba->hbalock); - phba->hba_flag |= FCF_DISC_INPROGRESS; - spin_unlock_irq(&phba->hbalock); /* Reset eligible FCF count for new scan */ if (fcf_index == LPFC_FCOE_FCF_GET_FIRST) phba->fcf.eligible_fcf_cnt = 0; @@ -12258,21 +12376,21 @@ fail_fcf_scan: if (error) { if (mboxq) lpfc_sli4_mbox_cmd_free(phba, mboxq); - /* FCF scan failed, clear FCF_DISC_INPROGRESS flag */ + /* FCF scan failed, clear FCF_TS_INPROG flag */ spin_lock_irq(&phba->hbalock); - phba->hba_flag &= ~FCF_DISC_INPROGRESS; + phba->hba_flag &= ~FCF_TS_INPROG; spin_unlock_irq(&phba->hbalock); } return error; } /** - * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf. + * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf. * @phba: pointer to lpfc hba data structure. * @fcf_index: FCF table entry offset. * * This routine is invoked to read an FCF record indicated by @fcf_index - * and to use it for FLOGI round robin FCF failover. + * and to use it for FLOGI roundrobin FCF failover. * * Return 0 if the mailbox command is submitted sucessfully, none 0 * otherwise. @@ -12318,7 +12436,7 @@ fail_fcf_read: * @fcf_index: FCF table entry offset. * * This routine is invoked to read an FCF record indicated by @fcf_index to - * determine whether it's eligible for FLOGI round robin failover list. + * determine whether it's eligible for FLOGI roundrobin failover list. * * Return 0 if the mailbox command is submitted sucessfully, none 0 * otherwise. @@ -12364,7 +12482,7 @@ fail_fcf_read: * * This routine is to get the next eligible FCF record index in a round * robin fashion. If the next eligible FCF record index equals to the - * initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) + * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) * shall be returned, otherwise, the next eligible FCF record's index * shall be returned. 
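
The wrap-around semantics described above are the standard roundrobin-over-a-bitmap pattern. A generic illustration follows, assuming an eligibility bitmap, its dimension, and the previously used index; it is not the driver's exact implementation, which also folds in rediscovery state:

/* Illustrative roundrobin pick: scan forward from the last used index,
 * wrap once, and report "none" when no other eligible bit exists. */
static u16 example_rr_next(const unsigned long *bmask, u16 max, u16 last)
{
	u16 next = find_next_bit(bmask, max, last + 1);

	if (next >= max)
		next = find_first_bit(bmask, max);
	if (next >= max || next == last)
		return 0xFFFF;	/* analogous to LPFC_FCOE_FCF_NEXT_NONE */
	return next;
}
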
**/ @@ -12392,28 +12510,10 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) return LPFC_FCOE_FCF_NEXT_NONE; } - /* Check roundrobin failover index bmask stop condition */ - if (next_fcf_index == phba->fcf.fcf_rr_init_indx) { - if (!(phba->fcf.fcf_flag & FCF_REDISC_RRU)) { - lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, - "2847 Round robin failover FCF index " - "search hit stop condition:x%x\n", - next_fcf_index); - return LPFC_FCOE_FCF_NEXT_NONE; - } - /* The roundrobin failover index bmask updated, start over */ - lpfc_printf_log(phba, KERN_INFO, LOG_FIP, - "2848 Round robin failover FCF index bmask " - "updated, start over\n"); - spin_lock_irq(&phba->hbalock); - phba->fcf.fcf_flag &= ~FCF_REDISC_RRU; - spin_unlock_irq(&phba->hbalock); - return phba->fcf.fcf_rr_init_indx; - } - lpfc_printf_log(phba, KERN_INFO, LOG_FIP, - "2845 Get next round robin failover " - "FCF index x%x\n", next_fcf_index); + "2845 Get next roundrobin failover FCF (x%x)\n", + next_fcf_index); + return next_fcf_index; } @@ -12422,7 +12522,7 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba) * @phba: pointer to lpfc hba data structure. * * This routine sets the FCF record index in to the eligible bmask for - * round robin failover search. It checks to make sure that the index + * roundrobin failover search. It checks to make sure that the index * does not go beyond the range of the driver allocated bmask dimension * before setting the bit. * @@ -12434,22 +12534,16 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) { if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { lpfc_printf_log(phba, KERN_ERR, LOG_FIP, - "2610 HBA FCF index reached driver's " - "book keeping dimension: fcf_index:%d, " - "driver_bmask_max:%d\n", + "2610 FCF (x%x) reached driver's book " + "keeping dimension:x%x\n", fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); return -EINVAL; } /* Set the eligible FCF record index bmask */ set_bit(fcf_index, phba->fcf.fcf_rr_bmask); - /* Set the roundrobin index bmask updated */ - spin_lock_irq(&phba->hbalock); - phba->fcf.fcf_flag |= FCF_REDISC_RRU; - spin_unlock_irq(&phba->hbalock); - lpfc_printf_log(phba, KERN_INFO, LOG_FIP, - "2790 Set FCF index x%x to round robin failover " + "2790 Set FCF (x%x) to roundrobin FCF failover " "bmask\n", fcf_index); return 0; @@ -12460,7 +12554,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) * @phba: pointer to lpfc hba data structure. * * This routine clears the FCF record index from the eligible bmask for - * round robin failover search. It checks to make sure that the index + * roundrobin failover search. It checks to make sure that the index * does not go beyond the range of the driver allocated bmask dimension * before clearing the bit. 
**/ @@ -12469,9 +12563,8 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) { if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { lpfc_printf_log(phba, KERN_ERR, LOG_FIP, - "2762 HBA FCF index goes beyond driver's " - "book keeping dimension: fcf_index:%d, " - "driver_bmask_max:%d\n", + "2762 FCF (x%x) reached driver's book " + "keeping dimension:x%x\n", fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX); return; } @@ -12479,7 +12572,7 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) clear_bit(fcf_index, phba->fcf.fcf_rr_bmask); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, - "2791 Clear FCF index x%x from round robin failover " + "2791 Clear FCF (x%x) from roundrobin failover " "bmask\n", fcf_index); } @@ -12530,8 +12623,7 @@ lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) } } else { lpfc_printf_log(phba, KERN_INFO, LOG_FIP, - "2775 Start FCF rediscovery quiescent period " - "wait timer before scaning FCF table\n"); + "2775 Start FCF rediscover quiescent timer\n"); /* * Start FCF rediscovery wait timer for pending FCF * before rescan FCF record table. diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index a0ca572ec28..c4483feb8b7 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -19,10 +19,16 @@ *******************************************************************/ #define LPFC_ACTIVE_MBOX_WAIT_CNT 100 +#define LPFC_XRI_EXCH_BUSY_WAIT_TMO 10000 +#define LPFC_XRI_EXCH_BUSY_WAIT_T1 10 +#define LPFC_XRI_EXCH_BUSY_WAIT_T2 30000 #define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 #define LPFC_GET_QE_REL_INT 32 #define LPFC_RPI_LOW_WATER_MARK 10 +#define LPFC_UNREG_FCF 1 +#define LPFC_SKIP_UNREG_FCF 0 + /* Amount of time in seconds for waiting FCF rediscovery to complete */ #define LPFC_FCF_REDISCOVER_WAIT_TMO 2000 /* msec */ @@ -163,9 +169,8 @@ struct lpfc_fcf { #define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */ #define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */ #define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */ -#define FCF_REDISC_RRU 0x400 /* Roundrobin bitmap updated */ +#define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT) uint32_t addr_mode; - uint16_t fcf_rr_init_indx; uint32_t eligible_fcf_cnt; struct lpfc_fcf_rec current_rec; struct lpfc_fcf_rec failover_rec; diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index f93120e4c79..7a1b5b112a0 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. 
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.17" +#define LPFC_DRIVER_VERSION "8.3.18" #define LPFC_DRIVER_NAME "lpfc" #define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp" #define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp" diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c index 3ddb4dc62d5..6c42dff0f4d 100644 --- a/drivers/scsi/mac53c94.c +++ b/drivers/scsi/mac53c94.c @@ -66,7 +66,7 @@ static void cmd_done(struct fsc_state *, int result); static void set_dma_cmds(struct fsc_state *, struct scsi_cmnd *); -static int mac53c94_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) +static int mac53c94_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { struct fsc_state *state; @@ -99,6 +99,8 @@ static int mac53c94_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd * return 0; } +static DEF_SCSI_QCMD(mac53c94_queue) + static int mac53c94_host_reset(struct scsi_cmnd *cmd) { struct fsc_state *state = (struct fsc_state *) cmd->device->host->hostdata; diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 7ceb5cf12c6..9aa048525eb 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -366,7 +366,7 @@ mega_runpendq(adapter_t *adapter) * The command queuing entry point for the mid-layer. */ static int -megaraid_queue(Scsi_Cmnd *scmd, void (*done)(Scsi_Cmnd *)) +megaraid_queue_lck(Scsi_Cmnd *scmd, void (*done)(Scsi_Cmnd *)) { adapter_t *adapter; scb_t *scb; @@ -409,6 +409,8 @@ megaraid_queue(Scsi_Cmnd *scmd, void (*done)(Scsi_Cmnd *)) return busy; } +static DEF_SCSI_QCMD(megaraid_queue) + /** * mega_allocate_scb() * @adapter - pointer to our soft state @@ -4456,7 +4458,7 @@ mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru) scb->idx = CMDID_INT_CMDS; - megaraid_queue(scmd, mega_internal_done); + megaraid_queue_lck(scmd, mega_internal_done); wait_for_completion(&adapter->int_waitq); diff --git a/drivers/scsi/megaraid.h b/drivers/scsi/megaraid.h index 2b4a048cadf..f5644745e24 100644 --- a/drivers/scsi/megaraid.h +++ b/drivers/scsi/megaraid.h @@ -987,7 +987,7 @@ static int mega_query_adapter(adapter_t *); static int issue_scb(adapter_t *, scb_t *); static int mega_setup_mailbox(adapter_t *); -static int megaraid_queue (Scsi_Cmnd *, void (*)(Scsi_Cmnd *)); +static int megaraid_queue (struct Scsi_Host *, struct scsi_cmnd *); static scb_t * mega_build_cmd(adapter_t *, Scsi_Cmnd *, int *); static void __mega_runpendq(adapter_t *); static int issue_scb_block(adapter_t *, u_char *); diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index a7810a106b3..5708cb27d07 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -113,8 +113,7 @@ static int megaraid_mbox_fire_sync_cmd(adapter_t *); static void megaraid_mbox_display_scb(adapter_t *, scb_t *); static void megaraid_mbox_setup_device_map(adapter_t *); -static int megaraid_queue_command(struct scsi_cmnd *, - void (*)(struct scsi_cmnd *)); +static int megaraid_queue_command(struct Scsi_Host *, struct scsi_cmnd *); static scb_t *megaraid_mbox_build_cmd(adapter_t *, struct scsi_cmnd *, int *); static void megaraid_mbox_runpendq(adapter_t *, scb_t *); static void megaraid_mbox_prepare_pthru(adapter_t *, scb_t *, @@ -1484,7 +1483,7 @@ mbox_post_cmd(adapter_t *adapter, scb_t *scb) * Queue entry point for mailbox based controllers. 
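
The *_lck renames plus DEF_SCSI_QCMD() seen throughout these SCSI drivers (mac53c94, megaraid, megaraid_sas, mesh, mpt2sas, ncr53c8xx, nsp32, sym53c500, pmcraid, ppa, ps3rom, qla1280, qla2xxx) are one mechanical conversion: the legacy entry point that took a done() callback keeps the driver logic, and the macro emits the new struct Scsi_Host based prototype around it. A rough sketch of what the generated wrapper amounts to, treating the exact expansion in include/scsi/scsi_host.h as an assumption:

/* Approximate shape of DEF_SCSI_QCMD(foo_queuecommand) for a driver whose
 * legacy handler is foo_queuecommand_lck(); locking details are assumed. */
static int foo_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(shost->host_lock, flags);
	rc = foo_queuecommand_lck(cmd, cmd->scsi_done);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return rc;
}

This is also why mega_internal_command() below switches to calling megaraid_queue_lck() directly: it supplies its own completion callback, which the wrapped prototype can no longer accept.
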
*/ static int -megaraid_queue_command(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) +megaraid_queue_command_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) { adapter_t *adapter; scb_t *scb; @@ -1513,6 +1512,8 @@ megaraid_queue_command(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) return if_busy; } +static DEF_SCSI_QCMD(megaraid_queue_command) + /** * megaraid_mbox_build_cmd - transform the mid-layer scsi commands * @adapter : controller's soft state diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c index d3c9cdee292..7451bc096a0 100644 --- a/drivers/scsi/megaraid/megaraid_sas.c +++ b/drivers/scsi/megaraid/megaraid_sas.c @@ -10,7 +10,7 @@ * 2 of the License, or (at your option) any later version. * * FILE : megaraid_sas.c - * Version : v00.00.04.17.1-rc1 + * Version : v00.00.04.31-rc1 * * Authors: * (email-id : megaraidlinux@lsi.com) @@ -56,6 +56,15 @@ module_param_named(poll_mode_io, poll_mode_io, int, 0); MODULE_PARM_DESC(poll_mode_io, "Complete cmds from IO path, (default=0)"); +/* + * Number of sectors per IO command + * Will be set in megasas_init_mfi if user does not provide + */ +static unsigned int max_sectors; +module_param_named(max_sectors, max_sectors, int, 0); +MODULE_PARM_DESC(max_sectors, + "Maximum number of sectors per IO command"); + MODULE_LICENSE("GPL"); MODULE_VERSION(MEGASAS_VERSION); MODULE_AUTHOR("megaraidlinux@lsi.com"); @@ -103,6 +112,7 @@ static int megasas_poll_wait_aen; static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait); static u32 support_poll_for_event; static u32 megasas_dbg_lvl; +static u32 support_device_change; /* define lock for aen poll */ spinlock_t poll_aen_lock; @@ -718,6 +728,10 @@ static int megasas_check_reset_gen2(struct megasas_instance *instance, struct megasas_register_set __iomem *regs) { + if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) { + return 1; + } + return 0; } @@ -930,6 +944,7 @@ megasas_make_sgl_skinny(struct megasas_instance *instance, mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl); mfi_sgl->sge_skinny[i].phys_addr = sg_dma_address(os_sgl); + mfi_sgl->sge_skinny[i].flag = 0; } } return sge_count; @@ -1319,7 +1334,7 @@ megasas_dump_pending_frames(struct megasas_instance *instance) * @done: Callback entry point */ static int -megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *)) +megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *)) { u32 frame_count; struct megasas_cmd *cmd; @@ -1402,6 +1417,8 @@ megasas_queue_command(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *)) return 0; } +static DEF_SCSI_QCMD(megasas_queue_command) + static struct megasas_instance *megasas_lookup_instance(u16 host_no) { int i; @@ -1557,6 +1574,28 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr) } } +static void +megasas_internal_reset_defer_cmds(struct megasas_instance *instance); + +static void +process_fw_state_change_wq(struct work_struct *work); + +void megasas_do_ocr(struct megasas_instance *instance) +{ + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || + (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) { + *instance->consumer = MEGASAS_ADPRESET_INPROG_SIGN; + } + instance->instancet->disable_intr(instance->reg_set); + instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT; + instance->issuepend_done = 0; + + atomic_set(&instance->fw_outstanding, 0); + megasas_internal_reset_defer_cmds(instance); + 
process_fw_state_change_wq(&instance->work_init); +} + /** * megasas_wait_for_outstanding - Wait for all outstanding cmds * @instance: Adapter soft state @@ -1574,6 +1613,8 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance) unsigned long flags; struct list_head clist_local; struct megasas_cmd *reset_cmd; + u32 fw_state; + u8 kill_adapter_flag; spin_lock_irqsave(&instance->hba_lock, flags); adprecovery = instance->adprecovery; @@ -1659,7 +1700,45 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance) msleep(1000); } - if (atomic_read(&instance->fw_outstanding)) { + i = 0; + kill_adapter_flag = 0; + do { + fw_state = instance->instancet->read_fw_status_reg( + instance->reg_set) & MFI_STATE_MASK; + if ((fw_state == MFI_STATE_FAULT) && + (instance->disableOnlineCtrlReset == 0)) { + if (i == 3) { + kill_adapter_flag = 2; + break; + } + megasas_do_ocr(instance); + kill_adapter_flag = 1; + + /* wait for 1 secs to let FW finish the pending cmds */ + msleep(1000); + } + i++; + } while (i <= 3); + + if (atomic_read(&instance->fw_outstanding) && + !kill_adapter_flag) { + if (instance->disableOnlineCtrlReset == 0) { + + megasas_do_ocr(instance); + + /* wait for 5 secs to let FW finish the pending cmds */ + for (i = 0; i < wait_time; i++) { + int outstanding = + atomic_read(&instance->fw_outstanding); + if (!outstanding) + return SUCCESS; + msleep(1000); + } + } + } + + if (atomic_read(&instance->fw_outstanding) || + (kill_adapter_flag == 2)) { printk(KERN_NOTICE "megaraid_sas: pending cmds after reset\n"); /* * Send signal to FW to stop processing any pending cmds. @@ -2669,6 +2748,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance) return -ENOMEM; } + memset(cmd->frame, 0, total_sz); cmd->frame->io.context = cmd->index; cmd->frame->io.pad_0 = 0; } @@ -3585,6 +3665,27 @@ static int megasas_io_attach(struct megasas_instance *instance) instance->max_fw_cmds - MEGASAS_INT_CMDS; host->this_id = instance->init_id; host->sg_tablesize = instance->max_num_sge; + /* + * Check if the module parameter value for max_sectors can be used + */ + if (max_sectors && max_sectors < instance->max_sectors_per_req) + instance->max_sectors_per_req = max_sectors; + else { + if (max_sectors) { + if (((instance->pdev->device == + PCI_DEVICE_ID_LSI_SAS1078GEN2) || + (instance->pdev->device == + PCI_DEVICE_ID_LSI_SAS0079GEN2)) && + (max_sectors <= MEGASAS_MAX_SECTORS)) { + instance->max_sectors_per_req = max_sectors; + } else { + printk(KERN_INFO "megasas: max_sectors should be > 0" + "and <= %d (or < 1MB for GEN2 controller)\n", + instance->max_sectors_per_req); + } + } + } + host->max_sectors = instance->max_sectors_per_req; host->cmd_per_lun = 128; host->max_channel = MEGASAS_MAX_CHANNELS - 1; @@ -4658,6 +4759,15 @@ megasas_sysfs_show_support_poll_for_event(struct device_driver *dd, char *buf) static DRIVER_ATTR(support_poll_for_event, S_IRUGO, megasas_sysfs_show_support_poll_for_event, NULL); + static ssize_t +megasas_sysfs_show_support_device_change(struct device_driver *dd, char *buf) +{ + return sprintf(buf, "%u\n", support_device_change); +} + +static DRIVER_ATTR(support_device_change, S_IRUGO, + megasas_sysfs_show_support_device_change, NULL); + static ssize_t megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf) { @@ -4978,6 +5088,7 @@ static int __init megasas_init(void) MEGASAS_EXT_VERSION); support_poll_for_event = 2; + support_device_change = 1; memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info)); @@ -5026,8 +5137,17 @@ static 
int __init megasas_init(void) if (rval) goto err_dcf_poll_mode_io; + rval = driver_create_file(&megasas_pci_driver.driver, + &driver_attr_support_device_change); + if (rval) + goto err_dcf_support_device_change; + return rval; +err_dcf_support_device_change: + driver_remove_file(&megasas_pci_driver.driver, + &driver_attr_poll_mode_io); + err_dcf_poll_mode_io: driver_remove_file(&megasas_pci_driver.driver, &driver_attr_dbg_lvl); @@ -5058,6 +5178,10 @@ static void __exit megasas_exit(void) driver_remove_file(&megasas_pci_driver.driver, &driver_attr_dbg_lvl); driver_remove_file(&megasas_pci_driver.driver, + &driver_attr_support_poll_for_event); + driver_remove_file(&megasas_pci_driver.driver, + &driver_attr_support_device_change); + driver_remove_file(&megasas_pci_driver.driver, &driver_attr_release_date); driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version); diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 16a4f68a34b..ad16f5e6004 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -18,9 +18,9 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "00.00.04.17.1-rc1" -#define MEGASAS_RELDATE "Oct. 29, 2009" -#define MEGASAS_EXT_VERSION "Thu. Oct. 29, 11:41:51 PST 2009" +#define MEGASAS_VERSION "00.00.04.31-rc1" +#define MEGASAS_RELDATE "May 3, 2010" +#define MEGASAS_EXT_VERSION "Mon. May 3, 11:41:51 PST 2010" /* * Device IDs @@ -706,6 +706,7 @@ struct megasas_ctrl_info { #define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \ MEGASAS_MAX_DEV_PER_CHANNEL) +#define MEGASAS_MAX_SECTORS (2*1024) #define MEGASAS_DBG_LVL 1 #define MEGASAS_FW_BUSY 1 diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c index 1f784fde251..197aa1b3f0f 100644 --- a/drivers/scsi/mesh.c +++ b/drivers/scsi/mesh.c @@ -1627,7 +1627,7 @@ static void cmd_complete(struct mesh_state *ms) * Called by midlayer with host locked to queue a new * request */ -static int mesh_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) +static int mesh_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { struct mesh_state *ms; @@ -1648,6 +1648,8 @@ static int mesh_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) return 0; } +static DEF_SCSI_QCMD(mesh_queue) + /* * Called to handle interrupts, either call by the interrupt * handler (do_mesh_interrupt) or by other functions in diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 16e99b68635..1a96a00418a 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -3315,7 +3315,7 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full */ static int -_scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) +_scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) { struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host); struct MPT2SAS_DEVICE *sas_device_priv_data; @@ -3441,6 +3441,8 @@ _scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) return SCSI_MLQUEUE_HOST_BUSY; } +static DEF_SCSI_QCMD(_scsih_qcmd) + /** * _scsih_normalize_sense - normalize descriptor and fixed format sense data * @sense_buffer: sense data returned by target diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c index d013a2aa2fd..46cc3825638 100644 --- a/drivers/scsi/ncr53c8xx.c +++ b/drivers/scsi/ncr53c8xx.c @@ -8029,7 +8029,7 @@ static int 
ncr53c8xx_slave_configure(struct scsi_device *device) return 0; } -static int ncr53c8xx_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) +static int ncr53c8xx_queue_command_lck (struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { struct ncb *np = ((struct host_data *) cmd->device->host->hostdata)->ncb; unsigned long flags; @@ -8068,6 +8068,8 @@ printk("ncr53c8xx : command successfully queued\n"); return sts; } +static DEF_SCSI_QCMD(ncr53c8xx_queue_command) + irqreturn_t ncr53c8xx_intr(int irq, void *dev_id) { unsigned long flags; diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c index 4c1e5454520..6b8b021400f 100644 --- a/drivers/scsi/nsp32.c +++ b/drivers/scsi/nsp32.c @@ -196,8 +196,7 @@ static void __exit exit_nsp32 (void); static int nsp32_proc_info (struct Scsi_Host *, char *, char **, off_t, int, int); static int nsp32_detect (struct pci_dev *pdev); -static int nsp32_queuecommand(struct scsi_cmnd *, - void (*done)(struct scsi_cmnd *)); +static int nsp32_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); static const char *nsp32_info (struct Scsi_Host *); static int nsp32_release (struct Scsi_Host *); @@ -909,7 +908,7 @@ static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt) return TRUE; } -static int nsp32_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) +static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; nsp32_target *target; @@ -1050,6 +1049,8 @@ static int nsp32_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_ return 0; } +static DEF_SCSI_QCMD(nsp32_queuecommand) + /* initialize asic */ static int nsp32hw_init(nsp32_hw_data *data) { diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c index e88bbdde49c..b37c8a3c1bb 100644 --- a/drivers/scsi/osd/osd_initiator.c +++ b/drivers/scsi/osd/osd_initiator.c @@ -452,10 +452,6 @@ void osd_end_request(struct osd_request *or) { struct request *rq = or->request; - _osd_free_seg(or, &or->set_attr); - _osd_free_seg(or, &or->enc_get_attr); - _osd_free_seg(or, &or->get_attr); - if (rq) { if (rq->next_rq) { _put_request(rq->next_rq); @@ -464,6 +460,12 @@ void osd_end_request(struct osd_request *or) _put_request(rq); } + + _osd_free_seg(or, &or->get_attr); + _osd_free_seg(or, &or->enc_get_attr); + _osd_free_seg(or, &or->set_attr); + _osd_free_seg(or, &or->cdb_cont); + _osd_request_free(or); } EXPORT_SYMBOL(osd_end_request); @@ -547,6 +549,12 @@ static int _osd_realloc_seg(struct osd_request *or, return 0; } +static int _alloc_cdb_cont(struct osd_request *or, unsigned total_bytes) +{ + OSD_DEBUG("total_bytes=%d\n", total_bytes); + return _osd_realloc_seg(or, &or->cdb_cont, total_bytes); +} + static int _alloc_set_attr_list(struct osd_request *or, const struct osd_attr *oa, unsigned nelem, unsigned add_bytes) { @@ -885,6 +893,199 @@ int osd_req_read_kern(struct osd_request *or, } EXPORT_SYMBOL(osd_req_read_kern); +static int _add_sg_continuation_descriptor(struct osd_request *or, + const struct osd_sg_entry *sglist, unsigned numentries, u64 *len) +{ + struct osd_sg_continuation_descriptor *oscd; + u32 oscd_size; + unsigned i; + int ret; + + oscd_size = sizeof(*oscd) + numentries * sizeof(oscd->entries[0]); + + if (!or->cdb_cont.total_bytes) { + /* First time, jump over the header, we will write to: + * cdb_cont.buff + cdb_cont.total_bytes + */ + or->cdb_cont.total_bytes = + sizeof(struct osd_continuation_segment_header); + } + + ret = 
_alloc_cdb_cont(or, or->cdb_cont.total_bytes + oscd_size); + if (unlikely(ret)) + return ret; + + oscd = or->cdb_cont.buff + or->cdb_cont.total_bytes; + oscd->hdr.type = cpu_to_be16(SCATTER_GATHER_LIST); + oscd->hdr.pad_length = 0; + oscd->hdr.length = cpu_to_be32(oscd_size - sizeof(*oscd)); + + *len = 0; + /* copy the sg entries and convert to network byte order */ + for (i = 0; i < numentries; i++) { + oscd->entries[i].offset = cpu_to_be64(sglist[i].offset); + oscd->entries[i].len = cpu_to_be64(sglist[i].len); + *len += sglist[i].len; + } + + or->cdb_cont.total_bytes += oscd_size; + OSD_DEBUG("total_bytes=%d oscd_size=%d numentries=%d\n", + or->cdb_cont.total_bytes, oscd_size, numentries); + return 0; +} + +static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key) +{ + struct request_queue *req_q = osd_request_queue(or->osd_dev); + struct bio *bio; + struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb); + struct osd_continuation_segment_header *cont_seg_hdr; + + if (!or->cdb_cont.total_bytes) + return 0; + + cont_seg_hdr = or->cdb_cont.buff; + cont_seg_hdr->format = CDB_CONTINUATION_FORMAT_V2; + cont_seg_hdr->service_action = cdbh->varlen_cdb.service_action; + + /* create a bio for continuation segment */ + bio = bio_map_kern(req_q, or->cdb_cont.buff, or->cdb_cont.total_bytes, + GFP_KERNEL); + if (IS_ERR(bio)) + return PTR_ERR(bio); + + bio->bi_rw |= REQ_WRITE; + + /* integrity check the continuation before the bio is linked + * with the other data segments since the continuation + * integrity is separate from the other data segments. + */ + osd_sec_sign_data(cont_seg_hdr->integrity_check, bio, cap_key); + + cdbh->v2.cdb_continuation_length = cpu_to_be32(or->cdb_cont.total_bytes); + + /* we can't use _req_append_segment, because we need to link in the + * continuation bio to the head of the bio list - the + * continuation segment (if it exists) is always the first segment in + * the out data buffer. + */ + bio->bi_next = or->out.bio; + or->out.bio = bio; + or->out.total_bytes += or->cdb_cont.total_bytes; + + return 0; +} + +/* osd_req_write_sg: Takes a @bio that points to the data out buffer and an + * @sglist that has the scatter gather entries. Scatter-gather enables a write + * of multiple none-contiguous areas of an object, in a single call. The extents + * may overlap and/or be in any order. The only constrain is that: + * total_bytes(sglist) >= total_bytes(bio) + */ +int osd_req_write_sg(struct osd_request *or, + const struct osd_obj_id *obj, struct bio *bio, + const struct osd_sg_entry *sglist, unsigned numentries) +{ + u64 len; + int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len); + + if (ret) + return ret; + osd_req_write(or, obj, 0, bio, len); + + return 0; +} +EXPORT_SYMBOL(osd_req_write_sg); + +/* osd_req_read_sg: Read multiple extents of an object into @bio + * See osd_req_write_sg + */ +int osd_req_read_sg(struct osd_request *or, + const struct osd_obj_id *obj, struct bio *bio, + const struct osd_sg_entry *sglist, unsigned numentries) +{ + u64 len; + int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len); + + if (ret) + return ret; + osd_req_read(or, obj, 0, bio, len); + + return 0; +} +EXPORT_SYMBOL(osd_req_read_sg); + +/* SG-list write/read Kern API + * + * osd_req_{write,read}_sg_kern takes an array of @buff pointers and an array + * of sg_entries. @numentries indicates how many pointers and sg_entries there + * are. By requiring an array of buff pointers. 
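
A minimal usage sketch of this kern-level API follows; the object handle, offsets, and lengths are placeholders, each buffer/length pair is kept within a single page (see the NOTE just below), and the request still goes through the usual osd_finalize_request()/submission path afterwards:

/* Hypothetical two-extent write: scatter two small kernel buffers into two
 * non-contiguous byte ranges of one object. Error handling is elided. */
static int example_write_two_extents(struct osd_request *or,
				     const struct osd_obj_id *obj,
				     void *buf_a, void *buf_b)
{
	void *buffers[2] = { buf_a, buf_b };
	struct osd_sg_entry sg[2] = {
		{ .offset = 0,     .len = 512 },
		{ .offset = 65536, .len = 512 },
	};

	return osd_req_write_sg_kern(or, obj, buffers, sg, 2);
}
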
This allows a caller to do a + * single write/read and scatter into multiple buffers. + * NOTE: Each buffer + len should not cross a page boundary. + */ +static struct bio *_create_sg_bios(struct osd_request *or, + void **buff, const struct osd_sg_entry *sglist, unsigned numentries) +{ + struct request_queue *q = osd_request_queue(or->osd_dev); + struct bio *bio; + unsigned i; + + bio = bio_kmalloc(GFP_KERNEL, numentries); + if (unlikely(!bio)) { + OSD_DEBUG("Faild to allocate BIO size=%u\n", numentries); + return ERR_PTR(-ENOMEM); + } + + for (i = 0; i < numentries; i++) { + unsigned offset = offset_in_page(buff[i]); + struct page *page = virt_to_page(buff[i]); + unsigned len = sglist[i].len; + unsigned added_len; + + BUG_ON(offset + len > PAGE_SIZE); + added_len = bio_add_pc_page(q, bio, page, len, offset); + if (unlikely(len != added_len)) { + OSD_DEBUG("bio_add_pc_page len(%d) != added_len(%d)\n", + len, added_len); + bio_put(bio); + return ERR_PTR(-ENOMEM); + } + } + + return bio; +} + +int osd_req_write_sg_kern(struct osd_request *or, + const struct osd_obj_id *obj, void **buff, + const struct osd_sg_entry *sglist, unsigned numentries) +{ + struct bio *bio = _create_sg_bios(or, buff, sglist, numentries); + if (IS_ERR(bio)) + return PTR_ERR(bio); + + bio->bi_rw |= REQ_WRITE; + osd_req_write_sg(or, obj, bio, sglist, numentries); + + return 0; +} +EXPORT_SYMBOL(osd_req_write_sg_kern); + +int osd_req_read_sg_kern(struct osd_request *or, + const struct osd_obj_id *obj, void **buff, + const struct osd_sg_entry *sglist, unsigned numentries) +{ + struct bio *bio = _create_sg_bios(or, buff, sglist, numentries); + if (IS_ERR(bio)) + return PTR_ERR(bio); + + osd_req_read_sg(or, obj, bio, sglist, numentries); + + return 0; +} +EXPORT_SYMBOL(osd_req_read_sg_kern); + + + void osd_req_get_attributes(struct osd_request *or, const struct osd_obj_id *obj) { @@ -1218,17 +1419,18 @@ int osd_req_add_get_attr_page(struct osd_request *or, or->get_attr.buff = attar_page; or->get_attr.total_bytes = max_page_len; - or->set_attr.buff = set_one_attr->val_ptr; - or->set_attr.total_bytes = set_one_attr->len; - cdbh->attrs_page.get_attr_page = cpu_to_be32(page_id); cdbh->attrs_page.get_attr_alloc_length = cpu_to_be32(max_page_len); - /* ocdb->attrs_page.get_attr_offset; */ + + if (!set_one_attr || !set_one_attr->attr_page) + return 0; /* The set is optional */ + + or->set_attr.buff = set_one_attr->val_ptr; + or->set_attr.total_bytes = set_one_attr->len; cdbh->attrs_page.set_attr_page = cpu_to_be32(set_one_attr->attr_page); cdbh->attrs_page.set_attr_id = cpu_to_be32(set_one_attr->attr_id); cdbh->attrs_page.set_attr_length = cpu_to_be32(set_one_attr->len); - /* ocdb->attrs_page.set_attr_offset; */ return 0; } EXPORT_SYMBOL(osd_req_add_get_attr_page); @@ -1248,11 +1450,14 @@ static int _osd_req_finalize_attr_page(struct osd_request *or) if (ret) return ret; + if (or->set_attr.total_bytes == 0) + return 0; + /* set one value */ cdbh->attrs_page.set_attr_offset = osd_req_encode_offset(or, or->out.total_bytes, &out_padding); - ret = _req_append_segment(or, out_padding, &or->enc_get_attr, NULL, + ret = _req_append_segment(or, out_padding, &or->set_attr, NULL, &or->out); return ret; } @@ -1276,7 +1481,8 @@ static inline void osd_sec_parms_set_in_offset(bool is_v1, } static int _osd_req_finalize_data_integrity(struct osd_request *or, - bool has_in, bool has_out, u64 out_data_bytes, const u8 *cap_key) + bool has_in, bool has_out, struct bio *out_data_bio, u64 out_data_bytes, + const u8 *cap_key) { struct 
osd_security_parameters *sec_parms = _osd_req_sec_params(or); int ret; @@ -1307,7 +1513,7 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or, or->out.last_seg = NULL; /* they are now all chained to request sign them all together */ - osd_sec_sign_data(&or->out_data_integ, or->out.req->bio, + osd_sec_sign_data(&or->out_data_integ, out_data_bio, cap_key); } @@ -1403,6 +1609,8 @@ int osd_finalize_request(struct osd_request *or, { struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb); bool has_in, has_out; + /* Save for data_integrity without the cdb_continuation */ + struct bio *out_data_bio = or->out.bio; u64 out_data_bytes = or->out.total_bytes; int ret; @@ -1418,9 +1626,14 @@ int osd_finalize_request(struct osd_request *or, osd_set_caps(&or->cdb, cap); has_in = or->in.bio || or->get_attr.total_bytes; - has_out = or->out.bio || or->set_attr.total_bytes || - or->enc_get_attr.total_bytes; + has_out = or->out.bio || or->cdb_cont.total_bytes || + or->set_attr.total_bytes || or->enc_get_attr.total_bytes; + ret = _osd_req_finalize_cdb_cont(or, cap_key); + if (ret) { + OSD_DEBUG("_osd_req_finalize_cdb_cont failed\n"); + return ret; + } ret = _init_blk_request(or, has_in, has_out); if (ret) { OSD_DEBUG("_init_blk_request failed\n"); @@ -1458,7 +1671,8 @@ int osd_finalize_request(struct osd_request *or, } ret = _osd_req_finalize_data_integrity(or, has_in, has_out, - out_data_bytes, cap_key); + out_data_bio, out_data_bytes, + cap_key); if (ret) return ret; diff --git a/drivers/scsi/pas16.h b/drivers/scsi/pas16.h index 8dc5b1a5f5d..a04281cace2 100644 --- a/drivers/scsi/pas16.h +++ b/drivers/scsi/pas16.h @@ -118,7 +118,7 @@ static int pas16_abort(Scsi_Cmnd *); static int pas16_biosparam(struct scsi_device *, struct block_device *, sector_t, int*); static int pas16_detect(struct scsi_host_template *); -static int pas16_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); +static int pas16_queue_command(struct Scsi_Host *, struct scsi_cmnd *); static int pas16_bus_reset(Scsi_Cmnd *); #ifndef CMD_PER_LUN diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c index 9326c2c1488..be3f33d31a9 100644 --- a/drivers/scsi/pcmcia/nsp_cs.c +++ b/drivers/scsi/pcmcia/nsp_cs.c @@ -184,7 +184,7 @@ static void nsp_scsi_done(struct scsi_cmnd *SCpnt) SCpnt->scsi_done(SCpnt); } -static int nsp_queuecommand(struct scsi_cmnd *SCpnt, +static int nsp_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { #ifdef NSP_DEBUG @@ -264,6 +264,8 @@ static int nsp_queuecommand(struct scsi_cmnd *SCpnt, return 0; } +static DEF_SCSI_QCMD(nsp_queuecommand) + /* * setup PIO FIFO transfer mode and enable/disable to data out */ diff --git a/drivers/scsi/pcmcia/nsp_cs.h b/drivers/scsi/pcmcia/nsp_cs.h index d68c9f267c5..7fc9a9d0a44 100644 --- a/drivers/scsi/pcmcia/nsp_cs.h +++ b/drivers/scsi/pcmcia/nsp_cs.h @@ -299,8 +299,7 @@ static int nsp_proc_info ( off_t offset, int length, int inout); -static int nsp_queuecommand(struct scsi_cmnd *SCpnt, - void (* done)(struct scsi_cmnd *SCpnt)); +static int nsp_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *SCpnt); /* Error handler */ /*static int nsp_eh_abort (struct scsi_cmnd *SCpnt);*/ diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c index 0ae27cb5cd6..8552296edaa 100644 --- a/drivers/scsi/pcmcia/sym53c500_cs.c +++ b/drivers/scsi/pcmcia/sym53c500_cs.c @@ -547,7 +547,7 @@ SYM53C500_info(struct Scsi_Host *SChost) } static int -SYM53C500_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) 
+SYM53C500_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { int i; int port_base = SCpnt->device->host->io_port; @@ -583,6 +583,8 @@ SYM53C500_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) return 0; } +static DEF_SCSI_QCMD(SYM53C500_queue) + static int SYM53C500_host_reset(struct scsi_cmnd *SCpnt) { diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index 8e38ca8cd10..7f064f9ca82 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -50,7 +50,6 @@ #include <linux/dma-mapping.h> #include <linux/pci.h> #include <linux/interrupt.h> -#include <linux/smp_lock.h> #include <scsi/libsas.h> #include <scsi/scsi_tcq.h> #include <scsi/sas_ata.h> diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index 4b8765785ae..300d59f389d 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -62,6 +62,7 @@ static unsigned int pmcraid_debug_log; static unsigned int pmcraid_disable_aen; static unsigned int pmcraid_log_level = IOASC_LOG_LEVEL_MUST; +static unsigned int pmcraid_enable_msix; /* * Data structures to support multiple adapters by the LLD. @@ -1594,10 +1595,12 @@ static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance) cfg_entry = &ccn_hcam->cfg_entry; fw_version = be16_to_cpu(pinstance->inq_data->fw_version); - pmcraid_info - ("CCN(%x): %x type: %x lost: %x flags: %x res: %x:%x:%x:%x\n", + pmcraid_info("CCN(%x): %x timestamp: %llx type: %x lost: %x flags: %x \ + res: %x:%x:%x:%x\n", pinstance->ccn.hcam->ilid, pinstance->ccn.hcam->op_code, + ((pinstance->ccn.hcam->timestamp1) | + ((pinstance->ccn.hcam->timestamp2 & 0xffffffffLL) << 32)), pinstance->ccn.hcam->notification_type, pinstance->ccn.hcam->notification_lost, pinstance->ccn.hcam->flags, @@ -1850,6 +1853,7 @@ static void pmcraid_process_ccn(struct pmcraid_cmd *cmd) * none */ static void pmcraid_initiate_reset(struct pmcraid_instance *); +static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd); static void pmcraid_process_ldn(struct pmcraid_cmd *cmd) { @@ -1881,6 +1885,10 @@ static void pmcraid_process_ldn(struct pmcraid_cmd *cmd) lock_flags); return; } + if (fd_ioasc == PMCRAID_IOASC_TIME_STAMP_OUT_OF_SYNC) { + pinstance->timestamp_error = 1; + pmcraid_set_timestamp(cmd); + } } else { dev_info(&pinstance->pdev->dev, "Host RCB(LDN) failed with IOASC: 0x%08X\n", ioasc); @@ -3363,7 +3371,7 @@ static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen) sg_size = buflen; for (i = 0; i < num_elem; i++) { - page = alloc_pages(GFP_KERNEL|GFP_DMA, order); + page = alloc_pages(GFP_KERNEL|GFP_DMA|__GFP_ZERO, order); if (!page) { for (j = i - 1; j >= 0; j--) __free_pages(sg_page(&scatterlist[j]), order); @@ -3471,7 +3479,7 @@ static int pmcraid_copy_sglist( * SCSI_MLQUEUE_DEVICE_BUSY if device is busy * SCSI_MLQUEUE_HOST_BUSY if host is busy */ -static int pmcraid_queuecommand( +static int pmcraid_queuecommand_lck( struct scsi_cmnd *scsi_cmd, void (*done) (struct scsi_cmnd *) ) @@ -3577,6 +3585,8 @@ static int pmcraid_queuecommand( return rc; } +static DEF_SCSI_QCMD(pmcraid_queuecommand) + /** * pmcraid_open -char node "open" entry, allowed only users with admin access */ @@ -3739,6 +3749,7 @@ static long pmcraid_ioctl_passthrough( unsigned long request_buffer; unsigned long request_offset; unsigned long lock_flags; + void *ioasa; u32 ioasc; int request_size; int buffer_size; @@ -3780,6 +3791,11 @@ static long pmcraid_ioctl_passthrough( rc = __copy_from_user(buffer, (struct pmcraid_passthrough_ioctl_buffer *) 
arg, sizeof(struct pmcraid_passthrough_ioctl_buffer)); + + ioasa = + (void *)(arg + + offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa)); + if (rc) { pmcraid_err("ioctl: can't copy passthrough buffer\n"); rc = -EFAULT; @@ -3947,22 +3963,14 @@ static long pmcraid_ioctl_passthrough( } out_handle_response: - /* If the command failed for any reason, copy entire IOASA buffer and - * return IOCTL success. If copying IOASA to user-buffer fails, return + /* copy entire IOASA buffer and return IOCTL success. + * If copying IOASA to user-buffer fails, return * EFAULT */ - if (PMCRAID_IOASC_SENSE_KEY(le32_to_cpu(cmd->ioa_cb->ioasa.ioasc))) { - void *ioasa = - (void *)(arg + - offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa)); - - pmcraid_info("command failed with %x\n", - le32_to_cpu(cmd->ioa_cb->ioasa.ioasc)); - if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa, - sizeof(struct pmcraid_ioasa))) { - pmcraid_err("failed to copy ioasa buffer to user\n"); - rc = -EFAULT; - } + if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa, + sizeof(struct pmcraid_ioasa))) { + pmcraid_err("failed to copy ioasa buffer to user\n"); + rc = -EFAULT; } /* If the data transfer was from device, copy the data onto user @@ -4684,7 +4692,8 @@ pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance) int rc; struct pci_dev *pdev = pinstance->pdev; - if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) { + if ((pmcraid_enable_msix) && + (pci_find_capability(pdev, PCI_CAP_ID_MSIX))) { int num_hrrq = PMCRAID_NUM_MSIX_VECTORS; struct msix_entry entries[PMCRAID_NUM_MSIX_VECTORS]; int i; @@ -5147,6 +5156,16 @@ static void pmcraid_release_buffers(struct pmcraid_instance *pinstance) pinstance->inq_data = NULL; pinstance->inq_data_baddr = 0; } + + if (pinstance->timestamp_data != NULL) { + pci_free_consistent(pinstance->pdev, + sizeof(struct pmcraid_timestamp_data), + pinstance->timestamp_data, + pinstance->timestamp_data_baddr); + + pinstance->timestamp_data = NULL; + pinstance->timestamp_data_baddr = 0; + } } /** @@ -5205,6 +5224,20 @@ static int __devinit pmcraid_init_buffers(struct pmcraid_instance *pinstance) return -ENOMEM; } + /* allocate DMAable memory for set timestamp data buffer */ + pinstance->timestamp_data = pci_alloc_consistent( + pinstance->pdev, + sizeof(struct pmcraid_timestamp_data), + &pinstance->timestamp_data_baddr); + + if (pinstance->timestamp_data == NULL) { + pmcraid_err("couldn't allocate DMA memory for \ + set time_stamp \n"); + pmcraid_release_buffers(pinstance); + return -ENOMEM; + } + + /* Initialize all the command blocks and add them to free pool. 
No * need to lock (free_pool_lock) as this is done in initialization * itself @@ -5610,6 +5643,68 @@ static void pmcraid_set_supported_devs(struct pmcraid_cmd *cmd) } /** + * pmcraid_set_timestamp - set the timestamp to IOAFP + * + * @cmd: pointer to pmcraid_cmd structure + * + * Return Value + * 0 for success or non-zero for failure cases + */ +static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd) +{ + struct pmcraid_instance *pinstance = cmd->drv_inst; + struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; + __be32 time_stamp_len = cpu_to_be32(PMCRAID_TIMESTAMP_LEN); + struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl; + + struct timeval tv; + __le64 timestamp; + + do_gettimeofday(&tv); + timestamp = tv.tv_sec * 1000; + + pinstance->timestamp_data->timestamp[0] = (__u8)(timestamp); + pinstance->timestamp_data->timestamp[1] = (__u8)((timestamp) >> 8); + pinstance->timestamp_data->timestamp[2] = (__u8)((timestamp) >> 16); + pinstance->timestamp_data->timestamp[3] = (__u8)((timestamp) >> 24); + pinstance->timestamp_data->timestamp[4] = (__u8)((timestamp) >> 32); + pinstance->timestamp_data->timestamp[5] = (__u8)((timestamp) >> 40); + + pmcraid_reinit_cmdblk(cmd); + ioarcb->request_type = REQ_TYPE_SCSI; + ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE); + ioarcb->cdb[0] = PMCRAID_SCSI_SET_TIMESTAMP; + ioarcb->cdb[1] = PMCRAID_SCSI_SERVICE_ACTION; + memcpy(&(ioarcb->cdb[6]), &time_stamp_len, sizeof(time_stamp_len)); + + ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) + + offsetof(struct pmcraid_ioarcb, + add_data.u.ioadl[0])); + ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc)); + ioarcb->ioarcb_bus_addr &= ~(0x1FULL); + + ioarcb->request_flags0 |= NO_LINK_DESCS; + ioarcb->request_flags0 |= TRANSFER_DIR_WRITE; + ioarcb->data_transfer_length = + cpu_to_le32(sizeof(struct pmcraid_timestamp_data)); + ioadl = &(ioarcb->add_data.u.ioadl[0]); + ioadl->flags = IOADL_FLAGS_LAST_DESC; + ioadl->address = cpu_to_le64(pinstance->timestamp_data_baddr); + ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_timestamp_data)); + + if (!pinstance->timestamp_error) { + pinstance->timestamp_error = 0; + pmcraid_send_cmd(cmd, pmcraid_set_supported_devs, + PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler); + } else { + pmcraid_send_cmd(cmd, pmcraid_return_cmd, + PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler); + return; + } +} + + +/** * pmcraid_init_res_table - Initialize the resource table * @cmd: pointer to pmcraid command struct * @@ -5720,7 +5815,7 @@ static void pmcraid_init_res_table(struct pmcraid_cmd *cmd) /* release the resource list lock */ spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags); - pmcraid_set_supported_devs(cmd); + pmcraid_set_timestamp(cmd); } /** @@ -6054,10 +6149,10 @@ out_init: static void __exit pmcraid_exit(void) { pmcraid_netlink_release(); - class_destroy(pmcraid_class); unregister_chrdev_region(MKDEV(pmcraid_major, 0), PMCRAID_MAX_ADAPTERS); pci_unregister_driver(&pmcraid_driver); + class_destroy(pmcraid_class); } module_init(pmcraid_init); diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h index 6cfa0145a1d..4db210d9394 100644 --- a/drivers/scsi/pmcraid.h +++ b/drivers/scsi/pmcraid.h @@ -42,7 +42,7 @@ */ #define PMCRAID_DRIVER_NAME "PMC MaxRAID" #define PMCRAID_DEVFILE "pmcsas" -#define PMCRAID_DRIVER_VERSION "2.0.2" +#define PMCRAID_DRIVER_VERSION "1.0.3" #define PMCRAID_DRIVER_DATE __DATE__ #define PMCRAID_FW_VERSION_1 0x002 @@ -184,6 +184,7 @@ #define PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE 
0x05250000 #define PMCRAID_IOASC_AC_TERMINATED_BY_HOST 0x0B5A0000 #define PMCRAID_IOASC_UA_BUS_WAS_RESET 0x06290000 +#define PMCRAID_IOASC_TIME_STAMP_OUT_OF_SYNC 0x06908B00 #define PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER 0x06298000 /* Driver defined IOASCs */ @@ -332,11 +333,9 @@ struct pmcraid_config_table_entry { __u8 lun[PMCRAID_LUN_LEN]; } __attribute__((packed, aligned(4))); -/* extended configuration table sizes are of 64 bytes in size */ -#define PMCRAID_CFGTE_EXT_SIZE 32 +/* extended configuration table sizes are also of 32 bytes in size */ struct pmcraid_config_table_entry_ext { struct pmcraid_config_table_entry cfgte; - __u8 cfgte_ext[PMCRAID_CFGTE_EXT_SIZE]; }; /* resource types (config_table_entry.resource_type values) */ @@ -561,6 +560,17 @@ struct pmcraid_inquiry_data { __u8 reserved3[16]; }; +#define PMCRAID_TIMESTAMP_LEN 12 +#define PMCRAID_REQ_TM_STR_LEN 6 +#define PMCRAID_SCSI_SET_TIMESTAMP 0xA4 +#define PMCRAID_SCSI_SERVICE_ACTION 0x0F + +struct pmcraid_timestamp_data { + __u8 reserved1[4]; + __u8 timestamp[PMCRAID_REQ_TM_STR_LEN]; /* current time value */ + __u8 reserved2[2]; +}; + /* pmcraid_cmd - LLD representation of SCSI command */ struct pmcraid_cmd { @@ -568,7 +578,6 @@ struct pmcraid_cmd { struct pmcraid_control_block *ioa_cb; dma_addr_t ioa_cb_bus_addr; dma_addr_t dma_handle; - u8 *sense_buffer; /* pointer to mid layer structure of SCSI commands */ struct scsi_cmnd *scsi_cmd; @@ -705,6 +714,9 @@ struct pmcraid_instance { struct pmcraid_inquiry_data *inq_data; dma_addr_t inq_data_baddr; + struct pmcraid_timestamp_data *timestamp_data; + dma_addr_t timestamp_data_baddr; + /* size of configuration table entry, varies based on the firmware */ u32 config_table_entry_size; @@ -791,6 +803,7 @@ struct pmcraid_instance { #define SHUTDOWN_NONE 0x0 #define SHUTDOWN_NORMAL 0x1 #define SHUTDOWN_ABBREV 0x2 + u32 timestamp_error:1; /* indicate set timestamp for out of sync */ }; @@ -1056,10 +1069,10 @@ struct pmcraid_passthrough_ioctl_buffer { #define PMCRAID_PASSTHROUGH_IOCTL 'F' #define DRV_IOCTL(n, size) \ - _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size)) + _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size)) #define FMW_IOCTL(n, size) \ - _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL, (n), (size)) + _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL, (n), (size)) /* * _ARGSIZE: macro that gives size of the argument type passed to an IOCTL cmd. diff --git a/drivers/scsi/ppa.c b/drivers/scsi/ppa.c index 7bc2d796e40..d164c963936 100644 --- a/drivers/scsi/ppa.c +++ b/drivers/scsi/ppa.c @@ -798,7 +798,7 @@ static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd) return 0; } -static int ppa_queuecommand(struct scsi_cmnd *cmd, +static int ppa_queuecommand_lck(struct scsi_cmnd *cmd, void (*done) (struct scsi_cmnd *)) { ppa_struct *dev = ppa_dev(cmd->device->host); @@ -821,6 +821,8 @@ static int ppa_queuecommand(struct scsi_cmnd *cmd, return 0; } +static DEF_SCSI_QCMD(ppa_queuecommand) + /* * Apparently the disk->capacity attribute is off by 1 sector * for all disk drives. 
We add the one here, but it should really diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c index 92ffbb51049..cd178b9e40c 100644 --- a/drivers/scsi/ps3rom.c +++ b/drivers/scsi/ps3rom.c @@ -211,7 +211,7 @@ static int ps3rom_write_request(struct ps3_storage_device *dev, return 0; } -static int ps3rom_queuecommand(struct scsi_cmnd *cmd, +static int ps3rom_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { struct ps3rom_private *priv = shost_priv(cmd->device->host); @@ -260,6 +260,8 @@ static int ps3rom_queuecommand(struct scsi_cmnd *cmd, return 0; } +static DEF_SCSI_QCMD(ps3rom_queuecommand) + static int decode_lv1_status(u64 status, unsigned char *sense_key, unsigned char *asc, unsigned char *ascq) { diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index b8166ecfd0e..5dec684bf01 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c @@ -727,7 +727,7 @@ qla1280_info(struct Scsi_Host *host) * context which is a big NO! NO!. **************************************************************************/ static int -qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) +qla1280_queuecommand_lck(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) { struct Scsi_Host *host = cmd->device->host; struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata; @@ -756,6 +756,8 @@ qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *)) return status; } +static DEF_SCSI_QCMD(qla1280_queuecommand) + enum action { ABORT_COMMAND, DEVICE_RESET, diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 2ff4342ae36..bc8194f7462 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -1538,6 +1538,10 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport) if (!fcport) return; + /* Now that the rport has been deleted, set the fcport state to + FCS_DEVICE_DEAD */ + atomic_set(&fcport->state, FCS_DEVICE_DEAD); + /* * Transport has effectively 'deleted' the rport, clear * all local references. diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index fdfbf83a633..31a4121a2be 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -1307,6 +1307,125 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job) } static int +qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha, + uint8_t is_update) +{ + uint32_t start = 0; + int valid = 0; + + bsg_job->reply->reply_payload_rcv_len = 0; + + if (unlikely(pci_channel_offline(ha->pdev))) + return -EINVAL; + + start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; + if (start > ha->optrom_size) + return -EINVAL; + + if (ha->optrom_state != QLA_SWAITING) + return -EBUSY; + + ha->optrom_region_start = start; + + if (is_update) { + if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0) + valid = 1; + else if (start == (ha->flt_region_boot * 4) || + start == (ha->flt_region_fw * 4)) + valid = 1; + else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || + IS_QLA8XXX_TYPE(ha)) + valid = 1; + if (!valid) { + qla_printk(KERN_WARNING, ha, + "Invalid start region 0x%x/0x%x.\n", + start, bsg_job->request_payload.payload_len); + return -EINVAL; + } + + ha->optrom_region_size = start + + bsg_job->request_payload.payload_len > ha->optrom_size ? + ha->optrom_size - start : + bsg_job->request_payload.payload_len; + ha->optrom_state = QLA_SWRITING; + } else { + ha->optrom_region_size = start + + bsg_job->reply_payload.payload_len > ha->optrom_size ? 
+ ha->optrom_size - start : + bsg_job->reply_payload.payload_len; + ha->optrom_state = QLA_SREADING; + } + + ha->optrom_buffer = vmalloc(ha->optrom_region_size); + if (!ha->optrom_buffer) { + qla_printk(KERN_WARNING, ha, + "Read: Unable to allocate memory for optrom retrieval " + "(%x).\n", ha->optrom_region_size); + + ha->optrom_state = QLA_SWAITING; + return -ENOMEM; + } + + memset(ha->optrom_buffer, 0, ha->optrom_region_size); + return 0; +} + +static int +qla2x00_read_optrom(struct fc_bsg_job *bsg_job) +{ + struct Scsi_Host *host = bsg_job->shost; + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + int rval = 0; + + rval = qla2x00_optrom_setup(bsg_job, ha, 0); + if (rval) + return rval; + + ha->isp_ops->read_optrom(vha, ha->optrom_buffer, + ha->optrom_region_start, ha->optrom_region_size); + + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, ha->optrom_buffer, + ha->optrom_region_size); + + bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size; + bsg_job->reply->result = DID_OK; + vfree(ha->optrom_buffer); + ha->optrom_buffer = NULL; + ha->optrom_state = QLA_SWAITING; + bsg_job->job_done(bsg_job); + return rval; +} + +static int +qla2x00_update_optrom(struct fc_bsg_job *bsg_job) +{ + struct Scsi_Host *host = bsg_job->shost; + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + int rval = 0; + + rval = qla2x00_optrom_setup(bsg_job, ha, 1); + if (rval) + return rval; + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, ha->optrom_buffer, + ha->optrom_region_size); + + ha->isp_ops->write_optrom(vha, ha->optrom_buffer, + ha->optrom_region_start, ha->optrom_region_size); + + bsg_job->reply->result = DID_OK; + vfree(ha->optrom_buffer); + ha->optrom_buffer = NULL; + ha->optrom_state = QLA_SWAITING; + bsg_job->job_done(bsg_job); + return rval; +} + +static int qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) { switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) { @@ -1328,6 +1447,12 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) case QL_VND_FCP_PRIO_CFG_CMD: return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job); + case QL_VND_READ_FLASH: + return qla2x00_read_optrom(bsg_job); + + case QL_VND_UPDATE_FLASH: + return qla2x00_update_optrom(bsg_job); + default: bsg_job->reply->result = (DID_ERROR << 16); bsg_job->job_done(bsg_job); diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h index cc7c52f87a1..074a999c701 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.h +++ b/drivers/scsi/qla2xxx/qla_bsg.h @@ -14,6 +14,8 @@ #define QL_VND_A84_MGMT_CMD 0x04 #define QL_VND_IIDMA 0x05 #define QL_VND_FCP_PRIO_CFG_CMD 0x06 +#define QL_VND_READ_FLASH 0x07 +#define QL_VND_UPDATE_FLASH 0x08 /* BSG definations for interpreting CommandSent field */ #define INT_DEF_LB_LOOPBACK_CMD 0 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index e1d3ad40a94..9ce539d4557 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -1700,9 +1700,7 @@ typedef struct fc_port { atomic_t state; uint32_t flags; - int port_login_retry_count; int login_retry; - atomic_t port_down_timer; struct fc_rport *rport, *drport; u32 supported_classes; @@ -2411,7 +2409,6 @@ struct qla_hw_data { uint32_t enable_target_reset :1; uint32_t enable_lip_full_login :1; uint32_t enable_led_scheme :1; - uint32_t inta_enabled :1; uint32_t msi_enabled :1; uint32_t msix_enabled :1; uint32_t disable_serdes :1; diff --git 
a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index c33dec827e1..9382a816c13 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -92,6 +92,7 @@ extern int ql2xshiftctondsd; extern int ql2xdbwr; extern int ql2xdontresethba; extern int ql2xasynctmfenable; +extern int ql2xgffidenable; extern int ql2xenabledif; extern int ql2xenablehba_err_chk; extern int ql2xtargetreset; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 3cafbef4073..259f5113749 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -71,7 +71,7 @@ qla2x00_ctx_sp_free(srb_t *sp) struct srb_iocb *iocb = ctx->u.iocb_cmd; struct scsi_qla_host *vha = sp->fcport->vha; - del_timer_sync(&iocb->timer); + del_timer(&iocb->timer); kfree(iocb); kfree(ctx); mempool_free(sp, sp->fcport->vha->hw->srb_mempool); @@ -1344,6 +1344,13 @@ cont_alloc: qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for " "firmware dump!!!\n", dump_size / 1024); + if (ha->fce) { + dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, + ha->fce_dma); + ha->fce = NULL; + ha->fce_dma = 0; + } + if (ha->eft) { dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft, ha->eft_dma); @@ -1818,14 +1825,14 @@ qla2x00_init_rings(scsi_qla_host_t *vha) qla2x00_init_response_q_entries(rsp); } - spin_lock_irqsave(&ha->vport_slock, flags); + spin_lock(&ha->vport_slock); /* Clear RSCN queue. */ list_for_each_entry(vp, &ha->vp_list, list) { vp->rscn_in_ptr = 0; vp->rscn_out_ptr = 0; } - spin_unlock_irqrestore(&ha->vport_slock, flags); + spin_unlock(&ha->vport_slock); ha->isp_ops->config_rings(vha); @@ -2916,21 +2923,13 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) void qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) { - struct qla_hw_data *ha = vha->hw; - fcport->vha = vha; fcport->login_retry = 0; - fcport->port_login_retry_count = ha->port_down_retry_count * - PORT_RETRY_TIME; - atomic_set(&fcport->port_down_timer, ha->port_down_retry_count * - PORT_RETRY_TIME); fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); qla2x00_iidma_fcport(vha, fcport); - - atomic_set(&fcport->state, FCS_ONLINE); - qla2x00_reg_remote_port(vha, fcport); + atomic_set(&fcport->state, FCS_ONLINE); } /* @@ -3292,8 +3291,9 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha, continue; /* Bypass ports whose FCP-4 type is not FCP_SCSI */ - if (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI && - new_fcport->fc4_type != FC4_TYPE_UNKNOWN) + if (ql2xgffidenable && + (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI && + new_fcport->fc4_type != FC4_TYPE_UNKNOWN)) continue; /* Locate matching device in database. 
*/ diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 579f0285466..4c1ba6263eb 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -992,8 +992,8 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, ha = vha->hw; DEBUG18(printk(KERN_DEBUG - "%s(%ld): Executing cmd sp %p, pid=%ld, prot_op=%u.\n", __func__, - vha->host_no, sp, cmd->serial_number, scsi_get_prot_op(sp->cmd))); + "%s(%ld): Executing cmd sp %p, prot_op=%u.\n", __func__, + vha->host_no, sp, scsi_get_prot_op(sp->cmd))); cmd_pkt->vp_index = sp->fcport->vp_idx; @@ -1061,6 +1061,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, fcp_cmnd->additional_cdb_len |= 2; int_to_scsilun(sp->cmd->device->lun, &fcp_cmnd->lun); + host_to_fcp_swap((uint8_t *)&fcp_cmnd->lun, sizeof(fcp_cmnd->lun)); memcpy(fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(fcp_cmnd_len); cmd_pkt->fcp_cmnd_dseg_address[0] = cpu_to_le32( diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index e0e43d9e7ed..7f77898486a 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1240,12 +1240,6 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, case LSC_SCODE_NPORT_USED: data[0] = MBS_LOOP_ID_USED; break; - case LSC_SCODE_CMD_FAILED: - if ((iop[1] & 0xff) == 0x05) { - data[0] = MBS_NOT_LOGGED_IN; - break; - } - /* Fall through. */ default: data[0] = MBS_COMMAND_ERROR; break; @@ -1431,9 +1425,8 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, rsp->status_srb = sp; DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) " - "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no, - cp->device->channel, cp->device->id, cp->device->lun, cp, - cp->serial_number)); + "cmd=%p\n", __func__, sp->fcport->vha->host_no, + cp->device->channel, cp->device->id, cp->device->lun, cp)); if (sense_len) DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len)); } @@ -1757,6 +1750,8 @@ check_scsi_status: case CS_INCOMPLETE: case CS_PORT_UNAVAILABLE: case CS_TIMEOUT: + case CS_RESET: + /* * We are going to have the fc class block the rport * while we try to recover so instruct the mid layer @@ -1781,10 +1776,6 @@ check_scsi_status: qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); break; - case CS_RESET: - cp->result = DID_TRANSPORT_DISRUPTED << 16; - break; - case CS_ABORTED: cp->result = DID_RESET << 16; break; @@ -1801,10 +1792,10 @@ out: if (logit) DEBUG2(qla_printk(KERN_INFO, ha, "scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) " - "oxid=0x%x ser=0x%lx cdb=%02x%02x%02x len=0x%x " + "oxid=0x%x cdb=%02x%02x%02x len=0x%x " "rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no, cp->device->id, cp->device->lun, comp_status, scsi_status, - cp->result, ox_id, cp->serial_number, cp->cmnd[0], + cp->result, ox_id, cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len, resid_len, fw_resid_len)); @@ -2500,14 +2491,15 @@ skip_msix: skip_msi: ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler, - IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp); + ha->flags.msi_enabled ? 
0 : IRQF_SHARED, + QLA2XXX_DRIVER_NAME, rsp); if (ret) { qla_printk(KERN_WARNING, ha, "Failed to reserve interrupt %d already in use.\n", ha->pdev->irq); goto fail; } - ha->flags.inta_enabled = 1; + clear_risc_ints: /* diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 8d9edfb3980..ae2acacc000 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -2749,6 +2749,7 @@ sufficient_dsds: goto queuing_error_fcp_cmnd; int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun); + host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); /* build FCP_CMND IU */ memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd)); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 800ea926975..2c0876c81a3 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -160,6 +160,11 @@ MODULE_PARM_DESC(ql2xtargetreset, "Enable target reset." "Default is 1 - use hw defaults."); +int ql2xgffidenable; +module_param(ql2xgffidenable, int, S_IRUGO|S_IRUSR); +MODULE_PARM_DESC(ql2xgffidenable, + "Enables GFF_ID checks of port type. " + "Default is 0 - Do not use GFF_ID information."); int ql2xasynctmfenable; module_param(ql2xasynctmfenable, int, S_IRUGO|S_IRUSR); @@ -174,8 +179,7 @@ static int qla2xxx_slave_alloc(struct scsi_device *); static int qla2xxx_scan_finished(struct Scsi_Host *, unsigned long time); static void qla2xxx_scan_start(struct Scsi_Host *); static void qla2xxx_slave_destroy(struct scsi_device *); -static int qla2xxx_queuecommand(struct scsi_cmnd *cmd, - void (*fn)(struct scsi_cmnd *)); +static int qla2xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd); static int qla2xxx_eh_abort(struct scsi_cmnd *); static int qla2xxx_eh_device_reset(struct scsi_cmnd *); static int qla2xxx_eh_target_reset(struct scsi_cmnd *); @@ -255,6 +259,7 @@ static void qla2x00_rst_aen(scsi_qla_host_t *); static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t, struct req_que **, struct rsp_que **); +static void qla2x00_free_fw_dump(struct qla_hw_data *); static void qla2x00_mem_free(struct qla_hw_data *); static void qla2x00_sp_free_dma(srb_t *); @@ -529,7 +534,7 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport, } static int -qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) +qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { scsi_qla_host_t *vha = shost_priv(cmd->device->host); fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; @@ -539,6 +544,7 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) srb_t *sp; int rval; + spin_unlock_irq(vha->host->host_lock); if (ha->flags.eeh_busy) { if (ha->flags.pci_channel_io_perm_failure) cmd->result = DID_NO_CONNECT << 16; @@ -553,10 +559,6 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) goto qc24_fail_command; } - /* Close window on fcport/rport state-transitioning. 
*/ - if (fcport->drport) - goto qc24_target_busy; - if (!vha->flags.difdix_supported && scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { DEBUG2(qla_printk(KERN_ERR, ha, @@ -567,15 +569,14 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) } if (atomic_read(&fcport->state) != FCS_ONLINE) { if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || - atomic_read(&base_vha->loop_state) == LOOP_DEAD) { + atomic_read(&fcport->state) == FCS_DEVICE_LOST || + atomic_read(&base_vha->loop_state) == LOOP_DEAD) { cmd->result = DID_NO_CONNECT << 16; goto qc24_fail_command; } goto qc24_target_busy; } - spin_unlock_irq(vha->host->host_lock); - sp = qla2x00_get_new_sp(base_vha, fcport, cmd, done); if (!sp) goto qc24_host_busy_lock; @@ -597,14 +598,18 @@ qc24_host_busy_lock: return SCSI_MLQUEUE_HOST_BUSY; qc24_target_busy: + spin_lock_irq(vha->host->host_lock); return SCSI_MLQUEUE_TARGET_BUSY; qc24_fail_command: + spin_lock_irq(vha->host->host_lock); done(cmd); return 0; } +static DEF_SCSI_QCMD(qla2xxx_queuecommand) + /* * qla2x00_eh_wait_on_command @@ -824,81 +829,58 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) { scsi_qla_host_t *vha = shost_priv(cmd->device->host); srb_t *sp; - int ret, i; + int ret = SUCCESS; unsigned int id, lun; - unsigned long serial; unsigned long flags; int wait = 0; struct qla_hw_data *ha = vha->hw; - struct req_que *req = vha->req; - srb_t *spt; - int got_ref = 0; fc_block_scsi_eh(cmd); if (!CMD_SP(cmd)) return SUCCESS; - ret = SUCCESS; - id = cmd->device->id; lun = cmd->device->lun; - serial = cmd->serial_number; - spt = (srb_t *) CMD_SP(cmd); - if (!spt) - return SUCCESS; - /* Check active list for command command. */ spin_lock_irqsave(&ha->hardware_lock, flags); - for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) { - sp = req->outstanding_cmds[i]; - - if (sp == NULL) - continue; - if ((sp->ctx) && !(sp->flags & SRB_FCP_CMND_DMA_VALID) && - !IS_PROT_IO(sp)) - continue; - if (sp->cmd != cmd) - continue; + sp = (srb_t *) CMD_SP(cmd); + if (!sp) { + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return SUCCESS; + } - DEBUG2(printk("%s(%ld): aborting sp %p from RISC." - " pid=%ld.\n", __func__, vha->host_no, sp, serial)); + DEBUG2(printk("%s(%ld): aborting sp %p from RISC.", + __func__, vha->host_no, sp)); - /* Get a reference to the sp and drop the lock.*/ - sp_get(sp); - got_ref++; + /* Get a reference to the sp and drop the lock.*/ + sp_get(sp); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (ha->isp_ops->abort_command(sp)) { - DEBUG2(printk("%s(%ld): abort_command " - "mbx failed.\n", __func__, vha->host_no)); - ret = FAILED; - } else { - DEBUG3(printk("%s(%ld): abort_command " - "mbx success.\n", __func__, vha->host_no)); - wait = 1; - } - spin_lock_irqsave(&ha->hardware_lock, flags); - break; - } spin_unlock_irqrestore(&ha->hardware_lock, flags); + if (ha->isp_ops->abort_command(sp)) { + DEBUG2(printk("%s(%ld): abort_command " + "mbx failed.\n", __func__, vha->host_no)); + ret = FAILED; + } else { + DEBUG3(printk("%s(%ld): abort_command " + "mbx success.\n", __func__, vha->host_no)); + wait = 1; + } + qla2x00_sp_compl(ha, sp); /* Wait for the command to be returned. 
*/ if (wait) { if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) { qla_printk(KERN_ERR, ha, - "scsi(%ld:%d:%d): Abort handler timed out -- %lx " - "%x.\n", vha->host_no, id, lun, serial, ret); + "scsi(%ld:%d:%d): Abort handler timed out -- %x.\n", + vha->host_no, id, lun, ret); ret = FAILED; } } - if (got_ref) - qla2x00_sp_compl(ha, sp); - qla_printk(KERN_INFO, ha, - "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n", - vha->host_no, id, lun, wait, serial, ret); + "scsi(%ld:%d:%d): Abort command issued -- %d %x.\n", + vha->host_no, id, lun, wait, ret); return ret; } @@ -1043,13 +1025,11 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata; int ret = FAILED; unsigned int id, lun; - unsigned long serial; fc_block_scsi_eh(cmd); id = cmd->device->id; lun = cmd->device->lun; - serial = cmd->serial_number; if (!fcport) return ret; @@ -1104,14 +1084,12 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) struct qla_hw_data *ha = vha->hw; int ret = FAILED; unsigned int id, lun; - unsigned long serial; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); fc_block_scsi_eh(cmd); id = cmd->device->id; lun = cmd->device->lun; - serial = cmd->serial_number; if (!fcport) return ret; @@ -1974,6 +1952,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->bars = bars; ha->mem_only = mem_only; spin_lock_init(&ha->hardware_lock); + spin_lock_init(&ha->vport_slock); /* Set ISP-type information. */ qla2x00_set_isp_flags(ha); @@ -2085,6 +2064,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->init_cb_size = sizeof(struct mid_init_cb_81xx); ha->gid_list_info_size = 8; ha->optrom_size = OPTROM_SIZE_82XX; + ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; ha->isp_ops = &qla82xx_isp_ops; ha->flash_conf_off = FARX_ACCESS_FLASH_CONF; ha->flash_data_off = FARX_ACCESS_FLASH_DATA; @@ -2342,6 +2322,42 @@ probe_out: } static void +qla2x00_shutdown(struct pci_dev *pdev) +{ + scsi_qla_host_t *vha; + struct qla_hw_data *ha; + + vha = pci_get_drvdata(pdev); + ha = vha->hw; + + /* Turn-off FCE trace */ + if (ha->flags.fce_enabled) { + qla2x00_disable_fce_trace(vha, NULL, NULL); + ha->flags.fce_enabled = 0; + } + + /* Turn-off EFT trace */ + if (ha->eft) + qla2x00_disable_eft_trace(vha); + + /* Stop currently executing firmware. */ + qla2x00_try_to_stop_firmware(vha); + + /* Turn adapter off line */ + vha->flags.online = 0; + + /* turn-off interrupts on the card */ + if (ha->interrupts_on) { + vha->flags.init_done = 0; + ha->isp_ops->disable_intrs(ha); + } + + qla2x00_free_irqs(vha); + + qla2x00_free_fw_dump(ha); +} + +static void qla2x00_remove_one(struct pci_dev *pdev) { scsi_qla_host_t *base_vha, *vha; @@ -2597,12 +2613,12 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer) if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD) continue; if (atomic_read(&fcport->state) == FCS_ONLINE) { + atomic_set(&fcport->state, FCS_DEVICE_LOST); if (defer) qla2x00_schedule_rport_del(vha, fcport, defer); else if (vha->vp_idx == fcport->vp_idx) qla2x00_schedule_rport_del(vha, fcport, defer); } - atomic_set(&fcport->state, FCS_DEVICE_LOST); } } @@ -2830,28 +2846,48 @@ fail: } /* -* qla2x00_mem_free -* Frees all adapter allocated memory. +* qla2x00_free_fw_dump +* Frees fw dump stuff. * * Input: -* ha = adapter block pointer. +* ha = adapter block pointer. 
*/ static void -qla2x00_mem_free(struct qla_hw_data *ha) +qla2x00_free_fw_dump(struct qla_hw_data *ha) { - if (ha->srb_mempool) - mempool_destroy(ha->srb_mempool); - if (ha->fce) dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, - ha->fce_dma); + ha->fce_dma); if (ha->fw_dump) { if (ha->eft) dma_free_coherent(&ha->pdev->dev, - ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma); + ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma); vfree(ha->fw_dump); } + ha->fce = NULL; + ha->fce_dma = 0; + ha->eft = NULL; + ha->eft_dma = 0; + ha->fw_dump = NULL; + ha->fw_dumped = 0; + ha->fw_dump_reading = 0; +} + +/* +* qla2x00_mem_free +* Frees all adapter allocated memory. +* +* Input: +* ha = adapter block pointer. +*/ +static void +qla2x00_mem_free(struct qla_hw_data *ha) +{ + qla2x00_free_fw_dump(ha); + + if (ha->srb_mempool) + mempool_destroy(ha->srb_mempool); if (ha->dcbx_tlv) dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE, @@ -2925,8 +2961,6 @@ qla2x00_mem_free(struct qla_hw_data *ha) ha->srb_mempool = NULL; ha->ctx_mempool = NULL; - ha->eft = NULL; - ha->eft_dma = 0; ha->sns_cmd = NULL; ha->sns_cmd_dma = 0; ha->ct_sns = NULL; @@ -2946,10 +2980,6 @@ qla2x00_mem_free(struct qla_hw_data *ha) ha->gid_list = NULL; ha->gid_list_dma = 0; - - ha->fw_dump = NULL; - ha->fw_dumped = 0; - ha->fw_dump_reading = 0; } struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, @@ -3547,11 +3577,9 @@ void qla2x00_timer(scsi_qla_host_t *vha) { unsigned long cpu_flags = 0; - fc_port_t *fcport; int start_dpc = 0; int index; srb_t *sp; - int t; uint16_t w; struct qla_hw_data *ha = vha->hw; struct req_que *req; @@ -3567,34 +3595,6 @@ qla2x00_timer(scsi_qla_host_t *vha) /* Hardware read to raise pending EEH errors during mailbox waits. */ if (!pci_channel_offline(ha->pdev)) pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w); - /* - * Ports - Port down timer. - * - * Whenever, a port is in the LOST state we start decrementing its port - * down timer every second until it reaches zero. Once it reaches zero - * the port it marked DEAD. - */ - t = 0; - list_for_each_entry(fcport, &vha->vp_fcports, list) { - if (fcport->port_type != FCT_TARGET) - continue; - - if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) { - - if (atomic_read(&fcport->port_down_timer) == 0) - continue; - - if (atomic_dec_and_test(&fcport->port_down_timer) != 0) - atomic_set(&fcport->state, FCS_DEVICE_DEAD); - - DEBUG(printk("scsi(%ld): fcport-%d - port retry count: " - "%d remaining\n", - vha->host_no, - t, atomic_read(&fcport->port_down_timer))); - } - t++; - } /* End of for fcport */ - /* Loop down handler. 
*/ if (atomic_read(&vha->loop_down_timer) > 0 && @@ -4079,6 +4079,7 @@ static struct pci_driver qla2xxx_pci_driver = { .id_table = qla2xxx_pci_tbl, .probe = qla2x00_probe_one, .remove = qla2x00_remove_one, + .shutdown = qla2x00_shutdown, .err_handler = &qla2xxx_err_handler, }; diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 8edbccb3232..cf0075a2d0c 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,9 +7,9 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.03.04-k0" +#define QLA2XXX_VERSION "8.03.05-k0" #define QLA_DRIVER_MAJOR_VER 8 #define QLA_DRIVER_MINOR_VER 3 -#define QLA_DRIVER_PATCH_VER 4 +#define QLA_DRIVER_PATCH_VER 5 #define QLA_DRIVER_BETA_VER 0 diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c index cbceb0ebabf..edcf048215d 100644 --- a/drivers/scsi/qla4xxx/ql4_dbg.c +++ b/drivers/scsi/qla4xxx/ql4_dbg.c @@ -30,3 +30,104 @@ void qla4xxx_dump_buffer(void *b, uint32_t size) printk(KERN_INFO "\n"); } +void qla4xxx_dump_registers(struct scsi_qla_host *ha) +{ + uint8_t i; + + if (is_qla8022(ha)) { + for (i = 1; i < MBOX_REG_COUNT; i++) + printk(KERN_INFO "mailbox[%d] = 0x%08X\n", + i, readl(&ha->qla4_8xxx_reg->mailbox_in[i])); + return; + } + + for (i = 0; i < MBOX_REG_COUNT; i++) { + printk(KERN_INFO "0x%02X mailbox[%d] = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, mailbox[i]), i, + readw(&ha->reg->mailbox[i])); + } + + printk(KERN_INFO "0x%02X flash_address = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, flash_address), + readw(&ha->reg->flash_address)); + printk(KERN_INFO "0x%02X flash_data = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, flash_data), + readw(&ha->reg->flash_data)); + printk(KERN_INFO "0x%02X ctrl_status = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, ctrl_status), + readw(&ha->reg->ctrl_status)); + + if (is_qla4010(ha)) { + printk(KERN_INFO "0x%02X nvram = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, u1.isp4010.nvram), + readw(&ha->reg->u1.isp4010.nvram)); + } else if (is_qla4022(ha) | is_qla4032(ha)) { + printk(KERN_INFO "0x%02X intr_mask = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, u1.isp4022.intr_mask), + readw(&ha->reg->u1.isp4022.intr_mask)); + printk(KERN_INFO "0x%02X nvram = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, u1.isp4022.nvram), + readw(&ha->reg->u1.isp4022.nvram)); + printk(KERN_INFO "0x%02X semaphore = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, u1.isp4022.semaphore), + readw(&ha->reg->u1.isp4022.semaphore)); + } + printk(KERN_INFO "0x%02X req_q_in = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, req_q_in), + readw(&ha->reg->req_q_in)); + printk(KERN_INFO "0x%02X rsp_q_out = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, rsp_q_out), + readw(&ha->reg->rsp_q_out)); + + if (is_qla4010(ha)) { + printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, u2.isp4010.ext_hw_conf), + readw(&ha->reg->u2.isp4010.ext_hw_conf)); + printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, u2.isp4010.port_ctrl), + readw(&ha->reg->u2.isp4010.port_ctrl)); + printk(KERN_INFO "0x%02X port_status = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, u2.isp4010.port_status), + readw(&ha->reg->u2.isp4010.port_status)); + printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, u2.isp4010.req_q_out), + readw(&ha->reg->u2.isp4010.req_q_out)); + printk(KERN_INFO "0x%02X gp_out = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_out), + 
readw(&ha->reg->u2.isp4010.gp_out)); + printk(KERN_INFO "0x%02X gp_in = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_in), + readw(&ha->reg->u2.isp4010.gp_in)); + printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n", (uint8_t) + offsetof(struct isp_reg, u2.isp4010.port_err_status), + readw(&ha->reg->u2.isp4010.port_err_status)); + } else if (is_qla4022(ha) | is_qla4032(ha)) { + printk(KERN_INFO "Page 0 Registers:\n"); + printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n", (uint8_t) + offsetof(struct isp_reg, u2.isp4022.p0.ext_hw_conf), + readw(&ha->reg->u2.isp4022.p0.ext_hw_conf)); + printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n", (uint8_t) + offsetof(struct isp_reg, u2.isp4022.p0.port_ctrl), + readw(&ha->reg->u2.isp4022.p0.port_ctrl)); + printk(KERN_INFO "0x%02X port_status = 0x%08X\n", (uint8_t) + offsetof(struct isp_reg, u2.isp4022.p0.port_status), + readw(&ha->reg->u2.isp4022.p0.port_status)); + printk(KERN_INFO "0x%02X gp_out = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_out), + readw(&ha->reg->u2.isp4022.p0.gp_out)); + printk(KERN_INFO "0x%02X gp_in = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_in), + readw(&ha->reg->u2.isp4022.p0.gp_in)); + printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n", (uint8_t) + offsetof(struct isp_reg, u2.isp4022.p0.port_err_status), + readw(&ha->reg->u2.isp4022.p0.port_err_status)); + printk(KERN_INFO "Page 1 Registers:\n"); + writel(HOST_MEM_CFG_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT), + &ha->reg->ctrl_status); + printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n", + (uint8_t) offsetof(struct isp_reg, u2.isp4022.p1.req_q_out), + readw(&ha->reg->u2.isp4022.p1.req_q_out)); + writel(PORT_CTRL_STAT_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT), + &ha->reg->ctrl_status); + } +} diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h index 9dc0a6616ed..0f3bfc3da5c 100644 --- a/drivers/scsi/qla4xxx/ql4_def.h +++ b/drivers/scsi/qla4xxx/ql4_def.h @@ -24,6 +24,7 @@ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/mutex.h> +#include <linux/aer.h> #include <net/tcp.h> #include <scsi/scsi.h> @@ -36,24 +37,6 @@ #include "ql4_dbg.h" #include "ql4_nx.h" -#if defined(CONFIG_PCIEAER) -#include <linux/aer.h> -#else -/* AER releated */ -static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev) -{ - return -EINVAL; -} -static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev) -{ - return -EINVAL; -} -static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) -{ - return -EINVAL; -} -#endif - #ifndef PCI_DEVICE_ID_QLOGIC_ISP4010 #define PCI_DEVICE_ID_QLOGIC_ISP4010 0x4010 #endif @@ -179,6 +162,7 @@ static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) #define IOCB_TOV_MARGIN 10 #define RELOGIN_TOV 18 #define ISNS_DEREG_TOV 5 +#define HBA_ONLINE_TOV 30 #define MAX_RESET_HA_RETRIES 2 diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h index 0336c6db8cb..5e757d7fff7 100644 --- a/drivers/scsi/qla4xxx/ql4_fw.h +++ b/drivers/scsi/qla4xxx/ql4_fw.h @@ -416,6 +416,8 @@ struct qla_flt_region { #define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED 0x802C #define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D #define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E +#define MBOX_ASTS_TXSCVR_INSERTED 0x8130 +#define MBOX_ASTS_TXSCVR_REMOVED 0x8131 #define ISNS_EVENT_DATA_RECEIVED 0x0000 #define ISNS_EVENT_CONNECTION_OPENED 0x0001 @@ -446,6 +448,7 @@ struct addr_ctrl_blk { #define FWOPT_SESSION_MODE 0x0040 #define FWOPT_INITIATOR_MODE 0x0020 
#define FWOPT_TARGET_MODE 0x0010 +#define FWOPT_ENABLE_CRBDB 0x8000 uint16_t exec_throttle; /* 04-05 */ uint8_t zio_count; /* 06 */ diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h index 95a26fb1626..6575a47501e 100644 --- a/drivers/scsi/qla4xxx/ql4_glbl.h +++ b/drivers/scsi/qla4xxx/ql4_glbl.h @@ -94,6 +94,7 @@ void qla4xxx_process_response_queue(struct scsi_qla_host *ha); void qla4xxx_wake_dpc(struct scsi_qla_host *ha); void qla4xxx_get_conn_event_log(struct scsi_qla_host *ha); void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha); +void qla4xxx_dump_registers(struct scsi_qla_host *ha); void qla4_8xxx_pci_config(struct scsi_qla_host *); int qla4_8xxx_iospace_config(struct scsi_qla_host *ha); diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index 4c9be77ee70..dc01fa3da5d 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c @@ -1207,8 +1207,8 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha) break; DEBUG2(printk(KERN_INFO "scsi%ld: %s: Waiting for boot " - "firmware to complete... ctrl_sts=0x%x\n", - ha->host_no, __func__, ctrl_status)); + "firmware to complete... ctrl_sts=0x%x, remaining=%ld\n", + ha->host_no, __func__, ctrl_status, max_wait_time)); msleep_interruptible(250); } while (!time_after_eq(jiffies, max_wait_time)); @@ -1459,6 +1459,12 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, exit_init_online: set_bit(AF_ONLINE, &ha->flags); exit_init_hba: + if (is_qla8022(ha) && (status == QLA_ERROR)) { + /* Since interrupts are registered in start_firmware for + * 82xx, release them here if initialize_adapter fails */ + qla4xxx_free_irqs(ha); + } + DEBUG2(printk("scsi%ld: initialize adapter: %s\n", ha->host_no, status == QLA_ERROR ? 
"FAILED" : "SUCCEDED")); return status; diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c index 4ef9ba112ee..5ae49fd8784 100644 --- a/drivers/scsi/qla4xxx/ql4_iocb.c +++ b/drivers/scsi/qla4xxx/ql4_iocb.c @@ -202,19 +202,11 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb, void qla4_8xxx_queue_iocb(struct scsi_qla_host *ha) { uint32_t dbval = 0; - unsigned long wtime; dbval = 0x14 | (ha->func_num << 5); dbval = dbval | (0 << 8) | (ha->request_in << 16); - writel(dbval, (unsigned long __iomem *)ha->nx_db_wr_ptr); - wmb(); - wtime = jiffies + (2 * HZ); - while (readl((void __iomem *)ha->nx_db_rd_ptr) != dbval && - !time_after_eq(jiffies, wtime)) { - writel(dbval, (unsigned long __iomem *)ha->nx_db_wr_ptr); - wmb(); - } + qla4_8xxx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in); } /** diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c index 2a1ab63f3eb..7c33fd5943d 100644 --- a/drivers/scsi/qla4xxx/ql4_isr.c +++ b/drivers/scsi/qla4xxx/ql4_isr.c @@ -72,7 +72,7 @@ qla4xxx_status_cont_entry(struct scsi_qla_host *ha, { struct srb *srb = ha->status_srb; struct scsi_cmnd *cmd; - uint8_t sense_len; + uint16_t sense_len; if (srb == NULL) return; @@ -487,6 +487,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha, case MBOX_ASTS_SYSTEM_ERROR: /* Log Mailbox registers */ ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__); + qla4xxx_dump_registers(ha); + if (ql4xdontresethba) { DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n", ha->host_no, __func__)); @@ -621,6 +623,18 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha, } break; + case MBOX_ASTS_TXSCVR_INSERTED: + DEBUG2(printk(KERN_WARNING + "scsi%ld: AEN %04x Transceiver" + " inserted\n", ha->host_no, mbox_sts[0])); + break; + + case MBOX_ASTS_TXSCVR_REMOVED: + DEBUG2(printk(KERN_WARNING + "scsi%ld: AEN %04x Transceiver" + " removed\n", ha->host_no, mbox_sts[0])); + break; + default: DEBUG2(printk(KERN_WARNING "scsi%ld: AEN %04x UNKNOWN\n", diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index 90021704d8c..2d2f9c879bf 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c @@ -299,6 +299,10 @@ qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd, { memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT); memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT); + + if (is_qla8022(ha)) + qla4_8xxx_wr_32(ha, ha->nx_db_wr_ptr, 0); + mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE; mbox_cmd[1] = 0; mbox_cmd[2] = LSDW(init_fw_cb_dma); @@ -472,6 +476,11 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha) init_fw_cb->fw_options |= __constant_cpu_to_le16(FWOPT_SESSION_MODE | FWOPT_INITIATOR_MODE); + + if (is_qla8022(ha)) + init_fw_cb->fw_options |= + __constant_cpu_to_le16(FWOPT_ENABLE_CRBDB); + init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE); if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) @@ -592,7 +601,7 @@ int qla4xxx_get_firmware_status(struct scsi_qla_host * ha) } ql4_printk(KERN_INFO, ha, "%ld firmare IOCBs available (%d).\n", - ha->host_no, mbox_cmd[2]); + ha->host_no, mbox_sts[2]); return QLA_SUCCESS; } diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index 449256f2c5f..474b10d7136 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c @@ -839,8 +839,11 @@ qla4_8xxx_rom_lock(struct scsi_qla_host *ha) done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK)); if (done == 1) break; - if (timeout >= 
qla4_8xxx_rom_lock_timeout) + if (timeout >= qla4_8xxx_rom_lock_timeout) { + ql4_printk(KERN_WARNING, ha, + "%s: Failed to acquire rom lock", __func__); return -1; + } timeout++; @@ -1078,21 +1081,6 @@ qla4_8xxx_pinit_from_rom(struct scsi_qla_host *ha, int verbose) return 0; } -static int qla4_8xxx_check_for_bad_spd(struct scsi_qla_host *ha) -{ - u32 val = 0; - val = qla4_8xxx_rd_32(ha, BOOT_LOADER_DIMM_STATUS) ; - val &= QLA82XX_BOOT_LOADER_MN_ISSUE; - if (val & QLA82XX_PEG_TUNE_MN_SPD_ZEROED) { - printk("Memory DIMM SPD not programmed. Assumed valid.\n"); - return 1; - } else if (val) { - printk("Memory DIMM type incorrect. Info:%08X.\n", val); - return 2; - } - return 0; -} - static int qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start) { @@ -1377,8 +1365,6 @@ static int qla4_8xxx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val) } while (--retries); - qla4_8xxx_check_for_bad_spd(ha); - if (!retries) { pegtune_val = qla4_8xxx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE); @@ -1540,14 +1526,31 @@ qla4_8xxx_try_start_fw(struct scsi_qla_host *ha) ql4_printk(KERN_INFO, ha, "FW: Attempting to load firmware from flash...\n"); rval = qla4_8xxx_start_firmware(ha, ha->hw.flt_region_fw); - if (rval == QLA_SUCCESS) - return rval; - ql4_printk(KERN_ERR, ha, "FW: Load firmware from flash FAILED...\n"); + if (rval != QLA_SUCCESS) { + ql4_printk(KERN_ERR, ha, "FW: Load firmware from flash" + " FAILED...\n"); + return rval; + } return rval; } +static void qla4_8xxx_rom_lock_recovery(struct scsi_qla_host *ha) +{ + if (qla4_8xxx_rom_lock(ha)) { + /* Someone else is holding the lock. */ + dev_info(&ha->pdev->dev, "Resetting rom_lock\n"); + } + + /* + * Either we got the lock, or someone + * else died while holding it. + * In either case, unlock. + */ + qla4_8xxx_rom_unlock(ha); +} + /** * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw * @ha: pointer to adapter structure @@ -1557,11 +1560,12 @@ qla4_8xxx_try_start_fw(struct scsi_qla_host *ha) static int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha) { - int rval, i, timeout; + int rval = QLA_ERROR; + int i, timeout; uint32_t old_count, count; + int need_reset = 0, peg_stuck = 1; - if (qla4_8xxx_need_reset(ha)) - goto dev_initialize; + need_reset = qla4_8xxx_need_reset(ha); old_count = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); @@ -1570,12 +1574,30 @@ qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha) if (timeout) { qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_FAILED); - return QLA_ERROR; + return rval; } count = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER); if (count != old_count) + peg_stuck = 0; + } + + if (need_reset) { + /* We are trying to perform a recovery here. */ + if (peg_stuck) + qla4_8xxx_rom_lock_recovery(ha); + goto dev_initialize; + } else { + /* Start of day for this ha context. */ + if (peg_stuck) { + /* Either we are the first or recovery in progress. */ + qla4_8xxx_rom_lock_recovery(ha); + goto dev_initialize; + } else { + /* Firmware already running. 
*/ + rval = QLA_SUCCESS; goto dev_ready; + } } dev_initialize: @@ -1601,7 +1623,7 @@ dev_ready: ql4_printk(KERN_INFO, ha, "HW State: READY\n"); qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY); - return QLA_SUCCESS; + return rval; } /** @@ -1764,20 +1786,9 @@ int qla4_8xxx_load_risc(struct scsi_qla_host *ha) int retval; retval = qla4_8xxx_device_state_handler(ha); - if (retval == QLA_SUCCESS && - !test_bit(AF_INIT_DONE, &ha->flags)) { + if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags)) retval = qla4xxx_request_irqs(ha); - if (retval != QLA_SUCCESS) { - ql4_printk(KERN_WARNING, ha, - "Failed to reserve interrupt %d already in use.\n", - ha->pdev->irq); - } else { - set_bit(AF_IRQ_ATTACHED, &ha->flags); - ha->host->irq = ha->pdev->irq; - ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n", - __func__, ha->pdev->irq); - } - } + return retval; } diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h index 931ad3f1e91..ff689bf5300 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.h +++ b/drivers/scsi/qla4xxx/ql4_nx.h @@ -24,7 +24,6 @@ #define CRB_CMDPEG_STATE QLA82XX_REG(0x50) #define CRB_RCVPEG_STATE QLA82XX_REG(0x13c) -#define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54) #define CRB_DMA_SHIFT QLA82XX_REG(0xcc) #define QLA82XX_HW_H0_CH_HUB_ADR 0x05 @@ -529,12 +528,12 @@ # define QLA82XX_CAM_RAM_BASE (QLA82XX_CRB_CAM + 0x02000) # define QLA82XX_CAM_RAM(reg) (QLA82XX_CAM_RAM_BASE + (reg)) -#define QLA82XX_PEG_TUNE_MN_SPD_ZEROED 0x80000000 -#define QLA82XX_BOOT_LOADER_MN_ISSUE 0xff00ffff #define QLA82XX_PORT_MODE_ADDR (QLA82XX_CAM_RAM(0x24)) #define QLA82XX_PEG_HALT_STATUS1 (QLA82XX_CAM_RAM(0xa8)) #define QLA82XX_PEG_HALT_STATUS2 (QLA82XX_CAM_RAM(0xac)) #define QLA82XX_PEG_ALIVE_COUNTER (QLA82XX_CAM_RAM(0xb0)) +#define QLA82XX_CAM_RAM_DB1 (QLA82XX_CAM_RAM(0x1b0)) +#define QLA82XX_CAM_RAM_DB2 (QLA82XX_CAM_RAM(0x1b4)) #define HALT_STATUS_UNRECOVERABLE 0x80000000 #define HALT_STATUS_RECOVERABLE 0x40000000 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 370d40ff152..0d48fb4d104 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -79,8 +79,7 @@ static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc); /* * SCSI host template entry points */ -static int qla4xxx_queuecommand(struct scsi_cmnd *cmd, - void (*done) (struct scsi_cmnd *)); +static int qla4xxx_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd); static int qla4xxx_eh_abort(struct scsi_cmnd *cmd); static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd); static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd); @@ -167,8 +166,6 @@ static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session) "of (%d) secs exhausted, marking device DEAD.\n", ha->host_no, __func__, ddb_entry->fw_ddb_index, QL4_SESS_RECOVERY_TMO)); - - qla4xxx_wake_dpc(ha); } } @@ -466,7 +463,7 @@ void qla4xxx_srb_compl(struct kref *ref) * completion handling). Unfortunely, it sometimes calls the scheduler * in interrupt context which is a big NO! NO!. **/ -static int qla4xxx_queuecommand(struct scsi_cmnd *cmd, +static int qla4xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { struct scsi_qla_host *ha = to_qla_host(cmd->device->host); @@ -540,6 +537,8 @@ qc_fail_command: return 0; } +static DEF_SCSI_QCMD(qla4xxx_queuecommand) + /** * qla4xxx_mem_free - frees memory allocated to adapter * @ha: Pointer to host adapter structure. 
@@ -573,10 +572,6 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha) if (ha->nx_pcibase) iounmap( (struct device_reg_82xx __iomem *)ha->nx_pcibase); - - if (ha->nx_db_wr_ptr) - iounmap( - (struct device_reg_82xx __iomem *)ha->nx_db_wr_ptr); } else if (ha->reg) iounmap(ha->reg); pci_release_regions(ha->pdev); @@ -692,7 +687,9 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha) qla4xxx_wake_dpc(ha); qla4xxx_mailbox_premature_completion(ha); } - } + } else + ha->seconds_since_last_heartbeat = 0; + ha->fw_heartbeat_counter = fw_heartbeat_counter; } @@ -885,7 +882,13 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha) /* Find a command that hasn't completed. */ for (index = 0; index < ha->host->can_queue; index++) { cmd = scsi_host_find_tag(ha->host, index); - if (cmd != NULL) + /* + * We cannot just check if the index is valid, + * becase if we are run from the scsi eh, then + * the scsi/block layer is going to prevent + * the tag from being released. + */ + if (cmd != NULL && CMD_SP(cmd)) break; } spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -937,11 +940,14 @@ int qla4xxx_soft_reset(struct scsi_qla_host *ha) { uint32_t max_wait_time; unsigned long flags = 0; - int status = QLA_ERROR; + int status; uint32_t ctrl_status; - qla4xxx_hw_reset(ha); + status = qla4xxx_hw_reset(ha); + if (status != QLA_SUCCESS) + return status; + status = QLA_ERROR; /* Wait until the Network Reset Intr bit is cleared */ max_wait_time = RESET_INTR_TOV; do { @@ -1101,7 +1107,8 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha) ha->host_no, __func__)); status = ha->isp_ops->reset_firmware(ha); if (status == QLA_SUCCESS) { - qla4xxx_cmd_wait(ha); + if (!test_bit(AF_FW_RECOVERY, &ha->flags)) + qla4xxx_cmd_wait(ha); ha->isp_ops->disable_intrs(ha); qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); qla4xxx_abort_active_cmds(ha, DID_RESET << 16); @@ -1118,7 +1125,8 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha) * or if stop_firmware fails for ISP-82xx. * This is the default case for ISP-4xxx */ if (!is_qla8022(ha) || reset_chip) { - qla4xxx_cmd_wait(ha); + if (!test_bit(AF_FW_RECOVERY, &ha->flags)) + qla4xxx_cmd_wait(ha); qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); qla4xxx_abort_active_cmds(ha, DID_RESET << 16); DEBUG2(ql4_printk(KERN_INFO, ha, @@ -1471,24 +1479,10 @@ int qla4_8xxx_iospace_config(struct scsi_qla_host *ha) db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */ db_len = pci_resource_len(pdev, 4); - /* mapping of doorbell write pointer */ - ha->nx_db_wr_ptr = (unsigned long)ioremap(db_base + - (ha->pdev->devfn << 12), 4); - if (!ha->nx_db_wr_ptr) { - printk(KERN_ERR - "cannot remap MMIO doorbell-write (%s), aborting\n", - pci_name(pdev)); - goto iospace_error_exit; - } - /* mapping of doorbell read pointer */ - ha->nx_db_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) + - (ha->pdev->devfn * 8); - if (!ha->nx_db_rd_ptr) - printk(KERN_ERR - "cannot remap MMIO doorbell-read (%s), aborting\n", - pci_name(pdev)); - return 0; + ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? 
QLA82XX_CAM_RAM_DB1 : + QLA82XX_CAM_RAM_DB2); + return 0; iospace_error_exit: return -ENOMEM; } @@ -1960,13 +1954,11 @@ static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha) { unsigned long wait_online; - wait_online = jiffies + (30 * HZ); + wait_online = jiffies + (HBA_ONLINE_TOV * HZ); while (time_before(jiffies, wait_online)) { if (adapter_up(ha)) return QLA_SUCCESS; - else if (ha->retry_reset_ha_cnt == 0) - return QLA_ERROR; msleep(2000); } @@ -2021,6 +2013,7 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd) unsigned int id = cmd->device->id; unsigned int lun = cmd->device->lun; unsigned long serial = cmd->serial_number; + unsigned long flags; struct srb *srb = NULL; int ret = SUCCESS; int wait = 0; @@ -2029,12 +2022,14 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd) "scsi%ld:%d:%d: Abort command issued cmd=%p, pid=%ld\n", ha->host_no, id, lun, cmd, serial); + spin_lock_irqsave(&ha->hardware_lock, flags); srb = (struct srb *) CMD_SP(cmd); - - if (!srb) + if (!srb) { + spin_unlock_irqrestore(&ha->hardware_lock, flags); return SUCCESS; - + } kref_get(&srb->srb_ref); + spin_unlock_irqrestore(&ha->hardware_lock, flags); if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) { DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n", @@ -2267,6 +2262,8 @@ qla4xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) qla4xxx_mailbox_premature_completion(ha); qla4xxx_free_irqs(ha); pci_disable_device(pdev); + /* Return back all IOs */ + qla4xxx_abort_active_cmds(ha, DID_RESET << 16); return PCI_ERS_RESULT_NEED_RESET; case pci_channel_io_perm_failure: set_bit(AF_EEH_BUSY, &ha->flags); @@ -2290,17 +2287,13 @@ qla4xxx_pci_mmio_enabled(struct pci_dev *pdev) if (!is_aer_supported(ha)) return PCI_ERS_RESULT_NONE; - if (test_bit(AF_FW_RECOVERY, &ha->flags)) { - ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: firmware hang -- " - "mmio_enabled\n", ha->host_no, __func__); - return PCI_ERS_RESULT_NEED_RESET; - } else - return PCI_ERS_RESULT_RECOVERED; + return PCI_ERS_RESULT_RECOVERED; } -uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha) +static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha) { uint32_t rval = QLA_ERROR; + uint32_t ret = 0; int fn; struct pci_dev *other_pdev = NULL; @@ -2312,7 +2305,6 @@ uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha) clear_bit(AF_ONLINE, &ha->flags); qla4xxx_mark_all_devices_missing(ha); qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); - qla4xxx_abort_active_cmds(ha, DID_RESET << 16); } fn = PCI_FUNC(ha->pdev->devfn); @@ -2375,7 +2367,16 @@ uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha) /* Clear driver state register */ qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0); qla4_8xxx_set_drv_active(ha); - ha->isp_ops->enable_intrs(ha); + ret = qla4xxx_request_irqs(ha); + if (ret) { + ql4_printk(KERN_WARNING, ha, "Failed to " + "reserve interrupt %d already in use.\n", + ha->pdev->irq); + rval = QLA_ERROR; + } else { + ha->isp_ops->enable_intrs(ha); + rval = QLA_SUCCESS; + } } qla4_8xxx_idc_unlock(ha); } else { @@ -2387,8 +2388,18 @@ uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha) clear_bit(AF_FW_RECOVERY, &ha->flags); rval = qla4xxx_initialize_adapter(ha, PRESERVE_DDB_LIST); - if (rval == QLA_SUCCESS) - ha->isp_ops->enable_intrs(ha); + if (rval == QLA_SUCCESS) { + ret = qla4xxx_request_irqs(ha); + if (ret) { + ql4_printk(KERN_WARNING, ha, "Failed to" + " reserve interrupt %d already in" + " use.\n", ha->pdev->irq); + rval = QLA_ERROR; + } else { + ha->isp_ops->enable_intrs(ha); + rval = 
QLA_SUCCESS; + } + } qla4_8xxx_idc_lock(ha); qla4_8xxx_set_drv_active(ha); qla4_8xxx_idc_unlock(ha); @@ -2430,12 +2441,7 @@ qla4xxx_pci_slot_reset(struct pci_dev *pdev) goto exit_slot_reset; } - ret = qla4xxx_request_irqs(ha); - if (ret) { - ql4_printk(KERN_WARNING, ha, "Failed to reserve interrupt %d" - " already in use.\n", pdev->irq); - goto exit_slot_reset; - } + ha->isp_ops->disable_intrs(ha); if (is_qla8022(ha)) { if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) { diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h index a77b973f2cb..9bfacf4ed13 100644 --- a/drivers/scsi/qla4xxx/ql4_version.h +++ b/drivers/scsi/qla4xxx/ql4_version.h @@ -5,4 +5,4 @@ * See LICENSE.qla4xxx for copyright and licensing details. */ -#define QLA4XXX_DRIVER_VERSION "5.02.00-k3" +#define QLA4XXX_DRIVER_VERSION "5.02.00-k4" diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c index 1ad51552d6b..c3a9151ca82 100644 --- a/drivers/scsi/qlogicfas408.c +++ b/drivers/scsi/qlogicfas408.c @@ -439,7 +439,7 @@ irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id) * Queued command */ -int qlogicfas408_queuecommand(struct scsi_cmnd *cmd, +static int qlogicfas408_queuecommand_lck(struct scsi_cmnd *cmd, void (*done) (struct scsi_cmnd *)) { struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd); @@ -459,6 +459,8 @@ int qlogicfas408_queuecommand(struct scsi_cmnd *cmd, return 0; } +DEF_SCSI_QCMD(qlogicfas408_queuecommand) + /* * Return bios parameters */ diff --git a/drivers/scsi/qlogicfas408.h b/drivers/scsi/qlogicfas408.h index 260626427a3..2f6c0a16620 100644 --- a/drivers/scsi/qlogicfas408.h +++ b/drivers/scsi/qlogicfas408.h @@ -103,8 +103,7 @@ struct qlogicfas408_priv { #define get_priv_by_host(x) (struct qlogicfas408_priv *)&((x)->hostdata[0]) irqreturn_t qlogicfas408_ihandl(int irq, void *dev_id); -int qlogicfas408_queuecommand(struct scsi_cmnd * cmd, - void (*done) (struct scsi_cmnd *)); +int qlogicfas408_queuecommand(struct Scsi_Host *h, struct scsi_cmnd * cmd); int qlogicfas408_biosparam(struct scsi_device * disk, struct block_device *dev, sector_t capacity, int ip[]); diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index f8c561cf751..664c9572d0c 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c @@ -1003,7 +1003,7 @@ static int qlogicpti_slave_configure(struct scsi_device *sdev) * * "This code must fly." -davem */ -static int qlogicpti_queuecommand(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *)) +static int qlogicpti_queuecommand_lck(struct scsi_cmnd *Cmnd, void (*done)(struct scsi_cmnd *)) { struct Scsi_Host *host = Cmnd->device->host; struct qlogicpti *qpti = (struct qlogicpti *) host->hostdata; @@ -1052,6 +1052,8 @@ toss_command: return 1; } +static DEF_SCSI_QCMD(qlogicpti_queuecommand) + static int qlogicpti_return_status(struct Status_Entry *sts, int id) { int host_status = DID_ERROR; diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 348fba0a897..2aeb2e9c4d3 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -634,12 +634,13 @@ void scsi_log_completion(struct scsi_cmnd *cmd, int disposition) * Description: a serial number identifies a request for error recovery * and debugging purposes. Protected by the Host_Lock of host. 
*/ -static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd) +void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd *cmd) { cmd->serial_number = host->cmd_serial_number++; if (cmd->serial_number == 0) cmd->serial_number = host->cmd_serial_number++; } +EXPORT_SYMBOL(scsi_cmd_get_serial); /** * scsi_dispatch_command - Dispatch a command to the low-level driver. @@ -651,7 +652,6 @@ static inline void scsi_cmd_get_serial(struct Scsi_Host *host, struct scsi_cmnd int scsi_dispatch_cmd(struct scsi_cmnd *cmd) { struct Scsi_Host *host = cmd->device->host; - unsigned long flags = 0; unsigned long timeout; int rtn = 0; @@ -737,23 +737,15 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd) goto out; } - spin_lock_irqsave(host->host_lock, flags); - /* - * AK: unlikely race here: for some reason the timer could - * expire before the serial number is set up below. - * - * TODO: kill serial or move to blk layer - */ - scsi_cmd_get_serial(host, cmd); - if (unlikely(host->shost_state == SHOST_DEL)) { cmd->result = (DID_NO_CONNECT << 16); scsi_done(cmd); } else { trace_scsi_dispatch_cmd_start(cmd); - rtn = host->hostt->queuecommand(cmd, scsi_done); + cmd->scsi_done = scsi_done; + rtn = host->hostt->queuecommand(host, cmd); } - spin_unlock_irqrestore(host->host_lock, flags); + if (rtn) { trace_scsi_dispatch_cmd_error(cmd, rtn); if (rtn != SCSI_MLQUEUE_DEVICE_BUSY && diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 2c36bae3bd4..2f1f9b079b1 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -3538,7 +3538,7 @@ static void sdebug_remove_adapter(void) } static -int scsi_debug_queuecommand(struct scsi_cmnd *SCpnt, done_funct_t done) +int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done) { unsigned char *cmd = (unsigned char *) SCpnt->cmnd; int len, k; @@ -3884,6 +3884,8 @@ write: (delay_override ? 0 : scsi_debug_delay)); } +static DEF_SCSI_QCMD(scsi_debug_queuecommand) + static struct scsi_host_template sdebug_driver_template = { .proc_info = scsi_debug_proc_info, .proc_name = sdebug_proc_name, diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 1de30eb83bb..30ac116186f 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -320,19 +320,11 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) "changed. The Linux SCSI layer does not " "automatically adjust these parameters.\n"); - if (scmd->request->cmd_flags & REQ_HARDBARRIER) - /* - * barrier requests should always retry on UA - * otherwise block will get a spurious error - */ - return NEEDS_RETRY; - else - /* - * for normal (non barrier) commands, pass the - * UA upwards for a determination in the - * completion functions - */ - return SUCCESS; + /* + * Pass the UA upwards for a determination in the completion + * functions. + */ + return SUCCESS; /* these three are not supported */ case COPY_ABORTED: @@ -623,7 +615,7 @@ static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd) return rtn; } -static int __scsi_try_to_abort_cmd(struct scsi_cmnd *scmd) +static int scsi_try_to_abort_cmd(struct scsi_cmnd *scmd) { if (!scmd->device->host->hostt->eh_abort_handler) return FAILED; @@ -631,31 +623,9 @@ static int __scsi_try_to_abort_cmd(struct scsi_cmnd *scmd) return scmd->device->host->hostt->eh_abort_handler(scmd); } -/** - * scsi_try_to_abort_cmd - Ask host to abort a running command. - * @scmd: SCSI cmd to abort from Lower Level. 
- * - * Notes: - * This function will not return until the user's completion function - * has been called. there is no timeout on this operation. if the - * author of the low-level driver wishes this operation to be timed, - * they can provide this facility themselves. helper functions in - * scsi_error.c can be supplied to make this easier to do. - */ -static int scsi_try_to_abort_cmd(struct scsi_cmnd *scmd) -{ - /* - * scsi_done was called just after the command timed out and before - * we had a chance to process it. (db) - */ - if (scmd->serial_number == 0) - return SUCCESS; - return __scsi_try_to_abort_cmd(scmd); -} - static void scsi_abort_eh_cmnd(struct scsi_cmnd *scmd) { - if (__scsi_try_to_abort_cmd(scmd) != SUCCESS) + if (scsi_try_to_abort_cmd(scmd) != SUCCESS) if (scsi_try_bus_device_reset(scmd) != SUCCESS) if (scsi_try_target_reset(scmd) != SUCCESS) if (scsi_try_bus_reset(scmd) != SUCCESS) @@ -781,17 +751,15 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, struct Scsi_Host *shost = sdev->host; DECLARE_COMPLETION_ONSTACK(done); unsigned long timeleft; - unsigned long flags; struct scsi_eh_save ses; int rtn; scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes); shost->eh_action = &done; - spin_lock_irqsave(shost->host_lock, flags); scsi_log_send(scmd); - shost->hostt->queuecommand(scmd, scsi_eh_done); - spin_unlock_irqrestore(shost->host_lock, flags); + scmd->scsi_done = scsi_eh_done; + shost->hostt->queuecommand(shost, scmd); timeleft = wait_for_completion_timeout(&done, timeout); diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 8041fe1ab17..4a3842212c5 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1403,11 +1403,6 @@ static void scsi_softirq_done(struct request *rq) INIT_LIST_HEAD(&cmd->eh_entry); - /* - * Set the serial numbers back to zero - */ - cmd->serial_number = 0; - atomic_inc(&cmd->device->iodone_cnt); if (cmd->result) atomic_inc(&cmd->device->ioerr_cnt); @@ -1642,9 +1637,8 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, blk_queue_max_segment_size(q, dma_get_max_seg_size(dev)); - /* New queue, no concurrency on queue_flags */ if (!shost->use_clustering) - queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); + q->limits.cluster = 0; /* * set a reasonable default alignment on word boundaries: the @@ -2438,7 +2432,8 @@ scsi_internal_device_unblock(struct scsi_device *sdev) sdev->sdev_state = SDEV_RUNNING; else if (sdev->sdev_state == SDEV_CREATED_BLOCK) sdev->sdev_state = SDEV_CREATED; - else + else if (sdev->sdev_state != SDEV_CANCEL && + sdev->sdev_state != SDEV_OFFLINE) return -EINVAL; spin_lock_irqsave(q->queue_lock, flags); diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 20ad59dff73..76ee2e784f7 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -964,10 +964,11 @@ static void __scsi_remove_target(struct scsi_target *starget) list_for_each_entry(sdev, &shost->__devices, siblings) { if (sdev->channel != starget->channel || sdev->id != starget->id || - sdev->sdev_state == SDEV_DEL) + scsi_device_get(sdev)) continue; spin_unlock_irqrestore(shost->host_lock, flags); scsi_remove_device(sdev); + scsi_device_put(sdev); spin_lock_irqsave(shost->host_lock, flags); goto restart; } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 57d1e3e1bd4..956496182c8 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -46,7 +46,6 @@ #include <linux/blkdev.h> #include <linux/blkpg.h> #include <linux/delay.h> -#include <linux/smp_lock.h> 
#include <linux/mutex.h> #include <linux/string_helpers.h> #include <linux/async.h> @@ -259,6 +258,28 @@ sd_show_protection_type(struct device *dev, struct device_attribute *attr, } static ssize_t +sd_show_protection_mode(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_disk *sdkp = to_scsi_disk(dev); + struct scsi_device *sdp = sdkp->device; + unsigned int dif, dix; + + dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type); + dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type); + + if (!dix && scsi_host_dix_capable(sdp->host, SD_DIF_TYPE0_PROTECTION)) { + dif = 0; + dix = 1; + } + + if (!dif && !dix) + return snprintf(buf, 20, "none\n"); + + return snprintf(buf, 20, "%s%u\n", dix ? "dix" : "dif", dif); +} + +static ssize_t sd_show_app_tag_own(struct device *dev, struct device_attribute *attr, char *buf) { @@ -285,6 +306,7 @@ static struct device_attribute sd_disk_attrs[] = { __ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop, sd_store_manage_start_stop), __ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL), + __ATTR(protection_mode, S_IRUGO, sd_show_protection_mode, NULL), __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL), __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL), __ATTR_NULL, diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c index cbb38c5197f..3cd8ffbad57 100644 --- a/drivers/scsi/sr_ioctl.c +++ b/drivers/scsi/sr_ioctl.c @@ -325,6 +325,15 @@ int sr_drive_status(struct cdrom_device_info *cdi, int slot) } /* + * SK/ASC/ASCQ of 2/4/2 means "initialization required" + * Using CD_TRAY_OPEN results in an START_STOP_UNIT to close + * the tray, which resolves the initialization requirement. + */ + if (scsi_sense_valid(&sshdr) && sshdr.sense_key == NOT_READY + && sshdr.asc == 0x04 && sshdr.ascq == 0x02) + return CDS_TRAY_OPEN; + + /* * 0x04 is format in progress .. but there must be a disc present! 
*/ if (sshdr.sense_key == NOT_READY && sshdr.asc == 0x04) diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c index 9c73dbda3bb..606215e54b8 100644 --- a/drivers/scsi/stex.c +++ b/drivers/scsi/stex.c @@ -572,7 +572,7 @@ stex_slave_destroy(struct scsi_device *sdev) } static int -stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) +stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { struct st_hba *hba; struct Scsi_Host *host; @@ -698,6 +698,8 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) return 0; } +static DEF_SCSI_QCMD(stex_queuecommand) + static void stex_scsi_done(struct st_ccb *ccb) { struct scsi_cmnd *cmd = ccb->cmd; diff --git a/drivers/scsi/sun3_NCR5380.c b/drivers/scsi/sun3_NCR5380.c index 713620ed70d..4f0e5485ffd 100644 --- a/drivers/scsi/sun3_NCR5380.c +++ b/drivers/scsi/sun3_NCR5380.c @@ -908,7 +908,7 @@ static int NCR5380_init (struct Scsi_Host *instance, int flags) */ /* Only make static if a wrapper function is used */ -static int NCR5380_queue_command(struct scsi_cmnd *cmd, +static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { SETUP_HOSTDATA(cmd->device->host); @@ -1019,6 +1019,8 @@ static int NCR5380_queue_command(struct scsi_cmnd *cmd, return 0; } +static DEF_SCSI_QCMD(NCR5380_queue_command) + /* * Function : NCR5380_main (void) * diff --git a/drivers/scsi/sun3_scsi.h b/drivers/scsi/sun3_scsi.h index b29a9d661ca..bcefd8458e6 100644 --- a/drivers/scsi/sun3_scsi.h +++ b/drivers/scsi/sun3_scsi.h @@ -51,8 +51,7 @@ static int sun3scsi_abort(struct scsi_cmnd *); static int sun3scsi_detect (struct scsi_host_template *); static const char *sun3scsi_info (struct Scsi_Host *); static int sun3scsi_bus_reset(struct scsi_cmnd *); -static int sun3scsi_queue_command(struct scsi_cmnd *, - void (*done)(struct scsi_cmnd *)); +static int sun3scsi_queue_command(struct Scsi_Host *, struct scsi_cmnd *); static int sun3scsi_release (struct Scsi_Host *); #ifndef CMD_PER_LUN diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c index e5c369bb568..190107ae120 100644 --- a/drivers/scsi/sym53c416.c +++ b/drivers/scsi/sym53c416.c @@ -734,7 +734,7 @@ const char *sym53c416_info(struct Scsi_Host *SChost) return info; } -int sym53c416_queuecommand(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) +static int sym53c416_queuecommand_lck(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) { int base; unsigned long flags = 0; @@ -761,6 +761,8 @@ int sym53c416_queuecommand(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *)) return 0; } +DEF_SCSI_QCMD(sym53c416_queuecommand) + static int sym53c416_host_reset(Scsi_Cmnd *SCpnt) { int base; diff --git a/drivers/scsi/sym53c416.h b/drivers/scsi/sym53c416.h index 77860d0748f..387de5d80a7 100644 --- a/drivers/scsi/sym53c416.h +++ b/drivers/scsi/sym53c416.h @@ -25,7 +25,7 @@ static int sym53c416_detect(struct scsi_host_template *); static const char *sym53c416_info(struct Scsi_Host *); static int sym53c416_release(struct Scsi_Host *); -static int sym53c416_queuecommand(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *)); +static int sym53c416_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); static int sym53c416_host_reset(Scsi_Cmnd *); static int sym53c416_bios_param(struct scsi_device *, struct block_device *, sector_t, int *); diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index 8b955b534a3..6b97ded9d45 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c @@ -505,7 +505,7 @@ 
void sym_log_bus_error(struct Scsi_Host *shost) * queuecommand method. Entered with the host adapter lock held and * interrupts disabled. */ -static int sym53c8xx_queue_command(struct scsi_cmnd *cmd, +static int sym53c8xx_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { struct sym_hcb *np = SYM_SOFTC_PTR(cmd); @@ -536,6 +536,8 @@ static int sym53c8xx_queue_command(struct scsi_cmnd *cmd, return 0; } +static DEF_SCSI_QCMD(sym53c8xx_queue_command) + /* * Linux entry point of the interrupt handler. */ diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h index 76a069b7ac0..ada1115079c 100644 --- a/drivers/scsi/t128.h +++ b/drivers/scsi/t128.h @@ -96,8 +96,7 @@ static int t128_abort(struct scsi_cmnd *); static int t128_biosparam(struct scsi_device *, struct block_device *, sector_t, int*); static int t128_detect(struct scsi_host_template *); -static int t128_queue_command(struct scsi_cmnd *, - void (*done)(struct scsi_cmnd *)); +static int t128_queue_command(struct Scsi_Host *, struct scsi_cmnd *); static int t128_bus_reset(struct scsi_cmnd *); #ifndef CMD_PER_LUN diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c index 27866b0adfe..a124a28f2cc 100644 --- a/drivers/scsi/tmscsim.c +++ b/drivers/scsi/tmscsim.c @@ -1883,7 +1883,7 @@ dc390_ScsiRstDetect( struct dc390_acb* pACB ) return; } -static int DC390_queuecommand(struct scsi_cmnd *cmd, +static int DC390_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { struct scsi_device *sdev = cmd->device; @@ -1944,6 +1944,8 @@ static int DC390_queuecommand(struct scsi_cmnd *cmd, return SCSI_MLQUEUE_DEVICE_BUSY; } +static DEF_SCSI_QCMD(DC390_queuecommand) + static void dc390_dumpinfo (struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb* pSRB) { struct pci_dev *pdev; diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c index 5d9fdeeb231..edfc5da8be4 100644 --- a/drivers/scsi/u14-34f.c +++ b/drivers/scsi/u14-34f.c @@ -433,7 +433,7 @@ static int u14_34f_detect(struct scsi_host_template *); static int u14_34f_release(struct Scsi_Host *); -static int u14_34f_queuecommand(struct scsi_cmnd *, void (*done)(struct scsi_cmnd *)); +static int u14_34f_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); static int u14_34f_eh_abort(struct scsi_cmnd *); static int u14_34f_eh_host_reset(struct scsi_cmnd *); static int u14_34f_bios_param(struct scsi_device *, struct block_device *, @@ -1248,7 +1248,7 @@ static void scsi_to_dev_dir(unsigned int i, unsigned int j) { } -static int u14_34f_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { +static int u14_34f_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { unsigned int i, j, k; struct mscp *cpp; @@ -1329,6 +1329,8 @@ static int u14_34f_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scs return 0; } +static DEF_SCSI_QCMD(u14_34f_queuecommand) + static int u14_34f_eh_abort(struct scsi_cmnd *SCarg) { unsigned int i, j; diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c index 27aa40f3980..0571ef9639c 100644 --- a/drivers/scsi/ultrastor.c +++ b/drivers/scsi/ultrastor.c @@ -700,7 +700,7 @@ static inline void build_sg_list(struct mscp *mscp, struct scsi_cmnd *SCpnt) mscp->transfer_data_length = transfer_length; } -static int ultrastor_queuecommand(struct scsi_cmnd *SCpnt, +static int ultrastor_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done) (struct scsi_cmnd *)) { struct mscp *my_mscp; @@ -825,6 +825,8 @@ retry: return 0; } +static 
DEF_SCSI_QCMD(ultrastor_queuecommand) + /* This code must deal with 2 cases: 1. The command has not been written to the OGM. In this case, set diff --git a/drivers/scsi/ultrastor.h b/drivers/scsi/ultrastor.h index a692905f95f..165c18b5cf5 100644 --- a/drivers/scsi/ultrastor.h +++ b/drivers/scsi/ultrastor.h @@ -15,8 +15,7 @@ static int ultrastor_detect(struct scsi_host_template *); static const char *ultrastor_info(struct Scsi_Host *shpnt); -static int ultrastor_queuecommand(struct scsi_cmnd *, - void (*done)(struct scsi_cmnd *)); +static int ultrastor_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); static int ultrastor_abort(struct scsi_cmnd *); static int ultrastor_host_reset(struct scsi_cmnd *); static int ultrastor_biosparam(struct scsi_device *, struct block_device *, diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 26894459c37..a18996d2446 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -690,7 +690,7 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter, return 0; } -static int pvscsi_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) +static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { struct Scsi_Host *host = cmd->device->host; struct pvscsi_adapter *adapter = shost_priv(host); @@ -719,6 +719,8 @@ static int pvscsi_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) return 0; } +static DEF_SCSI_QCMD(pvscsi_queue) + static int pvscsi_abort(struct scsi_cmnd *cmd) { struct pvscsi_adapter *adapter = shost_priv(cmd->device->host); diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c index b701bf2cc18..5f697e0bd00 100644 --- a/drivers/scsi/wd33c93.c +++ b/drivers/scsi/wd33c93.c @@ -371,8 +371,8 @@ calc_sync_msg(unsigned int period, unsigned int offset, unsigned int fast, msg[1] = offset; } -int -wd33c93_queuecommand(struct scsi_cmnd *cmd, +static int +wd33c93_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) { struct WD33C93_hostdata *hostdata; @@ -468,6 +468,8 @@ wd33c93_queuecommand(struct scsi_cmnd *cmd, return 0; } +DEF_SCSI_QCMD(wd33c93_queuecommand) + /* * This routine attempts to start a scsi command. If the host_card is * already connected, we give up immediately. 
Otherwise, look through diff --git a/drivers/scsi/wd33c93.h b/drivers/scsi/wd33c93.h index 1ed5f3bf388..3b463d7304d 100644 --- a/drivers/scsi/wd33c93.h +++ b/drivers/scsi/wd33c93.h @@ -343,8 +343,7 @@ struct WD33C93_hostdata { void wd33c93_init (struct Scsi_Host *instance, const wd33c93_regs regs, dma_setup_t setup, dma_stop_t stop, int clock_freq); int wd33c93_abort (struct scsi_cmnd *cmd); -int wd33c93_queuecommand (struct scsi_cmnd *cmd, - void (*done)(struct scsi_cmnd *)); +int wd33c93_queuecommand (struct Scsi_Host *h, struct scsi_cmnd *cmd); void wd33c93_intr (struct Scsi_Host *instance); int wd33c93_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int); int wd33c93_host_reset (struct scsi_cmnd *); diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c index 333580bf37c..db451ae0a36 100644 --- a/drivers/scsi/wd7000.c +++ b/drivers/scsi/wd7000.c @@ -1082,7 +1082,7 @@ static irqreturn_t wd7000_intr(int irq, void *dev_id) return IRQ_HANDLED; } -static int wd7000_queuecommand(struct scsi_cmnd *SCpnt, +static int wd7000_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) { Scb *scb; @@ -1139,6 +1139,8 @@ static int wd7000_queuecommand(struct scsi_cmnd *SCpnt, return 0; } +static DEF_SCSI_QCMD(wd7000_queuecommand) + static int wd7000_diagnostics(Adapter * host, int code) { static IcbDiag icb = { ICB_OP_DIAGNOSTICS }; diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index 4d8e14b7aa9..09a550860dc 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c @@ -2872,7 +2872,7 @@ static struct console serial8250_console = { .device = uart_console_device, .setup = serial8250_console_setup, .early_setup = serial8250_console_early_setup, - .flags = CON_PRINTBUFFER, + .flags = CON_PRINTBUFFER | CON_ANYTIME, .index = -1, .data = &serial8250_reg, }; diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c index 53be4d35a0a..842e3b2a02b 100644 --- a/drivers/serial/8250_pci.c +++ b/drivers/serial/8250_pci.c @@ -2285,6 +2285,8 @@ static struct pciserial_board pci_boards[] __devinitdata = { static const struct pci_device_id softmodem_blacklist[] = { { PCI_VDEVICE(AL, 0x5457), }, /* ALi Corporation M5457 AC'97 Modem */ + { PCI_VDEVICE(MOTOROLA, 0x3052), }, /* Motorola Si3052-based modem */ + { PCI_DEVICE(0x1543, 0x3052), }, /* Si3052-based modem, default IDs */ }; /* @@ -2863,6 +2865,9 @@ static struct pci_device_id serial_pci_tbl[] = { PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL, 0, 0, pbn_b0_4_1152000 }, + { PCI_VENDOR_ID_OXSEMI, 0x9505, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + pbn_b0_bt_2_921600 }, /* * The below card is a little controversial since it is the diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c index a9eff2b18ea..19cac9f610f 100644 --- a/drivers/serial/bfin_5xx.c +++ b/drivers/serial/bfin_5xx.c @@ -23,6 +23,7 @@ #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial_core.h> +#include <linux/dma-mapping.h> #if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \ defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE) @@ -33,12 +34,10 @@ #include <asm/gpio.h> #include <mach/bfin_serial_5xx.h> -#ifdef CONFIG_SERIAL_BFIN_DMA -#include <linux/dma-mapping.h> +#include <asm/dma.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/cacheflush.h> -#endif #ifdef CONFIG_SERIAL_BFIN_MODULE # undef CONFIG_EARLY_PRINTK @@ -360,7 +359,6 @@ static void bfin_serial_tx_chars(struct bfin_serial_port *uart) UART_PUT_CHAR(uart, xmit->buf[xmit->tail]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 
uart->port.icount.tx++; - SSYNC(); } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) @@ -688,6 +686,13 @@ static int bfin_serial_startup(struct uart_port *port) # ifdef CONFIG_BF54x { + /* + * UART2 and UART3 on BF548 share interrupt PINs and DMA + * controllers with SPORT2 and SPORT3. UART rx and tx + * interrupts are generated in PIO mode only when configure + * their peripheral mapping registers properly, which means + * request corresponding DMA channels in PIO mode as well. + */ unsigned uart_dma_ch_rx, uart_dma_ch_tx; switch (uart->port.irq) { @@ -734,8 +739,7 @@ static int bfin_serial_startup(struct uart_port *port) IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_DISABLED, "BFIN_UART_CTS", uart)) { uart->cts_pin = -1; - pr_info("Unable to attach BlackFin UART CTS interrupt.\ - So, disable it.\n"); + pr_info("Unable to attach BlackFin UART CTS interrupt. So, disable it.\n"); } } if (uart->rts_pin >= 0) { @@ -747,8 +751,7 @@ static int bfin_serial_startup(struct uart_port *port) if (request_irq(uart->status_irq, bfin_serial_mctrl_cts_int, IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) { - pr_info("Unable to attach BlackFin UART Modem \ - Status interrupt.\n"); + pr_info("Unable to attach BlackFin UART Modem Status interrupt.\n"); } /* CTS RTS PINs are negative assertive. */ @@ -846,6 +849,8 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios, if (termios->c_cflag & CMSPAR) lcr |= STP; + spin_lock_irqsave(&uart->port.lock, flags); + port->read_status_mask = OE; if (termios->c_iflag & INPCK) port->read_status_mask |= (FE | PE); @@ -875,8 +880,6 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios, if (termios->c_line != N_IRDA) quot -= ANOMALY_05000230; - spin_lock_irqsave(&uart->port.lock, flags); - UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15); /* Disable UART */ @@ -1321,6 +1324,14 @@ struct console __init *bfin_earlyserial_init(unsigned int port, struct bfin_serial_port *uart; struct ktermios t; +#ifdef CONFIG_SERIAL_BFIN_CONSOLE + /* + * If we are using early serial, don't let the normal console rewind + * log buffer, since that causes things to be printed multiple times + */ + bfin_serial_console.flags &= ~CON_PRINTBUFFER; +#endif + if (port == -1 || port >= nr_active_ports) port = 0; bfin_serial_init_ports(); diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c index c856905bb3b..bcc31f2140a 100644 --- a/drivers/serial/crisv10.c +++ b/drivers/serial/crisv10.c @@ -18,7 +18,6 @@ static char *serial_version = "$Revision: 1.25 $"; #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/major.h> -#include <linux/smp_lock.h> #include <linux/string.h> #include <linux/fcntl.h> #include <linux/mm.h> @@ -1411,11 +1410,12 @@ e100_enable_rs485(struct tty_struct *tty, struct serial_rs485 *r) CONFIG_ETRAX_RS485_LTC1387_RXEN_PORT_G_BIT, 1); #endif - info->rs485.flags = r->flags; - if (r->delay_rts_before_send >= 1000) + info->rs485 = *r; + + /* Maximum delay before RTS equal to 1000 */ + if (info->rs485.delay_rts_before_send >= 1000) info->rs485.delay_rts_before_send = 1000; - else - info->rs485.delay_rts_before_send = r->delay_rts_before_send; + /* printk("rts: on send = %i, after = %i, enabled = %i", info->rs485.rts_on_send, info->rs485.rts_after_sent, @@ -3234,9 +3234,9 @@ rs_write(struct tty_struct *tty, e100_disable_rx(info); e100_enable_rx_irq(info); #endif - - if (info->rs485.delay_rts_before_send > 0) - msleep(info->rs485.delay_rts_before_send); + if ((info->rs485.flags & SER_RS485_RTS_BEFORE_SEND) 
&& + (info->rs485.delay_rts_before_send > 0)) + msleep(info->rs485.delay_rts_before_send); } #endif /* CONFIG_ETRAX_RS485 */ @@ -3694,6 +3694,11 @@ rs_ioctl(struct tty_struct *tty, struct file * file, rs485data.delay_rts_before_send = rs485ctrl.delay_rts_before_send; rs485data.flags = 0; + if (rs485data.delay_rts_before_send != 0) + rs485data.flags |= SER_RS485_RTS_BEFORE_SEND; + else + rs485data.flags &= ~(SER_RS485_RTS_BEFORE_SEND); + if (rs485ctrl.enabled) rs485data.flags |= SER_RS485_ENABLED; else @@ -3731,7 +3736,7 @@ rs_ioctl(struct tty_struct *tty, struct file * file, /* This is the ioctl to get RS485 data from user-space */ if (copy_to_user((struct serial_rs485 *) arg, rs485data, - sizeof(serial_rs485))) + sizeof(struct serial_rs485))) return -EFAULT; break; } @@ -4527,6 +4532,7 @@ static int __init rs_init(void) /* Set sane defaults */ info->rs485.flags &= ~(SER_RS485_RTS_ON_SEND); info->rs485.flags |= SER_RS485_RTS_AFTER_SEND; + info->rs485.flags &= ~(SER_RS485_RTS_BEFORE_SEND); info->rs485.delay_rts_before_send = 0; info->rs485.flags &= ~(SER_RS485_ENABLED); #endif diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c index d4b711c9a41..25a8bc565f4 100644 --- a/drivers/serial/kgdboc.c +++ b/drivers/serial/kgdboc.c @@ -18,6 +18,7 @@ #include <linux/tty.h> #include <linux/console.h> #include <linux/vt_kern.h> +#include <linux/input.h> #define MAX_CONFIG_LEN 40 @@ -37,6 +38,62 @@ static struct tty_driver *kgdb_tty_driver; static int kgdb_tty_line; #ifdef CONFIG_KDB_KEYBOARD +static int kgdboc_reset_connect(struct input_handler *handler, + struct input_dev *dev, + const struct input_device_id *id) +{ + input_reset_device(dev); + + /* Retrun an error - we do not want to bind, just to reset */ + return -ENODEV; +} + +static void kgdboc_reset_disconnect(struct input_handle *handle) +{ + /* We do not expect anyone to actually bind to us */ + BUG(); +} + +static const struct input_device_id kgdboc_reset_ids[] = { + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT, + .evbit = { BIT_MASK(EV_KEY) }, + }, + { } +}; + +static struct input_handler kgdboc_reset_handler = { + .connect = kgdboc_reset_connect, + .disconnect = kgdboc_reset_disconnect, + .name = "kgdboc_reset", + .id_table = kgdboc_reset_ids, +}; + +static DEFINE_MUTEX(kgdboc_reset_mutex); + +static void kgdboc_restore_input_helper(struct work_struct *dummy) +{ + /* + * We need to take a mutex to prevent several instances of + * this work running on different CPUs so they don't try + * to register again already registered handler. + */ + mutex_lock(&kgdboc_reset_mutex); + + if (input_register_handler(&kgdboc_reset_handler) == 0) + input_unregister_handler(&kgdboc_reset_handler); + + mutex_unlock(&kgdboc_reset_mutex); +} + +static DECLARE_WORK(kgdboc_restore_input_work, kgdboc_restore_input_helper); + +static void kgdboc_restore_input(void) +{ + if (likely(system_state == SYSTEM_RUNNING)) + schedule_work(&kgdboc_restore_input_work); +} + static int kgdboc_register_kbd(char **cptr) { if (strncmp(*cptr, "kbd", 3) == 0) { @@ -64,10 +121,12 @@ static void kgdboc_unregister_kbd(void) i--; } } + flush_work_sync(&kgdboc_restore_input_work); } #else /* ! CONFIG_KDB_KEYBOARD */ #define kgdboc_register_kbd(x) 0 #define kgdboc_unregister_kbd() +#define kgdboc_restore_input() #endif /* ! 
CONFIG_KDB_KEYBOARD */ static int kgdboc_option_setup(char *opt) @@ -231,6 +290,7 @@ static void kgdboc_post_exp_handler(void) dbg_restore_graphics = 0; con_debug_leave(); } + kgdboc_restore_input(); } static struct kgdb_io kgdboc_io_ops = { diff --git a/drivers/serial/mfd.c b/drivers/serial/mfd.c index 5fc699e929d..d40010a22ec 100644 --- a/drivers/serial/mfd.c +++ b/drivers/serial/mfd.c @@ -900,8 +900,7 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios, unsigned char cval, fcr = 0; unsigned long flags; unsigned int baud, quot; - u32 mul = 0x3600; - u32 ps = 0x10; + u32 ps, mul; switch (termios->c_cflag & CSIZE) { case CS5: @@ -943,31 +942,24 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios, baud = uart_get_baud_rate(port, termios, old, 0, 4000000); quot = 1; + ps = 0x10; + mul = 0x3600; switch (baud) { case 3500000: mul = 0x3345; ps = 0xC; break; - case 3000000: - mul = 0x2EE0; - break; - case 2500000: - mul = 0x2710; - break; - case 2000000: - mul = 0x1F40; - break; case 1843200: mul = 0x2400; break; + case 3000000: + case 2500000: + case 2000000: case 1500000: - mul = 0x1770; - break; case 1000000: - mul = 0xFA0; - break; case 500000: - mul = 0x7D0; + /* mul/ps/quot = 0x9C4/0x10/0x1 will make a 500000 bps */ + mul = baud / 500000 * 0x9C4; break; default: /* Use uart_get_divisor to get quot for other baud rates */ diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c index c4ea14670d4..9ffa5bee44a 100644 --- a/drivers/serial/serial_core.c +++ b/drivers/serial/serial_core.c @@ -29,7 +29,6 @@ #include <linux/console.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> -#include <linux/smp_lock.h> #include <linux/device.h> #include <linux/serial.h> /* for serial_state and serial_icounter_struct */ #include <linux/serial_core.h> diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c index fd0d1b98901..3f5e387ed56 100644 --- a/drivers/sh/clk/core.c +++ b/drivers/sh/clk/core.c @@ -90,8 +90,8 @@ struct clk_rate_round_data { static long clk_rate_round_helper(struct clk_rate_round_data *rounder) { unsigned long rate_error, rate_error_prev = ~0UL; - unsigned long rate_best_fit = rounder->rate; unsigned long highest, lowest, freq; + long rate_best_fit = -ENOENT; int i; highest = 0; @@ -146,7 +146,7 @@ long clk_rate_table_round(struct clk *clk, }; if (clk->nr_freqs < 1) - return 0; + return -ENOSYS; return clk_rate_round_helper(&table_round); } @@ -418,8 +418,11 @@ int clk_register(struct clk *clk) list_add(&clk->sibling, &root_clks); list_add(&clk->node, &clock_list); + +#ifdef CONFIG_SH_CLK_CPG_LEGACY if (clk->ops && clk->ops->init) clk->ops->init(clk); +#endif out_unlock: mutex_unlock(&clock_list_sem); @@ -455,19 +458,13 @@ EXPORT_SYMBOL_GPL(clk_get_rate); int clk_set_rate(struct clk *clk, unsigned long rate) { - return clk_set_rate_ex(clk, rate, 0); -} -EXPORT_SYMBOL_GPL(clk_set_rate); - -int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id) -{ int ret = -EOPNOTSUPP; unsigned long flags; spin_lock_irqsave(&clock_lock, flags); if (likely(clk->ops && clk->ops->set_rate)) { - ret = clk->ops->set_rate(clk, rate, algo_id); + ret = clk->ops->set_rate(clk, rate); if (ret != 0) goto out_unlock; } else { @@ -485,7 +482,7 @@ out_unlock: return ret; } -EXPORT_SYMBOL_GPL(clk_set_rate_ex); +EXPORT_SYMBOL_GPL(clk_set_rate); int clk_set_parent(struct clk *clk, struct clk *parent) { @@ -541,6 +538,98 @@ long clk_round_rate(struct clk *clk, unsigned long rate) } EXPORT_SYMBOL_GPL(clk_round_rate); +long 
clk_round_parent(struct clk *clk, unsigned long target, + unsigned long *best_freq, unsigned long *parent_freq, + unsigned int div_min, unsigned int div_max) +{ + struct cpufreq_frequency_table *freq, *best = NULL; + unsigned long error = ULONG_MAX, freq_high, freq_low, div; + struct clk *parent = clk_get_parent(clk); + + if (!parent) { + *parent_freq = 0; + *best_freq = clk_round_rate(clk, target); + return abs(target - *best_freq); + } + + for (freq = parent->freq_table; freq->frequency != CPUFREQ_TABLE_END; + freq++) { + if (freq->frequency == CPUFREQ_ENTRY_INVALID) + continue; + + if (unlikely(freq->frequency / target <= div_min - 1)) { + unsigned long freq_max; + + freq_max = (freq->frequency + div_min / 2) / div_min; + if (error > target - freq_max) { + error = target - freq_max; + best = freq; + if (best_freq) + *best_freq = freq_max; + } + + pr_debug("too low freq %u, error %lu\n", freq->frequency, + target - freq_max); + + if (!error) + break; + + continue; + } + + if (unlikely(freq->frequency / target >= div_max)) { + unsigned long freq_min; + + freq_min = (freq->frequency + div_max / 2) / div_max; + if (error > freq_min - target) { + error = freq_min - target; + best = freq; + if (best_freq) + *best_freq = freq_min; + } + + pr_debug("too high freq %u, error %lu\n", freq->frequency, + freq_min - target); + + if (!error) + break; + + continue; + } + + div = freq->frequency / target; + freq_high = freq->frequency / div; + freq_low = freq->frequency / (div + 1); + + if (freq_high - target < error) { + error = freq_high - target; + best = freq; + if (best_freq) + *best_freq = freq_high; + } + + if (target - freq_low < error) { + error = target - freq_low; + best = freq; + if (best_freq) + *best_freq = freq_low; + } + + pr_debug("%u / %lu = %lu, / %lu = %lu, best %lu, parent %u\n", + freq->frequency, div, freq_high, div + 1, freq_low, + *best_freq, best->frequency); + + if (!error) + break; + } + + if (parent_freq) + *parent_freq = best->frequency; + + return error; +} +EXPORT_SYMBOL_GPL(clk_round_parent); + #ifdef CONFIG_PM static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state) { @@ -561,8 +650,7 @@ static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state) clkp->ops->set_parent(clkp, clkp->parent); if (likely(clkp->ops->set_rate)) - clkp->ops->set_rate(clkp, - rate, NO_CHANGE); + clkp->ops->set_rate(clkp, rate); else if (likely(clkp->ops->recalc)) clkp->rate = clkp->ops->recalc(clkp); } diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c index 3aea5f0ceb0..6172335ae32 100644 --- a/drivers/sh/clk/cpg.c +++ b/drivers/sh/clk/cpg.c @@ -110,8 +110,7 @@ static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent) return 0; } -static int sh_clk_div6_set_rate(struct clk *clk, - unsigned long rate, int algo_id) +static int sh_clk_div6_set_rate(struct clk *clk, unsigned long rate) { unsigned long value; int idx; @@ -132,7 +131,7 @@ static int sh_clk_div6_enable(struct clk *clk) unsigned long value; int ret; - ret = sh_clk_div6_set_rate(clk, clk->rate, 0); + ret = sh_clk_div6_set_rate(clk, clk->rate); if (ret == 0) { value = __raw_readl(clk->enable_reg); value &= ~0x100; /* clear stop bit to enable clock */ @@ -253,7 +252,7 @@ static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent) return 0; } -static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate, int algo_id) +static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate) { struct clk_div4_table *d4t = clk->priv; unsigned long value; diff --git 
a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c index 873a99ff8f6..e5e9e6735f7 100644 --- a/drivers/sh/intc/core.c +++ b/drivers/sh/intc/core.c @@ -79,7 +79,7 @@ static void __init intc_register_irq(struct intc_desc *desc, * Register the IRQ position with the global IRQ map, then insert * it in to the radix tree. */ - irq_reserve_irqs(irq, 1); + irq_reserve_irq(irq); raw_spin_lock_irqsave(&intc_big_lock, flags); radix_tree_insert(&d->tree, enum_id, intc_irq_xlate_get(irq)); diff --git a/drivers/sh/intc/dynamic.c b/drivers/sh/intc/dynamic.c index 4187cce20ff..a3677c9dfe3 100644 --- a/drivers/sh/intc/dynamic.c +++ b/drivers/sh/intc/dynamic.c @@ -60,5 +60,5 @@ void reserve_intc_vectors(struct intc_vect *vectors, unsigned int nr_vecs) int i; for (i = 0; i < nr_vecs; i++) - irq_reserve_irqs(evt2irq(vectors[i].vect), 1); + irq_reserve_irq(evt2irq(vectors[i].vect)); } diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c index e5bf5d3c698..4e0ff718116 100644 --- a/drivers/sh/intc/virq.c +++ b/drivers/sh/intc/virq.c @@ -215,7 +215,7 @@ restart: entry = radix_tree_deref_slot((void **)entries[i]); if (unlikely(!entry)) continue; - if (unlikely(entry == RADIX_TREE_RETRY)) + if (radix_tree_deref_retry(entry)) goto restart; irq = create_irq(); diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c index 154529aacc0..a067046c9da 100644 --- a/drivers/spi/atmel_spi.c +++ b/drivers/spi/atmel_spi.c @@ -352,8 +352,12 @@ atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer) xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS; if (xfer->tx_buf) { + /* tx_buf is a const void* where we need a void * for the dma + * mapping */ + void *nonconst_tx = (void *)xfer->tx_buf; + xfer->tx_dma = dma_map_single(dev, - (void *) xfer->tx_buf, xfer->len, + nonconst_tx, xfer->len, DMA_TO_DEVICE); if (dma_mapping_error(dev, xfer->tx_dma)) return -ENOMEM; diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c index 90439314cf6..0838c79861e 100644 --- a/drivers/spi/dw_spi.c +++ b/drivers/spi/dw_spi.c @@ -413,6 +413,11 @@ static void poll_transfer(struct dw_spi *dws) { while (dws->write(dws)) dws->read(dws); + /* + * There is a possibility that the last word of a transaction + * will be lost if data is not ready. Re-read to solve this issue. + */ + dws->read(dws); transfer_complete(dws); } diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index b5a78a1f442..709c836607d 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -29,11 +29,6 @@ #include <linux/spi/spi.h> #include <linux/of_spi.h> - -/* SPI bustype and spi_master class are registered after board init code - * provides the SPI device tables, ensuring that both are present by the - * time controller driver registration causes spi_devices to "enumerate". 
- */ static void spidev_release(struct device *dev) { struct spi_device *spi = to_spi_device(dev); @@ -202,11 +197,16 @@ EXPORT_SYMBOL_GPL(spi_register_driver); struct boardinfo { struct list_head list; - unsigned n_board_info; - struct spi_board_info board_info[0]; + struct spi_board_info board_info; }; static LIST_HEAD(board_list); +static LIST_HEAD(spi_master_list); + +/* + * Used to protect add/del opertion for board_info list and + * spi_master list, and their matching process + */ static DEFINE_MUTEX(board_lock); /** @@ -300,16 +300,16 @@ int spi_add_device(struct spi_device *spi) */ status = spi_setup(spi); if (status < 0) { - dev_err(dev, "can't %s %s, status %d\n", - "setup", dev_name(&spi->dev), status); + dev_err(dev, "can't setup %s, status %d\n", + dev_name(&spi->dev), status); goto done; } /* Device may be bound to an active driver when this returns */ status = device_add(&spi->dev); if (status < 0) - dev_err(dev, "can't %s %s, status %d\n", - "add", dev_name(&spi->dev), status); + dev_err(dev, "can't add %s, status %d\n", + dev_name(&spi->dev), status); else dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev)); @@ -371,6 +371,20 @@ struct spi_device *spi_new_device(struct spi_master *master, } EXPORT_SYMBOL_GPL(spi_new_device); +static void spi_match_master_to_boardinfo(struct spi_master *master, + struct spi_board_info *bi) +{ + struct spi_device *dev; + + if (master->bus_num != bi->bus_num) + return; + + dev = spi_new_device(master, bi); + if (!dev) + dev_err(master->dev.parent, "can't create new device for %s\n", + bi->modalias); +} + /** * spi_register_board_info - register SPI devices for a given board * @info: array of chip descriptors @@ -393,43 +407,25 @@ EXPORT_SYMBOL_GPL(spi_new_device); int __init spi_register_board_info(struct spi_board_info const *info, unsigned n) { - struct boardinfo *bi; + struct boardinfo *bi; + int i; - bi = kmalloc(sizeof(*bi) + n * sizeof *info, GFP_KERNEL); + bi = kzalloc(n * sizeof(*bi), GFP_KERNEL); if (!bi) return -ENOMEM; - bi->n_board_info = n; - memcpy(bi->board_info, info, n * sizeof *info); - mutex_lock(&board_lock); - list_add_tail(&bi->list, &board_list); - mutex_unlock(&board_lock); - return 0; -} + for (i = 0; i < n; i++, bi++, info++) { + struct spi_master *master; -/* FIXME someone should add support for a __setup("spi", ...) 
that - * creates board info from kernel command lines - */ - -static void scan_boardinfo(struct spi_master *master) -{ - struct boardinfo *bi; - - mutex_lock(&board_lock); - list_for_each_entry(bi, &board_list, list) { - struct spi_board_info *chip = bi->board_info; - unsigned n; - - for (n = bi->n_board_info; n > 0; n--, chip++) { - if (chip->bus_num != master->bus_num) - continue; - /* NOTE: this relies on spi_new_device to - * issue diagnostics when given bogus inputs - */ - (void) spi_new_device(master, chip); - } + memcpy(&bi->board_info, info, sizeof(*info)); + mutex_lock(&board_lock); + list_add_tail(&bi->list, &board_list); + list_for_each_entry(master, &spi_master_list, list) + spi_match_master_to_boardinfo(master, &bi->board_info); + mutex_unlock(&board_lock); } - mutex_unlock(&board_lock); + + return 0; } /*-------------------------------------------------------------------------*/ @@ -512,6 +508,7 @@ int spi_register_master(struct spi_master *master) { static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1); struct device *dev = master->dev.parent; + struct boardinfo *bi; int status = -ENODEV; int dynamic = 0; @@ -547,8 +544,12 @@ int spi_register_master(struct spi_master *master) dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev), dynamic ? " (dynamic)" : ""); - /* populate children from any spi device tables */ - scan_boardinfo(master); + mutex_lock(&board_lock); + list_add_tail(&master->list, &spi_master_list); + list_for_each_entry(bi, &board_list, list) + spi_match_master_to_boardinfo(master, &bi->board_info); + mutex_unlock(&board_lock); + status = 0; /* Register devices from the device tree */ @@ -579,7 +580,12 @@ void spi_unregister_master(struct spi_master *master) { int dummy; - dummy = device_for_each_child(&master->dev, NULL, __unregister); + mutex_lock(&board_lock); + list_del(&master->list); + mutex_unlock(&board_lock); + + dummy = device_for_each_child(master->dev.parent, &master->dev, + __unregister); device_unregister(&master->dev); } EXPORT_SYMBOL_GPL(spi_unregister_master); @@ -652,7 +658,7 @@ int spi_setup(struct spi_device *spi) */ bad_bits = spi->mode & ~spi->master->mode_bits; if (bad_bits) { - dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n", + dev_err(&spi->dev, "setup: unsupported mode bits %x\n", bad_bits); return -EINVAL; } diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c index ab483a0ec6d..3f223511127 100644 --- a/drivers/spi/spi_bfin5xx.c +++ b/drivers/spi/spi_bfin5xx.c @@ -504,6 +504,15 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id) "in dma_irq_handler dmastat:0x%x spistat:0x%x\n", dmastat, spistat); + if (drv_data->rx != NULL) { + u16 cr = read_CTRL(drv_data); + /* discard old RX data and clear RXS */ + bfin_spi_dummy_read(drv_data); + write_CTRL(drv_data, cr & ~BIT_CTL_ENABLE); /* Disable SPI */ + write_CTRL(drv_data, cr & ~BIT_CTL_TIMOD); /* Restore State */ + write_STAT(drv_data, BIT_STAT_CLR); /* Clear Status */ + } + clear_dma_irqstat(drv_data->dma_channel); /* @@ -1099,12 +1108,15 @@ static int bfin_spi_setup(struct spi_device *spi) } if (chip->chip_select_num >= MAX_CTRL_CS) { - ret = gpio_request(chip->cs_gpio, spi->modalias); - if (ret) { - dev_err(&spi->dev, "gpio_request() error\n"); - goto pin_error; + /* Only request on first setup */ + if (spi_get_ctldata(spi) == NULL) { + ret = gpio_request(chip->cs_gpio, spi->modalias); + if (ret) { + dev_err(&spi->dev, "gpio_request() error\n"); + goto pin_error; + } + gpio_direction_output(chip->cs_gpio, 1); } - 
gpio_direction_output(chip->cs_gpio, 1); } dev_dbg(&spi->dev, "setup spi chip %s, width is %d, dma is %d\n", diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c index ef9c6a04ad8..744d3f6e470 100644 --- a/drivers/ssb/b43_pci_bridge.c +++ b/drivers/ssb/b43_pci_bridge.c @@ -24,6 +24,7 @@ static const struct pci_device_id b43_pci_bridge_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4312) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4315) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4318) }, + { PCI_DEVICE(PCI_VENDOR_ID_BCM_GVC, 0x4318) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4319) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4320) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4321) }, diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c index 8c95d8c2a4f..016c6f7f863 100644 --- a/drivers/staging/asus_oled/asus_oled.c +++ b/drivers/staging/asus_oled/asus_oled.c @@ -620,13 +620,13 @@ static ssize_t class_set_picture(struct device *device, #define ASUS_OLED_DEVICE_ATTR(_file) dev_attr_asus_oled_##_file -static DEVICE_ATTR(asus_oled_enabled, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(asus_oled_enabled, S_IWUSR | S_IRUGO, get_enabled, set_enabled); -static DEVICE_ATTR(asus_oled_picture, S_IWUGO , NULL, set_picture); +static DEVICE_ATTR(asus_oled_picture, S_IWUSR , NULL, set_picture); -static DEVICE_ATTR(enabled, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(enabled, S_IWUSR | S_IRUGO, class_get_enabled, class_set_enabled); -static DEVICE_ATTR(picture, S_IWUGO, NULL, class_set_picture); +static DEVICE_ATTR(picture, S_IWUSR, NULL, class_set_picture); static int asus_oled_probe(struct usb_interface *interface, const struct usb_device_id *id) diff --git a/drivers/staging/ath6kl/Kconfig b/drivers/staging/ath6kl/Kconfig index ae2cdf48b74..8a5caa30b85 100644 --- a/drivers/staging/ath6kl/Kconfig +++ b/drivers/staging/ath6kl/Kconfig @@ -102,7 +102,7 @@ config AR600x_BT_RESET_PIN config ATH6KL_CFG80211 bool "CFG80211 support" - depends on ATH6K_LEGACY + depends on ATH6K_LEGACY && CFG80211 help Enables support for CFG80211 APIs. The default option is to use WEXT. Even with this option enabled, WEXT is not explicitly disabled and the onus of not exercising WEXT lies on the application(s) running in the user space. 
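The asus_oled hunk above is one instance of a sysfs hardening pattern that recurs through this merge: world-writable attribute modes (S_IWUGO) are narrowed to owner-writable (S_IWUSR). A minimal sketch of the resulting declaration, assuming hypothetical example_show()/example_store() handlers rather than any driver's real callbacks:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>

/* Hypothetical handlers; a real driver supplies its own show/store. */
static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 1);
}

static ssize_t example_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	return count;
}

/* Owner may write, everyone may read; S_IWUGO would also grant group/other write. */
static DEVICE_ATTR(example, S_IWUSR | S_IRUGO, example_show, example_store);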
diff --git a/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif_scatter.c b/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif_scatter.c index 22c6c6659f5..ee8b47746a1 100644 --- a/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif_scatter.c +++ b/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif_scatter.c @@ -285,9 +285,9 @@ A_STATUS SetupHIFScatterSupport(HIF_DEVICE *device, HIF_DEVICE_SCATTER_SUPPORT_I do { /* check if host supports scatter requests and it meets our requirements */ - if (device->func->card->host->max_hw_segs < MAX_SCATTER_ENTRIES_PER_REQ) { + if (device->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HIF-SCATTER : host only supports scatter of : %d entries, need: %d \n", - device->func->card->host->max_hw_segs, MAX_SCATTER_ENTRIES_PER_REQ)); + device->func->card->host->max_segs, MAX_SCATTER_ENTRIES_PER_REQ)); status = A_ENOTSUP; break; } diff --git a/drivers/staging/ath6kl/os/linux/ar6000_drv.c b/drivers/staging/ath6kl/os/linux/ar6000_drv.c index c5a6d6c1673..a659f704737 100644 --- a/drivers/staging/ath6kl/os/linux/ar6000_drv.c +++ b/drivers/staging/ath6kl/os/linux/ar6000_drv.c @@ -1126,7 +1126,7 @@ ar6000_transfer_bin_file(AR_SOFTC_T *ar, AR6K_BIN_FILE file, A_UINT32 address, A if ((board_ext_address) && (fw_entry->size == (board_data_size + board_ext_data_size))) { A_UINT32 param; - status = BMIWriteMemory(ar->arHifDevice, board_ext_address, (A_UCHAR *)(((A_UINT32)fw_entry->data) + board_data_size), board_ext_data_size); + status = BMIWriteMemory(ar->arHifDevice, board_ext_address, (A_UCHAR *)(fw_entry->data + board_data_size), board_ext_data_size); if (status != A_OK) { AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__)); @@ -3030,7 +3030,8 @@ ar6000_data_tx(struct sk_buff *skb, struct net_device *dev) A_UINT8 csumDest=0; A_UINT8 csum=skb->ip_summed; if(csumOffload && (csum==CHECKSUM_PARTIAL)){ - csumStart=skb->csum_start-(skb->network_header-skb->head)+sizeof(ATH_LLC_SNAP_HDR); + csumStart = (skb->head + skb->csum_start - skb_network_header(skb) + + sizeof(ATH_LLC_SNAP_HDR)); csumDest=skb->csum_offset+csumStart; } #endif diff --git a/drivers/staging/ath6kl/os/linux/ar6000_raw_if.c b/drivers/staging/ath6kl/os/linux/ar6000_raw_if.c index c196098f085..6b8eeea475c 100644 --- a/drivers/staging/ath6kl/os/linux/ar6000_raw_if.c +++ b/drivers/staging/ath6kl/os/linux/ar6000_raw_if.c @@ -198,8 +198,8 @@ int ar6000_htc_raw_open(AR_SOFTC_T *ar) for (streamID = HTC_RAW_STREAM_0; streamID < HTC_RAW_STREAM_NUM_MAX; streamID++) { /* Initialize the data structures */ - init_MUTEX(&arRaw->raw_htc_read_sem[streamID]); - init_MUTEX(&arRaw->raw_htc_write_sem[streamID]); + sema_init(&arRaw->raw_htc_read_sem[streamID], 1); + sema_init(&arRaw->raw_htc_write_sem[streamID], 1); init_waitqueue_head(&arRaw->raw_htc_read_queue[streamID]); init_waitqueue_head(&arRaw->raw_htc_write_queue[streamID]); diff --git a/drivers/staging/ath6kl/os/linux/cfg80211.c b/drivers/staging/ath6kl/os/linux/cfg80211.c index c94ad29eeb4..7269d0a1d61 100644 --- a/drivers/staging/ath6kl/os/linux/cfg80211.c +++ b/drivers/staging/ath6kl/os/linux/cfg80211.c @@ -808,7 +808,7 @@ ar6k_cfg80211_scanComplete_event(AR_SOFTC_T *ar, A_STATUS status) static int ar6k_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, - A_UINT8 key_index, const A_UINT8 *mac_addr, + A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr, struct key_params *params) { AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev); @@ -901,7 +901,7 @@ ar6k_cfg80211_add_key(struct wiphy 
*wiphy, struct net_device *ndev, static int ar6k_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, - A_UINT8 key_index, const A_UINT8 *mac_addr) + A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr) { AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev); @@ -936,7 +936,8 @@ ar6k_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, static int ar6k_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, - A_UINT8 key_index, const A_UINT8 *mac_addr, void *cookie, + A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr, + void *cookie, void (*callback)(void *cookie, struct key_params*)) { AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev); diff --git a/drivers/staging/ath6kl/os/linux/include/athendpack_linux.h b/drivers/staging/ath6kl/os/linux/include/athendpack_linux.h deleted file mode 100644 index e69de29bb2d..00000000000 --- a/drivers/staging/ath6kl/os/linux/include/athendpack_linux.h +++ /dev/null diff --git a/drivers/staging/ath6kl/os/linux/include/athstartpack_linux.h b/drivers/staging/ath6kl/os/linux/include/athstartpack_linux.h deleted file mode 100644 index e69de29bb2d..00000000000 --- a/drivers/staging/ath6kl/os/linux/include/athstartpack_linux.h +++ /dev/null diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c index 80cfa866958..d85de82f941 100644 --- a/drivers/staging/batman-adv/hard-interface.c +++ b/drivers/staging/batman-adv/hard-interface.c @@ -165,7 +165,7 @@ static void update_mac_addresses(struct batman_if *batman_if) batman_if->net_dev->dev_addr, ETH_ALEN); } -static void check_known_mac_addr(uint8_t *addr) +static void check_known_mac_addr(struct net_device *net_dev) { struct batman_if *batman_if; @@ -175,11 +175,16 @@ static void check_known_mac_addr(uint8_t *addr) (batman_if->if_status != IF_TO_BE_ACTIVATED)) continue; - if (!compare_orig(batman_if->net_dev->dev_addr, addr)) + if (batman_if->net_dev == net_dev) + continue; + + if (!compare_orig(batman_if->net_dev->dev_addr, + net_dev->dev_addr)) continue; pr_warning("The newly added mac address (%pM) already exists " - "on: %s\n", addr, batman_if->net_dev->name); + "on: %s\n", net_dev->dev_addr, + batman_if->net_dev->name); pr_warning("It is strongly recommended to keep mac addresses " "unique to avoid problems!\n"); } @@ -430,7 +435,7 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev) atomic_set(&batman_if->refcnt, 0); hardif_hold(batman_if); - check_known_mac_addr(batman_if->net_dev->dev_addr); + check_known_mac_addr(batman_if->net_dev); spin_lock(&if_list_lock); list_add_tail_rcu(&batman_if->list, &if_list); @@ -458,9 +463,6 @@ static void hardif_remove_interface(struct batman_if *batman_if) return; batman_if->if_status = IF_TO_BE_REMOVED; - - /* caller must take if_list_lock */ - list_del_rcu(&batman_if->list); synchronize_rcu(); sysfs_del_hardif(&batman_if->hardif_obj); hardif_put(batman_if); @@ -469,13 +471,21 @@ static void hardif_remove_interface(struct batman_if *batman_if) void hardif_remove_interfaces(void) { struct batman_if *batman_if, *batman_if_tmp; + struct list_head if_queue; + + INIT_LIST_HEAD(&if_queue); - rtnl_lock(); spin_lock(&if_list_lock); list_for_each_entry_safe(batman_if, batman_if_tmp, &if_list, list) { - hardif_remove_interface(batman_if); + list_del_rcu(&batman_if->list); + list_add_tail(&batman_if->list, &if_queue); } spin_unlock(&if_list_lock); + + rtnl_lock(); + list_for_each_entry_safe(batman_if, batman_if_tmp, &if_queue, list) { + hardif_remove_interface(batman_if); + } 
rtnl_unlock(); } @@ -502,8 +512,10 @@ static int hard_if_event(struct notifier_block *this, break; case NETDEV_UNREGISTER: spin_lock(&if_list_lock); - hardif_remove_interface(batman_if); + list_del_rcu(&batman_if->list); spin_unlock(&if_list_lock); + + hardif_remove_interface(batman_if); break; case NETDEV_CHANGEMTU: if (batman_if->soft_iface) @@ -515,7 +527,7 @@ static int hard_if_event(struct notifier_block *this, goto out; } - check_known_mac_addr(batman_if->net_dev->dev_addr); + check_known_mac_addr(batman_if->net_dev); update_mac_addresses(batman_if); bat_priv = netdev_priv(batman_if->soft_iface); diff --git a/drivers/staging/batman-adv/routing.c b/drivers/staging/batman-adv/routing.c index 90102631330..657b69e6b95 100644 --- a/drivers/staging/batman-adv/routing.c +++ b/drivers/staging/batman-adv/routing.c @@ -1000,10 +1000,10 @@ int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if) /* find a suitable router for this originator, and use * bonding if possible. */ -struct neigh_node *find_router(struct orig_node *orig_node, +struct neigh_node *find_router(struct bat_priv *bat_priv, + struct orig_node *orig_node, struct batman_if *recv_if) { - struct bat_priv *bat_priv; struct orig_node *primary_orig_node; struct orig_node *router_orig; struct neigh_node *router, *first_candidate, *best_router; @@ -1019,13 +1019,9 @@ struct neigh_node *find_router(struct orig_node *orig_node, /* without bonding, the first node should * always choose the default router. */ - if (!recv_if) - return orig_node->router; - - bat_priv = netdev_priv(recv_if->soft_iface); bonding_enabled = atomic_read(&bat_priv->bonding_enabled); - if (!bonding_enabled) + if ((!recv_if) && (!bonding_enabled)) return orig_node->router; router_orig = orig_node->router->orig_node; @@ -1154,7 +1150,7 @@ static int route_unicast_packet(struct sk_buff *skb, orig_node = ((struct orig_node *) hash_find(bat_priv->orig_hash, unicast_packet->dest)); - router = find_router(orig_node, recv_if); + router = find_router(bat_priv, orig_node, recv_if); if (!router) { spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags); diff --git a/drivers/staging/batman-adv/routing.h b/drivers/staging/batman-adv/routing.h index 06ea99df370..92674c8d9c0 100644 --- a/drivers/staging/batman-adv/routing.h +++ b/drivers/staging/batman-adv/routing.h @@ -38,8 +38,8 @@ int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if); int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if); int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if); int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if); -struct neigh_node *find_router(struct orig_node *orig_node, - struct batman_if *recv_if); +struct neigh_node *find_router(struct bat_priv *bat_priv, + struct orig_node *orig_node, struct batman_if *recv_if); void update_bonding_candidates(struct bat_priv *bat_priv, struct orig_node *orig_node); diff --git a/drivers/staging/batman-adv/soft-interface.c b/drivers/staging/batman-adv/soft-interface.c index 3904db9ce7b..0e996181daf 100644 --- a/drivers/staging/batman-adv/soft-interface.c +++ b/drivers/staging/batman-adv/soft-interface.c @@ -194,14 +194,15 @@ void interface_rx(struct net_device *soft_iface, struct bat_priv *priv = netdev_priv(soft_iface); /* check if enough space is available for pulling, and pull */ - if (!pskb_may_pull(skb, hdr_size)) { - kfree_skb(skb); - return; - } + if (!pskb_may_pull(skb, hdr_size)) + goto dropped; + skb_pull_rcsum(skb, hdr_size); /* skb_set_mac_header(skb, -sizeof(struct 
ethhdr));*/ /* skb->dev & skb->pkt_type are set here */ + if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) + goto dropped; skb->protocol = eth_type_trans(skb, soft_iface); /* should not be neccesary anymore as we use skb_pull_rcsum() @@ -216,6 +217,11 @@ void interface_rx(struct net_device *soft_iface, soft_iface->last_rx = jiffies; netif_rx(skb); + return; + +dropped: + kfree_skb(skb); + return; } #ifdef HAVE_NET_DEVICE_OPS diff --git a/drivers/staging/batman-adv/unicast.c b/drivers/staging/batman-adv/unicast.c index 0dac50d69c0..0459413ff67 100644 --- a/drivers/staging/batman-adv/unicast.c +++ b/drivers/staging/batman-adv/unicast.c @@ -224,7 +224,7 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv) if (!orig_node) orig_node = transtable_search(bat_priv, ethhdr->h_dest); - router = find_router(orig_node, NULL); + router = find_router(bat_priv, orig_node, NULL); if (!router) goto unlock; diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c index 77fdfe24d99..fead9c56162 100644 --- a/drivers/staging/bcm/Bcmchar.c +++ b/drivers/staging/bcm/Bcmchar.c @@ -1001,13 +1001,15 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) } #endif case IOCTL_BE_BUCKET_SIZE: - Adapter->BEBucketSize = *(PULONG)arg; - Status = STATUS_SUCCESS; + Status = 0; + if (get_user(Adapter->BEBucketSize, (unsigned long __user *)arg)) + Status = -EFAULT; break; case IOCTL_RTPS_BUCKET_SIZE: - Adapter->rtPSBucketSize = *(PULONG)arg; - Status = STATUS_SUCCESS; + Status = 0; + if (get_user(Adapter->rtPSBucketSize, (unsigned long __user *)arg)) + Status = -EFAULT; break; case IOCTL_CHIP_RESET: { @@ -1028,11 +1030,15 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) case IOCTL_QOS_THRESHOLD: { USHORT uiLoopIndex; - for(uiLoopIndex = 0 ; uiLoopIndex < NO_OF_QUEUES ; uiLoopIndex++) - { - Adapter->PackInfo[uiLoopIndex].uiThreshold = *(PULONG)arg; + + Status = 0; + for (uiLoopIndex = 0; uiLoopIndex < NO_OF_QUEUES; uiLoopIndex++) { + if (get_user(Adapter->PackInfo[uiLoopIndex].uiThreshold, + (unsigned long __user *)arg)) { + Status = -EFAULT; + break; + } } - Status = STATUS_SUCCESS; break; } @@ -1093,7 +1099,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) } case IOCTL_BCM_GET_CURRENT_STATUS: { - LINK_STATE *plink_state = NULL; + LINK_STATE plink_state; + /* Copy Ioctl Buffer structure */ if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) { @@ -1101,13 +1108,19 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) Status = -EFAULT; break; } - plink_state = (LINK_STATE*)arg; - plink_state->bIdleMode = (UCHAR)Adapter->IdleMode; - plink_state->bShutdownMode = Adapter->bShutStatus; - plink_state->ucLinkStatus = (UCHAR)Adapter->LinkStatus; - if(copy_to_user(IoBuffer.OutputBuffer, - (PUCHAR)plink_state, (UINT)IoBuffer.OutputLength)) - { + if (IoBuffer.OutputLength != sizeof(plink_state)) { + Status = -EINVAL; + break; + } + + if (copy_from_user(&plink_state, (void __user *)arg, sizeof(plink_state))) { + Status = -EFAULT; + break; + } + plink_state.bIdleMode = (UCHAR)Adapter->IdleMode; + plink_state.bShutdownMode = Adapter->bShutStatus; + plink_state.ucLinkStatus = (UCHAR)Adapter->LinkStatus; + if (copy_to_user(IoBuffer.OutputBuffer, &plink_state, IoBuffer.OutputLength)) { BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy_to_user Failed..\n"); Status = -EFAULT; break; @@ -1331,7 +1344,9 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg) BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Copy From User 
space failed. status :%d", Status); return -EFAULT; } - uiSectorSize = *((PUINT)(IoBuffer.InputBuffer)); /* FIXME: unchecked __user access */ + if (get_user(uiSectorSize, (unsigned int __user *)IoBuffer.InputBuffer)) + return -EFAULT; + if((uiSectorSize < MIN_SECTOR_SIZE) || (uiSectorSize > MAX_SECTOR_SIZE)) { diff --git a/drivers/staging/brcm80211/README b/drivers/staging/brcm80211/README index c3ba9bb9b11..a27bb0b4f58 100644 --- a/drivers/staging/brcm80211/README +++ b/drivers/staging/brcm80211/README @@ -88,7 +88,9 @@ with the driver. Contact Info: ============= -Brett Rudley brudley@broadcom.com -Henry Ptasinski henryp@broadcom.com -Nohee Ko noheek@broadcom.com +Brett Rudley brudley@broadcom.com +Henry Ptasinski henryp@broadcom.com +Dowan Kim dowan@broadcom.com +Roland Vossen rvossen@broadcom.com +Arend van Spriel arend@broadcom.com diff --git a/drivers/staging/brcm80211/TODO b/drivers/staging/brcm80211/TODO index 8803d300b53..24ebadbe424 100644 --- a/drivers/staging/brcm80211/TODO +++ b/drivers/staging/brcm80211/TODO @@ -45,5 +45,7 @@ Contact ===== Brett Rudley <brudley@broadcom.com> Henry Ptasinski <henryp@broadcom.com> -Nohee Ko <noheek@broadcom.com> +Dowan Kim <dowan@broadcom.com> +Roland Vossen <rvossen@broadcom.com> +Arend van Spriel <arend@broadcom.com> diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c index e5357875661..9335f02029a 100644 --- a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c +++ b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c @@ -1929,7 +1929,7 @@ dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) goto fail; net->netdev_ops = NULL; - init_MUTEX(&dhd->proto_sem); + sema_init(&dhd->proto_sem, 1); /* Initialize other structure content */ init_waitqueue_head(&dhd->ioctl_resp_wait); init_waitqueue_head(&dhd->ctrl_wait); @@ -1977,7 +1977,7 @@ dhd_pub_t *dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen) dhd->timer.function = dhd_watchdog; /* Initialize thread based operation and lock */ - init_MUTEX(&dhd->sdsem); + sema_init(&dhd->sdsem, 1); if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0)) dhd->threads_only = true; else @@ -2222,8 +2222,6 @@ int dhd_net_attach(dhd_pub_t *dhdp, int ifidx) ASSERT(net); ASSERT(!net->netdev_ops); - net->netdev_ops = &dhd_ops_virt; - net->netdev_ops = &dhd_ops_pri; /* diff --git a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c index 3f29488d9c7..ea0825238d5 100644 --- a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c @@ -95,12 +95,12 @@ static s32 wl_cfg80211_config_default_key(struct wiphy *wiphy, struct net_device *dev, u8 key_idx); static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev, - u8 key_idx, const u8 *mac_addr, + u8 key_idx, bool pairwise, const u8 *mac_addr, struct key_params *params); static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev, - u8 key_idx, const u8 *mac_addr); + u8 key_idx, bool pairwise, const u8 *mac_addr); static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev, - u8 key_idx, const u8 *mac_addr, + u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie, void (*callback) (void *cookie, struct key_params * @@ -1615,7 +1615,7 @@ wl_add_keyext(struct wiphy *wiphy, struct net_device *dev, static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev, - u8 key_idx, const u8 *mac_addr, + u8 key_idx, bool pairwise, const u8 *mac_addr, struct 
key_params *params) { struct wl_wsec_key key; @@ -1700,7 +1700,7 @@ wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev, static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev, - u8 key_idx, const u8 *mac_addr) + u8 key_idx, bool pairwise, const u8 *mac_addr) { struct wl_wsec_key key; s32 err = 0; @@ -1756,7 +1756,7 @@ wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev, static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev, - u8 key_idx, const u8 *mac_addr, void *cookie, + u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie, void (*callback) (void *cookie, struct key_params * params)) { struct key_params params; diff --git a/drivers/staging/brcm80211/sys/wl_mac80211.c b/drivers/staging/brcm80211/sys/wl_mac80211.c index ad635ee7758..d060377629a 100644 --- a/drivers/staging/brcm80211/sys/wl_mac80211.c +++ b/drivers/staging/brcm80211/sys/wl_mac80211.c @@ -866,7 +866,7 @@ static wl_info_t *wl_attach(u16 vendor, u16 device, unsigned long regs, spin_lock_init(&wl->rpcq_lock); spin_lock_init(&wl->txq_lock); - init_MUTEX(&wl->sem); + sema_init(&wl->sem, 1); #else spin_lock_init(&wl->lock); spin_lock_init(&wl->isr_lock); diff --git a/drivers/staging/comedi/drivers/dt9812.c b/drivers/staging/comedi/drivers/dt9812.c index 0560a745151..06059850dae 100644 --- a/drivers/staging/comedi/drivers/dt9812.c +++ b/drivers/staging/comedi/drivers/dt9812.c @@ -262,7 +262,7 @@ struct dt9812_usb_cmd { #define DT9812_NUM_SLOTS 16 -static DECLARE_MUTEX(dt9812_mutex); +static DEFINE_SEMAPHORE(dt9812_mutex); static const struct usb_device_id dt9812_table[] = { {USB_DEVICE(0x0867, 0x9812)}, diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c index 6131e2dd059..de784ff08ca 100644 --- a/drivers/staging/comedi/drivers/usbdux.c +++ b/drivers/staging/comedi/drivers/usbdux.c @@ -315,7 +315,7 @@ struct usbduxsub { */ static struct usbduxsub usbduxsub[NUMUSBDUX]; -static DECLARE_MUTEX(start_stop_sem); +static DEFINE_SEMAPHORE(start_stop_sem); /* * Stops the data acquision @@ -2295,8 +2295,8 @@ static void tidy_up(struct usbduxsub *usbduxsub_tmp) usbduxsub_tmp->inBuffer = NULL; kfree(usbduxsub_tmp->insnBuffer); usbduxsub_tmp->insnBuffer = NULL; - kfree(usbduxsub_tmp->inBuffer); - usbduxsub_tmp->inBuffer = NULL; + kfree(usbduxsub_tmp->outBuffer); + usbduxsub_tmp->outBuffer = NULL; kfree(usbduxsub_tmp->dac_commands); usbduxsub_tmp->dac_commands = NULL; kfree(usbduxsub_tmp->dux_commands); @@ -2367,7 +2367,7 @@ static int usbduxsub_probe(struct usb_interface *uinterf, dev_dbg(dev, "comedi_: usbdux: " "usbduxsub[%d] is ready to connect to comedi.\n", index); - init_MUTEX(&(usbduxsub[index].sem)); + sema_init(&(usbduxsub[index].sem), 1); /* save a pointer to the usb device */ usbduxsub[index].usbdev = udev; diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c index 0a164a9a66c..5b15e6df54e 100644 --- a/drivers/staging/comedi/drivers/usbduxfast.c +++ b/drivers/staging/comedi/drivers/usbduxfast.c @@ -199,7 +199,7 @@ struct usbduxfastsub_s { */ static struct usbduxfastsub_s usbduxfastsub[NUMUSBDUXFAST]; -static DECLARE_MUTEX(start_stop_sem); +static DEFINE_SEMAPHORE(start_stop_sem); /* * bulk transfers to usbduxfast @@ -1504,7 +1504,7 @@ static int usbduxfastsub_probe(struct usb_interface *uinterf, "connect to comedi.\n", index); #endif - init_MUTEX(&(usbduxfastsub[index].sem)); + sema_init(&(usbduxfastsub[index].sem), 1); /* save a pointer to the usb device */ 
usbduxfastsub[index].usbdev = udev; diff --git a/drivers/staging/cpia/cpia.c b/drivers/staging/cpia/cpia.c index 933ae4c8cb9..0e740b8dafc 100644 --- a/drivers/staging/cpia/cpia.c +++ b/drivers/staging/cpia/cpia.c @@ -3184,13 +3184,9 @@ static int cpia_open(struct file *file) goto oops; } - err = -EINTR; - if(signal_pending(current)) - goto oops; - /* Set ownership of /proc/cpia/videoX to current user */ if(cam->proc_entry) - cam->proc_entry->uid = current_uid(); + cam->proc_entry->uid = current_euid(); /* set mark for loading first frame uncompressed */ cam->first_frame = 1; diff --git a/drivers/staging/cx25821/cx25821-video.c b/drivers/staging/cx25821/cx25821-video.c index e7f1d5778ce..52389308f33 100644 --- a/drivers/staging/cx25821/cx25821-video.c +++ b/drivers/staging/cx25821/cx25821-video.c @@ -92,7 +92,7 @@ int cx25821_get_format_size(void) return ARRAY_SIZE(formats); } -struct cx25821_fmt *format_by_fourcc(unsigned int fourcc) +struct cx25821_fmt *cx25821_format_by_fourcc(unsigned int fourcc) { unsigned int i; @@ -848,7 +848,7 @@ static int video_open(struct file *file) pix_format = (dev->channels[ch_id].pixel_formats == PIXEL_FRMT_411) ? V4L2_PIX_FMT_Y41P : V4L2_PIX_FMT_YUYV; - fh->fmt = format_by_fourcc(pix_format); + fh->fmt = cx25821_format_by_fourcc(pix_format); v4l2_prio_open(&dev->channels[ch_id].prio, &fh->prio); @@ -1010,7 +1010,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, if (0 != err) return err; - fh->fmt = format_by_fourcc(f->fmt.pix.pixelformat); + fh->fmt = cx25821_format_by_fourcc(f->fmt.pix.pixelformat); fh->vidq.field = f->fmt.pix.field; /* check if width and height is valid based on set standard */ @@ -1119,7 +1119,7 @@ int cx25821_vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fo enum v4l2_field field; unsigned int maxw, maxh; - fmt = format_by_fourcc(f->fmt.pix.pixelformat); + fmt = cx25821_format_by_fourcc(f->fmt.pix.pixelformat); if (NULL == fmt) return -EINVAL; diff --git a/drivers/staging/cx25821/cx25821-video.h b/drivers/staging/cx25821/cx25821-video.h index cc6034b1a95..a2415d33235 100644 --- a/drivers/staging/cx25821/cx25821-video.h +++ b/drivers/staging/cx25821/cx25821-video.h @@ -87,7 +87,7 @@ extern unsigned int vid_limit; #define FORMAT_FLAGS_PACKED 0x01 extern struct cx25821_fmt formats[]; -extern struct cx25821_fmt *format_by_fourcc(unsigned int fourcc); +extern struct cx25821_fmt *cx25821_format_by_fourcc(unsigned int fourcc); extern struct cx25821_data timeout_data[MAX_VID_CHANNEL_NUM]; extern void cx25821_dump_video_queue(struct cx25821_dev *dev, diff --git a/drivers/staging/easycap/easycap.h b/drivers/staging/easycap/easycap.h index f3c827eb0ab..884263b2775 100644 --- a/drivers/staging/easycap/easycap.h +++ b/drivers/staging/easycap/easycap.h @@ -75,9 +75,9 @@ #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/module.h> #include <linux/kref.h> -#include <linux/smp_lock.h> #include <linux/usb.h> #include <linux/uaccess.h> diff --git a/drivers/staging/frontier/tranzport.c b/drivers/staging/frontier/tranzport.c index a145a15cfdb..8894ab14f16 100644 --- a/drivers/staging/frontier/tranzport.c +++ b/drivers/staging/frontier/tranzport.c @@ -204,7 +204,7 @@ static void usb_tranzport_abort_transfers(struct usb_tranzport *dev) t->value = temp; \ return count; \ } \ - static DEVICE_ATTR(value, S_IWUGO | S_IRUGO, show_##value, set_##value); + static DEVICE_ATTR(value, S_IWUSR | S_IRUGO, show_##value, set_##value); show_int(enable); show_int(offline); 
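The comedi usbdux/usbduxfast and brcm80211 hunks above all make the same mechanical conversion away from the removed init_MUTEX()/DECLARE_MUTEX() helpers. A minimal sketch of the replacement API, using a hypothetical example_dev structure rather than any of the drivers' real state:

#include <linux/semaphore.h>

/* File-scope binary semaphore: DEFINE_SEMAPHORE() replaces DECLARE_MUTEX(). */
static DEFINE_SEMAPHORE(example_start_stop_sem);

struct example_dev {
	struct semaphore sem;
};

static void example_dev_init(struct example_dev *dev)
{
	/* Runtime initialisation to a count of 1 replaces init_MUTEX(). */
	sema_init(&dev->sem, 1);
}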
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c b/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c index 87a6487531c..20d509836d9 100644 --- a/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c +++ b/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c @@ -286,7 +286,6 @@ int ft1000_CreateDevice(struct ft1000_device *dev) pid = kernel_thread (exec_mknod, (void *)info, 0); // initialize application information - info->appcnt = 0; // if (ft1000_flarion_cnt == 0) { // diff --git a/drivers/staging/go7007/go7007-driver.c b/drivers/staging/go7007/go7007-driver.c index b3f42f37a31..48d4e483d8a 100644 --- a/drivers/staging/go7007/go7007-driver.c +++ b/drivers/staging/go7007/go7007-driver.c @@ -199,7 +199,7 @@ static int init_i2c_module(struct i2c_adapter *adapter, const char *type, struct go7007 *go = i2c_get_adapdata(adapter); struct v4l2_device *v4l2_dev = &go->v4l2_dev; - if (v4l2_i2c_new_subdev(v4l2_dev, adapter, NULL, type, addr, NULL)) + if (v4l2_i2c_new_subdev(v4l2_dev, adapter, type, addr, NULL)) return 0; printk(KERN_INFO "go7007: probing for module i2c:%s failed\n", type); diff --git a/drivers/staging/hv/hv_utils.c b/drivers/staging/hv/hv_utils.c index 702a478d554..a99e900ec4c 100644 --- a/drivers/staging/hv/hv_utils.c +++ b/drivers/staging/hv/hv_utils.c @@ -212,9 +212,6 @@ static void heartbeat_onchannelcallback(void *context) recvlen, requestid); icmsghdrp = (struct icmsg_hdr *)&buf[ - sizeof(struct vmbuspipe_hdr)]; - - icmsghdrp = (struct icmsg_hdr *)&buf[ sizeof(struct vmbuspipe_hdr)]; if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) { diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c index 41d9acf4cd6..6f8d67d0d64 100644 --- a/drivers/staging/hv/storvsc_drv.c +++ b/drivers/staging/hv/storvsc_drv.c @@ -72,8 +72,7 @@ struct storvsc_driver_context { /* Static decl */ static int storvsc_probe(struct device *dev); -static int storvsc_queuecommand(struct scsi_cmnd *scmnd, - void (*done)(struct scsi_cmnd *)); +static int storvsc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd); static int storvsc_device_alloc(struct scsi_device *); static int storvsc_device_configure(struct scsi_device *); static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd); @@ -595,7 +594,7 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl, /* * storvsc_queuecommand - Initiate command processing */ -static int storvsc_queuecommand(struct scsi_cmnd *scmnd, +static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd, void (*done)(struct scsi_cmnd *)) { int ret; @@ -783,6 +782,8 @@ retry_request: return ret; } +static DEF_SCSI_QCMD(storvsc_queuecommand) + static int storvsc_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd, struct bio_vec *bvec) { diff --git a/drivers/staging/iio/accel/adis16220_core.c b/drivers/staging/iio/accel/adis16220_core.c index c86d1498737..1c1e98aee2d 100644 --- a/drivers/staging/iio/accel/adis16220_core.c +++ b/drivers/staging/iio/accel/adis16220_core.c @@ -507,7 +507,7 @@ static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, adis16220_write_reset, 0); #define IIO_DEV_ATTR_CAPTURE(_store) \ - IIO_DEVICE_ATTR(capture, S_IWUGO, NULL, _store, 0) + IIO_DEVICE_ATTR(capture, S_IWUSR, NULL, _store, 0) static IIO_DEV_ATTR_CAPTURE(adis16220_write_capture); diff --git a/drivers/staging/intel_sst/intel_sst_app_interface.c b/drivers/staging/intel_sst/intel_sst_app_interface.c index 463e5cba830..991440015e9 100644 --- a/drivers/staging/intel_sst/intel_sst_app_interface.c +++ 
b/drivers/staging/intel_sst/intel_sst_app_interface.c @@ -34,7 +34,6 @@ #include <linux/uaccess.h> #include <linux/firmware.h> #include <linux/ioctl.h> -#include <linux/smp_lock.h> #ifdef CONFIG_MRST_RAR_HANDLER #include <linux/rar_register.h> #include "../../../drivers/staging/memrar/memrar.h" @@ -244,12 +243,12 @@ static int intel_sst_mmap_play_capture(u32 str_id, int retval, i; struct stream_info *stream; struct snd_sst_mmap_buff_entry *buf_entry; + struct snd_sst_mmap_buff_entry *tmp_buf; pr_debug("sst:called for str_id %d\n", str_id); retval = sst_validate_strid(str_id); if (retval) return -EINVAL; - BUG_ON(!mmap_buf); stream = &sst_drv_ctx->streams[str_id]; if (stream->mmapped != true) @@ -262,14 +261,24 @@ static int intel_sst_mmap_play_capture(u32 str_id, stream->curr_bytes = 0; stream->cumm_bytes = 0; + tmp_buf = kcalloc(mmap_buf->entries, sizeof(*tmp_buf), GFP_KERNEL); + if (!tmp_buf) + return -ENOMEM; + if (copy_from_user(tmp_buf, (void __user *)mmap_buf->buff, + mmap_buf->entries * sizeof(*tmp_buf))) { + retval = -EFAULT; + goto out_free; + } + pr_debug("sst:new buffers count %d status %d\n", mmap_buf->entries, stream->status); - buf_entry = mmap_buf->buff; + buf_entry = tmp_buf; for (i = 0; i < mmap_buf->entries; i++) { - BUG_ON(!buf_entry); bufs = kzalloc(sizeof(*bufs), GFP_KERNEL); - if (!bufs) - return -ENOMEM; + if (!bufs) { + retval = -ENOMEM; + goto out_free; + } bufs->size = buf_entry->size; bufs->offset = buf_entry->offset; bufs->addr = sst_drv_ctx->mmap_mem; @@ -293,13 +302,15 @@ static int intel_sst_mmap_play_capture(u32 str_id, if (sst_play_frame(str_id) < 0) { pr_warn("sst: play frames fail\n"); mutex_unlock(&stream->lock); - return -EIO; + retval = -EIO; + goto out_free; } } else if (stream->ops == STREAM_OPS_CAPTURE) { if (sst_capture_frame(str_id) < 0) { pr_warn("sst: capture frame fail\n"); mutex_unlock(&stream->lock); - return -EIO; + retval = -EIO; + goto out_free; } } } @@ -314,6 +325,9 @@ static int intel_sst_mmap_play_capture(u32 str_id, if (retval >= 0) retval = stream->cumm_bytes; pr_debug("sst:end of play/rec ioctl bytes = %d!!\n", retval); + +out_free: + kfree(tmp_buf); return retval; } @@ -377,7 +391,7 @@ static int snd_sst_fill_kernel_list(struct stream_info *stream, { struct sst_stream_bufs *stream_bufs; unsigned long index, mmap_len; - unsigned char *bufp; + unsigned char __user *bufp; unsigned long size, copied_size; int retval = 0, add_to_list = 0; static int sent_offset; @@ -512,9 +526,7 @@ static int snd_sst_copy_userbuf_capture(struct stream_info *stream, /* copy to user */ list_for_each_entry_safe(entry, _entry, copy_to_list, node) { - if (copy_to_user((void *) - iovec[entry->iov_index].iov_base + - entry->iov_offset, + if (copy_to_user(iovec[entry->iov_index].iov_base + entry->iov_offset, kbufs->addr + entry->offset, entry->size)) { /* Clean up the list and return error */ @@ -590,7 +602,7 @@ static int intel_sst_read_write(unsigned int str_id, char __user *buf, buf, (int) count, (int) stream->status); stream->buf_type = SST_BUF_USER_STATIC; - iovec.iov_base = (void *)buf; + iovec.iov_base = buf; iovec.iov_len = count; nr_segs = 1; @@ -838,7 +850,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) break; case _IOC_NR(SNDRV_SST_STREAM_SET_PARAMS): { - struct snd_sst_params *str_param = (struct snd_sst_params *)arg; + struct snd_sst_params str_param; pr_debug("sst: IOCTL_SET_PARAMS recieved!\n"); if (minor != STREAM_MODULE) { @@ -846,17 +858,25 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, 
unsigned long arg) break; } + if (copy_from_user(&str_param, (void __user *)arg, + sizeof(str_param))) { + retval = -EFAULT; + break; + } + if (!str_id) { - retval = sst_get_stream(str_param); + retval = sst_get_stream(&str_param); if (retval > 0) { struct stream_info *str_info; + char __user *dest; + sst_drv_ctx->stream_cnt++; data->str_id = retval; str_info = &sst_drv_ctx->streams[retval]; str_info->src = SST_DRV; - retval = copy_to_user(&str_param->stream_id, - &retval, sizeof(__u32)); + dest = (char __user *)arg + offsetof(struct snd_sst_params, stream_id); + retval = copy_to_user(dest, &retval, sizeof(__u32)); if (retval) retval = -EFAULT; } else { @@ -866,16 +886,14 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) } else { pr_debug("sst: SET_STREAM_PARAMS recieved!\n"); /* allocated set params only */ - retval = sst_set_stream_param(str_id, str_param); + retval = sst_set_stream_param(str_id, &str_param); /* Block the call for reply */ if (!retval) { int sfreq = 0, word_size = 0, num_channel = 0; - sfreq = str_param->sparams.uc.pcm_params.sfreq; - word_size = str_param->sparams. - uc.pcm_params.pcm_wd_sz; - num_channel = str_param-> - sparams.uc.pcm_params.num_chan; - if (str_param->ops == STREAM_OPS_CAPTURE) { + sfreq = str_param.sparams.uc.pcm_params.sfreq; + word_size = str_param.sparams.uc.pcm_params.pcm_wd_sz; + num_channel = str_param.sparams.uc.pcm_params.num_chan; + if (str_param.ops == STREAM_OPS_CAPTURE) { sst_drv_ctx->scard_ops->\ set_pcm_audio_params(sfreq, word_size, num_channel); @@ -885,41 +903,39 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) break; } case _IOC_NR(SNDRV_SST_SET_VOL): { - struct snd_sst_vol *set_vol; - struct snd_sst_vol *rec_vol = (struct snd_sst_vol *)arg; + struct snd_sst_vol set_vol; + + if (copy_from_user(&set_vol, (void __user *)arg, + sizeof(set_vol))) { + pr_debug("sst: copy failed\n"); + retval = -EFAULT; + break; + } pr_debug("sst: SET_VOLUME recieved for %d!\n", - rec_vol->stream_id); - if (minor == STREAM_MODULE && rec_vol->stream_id == 0) { + set_vol.stream_id); + if (minor == STREAM_MODULE && set_vol.stream_id == 0) { pr_debug("sst: invalid operation!\n"); retval = -EPERM; break; } - set_vol = kzalloc(sizeof(*set_vol), GFP_ATOMIC); - if (!set_vol) { - pr_debug("sst: mem allocation failed\n"); - retval = -ENOMEM; - break; - } - if (copy_from_user(set_vol, rec_vol, sizeof(*set_vol))) { - pr_debug("sst: copy failed\n"); - retval = -EFAULT; - break; - } - retval = sst_set_vol(set_vol); - kfree(set_vol); + retval = sst_set_vol(&set_vol); break; } case _IOC_NR(SNDRV_SST_GET_VOL): { - struct snd_sst_vol *rec_vol = (struct snd_sst_vol *)arg; struct snd_sst_vol get_vol; + + if (copy_from_user(&get_vol, (void __user *)arg, + sizeof(get_vol))) { + retval = -EFAULT; + break; + } pr_debug("sst: IOCTL_GET_VOLUME recieved for stream = %d!\n", - rec_vol->stream_id); - if (minor == STREAM_MODULE && rec_vol->stream_id == 0) { + get_vol.stream_id); + if (minor == STREAM_MODULE && get_vol.stream_id == 0) { pr_debug("sst: invalid operation!\n"); retval = -EPERM; break; } - get_vol.stream_id = rec_vol->stream_id; retval = sst_get_vol(&get_vol); if (retval) { retval = -EIO; @@ -928,7 +944,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) pr_debug("sst: id:%d\n, vol:%d, ramp_dur:%d, ramp_type:%d\n", get_vol.stream_id, get_vol.volume, get_vol.ramp_duration, get_vol.ramp_type); - if (copy_to_user((struct snd_sst_vol *)arg, + if (copy_to_user((struct 
snd_sst_vol __user *)arg, &get_vol, sizeof(get_vol))) { retval = -EFAULT; break; @@ -938,25 +954,20 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) } case _IOC_NR(SNDRV_SST_MUTE): { - struct snd_sst_mute *set_mute; - struct snd_sst_vol *rec_mute = (struct snd_sst_vol *)arg; - pr_debug("sst: SNDRV_SST_SET_VOLUME recieved for %d!\n", - rec_mute->stream_id); - if (minor == STREAM_MODULE && rec_mute->stream_id == 0) { - retval = -EPERM; - break; - } - set_mute = kzalloc(sizeof(*set_mute), GFP_ATOMIC); - if (!set_mute) { - retval = -ENOMEM; + struct snd_sst_mute set_mute; + + if (copy_from_user(&set_mute, (void __user *)arg, + sizeof(set_mute))) { + retval = -EFAULT; break; } - if (copy_from_user(set_mute, rec_mute, sizeof(*set_mute))) { - retval = -EFAULT; + pr_debug("sst: SNDRV_SST_SET_VOLUME recieved for %d!\n", + set_mute.stream_id); + if (minor == STREAM_MODULE && set_mute.stream_id == 0) { + retval = -EPERM; break; } - retval = sst_set_mute(set_mute); - kfree(set_mute); + retval = sst_set_mute(&set_mute); break; } case _IOC_NR(SNDRV_SST_STREAM_GET_PARAMS): { @@ -973,7 +984,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) retval = -EIO; break; } - if (copy_to_user((struct snd_sst_get_stream_params *)arg, + if (copy_to_user((struct snd_sst_get_stream_params __user *)arg, &get_params, sizeof(get_params))) { retval = -EFAULT; break; @@ -983,16 +994,22 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) } case _IOC_NR(SNDRV_SST_MMAP_PLAY): - case _IOC_NR(SNDRV_SST_MMAP_CAPTURE): + case _IOC_NR(SNDRV_SST_MMAP_CAPTURE): { + struct snd_sst_mmap_buffs mmap_buf; + pr_debug("sst: SNDRV_SST_MMAP_PLAY/CAPTURE recieved!\n"); if (minor != STREAM_MODULE) { retval = -EBADRQC; break; } - retval = intel_sst_mmap_play_capture(str_id, - (struct snd_sst_mmap_buffs *)arg); + if (copy_from_user(&mmap_buf, (void __user *)arg, + sizeof(mmap_buf))) { + retval = -EFAULT; + break; + } + retval = intel_sst_mmap_play_capture(str_id, &mmap_buf); break; - + } case _IOC_NR(SNDRV_SST_STREAM_DROP): pr_debug("sst: SNDRV_SST_IOCTL_DROP recieved!\n"); if (minor != STREAM_MODULE) { @@ -1003,7 +1020,6 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) break; case _IOC_NR(SNDRV_SST_STREAM_GET_TSTAMP): { - unsigned long long *ms = (unsigned long long *)arg; struct snd_sst_tstamp tstamp = {0}; unsigned long long time, freq, mod; @@ -1013,14 +1029,14 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) break; } memcpy_fromio(&tstamp, - ((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP) - +(str_id * sizeof(tstamp))), + sst_drv_ctx->mailbox + SST_TIME_STAMP + str_id * sizeof(tstamp), sizeof(tstamp)); time = tstamp.samples_rendered; freq = (unsigned long long) tstamp.sampling_frequency; time = time * 1000; /* converting it to ms */ mod = do_div(time, freq); - if (copy_to_user(ms, &time, sizeof(*ms))) + if (copy_to_user((void __user *)arg, &time, + sizeof(unsigned long long))) retval = -EFAULT; break; } @@ -1065,92 +1081,118 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) } case _IOC_NR(SNDRV_SST_SET_TARGET_DEVICE): { - struct snd_sst_target_device *target_device; + struct snd_sst_target_device target_device; pr_debug("sst: SET_TARGET_DEVICE recieved!\n"); - target_device = (struct snd_sst_target_device *)arg; - BUG_ON(!target_device); + if (copy_from_user(&target_device, (void __user *)arg, + sizeof(target_device))) { + retval = 
-EFAULT; + break; + } if (minor != AM_MODULE) { retval = -EBADRQC; break; } - retval = sst_target_device_select(target_device); + retval = sst_target_device_select(&target_device); break; } case _IOC_NR(SNDRV_SST_DRIVER_INFO): { - struct snd_sst_driver_info *info = - (struct snd_sst_driver_info *)arg; + struct snd_sst_driver_info info; pr_debug("sst: SNDRV_SST_DRIVER_INFO recived\n"); - info->version = SST_VERSION_NUM; + info.version = SST_VERSION_NUM; /* hard coding, shud get sumhow later */ - info->active_pcm_streams = sst_drv_ctx->stream_cnt - + info.active_pcm_streams = sst_drv_ctx->stream_cnt - sst_drv_ctx->encoded_cnt; - info->active_enc_streams = sst_drv_ctx->encoded_cnt; - info->max_pcm_streams = MAX_ACTIVE_STREAM - MAX_ENC_STREAM; - info->max_enc_streams = MAX_ENC_STREAM; - info->buf_per_stream = sst_drv_ctx->mmap_len; + info.active_enc_streams = sst_drv_ctx->encoded_cnt; + info.max_pcm_streams = MAX_ACTIVE_STREAM - MAX_ENC_STREAM; + info.max_enc_streams = MAX_ENC_STREAM; + info.buf_per_stream = sst_drv_ctx->mmap_len; + if (copy_to_user((void __user *)arg, &info, + sizeof(info))) + retval = -EFAULT; break; } case _IOC_NR(SNDRV_SST_STREAM_DECODE): { - struct snd_sst_dbufs *param = - (struct snd_sst_dbufs *)arg, dbufs_local; - int i; + struct snd_sst_dbufs param; + struct snd_sst_dbufs dbufs_local; struct snd_sst_buffs ibufs, obufs; - struct snd_sst_buff_entry ibuf_temp[param->ibufs->entries], - obuf_temp[param->obufs->entries]; + struct snd_sst_buff_entry *ibuf_tmp, *obuf_tmp; + char __user *dest; pr_debug("sst: SNDRV_SST_STREAM_DECODE recived\n"); if (minor != STREAM_MODULE) { retval = -EBADRQC; break; } - if (!param) { - retval = -EINVAL; + if (copy_from_user(¶m, (void __user *)arg, + sizeof(param))) { + retval = -EFAULT; break; } - dbufs_local.input_bytes_consumed = param->input_bytes_consumed; + dbufs_local.input_bytes_consumed = param.input_bytes_consumed; dbufs_local.output_bytes_produced = - param->output_bytes_produced; - dbufs_local.ibufs = &ibufs; - dbufs_local.obufs = &obufs; - dbufs_local.ibufs->entries = param->ibufs->entries; - dbufs_local.ibufs->type = param->ibufs->type; - dbufs_local.obufs->entries = param->obufs->entries; - dbufs_local.obufs->type = param->obufs->type; - - dbufs_local.ibufs->buff_entry = ibuf_temp; - for (i = 0; i < dbufs_local.ibufs->entries; i++) { - ibuf_temp[i].buffer = - param->ibufs->buff_entry[i].buffer; - ibuf_temp[i].size = - param->ibufs->buff_entry[i].size; + param.output_bytes_produced; + + if (copy_from_user(&ibufs, (void __user *)param.ibufs, sizeof(ibufs))) { + retval = -EFAULT; + break; + } + if (copy_from_user(&obufs, (void __user *)param.obufs, sizeof(obufs))) { + retval = -EFAULT; + break; } - dbufs_local.obufs->buff_entry = obuf_temp; - for (i = 0; i < dbufs_local.obufs->entries; i++) { - obuf_temp[i].buffer = - param->obufs->buff_entry[i].buffer; - obuf_temp[i].size = - param->obufs->buff_entry[i].size; + + ibuf_tmp = kcalloc(ibufs.entries, sizeof(*ibuf_tmp), GFP_KERNEL); + obuf_tmp = kcalloc(obufs.entries, sizeof(*obuf_tmp), GFP_KERNEL); + if (!ibuf_tmp || !obuf_tmp) { + retval = -ENOMEM; + goto free_iobufs; + } + + if (copy_from_user(ibuf_tmp, (void __user *)ibufs.buff_entry, + ibufs.entries * sizeof(*ibuf_tmp))) { + retval = -EFAULT; + goto free_iobufs; } + ibufs.buff_entry = ibuf_tmp; + dbufs_local.ibufs = &ibufs; + + if (copy_from_user(obuf_tmp, (void __user *)obufs.buff_entry, + obufs.entries * sizeof(*obuf_tmp))) { + retval = -EFAULT; + goto free_iobufs; + } + obufs.buff_entry = obuf_tmp; + dbufs_local.obufs = &obufs; + 
retval = sst_decode(str_id, &dbufs_local); - if (retval) - retval = -EAGAIN; - if (copy_to_user(¶m->input_bytes_consumed, + if (retval) { + retval = -EAGAIN; + goto free_iobufs; + } + + dest = (char __user *)arg + offsetof(struct snd_sst_dbufs, input_bytes_consumed); + if (copy_to_user(dest, &dbufs_local.input_bytes_consumed, sizeof(unsigned long long))) { - retval = -EFAULT; - break; + retval = -EFAULT; + goto free_iobufs; } - if (copy_to_user(¶m->output_bytes_produced, + + dest = (char __user *)arg + offsetof(struct snd_sst_dbufs, input_bytes_consumed); + if (copy_to_user(dest, &dbufs_local.output_bytes_produced, sizeof(unsigned long long))) { - retval = -EFAULT; - break; + retval = -EFAULT; + goto free_iobufs; } +free_iobufs: + kfree(ibuf_tmp); + kfree(obuf_tmp); break; } @@ -1164,7 +1206,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) break; case _IOC_NR(SNDRV_SST_STREAM_BYTES_DECODED): { - unsigned long long *bytes = (unsigned long long *)arg; + unsigned long long __user *bytes = (unsigned long long __user *)arg; struct snd_sst_tstamp tstamp = {0}; pr_debug("sst: STREAM_BYTES_DECODED recieved!\n"); @@ -1173,8 +1215,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) break; } memcpy_fromio(&tstamp, - ((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP) - +(str_id * sizeof(tstamp))), + sst_drv_ctx->mailbox + SST_TIME_STAMP + str_id * sizeof(tstamp), sizeof(tstamp)); if (copy_to_user(bytes, &tstamp.bytes_processed, sizeof(*bytes))) @@ -1197,7 +1238,7 @@ long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg) kfree(fw_info); break; } - if (copy_to_user((struct snd_sst_dbufs *)arg, + if (copy_to_user((struct snd_sst_dbufs __user *)arg, fw_info, sizeof(*fw_info))) { kfree(fw_info); retval = -EFAULT; diff --git a/drivers/staging/intel_sst/intel_sst_common.h b/drivers/staging/intel_sst/intel_sst_common.h index 73a98c851e4..bf0ead78bfa 100644 --- a/drivers/staging/intel_sst/intel_sst_common.h +++ b/drivers/staging/intel_sst/intel_sst_common.h @@ -231,8 +231,8 @@ struct stream_info { spinlock_t pcm_lock; bool mmapped; unsigned int sg_index; /* current buf Index */ - unsigned char *cur_ptr; /* Current static bufs */ - struct snd_sst_buf_entry *buf_entry; + unsigned char __user *cur_ptr; /* Current static bufs */ + struct snd_sst_buf_entry __user *buf_entry; struct sst_block data_blk; /* stream ops block */ struct sst_block ctrl_blk; /* stream control cmd block */ enum snd_sst_buf_type buf_type; diff --git a/drivers/staging/intel_sst/intel_sst_stream_encoded.c b/drivers/staging/intel_sst/intel_sst_stream_encoded.c index fbae39fda5c..5c455608b02 100644 --- a/drivers/staging/intel_sst/intel_sst_stream_encoded.c +++ b/drivers/staging/intel_sst/intel_sst_stream_encoded.c @@ -1269,7 +1269,7 @@ finish: dbufs->output_bytes_produced = total_output; str_info->status = str_info->prev; str_info->prev = STREAM_DECODE; - str_info->decode_ibuf = NULL; kfree(str_info->decode_ibuf); + str_info->decode_ibuf = NULL; return retval; } diff --git a/drivers/staging/keucr/init.c b/drivers/staging/keucr/init.c index 1934805844f..978bf87ff13 100644 --- a/drivers/staging/keucr/init.c +++ b/drivers/staging/keucr/init.c @@ -22,7 +22,7 @@ int ENE_InitMedia(struct us_data *us) int result; BYTE MiscReg03 = 0; - printk("--- Initial Nedia ---\n"); + printk("--- Init Media ---\n"); result = ENE_Read_BYTE(us, REG_CARD_STATUS, &MiscReg03); if (result != USB_STOR_XFER_GOOD) { @@ -64,7 +64,7 @@ int ENE_Read_BYTE(struct us_data *us, WORD 
index, void *buf) struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf; int result; - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x01; bcb->Flags = 0x80; @@ -92,7 +92,7 @@ int ENE_SDInit(struct us_data *us) return USB_STOR_TRANSPORT_ERROR; } - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->Flags = 0x80; bcb->CDB[0] = 0xF2; @@ -112,7 +112,7 @@ int ENE_SDInit(struct us_data *us) return USB_STOR_TRANSPORT_ERROR; } - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x200; bcb->Flags = 0x80; @@ -161,7 +161,7 @@ int ENE_MSInit(struct us_data *us) return USB_STOR_TRANSPORT_ERROR; } - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x200; bcb->Flags = 0x80; @@ -219,7 +219,7 @@ int ENE_SMInit(struct us_data *us) return USB_STOR_TRANSPORT_ERROR; } - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x200; bcb->Flags = 0x80; @@ -341,7 +341,7 @@ int ENE_LoadBinCode(struct us_data *us, BYTE flag) break; } - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x800; bcb->Flags =0x00; @@ -433,7 +433,7 @@ int ENE_Read_Data(struct us_data *us, void *buf, unsigned int length) //printk("transport --- ENE_Read_Data\n"); // set up the command wrapper - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = length; bcb->Flags =0x80; @@ -470,7 +470,7 @@ int ENE_Write_Data(struct us_data *us, void *buf, unsigned int length) //printk("transport --- ENE_Write_Data\n"); // set up the command wrapper - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = length; bcb->Flags =0x00; diff --git a/drivers/staging/keucr/ms.c b/drivers/staging/keucr/ms.c index d4340a9da87..9a3fdb4e4fe 100644 --- a/drivers/staging/keucr/ms.c +++ b/drivers/staging/keucr/ms.c @@ -15,7 +15,7 @@ int MS_ReaderCopyBlock(struct us_data *us, WORD oldphy, WORD newphy, WORD PhyBlo if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x200*len; bcb->Flags = 0x00; @@ -53,7 +53,7 @@ int MS_ReaderReadPage(struct us_data *us, DWORD PhyBlockAddr, BYTE PageNum, PDWO return USB_STOR_TRANSPORT_ERROR; // Read Page Data - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x200; bcb->Flags = 0x80; @@ -69,7 +69,7 @@ int MS_ReaderReadPage(struct us_data *us, DWORD PhyBlockAddr, BYTE PageNum, PDWO return USB_STOR_TRANSPORT_ERROR; // Read Extra Data - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x4; bcb->Flags = 0x80; @@ -108,7 +108,7 @@ int MS_ReaderEraseBlock(struct us_data *us, DWORD PhyBlockAddr) if (result != USB_STOR_XFER_GOOD) 
return USB_STOR_TRANSPORT_ERROR; - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x200; bcb->Flags = 0x80; @@ -673,7 +673,7 @@ int MS_LibReadExtraBlock(struct us_data *us, DWORD PhyBlock, BYTE PageNum, BYTE //printk("MS_LibReadExtraBlock --- PhyBlock = %x, PageNum = %x, blen = %x\n", PhyBlock, PageNum, blen); // Read Extra Data - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x4 * blen; bcb->Flags = 0x80; @@ -700,7 +700,7 @@ int MS_LibReadExtra(struct us_data *us, DWORD PhyBlock, BYTE PageNum, MS_LibType BYTE ExtBuf[4]; //printk("MS_LibReadExtra --- PhyBlock = %x, PageNum = %x\n", PhyBlock, PageNum); - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x4; bcb->Flags = 0x80; @@ -807,7 +807,7 @@ int MS_LibOverwriteExtra(struct us_data *us, DWORD PhyBlockAddr, BYTE PageNum, B if (result != USB_STOR_XFER_GOOD) return USB_STOR_TRANSPORT_ERROR; - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x4; bcb->Flags = 0x80; diff --git a/drivers/staging/keucr/msscsi.c b/drivers/staging/keucr/msscsi.c index ad0c5c62993..cb92d25acee 100644 --- a/drivers/staging/keucr/msscsi.c +++ b/drivers/staging/keucr/msscsi.c @@ -145,7 +145,7 @@ int MS_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb) } // set up the command wrapper - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = blenByte; bcb->Flags = 0x80; @@ -193,7 +193,7 @@ int MS_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb) blkno = phyblk * 0x20 + PageNum; // set up the command wrapper - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x200 * len; bcb->Flags = 0x80; @@ -250,7 +250,7 @@ int MS_SCSI_Write(struct us_data *us, struct scsi_cmnd *srb) } // set up the command wrapper - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = blenByte; bcb->Flags = 0x00; diff --git a/drivers/staging/keucr/scsiglue.c b/drivers/staging/keucr/scsiglue.c index a2671404f7a..da4f42af383 100644 --- a/drivers/staging/keucr/scsiglue.c +++ b/drivers/staging/keucr/scsiglue.c @@ -87,7 +87,7 @@ static int slave_configure(struct scsi_device *sdev) /* This is always called with scsi_lock(host) held */ //----- queuecommand() --------------------- -static int queuecommand(struct scsi_cmnd *srb, void (*done)(struct scsi_cmnd *)) +static int queuecommand_lck(struct scsi_cmnd *srb, void (*done)(struct scsi_cmnd *)) { struct us_data *us = host_to_us(srb->device->host); @@ -117,6 +117,8 @@ static int queuecommand(struct scsi_cmnd *srb, void (*done)(struct scsi_cmnd *)) return 0; } +static DEF_SCSI_QCMD(queuecommand) + /*********************************************************************** * Error handling functions ***********************************************************************/ diff --git a/drivers/staging/keucr/sdscsi.c b/drivers/staging/keucr/sdscsi.c index 6c332f850eb..d646507a361 100644 --- a/drivers/staging/keucr/sdscsi.c +++ b/drivers/staging/keucr/sdscsi.c @@ -152,7 +152,7 @@ 
int SD_SCSI_Read(struct us_data *us, struct scsi_cmnd *srb) bnByte = bn; // set up the command wrapper - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = blenByte; bcb->Flags = 0x80; @@ -192,7 +192,7 @@ int SD_SCSI_Write(struct us_data *us, struct scsi_cmnd *srb) bnByte = bn; // set up the command wrapper - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = blenByte; bcb->Flags = 0x00; diff --git a/drivers/staging/keucr/smilsub.c b/drivers/staging/keucr/smilsub.c index 844b6598863..1b52535a388 100644 --- a/drivers/staging/keucr/smilsub.c +++ b/drivers/staging/keucr/smilsub.c @@ -266,7 +266,7 @@ int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf,BYTE *redundant) addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; // Read sect data - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x200; bcb->Flags = 0x80; @@ -281,7 +281,7 @@ int Ssfdc_D_ReadSect(struct us_data *us, BYTE *buf,BYTE *redundant) return USB_STOR_TRANSPORT_ERROR; // Read redundant - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x10; bcb->Flags = 0x80; @@ -319,7 +319,7 @@ int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,BYTE *redundant) addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; // Read sect data - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x200*count; bcb->Flags = 0x80; @@ -334,7 +334,7 @@ int Ssfdc_D_ReadBlock(struct us_data *us, WORD count, BYTE *buf,BYTE *redundant) return USB_STOR_TRANSPORT_ERROR; // Read redundant - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x10; bcb->Flags = 0x80; @@ -536,7 +536,7 @@ int Ssfdc_D_CopyBlock(struct us_data *us, WORD count, BYTE *buf,BYTE *redundant) WriteAddr = WriteAddr*(WORD)Ssfdc.MaxSectors; // Write sect data - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x200*count; bcb->Flags = 0x00; @@ -754,7 +754,7 @@ int Ssfdc_D_WriteSectForCopy(struct us_data *us, BYTE *buf, BYTE *redundant) addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; // Write sect data - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x200; bcb->Flags = 0x00; @@ -791,7 +791,7 @@ int Ssfdc_D_EraseBlock(struct us_data *us) addr=(WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock; addr=addr*(WORD)Ssfdc.MaxSectors; - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x200; bcb->Flags = 0x80; @@ -827,7 +827,7 @@ int Ssfdc_D_ReadRedtData(struct us_data *us, BYTE *redundant) addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock; addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x10; bcb->Flags = 0x80; @@ -870,7 +870,7 @@ int Ssfdc_D_WriteRedtData(struct us_data *us, 
BYTE *redundant) addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock; addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector; - memset(bcb, 0, sizeof(bcb)); + memset(bcb, 0, sizeof(struct bulk_cb_wrap)); bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN); bcb->DataTransferLength = 0x10; bcb->Flags = 0x80; diff --git a/drivers/staging/keucr/transport.c b/drivers/staging/keucr/transport.c index fd98df643ab..111160cce44 100644 --- a/drivers/staging/keucr/transport.c +++ b/drivers/staging/keucr/transport.c @@ -40,7 +40,7 @@ static int usb_stor_msg_common(struct us_data *us, int timeout) us->current_urb->error_count = 0; us->current_urb->status = 0; -// us->current_urb->transfer_flags = URB_NO_SETUP_DMA_MAP; + us->current_urb->transfer_flags = 0; if (us->current_urb->transfer_buffer == us->iobuf) us->current_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; us->current_urb->transfer_dma = us->iobuf_dma; diff --git a/drivers/staging/line6/control.c b/drivers/staging/line6/control.c index 040e25ca6d3..67e23b6e2d3 100644 --- a/drivers/staging/line6/control.c +++ b/drivers/staging/line6/control.c @@ -266,210 +266,210 @@ VARIAX_PARAM_R(float, mix2); VARIAX_PARAM_R(float, mix1); VARIAX_PARAM_R(int, pickup_wiring); -static DEVICE_ATTR(tweak, S_IWUGO | S_IRUGO, pod_get_tweak, pod_set_tweak); -static DEVICE_ATTR(wah_position, S_IWUGO | S_IRUGO, pod_get_wah_position, +static DEVICE_ATTR(tweak, S_IWUSR | S_IRUGO, pod_get_tweak, pod_set_tweak); +static DEVICE_ATTR(wah_position, S_IWUSR | S_IRUGO, pod_get_wah_position, pod_set_wah_position); -static DEVICE_ATTR(compression_gain, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(compression_gain, S_IWUSR | S_IRUGO, pod_get_compression_gain, pod_set_compression_gain); -static DEVICE_ATTR(vol_pedal_position, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(vol_pedal_position, S_IWUSR | S_IRUGO, pod_get_vol_pedal_position, pod_set_vol_pedal_position); -static DEVICE_ATTR(compression_threshold, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(compression_threshold, S_IWUSR | S_IRUGO, pod_get_compression_threshold, pod_set_compression_threshold); -static DEVICE_ATTR(pan, S_IWUGO | S_IRUGO, pod_get_pan, pod_set_pan); -static DEVICE_ATTR(amp_model_setup, S_IWUGO | S_IRUGO, pod_get_amp_model_setup, +static DEVICE_ATTR(pan, S_IWUSR | S_IRUGO, pod_get_pan, pod_set_pan); +static DEVICE_ATTR(amp_model_setup, S_IWUSR | S_IRUGO, pod_get_amp_model_setup, pod_set_amp_model_setup); -static DEVICE_ATTR(amp_model, S_IWUGO | S_IRUGO, pod_get_amp_model, +static DEVICE_ATTR(amp_model, S_IWUSR | S_IRUGO, pod_get_amp_model, pod_set_amp_model); -static DEVICE_ATTR(drive, S_IWUGO | S_IRUGO, pod_get_drive, pod_set_drive); -static DEVICE_ATTR(bass, S_IWUGO | S_IRUGO, pod_get_bass, pod_set_bass); -static DEVICE_ATTR(mid, S_IWUGO | S_IRUGO, pod_get_mid, pod_set_mid); -static DEVICE_ATTR(lowmid, S_IWUGO | S_IRUGO, pod_get_lowmid, pod_set_lowmid); -static DEVICE_ATTR(treble, S_IWUGO | S_IRUGO, pod_get_treble, pod_set_treble); -static DEVICE_ATTR(highmid, S_IWUGO | S_IRUGO, pod_get_highmid, +static DEVICE_ATTR(drive, S_IWUSR | S_IRUGO, pod_get_drive, pod_set_drive); +static DEVICE_ATTR(bass, S_IWUSR | S_IRUGO, pod_get_bass, pod_set_bass); +static DEVICE_ATTR(mid, S_IWUSR | S_IRUGO, pod_get_mid, pod_set_mid); +static DEVICE_ATTR(lowmid, S_IWUSR | S_IRUGO, pod_get_lowmid, pod_set_lowmid); +static DEVICE_ATTR(treble, S_IWUSR | S_IRUGO, pod_get_treble, pod_set_treble); +static DEVICE_ATTR(highmid, S_IWUSR | S_IRUGO, pod_get_highmid, pod_set_highmid); -static DEVICE_ATTR(chan_vol, S_IWUGO | S_IRUGO, pod_get_chan_vol, +static 
DEVICE_ATTR(chan_vol, S_IWUSR | S_IRUGO, pod_get_chan_vol, pod_set_chan_vol); -static DEVICE_ATTR(reverb_mix, S_IWUGO | S_IRUGO, pod_get_reverb_mix, +static DEVICE_ATTR(reverb_mix, S_IWUSR | S_IRUGO, pod_get_reverb_mix, pod_set_reverb_mix); -static DEVICE_ATTR(effect_setup, S_IWUGO | S_IRUGO, pod_get_effect_setup, +static DEVICE_ATTR(effect_setup, S_IWUSR | S_IRUGO, pod_get_effect_setup, pod_set_effect_setup); -static DEVICE_ATTR(band_1_frequency, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(band_1_frequency, S_IWUSR | S_IRUGO, pod_get_band_1_frequency, pod_set_band_1_frequency); -static DEVICE_ATTR(presence, S_IWUGO | S_IRUGO, pod_get_presence, +static DEVICE_ATTR(presence, S_IWUSR | S_IRUGO, pod_get_presence, pod_set_presence); -static DEVICE_ATTR2(treble__bass, treble, S_IWUGO | S_IRUGO, +static DEVICE_ATTR2(treble__bass, treble, S_IWUSR | S_IRUGO, pod_get_treble__bass, pod_set_treble__bass); -static DEVICE_ATTR(noise_gate_enable, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(noise_gate_enable, S_IWUSR | S_IRUGO, pod_get_noise_gate_enable, pod_set_noise_gate_enable); -static DEVICE_ATTR(gate_threshold, S_IWUGO | S_IRUGO, pod_get_gate_threshold, +static DEVICE_ATTR(gate_threshold, S_IWUSR | S_IRUGO, pod_get_gate_threshold, pod_set_gate_threshold); -static DEVICE_ATTR(gate_decay_time, S_IWUGO | S_IRUGO, pod_get_gate_decay_time, +static DEVICE_ATTR(gate_decay_time, S_IWUSR | S_IRUGO, pod_get_gate_decay_time, pod_set_gate_decay_time); -static DEVICE_ATTR(stomp_enable, S_IWUGO | S_IRUGO, pod_get_stomp_enable, +static DEVICE_ATTR(stomp_enable, S_IWUSR | S_IRUGO, pod_get_stomp_enable, pod_set_stomp_enable); -static DEVICE_ATTR(comp_enable, S_IWUGO | S_IRUGO, pod_get_comp_enable, +static DEVICE_ATTR(comp_enable, S_IWUSR | S_IRUGO, pod_get_comp_enable, pod_set_comp_enable); -static DEVICE_ATTR(stomp_time, S_IWUGO | S_IRUGO, pod_get_stomp_time, +static DEVICE_ATTR(stomp_time, S_IWUSR | S_IRUGO, pod_get_stomp_time, pod_set_stomp_time); -static DEVICE_ATTR(delay_enable, S_IWUGO | S_IRUGO, pod_get_delay_enable, +static DEVICE_ATTR(delay_enable, S_IWUSR | S_IRUGO, pod_get_delay_enable, pod_set_delay_enable); -static DEVICE_ATTR(mod_param_1, S_IWUGO | S_IRUGO, pod_get_mod_param_1, +static DEVICE_ATTR(mod_param_1, S_IWUSR | S_IRUGO, pod_get_mod_param_1, pod_set_mod_param_1); -static DEVICE_ATTR(delay_param_1, S_IWUGO | S_IRUGO, pod_get_delay_param_1, +static DEVICE_ATTR(delay_param_1, S_IWUSR | S_IRUGO, pod_get_delay_param_1, pod_set_delay_param_1); -static DEVICE_ATTR(delay_param_1_note_value, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(delay_param_1_note_value, S_IWUSR | S_IRUGO, pod_get_delay_param_1_note_value, pod_set_delay_param_1_note_value); -static DEVICE_ATTR2(band_2_frequency__bass, band_2_frequency, S_IWUGO | S_IRUGO, +static DEVICE_ATTR2(band_2_frequency__bass, band_2_frequency, S_IWUSR | S_IRUGO, pod_get_band_2_frequency__bass, pod_set_band_2_frequency__bass); -static DEVICE_ATTR(delay_param_2, S_IWUGO | S_IRUGO, pod_get_delay_param_2, +static DEVICE_ATTR(delay_param_2, S_IWUSR | S_IRUGO, pod_get_delay_param_2, pod_set_delay_param_2); -static DEVICE_ATTR(delay_volume_mix, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(delay_volume_mix, S_IWUSR | S_IRUGO, pod_get_delay_volume_mix, pod_set_delay_volume_mix); -static DEVICE_ATTR(delay_param_3, S_IWUGO | S_IRUGO, pod_get_delay_param_3, +static DEVICE_ATTR(delay_param_3, S_IWUSR | S_IRUGO, pod_get_delay_param_3, pod_set_delay_param_3); -static DEVICE_ATTR(reverb_enable, S_IWUGO | S_IRUGO, pod_get_reverb_enable, +static DEVICE_ATTR(reverb_enable, S_IWUSR | S_IRUGO, 
pod_get_reverb_enable, pod_set_reverb_enable); -static DEVICE_ATTR(reverb_type, S_IWUGO | S_IRUGO, pod_get_reverb_type, +static DEVICE_ATTR(reverb_type, S_IWUSR | S_IRUGO, pod_get_reverb_type, pod_set_reverb_type); -static DEVICE_ATTR(reverb_decay, S_IWUGO | S_IRUGO, pod_get_reverb_decay, +static DEVICE_ATTR(reverb_decay, S_IWUSR | S_IRUGO, pod_get_reverb_decay, pod_set_reverb_decay); -static DEVICE_ATTR(reverb_tone, S_IWUGO | S_IRUGO, pod_get_reverb_tone, +static DEVICE_ATTR(reverb_tone, S_IWUSR | S_IRUGO, pod_get_reverb_tone, pod_set_reverb_tone); -static DEVICE_ATTR(reverb_pre_delay, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(reverb_pre_delay, S_IWUSR | S_IRUGO, pod_get_reverb_pre_delay, pod_set_reverb_pre_delay); -static DEVICE_ATTR(reverb_pre_post, S_IWUGO | S_IRUGO, pod_get_reverb_pre_post, +static DEVICE_ATTR(reverb_pre_post, S_IWUSR | S_IRUGO, pod_get_reverb_pre_post, pod_set_reverb_pre_post); -static DEVICE_ATTR(band_2_frequency, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(band_2_frequency, S_IWUSR | S_IRUGO, pod_get_band_2_frequency, pod_set_band_2_frequency); -static DEVICE_ATTR2(band_3_frequency__bass, band_3_frequency, S_IWUGO | S_IRUGO, +static DEVICE_ATTR2(band_3_frequency__bass, band_3_frequency, S_IWUSR | S_IRUGO, pod_get_band_3_frequency__bass, pod_set_band_3_frequency__bass); -static DEVICE_ATTR(wah_enable, S_IWUGO | S_IRUGO, pod_get_wah_enable, +static DEVICE_ATTR(wah_enable, S_IWUSR | S_IRUGO, pod_get_wah_enable, pod_set_wah_enable); -static DEVICE_ATTR(modulation_lo_cut, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(modulation_lo_cut, S_IWUSR | S_IRUGO, pod_get_modulation_lo_cut, pod_set_modulation_lo_cut); -static DEVICE_ATTR(delay_reverb_lo_cut, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(delay_reverb_lo_cut, S_IWUSR | S_IRUGO, pod_get_delay_reverb_lo_cut, pod_set_delay_reverb_lo_cut); -static DEVICE_ATTR(volume_pedal_minimum, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(volume_pedal_minimum, S_IWUSR | S_IRUGO, pod_get_volume_pedal_minimum, pod_set_volume_pedal_minimum); -static DEVICE_ATTR(eq_pre_post, S_IWUGO | S_IRUGO, pod_get_eq_pre_post, +static DEVICE_ATTR(eq_pre_post, S_IWUSR | S_IRUGO, pod_get_eq_pre_post, pod_set_eq_pre_post); -static DEVICE_ATTR(volume_pre_post, S_IWUGO | S_IRUGO, pod_get_volume_pre_post, +static DEVICE_ATTR(volume_pre_post, S_IWUSR | S_IRUGO, pod_get_volume_pre_post, pod_set_volume_pre_post); -static DEVICE_ATTR(di_model, S_IWUGO | S_IRUGO, pod_get_di_model, +static DEVICE_ATTR(di_model, S_IWUSR | S_IRUGO, pod_get_di_model, pod_set_di_model); -static DEVICE_ATTR(di_delay, S_IWUGO | S_IRUGO, pod_get_di_delay, +static DEVICE_ATTR(di_delay, S_IWUSR | S_IRUGO, pod_get_di_delay, pod_set_di_delay); -static DEVICE_ATTR(mod_enable, S_IWUGO | S_IRUGO, pod_get_mod_enable, +static DEVICE_ATTR(mod_enable, S_IWUSR | S_IRUGO, pod_get_mod_enable, pod_set_mod_enable); -static DEVICE_ATTR(mod_param_1_note_value, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(mod_param_1_note_value, S_IWUSR | S_IRUGO, pod_get_mod_param_1_note_value, pod_set_mod_param_1_note_value); -static DEVICE_ATTR(mod_param_2, S_IWUGO | S_IRUGO, pod_get_mod_param_2, +static DEVICE_ATTR(mod_param_2, S_IWUSR | S_IRUGO, pod_get_mod_param_2, pod_set_mod_param_2); -static DEVICE_ATTR(mod_param_3, S_IWUGO | S_IRUGO, pod_get_mod_param_3, +static DEVICE_ATTR(mod_param_3, S_IWUSR | S_IRUGO, pod_get_mod_param_3, pod_set_mod_param_3); -static DEVICE_ATTR(mod_param_4, S_IWUGO | S_IRUGO, pod_get_mod_param_4, +static DEVICE_ATTR(mod_param_4, S_IWUSR | S_IRUGO, pod_get_mod_param_4, pod_set_mod_param_4); -static 
DEVICE_ATTR(mod_param_5, S_IWUGO | S_IRUGO, pod_get_mod_param_5, +static DEVICE_ATTR(mod_param_5, S_IWUSR | S_IRUGO, pod_get_mod_param_5, pod_set_mod_param_5); -static DEVICE_ATTR(mod_volume_mix, S_IWUGO | S_IRUGO, pod_get_mod_volume_mix, +static DEVICE_ATTR(mod_volume_mix, S_IWUSR | S_IRUGO, pod_get_mod_volume_mix, pod_set_mod_volume_mix); -static DEVICE_ATTR(mod_pre_post, S_IWUGO | S_IRUGO, pod_get_mod_pre_post, +static DEVICE_ATTR(mod_pre_post, S_IWUSR | S_IRUGO, pod_get_mod_pre_post, pod_set_mod_pre_post); -static DEVICE_ATTR(modulation_model, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(modulation_model, S_IWUSR | S_IRUGO, pod_get_modulation_model, pod_set_modulation_model); -static DEVICE_ATTR(band_3_frequency, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(band_3_frequency, S_IWUSR | S_IRUGO, pod_get_band_3_frequency, pod_set_band_3_frequency); -static DEVICE_ATTR2(band_4_frequency__bass, band_4_frequency, S_IWUGO | S_IRUGO, +static DEVICE_ATTR2(band_4_frequency__bass, band_4_frequency, S_IWUSR | S_IRUGO, pod_get_band_4_frequency__bass, pod_set_band_4_frequency__bass); -static DEVICE_ATTR(mod_param_1_double_precision, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(mod_param_1_double_precision, S_IWUSR | S_IRUGO, pod_get_mod_param_1_double_precision, pod_set_mod_param_1_double_precision); -static DEVICE_ATTR(delay_param_1_double_precision, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(delay_param_1_double_precision, S_IWUSR | S_IRUGO, pod_get_delay_param_1_double_precision, pod_set_delay_param_1_double_precision); -static DEVICE_ATTR(eq_enable, S_IWUGO | S_IRUGO, pod_get_eq_enable, +static DEVICE_ATTR(eq_enable, S_IWUSR | S_IRUGO, pod_get_eq_enable, pod_set_eq_enable); -static DEVICE_ATTR(tap, S_IWUGO | S_IRUGO, pod_get_tap, pod_set_tap); -static DEVICE_ATTR(volume_tweak_pedal_assign, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(tap, S_IWUSR | S_IRUGO, pod_get_tap, pod_set_tap); +static DEVICE_ATTR(volume_tweak_pedal_assign, S_IWUSR | S_IRUGO, pod_get_volume_tweak_pedal_assign, pod_set_volume_tweak_pedal_assign); -static DEVICE_ATTR(band_5_frequency, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(band_5_frequency, S_IWUSR | S_IRUGO, pod_get_band_5_frequency, pod_set_band_5_frequency); -static DEVICE_ATTR(tuner, S_IWUGO | S_IRUGO, pod_get_tuner, pod_set_tuner); -static DEVICE_ATTR(mic_selection, S_IWUGO | S_IRUGO, pod_get_mic_selection, +static DEVICE_ATTR(tuner, S_IWUSR | S_IRUGO, pod_get_tuner, pod_set_tuner); +static DEVICE_ATTR(mic_selection, S_IWUSR | S_IRUGO, pod_get_mic_selection, pod_set_mic_selection); -static DEVICE_ATTR(cabinet_model, S_IWUGO | S_IRUGO, pod_get_cabinet_model, +static DEVICE_ATTR(cabinet_model, S_IWUSR | S_IRUGO, pod_get_cabinet_model, pod_set_cabinet_model); -static DEVICE_ATTR(stomp_model, S_IWUGO | S_IRUGO, pod_get_stomp_model, +static DEVICE_ATTR(stomp_model, S_IWUSR | S_IRUGO, pod_get_stomp_model, pod_set_stomp_model); -static DEVICE_ATTR(roomlevel, S_IWUGO | S_IRUGO, pod_get_roomlevel, +static DEVICE_ATTR(roomlevel, S_IWUSR | S_IRUGO, pod_get_roomlevel, pod_set_roomlevel); -static DEVICE_ATTR(band_4_frequency, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(band_4_frequency, S_IWUSR | S_IRUGO, pod_get_band_4_frequency, pod_set_band_4_frequency); -static DEVICE_ATTR(band_6_frequency, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(band_6_frequency, S_IWUSR | S_IRUGO, pod_get_band_6_frequency, pod_set_band_6_frequency); -static DEVICE_ATTR(stomp_param_1_note_value, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(stomp_param_1_note_value, S_IWUSR | S_IRUGO, pod_get_stomp_param_1_note_value, 
pod_set_stomp_param_1_note_value); -static DEVICE_ATTR(stomp_param_2, S_IWUGO | S_IRUGO, pod_get_stomp_param_2, +static DEVICE_ATTR(stomp_param_2, S_IWUSR | S_IRUGO, pod_get_stomp_param_2, pod_set_stomp_param_2); -static DEVICE_ATTR(stomp_param_3, S_IWUGO | S_IRUGO, pod_get_stomp_param_3, +static DEVICE_ATTR(stomp_param_3, S_IWUSR | S_IRUGO, pod_get_stomp_param_3, pod_set_stomp_param_3); -static DEVICE_ATTR(stomp_param_4, S_IWUGO | S_IRUGO, pod_get_stomp_param_4, +static DEVICE_ATTR(stomp_param_4, S_IWUSR | S_IRUGO, pod_get_stomp_param_4, pod_set_stomp_param_4); -static DEVICE_ATTR(stomp_param_5, S_IWUGO | S_IRUGO, pod_get_stomp_param_5, +static DEVICE_ATTR(stomp_param_5, S_IWUSR | S_IRUGO, pod_get_stomp_param_5, pod_set_stomp_param_5); -static DEVICE_ATTR(stomp_param_6, S_IWUGO | S_IRUGO, pod_get_stomp_param_6, +static DEVICE_ATTR(stomp_param_6, S_IWUSR | S_IRUGO, pod_get_stomp_param_6, pod_set_stomp_param_6); -static DEVICE_ATTR(amp_switch_select, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(amp_switch_select, S_IWUSR | S_IRUGO, pod_get_amp_switch_select, pod_set_amp_switch_select); -static DEVICE_ATTR(delay_param_4, S_IWUGO | S_IRUGO, pod_get_delay_param_4, +static DEVICE_ATTR(delay_param_4, S_IWUSR | S_IRUGO, pod_get_delay_param_4, pod_set_delay_param_4); -static DEVICE_ATTR(delay_param_5, S_IWUGO | S_IRUGO, pod_get_delay_param_5, +static DEVICE_ATTR(delay_param_5, S_IWUSR | S_IRUGO, pod_get_delay_param_5, pod_set_delay_param_5); -static DEVICE_ATTR(delay_pre_post, S_IWUGO | S_IRUGO, pod_get_delay_pre_post, +static DEVICE_ATTR(delay_pre_post, S_IWUSR | S_IRUGO, pod_get_delay_pre_post, pod_set_delay_pre_post); -static DEVICE_ATTR(delay_model, S_IWUGO | S_IRUGO, pod_get_delay_model, +static DEVICE_ATTR(delay_model, S_IWUSR | S_IRUGO, pod_get_delay_model, pod_set_delay_model); -static DEVICE_ATTR(delay_verb_model, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(delay_verb_model, S_IWUSR | S_IRUGO, pod_get_delay_verb_model, pod_set_delay_verb_model); -static DEVICE_ATTR(tempo_msb, S_IWUGO | S_IRUGO, pod_get_tempo_msb, +static DEVICE_ATTR(tempo_msb, S_IWUSR | S_IRUGO, pod_get_tempo_msb, pod_set_tempo_msb); -static DEVICE_ATTR(tempo_lsb, S_IWUGO | S_IRUGO, pod_get_tempo_lsb, +static DEVICE_ATTR(tempo_lsb, S_IWUSR | S_IRUGO, pod_get_tempo_lsb, pod_set_tempo_lsb); -static DEVICE_ATTR(wah_model, S_IWUGO | S_IRUGO, pod_get_wah_model, +static DEVICE_ATTR(wah_model, S_IWUSR | S_IRUGO, pod_get_wah_model, pod_set_wah_model); -static DEVICE_ATTR(bypass_volume, S_IWUGO | S_IRUGO, pod_get_bypass_volume, +static DEVICE_ATTR(bypass_volume, S_IWUSR | S_IRUGO, pod_get_bypass_volume, pod_set_bypass_volume); -static DEVICE_ATTR(fx_loop_on_off, S_IWUGO | S_IRUGO, pod_get_fx_loop_on_off, +static DEVICE_ATTR(fx_loop_on_off, S_IWUSR | S_IRUGO, pod_get_fx_loop_on_off, pod_set_fx_loop_on_off); -static DEVICE_ATTR(tweak_param_select, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(tweak_param_select, S_IWUSR | S_IRUGO, pod_get_tweak_param_select, pod_set_tweak_param_select); -static DEVICE_ATTR(amp1_engage, S_IWUGO | S_IRUGO, pod_get_amp1_engage, +static DEVICE_ATTR(amp1_engage, S_IWUSR | S_IRUGO, pod_get_amp1_engage, pod_set_amp1_engage); -static DEVICE_ATTR(band_1_gain, S_IWUGO | S_IRUGO, pod_get_band_1_gain, +static DEVICE_ATTR(band_1_gain, S_IWUSR | S_IRUGO, pod_get_band_1_gain, pod_set_band_1_gain); -static DEVICE_ATTR2(band_2_gain__bass, band_2_gain, S_IWUGO | S_IRUGO, +static DEVICE_ATTR2(band_2_gain__bass, band_2_gain, S_IWUSR | S_IRUGO, pod_get_band_2_gain__bass, pod_set_band_2_gain__bass); -static DEVICE_ATTR(band_2_gain, 
S_IWUGO | S_IRUGO, pod_get_band_2_gain, +static DEVICE_ATTR(band_2_gain, S_IWUSR | S_IRUGO, pod_get_band_2_gain, pod_set_band_2_gain); -static DEVICE_ATTR2(band_3_gain__bass, band_3_gain, S_IWUGO | S_IRUGO, +static DEVICE_ATTR2(band_3_gain__bass, band_3_gain, S_IWUSR | S_IRUGO, pod_get_band_3_gain__bass, pod_set_band_3_gain__bass); -static DEVICE_ATTR(band_3_gain, S_IWUGO | S_IRUGO, pod_get_band_3_gain, +static DEVICE_ATTR(band_3_gain, S_IWUSR | S_IRUGO, pod_get_band_3_gain, pod_set_band_3_gain); -static DEVICE_ATTR2(band_4_gain__bass, band_4_gain, S_IWUGO | S_IRUGO, +static DEVICE_ATTR2(band_4_gain__bass, band_4_gain, S_IWUSR | S_IRUGO, pod_get_band_4_gain__bass, pod_set_band_4_gain__bass); -static DEVICE_ATTR2(band_5_gain__bass, band_5_gain, S_IWUGO | S_IRUGO, +static DEVICE_ATTR2(band_5_gain__bass, band_5_gain, S_IWUSR | S_IRUGO, pod_get_band_5_gain__bass, pod_set_band_5_gain__bass); -static DEVICE_ATTR(band_4_gain, S_IWUGO | S_IRUGO, pod_get_band_4_gain, +static DEVICE_ATTR(band_4_gain, S_IWUSR | S_IRUGO, pod_get_band_4_gain, pod_set_band_4_gain); -static DEVICE_ATTR2(band_6_gain__bass, band_6_gain, S_IWUGO | S_IRUGO, +static DEVICE_ATTR2(band_6_gain__bass, band_6_gain, S_IWUSR | S_IRUGO, pod_get_band_6_gain__bass, pod_set_band_6_gain__bass); static DEVICE_ATTR(body, S_IRUGO, variax_get_body, line6_nop_write); static DEVICE_ATTR(pickup1_enable, S_IRUGO, variax_get_pickup1_enable, diff --git a/drivers/staging/line6/midi.c b/drivers/staging/line6/midi.c index 4304dfe6c16..ab67e889d2c 100644 --- a/drivers/staging/line6/midi.c +++ b/drivers/staging/line6/midi.c @@ -350,9 +350,9 @@ static ssize_t midi_set_midi_mask_receive(struct device *dev, return count; } -static DEVICE_ATTR(midi_mask_transmit, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(midi_mask_transmit, S_IWUSR | S_IRUGO, midi_get_midi_mask_transmit, midi_set_midi_mask_transmit); -static DEVICE_ATTR(midi_mask_receive, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(midi_mask_receive, S_IWUSR | S_IRUGO, midi_get_midi_mask_receive, midi_set_midi_mask_receive); /* MIDI device destructor */ diff --git a/drivers/staging/line6/pcm.c b/drivers/staging/line6/pcm.c index e54770e34d2..b9c55f9eb50 100644 --- a/drivers/staging/line6/pcm.c +++ b/drivers/staging/line6/pcm.c @@ -79,9 +79,9 @@ static ssize_t pcm_set_impulse_period(struct device *dev, return count; } -static DEVICE_ATTR(impulse_volume, S_IWUGO | S_IRUGO, pcm_get_impulse_volume, +static DEVICE_ATTR(impulse_volume, S_IWUSR | S_IRUGO, pcm_get_impulse_volume, pcm_set_impulse_volume); -static DEVICE_ATTR(impulse_period, S_IWUGO | S_IRUGO, pcm_get_impulse_period, +static DEVICE_ATTR(impulse_period, S_IWUSR | S_IRUGO, pcm_get_impulse_period, pcm_set_impulse_period); #endif diff --git a/drivers/staging/line6/pod.c b/drivers/staging/line6/pod.c index 22e2cedcacf..d9b30212585 100644 --- a/drivers/staging/line6/pod.c +++ b/drivers/staging/line6/pod.c @@ -1051,48 +1051,48 @@ POD_GET_SYSTEM_PARAM(tuner_pitch, 1); #undef GET_SYSTEM_PARAM /* POD special files: */ -static DEVICE_ATTR(channel, S_IWUGO | S_IRUGO, pod_get_channel, +static DEVICE_ATTR(channel, S_IWUSR | S_IRUGO, pod_get_channel, pod_set_channel); static DEVICE_ATTR(clip, S_IRUGO, pod_wait_for_clip, line6_nop_write); static DEVICE_ATTR(device_id, S_IRUGO, pod_get_device_id, line6_nop_write); static DEVICE_ATTR(dirty, S_IRUGO, pod_get_dirty, line6_nop_write); -static DEVICE_ATTR(dump, S_IWUGO | S_IRUGO, pod_get_dump, pod_set_dump); -static DEVICE_ATTR(dump_buf, S_IWUGO | S_IRUGO, pod_get_dump_buf, +static DEVICE_ATTR(dump, S_IWUSR | S_IRUGO, 
pod_get_dump, pod_set_dump); +static DEVICE_ATTR(dump_buf, S_IWUSR | S_IRUGO, pod_get_dump_buf, pod_set_dump_buf); -static DEVICE_ATTR(finish, S_IWUGO, line6_nop_read, pod_set_finish); +static DEVICE_ATTR(finish, S_IWUSR, line6_nop_read, pod_set_finish); static DEVICE_ATTR(firmware_version, S_IRUGO, pod_get_firmware_version, line6_nop_write); -static DEVICE_ATTR(midi_postprocess, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(midi_postprocess, S_IWUSR | S_IRUGO, pod_get_midi_postprocess, pod_set_midi_postprocess); -static DEVICE_ATTR(monitor_level, S_IWUGO | S_IRUGO, pod_get_monitor_level, +static DEVICE_ATTR(monitor_level, S_IWUSR | S_IRUGO, pod_get_monitor_level, pod_set_monitor_level); static DEVICE_ATTR(name, S_IRUGO, pod_get_name, line6_nop_write); static DEVICE_ATTR(name_buf, S_IRUGO, pod_get_name_buf, line6_nop_write); -static DEVICE_ATTR(retrieve_amp_setup, S_IWUGO, line6_nop_read, +static DEVICE_ATTR(retrieve_amp_setup, S_IWUSR, line6_nop_read, pod_set_retrieve_amp_setup); -static DEVICE_ATTR(retrieve_channel, S_IWUGO, line6_nop_read, +static DEVICE_ATTR(retrieve_channel, S_IWUSR, line6_nop_read, pod_set_retrieve_channel); -static DEVICE_ATTR(retrieve_effects_setup, S_IWUGO, line6_nop_read, +static DEVICE_ATTR(retrieve_effects_setup, S_IWUSR, line6_nop_read, pod_set_retrieve_effects_setup); -static DEVICE_ATTR(routing, S_IWUGO | S_IRUGO, pod_get_routing, +static DEVICE_ATTR(routing, S_IWUSR | S_IRUGO, pod_get_routing, pod_set_routing); static DEVICE_ATTR(serial_number, S_IRUGO, pod_get_serial_number, line6_nop_write); -static DEVICE_ATTR(store_amp_setup, S_IWUGO, line6_nop_read, +static DEVICE_ATTR(store_amp_setup, S_IWUSR, line6_nop_read, pod_set_store_amp_setup); -static DEVICE_ATTR(store_channel, S_IWUGO, line6_nop_read, +static DEVICE_ATTR(store_channel, S_IWUSR, line6_nop_read, pod_set_store_channel); -static DEVICE_ATTR(store_effects_setup, S_IWUGO, line6_nop_read, +static DEVICE_ATTR(store_effects_setup, S_IWUSR, line6_nop_read, pod_set_store_effects_setup); -static DEVICE_ATTR(tuner_freq, S_IWUGO | S_IRUGO, pod_get_tuner_freq, +static DEVICE_ATTR(tuner_freq, S_IWUSR | S_IRUGO, pod_get_tuner_freq, pod_set_tuner_freq); -static DEVICE_ATTR(tuner_mute, S_IWUGO | S_IRUGO, pod_get_tuner_mute, +static DEVICE_ATTR(tuner_mute, S_IWUSR | S_IRUGO, pod_get_tuner_mute, pod_set_tuner_mute); static DEVICE_ATTR(tuner_note, S_IRUGO, pod_get_tuner_note, line6_nop_write); static DEVICE_ATTR(tuner_pitch, S_IRUGO, pod_get_tuner_pitch, line6_nop_write); #ifdef CONFIG_LINE6_USB_RAW -static DEVICE_ATTR(raw, S_IWUGO, line6_nop_read, line6_set_raw); +static DEVICE_ATTR(raw, S_IWUSR, line6_nop_read, line6_set_raw); #endif /* control info callback */ diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c index 6a10b0f9749..879e6992bbc 100644 --- a/drivers/staging/line6/toneport.c +++ b/drivers/staging/line6/toneport.c @@ -154,9 +154,9 @@ static ssize_t toneport_set_led_green(struct device *dev, return count; } -static DEVICE_ATTR(led_red, S_IWUGO | S_IRUGO, line6_nop_read, +static DEVICE_ATTR(led_red, S_IWUSR | S_IRUGO, line6_nop_read, toneport_set_led_red); -static DEVICE_ATTR(led_green, S_IWUGO | S_IRUGO, line6_nop_read, +static DEVICE_ATTR(led_green, S_IWUSR | S_IRUGO, line6_nop_read, toneport_set_led_green); static int toneport_send_cmd(struct usb_device *usbdev, int cmd1, int cmd2) diff --git a/drivers/staging/line6/variax.c b/drivers/staging/line6/variax.c index 894eee7f231..81241cdf1be 100644 --- a/drivers/staging/line6/variax.c +++ b/drivers/staging/line6/variax.c @@ -549,21 
+549,21 @@ static ssize_t variax_set_raw2(struct device *dev, #endif /* Variax workbench special files: */ -static DEVICE_ATTR(model, S_IWUGO | S_IRUGO, variax_get_model, +static DEVICE_ATTR(model, S_IWUSR | S_IRUGO, variax_get_model, variax_set_model); -static DEVICE_ATTR(volume, S_IWUGO | S_IRUGO, variax_get_volume, +static DEVICE_ATTR(volume, S_IWUSR | S_IRUGO, variax_get_volume, variax_set_volume); -static DEVICE_ATTR(tone, S_IWUGO | S_IRUGO, variax_get_tone, variax_set_tone); +static DEVICE_ATTR(tone, S_IWUSR | S_IRUGO, variax_get_tone, variax_set_tone); static DEVICE_ATTR(name, S_IRUGO, variax_get_name, line6_nop_write); static DEVICE_ATTR(bank, S_IRUGO, variax_get_bank, line6_nop_write); static DEVICE_ATTR(dump, S_IRUGO, variax_get_dump, line6_nop_write); -static DEVICE_ATTR(active, S_IWUGO | S_IRUGO, variax_get_active, +static DEVICE_ATTR(active, S_IWUSR | S_IRUGO, variax_get_active, variax_set_active); static DEVICE_ATTR(guitar, S_IRUGO, variax_get_guitar, line6_nop_write); #ifdef CONFIG_LINE6_USB_RAW -static DEVICE_ATTR(raw, S_IWUGO, line6_nop_read, line6_set_raw); -static DEVICE_ATTR(raw2, S_IWUGO, line6_nop_read, variax_set_raw2); +static DEVICE_ATTR(raw, S_IWUSR, line6_nop_read, line6_set_raw); +static DEVICE_ATTR(raw2, S_IWUSR, line6_nop_read, variax_set_raw2); #endif /* diff --git a/drivers/staging/msm/msm_fb.c b/drivers/staging/msm/msm_fb.c index ea268edbf43..23fa049b51f 100644 --- a/drivers/staging/msm/msm_fb.c +++ b/drivers/staging/msm/msm_fb.c @@ -1158,7 +1158,7 @@ static int msm_fb_release(struct fb_info *info, int user) return ret; } -DECLARE_MUTEX(msm_fb_pan_sem); +DEFINE_SEMAPHORE(msm_fb_pan_sem); static int msm_fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) @@ -1962,7 +1962,7 @@ static int msmfb_overlay_play(struct fb_info *info, unsigned long *argp) #endif -DECLARE_MUTEX(msm_fb_ioctl_ppp_sem); +DEFINE_SEMAPHORE(msm_fb_ioctl_ppp_sem); DEFINE_MUTEX(msm_fb_ioctl_lut_sem); DEFINE_MUTEX(msm_fb_ioctl_hist_sem); diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c index 75aa7a36307..4ca45ec7fd8 100644 --- a/drivers/staging/olpc_dcon/olpc_dcon.c +++ b/drivers/staging/olpc_dcon/olpc_dcon.c @@ -17,7 +17,6 @@ #include <linux/console.h> #include <linux/i2c.h> #include <linux/platform_device.h> -#include <linux/i2c-id.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/interrupt.h> @@ -733,7 +732,6 @@ static int dcon_probe(struct i2c_client *client, const struct i2c_device_id *id) edev: platform_device_unregister(dcon_device); dcon_device = NULL; - i2c_set_clientdata(client, NULL); eirq: free_irq(DCON_IRQ, &dcon_driver); einit: @@ -757,8 +755,6 @@ static int dcon_remove(struct i2c_client *client) platform_device_unregister(dcon_device); cancel_work_sync(&dcon_work); - i2c_set_clientdata(client, NULL); - return 0; } diff --git a/drivers/staging/quickstart/quickstart.c b/drivers/staging/quickstart/quickstart.c index d746715d3d8..d83bec876d2 100644 --- a/drivers/staging/quickstart/quickstart.c +++ b/drivers/staging/quickstart/quickstart.c @@ -355,7 +355,6 @@ static int quickstart_acpi_remove(struct acpi_device *device, int type) static void quickstart_exit(void) { input_unregister_device(quickstart_input); - input_free_device(quickstart_input); device_remove_file(&pf_device->dev, &dev_attr_pressed_button); device_remove_file(&pf_device->dev, &dev_attr_buttons); @@ -375,6 +374,7 @@ static int __init quickstart_init_input(void) { struct quickstart_btn **ptr = &quickstart_data.btn_lst; int count; + int ret; 
quickstart_input = input_allocate_device(); @@ -391,7 +391,13 @@ static int __init quickstart_init_input(void) ptr = &((*ptr)->next); } - return input_register_device(quickstart_input); + ret = input_register_device(quickstart_input); + if (ret) { + input_free_device(quickstart_input); + return ret; + } + + return 0; } static int __init quickstart_init(void) diff --git a/drivers/staging/rt2860/common/cmm_aes.c b/drivers/staging/rt2860/common/cmm_aes.c index 1d159ff82fd..a99879bada4 100644 --- a/drivers/staging/rt2860/common/cmm_aes.c +++ b/drivers/staging/rt2860/common/cmm_aes.c @@ -330,8 +330,6 @@ void construct_mic_iv(unsigned char *mic_iv, for (i = 8; i < 14; i++) mic_iv[i] = pn_vector[13 - i]; /* mic_iv[8:13] = PN[5:0] */ #endif - i = (payload_length / 256); - i = (payload_length % 256); mic_iv[14] = (unsigned char)(payload_length / 256); mic_iv[15] = (unsigned char)(payload_length % 256); diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c index ebf9074a908..cd15daae541 100644 --- a/drivers/staging/rt2860/usb_main_dev.c +++ b/drivers/staging/rt2860/usb_main_dev.c @@ -65,6 +65,7 @@ struct usb_device_id rtusb_usb_id[] = { {USB_DEVICE(0x14B2, 0x3C07)}, /* AL */ {USB_DEVICE(0x050D, 0x8053)}, /* Belkin */ {USB_DEVICE(0x050D, 0x825B)}, /* Belkin */ + {USB_DEVICE(0x050D, 0x935A)}, /* Belkin F6D4050 v1 */ {USB_DEVICE(0x050D, 0x935B)}, /* Belkin F6D4050 v2 */ {USB_DEVICE(0x14B2, 0x3C23)}, /* Airlink */ {USB_DEVICE(0x14B2, 0x3C27)}, /* Airlink */ @@ -181,6 +182,7 @@ struct usb_device_id rtusb_usb_id[] = { {USB_DEVICE(0x2001, 0x3C09)}, /* D-Link */ {USB_DEVICE(0x2001, 0x3C0A)}, /* D-Link 3072 */ {USB_DEVICE(0x2019, 0xED14)}, /* Planex Communications, Inc. */ + {USB_DEVICE(0x0411, 0x015D)}, /* Buffalo Airstation WLI-UC-GN */ {} /* Terminating entry */ }; diff --git a/drivers/staging/rtl8187se/r8185b_init.c b/drivers/staging/rtl8187se/r8185b_init.c index 46000d72f4c..3bdf9b31cc4 100644 --- a/drivers/staging/rtl8187se/r8185b_init.c +++ b/drivers/staging/rtl8187se/r8185b_init.c @@ -264,8 +264,12 @@ HwHSSIThreeWire( udelay(10); } - if (TryCnt == TC_3W_POLL_MAX_TRY_CNT) - panic("HwThreeWire(): CmdReg: %#X RE|WE bits are not clear!!\n", u1bTmp); + if (TryCnt == TC_3W_POLL_MAX_TRY_CNT) { + printk(KERN_ERR "rtl8187se: HwThreeWire(): CmdReg:" + " %#X RE|WE bits are not clear!!\n", u1bTmp); + dump_stack(); + return 0; + } /* RTL8187S HSSI Read/Write Function */ u1bTmp = read_nic_byte(dev, RF_SW_CONFIG); @@ -298,13 +302,23 @@ HwHSSIThreeWire( int idx; int ByteCnt = nDataBufBitCnt / 8; /* printk("%d\n",nDataBufBitCnt); */ - if ((nDataBufBitCnt % 8) != 0) - panic("HwThreeWire(): nDataBufBitCnt(%d) should be multiple of 8!!!\n", - nDataBufBitCnt); + if ((nDataBufBitCnt % 8) != 0) { + printk(KERN_ERR "rtl8187se: " + "HwThreeWire(): nDataBufBitCnt(%d)" + " should be multiple of 8!!!\n", + nDataBufBitCnt); + dump_stack(); + nDataBufBitCnt += 8; + nDataBufBitCnt &= ~7; + } - if (nDataBufBitCnt > 64) - panic("HwThreeWire(): nDataBufBitCnt(%d) should <= 64!!!\n", - nDataBufBitCnt); + if (nDataBufBitCnt > 64) { + printk(KERN_ERR "rtl8187se: HwThreeWire():" + " nDataBufBitCnt(%d) should <= 64!!!\n", + nDataBufBitCnt); + dump_stack(); + nDataBufBitCnt = 64; + } for (idx = 0; idx < ByteCnt; idx++) write_nic_byte(dev, (SW_3W_DB0+idx), *(pDataBuf+idx)); diff --git a/drivers/staging/rtl8192e/r8192E_core.c b/drivers/staging/rtl8192e/r8192E_core.c index a202194b5cb..b1786dcac24 100644 --- a/drivers/staging/rtl8192e/r8192E_core.c +++ b/drivers/staging/rtl8192e/r8192E_core.c @@ -5829,6 
+5829,9 @@ static void rtl8192_rx(struct net_device *dev) } } + pci_unmap_single(priv->pdev, *((dma_addr_t *) skb->cb), + priv->rxbuffersize, PCI_DMA_FROMDEVICE); + skb = new_skb; priv->rx_buf[priv->rx_idx] = skb; *((dma_addr_t *) skb->cb) = pci_map_single(priv->pdev, skb_tail_pointer(skb), priv->rxbuffersize, PCI_DMA_FROMDEVICE); diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h index 7fca42c7c0d..831d81e0e42 100644 --- a/drivers/staging/rtl8712/osdep_service.h +++ b/drivers/staging/rtl8712/osdep_service.h @@ -22,7 +22,6 @@ #include <linux/module.h> #include <linux/sched.h> #include <linux/kref.h> -#include <linux/smp_lock.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/usb.h> @@ -161,7 +160,7 @@ static inline u32 _down_sema(struct semaphore *sema) static inline void _rtl_rwlock_init(struct semaphore *prwlock) { - init_MUTEX(prwlock); + sema_init(prwlock, 1); } static inline void _init_listhead(struct list_head *list) diff --git a/drivers/staging/rtl8712/usb_halinit.c b/drivers/staging/rtl8712/usb_halinit.c index f6569dce301..0e9483bbabe 100644 --- a/drivers/staging/rtl8712/usb_halinit.c +++ b/drivers/staging/rtl8712/usb_halinit.c @@ -37,7 +37,7 @@ u8 r8712_usb_hal_bus_init(struct _adapter *padapter) { u8 val8 = 0; u8 ret = _SUCCESS; - u8 PollingCnt = 20; + int PollingCnt = 20; struct registry_priv *pregistrypriv = &padapter->registrypriv; if (pregistrypriv->chip_version == RTL8712_FPGA) { diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c index eb44b60e1eb..ac2bf11e111 100644 --- a/drivers/staging/samsung-laptop/samsung-laptop.c +++ b/drivers/staging/samsung-laptop/samsung-laptop.c @@ -356,7 +356,7 @@ static ssize_t set_silent_state(struct device *dev, } return count; } -static DEVICE_ATTR(silent, S_IWUGO | S_IRUGO, +static DEVICE_ATTR(silent, S_IWUSR | S_IRUGO, get_silent_state, set_silent_state); diff --git a/drivers/staging/smbfs/inode.c b/drivers/staging/smbfs/inode.c index f9c493591ce..540a984bb51 100644 --- a/drivers/staging/smbfs/inode.c +++ b/drivers/staging/smbfs/inode.c @@ -537,7 +537,7 @@ static int smb_fill_super(struct super_block *sb, void *raw_data, int silent) server->mnt = NULL; server->sock_file = NULL; init_waitqueue_head(&server->conn_wq); - init_MUTEX(&server->sem); + sema_init(&server->sem, 1); INIT_LIST_HEAD(&server->entry); INIT_LIST_HEAD(&server->xmitq); INIT_LIST_HEAD(&server->recvq); diff --git a/drivers/staging/solo6x10/solo6010-v4l2-enc.c b/drivers/staging/solo6x10/solo6010-v4l2-enc.c index bbf3d9c4abb..097e82bc7a6 100644 --- a/drivers/staging/solo6x10/solo6010-v4l2-enc.c +++ b/drivers/staging/solo6x10/solo6010-v4l2-enc.c @@ -766,7 +766,7 @@ static int solo_enc_open(struct file *file) &solo_enc->lock, V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_INTERLACED, - sizeof(struct videobuf_buffer), fh); + sizeof(struct videobuf_buffer), fh, NULL); spin_unlock(&solo_enc->lock); diff --git a/drivers/staging/solo6x10/solo6010-v4l2.c b/drivers/staging/solo6x10/solo6010-v4l2.c index 9731fa02b5e..6ffd21de837 100644 --- a/drivers/staging/solo6x10/solo6010-v4l2.c +++ b/drivers/staging/solo6x10/solo6010-v4l2.c @@ -437,7 +437,7 @@ static int solo_v4l2_open(struct file *file) &solo_dev->pdev->dev, &fh->slock, V4L2_BUF_TYPE_VIDEO_CAPTURE, SOLO_DISP_PIX_FIELD, - sizeof(struct videobuf_buffer), fh); + sizeof(struct videobuf_buffer), fh, NULL); return 0; } diff --git a/drivers/staging/speakup/buffers.c b/drivers/staging/speakup/buffers.c index 
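Both the rtl8712 and smbfs hunks above replace the removed init_MUTEX() helper with sema_init(sem, 1): a counting semaphore whose count starts at 1 behaves as a simple mutual-exclusion lock. A user-space analogue using POSIX semaphores, just to show the same idea (this is not the kernel API; on older glibc, link with -pthread):

#include <semaphore.h>
#include <stdio.h>

int main(void)
{
	sem_t lock;

	/* A count of 1 gives mutex-like behaviour, the same idea as
	 * sema_init(&server->sem, 1) replacing init_MUTEX(). */
	sem_init(&lock, 0, 1);

	sem_wait(&lock);	/* enter critical section */
	puts("holding the semaphore");
	sem_post(&lock);	/* leave critical section */

	sem_destroy(&lock);
	return 0;
}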
b7b60d5e866..a2db956edd5 100644 --- a/drivers/staging/speakup/buffers.c +++ b/drivers/staging/speakup/buffers.c @@ -1,5 +1,4 @@ #include <linux/console.h> -#include <linux/smp_lock.h> #include <linux/types.h> #include <linux/wait.h> diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c index adb93f21c0d..65b231178f0 100644 --- a/drivers/staging/speakup/fakekey.c +++ b/drivers/staging/speakup/fakekey.c @@ -62,7 +62,6 @@ void speakup_remove_virtual_keyboard(void) { if (virt_keyboard != NULL) { input_unregister_device(virt_keyboard); - input_free_device(virt_keyboard); virt_keyboard = NULL; } } diff --git a/drivers/staging/spectra/ffsport.c b/drivers/staging/spectra/ffsport.c index c7932da03c5..63a9d0adf32 100644 --- a/drivers/staging/spectra/ffsport.c +++ b/drivers/staging/spectra/ffsport.c @@ -656,7 +656,7 @@ static int SBD_setup_device(struct spectra_nand_dev *dev, int which) /* Here we force report 512 byte hardware sector size to Kernel */ blk_queue_logical_block_size(dev->queue, 512); - blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH); + blk_queue_flush(dev->queue, REQ_FLUSH); dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd"); if (IS_ERR(dev->thread)) { diff --git a/drivers/staging/stradis/Kconfig b/drivers/staging/stradis/Kconfig index 92e89114189..02f0fc504cf 100644 --- a/drivers/staging/stradis/Kconfig +++ b/drivers/staging/stradis/Kconfig @@ -1,6 +1,6 @@ config VIDEO_STRADIS tristate "Stradis 4:2:2 MPEG-2 video driver (DEPRECATED)" - depends on EXPERIMENTAL && PCI && VIDEO_V4L1 && VIRT_TO_BUS + depends on EXPERIMENTAL && PCI && VIDEO_V4L1 && VIRT_TO_BUS && BKL help Say Y here to enable support for the Stradis 4:2:2 MPEG-2 video driver for PCI. There is a product page at diff --git a/drivers/staging/stradis/stradis.c b/drivers/staging/stradis/stradis.c index a057824e7eb..807dd7eb748 100644 --- a/drivers/staging/stradis/stradis.c +++ b/drivers/staging/stradis/stradis.c @@ -1286,6 +1286,7 @@ static long saa_ioctl(struct file *file, case VIDIOCGCAP: { struct video_capability b; + memset(&b, 0, sizeof(b)); strcpy(b.name, saa->video_dev.name); b.type = VID_TYPE_CAPTURE | VID_TYPE_OVERLAY | VID_TYPE_CLIPPING | VID_TYPE_FRAMERAM | @@ -1416,6 +1417,7 @@ static long saa_ioctl(struct file *file, case VIDIOCGWIN: { struct video_window vw; + memset(&vw, 0, sizeof(vw)); vw.x = saa->win.x; vw.y = saa->win.y; vw.width = saa->win.width; @@ -1448,6 +1450,7 @@ static long saa_ioctl(struct file *file, case VIDIOCGFBUF: { struct video_buffer v; + memset(&v, 0, sizeof(v)); v.base = (void *)saa->win.vidadr; v.height = saa->win.sheight; v.width = saa->win.swidth; @@ -1492,6 +1495,7 @@ static long saa_ioctl(struct file *file, case VIDIOCGAUDIO: { struct video_audio v; + memset(&v, 0, sizeof(v)); v = saa->audio_dev; v.flags &= ~(VIDEO_AUDIO_MUTE | VIDEO_AUDIO_MUTABLE); v.flags |= VIDEO_AUDIO_MUTABLE | VIDEO_AUDIO_VOLUME; @@ -1534,6 +1538,7 @@ static long saa_ioctl(struct file *file, case VIDIOCGUNIT: { struct video_unit vu; + memset(&vu, 0, sizeof(vu)); vu.video = saa->video_dev.minor; vu.vbi = VIDEO_NO_UNIT; vu.radio = VIDEO_NO_UNIT; @@ -1888,6 +1893,7 @@ static int saa_open(struct file *file) saa->user++; if (saa->user > 1) { + saa->user--; unlock_kernel(); return 0; /* device open already, don't reset */ } @@ -2000,10 +2006,13 @@ static int __devinit configure_saa7146(struct pci_dev *pdev, int num) if (retval < 0) { dev_err(&pdev->dev, "%d: error in registering video device!\n", num); - goto errio; + goto errirq; } return 0; + +errirq: + 
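The stradis saa_ioctl() hunks above memset() each reply structure before filling it in; any field or padding byte the driver does not explicitly set would otherwise carry stale kernel stack contents out to userspace through the subsequent copy. A minimal user-space sketch of the pattern; struct video_window_like is an invented stand-in, not the real V4L1 structure:

#include <stdio.h>
#include <string.h>

struct video_window_like {
	int x, y;
	int width, height;
	int chromakey;
	int flags;
	int clipcount;	/* never set for this device */
};

static void fill_reply(struct video_window_like *vw)
{
	memset(vw, 0, sizeof(*vw));	/* nothing stale is handed back */
	vw->x = 10;
	vw->y = 20;
	vw->width = 640;
	vw->height = 480;
}

int main(void)
{
	struct video_window_like vw;

	fill_reply(&vw);
	printf("clipcount=%d flags=%d\n", vw.clipcount, vw.flags);	/* both 0 */
	return 0;
}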
free_irq(saa->irq, saa); errio: iounmap(saa->saa7146_mem); err: diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig index ff64d464143..93de4f2e8bf 100644 --- a/drivers/staging/tidspbridge/Kconfig +++ b/drivers/staging/tidspbridge/Kconfig @@ -6,7 +6,6 @@ menuconfig TIDSPBRIDGE tristate "DSP Bridge driver" depends on ARCH_OMAP3 select OMAP_MBOX_FWK - select OMAP_IOMMU help DSP/BIOS Bridge is designed for platforms that contain a GPP and one or more attached DSPs. The GPP is considered the master or diff --git a/drivers/staging/tidspbridge/Makefile b/drivers/staging/tidspbridge/Makefile index 50decc2935c..41c644c3318 100644 --- a/drivers/staging/tidspbridge/Makefile +++ b/drivers/staging/tidspbridge/Makefile @@ -2,18 +2,19 @@ obj-$(CONFIG_TIDSPBRIDGE) += bridgedriver.o libgen = gen/gb.o gen/gs.o gen/gh.o gen/uuidutil.o libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \ - core/tiomap3430_pwr.o core/tiomap_io.o core/dsp-mmu.o \ + core/tiomap3430_pwr.o core/tiomap_io.o \ core/ue_deh.o core/wdt.o core/dsp-clock.o core/sync.o libpmgr = pmgr/chnl.o pmgr/io.o pmgr/msg.o pmgr/cod.o pmgr/dev.o pmgr/dspapi.o \ - pmgr/cmm.o pmgr/dbll.o + pmgr/dmm.o pmgr/cmm.o pmgr/dbll.o librmgr = rmgr/dbdcd.o rmgr/disp.o rmgr/drv.o rmgr/mgr.o rmgr/node.o \ rmgr/proc.o rmgr/pwr.o rmgr/rmm.o rmgr/strm.o rmgr/dspdrv.o \ rmgr/nldr.o rmgr/drv_interface.o libdload = dynload/cload.o dynload/getsection.o dynload/reloc.o \ dynload/tramp.o +libhw = hw/hw_mmu.o bridgedriver-y := $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \ - $(libdload) + $(libdload) $(libhw) #Machine dependent ccflags-y += -D_TI_ -D_DB_TIOMAP -DTMS32060 \ diff --git a/drivers/staging/tidspbridge/core/_deh.h b/drivers/staging/tidspbridge/core/_deh.h index 8ae263387a8..16723cd3483 100644 --- a/drivers/staging/tidspbridge/core/_deh.h +++ b/drivers/staging/tidspbridge/core/_deh.h @@ -27,8 +27,9 @@ struct deh_mgr { struct bridge_dev_context *hbridge_context; /* Bridge context. */ struct ntfy_object *ntfy_obj; /* NTFY object */ -}; -int mmu_fault_isr(struct iommu *mmu); + /* MMU Fault DPC */ + struct tasklet_struct dpc_tasklet; +}; #endif /* _DEH_ */ diff --git a/drivers/staging/tidspbridge/core/_tiomap.h b/drivers/staging/tidspbridge/core/_tiomap.h index e0a801c1cb9..1c1f157e167 100644 --- a/drivers/staging/tidspbridge/core/_tiomap.h +++ b/drivers/staging/tidspbridge/core/_tiomap.h @@ -23,8 +23,8 @@ #include <plat/clockdomain.h> #include <mach-omap2/prm-regbits-34xx.h> #include <mach-omap2/cm-regbits-34xx.h> -#include <dspbridge/dsp-mmu.h> #include <dspbridge/devdefs.h> +#include <hw_defs.h> #include <dspbridge/dspioctl.h> /* for bridge_ioctl_extproc defn */ #include <dspbridge/sync.h> #include <dspbridge/clk.h> @@ -306,18 +306,6 @@ static const struct bpwr_clk_t bpwr_clks[] = { #define CLEAR_BIT_INDEX(reg, index) (reg &= ~(1 << (index))) -struct shm_segs { - u32 seg0_da; - u32 seg0_pa; - u32 seg0_va; - u32 seg0_size; - u32 seg1_da; - u32 seg1_pa; - u32 seg1_va; - u32 seg1_size; -}; - - /* This Bridge driver's device context: */ struct bridge_dev_context { struct dev_object *hdev_obj; /* Handle to Bridge device object. 
*/ @@ -328,6 +316,7 @@ struct bridge_dev_context { */ u32 dw_dsp_ext_base_addr; /* See the comment above */ u32 dw_api_reg_base; /* API mem map'd registers */ + void __iomem *dw_dsp_mmu_base; /* DSP MMU Mapped registers */ u32 dw_api_clk_base; /* CLK Registers */ u32 dw_dsp_clk_m2_base; /* DSP Clock Module m2 */ u32 dw_public_rhea; /* Pub Rhea */ @@ -339,8 +328,7 @@ struct bridge_dev_context { u32 dw_internal_size; /* Internal memory size */ struct omap_mbox *mbox; /* Mail box handle */ - struct iommu *dsp_mmu; /* iommu for iva2 handler */ - struct shm_segs sh_s; + struct cfg_hostres *resources; /* Host Resources */ /* @@ -353,6 +341,7 @@ struct bridge_dev_context { /* TC Settings */ bool tc_word_swap_on; /* Traffic Controller Word Swap */ + struct pg_table_attrs *pt_attrs; u32 dsp_per_clks; }; diff --git a/drivers/staging/tidspbridge/core/dsp-mmu.c b/drivers/staging/tidspbridge/core/dsp-mmu.c deleted file mode 100644 index 983c95adc8f..00000000000 --- a/drivers/staging/tidspbridge/core/dsp-mmu.c +++ /dev/null @@ -1,317 +0,0 @@ -/* - * dsp-mmu.c - * - * DSP-BIOS Bridge driver support functions for TI OMAP processors. - * - * DSP iommu. - * - * Copyright (C) 2010 Texas Instruments, Inc. - * - * This package is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED - * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. - */ - -#include <dspbridge/host_os.h> -#include <plat/dmtimer.h> -#include <dspbridge/dbdefs.h> -#include <dspbridge/dev.h> -#include <dspbridge/io_sm.h> -#include <dspbridge/dspdeh.h> -#include "_tiomap.h" - -#include <dspbridge/dsp-mmu.h> - -#define MMU_CNTL_TWL_EN (1 << 2) - -static struct tasklet_struct mmu_tasklet; - -#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE -static void mmu_fault_print_stack(struct bridge_dev_context *dev_context) -{ - void *dummy_addr; - u32 fa, tmp; - struct iotlb_entry e; - struct iommu *mmu = dev_context->dsp_mmu; - dummy_addr = (void *)__get_free_page(GFP_ATOMIC); - - /* - * Before acking the MMU fault, let's make sure MMU can only - * access entry #0. Then add a new entry so that the DSP OS - * can continue in order to dump the stack. 
- */ - tmp = iommu_read_reg(mmu, MMU_CNTL); - tmp &= ~MMU_CNTL_TWL_EN; - iommu_write_reg(mmu, tmp, MMU_CNTL); - fa = iommu_read_reg(mmu, MMU_FAULT_AD); - e.da = fa & PAGE_MASK; - e.pa = virt_to_phys(dummy_addr); - e.valid = 1; - e.prsvd = 1; - e.pgsz = IOVMF_PGSZ_4K & MMU_CAM_PGSZ_MASK; - e.endian = MMU_RAM_ENDIAN_LITTLE; - e.elsz = MMU_RAM_ELSZ_32; - e.mixed = 0; - - load_iotlb_entry(mmu, &e); - - dsp_clk_enable(DSP_CLK_GPT8); - - dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe); - - /* Clear MMU interrupt */ - tmp = iommu_read_reg(mmu, MMU_IRQSTATUS); - iommu_write_reg(mmu, tmp, MMU_IRQSTATUS); - - dump_dsp_stack(dev_context); - dsp_clk_disable(DSP_CLK_GPT8); - - iopgtable_clear_entry(mmu, fa); - free_page((unsigned long)dummy_addr); -} -#endif - - -static void fault_tasklet(unsigned long data) -{ - struct iommu *mmu = (struct iommu *)data; - struct bridge_dev_context *dev_ctx; - struct deh_mgr *dm; - u32 fa; - dev_get_deh_mgr(dev_get_first(), &dm); - dev_get_bridge_context(dev_get_first(), &dev_ctx); - - if (!dm || !dev_ctx) - return; - - fa = iommu_read_reg(mmu, MMU_FAULT_AD); - -#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE - print_dsp_trace_buffer(dev_ctx); - dump_dl_modules(dev_ctx); - mmu_fault_print_stack(dev_ctx); -#endif - - bridge_deh_notify(dm, DSP_MMUFAULT, fa); -} - -/* - * ======== mmu_fault_isr ======== - * ISR to be triggered by a DSP MMU fault interrupt. - */ -static int mmu_fault_callback(struct iommu *mmu) -{ - if (!mmu) - return -EPERM; - - iommu_write_reg(mmu, 0, MMU_IRQENABLE); - tasklet_schedule(&mmu_tasklet); - return 0; -} - -/** - * dsp_mmu_init() - initialize dsp_mmu module and returns a handle - * - * This function initialize dsp mmu module and returns a struct iommu - * handle to use it for dsp maps. - * - */ -struct iommu *dsp_mmu_init() -{ - struct iommu *mmu; - - mmu = iommu_get("iva2"); - - if (!IS_ERR(mmu)) { - tasklet_init(&mmu_tasklet, fault_tasklet, (unsigned long)mmu); - mmu->isr = mmu_fault_callback; - } - - return mmu; -} - -/** - * dsp_mmu_exit() - destroy dsp mmu module - * @mmu: Pointer to iommu handle. - * - * This function destroys dsp mmu module. - * - */ -void dsp_mmu_exit(struct iommu *mmu) -{ - if (mmu) - iommu_put(mmu); - tasklet_kill(&mmu_tasklet); -} - -/** - * user_va2_pa() - get physical address from userspace address. - * @mm: mm_struct Pointer of the process. - * @address: Virtual user space address. - * - */ -static u32 user_va2_pa(struct mm_struct *mm, u32 address) -{ - pgd_t *pgd; - pmd_t *pmd; - pte_t *ptep, pte; - - pgd = pgd_offset(mm, address); - if (!(pgd_none(*pgd) || pgd_bad(*pgd))) { - pmd = pmd_offset(pgd, address); - if (!(pmd_none(*pmd) || pmd_bad(*pmd))) { - ptep = pte_offset_map(pmd, address); - if (ptep) { - pte = *ptep; - if (pte_present(pte)) - return pte & PAGE_MASK; - } - } - } - - return 0; -} - -/** - * get_io_pages() - pin and get pages of io user's buffer. - * @mm: mm_struct Pointer of the process. - * @uva: Virtual user space address. - * @pages Pages to be pined. - * @usr_pgs struct page array pointer where the user pages will be stored - * - */ -static int get_io_pages(struct mm_struct *mm, u32 uva, unsigned pages, - struct page **usr_pgs) -{ - u32 pa; - int i; - struct page *pg; - - for (i = 0; i < pages; i++) { - pa = user_va2_pa(mm, uva); - - if (!pfn_valid(__phys_to_pfn(pa))) - break; - - pg = phys_to_page(pa); - usr_pgs[i] = pg; - get_page(pg); - } - return i; -} - -/** - * user_to_dsp_map() - maps user to dsp virtual address - * @mmu: Pointer to iommu handle. - * @uva: Virtual user space address. 
- * @da DSP address - * @size Buffer size to map. - * @usr_pgs struct page array pointer where the user pages will be stored - * - * This function maps a user space buffer into DSP virtual address. - * - */ -u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size, - struct page **usr_pgs) -{ - int res, w; - unsigned pages; - int i; - struct vm_area_struct *vma; - struct mm_struct *mm = current->mm; - struct sg_table *sgt; - struct scatterlist *sg; - - if (!size || !usr_pgs) - return -EINVAL; - - pages = size / PG_SIZE4K; - - down_read(&mm->mmap_sem); - vma = find_vma(mm, uva); - while (vma && (uva + size > vma->vm_end)) - vma = find_vma(mm, vma->vm_end + 1); - - if (!vma) { - pr_err("%s: Failed to get VMA region for 0x%x (%d)\n", - __func__, uva, size); - up_read(&mm->mmap_sem); - return -EINVAL; - } - if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) - w = 1; - - if (vma->vm_flags & VM_IO) - i = get_io_pages(mm, uva, pages, usr_pgs); - else - i = get_user_pages(current, mm, uva, pages, w, 1, - usr_pgs, NULL); - up_read(&mm->mmap_sem); - - if (i < 0) - return i; - - if (i < pages) { - res = -EFAULT; - goto err_pages; - } - - sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); - if (!sgt) { - res = -ENOMEM; - goto err_pages; - } - - res = sg_alloc_table(sgt, pages, GFP_KERNEL); - - if (res < 0) - goto err_sg; - - for_each_sg(sgt->sgl, sg, sgt->nents, i) - sg_set_page(sg, usr_pgs[i], PAGE_SIZE, 0); - - da = iommu_vmap(mmu, da, sgt, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32); - - if (!IS_ERR_VALUE(da)) - return da; - res = (int)da; - - sg_free_table(sgt); -err_sg: - kfree(sgt); - i = pages; -err_pages: - while (i--) - put_page(usr_pgs[i]); - return res; -} - -/** - * user_to_dsp_unmap() - unmaps DSP virtual buffer. - * @mmu: Pointer to iommu handle. - * @da DSP address - * - * This function unmaps a user space buffer into DSP virtual address. 
- * - */ -int user_to_dsp_unmap(struct iommu *mmu, u32 da) -{ - unsigned i; - struct sg_table *sgt; - struct scatterlist *sg; - - sgt = iommu_vunmap(mmu, da); - if (!sgt) - return -EFAULT; - - for_each_sg(sgt->sgl, sg, sgt->nents, i) - put_page(sg_page(sg)); - sg_free_table(sgt); - kfree(sgt); - - return 0; -} diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c index 194badaba0e..571864555dd 100644 --- a/drivers/staging/tidspbridge/core/io_sm.c +++ b/drivers/staging/tidspbridge/core/io_sm.c @@ -39,6 +39,10 @@ #include <dspbridge/ntfy.h> #include <dspbridge/sync.h> +/* Hardware Abstraction Layer */ +#include <hw_defs.h> +#include <hw_mmu.h> + /* Bridge Driver */ #include <dspbridge/dspdeh.h> #include <dspbridge/dspio.h> @@ -287,7 +291,6 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr) struct cod_manager *cod_man; struct chnl_mgr *hchnl_mgr; struct msg_mgr *hmsg_mgr; - struct shm_segs *sm_sg; u32 ul_shm_base; u32 ul_shm_base_offset; u32 ul_shm_limit; @@ -310,9 +313,18 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr) struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB]; struct cfg_hostres *host_res; struct bridge_dev_context *pbridge_context; + u32 map_attrs; u32 shm0_end; u32 ul_dyn_ext_base; u32 ul_seg1_size = 0; + u32 pa_curr = 0; + u32 va_curr = 0; + u32 gpp_va_curr = 0; + u32 num_bytes = 0; + u32 all_bits = 0; + u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB, + HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB + }; status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context); if (!pbridge_context) { @@ -325,8 +337,6 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr) status = -EFAULT; goto func_end; } - sm_sg = &pbridge_context->sh_s; - status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man); if (!cod_man) { status = -EFAULT; @@ -461,14 +471,129 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr) if (status) goto func_end; - sm_sg->seg1_pa = ul_gpp_pa; - sm_sg->seg1_da = ul_dyn_ext_base; - sm_sg->seg1_va = ul_gpp_va; - sm_sg->seg1_size = ul_seg1_size; - sm_sg->seg0_pa = ul_gpp_pa + ul_pad_size + ul_seg1_size; - sm_sg->seg0_da = ul_dsp_va; - sm_sg->seg0_va = ul_gpp_va + ul_pad_size + ul_seg1_size; - sm_sg->seg0_size = ul_seg_size; + pa_curr = ul_gpp_pa; + va_curr = ul_dyn_ext_base * hio_mgr->word_size; + gpp_va_curr = ul_gpp_va; + num_bytes = ul_seg1_size; + + /* + * Try to fit into TLB entries. If not possible, push them to page + * tables. It is quite possible that if sections are not on + * bigger page boundary, we may end up making several small pages. + * So, push them onto page tables, if that is the case. + */ + map_attrs = 0x00000000; + map_attrs = DSP_MAPLITTLEENDIAN; + map_attrs |= DSP_MAPPHYSICALADDR; + map_attrs |= DSP_MAPELEMSIZE32; + map_attrs |= DSP_MAPDONOTLOCK; + + while (num_bytes) { + /* + * To find the max. page size with which both PA & VA are + * aligned. + */ + all_bits = pa_curr | va_curr; + dev_dbg(bridge, "all_bits %x, pa_curr %x, va_curr %x, " + "num_bytes %x\n", all_bits, pa_curr, va_curr, + num_bytes); + for (i = 0; i < 4; i++) { + if ((num_bytes >= page_size[i]) && ((all_bits & + (page_size[i] - + 1)) == 0)) { + status = + hio_mgr->intf_fxns-> + pfn_brd_mem_map(hio_mgr->hbridge_context, + pa_curr, va_curr, + page_size[i], map_attrs, + NULL); + if (status) + goto func_end; + pa_curr += page_size[i]; + va_curr += page_size[i]; + gpp_va_curr += page_size[i]; + num_bytes -= page_size[i]; + /* + * Don't try smaller sizes. Hopefully we have + * reached an address aligned to a bigger page + * size. 
+ */ + break; + } + } + } + pa_curr += ul_pad_size; + va_curr += ul_pad_size; + gpp_va_curr += ul_pad_size; + + /* Configure the TLB entries for the next cacheable segment */ + num_bytes = ul_seg_size; + va_curr = ul_dsp_va * hio_mgr->word_size; + while (num_bytes) { + /* + * To find the max. page size with which both PA & VA are + * aligned. + */ + all_bits = pa_curr | va_curr; + dev_dbg(bridge, "all_bits for Seg1 %x, pa_curr %x, " + "va_curr %x, num_bytes %x\n", all_bits, pa_curr, + va_curr, num_bytes); + for (i = 0; i < 4; i++) { + if (!(num_bytes >= page_size[i]) || + !((all_bits & (page_size[i] - 1)) == 0)) + continue; + if (ndx < MAX_LOCK_TLB_ENTRIES) { + /* + * This is the physical address written to + * DSP MMU. + */ + ae_proc[ndx].ul_gpp_pa = pa_curr; + /* + * This is the virtual uncached ioremapped + * address!!! + */ + ae_proc[ndx].ul_gpp_va = gpp_va_curr; + ae_proc[ndx].ul_dsp_va = + va_curr / hio_mgr->word_size; + ae_proc[ndx].ul_size = page_size[i]; + ae_proc[ndx].endianism = HW_LITTLE_ENDIAN; + ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT; + ae_proc[ndx].mixed_mode = HW_MMU_CPUES; + dev_dbg(bridge, "shm MMU TLB entry PA %x" + " VA %x DSP_VA %x Size %x\n", + ae_proc[ndx].ul_gpp_pa, + ae_proc[ndx].ul_gpp_va, + ae_proc[ndx].ul_dsp_va * + hio_mgr->word_size, page_size[i]); + ndx++; + } else { + status = + hio_mgr->intf_fxns-> + pfn_brd_mem_map(hio_mgr->hbridge_context, + pa_curr, va_curr, + page_size[i], map_attrs, + NULL); + dev_dbg(bridge, + "shm MMU PTE entry PA %x" + " VA %x DSP_VA %x Size %x\n", + ae_proc[ndx].ul_gpp_pa, + ae_proc[ndx].ul_gpp_va, + ae_proc[ndx].ul_dsp_va * + hio_mgr->word_size, page_size[i]); + if (status) + goto func_end; + } + pa_curr += page_size[i]; + va_curr += page_size[i]; + gpp_va_curr += page_size[i]; + num_bytes -= page_size[i]; + /* + * Don't try smaller sizes. Hopefully we have reached + * an address aligned to a bigger page size. + */ + break; + } + } /* * Copy remaining entries from CDB. All entries are 1 MB and @@ -509,12 +634,38 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr) "DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa, ae_proc[ndx].ul_dsp_va); ndx++; + } else { + status = hio_mgr->intf_fxns->pfn_brd_mem_map + (hio_mgr->hbridge_context, + hio_mgr->ext_proc_info.ty_tlb[i]. + ul_gpp_phys, + hio_mgr->ext_proc_info.ty_tlb[i]. + ul_dsp_virt, 0x100000, map_attrs, + NULL); } } if (status) goto func_end; } + map_attrs = 0x00000000; + map_attrs = DSP_MAPLITTLEENDIAN; + map_attrs |= DSP_MAPPHYSICALADDR; + map_attrs |= DSP_MAPELEMSIZE32; + map_attrs |= DSP_MAPDONOTLOCK; + + /* Map the L4 peripherals */ + i = 0; + while (l4_peripheral_table[i].phys_addr) { + status = hio_mgr->intf_fxns->pfn_brd_mem_map + (hio_mgr->hbridge_context, l4_peripheral_table[i].phys_addr, + l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB, + map_attrs, NULL); + if (status) + goto func_end; + i++; + } + for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) { ae_proc[i].ul_dsp_va = 0; ae_proc[i].ul_gpp_pa = 0; @@ -537,12 +688,12 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr) status = -EFAULT; goto func_end; } else { - if (sm_sg->seg0_da > ul_shm_base) { + if (ae_proc[0].ul_dsp_va > ul_shm_base) { status = -EPERM; goto func_end; } /* ul_shm_base may not be at ul_dsp_va address */ - ul_shm_base_offset = (ul_shm_base - sm_sg->seg0_da) * + ul_shm_base_offset = (ul_shm_base - ae_proc[0].ul_dsp_va) * hio_mgr->word_size; /* * bridge_dev_ctrl() will set dev context dsp-mmu info. 
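The two mapping loops above repeatedly pick the largest DSP MMU page size (16 MB, 1 MB, 64 KB or 4 KB) for which the current physical and virtual addresses are both aligned and which still fits in the remaining length, so a large shared-memory segment uses as few TLB entries or PTEs as possible. A stand-alone sketch of that selection, using made-up example addresses:

#include <stdio.h>
#include <stdint.h>

/* DSP MMU page sizes, largest first. */
static const uint32_t page_size[] = { 0x1000000, 0x100000, 0x10000, 0x1000 };

int main(void)
{
	uint32_t pa = 0x87200000;	/* example physical address */
	uint32_t va = 0x20000000;	/* example DSP virtual address */
	uint32_t num_bytes = 0x600000;	/* 6 MB to map */

	while (num_bytes) {
		/* Largest size for which both PA and VA are aligned and
		 * which still fits in what is left to map. */
		uint32_t all_bits = pa | va;
		int i;

		for (i = 0; i < 4; i++) {
			if (num_bytes >= page_size[i] &&
			    (all_bits & (page_size[i] - 1)) == 0) {
				printf("map 0x%08x -> 0x%08x, size 0x%x\n",
				       (unsigned)pa, (unsigned)va,
				       (unsigned)page_size[i]);
				pa += page_size[i];
				va += page_size[i];
				num_bytes -= page_size[i];
				break;
			}
		}
	}
	return 0;
}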
In @@ -566,7 +717,8 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr) goto func_end; } /* Register SM */ - status = register_shm_segs(hio_mgr, cod_man, sm_sg->seg0_pa); + status = + register_shm_segs(hio_mgr, cod_man, ae_proc[0].ul_gpp_pa); } hio_mgr->shared_mem = (struct shm *)ul_shm_base; diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c index f22bc12bc0d..1be081f917a 100644 --- a/drivers/staging/tidspbridge/core/tiomap3430.c +++ b/drivers/staging/tidspbridge/core/tiomap3430.c @@ -23,7 +23,6 @@ #include <dspbridge/host_os.h> #include <linux/mm.h> #include <linux/mmzone.h> -#include <plat/control.h> /* ----------------------------------- DSP/BIOS Bridge */ #include <dspbridge/dbdefs.h> @@ -35,6 +34,10 @@ #include <dspbridge/drv.h> #include <dspbridge/sync.h> +/* ------------------------------------ Hardware Abstraction Layer */ +#include <hw_defs.h> +#include <hw_mmu.h> + /* ----------------------------------- Link Driver */ #include <dspbridge/dspdefs.h> #include <dspbridge/dspchnl.h> @@ -47,6 +50,7 @@ /* ----------------------------------- Platform Manager */ #include <dspbridge/dev.h> #include <dspbridge/dspapi.h> +#include <dspbridge/dmm.h> #include <dspbridge/wdt.h> /* ----------------------------------- Local */ @@ -67,6 +71,20 @@ #define MMU_SMALL_PAGE_MASK 0xFFFFF000 #define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00 #define PAGES_II_LVL_TABLE 512 +#define PHYS_TO_PAGE(phys) pfn_to_page((phys) >> PAGE_SHIFT) + +/* + * This is a totally ugly layer violation, but needed until + * omap_ctrl_set_dsp_boot*() are provided. + */ +#define OMAP3_IVA2_BOOTMOD_IDLE 1 +#define OMAP2_CONTROL_GENERAL 0x270 +#define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190) +#define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194) + +#define OMAP343X_CTRL_REGADDR(reg) \ + OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg)) + /* Forward Declarations: */ static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt); @@ -91,6 +109,12 @@ static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt, static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt, u8 *host_buff, u32 dsp_addr, u32 ul_num_bytes, u32 mem_type); +static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt, + u32 ul_mpu_addr, u32 virt_addr, + u32 ul_num_bytes, u32 ul_map_attr, + struct page **mapped_pages); +static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt, + u32 virt_addr, u32 ul_num_bytes); static int bridge_dev_create(struct bridge_dev_context **dev_cntxt, struct dev_object *hdev_obj, @@ -98,8 +122,57 @@ static int bridge_dev_create(struct bridge_dev_context static int bridge_dev_ctrl(struct bridge_dev_context *dev_context, u32 dw_cmd, void *pargs); static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt); +static u32 user_va2_pa(struct mm_struct *mm, u32 address); +static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa, + u32 va, u32 size, + struct hw_mmu_map_attrs_t *map_attrs); +static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va, + u32 size, struct hw_mmu_map_attrs_t *attrs); +static int mem_map_vmalloc(struct bridge_dev_context *dev_context, + u32 ul_mpu_addr, u32 virt_addr, + u32 ul_num_bytes, + struct hw_mmu_map_attrs_t *hw_attrs); + bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr); +/* ----------------------------------- Globals */ + +/* Attributes of L2 page tables for DSP MMU */ +struct page_info { + u32 num_entries; /* Number of valid PTEs in the 
L2 PT */ +}; + +/* Attributes used to manage the DSP MMU page tables */ +struct pg_table_attrs { + spinlock_t pg_lock; /* Critical section object handle */ + + u32 l1_base_pa; /* Physical address of the L1 PT */ + u32 l1_base_va; /* Virtual address of the L1 PT */ + u32 l1_size; /* Size of the L1 PT */ + u32 l1_tbl_alloc_pa; + /* Physical address of Allocated mem for L1 table. May not be aligned */ + u32 l1_tbl_alloc_va; + /* Virtual address of Allocated mem for L1 table. May not be aligned */ + u32 l1_tbl_alloc_sz; + /* Size of consistent memory allocated for L1 table. + * May not be aligned */ + + u32 l2_base_pa; /* Physical address of the L2 PT */ + u32 l2_base_va; /* Virtual address of the L2 PT */ + u32 l2_size; /* Size of the L2 PT */ + u32 l2_tbl_alloc_pa; + /* Physical address of Allocated mem for L2 table. May not be aligned */ + u32 l2_tbl_alloc_va; + /* Virtual address of Allocated mem for L2 table. May not be aligned */ + u32 l2_tbl_alloc_sz; + /* Size of consistent memory allocated for L2 table. + * May not be aligned */ + + u32 l2_num_pages; /* Number of allocated L2 PT */ + /* Array [l2_num_pages] of L2 PT info structs */ + struct page_info *pg_info; +}; + /* * This Bridge driver's function interface table. */ @@ -119,6 +192,8 @@ static struct bridge_drv_interface drv_interface_fxns = { bridge_brd_set_state, bridge_brd_mem_copy, bridge_brd_mem_write, + bridge_brd_mem_map, + bridge_brd_mem_un_map, /* The following CHNL functions are provided by chnl_io.lib: */ bridge_chnl_create, bridge_chnl_destroy, @@ -148,6 +223,27 @@ static struct bridge_drv_interface drv_interface_fxns = { bridge_msg_set_queue_id, }; +static inline void flush_all(struct bridge_dev_context *dev_context) +{ + if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION || + dev_context->dw_brd_state == BRD_HIBERNATION) + wake_dsp(dev_context, NULL); + + hw_mmu_tlb_flush_all(dev_context->dw_dsp_mmu_base); +} + +static void bad_page_dump(u32 pa, struct page *pg) +{ + pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa); + pr_emerg("Bad page state in process '%s'\n" + "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n" + "Backtrace:\n", + current->comm, pg, (int)(2 * sizeof(unsigned long)), + (unsigned long)pg->flags, pg->mapping, + page_mapcount(pg), page_count(pg)); + dump_stack(); +} + /* * ======== bridge_drv_entry ======== * purpose: @@ -203,7 +299,8 @@ static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt) (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); } - + (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0, + OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); dsp_clk_enable(DSP_CLK_IVA2); /* set the device state to IDLE */ @@ -274,17 +371,14 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, { int status = 0; struct bridge_dev_context *dev_context = dev_ctxt; - struct iommu *mmu = NULL; - struct shm_segs *sm_sg; - int l4_i = 0, tlb_i = 0; - u32 sg0_da = 0, sg1_da = 0; - struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry; u32 dw_sync_addr = 0; u32 ul_shm_base; /* Gpp Phys SM base addr(byte) */ u32 ul_shm_base_virt; /* Dsp Virt SM base addr */ u32 ul_tlb_base_virt; /* Base of MMU TLB entry */ /* Offset of shm_base_virt from tlb_base_virt */ u32 ul_shm_offset_virt; + s32 entry_ndx; + s32 itmp_entry_ndx = 0; /* DSP-MMU TLB entry base address */ struct cfg_hostres *resources = NULL; u32 temp; u32 ul_dsp_clk_rate; @@ -305,12 +399,12 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, ul_shm_base_virt *= 
DSPWORDSIZE; DBC_ASSERT(ul_shm_base_virt != 0); /* DSP Virtual address */ - ul_tlb_base_virt = dev_context->sh_s.seg0_da; + ul_tlb_base_virt = dev_context->atlb_entry[0].ul_dsp_va; DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); ul_shm_offset_virt = ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE); /* Kernel logical address */ - ul_shm_base = dev_context->sh_s.seg0_va + ul_shm_offset_virt; + ul_shm_base = dev_context->atlb_entry[0].ul_gpp_va + ul_shm_offset_virt; DBC_ASSERT(ul_shm_base != 0); /* 2nd wd is used as sync field */ @@ -345,83 +439,78 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, OMAP343X_CONTROL_IVA2_BOOTMOD)); } } - if (!status) { + /* Reset and Unreset the RST2, so that BOOTADDR is copied to + * IVA2 SYSC register */ + (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, + OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); + udelay(100); (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); - mmu = dev_context->dsp_mmu; - if (mmu) - dsp_mmu_exit(mmu); - mmu = dsp_mmu_init(); - if (IS_ERR(mmu)) { - dev_err(bridge, "dsp_mmu_init failed!\n"); - dev_context->dsp_mmu = NULL; - status = (int)mmu; - } - } - if (!status) { - dev_context->dsp_mmu = mmu; - sm_sg = &dev_context->sh_s; - sg0_da = iommu_kmap(mmu, sm_sg->seg0_da, sm_sg->seg0_pa, - sm_sg->seg0_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32); - if (IS_ERR_VALUE(sg0_da)) { - status = (int)sg0_da; - sg0_da = 0; - } - } - if (!status) { - sg1_da = iommu_kmap(mmu, sm_sg->seg1_da, sm_sg->seg1_pa, - sm_sg->seg1_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32); - if (IS_ERR_VALUE(sg1_da)) { - status = (int)sg1_da; - sg1_da = 0; - } - } - if (!status) { - u32 da; - for (tlb_i = 0; tlb_i < BRDIOCTL_NUMOFMMUTLB; tlb_i++) { - if (!tlb[tlb_i].ul_gpp_pa) + udelay(100); + + /* Disbale the DSP MMU */ + hw_mmu_disable(resources->dw_dmmu_base); + /* Disable TWL */ + hw_mmu_twl_disable(resources->dw_dmmu_base); + + /* Only make TLB entry if both addresses are non-zero */ + for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; + entry_ndx++) { + struct bridge_ioctl_extproc *e = &dev_context->atlb_entry[entry_ndx]; + struct hw_mmu_map_attrs_t map_attrs = { + .endianism = e->endianism, + .element_size = e->elem_size, + .mixed_size = e->mixed_mode, + }; + + if (!e->ul_gpp_pa || !e->ul_dsp_va) continue; - dev_dbg(bridge, "IOMMU %d GppPa: 0x%x DspVa 0x%x Size" - " 0x%x\n", tlb_i, tlb[tlb_i].ul_gpp_pa, - tlb[tlb_i].ul_dsp_va, tlb[tlb_i].ul_size); - - da = iommu_kmap(mmu, tlb[tlb_i].ul_dsp_va, - tlb[tlb_i].ul_gpp_pa, PAGE_SIZE, - IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32); - if (IS_ERR_VALUE(da)) { - status = (int)da; - break; - } - } - } - if (!status) { - u32 da; - l4_i = 0; - while (l4_peripheral_table[l4_i].phys_addr) { - da = iommu_kmap(mmu, l4_peripheral_table[l4_i]. - dsp_virt_addr, l4_peripheral_table[l4_i]. 
- phys_addr, PAGE_SIZE, - IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32); - if (IS_ERR_VALUE(da)) { - status = (int)da; - break; - } - l4_i++; + dev_dbg(bridge, + "MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x", + itmp_entry_ndx, + e->ul_gpp_pa, + e->ul_dsp_va, + e->ul_size); + + hw_mmu_tlb_add(dev_context->dw_dsp_mmu_base, + e->ul_gpp_pa, + e->ul_dsp_va, + e->ul_size, + itmp_entry_ndx, + &map_attrs, 1, 1); + + itmp_entry_ndx++; } } /* Lock the above TLB entries and get the BIOS and load monitor timer * information */ if (!status) { + hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx); + hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx); + hw_mmu_ttb_set(resources->dw_dmmu_base, + dev_context->pt_attrs->l1_base_pa); + hw_mmu_twl_enable(resources->dw_dmmu_base); + /* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */ + + temp = __raw_readl((resources->dw_dmmu_base) + 0x10); + temp = (temp & 0xFFFFFFEF) | 0x11; + __raw_writel(temp, (resources->dw_dmmu_base) + 0x10); + + /* Let the DSP MMU run */ + hw_mmu_enable(resources->dw_dmmu_base); + /* Enable the BIOS clock */ (void)dev_get_symbol(dev_context->hdev_obj, BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer); (void)dev_get_symbol(dev_context->hdev_obj, BRIDGEINIT_LOADMON_GPTIMER, &ul_load_monitor_timer); + } + if (!status) { if (ul_load_monitor_timer != 0xFFFF) { clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | ul_load_monitor_timer; @@ -430,7 +519,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, dev_dbg(bridge, "Not able to get the symbol for Load " "Monitor Timer\n"); } + } + if (!status) { if (ul_bios_gp_timer != 0xFFFF) { clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | ul_bios_gp_timer; @@ -439,7 +530,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, dev_dbg(bridge, "Not able to get the symbol for BIOS Timer\n"); } + } + if (!status) { /* Set the DSP clock rate */ (void)dev_get_symbol(dev_context->hdev_obj, "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr); @@ -492,6 +585,9 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, /* Let DSP go */ dev_dbg(bridge, "%s Unreset\n", __func__); + /* Enable DSP MMU Interrupts */ + hw_mmu_event_enable(resources->dw_dmmu_base, + HW_MMU_ALL_INTERRUPTS); /* release the RST1, DSP starts executing now .. 
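The MMU_SYSCONFIG update above is a read-modify-write: bit 4 is cleared first, then 0x11 is OR-ed in, which per the driver's own comment leaves the AutoIdle and SmartIdle configuration set. A tiny sketch of that bit manipulation on an ordinary variable (the starting value is made up; on hardware it would come from __raw_readl()):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t sysconfig = 0x00000014;	/* pretend current register value */

	/* Clear bit 4, then set bits 0 and 4, as in the hunk above. */
	sysconfig = (sysconfig & 0xFFFFFFEF) | 0x11;

	printf("SYSCONFIG -> 0x%08x\n", (unsigned)sysconfig);	/* 0x00000015 */
	return 0;
}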
*/ (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); @@ -521,23 +617,11 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, /* update board state */ dev_context->dw_brd_state = BRD_RUNNING; - return 0; + /* (void)chnlsm_enable_interrupt(dev_context); */ } else { dev_context->dw_brd_state = BRD_UNKNOWN; } } - - while (tlb_i--) { - if (!tlb[tlb_i].ul_gpp_pa) - continue; - iommu_kunmap(mmu, tlb[tlb_i].ul_gpp_va); - } - while (l4_i--) - iommu_kunmap(mmu, l4_peripheral_table[l4_i].dsp_virt_addr); - if (sg0_da) - iommu_kunmap(mmu, sg0_da); - if (sg1_da) - iommu_kunmap(mmu, sg1_da); return status; } @@ -553,9 +637,8 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt) { int status = 0; struct bridge_dev_context *dev_context = dev_ctxt; + struct pg_table_attrs *pt_attrs; u32 dsp_pwr_state; - int i; - struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry; struct omap_dsp_platform_data *pdata = omap_dspbridge_dev->dev.platform_data; @@ -591,37 +674,23 @@ static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt) dsp_wdt_enable(false); - /* Reset DSP */ - (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, - OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); - + /* This is a good place to clear the MMU page tables as well */ + if (dev_context->pt_attrs) { + pt_attrs = dev_context->pt_attrs; + memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size); + memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size); + memset((u8 *) pt_attrs->pg_info, 0x00, + (pt_attrs->l2_num_pages * sizeof(struct page_info))); + } /* Disable the mailbox interrupts */ if (dev_context->mbox) { omap_mbox_disable_irq(dev_context->mbox, IRQ_RX); omap_mbox_put(dev_context->mbox); dev_context->mbox = NULL; } - if (dev_context->dsp_mmu) { - pr_err("Proc stop mmu if statement\n"); - for (i = 0; i < BRDIOCTL_NUMOFMMUTLB; i++) { - if (!tlb[i].ul_gpp_pa) - continue; - iommu_kunmap(dev_context->dsp_mmu, tlb[i].ul_gpp_va); - } - i = 0; - while (l4_peripheral_table[i].phys_addr) { - iommu_kunmap(dev_context->dsp_mmu, - l4_peripheral_table[i].dsp_virt_addr); - i++; - } - iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg0_da); - iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg1_da); - dsp_mmu_exit(dev_context->dsp_mmu); - dev_context->dsp_mmu = NULL; - } - /* Reset IVA IOMMU*/ - (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, - OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); + /* Reset IVA2 clocks*/ + (*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK | + OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); dsp_clock_disable_all(dev_context->dsp_per_clks); dsp_clk_disable(DSP_CLK_IVA2); @@ -681,6 +750,10 @@ static int bridge_dev_create(struct bridge_dev_context struct bridge_dev_context *dev_context = NULL; s32 entry_ndx; struct cfg_hostres *resources = config_param; + struct pg_table_attrs *pt_attrs; + u32 pg_tbl_pa; + u32 pg_tbl_va; + u32 align_size; struct drv_data *drv_datap = dev_get_drvdata(bridge); /* Allocate and initialize a data structure to contain the bridge driver @@ -711,8 +784,97 @@ static int bridge_dev_create(struct bridge_dev_context if (!dev_context->dw_dsp_base_addr) status = -EPERM; + pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL); + if (pt_attrs != NULL) { + /* Assuming that we use only DSP's memory map + * until 0x4000:0000 , we would need only 1024 + * L1 enties i.e L1 size = 4K */ + pt_attrs->l1_size = 0x1000; + align_size = 
pt_attrs->l1_size; + /* Align sizes are expected to be power of 2 */ + /* we like to get aligned on L1 table size */ + pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size, + align_size, &pg_tbl_pa); + + /* Check if the PA is aligned for us */ + if ((pg_tbl_pa) & (align_size - 1)) { + /* PA not aligned to page table size , + * try with more allocation and align */ + mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa, + pt_attrs->l1_size); + /* we like to get aligned on L1 table size */ + pg_tbl_va = + (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2, + align_size, &pg_tbl_pa); + /* We should be able to get aligned table now */ + pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa; + pt_attrs->l1_tbl_alloc_va = pg_tbl_va; + pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2; + /* Align the PA to the next 'align' boundary */ + pt_attrs->l1_base_pa = + ((pg_tbl_pa) + + (align_size - 1)) & (~(align_size - 1)); + pt_attrs->l1_base_va = + pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa); + } else { + /* We got aligned PA, cool */ + pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa; + pt_attrs->l1_tbl_alloc_va = pg_tbl_va; + pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size; + pt_attrs->l1_base_pa = pg_tbl_pa; + pt_attrs->l1_base_va = pg_tbl_va; + } + if (pt_attrs->l1_base_va) + memset((u8 *) pt_attrs->l1_base_va, 0x00, + pt_attrs->l1_size); + + /* number of L2 page tables = DMM pool used + SHMMEM +EXTMEM + + * L4 pages */ + pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6); + pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE * + pt_attrs->l2_num_pages; + align_size = 4; /* Make it u32 aligned */ + /* we like to get aligned on L1 table size */ + pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size, + align_size, &pg_tbl_pa); + pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa; + pt_attrs->l2_tbl_alloc_va = pg_tbl_va; + pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size; + pt_attrs->l2_base_pa = pg_tbl_pa; + pt_attrs->l2_base_va = pg_tbl_va; + + if (pt_attrs->l2_base_va) + memset((u8 *) pt_attrs->l2_base_va, 0x00, + pt_attrs->l2_size); + + pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages * + sizeof(struct page_info), GFP_KERNEL); + dev_dbg(bridge, + "L1 pa %x, va %x, size %x\n L2 pa %x, va " + "%x, size %x\n", pt_attrs->l1_base_pa, + pt_attrs->l1_base_va, pt_attrs->l1_size, + pt_attrs->l2_base_pa, pt_attrs->l2_base_va, + pt_attrs->l2_size); + dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n", + pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info); + } + if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) && + (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL)) + dev_context->pt_attrs = pt_attrs; + else + status = -ENOMEM; + if (!status) { + spin_lock_init(&pt_attrs->pg_lock); dev_context->tc_word_swap_on = drv_datap->tc_wordswapon; + + /* Set the Clock Divisor for the DSP module */ + udelay(5); + /* MMU address is obtained from the host + * resources struct */ + dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base; + } + if (!status) { dev_context->hdev_obj = hdev_obj; /* Store current board state. 
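bridge_dev_create() above needs the 4 KB L1 page table to start on a 4 KB physical boundary; when the first allocation is not aligned, it allocates twice the size and rounds the start address up to the next boundary, remembering the original block so it can be freed later. The same over-allocate-and-round trick with plain malloc(), as a runnable sketch (mem_alloc_phys_mem() is the bridge's own allocator and is not used here):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

int main(void)
{
	size_t l1_size = 0x1000;	/* 4 KB L1 page table */
	size_t align   = l1_size;	/* must sit on a 4 KB boundary */

	/* Over-allocate by 2x so an aligned block of l1_size always fits,
	 * then round the start address up to the next boundary. */
	void *raw = malloc(l1_size * 2);
	if (!raw)
		return 1;

	uintptr_t base = ((uintptr_t)raw + (align - 1)) & ~(uintptr_t)(align - 1);

	printf("raw %p, aligned base %#lx, offset %lu\n",
	       raw, (unsigned long)base,
	       (unsigned long)(base - (uintptr_t)raw));

	free(raw);
	return 0;
}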
*/ dev_context->dw_brd_state = BRD_UNKNOWN; @@ -722,6 +884,23 @@ static int bridge_dev_create(struct bridge_dev_context /* Return ptr to our device state to the DSP API for storage */ *dev_cntxt = dev_context; } else { + if (pt_attrs != NULL) { + kfree(pt_attrs->pg_info); + + if (pt_attrs->l2_tbl_alloc_va) { + mem_free_phys_mem((void *) + pt_attrs->l2_tbl_alloc_va, + pt_attrs->l2_tbl_alloc_pa, + pt_attrs->l2_tbl_alloc_sz); + } + if (pt_attrs->l1_tbl_alloc_va) { + mem_free_phys_mem((void *) + pt_attrs->l1_tbl_alloc_va, + pt_attrs->l1_tbl_alloc_pa, + pt_attrs->l1_tbl_alloc_sz); + } + } + kfree(pt_attrs); kfree(dev_context); } func_end: @@ -789,6 +968,7 @@ static int bridge_dev_ctrl(struct bridge_dev_context *dev_context, */ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt) { + struct pg_table_attrs *pt_attrs; int status = 0; struct bridge_dev_context *dev_context = (struct bridge_dev_context *) dev_ctxt; @@ -802,6 +982,23 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt) /* first put the device to stop state */ bridge_brd_stop(dev_context); + if (dev_context->pt_attrs) { + pt_attrs = dev_context->pt_attrs; + kfree(pt_attrs->pg_info); + + if (pt_attrs->l2_tbl_alloc_va) { + mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va, + pt_attrs->l2_tbl_alloc_pa, + pt_attrs->l2_tbl_alloc_sz); + } + if (pt_attrs->l1_tbl_alloc_va) { + mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va, + pt_attrs->l1_tbl_alloc_pa, + pt_attrs->l1_tbl_alloc_sz); + } + kfree(pt_attrs); + + } if (dev_context->resources) { host_res = dev_context->resources; @@ -832,6 +1029,8 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt) iounmap((void *)host_res->dw_mem_base[3]); if (host_res->dw_mem_base[4]) iounmap((void *)host_res->dw_mem_base[4]); + if (host_res->dw_dmmu_base) + iounmap(host_res->dw_dmmu_base); if (host_res->dw_per_base) iounmap(host_res->dw_per_base); if (host_res->dw_per_pm_base) @@ -845,6 +1044,7 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt) host_res->dw_mem_base[2] = (u32) NULL; host_res->dw_mem_base[3] = (u32) NULL; host_res->dw_mem_base[4] = (u32) NULL; + host_res->dw_dmmu_base = NULL; host_res->dw_sys_ctrl_base = NULL; kfree(host_res); @@ -928,6 +1128,673 @@ static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt, } /* + * ======== bridge_brd_mem_map ======== + * This function maps MPU buffer to the DSP address space. It performs + * linear to physical address translation if required. 
It translates each + * page since linear addresses can be physically non-contiguous + * All address & size arguments are assumed to be page aligned (in proc.c) + * + * TODO: Disable MMU while updating the page tables (but that'll stall DSP) + */ +static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt, + u32 ul_mpu_addr, u32 virt_addr, + u32 ul_num_bytes, u32 ul_map_attr, + struct page **mapped_pages) +{ + u32 attrs; + int status = 0; + struct bridge_dev_context *dev_context = dev_ctxt; + struct hw_mmu_map_attrs_t hw_attrs; + struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; + u32 write = 0; + u32 num_usr_pgs = 0; + struct page *mapped_page, *pg; + s32 pg_num; + u32 va = virt_addr; + struct task_struct *curr_task = current; + u32 pg_i = 0; + u32 mpu_addr, pa; + + dev_dbg(bridge, + "%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n", + __func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes, + ul_map_attr); + if (ul_num_bytes == 0) + return -EINVAL; + + if (ul_map_attr & DSP_MAP_DIR_MASK) { + attrs = ul_map_attr; + } else { + /* Assign default attributes */ + attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16); + } + /* Take mapping properties */ + if (attrs & DSP_MAPBIGENDIAN) + hw_attrs.endianism = HW_BIG_ENDIAN; + else + hw_attrs.endianism = HW_LITTLE_ENDIAN; + + hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t) + ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2); + /* Ignore element_size if mixed_size is enabled */ + if (hw_attrs.mixed_size == 0) { + if (attrs & DSP_MAPELEMSIZE8) { + /* Size is 8 bit */ + hw_attrs.element_size = HW_ELEM_SIZE8BIT; + } else if (attrs & DSP_MAPELEMSIZE16) { + /* Size is 16 bit */ + hw_attrs.element_size = HW_ELEM_SIZE16BIT; + } else if (attrs & DSP_MAPELEMSIZE32) { + /* Size is 32 bit */ + hw_attrs.element_size = HW_ELEM_SIZE32BIT; + } else if (attrs & DSP_MAPELEMSIZE64) { + /* Size is 64 bit */ + hw_attrs.element_size = HW_ELEM_SIZE64BIT; + } else { + /* + * Mixedsize isn't enabled, so size can't be + * zero here + */ + return -EINVAL; + } + } + if (attrs & DSP_MAPDONOTLOCK) + hw_attrs.donotlockmpupage = 1; + else + hw_attrs.donotlockmpupage = 0; + + if (attrs & DSP_MAPVMALLOCADDR) { + return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr, + ul_num_bytes, &hw_attrs); + } + /* + * Do OS-specific user-va to pa translation. + * Combine physically contiguous regions to reduce TLBs. + * Pass the translated pa to pte_update. + */ + if ((attrs & DSP_MAPPHYSICALADDR)) { + status = pte_update(dev_context, ul_mpu_addr, virt_addr, + ul_num_bytes, &hw_attrs); + goto func_cont; + } + + /* + * Important Note: ul_mpu_addr is mapped from user application process + * to current process - it must lie completely within the current + * virtual memory address space in order to be of use to us here! + */ + down_read(&mm->mmap_sem); + vma = find_vma(mm, ul_mpu_addr); + if (vma) + dev_dbg(bridge, + "VMAfor UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, " + "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr, + ul_num_bytes, vma->vm_start, vma->vm_end, + vma->vm_flags); + + /* + * It is observed that under some circumstances, the user buffer is + * spread across several VMAs. 
So loop through and check if the entire + * user buffer is covered + */ + while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) { + /* jump to the next VMA region */ + vma = find_vma(mm, vma->vm_end + 1); + dev_dbg(bridge, + "VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, " + "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr, + ul_num_bytes, vma->vm_start, vma->vm_end, + vma->vm_flags); + } + if (!vma) { + pr_err("%s: Failed to get VMA region for 0x%x (%d)\n", + __func__, ul_mpu_addr, ul_num_bytes); + status = -EINVAL; + up_read(&mm->mmap_sem); + goto func_cont; + } + + if (vma->vm_flags & VM_IO) { + num_usr_pgs = ul_num_bytes / PG_SIZE4K; + mpu_addr = ul_mpu_addr; + + /* Get the physical addresses for user buffer */ + for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) { + pa = user_va2_pa(mm, mpu_addr); + if (!pa) { + status = -EPERM; + pr_err("DSPBRIDGE: VM_IO mapping physical" + "address is invalid\n"); + break; + } + if (pfn_valid(__phys_to_pfn(pa))) { + pg = PHYS_TO_PAGE(pa); + get_page(pg); + if (page_count(pg) < 1) { + pr_err("Bad page in VM_IO buffer\n"); + bad_page_dump(pa, pg); + } + } + status = pte_set(dev_context->pt_attrs, pa, + va, HW_PAGE_SIZE4KB, &hw_attrs); + if (status) + break; + + va += HW_PAGE_SIZE4KB; + mpu_addr += HW_PAGE_SIZE4KB; + pa += HW_PAGE_SIZE4KB; + } + } else { + num_usr_pgs = ul_num_bytes / PG_SIZE4K; + if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) + write = 1; + + for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) { + pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1, + write, 1, &mapped_page, NULL); + if (pg_num > 0) { + if (page_count(mapped_page) < 1) { + pr_err("Bad page count after doing" + "get_user_pages on" + "user buffer\n"); + bad_page_dump(page_to_phys(mapped_page), + mapped_page); + } + status = pte_set(dev_context->pt_attrs, + page_to_phys(mapped_page), va, + HW_PAGE_SIZE4KB, &hw_attrs); + if (status) + break; + + if (mapped_pages) + mapped_pages[pg_i] = mapped_page; + + va += HW_PAGE_SIZE4KB; + ul_mpu_addr += HW_PAGE_SIZE4KB; + } else { + pr_err("DSPBRIDGE: get_user_pages FAILED," + "MPU addr = 0x%x," + "vma->vm_flags = 0x%lx," + "get_user_pages Err" + "Value = %d, Buffer" + "size=0x%x\n", ul_mpu_addr, + vma->vm_flags, pg_num, ul_num_bytes); + status = -EPERM; + break; + } + } + } + up_read(&mm->mmap_sem); +func_cont: + if (status) { + /* + * Roll out the mapped pages incase it failed in middle of + * mapping + */ + if (pg_i) { + bridge_brd_mem_un_map(dev_context, virt_addr, + (pg_i * PG_SIZE4K)); + } + status = -EPERM; + } + /* + * In any case, flush the TLB + * This is called from here instead from pte_update to avoid unnecessary + * repetition while mapping non-contiguous physical regions of a virtual + * region + */ + flush_all(dev_context); + dev_dbg(bridge, "%s status %x\n", __func__, status); + return status; +} + +/* + * ======== bridge_brd_mem_un_map ======== + * Invalidate the PTEs for the DSP VA block to be unmapped. 
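When bridge_brd_mem_map() above fails part-way through, it unmaps the pg_i pages it had already mapped before returning, so a failed call leaves no half-mapped region behind. A generic user-space sketch of that roll-back-on-error pattern, with malloc/free standing in for map/unmap and the failing index simulated:

#include <stdio.h>
#include <stdlib.h>

#define NPAGES 8

int main(void)
{
	void *pages[NPAGES] = { NULL };
	int i;

	for (i = 0; i < NPAGES; i++) {
		/* Pretend the 6th "mapping" fails. */
		pages[i] = (i == 5) ? NULL : malloc(4096);
		if (!pages[i])
			break;
	}

	if (i < NPAGES) {
		/* Roll back everything done before the failure, the way the
		 * driver calls bridge_brd_mem_un_map() for pg_i pages. */
		while (i--)
			free(pages[i]);
		fprintf(stderr, "mapping failed, partial work undone\n");
		return 1;
	}

	for (i = 0; i < NPAGES; i++)
		free(pages[i]);
	return 0;
}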
+ * + * PTEs of a mapped memory block are contiguous in any page table + * So, instead of looking up the PTE address for every 4K block, + * we clear consecutive PTEs until we unmap all the bytes + */ +static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt, + u32 virt_addr, u32 ul_num_bytes) +{ + u32 l1_base_va; + u32 l2_base_va; + u32 l2_base_pa; + u32 l2_page_num; + u32 pte_val; + u32 pte_size; + u32 pte_count; + u32 pte_addr_l1; + u32 pte_addr_l2 = 0; + u32 rem_bytes; + u32 rem_bytes_l2; + u32 va_curr; + struct page *pg = NULL; + int status = 0; + struct bridge_dev_context *dev_context = dev_ctxt; + struct pg_table_attrs *pt = dev_context->pt_attrs; + u32 temp; + u32 paddr; + u32 numof4k_pages = 0; + + va_curr = virt_addr; + rem_bytes = ul_num_bytes; + rem_bytes_l2 = 0; + l1_base_va = pt->l1_base_va; + pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr); + dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, " + "pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr, + ul_num_bytes, l1_base_va, pte_addr_l1); + + while (rem_bytes && !status) { + u32 va_curr_orig = va_curr; + /* Find whether the L1 PTE points to a valid L2 PT */ + pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr); + pte_val = *(u32 *) pte_addr_l1; + pte_size = hw_mmu_pte_size_l1(pte_val); + + if (pte_size != HW_MMU_COARSE_PAGE_SIZE) + goto skip_coarse_page; + + /* + * Get the L2 PA from the L1 PTE, and find + * corresponding L2 VA + */ + l2_base_pa = hw_mmu_pte_coarse_l1(pte_val); + l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va; + l2_page_num = + (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE; + /* + * Find the L2 PTE address from which we will start + * clearing, the number of PTEs to be cleared on this + * page, and the size of VA space that needs to be + * cleared on this L2 page + */ + pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr); + pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1); + pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32); + if (rem_bytes < (pte_count * PG_SIZE4K)) + pte_count = rem_bytes / PG_SIZE4K; + rem_bytes_l2 = pte_count * PG_SIZE4K; + + /* + * Unmap the VA space on this L2 PT. A quicker way + * would be to clear pte_count entries starting from + * pte_addr_l2. However, below code checks that we don't + * clear invalid entries or less than 64KB for a 64KB + * entry. Similar checking is done for L1 PTEs too + * below + */ + while (rem_bytes_l2 && !status) { + pte_val = *(u32 *) pte_addr_l2; + pte_size = hw_mmu_pte_size_l2(pte_val); + /* va_curr aligned to pte_size? 
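bridge_brd_mem_un_map() above clears consecutive L2 PTEs instead of re-walking the tables for every 4 KB page: from the offset of the current PTE inside its coarse table (assumed here to be the OMAP's 1 KB, 256-entry L2 table) it derives how many slots remain, then clamps that by the bytes left to unmap. A stand-alone sketch of that arithmetic with made-up numbers:

#include <stdio.h>
#include <stdint.h>

#define COARSE_PT_SIZE 0x400u	/* assumed: 256 entries x 4 bytes */
#define PG_SIZE4K 0x1000u

int main(void)
{
	uint32_t pte_addr_l2 = 0x12345FF0;	/* made-up address of a PTE */
	uint32_t rem_bytes = 0x20000;		/* bytes still to unmap */

	/* Offset into the current L2 table, then the number of PTE slots
	 * left before the table ends. */
	uint32_t offset = pte_addr_l2 & (COARSE_PT_SIZE - 1);
	uint32_t pte_count = (COARSE_PT_SIZE - offset) / sizeof(uint32_t);

	if (rem_bytes < pte_count * PG_SIZE4K)
		pte_count = rem_bytes / PG_SIZE4K;

	printf("clear %u PTEs, i.e. 0x%x bytes of DSP VA\n",
	       (unsigned)pte_count, (unsigned)(pte_count * PG_SIZE4K));
	return 0;
}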
*/ + if (pte_size == 0 || rem_bytes_l2 < pte_size || + va_curr & (pte_size - 1)) { + status = -EPERM; + break; + } + + /* Collect Physical addresses from VA */ + paddr = (pte_val & ~(pte_size - 1)); + if (pte_size == HW_PAGE_SIZE64KB) + numof4k_pages = 16; + else + numof4k_pages = 1; + temp = 0; + while (temp++ < numof4k_pages) { + if (!pfn_valid(__phys_to_pfn(paddr))) { + paddr += HW_PAGE_SIZE4KB; + continue; + } + pg = PHYS_TO_PAGE(paddr); + if (page_count(pg) < 1) { + pr_info("DSPBRIDGE: UNMAP function: " + "COUNT 0 FOR PA 0x%x, size = " + "0x%x\n", paddr, ul_num_bytes); + bad_page_dump(paddr, pg); + } else { + set_page_dirty(pg); + page_cache_release(pg); + } + paddr += HW_PAGE_SIZE4KB; + } + if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) { + status = -EPERM; + goto EXIT_LOOP; + } + + status = 0; + rem_bytes_l2 -= pte_size; + va_curr += pte_size; + pte_addr_l2 += (pte_size >> 12) * sizeof(u32); + } + spin_lock(&pt->pg_lock); + if (rem_bytes_l2 == 0) { + pt->pg_info[l2_page_num].num_entries -= pte_count; + if (pt->pg_info[l2_page_num].num_entries == 0) { + /* + * Clear the L1 PTE pointing to the L2 PT + */ + if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig, + HW_MMU_COARSE_PAGE_SIZE)) + status = 0; + else { + status = -EPERM; + spin_unlock(&pt->pg_lock); + goto EXIT_LOOP; + } + } + rem_bytes -= pte_count * PG_SIZE4K; + } else + status = -EPERM; + + spin_unlock(&pt->pg_lock); + continue; +skip_coarse_page: + /* va_curr aligned to pte_size? */ + /* pte_size = 1 MB or 16 MB */ + if (pte_size == 0 || rem_bytes < pte_size || + va_curr & (pte_size - 1)) { + status = -EPERM; + break; + } + + if (pte_size == HW_PAGE_SIZE1MB) + numof4k_pages = 256; + else + numof4k_pages = 4096; + temp = 0; + /* Collect Physical addresses from VA */ + paddr = (pte_val & ~(pte_size - 1)); + while (temp++ < numof4k_pages) { + if (pfn_valid(__phys_to_pfn(paddr))) { + pg = PHYS_TO_PAGE(paddr); + if (page_count(pg) < 1) { + pr_info("DSPBRIDGE: UNMAP function: " + "COUNT 0 FOR PA 0x%x, size = " + "0x%x\n", paddr, ul_num_bytes); + bad_page_dump(paddr, pg); + } else { + set_page_dirty(pg); + page_cache_release(pg); + } + } + paddr += HW_PAGE_SIZE4KB; + } + if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) { + status = 0; + rem_bytes -= pte_size; + va_curr += pte_size; + } else { + status = -EPERM; + goto EXIT_LOOP; + } + } + /* + * It is better to flush the TLB here, so that any stale old entries + * get flushed + */ +EXIT_LOOP: + flush_all(dev_context); + dev_dbg(bridge, + "%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x," + " rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1, + pte_addr_l2, rem_bytes, rem_bytes_l2, status); + return status; +} + +/* + * ======== user_va2_pa ======== + * Purpose: + * This function walks through the page tables to convert a userland + * virtual address to physical address + */ +static u32 user_va2_pa(struct mm_struct *mm, u32 address) +{ + pgd_t *pgd; + pmd_t *pmd; + pte_t *ptep, pte; + + pgd = pgd_offset(mm, address); + if (!(pgd_none(*pgd) || pgd_bad(*pgd))) { + pmd = pmd_offset(pgd, address); + if (!(pmd_none(*pmd) || pmd_bad(*pmd))) { + ptep = pte_offset_map(pmd, address); + if (ptep) { + pte = *ptep; + if (pte_present(pte)) + return pte & PAGE_MASK; + } + } + } + + return 0; +} + +/* + * ======== pte_update ======== + * This function calculates the optimum page-aligned addresses and sizes + * Caller must pass page-aligned values + */ +static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa, + u32 va, u32 size, + struct hw_mmu_map_attrs_t 
*map_attrs) +{ + u32 i; + u32 all_bits; + u32 pa_curr = pa; + u32 va_curr = va; + u32 num_bytes = size; + struct bridge_dev_context *dev_context = dev_ctxt; + int status = 0; + u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB, + HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB + }; + + while (num_bytes && !status) { + /* To find the max. page size with which both PA & VA are + * aligned */ + all_bits = pa_curr | va_curr; + + for (i = 0; i < 4; i++) { + if ((num_bytes >= page_size[i]) && ((all_bits & + (page_size[i] - + 1)) == 0)) { + status = + pte_set(dev_context->pt_attrs, pa_curr, + va_curr, page_size[i], map_attrs); + pa_curr += page_size[i]; + va_curr += page_size[i]; + num_bytes -= page_size[i]; + /* Don't try smaller sizes. Hopefully we have + * reached an address aligned to a bigger page + * size */ + break; + } + } + } + + return status; +} + +/* + * ======== pte_set ======== + * This function calculates PTE address (MPU virtual) to be updated + * It also manages the L2 page tables + */ +static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va, + u32 size, struct hw_mmu_map_attrs_t *attrs) +{ + u32 i; + u32 pte_val; + u32 pte_addr_l1; + u32 pte_size; + /* Base address of the PT that will be updated */ + u32 pg_tbl_va; + u32 l1_base_va; + /* Compiler warns that the next three variables might be used + * uninitialized in this function. Doesn't seem so. Working around, + * anyways. */ + u32 l2_base_va = 0; + u32 l2_base_pa = 0; + u32 l2_page_num = 0; + int status = 0; + + l1_base_va = pt->l1_base_va; + pg_tbl_va = l1_base_va; + if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) { + /* Find whether the L1 PTE points to a valid L2 PT */ + pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va); + if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) { + pte_val = *(u32 *) pte_addr_l1; + pte_size = hw_mmu_pte_size_l1(pte_val); + } else { + return -EPERM; + } + spin_lock(&pt->pg_lock); + if (pte_size == HW_MMU_COARSE_PAGE_SIZE) { + /* Get the L2 PA from the L1 PTE, and find + * corresponding L2 VA */ + l2_base_pa = hw_mmu_pte_coarse_l1(pte_val); + l2_base_va = + l2_base_pa - pt->l2_base_pa + pt->l2_base_va; + l2_page_num = + (l2_base_pa - + pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE; + } else if (pte_size == 0) { + /* L1 PTE is invalid. Allocate a L2 PT and + * point the L1 PTE to it */ + /* Find a free L2 PT. */ + for (i = 0; (i < pt->l2_num_pages) && + (pt->pg_info[i].num_entries != 0); i++) + ;; + if (i < pt->l2_num_pages) { + l2_page_num = i; + l2_base_pa = pt->l2_base_pa + (l2_page_num * + HW_MMU_COARSE_PAGE_SIZE); + l2_base_va = pt->l2_base_va + (l2_page_num * + HW_MMU_COARSE_PAGE_SIZE); + /* Endianness attributes are ignored for + * HW_MMU_COARSE_PAGE_SIZE */ + status = + hw_mmu_pte_set(l1_base_va, l2_base_pa, va, + HW_MMU_COARSE_PAGE_SIZE, + attrs); + } else { + status = -ENOMEM; + } + } else { + /* Found valid L1 PTE of another size. + * Should not overwrite it. 
*/ + status = -EPERM; + } + if (!status) { + pg_tbl_va = l2_base_va; + if (size == HW_PAGE_SIZE64KB) + pt->pg_info[l2_page_num].num_entries += 16; + else + pt->pg_info[l2_page_num].num_entries++; + dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum " + "%x, num_entries %x\n", l2_base_va, + l2_base_pa, l2_page_num, + pt->pg_info[l2_page_num].num_entries); + } + spin_unlock(&pt->pg_lock); + } + if (!status) { + dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n", + pg_tbl_va, pa, va, size); + dev_dbg(bridge, "PTE: endianism %x, element_size %x, " + "mixed_size %x\n", attrs->endianism, + attrs->element_size, attrs->mixed_size); + status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs); + } + + return status; +} + +/* Memory map kernel VA -- memory allocated with vmalloc */ +static int mem_map_vmalloc(struct bridge_dev_context *dev_context, + u32 ul_mpu_addr, u32 virt_addr, + u32 ul_num_bytes, + struct hw_mmu_map_attrs_t *hw_attrs) +{ + int status = 0; + struct page *page[1]; + u32 i; + u32 pa_curr; + u32 pa_next; + u32 va_curr; + u32 size_curr; + u32 num_pages; + u32 pa; + u32 num_of4k_pages; + u32 temp = 0; + + /* + * Do Kernel va to pa translation. + * Combine physically contiguous regions to reduce TLBs. + * Pass the translated pa to pte_update. + */ + num_pages = ul_num_bytes / PAGE_SIZE; /* PAGE_SIZE = OS page size */ + i = 0; + va_curr = ul_mpu_addr; + page[0] = vmalloc_to_page((void *)va_curr); + pa_next = page_to_phys(page[0]); + while (!status && (i < num_pages)) { + /* + * Reuse pa_next from the previous iteraion to avoid + * an extra va2pa call + */ + pa_curr = pa_next; + size_curr = PAGE_SIZE; + /* + * If the next page is physically contiguous, + * map it with the current one by increasing + * the size of the region to be mapped + */ + while (++i < num_pages) { + page[0] = + vmalloc_to_page((void *)(va_curr + size_curr)); + pa_next = page_to_phys(page[0]); + + if (pa_next == (pa_curr + size_curr)) + size_curr += PAGE_SIZE; + else + break; + + } + if (pa_next == 0) { + status = -ENOMEM; + break; + } + pa = pa_curr; + num_of4k_pages = size_curr / HW_PAGE_SIZE4KB; + while (temp++ < num_of4k_pages) { + get_page(PHYS_TO_PAGE(pa)); + pa += HW_PAGE_SIZE4KB; + } + status = pte_update(dev_context, pa_curr, virt_addr + + (va_curr - ul_mpu_addr), size_curr, + hw_attrs); + va_curr += size_curr; + } + /* + * In any case, flush the TLB + * This is called from here instead from pte_update to avoid unnecessary + * repetition while mapping non-contiguous physical regions of a virtual + * region + */ + flush_all(dev_context); + dev_dbg(bridge, "%s status %x\n", __func__, status); + return status; +} + +/* * ======== wait_for_start ======== * Wait for the singal from DSP that it has started, or time out. 
*/ diff --git a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c index b57a9fd5e75..fb9026e1403 100644 --- a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c +++ b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c @@ -31,6 +31,10 @@ #include <dspbridge/dev.h> #include <dspbridge/iodefs.h> +/* ------------------------------------ Hardware Abstraction Layer */ +#include <hw_defs.h> +#include <hw_mmu.h> + #include <dspbridge/pwr_sh.h> /* ----------------------------------- Bridge Driver */ diff --git a/drivers/staging/tidspbridge/core/tiomap_io.c b/drivers/staging/tidspbridge/core/tiomap_io.c index 66dbf02549e..ba2961049da 100644 --- a/drivers/staging/tidspbridge/core/tiomap_io.c +++ b/drivers/staging/tidspbridge/core/tiomap_io.c @@ -134,16 +134,17 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt, if (!status) { ul_tlb_base_virt = - dev_context->sh_s.seg0_da * DSPWORDSIZE; + dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE; DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); - dw_ext_prog_virt_mem = dev_context->sh_s.seg0_va; + dw_ext_prog_virt_mem = + dev_context->atlb_entry[0].ul_gpp_va; if (!trace_read) { ul_shm_offset_virt = ul_shm_base_virt - ul_tlb_base_virt; ul_shm_offset_virt += PG_ALIGN_HIGH(ul_ext_end - ul_dyn_ext_base + - 1, PAGE_SIZE * 16); + 1, HW_PAGE_SIZE64KB); dw_ext_prog_virt_mem -= ul_shm_offset_virt; dw_ext_prog_virt_mem += (ul_ext_base - ul_dyn_ext_base); @@ -317,9 +318,8 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context, ret = -EPERM; if (!ret) { - ul_tlb_base_virt = dev_context->sh_s.seg0_da * - DSPWORDSIZE; - + ul_tlb_base_virt = + dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE; DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); if (symbols_reloaded) { @@ -337,7 +337,7 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context, ul_shm_base_virt - ul_tlb_base_virt; if (trace_load) { dw_ext_prog_virt_mem = - dev_context->sh_s.seg0_va; + dev_context->atlb_entry[0].ul_gpp_va; } else { dw_ext_prog_virt_mem = host_res->dw_mem_base[1]; dw_ext_prog_virt_mem += @@ -393,6 +393,7 @@ int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val) omap_dspbridge_dev->dev.platform_data; struct cfg_hostres *resources = dev_context->resources; int status = 0; + u32 temp; if (!dev_context->mbox) return 0; @@ -436,7 +437,7 @@ int sm_interrupt_dsp(struct bridge_dev_context *dev_context, u16 mb_val) omap_mbox_restore_ctx(dev_context->mbox); /* Access MMU SYS CONFIG register to generate a short wakeup */ - iommu_read_reg(dev_context->dsp_mmu, MMU_SYSCONFIG); + temp = readl(resources->dw_dmmu_base + 0x10); dev_context->dw_brd_state = BRD_RUNNING; } else if (dev_context->dw_brd_state == BRD_RETENTION) { diff --git a/drivers/staging/tidspbridge/core/ue_deh.c b/drivers/staging/tidspbridge/core/ue_deh.c index e24ea0c7391..3430418190d 100644 --- a/drivers/staging/tidspbridge/core/ue_deh.c +++ b/drivers/staging/tidspbridge/core/ue_deh.c @@ -31,6 +31,57 @@ #include <dspbridge/drv.h> #include <dspbridge/wdt.h> +static u32 fault_addr; + +static void mmu_fault_dpc(unsigned long data) +{ + struct deh_mgr *deh = (void *)data; + + if (!deh) + return; + + bridge_deh_notify(deh, DSP_MMUFAULT, 0); +} + +static irqreturn_t mmu_fault_isr(int irq, void *data) +{ + struct deh_mgr *deh = data; + struct cfg_hostres *resources; + u32 event; + + if (!deh) + return IRQ_HANDLED; + + resources = deh->hbridge_context->resources; + if (!resources) { + dev_dbg(bridge, "%s: Failed to get Host Resources\n", + __func__); + 
return IRQ_HANDLED; + } + + hw_mmu_event_status(resources->dw_dmmu_base, &event); + if (event == HW_MMU_TRANSLATION_FAULT) { + hw_mmu_fault_addr_read(resources->dw_dmmu_base, &fault_addr); + dev_dbg(bridge, "%s: event=0x%x, fault_addr=0x%x\n", __func__, + event, fault_addr); + /* + * Schedule a DPC directly. In the future, it may be + * necessary to check if DSP MMU fault is intended for + * Bridge. + */ + tasklet_schedule(&deh->dpc_tasklet); + + /* Disable the MMU events, else once we clear it will + * start to raise INTs again */ + hw_mmu_event_disable(resources->dw_dmmu_base, + HW_MMU_TRANSLATION_FAULT); + } else { + hw_mmu_event_disable(resources->dw_dmmu_base, + HW_MMU_ALL_INTERRUPTS); + } + return IRQ_HANDLED; +} + int bridge_deh_create(struct deh_mgr **ret_deh, struct dev_object *hdev_obj) { @@ -58,9 +109,18 @@ int bridge_deh_create(struct deh_mgr **ret_deh, } ntfy_init(deh->ntfy_obj); + /* Create a MMUfault DPC */ + tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (u32) deh); + /* Fill in context structure */ deh->hbridge_context = hbridge_context; + /* Install ISR function for DSP MMU fault */ + status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0, + "DspBridge\tiommu fault", deh); + if (status < 0) + goto err; + *ret_deh = deh; return 0; @@ -80,6 +140,11 @@ int bridge_deh_destroy(struct deh_mgr *deh) ntfy_delete(deh->ntfy_obj); kfree(deh->ntfy_obj); } + /* Disable DSP MMU fault */ + free_irq(INT_DSP_MMU_IRQ, deh); + + /* Free DPC object */ + tasklet_kill(&deh->dpc_tasklet); /* Deallocate the DEH manager object */ kfree(deh); @@ -101,6 +166,48 @@ int bridge_deh_register_notify(struct deh_mgr *deh, u32 event_mask, return ntfy_unregister(deh->ntfy_obj, hnotification); } +#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE +static void mmu_fault_print_stack(struct bridge_dev_context *dev_context) +{ + struct cfg_hostres *resources; + struct hw_mmu_map_attrs_t map_attrs = { + .endianism = HW_LITTLE_ENDIAN, + .element_size = HW_ELEM_SIZE16BIT, + .mixed_size = HW_MMU_CPUES, + }; + void *dummy_va_addr; + + resources = dev_context->resources; + dummy_va_addr = (void*)__get_free_page(GFP_ATOMIC); + + /* + * Before acking the MMU fault, let's make sure MMU can only + * access entry #0. Then add a new entry so that the DSP OS + * can continue in order to dump the stack. 
+ */ + hw_mmu_twl_disable(resources->dw_dmmu_base); + hw_mmu_tlb_flush_all(resources->dw_dmmu_base); + + hw_mmu_tlb_add(resources->dw_dmmu_base, + virt_to_phys(dummy_va_addr), fault_addr, + HW_PAGE_SIZE4KB, 1, + &map_attrs, HW_SET, HW_SET); + + dsp_clk_enable(DSP_CLK_GPT8); + + dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe); + + /* Clear MMU interrupt */ + hw_mmu_event_ack(resources->dw_dmmu_base, + HW_MMU_TRANSLATION_FAULT); + dump_dsp_stack(dev_context); + dsp_clk_disable(DSP_CLK_GPT8); + + hw_mmu_disable(resources->dw_dmmu_base); + free_page((unsigned long)dummy_va_addr); +} +#endif + static inline const char *event_to_string(int event) { switch (event) { @@ -133,7 +240,13 @@ void bridge_deh_notify(struct deh_mgr *deh, int event, int info) #endif break; case DSP_MMUFAULT: - dev_err(bridge, "%s: %s, addr=0x%x", __func__, str, info); + dev_err(bridge, "%s: %s, addr=0x%x", __func__, + str, fault_addr); +#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE + print_dsp_trace_buffer(dev_context); + dump_dl_modules(dev_context); + mmu_fault_print_stack(dev_context); +#endif break; default: dev_err(bridge, "%s: %s", __func__, str); diff --git a/drivers/staging/tidspbridge/hw/EasiGlobal.h b/drivers/staging/tidspbridge/hw/EasiGlobal.h new file mode 100644 index 00000000000..e48d7f67c60 --- /dev/null +++ b/drivers/staging/tidspbridge/hw/EasiGlobal.h @@ -0,0 +1,41 @@ +/* + * EasiGlobal.h + * + * DSP-BIOS Bridge driver support functions for TI OMAP processors. + * + * Copyright (C) 2007 Texas Instruments, Inc. + * + * This package is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + */ + +#ifndef _EASIGLOBAL_H +#define _EASIGLOBAL_H +#include <linux/types.h> + +/* + * DEFINE: READ_ONLY, WRITE_ONLY & READ_WRITE + * + * DESCRIPTION: Defines used to describe register types for EASI-checker tests. + */ + +#define READ_ONLY 1 +#define WRITE_ONLY 2 +#define READ_WRITE 3 + +/* + * MACRO: _DEBUG_LEVEL1_EASI + * + * DESCRIPTION: A MACRO which can be used to indicate that a particular beach + * register access function was called. + * + * NOTE: We currently dont use this functionality. + */ +#define _DEBUG_LEVEL1_EASI(easi_num) ((void)0) + +#endif /* _EASIGLOBAL_H */ diff --git a/drivers/staging/tidspbridge/hw/MMUAccInt.h b/drivers/staging/tidspbridge/hw/MMUAccInt.h new file mode 100644 index 00000000000..1cefca321d7 --- /dev/null +++ b/drivers/staging/tidspbridge/hw/MMUAccInt.h @@ -0,0 +1,76 @@ +/* + * MMUAccInt.h + * + * DSP-BIOS Bridge driver support functions for TI OMAP processors. + * + * Copyright (C) 2007 Texas Instruments, Inc. + * + * This package is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
+ */ + +#ifndef _MMU_ACC_INT_H +#define _MMU_ACC_INT_H + +/* Mappings of level 1 EASI function numbers to function names */ + +#define EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32 (MMU_BASE_EASIL1 + 3) +#define EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32 (MMU_BASE_EASIL1 + 17) +#define EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32 (MMU_BASE_EASIL1 + 39) +#define EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 51) +#define EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32 (MMU_BASE_EASIL1 + 102) +#define EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 103) +#define EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32 (MMU_BASE_EASIL1 + 156) +#define EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32 (MMU_BASE_EASIL1 + 174) +#define EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32 (MMU_BASE_EASIL1 + 180) +#define EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32 (MMU_BASE_EASIL1 + 190) +#define EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32 (MMU_BASE_EASIL1 + 194) +#define EASIL1_MMUMMU_TTB_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 198) +#define EASIL1_MMUMMU_LOCK_READ_REGISTER32 (MMU_BASE_EASIL1 + 203) +#define EASIL1_MMUMMU_LOCK_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 204) +#define EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32 (MMU_BASE_EASIL1 + 205) +#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32 (MMU_BASE_EASIL1 + 209) +#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32 (MMU_BASE_EASIL1 + 211) +#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32 (MMU_BASE_EASIL1 + 212) +#define EASIL1_MMUMMU_LD_TLB_READ_REGISTER32 (MMU_BASE_EASIL1 + 213) +#define EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 214) +#define EASIL1_MMUMMU_CAM_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 226) +#define EASIL1_MMUMMU_RAM_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 268) +#define EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 322) + +/* Register offset address definitions */ +#define MMU_MMU_SYSCONFIG_OFFSET 0x10 +#define MMU_MMU_IRQSTATUS_OFFSET 0x18 +#define MMU_MMU_IRQENABLE_OFFSET 0x1c +#define MMU_MMU_WALKING_ST_OFFSET 0x40 +#define MMU_MMU_CNTL_OFFSET 0x44 +#define MMU_MMU_FAULT_AD_OFFSET 0x48 +#define MMU_MMU_TTB_OFFSET 0x4c +#define MMU_MMU_LOCK_OFFSET 0x50 +#define MMU_MMU_LD_TLB_OFFSET 0x54 +#define MMU_MMU_CAM_OFFSET 0x58 +#define MMU_MMU_RAM_OFFSET 0x5c +#define MMU_MMU_GFLUSH_OFFSET 0x60 +#define MMU_MMU_FLUSH_ENTRY_OFFSET 0x64 +/* Bitfield mask and offset declarations */ +#define MMU_MMU_SYSCONFIG_IDLE_MODE_MASK 0x18 +#define MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET 3 +#define MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK 0x1 +#define MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET 0 +#define MMU_MMU_WALKING_ST_TWL_RUNNING_MASK 0x1 +#define MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET 0 +#define MMU_MMU_CNTL_TWL_ENABLE_MASK 0x4 +#define MMU_MMU_CNTL_TWL_ENABLE_OFFSET 2 +#define MMU_MMU_CNTL_MMU_ENABLE_MASK 0x2 +#define MMU_MMU_CNTL_MMU_ENABLE_OFFSET 1 +#define MMU_MMU_LOCK_BASE_VALUE_MASK 0xfc00 +#define MMU_MMU_LOCK_BASE_VALUE_OFFSET 10 +#define MMU_MMU_LOCK_CURRENT_VICTIM_MASK 0x3f0 +#define MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET 4 + +#endif /* _MMU_ACC_INT_H */ diff --git a/drivers/staging/tidspbridge/hw/MMURegAcM.h b/drivers/staging/tidspbridge/hw/MMURegAcM.h new file mode 100644 index 00000000000..ab1a16da731 --- /dev/null +++ b/drivers/staging/tidspbridge/hw/MMURegAcM.h @@ -0,0 +1,225 @@ +/* + * MMURegAcM.h + * + * DSP-BIOS Bridge driver support functions for TI OMAP processors. + * + * Copyright (C) 2007 Texas Instruments, Inc. 
+ * + * This package is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + */ + +#ifndef _MMU_REG_ACM_H +#define _MMU_REG_ACM_H + +#include <linux/io.h> +#include <EasiGlobal.h> + +#include "MMUAccInt.h" + +#if defined(USE_LEVEL_1_MACROS) + +#define MMUMMU_SYSCONFIG_READ_REGISTER32(base_address)\ + (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32),\ + __raw_readl((base_address)+MMU_MMU_SYSCONFIG_OFFSET)) + +#define MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32(base_address, value)\ +{\ + const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\ + register u32 data = __raw_readl((base_address)+offset);\ + register u32 new_value = (value);\ + _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32);\ + data &= ~(MMU_MMU_SYSCONFIG_IDLE_MODE_MASK);\ + new_value <<= MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET;\ + new_value &= MMU_MMU_SYSCONFIG_IDLE_MODE_MASK;\ + new_value |= data;\ + __raw_writel(new_value, base_address+offset);\ +} + +#define MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32(base_address, value)\ +{\ + const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\ + register u32 data = __raw_readl((base_address)+offset);\ + register u32 new_value = (value);\ + _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32);\ + data &= ~(MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK);\ + new_value <<= MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET;\ + new_value &= MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK;\ + new_value |= data;\ + __raw_writel(new_value, base_address+offset);\ +} + +#define MMUMMU_IRQSTATUS_READ_REGISTER32(base_address)\ + (_DEBUG_LEVEL1_EASI(easil1_mmummu_irqstatus_read_register32),\ + __raw_readl((base_address)+MMU_MMU_IRQSTATUS_OFFSET)) + +#define MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, value)\ +{\ + const u32 offset = MMU_MMU_IRQSTATUS_OFFSET;\ + register u32 new_value = (value);\ + _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32);\ + __raw_writel(new_value, (base_address)+offset);\ +} + +#define MMUMMU_IRQENABLE_READ_REGISTER32(base_address)\ + (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32),\ + __raw_readl((base_address)+MMU_MMU_IRQENABLE_OFFSET)) + +#define MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, value)\ +{\ + const u32 offset = MMU_MMU_IRQENABLE_OFFSET;\ + register u32 new_value = (value);\ + _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32);\ + __raw_writel(new_value, (base_address)+offset);\ +} + +#define MMUMMU_WALKING_STTWL_RUNNING_READ32(base_address)\ + (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32),\ + (((__raw_readl(((base_address)+(MMU_MMU_WALKING_ST_OFFSET))))\ + & MMU_MMU_WALKING_ST_TWL_RUNNING_MASK) >>\ + MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET)) + +#define MMUMMU_CNTLTWL_ENABLE_READ32(base_address)\ + (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32),\ + (((__raw_readl(((base_address)+(MMU_MMU_CNTL_OFFSET)))) &\ + MMU_MMU_CNTL_TWL_ENABLE_MASK) >>\ + MMU_MMU_CNTL_TWL_ENABLE_OFFSET)) + +#define MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, value)\ +{\ + const u32 offset = MMU_MMU_CNTL_OFFSET;\ + register u32 data = __raw_readl((base_address)+offset);\ + register u32 new_value = (value);\ + _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32);\ + data &= ~(MMU_MMU_CNTL_TWL_ENABLE_MASK);\ + new_value <<= 
MMU_MMU_CNTL_TWL_ENABLE_OFFSET;\ + new_value &= MMU_MMU_CNTL_TWL_ENABLE_MASK;\ + new_value |= data;\ + __raw_writel(new_value, base_address+offset);\ +} + +#define MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, value)\ +{\ + const u32 offset = MMU_MMU_CNTL_OFFSET;\ + register u32 data = __raw_readl((base_address)+offset);\ + register u32 new_value = (value);\ + _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32);\ + data &= ~(MMU_MMU_CNTL_MMU_ENABLE_MASK);\ + new_value <<= MMU_MMU_CNTL_MMU_ENABLE_OFFSET;\ + new_value &= MMU_MMU_CNTL_MMU_ENABLE_MASK;\ + new_value |= data;\ + __raw_writel(new_value, base_address+offset);\ +} + +#define MMUMMU_FAULT_AD_READ_REGISTER32(base_address)\ + (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32),\ + __raw_readl((base_address)+MMU_MMU_FAULT_AD_OFFSET)) + +#define MMUMMU_TTB_WRITE_REGISTER32(base_address, value)\ +{\ + const u32 offset = MMU_MMU_TTB_OFFSET;\ + register u32 new_value = (value);\ + _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_TTB_WRITE_REGISTER32);\ + __raw_writel(new_value, (base_address)+offset);\ +} + +#define MMUMMU_LOCK_READ_REGISTER32(base_address)\ + (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_READ_REGISTER32),\ + __raw_readl((base_address)+MMU_MMU_LOCK_OFFSET)) + +#define MMUMMU_LOCK_WRITE_REGISTER32(base_address, value)\ +{\ + const u32 offset = MMU_MMU_LOCK_OFFSET;\ + register u32 new_value = (value);\ + _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_WRITE_REGISTER32);\ + __raw_writel(new_value, (base_address)+offset);\ +} + +#define MMUMMU_LOCK_BASE_VALUE_READ32(base_address)\ + (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32),\ + (((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\ + MMU_MMU_LOCK_BASE_VALUE_MASK) >>\ + MMU_MMU_LOCK_BASE_VALUE_OFFSET)) + +#define MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, value)\ +{\ + const u32 offset = MMU_MMU_LOCK_OFFSET;\ + register u32 data = __raw_readl((base_address)+offset);\ + register u32 new_value = (value);\ + _DEBUG_LEVEL1_EASI(easil1_mmummu_lock_base_value_write32);\ + data &= ~(MMU_MMU_LOCK_BASE_VALUE_MASK);\ + new_value <<= MMU_MMU_LOCK_BASE_VALUE_OFFSET;\ + new_value &= MMU_MMU_LOCK_BASE_VALUE_MASK;\ + new_value |= data;\ + __raw_writel(new_value, base_address+offset);\ +} + +#define MMUMMU_LOCK_CURRENT_VICTIM_READ32(base_address)\ + (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32),\ + (((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\ + MMU_MMU_LOCK_CURRENT_VICTIM_MASK) >>\ + MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET)) + +#define MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, value)\ +{\ + const u32 offset = MMU_MMU_LOCK_OFFSET;\ + register u32 data = __raw_readl((base_address)+offset);\ + register u32 new_value = (value);\ + _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32);\ + data &= ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK);\ + new_value <<= MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET;\ + new_value &= MMU_MMU_LOCK_CURRENT_VICTIM_MASK;\ + new_value |= data;\ + __raw_writel(new_value, base_address+offset);\ +} + +#define MMUMMU_LOCK_CURRENT_VICTIM_SET32(var, value)\ + (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32),\ + (((var) & ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK)) |\ + (((value) << MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET) &\ + MMU_MMU_LOCK_CURRENT_VICTIM_MASK))) + +#define MMUMMU_LD_TLB_READ_REGISTER32(base_address)\ + (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_READ_REGISTER32),\ + __raw_readl((base_address)+MMU_MMU_LD_TLB_OFFSET)) + +#define MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, value)\ +{\ + const u32 offset = MMU_MMU_LD_TLB_OFFSET;\ + register 
u32 new_value = (value);\ + _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32);\ + __raw_writel(new_value, (base_address)+offset);\ +} + +#define MMUMMU_CAM_WRITE_REGISTER32(base_address, value)\ +{\ + const u32 offset = MMU_MMU_CAM_OFFSET;\ + register u32 new_value = (value);\ + _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CAM_WRITE_REGISTER32);\ + __raw_writel(new_value, (base_address)+offset);\ +} + +#define MMUMMU_RAM_WRITE_REGISTER32(base_address, value)\ +{\ + const u32 offset = MMU_MMU_RAM_OFFSET;\ + register u32 new_value = (value);\ + _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_RAM_WRITE_REGISTER32);\ + __raw_writel(new_value, (base_address)+offset);\ +} + +#define MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, value)\ +{\ + const u32 offset = MMU_MMU_FLUSH_ENTRY_OFFSET;\ + register u32 new_value = (value);\ + _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32);\ + __raw_writel(new_value, (base_address)+offset);\ +} + +#endif /* USE_LEVEL_1_MACROS */ + +#endif /* _MMU_REG_ACM_H */ diff --git a/drivers/staging/tidspbridge/hw/hw_defs.h b/drivers/staging/tidspbridge/hw/hw_defs.h new file mode 100644 index 00000000000..d5266d4c163 --- /dev/null +++ b/drivers/staging/tidspbridge/hw/hw_defs.h @@ -0,0 +1,58 @@ +/* + * hw_defs.h + * + * DSP-BIOS Bridge driver support functions for TI OMAP processors. + * + * Global HW definitions + * + * Copyright (C) 2007 Texas Instruments, Inc. + * + * This package is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + */ + +#ifndef _HW_DEFS_H +#define _HW_DEFS_H + +/* Page size */ +#define HW_PAGE_SIZE4KB 0x1000 +#define HW_PAGE_SIZE64KB 0x10000 +#define HW_PAGE_SIZE1MB 0x100000 +#define HW_PAGE_SIZE16MB 0x1000000 + +/* hw_status: return type for HW API */ +typedef long hw_status; + +/* Macro used to set and clear any bit */ +#define HW_CLEAR 0 +#define HW_SET 1 + +/* hw_endianism_t: Enumerated Type used to specify the endianism + * Do NOT change these values. They are used as bit fields. */ +enum hw_endianism_t { + HW_LITTLE_ENDIAN, + HW_BIG_ENDIAN +}; + +/* hw_element_size_t: Enumerated Type used to specify the element size + * Do NOT change these values. They are used as bit fields. */ +enum hw_element_size_t { + HW_ELEM_SIZE8BIT, + HW_ELEM_SIZE16BIT, + HW_ELEM_SIZE32BIT, + HW_ELEM_SIZE64BIT +}; + +/* hw_idle_mode_t: Enumerated Type used to specify Idle modes */ +enum hw_idle_mode_t { + HW_FORCE_IDLE, + HW_NO_IDLE, + HW_SMART_IDLE +}; + +#endif /* _HW_DEFS_H */ diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.c b/drivers/staging/tidspbridge/hw/hw_mmu.c new file mode 100644 index 00000000000..014f5d5293a --- /dev/null +++ b/drivers/staging/tidspbridge/hw/hw_mmu.c @@ -0,0 +1,562 @@ +/* + * hw_mmu.c + * + * DSP-BIOS Bridge driver support functions for TI OMAP processors. + * + * API definitions to setup MMU TLB and PTE + * + * Copyright (C) 2007 Texas Instruments, Inc. + * + * This package is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + */ + +#include <linux/io.h> +#include "MMURegAcM.h" +#include <hw_defs.h> +#include <hw_mmu.h> +#include <linux/types.h> +#include <linux/err.h> + +#define MMU_BASE_VAL_MASK 0xFC00 +#define MMU_PAGE_MAX 3 +#define MMU_ELEMENTSIZE_MAX 3 +#define MMU_ADDR_MASK 0xFFFFF000 +#define MMU_TTB_MASK 0xFFFFC000 +#define MMU_SECTION_ADDR_MASK 0xFFF00000 +#define MMU_SSECTION_ADDR_MASK 0xFF000000 +#define MMU_PAGE_TABLE_MASK 0xFFFFFC00 +#define MMU_LARGE_PAGE_MASK 0xFFFF0000 +#define MMU_SMALL_PAGE_MASK 0xFFFFF000 + +#define MMU_LOAD_TLB 0x00000001 +#define MMU_GFLUSH 0x60 + +/* + * hw_mmu_page_size_t: Enumerated Type used to specify the MMU Page Size(SLSS) + */ +enum hw_mmu_page_size_t { + HW_MMU_SECTION, + HW_MMU_LARGE_PAGE, + HW_MMU_SMALL_PAGE, + HW_MMU_SUPERSECTION +}; + +/* + * FUNCTION : mmu_flush_entry + * + * INPUTS: + * + * Identifier : base_address + * Type : const u32 + * Description : Base Address of instance of MMU module + * + * RETURNS: + * + * Type : hw_status + * Description : 0 -- No errors occured + * RET_BAD_NULL_PARAM -- A Pointer + * Paramater was set to NULL + * + * PURPOSE: : Flush the TLB entry pointed by the + * lock counter register + * even if this entry is set protected + * + * METHOD: : Check the Input parameter and Flush a + * single entry in the TLB. + */ +static hw_status mmu_flush_entry(const void __iomem *base_address); + +/* + * FUNCTION : mmu_set_cam_entry + * + * INPUTS: + * + * Identifier : base_address + * TypE : const u32 + * Description : Base Address of instance of MMU module + * + * Identifier : page_sz + * TypE : const u32 + * Description : It indicates the page size + * + * Identifier : preserved_bit + * Type : const u32 + * Description : It indicates the TLB entry is preserved entry + * or not + * + * Identifier : valid_bit + * Type : const u32 + * Description : It indicates the TLB entry is valid entry or not + * + * + * Identifier : virtual_addr_tag + * Type : const u32 + * Description : virtual Address + * + * RETURNS: + * + * Type : hw_status + * Description : 0 -- No errors occured + * RET_BAD_NULL_PARAM -- A Pointer Paramater + * was set to NULL + * RET_PARAM_OUT_OF_RANGE -- Input Parameter out + * of Range + * + * PURPOSE: : Set MMU_CAM reg + * + * METHOD: : Check the Input parameters and set the CAM entry. 
+ */ +static hw_status mmu_set_cam_entry(const void __iomem *base_address, + const u32 page_sz, + const u32 preserved_bit, + const u32 valid_bit, + const u32 virtual_addr_tag); + +/* + * FUNCTION : mmu_set_ram_entry + * + * INPUTS: + * + * Identifier : base_address + * Type : const u32 + * Description : Base Address of instance of MMU module + * + * Identifier : physical_addr + * Type : const u32 + * Description : Physical Address to which the corresponding + * virtual Address shouldpoint + * + * Identifier : endianism + * Type : hw_endianism_t + * Description : endianism for the given page + * + * Identifier : element_size + * Type : hw_element_size_t + * Description : The element size ( 8,16, 32 or 64 bit) + * + * Identifier : mixed_size + * Type : hw_mmu_mixed_size_t + * Description : Element Size to follow CPU or TLB + * + * RETURNS: + * + * Type : hw_status + * Description : 0 -- No errors occured + * RET_BAD_NULL_PARAM -- A Pointer Paramater + * was set to NULL + * RET_PARAM_OUT_OF_RANGE -- Input Parameter + * out of Range + * + * PURPOSE: : Set MMU_CAM reg + * + * METHOD: : Check the Input parameters and set the RAM entry. + */ +static hw_status mmu_set_ram_entry(const void __iomem *base_address, + const u32 physical_addr, + enum hw_endianism_t endianism, + enum hw_element_size_t element_size, + enum hw_mmu_mixed_size_t mixed_size); + +/* HW FUNCTIONS */ + +hw_status hw_mmu_enable(const void __iomem *base_address) +{ + hw_status status = 0; + + MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_SET); + + return status; +} + +hw_status hw_mmu_disable(const void __iomem *base_address) +{ + hw_status status = 0; + + MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_CLEAR); + + return status; +} + +hw_status hw_mmu_num_locked_set(const void __iomem *base_address, + u32 num_locked_entries) +{ + hw_status status = 0; + + MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, num_locked_entries); + + return status; +} + +hw_status hw_mmu_victim_num_set(const void __iomem *base_address, + u32 victim_entry_num) +{ + hw_status status = 0; + + MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, victim_entry_num); + + return status; +} + +hw_status hw_mmu_event_ack(const void __iomem *base_address, u32 irq_mask) +{ + hw_status status = 0; + + MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, irq_mask); + + return status; +} + +hw_status hw_mmu_event_disable(const void __iomem *base_address, u32 irq_mask) +{ + hw_status status = 0; + u32 irq_reg; + + irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address); + + MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg & ~irq_mask); + + return status; +} + +hw_status hw_mmu_event_enable(const void __iomem *base_address, u32 irq_mask) +{ + hw_status status = 0; + u32 irq_reg; + + irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address); + + MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg | irq_mask); + + return status; +} + +hw_status hw_mmu_event_status(const void __iomem *base_address, u32 *irq_mask) +{ + hw_status status = 0; + + *irq_mask = MMUMMU_IRQSTATUS_READ_REGISTER32(base_address); + + return status; +} + +hw_status hw_mmu_fault_addr_read(const void __iomem *base_address, u32 *addr) +{ + hw_status status = 0; + + /* read values from register */ + *addr = MMUMMU_FAULT_AD_READ_REGISTER32(base_address); + + return status; +} + +hw_status hw_mmu_ttb_set(const void __iomem *base_address, u32 ttb_phys_addr) +{ + hw_status status = 0; + u32 load_ttb; + + load_ttb = ttb_phys_addr & ~0x7FUL; + /* write values to register */ + 
MMUMMU_TTB_WRITE_REGISTER32(base_address, load_ttb); + + return status; +} + +hw_status hw_mmu_twl_enable(const void __iomem *base_address) +{ + hw_status status = 0; + + MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_SET); + + return status; +} + +hw_status hw_mmu_twl_disable(const void __iomem *base_address) +{ + hw_status status = 0; + + MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_CLEAR); + + return status; +} + +hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtual_addr, + u32 page_sz) +{ + hw_status status = 0; + u32 virtual_addr_tag; + enum hw_mmu_page_size_t pg_size_bits; + + switch (page_sz) { + case HW_PAGE_SIZE4KB: + pg_size_bits = HW_MMU_SMALL_PAGE; + break; + + case HW_PAGE_SIZE64KB: + pg_size_bits = HW_MMU_LARGE_PAGE; + break; + + case HW_PAGE_SIZE1MB: + pg_size_bits = HW_MMU_SECTION; + break; + + case HW_PAGE_SIZE16MB: + pg_size_bits = HW_MMU_SUPERSECTION; + break; + + default: + return -EINVAL; + } + + /* Generate the 20-bit tag from virtual address */ + virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12); + + mmu_set_cam_entry(base_address, pg_size_bits, 0, 0, virtual_addr_tag); + + mmu_flush_entry(base_address); + + return status; +} + +hw_status hw_mmu_tlb_add(const void __iomem *base_address, + u32 physical_addr, + u32 virtual_addr, + u32 page_sz, + u32 entry_num, + struct hw_mmu_map_attrs_t *map_attrs, + s8 preserved_bit, s8 valid_bit) +{ + hw_status status = 0; + u32 lock_reg; + u32 virtual_addr_tag; + enum hw_mmu_page_size_t mmu_pg_size; + + /*Check the input Parameters */ + switch (page_sz) { + case HW_PAGE_SIZE4KB: + mmu_pg_size = HW_MMU_SMALL_PAGE; + break; + + case HW_PAGE_SIZE64KB: + mmu_pg_size = HW_MMU_LARGE_PAGE; + break; + + case HW_PAGE_SIZE1MB: + mmu_pg_size = HW_MMU_SECTION; + break; + + case HW_PAGE_SIZE16MB: + mmu_pg_size = HW_MMU_SUPERSECTION; + break; + + default: + return -EINVAL; + } + + lock_reg = MMUMMU_LOCK_READ_REGISTER32(base_address); + + /* Generate the 20-bit tag from virtual address */ + virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12); + + /* Write the fields in the CAM Entry Register */ + mmu_set_cam_entry(base_address, mmu_pg_size, preserved_bit, valid_bit, + virtual_addr_tag); + + /* Write the different fields of the RAM Entry Register */ + /* endianism of the page,Element Size of the page (8, 16, 32, 64 bit) */ + mmu_set_ram_entry(base_address, physical_addr, map_attrs->endianism, + map_attrs->element_size, map_attrs->mixed_size); + + /* Update the MMU Lock Register */ + /* currentVictim between lockedBaseValue and (MMU_Entries_Number - 1) */ + MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, entry_num); + + /* Enable loading of an entry in TLB by writing 1 + into LD_TLB_REG register */ + MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, MMU_LOAD_TLB); + + MMUMMU_LOCK_WRITE_REGISTER32(base_address, lock_reg); + + return status; +} + +hw_status hw_mmu_pte_set(const u32 pg_tbl_va, + u32 physical_addr, + u32 virtual_addr, + u32 page_sz, struct hw_mmu_map_attrs_t *map_attrs) +{ + hw_status status = 0; + u32 pte_addr, pte_val; + s32 num_entries = 1; + + switch (page_sz) { + case HW_PAGE_SIZE4KB: + pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va, + virtual_addr & + MMU_SMALL_PAGE_MASK); + pte_val = + ((physical_addr & MMU_SMALL_PAGE_MASK) | + (map_attrs->endianism << 9) | (map_attrs-> + element_size << 4) | + (map_attrs->mixed_size << 11) | 2); + break; + + case HW_PAGE_SIZE64KB: + num_entries = 16; + pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va, + virtual_addr & + MMU_LARGE_PAGE_MASK); + pte_val = + ((physical_addr & 
MMU_LARGE_PAGE_MASK) | + (map_attrs->endianism << 9) | (map_attrs-> + element_size << 4) | + (map_attrs->mixed_size << 11) | 1); + break; + + case HW_PAGE_SIZE1MB: + pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, + virtual_addr & + MMU_SECTION_ADDR_MASK); + pte_val = + ((((physical_addr & MMU_SECTION_ADDR_MASK) | + (map_attrs->endianism << 15) | (map_attrs-> + element_size << 10) | + (map_attrs->mixed_size << 17)) & ~0x40000) | 0x2); + break; + + case HW_PAGE_SIZE16MB: + num_entries = 16; + pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, + virtual_addr & + MMU_SSECTION_ADDR_MASK); + pte_val = + (((physical_addr & MMU_SSECTION_ADDR_MASK) | + (map_attrs->endianism << 15) | (map_attrs-> + element_size << 10) | + (map_attrs->mixed_size << 17) + ) | 0x40000 | 0x2); + break; + + case HW_MMU_COARSE_PAGE_SIZE: + pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, + virtual_addr & + MMU_SECTION_ADDR_MASK); + pte_val = (physical_addr & MMU_PAGE_TABLE_MASK) | 1; + break; + + default: + return -EINVAL; + } + + while (--num_entries >= 0) + ((u32 *) pte_addr)[num_entries] = pte_val; + + return status; +} + +hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, u32 virtual_addr, u32 page_size) +{ + hw_status status = 0; + u32 pte_addr; + s32 num_entries = 1; + + switch (page_size) { + case HW_PAGE_SIZE4KB: + pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va, + virtual_addr & + MMU_SMALL_PAGE_MASK); + break; + + case HW_PAGE_SIZE64KB: + num_entries = 16; + pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va, + virtual_addr & + MMU_LARGE_PAGE_MASK); + break; + + case HW_PAGE_SIZE1MB: + case HW_MMU_COARSE_PAGE_SIZE: + pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, + virtual_addr & + MMU_SECTION_ADDR_MASK); + break; + + case HW_PAGE_SIZE16MB: + num_entries = 16; + pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va, + virtual_addr & + MMU_SSECTION_ADDR_MASK); + break; + + default: + return -EINVAL; + } + + while (--num_entries >= 0) + ((u32 *) pte_addr)[num_entries] = 0; + + return status; +} + +/* mmu_flush_entry */ +static hw_status mmu_flush_entry(const void __iomem *base_address) +{ + hw_status status = 0; + u32 flush_entry_data = 0x1; + + /* write values to register */ + MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, flush_entry_data); + + return status; +} + +/* mmu_set_cam_entry */ +static hw_status mmu_set_cam_entry(const void __iomem *base_address, + const u32 page_sz, + const u32 preserved_bit, + const u32 valid_bit, + const u32 virtual_addr_tag) +{ + hw_status status = 0; + u32 mmu_cam_reg; + + mmu_cam_reg = (virtual_addr_tag << 12); + mmu_cam_reg = (mmu_cam_reg) | (page_sz) | (valid_bit << 2) | + (preserved_bit << 3); + + /* write values to register */ + MMUMMU_CAM_WRITE_REGISTER32(base_address, mmu_cam_reg); + + return status; +} + +/* mmu_set_ram_entry */ +static hw_status mmu_set_ram_entry(const void __iomem *base_address, + const u32 physical_addr, + enum hw_endianism_t endianism, + enum hw_element_size_t element_size, + enum hw_mmu_mixed_size_t mixed_size) +{ + hw_status status = 0; + u32 mmu_ram_reg; + + mmu_ram_reg = (physical_addr & MMU_ADDR_MASK); + mmu_ram_reg = (mmu_ram_reg) | ((endianism << 9) | (element_size << 7) | + (mixed_size << 6)); + + /* write values to register */ + MMUMMU_RAM_WRITE_REGISTER32(base_address, mmu_ram_reg); + + return status; + +} + +void hw_mmu_tlb_flush_all(const void __iomem *base) +{ + __raw_writeb(1, base + MMU_GFLUSH); +} diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.h b/drivers/staging/tidspbridge/hw/hw_mmu.h new file mode 100644 index 00000000000..1458a2c6027 --- /dev/null +++ 
b/drivers/staging/tidspbridge/hw/hw_mmu.h @@ -0,0 +1,163 @@ +/* + * hw_mmu.h + * + * DSP-BIOS Bridge driver support functions for TI OMAP processors. + * + * MMU types and API declarations + * + * Copyright (C) 2007 Texas Instruments, Inc. + * + * This package is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. + */ + +#ifndef _HW_MMU_H +#define _HW_MMU_H + +#include <linux/types.h> + +/* Bitmasks for interrupt sources */ +#define HW_MMU_TRANSLATION_FAULT 0x2 +#define HW_MMU_ALL_INTERRUPTS 0x1F + +#define HW_MMU_COARSE_PAGE_SIZE 0x400 + +/* hw_mmu_mixed_size_t: Enumerated Type used to specify whether to follow + CPU/TLB Element size */ +enum hw_mmu_mixed_size_t { + HW_MMU_TLBES, + HW_MMU_CPUES +}; + +/* hw_mmu_map_attrs_t: Struct containing MMU mapping attributes */ +struct hw_mmu_map_attrs_t { + enum hw_endianism_t endianism; + enum hw_element_size_t element_size; + enum hw_mmu_mixed_size_t mixed_size; + bool donotlockmpupage; +}; + +extern hw_status hw_mmu_enable(const void __iomem *base_address); + +extern hw_status hw_mmu_disable(const void __iomem *base_address); + +extern hw_status hw_mmu_num_locked_set(const void __iomem *base_address, + u32 num_locked_entries); + +extern hw_status hw_mmu_victim_num_set(const void __iomem *base_address, + u32 victim_entry_num); + +/* For MMU faults */ +extern hw_status hw_mmu_event_ack(const void __iomem *base_address, + u32 irq_mask); + +extern hw_status hw_mmu_event_disable(const void __iomem *base_address, + u32 irq_mask); + +extern hw_status hw_mmu_event_enable(const void __iomem *base_address, + u32 irq_mask); + +extern hw_status hw_mmu_event_status(const void __iomem *base_address, + u32 *irq_mask); + +extern hw_status hw_mmu_fault_addr_read(const void __iomem *base_address, + u32 *addr); + +/* Set the TT base address */ +extern hw_status hw_mmu_ttb_set(const void __iomem *base_address, + u32 ttb_phys_addr); + +extern hw_status hw_mmu_twl_enable(const void __iomem *base_address); + +extern hw_status hw_mmu_twl_disable(const void __iomem *base_address); + +extern hw_status hw_mmu_tlb_flush(const void __iomem *base_address, + u32 virtual_addr, u32 page_sz); + +extern hw_status hw_mmu_tlb_add(const void __iomem *base_address, + u32 physical_addr, + u32 virtual_addr, + u32 page_sz, + u32 entry_num, + struct hw_mmu_map_attrs_t *map_attrs, + s8 preserved_bit, s8 valid_bit); + +/* For PTEs */ +extern hw_status hw_mmu_pte_set(const u32 pg_tbl_va, + u32 physical_addr, + u32 virtual_addr, + u32 page_sz, + struct hw_mmu_map_attrs_t *map_attrs); + +extern hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, + u32 virtual_addr, u32 page_size); + +void hw_mmu_tlb_flush_all(const void __iomem *base); + +static inline u32 hw_mmu_pte_addr_l1(u32 l1_base, u32 va) +{ + u32 pte_addr; + u32 va31_to20; + + va31_to20 = va >> (20 - 2); /* Left-shift by 2 here itself */ + va31_to20 &= 0xFFFFFFFCUL; + pte_addr = l1_base + va31_to20; + + return pte_addr; +} + +static inline u32 hw_mmu_pte_addr_l2(u32 l2_base, u32 va) +{ + u32 pte_addr; + + pte_addr = (l2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC); + + return pte_addr; +} + +static inline u32 hw_mmu_pte_coarse_l1(u32 pte_val) +{ + u32 pte_coarse; + + pte_coarse = pte_val & 0xFFFFFC00; + + return 
pte_coarse; +} + +static inline u32 hw_mmu_pte_size_l1(u32 pte_val) +{ + u32 pte_size = 0; + + if ((pte_val & 0x3) == 0x1) { + /* Points to L2 PT */ + pte_size = HW_MMU_COARSE_PAGE_SIZE; + } + + if ((pte_val & 0x3) == 0x2) { + if (pte_val & (1 << 18)) + pte_size = HW_PAGE_SIZE16MB; + else + pte_size = HW_PAGE_SIZE1MB; + } + + return pte_size; +} + +static inline u32 hw_mmu_pte_size_l2(u32 pte_val) +{ + u32 pte_size = 0; + + if (pte_val & 0x2) + pte_size = HW_PAGE_SIZE4KB; + else if (pte_val & 0x1) + pte_size = HW_PAGE_SIZE64KB; + + return pte_size; +} + +#endif /* _HW_MMU_H */ diff --git a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h index dfb55cca34c..38122dbf877 100644 --- a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h +++ b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h @@ -68,6 +68,7 @@ struct cfg_hostres { void __iomem *dw_per_base; u32 dw_per_pm_base; u32 dw_core_pm_base; + void __iomem *dw_dmmu_base; void __iomem *dw_sys_ctrl_base; }; diff --git a/drivers/staging/tidspbridge/include/dspbridge/dev.h b/drivers/staging/tidspbridge/include/dspbridge/dev.h index 9bdd48f5742..357458fadd2 100644 --- a/drivers/staging/tidspbridge/include/dspbridge/dev.h +++ b/drivers/staging/tidspbridge/include/dspbridge/dev.h @@ -27,6 +27,7 @@ #include <dspbridge/nodedefs.h> #include <dspbridge/dispdefs.h> #include <dspbridge/dspdefs.h> +#include <dspbridge/dmm.h> #include <dspbridge/host_os.h> /* ----------------------------------- This */ @@ -233,6 +234,29 @@ extern int dev_get_cmm_mgr(struct dev_object *hdev_obj, struct cmm_object **mgr); /* + * ======== dev_get_dmm_mgr ======== + * Purpose: + * Retrieve the handle to the dynamic memory manager created for this + * device. + * Parameters: + * hdev_obj: Handle to device object created with + * dev_create_device(). + * *mgr: Ptr to location to store handle. + * Returns: + * 0: Success. + * -EFAULT: Invalid hdev_obj. + * Requires: + * mgr != NULL. + * DEV Initialized. + * Ensures: + * 0: *mgr contains a handle to a channel manager object, + * or NULL. + * else: *mgr is NULL. + */ +extern int dev_get_dmm_mgr(struct dev_object *hdev_obj, + struct dmm_object **mgr); + +/* * ======== dev_get_cod_mgr ======== * Purpose: * Retrieve the COD manager create for this device. diff --git a/drivers/staging/tidspbridge/include/dspbridge/dmm.h b/drivers/staging/tidspbridge/include/dspbridge/dmm.h new file mode 100644 index 00000000000..6c58335c5f6 --- /dev/null +++ b/drivers/staging/tidspbridge/include/dspbridge/dmm.h @@ -0,0 +1,75 @@ +/* + * dmm.h + * + * DSP-BIOS Bridge driver support functions for TI OMAP processors. + * + * The Dynamic Memory Mapping(DMM) module manages the DSP Virtual address + * space that can be directly mapped to any MPU buffer or memory region. + * + * Copyright (C) 2005-2006 Texas Instruments, Inc. + * + * This package is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
+ */ + +#ifndef DMM_ +#define DMM_ + +#include <dspbridge/dbdefs.h> + +struct dmm_object; + +/* DMM attributes used in dmm_create() */ +struct dmm_mgrattrs { + u32 reserved; +}; + +#define DMMPOOLSIZE 0x4000000 + +/* + * ======== dmm_get_handle ======== + * Purpose: + * Return the dynamic memory manager object for this device. + * This is typically called from the client process. + */ + +extern int dmm_get_handle(void *hprocessor, + struct dmm_object **dmm_manager); + +extern int dmm_reserve_memory(struct dmm_object *dmm_mgr, + u32 size, u32 *prsv_addr); + +extern int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, + u32 rsv_addr); + +extern int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, + u32 size); + +extern int dmm_un_map_memory(struct dmm_object *dmm_mgr, + u32 addr, u32 *psize); + +extern int dmm_destroy(struct dmm_object *dmm_mgr); + +extern int dmm_delete_tables(struct dmm_object *dmm_mgr); + +extern int dmm_create(struct dmm_object **dmm_manager, + struct dev_object *hdev_obj, + const struct dmm_mgrattrs *mgr_attrts); + +extern bool dmm_init(void); + +extern void dmm_exit(void); + +extern int dmm_create_tables(struct dmm_object *dmm_mgr, + u32 addr, u32 size); + +#ifdef DSP_DMM_DEBUG +u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr); +#endif + +#endif /* DMM_ */ diff --git a/drivers/staging/tidspbridge/include/dspbridge/drv.h b/drivers/staging/tidspbridge/include/dspbridge/drv.h index 75a2c9b5c6f..c1f363ec9af 100644 --- a/drivers/staging/tidspbridge/include/dspbridge/drv.h +++ b/drivers/staging/tidspbridge/include/dspbridge/drv.h @@ -108,6 +108,12 @@ struct dmm_map_object { struct bridge_dma_map_info dma_info; }; +/* Used for DMM reserved memory accounting */ +struct dmm_rsv_object { + struct list_head link; + u32 dsp_reserved_addr; +}; + /* New structure (member of process context) abstracts DMM resource info */ struct dspheap_res_object { s32 heap_allocated; /* DMM status */ @@ -159,6 +165,10 @@ struct process_context { struct list_head dmm_map_list; spinlock_t dmm_map_lock; + /* DMM reserved memory resources */ + struct list_head dmm_rsv_list; + spinlock_t dmm_rsv_lock; + /* DSP Heap resources */ struct dspheap_res_object *pdspheap_list; diff --git a/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h b/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h deleted file mode 100644 index cb38d4cc073..00000000000 --- a/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * dsp-mmu.h - * - * DSP-BIOS Bridge driver support functions for TI OMAP processors. - * - * DSP iommu. - * - * Copyright (C) 2005-2010 Texas Instruments, Inc. - * - * This package is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED - * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. - */ - -#ifndef _DSP_MMU_ -#define _DSP_MMU_ - -#include <plat/iommu.h> -#include <plat/iovmm.h> - -/** - * dsp_mmu_init() - initialize dsp_mmu module and returns a handle - * - * This function initialize dsp mmu module and returns a struct iommu - * handle to use it for dsp maps. - * - */ -struct iommu *dsp_mmu_init(void); - -/** - * dsp_mmu_exit() - destroy dsp mmu module - * @mmu: Pointer to iommu handle. - * - * This function destroys dsp mmu module. 
- * - */ -void dsp_mmu_exit(struct iommu *mmu); - -/** - * user_to_dsp_map() - maps user to dsp virtual address - * @mmu: Pointer to iommu handle. - * @uva: Virtual user space address. - * @da DSP address - * @size Buffer size to map. - * @usr_pgs struct page array pointer where the user pages will be stored - * - * This function maps a user space buffer into DSP virtual address. - * - */ -u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size, - struct page **usr_pgs); - -/** - * user_to_dsp_unmap() - unmaps DSP virtual buffer. - * @mmu: Pointer to iommu handle. - * @da DSP address - * - * This function unmaps a user space buffer into DSP virtual address. - * - */ -int user_to_dsp_unmap(struct iommu *mmu, u32 da); - -#endif diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h index 61536347481..0ae7d1646a1 100644 --- a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h +++ b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h @@ -162,6 +162,48 @@ typedef int(*fxn_brd_memwrite) (struct bridge_dev_context u32 mem_type); /* + * ======== bridge_brd_mem_map ======== + * Purpose: + * Map a MPU memory region to a DSP/IVA memory space + * Parameters: + * dev_ctxt: Handle to Bridge driver defined device info. + * ul_mpu_addr: MPU memory region start address. + * virt_addr: DSP/IVA memory region u8 address. + * ul_num_bytes: Number of bytes to map. + * map_attrs: Mapping attributes (e.g. endianness). + * Returns: + * 0: Success. + * -EPERM: Other, unspecified error. + * Requires: + * dev_ctxt != NULL; + * Ensures: + */ +typedef int(*fxn_brd_memmap) (struct bridge_dev_context + * dev_ctxt, u32 ul_mpu_addr, + u32 virt_addr, u32 ul_num_bytes, + u32 map_attr, + struct page **mapped_pages); + +/* + * ======== bridge_brd_mem_un_map ======== + * Purpose: + * UnMap an MPU memory region from DSP/IVA memory space + * Parameters: + * dev_ctxt: Handle to Bridge driver defined device info. + * virt_addr: DSP/IVA memory region u8 address. + * ul_num_bytes: Number of bytes to unmap. + * Returns: + * 0: Success. + * -EPERM: Other, unspecified error. + * Requires: + * dev_ctxt != NULL; + * Ensures: + */ +typedef int(*fxn_brd_memunmap) (struct bridge_dev_context + * dev_ctxt, + u32 virt_addr, u32 ul_num_bytes); + +/* * ======== bridge_brd_stop ======== * Purpose: * Bring board to the BRD_STOPPED state. @@ -951,6 +993,8 @@ struct bridge_drv_interface { fxn_brd_setstate pfn_brd_set_state; /* Sets the Board State */ fxn_brd_memcopy pfn_brd_mem_copy; /* Copies DSP Memory */ fxn_brd_memwrite pfn_brd_mem_write; /* Write DSP Memory w/o halt */ + fxn_brd_memmap pfn_brd_mem_map; /* Maps MPU mem to DSP mem */ + fxn_brd_memunmap pfn_brd_mem_un_map; /* Unmaps MPU mem to DSP mem */ fxn_chnl_create pfn_chnl_create; /* Create channel manager. */ fxn_chnl_destroy pfn_chnl_destroy; /* Destroy channel manager. */ fxn_chnl_open pfn_chnl_open; /* Create a new channel. */ diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h b/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h index bad180108ad..41e0594dff3 100644 --- a/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h +++ b/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h @@ -19,6 +19,10 @@ #ifndef DSPIOCTL_ #define DSPIOCTL_ +/* ------------------------------------ Hardware Abstraction Layer */ +#include <hw_defs.h> +#include <hw_mmu.h> + /* * Any IOCTLS at or above this value are reserved for standard Bridge driver * interfaces. 
@@ -61,6 +65,9 @@ struct bridge_ioctl_extproc { /* GPP virtual address. __va does not work for ioremapped addresses */ u32 ul_gpp_va; u32 ul_size; /* Size of the mapped memory in bytes */ + enum hw_endianism_t endianism; + enum hw_mmu_mixed_size_t mixed_mode; + enum hw_element_size_t elem_size; }; #endif /* DSPIOCTL_ */ diff --git a/drivers/staging/tidspbridge/include/dspbridge/proc.h b/drivers/staging/tidspbridge/include/dspbridge/proc.h index 2d12aab6b5b..5e09fd165d9 100644 --- a/drivers/staging/tidspbridge/include/dspbridge/proc.h +++ b/drivers/staging/tidspbridge/include/dspbridge/proc.h @@ -551,6 +551,29 @@ extern int proc_map(void *hprocessor, struct process_context *pr_ctxt); /* + * ======== proc_reserve_memory ======== + * Purpose: + * Reserve a virtually contiguous region of DSP address space. + * Parameters: + * hprocessor : The processor handle. + * ul_size : Size of the address space to reserve. + * pp_rsv_addr : Ptr to DSP side reserved u8 address. + * Returns: + * 0 : Success. + * -EFAULT : Invalid processor handle. + * -EPERM : General failure. + * -ENOMEM : Cannot reserve chunk of this size. + * Requires: + * pp_rsv_addr is not NULL + * PROC Initialized. + * Ensures: + * Details: + */ +extern int proc_reserve_memory(void *hprocessor, + u32 ul_size, void **pp_rsv_addr, + struct process_context *pr_ctxt); + +/* * ======== proc_un_map ======== * Purpose: * Removes a MPU buffer mapping from the DSP address space. @@ -572,4 +595,27 @@ extern int proc_map(void *hprocessor, extern int proc_un_map(void *hprocessor, void *map_addr, struct process_context *pr_ctxt); +/* + * ======== proc_un_reserve_memory ======== + * Purpose: + * Frees a previously reserved region of DSP address space. + * Parameters: + * hprocessor : The processor handle. + * prsv_addr : Ptr to DSP side reservedBYTE address. + * Returns: + * 0 : Success. + * -EFAULT : Invalid processor handle. + * -EPERM : General failure. + * -ENOENT : Cannot find a reserved region starting with this + * : address. + * Requires: + * prsv_addr is not NULL + * PROC Initialized. + * Ensures: + * Details: + */ +extern int proc_un_reserve_memory(void *hprocessor, + void *prsv_addr, + struct process_context *pr_ctxt); + #endif /* PROC_ */ diff --git a/drivers/staging/tidspbridge/pmgr/dev.c b/drivers/staging/tidspbridge/pmgr/dev.c index 7b30267ef0e..132e960967b 100644 --- a/drivers/staging/tidspbridge/pmgr/dev.c +++ b/drivers/staging/tidspbridge/pmgr/dev.c @@ -34,6 +34,7 @@ #include <dspbridge/cod.h> #include <dspbridge/drv.h> #include <dspbridge/proc.h> +#include <dspbridge/dmm.h> /* ----------------------------------- Resource Manager */ #include <dspbridge/mgr.h> @@ -74,6 +75,7 @@ struct dev_object { struct msg_mgr *hmsg_mgr; /* Message manager. */ struct io_mgr *hio_mgr; /* IO manager (CHNL, msg_ctrl) */ struct cmm_object *hcmm_mgr; /* SM memory manager. */ + struct dmm_object *dmm_mgr; /* Dynamic memory manager. */ struct ldr_module *module_obj; /* Bridge Module handle. */ u32 word_size; /* DSP word size: quick access. */ struct drv_object *hdrv_obj; /* Driver Object */ @@ -248,6 +250,9 @@ int dev_create_device(struct dev_object **device_obj, /* Instantiate the DEH module */ status = bridge_deh_create(&dev_obj->hdeh_mgr, dev_obj); } + /* Create DMM mgr . 
*/ + status = dmm_create(&dev_obj->dmm_mgr, + (struct dev_object *)dev_obj, NULL); } /* Add the new DEV_Object to the global list: */ if (!status) { @@ -273,6 +278,8 @@ leave: kfree(dev_obj->proc_list); if (dev_obj->cod_mgr) cod_delete(dev_obj->cod_mgr); + if (dev_obj->dmm_mgr) + dmm_destroy(dev_obj->dmm_mgr); kfree(dev_obj); } @@ -382,6 +389,11 @@ int dev_destroy_device(struct dev_object *hdev_obj) dev_obj->hcmm_mgr = NULL; } + if (dev_obj->dmm_mgr) { + dmm_destroy(dev_obj->dmm_mgr); + dev_obj->dmm_mgr = NULL; + } + /* Call the driver's bridge_dev_destroy() function: */ /* Require of DevDestroy */ if (dev_obj->hbridge_context) { @@ -462,6 +474,32 @@ int dev_get_cmm_mgr(struct dev_object *hdev_obj, } /* + * ======== dev_get_dmm_mgr ======== + * Purpose: + * Retrieve the handle to the dynamic memory manager created for this + * device. + */ +int dev_get_dmm_mgr(struct dev_object *hdev_obj, + struct dmm_object **mgr) +{ + int status = 0; + struct dev_object *dev_obj = hdev_obj; + + DBC_REQUIRE(refs > 0); + DBC_REQUIRE(mgr != NULL); + + if (hdev_obj) { + *mgr = dev_obj->dmm_mgr; + } else { + *mgr = NULL; + status = -EFAULT; + } + + DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL)); + return status; +} + +/* * ======== dev_get_cod_mgr ======== * Purpose: * Retrieve the COD manager create for this device. @@ -713,8 +751,10 @@ void dev_exit(void) refs--; - if (refs == 0) + if (refs == 0) { cmm_exit(); + dmm_exit(); + } DBC_ENSURE(refs >= 0); } @@ -726,12 +766,25 @@ void dev_exit(void) */ bool dev_init(void) { - bool ret = true; + bool cmm_ret, dmm_ret, ret = true; DBC_REQUIRE(refs >= 0); - if (refs == 0) - ret = cmm_init(); + if (refs == 0) { + cmm_ret = cmm_init(); + dmm_ret = dmm_init(); + + ret = cmm_ret && dmm_ret; + + if (!ret) { + if (cmm_ret) + cmm_exit(); + + if (dmm_ret) + dmm_exit(); + + } + } if (ret) refs++; @@ -1065,6 +1118,8 @@ static void store_interface_fxns(struct bridge_drv_interface *drv_fxns, STORE_FXN(fxn_brd_setstate, pfn_brd_set_state); STORE_FXN(fxn_brd_memcopy, pfn_brd_mem_copy); STORE_FXN(fxn_brd_memwrite, pfn_brd_mem_write); + STORE_FXN(fxn_brd_memmap, pfn_brd_mem_map); + STORE_FXN(fxn_brd_memunmap, pfn_brd_mem_un_map); STORE_FXN(fxn_chnl_create, pfn_chnl_create); STORE_FXN(fxn_chnl_destroy, pfn_chnl_destroy); STORE_FXN(fxn_chnl_open, pfn_chnl_open); diff --git a/drivers/staging/tidspbridge/pmgr/dmm.c b/drivers/staging/tidspbridge/pmgr/dmm.c new file mode 100644 index 00000000000..8685233d762 --- /dev/null +++ b/drivers/staging/tidspbridge/pmgr/dmm.c @@ -0,0 +1,533 @@ +/* + * dmm.c + * + * DSP-BIOS Bridge driver support functions for TI OMAP processors. + * + * The Dynamic Memory Manager (DMM) module manages the DSP Virtual address + * space that can be directly mapped to any MPU buffer or memory region + * + * Notes: + * Region: Generic memory entitiy having a start address and a size + * Chunk: Reserved region + * + * Copyright (C) 2005-2006 Texas Instruments, Inc. + * + * This package is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED + * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
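dev_init() now has to bring up two submodules and unwind whichever one succeeded if the other fails. A generic sketch of that init-and-rollback shape, with placeholder names standing in for cmm_init/dmm_init and their exit counterparts:

#include <stdbool.h>
#include <stdio.h>

static bool mod_a_init(void) { return true; }   /* placeholder for cmm_init() */
static bool mod_b_init(void) { return true; }   /* placeholder for dmm_init() */
static void mod_a_exit(void) { }
static void mod_b_exit(void) { }

static bool bring_up(void)
{
	bool a = mod_a_init();
	bool b = mod_b_init();

	if (a && b)
		return true;

	/* roll back only what actually came up */
	if (a)
		mod_a_exit();
	if (b)
		mod_b_exit();
	return false;
}

int main(void)
{
	printf("bring_up: %s\n", bring_up() ? "ok" : "failed");
	return 0;
}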
+ */ +#include <linux/types.h> + +/* ----------------------------------- Host OS */ +#include <dspbridge/host_os.h> + +/* ----------------------------------- DSP/BIOS Bridge */ +#include <dspbridge/dbdefs.h> + +/* ----------------------------------- Trace & Debug */ +#include <dspbridge/dbc.h> + +/* ----------------------------------- OS Adaptation Layer */ +#include <dspbridge/sync.h> + +/* ----------------------------------- Platform Manager */ +#include <dspbridge/dev.h> +#include <dspbridge/proc.h> + +/* ----------------------------------- This */ +#include <dspbridge/dmm.h> + +/* ----------------------------------- Defines, Data Structures, Typedefs */ +#define DMM_ADDR_VIRTUAL(a) \ + (((struct map_page *)(a) - virtual_mapping_table) * PG_SIZE4K +\ + dyn_mem_map_beg) +#define DMM_ADDR_TO_INDEX(a) (((a) - dyn_mem_map_beg) / PG_SIZE4K) + +/* DMM Mgr */ +struct dmm_object { + /* Dmm Lock is used to serialize access mem manager for + * multi-threads. */ + spinlock_t dmm_lock; /* Lock to access dmm mgr */ +}; + +/* ----------------------------------- Globals */ +static u32 refs; /* module reference count */ +struct map_page { + u32 region_size:15; + u32 mapped_size:15; + u32 reserved:1; + u32 mapped:1; +}; + +/* Create the free list */ +static struct map_page *virtual_mapping_table; +static u32 free_region; /* The index of free region */ +static u32 free_size; +static u32 dyn_mem_map_beg; /* The Beginning of dynamic memory mapping */ +static u32 table_size; /* The size of virt and phys pages tables */ + +/* ----------------------------------- Function Prototypes */ +static struct map_page *get_region(u32 addr); +static struct map_page *get_free_region(u32 len); +static struct map_page *get_mapped_region(u32 addrs); + +/* ======== dmm_create_tables ======== + * Purpose: + * Create table to hold the information of physical address + * the buffer pages that is passed by the user, and the table + * to hold the information of the virtual memory that is reserved + * for DSP. + */ +int dmm_create_tables(struct dmm_object *dmm_mgr, u32 addr, u32 size) +{ + struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; + int status = 0; + + status = dmm_delete_tables(dmm_obj); + if (!status) { + dyn_mem_map_beg = addr; + table_size = PG_ALIGN_HIGH(size, PG_SIZE4K) / PG_SIZE4K; + /* Create the free list */ + virtual_mapping_table = __vmalloc(table_size * + sizeof(struct map_page), GFP_KERNEL | + __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); + if (virtual_mapping_table == NULL) + status = -ENOMEM; + else { + /* On successful allocation, + * all entries are zero ('free') */ + free_region = 0; + free_size = table_size * PG_SIZE4K; + virtual_mapping_table[0].region_size = table_size; + } + } + + if (status) + pr_err("%s: failure, status 0x%x\n", __func__, status); + + return status; +} + +/* + * ======== dmm_create ======== + * Purpose: + * Create a dynamic memory manager object. + */ +int dmm_create(struct dmm_object **dmm_manager, + struct dev_object *hdev_obj, + const struct dmm_mgrattrs *mgr_attrts) +{ + struct dmm_object *dmm_obj = NULL; + int status = 0; + DBC_REQUIRE(refs > 0); + DBC_REQUIRE(dmm_manager != NULL); + + *dmm_manager = NULL; + /* create, zero, and tag a cmm mgr object */ + dmm_obj = kzalloc(sizeof(struct dmm_object), GFP_KERNEL); + if (dmm_obj != NULL) { + spin_lock_init(&dmm_obj->dmm_lock); + *dmm_manager = dmm_obj; + } else { + status = -ENOMEM; + } + + return status; +} + +/* + * ======== dmm_destroy ======== + * Purpose: + * Release the communication memory manager resources. 
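The new dmm.c tracks the DSP virtual pool with one map_page entry per 4 KiB page; DMM_ADDR_TO_INDEX and DMM_ADDR_VIRTUAL merely translate between a pool address and a table slot. A self-contained sketch of that arithmetic (the pool base and table size are made-up values, not the driver's):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PG_SIZE4K 4096u

/* same packing idea as the driver's struct map_page */
struct map_page {
	uint32_t region_size:15;   /* in 4 KiB pages */
	uint32_t mapped_size:15;
	uint32_t reserved:1;
	uint32_t mapped:1;
};

static struct map_page table[16];
static uint32_t pool_base = 0x20000000u;      /* plays the role of dyn_mem_map_beg */

static uint32_t addr_to_index(uint32_t addr)
{
	return (addr - pool_base) / PG_SIZE4K;
}

static uint32_t index_to_addr(const struct map_page *p)
{
	return (uint32_t)(p - table) * PG_SIZE4K + pool_base;
}

int main(void)
{
	uint32_t addr = pool_base + 5 * PG_SIZE4K;

	assert(addr_to_index(addr) == 5);
	assert(index_to_addr(&table[5]) == addr);
	printf("index %u maps back to 0x%x\n",
	       (unsigned)addr_to_index(addr), (unsigned)index_to_addr(&table[5]));
	return 0;
}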
+ */ +int dmm_destroy(struct dmm_object *dmm_mgr) +{ + struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; + int status = 0; + + DBC_REQUIRE(refs > 0); + if (dmm_mgr) { + status = dmm_delete_tables(dmm_obj); + if (!status) + kfree(dmm_obj); + } else + status = -EFAULT; + + return status; +} + +/* + * ======== dmm_delete_tables ======== + * Purpose: + * Delete DMM Tables. + */ +int dmm_delete_tables(struct dmm_object *dmm_mgr) +{ + int status = 0; + + DBC_REQUIRE(refs > 0); + /* Delete all DMM tables */ + if (dmm_mgr) + vfree(virtual_mapping_table); + else + status = -EFAULT; + return status; +} + +/* + * ======== dmm_exit ======== + * Purpose: + * Discontinue usage of module; free resources when reference count + * reaches 0. + */ +void dmm_exit(void) +{ + DBC_REQUIRE(refs > 0); + + refs--; +} + +/* + * ======== dmm_get_handle ======== + * Purpose: + * Return the dynamic memory manager object for this device. + * This is typically called from the client process. + */ +int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager) +{ + int status = 0; + struct dev_object *hdev_obj; + + DBC_REQUIRE(refs > 0); + DBC_REQUIRE(dmm_manager != NULL); + if (hprocessor != NULL) + status = proc_get_dev_object(hprocessor, &hdev_obj); + else + hdev_obj = dev_get_first(); /* default */ + + if (!status) + status = dev_get_dmm_mgr(hdev_obj, dmm_manager); + + return status; +} + +/* + * ======== dmm_init ======== + * Purpose: + * Initializes private state of DMM module. + */ +bool dmm_init(void) +{ + bool ret = true; + + DBC_REQUIRE(refs >= 0); + + if (ret) + refs++; + + DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0))); + + virtual_mapping_table = NULL; + table_size = 0; + + return ret; +} + +/* + * ======== dmm_map_memory ======== + * Purpose: + * Add a mapping block to the reserved chunk. DMM assumes that this block + * will be mapped in the DSP/IVA's address space. DMM returns an error if a + * mapping overlaps another one. This function stores the info that will be + * required later while unmapping the block. + */ +int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 size) +{ + struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; + struct map_page *chunk; + int status = 0; + + spin_lock(&dmm_obj->dmm_lock); + /* Find the Reserved memory chunk containing the DSP block to + * be mapped */ + chunk = (struct map_page *)get_region(addr); + if (chunk != NULL) { + /* Mark the region 'mapped', leave the 'reserved' info as-is */ + chunk->mapped = true; + chunk->mapped_size = (size / PG_SIZE4K); + } else + status = -ENOENT; + spin_unlock(&dmm_obj->dmm_lock); + + dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, " + "chunk %p", __func__, dmm_mgr, addr, size, status, chunk); + + return status; +} + +/* + * ======== dmm_reserve_memory ======== + * Purpose: + * Reserve a chunk of virtually contiguous DSP/IVA address space. + */ +int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size, + u32 *prsv_addr) +{ + int status = 0; + struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; + struct map_page *node; + u32 rsv_addr = 0; + u32 rsv_size = 0; + + spin_lock(&dmm_obj->dmm_lock); + + /* Try to get a DSP chunk from the free list */ + node = get_free_region(size); + if (node != NULL) { + /* DSP chunk of given size is available. 
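dmm_map_memory() and dmm_un_map_memory() are pure bookkeeping: they record how many 4 KiB pages of a previously reserved chunk are mapped, while the actual MMU programming happens through pfn_brd_mem_map. A simplified model of that marking step (unlike the driver, this sketch also insists the chunk was reserved first, which dmm.c leaves to its callers):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PG_SIZE4K 4096u

struct map_page {                  /* trimmed copy of the driver's entry */
	uint32_t mapped_size;      /* in 4 KiB pages */
	bool reserved;
	bool mapped;
};

static int map_chunk(struct map_page *chunk, uint32_t size)
{
	if (!chunk || !chunk->reserved)
		return -ENOENT;        /* nothing reserved at that address */

	chunk->mapped = true;
	chunk->mapped_size = size / PG_SIZE4K;
	return 0;
}

static int unmap_chunk(struct map_page *chunk, uint32_t *size_out)
{
	if (!chunk || !chunk->mapped)
		return -ENOENT;

	*size_out = chunk->mapped_size * PG_SIZE4K;   /* caller needs this to undo the MMU entry */
	chunk->mapped = false;
	chunk->mapped_size = 0;
	return 0;
}

int main(void)
{
	struct map_page chunk = { .reserved = true };
	uint32_t sz;

	map_chunk(&chunk, 3 * PG_SIZE4K);
	unmap_chunk(&chunk, &sz);
	printf("unmapped %u bytes\n", (unsigned)sz);
	return 0;
}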
*/ + rsv_addr = DMM_ADDR_VIRTUAL(node); + /* Calculate the number entries to use */ + rsv_size = size / PG_SIZE4K; + if (rsv_size < node->region_size) { + /* Mark remainder of free region */ + node[rsv_size].mapped = false; + node[rsv_size].reserved = false; + node[rsv_size].region_size = + node->region_size - rsv_size; + node[rsv_size].mapped_size = 0; + } + /* get_region will return first fit chunk. But we only use what + is requested. */ + node->mapped = false; + node->reserved = true; + node->region_size = rsv_size; + node->mapped_size = 0; + /* Return the chunk's starting address */ + *prsv_addr = rsv_addr; + } else + /*dSP chunk of given size is not available */ + status = -ENOMEM; + + spin_unlock(&dmm_obj->dmm_lock); + + dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, " + "rsv_addr %x, rsv_size %x\n", __func__, dmm_mgr, size, + prsv_addr, status, rsv_addr, rsv_size); + + return status; +} + +/* + * ======== dmm_un_map_memory ======== + * Purpose: + * Remove the mapped block from the reserved chunk. + */ +int dmm_un_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 *psize) +{ + struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; + struct map_page *chunk; + int status = 0; + + spin_lock(&dmm_obj->dmm_lock); + chunk = get_mapped_region(addr); + if (chunk == NULL) + status = -ENOENT; + + if (!status) { + /* Unmap the region */ + *psize = chunk->mapped_size * PG_SIZE4K; + chunk->mapped = false; + chunk->mapped_size = 0; + } + spin_unlock(&dmm_obj->dmm_lock); + + dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, " + "chunk %p\n", __func__, dmm_mgr, addr, psize, status, chunk); + + return status; +} + +/* + * ======== dmm_un_reserve_memory ======== + * Purpose: + * Free a chunk of reserved DSP/IVA address space. + */ +int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, u32 rsv_addr) +{ + struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr; + struct map_page *chunk; + u32 i; + int status = 0; + u32 chunk_size; + + spin_lock(&dmm_obj->dmm_lock); + + /* Find the chunk containing the reserved address */ + chunk = get_mapped_region(rsv_addr); + if (chunk == NULL) + status = -ENOENT; + + if (!status) { + /* Free all the mapped pages for this reserved region */ + i = 0; + while (i < chunk->region_size) { + if (chunk[i].mapped) { + /* Remove mapping from the page tables. */ + chunk_size = chunk[i].mapped_size; + /* Clear the mapping flags */ + chunk[i].mapped = false; + chunk[i].mapped_size = 0; + i += chunk_size; + } else + i++; + } + /* Clear the flags (mark the region 'free') */ + chunk->reserved = false; + /* NOTE: We do NOT coalesce free regions here. 
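dmm_reserve_memory() carves the requested pages off the front of the free region returned by get_free_region() and turns the remainder into a new free entry; dmm_un_reserve_memory() later just clears the flags. The split step in isolation, with a toy table:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct region {                      /* simplified map_page: sizes in 4 KiB pages */
	uint32_t region_size;
	bool reserved;
};

/* carve rsv_pages off the front of the free region at "node" */
static void split_and_reserve(struct region *node, uint32_t rsv_pages)
{
	if (rsv_pages < node->region_size) {
		struct region *rest = node + rsv_pages;   /* remainder entry, like node[rsv_size] */

		rest->reserved = false;
		rest->region_size = node->region_size - rsv_pages;
	}
	node->reserved = true;
	node->region_size = rsv_pages;
}

int main(void)
{
	struct region table[8] = { { .region_size = 8 } };

	split_and_reserve(&table[0], 3);
	printf("reserved %u pages, free remainder %u pages at index 3\n",
	       (unsigned)table[0].region_size, (unsigned)table[3].region_size);
	return 0;
}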
+ * Free regions are coalesced in get_region(), as it traverses + *the whole mapping table + */ + } + spin_unlock(&dmm_obj->dmm_lock); + + dev_dbg(bridge, "%s: dmm_mgr %p, rsv_addr %x\n\tstatus %x chunk %p", + __func__, dmm_mgr, rsv_addr, status, chunk); + + return status; +} + +/* + * ======== get_region ======== + * Purpose: + * Returns a region containing the specified memory region + */ +static struct map_page *get_region(u32 addr) +{ + struct map_page *curr_region = NULL; + u32 i = 0; + + if (virtual_mapping_table != NULL) { + /* find page mapped by this address */ + i = DMM_ADDR_TO_INDEX(addr); + if (i < table_size) + curr_region = virtual_mapping_table + i; + } + + dev_dbg(bridge, "%s: curr_region %p, free_region %d, free_size %d\n", + __func__, curr_region, free_region, free_size); + return curr_region; +} + +/* + * ======== get_free_region ======== + * Purpose: + * Returns the requested free region + */ +static struct map_page *get_free_region(u32 len) +{ + struct map_page *curr_region = NULL; + u32 i = 0; + u32 region_size = 0; + u32 next_i = 0; + + if (virtual_mapping_table == NULL) + return curr_region; + if (len > free_size) { + /* Find the largest free region + * (coalesce during the traversal) */ + while (i < table_size) { + region_size = virtual_mapping_table[i].region_size; + next_i = i + region_size; + if (virtual_mapping_table[i].reserved == false) { + /* Coalesce, if possible */ + if (next_i < table_size && + virtual_mapping_table[next_i].reserved + == false) { + virtual_mapping_table[i].region_size += + virtual_mapping_table + [next_i].region_size; + continue; + } + region_size *= PG_SIZE4K; + if (region_size > free_size) { + free_region = i; + free_size = region_size; + } + } + i = next_i; + } + } + if (len <= free_size) { + curr_region = virtual_mapping_table + free_region; + free_region += (len / PG_SIZE4K); + free_size -= len; + } + return curr_region; +} + +/* + * ======== get_mapped_region ======== + * Purpose: + * Returns the requestedmapped region + */ +static struct map_page *get_mapped_region(u32 addrs) +{ + u32 i = 0; + struct map_page *curr_region = NULL; + + if (virtual_mapping_table == NULL) + return curr_region; + + i = DMM_ADDR_TO_INDEX(addrs); + if (i < table_size && (virtual_mapping_table[i].mapped || + virtual_mapping_table[i].reserved)) + curr_region = virtual_mapping_table + i; + return curr_region; +} + +#ifdef DSP_DMM_DEBUG +u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr) +{ + struct map_page *curr_node = NULL; + u32 i; + u32 freemem = 0; + u32 bigsize = 0; + + spin_lock(&dmm_mgr->dmm_lock); + + if (virtual_mapping_table != NULL) { + for (i = 0; i < table_size; i += + virtual_mapping_table[i].region_size) { + curr_node = virtual_mapping_table + i; + if (curr_node->reserved) { + /*printk("RESERVED size = 0x%x, " + "Map size = 0x%x\n", + (curr_node->region_size * PG_SIZE4K), + (curr_node->mapped == false) ? 
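get_free_region() merges neighbouring free regions while it scans and remembers the largest one, so the cached free_region/free_size pair can satisfy later requests without another walk. A reduced version of that merge-while-scanning loop (the table contents are arbitrary test data):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct region {
	uint32_t region_size;   /* in pages; meaningful only at the head of a region */
	bool reserved;
};

/* coalesce adjacent free regions and return the index of the largest one */
static uint32_t largest_free(struct region *tbl, uint32_t nslots, uint32_t *best_pages)
{
	uint32_t i = 0, best = 0;

	*best_pages = 0;
	while (i < nslots) {
		uint32_t next = i + tbl[i].region_size;

		if (!tbl[i].reserved) {
			if (next < nslots && !tbl[next].reserved) {
				tbl[i].region_size += tbl[next].region_size;
				continue;        /* re-check the grown region */
			}
			if (tbl[i].region_size > *best_pages) {
				*best_pages = tbl[i].region_size;
				best = i;
			}
		}
		i = next;
	}
	return best;
}

int main(void)
{
	struct region tbl[6] = {
		{ 2, false }, { 0 }, { 1, true }, { 2, false }, { 0 }, { 1, false },
	};
	uint32_t pages;
	uint32_t idx = largest_free(tbl, 6, &pages);

	printf("largest free region: %u pages at index %u\n",
	       (unsigned)pages, (unsigned)idx);   /* 3 pages at index 3 */
	return 0;
}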
0 : + (curr_node->mapped_size * PG_SIZE4K)); + */ + } else { +/* printk("UNRESERVED size = 0x%x\n", + (curr_node->region_size * PG_SIZE4K)); + */ + freemem += (curr_node->region_size * PG_SIZE4K); + if (curr_node->region_size > bigsize) + bigsize = curr_node->region_size; + } + } + } + spin_unlock(&dmm_mgr->dmm_lock); + printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n", + freemem / (1024 * 1024)); + printk(KERN_INFO "Total DSP VA USED memory= %d Mbytes \n", + (((table_size * PG_SIZE4K) - freemem)) / (1024 * 1024)); + printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes \n\n", + (bigsize * PG_SIZE4K / (1024 * 1024))); + + return 0; +} +#endif diff --git a/drivers/staging/tidspbridge/pmgr/dspapi.c b/drivers/staging/tidspbridge/pmgr/dspapi.c index 981551ce4d7..86ca785f191 100644 --- a/drivers/staging/tidspbridge/pmgr/dspapi.c +++ b/drivers/staging/tidspbridge/pmgr/dspapi.c @@ -993,10 +993,27 @@ u32 procwrap_register_notify(union trapped_args *args, void *pr_ctxt) /* * ======== procwrap_reserve_memory ======== */ -u32 __deprecated procwrap_reserve_memory(union trapped_args *args, - void *pr_ctxt) +u32 procwrap_reserve_memory(union trapped_args *args, void *pr_ctxt) { - return 0; + int status; + void *prsv_addr; + void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor; + + if ((args->args_proc_rsvmem.ul_size <= 0) || + (args->args_proc_rsvmem.ul_size & (PG_SIZE4K - 1)) != 0) + return -EINVAL; + + status = proc_reserve_memory(hprocessor, + args->args_proc_rsvmem.ul_size, &prsv_addr, + pr_ctxt); + if (!status) { + if (put_user(prsv_addr, args->args_proc_rsvmem.pp_rsv_addr)) { + status = -EINVAL; + proc_un_reserve_memory(args->args_proc_rsvmem. + hprocessor, prsv_addr, pr_ctxt); + } + } + return status; } /* @@ -1025,10 +1042,15 @@ u32 procwrap_un_map(union trapped_args *args, void *pr_ctxt) /* * ======== procwrap_un_reserve_memory ======== */ -u32 __deprecated procwrap_un_reserve_memory(union trapped_args *args, - void *pr_ctxt) +u32 procwrap_un_reserve_memory(union trapped_args *args, void *pr_ctxt) { - return 0; + int status; + void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor; + + status = proc_un_reserve_memory(hprocessor, + args->args_proc_unrsvmem.prsv_addr, + pr_ctxt); + return status; } /* diff --git a/drivers/staging/tidspbridge/rmgr/drv.c b/drivers/staging/tidspbridge/rmgr/drv.c index 91cc168516e..81b1b901355 100644 --- a/drivers/staging/tidspbridge/rmgr/drv.c +++ b/drivers/staging/tidspbridge/rmgr/drv.c @@ -146,6 +146,7 @@ int drv_remove_all_dmm_res_elements(void *process_ctxt) struct process_context *ctxt = (struct process_context *)process_ctxt; int status = 0; struct dmm_map_object *temp_map, *map_obj; + struct dmm_rsv_object *temp_rsv, *rsv_obj; /* Free DMM mapped memory resources */ list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) { @@ -155,6 +156,16 @@ int drv_remove_all_dmm_res_elements(void *process_ctxt) pr_err("%s: proc_un_map failed!" " status = 0x%xn", __func__, status); } + + /* Free DMM reserved memory resources */ + list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) { + status = proc_un_reserve_memory(ctxt->hprocessor, (void *) + rsv_obj->dsp_reserved_addr, + ctxt); + if (status) + pr_err("%s: proc_un_reserve_memory failed!" 
+ " status = 0x%xn", __func__, status); + } return status; } @@ -732,6 +743,7 @@ static int request_bridge_resources(struct cfg_hostres *res) host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE); dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]); dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]); + dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base); /* for 24xx base port is not mapping the mamory for DSP * internal memory TODO Do a ioremap here */ @@ -785,6 +797,8 @@ int drv_request_bridge_res_dsp(void **phost_resources) OMAP_PER_PRM_SIZE); host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE, OMAP_CORE_PRM_SIZE); + host_res->dw_dmmu_base = ioremap(OMAP_DMMU_BASE, + OMAP_DMMU_SIZE); dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]); @@ -796,6 +810,7 @@ int drv_request_bridge_res_dsp(void **phost_resources) host_res->dw_mem_base[3]); dev_dbg(bridge, "dw_mem_base[4] 0x%x\n", host_res->dw_mem_base[4]); + dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base); shm_size = drv_datap->shm_size; if (shm_size >= 0x10000) { diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c index 34be43fec04..324fcdffb3b 100644 --- a/drivers/staging/tidspbridge/rmgr/drv_interface.c +++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c @@ -509,6 +509,8 @@ static int bridge_open(struct inode *ip, struct file *filp) pr_ctxt->res_state = PROC_RES_ALLOCATED; spin_lock_init(&pr_ctxt->dmm_map_lock); INIT_LIST_HEAD(&pr_ctxt->dmm_map_list); + spin_lock_init(&pr_ctxt->dmm_rsv_lock); + INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list); pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL); if (pr_ctxt->node_id) { diff --git a/drivers/staging/tidspbridge/rmgr/node.c b/drivers/staging/tidspbridge/rmgr/node.c index a660247f527..1562f3c1281 100644 --- a/drivers/staging/tidspbridge/rmgr/node.c +++ b/drivers/staging/tidspbridge/rmgr/node.c @@ -56,6 +56,7 @@ /* ----------------------------------- This */ #include <dspbridge/nodepriv.h> #include <dspbridge/node.h> +#include <dspbridge/dmm.h> /* Static/Dynamic Loader includes */ #include <dspbridge/dbll.h> @@ -316,6 +317,10 @@ int node_allocate(struct proc_object *hprocessor, u32 mapped_addr = 0; u32 map_attrs = 0x0; struct dsp_processorstate proc_state; +#ifdef DSP_DMM_DEBUG + struct dmm_object *dmm_mgr; + struct proc_object *p_proc_object = (struct proc_object *)hprocessor; +#endif void *node_res; @@ -425,12 +430,34 @@ int node_allocate(struct proc_object *hprocessor, if (status) goto func_cont; + status = proc_reserve_memory(hprocessor, + pnode->create_args.asa.task_arg_obj. + heap_size + PAGE_SIZE, + (void **)&(pnode->create_args.asa. + task_arg_obj.udsp_heap_res_addr), + pr_ctxt); + if (status) { + pr_err("%s: Failed to reserve memory for heap: 0x%x\n", + __func__, status); + goto func_cont; + } +#ifdef DSP_DMM_DEBUG + status = dmm_get_handle(p_proc_object, &dmm_mgr); + if (!dmm_mgr) { + status = DSP_EHANDLE; + goto func_cont; + } + + dmm_mem_map_dump(dmm_mgr); +#endif + map_attrs |= DSP_MAPLITTLEENDIAN; map_attrs |= DSP_MAPELEMSIZE32; map_attrs |= DSP_MAPVIRTUALADDR; status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr, pnode->create_args.asa.task_arg_obj.heap_size, - NULL, (void **)&mapped_addr, map_attrs, + (void *)pnode->create_args.asa.task_arg_obj. 
+ udsp_heap_res_addr, (void **)&mapped_addr, map_attrs, pr_ctxt); if (status) pr_err("%s: Failed to map memory for Heap: 0x%x\n", @@ -2484,7 +2511,11 @@ static void delete_node(struct node_object *hnode, struct stream_chnl stream; struct node_msgargs node_msg_args; struct node_taskargs task_arg_obj; - +#ifdef DSP_DMM_DEBUG + struct dmm_object *dmm_mgr; + struct proc_object *p_proc_object = + (struct proc_object *)hnode->hprocessor; +#endif int status; if (!hnode) goto func_end; @@ -2545,6 +2576,19 @@ static void delete_node(struct node_object *hnode, status = proc_un_map(hnode->hprocessor, (void *) task_arg_obj.udsp_heap_addr, pr_ctxt); + + status = proc_un_reserve_memory(hnode->hprocessor, + (void *) + task_arg_obj. + udsp_heap_res_addr, + pr_ctxt); +#ifdef DSP_DMM_DEBUG + status = dmm_get_handle(p_proc_object, &dmm_mgr); + if (dmm_mgr) + dmm_mem_map_dump(dmm_mgr); + else + status = DSP_EHANDLE; +#endif } } if (node_type != NODE_MESSAGE) { diff --git a/drivers/staging/tidspbridge/rmgr/proc.c b/drivers/staging/tidspbridge/rmgr/proc.c index 7a15a02efed..b47d7aa747b 100644 --- a/drivers/staging/tidspbridge/rmgr/proc.c +++ b/drivers/staging/tidspbridge/rmgr/proc.c @@ -39,6 +39,7 @@ #include <dspbridge/cod.h> #include <dspbridge/dev.h> #include <dspbridge/procpriv.h> +#include <dspbridge/dmm.h> /* ----------------------------------- Resource Manager */ #include <dspbridge/mgr.h> @@ -51,7 +52,6 @@ #include <dspbridge/msg.h> #include <dspbridge/dspioctl.h> #include <dspbridge/drv.h> -#include <_tiomap.h> /* ----------------------------------- This */ #include <dspbridge/proc.h> @@ -151,21 +151,34 @@ static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt, return map_obj; } +static int match_exact_map_obj(struct dmm_map_object *map_obj, + u32 dsp_addr, u32 size) +{ + if (map_obj->dsp_addr == dsp_addr && map_obj->size != size) + pr_err("%s: addr match (0x%x), size don't (0x%x != 0x%x)\n", + __func__, dsp_addr, map_obj->size, size); + + return map_obj->dsp_addr == dsp_addr && + map_obj->size == size; +} + static void remove_mapping_information(struct process_context *pr_ctxt, - u32 dsp_addr) + u32 dsp_addr, u32 size) { struct dmm_map_object *map_obj; - pr_debug("%s: looking for virt 0x%x\n", __func__, dsp_addr); + pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__, + dsp_addr, size); spin_lock(&pr_ctxt->dmm_map_lock); list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) { - pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x\n", + pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n", __func__, map_obj->mpu_addr, - map_obj->dsp_addr); + map_obj->dsp_addr, + map_obj->size); - if (map_obj->dsp_addr == dsp_addr) { + if (match_exact_map_obj(map_obj, dsp_addr, size)) { pr_debug("%s: match, deleting map info\n", __func__); list_del(&map_obj->link); kfree(map_obj->dma_info.sg); @@ -1077,6 +1090,7 @@ int proc_load(void *hprocessor, const s32 argc_index, s32 cnew_envp; /* " " in new_envp[] */ s32 nproc_id = 0; /* Anticipate MP version. 
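remove_mapping_information() now requires both the DSP address and the size to match before it drops a tracking entry, so an unmap cannot tear down a different mapping that merely starts at the same address. The predicate, modelled on match_exact_map_obj() with a stand-in entry type:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dmm_map_entry {             /* stand-in for struct dmm_map_object */
	uint32_t dsp_addr;
	uint32_t size;
};

static bool match_exact(const struct dmm_map_entry *e, uint32_t dsp_addr, uint32_t size)
{
	if (e->dsp_addr == dsp_addr && e->size != size)
		fprintf(stderr, "addr matches 0x%x but size differs (0x%x != 0x%x)\n",
			(unsigned)dsp_addr, (unsigned)e->size, (unsigned)size);

	return e->dsp_addr == dsp_addr && e->size == size;
}

int main(void)
{
	struct dmm_map_entry e = { .dsp_addr = 0x20001000u, .size = 0x2000u };

	printf("exact: %d, addr-only: %d\n",
	       match_exact(&e, 0x20001000u, 0x2000u),
	       match_exact(&e, 0x20001000u, 0x1000u));
	return 0;
}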
*/ struct dcd_manager *hdcd_handle; + struct dmm_object *dmm_mgr; u32 dw_ext_end; u32 proc_id; int brd_state; @@ -1267,6 +1281,25 @@ int proc_load(void *hprocessor, const s32 argc_index, if (!status) status = cod_get_sym_value(cod_mgr, EXTEND, &dw_ext_end); + + /* Reset DMM structs and add an initial free chunk */ + if (!status) { + status = + dev_get_dmm_mgr(p_proc_object->hdev_obj, + &dmm_mgr); + if (dmm_mgr) { + /* Set dw_ext_end to DMM START u8 + * address */ + dw_ext_end = + (dw_ext_end + 1) * DSPWORDSIZE; + /* DMM memory is from EXT_END */ + status = dmm_create_tables(dmm_mgr, + dw_ext_end, + DMMPOOLSIZE); + } else { + status = -EFAULT; + } + } } } /* Restore the original argv[0] */ @@ -1319,10 +1352,12 @@ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size, { u32 va_align; u32 pa_align; + struct dmm_object *dmm_mgr; u32 size_align; int status = 0; struct proc_object *p_proc_object = (struct proc_object *)hprocessor; struct dmm_map_object *map_obj; + u32 tmp_addr = 0; #ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK if ((ul_map_attr & BUFMODE_MASK) != RBUF) { @@ -1347,30 +1382,33 @@ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size, } /* Critical section */ mutex_lock(&proc_lock); + dmm_get_handle(p_proc_object, &dmm_mgr); + if (dmm_mgr) + status = dmm_map_memory(dmm_mgr, va_align, size_align); + else + status = -EFAULT; /* Add mapping to the page tables. */ if (!status) { + + /* Mapped address = MSB of VA | LSB of PA */ + tmp_addr = (va_align | ((u32) pmpu_addr & (PG_SIZE4K - 1))); /* mapped memory resource tracking */ - map_obj = add_mapping_info(pr_ctxt, pa_align, va_align, + map_obj = add_mapping_info(pr_ctxt, pa_align, tmp_addr, size_align); - if (!map_obj) { + if (!map_obj) status = -ENOMEM; - } else { - va_align = user_to_dsp_map( - p_proc_object->hbridge_context->dsp_mmu, - pa_align, va_align, size_align, - map_obj->pages); - if (IS_ERR_VALUE(va_align)) - status = (int)va_align; - } + else + status = (*p_proc_object->intf_fxns->pfn_brd_mem_map) + (p_proc_object->hbridge_context, pa_align, va_align, + size_align, ul_map_attr, map_obj->pages); } if (!status) { /* Mapped address = MSB of VA | LSB of PA */ - map_obj->dsp_addr = (va_align | - ((u32)pmpu_addr & (PG_SIZE4K - 1))); - *pp_map_addr = (void *)map_obj->dsp_addr; + *pp_map_addr = (void *) tmp_addr; } else { - remove_mapping_information(pr_ctxt, va_align); + remove_mapping_information(pr_ctxt, tmp_addr, size_align); + dmm_un_map_memory(dmm_mgr, va_align, &size_align); } mutex_unlock(&proc_lock); @@ -1463,6 +1501,55 @@ func_end: } /* + * ======== proc_reserve_memory ======== + * Purpose: + * Reserve a virtually contiguous region of DSP address space. 
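proc_map() hands the MMU page-aligned addresses and then reports a DSP address whose upper bits come from the aligned virtual address and whose low 12 bits are the user pointer's offset inside its page ("MSB of VA | LSB of PA"). The alignment and recombination arithmetic on its own, with arbitrary example addresses:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PG_SIZE4K        4096u
#define PG_ALIGN_LOW(a)  ((a) & ~(PG_SIZE4K - 1))                     /* round down */
#define PG_ALIGN_HIGH(a) (((a) + PG_SIZE4K - 1) & ~(PG_SIZE4K - 1))   /* round up */

int main(void)
{
	uint32_t mpu_addr = 0x4001A234u;    /* arbitrary user buffer address */
	uint32_t dsp_va   = 0x20004000u;    /* page-aligned DSP virtual address */

	uint32_t pa_align = PG_ALIGN_LOW(mpu_addr);
	uint32_t offset   = mpu_addr & (PG_SIZE4K - 1);

	/* "Mapped address = MSB of VA | LSB of PA" from the patch */
	uint32_t mapped   = dsp_va | offset;

	assert(pa_align == 0x4001A000u);
	assert(PG_ALIGN_HIGH(mpu_addr) == 0x4001B000u);   /* used when sizing the DMM table */
	assert(mapped == 0x20004234u);
	printf("pa_align 0x%x, mapped 0x%x\n", (unsigned)pa_align, (unsigned)mapped);
	return 0;
}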
+ */ +int proc_reserve_memory(void *hprocessor, u32 ul_size, + void **pp_rsv_addr, + struct process_context *pr_ctxt) +{ + struct dmm_object *dmm_mgr; + int status = 0; + struct proc_object *p_proc_object = (struct proc_object *)hprocessor; + struct dmm_rsv_object *rsv_obj; + + if (!p_proc_object) { + status = -EFAULT; + goto func_end; + } + + status = dmm_get_handle(p_proc_object, &dmm_mgr); + if (!dmm_mgr) { + status = -EFAULT; + goto func_end; + } + + status = dmm_reserve_memory(dmm_mgr, ul_size, (u32 *) pp_rsv_addr); + if (status != 0) + goto func_end; + + /* + * A successful reserve should be followed by insertion of rsv_obj + * into dmm_rsv_list, so that reserved memory resource tracking + * remains uptodate + */ + rsv_obj = kmalloc(sizeof(struct dmm_rsv_object), GFP_KERNEL); + if (rsv_obj) { + rsv_obj->dsp_reserved_addr = (u32) *pp_rsv_addr; + spin_lock(&pr_ctxt->dmm_rsv_lock); + list_add(&rsv_obj->link, &pr_ctxt->dmm_rsv_list); + spin_unlock(&pr_ctxt->dmm_rsv_lock); + } + +func_end: + dev_dbg(bridge, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p " + "status 0x%x\n", __func__, hprocessor, + ul_size, pp_rsv_addr, status); + return status; +} + +/* * ======== proc_start ======== * Purpose: * Start a processor running. @@ -1610,7 +1697,9 @@ int proc_un_map(void *hprocessor, void *map_addr, { int status = 0; struct proc_object *p_proc_object = (struct proc_object *)hprocessor; + struct dmm_object *dmm_mgr; u32 va_align; + u32 size_align; va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K); if (!p_proc_object) { @@ -1618,11 +1707,24 @@ int proc_un_map(void *hprocessor, void *map_addr, goto func_end; } + status = dmm_get_handle(hprocessor, &dmm_mgr); + if (!dmm_mgr) { + status = -EFAULT; + goto func_end; + } + /* Critical section */ mutex_lock(&proc_lock); + /* + * Update DMM structures. Get the size to unmap. + * This function returns error if the VA is not mapped + */ + status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align); /* Remove mapping from the page tables. */ - status = user_to_dsp_unmap(p_proc_object->hbridge_context->dsp_mmu, - va_align); + if (!status) { + status = (*p_proc_object->intf_fxns->pfn_brd_mem_un_map) + (p_proc_object->hbridge_context, va_align, size_align); + } mutex_unlock(&proc_lock); if (status) @@ -1633,7 +1735,7 @@ int proc_un_map(void *hprocessor, void *map_addr, * from dmm_map_list, so that mapped memory resource tracking * remains uptodate */ - remove_mapping_information(pr_ctxt, (u32) map_addr); + remove_mapping_information(pr_ctxt, (u32) map_addr, size_align); func_end: dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n", @@ -1642,6 +1744,55 @@ func_end: } /* + * ======== proc_un_reserve_memory ======== + * Purpose: + * Frees a previously reserved region of DSP address space. 
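A successful reserve is also recorded on the per-process dmm_rsv_list under dmm_rsv_lock, so everything a process reserved can be released when it exits; unreserve does the reverse lookup and delete. A userspace sketch of that bookkeeping, with a pthread mutex standing in for the kernel spinlock and a plain singly linked list instead of list_head:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct rsv_obj {                       /* stand-in for struct dmm_rsv_object */
	uint32_t dsp_reserved_addr;
	struct rsv_obj *next;
};

static struct rsv_obj *rsv_list;
static pthread_mutex_t rsv_lock = PTHREAD_MUTEX_INITIALIZER;

static void track_reserve(uint32_t addr)
{
	struct rsv_obj *o = malloc(sizeof(*o));

	if (!o)
		return;                 /* the driver also tolerates a failed tracking alloc */
	o->dsp_reserved_addr = addr;
	pthread_mutex_lock(&rsv_lock);
	o->next = rsv_list;
	rsv_list = o;
	pthread_mutex_unlock(&rsv_lock);
}

static void track_unreserve(uint32_t addr)
{
	struct rsv_obj **p;

	pthread_mutex_lock(&rsv_lock);
	for (p = &rsv_list; *p; p = &(*p)->next) {
		if ((*p)->dsp_reserved_addr == addr) {
			struct rsv_obj *gone = *p;

			*p = gone->next;
			free(gone);
			break;
		}
	}
	pthread_mutex_unlock(&rsv_lock);
}

int main(void)
{
	track_reserve(0x20008000u);
	track_unreserve(0x20008000u);
	printf("list empty: %s\n", rsv_list ? "no" : "yes");
	return 0;
}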
+ */ +int proc_un_reserve_memory(void *hprocessor, void *prsv_addr, + struct process_context *pr_ctxt) +{ + struct dmm_object *dmm_mgr; + int status = 0; + struct proc_object *p_proc_object = (struct proc_object *)hprocessor; + struct dmm_rsv_object *rsv_obj; + + if (!p_proc_object) { + status = -EFAULT; + goto func_end; + } + + status = dmm_get_handle(p_proc_object, &dmm_mgr); + if (!dmm_mgr) { + status = -EFAULT; + goto func_end; + } + + status = dmm_un_reserve_memory(dmm_mgr, (u32) prsv_addr); + if (status != 0) + goto func_end; + + /* + * A successful unreserve should be followed by removal of rsv_obj + * from dmm_rsv_list, so that reserved memory resource tracking + * remains uptodate + */ + spin_lock(&pr_ctxt->dmm_rsv_lock); + list_for_each_entry(rsv_obj, &pr_ctxt->dmm_rsv_list, link) { + if (rsv_obj->dsp_reserved_addr == (u32) prsv_addr) { + list_del(&rsv_obj->link); + kfree(rsv_obj); + break; + } + } + spin_unlock(&pr_ctxt->dmm_rsv_lock); + +func_end: + dev_dbg(bridge, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n", + __func__, hprocessor, prsv_addr, status); + return status; +} + +/* * ======== = proc_monitor ======== == * Purpose: * Place the Processor in Monitor State. This is an internal diff --git a/drivers/staging/tm6000/tm6000-cards.c b/drivers/staging/tm6000/tm6000-cards.c index 664e6038090..b143258f094 100644 --- a/drivers/staging/tm6000/tm6000-cards.c +++ b/drivers/staging/tm6000/tm6000-cards.c @@ -545,7 +545,7 @@ static void tm6000_config_tuner(struct tm6000_core *dev) /* Load tuner module */ v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - NULL, "tuner", dev->tuner_addr, NULL); + "tuner", dev->tuner_addr, NULL); memset(&tun_setup, 0, sizeof(tun_setup)); tun_setup.type = dev->tuner_type; @@ -683,7 +683,7 @@ static int tm6000_init_dev(struct tm6000_core *dev) if (dev->caps.has_tda9874) v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, - NULL, "tvaudio", I2C_ADDR_TDA9874, NULL); + "tvaudio", I2C_ADDR_TDA9874, NULL); /* register and initialize V4L2 */ rc = tm6000_v4l2_register(dev); diff --git a/drivers/staging/tm6000/tm6000-i2c.c b/drivers/staging/tm6000/tm6000-i2c.c index 3e46866dd27..93f625fc852 100644 --- a/drivers/staging/tm6000/tm6000-i2c.c +++ b/drivers/staging/tm6000/tm6000-i2c.c @@ -320,7 +320,6 @@ static struct i2c_algorithm tm6000_algo = { static struct i2c_adapter tm6000_adap_template = { .owner = THIS_MODULE, - .class = I2C_CLASS_TV_ANALOG | I2C_CLASS_TV_DIGITAL, .name = "tm6000", .algo = &tm6000_algo, }; diff --git a/drivers/staging/tm6000/tm6000-video.c b/drivers/staging/tm6000/tm6000-video.c index 9ec82796634..c5690b2a892 100644 --- a/drivers/staging/tm6000/tm6000-video.c +++ b/drivers/staging/tm6000/tm6000-video.c @@ -1032,6 +1032,7 @@ static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *norm) struct tm6000_fh *fh=priv; struct tm6000_core *dev = fh->dev; + dev->norm = *norm; rc = tm6000_init_analog_mode(dev); fh->width = dev->width; diff --git a/drivers/staging/udlfb/udlfb.c b/drivers/staging/udlfb/udlfb.c index 5969e848d29..b7ac1600526 100644 --- a/drivers/staging/udlfb/udlfb.c +++ b/drivers/staging/udlfb/udlfb.c @@ -887,7 +887,7 @@ static int dlfb_ops_open(struct fb_info *info, int user) struct fb_deferred_io *fbdefio; - fbdefio = kmalloc(GFP_KERNEL, sizeof(struct fb_deferred_io)); + fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL); if (fbdefio) { fbdefio->delay = DL_DEFIO_WRITE_DELAY; @@ -1441,7 +1441,7 @@ static struct device_attribute fb_device_attrs[] = { __ATTR_RO(metrics_bytes_identical), 
__ATTR_RO(metrics_bytes_sent), __ATTR_RO(metrics_cpu_kcycles_used), - __ATTR(metrics_reset, S_IWUGO, NULL, metrics_reset_store), + __ATTR(metrics_reset, S_IWUSR, NULL, metrics_reset_store), }; /* diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index e992d5d9e15..7cc3d2407d1 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -1675,13 +1675,14 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { { char essid[IW_ESSID_MAX_SIZE+1]; - if (wrq->u.essid.pointer) + if (wrq->u.essid.pointer) { rc = iwctl_giwessid(dev, NULL, &(wrq->u.essid), essid); if (copy_to_user(wrq->u.essid.pointer, essid, wrq->u.essid.length) ) rc = -EFAULT; + } } break; diff --git a/drivers/staging/westbridge/astoria/api/src/cyasusb.c b/drivers/staging/westbridge/astoria/api/src/cyasusb.c index 5a219701206..7777d9a60a5 100644 --- a/drivers/staging/westbridge/astoria/api/src/cyasusb.c +++ b/drivers/staging/westbridge/astoria/api/src/cyasusb.c @@ -1417,7 +1417,6 @@ cy_as_usb_set_enum_config(cy_as_device_handle handle, */ bus_mask = 0; media_mask = 0; - media_mask = 0; for (bus = 0; bus < CY_AS_MAX_BUSES; bus++) { for (device = 0; device < CY_AS_MAX_STORAGE_DEVICES; device++) { if (config_p->devices_to_enumerate[bus][device] == diff --git a/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c b/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c index f428a7af357..e1851f00be5 100644 --- a/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c +++ b/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c @@ -157,7 +157,7 @@ struct cyasblkdev_blk_data { /* pointer to west bridge block data device superstructure */ static struct cyasblkdev_blk_data *gl_bd; -static DECLARE_MUTEX(open_lock); +static DEFINE_SEMAPHORE(open_lock); /* local forwardd declarationss */ static cy_as_device_handle *cyas_dev_handle; diff --git a/drivers/staging/westbridge/astoria/block/cyasblkdev_queue.c b/drivers/staging/westbridge/astoria/block/cyasblkdev_queue.c index 24e959eca41..0bbb8a3e191 100644 --- a/drivers/staging/westbridge/astoria/block/cyasblkdev_queue.c +++ b/drivers/staging/westbridge/astoria/block/cyasblkdev_queue.c @@ -334,7 +334,7 @@ int cyasblkdev_init_queue(struct cyasblkdev_queue *bq, spinlock_t *lock) init_completion(&bq->thread_complete); init_waitqueue_head(&bq->thread_wq); - init_MUTEX(&bq->thread_sem); + sema_init(&bq->thread_sem, 1); ret = kernel_thread(cyasblkdev_queue_thread, bq, CLONE_KERNEL); if (ret >= 0) { diff --git a/drivers/staging/winbond/sysdef.h b/drivers/staging/winbond/sysdef.h index 9195adf98e1..d0d71f69bc8 100644 --- a/drivers/staging/winbond/sysdef.h +++ b/drivers/staging/winbond/sysdef.h @@ -2,6 +2,9 @@ #ifndef SYS_DEF_H #define SYS_DEF_H + +#include <linux/delay.h> + #define WB_LINUX #define WB_LINUX_WPA_PSK diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c index 4af83d5318f..6a71f52c59b 100644 --- a/drivers/staging/wlan-ng/cfg80211.c +++ b/drivers/staging/wlan-ng/cfg80211.c @@ -139,7 +139,7 @@ exit: } int prism2_add_key(struct wiphy *wiphy, struct net_device *dev, - u8 key_index, const u8 *mac_addr, + u8 key_index, bool pairwise, const u8 *mac_addr, struct key_params *params) { wlandevice_t *wlandev = dev->ml_priv; @@ -198,7 +198,7 @@ exit: } int prism2_get_key(struct wiphy *wiphy, struct net_device *dev, - u8 key_index, const u8 *mac_addr, void *cookie, + u8 key_index, bool pairwise, const u8 *mac_addr, void *cookie, void (*callback)(void *cookie, struct 
key_params*)) { wlandevice_t *wlandev = dev->ml_priv; @@ -227,7 +227,7 @@ int prism2_get_key(struct wiphy *wiphy, struct net_device *dev, } int prism2_del_key(struct wiphy *wiphy, struct net_device *dev, - u8 key_index, const u8 *mac_addr) + u8 key_index, bool pairwise, const u8 *mac_addr) { wlandevice_t *wlandev = dev->ml_priv; u32 did; diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c index aa1792c8429..b7b4a733b46 100644 --- a/drivers/staging/wlan-ng/p80211netdev.c +++ b/drivers/staging/wlan-ng/p80211netdev.c @@ -522,8 +522,8 @@ static int p80211netdev_ethtool(wlandevice_t *wlandev, void __user *useraddr) if (copy_to_user(useraddr, &edata, sizeof(edata))) return -EFAULT; return 0; - } #endif + } return -EOPNOTSUPP; } diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c index 6c574a994d1..6b3cf00b0ff 100644 --- a/drivers/staging/zram/zram_sysfs.c +++ b/drivers/staging/zram/zram_sysfs.c @@ -189,10 +189,10 @@ static ssize_t mem_used_total_show(struct device *dev, return sprintf(buf, "%llu\n", val); } -static DEVICE_ATTR(disksize, S_IRUGO | S_IWUGO, +static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR, disksize_show, disksize_store); static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL); -static DEVICE_ATTR(reset, S_IWUGO, NULL, reset_store); +static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store); static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL); static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL); static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL); diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile new file mode 100644 index 00000000000..c43ef48b1a0 --- /dev/null +++ b/drivers/tty/Makefile @@ -0,0 +1,11 @@ +obj-y += tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o \ + tty_buffer.o tty_port.o tty_mutex.o +obj-$(CONFIG_LEGACY_PTYS) += pty.o +obj-$(CONFIG_UNIX98_PTYS) += pty.o +obj-$(CONFIG_AUDIT) += tty_audit.o +obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o +obj-$(CONFIG_N_HDLC) += n_hdlc.o +obj-$(CONFIG_N_GSM) += n_gsm.o +obj-$(CONFIG_R3964) += n_r3964.o + +obj-y += vt/ diff --git a/drivers/char/n_gsm.c b/drivers/tty/n_gsm.c index 04ef3ef0a42..c5f8e5bda2b 100644 --- a/drivers/char/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -716,8 +716,8 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg) if (msg->len < 128) *--dp = (msg->len << 1) | EA; else { - *--dp = (msg->len >> 6) | EA; - *--dp = (msg->len & 127) << 1; + *--dp = (msg->len >> 7); /* bits 7 - 15 */ + *--dp = (msg->len & 127) << 1; /* bits 0 - 6 */ } } @@ -968,6 +968,8 @@ static void gsm_control_reply(struct gsm_mux *gsm, int cmd, u8 *data, { struct gsm_msg *msg; msg = gsm_data_alloc(gsm, 0, dlen + 2, gsm->ftype); + if (msg == NULL) + return; msg->data[0] = (cmd & 0xFE) << 1 | EA; /* Clear C/R */ msg->data[1] = (dlen << 1) | EA; memcpy(msg->data + 2, data, dlen); @@ -2375,6 +2377,7 @@ static int gsmld_config(struct tty_struct *tty, struct gsm_mux *gsm, gsm->mru = c->mru; gsm->encoding = c->encapsulation; gsm->adaption = c->adaption; + gsm->n2 = c->n2; if (c->i == 1) gsm->ftype = UIH; diff --git a/drivers/char/n_hdlc.c b/drivers/tty/n_hdlc.c index 47d32281032..47d32281032 100644 --- a/drivers/char/n_hdlc.c +++ b/drivers/tty/n_hdlc.c diff --git a/drivers/char/n_r3964.c b/drivers/tty/n_r3964.c index 88dda0c45ee..88dda0c45ee 100644 --- a/drivers/char/n_r3964.c +++ b/drivers/tty/n_r3964.c diff --git a/drivers/char/n_tty.c b/drivers/tty/n_tty.c index 428f4fe0b5f..428f4fe0b5f 100644 --- a/drivers/char/n_tty.c +++ 
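The n_gsm change writes the long form of the 07.10 length field in the right order: below 128 the length fits one octet with the EA bit set, otherwise the first octet carries bits 0-6 shifted up with EA clear and the next octet carries the high bits. Because the driver fills the header backwards with *--dp, the stores look reversed in the source; in wire order the encoding round-trips as in this sketch (the helper names are invented):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define EA 0x01   /* extension bit: 1 = last octet of the field */

/* encode a 07.10 length field in wire order; returns the number of octets used */
static int gsm_encode_len(unsigned int len, uint8_t out[2])
{
	if (len < 128) {
		out[0] = (uint8_t)((len << 1) | EA);
		return 1;
	}
	out[0] = (uint8_t)((len & 127) << 1);   /* bits 0-6, EA clear: more follows */
	out[1] = (uint8_t)(len >> 7);           /* bits 7-14 */
	return 2;
}

static unsigned int gsm_decode_len(const uint8_t *in, int *used)
{
	unsigned int len = in[0] >> 1;

	if (in[0] & EA) {
		*used = 1;
		return len;
	}
	*used = 2;
	return len | ((unsigned int)in[1] << 7);
}

int main(void)
{
	uint8_t buf[2];
	int n, used;

	n = gsm_encode_len(300, buf);
	assert(n == 2 && gsm_decode_len(buf, &used) == 300 && used == 2);

	n = gsm_encode_len(42, buf);
	assert(n == 1 && gsm_decode_len(buf, &used) == 42 && used == 1);

	printf("length field round-trips\n");
	return 0;
}

The code being replaced shifted the high part by 6 and set EA on it, so frames of 128 bytes or more ended up advertising a wrong length.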
b/drivers/tty/n_tty.c diff --git a/drivers/char/pty.c b/drivers/tty/pty.c index 923a4858550..923a4858550 100644 --- a/drivers/char/pty.c +++ b/drivers/tty/pty.c diff --git a/drivers/char/sysrq.c b/drivers/tty/sysrq.c index eaa5d3efa79..c556ed9db13 100644 --- a/drivers/char/sysrq.c +++ b/drivers/tty/sysrq.c @@ -554,7 +554,7 @@ EXPORT_SYMBOL(handle_sysrq); #ifdef CONFIG_INPUT /* Simple translation table for the SysRq keys */ -static const unsigned char sysrq_xlate[KEY_MAX + 1] = +static const unsigned char sysrq_xlate[KEY_CNT] = "\000\0331234567890-=\177\t" /* 0x00 - 0x0f */ "qwertyuiop[]\r\000as" /* 0x10 - 0x1f */ "dfghjkl;'`\000\\zxcv" /* 0x20 - 0x2f */ @@ -563,53 +563,129 @@ static const unsigned char sysrq_xlate[KEY_MAX + 1] = "230\177\000\000\213\214\000\000\000\000\000\000\000\000\000\000" /* 0x50 - 0x5f */ "\r\000/"; /* 0x60 - 0x6f */ -static bool sysrq_down; -static int sysrq_alt_use; -static int sysrq_alt; -static DEFINE_SPINLOCK(sysrq_event_lock); +struct sysrq_state { + struct input_handle handle; + struct work_struct reinject_work; + unsigned long key_down[BITS_TO_LONGS(KEY_CNT)]; + unsigned int alt; + unsigned int alt_use; + bool active; + bool need_reinject; +}; + +static void sysrq_reinject_alt_sysrq(struct work_struct *work) +{ + struct sysrq_state *sysrq = + container_of(work, struct sysrq_state, reinject_work); + struct input_handle *handle = &sysrq->handle; + unsigned int alt_code = sysrq->alt_use; + + if (sysrq->need_reinject) { + /* Simulate press and release of Alt + SysRq */ + input_inject_event(handle, EV_KEY, alt_code, 1); + input_inject_event(handle, EV_KEY, KEY_SYSRQ, 1); + input_inject_event(handle, EV_SYN, SYN_REPORT, 1); + + input_inject_event(handle, EV_KEY, KEY_SYSRQ, 0); + input_inject_event(handle, EV_KEY, alt_code, 0); + input_inject_event(handle, EV_SYN, SYN_REPORT, 1); + } +} -static bool sysrq_filter(struct input_handle *handle, unsigned int type, - unsigned int code, int value) +static bool sysrq_filter(struct input_handle *handle, + unsigned int type, unsigned int code, int value) { + struct sysrq_state *sysrq = handle->private; + bool was_active = sysrq->active; bool suppress; - /* We are called with interrupts disabled, just take the lock */ - spin_lock(&sysrq_event_lock); + switch (type) { - if (type != EV_KEY) - goto out; + case EV_SYN: + suppress = false; + break; - switch (code) { + case EV_KEY: + switch (code) { - case KEY_LEFTALT: - case KEY_RIGHTALT: - if (value) - sysrq_alt = code; - else { - if (sysrq_down && code == sysrq_alt_use) - sysrq_down = false; + case KEY_LEFTALT: + case KEY_RIGHTALT: + if (!value) { + /* One of ALTs is being released */ + if (sysrq->active && code == sysrq->alt_use) + sysrq->active = false; - sysrq_alt = 0; + sysrq->alt = KEY_RESERVED; + + } else if (value != 2) { + sysrq->alt = code; + sysrq->need_reinject = false; + } + break; + + case KEY_SYSRQ: + if (value == 1 && sysrq->alt != KEY_RESERVED) { + sysrq->active = true; + sysrq->alt_use = sysrq->alt; + /* + * If nothing else will be pressed we'll need + * to * re-inject Alt-SysRq keysroke. + */ + sysrq->need_reinject = true; + } + + /* + * Pretend that sysrq was never pressed at all. This + * is needed to properly handle KGDB which will try + * to release all keys after exiting debugger. If we + * do not clear key bit it KGDB will end up sending + * release events for Alt and SysRq, potentially + * triggering print screen function. 
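The reworked sysrq handler keeps per-handle state: which Alt is held, whether SysRq handling is active, and whether the Alt+SysRq chord must be re-injected because no command key followed. A reduced model of that decision, ignoring the right Alt key, autorepeat corner cases and the actual input injection:

#include <stdbool.h>
#include <stdio.h>

enum { KEY_RESERVED = 0, KEY_LEFTALT = 56, KEY_SYSRQ = 99, KEY_B = 48 };

struct sysrq_state {
	unsigned int alt;        /* which Alt key is currently down */
	unsigned int alt_use;    /* the Alt that armed SysRq */
	bool active;
	bool need_reinject;
};

/* value: 1 = press, 0 = release, 2 = autorepeat (ignored for state changes) */
static void filter(struct sysrq_state *s, unsigned int code, int value)
{
	switch (code) {
	case KEY_LEFTALT:
		if (!value) {
			if (s->active && code == s->alt_use)
				s->active = false;
			s->alt = KEY_RESERVED;
		} else if (value != 2) {
			s->alt = code;
			s->need_reinject = false;
		}
		break;
	case KEY_SYSRQ:
		if (value == 1 && s->alt != KEY_RESERVED) {
			s->active = true;
			s->alt_use = s->alt;
			s->need_reinject = true;   /* nothing else pressed yet */
		}
		break;
	default:
		if (s->active && value && value != 2) {
			s->need_reinject = false;  /* a command key followed */
			printf("handle_sysrq('%c')\n", code == KEY_B ? 'b' : '?');
		}
		break;
	}
}

int main(void)
{
	struct sysrq_state s = { 0 };

	filter(&s, KEY_LEFTALT, 1);
	filter(&s, KEY_SYSRQ, 1);
	filter(&s, KEY_B, 1);                     /* Alt+SysRq+B: no reinject needed */
	printf("need_reinject: %s\n", s.need_reinject ? "yes" : "no");
	return 0;
}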
+ */ + if (sysrq->active) + clear_bit(KEY_SYSRQ, handle->dev->key); + + break; + + default: + if (sysrq->active && value && value != 2) { + sysrq->need_reinject = false; + __handle_sysrq(sysrq_xlate[code], true); + } + break; } - break; - case KEY_SYSRQ: - if (value == 1 && sysrq_alt) { - sysrq_down = true; - sysrq_alt_use = sysrq_alt; + suppress = sysrq->active; + + if (!sysrq->active) { + /* + * If we are not suppressing key presses keep track of + * keyboard state so we can release keys that have been + * pressed before entering SysRq mode. + */ + if (value) + set_bit(code, sysrq->key_down); + else + clear_bit(code, sysrq->key_down); + + if (was_active) + schedule_work(&sysrq->reinject_work); + + } else if (value == 0 && + test_and_clear_bit(code, sysrq->key_down)) { + /* + * Pass on release events for keys that was pressed before + * entering SysRq mode. + */ + suppress = false; } break; default: - if (sysrq_down && value && value != 2) - __handle_sysrq(sysrq_xlate[code], true); + suppress = sysrq->active; break; } -out: - suppress = sysrq_down; - spin_unlock(&sysrq_event_lock); - return suppress; } @@ -617,28 +693,28 @@ static int sysrq_connect(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id) { - struct input_handle *handle; + struct sysrq_state *sysrq; int error; - sysrq_down = false; - sysrq_alt = 0; - - handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL); - if (!handle) + sysrq = kzalloc(sizeof(struct sysrq_state), GFP_KERNEL); + if (!sysrq) return -ENOMEM; - handle->dev = dev; - handle->handler = handler; - handle->name = "sysrq"; + INIT_WORK(&sysrq->reinject_work, sysrq_reinject_alt_sysrq); + + sysrq->handle.dev = dev; + sysrq->handle.handler = handler; + sysrq->handle.name = "sysrq"; + sysrq->handle.private = sysrq; - error = input_register_handle(handle); + error = input_register_handle(&sysrq->handle); if (error) { pr_err("Failed to register input sysrq handler, error %d\n", error); goto err_free; } - error = input_open_device(handle); + error = input_open_device(&sysrq->handle); if (error) { pr_err("Failed to open input device, error %d\n", error); goto err_unregister; @@ -647,17 +723,20 @@ static int sysrq_connect(struct input_handler *handler, return 0; err_unregister: - input_unregister_handle(handle); + input_unregister_handle(&sysrq->handle); err_free: - kfree(handle); + kfree(sysrq); return error; } static void sysrq_disconnect(struct input_handle *handle) { + struct sysrq_state *sysrq = handle->private; + input_close_device(handle); + cancel_work_sync(&sysrq->reinject_work); input_unregister_handle(handle); - kfree(handle); + kfree(sysrq); } /* diff --git a/drivers/char/tty_audit.c b/drivers/tty/tty_audit.c index f64582b0f62..f64582b0f62 100644 --- a/drivers/char/tty_audit.c +++ b/drivers/tty/tty_audit.c diff --git a/drivers/char/tty_buffer.c b/drivers/tty/tty_buffer.c index cc1e9850d65..d8210ca0072 100644 --- a/drivers/char/tty_buffer.c +++ b/drivers/tty/tty_buffer.c @@ -413,7 +413,8 @@ static void flush_to_ldisc(struct work_struct *work) spin_lock_irqsave(&tty->buf.lock, flags); if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) { - struct tty_buffer *head; + struct tty_buffer *head, *tail = tty->buf.tail; + int seen_tail = 0; while ((head = tty->buf.head) != NULL) { int count; char *char_buf; @@ -423,6 +424,15 @@ static void flush_to_ldisc(struct work_struct *work) if (!count) { if (head->next == NULL) break; + /* + There's a possibility tty might get new buffer + added during the unlock window below. 
We could + end up spinning in here forever hogging the CPU + completely. To avoid this let's have a rest each + time we processed the tail buffer. + */ + if (tail == head) + seen_tail = 1; tty->buf.head = head->next; tty_buffer_free(tty, head); continue; @@ -432,7 +442,7 @@ static void flush_to_ldisc(struct work_struct *work) line discipline as we want to empty the queue */ if (test_bit(TTY_FLUSHPENDING, &tty->flags)) break; - if (!tty->receive_room) { + if (!tty->receive_room || seen_tail) { schedule_delayed_work(&tty->buf.work, 1); break; } diff --git a/drivers/char/tty_io.c b/drivers/tty/tty_io.c index c05c5af5aa0..35480dd57a3 100644 --- a/drivers/char/tty_io.c +++ b/drivers/tty/tty_io.c @@ -559,6 +559,9 @@ void __tty_hangup(struct tty_struct *tty) tty_lock(); + /* some functions below drop BTM, so we need this bit */ + set_bit(TTY_HUPPING, &tty->flags); + /* inuse_filps is protected by the single tty lock, this really needs to change if we want to flush the workqueue with the lock held */ @@ -578,6 +581,10 @@ void __tty_hangup(struct tty_struct *tty) } spin_unlock(&tty_files_lock); + /* + * it drops BTM and thus races with reopen + * we protect the race by TTY_HUPPING + */ tty_ldisc_hangup(tty); read_lock(&tasklist_lock); @@ -615,7 +622,6 @@ void __tty_hangup(struct tty_struct *tty) tty->session = NULL; tty->pgrp = NULL; tty->ctrl_status = 0; - set_bit(TTY_HUPPED, &tty->flags); spin_unlock_irqrestore(&tty->ctrl_lock, flags); /* Account for the p->signal references we killed */ @@ -641,6 +647,7 @@ void __tty_hangup(struct tty_struct *tty) * can't yet guarantee all that. */ set_bit(TTY_HUPPED, &tty->flags); + clear_bit(TTY_HUPPING, &tty->flags); tty_ldisc_enable(tty); tty_unlock(); @@ -1310,7 +1317,9 @@ static int tty_reopen(struct tty_struct *tty) { struct tty_driver *driver = tty->driver; - if (test_bit(TTY_CLOSING, &tty->flags)) + if (test_bit(TTY_CLOSING, &tty->flags) || + test_bit(TTY_HUPPING, &tty->flags) || + test_bit(TTY_LDISC_CHANGING, &tty->flags)) return -EIO; if (driver->type == TTY_DRIVER_TYPE_PTY && diff --git a/drivers/char/tty_ioctl.c b/drivers/tty/tty_ioctl.c index 0c188997145..0c188997145 100644 --- a/drivers/char/tty_ioctl.c +++ b/drivers/tty/tty_ioctl.c diff --git a/drivers/char/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 412f9775d19..4214d58276f 100644 --- a/drivers/char/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c @@ -47,6 +47,7 @@ static DEFINE_SPINLOCK(tty_ldisc_lock); static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait); +static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_idle); /* Line disc dispatch table */ static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS]; @@ -83,6 +84,7 @@ static void put_ldisc(struct tty_ldisc *ld) return; } local_irq_restore(flags); + wake_up(&tty_ldisc_idle); } /** @@ -452,6 +454,8 @@ static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld) /* BTM here locks versus a hangup event */ WARN_ON(!tty_locked()); ret = ld->ops->open(tty); + if (ret) + clear_bit(TTY_LDISC_OPEN, &tty->flags); return ret; } return 0; @@ -531,6 +535,23 @@ static int tty_ldisc_halt(struct tty_struct *tty) } /** + * tty_ldisc_wait_idle - wait for the ldisc to become idle + * @tty: tty to wait for + * + * Wait for the line discipline to become idle. The discipline must + * have been halted for this to guarantee it remains idle. + */ +static int tty_ldisc_wait_idle(struct tty_struct *tty) +{ + int ret; + ret = wait_event_interruptible_timeout(tty_ldisc_idle, + atomic_read(&tty->ldisc->users) == 1, 5 * HZ); + if (ret < 0) + return ret; + return ret > 0 ? 
0 : -EBUSY; +} + +/** * tty_set_ldisc - set line discipline * @tty: the terminal to set * @ldisc: the line discipline @@ -634,8 +655,17 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) flush_scheduled_work(); + retval = tty_ldisc_wait_idle(tty); + tty_lock(); mutex_lock(&tty->ldisc_mutex); + + /* handle wait idle failure locked */ + if (retval) { + tty_ldisc_put(new_ldisc); + goto enable; + } + if (test_bit(TTY_HUPPED, &tty->flags)) { /* We were raced by the hangup method. It will have stomped the ldisc data and closed the ldisc down */ @@ -669,6 +699,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc) tty_ldisc_put(o_ldisc); +enable: /* * Allow ldisc referencing to occur again */ @@ -714,9 +745,12 @@ static void tty_reset_termios(struct tty_struct *tty) * state closed */ -static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc) +static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc) { - struct tty_ldisc *ld; + struct tty_ldisc *ld = tty_ldisc_get(ldisc); + + if (IS_ERR(ld)) + return -1; tty_ldisc_close(tty, tty->ldisc); tty_ldisc_put(tty->ldisc); @@ -724,10 +758,10 @@ static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc) /* * Switch the line discipline back */ - ld = tty_ldisc_get(ldisc); - BUG_ON(IS_ERR(ld)); tty_ldisc_assign(tty, ld); tty_set_termios_ldisc(tty, ldisc); + + return 0; } /** @@ -802,13 +836,16 @@ void tty_ldisc_hangup(struct tty_struct *tty) a FIXME */ if (tty->ldisc) { /* Not yet closed */ if (reset == 0) { - tty_ldisc_reinit(tty, tty->termios->c_line); - err = tty_ldisc_open(tty, tty->ldisc); + + if (!tty_ldisc_reinit(tty, tty->termios->c_line)) + err = tty_ldisc_open(tty, tty->ldisc); + else + err = 1; } /* If the re-open fails or we reset then go to N_TTY. The N_TTY open cannot fail */ if (reset || err) { - tty_ldisc_reinit(tty, N_TTY); + BUG_ON(tty_ldisc_reinit(tty, N_TTY)); WARN_ON(tty_ldisc_open(tty, tty->ldisc)); } tty_ldisc_enable(tty); diff --git a/drivers/char/tty_mutex.c b/drivers/tty/tty_mutex.c index 133697540c7..133697540c7 100644 --- a/drivers/char/tty_mutex.c +++ b/drivers/tty/tty_mutex.c diff --git a/drivers/char/tty_port.c b/drivers/tty/tty_port.c index 33d37d230f8..33d37d230f8 100644 --- a/drivers/char/tty_port.c +++ b/drivers/tty/tty_port.c diff --git a/drivers/char/.gitignore b/drivers/tty/vt/.gitignore index 83683a2d8e6..83683a2d8e6 100644 --- a/drivers/char/.gitignore +++ b/drivers/tty/vt/.gitignore diff --git a/drivers/tty/vt/Makefile b/drivers/tty/vt/Makefile new file mode 100644 index 00000000000..14a51c9960d --- /dev/null +++ b/drivers/tty/vt/Makefile @@ -0,0 +1,34 @@ +# +# This file contains the font map for the default (hardware) font +# +FONTMAPFILE = cp437.uni + +obj-$(CONFIG_VT) += vt_ioctl.o vc_screen.o \ + selection.o keyboard.o +obj-$(CONFIG_CONSOLE_TRANSLATIONS) += consolemap.o consolemap_deftbl.o +obj-$(CONFIG_HW_CONSOLE) += vt.o defkeymap.o + +# Files generated that shall be removed upon make clean +clean-files := consolemap_deftbl.c defkeymap.c + +quiet_cmd_conmk = CONMK $@ + cmd_conmk = scripts/conmakehash $< > $@ + +$(obj)/consolemap_deftbl.c: $(src)/$(FONTMAPFILE) + $(call cmd,conmk) + +$(obj)/defkeymap.o: $(obj)/defkeymap.c + +# Uncomment if you're changing the keymap and have an appropriate +# loadkeys version for the map. By default, we'll use the shipped +# versions. 
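tty_ldisc_wait_idle() folds the three outcomes of wait_event_interruptible_timeout() into a single status so tty_set_ldisc() can back out with -EBUSY when the old discipline never goes idle. The mapping on its own, with the kernel wait call modelled by a plain return value:

#include <errno.h>
#include <stdio.h>

/*
 * Model of the return-value mapping:
 *   ret < 0  -> interrupted, propagate the error
 *   ret == 0 -> timed out, the ldisc never became idle: -EBUSY
 *   ret > 0  -> went idle before the timeout: success
 */
static int wait_result_to_status(long ret)
{
	if (ret < 0)
		return (int)ret;
	return ret > 0 ? 0 : -EBUSY;
}

int main(void)
{
	printf("%d %d %d\n",
	       wait_result_to_status(-EINTR),  /* interrupted         */
	       wait_result_to_status(0),       /* timed out: -EBUSY   */
	       wait_result_to_status(250));    /* idle in time: 0     */
	return 0;
}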
+# GENERATE_KEYMAP := 1 + +ifdef GENERATE_KEYMAP + +$(obj)/defkeymap.c: $(obj)/%.c: $(src)/%.map + loadkeys --mktable $< > $@.tmp + sed -e 's/^static *//' $@.tmp > $@ + rm $@.tmp + +endif diff --git a/drivers/char/consolemap.c b/drivers/tty/vt/consolemap.c index 45d3e80156d..45d3e80156d 100644 --- a/drivers/char/consolemap.c +++ b/drivers/tty/vt/consolemap.c diff --git a/drivers/char/cp437.uni b/drivers/tty/vt/cp437.uni index bc6163484f6..bc6163484f6 100644 --- a/drivers/char/cp437.uni +++ b/drivers/tty/vt/cp437.uni diff --git a/drivers/char/defkeymap.c_shipped b/drivers/tty/vt/defkeymap.c_shipped index d2208dfe3f6..d2208dfe3f6 100644 --- a/drivers/char/defkeymap.c_shipped +++ b/drivers/tty/vt/defkeymap.c_shipped diff --git a/drivers/char/defkeymap.map b/drivers/tty/vt/defkeymap.map index 50b30cace26..50b30cace26 100644 --- a/drivers/char/defkeymap.map +++ b/drivers/tty/vt/defkeymap.map diff --git a/drivers/char/keyboard.c b/drivers/tty/vt/keyboard.c index e95d7876ca6..e95d7876ca6 100644 --- a/drivers/char/keyboard.c +++ b/drivers/tty/vt/keyboard.c diff --git a/drivers/char/selection.c b/drivers/tty/vt/selection.c index ebae344ce91..ebae344ce91 100644 --- a/drivers/char/selection.c +++ b/drivers/tty/vt/selection.c diff --git a/drivers/char/vc_screen.c b/drivers/tty/vt/vc_screen.c index 273ab44cc91..eab3a1ff99e 100644 --- a/drivers/char/vc_screen.c +++ b/drivers/tty/vt/vc_screen.c @@ -553,12 +553,12 @@ static unsigned int vcs_poll(struct file *file, poll_table *wait) { struct vcs_poll_data *poll = vcs_poll_data_get(file); - int ret = 0; + int ret = DEFAULT_POLLMASK|POLLERR|POLLPRI; if (poll) { poll_wait(file, &poll->waitq, wait); - if (!poll->seen_last_update) - ret = POLLIN | POLLRDNORM; + if (poll->seen_last_update) + ret = DEFAULT_POLLMASK; } return ret; } diff --git a/drivers/char/vt.c b/drivers/tty/vt/vt.c index a8ec48ed14d..a8ec48ed14d 100644 --- a/drivers/char/vt.c +++ b/drivers/tty/vt/vt.c diff --git a/drivers/char/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index 6b68a0fb461..6b68a0fb461 100644 --- a/drivers/char/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index a858d2b87b9..51fe1795d5a 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -3,7 +3,7 @@ * * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de> * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de> - * Copyright(C) 2006, Hans J. Koch <hjk@linutronix.de> + * Copyright(C) 2006, Hans J. Koch <hjk@hansjkoch.de> * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com> * * Userspace IO diff --git a/drivers/uio/uio_cif.c b/drivers/uio/uio_cif.c index a8ea2f19a0c..a84a451159e 100644 --- a/drivers/uio/uio_cif.c +++ b/drivers/uio/uio_cif.c @@ -1,7 +1,7 @@ /* * UIO Hilscher CIF card driver * - * (C) 2007 Hans J. Koch <hjk@linutronix.de> + * (C) 2007 Hans J. Koch <hjk@hansjkoch.de> * Original code (C) 2005 Benedikt Spranger <b.spranger@linutronix.de> * * Licensed under GPL version 2 only. diff --git a/drivers/uio/uio_netx.c b/drivers/uio/uio_netx.c index 5a18e9f7b83..5ffdb483b01 100644 --- a/drivers/uio/uio_netx.c +++ b/drivers/uio/uio_netx.c @@ -2,7 +2,7 @@ * UIO driver for Hilscher NetX based fieldbus cards (cifX, comX). * See http://www.hilscher.com for details. * - * (C) 2007 Hans J. Koch <hjk@linutronix.de> + * (C) 2007 Hans J. Koch <hjk@hansjkoch.de> * (C) 2008 Manuel Traut <manut@linutronix.de> * * Licensed under GPL version 2 only. 
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c index ea071a5b6ee..44447f54942 100644 --- a/drivers/usb/atm/ueagle-atm.c +++ b/drivers/usb/atm/ueagle-atm.c @@ -2301,7 +2301,7 @@ out: return ret; } -static DEVICE_ATTR(stat_status, S_IWUGO | S_IRUGO, read_status, reboot); +static DEVICE_ATTR(stat_status, S_IWUSR | S_IRUGO, read_status, reboot); static ssize_t read_human_status(struct device *dev, struct device_attribute *attr, char *buf) @@ -2364,8 +2364,7 @@ out: return ret; } -static DEVICE_ATTR(stat_human_status, S_IWUGO | S_IRUGO, - read_human_status, NULL); +static DEVICE_ATTR(stat_human_status, S_IRUGO, read_human_status, NULL); static ssize_t read_delin(struct device *dev, struct device_attribute *attr, char *buf) @@ -2397,7 +2396,7 @@ out: return ret; } -static DEVICE_ATTR(stat_delin, S_IWUGO | S_IRUGO, read_delin, NULL); +static DEVICE_ATTR(stat_delin, S_IRUGO, read_delin, NULL); #define UEA_ATTR(name, reset) \ \ diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c index 05bf5a27b5b..989e16e4ab5 100644 --- a/drivers/usb/atm/usbatm.c +++ b/drivers/usb/atm/usbatm.c @@ -951,7 +951,9 @@ static int usbatm_atm_init(struct usbatm_data *instance) * condition: callbacks we register can be executed at once, before we have * initialized the struct atm_dev. To protect against this, all callbacks * abort if atm_dev->dev_data is NULL. */ - atm_dev = atm_dev_register(instance->driver_name, &usbatm_atm_devops, -1, NULL); + atm_dev = atm_dev_register(instance->driver_name, + &instance->usb_intf->dev, &usbatm_atm_devops, + -1, NULL); if (!atm_dev) { usb_err(instance, "%s: failed to register ATM device!\n", __func__); return -1; @@ -966,14 +968,6 @@ static int usbatm_atm_init(struct usbatm_data *instance) /* temp init ATM device, set to 128kbit */ atm_dev->link_rate = 128 * 1000 / 424; - ret = sysfs_create_link(&atm_dev->class_dev.kobj, - &instance->usb_intf->dev.kobj, "device"); - if (ret) { - atm_err(instance, "%s: sysfs_create_link failed: %d\n", - __func__, ret); - goto fail_sysfs; - } - if (instance->driver->atm_start && ((ret = instance->driver->atm_start(instance, atm_dev)) < 0)) { atm_err(instance, "%s: atm_start failed: %d!\n", __func__, ret); goto fail; @@ -992,8 +986,6 @@ static int usbatm_atm_init(struct usbatm_data *instance) return 0; fail: - sysfs_remove_link(&atm_dev->class_dev.kobj, "device"); - fail_sysfs: instance->atm_dev = NULL; atm_dev_deregister(atm_dev); /* usbatm_atm_dev_close will eventually be called */ return ret; @@ -1329,7 +1321,6 @@ void usbatm_usb_disconnect(struct usb_interface *intf) /* ATM finalize */ if (instance->atm_dev) { - sysfs_remove_link(&instance->atm_dev->class_dev.kobj, "device"); atm_dev_deregister(instance->atm_dev); instance->atm_dev = NULL; } diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig index 9eed5b52d9d..bcc24779ba0 100644 --- a/drivers/usb/core/Kconfig +++ b/drivers/usb/core/Kconfig @@ -107,11 +107,19 @@ config USB_SUSPEND If you are unsure about this, say N here. config USB_OTG - bool + bool "OTG support" depends on USB && EXPERIMENTAL depends on USB_SUSPEND default n - + help + The most notable feature of USB OTG is support for a + "Dual-Role" device, which can act as either a device + or a host. The initial role is decided by the type of + plug inserted and can be changed later when two dual + role devices talk to each other. + + Select this only if your board has Mini-AB/Micro-AB + connector. 
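The S_IWUGO -> S_IWUSR conversions that start with ueagle-atm.c above (and recur throughout this merge) drop group/other write permission from sysfs and debugfs attributes, and drop the write bit entirely where the attribute has no store routine. A stand-alone check of the resulting mode bits; the composite S_IRUGO/S_IWUGO macros are defined locally to mirror the kernel's definitions, since <sys/stat.h> only provides the individual bits:

#include <stdio.h>
#include <sys/stat.h>

/* Composites as the kernel defines them. */
#define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)   /* 0444 */
#define S_IWUGO (S_IWUSR | S_IWGRP | S_IWOTH)   /* 0222 */

int main(void)
{
    printf("old: S_IWUGO | S_IRUGO = %04o\n", S_IWUGO | S_IRUGO); /* 0666 */
    printf("new: S_IWUSR | S_IRUGO = %04o\n", S_IWUSR | S_IRUGO); /* 0644 */
    printf("read-only attribute:     %04o\n", S_IRUGO);           /* 0444 */
    return 0;
}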
config USB_OTG_WHITELIST bool "Rely on OTG Targeted Peripherals List" diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c index ddb4dc98092..a3d2e239965 100644 --- a/drivers/usb/core/devices.c +++ b/drivers/usb/core/devices.c @@ -54,7 +54,6 @@ #include <linux/gfp.h> #include <linux/poll.h> #include <linux/usb.h> -#include <linux/smp_lock.h> #include <linux/usbdevice_fs.h> #include <linux/usb/hcd.h> #include <linux/mutex.h> diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index f1aaff6202a..a7131ad630f 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -37,7 +37,6 @@ #include <linux/fs.h> #include <linux/mm.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/signal.h> #include <linux/poll.h> #include <linux/module.h> @@ -965,10 +964,11 @@ static int proc_getdriver(struct dev_state *ps, void __user *arg) static int proc_connectinfo(struct dev_state *ps, void __user *arg) { - struct usbdevfs_connectinfo ci; + struct usbdevfs_connectinfo ci = { + .devnum = ps->dev->devnum, + .slow = ps->dev->speed == USB_SPEED_LOW + }; - ci.devnum = ps->dev->devnum; - ci.slow = ps->dev->speed == USB_SPEED_LOW; if (copy_to_user(arg, &ci, sizeof(ci))) return -EFAULT; return 0; diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c index 9fe34fb78ef..cf6a5423de0 100644 --- a/drivers/usb/core/file.c +++ b/drivers/usb/core/file.c @@ -19,7 +19,6 @@ #include <linux/errno.h> #include <linux/rwsem.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/usb.h> #include "usb.h" diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 61800f77dac..ced846ac414 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1330,6 +1330,8 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, */ if (usb_endpoint_xfer_control(&urb->ep->desc)) { + if (hcd->self.uses_pio_for_control) + return ret; if (hcd->self.uses_dma) { urb->setup_dma = dma_map_single( hcd->self.controller, diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c index 9819a4cc3b2..b690aa35df9 100644 --- a/drivers/usb/core/inode.c +++ b/drivers/usb/core/inode.c @@ -39,7 +39,6 @@ #include <linux/parser.h> #include <linux/notifier.h> #include <linux/seq_file.h> -#include <linux/smp_lock.h> #include <linux/usb/hcd.h> #include <asm/byteorder.h> #include "usb.h" diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index b739ca81465..607d0db4a98 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -158,7 +158,7 @@ config USB_GADGET_FSL_USB2 boolean "Freescale Highspeed USB DR Peripheral Controller" depends on FSL_SOC || ARCH_MXC select USB_GADGET_DUALSPEED - select USB_FSL_MPH_DR_OF + select USB_FSL_MPH_DR_OF if OF help Some of Freescale PowerPC processors have a High Speed Dual-Role(DR) USB controller, which supports device mode. 
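The composite.c reordering above is purely about assignment order: the whole-descriptor copy cdev->desc = *composite->dev used to run after the idVendor/idProduct/bcdDevice module-parameter overrides and silently wiped them out. A minimal stand-alone illustration of why the overrides must follow the struct copy; the struct layout and the ID values are invented for the example:

#include <stdio.h>
#include <stdint.h>

struct dev_desc {
    uint16_t idVendor;
    uint16_t idProduct;
};

int main(void)
{
    const struct dev_desc template = { .idVendor = 0x0525, .idProduct = 0xa4a0 };
    struct dev_desc desc = { 0 };
    uint16_t vendor_override = 0x1d6b;          /* e.g. a module parameter */

    /* Wrong order: apply the override first, then copy the template over it. */
    desc.idVendor = vendor_override;
    desc = template;
    printf("override before copy: %04x (lost)\n", (unsigned)desc.idVendor);

    /* Fixed order: copy the template, then apply the override. */
    desc = template;
    desc.idVendor = vendor_override;
    printf("override after copy:  %04x (kept)\n", (unsigned)desc.idVendor);
    return 0;
}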
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c index b5e20e873cb..717ff653fa2 100644 --- a/drivers/usb/gadget/atmel_usba_udc.c +++ b/drivers/usb/gadget/atmel_usba_udc.c @@ -2017,7 +2017,7 @@ static int __init usba_udc_probe(struct platform_device *pdev) } } else { /* gpio_request fail so use -EINVAL for gpio_is_valid */ - ubc->vbus_pin = -EINVAL; + udc->vbus_pin = -EINVAL; } } diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 7b5cc16e4a0..8572dad5ecb 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -1047,9 +1047,9 @@ composite_unbind(struct usb_gadget *gadget) kfree(cdev->req->buf); usb_ep_free_request(gadget->ep0, cdev->req); } + device_remove_file(&gadget->dev, &dev_attr_suspended); kfree(cdev); set_gadget_data(gadget, NULL); - device_remove_file(&gadget->dev, &dev_attr_suspended); composite = NULL; } @@ -1107,14 +1107,6 @@ static int composite_bind(struct usb_gadget *gadget) */ usb_ep_autoconfig_reset(cdev->gadget); - /* standardized runtime overrides for device ID data */ - if (idVendor) - cdev->desc.idVendor = cpu_to_le16(idVendor); - if (idProduct) - cdev->desc.idProduct = cpu_to_le16(idProduct); - if (bcdDevice) - cdev->desc.bcdDevice = cpu_to_le16(bcdDevice); - /* composite gadget needs to assign strings for whole device (like * serial number), register function drivers, potentially update * power state and consumption, etc @@ -1126,6 +1118,14 @@ static int composite_bind(struct usb_gadget *gadget) cdev->desc = *composite->dev; cdev->desc.bMaxPacketSize0 = gadget->ep0->maxpacket; + /* standardized runtime overrides for device ID data */ + if (idVendor) + cdev->desc.idVendor = cpu_to_le16(idVendor); + if (idProduct) + cdev->desc.idProduct = cpu_to_le16(idProduct); + if (bcdDevice) + cdev->desc.bcdDevice = cpu_to_le16(bcdDevice); + /* stirng overrides */ if (iManufacturer || !cdev->desc.iManufacturer) { if (!iManufacturer && !composite->iManufacturer && diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c index 4a830df4fc3..484c5ba5450 100644 --- a/drivers/usb/gadget/f_fs.c +++ b/drivers/usb/gadget/f_fs.c @@ -30,7 +30,6 @@ #include <linux/blkdev.h> #include <linux/pagemap.h> #include <asm/unaligned.h> -#include <linux/smp_lock.h> #include <linux/usb/composite.h> #include <linux/usb/functionfs.h> diff --git a/drivers/usb/gadget/f_hid.c b/drivers/usb/gadget/f_hid.c index 4f891eddd06..598e7e2ab80 100644 --- a/drivers/usb/gadget/f_hid.c +++ b/drivers/usb/gadget/f_hid.c @@ -25,7 +25,6 @@ #include <linux/cdev.h> #include <linux/mutex.h> #include <linux/poll.h> -#include <linux/smp_lock.h> #include <linux/uaccess.h> #include <linux/wait.h> #include <linux/usb/g_hid.h> diff --git a/drivers/usb/gadget/goku_udc.h b/drivers/usb/gadget/goku_udc.h index 566cb231905..e7e0c69d3b1 100644 --- a/drivers/usb/gadget/goku_udc.h +++ b/drivers/usb/gadget/goku_udc.h @@ -251,7 +251,8 @@ struct goku_udc { got_region:1, req_config:1, configured:1, - enabled:1; + enabled:1, + registered:1; /* pci state used to access those endpoints */ struct pci_dev *pdev; diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c index cb23355f52d..fbe86ca9580 100644 --- a/drivers/usb/gadget/u_ether.c +++ b/drivers/usb/gadget/u_ether.c @@ -811,7 +811,6 @@ int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN]) INFO(dev, "MAC %pM\n", net->dev_addr); INFO(dev, "HOST MAC %pM\n", dev->host_mac); - netif_stop_queue(net); the_dev = dev; } diff --git a/drivers/usb/gadget/u_serial.c 
b/drivers/usb/gadget/u_serial.c index 01e5354a4c2..40f7716b31f 100644 --- a/drivers/usb/gadget/u_serial.c +++ b/drivers/usb/gadget/u_serial.c @@ -105,11 +105,15 @@ struct gs_port { wait_queue_head_t close_wait; /* wait for last close */ struct list_head read_pool; + int read_started; + int read_allocated; struct list_head read_queue; unsigned n_read; struct tasklet_struct push; struct list_head write_pool; + int write_started; + int write_allocated; struct gs_buf port_write_buf; wait_queue_head_t drain_wait; /* wait while writes drain */ @@ -363,6 +367,9 @@ __acquires(&port->port_lock) struct usb_request *req; int len; + if (port->write_started >= QUEUE_SIZE) + break; + req = list_entry(pool->next, struct usb_request, list); len = gs_send_packet(port, req->buf, in->maxpacket); if (len == 0) { @@ -397,6 +404,8 @@ __acquires(&port->port_lock) break; } + port->write_started++; + /* abort immediately after disconnect */ if (!port->port_usb) break; @@ -418,7 +427,6 @@ __acquires(&port->port_lock) { struct list_head *pool = &port->read_pool; struct usb_ep *out = port->port_usb->out; - unsigned started = 0; while (!list_empty(pool)) { struct usb_request *req; @@ -430,6 +438,9 @@ __acquires(&port->port_lock) if (!tty) break; + if (port->read_started >= QUEUE_SIZE) + break; + req = list_entry(pool->next, struct usb_request, list); list_del(&req->list); req->length = out->maxpacket; @@ -447,13 +458,13 @@ __acquires(&port->port_lock) list_add(&req->list, pool); break; } - started++; + port->read_started++; /* abort immediately after disconnect */ if (!port->port_usb) break; } - return started; + return port->read_started; } /* @@ -535,6 +546,7 @@ static void gs_rx_push(unsigned long _port) } recycle: list_move(&req->list, &port->read_pool); + port->read_started--; } /* Push from tty to ldisc; without low_latency set this is handled by @@ -587,6 +599,7 @@ static void gs_write_complete(struct usb_ep *ep, struct usb_request *req) spin_lock(&port->port_lock); list_add(&req->list, &port->write_pool); + port->write_started--; switch (req->status) { default: @@ -608,7 +621,8 @@ static void gs_write_complete(struct usb_ep *ep, struct usb_request *req) spin_unlock(&port->port_lock); } -static void gs_free_requests(struct usb_ep *ep, struct list_head *head) +static void gs_free_requests(struct usb_ep *ep, struct list_head *head, + int *allocated) { struct usb_request *req; @@ -616,25 +630,31 @@ static void gs_free_requests(struct usb_ep *ep, struct list_head *head) req = list_entry(head->next, struct usb_request, list); list_del(&req->list); gs_free_req(ep, req); + if (allocated) + (*allocated)--; } } static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head, - void (*fn)(struct usb_ep *, struct usb_request *)) + void (*fn)(struct usb_ep *, struct usb_request *), + int *allocated) { int i; struct usb_request *req; + int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE; /* Pre-allocate up to QUEUE_SIZE transfers, but if we can't * do quite that many this time, don't fail ... we just won't * be as speedy as we might otherwise be. */ - for (i = 0; i < QUEUE_SIZE; i++) { + for (i = 0; i < n; i++) { req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC); if (!req) return list_empty(head) ? -ENOMEM : 0; req->complete = fn; list_add_tail(&req->list, head); + if (allocated) + (*allocated)++; } return 0; } @@ -661,14 +681,15 @@ static int gs_start_io(struct gs_port *port) * configurations may use different endpoints with a given port; * and high speed vs full speed changes packet sizes too. 
*/ - status = gs_alloc_requests(ep, head, gs_read_complete); + status = gs_alloc_requests(ep, head, gs_read_complete, + &port->read_allocated); if (status) return status; status = gs_alloc_requests(port->port_usb->in, &port->write_pool, - gs_write_complete); + gs_write_complete, &port->write_allocated); if (status) { - gs_free_requests(ep, head); + gs_free_requests(ep, head, &port->read_allocated); return status; } @@ -680,8 +701,9 @@ static int gs_start_io(struct gs_port *port) if (started) { tty_wakeup(port->port_tty); } else { - gs_free_requests(ep, head); - gs_free_requests(port->port_usb->in, &port->write_pool); + gs_free_requests(ep, head, &port->read_allocated); + gs_free_requests(port->port_usb->in, &port->write_pool, + &port->write_allocated); status = -EIO; } @@ -1315,8 +1337,12 @@ void gserial_disconnect(struct gserial *gser) spin_lock_irqsave(&port->port_lock, flags); if (port->open_count == 0 && !port->openclose) gs_buf_free(&port->port_write_buf); - gs_free_requests(gser->out, &port->read_pool); - gs_free_requests(gser->out, &port->read_queue); - gs_free_requests(gser->in, &port->write_pool); + gs_free_requests(gser->out, &port->read_pool, NULL); + gs_free_requests(gser->out, &port->read_queue, NULL); + gs_free_requests(gser->in, &port->write_pool, NULL); + + port->read_allocated = port->read_started = + port->write_allocated = port->write_started = 0; + spin_unlock_irqrestore(&port->port_lock, flags); } diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 2391c396ca3..6f4f8e6a40c 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -122,7 +122,7 @@ config USB_EHCI_FSL bool "Support for Freescale on-chip EHCI USB controller" depends on USB_EHCI_HCD && FSL_SOC select USB_EHCI_ROOT_HUB_TT - select USB_FSL_MPH_DR_OF + select USB_FSL_MPH_DR_OF if OF ---help--- Variation of ARC USB block used in some Freescale chips. diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c index 86afdc73322..6e2599661b5 100644 --- a/drivers/usb/host/ehci-dbg.c +++ b/drivers/usb/host/ehci-dbg.c @@ -1067,7 +1067,7 @@ static inline void create_debug_files (struct ehci_hcd *ehci) &debug_registers_fops)) goto file_error; - if (!debugfs_create_file("lpm", S_IRUGO|S_IWUGO, ehci->debug_dir, bus, + if (!debugfs_create_file("lpm", S_IRUGO|S_IWUSR, ehci->debug_dir, bus, &debug_lpm_fops)) goto file_error; diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 502a7e6fef4..e9062806d4a 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -1063,10 +1063,11 @@ rescan: tmp && tmp != qh; tmp = tmp->qh_next.qh) continue; - /* periodic qh self-unlinks on empty */ - if (!tmp) - goto nogood; - unlink_async (ehci, qh); + /* periodic qh self-unlinks on empty, and a COMPLETING qh + * may already be unlinked. + */ + if (tmp) + unlink_async(ehci, qh); /* FALL THROUGH */ case QH_STATE_UNLINK: /* wait for hw to finish? */ case QH_STATE_UNLINK_WAIT: @@ -1083,7 +1084,6 @@ idle_timeout: } /* else FALL THROUGH */ default: -nogood: /* caller was supposed to have unlinked any requests; * that's not our job. just leak this memory. 
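The u_serial.c changes above replace an unbounded allocate-and-queue loop with explicit per-port accounting: read_allocated/write_allocated track how many requests exist so re-allocation only tops the pool up to QUEUE_SIZE, read_started/write_started cap how many are in flight, and the completion handlers decrement the started counts. A compact stand-alone model of that accounting; QUEUE_SIZE and the struct here are stand-ins, not the driver's own types:

#include <stdio.h>

#define QUEUE_SIZE 16

struct port_acct {
    int allocated;   /* requests that exist in the pool */
    int started;     /* requests currently queued to the endpoint */
};

/* Allocate only what is missing to reach QUEUE_SIZE. */
static void alloc_requests(struct port_acct *p)
{
    int n = QUEUE_SIZE - p->allocated;
    for (int i = 0; i < n; i++)
        p->allocated++;
}

/* Start transfers, but never more than QUEUE_SIZE in flight. */
static void start_io(struct port_acct *p, int wanted)
{
    while (wanted-- > 0 && p->started < QUEUE_SIZE)
        p->started++;
}

static void complete_one(struct port_acct *p)
{
    p->started--;            /* what the completion handlers do */
}

int main(void)
{
    struct port_acct p = { 0, 0 };

    alloc_requests(&p);
    start_io(&p, 100);       /* ask for far more than the cap */
    printf("allocated=%d started=%d\n", p.allocated, p.started); /* 16 16 */
    complete_one(&p);
    start_io(&p, 100);
    printf("after one completion: started=%d\n", p.started);     /* 16 */
    return 0;
}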
*/ diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c index d36e4e75e08..12f70c302b0 100644 --- a/drivers/usb/host/ehci-mem.c +++ b/drivers/usb/host/ehci-mem.c @@ -141,6 +141,10 @@ static void ehci_mem_cleanup (struct ehci_hcd *ehci) qh_put (ehci->async); ehci->async = NULL; + if (ehci->dummy) + qh_put(ehci->dummy); + ehci->dummy = NULL; + /* DMA consistent memory and pools */ if (ehci->qtd_pool) dma_pool_destroy (ehci->qtd_pool); @@ -227,8 +231,26 @@ static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags) if (ehci->periodic == NULL) { goto fail; } - for (i = 0; i < ehci->periodic_size; i++) - ehci->periodic [i] = EHCI_LIST_END(ehci); + + if (ehci->use_dummy_qh) { + struct ehci_qh_hw *hw; + ehci->dummy = ehci_qh_alloc(ehci, flags); + if (!ehci->dummy) + goto fail; + + hw = ehci->dummy->hw; + hw->hw_next = EHCI_LIST_END(ehci); + hw->hw_qtd_next = EHCI_LIST_END(ehci); + hw->hw_alt_next = EHCI_LIST_END(ehci); + hw->hw_token &= ~QTD_STS_ACTIVE; + ehci->dummy->hw = hw; + + for (i = 0; i < ehci->periodic_size; i++) + ehci->periodic[i] = ehci->dummy->qh_dma; + } else { + for (i = 0; i < ehci->periodic_size; i++) + ehci->periodic[i] = EHCI_LIST_END(ehci); + } /* software shadow of hardware table */ ehci->pshadow = kcalloc(ehci->periodic_size, sizeof(void *), flags); diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c index ac9c4d7c44a..bce85055019 100644 --- a/drivers/usb/host/ehci-mxc.c +++ b/drivers/usb/host/ehci-mxc.c @@ -36,6 +36,8 @@ struct ehci_mxc_priv { static int ehci_mxc_setup(struct usb_hcd *hcd) { struct ehci_hcd *ehci = hcd_to_ehci(hcd); + struct device *dev = hcd->self.controller; + struct mxc_usbh_platform_data *pdata = dev_get_platdata(dev); int retval; /* EHCI registers start at offset 0x100 */ @@ -63,6 +65,12 @@ static int ehci_mxc_setup(struct usb_hcd *hcd) ehci_reset(ehci); + /* set up the PORTSCx register */ + ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]); + + /* is this really needed? */ + msleep(10); + ehci_port_power(ehci, 0); return 0; } @@ -114,7 +122,7 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev) struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data; struct usb_hcd *hcd; struct resource *res; - int irq, ret, temp; + int irq, ret; struct ehci_mxc_priv *priv; struct device *dev = &pdev->dev; @@ -188,10 +196,6 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev) clk_enable(priv->ahbclk); } - /* set up the PORTSCx register */ - ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]); - mdelay(10); - /* setup specific usb hw */ ret = mxc_initialize_usb_hw(pdev->id, pdata->flags); if (ret < 0) diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index a1e8d273103..655f3c9f88b 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -103,6 +103,19 @@ static int ehci_pci_setup(struct usb_hcd *hcd) if (retval) return retval; + if ((pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x7808) || + (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x4396)) { + /* EHCI controller on AMD SB700/SB800/Hudson-2/3 platforms may + * read/write memory space which does not belong to it when + * there is NULL pointer with T-bit set to 1 in the frame list + * table. To avoid the issue, the frame list link pointer + * should always contain a valid pointer to a inactive qh. 
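For the AMD SB700/SB800/Hudson quirk described in this comment (the quirk flag is set just below), the driver stops writing end-of-list markers into the periodic frame list and instead points every empty slot at a permanently inactive "dummy" queue head, so the controller never chases a terminator it mishandles. A simplified stand-alone model of the two initialisation strategies; the terminator encoding and the dummy bus address are illustrative only:

#include <stdio.h>
#include <stdint.h>

#define PERIODIC_SIZE 8
#define LIST_END      0x00000001u   /* T-bit set: "no valid pointer here" */

int main(void)
{
    uint32_t periodic[PERIODIC_SIZE];
    uint32_t dummy_qh_dma = 0x1000u; /* bus address of an inactive QH (made up) */
    int use_dummy_qh = 1;            /* set when the quirky host is detected */

    for (int i = 0; i < PERIODIC_SIZE; i++)
        periodic[i] = use_dummy_qh ? dummy_qh_dma : LIST_END;

    printf("slot 0 = %#x (%s)\n", periodic[0],
           use_dummy_qh ? "valid but inactive QH" : "list terminator");
    return 0;
}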
+ */ + ehci->use_dummy_qh = 1; + ehci_info(ehci, "applying AMD SB700/SB800/Hudson-2/3 EHCI " + "dummy qh workaround\n"); + } + /* data structure init */ retval = ehci_init(hcd); if (retval) @@ -148,6 +161,18 @@ static int ehci_pci_setup(struct usb_hcd *hcd) if (pdev->revision < 0xa4) ehci->no_selective_suspend = 1; break; + + /* MCP89 chips on the MacBookAir3,1 give EPROTO when + * fetching device descriptors unless LPM is disabled. + * There are also intermittent problems enumerating + * devices with PPCD enabled. + */ + case 0x0d9d: + ehci_info(ehci, "disable lpm/ppcd for nvidia mcp89"); + ehci->has_lpm = 0; + ehci->has_ppcd = 0; + ehci->command &= ~CMD_PPCEE; + break; } break; case PCI_VENDOR_ID_VIA: diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index a92526d6e5a..d9f78eb2657 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -98,7 +98,14 @@ static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr) */ *prev_p = *periodic_next_shadow(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p)); - *hw_p = *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p)); + + if (!ehci->use_dummy_qh || + *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p)) + != EHCI_LIST_END(ehci)) + *hw_p = *shadow_next_periodic(ehci, &here, + Q_NEXT_TYPE(ehci, *hw_p)); + else + *hw_p = ehci->dummy->qh_dma; } /* how many of the uframe's 125 usecs are allocated? */ @@ -2335,7 +2342,11 @@ restart: * pointer for much longer, if at all. */ *q_p = q.itd->itd_next; - *hw_p = q.itd->hw_next; + if (!ehci->use_dummy_qh || + q.itd->hw_next != EHCI_LIST_END(ehci)) + *hw_p = q.itd->hw_next; + else + *hw_p = ehci->dummy->qh_dma; type = Q_NEXT_TYPE(ehci, q.itd->hw_next); wmb(); modified = itd_complete (ehci, q.itd); @@ -2368,7 +2379,11 @@ restart: * URB completion. 
*/ *q_p = q.sitd->sitd_next; - *hw_p = q.sitd->hw_next; + if (!ehci->use_dummy_qh || + q.sitd->hw_next != EHCI_LIST_END(ehci)) + *hw_p = q.sitd->hw_next; + else + *hw_p = ehci->dummy->qh_dma; type = Q_NEXT_TYPE(ehci, q.sitd->hw_next); wmb(); modified = sitd_complete (ehci, q.sitd); diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index bde823f704e..ba8eab366b8 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h @@ -73,6 +73,7 @@ struct ehci_hcd { /* one per controller */ /* async schedule support */ struct ehci_qh *async; + struct ehci_qh *dummy; /* For AMD quirk use */ struct ehci_qh *reclaim; unsigned scanning : 1; @@ -131,6 +132,7 @@ struct ehci_hcd { /* one per controller */ unsigned need_io_watchdog:1; unsigned broken_periodic:1; unsigned fs_i_thresh:1; /* Intel iso scheduling */ + unsigned use_dummy_qh:1; /* AMD Frame List table quirk*/ /* required for usb32 quirk */ #define OHCI_CTRL_HCFS (3 << 6) diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c index 8196fa11fec..43a39eb56cc 100644 --- a/drivers/usb/host/isp1362-hcd.c +++ b/drivers/usb/host/isp1362-hcd.c @@ -70,7 +70,6 @@ #include <linux/ioport.h> #include <linux/sched.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/list.h> @@ -2684,7 +2683,7 @@ static int __devexit isp1362_remove(struct platform_device *pdev) return 0; } -static int __init isp1362_probe(struct platform_device *pdev) +static int __devinit isp1362_probe(struct platform_device *pdev) { struct usb_hcd *hcd; struct isp1362_hcd *isp1362_hcd; diff --git a/drivers/usb/host/ohci-jz4740.c b/drivers/usb/host/ohci-jz4740.c index 10e1872f3ab..931d588c3fb 100644 --- a/drivers/usb/host/ohci-jz4740.c +++ b/drivers/usb/host/ohci-jz4740.c @@ -273,4 +273,4 @@ static struct platform_driver ohci_hcd_jz4740_driver = { }, }; -MODULE_ALIAS("platfrom:jz4740-ohci"); +MODULE_ALIAS("platform:jz4740-ohci"); diff --git a/drivers/usb/host/uhci-debug.c b/drivers/usb/host/uhci-debug.c index 6e7fb5f38db..ee60cd3ea64 100644 --- a/drivers/usb/host/uhci-debug.c +++ b/drivers/usb/host/uhci-debug.c @@ -12,7 +12,6 @@ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/debugfs.h> -#include <linux/smp_lock.h> #include <asm/io.h> #include "uhci-hcd.h" diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index fef5a1f9d48..5d963e35049 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -229,6 +229,13 @@ void xhci_ring_device(struct xhci_hcd *xhci, int slot_id) static void xhci_disable_port(struct xhci_hcd *xhci, u16 wIndex, u32 __iomem *addr, u32 port_status) { + /* Don't allow the USB core to disable SuperSpeed ports. 
*/ + if (xhci->port_array[wIndex] == 0x03) { + xhci_dbg(xhci, "Ignoring request to disable " + "SuperSpeed port.\n"); + return; + } + /* Write 1 to disable the port */ xhci_writel(xhci, port_status | PORT_PE, addr); port_status = xhci_readl(xhci, addr); diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 202770676da..1d0f45f0e7a 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1045,7 +1045,7 @@ static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci, if (udev->speed == USB_SPEED_SUPER) return ep->ss_ep_comp.wBytesPerInterval; - max_packet = ep->desc.wMaxPacketSize & 0x3ff; + max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize); max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11; /* A 0 in max burst means 1 transfer per ESIT */ return max_packet * (max_burst + 1); @@ -1135,7 +1135,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, /* Fall through */ case USB_SPEED_FULL: case USB_SPEED_LOW: - max_packet = ep->desc.wMaxPacketSize & 0x3ff; + max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize); ep_ctx->ep_info2 |= MAX_PACKET(max_packet); break; default: @@ -1443,6 +1443,13 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) xhci->dcbaa = NULL; scratchpad_free(xhci); + + xhci->num_usb2_ports = 0; + xhci->num_usb3_ports = 0; + kfree(xhci->usb2_ports); + kfree(xhci->usb3_ports); + kfree(xhci->port_array); + xhci->page_size = 0; xhci->page_shift = 0; xhci->bus_suspended = 0; @@ -1627,6 +1634,166 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci) &xhci->ir_set->erst_dequeue); } +static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports, + u32 __iomem *addr, u8 major_revision) +{ + u32 temp, port_offset, port_count; + int i; + + if (major_revision > 0x03) { + xhci_warn(xhci, "Ignoring unknown port speed, " + "Ext Cap %p, revision = 0x%x\n", + addr, major_revision); + /* Ignoring port protocol we can't understand. FIXME */ + return; + } + + /* Port offset and count in the third dword, see section 7.2 */ + temp = xhci_readl(xhci, addr + 2); + port_offset = XHCI_EXT_PORT_OFF(temp); + port_count = XHCI_EXT_PORT_COUNT(temp); + xhci_dbg(xhci, "Ext Cap %p, port offset = %u, " + "count = %u, revision = 0x%x\n", + addr, port_offset, port_count, major_revision); + /* Port count includes the current port offset */ + if (port_offset == 0 || (port_offset + port_count - 1) > num_ports) + /* WTF? "Valid values are ‘1’ to MaxPorts" */ + return; + port_offset--; + for (i = port_offset; i < (port_offset + port_count); i++) { + /* Duplicate entry. Ignore the port if the revisions differ. */ + if (xhci->port_array[i] != 0) { + xhci_warn(xhci, "Duplicate port entry, Ext Cap %p," + " port %u\n", addr, i); + xhci_warn(xhci, "Port was marked as USB %u, " + "duplicated as USB %u\n", + xhci->port_array[i], major_revision); + /* Only adjust the roothub port counts if we haven't + * found a similar duplicate. + */ + if (xhci->port_array[i] != major_revision && + xhci->port_array[i] != (u8) -1) { + if (xhci->port_array[i] == 0x03) + xhci->num_usb3_ports--; + else + xhci->num_usb2_ports--; + xhci->port_array[i] = (u8) -1; + } + /* FIXME: Should we disable the port? */ + continue; + } + xhci->port_array[i] = major_revision; + if (major_revision == 0x03) + xhci->num_usb3_ports++; + else + xhci->num_usb2_ports++; + } + /* FIXME: Should we disable ports not in the Extended Capabilities? */ +} + +/* + * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that + * specify what speeds each port is supposed to be. 
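Earlier in this xhci-mem.c diff, the open-coded '& 0x3ff' becomes GET_MAX_PACKET(): wMaxPacketSize carries the maximum packet size in bits 10..0, so a 10-bit mask silently truncates 1024-byte endpoints (SuperSpeed bulk, high-speed isochronous) to zero. A quick stand-alone check of the difference:

#include <stdio.h>
#include <stdint.h>

#define GET_MAX_PACKET(p) ((p) & 0x7ff)   /* bits 10..0, as in the new macro */

int main(void)
{
    uint16_t wMaxPacketSize = 0x0400;     /* a 1024-byte endpoint */

    printf("old mask 0x3ff -> %u bytes (wrong)\n",
           (unsigned)(wMaxPacketSize & 0x3ff));
    printf("GET_MAX_PACKET -> %u bytes\n",
           (unsigned)GET_MAX_PACKET(wMaxPacketSize));
    return 0;
}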
We can't count on the port + * speed bits in the PORTSC register being correct until a device is connected, + * but we need to set up the two fake roothubs with the correct number of USB + * 3.0 and USB 2.0 ports at host controller initialization time. + */ +static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags) +{ + u32 __iomem *addr; + u32 offset; + unsigned int num_ports; + int i, port_index; + + addr = &xhci->cap_regs->hcc_params; + offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr)); + if (offset == 0) { + xhci_err(xhci, "No Extended Capability registers, " + "unable to set up roothub.\n"); + return -ENODEV; + } + + num_ports = HCS_MAX_PORTS(xhci->hcs_params1); + xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags); + if (!xhci->port_array) + return -ENOMEM; + + /* + * For whatever reason, the first capability offset is from the + * capability register base, not from the HCCPARAMS register. + * See section 5.3.6 for offset calculation. + */ + addr = &xhci->cap_regs->hc_capbase + offset; + while (1) { + u32 cap_id; + + cap_id = xhci_readl(xhci, addr); + if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL) + xhci_add_in_port(xhci, num_ports, addr, + (u8) XHCI_EXT_PORT_MAJOR(cap_id)); + offset = XHCI_EXT_CAPS_NEXT(cap_id); + if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports) + == num_ports) + break; + /* + * Once you're into the Extended Capabilities, the offset is + * always relative to the register holding the offset. + */ + addr += offset; + } + + if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) { + xhci_warn(xhci, "No ports on the roothubs?\n"); + return -ENODEV; + } + xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n", + xhci->num_usb2_ports, xhci->num_usb3_ports); + /* + * Note we could have all USB 3.0 ports, or all USB 2.0 ports. + * Not sure how the USB core will handle a hub with no ports... 
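xhci_setup_port_arrays() above walks a simple chain of capability registers: the first offset comes from HCCPARAMS relative to the capability base, each later offset is counted in 32-bit words relative to the register that held it, and every Supported Protocol entry carries a major revision in its first dword plus a 1-based port offset and count in its third. A stand-alone walk over a fabricated capability array using the same field layout (the register contents are invented for the example):

#include <stdio.h>
#include <stdint.h>

#define EXT_CAPS_ID(p)      ((p) & 0xff)
#define EXT_CAPS_NEXT(p)    (((p) >> 8) & 0xff)   /* offset in 32-bit words */
#define EXT_CAPS_PROTOCOL   2
#define EXT_PORT_MAJOR(p)   (((p) >> 24) & 0xff)
#define EXT_PORT_OFF(p)     ((p) & 0xff)
#define EXT_PORT_COUNT(p)   (((p) >> 8) & 0xff)

int main(void)
{
    /* Fabricated capability space: a USB 2.0 protocol cap covering ports 1-4,
     * then a USB 3.0 protocol cap covering ports 5-6, then end of list. */
    uint32_t regs[] = {
        0x02000402, 0x20425355, 0x00000401, 0,    /* rev 2.0, off 1, count 4 */
        0x03000002, 0x20425355, 0x00000205, 0,    /* rev 3.0, off 5, count 2 */
    };
    uint32_t *addr = regs;

    for (;;) {
        uint32_t cap = *addr;

        if (EXT_CAPS_ID(cap) == EXT_CAPS_PROTOCOL)
            printf("USB %u.0 ports %u..%u\n",
                   (unsigned)EXT_PORT_MAJOR(cap),
                   (unsigned)EXT_PORT_OFF(addr[2]),
                   (unsigned)(EXT_PORT_OFF(addr[2]) +
                              EXT_PORT_COUNT(addr[2]) - 1));
        if (!EXT_CAPS_NEXT(cap))
            break;
        addr += EXT_CAPS_NEXT(cap);               /* relative to this register */
    }
    return 0;
}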
+ */ + if (xhci->num_usb2_ports) { + xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)* + xhci->num_usb2_ports, flags); + if (!xhci->usb2_ports) + return -ENOMEM; + + port_index = 0; + for (i = 0; i < num_ports; i++) { + if (xhci->port_array[i] == 0x03 || + xhci->port_array[i] == 0 || + xhci->port_array[i] == -1) + continue; + + xhci->usb2_ports[port_index] = + &xhci->op_regs->port_status_base + + NUM_PORT_REGS*i; + xhci_dbg(xhci, "USB 2.0 port at index %u, " + "addr = %p\n", i, + xhci->usb2_ports[port_index]); + port_index++; + } + } + if (xhci->num_usb3_ports) { + xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)* + xhci->num_usb3_ports, flags); + if (!xhci->usb3_ports) + return -ENOMEM; + + port_index = 0; + for (i = 0; i < num_ports; i++) + if (xhci->port_array[i] == 0x03) { + xhci->usb3_ports[port_index] = + &xhci->op_regs->port_status_base + + NUM_PORT_REGS*i; + xhci_dbg(xhci, "USB 3.0 port at index %u, " + "addr = %p\n", i, + xhci->usb3_ports[port_index]); + port_index++; + } + } + return 0; +} int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) { @@ -1809,6 +1976,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) if (scratchpad_alloc(xhci, flags)) goto fail; + if (xhci_setup_port_arrays(xhci, flags)) + goto fail; return 0; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 9f3115e729b..df558f6f84e 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -2104,7 +2104,6 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd) if (!(status & STS_EINT)) { spin_unlock(&xhci->lock); - xhci_warn(xhci, "Spurious interrupt.\n"); return IRQ_NONE; } xhci_dbg(xhci, "op reg status = %08x\n", status); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 5d7d4e951ea..45e4a3108cc 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -577,6 +577,65 @@ static void xhci_restore_registers(struct xhci_hcd *xhci) xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base); } +static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci) +{ + u64 val_64; + + /* step 2: initialize command ring buffer */ + val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); + val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | + (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, + xhci->cmd_ring->dequeue) & + (u64) ~CMD_RING_RSVD_BITS) | + xhci->cmd_ring->cycle_state; + xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n", + (long unsigned long) val_64); + xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); +} + +/* + * The whole command ring must be cleared to zero when we suspend the host. + * + * The host doesn't save the command ring pointer in the suspend well, so we + * need to re-program it on resume. Unfortunately, the pointer must be 64-byte + * aligned, because of the reserved bits in the command ring dequeue pointer + * register. Therefore, we can't just set the dequeue pointer back in the + * middle of the ring (TRBs are 16-byte aligned). + */ +static void xhci_clear_command_ring(struct xhci_hcd *xhci) +{ + struct xhci_ring *ring; + struct xhci_segment *seg; + + ring = xhci->cmd_ring; + seg = ring->deq_seg; + do { + memset(seg->trbs, 0, SEGMENT_SIZE); + seg = seg->next; + } while (seg != ring->deq_seg); + + /* Reset the software enqueue and dequeue pointers */ + ring->deq_seg = ring->first_seg; + ring->dequeue = ring->first_seg->trbs; + ring->enq_seg = ring->deq_seg; + ring->enqueue = ring->dequeue; + + /* + * Ring is now zeroed, so the HW should look for change of ownership + * when the cycle bit is set to 1. 
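Because the reserved low bits of the command ring control register force 64-byte alignment, the resume path cannot simply reload a saved dequeue pointer that may sit mid-segment; xhci_clear_command_ring() instead wipes every segment and rewinds both software pointers to the first TRB, with the cycle-bit restore and the register reprogramming following just below. A stand-alone sketch of the whole reset, with a trivial two-segment ring standing in for the real xHCI structures:

#include <stdio.h>
#include <string.h>

#define TRBS_PER_SEGMENT 4

struct segment {
    unsigned int trbs[TRBS_PER_SEGMENT];
    struct segment *next;
};

struct ring {
    struct segment *first_seg, *enq_seg, *deq_seg;
    unsigned int *enqueue, *dequeue;
    int cycle_state;
};

static void clear_ring(struct ring *r)
{
    struct segment *seg = r->deq_seg;

    do {                                    /* zero every segment in the ring */
        memset(seg->trbs, 0, sizeof(seg->trbs));
        seg = seg->next;
    } while (seg != r->deq_seg);

    r->deq_seg = r->first_seg;              /* rewind software pointers */
    r->dequeue = r->first_seg->trbs;
    r->enq_seg = r->deq_seg;
    r->enqueue = r->dequeue;
    r->cycle_state = 1;                     /* ownership flips on cycle bit 1 */
}

int main(void)
{
    struct segment a, b = { .next = &a };
    a.next = &b;
    struct ring r = { &a, &b, &b, b.trbs + 2, b.trbs + 1, 0 };

    clear_ring(&r);
    printf("dequeue back at first TRB: %d, cycle=%d\n",
           r.dequeue == a.trbs, r.cycle_state);
    return 0;
}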
+ */ + ring->cycle_state = 1; + + /* + * Reset the hardware dequeue pointer. + * Yes, this will need to be re-written after resume, but we're paranoid + * and want to make sure the hardware doesn't access bogus memory + * because, say, the BIOS or an SMI started the host without changing + * the command ring pointers. + */ + xhci_set_cmd_ring_deq(xhci); +} + /* * Stop HC (not bus-specific) * @@ -604,6 +663,7 @@ int xhci_suspend(struct xhci_hcd *xhci) spin_unlock_irq(&xhci->lock); return -ETIMEDOUT; } + xhci_clear_command_ring(xhci); /* step 3: save registers */ xhci_save_registers(xhci); @@ -635,7 +695,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) u32 command, temp = 0; struct usb_hcd *hcd = xhci_to_hcd(xhci); struct pci_dev *pdev = to_pci_dev(hcd->self.controller); - u64 val_64; int old_state, retval; old_state = hcd->state; @@ -648,15 +707,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) /* step 1: restore register */ xhci_restore_registers(xhci); /* step 2: initialize command ring buffer */ - val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); - val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | - (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, - xhci->cmd_ring->dequeue) & - (u64) ~CMD_RING_RSVD_BITS) | - xhci->cmd_ring->cycle_state; - xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n", - (long unsigned long) val_64); - xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); + xhci_set_cmd_ring_deq(xhci); /* step 3: restore state and start state*/ /* step 3: set CRS flag */ command = xhci_readl(xhci, &xhci->op_regs->command); @@ -714,6 +765,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) return retval; } + spin_unlock_irq(&xhci->lock); /* Re-setup MSI-X */ if (hcd->irq) free_irq(hcd->irq, hcd); @@ -736,6 +788,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) hcd->irq = pdev->irq; } + spin_lock_irq(&xhci->lock); /* step 4: set Run/Stop bit */ command = xhci_readl(xhci, &xhci->op_regs->command); command |= CMD_RUN; @@ -1496,6 +1549,15 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, cmd_completion = command->completion; cmd_status = &command->status; command->command_trb = xhci->cmd_ring->enqueue; + + /* Enqueue pointer can be left pointing to the link TRB, + * we must handle that + */ + if ((command->command_trb->link.control & TRB_TYPE_BITMASK) + == TRB_TYPE(TRB_LINK)) + command->command_trb = + xhci->cmd_ring->enq_seg->next->trbs; + list_add_tail(&command->cmd_list, &virt_dev->cmd_list); } else { in_ctx = virt_dev->in_ctx; @@ -2219,6 +2281,15 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) /* Attempt to submit the Reset Device command to the command ring */ spin_lock_irqsave(&xhci->lock, flags); reset_device_cmd->command_trb = xhci->cmd_ring->enqueue; + + /* Enqueue pointer can be left pointing to the link TRB, + * we must handle that + */ + if ((reset_device_cmd->command_trb->link.control & TRB_TYPE_BITMASK) + == TRB_TYPE(TRB_LINK)) + reset_device_cmd->command_trb = + xhci->cmd_ring->enq_seg->next->trbs; + list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list); ret = xhci_queue_reset_device(xhci, slot_id); if (ret) { diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 93d3bf4d213..170c367112d 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -454,6 +454,24 @@ struct xhci_doorbell_array { /** + * struct xhci_protocol_caps + * @revision: major revision, minor revision, capability ID, + * and next capability pointer. 
+ * @name_string: Four ASCII characters to say which spec this xHC + * follows, typically "USB ". + * @port_info: Port offset, count, and protocol-defined information. + */ +struct xhci_protocol_caps { + u32 revision; + u32 name_string; + u32 port_info; +}; + +#define XHCI_EXT_PORT_MAJOR(x) (((x) >> 24) & 0xff) +#define XHCI_EXT_PORT_OFF(x) ((x) & 0xff) +#define XHCI_EXT_PORT_COUNT(x) (((x) >> 8) & 0xff) + +/** * struct xhci_container_ctx * @type: Type of context. Used to calculated offsets to contained contexts. * @size: Size of the context data @@ -621,6 +639,11 @@ struct xhci_ep_ctx { #define MAX_PACKET_MASK (0xffff << 16) #define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff) +/* Get max packet size from ep desc. Bit 10..0 specify the max packet size. + * USB2.0 spec 9.6.6. + */ +#define GET_MAX_PACKET(p) ((p) & 0x7ff) + /* tx_info bitmasks */ #define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff) #define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16) @@ -1235,6 +1258,14 @@ struct xhci_hcd { u32 suspended_ports[8]; /* which ports are suspended */ unsigned long resume_done[MAX_HC_PORTS]; + /* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */ + u8 *port_array; + /* Array of pointers to USB 3.0 PORTSC registers */ + u32 __iomem **usb3_ports; + unsigned int num_usb3_ports; + /* Array of pointers to USB 2.0 PORTSC registers */ + u32 __iomem **usb2_ports; + unsigned int num_usb2_ports; }; /* For testing purposes */ diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c index 5a47805d958..c90c89dc000 100644 --- a/drivers/usb/image/microtek.c +++ b/drivers/usb/image/microtek.c @@ -364,7 +364,7 @@ static int mts_scsi_host_reset(struct scsi_cmnd *srb) } static int -mts_scsi_queuecommand(struct scsi_cmnd *srb, mts_scsi_cmnd_callback callback); +mts_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *srb); static void mts_transfer_cleanup( struct urb *transfer ); static void mts_do_sg(struct urb * transfer); @@ -573,7 +573,7 @@ mts_build_transfer_context(struct scsi_cmnd *srb, struct mts_desc* desc) static int -mts_scsi_queuecommand(struct scsi_cmnd *srb, mts_scsi_cmnd_callback callback) +mts_scsi_queuecommand_lck(struct scsi_cmnd *srb, mts_scsi_cmnd_callback callback) { struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]); int err = 0; @@ -626,6 +626,8 @@ out: return err; } +static DEF_SCSI_QCMD(mts_scsi_queuecommand) + static struct scsi_host_template mts_scsi_host_template = { .module = THIS_MODULE, .name = "microtekX6", diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c index 2f43c57743c..9251773ecef 100644 --- a/drivers/usb/misc/cypress_cy7c63.c +++ b/drivers/usb/misc/cypress_cy7c63.c @@ -196,11 +196,9 @@ static ssize_t get_port1_handler(struct device *dev, return read_port(dev, attr, buf, 1, CYPRESS_READ_PORT_ID1); } -static DEVICE_ATTR(port0, S_IWUGO | S_IRUGO, - get_port0_handler, set_port0_handler); +static DEVICE_ATTR(port0, S_IRUGO | S_IWUSR, get_port0_handler, set_port0_handler); -static DEVICE_ATTR(port1, S_IWUGO | S_IRUGO, - get_port1_handler, set_port1_handler); +static DEVICE_ATTR(port1, S_IRUGO | S_IWUSR, get_port1_handler, set_port1_handler); static int cypress_probe(struct usb_interface *interface, diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index 37566419877..c9078e4e1f4 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -553,6 +553,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd, /* needed for power 
consumption */ struct usb_config_descriptor *cfg_descriptor = &dev->udev->actconfig->desc; + memset(&info, 0, sizeof(info)); /* directly from the descriptor */ info.vendor = le16_to_cpu(dev->udev->descriptor.idVendor); info.product = dev->product_id; diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c index 70d00e99a4b..dd573abd2d1 100644 --- a/drivers/usb/misc/sisusbvga/sisusb.c +++ b/drivers/usb/misc/sisusbvga/sisusb.c @@ -3008,6 +3008,7 @@ sisusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) #else x.sisusb_conactive = 0; #endif + memset(x.sisusb_reserved, 0, sizeof(x.sisusb_reserved)); if (copy_to_user((void __user *)arg, &x, sizeof(x))) retval = -EFAULT; diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c index d77aba46ae8..f63776a48e2 100644 --- a/drivers/usb/misc/trancevibrator.c +++ b/drivers/usb/misc/trancevibrator.c @@ -86,7 +86,7 @@ static ssize_t set_speed(struct device *dev, struct device_attribute *attr, return count; } -static DEVICE_ATTR(speed, S_IWUGO | S_IRUGO, show_speed, set_speed); +static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR, show_speed, set_speed); static int tv_probe(struct usb_interface *interface, const struct usb_device_id *id) diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c index 63da2c3c838..c96f51de169 100644 --- a/drivers/usb/misc/usbled.c +++ b/drivers/usb/misc/usbled.c @@ -94,7 +94,7 @@ static ssize_t set_##value(struct device *dev, struct device_attribute *attr, co change_color(led); \ return count; \ } \ -static DEVICE_ATTR(value, S_IWUGO | S_IRUGO, show_##value, set_##value); +static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, show_##value, set_##value); show_set(blue); show_set(red); show_set(green); diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c index de8ef945b53..417b8f207e8 100644 --- a/drivers/usb/misc/usbsevseg.c +++ b/drivers/usb/misc/usbsevseg.c @@ -192,7 +192,7 @@ static ssize_t set_attr_##name(struct device *dev, \ \ return count; \ } \ -static DEVICE_ATTR(name, S_IWUGO | S_IRUGO, show_attr_##name, set_attr_##name); +static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_attr_##name, set_attr_##name); static ssize_t show_attr_text(struct device *dev, struct device_attribute *attr, char *buf) @@ -223,7 +223,7 @@ static ssize_t set_attr_text(struct device *dev, return count; } -static DEVICE_ATTR(text, S_IWUGO | S_IRUGO, show_attr_text, set_attr_text); +static DEVICE_ATTR(text, S_IRUGO | S_IWUSR, show_attr_text, set_attr_text); static ssize_t show_attr_decimals(struct device *dev, struct device_attribute *attr, char *buf) @@ -272,8 +272,7 @@ static ssize_t set_attr_decimals(struct device *dev, return count; } -static DEVICE_ATTR(decimals, S_IWUGO | S_IRUGO, - show_attr_decimals, set_attr_decimals); +static DEVICE_ATTR(decimals, S_IRUGO | S_IWUSR, show_attr_decimals, set_attr_decimals); static ssize_t show_attr_textmode(struct device *dev, struct device_attribute *attr, char *buf) @@ -319,8 +318,7 @@ static ssize_t set_attr_textmode(struct device *dev, return -EINVAL; } -static DEVICE_ATTR(textmode, S_IWUGO | S_IRUGO, - show_attr_textmode, set_attr_textmode); +static DEVICE_ATTR(textmode, S_IRUGO | S_IWUSR, show_attr_textmode, set_attr_textmode); MYDEV_ATTR_SIMPLE_UNSIGNED(powered, update_display_powered); diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c index 796e2f68f74..4ff21587ab0 100644 --- a/drivers/usb/misc/uss720.c +++ b/drivers/usb/misc/uss720.c @@ -3,7 +3,7 @@ /* * uss720.c -- USS720 USB Parport Cable. 
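The memset() additions in iowarrior and sisusbvga above close small information leaks: an ioctl reply assembled on the kernel stack must be fully cleared before copy_to_user(), otherwise unwritten members and padding carry whatever the stack last held. A user-space sketch of the same discipline; plain memcpy() stands in for copy_to_user() and the struct layout and values are invented:

#include <stdio.h>
#include <string.h>

struct info_reply {
    int vendor;
    int product;
    char reserved[8];    /* never written explicitly */
};

static void build_reply(struct info_reply *out)
{
    struct info_reply r;                 /* lives on the stack */

    memset(&r, 0, sizeof(r));            /* the fix: clear everything first */
    r.vendor = 0x07c0;                   /* example values */
    r.product = 0x1500;
    memcpy(out, &r, sizeof(r));          /* stands in for copy_to_user() */
}

int main(void)
{
    struct info_reply out;

    build_reply(&out);
    printf("reserved[0] after fix: %d\n", out.reserved[0]);   /* always 0 */
    return 0;
}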
* - * Copyright (C) 1999, 2005 + * Copyright (C) 1999, 2005, 2010 * Thomas Sailer (t.sailer@alumni.ethz.ch) * * This program is free software; you can redistribute it and/or modify @@ -776,6 +776,8 @@ static const struct usb_device_id uss720_table[] = { { USB_DEVICE(0x0557, 0x2001) }, { USB_DEVICE(0x0729, 0x1284) }, { USB_DEVICE(0x1293, 0x0002) }, + { USB_DEVICE(0x1293, 0x0002) }, + { USB_DEVICE(0x050d, 0x0002) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index 719c6180b31..ac5bfd619e6 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -536,6 +536,7 @@ static const struct file_operations yurex_fops = { .open = yurex_open, .release = yurex_release, .fasync = yurex_fasync, + .llseek = default_llseek, }; diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c index 44cb37b5a4d..c436e1e2c3b 100644 --- a/drivers/usb/mon/mon_bin.c +++ b/drivers/usb/mon/mon_bin.c @@ -15,7 +15,6 @@ #include <linux/poll.h> #include <linux/compat.h> #include <linux/mm.h> -#include <linux/smp_lock.h> #include <linux/scatterlist.h> #include <linux/slab.h> diff --git a/drivers/usb/mon/mon_stat.c b/drivers/usb/mon/mon_stat.c index 8ec94f15a73..e5ce42bd316 100644 --- a/drivers/usb/mon/mon_stat.c +++ b/drivers/usb/mon/mon_stat.c @@ -11,7 +11,6 @@ #include <linux/slab.h> #include <linux/usb.h> #include <linux/fs.h> -#include <linux/smp_lock.h> #include <asm/uaccess.h> #include "usb_mon.h" diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c index 611a9d27436..fcb5206a65b 100644 --- a/drivers/usb/musb/blackfin.c +++ b/drivers/usb/musb/blackfin.c @@ -171,8 +171,9 @@ static irqreturn_t blackfin_interrupt(int irq, void *__hci) } /* Start sampling ID pin, when plug is removed from MUSB */ - if (is_otg_enabled(musb) && (musb->xceiv->state == OTG_STATE_B_IDLE - || musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) { + if ((is_otg_enabled(musb) && (musb->xceiv->state == OTG_STATE_B_IDLE + || musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) || + (musb->int_usb & MUSB_INTR_DISCONNECT && is_host_active(musb))) { mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY); musb->a_wait_bcon = TIMER_DELAY; } @@ -323,30 +324,8 @@ int musb_platform_set_mode(struct musb *musb, u8 musb_mode) return -EIO; } -int __init musb_platform_init(struct musb *musb, void *board_data) +static void musb_platform_reg_init(struct musb *musb) { - - /* - * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE - * and OTG HOST modes, while rev 1.1 and greater require PE7 to - * be low for DEVICE mode and high for HOST mode. 
We set it high - * here because we are in host mode - */ - - if (gpio_request(musb->config->gpio_vrsel, "USB_VRSEL")) { - printk(KERN_ERR "Failed ro request USB_VRSEL GPIO_%d \n", - musb->config->gpio_vrsel); - return -ENODEV; - } - gpio_direction_output(musb->config->gpio_vrsel, 0); - - usb_nop_xceiv_register(); - musb->xceiv = otg_get_transceiver(); - if (!musb->xceiv) { - gpio_free(musb->config->gpio_vrsel); - return -ENODEV; - } - if (ANOMALY_05000346) { bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value); SSYNC(); @@ -358,7 +337,8 @@ int __init musb_platform_init(struct musb *musb, void *board_data) } /* Configure PLL oscillator register */ - bfin_write_USB_PLLOSC_CTRL(0x30a8); + bfin_write_USB_PLLOSC_CTRL(0x3080 | + ((480/musb->config->clkin) << 1)); SSYNC(); bfin_write_USB_SRP_CLKDIV((get_sclk()/1000) / 32 - 1); @@ -380,6 +360,33 @@ int __init musb_platform_init(struct musb *musb, void *board_data) EP2_RX_ENA | EP3_RX_ENA | EP4_RX_ENA | EP5_RX_ENA | EP6_RX_ENA | EP7_RX_ENA); SSYNC(); +} + +int __init musb_platform_init(struct musb *musb, void *board_data) +{ + + /* + * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE + * and OTG HOST modes, while rev 1.1 and greater require PE7 to + * be low for DEVICE mode and high for HOST mode. We set it high + * here because we are in host mode + */ + + if (gpio_request(musb->config->gpio_vrsel, "USB_VRSEL")) { + printk(KERN_ERR "Failed ro request USB_VRSEL GPIO_%d\n", + musb->config->gpio_vrsel); + return -ENODEV; + } + gpio_direction_output(musb->config->gpio_vrsel, 0); + + usb_nop_xceiv_register(); + musb->xceiv = otg_get_transceiver(); + if (!musb->xceiv) { + gpio_free(musb->config->gpio_vrsel); + return -ENODEV; + } + + musb_platform_reg_init(musb); if (is_host_enabled(musb)) { musb->board_set_vbus = bfin_set_vbus; @@ -394,6 +401,27 @@ int __init musb_platform_init(struct musb *musb, void *board_data) return 0; } +#ifdef CONFIG_PM +void musb_platform_save_context(struct musb *musb, + struct musb_context_registers *musb_context) +{ + if (is_host_active(musb)) + /* + * During hibernate gpio_vrsel will change from high to low + * low which will generate wakeup event resume the system + * immediately. Set it to 0 before hibernate to avoid this + * wakeup event. + */ + gpio_set_value(musb->config->gpio_vrsel, 0); +} + +void musb_platform_restore_context(struct musb *musb, + struct musb_context_registers *musb_context) +{ + musb_platform_reg_init(musb); +} +#endif + int musb_platform_exit(struct musb *musb) { gpio_free(musb->config->gpio_vrsel); diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index c9f9024c551..99beebce855 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -552,7 +552,8 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, if (int_usb & MUSB_INTR_SESSREQ) { void __iomem *mbase = musb->mregs; - if (devctl & MUSB_DEVCTL_BDEVICE) { + if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS + && (devctl & MUSB_DEVCTL_BDEVICE)) { DBG(3, "SessReq while on B state\n"); return IRQ_HANDLED; } @@ -1052,6 +1053,11 @@ static void musb_shutdown(struct platform_device *pdev) clk_put(musb->clock); spin_unlock_irqrestore(&musb->lock, flags); + if (!is_otg_enabled(musb) && is_host_enabled(musb)) + usb_remove_hcd(musb_to_hcd(musb)); + musb_writeb(musb->mregs, MUSB_DEVCTL, 0); + musb_platform_exit(musb); + /* FIXME power down */ } @@ -2110,12 +2116,15 @@ bad_config: * Otherwise, wait till the gadget driver hooks up. 
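The Blackfin PLL oscillator setup above stops being a magic constant: the multiplier field is now derived from the board's input clock, so that clkin MHz times 480/clkin always yields the 480 MHz USB PLL rate. A quick check that the formula reproduces the old hard-coded register value for a 24 MHz input clock:

#include <stdio.h>

int main(void)
{
    unsigned int clkin = 24;                        /* board clock in MHz */
    unsigned int reg = 0x3080 | ((480 / clkin) << 1);

    printf("PLLOSC_CTRL for %u MHz: %#x\n", clkin, reg);   /* prints 0x30a8 */
    return 0;
}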
*/ if (!is_otg_enabled(musb) && is_host_enabled(musb)) { + struct usb_hcd *hcd = musb_to_hcd(musb); + MUSB_HST_MODE(musb); musb->xceiv->default_a = 1; musb->xceiv->state = OTG_STATE_A_IDLE; status = usb_add_hcd(musb_to_hcd(musb), -1, 0); + hcd->self.uses_pio_for_control = 1; DBG(1, "%s mode, status %d, devctl %02x %c\n", "HOST", status, musb_readb(musb->mregs, MUSB_DEVCTL), @@ -2244,13 +2253,6 @@ static int __exit musb_remove(struct platform_device *pdev) */ musb_exit_debugfs(musb); musb_shutdown(pdev); -#ifdef CONFIG_USB_MUSB_HDRC_HCD - if (musb->board_mode == MUSB_HOST) - usb_remove_hcd(musb_to_hcd(musb)); -#endif - musb_writeb(musb->mregs, MUSB_DEVCTL, 0); - musb_platform_exit(musb); - musb_writeb(musb->mregs, MUSB_DEVCTL, 0); musb_free(musb); iounmap(ctrl_base); @@ -2411,9 +2413,6 @@ static int musb_suspend(struct device *dev) unsigned long flags; struct musb *musb = dev_to_musb(&pdev->dev); - if (!musb->clock) - return 0; - spin_lock_irqsave(&musb->lock, flags); if (is_peripheral_active(musb)) { @@ -2428,10 +2427,12 @@ static int musb_suspend(struct device *dev) musb_save_context(musb); - if (musb->set_clock) - musb->set_clock(musb->clock, 0); - else - clk_disable(musb->clock); + if (musb->clock) { + if (musb->set_clock) + musb->set_clock(musb->clock, 0); + else + clk_disable(musb->clock); + } spin_unlock_irqrestore(&musb->lock, flags); return 0; } @@ -2441,13 +2442,12 @@ static int musb_resume_noirq(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct musb *musb = dev_to_musb(&pdev->dev); - if (!musb->clock) - return 0; - - if (musb->set_clock) - musb->set_clock(musb->clock, 1); - else - clk_enable(musb->clock); + if (musb->clock) { + if (musb->set_clock) + musb->set_clock(musb->clock, 1); + else + clk_enable(musb->clock); + } musb_restore_context(musb); diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index 69797e5b46a..febaabcc2b3 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h @@ -487,7 +487,7 @@ struct musb_context_registers { }; #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \ - defined(CONFIG_ARCH_OMAP4) + defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_BLACKFIN) extern void musb_platform_save_context(struct musb *musb, struct musb_context_registers *musb_context); extern void musb_platform_restore_context(struct musb *musb, diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 5d815049cba..9d6ade82b9f 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -92,6 +92,59 @@ /* ----------------------------------------------------------------------- */ +/* Maps the buffer to dma */ + +static inline void map_dma_buffer(struct musb_request *request, + struct musb *musb) +{ + if (request->request.dma == DMA_ADDR_INVALID) { + request->request.dma = dma_map_single( + musb->controller, + request->request.buf, + request->request.length, + request->tx + ? DMA_TO_DEVICE + : DMA_FROM_DEVICE); + request->mapped = 1; + } else { + dma_sync_single_for_device(musb->controller, + request->request.dma, + request->request.length, + request->tx + ? 
DMA_TO_DEVICE + : DMA_FROM_DEVICE); + request->mapped = 0; + } +} + +/* Unmap the buffer from dma and maps it back to cpu */ +static inline void unmap_dma_buffer(struct musb_request *request, + struct musb *musb) +{ + if (request->request.dma == DMA_ADDR_INVALID) { + DBG(20, "not unmapping a never mapped buffer\n"); + return; + } + if (request->mapped) { + dma_unmap_single(musb->controller, + request->request.dma, + request->request.length, + request->tx + ? DMA_TO_DEVICE + : DMA_FROM_DEVICE); + request->request.dma = DMA_ADDR_INVALID; + request->mapped = 0; + } else { + dma_sync_single_for_cpu(musb->controller, + request->request.dma, + request->request.length, + request->tx + ? DMA_TO_DEVICE + : DMA_FROM_DEVICE); + + } +} + /* * Immediately complete a request. * @@ -119,24 +172,8 @@ __acquires(ep->musb->lock) ep->busy = 1; spin_unlock(&musb->lock); - if (is_dma_capable()) { - if (req->mapped) { - dma_unmap_single(musb->controller, - req->request.dma, - req->request.length, - req->tx - ? DMA_TO_DEVICE - : DMA_FROM_DEVICE); - req->request.dma = DMA_ADDR_INVALID; - req->mapped = 0; - } else if (req->request.dma != DMA_ADDR_INVALID) - dma_sync_single_for_cpu(musb->controller, - req->request.dma, - req->request.length, - req->tx - ? DMA_TO_DEVICE - : DMA_FROM_DEVICE); - } + if (is_dma_capable() && ep->dma) + unmap_dma_buffer(req, musb); if (request->status == 0) DBG(5, "%s done request %p, %d/%d\n", ep->end_point.name, request, @@ -395,6 +432,13 @@ static void txstate(struct musb *musb, struct musb_request *req) #endif if (!use_dma) { + /* + * Unmap the dma buffer back to cpu if dma channel + * programming fails + */ + if (is_dma_capable() && musb_ep->dma) + unmap_dma_buffer(req, musb); + musb_write_fifo(musb_ep->hw_ep, fifo_count, (u8 *) (request->buf + request->actual)); request->actual += fifo_count; @@ -644,10 +688,8 @@ static void rxstate(struct musb *musb, struct musb_request *req) */ csr |= MUSB_RXCSR_DMAENAB; - if (!musb_ep->hb_mult && - musb_ep->hw_ep->rx_double_buffered) - csr |= MUSB_RXCSR_AUTOCLEAR; #ifdef USE_MODE1 + csr |= MUSB_RXCSR_AUTOCLEAR; /* csr |= MUSB_RXCSR_DMAMODE; */ /* this special sequence (enabling and then @@ -656,6 +698,10 @@ static void rxstate(struct musb *musb, struct musb_request *req) */ musb_writew(epio, MUSB_RXCSR, csr | MUSB_RXCSR_DMAMODE); +#else + if (!musb_ep->hb_mult && + musb_ep->hw_ep->rx_double_buffered) + csr |= MUSB_RXCSR_AUTOCLEAR; #endif musb_writew(epio, MUSB_RXCSR, csr); @@ -711,6 +757,21 @@ static void rxstate(struct musb *musb, struct musb_request *req) return; } #endif + /* + * Unmap the dma buffer back to cpu if dma channel + * programming fails. This buffer is mapped if the + * channel allocation is successful + */ + if (is_dma_capable() && musb_ep->dma) { + unmap_dma_buffer(req, musb); + + /* + * Clear DMAENAB and AUTOCLEAR for the + * PIO mode transfer + */ + csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR); + musb_writew(epio, MUSB_RXCSR, csr); + } musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *) (request->buf + request->actual)); @@ -807,7 +868,7 @@ void musb_g_rx(struct musb *musb, u8 epnum) #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) /* Autoclear doesn't clear RxPktRdy for short packets */ - if ((dma->desired_mode == 0) + if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered) || (dma->actual_len & (musb_ep->packet_sz - 1))) { /* ack the read! */ @@ -818,8 +879,16 @@ void musb_g_rx(struct musb *musb, u8 epnum) /* incomplete, and not short? 
wait for next IN packet */ if ((request->actual < request->length) && (musb_ep->dma->actual_len - == musb_ep->packet_sz)) + == musb_ep->packet_sz)) { + /* In double buffer case, continue to unload fifo if + * there is Rx packet in FIFO. + **/ + csr = musb_readw(epio, MUSB_RXCSR); + if ((csr & MUSB_RXCSR_RXPKTRDY) && + hw_ep->rx_double_buffered) + goto exit; return; + } #endif musb_g_giveback(musb_ep, request, 0); @@ -827,7 +896,9 @@ void musb_g_rx(struct musb *musb, u8 epnum) if (!request) return; } - +#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) +exit: +#endif /* Analyze request */ rxstate(musb, to_musb_request(request)); } @@ -916,13 +987,9 @@ static int musb_gadget_enable(struct usb_ep *ep, * likewise high bandwidth periodic tx */ /* Set TXMAXP with the FIFO size of the endpoint - * to disable double buffering mode. Currently, It seems that double - * buffering has problem if musb RTL revision number < 2.0. + * to disable double buffering mode. */ - if (musb->hwvers < MUSB_HWVERS_2000) - musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx); - else - musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11)); + musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11)); csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; if (musb_readw(regs, MUSB_TXCSR) @@ -958,10 +1025,7 @@ static int musb_gadget_enable(struct usb_ep *ep, /* Set RXMAXP with the FIFO size of the endpoint * to disable double buffering mode. */ - if (musb->hwvers < MUSB_HWVERS_2000) - musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_rx); - else - musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11)); + musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11)); /* force shared fifo to OUT-only mode */ if (hw_ep->is_shared_fifo) { @@ -1147,28 +1211,9 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, request->epnum = musb_ep->current_epnum; request->tx = musb_ep->is_in; - if (is_dma_capable() && musb_ep->dma) { - if (request->request.dma == DMA_ADDR_INVALID) { - request->request.dma = dma_map_single( - musb->controller, - request->request.buf, - request->request.length, - request->tx - ? DMA_TO_DEVICE - : DMA_FROM_DEVICE); - request->mapped = 1; - } else { - dma_sync_single_for_device(musb->controller, - request->request.dma, - request->request.length, - request->tx - ? 
DMA_TO_DEVICE - : DMA_FROM_DEVICE); - request->mapped = 0; - } - } else if (!req->buf) { - return -ENODATA; - } else + if (is_dma_capable() && musb_ep->dma) + map_dma_buffer(request, musb); + else request->mapped = 0; spin_lock_irqsave(&musb->lock, lockflags); @@ -1695,8 +1740,10 @@ int __init musb_gadget_setup(struct musb *musb) musb_platform_try_idle(musb, 0); status = device_register(&musb->g.dev); - if (status != 0) + if (status != 0) { + put_device(&musb->g.dev); the_gadget = NULL; + } return status; } @@ -1786,6 +1833,8 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver, spin_unlock_irqrestore(&musb->lock, flags); if (is_otg_enabled(musb)) { + struct usb_hcd *hcd = musb_to_hcd(musb); + DBG(3, "OTG startup...\n"); /* REVISIT: funcall to other code, which also @@ -1800,6 +1849,8 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver, musb->gadget_driver = NULL; musb->g.dev.driver = NULL; spin_unlock_irqrestore(&musb->lock, flags); + } else { + hcd->self.uses_pio_for_control = 1; } } } diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h index 244267527a6..5a727c5b867 100644 --- a/drivers/usb/musb/musb_regs.h +++ b/drivers/usb/musb/musb_regs.h @@ -633,8 +633,9 @@ static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum) return 0; } -static inline void musb_read_txhubport(void __iomem *mbase, u8 epnum) +static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum) { + return 0; } #endif /* CONFIG_BLACKFIN */ diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c index 6f771af5cbd..563114d613d 100644 --- a/drivers/usb/musb/musbhsdma.c +++ b/drivers/usb/musb/musbhsdma.c @@ -158,6 +158,8 @@ static int dma_channel_program(struct dma_channel *channel, dma_addr_t dma_addr, u32 len) { struct musb_dma_channel *musb_channel = channel->private_data; + struct musb_dma_controller *controller = musb_channel->controller; + struct musb *musb = controller->private_data; DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n", musb_channel->epnum, @@ -167,6 +169,18 @@ static int dma_channel_program(struct dma_channel *channel, BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN || channel->status == MUSB_DMA_STATUS_BUSY); + /* + * The DMA engine in RTL1.8 and above cannot handle + * DMA addresses that are not aligned to a 4 byte boundary. + * It ends up masking the last two bits of the address + * programmed in DMA_ADDR. 
+ * + * Fail such DMA transfers, so that the backup PIO mode + * can carry out the transfer + */ + if ((musb->hwvers >= MUSB_HWVERS_1800) && (dma_addr % 4)) + return false; + channel->actual_len = 0; musb_channel->start_addr = dma_addr; musb_channel->len = len; diff --git a/drivers/usb/otg/langwell_otg.c b/drivers/usb/otg/langwell_otg.c index bdc3ea66be6..9fea48264fa 100644 --- a/drivers/usb/otg/langwell_otg.c +++ b/drivers/usb/otg/langwell_otg.c @@ -1896,7 +1896,7 @@ set_a_bus_req(struct device *dev, struct device_attribute *attr, } return count; } -static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUGO, get_a_bus_req, set_a_bus_req); +static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUSR, get_a_bus_req, set_a_bus_req); static ssize_t get_a_bus_drop(struct device *dev, struct device_attribute *attr, char *buf) @@ -1942,8 +1942,7 @@ set_a_bus_drop(struct device *dev, struct device_attribute *attr, } return count; } -static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUGO, - get_a_bus_drop, set_a_bus_drop); +static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUSR, get_a_bus_drop, set_a_bus_drop); static ssize_t get_b_bus_req(struct device *dev, struct device_attribute *attr, char *buf) @@ -1988,7 +1987,7 @@ set_b_bus_req(struct device *dev, struct device_attribute *attr, } return count; } -static DEVICE_ATTR(b_bus_req, S_IRUGO | S_IWUGO, get_b_bus_req, set_b_bus_req); +static DEVICE_ATTR(b_bus_req, S_IRUGO | S_IWUSR, get_b_bus_req, set_b_bus_req); static ssize_t set_a_clr_err(struct device *dev, struct device_attribute *attr, @@ -2012,7 +2011,7 @@ set_a_clr_err(struct device *dev, struct device_attribute *attr, } return count; } -static DEVICE_ATTR(a_clr_err, S_IWUGO, NULL, set_a_clr_err); +static DEVICE_ATTR(a_clr_err, S_IWUSR, NULL, set_a_clr_err); static struct attribute *inputs_attrs[] = { &dev_attr_a_bus_req.attr, diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 89a9a584780..2dec5001352 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -201,6 +201,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) }, { USB_DEVICE(FTDI_VID, FTDI_R2000KU_TRUE_RNG) }, + { USB_DEVICE(FTDI_VID, FTDI_VARDAAN_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0100_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0101_PID) }, { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0102_PID) }, @@ -696,6 +697,7 @@ static struct usb_device_id id_table_combined [] = { .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) }, { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, @@ -794,6 +796,9 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) }, + { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { }, /* Optional parameter entry */ { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 7dfe02f1fb6..bf086728548 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -114,6 
+114,9 @@ /* Lenz LI-USB Computer Interface. */ #define FTDI_LENZ_LIUSB_PID 0xD780 +/* Vardaan Enterprises Serial Interface VEUSB422R3 */ +#define FTDI_VARDAAN_PID 0xF070 + /* * Xsens Technologies BV products (http://www.xsens.com). */ @@ -721,6 +724,7 @@ */ #define RTSYSTEMS_VID 0x2100 /* Vendor ID */ #define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */ +#define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */ /* * Bayer Ascensia Contour blood glucose meter USB-converter cable. @@ -1077,6 +1081,11 @@ #define MJSG_HD_RADIO_PID 0x937C /* + * D.O.Tec products (http://www.directout.eu) + */ +#define FTDI_DOTEC_PID 0x9868 + +/* * Xverve Signalyzer tools (http://www.signalyzer.com/) */ #define XVERVE_SIGNALYZER_ST_PID 0xBCA0 @@ -1100,3 +1109,10 @@ #define FTDI_SCIENCESCOPE_LOGBOOKML_PID 0xFF18 #define FTDI_SCIENCESCOPE_LS_LOGBOOK_PID 0xFF1C #define FTDI_SCIENCESCOPE_HS_LOGBOOK_PID 0xFF1D + +/* + * Milkymist One JTAG/Serial + */ +#define QIHARDWARE_VID 0x20B7 +#define MILKYMISTONE_JTAGSERIAL_PID 0x0713 + diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 2297fb1bcf6..ef2977d3a61 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -518,7 +518,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, - { USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index e64da74bdcc..6954de50c0f 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -21,7 +21,6 @@ #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> @@ -52,6 +51,7 @@ static struct usb_driver usb_serial_driver = { .suspend = usb_serial_suspend, .resume = usb_serial_resume, .no_dynamic_id = 1, + .supports_autosuspend = 1, }; /* There is no MODULE_DEVICE_TABLE for usbserial.c. 
Instead @@ -1344,6 +1344,8 @@ int usb_serial_register(struct usb_serial_driver *driver) return -ENODEV; fixup_generic(driver); + if (driver->usb_driver) + driver->usb_driver->supports_autosuspend = 1; if (!driver->description) driver->description = driver->driver.name; diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index a688b1e686e..689ee1fb702 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -285,7 +285,7 @@ static int slave_configure(struct scsi_device *sdev) /* queue a command */ /* This is always called with scsi_lock(host) held */ -static int queuecommand(struct scsi_cmnd *srb, +static int queuecommand_lck(struct scsi_cmnd *srb, void (*done)(struct scsi_cmnd *)) { struct us_data *us = host_to_us(srb->device->host); @@ -315,6 +315,8 @@ static int queuecommand(struct scsi_cmnd *srb, return 0; } +static DEF_SCSI_QCMD(queuecommand) + /*********************************************************************** * Error handling functions ***********************************************************************/ diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c index 57fc2f532ca..ceba512f84d 100644 --- a/drivers/usb/storage/sierra_ms.c +++ b/drivers/usb/storage/sierra_ms.c @@ -121,7 +121,7 @@ static ssize_t show_truinst(struct device *dev, struct device_attribute *attr, } return result; } -static DEVICE_ATTR(truinst, S_IWUGO | S_IRUGO, show_truinst, NULL); +static DEVICE_ATTR(truinst, S_IRUGO, show_truinst, NULL); int sierra_ms_init(struct us_data *us) { diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 2054b1e25a6..339fac3949d 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -331,10 +331,7 @@ static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp, iu->iu_id = IU_ID_COMMAND; iu->tag = cpu_to_be16(stream_id); - if (sdev->ordered_tags && (cmnd->request->cmd_flags & REQ_HARDBARRIER)) - iu->prio_attr = UAS_ORDERED_TAG; - else - iu->prio_attr = UAS_SIMPLE_TAG; + iu->prio_attr = UAS_SIMPLE_TAG; iu->len = len; int_to_scsilun(sdev->lun, &iu->lun); memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len); @@ -433,7 +430,7 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd, return 0; } -static int uas_queuecommand(struct scsi_cmnd *cmnd, +static int uas_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done)(struct scsi_cmnd *)) { struct scsi_device *sdev = cmnd->device; @@ -491,6 +488,8 @@ static int uas_queuecommand(struct scsi_cmnd *cmnd, return 0; } +static DEF_SCSI_QCMD(uas_queuecommand) + static int uas_eh_abort_handler(struct scsi_cmnd *cmnd) { struct scsi_device *sdev = cmnd->device; diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 6ccdd3dd525..fcc1e32ce25 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -481,6 +481,13 @@ UNUSUAL_DEV( 0x04e8, 0x507c, 0x0220, 0x0220, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_MAX_SECTORS_64), +/* Reported by Vitaly Kuznetsov <vitty@altlinux.ru> */ +UNUSUAL_DEV( 0x04e8, 0x5122, 0x0000, 0x9999, + "Samsung", + "YP-CP3", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_MAX_SECTORS_64 | US_FL_BULK_IGNORE_TAG), + /* Entry and supporting patch by Theodore Kilgore <kilgota@auburn.edu>. * Device uses standards-violating 32-byte Bulk Command Block Wrappers and * reports itself as "Proprietary SCSI Bulk." Cf. device entry 0x084d:0x0011. 
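The scsiglue.c and uas.c hunks above follow the 2.6.37 SCSI host-lock push-down: the old lock-held queuecommand entry point keeps its body under an _lck suffix, and DEF_SCSI_QCMD() emits a wrapper with the new (struct Scsi_Host *, struct scsi_cmnd *) prototype that takes the host lock around it. A minimal sketch of the same conversion for a hypothetical driver (the mydrv_* names are illustrative, not from this patch):

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Original entry point, still written as if called with the host lock
 * held; the _lck suffix is what DEF_SCSI_QCMD() expects. */
static int mydrv_queuecommand_lck(struct scsi_cmnd *srb,
				  void (*done)(struct scsi_cmnd *))
{
	srb->scsi_done = done;	/* completion callback, invoked later */
	/* ... hand srb to the hardware; done(srb) runs when it finishes ... */
	return 0;
}

/* Emits mydrv_queuecommand() with the new prototype, wrapping the
 * _lck body in the Scsi_Host lock. */
static DEF_SCSI_QCMD(mydrv_queuecommand)

static struct scsi_host_template mydrv_template = {
	.name		= "mydrv",
	.queuecommand	= mydrv_queuecommand,
};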
diff --git a/drivers/uwb/allocator.c b/drivers/uwb/allocator.c index 436e4f7110c..e45e673b877 100644 --- a/drivers/uwb/allocator.c +++ b/drivers/uwb/allocator.c @@ -326,7 +326,8 @@ int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv, struct uwb_mas_bm *availab int bit_index; ai = kzalloc(sizeof(struct uwb_rsv_alloc_info), GFP_KERNEL); - + if (!ai) + return UWB_RSV_ALLOC_NOT_FOUND; ai->min_mas = rsv->min_mas; ai->max_mas = rsv->max_mas; ai->max_interval = rsv->max_interval; diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 4b4da5b86ff..f442668a1e5 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -129,8 +129,9 @@ static void handle_tx(struct vhost_net *net) size_t hdr_size; struct socket *sock; - sock = rcu_dereference_check(vq->private_data, - lockdep_is_held(&vq->mutex)); + /* TODO: check that we are running from vhost_worker? * Not sure it's worth it, it's straight-forward enough. */ + sock = rcu_dereference_check(vq->private_data, 1); if (!sock) return; diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 94701ff3a23..159c77a5746 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -884,6 +884,7 @@ static int log_write(void __user *log_base, int r; if (!write_length) return 0; + write_length += write_address % VHOST_PAGE_SIZE; write_address /= VHOST_PAGE_SIZE; for (;;) { u64 base = (u64)(unsigned long)log_base; @@ -897,7 +898,7 @@ static int log_write(void __user *log_base, if (write_length <= VHOST_PAGE_SIZE) break; write_length -= VHOST_PAGE_SIZE; - write_address += VHOST_PAGE_SIZE; + write_address += 1; } return r; } diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c index 3ec24609151..734c650a47c 100644 --- a/drivers/video/backlight/adp8860_bl.c +++ b/drivers/video/backlight/adp8860_bl.c @@ -502,8 +502,10 @@ static ssize_t adp8860_bl_l1_daylight_max_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct adp8860_bl *data = dev_get_drvdata(dev); + int ret = strict_strtoul(buf, 10, &data->cached_daylight_max); + if (ret) + return ret; - strict_strtoul(buf, 10, &data->cached_daylight_max); return adp8860_store(dev, buf, count, ADP8860_BLMX1); } static DEVICE_ATTR(l1_daylight_max, 0664, adp8860_bl_l1_daylight_max_show, @@ -614,7 +616,7 @@ static ssize_t adp8860_bl_ambient_light_zone_store(struct device *dev, if (val == 0) { /* Enable automatic ambient light sensing */ adp8860_set_bits(data->client, ADP8860_MDCR, CMP_AUTOEN); - } else if ((val > 0) && (val < 6)) { + } else if ((val > 0) && (val <= 3)) { /* Disable automatic ambient light sensing */ adp8860_clr_bits(data->client, ADP8860_MDCR, CMP_AUTOEN); @@ -622,7 +624,7 @@ static ssize_t adp8860_bl_ambient_light_zone_store(struct device *dev, mutex_lock(&data->lock); adp8860_read(data->client, ADP8860_CFGR, &reg_val); reg_val &= ~(CFGR_BLV_MASK << CFGR_BLV_SHIFT); - reg_val |= val << CFGR_BLV_SHIFT; + reg_val |= (val - 1) << CFGR_BLV_SHIFT; adp8860_write(data->client, ADP8860_CFGR, reg_val); mutex_unlock(&data->lock); } diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index e207810bba3..08703299ef6 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c @@ -197,12 +197,12 @@ static int backlight_suspend(struct device *dev, pm_message_t state) { struct backlight_device *bd = to_backlight_device(dev); - if (bd->ops->options & BL_CORE_SUSPENDRESUME) { - mutex_lock(&bd->ops_lock); + mutex_lock(&bd->ops_lock); + if (bd->ops && bd->ops->options
& BL_CORE_SUSPENDRESUME) { bd->props.state |= BL_CORE_SUSPENDED; backlight_update_status(bd); - mutex_unlock(&bd->ops_lock); } + mutex_unlock(&bd->ops_lock); return 0; } @@ -211,12 +211,12 @@ static int backlight_resume(struct device *dev) { struct backlight_device *bd = to_backlight_device(dev); - if (bd->ops->options & BL_CORE_SUSPENDRESUME) { - mutex_lock(&bd->ops_lock); + mutex_lock(&bd->ops_lock); + if (bd->ops && bd->ops->options & BL_CORE_SUSPENDRESUME) { bd->props.state &= ~BL_CORE_SUSPENDED; backlight_update_status(bd); - mutex_unlock(&bd->ops_lock); } + mutex_unlock(&bd->ops_lock); return 0; } diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c index 9093ef0fa86..c67801e57aa 100644 --- a/drivers/video/backlight/l4f00242t03.c +++ b/drivers/video/backlight/l4f00242t03.c @@ -78,7 +78,7 @@ static int l4f00242t03_lcd_power_set(struct lcd_device *ld, int power) const u16 slpin = 0x10; const u16 disoff = 0x28; - if (power) { + if (power <= FB_BLANK_NORMAL) { if (priv->lcd_on) return 0; diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c index abc43a0eb97..5d3cf33953a 100644 --- a/drivers/video/backlight/lms283gf05.c +++ b/drivers/video/backlight/lms283gf05.c @@ -129,7 +129,7 @@ static int lms283gf05_power_set(struct lcd_device *ld, int power) struct spi_device *spi = st->spi; struct lms283gf05_pdata *pdata = spi->dev.platform_data; - if (power) { + if (power <= FB_BLANK_NORMAL) { if (pdata) lms283gf05_reset(pdata->reset_gpio, pdata->reset_inverted); diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c index 9fb533f6373..1485f7345f4 100644 --- a/drivers/video/backlight/mbp_nvidia_bl.c +++ b/drivers/video/backlight/mbp_nvidia_bl.c @@ -335,6 +335,24 @@ static const struct dmi_system_id __initdata mbp_device_table[] = { }, .driver_data = (void *)&nvidia_chipset_data, }, + { + .callback = mbp_dmi_match, + .ident = "MacBookAir 3,1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,1"), + }, + .driver_data = (void *)&nvidia_chipset_data, + }, + { + .callback = mbp_dmi_match, + .ident = "MacBookAir 3,2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,2"), + }, + .driver_data = (void *)&nvidia_chipset_data, + }, { } }; diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index 55044351889..21866ec6965 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c @@ -25,6 +25,7 @@ struct pwm_bl_data { struct pwm_device *pwm; struct device *dev; unsigned int period; + unsigned int lth_brightness; int (*notify)(struct device *, int brightness); }; @@ -48,7 +49,9 @@ static int pwm_backlight_update_status(struct backlight_device *bl) pwm_config(pb->pwm, 0, pb->period); pwm_disable(pb->pwm); } else { - pwm_config(pb->pwm, brightness * pb->period / max, pb->period); + brightness = pb->lth_brightness + + (brightness * (pb->period - pb->lth_brightness) / max); + pwm_config(pb->pwm, brightness, pb->period); pwm_enable(pb->pwm); } return 0; @@ -92,6 +95,8 @@ static int pwm_backlight_probe(struct platform_device *pdev) pb->period = data->pwm_period_ns; pb->notify = data->notify; + pb->lth_brightness = data->lth_brightness * + (data->pwm_period_ns / data->max_brightness); pb->dev = &pdev->dev; pb->pwm = pwm_request(data->pwm_id, "backlight"); diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c index 
a3128c9cb7a..5927db0da99 100644 --- a/drivers/video/backlight/s6e63m0.c +++ b/drivers/video/backlight/s6e63m0.c @@ -729,10 +729,10 @@ static ssize_t s6e63m0_sysfs_show_gamma_table(struct device *dev, return strlen(buf); } -static DEVICE_ATTR(gamma_table, 0644, +static DEVICE_ATTR(gamma_table, 0444, s6e63m0_sysfs_show_gamma_table, NULL); -static int __init s6e63m0_probe(struct spi_device *spi) +static int __devinit s6e63m0_probe(struct spi_device *spi) { int ret = 0; struct s6e63m0 *lcd = NULL; @@ -829,6 +829,9 @@ static int __devexit s6e63m0_remove(struct spi_device *spi) struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev); s6e63m0_power(lcd, FB_BLANK_POWERDOWN); + device_remove_file(&spi->dev, &dev_attr_gamma_table); + device_remove_file(&spi->dev, &dev_attr_gamma_mode); + backlight_device_unregister(lcd->bd); lcd_device_unregister(lcd->ld); kfree(lcd); diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 54e32c51361..915448ec75b 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -47,7 +47,6 @@ #include <linux/ioport.h> #include <linux/init.h> #include <linux/screen_info.h> -#include <linux/smp_lock.h> #include <video/vga.h> #include <asm/io.h> diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c index cad7d45c8ba..c265aed09e0 100644 --- a/drivers/video/da8xx-fb.c +++ b/drivers/video/da8xx-fb.c @@ -1029,10 +1029,6 @@ static int __init fb_probe(struct platform_device *device) goto err_release_pl_mem; } - ret = request_irq(par->irq, lcdc_irq_handler, 0, DRIVER_NAME, par); - if (ret) - goto err_release_pl_mem; - /* Initialize par */ da8xx_fb_info->var.bits_per_pixel = lcd_cfg->bpp; @@ -1060,7 +1056,7 @@ static int __init fb_probe(struct platform_device *device) ret = fb_alloc_cmap(&da8xx_fb_info->cmap, PALETTE_SIZE, 0); if (ret) - goto err_free_irq; + goto err_release_pl_mem; da8xx_fb_info->cmap.len = par->palette_sz; /* initialize var_screeninfo */ @@ -1088,8 +1084,13 @@ static int __init fb_probe(struct platform_device *device) goto err_cpu_freq; } #endif + + ret = request_irq(par->irq, lcdc_irq_handler, 0, DRIVER_NAME, par); + if (ret) + goto irq_freq; return 0; +irq_freq: #ifdef CONFIG_CPU_FREQ err_cpu_freq: unregister_framebuffer(da8xx_fb_info); @@ -1098,9 +1099,6 @@ err_cpu_freq: err_dealloc_cmap: fb_dealloc_cmap(&da8xx_fb_info->cmap); -err_free_irq: - free_irq(par->irq, par); - err_release_pl_mem: dma_free_coherent(NULL, PALETTE_SIZE, par->v_palette_base, par->p_palette_base); diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c index f53b9f1d6ab..5c3960da755 100644 --- a/drivers/video/fbcmap.c +++ b/drivers/video/fbcmap.c @@ -80,6 +80,7 @@ static const struct fb_cmap default_16_colors = { * @cmap: frame buffer colormap structure * @len: length of @cmap * @transp: boolean, 1 if there is transparency, 0 otherwise + * @flags: flags for kmalloc memory allocation * * Allocates memory for a colormap @cmap. @len is the * number of entries in the palette. 
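The fbcmap.c kernel-doc above gains a @flags parameter; the hunks that follow add fb_alloc_cmap_gfp() so that sleepable paths such as fb_set_user_cmap() can allocate with GFP_KERNEL, while fb_alloc_cmap() remains a GFP_ATOMIC wrapper for callers that cannot sleep. A minimal sketch of a hypothetical caller choosing between the two (the example_* name is illustrative, and it assumes the gfp-aware helper is declared where the caller can see it):

#include <linux/fb.h>
#include <linux/gfp.h>

static int example_alloc_cmaps(struct fb_cmap *irq_safe_map,
			       struct fb_cmap *ioctl_map, int len)
{
	int rc;

	/* Atomic/console-style paths keep the old GFP_ATOMIC behaviour. */
	rc = fb_alloc_cmap(irq_safe_map, len, 0);
	if (rc)
		return rc;

	/* Process context (e.g. an FBIOPUTCMAP handler) may sleep, so it
	 * can pass GFP_KERNEL explicitly. */
	rc = fb_alloc_cmap_gfp(ioctl_map, len, 1, GFP_KERNEL);
	if (rc)
		fb_dealloc_cmap(irq_safe_map);

	return rc;
}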
@@ -88,34 +89,48 @@ static const struct fb_cmap default_16_colors = { * */ -int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp) +int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags) { - int size = len*sizeof(u16); - - if (cmap->len != len) { - fb_dealloc_cmap(cmap); - if (!len) - return 0; - if (!(cmap->red = kmalloc(size, GFP_ATOMIC))) - goto fail; - if (!(cmap->green = kmalloc(size, GFP_ATOMIC))) - goto fail; - if (!(cmap->blue = kmalloc(size, GFP_ATOMIC))) - goto fail; - if (transp) { - if (!(cmap->transp = kmalloc(size, GFP_ATOMIC))) + int size = len * sizeof(u16); + int ret = -ENOMEM; + + if (cmap->len != len) { + fb_dealloc_cmap(cmap); + if (!len) + return 0; + + cmap->red = kmalloc(size, flags); + if (!cmap->red) + goto fail; + cmap->green = kmalloc(size, flags); + if (!cmap->green) + goto fail; + cmap->blue = kmalloc(size, flags); + if (!cmap->blue) + goto fail; + if (transp) { + cmap->transp = kmalloc(size, flags); + if (!cmap->transp) + goto fail; + } else { + cmap->transp = NULL; + } + } + cmap->start = 0; + cmap->len = len; + ret = fb_copy_cmap(fb_default_cmap(len), cmap); + if (ret) goto fail; - } else - cmap->transp = NULL; - } - cmap->start = 0; - cmap->len = len; - fb_copy_cmap(fb_default_cmap(len), cmap); - return 0; + return 0; fail: - fb_dealloc_cmap(cmap); - return -ENOMEM; + fb_dealloc_cmap(cmap); + return ret; +} + +int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp) +{ + return fb_alloc_cmap_gfp(cmap, len, transp, GFP_ATOMIC); } /** @@ -250,8 +265,12 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info) int rc, size = cmap->len * sizeof(u16); struct fb_cmap umap; + if (size < 0 || size < cmap->len) + return -E2BIG; + memset(&umap, 0, sizeof(struct fb_cmap)); - rc = fb_alloc_cmap(&umap, cmap->len, cmap->transp != NULL); + rc = fb_alloc_cmap_gfp(&umap, cmap->len, cmap->transp != NULL, + GFP_KERNEL); if (rc) return rc; if (copy_from_user(umap.red, cmap->red, size) || diff --git a/drivers/video/geode/lxfb.h b/drivers/video/geode/lxfb.h index e4c4d89b786..be8ccb47ebe 100644 --- a/drivers/video/geode/lxfb.h +++ b/drivers/video/geode/lxfb.h @@ -22,6 +22,7 @@ #define DC_HFILT_COUNT 0x100 #define DC_VFILT_COUNT 0x100 #define VP_COEFF_SIZE 0x1000 +#define VP_PAL_COUNT 0x100 #define OUTPUT_CRT 0x01 #define OUTPUT_PANEL 0x02 @@ -48,7 +49,8 @@ struct lxfb_par { uint64_t vp[VP_REG_COUNT]; uint64_t fp[FP_REG_COUNT]; - uint32_t pal[DC_PAL_COUNT]; + uint32_t dc_pal[DC_PAL_COUNT]; + uint32_t vp_pal[VP_PAL_COUNT]; uint32_t hcoeff[DC_HFILT_COUNT * 2]; uint32_t vcoeff[DC_VFILT_COUNT]; uint32_t vp_coeff[VP_COEFF_SIZE / 4]; diff --git a/drivers/video/geode/lxfb_ops.c b/drivers/video/geode/lxfb_ops.c index bc35a95e59d..79e9abc72b8 100644 --- a/drivers/video/geode/lxfb_ops.c +++ b/drivers/video/geode/lxfb_ops.c @@ -276,10 +276,10 @@ static void lx_graphics_enable(struct fb_info *info) write_fp(par, FP_PT1, 0); temp = FP_PT2_SCRC; - if (info->var.sync & FB_SYNC_HOR_HIGH_ACT) + if (!(info->var.sync & FB_SYNC_HOR_HIGH_ACT)) temp |= FP_PT2_HSP; - if (info->var.sync & FB_SYNC_VERT_HIGH_ACT) + if (!(info->var.sync & FB_SYNC_VERT_HIGH_ACT)) temp |= FP_PT2_VSP; write_fp(par, FP_PT2, temp); @@ -610,10 +610,15 @@ static void lx_save_regs(struct lxfb_par *par) memcpy(par->vp, par->vp_regs, sizeof(par->vp)); memcpy(par->fp, par->vp_regs + VP_FP_START, sizeof(par->fp)); - /* save the palette */ + /* save the display controller palette */ write_dc(par, DC_PAL_ADDRESS, 0); - for (i = 0; i < ARRAY_SIZE(par->pal); i++) - par->pal[i] = read_dc(par, 
DC_PAL_DATA); + for (i = 0; i < ARRAY_SIZE(par->dc_pal); i++) + par->dc_pal[i] = read_dc(par, DC_PAL_DATA); + + /* save the video processor palette */ + write_vp(par, VP_PAR, 0); + for (i = 0; i < ARRAY_SIZE(par->vp_pal); i++) + par->vp_pal[i] = read_vp(par, VP_PDR); /* save the horizontal filter coefficients */ filt = par->dc[DC_IRQ_FILT_CTL] | DC_IRQ_FILT_CTL_H_FILT_SEL; @@ -706,8 +711,8 @@ static void lx_restore_display_ctlr(struct lxfb_par *par) /* restore the palette */ write_dc(par, DC_PAL_ADDRESS, 0); - for (i = 0; i < ARRAY_SIZE(par->pal); i++) - write_dc(par, DC_PAL_DATA, par->pal[i]); + for (i = 0; i < ARRAY_SIZE(par->dc_pal); i++) + write_dc(par, DC_PAL_DATA, par->dc_pal[i]); /* restore the horizontal filter coefficients */ filt = par->dc[DC_IRQ_FILT_CTL] | DC_IRQ_FILT_CTL_H_FILT_SEL; @@ -751,6 +756,11 @@ static void lx_restore_video_proc(struct lxfb_par *par) } } + /* restore video processor palette */ + write_vp(par, VP_PAR, 0); + for (i = 0; i < ARRAY_SIZE(par->vp_pal); i++) + write_vp(par, VP_PDR, par->vp_pal[i]); + /* restore video coeff ram */ memcpy(par->vp_regs + VP_VCR, par->vp_coeff, sizeof(par->vp_coeff)); } diff --git a/drivers/video/modedb.c b/drivers/video/modedb.c index 0a4dbdc1693..de450c1fb86 100644 --- a/drivers/video/modedb.c +++ b/drivers/video/modedb.c @@ -855,6 +855,7 @@ const struct fb_videomode *fb_find_nearest_mode(const struct fb_videomode *mode, abs(cmode->yres - mode->yres); if (diff > d) { diff = d; + diff_refresh = abs(cmode->refresh - mode->refresh); best = cmode; } else if (diff == d) { d = abs(cmode->refresh - mode->refresh); diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c index 7cfc170bce1..ca0f6be9d12 100644 --- a/drivers/video/mx3fb.c +++ b/drivers/video/mx3fb.c @@ -27,6 +27,7 @@ #include <linux/clk.h> #include <linux/mutex.h> +#include <mach/dma.h> #include <mach/hardware.h> #include <mach/ipu.h> #include <mach/mx3fb.h> @@ -1420,6 +1421,9 @@ static bool chan_filter(struct dma_chan *chan, void *arg) struct device *dev; struct mx3fb_platform_data *mx3fb_pdata; + if (!imx_dma_is_ipu(chan)) + return false; + if (!rq) return false; diff --git a/drivers/video/omap/Kconfig b/drivers/video/omap/Kconfig index 455c6055325..083c8fe53e2 100644 --- a/drivers/video/omap/Kconfig +++ b/drivers/video/omap/Kconfig @@ -1,7 +1,7 @@ config FB_OMAP tristate "OMAP frame buffer support (EXPERIMENTAL)" - depends on FB && ARCH_OMAP && (OMAP2_DSS = "n") - + depends on FB && (OMAP2_DSS = "n") + depends on ARCH_OMAP1 || ARCH_OMAP2 || ARCH_OMAP3 select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT diff --git a/drivers/video/omap2/vram.c b/drivers/video/omap2/vram.c index fed2a72bc6b..9441e2eb3de 100644 --- a/drivers/video/omap2/vram.c +++ b/drivers/video/omap2/vram.c @@ -551,12 +551,18 @@ void __init omap_vram_reserve_sdram_memblock(void) if (!size) return; - size = PAGE_ALIGN(size); + size = ALIGN(size, SZ_2M); if (paddr) { - if ((paddr & ~PAGE_MASK) || - !memblock_is_region_memory(paddr, size)) { - pr_err("Illegal SDRAM region for VRAM\n"); + if (paddr & ~PAGE_MASK) { + pr_err("VRAM start address 0x%08x not page aligned\n", + paddr); + return; + } + + if (!memblock_is_region_memory(paddr, size)) { + pr_err("Illegal SDRAM region 0x%08x..0x%08x for VRAM\n", + paddr, paddr + size - 1); return; } @@ -570,9 +576,12 @@ void __init omap_vram_reserve_sdram_memblock(void) return; } } else { - paddr = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_REAL_LIMIT); + paddr = memblock_alloc(size, SZ_2M); } + memblock_free(paddr, size); + 
memblock_remove(paddr, size); + omap_vram_add_region(paddr, size); pr_info("Reserving %u bytes SDRAM for VRAM\n", size); diff --git a/drivers/video/riva/rivafb-i2c.c b/drivers/video/riva/rivafb-i2c.c index a0e22ac483a..167400e2a18 100644 --- a/drivers/video/riva/rivafb-i2c.c +++ b/drivers/video/riva/rivafb-i2c.c @@ -94,7 +94,6 @@ static int __devinit riva_setup_i2c_bus(struct riva_i2c_chan *chan, strcpy(chan->adapter.name, name); chan->adapter.owner = THIS_MODULE; - chan->adapter.id = I2C_HW_B_RIVA; chan->adapter.class = i2c_class; chan->adapter.algo_data = &chan->algo; chan->adapter.dev.parent = &chan->par->pdev->dev; diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c index 55b3077ff6f..d7df10315d8 100644 --- a/drivers/video/sh_mobile_hdmi.c +++ b/drivers/video/sh_mobile_hdmi.c @@ -1071,6 +1071,10 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work) if (!hdmi->info) goto out; + hdmi->monspec.modedb_len = 0; + fb_destroy_modedb(hdmi->monspec.modedb); + hdmi->monspec.modedb = NULL; + acquire_console_sem(); /* HDMI disconnect */ @@ -1078,7 +1082,6 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work) release_console_sem(); pm_runtime_put(hdmi->dev); - fb_destroy_modedb(hdmi->monspec.modedb); } out: diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index 50963739a40..b02d97a879d 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c @@ -115,15 +115,16 @@ static const struct fb_videomode default_720p = { .xres = 1280, .yres = 720, - .left_margin = 200, - .right_margin = 88, - .hsync_len = 48, + .left_margin = 220, + .right_margin = 110, + .hsync_len = 40, .upper_margin = 20, .lower_margin = 5, .vsync_len = 5, .pixclock = 13468, + .refresh = 60, .sync = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_HOR_HIGH_ACT, }; @@ -859,7 +860,7 @@ static void sh_mobile_fb_reconfig(struct fb_info *info) /* Couldn't reconfigure, hopefully, can continue as before */ return; - info->fix.line_length = mode2.xres * (ch->cfg.bpp / 8); + info->fix.line_length = mode1.xres * (ch->cfg.bpp / 8); /* * fb_set_var() calls the notifier change internally, only if @@ -867,7 +868,7 @@ static void sh_mobile_fb_reconfig(struct fb_info *info) * user event, we have to call the chain ourselves. 
*/ event.info = info; - event.data = &mode2; + event.data = &mode1; fb_notifier_call_chain(evnt, &event); } @@ -1197,6 +1198,7 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) const struct fb_videomode *mode = cfg->lcd_cfg; unsigned long max_size = 0; int k; + int num_cfg; ch->info = framebuffer_alloc(0, &pdev->dev); if (!ch->info) { @@ -1232,8 +1234,14 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) info->fix = sh_mobile_lcdc_fix; info->fix.smem_len = max_size * (cfg->bpp / 8) * 2; - if (!mode) + if (!mode) { mode = &default_720p; + num_cfg = 1; + } else { + num_cfg = ch->cfg.num_cfg; + } + + fb_videomode_to_modelist(mode, num_cfg, &info->modelist); fb_videomode_to_var(var, mode); /* Default Y virtual resolution is 2x panel size */ @@ -1281,10 +1289,6 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) for (i = 0; i < j; i++) { struct sh_mobile_lcdc_chan *ch = priv->ch + i; - const struct fb_videomode *mode = ch->cfg.lcd_cfg; - - if (!mode) - mode = &default_720p; info = ch->info; @@ -1297,7 +1301,6 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) } } - fb_videomode_to_modelist(mode, ch->cfg.num_cfg, &info->modelist); error = register_framebuffer(info); if (error < 0) goto err1; diff --git a/drivers/video/sis/init.c b/drivers/video/sis/init.c index c311ad3c368..31137adc8fb 100644 --- a/drivers/video/sis/init.c +++ b/drivers/video/sis/init.c @@ -62,11 +62,11 @@ #include "init.h" -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 #include "300vtbl.h" #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 #include "310vtbl.h" #endif @@ -78,7 +78,7 @@ /* POINTER INITIALIZATION */ /*********************************************/ -#if defined(SIS300) || defined(SIS315H) +#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) static void InitCommonPointer(struct SiS_Private *SiS_Pr) { @@ -160,7 +160,7 @@ InitCommonPointer(struct SiS_Private *SiS_Pr) } #endif -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 static void InitTo300Pointer(struct SiS_Private *SiS_Pr) { @@ -237,7 +237,7 @@ InitTo300Pointer(struct SiS_Private *SiS_Pr) } #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static void InitTo310Pointer(struct SiS_Private *SiS_Pr) { @@ -321,13 +321,13 @@ bool SiSInitPtr(struct SiS_Private *SiS_Pr) { if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 InitTo300Pointer(SiS_Pr); #else return false; #endif } else { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 InitTo310Pointer(SiS_Pr); #else return false; @@ -340,9 +340,7 @@ SiSInitPtr(struct SiS_Private *SiS_Pr) /* HELPER: Get ModeID */ /*********************************************/ -#ifndef SIS_XORG_XF86 static -#endif unsigned short SiS_GetModeID(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDisplay, int Depth, bool FSTN, int LCDwidth, int LCDheight) @@ -884,51 +882,51 @@ SiS_GetModeID_VGA2(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDispl void SiS_SetReg(SISIOADDRESS port, unsigned short index, unsigned short data) { - OutPortByte(port, index); - OutPortByte(port + 1, data); + outb((u8)index, port); + outb((u8)data, port + 1); } void SiS_SetRegByte(SISIOADDRESS port, unsigned short data) { - OutPortByte(port, data); + outb((u8)data, port); } void SiS_SetRegShort(SISIOADDRESS port, unsigned short data) { - OutPortWord(port, data); + outw((u16)data, port); } void SiS_SetRegLong(SISIOADDRESS port, unsigned int data) { - OutPortLong(port, data); + outl((u32)data, port); } unsigned char SiS_GetReg(SISIOADDRESS port, unsigned 
short index) { - OutPortByte(port, index); - return(InPortByte(port + 1)); + outb((u8)index, port); + return inb(port + 1); } unsigned char SiS_GetRegByte(SISIOADDRESS port) { - return(InPortByte(port)); + return inb(port); } unsigned short SiS_GetRegShort(SISIOADDRESS port) { - return(InPortWord(port)); + return inw(port); } unsigned int SiS_GetRegLong(SISIOADDRESS port) { - return(InPortLong(port)); + return inl(port); } void @@ -1089,7 +1087,7 @@ static void SiSInitPCIetc(struct SiS_Private *SiS_Pr) { switch(SiS_Pr->ChipType) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 case SIS_300: case SIS_540: case SIS_630: @@ -1108,7 +1106,7 @@ SiSInitPCIetc(struct SiS_Private *SiS_Pr) SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x1E,0x5A); break; #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 case SIS_315H: case SIS_315: case SIS_315PRO: @@ -1152,9 +1150,7 @@ SiSInitPCIetc(struct SiS_Private *SiS_Pr) /* HELPER: SetLVDSetc */ /*********************************************/ -#ifdef SIS_LINUX_KERNEL static -#endif void SiSSetLVDSetc(struct SiS_Private *SiS_Pr) { @@ -1174,7 +1170,7 @@ SiSSetLVDSetc(struct SiS_Private *SiS_Pr) if((temp == 1) || (temp == 2)) return; switch(SiS_Pr->ChipType) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 case SIS_540: case SIS_630: case SIS_730: @@ -1188,7 +1184,7 @@ SiSSetLVDSetc(struct SiS_Private *SiS_Pr) } break; #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 case SIS_550: case SIS_650: case SIS_740: @@ -1420,9 +1416,7 @@ SiS_ResetSegmentRegisters(struct SiS_Private *SiS_Pr) /* HELPER: GetVBType */ /*********************************************/ -#ifdef SIS_LINUX_KERNEL static -#endif void SiS_GetVBType(struct SiS_Private *SiS_Pr) { @@ -1487,7 +1481,6 @@ SiS_GetVBType(struct SiS_Private *SiS_Pr) /* HELPER: Check RAM size */ /*********************************************/ -#ifdef SIS_LINUX_KERNEL static bool SiS_CheckMemorySize(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex) @@ -1501,13 +1494,12 @@ SiS_CheckMemorySize(struct SiS_Private *SiS_Pr, unsigned short ModeNo, if(AdapterMemSize < memorysize) return false; return true; } -#endif /*********************************************/ /* HELPER: Get DRAM type */ /*********************************************/ -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static unsigned char SiS_Get310DRAMType(struct SiS_Private *SiS_Pr) { @@ -1574,7 +1566,6 @@ SiS_GetMCLK(struct SiS_Private *SiS_Pr) /* HELPER: ClearBuffer */ /*********************************************/ -#ifdef SIS_LINUX_KERNEL static void SiS_ClearBuffer(struct SiS_Private *SiS_Pr, unsigned short ModeNo) { @@ -1587,7 +1578,7 @@ SiS_ClearBuffer(struct SiS_Private *SiS_Pr, unsigned short ModeNo) if(SiS_Pr->SiS_ModeType >= ModeEGA) { if(ModeNo > 0x13) { - SiS_SetMemory(memaddr, memsize, 0); + memset_io(memaddr, 0, memsize); } else { pBuffer = (unsigned short SISIOMEMTYPE *)memaddr; for(i = 0; i < 0x4000; i++) writew(0x0000, &pBuffer[i]); @@ -1596,10 +1587,9 @@ SiS_ClearBuffer(struct SiS_Private *SiS_Pr, unsigned short ModeNo) pBuffer = (unsigned short SISIOMEMTYPE *)memaddr; for(i = 0; i < 0x4000; i++) writew(0x0720, &pBuffer[i]); } else { - SiS_SetMemory(memaddr, 0x8000, 0); + memset_io(memaddr, 0, 0x8000); } } -#endif /*********************************************/ /* HELPER: SearchModeID */ @@ -2132,7 +2122,7 @@ SiS_SetCRT1CRTC(struct SiS_Private *SiS_Pr, unsigned short ModeNo, SiS_SetReg(SiS_Pr->SiS_P3d4,0x14,0x4F); } -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType == XGI_20) { SiS_SetReg(SiS_Pr->SiS_P3d4,0x04,crt1data[4] - 1); if(!(temp = 
crt1data[5] & 0x1f)) { @@ -2215,7 +2205,7 @@ SiS_SetCRT1VCLK(struct SiS_Private *SiS_Pr, unsigned short ModeNo, SiS_SetReg(SiS_Pr->SiS_P3c4,0x2c,clkb); if(SiS_Pr->ChipType >= SIS_315H) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 SiS_SetReg(SiS_Pr->SiS_P3c4,0x2D,0x01); if(SiS_Pr->ChipType == XGI_20) { unsigned short mf = SiS_GetModeFlag(SiS_Pr, ModeNo, ModeIdIndex); @@ -2236,7 +2226,7 @@ SiS_SetCRT1VCLK(struct SiS_Private *SiS_Pr, unsigned short ModeNo, /* FIFO */ /*********************************************/ -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 void SiS_GetFIFOThresholdIndex300(struct SiS_Private *SiS_Pr, unsigned short *idx1, unsigned short *idx2) @@ -2506,11 +2496,7 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo, SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x09,0x80,data); /* Write foreground and background queue */ -#ifdef SIS_LINUX_KERNEL templ = sisfb_read_nbridge_pci_dword(SiS_Pr, 0x50); -#else - templ = pciReadLong(0x00000000, 0x50); -#endif if(SiS_Pr->ChipType == SIS_730) { @@ -2530,13 +2516,8 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo, } -#ifdef SIS_LINUX_KERNEL sisfb_write_nbridge_pci_dword(SiS_Pr, 0x50, templ); templ = sisfb_read_nbridge_pci_dword(SiS_Pr, 0xA0); -#else - pciWriteLong(0x00000000, 0x50, templ); - templ = pciReadLong(0x00000000, 0xA0); -#endif /* GUI grant timer (PCI config 0xA3) */ if(SiS_Pr->ChipType == SIS_730) { @@ -2552,15 +2533,11 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo, } -#ifdef SIS_LINUX_KERNEL sisfb_write_nbridge_pci_dword(SiS_Pr, 0xA0, templ); -#else - pciWriteLong(0x00000000, 0xA0, templ); -#endif } -#endif /* SIS300 */ +#endif /* CONFIG_FB_SIS_300 */ -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static void SiS_SetCRT1FIFO_310(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex) { @@ -2612,7 +2589,7 @@ SiS_SetVCLKState(struct SiS_Private *SiS_Pr, unsigned short ModeNo, } if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 if(VCLK > 150) data |= 0x80; SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x07,0x7B,data); @@ -2621,7 +2598,7 @@ SiS_SetVCLKState(struct SiS_Private *SiS_Pr, unsigned short ModeNo, SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x32,0xF7,data); #endif } else if(SiS_Pr->ChipType < XGI_20) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(VCLK >= 166) data |= 0x0c; SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x32,0xf3,data); @@ -2630,7 +2607,7 @@ SiS_SetVCLKState(struct SiS_Private *SiS_Pr, unsigned short ModeNo, } #endif } else { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(VCLK >= 200) data |= 0x0c; if(SiS_Pr->ChipType == XGI_20) data &= ~0x04; SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x32,0xf3,data); @@ -2675,7 +2652,7 @@ SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RRTI) { unsigned short data, infoflag = 0, modeflag, resindex; -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; unsigned short data2, data3; #endif @@ -2736,7 +2713,7 @@ SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x0F,0xB7,data); } -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType >= SIS_315H) { SiS_SetRegAND(SiS_Pr->SiS_P3c4,0x31,0xfb); } @@ -2826,7 +2803,7 @@ SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, SiS_SetVCLKState(SiS_Pr, ModeNo, RRTI, ModeIdIndex); -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(((SiS_Pr->ChipType >= SIS_315H) && (SiS_Pr->ChipType < SIS_661)) || (SiS_Pr->ChipType 
== XGI_40)) { if(SiS_GetReg(SiS_Pr->SiS_P3d4,0x31) & 0x40) { @@ -2845,7 +2822,7 @@ SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, #endif } -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static void SiS_SetupDualChip(struct SiS_Private *SiS_Pr) { @@ -2999,11 +2976,6 @@ SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sho SiS_Pr->SiS_SelectCRT2Rate = 0; SiS_Pr->SiS_SetFlag &= (~ProgrammingCRT2); -#ifdef SIS_XORG_XF86 - xf86DrvMsgVerb(0, X_PROBED, 4, "(init: VBType=0x%04x, VBInfo=0x%04x)\n", - SiS_Pr->SiS_VBType, SiS_Pr->SiS_VBInfo); -#endif - if(SiS_Pr->SiS_VBInfo & SetSimuScanMode) { if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) { SiS_Pr->SiS_SetFlag |= ProgrammingCRT2; @@ -3028,7 +3000,7 @@ SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sho } switch(SiS_Pr->ChipType) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 case SIS_300: SiS_SetCRT1FIFO_300(SiS_Pr, ModeNo, RefreshRateTableIndex); break; @@ -3039,7 +3011,7 @@ SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sho break; #endif default: -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType == XGI_20) { unsigned char sr2b = 0, sr2c = 0; switch(ModeNo) { @@ -3062,7 +3034,7 @@ SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sho SiS_SetCRT1ModeRegs(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType == XGI_40) { SiS_SetupDualChip(SiS_Pr); } @@ -3070,11 +3042,9 @@ SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sho SiS_LoadDAC(SiS_Pr, ModeNo, ModeIdIndex); -#ifdef SIS_LINUX_KERNEL if(SiS_Pr->SiS_flag_clearbuffer) { SiS_ClearBuffer(SiS_Pr, ModeNo); } -#endif if(!(SiS_Pr->SiS_VBInfo & (SetSimuScanMode | SwitchCRT2 | SetCRT2ToLCDA))) { SiS_WaitRetrace1(SiS_Pr); @@ -3104,7 +3074,7 @@ SiS_InitVB(struct SiS_Private *SiS_Pr) static void SiS_ResetVB(struct SiS_Private *SiS_Pr) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; unsigned short temp; @@ -3139,7 +3109,7 @@ SiS_StrangeStuff(struct SiS_Private *SiS_Pr) * which locks CRT2 in some way to CRT1 timing. Disable * this here. 
*/ -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if((IS_SIS651) || (IS_SISM650) || SiS_Pr->ChipType == SIS_340 || SiS_Pr->ChipType == XGI_40) { @@ -3160,7 +3130,7 @@ SiS_StrangeStuff(struct SiS_Private *SiS_Pr) static void SiS_Handle760(struct SiS_Private *SiS_Pr) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 unsigned int somebase; unsigned char temp1, temp2, temp3; @@ -3170,11 +3140,7 @@ SiS_Handle760(struct SiS_Private *SiS_Pr) (!(SiS_Pr->SiS_SysFlags & SF_760UMA)) ) return; -#ifdef SIS_LINUX_KERNEL somebase = sisfb_read_mio_pci_word(SiS_Pr, 0x74); -#else - somebase = pciReadWord(0x00001000, 0x74); -#endif somebase &= 0xffff; if(somebase == 0) return; @@ -3190,105 +3156,34 @@ SiS_Handle760(struct SiS_Private *SiS_Pr) temp2 = 0x0b; } -#ifdef SIS_LINUX_KERNEL sisfb_write_nbridge_pci_byte(SiS_Pr, 0x7e, temp1); sisfb_write_nbridge_pci_byte(SiS_Pr, 0x8d, temp2); -#else - pciWriteByte(0x00000000, 0x7e, temp1); - pciWriteByte(0x00000000, 0x8d, temp2); -#endif SiS_SetRegByte((somebase + 0x85), temp3); #endif } /*********************************************/ -/* X.org/XFree86: SET SCREEN PITCH */ -/*********************************************/ - -#ifdef SIS_XORG_XF86 -static void -SiS_SetPitchCRT1(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn) -{ - SISPtr pSiS = SISPTR(pScrn); - unsigned short HDisplay = pSiS->scrnPitch >> 3; - - SiS_SetReg(SiS_Pr->SiS_P3d4,0x13,(HDisplay & 0xFF)); - SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x0E,0xF0,(HDisplay >> 8)); -} - -static void -SiS_SetPitchCRT2(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn) -{ - SISPtr pSiS = SISPTR(pScrn); - unsigned short HDisplay = pSiS->scrnPitch2 >> 3; - - /* Unlock CRT2 */ - if(pSiS->VGAEngine == SIS_315_VGA) - SiS_SetRegOR(SiS_Pr->SiS_Part1Port,0x2F, 0x01); - else - SiS_SetRegOR(SiS_Pr->SiS_Part1Port,0x24, 0x01); - - SiS_SetReg(SiS_Pr->SiS_Part1Port,0x07,(HDisplay & 0xFF)); - SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x09,0xF0,(HDisplay >> 8)); -} - -static void -SiS_SetPitch(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn) -{ - SISPtr pSiS = SISPTR(pScrn); - bool isslavemode = false; - - if( (pSiS->VBFlags2 & VB2_VIDEOBRIDGE) && - ( ((pSiS->VGAEngine == SIS_300_VGA) && - (SiS_GetReg(SiS_Pr->SiS_Part1Port,0x00) & 0xa0) == 0x20) || - ((pSiS->VGAEngine == SIS_315_VGA) && - (SiS_GetReg(SiS_Pr->SiS_Part1Port,0x00) & 0x50) == 0x10) ) ) { - isslavemode = true; - } - - /* We need to set pitch for CRT1 if bridge is in slave mode, too */ - if((pSiS->VBFlags & DISPTYPE_DISP1) || (isslavemode)) { - SiS_SetPitchCRT1(SiS_Pr, pScrn); - } - /* We must not set the pitch for CRT2 if bridge is in slave mode */ - if((pSiS->VBFlags & DISPTYPE_DISP2) && (!isslavemode)) { - SiS_SetPitchCRT2(SiS_Pr, pScrn); - } -} -#endif - -/*********************************************/ /* SiSSetMode() */ /*********************************************/ -#ifdef SIS_XORG_XF86 -/* We need pScrn for setting the pitch correctly */ -bool -SiSSetMode(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, unsigned short ModeNo, bool dosetpitch) -#else bool SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo) -#endif { SISIOADDRESS BaseAddr = SiS_Pr->IOAddress; unsigned short RealModeNo, ModeIdIndex; unsigned char backupreg = 0; -#ifdef SIS_LINUX_KERNEL unsigned short KeepLockReg; SiS_Pr->UseCustomMode = false; SiS_Pr->CRT1UsesCustomMode = false; -#endif SiS_Pr->SiS_flag_clearbuffer = 0; if(SiS_Pr->UseCustomMode) { ModeNo = 0xfe; } else { -#ifdef SIS_LINUX_KERNEL if(!(ModeNo & 0x80)) SiS_Pr->SiS_flag_clearbuffer = 1; -#endif ModeNo &= 0x7f; } @@ -3301,13 +3196,8 @@ SiSSetMode(struct SiS_Private 
*SiS_Pr, unsigned short ModeNo) SiS_GetSysFlags(SiS_Pr); SiS_Pr->SiS_VGAINFO = 0x11; -#if defined(SIS_XORG_XF86) && (defined(i386) || defined(__i386) || defined(__i386__) || defined(__AMD64__) || defined(__amd64__) || defined(__x86_64__)) - if(pScrn) SiS_Pr->SiS_VGAINFO = SiS_GetSetBIOSScratch(pScrn, 0x489, 0xff); -#endif -#ifdef SIS_LINUX_KERNEL KeepLockReg = SiS_GetReg(SiS_Pr->SiS_P3c4,0x05); -#endif SiS_SetReg(SiS_Pr->SiS_P3c4,0x05,0x86); SiSInitPCIetc(SiS_Pr); @@ -3344,12 +3234,10 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo) SiS_GetLCDResInfo(SiS_Pr, ModeNo, ModeIdIndex); SiS_SetLowModeTest(SiS_Pr, ModeNo); -#ifdef SIS_LINUX_KERNEL /* Check memory size (kernel framebuffer driver only) */ if(!SiS_CheckMemorySize(SiS_Pr, ModeNo, ModeIdIndex)) { return false; } -#endif SiS_OpenCRTC(SiS_Pr); @@ -3384,7 +3272,7 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo) SiS_DisplayOn(SiS_Pr); SiS_SetRegByte(SiS_Pr->SiS_P3c6,0xFF); -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType >= SIS_315H) { if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { if(!(SiS_IsDualEdge(SiS_Pr))) { @@ -3396,7 +3284,7 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo) if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { if(SiS_Pr->ChipType >= SIS_315H) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(!SiS_Pr->SiS_ROMNew) { if(SiS_IsVAMode(SiS_Pr)) { SiS_SetRegOR(SiS_Pr->SiS_P3d4,0x35,0x01); @@ -3424,424 +3312,16 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo) } } -#ifdef SIS_XORG_XF86 - if(pScrn) { - /* SetPitch: Adapt to virtual size & position */ - if((ModeNo > 0x13) && (dosetpitch)) { - SiS_SetPitch(SiS_Pr, pScrn); - } - - /* Backup/Set ModeNo in BIOS scratch area */ - SiS_GetSetModeID(pScrn, ModeNo); - } -#endif - SiS_CloseCRTC(SiS_Pr); SiS_Handle760(SiS_Pr); -#ifdef SIS_LINUX_KERNEL /* We never lock registers in XF86 */ if(KeepLockReg != 0xA1) SiS_SetReg(SiS_Pr->SiS_P3c4,0x05,0x00); -#endif return true; } -/*********************************************/ -/* X.org/XFree86: SiSBIOSSetMode() */ -/* for non-Dual-Head mode */ -/*********************************************/ - -#ifdef SIS_XORG_XF86 -bool -SiSBIOSSetMode(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, - DisplayModePtr mode, bool IsCustom) -{ - SISPtr pSiS = SISPTR(pScrn); - unsigned short ModeNo = 0; - - SiS_Pr->UseCustomMode = false; - - if((IsCustom) && (SiS_CheckBuildCustomMode(pScrn, mode, pSiS->VBFlags))) { - - xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, "Setting custom mode %dx%d\n", - SiS_Pr->CHDisplay, - (mode->Flags & V_INTERLACE ? SiS_Pr->CVDisplay * 2 : - (mode->Flags & V_DBLSCAN ? 
SiS_Pr->CVDisplay / 2 : - SiS_Pr->CVDisplay))); - - } else { - - /* Don't need vbflags here; checks done earlier */ - ModeNo = SiS_GetModeNumber(pScrn, mode, pSiS->VBFlags); - if(!ModeNo) return false; - - xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, "Setting standard mode 0x%x\n", ModeNo); - - } - - return(SiSSetMode(SiS_Pr, pScrn, ModeNo, true)); -} - -/*********************************************/ -/* X.org/XFree86: SiSBIOSSetModeCRT2() */ -/* for Dual-Head modes */ -/*********************************************/ - -bool -SiSBIOSSetModeCRT2(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, - DisplayModePtr mode, bool IsCustom) -{ - SISIOADDRESS BaseAddr = SiS_Pr->IOAddress; - SISPtr pSiS = SISPTR(pScrn); -#ifdef SISDUALHEAD - SISEntPtr pSiSEnt = pSiS->entityPrivate; -#endif - unsigned short ModeIdIndex; - unsigned short ModeNo = 0; - unsigned char backupreg = 0; - - SiS_Pr->UseCustomMode = false; - - /* Remember: Custom modes for CRT2 are ONLY supported - * -) on the 30x/B/C, and - * -) if CRT2 is LCD or VGA, or CRT1 is LCDA - */ - - if((IsCustom) && (SiS_CheckBuildCustomMode(pScrn, mode, pSiS->VBFlags))) { - - ModeNo = 0xfe; - - } else { - - ModeNo = SiS_GetModeNumber(pScrn, mode, pSiS->VBFlags); - if(!ModeNo) return false; - - } - - SiSRegInit(SiS_Pr, BaseAddr); - SiSInitPtr(SiS_Pr); - SiS_GetSysFlags(SiS_Pr); -#if defined(i386) || defined(__i386) || defined(__i386__) || defined(__AMD64__) || defined(__amd64__) || defined(__x86_64__) - SiS_Pr->SiS_VGAINFO = SiS_GetSetBIOSScratch(pScrn, 0x489, 0xff); -#else - SiS_Pr->SiS_VGAINFO = 0x11; -#endif - - SiS_SetReg(SiS_Pr->SiS_P3c4,0x05,0x86); - - SiSInitPCIetc(SiS_Pr); - SiSSetLVDSetc(SiS_Pr); - SiSDetermineROMUsage(SiS_Pr); - - /* Save mode info so we can set it from within SetMode for CRT1 */ -#ifdef SISDUALHEAD - if(pSiS->DualHeadMode) { - pSiSEnt->CRT2ModeNo = ModeNo; - pSiSEnt->CRT2DMode = mode; - pSiSEnt->CRT2IsCustom = IsCustom; - pSiSEnt->CRT2CR30 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x30); - pSiSEnt->CRT2CR31 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x31); - pSiSEnt->CRT2CR35 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x35); - pSiSEnt->CRT2CR38 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x38); -#if 0 - /* We can't set CRT2 mode before CRT1 mode is set - says who...? 
*/ - if(pSiSEnt->CRT1ModeNo == -1) { - xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, - "Setting CRT2 mode delayed until after setting CRT1 mode\n"); - return true; - } -#endif - pSiSEnt->CRT2ModeSet = true; - } -#endif - - if(SiS_Pr->UseCustomMode) { - - unsigned short temptemp = SiS_Pr->CVDisplay; - - if(SiS_Pr->CModeFlag & DoubleScanMode) temptemp >>= 1; - else if(SiS_Pr->CInfoFlag & InterlaceMode) temptemp <<= 1; - - xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, - "Setting custom mode %dx%d on CRT2\n", - SiS_Pr->CHDisplay, temptemp); - - } else { - - xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, - "Setting standard mode 0x%x on CRT2\n", ModeNo); - - } - - SiS_UnLockCRT2(SiS_Pr); - - if(!SiS_Pr->UseCustomMode) { - if(!(SiS_SearchModeID(SiS_Pr, &ModeNo, &ModeIdIndex))) return false; - } else { - ModeIdIndex = 0; - } - - SiS_GetVBType(SiS_Pr); - - SiS_InitVB(SiS_Pr); - if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { - if(SiS_Pr->ChipType >= SIS_315H) { - SiS_ResetVB(SiS_Pr); - SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x32,0x10); - SiS_SetRegOR(SiS_Pr->SiS_Part2Port,0x00,0x0c); - backupreg = SiS_GetReg(SiS_Pr->SiS_P3d4,0x38); - } else { - backupreg = SiS_GetReg(SiS_Pr->SiS_P3d4,0x35); - } - } - - /* Get VB information (connectors, connected devices) */ - if(!SiS_Pr->UseCustomMode) { - SiS_GetVBInfo(SiS_Pr, ModeNo, ModeIdIndex, 1); - } else { - /* If this is a custom mode, we don't check the modeflag for CRT2Mode */ - SiS_GetVBInfo(SiS_Pr, ModeNo, ModeIdIndex, 0); - } - SiS_SetYPbPr(SiS_Pr); - SiS_SetTVMode(SiS_Pr, ModeNo, ModeIdIndex); - SiS_GetLCDResInfo(SiS_Pr, ModeNo, ModeIdIndex); - SiS_SetLowModeTest(SiS_Pr, ModeNo); - - SiS_ResetSegmentRegisters(SiS_Pr); - - /* Set mode on CRT2 */ - if( (SiS_Pr->SiS_VBType & VB_SISVB) || - (SiS_Pr->SiS_IF_DEF_LVDS == 1) || - (SiS_Pr->SiS_IF_DEF_CH70xx != 0) || - (SiS_Pr->SiS_IF_DEF_TRUMPION != 0) ) { - SiS_SetCRT2Group(SiS_Pr, ModeNo); - } - - SiS_StrangeStuff(SiS_Pr); - - SiS_DisplayOn(SiS_Pr); - SiS_SetRegByte(SiS_Pr->SiS_P3c6,0xFF); - - if(SiS_Pr->ChipType >= SIS_315H) { - if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { - if(!(SiS_IsDualEdge(SiS_Pr))) { - SiS_SetRegAND(SiS_Pr->SiS_Part1Port,0x13,0xfb); - } - } - } - - if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { - if(SiS_Pr->ChipType >= SIS_315H) { - if(!SiS_Pr->SiS_ROMNew) { - if(SiS_IsVAMode(SiS_Pr)) { - SiS_SetRegOR(SiS_Pr->SiS_P3d4,0x35,0x01); - } else { - SiS_SetRegAND(SiS_Pr->SiS_P3d4,0x35,0xFE); - } - } - - SiS_SetReg(SiS_Pr->SiS_P3d4,0x38,backupreg); - - if(SiS_GetReg(SiS_Pr->SiS_P3d4,0x30) & SetCRT2ToLCD) { - SiS_SetRegAND(SiS_Pr->SiS_P3d4,0x38,0xfc); - } - } else if((SiS_Pr->ChipType == SIS_630) || - (SiS_Pr->ChipType == SIS_730)) { - SiS_SetReg(SiS_Pr->SiS_P3d4,0x35,backupreg); - } - } - - /* SetPitch: Adapt to virtual size & position */ - SiS_SetPitchCRT2(SiS_Pr, pScrn); - - SiS_Handle760(SiS_Pr); - - return true; -} - -/*********************************************/ -/* X.org/XFree86: SiSBIOSSetModeCRT1() */ -/* for Dual-Head modes */ -/*********************************************/ - -bool -SiSBIOSSetModeCRT1(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, - DisplayModePtr mode, bool IsCustom) -{ - SISIOADDRESS BaseAddr = SiS_Pr->IOAddress; - SISPtr pSiS = SISPTR(pScrn); - unsigned short ModeIdIndex, ModeNo = 0; - unsigned char backupreg = 0; -#ifdef SISDUALHEAD - SISEntPtr pSiSEnt = pSiS->entityPrivate; - unsigned char backupcr30, backupcr31, backupcr38, backupcr35, backupp40d=0; - bool backupcustom; -#endif - - SiS_Pr->UseCustomMode = false; - - if((IsCustom) && (SiS_CheckBuildCustomMode(pScrn, mode, pSiS->VBFlags))) { - - 
unsigned short temptemp = SiS_Pr->CVDisplay; - - if(SiS_Pr->CModeFlag & DoubleScanMode) temptemp >>= 1; - else if(SiS_Pr->CInfoFlag & InterlaceMode) temptemp <<= 1; - - xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, - "Setting custom mode %dx%d on CRT1\n", - SiS_Pr->CHDisplay, temptemp); - ModeNo = 0xfe; - - } else { - - ModeNo = SiS_GetModeNumber(pScrn, mode, 0); /* don't give VBFlags */ - if(!ModeNo) return false; - - xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, - "Setting standard mode 0x%x on CRT1\n", ModeNo); - } - - SiSInitPtr(SiS_Pr); - SiSRegInit(SiS_Pr, BaseAddr); - SiS_GetSysFlags(SiS_Pr); -#if defined(i386) || defined(__i386) || defined(__i386__) || defined(__AMD64__) || defined(__amd64__) || defined(__x86_64__) - SiS_Pr->SiS_VGAINFO = SiS_GetSetBIOSScratch(pScrn, 0x489, 0xff); -#else - SiS_Pr->SiS_VGAINFO = 0x11; -#endif - - SiS_SetReg(SiS_Pr->SiS_P3c4,0x05,0x86); - - SiSInitPCIetc(SiS_Pr); - SiSSetLVDSetc(SiS_Pr); - SiSDetermineROMUsage(SiS_Pr); - - SiS_UnLockCRT2(SiS_Pr); - - if(!SiS_Pr->UseCustomMode) { - if(!(SiS_SearchModeID(SiS_Pr, &ModeNo, &ModeIdIndex))) return false; - } else { - ModeIdIndex = 0; - } - - /* Determine VBType */ - SiS_GetVBType(SiS_Pr); - - SiS_InitVB(SiS_Pr); - if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { - if(SiS_Pr->ChipType >= SIS_315H) { - backupreg = SiS_GetReg(SiS_Pr->SiS_P3d4,0x38); - } else { - backupreg = SiS_GetReg(SiS_Pr->SiS_P3d4,0x35); - } - } - - /* Get VB information (connectors, connected devices) */ - /* (We don't care if the current mode is a CRT2 mode) */ - SiS_GetVBInfo(SiS_Pr, ModeNo, ModeIdIndex, 0); - SiS_SetYPbPr(SiS_Pr); - SiS_SetTVMode(SiS_Pr, ModeNo, ModeIdIndex); - SiS_GetLCDResInfo(SiS_Pr, ModeNo, ModeIdIndex); - SiS_SetLowModeTest(SiS_Pr, ModeNo); - - SiS_OpenCRTC(SiS_Pr); - - /* Set mode on CRT1 */ - SiS_SetCRT1Group(SiS_Pr, ModeNo, ModeIdIndex); - if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA) { - SiS_SetCRT2Group(SiS_Pr, ModeNo); - } - - /* SetPitch: Adapt to virtual size & position */ - SiS_SetPitchCRT1(SiS_Pr, pScrn); - - SiS_HandleCRT1(SiS_Pr); - - SiS_StrangeStuff(SiS_Pr); - - SiS_CloseCRTC(SiS_Pr); - -#ifdef SISDUALHEAD - if(pSiS->DualHeadMode) { - pSiSEnt->CRT1ModeNo = ModeNo; - pSiSEnt->CRT1DMode = mode; - } -#endif - - if(SiS_Pr->UseCustomMode) { - SiS_Pr->CRT1UsesCustomMode = true; - SiS_Pr->CSRClock_CRT1 = SiS_Pr->CSRClock; - SiS_Pr->CModeFlag_CRT1 = SiS_Pr->CModeFlag; - } else { - SiS_Pr->CRT1UsesCustomMode = false; - } - - /* Reset CRT2 if changing mode on CRT1 */ -#ifdef SISDUALHEAD - if(pSiS->DualHeadMode) { - if(pSiSEnt->CRT2ModeNo != -1) { - xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, - "(Re-)Setting mode for CRT2\n"); - backupcustom = SiS_Pr->UseCustomMode; - backupcr30 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x30); - backupcr31 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x31); - backupcr35 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x35); - backupcr38 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x38); - if(SiS_Pr->SiS_VBType & VB_SISVB) { - /* Backup LUT-enable */ - if(pSiSEnt->CRT2ModeSet) { - backupp40d = SiS_GetReg(SiS_Pr->SiS_Part4Port,0x0d) & 0x08; - } - } - if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA) { - SiS_SetReg(SiS_Pr->SiS_P3d4,0x30,pSiSEnt->CRT2CR30); - SiS_SetReg(SiS_Pr->SiS_P3d4,0x31,pSiSEnt->CRT2CR31); - SiS_SetReg(SiS_Pr->SiS_P3d4,0x35,pSiSEnt->CRT2CR35); - SiS_SetReg(SiS_Pr->SiS_P3d4,0x38,pSiSEnt->CRT2CR38); - } - - SiSBIOSSetModeCRT2(SiS_Pr, pSiSEnt->pScrn_1, - pSiSEnt->CRT2DMode, pSiSEnt->CRT2IsCustom); - - SiS_SetReg(SiS_Pr->SiS_P3d4,0x30,backupcr30); - SiS_SetReg(SiS_Pr->SiS_P3d4,0x31,backupcr31); - SiS_SetReg(SiS_Pr->SiS_P3d4,0x35,backupcr35); - 
SiS_SetReg(SiS_Pr->SiS_P3d4,0x38,backupcr38); - if(SiS_Pr->SiS_VBType & VB_SISVB) { - SiS_SetRegANDOR(SiS_Pr->SiS_Part4Port,0x0d, ~0x08, backupp40d); - } - SiS_Pr->UseCustomMode = backupcustom; - } - } -#endif - - /* Warning: From here, the custom mode entries in SiS_Pr are - * possibly overwritten - */ - - SiS_DisplayOn(SiS_Pr); - SiS_SetRegByte(SiS_Pr->SiS_P3c6,0xFF); - - if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { - if(SiS_Pr->ChipType >= SIS_315H) { - SiS_SetReg(SiS_Pr->SiS_P3d4,0x38,backupreg); - } else if((SiS_Pr->ChipType == SIS_630) || - (SiS_Pr->ChipType == SIS_730)) { - SiS_SetReg(SiS_Pr->SiS_P3d4,0x35,backupreg); - } - } - - SiS_Handle760(SiS_Pr); - - /* Backup/Set ModeNo in BIOS scratch area */ - SiS_GetSetModeID(pScrn,ModeNo); - - return true; -} -#endif /* Linux_XF86 */ - #ifndef GETBITSTR #define BITMASK(h,l) (((unsigned)(1U << ((h)-(l)+1))-1)<<(l)) #define GENMASK(mask) BITMASK(1?mask,0?mask) @@ -3927,7 +3407,7 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, SiS_Pr->CVBlankStart = SiS_Pr->SiS_VGAVDE; if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 tempbx = SiS_Pr->SiS_VGAHT; if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { tempbx = SiS_Pr->PanelHT; @@ -3936,7 +3416,7 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, remaining = tempbx % 8; #endif } else { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 /* OK for LCDA, LVDS */ tempbx = SiS_Pr->PanelHT - SiS_Pr->PanelXRes; tempax = SiS_Pr->SiS_VGAHDE; /* not /2 ! */ @@ -3950,7 +3430,7 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, SiS_Pr->CHTotal = SiS_Pr->CHBlankEnd = tempbx; if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 if(SiS_Pr->SiS_VGAHDE == SiS_Pr->PanelXRes) { SiS_Pr->CHSyncStart = SiS_Pr->SiS_VGAHDE + ((SiS_Pr->PanelHRS + 1) & ~1); SiS_Pr->CHSyncEnd = SiS_Pr->CHSyncStart + SiS_Pr->PanelHRE; @@ -3982,7 +3462,7 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, } #endif } else { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 tempax = VGAHDE; if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { tempbx = SiS_Pr->PanelXRes; @@ -4001,7 +3481,7 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { tempax = SiS_Pr->PanelYRes; } else if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 /* Stupid hack for 640x400/320x200 */ if(SiS_Pr->SiS_LCDResInfo == Panel_1024x768) { if((tempax + tempbx) == 438) tempbx += 16; @@ -4054,36 +3534,12 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, if(modeflag & DoubleScanMode) tempax |= 0x80; SiS_SetRegANDOR(SiS_Pr->SiS_P3d4,0x09,0x5F,tempax); -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "%d %d %d %d %d %d %d %d (%d %d %d %d)\n", - SiS_Pr->CHDisplay, SiS_Pr->CHSyncStart, SiS_Pr->CHSyncEnd, SiS_Pr->CHTotal, - SiS_Pr->CVDisplay, SiS_Pr->CVSyncStart, SiS_Pr->CVSyncEnd, SiS_Pr->CVTotal, - SiS_Pr->CHBlankStart, SiS_Pr->CHBlankEnd, SiS_Pr->CVBlankStart, SiS_Pr->CVBlankEnd); - xf86DrvMsg(0, X_INFO, " {{0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,\n", - SiS_Pr->CCRT1CRTC[0], SiS_Pr->CCRT1CRTC[1], - SiS_Pr->CCRT1CRTC[2], SiS_Pr->CCRT1CRTC[3], - SiS_Pr->CCRT1CRTC[4], SiS_Pr->CCRT1CRTC[5], - SiS_Pr->CCRT1CRTC[6], SiS_Pr->CCRT1CRTC[7]); - xf86DrvMsg(0, X_INFO, " 0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,\n", - SiS_Pr->CCRT1CRTC[8], SiS_Pr->CCRT1CRTC[9], - SiS_Pr->CCRT1CRTC[10], SiS_Pr->CCRT1CRTC[11], - 
SiS_Pr->CCRT1CRTC[12], SiS_Pr->CCRT1CRTC[13], - SiS_Pr->CCRT1CRTC[14], SiS_Pr->CCRT1CRTC[15]); - xf86DrvMsg(0, X_INFO, " 0x%02x}},\n", SiS_Pr->CCRT1CRTC[16]); -#endif -#endif } void SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, int xres, int yres, -#ifdef SIS_XORG_XF86 - DisplayModePtr current -#endif -#ifdef SIS_LINUX_KERNEL struct fb_var_screeninfo *var, bool writeres -#endif ) { unsigned short HRE, HBE, HRS, HBS, HDE, HT; @@ -4127,25 +3583,10 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, D = B - F - C; -#ifdef SIS_XORG_XF86 - current->HDisplay = (E * 8); - current->HSyncStart = (E * 8) + (F * 8); - current->HSyncEnd = (E * 8) + (F * 8) + (C * 8); - current->HTotal = (E * 8) + (F * 8) + (C * 8) + (D * 8); -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, - "H: A %d B %d C %d D %d E %d F %d HT %d HDE %d HRS %d HBS %d HBE %d HRE %d\n", - A, B, C, D, E, F, HT, HDE, HRS, HBS, HBE, HRE); -#else - (void)VBS; (void)HBS; (void)A; -#endif -#endif -#ifdef SIS_LINUX_KERNEL if(writeres) var->xres = xres = E * 8; var->left_margin = D * 8; var->right_margin = F * 8; var->hsync_len = C * 8; -#endif /* Vertical */ sr_data = crdata[13]; @@ -4192,30 +3633,10 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, D = B - F - C; -#ifdef SIS_XORG_XF86 - current->VDisplay = VDE + 1; - current->VSyncStart = VRS + 1; - current->VSyncEnd = ((VRS & ~0x1f) | VRE) + 1; - if(VRE <= (VRS & 0x1f)) current->VSyncEnd += 32; - current->VTotal = E + D + C + F; -#if 0 - current->VDisplay = E; - current->VSyncStart = E + D; - current->VSyncEnd = E + D + C; - current->VTotal = E + D + C + F; -#endif -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, - "V: A %d B %d C %d D %d E %d F %d VT %d VDE %d VRS %d VBS %d VBE %d VRE %d\n", - A, B, C, D, E, F, VT, VDE, VRS, VBS, VBE, VRE); -#endif -#endif -#ifdef SIS_LINUX_KERNEL if(writeres) var->yres = yres = E; var->upper_margin = D; var->lower_margin = F; var->vsync_len = C; -#endif if((xres == 320) && ((yres == 200) || (yres == 240))) { /* Terrible hack, but correct CRTC data for @@ -4224,17 +3645,9 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, * a negative D. 
The CRT controller does not * seem to like correcting HRE to 50) */ -#ifdef SIS_XORG_XF86 - current->HDisplay = 320; - current->HSyncStart = 328; - current->HSyncEnd = 376; - current->HTotal = 400; -#endif -#ifdef SIS_LINUX_KERNEL var->left_margin = (400 - 376); var->right_margin = (328 - 320); var->hsync_len = (376 - 328); -#endif } diff --git a/drivers/video/sis/init.h b/drivers/video/sis/init.h index b96005c39c6..ee8ed3c203d 100644 --- a/drivers/video/sis/init.h +++ b/drivers/video/sis/init.h @@ -53,21 +53,8 @@ #ifndef _INIT_H_ #define _INIT_H_ -#include "osdef.h" #include "initdef.h" -#ifdef SIS_XORG_XF86 -#include "sis.h" -#define SIS_NEED_inSISREG -#define SIS_NEED_inSISREGW -#define SIS_NEED_inSISREGL -#define SIS_NEED_outSISREG -#define SIS_NEED_outSISREGW -#define SIS_NEED_outSISREGL -#include "sis_regs.h" -#endif - -#ifdef SIS_LINUX_KERNEL #include "vgatypes.h" #include "vstruct.h" #ifdef SIS_CP @@ -78,7 +65,6 @@ #include <linux/fb.h> #include "sis.h" #include <video/sisfb.h> -#endif /* Mode numbers */ static const unsigned short ModeIndex_320x200[] = {0x59, 0x41, 0x00, 0x4f}; @@ -286,7 +272,7 @@ static const struct SiS_ModeResInfo_S SiS_ModeResInfo[] = { 1280, 854, 8,16} /* 0x22 */ }; -#if defined(SIS300) || defined(SIS315H) +#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) static const struct SiS_StandTable_S SiS_StandTable[]= { /* 0x00: MD_0_200 */ @@ -1521,10 +1507,6 @@ static const struct SiS_LVDSCRT1Data SiS_LVDSCRT1640x480_1_H[] = }; bool SiSInitPtr(struct SiS_Private *SiS_Pr); -#ifdef SIS_XORG_XF86 -unsigned short SiS_GetModeID(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDisplay, - int Depth, bool FSTN, int LCDwith, int LCDheight); -#endif unsigned short SiS_GetModeID_LCD(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDisplay, int Depth, bool FSTN, unsigned short CustomT, int LCDwith, int LCDheight, @@ -1550,17 +1532,11 @@ void SiS_SetRegOR(SISIOADDRESS Port,unsigned short Index, unsigned short DataOR void SiS_DisplayOn(struct SiS_Private *SiS_Pr); void SiS_DisplayOff(struct SiS_Private *SiS_Pr); void SiSRegInit(struct SiS_Private *SiS_Pr, SISIOADDRESS BaseAddr); -#ifndef SIS_LINUX_KERNEL -void SiSSetLVDSetc(struct SiS_Private *SiS_Pr); -#endif void SiS_SetEnableDstn(struct SiS_Private *SiS_Pr, int enable); void SiS_SetEnableFstn(struct SiS_Private *SiS_Pr, int enable); unsigned short SiS_GetModeFlag(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex); bool SiSDetermineROMLayout661(struct SiS_Private *SiS_Pr); -#ifndef SIS_LINUX_KERNEL -void SiS_GetVBType(struct SiS_Private *SiS_Pr); -#endif bool SiS_SearchModeID(struct SiS_Private *SiS_Pr, unsigned short *ModeNo, unsigned short *ModeIdIndex); @@ -1572,37 +1548,19 @@ unsigned short SiS_GetColorDepth(struct SiS_Private *SiS_Pr, unsigned short Mode unsigned short ModeIdIndex); unsigned short SiS_GetOffset(struct SiS_Private *SiS_Pr,unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RRTI); -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 void SiS_GetFIFOThresholdIndex300(struct SiS_Private *SiS_Pr, unsigned short *idx1, unsigned short *idx2); unsigned short SiS_GetFIFOThresholdB300(unsigned short idx1, unsigned short idx2); unsigned short SiS_GetLatencyFactor630(struct SiS_Private *SiS_Pr, unsigned short index); #endif void SiS_LoadDAC(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex); -#ifdef SIS_XORG_XF86 -bool SiSSetMode(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, unsigned short ModeNo, - bool dosetpitch); -bool 
SiSBIOSSetMode(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, - DisplayModePtr mode, bool IsCustom); -bool SiSBIOSSetModeCRT2(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, - DisplayModePtr mode, bool IsCustom); -bool SiSBIOSSetModeCRT1(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, - DisplayModePtr mode, bool IsCustom); -#endif -#ifdef SIS_LINUX_KERNEL bool SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo); -#endif void SiS_CalcCRRegisters(struct SiS_Private *SiS_Pr, int depth); void SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex); -#ifdef SIS_XORG_XF86 -void SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, int xres, - int yres, DisplayModePtr current); -#endif -#ifdef SIS_LINUX_KERNEL void SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, int xres, int yres, struct fb_var_screeninfo *var, bool writeres); -#endif /* From init301.c: */ extern void SiS_GetVBInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, @@ -1626,29 +1584,16 @@ extern unsigned short SiS_GetVCLK2Ptr(struct SiS_Private *SiS_Pr, unsigned short extern bool SiS_IsVAMode(struct SiS_Private *); extern bool SiS_IsDualEdge(struct SiS_Private *); -#ifdef SIS_XORG_XF86 -/* From other modules: */ -extern unsigned short SiS_CheckBuildCustomMode(ScrnInfoPtr pScrn, DisplayModePtr mode, - unsigned int VBFlags); -extern unsigned char SiS_GetSetBIOSScratch(ScrnInfoPtr pScrn, unsigned short offset, - unsigned char value); -extern unsigned char SiS_GetSetModeID(ScrnInfoPtr pScrn, unsigned char id); -extern unsigned short SiS_GetModeNumber(ScrnInfoPtr pScrn, DisplayModePtr mode, - unsigned int VBFlags); -#endif - -#ifdef SIS_LINUX_KERNEL -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 extern unsigned int sisfb_read_nbridge_pci_dword(struct SiS_Private *SiS_Pr, int reg); extern void sisfb_write_nbridge_pci_dword(struct SiS_Private *SiS_Pr, int reg, unsigned int val); #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 extern void sisfb_write_nbridge_pci_byte(struct SiS_Private *SiS_Pr, int reg, unsigned char val); extern unsigned int sisfb_read_mio_pci_word(struct SiS_Private *SiS_Pr, int reg); #endif -#endif #endif diff --git a/drivers/video/sis/init301.c b/drivers/video/sis/init301.c index da33d801c22..9fa66fd4052 100644 --- a/drivers/video/sis/init301.c +++ b/drivers/video/sis/init301.c @@ -75,11 +75,11 @@ #include "init301.h" -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 #include "oem300.h" #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 #include "oem310.h" #endif @@ -87,9 +87,7 @@ #define SiS_I2CDELAYSHORT 150 static unsigned short SiS_GetBIOSLCDResInfo(struct SiS_Private *SiS_Pr); -#ifdef SIS_LINUX_KERNEL static void SiS_SetCH70xx(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val); -#endif /*********************************************/ /* HELPER: Lock/Unlock CRT2 */ @@ -106,9 +104,7 @@ SiS_UnLockCRT2(struct SiS_Private *SiS_Pr) SiS_SetRegOR(SiS_Pr->SiS_Part1Port,0x24,0x01); } -#ifdef SIS_LINUX_KERNEL static -#endif void SiS_LockCRT2(struct SiS_Private *SiS_Pr) { @@ -138,7 +134,7 @@ SiS_SetRegSR11ANDOR(struct SiS_Private *SiS_Pr, unsigned short DataAND, unsigned /* HELPER: Get Pointer to LCD structure */ /*********************************************/ -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static unsigned char * GetLCDStructPtr661(struct SiS_Private *SiS_Pr) { @@ -404,7 +400,7 @@ SiS_SaveCRT2Info(struct SiS_Private *SiS_Pr, unsigned short ModeNo) /* HELPER: GET SOME DATA FROM BIOS ROM */ 
/*********************************************/ -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 static bool SiS_CR36BIOSWord23b(struct SiS_Private *SiS_Pr) { @@ -449,7 +445,7 @@ SiS_DDC2Delay(struct SiS_Private *SiS_Pr, unsigned int delaytime) SiS_GetReg(SiS_Pr->SiS_P3c4, 0x05); } -#if defined(SIS300) || defined(SIS315H) +#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) static void SiS_GenericDelay(struct SiS_Private *SiS_Pr, unsigned short delay) { @@ -457,7 +453,7 @@ SiS_GenericDelay(struct SiS_Private *SiS_Pr, unsigned short delay) } #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static void SiS_LongDelay(struct SiS_Private *SiS_Pr, unsigned short delay) { @@ -467,7 +463,7 @@ SiS_LongDelay(struct SiS_Private *SiS_Pr, unsigned short delay) } #endif -#if defined(SIS300) || defined(SIS315H) +#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) static void SiS_ShortDelay(struct SiS_Private *SiS_Pr, unsigned short delay) { @@ -480,14 +476,14 @@ SiS_ShortDelay(struct SiS_Private *SiS_Pr, unsigned short delay) static void SiS_PanelDelay(struct SiS_Private *SiS_Pr, unsigned short DelayTime) { -#if defined(SIS300) || defined(SIS315H) +#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; unsigned short PanelID, DelayIndex, Delay=0; #endif if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 PanelID = SiS_GetReg(SiS_Pr->SiS_P3d4,0x36); if(SiS_Pr->SiS_VBType & VB_SISVB) { @@ -513,11 +509,11 @@ SiS_PanelDelay(struct SiS_Private *SiS_Pr, unsigned short DelayTime) } SiS_ShortDelay(SiS_Pr, Delay); -#endif /* SIS300 */ +#endif /* CONFIG_FB_SIS_300 */ } else { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if((SiS_Pr->ChipType >= SIS_661) || (SiS_Pr->ChipType <= SIS_315PRO) || @@ -579,12 +575,12 @@ SiS_PanelDelay(struct SiS_Private *SiS_Pr, unsigned short DelayTime) } -#endif /* SIS315H */ +#endif /* CONFIG_FB_SIS_315 */ } } -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static void SiS_PanelDelayLoop(struct SiS_Private *SiS_Pr, unsigned short DelayTime, unsigned short DelayLoop) { @@ -613,7 +609,7 @@ SiS_WaitRetrace1(struct SiS_Private *SiS_Pr) while((!(SiS_GetRegByte(SiS_Pr->SiS_P3da) & 0x08)) && --watchdog); } -#if defined(SIS300) || defined(SIS315H) +#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) static void SiS_WaitRetrace2(struct SiS_Private *SiS_Pr, unsigned short reg) { @@ -630,7 +626,7 @@ static void SiS_WaitVBRetrace(struct SiS_Private *SiS_Pr) { if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { if(!(SiS_GetReg(SiS_Pr->SiS_Part1Port,0x00) & 0x20)) return; } @@ -641,7 +637,7 @@ SiS_WaitVBRetrace(struct SiS_Private *SiS_Pr) } #endif } else { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(!(SiS_GetReg(SiS_Pr->SiS_Part1Port,0x00) & 0x40)) { SiS_WaitRetrace1(SiS_Pr); } else { @@ -686,7 +682,7 @@ SiS_VBLongWait(struct SiS_Private *SiS_Pr) /* HELPER: MISC */ /*********************************************/ -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 static bool SiS_Is301B(struct SiS_Private *SiS_Pr) { @@ -708,7 +704,7 @@ SiS_CRT2IsLCD(struct SiS_Private *SiS_Pr) bool SiS_IsDualEdge(struct SiS_Private *SiS_Pr) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType >= SIS_315H) { if((SiS_Pr->ChipType != SIS_650) || (SiS_GetReg(SiS_Pr->SiS_P3d4,0x5f) & 0xf0)) { if(SiS_GetReg(SiS_Pr->SiS_P3d4,0x38) & EnableDualEdge) return true; @@ -721,7 +717,7 @@ SiS_IsDualEdge(struct SiS_Private *SiS_Pr) bool SiS_IsVAMode(struct SiS_Private *SiS_Pr) { 
-#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 unsigned short flag; if(SiS_Pr->ChipType >= SIS_315H) { @@ -732,7 +728,7 @@ SiS_IsVAMode(struct SiS_Private *SiS_Pr) return false; } -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static bool SiS_IsVAorLCD(struct SiS_Private *SiS_Pr) { @@ -745,7 +741,7 @@ SiS_IsVAorLCD(struct SiS_Private *SiS_Pr) static bool SiS_IsDualLink(struct SiS_Private *SiS_Pr) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType >= SIS_315H) { if((SiS_CRT2IsLCD(SiS_Pr)) || (SiS_IsVAMode(SiS_Pr))) { @@ -756,7 +752,7 @@ SiS_IsDualLink(struct SiS_Private *SiS_Pr) return false; } -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static bool SiS_TVEnabled(struct SiS_Private *SiS_Pr) { @@ -768,7 +764,7 @@ SiS_TVEnabled(struct SiS_Private *SiS_Pr) } #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static bool SiS_LCDAEnabled(struct SiS_Private *SiS_Pr) { @@ -777,7 +773,7 @@ SiS_LCDAEnabled(struct SiS_Private *SiS_Pr) } #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static bool SiS_WeHaveBacklightCtrl(struct SiS_Private *SiS_Pr) { @@ -788,7 +784,7 @@ SiS_WeHaveBacklightCtrl(struct SiS_Private *SiS_Pr) } #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static bool SiS_IsNotM650orLater(struct SiS_Private *SiS_Pr) { @@ -804,7 +800,7 @@ SiS_IsNotM650orLater(struct SiS_Private *SiS_Pr) } #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static bool SiS_IsYPbPr(struct SiS_Private *SiS_Pr) { @@ -816,7 +812,7 @@ SiS_IsYPbPr(struct SiS_Private *SiS_Pr) } #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static bool SiS_IsChScart(struct SiS_Private *SiS_Pr) { @@ -828,7 +824,7 @@ SiS_IsChScart(struct SiS_Private *SiS_Pr) } #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static bool SiS_IsTVOrYPbPrOrScart(struct SiS_Private *SiS_Pr) { @@ -848,7 +844,7 @@ SiS_IsTVOrYPbPrOrScart(struct SiS_Private *SiS_Pr) } #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static bool SiS_IsLCDOrLCDA(struct SiS_Private *SiS_Pr) { @@ -914,7 +910,7 @@ SiS_BridgeInSlavemode(struct SiS_Private *SiS_Pr) /*********************************************/ /* Setup general purpose IO for Chrontel communication */ -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 void SiS_SetChrontelGPIO(struct SiS_Private *SiS_Pr, unsigned short myvbinfo) { @@ -923,11 +919,7 @@ SiS_SetChrontelGPIO(struct SiS_Private *SiS_Pr, unsigned short myvbinfo) if(!(SiS_Pr->SiS_ChSW)) return; -#ifdef SIS_LINUX_KERNEL acpibase = sisfb_read_lpc_pci_dword(SiS_Pr, 0x74); -#else - acpibase = pciReadLong(0x00000800, 0x74); -#endif acpibase &= 0xFFFF; if(!acpibase) return; temp = SiS_GetRegShort((acpibase + 0x3c)); /* ACPI register 0x3c: GP Event 1 I/O mode select */ @@ -969,7 +961,7 @@ SiS_GetVBInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, tempax &= (DriverMode | LoadDACFlag | SetNotSimuMode | SetPALTV); tempbx |= tempax; -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType >= SIS_315H) { if(SiS_Pr->SiS_VBType & VB_SISLCDA) { if(ModeNo == 0x03) { @@ -1019,7 +1011,7 @@ SiS_GetVBInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, } } -#endif /* SIS315H */ +#endif /* CONFIG_FB_SIS_315 */ if(!(SiS_Pr->SiS_VBType & VB_SISVGA2)) { tempbx &= ~(SetCRT2ToRAMDAC); @@ -1154,24 +1146,16 @@ SiS_GetVBInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, SiS_Pr->SiS_VBInfo = tempbx; -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 if(SiS_Pr->ChipType == SIS_630) { SiS_SetChrontelGPIO(SiS_Pr, SiS_Pr->SiS_VBInfo); } #endif -#ifdef SIS_LINUX_KERNEL #if 0 printk(KERN_DEBUG "sisfb: (init301: VBInfo= 0x%04x, SetFlag=0x%04x)\n", SiS_Pr->SiS_VBInfo, SiS_Pr->SiS_SetFlag); 
#endif -#endif -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_PROBED, "(init301: VBInfo=0x%04x, SetFlag=0x%04x)\n", - SiS_Pr->SiS_VBInfo, SiS_Pr->SiS_SetFlag); -#endif -#endif } /*********************************************/ @@ -1415,12 +1399,6 @@ SiS_SetTVMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short } SiS_Pr->SiS_VBInfo &= ~SetPALTV; - -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "(init301: TVMode %x, VBInfo %x)\n", SiS_Pr->SiS_TVMode, SiS_Pr->SiS_VBInfo); -#endif -#endif } /*********************************************/ @@ -1443,22 +1421,10 @@ SiS_GetBIOSLCDResInfo(struct SiS_Private *SiS_Pr) static void SiS_GetLCDInfoBIOS(struct SiS_Private *SiS_Pr) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 unsigned char *ROMAddr; unsigned short temp; -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "Paneldata driver: [%d %d] [H %d %d] [V %d %d] [C %d 0x%02x 0x%02x]\n", - SiS_Pr->PanelHT, SiS_Pr->PanelVT, - SiS_Pr->PanelHRS, SiS_Pr->PanelHRE, - SiS_Pr->PanelVRS, SiS_Pr->PanelVRE, - SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].CLOCK, - SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].Part4_A, - SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].Part4_B); -#endif -#endif - if((ROMAddr = GetLCDStructPtr661(SiS_Pr))) { if((temp = SISGETROMW(6)) != SiS_Pr->PanelHT) { SiS_Pr->SiS_NeedRomModeData = true; @@ -1480,18 +1446,6 @@ SiS_GetLCDInfoBIOS(struct SiS_Private *SiS_Pr) SiS_Pr->SiS_VCLKData[VCLK_CUSTOM_315].SR2C = SiS_Pr->SiS_VBVCLKData[VCLK_CUSTOM_315].Part4_B = ROMAddr[20]; -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "Paneldata BIOS: [%d %d] [H %d %d] [V %d %d] [C %d 0x%02x 0x%02x]\n", - SiS_Pr->PanelHT, SiS_Pr->PanelVT, - SiS_Pr->PanelHRS, SiS_Pr->PanelHRE, - SiS_Pr->PanelVRS, SiS_Pr->PanelVRE, - SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].CLOCK, - SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].Part4_A, - SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].Part4_B); -#endif -#endif - } #endif } @@ -1517,13 +1471,13 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh { unsigned short temp,modeflag,resinfo=0,modexres=0,modeyres=0; bool panelcanscale = false; -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; static const unsigned char SiS300SeriesLCDRes[] = { 0, 1, 2, 3, 7, 4, 5, 8, 0, 0, 10, 0, 0, 0, 0, 15 }; #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 unsigned char *myptr = NULL; #endif @@ -1562,7 +1516,7 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh SiS_Pr->SiS_LCDTypeInfo = (temp & 0x0F) - 1; } temp &= 0x0f; -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 if(SiS_Pr->ChipType < SIS_315H) { /* Very old BIOSes only know 7 sizes (NetVista 2179, 1.01g) */ if(SiS_Pr->SiS_VBType & VB_SIS301) { @@ -1574,7 +1528,7 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh #endif /* Translate to our internal types */ -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType == SIS_550) { if (temp == Panel310_1152x768) temp = Panel_320x240_2; /* Verified working */ else if(temp == Panel310_320x240_2) temp = Panel_320x240_2; @@ -1597,7 +1551,7 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh SiS_Pr->SiS_LCDResInfo = temp; -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { if(SiS_Pr->SiS_CustomT == CUT_BARCO1366) { SiS_Pr->SiS_LCDResInfo = Panel_Barco1366; @@ -1639,7 +1593,7 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short 
ModeNo, unsigned sh else if(SiS_Pr->UsePanelScaler == 1) SiS_Pr->SiS_LCDInfo |= DontExpandLCD; /* Dual link, Pass 1:1 BIOS default, etc. */ -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType >= SIS_661) { if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { if(temp & 0x08) SiS_Pr->SiS_LCDInfo |= LCDPass11; @@ -2076,7 +2030,7 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh } } -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { if(SiS_Pr->SiS_CustomT == CUT_PANEL848 || SiS_Pr->SiS_CustomT == CUT_PANEL856) { SiS_Pr->SiS_LCDInfo = 0x80 | 0x40 | 0x20; /* neg h/v sync, RGB24(D0 = 0) */ @@ -2186,17 +2140,10 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh SiS_Pr->SiS_SetFlag |= LCDVESATiming; } -#ifdef SIS_LINUX_KERNEL #if 0 printk(KERN_DEBUG "sisfb: (LCDInfo=0x%04x LCDResInfo=0x%02x LCDTypeInfo=0x%02x)\n", SiS_Pr->SiS_LCDInfo, SiS_Pr->SiS_LCDResInfo, SiS_Pr->SiS_LCDTypeInfo); #endif -#endif -#ifdef SIS_XORG_XF86 - xf86DrvMsgVerb(0, X_PROBED, 4, - "(init301: LCDInfo=0x%04x LCDResInfo=0x%02x LCDTypeInfo=0x%02x SetFlag=0x%04x)\n", - SiS_Pr->SiS_LCDInfo, SiS_Pr->SiS_LCDResInfo, SiS_Pr->SiS_LCDTypeInfo, SiS_Pr->SiS_SetFlag); -#endif } /*********************************************/ @@ -2359,7 +2306,7 @@ SiS_GetVCLK2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor VCLKIndex = SiS_Pr->PanelVCLKIdx315; } -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 /* Special Timing: Barco iQ Pro R series */ if(SiS_Pr->SiS_CustomT == CUT_BARCO1366) VCLKIndex = 0x44; @@ -2410,12 +2357,6 @@ SiS_GetVCLK2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor } -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "VCLKIndex %d (0x%x)\n", VCLKIndex, VCLKIndex); -#endif -#endif - return VCLKIndex; } @@ -2428,10 +2369,10 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned { unsigned short i, j, modeflag, tempah=0; short tempcl; -#if defined(SIS300) || defined(SIS315H) +#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) unsigned short tempbl; #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; unsigned short tempah2, tempbl2; #endif @@ -2454,7 +2395,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 /* ---- 300 series ---- */ +#ifdef CONFIG_FB_SIS_300 /* ---- 300 series ---- */ /* For 301BDH: (with LCD via LVDS) */ if(SiS_Pr->SiS_VBType & VB_NoLCD) { @@ -2477,11 +2418,11 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) tempah ^= 0xA0; -#endif /* SIS300 */ +#endif /* CONFIG_FB_SIS_300 */ } else { -#ifdef SIS315H /* ------- 315/330 series ------ */ +#ifdef CONFIG_FB_SIS_315 /* ------- 315/330 series ------ */ if(ModeNo > 0x13) { tempcl -= ModeVGA; @@ -2494,7 +2435,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) tempah ^= 0x50; -#endif /* SIS315H */ +#endif /* CONFIG_FB_SIS_315 */ } @@ -2503,7 +2444,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned if(SiS_Pr->ChipType < SIS_315H) { SiS_SetReg(SiS_Pr->SiS_Part1Port,0x00,tempah); } else { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->SiS_IF_DEF_LVDS == 1) { SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x00,0xa0,tempah); } else if(SiS_Pr->SiS_VBType & VB_SISVB) { @@ -2584,7 +2525,7 @@ 
SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned if(SiS_Pr->ChipType >= SIS_315H) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 /* LVDS can only be slave in 8bpp modes */ tempah = 0x80; if((modeflag & CRT2Mode) && (SiS_Pr->SiS_ModeType > ModeVGA)) { @@ -2604,7 +2545,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned } else { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 tempah = 0; if( (!(SiS_Pr->SiS_VBInfo & SetInSlaveMode)) && (SiS_Pr->SiS_ModeType > ModeVGA) ) { tempah |= 0x02; @@ -2626,7 +2567,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned if(SiS_Pr->ChipType >= SIS_315H) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 /* unsigned char bridgerev = SiS_GetReg(SiS_Pr->SiS_Part4Port,0x01); */ /* The following is nearly unpreditable and varies from machine @@ -2718,11 +2659,11 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned SiS_SetRegANDOR(SiS_Pr->SiS_Part4Port,0x23,tempbl,tempah); } -#endif /* SIS315H */ +#endif /* CONFIG_FB_SIS_315 */ } else if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 SiS_SetRegAND(SiS_Pr->SiS_Part4Port,0x21,0x3f); if((SiS_Pr->SiS_VBInfo & DisableCRT2Display) || @@ -2745,7 +2686,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned } else { /* LVDS */ -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType >= SIS_315H) { if(SiS_Pr->SiS_IF_DEF_CH70xx != 0) { @@ -2931,7 +2872,7 @@ SiS_GetCRT2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short } } -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->SiS_CustomT == CUT_COMPAQ1280) { if(SiS_Pr->SiS_LCDResInfo == Panel_1280x1024) { if(!(SiS_Pr->SiS_LCDInfo & DontExpandLCD)) { @@ -3036,7 +2977,7 @@ SiS_GetCRT2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short case Panel_1280x1024: tempbx = 24; break; case Panel_1400x1050: tempbx = 26; break; case Panel_1600x1200: tempbx = 28; break; -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 case Panel_Barco1366: tempbx = 80; break; #endif } @@ -3053,7 +2994,7 @@ SiS_GetCRT2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short if(SiS_Pr->SiS_LCDInfo & LCDPass11) tempbx = 30; -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 if(SiS_Pr->SiS_CustomT == CUT_BARCO1024) { tempbx = 82; if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) tempbx++; @@ -3189,7 +3130,7 @@ SiS_GetCRT2DataLVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned if((SiS_Pr->SiS_VBType & VB_SISVB) && (SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA)) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 SiS_CalcPanelLinkTiming(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); SiS_CalcLCDACRT1Timing(SiS_Pr, ModeNo, ModeIdIndex); #endif @@ -3214,7 +3155,7 @@ SiS_GetCRT2DataLVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned case 16: LVDSData = SiS_Pr->SiS_LVDS800x600Data_1; break; case 18: LVDSData = SiS_Pr->SiS_LVDS1024x600Data_1; break; case 20: LVDSData = SiS_Pr->SiS_LVDS1024x768Data_1; break; -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 case 80: LVDSData = SiS_Pr->SiS_LVDSBARCO1366Data_1; break; case 81: LVDSData = SiS_Pr->SiS_LVDSBARCO1366Data_2; break; case 82: LVDSData = SiS_Pr->SiS_LVDSBARCO1024Data_1; break; @@ -3248,7 +3189,7 @@ SiS_GetCRT2DataLVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned (SiS_Pr->SiS_SetFlag & SetDOSMode) ) { SiS_Pr->SiS_HDE = SiS_Pr->PanelXRes; SiS_Pr->SiS_VDE = SiS_Pr->PanelYRes; -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 if(SiS_Pr->SiS_CustomT == 
CUT_BARCO1366) { if(ResIndex < 0x08) { SiS_Pr->SiS_HDE = 1280; @@ -3270,7 +3211,7 @@ SiS_GetCRT2Data301(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s unsigned short resinfo, CRT2Index, ResIndex; const struct SiS_LCDData *LCDPtr = NULL; const struct SiS_TVData *TVPtr = NULL; -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 short resinfo661; #endif @@ -3283,7 +3224,7 @@ SiS_GetCRT2Data301(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s } else { modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag; resinfo = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_RESINFO; -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 resinfo661 = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].ROMMODEIDX661; if( (SiS_Pr->SiS_VBInfo & SetCRT2ToLCD) && (SiS_Pr->SiS_SetFlag & LCDVESATiming) && @@ -3460,7 +3401,7 @@ SiS_GetCRT2Data301(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s } else if( (!(SiS_Pr->SiS_LCDInfo & DontExpandLCD)) && (romptr) && (ROMAddr) ) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 SiS_Pr->SiS_RVBHCMAX = ROMAddr[romptr]; SiS_Pr->SiS_RVBHCFACT = ROMAddr[romptr+1]; SiS_Pr->SiS_VGAHT = ROMAddr[romptr+2] | ((ROMAddr[romptr+3] & 0x0f) << 8); @@ -3520,19 +3461,13 @@ SiS_GetCRT2Data301(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s case Panel_1680x1050 : case Panel_1680x1050 + 32: LCDPtr = SiS_Pr->SiS_LCD1680x1050Data; break; case 100 : LCDPtr = SiS_Pr->SiS_NoScaleData; break; -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 case 200 : LCDPtr = SiS310_ExtCompaq1280x1024Data; break; case 201 : LCDPtr = SiS_Pr->SiS_St2LCD1280x1024Data; break; #endif default : LCDPtr = SiS_Pr->SiS_ExtLCD1024x768Data; break; } -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "GetCRT2Data: Index %d ResIndex %d\n", CRT2Index, ResIndex); -#endif -#endif - SiS_Pr->SiS_RVBHCMAX = (LCDPtr+ResIndex)->RVBHCMAX; SiS_Pr->SiS_RVBHCFACT = (LCDPtr+ResIndex)->RVBHCFACT; SiS_Pr->SiS_VGAHT = (LCDPtr+ResIndex)->VGAHT; @@ -3624,7 +3559,7 @@ SiS_GetLVDSDesPtr(struct SiS_Private *SiS_Pr) { const struct SiS_LVDSDes *PanelDesPtr = NULL; -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCD) { if(SiS_Pr->ChipType < SIS_315H) { @@ -3696,7 +3631,7 @@ SiS_GetLVDSDesData(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s if((SiS_Pr->SiS_VBType & VB_SIS30xBLV) && (SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA)) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { /* non-pass 1:1 only, see above */ if(SiS_Pr->SiS_VGAHDE != SiS_Pr->PanelXRes) { @@ -3771,7 +3706,7 @@ SiS_GetLVDSDesData(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s } else { if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 switch(SiS_Pr->SiS_LCDResInfo) { case Panel_800x600: if(SiS_Pr->SiS_VGAVDE == SiS_Pr->PanelYRes) { @@ -3816,7 +3751,7 @@ SiS_GetLVDSDesData(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s } #endif } else { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 switch(SiS_Pr->SiS_LCDResInfo) { case Panel_1024x768: case Panel_1280x1024: @@ -3844,7 +3779,7 @@ SiS_GetLVDSDesData(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s if(SiS_Pr->ChipType < SIS_315H) { if(!(modeflag & HalfDCLK)) SiS_Pr->SiS_LCDHDES = 320; } else { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->SiS_LCDResInfo == Panel_1024x768) SiS_Pr->SiS_LCDHDES = 480; if(SiS_Pr->SiS_LCDResInfo == Panel_1400x1050) SiS_Pr->SiS_LCDHDES = 804; if(SiS_Pr->SiS_LCDResInfo == Panel_1600x1200) SiS_Pr->SiS_LCDHDES = 704; @@ -3866,7 +3801,7 @@ 
SiS_GetLVDSDesData(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s /* DISABLE VIDEO BRIDGE */ /*********************************************/ -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static int SiS_HandlePWD(struct SiS_Private *SiS_Pr) { @@ -3891,11 +3826,6 @@ SiS_HandlePWD(struct SiS_Private *SiS_Pr) ret = 1; } SiS_SetRegANDOR(SiS_Pr->SiS_Part4Port,0x27,0x7f,temp); -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, 0, "Setting PWD %x\n", temp); -#endif -#endif } #endif return ret; @@ -3909,7 +3839,7 @@ SiS_HandlePWD(struct SiS_Private *SiS_Pr) void SiS_DisableBridge(struct SiS_Private *SiS_Pr) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 unsigned short tempah, pushax=0, modenum; #endif unsigned short temp=0; @@ -3920,7 +3850,7 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 /* 300 series */ +#ifdef CONFIG_FB_SIS_300 /* 300 series */ if(!(SiS_CR36BIOSWord23b(SiS_Pr))) { if(SiS_Pr->SiS_VBType & VB_SISLVDS) { @@ -3953,11 +3883,11 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) } } -#endif /* SIS300 */ +#endif /* CONFIG_FB_SIS_300 */ } else { -#ifdef SIS315H /* 315 series */ +#ifdef CONFIG_FB_SIS_315 /* 315 series */ int didpwd = 0; bool custom1 = (SiS_Pr->SiS_CustomT == CUT_COMPAQ1280) || @@ -4081,14 +4011,14 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) } -#endif /* SIS315H */ +#endif /* CONFIG_FB_SIS_315 */ } } else { /* ============ For 301 ================ */ if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 if(!(SiS_CR36BIOSWord23b(SiS_Pr))) { SiS_SetRegSR11ANDOR(SiS_Pr,0xF7,0x08); SiS_PanelDelay(SiS_Pr, 3); @@ -4111,7 +4041,7 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x1E,0x20); SiS_SetReg(SiS_Pr->SiS_Part1Port,0x00,temp); } else { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 SiS_SetRegAND(SiS_Pr->SiS_P3c4,0x1E,0xDF); /* disable CRT2 */ if( (!(SiS_CRT2IsLCD(SiS_Pr))) || (!(SiS_CR36BIOSWord23d(SiS_Pr))) ) { @@ -4127,7 +4057,7 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 /* 300 series */ +#ifdef CONFIG_FB_SIS_300 /* 300 series */ if(SiS_Pr->SiS_IF_DEF_CH70xx == 1) { SiS_SetCH700x(SiS_Pr,0x0E,0x09); @@ -4171,11 +4101,11 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) SiS_SetRegSR11ANDOR(SiS_Pr,0xFB,0x04); } -#endif /* SIS300 */ +#endif /* CONFIG_FB_SIS_300 */ } else { -#ifdef SIS315H /* 315 series */ +#ifdef CONFIG_FB_SIS_315 /* 315 series */ if(!(SiS_IsNotM650orLater(SiS_Pr))) { /*if(SiS_Pr->ChipType < SIS_340) { */ /* XGI needs this */ @@ -4288,7 +4218,7 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) } } -#endif /* SIS315H */ +#endif /* CONFIG_FB_SIS_315 */ } /* 315 series */ @@ -4304,14 +4234,12 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr) * from outside the context of a mode switch! 
* MUST call getVBType before calling this */ -#ifdef SIS_LINUX_KERNEL static -#endif void SiS_EnableBridge(struct SiS_Private *SiS_Pr) { unsigned short temp=0, tempah; -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 unsigned short temp1, pushax=0; bool delaylong = false; #endif @@ -4322,7 +4250,7 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr) if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 /* 300 series */ +#ifdef CONFIG_FB_SIS_300 /* 300 series */ if(SiS_CRT2IsLCD(SiS_Pr)) { if(SiS_Pr->SiS_VBType & VB_SISLVDS) { @@ -4385,11 +4313,11 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr) } -#endif /* SIS300 */ +#endif /* CONFIG_FB_SIS_300 */ } else { -#ifdef SIS315H /* 315 series */ +#ifdef CONFIG_FB_SIS_315 /* 315 series */ #ifdef SET_EMI unsigned char r30=0, r31=0, r32=0, r33=0, cr36=0; @@ -4688,7 +4616,7 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr) SiS_SetRegAND(SiS_Pr->SiS_Part1Port,0x00,0x7f); } -#endif /* SIS315H */ +#endif /* CONFIG_FB_SIS_315 */ } @@ -4739,7 +4667,7 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr) if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 /* 300 series */ +#ifdef CONFIG_FB_SIS_300 /* 300 series */ if(SiS_CRT2IsLCD(SiS_Pr)) { if(SiS_Pr->ChipType == SIS_730) { @@ -4783,11 +4711,11 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr) } } -#endif /* SIS300 */ +#endif /* CONFIG_FB_SIS_300 */ } else { -#ifdef SIS315H /* 315 series */ +#ifdef CONFIG_FB_SIS_315 /* 315 series */ if(!(SiS_IsNotM650orLater(SiS_Pr))) { /*if(SiS_Pr->ChipType < SIS_340) {*/ /* XGI needs this */ @@ -4881,7 +4809,7 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr) } } -#endif /* SIS315H */ +#endif /* CONFIG_FB_SIS_315 */ } /* 310 series */ @@ -4971,7 +4899,7 @@ SiS_SetCRT2Sync(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 /* ---- 300 series --- */ +#ifdef CONFIG_FB_SIS_300 /* ---- 300 series --- */ if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { /* 630 - 301B(-DH) */ @@ -5000,11 +4928,11 @@ SiS_SetCRT2Sync(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor } -#endif /* SIS300 */ +#endif /* CONFIG_FB_SIS_300 */ } else { -#ifdef SIS315H /* ------- 315 series ------ */ +#ifdef CONFIG_FB_SIS_315 /* ------- 315 series ------ */ if(SiS_Pr->SiS_VBType & VB_SISLVDS) { /* 315 - LVDS */ @@ -5076,13 +5004,13 @@ SiS_SetCRT2Sync(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor } } -#endif /* SIS315H */ +#endif /* CONFIG_FB_SIS_315 */ } } } /* Set CRT2 FIFO on 300/540/630/730 */ -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 static void SiS_SetCRT2FIFO_300(struct SiS_Private *SiS_Pr,unsigned short ModeNo) { @@ -5154,13 +5082,8 @@ SiS_SetCRT2FIFO_300(struct SiS_Private *SiS_Pr,unsigned short ModeNo) } else { -#ifdef SIS_LINUX_KERNEL pci50 = sisfb_read_nbridge_pci_dword(SiS_Pr, 0x50); pciA0 = sisfb_read_nbridge_pci_dword(SiS_Pr, 0xa0); -#else - pci50 = pciReadLong(0x00000000, 0x50); - pciA0 = pciReadLong(0x00000000, 0xA0); -#endif if(SiS_Pr->ChipType == SIS_730) { @@ -5262,7 +5185,7 @@ SiS_SetCRT2FIFO_300(struct SiS_Private *SiS_Pr,unsigned short ModeNo) #endif /* Set CRT2 FIFO on 315/330 series */ -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static void SiS_SetCRT2FIFO_310(struct SiS_Private *SiS_Pr) { @@ -5420,27 +5343,6 @@ SiS_SetGroup1_301(struct SiS_Private *SiS_Pr, unsigned short ModeNo,unsigned sho temp = SiS_GetRegByte((SiS_Pr->SiS_P3ca+0x02)); SiS_SetReg(SiS_Pr->SiS_Part1Port,0x1b,temp); /* ? 
*/ - -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "%d %d %d %d %d %d %d %d (%d %d %d %d)\n", - SiS_Pr->CHDisplay, SiS_Pr->CHSyncStart, SiS_Pr->CHSyncEnd, SiS_Pr->CHTotal, - SiS_Pr->CVDisplay, SiS_Pr->CVSyncStart, SiS_Pr->CVSyncEnd, SiS_Pr->CVTotal, - SiS_Pr->CHBlankStart, SiS_Pr->CHBlankEnd, SiS_Pr->CVBlankStart, SiS_Pr->CVBlankEnd); - - xf86DrvMsg(0, X_INFO, " {{0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,\n", - SiS_Pr->CCRT1CRTC[0], SiS_Pr->CCRT1CRTC[1], - SiS_Pr->CCRT1CRTC[2], SiS_Pr->CCRT1CRTC[3], - SiS_Pr->CCRT1CRTC[4], SiS_Pr->CCRT1CRTC[5], - SiS_Pr->CCRT1CRTC[6], SiS_Pr->CCRT1CRTC[7]); - xf86DrvMsg(0, X_INFO, " 0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,\n", - SiS_Pr->CCRT1CRTC[8], SiS_Pr->CCRT1CRTC[9], - SiS_Pr->CCRT1CRTC[10], SiS_Pr->CCRT1CRTC[11], - SiS_Pr->CCRT1CRTC[12], SiS_Pr->CCRT1CRTC[13], - SiS_Pr->CCRT1CRTC[14], SiS_Pr->CCRT1CRTC[15]); - xf86DrvMsg(0, X_INFO, " 0x%02x}},\n", SiS_Pr->CCRT1CRTC[16]); -#endif -#endif } /* Setup panel link @@ -5455,17 +5357,17 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s unsigned short push2, tempax, tempbx, tempcx, temp; unsigned int tempeax = 0, tempebx, tempecx, tempvcfact = 0; bool islvds = false, issis = false, chkdclkfirst = false; -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 unsigned short crt2crtc = 0; #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 unsigned short pushcx; #endif if(ModeNo <= 0x13) { modeflag = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ModeFlag; resinfo = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ResInfo; -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 crt2crtc = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_CRT2CRTC; #endif } else if(SiS_Pr->UseCustomMode) { @@ -5473,7 +5375,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s } else { modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag; resinfo = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_RESINFO; -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 crt2crtc = SiS_Pr->SiS_RefIndex[RefreshRateTableIndex].Ext_CRT2CRTC; #endif } @@ -5494,7 +5396,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s } } -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if((SiS_Pr->ChipType >= SIS_315H) && (SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA)) { if(IS_SIS330) { SiS_SetRegOR(SiS_Pr->SiS_Part1Port,0x2D,0x10); @@ -5744,7 +5646,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 /* 300 series */ +#ifdef CONFIG_FB_SIS_300 /* 300 series */ tempeax = SiS_Pr->SiS_VGAVDE << 6; temp = (tempeax % (unsigned int)SiS_Pr->SiS_VDE); tempeax = tempeax / (unsigned int)SiS_Pr->SiS_VDE; @@ -5755,11 +5657,11 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s temp = (unsigned short)(tempeax & 0x00FF); SiS_SetReg(SiS_Pr->SiS_Part1Port,0x1E,temp); /* BPLVCFACT */ tempvcfact = temp; -#endif /* SIS300 */ +#endif /* CONFIG_FB_SIS_300 */ } else { -#ifdef SIS315H /* 315 series */ +#ifdef CONFIG_FB_SIS_315 /* 315 series */ tempeax = SiS_Pr->SiS_VGAVDE << 18; tempebx = SiS_Pr->SiS_VDE; temp = (tempeax % tempebx); @@ -5845,7 +5747,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s temp = (unsigned short)(tempecx & 0x00FF); SiS_SetReg(SiS_Pr->SiS_Part1Port,0x23,temp); -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType >= SIS_315H) { if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA) { if((islvds) || (SiS_Pr->SiS_VBInfo & VB_SISLVDS)) { @@ -5863,7 +5765,7 @@ 
SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s } #endif -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 if(SiS_Pr->SiS_IF_DEF_TRUMPION) { unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; unsigned char *trumpdata; @@ -5899,7 +5801,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s } #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->SiS_IF_DEF_FSTN || SiS_Pr->SiS_IF_DEF_DSTN) { SiS_SetReg(SiS_Pr->SiS_Part1Port,0x25,0x00); SiS_SetReg(SiS_Pr->SiS_Part1Port,0x26,0x00); @@ -5999,7 +5901,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s SiS_SetReg(SiS_Pr->SiS_Part1Port,0x45,0x0a); } } -#endif /* SIS315H */ +#endif /* CONFIG_FB_SIS_315 */ } /* Set Part 1 */ @@ -6007,12 +5909,12 @@ static void SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefreshRateTableIndex) { -#if defined(SIS300) || defined(SIS315H) +#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315) unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; #endif unsigned short temp=0, tempax=0, tempbx=0, tempcx=0, bridgeadd=0; unsigned short pushbx=0, CRT1Index=0, modeflag, resinfo=0; -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 unsigned short tempbl=0; #endif @@ -6038,11 +5940,11 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short (SiS_Pr->SiS_VBInfo & SetInSlaveMode)) ) { if(SiS_Pr->ChipType < SIS_315H ) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 SiS_SetCRT2FIFO_300(SiS_Pr, ModeNo); #endif } else { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 SiS_SetCRT2FIFO_310(SiS_Pr); #endif } @@ -6051,7 +5953,7 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short if(SiS_Pr->ChipType < SIS_315H ) { -#ifdef SIS300 /* ------------- 300 series --------------*/ +#ifdef CONFIG_FB_SIS_300 /* ------------- 300 series --------------*/ temp = (SiS_Pr->SiS_VGAHT - 1) & 0x0FF; /* BTVGA2HT 0x08,0x09 */ SiS_SetReg(SiS_Pr->SiS_Part1Port,0x08,temp); /* CRT2 Horizontal Total */ @@ -6070,11 +5972,11 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short bridgeadd = 12; -#endif /* SIS300 */ +#endif /* CONFIG_FB_SIS_300 */ } else { -#ifdef SIS315H /* ------------------- 315/330 series --------------- */ +#ifdef CONFIG_FB_SIS_315 /* ------------------- 315/330 series --------------- */ tempcx = SiS_Pr->SiS_VGAHT; /* BTVGA2HT 0x08,0x09 */ if(modeflag & HalfDCLK) { @@ -6125,7 +6027,7 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short } } -#endif /* SIS315H */ +#endif /* CONFIG_FB_SIS_315 */ } /* 315/330 series */ @@ -6256,7 +6158,7 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short if(SiS_Pr->ChipType < SIS_315H) { -#ifdef SIS300 /* ---------- 300 series -------------- */ +#ifdef CONFIG_FB_SIS_300 /* ---------- 300 series -------------- */ if(SiS_Pr->SiS_VBType & VB_SISVB) { temp = 0x20; @@ -6310,11 +6212,11 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x13,~0x3C,temp); /* Panel Link Delay Compensation; (Software Command Reset; Power Saving) */ -#endif /* SIS300 */ +#endif /* CONFIG_FB_SIS_300 */ } else { -#ifdef SIS315H /* --------------- 315/330 series ---------------*/ +#ifdef CONFIG_FB_SIS_315 /* --------------- 315/330 series ---------------*/ if(SiS_Pr->ChipType < SIS_661) { @@ -6349,7 +6251,7 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short 
if(modeflag & HalfDCLK) tempax |= 0x40; SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x2C,0x3f,tempax); -#endif /* SIS315H */ +#endif /* CONFIG_FB_SIS_315 */ } @@ -6381,7 +6283,7 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short /* SET PART 2 REGISTER GROUP */ /*********************************************/ -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static unsigned char * SiS_GetGroup2CLVXPtr(struct SiS_Private *SiS_Pr, int tabletype) { @@ -6478,7 +6380,7 @@ SiS_GetCRT2Part2Ptr(struct SiS_Private *SiS_Pr,unsigned short ModeNo,unsigned sh } #endif -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 static void SiS_Group2LCDSpecial(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short crt2crtc) { @@ -6690,7 +6592,7 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short unsigned int longtemp, PhaseIndex; bool newtvphase; const unsigned char *TimingPoint; -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 unsigned short resindex, CRT2Index; const struct SiS_Part2PortTbl *CRT2Part2Ptr = NULL; @@ -7069,7 +6971,7 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short SiS_SetRegAND(SiS_Pr->SiS_Part2Port,0x17,0xFB); SiS_SetRegAND(SiS_Pr->SiS_Part2Port,0x18,0xDF); -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_GetCRT2Part2Ptr(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex, &CRT2Index, &resindex)) { switch(CRT2Index) { @@ -7130,12 +7032,6 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short /* Non-expanding: lcdvdes = tempcx = VT-1; lcdvdee = tempbx = VDE-1 */ -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "lcdvdes 0x%x lcdvdee 0x%x\n", tempcx, tempbx); -#endif -#endif - SiS_SetReg(SiS_Pr->SiS_Part2Port,0x05,tempcx); /* lcdvdes */ SiS_SetReg(SiS_Pr->SiS_Part2Port,0x06,tempbx); /* lcdvdee */ @@ -7184,12 +7080,6 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short tempbx = SiS_Pr->CVSyncStart; } -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "lcdvrs 0x%x\n", tempbx); -#endif -#endif - SiS_SetReg(SiS_Pr->SiS_Part2Port,0x04,tempbx); /* lcdvrs */ temp = (tempbx >> 4) & 0xF0; @@ -7201,15 +7091,9 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short temp |= (SiS_Pr->CVSyncEnd & 0x0f); } -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "lcdvre[3:0] 0x%x\n", (temp & 0x0f)); -#endif -#endif - SiS_SetReg(SiS_Pr->SiS_Part2Port,0x01,temp); -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 SiS_Group2LCDSpecial(SiS_Pr, ModeNo, crt2crtc); #endif @@ -7245,12 +7129,6 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short tempax >>= 1; } -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "lcdhdee 0x%x\n", tempbx); -#endif -#endif - tempbx += bridgeoffset; SiS_SetReg(SiS_Pr->SiS_Part2Port,0x23,tempbx); /* lcdhdee */ @@ -7276,12 +7154,6 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short tempbx += bridgeoffset; } -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "lcdhrs 0x%x\n", tempbx); -#endif -#endif - SiS_SetReg(SiS_Pr->SiS_Part2Port,0x1C,tempbx); /* lcdhrs */ SiS_SetRegANDOR(SiS_Pr->SiS_Part2Port,0x1D,0x0F,((tempbx >> 4) & 0xf0)); @@ -7300,20 +7172,14 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short tempbx += bridgeoffset; } -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "lcdhre 0x%x\n", tempbx); -#endif -#endif - SiS_SetReg(SiS_Pr->SiS_Part2Port,0x21,tempbx); /* lcdhre */ 
SiS_SetGroup2_Tail(SiS_Pr, ModeNo); -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 SiS_Set300Part2Regs(SiS_Pr, ModeIdIndex, RefreshRateTableIndex, ModeNo); #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 } /* CRT2-LCD from table */ #endif } @@ -7382,7 +7248,7 @@ SiS_SetGroup3(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short /* SET PART 4 REGISTER GROUP */ /*********************************************/ -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 #if 0 static void SiS_ShiftXPos(struct SiS_Private *SiS_Pr, int shift) @@ -8011,7 +7877,7 @@ SiS_SetCHTVReg(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short if(SiS_Pr->SiS_IF_DEF_CH70xx == 1) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 /* Chrontel 7005 - I assume that it does not come with a 315 series chip */ @@ -8124,7 +7990,7 @@ SiS_SetCHTVReg(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short /* Chrontel 7019 - assumed that it does not come with a 300 series chip */ -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 unsigned short temp; @@ -8175,7 +8041,7 @@ SiS_SetCHTVReg(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short } -#ifdef SIS315H /* ----------- 315 series only ---------- */ +#ifdef CONFIG_FB_SIS_315 /* ----------- 315 series only ---------- */ void SiS_Chrontel701xBLOn(struct SiS_Private *SiS_Pr) @@ -8657,7 +8523,7 @@ SiS_ChrontelDoSomething1(struct SiS_Private *SiS_Pr) bool SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) { -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; #endif unsigned short ModeIdIndex, RefreshRateTableIndex; @@ -8703,16 +8569,6 @@ SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) SiS_GetLVDSDesData(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); } -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "(init301: LCDHDES 0x%03x LCDVDES 0x%03x)\n", SiS_Pr->SiS_LCDHDES, SiS_Pr->SiS_LCDVDES); - xf86DrvMsg(0, X_INFO, "(init301: HDE 0x%03x VDE 0x%03x)\n", SiS_Pr->SiS_HDE, SiS_Pr->SiS_VDE); - xf86DrvMsg(0, X_INFO, "(init301: VGAHDE 0x%03x VGAVDE 0x%03x)\n", SiS_Pr->SiS_VGAHDE, SiS_Pr->SiS_VGAVDE); - xf86DrvMsg(0, X_INFO, "(init301: HT 0x%03x VT 0x%03x)\n", SiS_Pr->SiS_HT, SiS_Pr->SiS_VT); - xf86DrvMsg(0, X_INFO, "(init301: VGAHT 0x%03x VGAVT 0x%03x)\n", SiS_Pr->SiS_VGAHT, SiS_Pr->SiS_VGAVT); -#endif -#endif - if(SiS_Pr->SiS_SetFlag & LowModeTests) { SiS_SetGroup1(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); } @@ -8722,12 +8578,12 @@ SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) if(SiS_Pr->SiS_SetFlag & LowModeTests) { SiS_SetGroup2(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 SiS_SetGroup2_C_ELV(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); #endif SiS_SetGroup3(SiS_Pr, ModeNo, ModeIdIndex); SiS_SetGroup4(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 SiS_SetGroup4_C_ELV(SiS_Pr, ModeNo, ModeIdIndex); #endif SiS_SetGroup5(SiS_Pr, ModeNo, ModeIdIndex); @@ -8758,7 +8614,7 @@ SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) if(SiS_Pr->SiS_IF_DEF_CH70xx != 0) { if(SiS_Pr->SiS_VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) { if(SiS_Pr->SiS_IF_DEF_CH70xx == 2) { -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 SiS_SetCH701xForLCD(SiS_Pr); #endif } @@ -8771,7 +8627,7 @@ SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) } -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 if(SiS_Pr->ChipType < SIS_315H) { if(SiS_Pr->SiS_SetFlag & LowModeTests) { 
if(SiS_Pr->SiS_UseOEM) { @@ -8794,7 +8650,7 @@ SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) } #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 if(SiS_Pr->ChipType >= SIS_315H) { if(SiS_Pr->SiS_SetFlag & LowModeTests) { if(SiS_Pr->ChipType < SIS_661) { @@ -8873,7 +8729,7 @@ SiS_SetupDDCN(struct SiS_Private *SiS_Pr) } } -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 static unsigned char * SiS_SetTrumpBlockLoop(struct SiS_Private *SiS_Pr, unsigned char *dataptr) { @@ -8923,11 +8779,6 @@ SiS_SetTrumpionBlock(struct SiS_Private *SiS_Pr, unsigned char *dataptr) dataptr = SiS_SetTrumpBlockLoop(SiS_Pr, dataptr); if(!dataptr) return false; } -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "Trumpion block success\n"); -#endif -#endif return true; } #endif @@ -9002,9 +8853,7 @@ SiS_SetCH701x(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val) SiS_SetChReg(SiS_Pr, reg, val, 0); } -#ifdef SIS_LINUX_KERNEL static -#endif void SiS_SetCH70xx(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val) { @@ -9091,9 +8940,7 @@ SiS_GetCH701x(struct SiS_Private *SiS_Pr, unsigned short tempbx) /* Read from Chrontel 70xx */ /* Parameter is [Register no (S7-S0)] */ -#ifdef SIS_LINUX_KERNEL static -#endif unsigned short SiS_GetCH70xx(struct SiS_Private *SiS_Pr, unsigned short tempbx) { @@ -9114,9 +8961,7 @@ SiS_SetCH70xxANDOR(struct SiS_Private *SiS_Pr, unsigned short reg, } /* Our own DDC functions */ -#ifndef SIS_XORG_XF86 static -#endif unsigned short SiS_InitDDCRegs(struct SiS_Private *SiS_Pr, unsigned int VBFlags, int VGAEngine, unsigned short adaptnum, unsigned short DDCdatatype, bool checkcr32, @@ -9224,12 +9069,6 @@ SiS_InitDDCRegs(struct SiS_Private *SiS_Pr, unsigned int VBFlags, int VGAEngine, SiS_SetupDDCN(SiS_Pr); -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "DDC Port %x Index %x Shift %d\n", - SiS_Pr->SiS_DDC_Port, SiS_Pr->SiS_DDC_Index, temp); -#endif -#endif return 0; } @@ -9292,11 +9131,6 @@ SiS_DoProbeDDC(struct SiS_Private *SiS_Pr) SiS_SetSwitchDDC2(SiS_Pr); if(SiS_PrepareDDC(SiS_Pr)) { SiS_SetStop(SiS_Pr); -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "Probe: Prepare failed\n"); -#endif -#endif return 0xFFFF; } mask = 0xf0; @@ -9310,11 +9144,6 @@ SiS_DoProbeDDC(struct SiS_Private *SiS_Pr) } else { failed = true; ret = 0xFFFF; -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "Probe: Read 1 failed\n"); -#endif -#endif } } if(!failed) { @@ -9324,11 +9153,6 @@ SiS_DoProbeDDC(struct SiS_Private *SiS_Pr) if(temp == value) ret = 0; else { ret = 0xFFFF; -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "Probe: Read 2 failed\n"); -#endif -#endif if(SiS_Pr->SiS_DDC_DeviceAddr == 0xa0) { if(temp == 0x30) ret = 0; } @@ -9338,9 +9162,7 @@ SiS_DoProbeDDC(struct SiS_Private *SiS_Pr) return ret; } -#ifndef SIS_XORG_XF86 static -#endif unsigned short SiS_ProbeDDC(struct SiS_Private *SiS_Pr) { @@ -9357,9 +9179,7 @@ SiS_ProbeDDC(struct SiS_Private *SiS_Pr) return flag; } -#ifndef SIS_XORG_XF86 static -#endif unsigned short SiS_ReadDDC(struct SiS_Private *SiS_Pr, unsigned short DDCdatatype, unsigned char *buffer) { @@ -9606,11 +9426,6 @@ SiS_SetSCLKHigh(struct SiS_Private *SiS_Pr) temp = SiS_GetReg(SiS_Pr->SiS_DDC_Port,SiS_Pr->SiS_DDC_Index); } while((!(temp & SiS_Pr->SiS_DDC_Clk)) && --watchdog); if (!watchdog) { -#ifdef SIS_XORG_XF86 -#ifdef TWDEBUG - xf86DrvMsg(0, X_INFO, "SetClkHigh failed\n"); -#endif -#endif return 0xFFFF; } SiS_DDC2Delay(SiS_Pr,SiS_I2CDELAYSHORT); @@ -9641,7 +9456,7 @@ 
SiS_CheckACK(struct SiS_Private *SiS_Pr) /* =============== SiS 315/330 O.E.M. ================= */ -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static unsigned short GetRAMDACromptr(struct SiS_Private *SiS_Pr) @@ -10829,7 +10644,7 @@ SiS_FinalizeLCD(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor /* ================= SiS 300 O.E.M. ================== */ -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 static void SetOEMLCDData2(struct SiS_Private *SiS_Pr, unsigned short ModeNo,unsigned short ModeIdIndex, diff --git a/drivers/video/sis/init301.h b/drivers/video/sis/init301.h index 51d99222375..e1fd31d0fdd 100644 --- a/drivers/video/sis/init301.h +++ b/drivers/video/sis/init301.h @@ -53,15 +53,8 @@ #ifndef _INIT301_H_ #define _INIT301_H_ -#include "osdef.h" #include "initdef.h" -#ifdef SIS_XORG_XF86 -#include "sis.h" -#include "sis_regs.h" -#endif - -#ifdef SIS_LINUX_KERNEL #include "vgatypes.h" #include "vstruct.h" #ifdef SIS_CP @@ -72,7 +65,6 @@ #include <linux/fb.h> #include "sis.h" #include <video/sisfb.h> -#endif static const unsigned char SiS_YPbPrTable[3][64] = { { @@ -237,7 +229,7 @@ static const unsigned char SiS_Part2CLVX_6[] = { /* 1080i */ 0xFF,0xFF, }; -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 /* 661 et al LCD data structure (2.03.00) */ static const unsigned char SiS_LCDStruct661[] = { /* 1024x768 */ @@ -279,7 +271,7 @@ static const unsigned char SiS_LCDStruct661[] = { }; #endif -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 static unsigned char SiS300_TrumpionData[14][80] = { { 0x02,0x0A,0x0A,0x01,0x04,0x01,0x00,0x03,0x0D,0x00,0x0D,0x10,0x7F,0x00,0x80,0x02, 0x20,0x03,0x0B,0x00,0x90,0x01,0xC1,0x01,0x60,0x0C,0x30,0x10,0x00,0x00,0x04,0x23, @@ -356,9 +348,6 @@ static unsigned char SiS300_TrumpionData[14][80] = { #endif void SiS_UnLockCRT2(struct SiS_Private *SiS_Pr); -#ifndef SIS_LINUX_KERNEL -void SiS_LockCRT2(struct SiS_Private *SiS_Pr); -#endif void SiS_EnableCRT2(struct SiS_Private *SiS_Pr); unsigned short SiS_GetRatePtr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex); void SiS_WaitRetrace1(struct SiS_Private *SiS_Pr); @@ -375,9 +364,6 @@ unsigned short SiS_GetVCLK2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo unsigned short RefreshRateTableIndex); unsigned short SiS_GetResInfo(struct SiS_Private *SiS_Pr,unsigned short ModeNo,unsigned short ModeIdIndex); void SiS_DisableBridge(struct SiS_Private *SiS_Pr); -#ifndef SIS_LINUX_KERNEL -void SiS_EnableBridge(struct SiS_Private *SiS_Pr); -#endif bool SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo); void SiS_SiS30xBLOn(struct SiS_Private *SiS_Pr); void SiS_SiS30xBLOff(struct SiS_Private *SiS_Pr); @@ -386,13 +372,9 @@ void SiS_SetCH700x(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned cha unsigned short SiS_GetCH700x(struct SiS_Private *SiS_Pr, unsigned short tempax); void SiS_SetCH701x(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val); unsigned short SiS_GetCH701x(struct SiS_Private *SiS_Pr, unsigned short tempax); -#ifndef SIS_LINUX_KERNEL -void SiS_SetCH70xx(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val); -unsigned short SiS_GetCH70xx(struct SiS_Private *SiS_Pr, unsigned short tempax); -#endif void SiS_SetCH70xxANDOR(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char orval,unsigned short andval); -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static void SiS_Chrontel701xOn(struct SiS_Private *SiS_Pr); static void SiS_Chrontel701xOff(struct SiS_Private *SiS_Pr); static void SiS_ChrontelInitTVVSync(struct SiS_Private 
*SiS_Pr); @@ -401,7 +383,7 @@ void SiS_Chrontel701xBLOn(struct SiS_Private *SiS_Pr); void SiS_Chrontel701xBLOff(struct SiS_Private *SiS_Pr); #endif /* 315 */ -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 static bool SiS_SetTrumpionBlock(struct SiS_Private *SiS_Pr, unsigned char *dataptr); void SiS_SetChrontelGPIO(struct SiS_Private *SiS_Pr, unsigned short myvbinfo); #endif @@ -412,21 +394,12 @@ unsigned short SiS_HandleDDC(struct SiS_Private *SiS_Pr, unsigned int VBFlags, i unsigned short adaptnum, unsigned short DDCdatatype, unsigned char *buffer, unsigned int VBFlags2); -#ifdef SIS_XORG_XF86 -unsigned short SiS_InitDDCRegs(struct SiS_Private *SiS_Pr, unsigned int VBFlags, - int VGAEngine, unsigned short adaptnum, unsigned short DDCdatatype, - bool checkcr32, unsigned int VBFlags2); -unsigned short SiS_ProbeDDC(struct SiS_Private *SiS_Pr); -unsigned short SiS_ReadDDC(struct SiS_Private *SiS_Pr, unsigned short DDCdatatype, - unsigned char *buffer); -#else static unsigned short SiS_InitDDCRegs(struct SiS_Private *SiS_Pr, unsigned int VBFlags, int VGAEngine, unsigned short adaptnum, unsigned short DDCdatatype, bool checkcr32, unsigned int VBFlags2); static unsigned short SiS_ProbeDDC(struct SiS_Private *SiS_Pr); static unsigned short SiS_ReadDDC(struct SiS_Private *SiS_Pr, unsigned short DDCdatatype, unsigned char *buffer); -#endif static void SiS_SetSwitchDDC2(struct SiS_Private *SiS_Pr); static unsigned short SiS_SetStart(struct SiS_Private *SiS_Pr); static unsigned short SiS_SetStop(struct SiS_Private *SiS_Pr); @@ -441,13 +414,13 @@ static unsigned short SiS_PrepareDDC(struct SiS_Private *SiS_Pr); static void SiS_SendACK(struct SiS_Private *SiS_Pr, unsigned short yesno); static unsigned short SiS_DoProbeDDC(struct SiS_Private *SiS_Pr); -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 static void SiS_OEM300Setting(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefTabindex); static void SetOEMLCDData2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex,unsigned short RefTableIndex); #endif -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 static void SiS_OEM310Setting(struct SiS_Private *SiS_Pr, unsigned short ModeNo,unsigned short ModeIdIndex, unsigned short RRTI); static void SiS_OEM661Setting(struct SiS_Private *SiS_Pr, @@ -482,15 +455,13 @@ extern void SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short M extern void SiS_CalcCRRegisters(struct SiS_Private *SiS_Pr, int depth); extern unsigned short SiS_GetRefCRTVCLK(struct SiS_Private *SiS_Pr, unsigned short Index, int UseWide); extern unsigned short SiS_GetRefCRT1CRTC(struct SiS_Private *SiS_Pr, unsigned short Index, int UseWide); -#ifdef SIS300 +#ifdef CONFIG_FB_SIS_300 extern void SiS_GetFIFOThresholdIndex300(struct SiS_Private *SiS_Pr, unsigned short *tempbx, unsigned short *tempcl); extern unsigned short SiS_GetFIFOThresholdB300(unsigned short tempbx, unsigned short tempcl); extern unsigned short SiS_GetLatencyFactor630(struct SiS_Private *SiS_Pr, unsigned short index); -#ifdef SIS_LINUX_KERNEL extern unsigned int sisfb_read_nbridge_pci_dword(struct SiS_Private *SiS_Pr, int reg); extern unsigned int sisfb_read_lpc_pci_dword(struct SiS_Private *SiS_Pr, int reg); #endif -#endif #endif diff --git a/drivers/video/sis/initextlfb.c b/drivers/video/sis/initextlfb.c index 99c04a4855d..9dec64da401 100644 --- a/drivers/video/sis/initextlfb.c +++ b/drivers/video/sis/initextlfb.c @@ -25,7 +25,6 @@ * Author: Thomas Winischhofer <thomas@winischhofer.net> */ -#include "osdef.h" 
#include "initdef.h" #include "vgatypes.h" #include "vstruct.h" @@ -59,7 +58,7 @@ sisfb_mode_rate_to_dclock(struct SiS_Private *SiS_Pr, unsigned char modeno, if(rateindex > 0) rateindex--; -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 switch(ModeNo) { case 0x5a: ModeNo = 0x50; break; case 0x5b: ModeNo = 0x56; @@ -103,7 +102,7 @@ sisfb_mode_rate_to_ddata(struct SiS_Private *SiS_Pr, unsigned char modeno, if(rateindex > 0) rateindex--; -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 switch(ModeNo) { case 0x5a: ModeNo = 0x50; break; case 0x5b: ModeNo = 0x56; @@ -187,7 +186,7 @@ sisfb_gettotalfrommode(struct SiS_Private *SiS_Pr, unsigned char modeno, int *ht if(rateindex > 0) rateindex--; -#ifdef SIS315H +#ifdef CONFIG_FB_SIS_315 switch(ModeNo) { case 0x5a: ModeNo = 0x50; break; case 0x5b: ModeNo = 0x56; diff --git a/drivers/video/sis/osdef.h b/drivers/video/sis/osdef.h deleted file mode 100644 index 6ff8f988a1a..00000000000 --- a/drivers/video/sis/osdef.h +++ /dev/null @@ -1,133 +0,0 @@ -/* $XFree86$ */ -/* $XdotOrg$ */ -/* - * OS depending defines - * - * Copyright (C) 2001-2005 by Thomas Winischhofer, Vienna, Austria - * - * If distributed as part of the Linux kernel, the following license terms - * apply: - * - * * This program is free software; you can redistribute it and/or modify - * * it under the terms of the GNU General Public License as published by - * * the Free Software Foundation; either version 2 of the named License, - * * or any later version. - * * - * * This program is distributed in the hope that it will be useful, - * * but WITHOUT ANY WARRANTY; without even the implied warranty of - * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * * GNU General Public License for more details. - * * - * * You should have received a copy of the GNU General Public License - * * along with this program; if not, write to the Free Software - * * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA - * - * Otherwise, the following license terms apply: - * - * * Redistribution and use in source and binary forms, with or without - * * modification, are permitted provided that the following conditions - * * are met: - * * 1) Redistributions of source code must retain the above copyright - * * notice, this list of conditions and the following disclaimer. - * * 2) Redistributions in binary form must reproduce the above copyright - * * notice, this list of conditions and the following disclaimer in the - * * documentation and/or other materials provided with the distribution. - * * 3) The name of the author may not be used to endorse or promote products - * * derived from this software without specific prior written permission. - * * - * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - * Author: Thomas Winischhofer <thomas@winischhofer.net> - * Silicon Integrated Systems, Inc. (used by permission) - * - */ - -#ifndef _SIS_OSDEF_H_ -#define _SIS_OSDEF_H_ - -/* The choices are: */ -#define SIS_LINUX_KERNEL /* Linux kernel framebuffer */ -#undef SIS_XORG_XF86 /* XFree86/X.org */ - -#ifdef OutPortByte -#undef OutPortByte -#endif - -#ifdef OutPortWord -#undef OutPortWord -#endif - -#ifdef OutPortLong -#undef OutPortLong -#endif - -#ifdef InPortByte -#undef InPortByte -#endif - -#ifdef InPortWord -#undef InPortWord -#endif - -#ifdef InPortLong -#undef InPortLong -#endif - -/**********************************************************************/ -/* LINUX KERNEL */ -/**********************************************************************/ - -#ifdef SIS_LINUX_KERNEL - -#ifdef CONFIG_FB_SIS_300 -#define SIS300 -#endif - -#ifdef CONFIG_FB_SIS_315 -#define SIS315H -#endif - -#if !defined(SIS300) && !defined(SIS315H) -#warning Neither CONFIG_FB_SIS_300 nor CONFIG_FB_SIS_315 is set -#warning sisfb will not work! -#endif - -#define OutPortByte(p,v) outb((u8)(v),(SISIOADDRESS)(p)) -#define OutPortWord(p,v) outw((u16)(v),(SISIOADDRESS)(p)) -#define OutPortLong(p,v) outl((u32)(v),(SISIOADDRESS)(p)) -#define InPortByte(p) inb((SISIOADDRESS)(p)) -#define InPortWord(p) inw((SISIOADDRESS)(p)) -#define InPortLong(p) inl((SISIOADDRESS)(p)) -#define SiS_SetMemory(MemoryAddress,MemorySize,value) memset_io(MemoryAddress, value, MemorySize) - -#endif /* LINUX_KERNEL */ - -/**********************************************************************/ -/* XFree86/X.org */ -/**********************************************************************/ - -#ifdef SIS_XORG_XF86 - -#define SIS300 -#define SIS315H - -#define OutPortByte(p,v) outSISREG((IOADDRESS)(p),(CARD8)(v)) -#define OutPortWord(p,v) outSISREGW((IOADDRESS)(p),(CARD16)(v)) -#define OutPortLong(p,v) outSISREGL((IOADDRESS)(p),(CARD32)(v)) -#define InPortByte(p) inSISREG((IOADDRESS)(p)) -#define InPortWord(p) inSISREGW((IOADDRESS)(p)) -#define InPortLong(p) inSISREGL((IOADDRESS)(p)) -#define SiS_SetMemory(MemoryAddress,MemorySize,value) memset(MemoryAddress, value, MemorySize) - -#endif /* XF86 */ - -#endif /* _OSDEF_H_ */ diff --git a/drivers/video/sis/sis.h b/drivers/video/sis/sis.h index 7c5710e3fb5..80d89d37c41 100644 --- a/drivers/video/sis/sis.h +++ b/drivers/video/sis/sis.h @@ -24,7 +24,6 @@ #ifndef _SIS_H_ #define _SIS_H_ -#include "osdef.h" #include <video/sisfb.h> #include "vgatypes.h" diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c index b52f8e4ef1f..7e3370f115b 100644 --- a/drivers/video/sis/sis_main.c +++ b/drivers/video/sis/sis_main.c @@ -60,6 +60,11 @@ #include "sis.h" #include "sis_main.h" +#if !defined(CONFIG_FB_SIS_300) && !defined(CONFIG_FB_SIS_315) +#warning Neither CONFIG_FB_SIS_300 nor CONFIG_FB_SIS_315 is set +#warning sisfb will not work! +#endif + static void sisfb_handle_command(struct sis_video_info *ivideo, struct sisfb_cmd *sisfb_command); @@ -4114,14 +4119,6 @@ sisfb_find_rom(struct pci_dev *pdev) if(sisfb_check_rom(rom_base, ivideo)) { if((myrombase = vmalloc(65536))) { - - /* Work around bug in pci/rom.c: Folks forgot to check - * whether the size retrieved from the BIOS image eventually - * is larger than the mapped size - */ - if(pci_resource_len(pdev, PCI_ROM_RESOURCE) < romsize) - romsize = pci_resource_len(pdev, PCI_ROM_RESOURCE); - memcpy_fromio(myrombase, rom_base, (romsize > 65536) ? 
65536 : romsize); } @@ -4155,23 +4152,6 @@ sisfb_find_rom(struct pci_dev *pdev) } -#else - - pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &temp); - pci_write_config_dword(pdev, PCI_ROM_ADDRESS, - (ivideo->video_base & PCI_ROM_ADDRESS_MASK) | PCI_ROM_ADDRESS_ENABLE); - - rom_base = ioremap(ivideo->video_base, 65536); - if(rom_base) { - if(sisfb_check_rom(rom_base, ivideo)) { - if((myrombase = vmalloc(65536))) - memcpy_fromio(myrombase, rom_base, 65536); - } - iounmap(rom_base); - } - - pci_write_config_dword(pdev, PCI_ROM_ADDRESS, temp); - #endif return myrombase; @@ -4181,6 +4161,9 @@ static void __devinit sisfb_post_map_vram(struct sis_video_info *ivideo, unsigned int *mapsize, unsigned int min) { + if (*mapsize < (min << 20)) + return; + ivideo->video_vbase = ioremap(ivideo->video_base, (*mapsize)); if(!ivideo->video_vbase) { @@ -4514,7 +4497,7 @@ sisfb_post_sis300(struct pci_dev *pdev) } else { #endif /* Need to map max FB size for finding out about RAM size */ - mapsize = 64 << 20; + mapsize = ivideo->video_size; sisfb_post_map_vram(ivideo, &mapsize, 4); if(ivideo->video_vbase) { @@ -4680,7 +4663,7 @@ sisfb_post_xgi_ramsize(struct sis_video_info *ivideo) orSISIDXREG(SISSR, 0x20, (0x80 | 0x04)); /* Need to map max FB size for finding out about RAM size */ - mapsize = 256 << 20; + mapsize = ivideo->video_size; sisfb_post_map_vram(ivideo, &mapsize, 32); if(!ivideo->video_vbase) { @@ -5936,6 +5919,7 @@ sisfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } ivideo->video_base = pci_resource_start(pdev, 0); + ivideo->video_size = pci_resource_len(pdev, 0); ivideo->mmio_base = pci_resource_start(pdev, 1); ivideo->mmio_size = pci_resource_len(pdev, 1); ivideo->SiS_Pr.RelIO = pci_resource_start(pdev, 2) + 0x30; diff --git a/drivers/video/sis/vgatypes.h b/drivers/video/sis/vgatypes.h index 81a22eaabfd..12c0dfaf251 100644 --- a/drivers/video/sis/vgatypes.h +++ b/drivers/video/sis/vgatypes.h @@ -55,21 +55,10 @@ #define SISIOMEMTYPE -#ifdef SIS_LINUX_KERNEL typedef unsigned long SISIOADDRESS; #include <linux/types.h> /* Need __iomem */ #undef SISIOMEMTYPE #define SISIOMEMTYPE __iomem -#endif - -#ifdef SIS_XORG_XF86 -#if XF86_VERSION_CURRENT < XF86_VERSION_NUMERIC(4,2,0,0,0) -typedef unsigned long IOADDRESS; -typedef unsigned long SISIOADDRESS; -#else -typedef IOADDRESS SISIOADDRESS; -#endif -#endif typedef enum _SIS_CHIP_TYPE { SIS_VGALegacy = 0, diff --git a/drivers/video/sis/vstruct.h b/drivers/video/sis/vstruct.h index bef4aae388d..ea94d214dcf 100644 --- a/drivers/video/sis/vstruct.h +++ b/drivers/video/sis/vstruct.h @@ -233,24 +233,15 @@ struct SiS_Private { unsigned char ChipType; unsigned char ChipRevision; -#ifdef SIS_XORG_XF86 - PCITAG PciTag; -#endif -#ifdef SIS_LINUX_KERNEL void *ivideo; -#endif unsigned char *VirtualRomBase; bool UseROM; -#ifdef SIS_LINUX_KERNEL unsigned char SISIOMEMTYPE *VideoMemoryAddress; unsigned int VideoMemorySize; -#endif SISIOADDRESS IOAddress; SISIOADDRESS IOAddress2; /* For dual chip XGI volari */ -#ifdef SIS_LINUX_KERNEL SISIOADDRESS RelIO; -#endif SISIOADDRESS SiS_P3c4; SISIOADDRESS SiS_P3d4; SISIOADDRESS SiS_P3c0; @@ -280,9 +271,6 @@ struct SiS_Private unsigned short SiS_IF_DEF_FSTN; unsigned short SiS_SysFlags; unsigned char SiS_VGAINFO; -#ifdef SIS_XORG_XF86 - unsigned short SiS_CP1, SiS_CP2, SiS_CP3, SiS_CP4; -#endif bool SiS_UseROM; bool SiS_ROMNew; bool SiS_XGIROM; diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 3a43ebf83a4..efb35aa8309 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -9,19 +9,19 
@@ static ssize_t device_show(struct device *_d, struct device_attribute *attr, char *buf) { struct virtio_device *dev = container_of(_d,struct virtio_device,dev); - return sprintf(buf, "%hu", dev->id.device); + return sprintf(buf, "0x%04x\n", dev->id.device); } static ssize_t vendor_show(struct device *_d, struct device_attribute *attr, char *buf) { struct virtio_device *dev = container_of(_d,struct virtio_device,dev); - return sprintf(buf, "%hu", dev->id.vendor); + return sprintf(buf, "0x%04x\n", dev->id.vendor); } static ssize_t status_show(struct device *_d, struct device_attribute *attr, char *buf) { struct virtio_device *dev = container_of(_d,struct virtio_device,dev); - return sprintf(buf, "0x%08x", dev->config->get_status(dev)); + return sprintf(buf, "0x%08x\n", dev->config->get_status(dev)); } static ssize_t modalias_show(struct device *_d, struct device_attribute *attr, char *buf) diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 1475ed6b575..cc2f73e0347 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -230,9 +230,6 @@ add_head: pr_debug("Added buffer head %i to %p\n", head, vq); END_USE(vq); - /* If we're indirect, we can fit many (assuming not OOM). */ - if (vq->indirect) - return vq->num_free ? vq->vring.num : 0; return vq->num_free; } EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp); diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 4a291045eba..a5ad77ef426 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -558,6 +558,9 @@ config IT8712F_WDT This is the driver for the built-in watchdog timer on the IT8712F Super I/0 chipset used on many motherboards. + If the driver does not work, then make sure that the game port in + the BIOS is enabled. + To compile this driver as a module, choose M here: the module will be called it8712f_wdt. 
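The virtio sysfs hunk above switches device_show() and vendor_show() from bare "%hu" decimal to zero-padded hex with a trailing newline, and adds the missing newline to status_show(), presumably so the IDs read as 0xNNNN like other hex-formatted attributes and so cat output ends cleanly. A short userspace sketch of the formatting difference, with hypothetical ID values (this is not kernel code):

/* Userspace sketch, hypothetical values: output difference between the
 * old "%hu" format and the new zero-padded hex with trailing newline. */
#include <stdio.h>

int main(void)
{
	unsigned short device = 0x0001;	/* hypothetical virtio device ID */
	unsigned short vendor = 0x1af4;	/* hypothetical vendor ID */

	printf("%hu", device);		/* old: prints "1", no newline */
	putchar('\n');			/* only so the demo output stays readable */

	printf("0x%04x\n", device);	/* new: prints "0x0001" plus newline */
	printf("0x%04x\n", vendor);	/* new: prints "0x1af4" plus newline */
	return 0;
}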
diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c index a1debc89356..3c5045a206d 100644 --- a/drivers/watchdog/bcm63xx_wdt.c +++ b/drivers/watchdog/bcm63xx_wdt.c @@ -18,7 +18,6 @@ #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/moduleparam.h> -#include <linux/reboot.h> #include <linux/types.h> #include <linux/uaccess.h> #include <linux/watchdog.h> @@ -220,14 +219,6 @@ static long bcm63xx_wdt_ioctl(struct file *file, unsigned int cmd, } } -static int bcm63xx_wdt_notify_sys(struct notifier_block *this, - unsigned long code, void *unused) -{ - if (code == SYS_DOWN || code == SYS_HALT) - bcm63xx_wdt_pause(); - return NOTIFY_DONE; -} - static const struct file_operations bcm63xx_wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, @@ -243,12 +234,8 @@ static struct miscdevice bcm63xx_wdt_miscdev = { .fops = &bcm63xx_wdt_fops, }; -static struct notifier_block bcm63xx_wdt_notifier = { - .notifier_call = bcm63xx_wdt_notify_sys, -}; - -static int bcm63xx_wdt_probe(struct platform_device *pdev) +static int __devinit bcm63xx_wdt_probe(struct platform_device *pdev) { int ret; struct resource *r; @@ -280,16 +267,10 @@ static int bcm63xx_wdt_probe(struct platform_device *pdev) wdt_time); } - ret = register_reboot_notifier(&bcm63xx_wdt_notifier); - if (ret) { - dev_err(&pdev->dev, "failed to register reboot_notifier\n"); - goto unregister_timer; - } - ret = misc_register(&bcm63xx_wdt_miscdev); if (ret < 0) { dev_err(&pdev->dev, "failed to register watchdog device\n"); - goto unregister_reboot_notifier; + goto unregister_timer; } dev_info(&pdev->dev, " started, timer margin: %d sec\n", @@ -297,8 +278,6 @@ static int bcm63xx_wdt_probe(struct platform_device *pdev) return 0; -unregister_reboot_notifier: - unregister_reboot_notifier(&bcm63xx_wdt_notifier); unregister_timer: bcm63xx_timer_unregister(TIMER_WDT_ID); unmap: @@ -306,25 +285,28 @@ unmap: return ret; } -static int bcm63xx_wdt_remove(struct platform_device *pdev) +static int __devexit bcm63xx_wdt_remove(struct platform_device *pdev) { if (!nowayout) bcm63xx_wdt_pause(); misc_deregister(&bcm63xx_wdt_miscdev); - - iounmap(bcm63xx_wdt_device.regs); - - unregister_reboot_notifier(&bcm63xx_wdt_notifier); bcm63xx_timer_unregister(TIMER_WDT_ID); - + iounmap(bcm63xx_wdt_device.regs); return 0; } +static void bcm63xx_wdt_shutdown(struct platform_device *pdev) +{ + bcm63xx_wdt_pause(); +} + static struct platform_driver bcm63xx_wdt = { .probe = bcm63xx_wdt_probe, - .remove = bcm63xx_wdt_remove, + .remove = __devexit_p(bcm63xx_wdt_remove), + .shutdown = bcm63xx_wdt_shutdown, .driver = { + .owner = THIS_MODULE, .name = "bcm63xx-wdt", } }; diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c index 9c21d19043a..f6bd6f10fce 100644 --- a/drivers/watchdog/gef_wdt.c +++ b/drivers/watchdog/gef_wdt.c @@ -30,6 +30,7 @@ #include <linux/module.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> +#include <linux/fs.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/io.h> diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c index f7e90fe47b7..b8838d2c67a 100644 --- a/drivers/watchdog/iTCO_wdt.c +++ b/drivers/watchdog/iTCO_wdt.c @@ -32,6 +32,7 @@ * document number 322169-001, 322170-003: 5 Series, 3400 Series (PCH) * document number 320066-003, 320257-008: EP80597 (IICH) * document number TBD : Cougar Point (CPT) + * document number TBD : Patsburg (PBG) */ /* @@ -146,7 +147,8 @@ enum iTCO_chipsets { TCO_CPT29, /* Cougar Point */ TCO_CPT30, /* Cougar Point */ 
TCO_CPT31, /* Cougar Point */ - TCO_PBG, /* Patsburg */ + TCO_PBG1, /* Patsburg */ + TCO_PBG2, /* Patsburg */ }; static struct { @@ -235,6 +237,7 @@ static struct { {"Cougar Point", 2}, {"Cougar Point", 2}, {"Patsburg", 2}, + {"Patsburg", 2}, {NULL, 0} }; @@ -350,7 +353,8 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = { { ITCO_PCI_DEVICE(0x1c5d, TCO_CPT29)}, { ITCO_PCI_DEVICE(0x1c5e, TCO_CPT30)}, { ITCO_PCI_DEVICE(0x1c5f, TCO_CPT31)}, - { ITCO_PCI_DEVICE(0x1d40, TCO_PBG)}, + { ITCO_PCI_DEVICE(0x1d40, TCO_PBG1)}, + { ITCO_PCI_DEVICE(0x1d41, TCO_PBG2)}, { 0, }, /* End of list */ }; MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl); diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index eb8a78d77d9..533a199e7a3 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile @@ -8,9 +8,12 @@ obj-$(CONFIG_BLOCK) += biomerge.o obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o obj-$(CONFIG_XEN_XENCOMM) += xencomm.o obj-$(CONFIG_XEN_BALLOON) += balloon.o -obj-$(CONFIG_XEN_DEV_EVTCHN) += evtchn.o +obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o obj-$(CONFIG_XENFS) += xenfs/ obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o obj-$(CONFIG_XEN_PLATFORM_PCI) += platform-pci.o obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o obj-$(CONFIG_XEN_DOM0) += pci.o + +xen-evtchn-y := evtchn.o + diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 500290b150b..43f9f02c7db 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -50,6 +50,7 @@ #include <asm/pgtable.h> #include <asm/uaccess.h> #include <asm/tlb.h> +#include <asm/e820.h> #include <asm/xen/hypervisor.h> #include <asm/xen/hypercall.h> @@ -119,7 +120,7 @@ static void scrub_page(struct page *page) } /* balloon_append: add the given page to the balloon. */ -static void balloon_append(struct page *page) +static void __balloon_append(struct page *page) { /* Lowmem is re-populated first, so highmem pages go at list tail. */ if (PageHighMem(page)) { @@ -130,7 +131,11 @@ static void balloon_append(struct page *page) list_add(&page->lru, &ballooned_pages); balloon_stats.balloon_low++; } +} +static void balloon_append(struct page *page) +{ + __balloon_append(page); totalram_pages--; } @@ -191,7 +196,7 @@ static unsigned long current_target(void) static int increase_reservation(unsigned long nr_pages) { - unsigned long pfn, i, flags; + unsigned long pfn, i; struct page *page; long rc; struct xen_memory_reservation reservation = { @@ -203,8 +208,6 @@ static int increase_reservation(unsigned long nr_pages) if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); - spin_lock_irqsave(&xen_reservation_lock, flags); - page = balloon_first_page(); for (i = 0; i < nr_pages; i++) { BUG_ON(page == NULL); @@ -247,14 +250,12 @@ static int increase_reservation(unsigned long nr_pages) balloon_stats.current_pages += rc; out: - spin_unlock_irqrestore(&xen_reservation_lock, flags); - return rc < 0 ? rc : rc != nr_pages; } static int decrease_reservation(unsigned long nr_pages) { - unsigned long pfn, i, flags; + unsigned long pfn, i; struct page *page; int need_sleep = 0; int ret; @@ -292,8 +293,6 @@ static int decrease_reservation(unsigned long nr_pages) kmap_flush_unused(); flush_tlb_all(); - spin_lock_irqsave(&xen_reservation_lock, flags); - /* No more mappings: invalidate P2M and add to balloon. 
*/ for (i = 0; i < nr_pages; i++) { pfn = mfn_to_pfn(frame_list[i]); @@ -308,8 +307,6 @@ static int decrease_reservation(unsigned long nr_pages) balloon_stats.current_pages -= nr_pages; - spin_unlock_irqrestore(&xen_reservation_lock, flags); - return need_sleep; } @@ -395,7 +392,7 @@ static struct notifier_block xenstore_notifier; static int __init balloon_init(void) { - unsigned long pfn; + unsigned long pfn, extra_pfn_end; struct page *page; if (!xen_pv_domain()) @@ -415,11 +412,24 @@ static int __init balloon_init(void) register_balloon(&balloon_sysdev); - /* Initialise the balloon with excess memory space. */ - for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) { + /* + * Initialise the balloon with excess memory space. We need + * to make sure we don't add memory which doesn't exist or + * logically exist. The E820 map can be trimmed to be smaller + * than the amount of physical memory due to the mem= command + * line parameter. And if this is a 32-bit non-HIGHMEM kernel + * on a system with memory which requires highmem to access, + * don't try to use it. + */ + extra_pfn_end = min(min(max_pfn, e820_end_of_ram_pfn()), + (unsigned long)PFN_DOWN(xen_extra_mem_start + xen_extra_mem_size)); + for (pfn = PFN_UP(xen_extra_mem_start); + pfn < extra_pfn_end; + pfn++) { page = pfn_to_page(pfn); - if (!PageReserved(page)) - balloon_append(page); + /* totalram_pages doesn't include the boot-time + balloon extension, so don't subtract from it. */ + __balloon_append(page); } target_watch.callback = watch_target; diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 97612f548a8..31af0ac31a9 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -105,7 +105,6 @@ struct irq_info static struct irq_info *irq_info; static int *pirq_to_irq; -static int nr_pirqs; static int *evtchn_to_irq; struct cpu_evtchn_s { @@ -278,17 +277,17 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu)); #endif - __clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq))); - __set_bit(chn, cpu_evtchn_mask(cpu)); + clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq))); + set_bit(chn, cpu_evtchn_mask(cpu)); irq_info[irq].cpu = cpu; } static void init_evtchn_cpu_bindings(void) { + int i; #ifdef CONFIG_SMP struct irq_desc *desc; - int i; /* By default all event channels notify CPU#0. */ for_each_irq_desc(i, desc) { @@ -296,7 +295,10 @@ static void init_evtchn_cpu_bindings(void) } #endif - memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s)); + for_each_possible_cpu(i) + memset(cpu_evtchn_mask(i), + (i == 0) ? 
~0 : 0, sizeof(struct cpu_evtchn_s)); + } static inline void clear_evtchn(int port) @@ -382,12 +384,17 @@ static int get_nr_hw_irqs(void) return ret; } -/* callers of this function should make sure that PHYSDEVOP_get_nr_pirqs - * succeeded otherwise nr_pirqs won't hold the right value */ -static int find_unbound_pirq(void) +static int find_unbound_pirq(int type) { - int i; - for (i = nr_pirqs-1; i >= 0; i--) { + int rc, i; + struct physdev_get_free_pirq op_get_free_pirq; + op_get_free_pirq.type = type; + + rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq); + if (!rc) + return op_get_free_pirq.pirq; + + for (i = 0; i < nr_irqs; i++) { if (pirq_to_irq[i] < 0) return i; } @@ -420,7 +427,7 @@ static int find_unbound_irq(void) if (irq == start) goto no_irqs; - res = irq_alloc_desc_at(irq, 0); + res = irq_alloc_desc_at(irq, -1); if (WARN_ON(res != irq)) return -1; @@ -608,10 +615,10 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name) spin_lock(&irq_mapping_update_lock); - if ((pirq > nr_pirqs) || (gsi > nr_irqs)) { + if ((pirq > nr_irqs) || (gsi > nr_irqs)) { printk(KERN_WARNING "xen_map_pirq_gsi: %s %s is incorrect!\n", - pirq > nr_pirqs ? "nr_pirqs" :"", - gsi > nr_irqs ? "nr_irqs" : ""); + pirq > nr_irqs ? "pirq" :"", + gsi > nr_irqs ? "gsi" : ""); goto out; } @@ -627,7 +634,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name) if (identity_mapped_irq(gsi) || (!xen_initial_domain() && xen_pv_domain())) { irq = gsi; - irq_alloc_desc_at(irq, 0); + irq_alloc_desc_at(irq, -1); } else irq = find_unbound_irq(); @@ -661,17 +668,21 @@ out: #include <linux/msi.h> #include "../pci/msi.h" -void xen_allocate_pirq_msi(char *name, int *irq, int *pirq) +void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc) { spin_lock(&irq_mapping_update_lock); - *irq = find_unbound_irq(); - if (*irq == -1) - goto out; + if (alloc & XEN_ALLOC_IRQ) { + *irq = find_unbound_irq(); + if (*irq == -1) + goto out; + } - *pirq = find_unbound_pirq(); - if (*pirq == -1) - goto out; + if (alloc & XEN_ALLOC_PIRQ) { + *pirq = find_unbound_pirq(MAP_PIRQ_TYPE_MSI); + if (*pirq == -1) + goto out; + } set_irq_chip_and_handler_name(*irq, &xen_pirq_chip, handle_level_irq, name); @@ -752,13 +763,14 @@ int xen_destroy_irq(int irq) goto out; if (xen_initial_domain()) { - unmap_irq.pirq = info->u.pirq.gsi; + unmap_irq.pirq = info->u.pirq.pirq; unmap_irq.domid = DOMID_SELF; rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq); if (rc) { printk(KERN_WARNING "unmap irq failed %d\n", rc); goto out; } + pirq_to_irq[info->u.pirq.pirq] = -1; } irq_info[irq] = mk_unbound_info(); @@ -779,6 +791,11 @@ int xen_gsi_from_irq(unsigned irq) return gsi_from_irq(irq); } +int xen_irq_from_pirq(unsigned pirq) +{ + return pirq_to_irq[pirq]; +} + int bind_evtchn_to_irq(unsigned int evtchn) { int irq; @@ -1276,6 +1293,42 @@ static int retrigger_dynirq(unsigned int irq) return ret; } +static void restore_cpu_pirqs(void) +{ + int pirq, rc, irq, gsi; + struct physdev_map_pirq map_irq; + + for (pirq = 0; pirq < nr_irqs; pirq++) { + irq = pirq_to_irq[pirq]; + if (irq == -1) + continue; + + /* save/restore of PT devices doesn't work, so at this point the + * only devices present are GSI based emulated devices */ + gsi = gsi_from_irq(irq); + if (!gsi) + continue; + + map_irq.domid = DOMID_SELF; + map_irq.type = MAP_PIRQ_TYPE_GSI; + map_irq.index = gsi; + map_irq.pirq = pirq; + + rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); + if (rc) { + printk(KERN_WARNING "xen 
map irq failed gsi=%d irq=%d pirq=%d rc=%d\n", + gsi, irq, pirq, rc); + irq_info[irq] = mk_unbound_info(); + pirq_to_irq[pirq] = -1; + continue; + } + + printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq); + + startup_pirq(irq); + } +} + static void restore_cpu_virqs(unsigned int cpu) { struct evtchn_bind_virq bind_virq; @@ -1299,9 +1352,6 @@ static void restore_cpu_virqs(unsigned int cpu) evtchn_to_irq[evtchn] = irq; irq_info[irq] = mk_virq_info(evtchn, virq); bind_evtchn_to_cpu(evtchn, cpu); - - /* Ready for use. */ - unmask_evtchn(evtchn); } } @@ -1327,10 +1377,6 @@ static void restore_cpu_ipis(unsigned int cpu) evtchn_to_irq[evtchn] = irq; irq_info[irq] = mk_ipi_info(evtchn, ipi); bind_evtchn_to_cpu(evtchn, cpu); - - /* Ready for use. */ - unmask_evtchn(evtchn); - } } @@ -1390,6 +1436,7 @@ void xen_poll_irq(int irq) void xen_irq_resume(void) { unsigned int cpu, irq, evtchn; + struct irq_desc *desc; init_evtchn_cpu_bindings(); @@ -1408,6 +1455,25 @@ void xen_irq_resume(void) restore_cpu_virqs(cpu); restore_cpu_ipis(cpu); } + + /* + * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These + * are not handled by the IRQ core. + */ + for_each_irq_desc(irq, desc) { + if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND)) + continue; + if (desc->status & IRQ_DISABLED) + continue; + + evtchn = evtchn_from_irq(irq); + if (evtchn == -1) + continue; + + unmask_evtchn(evtchn); + } + + restore_cpu_pirqs(); } static struct irq_chip xen_dynamic_chip __read_mostly = { @@ -1492,26 +1558,17 @@ void xen_callback_vector(void) {} void __init xen_init_IRQ(void) { - int i, rc; - struct physdev_nr_pirqs op_nr_pirqs; + int i; cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s), GFP_KERNEL); irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL); - rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_nr_pirqs, &op_nr_pirqs); - if (rc < 0) { - nr_pirqs = nr_irqs; - if (rc != -ENOSYS) - printk(KERN_WARNING "PHYSDEVOP_get_nr_pirqs returned rc=%d\n", rc); - } else { - if (xen_pv_domain() && !xen_initial_domain()) - nr_pirqs = max((int)op_nr_pirqs.nr_pirqs, nr_irqs); - else - nr_pirqs = op_nr_pirqs.nr_pirqs; - } - pirq_to_irq = kcalloc(nr_pirqs, sizeof(*pirq_to_irq), GFP_KERNEL); - for (i = 0; i < nr_pirqs; i++) + /* We are using nr_irqs as the maximum number of pirq available but + * that number is actually chosen by Xen and we don't know exactly + * what it is. Be careful choosing high pirq numbers. */ + pirq_to_irq = kcalloc(nr_irqs, sizeof(*pirq_to_irq), GFP_KERNEL); + for (i = 0; i < nr_irqs; i++) pirq_to_irq[i] = -1; evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq), diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c index fec6ba3c08a..ef11daf0caf 100644 --- a/drivers/xen/evtchn.c +++ b/drivers/xen/evtchn.c @@ -69,20 +69,51 @@ struct per_user_data { const char *name; }; -/* Who's bound to each port? */ -static struct per_user_data *port_user[NR_EVENT_CHANNELS]; +/* + * Who's bound to each port? This is logically an array of struct + * per_user_data *, but we encode the current enabled-state in bit 0. 
+ */ +static unsigned long *port_user; static DEFINE_SPINLOCK(port_user_lock); /* protects port_user[] and ring_prod */ -irqreturn_t evtchn_interrupt(int irq, void *data) +static inline struct per_user_data *get_port_user(unsigned port) +{ + return (struct per_user_data *)(port_user[port] & ~1); +} + +static inline void set_port_user(unsigned port, struct per_user_data *u) +{ + port_user[port] = (unsigned long)u; +} + +static inline bool get_port_enabled(unsigned port) +{ + return port_user[port] & 1; +} + +static inline void set_port_enabled(unsigned port, bool enabled) +{ + if (enabled) + port_user[port] |= 1; + else + port_user[port] &= ~1; +} + +static irqreturn_t evtchn_interrupt(int irq, void *data) { unsigned int port = (unsigned long)data; struct per_user_data *u; spin_lock(&port_user_lock); - u = port_user[port]; + u = get_port_user(port); + + WARN(!get_port_enabled(port), + "Interrupt for port %d, but apparently not enabled; per-user %p\n", + port, u); disable_irq_nosync(irq); + set_port_enabled(port, false); if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) { u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port; @@ -92,9 +123,8 @@ irqreturn_t evtchn_interrupt(int irq, void *data) kill_fasync(&u->evtchn_async_queue, SIGIO, POLL_IN); } - } else { + } else u->ring_overflow = 1; - } spin_unlock(&port_user_lock); @@ -198,9 +228,18 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf, goto out; spin_lock_irq(&port_user_lock); - for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) - if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u)) - enable_irq(irq_from_evtchn(kbuf[i])); + + for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) { + unsigned port = kbuf[i]; + + if (port < NR_EVENT_CHANNELS && + get_port_user(port) == u && + !get_port_enabled(port)) { + set_port_enabled(port, true); + enable_irq(irq_from_evtchn(port)); + } + } + spin_unlock_irq(&port_user_lock); rc = count; @@ -222,8 +261,9 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port) * interrupt handler yet, and our caller has already * serialized bind operations.) 
*/ - BUG_ON(port_user[port] != NULL); - port_user[port] = u; + BUG_ON(get_port_user(port) != NULL); + set_port_user(port, u); + set_port_enabled(port, true); /* start enabled */ rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED, u->name, (void *)(unsigned long)port); @@ -239,10 +279,7 @@ static void evtchn_unbind_from_user(struct per_user_data *u, int port) unbind_from_irqhandler(irq, (void *)(unsigned long)port); - /* make sure we unbind the irq handler before clearing the port */ - barrier(); - - port_user[port] = NULL; + set_port_user(port, NULL); } static long evtchn_ioctl(struct file *file, @@ -333,15 +370,17 @@ static long evtchn_ioctl(struct file *file, spin_lock_irq(&port_user_lock); rc = -ENOTCONN; - if (port_user[unbind.port] != u) { + if (get_port_user(unbind.port) != u) { spin_unlock_irq(&port_user_lock); break; } - evtchn_unbind_from_user(u, unbind.port); + disable_irq(irq_from_evtchn(unbind.port)); spin_unlock_irq(&port_user_lock); + evtchn_unbind_from_user(u, unbind.port); + rc = 0; break; } @@ -355,7 +394,7 @@ static long evtchn_ioctl(struct file *file, if (notify.port >= NR_EVENT_CHANNELS) { rc = -EINVAL; - } else if (port_user[notify.port] != u) { + } else if (get_port_user(notify.port) != u) { rc = -ENOTCONN; } else { notify_remote_via_evtchn(notify.port); @@ -431,7 +470,7 @@ static int evtchn_open(struct inode *inode, struct file *filp) filp->private_data = u; - return 0; + return nonseekable_open(inode, filp);; } static int evtchn_release(struct inode *inode, struct file *filp) @@ -444,14 +483,21 @@ static int evtchn_release(struct inode *inode, struct file *filp) free_page((unsigned long)u->ring); for (i = 0; i < NR_EVENT_CHANNELS; i++) { - if (port_user[i] != u) + if (get_port_user(i) != u) continue; - evtchn_unbind_from_user(port_user[i], i); + disable_irq(irq_from_evtchn(i)); } spin_unlock_irq(&port_user_lock); + for (i = 0; i < NR_EVENT_CHANNELS; i++) { + if (get_port_user(i) != u) + continue; + + evtchn_unbind_from_user(get_port_user(i), i); + } + kfree(u->name); kfree(u); @@ -467,12 +513,12 @@ static const struct file_operations evtchn_fops = { .fasync = evtchn_fasync, .open = evtchn_open, .release = evtchn_release, - .llseek = noop_llseek, + .llseek = no_llseek, }; static struct miscdevice evtchn_miscdev = { .minor = MISC_DYNAMIC_MINOR, - .name = "evtchn", + .name = "xen/evtchn", .fops = &evtchn_fops, }; static int __init evtchn_init(void) @@ -482,8 +528,11 @@ static int __init evtchn_init(void) if (!xen_domain()) return -ENODEV; + port_user = kcalloc(NR_EVENT_CHANNELS, sizeof(*port_user), GFP_KERNEL); + if (port_user == NULL) + return -ENOMEM; + spin_lock_init(&port_user_lock); - memset(port_user, 0, sizeof(port_user)); /* Create '/dev/misc/evtchn'. 
*/ err = misc_register(&evtchn_miscdev); @@ -499,6 +548,9 @@ static int __init evtchn_init(void) static void __exit evtchn_cleanup(void) { + kfree(port_user); + port_user = NULL; + misc_deregister(&evtchn_miscdev); } diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index ef9c7db5207..db8c4c4ac88 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -49,6 +49,7 @@ static int xen_hvm_suspend(void *data) if (!*cancelled) { xen_irq_resume(); + xen_console_resume(); xen_timer_resume(); } diff --git a/drivers/xen/xenfs/privcmd.c b/drivers/xen/xenfs/privcmd.c index f80be7f6eb9..dbd3b16fd13 100644 --- a/drivers/xen/xenfs/privcmd.c +++ b/drivers/xen/xenfs/privcmd.c @@ -15,7 +15,6 @@ #include <linux/mman.h> #include <linux/uaccess.h> #include <linux/swap.h> -#include <linux/smp_lock.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/seq_file.h> @@ -266,9 +265,7 @@ static int mmap_return_errors(void *data, void *state) xen_pfn_t *mfnp = data; struct mmap_batch_state *st = state; - put_user(*mfnp, st->user++); - - return 0; + return put_user(*mfnp, st->user++); } static struct vm_operations_struct privcmd_vm_ops; @@ -323,10 +320,8 @@ static long privcmd_ioctl_mmap_batch(void __user *udata) up_write(&mm->mmap_sem); if (state.err > 0) { - ret = 0; - state.user = m.arr; - traverse_pages(m.num, sizeof(xen_pfn_t), + ret = traverse_pages(m.num, sizeof(xen_pfn_t), &pagelist, mmap_return_errors, &state); } @@ -384,8 +379,9 @@ static int privcmd_mmap(struct file *file, struct vm_area_struct *vma) if (xen_feature(XENFEAT_auto_translated_physmap)) return -ENOSYS; - /* DONTCOPY is essential for Xen as copy_page_range is broken. */ - vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY; + /* DONTCOPY is essential for Xen because copy_page_range doesn't know + * how to recreate these mappings */ + vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP; vma->vm_ops = &privcmd_vm_ops; vma->vm_private_data = NULL; diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c index f6339d11d59..1aa38971984 100644 --- a/drivers/xen/xenfs/super.c +++ b/drivers/xen/xenfs/super.c @@ -12,8 +12,6 @@ #include <linux/module.h> #include <linux/fs.h> #include <linux/magic.h> -#include <linux/mm.h> -#include <linux/backing-dev.h> #include <xen/xen.h> @@ -24,28 +22,12 @@ MODULE_DESCRIPTION("Xen filesystem"); MODULE_LICENSE("GPL"); -static int xenfs_set_page_dirty(struct page *page) -{ - return !TestSetPageDirty(page); -} - -static const struct address_space_operations xenfs_aops = { - .set_page_dirty = xenfs_set_page_dirty, -}; - -static struct backing_dev_info xenfs_backing_dev_info = { - .ra_pages = 0, /* No readahead */ - .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, -}; - static struct inode *xenfs_make_inode(struct super_block *sb, int mode) { struct inode *ret = new_inode(sb); if (ret) { ret->i_mode = mode; - ret->i_mapping->a_ops = &xenfs_aops; - ret->i_mapping->backing_dev_info = &xenfs_backing_dev_info; ret->i_uid = ret->i_gid = 0; ret->i_blocks = 0; ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME; @@ -121,9 +103,9 @@ static int xenfs_fill_super(struct super_block *sb, void *data, int silent) return rc; } -static int xenfs_mount(struct file_system_type *fs_type, - int flags, const char *dev_name, - void *data) +static struct dentry *xenfs_mount(struct file_system_type *fs_type, + int flags, const char *dev_name, + void *data) { return mount_single(fs_type, flags, data, xenfs_fill_super); } @@ -137,25 +119,11 @@ static struct file_system_type xenfs_type = { 
static int __init xenfs_init(void) { - int err; - if (!xen_domain()) { - printk(KERN_INFO "xenfs: not registering filesystem on non-xen platform\n"); - return 0; - } - - err = register_filesystem(&xenfs_type); - if (err) { - printk(KERN_ERR "xenfs: Unable to register filesystem!\n"); - goto out; - } - - err = bdi_init(&xenfs_backing_dev_info); - if (err) - unregister_filesystem(&xenfs_type); - - out: + if (xen_domain()) + return register_filesystem(&xenfs_type); - return err; + printk(KERN_INFO "XENFS: not registering filesystem on non-xen platform\n"); + return 0; } static void __exit xenfs_exit(void) diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c index cafc5045429..e0c84725d3e 100644 --- a/drivers/zorro/proc.c +++ b/drivers/zorro/proc.c @@ -13,7 +13,6 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/init.h> -#include <linux/smp_lock.h> #include <asm/uaccess.h> #include <asm/amigahw.h> #include <asm/setup.h>
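One pattern worth calling out from the evtchn.c hunks above: port_user[] stops being a plain array of struct per_user_data pointers and becomes an array of unsigned long values whose low bit records whether the port is currently enabled, with get_port_user()/set_port_enabled() hiding the encoding. The standalone sketch below shows the same bit-0 tagging idea with hypothetical names; it assumes the stored pointers are at least 2-byte aligned so bit 0 of the address is always free.

/* Standalone C sketch (hypothetical names) of the bit-0 pointer tagging
 * used above: one word stores both an owner pointer and an "enabled"
 * flag. Assumes the pointed-to objects are at least 2-byte aligned so
 * bit 0 of the address is always zero. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct user {				/* stand-in for struct per_user_data */
	const char *name;
};

static unsigned long slot;		/* stand-in for one port_user[] entry */

static struct user *get_owner(void)
{
	return (struct user *)(slot & ~1UL);	/* mask off the flag bit */
}

static void set_owner(struct user *u)
{
	slot = (unsigned long)u;		/* new owner, flag cleared */
}

static bool get_enabled(void)
{
	return slot & 1UL;
}

static void set_enabled(bool on)
{
	if (on)
		slot |= 1UL;
	else
		slot &= ~1UL;
}

int main(void)
{
	static struct user u = { "demo" };

	set_owner(&u);			/* bind: store the owner */
	set_enabled(true);		/* ports start enabled on bind */
	assert(get_owner() == &u && get_enabled());

	set_enabled(false);		/* e.g. after an event has been queued */
	assert(get_owner() == &u && !get_enabled());

	printf("owner=%s enabled=%d\n", get_owner()->name, (int)get_enabled());
	return 0;
}

Folding the flag into the same word is presumably what lets the interrupt handler look up the owner and clear the enabled bit in a single slot under the existing port_user_lock, without adding a second per-port array.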