Diffstat (limited to 'drivers')
347 files changed, 24936 insertions, 4584 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 61631edfecc..3bb154d8c8c 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -54,6 +54,8 @@ source "drivers/spi/Kconfig"
 
 source "drivers/pps/Kconfig"
 
+source "drivers/ptp/Kconfig"
+
 source "drivers/gpio/Kconfig"
 
 source "drivers/w1/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 145aeadb6c0..6b17f586434 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -75,6 +75,7 @@ obj-$(CONFIG_I2O) += message/
 obj-$(CONFIG_RTC_LIB) += rtc/
 obj-y += i2c/ media/
 obj-$(CONFIG_PPS) += pps/
+obj-$(CONFIG_PTP_1588_CLOCK) += ptp/
 obj-$(CONFIG_W1) += w1/
 obj-$(CONFIG_POWER_SUPPLY) += power/
 obj-$(CONFIG_HWMON) += hwmon/
@@ -94,7 +95,7 @@ obj-$(CONFIG_CPU_IDLE) += cpuidle/
 obj-$(CONFIG_DMA_ENGINE) += dma/
 obj-$(CONFIG_MMC) += mmc/
 obj-$(CONFIG_MEMSTICK) += memstick/
-obj-$(CONFIG_NEW_LEDS) += leds/
+obj-y += leds/
 obj-$(CONFIG_INFINIBAND) += infiniband/
 obj-$(CONFIG_SGI_SN) += sn/
 obj-y += firmware/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 3a17ca5fff6..bc2218db5ba 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -73,17 +73,6 @@ config ACPI_PROCFS_POWER
 	  Say N to delete power /proc/acpi/ directories that have moved to
 	  /sys/
 
-config ACPI_POWER_METER
-	tristate "ACPI 4.0 power meter"
-	depends on HWMON
-	help
-	  This driver exposes ACPI 4.0 power meters as hardware monitoring
-	  devices.  Say Y (or M) if you have a computer with ACPI 4.0 firmware
-	  and a power meter.
-
-	  To compile this driver as a module, choose M here:
-	  the module will be called power-meter.
-
 config ACPI_EC_DEBUGFS
 	tristate "EC read/write access through /sys/kernel/debug/ec"
 	default n
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index d113fa5100b..b66fbb2fc85 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -59,7 +59,6 @@ obj-$(CONFIG_ACPI_HOTPLUG_MEMORY) += acpi_memhotplug.o
 obj-$(CONFIG_ACPI_BATTERY) += battery.o
 obj-$(CONFIG_ACPI_SBS) += sbshc.o
 obj-$(CONFIG_ACPI_SBS) += sbs.o
-obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o
 obj-$(CONFIG_ACPI_HED) += hed.o
 obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o
 
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 096aebfe7f3..f74b2ea11f2 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -101,6 +101,14 @@ static DEFINE_MUTEX(einj_mutex);
 
 static struct einj_parameter *einj_param;
 
+#ifndef writeq
+static inline void writeq(__u64 val, volatile void __iomem *addr)
+{
+	writel(val, addr);
+	writel(val >> 32, addr+4);
+}
+#endif
+
 static void einj_exec_ctx_init(struct apei_exec_context *ctx)
 {
 	apei_exec_ctx_init(ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type),
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
index 542e5390389..7489b89c300 100644
--- a/drivers/acpi/atomicio.c
+++ b/drivers/acpi/atomicio.c
@@ -280,9 +280,11 @@ static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
 	case 32:
 		*val = readl(addr);
 		break;
+#ifdef readq
 	case 64:
 		*val = readq(addr);
 		break;
+#endif
 	default:
 		return -EINVAL;
 	}
@@ -307,9 +309,11 @@ static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
 	case 32:
 		writel(val, addr);
 		break;
+#ifdef writeq
 	case 64:
 		writeq(val, addr);
 		break;
+#endif
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 30ea95f43e7..d51f9795c06 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1089,21 +1089,21 @@ static int atapi_drain_needed(struct request *rq)
 static int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device
*dev) { + struct request_queue *q = sdev->request_queue; + if (!ata_id_has_unload(dev->id)) dev->flags |= ATA_DFLAG_NO_UNLOAD; /* configure max sectors */ - blk_queue_max_hw_sectors(sdev->request_queue, dev->max_sectors); + blk_queue_max_hw_sectors(q, dev->max_sectors); if (dev->class == ATA_DEV_ATAPI) { - struct request_queue *q = sdev->request_queue; void *buf; sdev->sector_size = ATA_SECT_SIZE; /* set DMA padding */ - blk_queue_update_dma_pad(sdev->request_queue, - ATA_DMA_PAD_SZ - 1); + blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1); /* configure draining */ buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL); @@ -1131,8 +1131,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev, "sector_size=%u > PAGE_SIZE, PIO may malfunction\n", sdev->sector_size); - blk_queue_update_dma_alignment(sdev->request_queue, - sdev->sector_size - 1); + blk_queue_update_dma_alignment(q, sdev->sector_size - 1); if (dev->flags & ATA_DFLAG_AN) set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events); @@ -1145,6 +1144,8 @@ static int ata_scsi_dev_config(struct scsi_device *sdev, scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth); } + blk_queue_flush_queueable(q, false); + dev->sdev = sdev; return 0; } diff --git a/drivers/base/node.c b/drivers/base/node.c index b3b72d64e80..793f796c4da 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -7,6 +7,7 @@ #include <linux/init.h> #include <linux/mm.h> #include <linux/memory.h> +#include <linux/vmstat.h> #include <linux/node.h> #include <linux/hugetlb.h> #include <linux/compaction.h> @@ -179,11 +180,14 @@ static ssize_t node_read_vmstat(struct sys_device *dev, struct sysdev_attribute *attr, char *buf) { int nid = dev->id; - return sprintf(buf, - "nr_written %lu\n" - "nr_dirtied %lu\n", - node_page_state(nid, NR_WRITTEN), - node_page_state(nid, NR_DIRTIED)); + int i; + int n = 0; + + for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) + n += sprintf(buf+n, "%s %lu\n", vmstat_text[i], + node_page_state(nid, i)); + + return n; } static SYSDEV_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL); diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c index 99dd36e8500..ffd8797faf4 100644 --- a/drivers/bcma/host_pci.c +++ b/drivers/bcma/host_pci.c @@ -171,6 +171,7 @@ static void bcma_host_pci_remove(struct pci_dev *dev) } static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = { + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index 83c32cb7258..717d6e4e18d 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -470,6 +470,27 @@ config XEN_BLKDEV_FRONTEND block device driver. It communicates with a back-end driver in another domain which drives the actual block device. +config XEN_BLKDEV_BACKEND + tristate "Block-device backend driver" + depends on XEN_BACKEND + help + The block-device backend driver allows the kernel to export its + block devices to other guests via a high-performance shared-memory + interface. + + The corresponding Linux frontend driver is enabled by the + CONFIG_XEN_BLKDEV_FRONTEND configuration option. + + The backend driver attaches itself to a any block device specified + in the XenBus configuration. There are no limits to what the block + device as long as it has a major and minor. + + If you are compiling a kernel to run in a Xen block backend driver + domain (often this is domain 0) you should say Y here. 
To + compile this driver as a module, chose M here: the module + will be called xen-blkback. + + config VIRTIO_BLK tristate "Virtio block driver (EXPERIMENTAL)" depends on EXPERIMENTAL && VIRTIO diff --git a/drivers/block/Makefile b/drivers/block/Makefile index 40528ba56d1..76646e9a1c9 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile @@ -36,6 +36,7 @@ obj-$(CONFIG_BLK_DEV_UB) += ub.o obj-$(CONFIG_BLK_DEV_HD) += hd.o obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o +obj-$(CONFIG_XEN_BLKDEV_BACKEND) += xen-blkback/ obj-$(CONFIG_BLK_DEV_DRBD) += drbd/ obj-$(CONFIG_BLK_DEV_RBD) += rbd.o diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 9bf13988f1a..8f4ef656a1a 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -64,6 +64,10 @@ MODULE_DESCRIPTION("Driver for HP Smart Array Controllers"); MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers"); MODULE_VERSION("3.6.26"); MODULE_LICENSE("GPL"); +static int cciss_tape_cmds = 6; +module_param(cciss_tape_cmds, int, 0644); +MODULE_PARM_DESC(cciss_tape_cmds, + "number of commands to allocate for tape devices (default: 6)"); static DEFINE_MUTEX(cciss_mutex); static struct proc_dir_entry *proc_cciss; @@ -194,6 +198,8 @@ static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev, static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev, unsigned long *memory_bar); static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag); +static __devinit int write_driver_ver_to_cfgtable( + CfgTable_struct __iomem *cfgtable); /* performant mode helper functions */ static void calc_bucket_map(int *bucket, int num_buckets, int nsgs, @@ -556,7 +562,7 @@ static void __devinit cciss_procinit(ctlr_info_t *h) #define to_hba(n) container_of(n, struct ctlr_info, dev) #define to_drv(n) container_of(n, drive_info_struct, dev) -/* List of controllers which cannot be reset on kexec with reset_devices */ +/* List of controllers which cannot be hard reset on kexec with reset_devices */ static u32 unresettable_controller[] = { 0x324a103C, /* Smart Array P712m */ 0x324b103C, /* SmartArray P711m */ @@ -574,23 +580,45 @@ static u32 unresettable_controller[] = { 0x409D0E11, /* Smart Array 6400 EM */ }; -static int ctlr_is_resettable(struct ctlr_info *h) +/* List of controllers which cannot even be soft reset */ +static u32 soft_unresettable_controller[] = { + 0x409C0E11, /* Smart Array 6400 */ + 0x409D0E11, /* Smart Array 6400 EM */ +}; + +static int ctlr_is_hard_resettable(u32 board_id) { int i; for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++) - if (unresettable_controller[i] == h->board_id) + if (unresettable_controller[i] == board_id) return 0; return 1; } +static int ctlr_is_soft_resettable(u32 board_id) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++) + if (soft_unresettable_controller[i] == board_id) + return 0; + return 1; +} + +static int ctlr_is_resettable(u32 board_id) +{ + return ctlr_is_hard_resettable(board_id) || + ctlr_is_soft_resettable(board_id); +} + static ssize_t host_show_resettable(struct device *dev, struct device_attribute *attr, char *buf) { struct ctlr_info *h = to_hba(dev); - return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h)); + return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id)); } static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL); @@ -2567,7 +2595,7 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff, } } else if (cmd_type == TYPE_MSG) { switch (cmd) { - case 0: /* ABORT message */ + 
case CCISS_ABORT_MSG: c->Request.CDBLen = 12; c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = XFER_WRITE; @@ -2577,16 +2605,16 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff, /* buff contains the tag of the command to abort */ memcpy(&c->Request.CDB[4], buff, 8); break; - case 1: /* RESET message */ + case CCISS_RESET_MSG: c->Request.CDBLen = 16; c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = XFER_NONE; c->Request.Timeout = 0; memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); c->Request.CDB[0] = cmd; /* reset */ - c->Request.CDB[1] = 0x03; /* reset a target */ + c->Request.CDB[1] = CCISS_RESET_TYPE_TARGET; break; - case 3: /* No-Op message */ + case CCISS_NOOP_MSG: c->Request.CDBLen = 1; c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = XFER_WRITE; @@ -2615,6 +2643,31 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff, return status; } +static int __devinit cciss_send_reset(ctlr_info_t *h, unsigned char *scsi3addr, + u8 reset_type) +{ + CommandList_struct *c; + int return_status; + + c = cmd_alloc(h); + if (!c) + return -ENOMEM; + return_status = fill_cmd(h, c, CCISS_RESET_MSG, NULL, 0, 0, + CTLR_LUNID, TYPE_MSG); + c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */ + if (return_status != IO_OK) { + cmd_special_free(h, c); + return return_status; + } + c->waiting = NULL; + enqueue_cmd_and_start_io(h, c); + /* Don't wait for completion, the reset won't complete. Don't free + * the command either. This is the last command we will send before + * re-initializing everything, so it doesn't matter and won't leak. + */ + return 0; +} + static int check_target_status(ctlr_info_t *h, CommandList_struct *c) { switch (c->err_info->ScsiStatus) { @@ -3461,6 +3514,63 @@ static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag) return next_command(h); } +/* Some controllers, like p400, will give us one interrupt + * after a soft reset, even if we turned interrupts off. + * Only need to check for this in the cciss_xxx_discard_completions + * functions. + */ +static int ignore_bogus_interrupt(ctlr_info_t *h) +{ + if (likely(!reset_devices)) + return 0; + + if (likely(h->interrupts_enabled)) + return 0; + + dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled " + "(known firmware bug.) 
Ignoring.\n"); + + return 1; +} + +static irqreturn_t cciss_intx_discard_completions(int irq, void *dev_id) +{ + ctlr_info_t *h = dev_id; + unsigned long flags; + u32 raw_tag; + + if (ignore_bogus_interrupt(h)) + return IRQ_NONE; + + if (interrupt_not_for_us(h)) + return IRQ_NONE; + spin_lock_irqsave(&h->lock, flags); + while (interrupt_pending(h)) { + raw_tag = get_next_completion(h); + while (raw_tag != FIFO_EMPTY) + raw_tag = next_command(h); + } + spin_unlock_irqrestore(&h->lock, flags); + return IRQ_HANDLED; +} + +static irqreturn_t cciss_msix_discard_completions(int irq, void *dev_id) +{ + ctlr_info_t *h = dev_id; + unsigned long flags; + u32 raw_tag; + + if (ignore_bogus_interrupt(h)) + return IRQ_NONE; + + spin_lock_irqsave(&h->lock, flags); + raw_tag = get_next_completion(h); + while (raw_tag != FIFO_EMPTY) + raw_tag = next_command(h); + spin_unlock_irqrestore(&h->lock, flags); + return IRQ_HANDLED; +} + static irqreturn_t do_cciss_intx(int irq, void *dev_id) { ctlr_info_t *h = dev_id; @@ -4078,6 +4188,9 @@ static int __devinit cciss_find_cfgtables(ctlr_info_t *h) cfg_base_addr_index) + cfg_offset, sizeof(h->cfgtable)); if (!h->cfgtable) return -ENOMEM; + rc = write_driver_ver_to_cfgtable(h->cfgtable); + if (rc) + return rc; /* Find performant mode table. */ trans_offset = readl(&h->cfgtable->TransMethodOffset); h->transtable = remap_pci_mem(pci_resource_start(h->pdev, @@ -4112,7 +4225,7 @@ static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h) static void __devinit cciss_find_board_params(ctlr_info_t *h) { cciss_get_max_perf_mode_cmds(h); - h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */ + h->nr_cmds = h->max_commands - 4 - cciss_tape_cmds; h->maxsgentries = readl(&(h->cfgtable->MaxSGElements)); /* * Limit in-command s/g elements to 32 save dma'able memory. @@ -4348,7 +4461,7 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); if ((tag & ~3) == paddr32) break; - schedule_timeout_uninterruptible(HZ); + msleep(CCISS_POST_RESET_NOOP_TIMEOUT_MSECS); } iounmap(vaddr); @@ -4375,11 +4488,10 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u return 0; } -#define cciss_soft_reset_controller(p) cciss_message(p, 1, 0) #define cciss_noop(p) cciss_message(p, 3, 0) static int cciss_controller_hard_reset(struct pci_dev *pdev, - void * __iomem vaddr, bool use_doorbell) + void * __iomem vaddr, u32 use_doorbell) { u16 pmcsr; int pos; @@ -4390,8 +4502,7 @@ static int cciss_controller_hard_reset(struct pci_dev *pdev, * other way using the doorbell register. 
*/ dev_info(&pdev->dev, "using doorbell to reset controller\n"); - writel(DOORBELL_CTLR_RESET, vaddr + SA5_DOORBELL); - msleep(1000); + writel(use_doorbell, vaddr + SA5_DOORBELL); } else { /* Try to do it the PCI power state way */ /* Quoting from the Open CISS Specification: "The Power @@ -4422,12 +4533,64 @@ static int cciss_controller_hard_reset(struct pci_dev *pdev, pmcsr &= ~PCI_PM_CTRL_STATE_MASK; pmcsr |= PCI_D0; pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); - - msleep(500); } return 0; } +static __devinit void init_driver_version(char *driver_version, int len) +{ + memset(driver_version, 0, len); + strncpy(driver_version, "cciss " DRIVER_NAME, len - 1); +} + +static __devinit int write_driver_ver_to_cfgtable( + CfgTable_struct __iomem *cfgtable) +{ + char *driver_version; + int i, size = sizeof(cfgtable->driver_version); + + driver_version = kmalloc(size, GFP_KERNEL); + if (!driver_version) + return -ENOMEM; + + init_driver_version(driver_version, size); + for (i = 0; i < size; i++) + writeb(driver_version[i], &cfgtable->driver_version[i]); + kfree(driver_version); + return 0; +} + +static __devinit void read_driver_ver_from_cfgtable( + CfgTable_struct __iomem *cfgtable, unsigned char *driver_ver) +{ + int i; + + for (i = 0; i < sizeof(cfgtable->driver_version); i++) + driver_ver[i] = readb(&cfgtable->driver_version[i]); +} + +static __devinit int controller_reset_failed( + CfgTable_struct __iomem *cfgtable) +{ + + char *driver_ver, *old_driver_ver; + int rc, size = sizeof(cfgtable->driver_version); + + old_driver_ver = kmalloc(2 * size, GFP_KERNEL); + if (!old_driver_ver) + return -ENOMEM; + driver_ver = old_driver_ver + size; + + /* After a reset, the 32 bytes of "driver version" in the cfgtable + * should have been changed, otherwise we know the reset failed. + */ + init_driver_version(old_driver_ver, size); + read_driver_ver_from_cfgtable(cfgtable, driver_ver); + rc = !memcmp(driver_ver, old_driver_ver, size); + kfree(old_driver_ver); + return rc; +} + /* This does a hard reset of the controller using PCI power management * states or using the doorbell register. */ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) @@ -4437,10 +4600,10 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) u64 cfg_base_addr_index; void __iomem *vaddr; unsigned long paddr; - u32 misc_fw_support, active_transport; + u32 misc_fw_support; int rc; CfgTable_struct __iomem *cfgtable; - bool use_doorbell; + u32 use_doorbell; u32 board_id; u16 command_register; @@ -4464,12 +4627,16 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) * likely not be happy. Just forbid resetting this conjoined mess. */ cciss_lookup_board_id(pdev, &board_id); - if (board_id == 0x409C0E11 || board_id == 0x409D0E11) { + if (!ctlr_is_resettable(board_id)) { dev_warn(&pdev->dev, "Cannot reset Smart Array 640x " "due to shared cache module."); return -ENODEV; } + /* if controller is soft- but not hard resettable... */ + if (!ctlr_is_hard_resettable(board_id)) + return -ENOTSUPP; /* try soft reset later. */ + /* Save the PCI command register */ pci_read_config_word(pdev, 4, &command_register); /* Turn the board off. This is so that later pci_restore_state() @@ -4497,16 +4664,28 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) rc = -ENOMEM; goto unmap_vaddr; } + rc = write_driver_ver_to_cfgtable(cfgtable); + if (rc) + goto unmap_vaddr; - /* If reset via doorbell register is supported, use that. 
*/ - misc_fw_support = readl(&cfgtable->misc_fw_support); - use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; - - /* The doorbell reset seems to cause lockups on some Smart - * Arrays (e.g. P410, P410i, maybe others). Until this is - * fixed or at least isolated, avoid the doorbell reset. + /* If reset via doorbell register is supported, use that. + * There are two such methods. Favor the newest method. */ - use_doorbell = 0; + misc_fw_support = readl(&cfgtable->misc_fw_support); + use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2; + if (use_doorbell) { + use_doorbell = DOORBELL_CTLR_RESET2; + } else { + use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; + if (use_doorbell) { + dev_warn(&pdev->dev, "Controller claims that " + "'Bit 2 doorbell reset' is " + "supported, but not 'bit 5 doorbell reset'. " + "Firmware update is recommended.\n"); + rc = -ENOTSUPP; /* use the soft reset */ + goto unmap_cfgtable; + } + } rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell); if (rc) @@ -4524,30 +4703,31 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) msleep(CCISS_POST_RESET_PAUSE_MSECS); /* Wait for board to become not ready, then ready. */ - dev_info(&pdev->dev, "Waiting for board to become ready.\n"); + dev_info(&pdev->dev, "Waiting for board to reset.\n"); rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY); - if (rc) /* Don't bail, might be E500, etc. which can't be reset */ - dev_warn(&pdev->dev, - "failed waiting for board to become not ready\n"); + if (rc) { + dev_warn(&pdev->dev, "Failed waiting for board to hard reset." + " Will try soft reset.\n"); + rc = -ENOTSUPP; /* Not expected, but try soft reset later */ + goto unmap_cfgtable; + } rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY); if (rc) { dev_warn(&pdev->dev, - "failed waiting for board to become ready\n"); + "failed waiting for board to become ready " + "after hard reset\n"); goto unmap_cfgtable; } - dev_info(&pdev->dev, "board ready.\n"); - /* Controller should be in simple mode at this point. If it's not, - * It means we're on one of those controllers which doesn't support - * the doorbell reset method and on which the PCI power management reset - * method doesn't work (P800, for example.) - * In those cases, don't try to proceed, as it generally doesn't work. - */ - active_transport = readl(&cfgtable->TransportActive); - if (active_transport & PERFORMANT_MODE) { - dev_warn(&pdev->dev, "Unable to successfully reset controller," - " Ignoring controller.\n"); - rc = -ENODEV; + rc = controller_reset_failed(vaddr); + if (rc < 0) + goto unmap_cfgtable; + if (rc) { + dev_warn(&pdev->dev, "Unable to successfully hard reset " + "controller. Will try soft reset.\n"); + rc = -ENOTSUPP; /* Not expected, but try soft reset later */ + } else { + dev_info(&pdev->dev, "Board ready after hard reset.\n"); } unmap_cfgtable: @@ -4574,11 +4754,12 @@ static __devinit int cciss_init_reset_devices(struct pci_dev *pdev) * due to concerns about shared bbwc between 6402/6404 pair. */ if (rc == -ENOTSUPP) - return 0; /* just try to do the kdump anyhow. */ + return rc; /* just try to do the kdump anyhow. 
*/ if (rc) return -ENODEV; /* Now try to get the controller to respond to a no-op */ + dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n"); for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) { if (cciss_noop(pdev) == 0) break; @@ -4591,6 +4772,148 @@ static __devinit int cciss_init_reset_devices(struct pci_dev *pdev) return 0; } +static __devinit int cciss_allocate_cmd_pool(ctlr_info_t *h) +{ + h->cmd_pool_bits = kmalloc( + DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) * + sizeof(unsigned long), GFP_KERNEL); + h->cmd_pool = pci_alloc_consistent(h->pdev, + h->nr_cmds * sizeof(CommandList_struct), + &(h->cmd_pool_dhandle)); + h->errinfo_pool = pci_alloc_consistent(h->pdev, + h->nr_cmds * sizeof(ErrorInfo_struct), + &(h->errinfo_pool_dhandle)); + if ((h->cmd_pool_bits == NULL) + || (h->cmd_pool == NULL) + || (h->errinfo_pool == NULL)) { + dev_err(&h->pdev->dev, "out of memory"); + return -ENOMEM; + } + return 0; +} + +static __devinit int cciss_allocate_scatterlists(ctlr_info_t *h) +{ + int i; + + /* zero it, so that on free we need not know how many were alloc'ed */ + h->scatter_list = kzalloc(h->max_commands * + sizeof(struct scatterlist *), GFP_KERNEL); + if (!h->scatter_list) + return -ENOMEM; + + for (i = 0; i < h->nr_cmds; i++) { + h->scatter_list[i] = kmalloc(sizeof(struct scatterlist) * + h->maxsgentries, GFP_KERNEL); + if (h->scatter_list[i] == NULL) { + dev_err(&h->pdev->dev, "could not allocate " + "s/g lists\n"); + return -ENOMEM; + } + } + return 0; +} + +static void cciss_free_scatterlists(ctlr_info_t *h) +{ + int i; + + if (h->scatter_list) { + for (i = 0; i < h->nr_cmds; i++) + kfree(h->scatter_list[i]); + kfree(h->scatter_list); + } +} + +static void cciss_free_cmd_pool(ctlr_info_t *h) +{ + kfree(h->cmd_pool_bits); + if (h->cmd_pool) + pci_free_consistent(h->pdev, + h->nr_cmds * sizeof(CommandList_struct), + h->cmd_pool, h->cmd_pool_dhandle); + if (h->errinfo_pool) + pci_free_consistent(h->pdev, + h->nr_cmds * sizeof(ErrorInfo_struct), + h->errinfo_pool, h->errinfo_pool_dhandle); +} + +static int cciss_request_irq(ctlr_info_t *h, + irqreturn_t (*msixhandler)(int, void *), + irqreturn_t (*intxhandler)(int, void *)) +{ + if (h->msix_vector || h->msi_vector) { + if (!request_irq(h->intr[PERF_MODE_INT], msixhandler, + IRQF_DISABLED, h->devname, h)) + return 0; + dev_err(&h->pdev->dev, "Unable to get msi irq %d" + " for %s\n", h->intr[PERF_MODE_INT], + h->devname); + return -1; + } + + if (!request_irq(h->intr[PERF_MODE_INT], intxhandler, + IRQF_DISABLED, h->devname, h)) + return 0; + dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n", + h->intr[PERF_MODE_INT], h->devname); + return -1; +} + +static int __devinit cciss_kdump_soft_reset(ctlr_info_t *h) +{ + if (cciss_send_reset(h, CTLR_LUNID, CCISS_RESET_TYPE_CONTROLLER)) { + dev_warn(&h->pdev->dev, "Resetting array controller failed.\n"); + return -EIO; + } + + dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n"); + if (cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) { + dev_warn(&h->pdev->dev, "Soft reset had no effect.\n"); + return -1; + } + + dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n"); + if (cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) { + dev_warn(&h->pdev->dev, "Board failed to become ready " + "after soft reset.\n"); + return -1; + } + + return 0; +} + +static void cciss_undo_allocations_after_kdump_soft_reset(ctlr_info_t *h) +{ + int ctlr = h->ctlr; + + free_irq(h->intr[PERF_MODE_INT], h); +#ifdef CONFIG_PCI_MSI + if (h->msix_vector) + 
pci_disable_msix(h->pdev); + else if (h->msi_vector) + pci_disable_msi(h->pdev); +#endif /* CONFIG_PCI_MSI */ + cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); + cciss_free_scatterlists(h); + cciss_free_cmd_pool(h); + kfree(h->blockFetchTable); + if (h->reply_pool) + pci_free_consistent(h->pdev, h->max_commands * sizeof(__u64), + h->reply_pool, h->reply_pool_dhandle); + if (h->transtable) + iounmap(h->transtable); + if (h->cfgtable) + iounmap(h->cfgtable); + if (h->vaddr) + iounmap(h->vaddr); + unregister_blkdev(h->major, h->devname); + cciss_destroy_hba_sysfs_entry(h); + pci_release_regions(h->pdev); + kfree(h); + hba[ctlr] = NULL; +} + /* * This is it. Find all the controllers and register them. I really hate * stealing all these major device numbers. @@ -4601,15 +4924,28 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, { int i; int j = 0; - int k = 0; int rc; + int try_soft_reset = 0; int dac, return_code; InquiryData_struct *inq_buff; ctlr_info_t *h; + unsigned long flags; rc = cciss_init_reset_devices(pdev); - if (rc) - return rc; + if (rc) { + if (rc != -ENOTSUPP) + return rc; + /* If the reset fails in a particular way (it has no way to do + * a proper hard reset, so returns -ENOTSUPP) we can try to do + * a soft reset once we get the controller configured up to the + * point that it can accept a command. + */ + try_soft_reset = 1; + rc = 0; + } + +reinit_after_soft_reset: + i = alloc_cciss_hba(pdev); if (i < 0) return -1; @@ -4627,6 +4963,11 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, sprintf(h->devname, "cciss%d", i); h->ctlr = i; + if (cciss_tape_cmds < 2) + cciss_tape_cmds = 2; + if (cciss_tape_cmds > 16) + cciss_tape_cmds = 16; + init_completion(&h->scan_wait); if (cciss_create_hba_sysfs_entry(h)) @@ -4662,62 +5003,20 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, /* make sure the board interrupts are off */ h->access.set_intr_mask(h, CCISS_INTR_OFF); - if (h->msi_vector || h->msix_vector) { - if (request_irq(h->intr[PERF_MODE_INT], - do_cciss_msix_intr, - IRQF_DISABLED, h->devname, h)) { - dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n", - h->intr[PERF_MODE_INT], h->devname); - goto clean2; - } - } else { - if (request_irq(h->intr[PERF_MODE_INT], do_cciss_intx, - IRQF_DISABLED, h->devname, h)) { - dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n", - h->intr[PERF_MODE_INT], h->devname); - goto clean2; - } - } + rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx); + if (rc) + goto clean2; dev_info(&h->pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n", h->devname, pdev->device, pci_name(pdev), h->intr[PERF_MODE_INT], dac ? 
"" : " not"); - h->cmd_pool_bits = - kmalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) - * sizeof(unsigned long), GFP_KERNEL); - h->cmd_pool = (CommandList_struct *) - pci_alloc_consistent(h->pdev, - h->nr_cmds * sizeof(CommandList_struct), - &(h->cmd_pool_dhandle)); - h->errinfo_pool = (ErrorInfo_struct *) - pci_alloc_consistent(h->pdev, - h->nr_cmds * sizeof(ErrorInfo_struct), - &(h->errinfo_pool_dhandle)); - if ((h->cmd_pool_bits == NULL) - || (h->cmd_pool == NULL) - || (h->errinfo_pool == NULL)) { - dev_err(&h->pdev->dev, "out of memory"); + if (cciss_allocate_cmd_pool(h)) goto clean4; - } - /* Need space for temp scatter list */ - h->scatter_list = kmalloc(h->max_commands * - sizeof(struct scatterlist *), - GFP_KERNEL); - if (!h->scatter_list) + if (cciss_allocate_scatterlists(h)) goto clean4; - for (k = 0; k < h->nr_cmds; k++) { - h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) * - h->maxsgentries, - GFP_KERNEL); - if (h->scatter_list[k] == NULL) { - dev_err(&h->pdev->dev, - "could not allocate s/g lists\n"); - goto clean4; - } - } h->cmd_sg_list = cciss_allocate_sg_chain_blocks(h, h->chainsize, h->nr_cmds); if (!h->cmd_sg_list && h->chainsize > 0) @@ -4741,6 +5040,62 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, h->gendisk[j] = NULL; } + /* At this point, the controller is ready to take commands. + * Now, if reset_devices and the hard reset didn't work, try + * the soft reset and see if that works. + */ + if (try_soft_reset) { + + /* This is kind of gross. We may or may not get a completion + * from the soft reset command, and if we do, then the value + * from the fifo may or may not be valid. So, we wait 10 secs + * after the reset throwing away any completions we get during + * that time. Unregister the interrupt handler and register + * fake ones to scoop up any residual completions. + */ + spin_lock_irqsave(&h->lock, flags); + h->access.set_intr_mask(h, CCISS_INTR_OFF); + spin_unlock_irqrestore(&h->lock, flags); + free_irq(h->intr[PERF_MODE_INT], h); + rc = cciss_request_irq(h, cciss_msix_discard_completions, + cciss_intx_discard_completions); + if (rc) { + dev_warn(&h->pdev->dev, "Failed to request_irq after " + "soft reset.\n"); + goto clean4; + } + + rc = cciss_kdump_soft_reset(h); + if (rc) { + dev_warn(&h->pdev->dev, "Soft reset failed.\n"); + goto clean4; + } + + dev_info(&h->pdev->dev, "Board READY.\n"); + dev_info(&h->pdev->dev, + "Waiting for stale completions to drain.\n"); + h->access.set_intr_mask(h, CCISS_INTR_ON); + msleep(10000); + h->access.set_intr_mask(h, CCISS_INTR_OFF); + + rc = controller_reset_failed(h->cfgtable); + if (rc) + dev_info(&h->pdev->dev, + "Soft reset appears to have failed.\n"); + + /* since the controller's reset, we have to go back and re-init + * everything. Easiest to just forget what we've done and do it + * all over again. 
+ */ + cciss_undo_allocations_after_kdump_soft_reset(h); + try_soft_reset = 0; + if (rc) + /* don't go to clean4, we already unallocated */ + return -ENODEV; + + goto reinit_after_soft_reset; + } + cciss_scsi_setup(h); /* Turn the interrupts on so we can service requests */ @@ -4775,21 +5130,9 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, return 1; clean4: - kfree(h->cmd_pool_bits); - /* Free up sg elements */ - for (k-- ; k >= 0; k--) - kfree(h->scatter_list[k]); - kfree(h->scatter_list); + cciss_free_cmd_pool(h); + cciss_free_scatterlists(h); cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); - if (h->cmd_pool) - pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(CommandList_struct), - h->cmd_pool, h->cmd_pool_dhandle); - if (h->errinfo_pool) - pci_free_consistent(h->pdev, - h->nr_cmds * sizeof(ErrorInfo_struct), - h->errinfo_pool, - h->errinfo_pool_dhandle); free_irq(h->intr[PERF_MODE_INT], h); clean2: unregister_blkdev(h->major, h->devname); @@ -4887,16 +5230,16 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev) iounmap(h->cfgtable); iounmap(h->vaddr); - pci_free_consistent(h->pdev, h->nr_cmds * sizeof(CommandList_struct), - h->cmd_pool, h->cmd_pool_dhandle); - pci_free_consistent(h->pdev, h->nr_cmds * sizeof(ErrorInfo_struct), - h->errinfo_pool, h->errinfo_pool_dhandle); - kfree(h->cmd_pool_bits); + cciss_free_cmd_pool(h); /* Free up sg elements */ for (j = 0; j < h->nr_cmds; j++) kfree(h->scatter_list[j]); kfree(h->scatter_list); cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); + kfree(h->blockFetchTable); + if (h->reply_pool) + pci_free_consistent(h->pdev, h->max_commands * sizeof(__u64), + h->reply_pool, h->reply_pool_dhandle); /* * Deliberately omit pci_disable_device(): it does something nasty to * Smart Array controllers that pci_enable_device does not undo diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h index 554bbd907d1..16b4d58d84d 100644 --- a/drivers/block/cciss.h +++ b/drivers/block/cciss.h @@ -200,7 +200,7 @@ struct ctlr_info * the above. 
*/ #define CCISS_BOARD_READY_WAIT_SECS (120) -#define CCISS_BOARD_NOT_READY_WAIT_SECS (10) +#define CCISS_BOARD_NOT_READY_WAIT_SECS (100) #define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100) #define CCISS_BOARD_READY_ITERATIONS \ ((CCISS_BOARD_READY_WAIT_SECS * 1000) / \ @@ -209,8 +209,9 @@ struct ctlr_info ((CCISS_BOARD_NOT_READY_WAIT_SECS * 1000) / \ CCISS_BOARD_READY_POLL_INTERVAL_MSECS) #define CCISS_POST_RESET_PAUSE_MSECS (3000) -#define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (1000) +#define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (4000) #define CCISS_POST_RESET_NOOP_RETRIES (12) +#define CCISS_POST_RESET_NOOP_TIMEOUT_MSECS (10000) /* Send the command to the hardware @@ -239,11 +240,13 @@ static void SA5_intr_mask(ctlr_info_t *h, unsigned long val) { /* Turn interrupts on */ h->interrupts_enabled = 1; writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); } else /* Turn them off */ { h->interrupts_enabled = 0; writel( SA5_INTR_OFF, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); } } /* @@ -257,11 +260,13 @@ static void SA5B_intr_mask(ctlr_info_t *h, unsigned long val) { /* Turn interrupts on */ h->interrupts_enabled = 1; writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); } else /* Turn them off */ { h->interrupts_enabled = 0; writel( SA5B_INTR_OFF, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); } } @@ -271,10 +276,12 @@ static void SA5_performant_intr_mask(ctlr_info_t *h, unsigned long val) if (val) { /* turn on interrupts */ h->interrupts_enabled = 1; writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); } else { h->interrupts_enabled = 0; writel(SA5_PERF_INTR_OFF, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); + (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); } } diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h index cd441bef031..d9be6b4d49a 100644 --- a/drivers/block/cciss_cmd.h +++ b/drivers/block/cciss_cmd.h @@ -53,6 +53,7 @@ #define CFGTBL_ChangeReq 0x00000001l #define CFGTBL_AccCmds 0x00000001l #define DOORBELL_CTLR_RESET 0x00000004l +#define DOORBELL_CTLR_RESET2 0x00000020l #define CFGTBL_Trans_Simple 0x00000002l #define CFGTBL_Trans_Performant 0x00000004l @@ -142,6 +143,14 @@ typedef struct _ReadCapdata_struct_16 #define BMIC_CACHE_FLUSH 0xc2 #define CCISS_CACHE_FLUSH 0x01 /* C2 was already being used by CCISS */ +#define CCISS_ABORT_MSG 0x00 +#define CCISS_RESET_MSG 0x01 +#define CCISS_RESET_TYPE_CONTROLLER 0x00 +#define CCISS_RESET_TYPE_BUS 0x01 +#define CCISS_RESET_TYPE_TARGET 0x03 +#define CCISS_RESET_TYPE_LUN 0x04 +#define CCISS_NOOP_MSG 0x03 + /* Command List Structure */ #define CTLR_LUNID "\0\0\0\0\0\0\0\0" @@ -235,6 +244,8 @@ typedef struct _CfgTable_struct { u8 reserved[0x78 - 0x58]; u32 misc_fw_support; /* offset 0x78 */ #define MISC_FW_DOORBELL_RESET (0x02) +#define MISC_FW_DOORBELL_RESET2 (0x10) + u8 driver_version[32]; } CfgTable_struct; struct TransTable_struct { diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c index df793803f5a..696100241a6 100644 --- a/drivers/block/cciss_scsi.c +++ b/drivers/block/cciss_scsi.c @@ -84,7 +84,6 @@ static struct scsi_host_template cciss_driver_template = { .proc_name = "cciss", .proc_info = cciss_scsi_proc_info, .queuecommand = cciss_scsi_queue_command, - .can_queue = SCSI_CCISS_CAN_QUEUE, .this_id = 7, .cmd_per_lun = 1, .use_clustering = 
DISABLE_CLUSTERING, @@ -108,16 +107,13 @@ struct cciss_scsi_cmd_stack_elem_t { #pragma pack() -#define CMD_STACK_SIZE (SCSI_CCISS_CAN_QUEUE * \ - CCISS_MAX_SCSI_DEVS_PER_HBA + 2) - // plus two for init time usage - #pragma pack(1) struct cciss_scsi_cmd_stack_t { struct cciss_scsi_cmd_stack_elem_t *pool; - struct cciss_scsi_cmd_stack_elem_t *elem[CMD_STACK_SIZE]; + struct cciss_scsi_cmd_stack_elem_t **elem; dma_addr_t cmd_pool_handle; int top; + int nelems; }; #pragma pack() @@ -191,7 +187,7 @@ scsi_cmd_free(ctlr_info_t *h, CommandList_struct *c) sa = h->scsi_ctlr; stk = &sa->cmd_stack; stk->top++; - if (stk->top >= CMD_STACK_SIZE) { + if (stk->top >= stk->nelems) { dev_err(&h->pdev->dev, "scsi_cmd_free called too many times.\n"); BUG(); @@ -206,13 +202,14 @@ scsi_cmd_stack_setup(ctlr_info_t *h, struct cciss_scsi_adapter_data_t *sa) struct cciss_scsi_cmd_stack_t *stk; size_t size; + stk = &sa->cmd_stack; + stk->nelems = cciss_tape_cmds + 2; sa->cmd_sg_list = cciss_allocate_sg_chain_blocks(h, - h->chainsize, CMD_STACK_SIZE); + h->chainsize, stk->nelems); if (!sa->cmd_sg_list && h->chainsize > 0) return -ENOMEM; - stk = &sa->cmd_stack; - size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE; + size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * stk->nelems; /* Check alignment, see cciss_cmd.h near CommandList_struct def. */ BUILD_BUG_ON((sizeof(*stk->pool) % COMMANDLIST_ALIGNMENT) != 0); @@ -221,18 +218,23 @@ scsi_cmd_stack_setup(ctlr_info_t *h, struct cciss_scsi_adapter_data_t *sa) pci_alloc_consistent(h->pdev, size, &stk->cmd_pool_handle); if (stk->pool == NULL) { - cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE); + cciss_free_sg_chain_blocks(sa->cmd_sg_list, stk->nelems); sa->cmd_sg_list = NULL; return -ENOMEM; } - - for (i=0; i<CMD_STACK_SIZE; i++) { + stk->elem = kmalloc(sizeof(stk->elem[0]) * stk->nelems, GFP_KERNEL); + if (!stk->elem) { + pci_free_consistent(h->pdev, size, stk->pool, + stk->cmd_pool_handle); + return -1; + } + for (i = 0; i < stk->nelems; i++) { stk->elem[i] = &stk->pool[i]; stk->elem[i]->busaddr = (__u32) (stk->cmd_pool_handle + (sizeof(struct cciss_scsi_cmd_stack_elem_t) * i)); stk->elem[i]->cmdindex = i; } - stk->top = CMD_STACK_SIZE-1; + stk->top = stk->nelems-1; return 0; } @@ -245,16 +247,18 @@ scsi_cmd_stack_free(ctlr_info_t *h) sa = h->scsi_ctlr; stk = &sa->cmd_stack; - if (stk->top != CMD_STACK_SIZE-1) { + if (stk->top != stk->nelems-1) { dev_warn(&h->pdev->dev, "bug: %d scsi commands are still outstanding.\n", - CMD_STACK_SIZE - stk->top); + stk->nelems - stk->top); } - size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE; + size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * stk->nelems; pci_free_consistent(h->pdev, size, stk->pool, stk->cmd_pool_handle); stk->pool = NULL; - cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE); + cciss_free_sg_chain_blocks(sa->cmd_sg_list, stk->nelems); + kfree(stk->elem); + stk->elem = NULL; } #if 0 @@ -859,6 +863,7 @@ cciss_scsi_detect(ctlr_info_t *h) sh->io_port = 0; // good enough? FIXME, sh->n_io_port = 0; // I don't think we use these two... sh->this_id = SELF_SCSI_ID; + sh->can_queue = cciss_tape_cmds; sh->sg_tablesize = h->maxsgentries; sh->max_cmd_len = MAX_COMMAND_SIZE; diff --git a/drivers/block/cciss_scsi.h b/drivers/block/cciss_scsi.h index 6d5822fe851..e71d986727c 100644 --- a/drivers/block/cciss_scsi.h +++ b/drivers/block/cciss_scsi.h @@ -36,13 +36,9 @@ addressible natively, and may in fact turn out to be not scsi at all. 
*/ -#define SCSI_CCISS_CAN_QUEUE 2 /* -Note, cmd_per_lun could give us some trouble, so I'm setting it very low. -Likewise, SCSI_CCISS_CAN_QUEUE is set very conservatively. - If the upper scsi layer tries to track how many commands we have outstanding, it will be operating under the misapprehension that it is the only one sending us requests. We also have the block interface, diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c index c6828b68d77..09ef9a878ef 100644 --- a/drivers/block/drbd/drbd_actlog.c +++ b/drivers/block/drbd/drbd_actlog.c @@ -28,7 +28,7 @@ #include "drbd_int.h" #include "drbd_wrappers.h" -/* We maintain a trivial check sum in our on disk activity log. +/* We maintain a trivial checksum in our on disk activity log. * With that we can ensure correct operation even when the storage * device might do a partial (last) sector write while losing power. */ diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index 76210ba401a..f440a02dfdb 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -74,7 +74,7 @@ * as we are "attached" to a local disk, which at 32 GiB for 1PiB storage * seems excessive. * - * We plan to reduce the amount of in-core bitmap pages by pageing them in + * We plan to reduce the amount of in-core bitmap pages by paging them in * and out against their on-disk location as necessary, but need to make * sure we don't cause too much meta data IO, and must not deadlock in * tight memory situations. This needs some more work. @@ -200,7 +200,7 @@ void drbd_bm_unlock(struct drbd_conf *mdev) * we if bits have been cleared since last IO. */ #define BM_PAGE_LAZY_WRITEOUT 28 -/* store_page_idx uses non-atomic assingment. It is only used directly after +/* store_page_idx uses non-atomic assignment. It is only used directly after * allocating the page. All other bm_set_page_* and bm_clear_page_* need to * use atomic bit manipulation, as set_out_of_sync (and therefore bitmap * changes) may happen from various contexts, and wait_on_bit/wake_up_bit @@ -318,7 +318,7 @@ static void bm_unmap(unsigned long *p_addr) /* word offset from start of bitmap to word number _in_page_ * modulo longs per page #define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long)) - hm, well, Philipp thinks gcc might not optimze the % into & (... - 1) + hm, well, Philipp thinks gcc might not optimize the % into & (... - 1) so do it explicitly: */ #define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1)) diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index d871b14ed5a..ef2ceed3be4 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -700,7 +700,7 @@ struct drbd_request { * see drbd_endio_pri(). 
*/ struct bio *private_bio; - struct hlist_node colision; + struct hlist_node collision; sector_t sector; unsigned int size; unsigned int epoch; /* barrier_nr */ @@ -766,7 +766,7 @@ struct digest_info { struct drbd_epoch_entry { struct drbd_work w; - struct hlist_node colision; + struct hlist_node collision; struct drbd_epoch *epoch; /* for writes */ struct drbd_conf *mdev; struct page *pages; @@ -1129,6 +1129,8 @@ struct drbd_conf { int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */ int rs_planed; /* resync sectors already planned */ atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */ + int peer_max_bio_size; + int local_max_bio_size; }; static inline struct drbd_conf *minor_to_mdev(unsigned int minor) @@ -1218,8 +1220,6 @@ extern void drbd_free_resources(struct drbd_conf *mdev); extern void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr, unsigned int set_size); extern void tl_clear(struct drbd_conf *mdev); -enum drbd_req_event; -extern void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what); extern void _tl_add_barrier(struct drbd_conf *, struct drbd_tl_epoch *); extern void drbd_free_sock(struct drbd_conf *mdev); extern int drbd_send(struct drbd_conf *mdev, struct socket *sock, @@ -1434,6 +1434,7 @@ struct bm_extent { * hash table. */ #define HT_SHIFT 8 #define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT)) +#define DRBD_MAX_BIO_SIZE_SAFE (1 << 12) /* Works always = 4k */ #define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */ @@ -1518,9 +1519,9 @@ extern void drbd_resume_io(struct drbd_conf *mdev); extern char *ppsize(char *buf, unsigned long long size); extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int); enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 }; -extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local); +extern enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local); extern void resync_after_online_grow(struct drbd_conf *); -extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local); +extern void drbd_reconsider_max_bio_size(struct drbd_conf *mdev); extern enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force); @@ -1828,6 +1829,8 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, if (!forcedetach) { if (__ratelimit(&drbd_ratelimit_state)) dev_err(DEV, "Local IO failed in %s.\n", where); + if (mdev->state.disk > D_INCONSISTENT) + _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_HARD, NULL); break; } /* NOTE fall through to detach case if forcedetach set */ @@ -2153,6 +2156,10 @@ static inline int get_net_conf(struct drbd_conf *mdev) static inline void put_ldev(struct drbd_conf *mdev) { int i = atomic_dec_return(&mdev->local_cnt); + + /* This may be called from some endio handler, + * so we must not sleep here. 
*/ + __release(local); D_ASSERT(i >= 0); if (i == 0) { diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 5b525c179f3..0358e55356c 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -745,6 +745,9 @@ is_valid_state(struct drbd_conf *mdev, union drbd_state ns) mdev->agreed_pro_version < 88) rv = SS_NOT_SUPPORTED; + else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN) + rv = SS_CONNECTED_OUTDATES; + return rv; } @@ -1565,6 +1568,10 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, put_ldev(mdev); } + /* Notify peer that I had a local IO error, and did not detached.. */ + if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT) + drbd_send_state(mdev); + /* Disks got bigger while they were detached */ if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING && test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) { @@ -2064,7 +2071,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl { struct p_sizes p; sector_t d_size, u_size; - int q_order_type; + int q_order_type, max_bio_size; int ok; if (get_ldev_if_state(mdev, D_NEGOTIATING)) { @@ -2072,17 +2079,20 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl d_size = drbd_get_max_capacity(mdev->ldev); u_size = mdev->ldev->dc.disk_size; q_order_type = drbd_queue_order_type(mdev); + max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9; + max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE); put_ldev(mdev); } else { d_size = 0; u_size = 0; q_order_type = QUEUE_ORDERED_NONE; + max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */ } p.d_size = cpu_to_be64(d_size); p.u_size = cpu_to_be64(u_size); p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev)); - p.max_bio_size = cpu_to_be32(queue_max_hw_sectors(mdev->rq_queue) << 9); + p.max_bio_size = cpu_to_be32(max_bio_size); p.queue_order_type = cpu_to_be16(q_order_type); p.dds_flags = cpu_to_be16(flags); @@ -2722,7 +2732,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req) /* double check digest, sometimes buffers have been modified in flight. */ if (dgs > 0 && dgs <= 64) { - /* 64 byte, 512 bit, is the larges digest size + /* 64 byte, 512 bit, is the largest digest size * currently supported in kernel crypto. */ unsigned char digest[64]; drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, digest); @@ -3041,6 +3051,8 @@ void drbd_init_set_defaults(struct drbd_conf *mdev) mdev->agreed_pro_version = PRO_VERSION_MAX; mdev->write_ordering = WO_bdev_flush; mdev->resync_wenr = LC_FREE; + mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE; + mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE; } void drbd_mdev_cleanup(struct drbd_conf *mdev) @@ -3275,7 +3287,7 @@ static void drbd_delete_device(unsigned int minor) drbd_release_ee_lists(mdev); - /* should be free'd on disconnect? */ + /* should be freed on disconnect? 
*/ kfree(mdev->ee_hash); /* mdev->ee_hash_s = 0; @@ -3415,7 +3427,9 @@ struct drbd_conf *drbd_new_device(unsigned int minor) q->backing_dev_info.congested_data = mdev; blk_queue_make_request(q, drbd_make_request); - blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE >> 9); + /* Setting the max_hw_sectors to an odd value of 8kibyte here + This triggers a max_bio_size message upon first attach or connect */ + blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8); blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); blk_queue_merge_bvec(q, drbd_merge_bvec); q->queue_lock = &mdev->req_lock; @@ -3627,7 +3641,8 @@ struct meta_data_on_disk { /* `-- act_log->nr_elements <-- sync_conf.al_extents */ u32 bm_offset; /* offset to the bitmap, from here */ u32 bm_bytes_per_bit; /* BM_BLOCK_SIZE */ - u32 reserved_u32[4]; + u32 la_peer_max_bio_size; /* last peer max_bio_size */ + u32 reserved_u32[3]; } __packed; @@ -3668,6 +3683,7 @@ void drbd_md_sync(struct drbd_conf *mdev) buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid); buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset); + buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size); D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset); sector = mdev->ldev->md.md_offset; @@ -3751,6 +3767,15 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents); bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid); + spin_lock_irq(&mdev->req_lock); + if (mdev->state.conn < C_CONNECTED) { + int peer; + peer = be32_to_cpu(buffer->la_peer_max_bio_size); + peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE); + mdev->peer_max_bio_size = peer; + } + spin_unlock_irq(&mdev->req_lock); + if (mdev->sync_conf.al_extents < 7) mdev->sync_conf.al_extents = 127; diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 03b29f78a37..515bcd948a4 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -272,9 +272,28 @@ static int _try_outdate_peer_async(void *data) { struct drbd_conf *mdev = (struct drbd_conf *)data; enum drbd_disk_state nps; + union drbd_state ns; nps = drbd_try_outdate_peer(mdev); - drbd_request_state(mdev, NS(pdsk, nps)); + + /* Not using + drbd_request_state(mdev, NS(pdsk, nps)); + here, because we might were able to re-establish the connection + in the meantime. This can only partially be solved in the state's + engine is_valid_state() and is_valid_state_transition() + functions. + + nps can be D_INCONSISTENT, D_OUTDATED or D_UNKNOWN. + pdsk == D_INCONSISTENT while conn >= C_CONNECTED is valid, + therefore we have to have the pre state change check here. + */ + spin_lock_irq(&mdev->req_lock); + ns = mdev->state; + if (ns.conn < C_WF_REPORT_PARAMS) { + ns.pdsk = nps; + _drbd_set_state(mdev, ns, CS_VERBOSE, NULL); + } + spin_unlock_irq(&mdev->req_lock); return 0; } @@ -577,7 +596,7 @@ void drbd_resume_io(struct drbd_conf *mdev) * Returns 0 on success, negative return values indicate errors. * You should call drbd_md_sync() after calling this function. 
*/ -enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local) +enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local) { sector_t prev_first_sect, prev_size; /* previous meta location */ sector_t la_size; @@ -773,30 +792,78 @@ static int drbd_check_al_size(struct drbd_conf *mdev) return 0; } -void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size) __must_hold(local) +static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size) { struct request_queue * const q = mdev->rq_queue; - struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue; - int max_segments = mdev->ldev->dc.max_bio_bvecs; - int max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9); + int max_hw_sectors = max_bio_size >> 9; + int max_segments = 0; + + if (get_ldev_if_state(mdev, D_ATTACHING)) { + struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue; + + max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9); + max_segments = mdev->ldev->dc.max_bio_bvecs; + put_ldev(mdev); + } blk_queue_logical_block_size(q, 512); blk_queue_max_hw_sectors(q, max_hw_sectors); /* This is the workaround for "bio would need to, but cannot, be split" */ blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1); - blk_queue_stack_limits(q, b); - dev_info(DEV, "max BIO size = %u\n", queue_max_hw_sectors(q) << 9); + if (get_ldev_if_state(mdev, D_ATTACHING)) { + struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue; + + blk_queue_stack_limits(q, b); - if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) { - dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n", - q->backing_dev_info.ra_pages, - b->backing_dev_info.ra_pages); - q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages; + if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) { + dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n", + q->backing_dev_info.ra_pages, + b->backing_dev_info.ra_pages); + q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages; + } + put_ldev(mdev); } } +void drbd_reconsider_max_bio_size(struct drbd_conf *mdev) +{ + int now, new, local, peer; + + now = queue_max_hw_sectors(mdev->rq_queue) << 9; + local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */ + peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */ + + if (get_ldev_if_state(mdev, D_ATTACHING)) { + local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9; + mdev->local_max_bio_size = local; + put_ldev(mdev); + } + + /* We may ignore peer limits if the peer is modern enough. 
+ Because new from 8.3.8 onwards the peer can use multiple + BIOs for a single peer_request */ + if (mdev->state.conn >= C_CONNECTED) { + if (mdev->agreed_pro_version < 94) + peer = mdev->peer_max_bio_size; + else if (mdev->agreed_pro_version == 94) + peer = DRBD_MAX_SIZE_H80_PACKET; + else /* drbd 8.3.8 onwards */ + peer = DRBD_MAX_BIO_SIZE; + } + + new = min_t(int, local, peer); + + if (mdev->state.role == R_PRIMARY && new < now) + dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now); + + if (new != now) + dev_info(DEV, "max BIO size = %u\n", new); + + drbd_setup_queue_param(mdev, new); +} + /* serialize deconfig (worker exiting, doing cleanup) * and reconfig (drbdsetup disk, drbdsetup net) * @@ -865,7 +932,6 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp struct block_device *bdev; struct lru_cache *resync_lru = NULL; union drbd_state ns, os; - unsigned int max_bio_size; enum drbd_state_rv rv; int cp_discovered = 0; int logical_block_size; @@ -1117,20 +1183,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp mdev->read_cnt = 0; mdev->writ_cnt = 0; - max_bio_size = DRBD_MAX_BIO_SIZE; - if (mdev->state.conn == C_CONNECTED) { - /* We are Primary, Connected, and now attach a new local - * backing store. We must not increase the user visible maximum - * bio size on this device to something the peer may not be - * able to handle. */ - if (mdev->agreed_pro_version < 94) - max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9; - else if (mdev->agreed_pro_version == 94) - max_bio_size = DRBD_MAX_SIZE_H80_PACKET; - /* else: drbd 8.3.9 and later, stay with default */ - } - - drbd_setup_queue_param(mdev, max_bio_size); + drbd_reconsider_max_bio_size(mdev); /* If I am currently not R_PRIMARY, * but meta data primary indicator is set, @@ -1152,7 +1205,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND)) set_bit(USE_DEGR_WFC_T, &mdev->flags); - dd = drbd_determin_dev_size(mdev, 0); + dd = drbd_determine_dev_size(mdev, 0); if (dd == dev_size_error) { retcode = ERR_NOMEM_BITMAP; goto force_diskless_dec; @@ -1281,11 +1334,19 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, struct drbd_nl_cfg_reply *reply) { + enum drbd_ret_code retcode; + int ret; drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */ - reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS)); - if (mdev->state.disk == D_DISKLESS) - wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt)); + retcode = drbd_request_state(mdev, NS(disk, D_FAILED)); + /* D_FAILED will transition to DISKLESS. */ + ret = wait_event_interruptible(mdev->misc_wait, + mdev->state.disk != D_FAILED); drbd_resume_io(mdev); + if ((int)retcode == (int)SS_IS_DISKLESS) + retcode = SS_NOTHING_TO_DO; + if (ret) + retcode = ERR_INTR; + reply->ret_code = retcode; return 0; } @@ -1658,7 +1719,7 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp, mdev->ldev->dc.disk_size = (sector_t)rs.resize_size; ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? 
DDSF_NO_RESYNC : 0); - dd = drbd_determin_dev_size(mdev, ddsf); + dd = drbd_determine_dev_size(mdev, ddsf); drbd_md_sync(mdev); put_ldev(mdev); if (dd == dev_size_error) { diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index fd26666c0b0..25d32c5aa50 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -333,7 +333,7 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev, if (!page) goto fail; - INIT_HLIST_NODE(&e->colision); + INIT_HLIST_NODE(&e->collision); e->epoch = NULL; e->mdev = mdev; e->pages = page; @@ -356,7 +356,7 @@ void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int i kfree(e->digest); drbd_pp_free(mdev, e->pages, is_net); D_ASSERT(atomic_read(&e->pending_bios) == 0); - D_ASSERT(hlist_unhashed(&e->colision)); + D_ASSERT(hlist_unhashed(&e->collision)); mempool_free(e, drbd_ee_mempool); } @@ -787,7 +787,7 @@ static int drbd_connect(struct drbd_conf *mdev) } if (sock && msock) { - schedule_timeout_interruptible(HZ / 10); + schedule_timeout_interruptible(mdev->net_conf->ping_timeo*HZ/10); ok = drbd_socket_okay(mdev, &sock); ok = drbd_socket_okay(mdev, &msock) && ok; if (ok) @@ -899,11 +899,6 @@ retry: drbd_thread_start(&mdev->asender); - if (mdev->agreed_pro_version < 95 && get_ldev(mdev)) { - drbd_setup_queue_param(mdev, DRBD_MAX_SIZE_H80_PACKET); - put_ldev(mdev); - } - if (drbd_send_protocol(mdev) == -1) return -1; drbd_send_sync_param(mdev, &mdev->sync_conf); @@ -1418,7 +1413,7 @@ static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int u sector_t sector = e->sector; int ok; - D_ASSERT(hlist_unhashed(&e->colision)); + D_ASSERT(hlist_unhashed(&e->collision)); if (likely((e->flags & EE_WAS_ERROR) == 0)) { drbd_set_in_sync(mdev, sector, e->size); @@ -1487,7 +1482,7 @@ static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsi return false; } - /* hlist_del(&req->colision) is done in _req_may_be_done, to avoid + /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid * special casing it there for the various failure cases. * still no race with drbd_fail_pending_reads */ ok = recv_dless_read(mdev, req, sector, data_size); @@ -1558,11 +1553,11 @@ static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel) * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */ if (mdev->net_conf->two_primaries) { spin_lock_irq(&mdev->req_lock); - D_ASSERT(!hlist_unhashed(&e->colision)); - hlist_del_init(&e->colision); + D_ASSERT(!hlist_unhashed(&e->collision)); + hlist_del_init(&e->collision); spin_unlock_irq(&mdev->req_lock); } else { - D_ASSERT(hlist_unhashed(&e->colision)); + D_ASSERT(hlist_unhashed(&e->collision)); } drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? 
EV_CLEANUP : 0)); @@ -1579,8 +1574,8 @@ static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int u ok = drbd_send_ack(mdev, P_DISCARD_ACK, e); spin_lock_irq(&mdev->req_lock); - D_ASSERT(!hlist_unhashed(&e->colision)); - hlist_del_init(&e->colision); + D_ASSERT(!hlist_unhashed(&e->collision)); + hlist_del_init(&e->collision); spin_unlock_irq(&mdev->req_lock); dec_unacked(mdev); @@ -1755,7 +1750,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned spin_lock_irq(&mdev->req_lock); - hlist_add_head(&e->colision, ee_hash_slot(mdev, sector)); + hlist_add_head(&e->collision, ee_hash_slot(mdev, sector)); #define OVERLAPS overlaps(i->sector, i->size, sector, size) slot = tl_hash_slot(mdev, sector); @@ -1765,7 +1760,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int have_conflict = 0; prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE); - hlist_for_each_entry(i, n, slot, colision) { + hlist_for_each_entry(i, n, slot, collision) { if (OVERLAPS) { /* only ALERT on first iteration, * we may be woken up early... */ @@ -1804,7 +1799,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned } if (signal_pending(current)) { - hlist_del_init(&e->colision); + hlist_del_init(&e->collision); spin_unlock_irq(&mdev->req_lock); @@ -1862,7 +1857,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned dev_err(DEV, "submit failed, triggering re-connect\n"); spin_lock_irq(&mdev->req_lock); list_del(&e->w.list); - hlist_del_init(&e->colision); + hlist_del_init(&e->collision); spin_unlock_irq(&mdev->req_lock); if (e->flags & EE_CALL_AL_COMPLETE_IO) drbd_al_complete_io(mdev, e->sector); @@ -2916,12 +2911,6 @@ disconnect: return false; } -static void drbd_setup_order_type(struct drbd_conf *mdev, int peer) -{ - /* sorry, we currently have no working implementation - * of distributed TCQ */ -} - /* warn if the arguments differ by more than 12.5% */ static void warn_if_differ_considerably(struct drbd_conf *mdev, const char *s, sector_t a, sector_t b) @@ -2939,7 +2928,6 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned { struct p_sizes *p = &mdev->data.rbuf.sizes; enum determine_dev_size dd = unchanged; - unsigned int max_bio_size; sector_t p_size, p_usize, my_usize; int ldsc = 0; /* local disk size changed */ enum dds_flags ddsf; @@ -2994,7 +2982,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned ddsf = be16_to_cpu(p->dds_flags); if (get_ldev(mdev)) { - dd = drbd_determin_dev_size(mdev, ddsf); + dd = drbd_determine_dev_size(mdev, ddsf); put_ldev(mdev); if (dd == dev_size_error) return false; @@ -3004,23 +2992,15 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned drbd_set_my_capacity(mdev, p_size); } + mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size); + drbd_reconsider_max_bio_size(mdev); + if (get_ldev(mdev)) { if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) { mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev); ldsc = 1; } - if (mdev->agreed_pro_version < 94) - max_bio_size = be32_to_cpu(p->max_bio_size); - else if (mdev->agreed_pro_version == 94) - max_bio_size = DRBD_MAX_SIZE_H80_PACKET; - else /* drbd 8.3.8 onwards */ - max_bio_size = DRBD_MAX_BIO_SIZE; - - if (max_bio_size != queue_max_hw_sectors(mdev->rq_queue) << 9) - drbd_setup_queue_param(mdev, max_bio_size); - - drbd_setup_order_type(mdev, 
be16_to_cpu(p->queue_order_type)); put_ldev(mdev); } @@ -4275,7 +4255,7 @@ static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev, struct hlist_node *n; struct drbd_request *req; - hlist_for_each_entry(req, n, slot, colision) { + hlist_for_each_entry(req, n, slot, collision) { if ((unsigned long)req == (unsigned long)id) { if (req->sector != sector) { dev_err(DEV, "_ack_id_to_req: found req %p but it has " @@ -4554,6 +4534,7 @@ int drbd_asender(struct drbd_thread *thi) int received = 0; int expect = sizeof(struct p_header80); int empty; + int ping_timeout_active = 0; sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev)); @@ -4566,6 +4547,7 @@ int drbd_asender(struct drbd_thread *thi) ERR_IF(!drbd_send_ping(mdev)) goto reconnect; mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*HZ/10; + ping_timeout_active = 1; } /* conditionally cork; @@ -4620,8 +4602,7 @@ int drbd_asender(struct drbd_thread *thi) dev_err(DEV, "meta connection shut down by peer.\n"); goto reconnect; } else if (rv == -EAGAIN) { - if (mdev->meta.socket->sk->sk_rcvtimeo == - mdev->net_conf->ping_timeo*HZ/10) { + if (ping_timeout_active) { dev_err(DEV, "PingAck did not arrive in time.\n"); goto reconnect; } @@ -4660,6 +4641,11 @@ int drbd_asender(struct drbd_thread *thi) if (!cmd->process(mdev, h)) goto reconnect; + /* the idle_timeout (ping-int) + * has been restored in got_PingAck() */ + if (cmd == get_asender_cmd(P_PING_ACK)) + ping_timeout_active = 0; + buf = h; received = 0; expect = sizeof(struct p_header80); diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 5c0c8be1bb0..3424d675b76 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -163,7 +163,7 @@ static void _about_to_complete_local_write(struct drbd_conf *mdev, * they must have been failed on the spot */ #define OVERLAPS overlaps(sector, size, i->sector, i->size) slot = tl_hash_slot(mdev, sector); - hlist_for_each_entry(i, n, slot, colision) { + hlist_for_each_entry(i, n, slot, collision) { if (OVERLAPS) { dev_alert(DEV, "LOGIC BUG: completed: %p %llus +%u; " "other: %p %llus +%u\n", @@ -187,7 +187,7 @@ static void _about_to_complete_local_write(struct drbd_conf *mdev, #undef OVERLAPS #define OVERLAPS overlaps(sector, size, e->sector, e->size) slot = ee_hash_slot(mdev, req->sector); - hlist_for_each_entry(e, n, slot, colision) { + hlist_for_each_entry(e, n, slot, collision) { if (OVERLAPS) { wake_up(&mdev->misc_wait); break; @@ -260,8 +260,8 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m) /* remove the request from the conflict detection * respective block_id verification hash */ - if (!hlist_unhashed(&req->colision)) - hlist_del(&req->colision); + if (!hlist_unhashed(&req->collision)) + hlist_del(&req->collision); else D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0); @@ -329,7 +329,7 @@ static int _req_conflicts(struct drbd_request *req) struct hlist_node *n; struct hlist_head *slot; - D_ASSERT(hlist_unhashed(&req->colision)); + D_ASSERT(hlist_unhashed(&req->collision)); if (!get_net_conf(mdev)) return 0; @@ -341,7 +341,7 @@ static int _req_conflicts(struct drbd_request *req) #define OVERLAPS overlaps(i->sector, i->size, sector, size) slot = tl_hash_slot(mdev, sector); - hlist_for_each_entry(i, n, slot, colision) { + hlist_for_each_entry(i, n, slot, collision) { if (OVERLAPS) { dev_alert(DEV, "%s[%u] Concurrent local write detected! 
" "[DISCARD L] new: %llus +%u; " @@ -359,7 +359,7 @@ static int _req_conflicts(struct drbd_request *req) #undef OVERLAPS #define OVERLAPS overlaps(e->sector, e->size, sector, size) slot = ee_hash_slot(mdev, sector); - hlist_for_each_entry(e, n, slot, colision) { + hlist_for_each_entry(e, n, slot, collision) { if (OVERLAPS) { dev_alert(DEV, "%s[%u] Concurrent remote write detected!" " [DISCARD L] new: %llus +%u; " @@ -491,7 +491,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, /* so we can verify the handle in the answer packet * corresponding hlist_del is in _req_may_be_done() */ - hlist_add_head(&req->colision, ar_hash_slot(mdev, req->sector)); + hlist_add_head(&req->collision, ar_hash_slot(mdev, req->sector)); set_bit(UNPLUG_REMOTE, &mdev->flags); @@ -507,7 +507,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, /* assert something? */ /* from drbd_make_request_common only */ - hlist_add_head(&req->colision, tl_hash_slot(mdev, req->sector)); + hlist_add_head(&req->collision, tl_hash_slot(mdev, req->sector)); /* corresponding hlist_del is in _req_may_be_done() */ /* NOTE @@ -1033,7 +1033,7 @@ fail_conflicting: err = 0; fail_free_complete: - if (rw == WRITE && local) + if (req->rq_state & RQ_IN_ACT_LOG) drbd_al_complete_io(mdev, sector); fail_and_free_req: if (local) { diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h index 32e2c3e6a81..68a234a5fdc 100644 --- a/drivers/block/drbd/drbd_req.h +++ b/drivers/block/drbd/drbd_req.h @@ -256,7 +256,7 @@ static inline struct drbd_request *_ar_id_to_req(struct drbd_conf *mdev, struct hlist_node *n; struct drbd_request *req; - hlist_for_each_entry(req, n, slot, colision) { + hlist_for_each_entry(req, n, slot, collision) { if ((unsigned long)req == (unsigned long)id) { D_ASSERT(req->sector == sector); return req; @@ -291,7 +291,7 @@ static inline struct drbd_request *drbd_req_new(struct drbd_conf *mdev, req->epoch = 0; req->sector = bio_src->bi_sector; req->size = bio_src->bi_size; - INIT_HLIST_NODE(&req->colision); + INIT_HLIST_NODE(&req->collision); INIT_LIST_HEAD(&req->tl_requests); INIT_LIST_HEAD(&req->w.list); } @@ -323,6 +323,7 @@ extern int __req_mod(struct drbd_request *req, enum drbd_req_event what, extern void complete_master_bio(struct drbd_conf *mdev, struct bio_and_error *m); extern void request_timer_fn(unsigned long data); +extern void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what); /* use this if you don't want to deal with calling complete_master_bio() * outside the spinlock, e.g. when walking some list on cleanup. */ diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index f7e6c92f8d0..4d76b06b6b2 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -126,7 +126,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo list_del(&e->w.list); /* has been on active_ee or sync_ee */ list_add_tail(&e->w.list, &mdev->done_ee); - /* No hlist_del_init(&e->colision) here, we did not send the Ack yet, + /* No hlist_del_init(&e->collision) here, we did not send the Ack yet, * neither did we wake possibly waiting conflicting requests. 
* done from "drbd_process_done_ee" within the appropriate w.cb * (e_end_block/e_end_resync_block) or from _drbd_clear_done_ee */ @@ -297,42 +297,48 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio * crypto_hash_final(&desc, digest); } -static int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel) +/* TODO merge common code with w_e_end_ov_req */ +int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel) { struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w); int digest_size; void *digest; - int ok; + int ok = 1; D_ASSERT(e->block_id == DRBD_MAGIC + 0xbeef); - if (unlikely(cancel)) { - drbd_free_ee(mdev, e); - return 1; - } + if (unlikely(cancel)) + goto out; - if (likely((e->flags & EE_WAS_ERROR) == 0)) { - digest_size = crypto_hash_digestsize(mdev->csums_tfm); - digest = kmalloc(digest_size, GFP_NOIO); - if (digest) { - drbd_csum_ee(mdev, mdev->csums_tfm, e, digest); + if (likely((e->flags & EE_WAS_ERROR) != 0)) + goto out; - inc_rs_pending(mdev); - ok = drbd_send_drequest_csum(mdev, - e->sector, - e->size, - digest, - digest_size, - P_CSUM_RS_REQUEST); - kfree(digest); - } else { - dev_err(DEV, "kmalloc() of digest failed.\n"); - ok = 0; - } - } else - ok = 1; + digest_size = crypto_hash_digestsize(mdev->csums_tfm); + digest = kmalloc(digest_size, GFP_NOIO); + if (digest) { + sector_t sector = e->sector; + unsigned int size = e->size; + drbd_csum_ee(mdev, mdev->csums_tfm, e, digest); + /* Free e and pages before send. + * In case we block on congestion, we could otherwise run into + * some distributed deadlock, if the other side blocks on + * congestion as well, because our receiver blocks in + * drbd_pp_alloc due to pp_in_use > max_buffers. */ + drbd_free_ee(mdev, e); + e = NULL; + inc_rs_pending(mdev); + ok = drbd_send_drequest_csum(mdev, sector, size, + digest, digest_size, + P_CSUM_RS_REQUEST); + kfree(digest); + } else { + dev_err(DEV, "kmalloc() of digest failed.\n"); + ok = 0; + } - drbd_free_ee(mdev, e); +out: + if (e) + drbd_free_ee(mdev, e); if (unlikely(!ok)) dev_err(DEV, "drbd_send_drequest(..., csum) failed\n"); @@ -834,7 +840,7 @@ int drbd_resync_finished(struct drbd_conf *mdev) const int ratio = (t == 0) ? 0 : (t < 100000) ? ((s*100)/t) : (s/(t/100)); - dev_info(DEV, "%u %% had equal check sums, eliminated: %luK; " + dev_info(DEV, "%u %% had equal checksums, eliminated: %luK; " "transferred %luK total %luK\n", ratio, Bit2KB(mdev->rs_same_csum), @@ -1071,9 +1077,12 @@ int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel) return ok; } +/* TODO merge common code with w_e_send_csum */ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel) { struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w); + sector_t sector = e->sector; + unsigned int size = e->size; int digest_size; void *digest; int ok = 1; @@ -1093,17 +1102,25 @@ int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel) else memset(digest, 0, digest_size); + /* Free e and pages before send. + * In case we block on congestion, we could otherwise run into + * some distributed deadlock, if the other side blocks on + * congestion as well, because our receiver blocks in + * drbd_pp_alloc due to pp_in_use > max_buffers. 
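The reordering in w_e_send_csum() and w_e_end_ov_req() above addresses lock-step resource exhaustion: if both peers keep their receive pages pinned while blocking in a send, neither receiver can allocate pages and both sides stall. The pattern is to copy out the few fields still needed, release the entry and its pages, and only then perform the potentially blocking send. A stubbed-out sketch of that ordering, with invented structure and helper names:

#include <stdlib.h>

struct entry {                      /* invented stand-in for the epoch entry */
    unsigned long sector;
    unsigned int size;
    void *pages;
};

/* stub standing in for the (potentially blocking) network send */
static int send_csum_request(unsigned long sector, unsigned int size,
                             const void *digest, unsigned int digest_len)
{
    (void)sector; (void)size; (void)digest; (void)digest_len;
    return 1;
}

static int send_checksum_for(struct entry *e, const void *digest,
                             unsigned int digest_len)
{
    /* capture what we still need, then release the buffers *before* the
     * send: if the peer is congested too, holding the pages here is what
     * would turn mutual back-pressure into a deadlock */
    unsigned long sector = e->sector;
    unsigned int size = e->size;

    free(e->pages);
    free(e);

    return send_csum_request(sector, size, digest, digest_len);
}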
*/ + drbd_free_ee(mdev, e); + e = NULL; inc_rs_pending(mdev); - ok = drbd_send_drequest_csum(mdev, e->sector, e->size, - digest, digest_size, P_OV_REPLY); + ok = drbd_send_drequest_csum(mdev, sector, size, + digest, digest_size, + P_OV_REPLY); if (!ok) dec_rs_pending(mdev); kfree(digest); out: - drbd_free_ee(mdev, e); + if (e) + drbd_free_ee(mdev, e); dec_unacked(mdev); - return ok; } @@ -1122,8 +1139,10 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel) { struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w); struct digest_info *di; - int digest_size; void *digest; + sector_t sector = e->sector; + unsigned int size = e->size; + int digest_size; int ok, eq = 0; if (unlikely(cancel)) { @@ -1153,16 +1172,21 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel) } } - dec_unacked(mdev); + /* Free e and pages before send. + * In case we block on congestion, we could otherwise run into + * some distributed deadlock, if the other side blocks on + * congestion as well, because our receiver blocks in + * drbd_pp_alloc due to pp_in_use > max_buffers. */ + drbd_free_ee(mdev, e); if (!eq) - drbd_ov_oos_found(mdev, e->sector, e->size); + drbd_ov_oos_found(mdev, sector, size); else ov_oos_print(mdev); - ok = drbd_send_ack_ex(mdev, P_OV_RESULT, e->sector, e->size, + ok = drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, eq ? ID_IN_SYNC : ID_OUT_OF_SYNC); - drbd_free_ee(mdev, e); + dec_unacked(mdev); --mdev->ov_left; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index a076a14ca72..c59a672a3de 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1658,7 +1658,7 @@ static struct kobject *loop_probe(dev_t dev, int *part, void *data) struct kobject *kobj; mutex_lock(&loop_devices_mutex); - lo = loop_init_one(dev & MINORMASK); + lo = loop_init_one(MINOR(dev) >> part_shift); kobj = lo ? get_disk(lo->lo_disk) : ERR_PTR(-ENOMEM); mutex_unlock(&loop_devices_mutex); @@ -1691,15 +1691,18 @@ static int __init loop_init(void) if (max_part > 0) part_shift = fls(max_part); + if ((1UL << part_shift) > DISK_MAX_PARTS) + return -EINVAL; + if (max_loop > 1UL << (MINORBITS - part_shift)) return -EINVAL; if (max_loop) { nr = max_loop; - range = max_loop; + range = max_loop << part_shift; } else { nr = 8; - range = 1UL << (MINORBITS - part_shift); + range = 1UL << MINORBITS; } if (register_blkdev(LOOP_MAJOR, "loop")) @@ -1738,7 +1741,7 @@ static void __exit loop_exit(void) unsigned long range; struct loop_device *lo, *next; - range = max_loop ? max_loop : 1UL << (MINORBITS - part_shift); + range = max_loop ? max_loop << part_shift : 1UL << MINORBITS; list_for_each_entry_safe(lo, next, &loop_devices, lo_list) loop_del_one(lo); diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c index 8690e31d993..a0aabd904a5 100644 --- a/drivers/block/paride/pcd.c +++ b/drivers/block/paride/pcd.c @@ -320,6 +320,8 @@ static void pcd_init_units(void) disk->first_minor = unit; strcpy(disk->disk_name, cd->name); /* umm... 
*/ disk->fops = &pcd_bdops; + disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE; + disk->events = DISK_EVENT_MEDIA_CHANGE; } } diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 9712fad82bc..1278098624e 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1191,14 +1191,19 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev, static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) { struct rbd_device *dev = (struct rbd_device *)data; + int rc; + if (!dev) return; dout("rbd_watch_cb %s notify_id=%lld opcode=%d\n", dev->obj_md_name, notify_id, (int)opcode); mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); - __rbd_update_snaps(dev); + rc = __rbd_update_snaps(dev); mutex_unlock(&ctl_mutex); + if (rc) + pr_warning(DRV_NAME "%d got notification but failed to update" + " snaps: %d\n", dev->major, rc); rbd_req_sync_notify_ack(dev, ver, notify_id, dev->obj_md_name); } @@ -1597,7 +1602,7 @@ static int rbd_header_add_snap(struct rbd_device *dev, int name_len = strlen(snap_name); u64 new_snapid; int ret; - void *data, *data_start, *data_end; + void *data, *p, *e; u64 ver; /* we should create a snapshot only if we're pointing at the head */ @@ -1614,16 +1619,16 @@ static int rbd_header_add_snap(struct rbd_device *dev, if (!data) return -ENOMEM; - data_start = data; - data_end = data + name_len + 16; + p = data; + e = data + name_len + 16; - ceph_encode_string_safe(&data, data_end, snap_name, name_len, bad); - ceph_encode_64_safe(&data, data_end, new_snapid, bad); + ceph_encode_string_safe(&p, e, snap_name, name_len, bad); + ceph_encode_64_safe(&p, e, new_snapid, bad); ret = rbd_req_sync_exec(dev, dev->obj_md_name, "rbd", "snap_add", - data_start, data - data_start, &ver); + data, p - data, &ver); - kfree(data_start); + kfree(data); if (ret < 0) return ret; @@ -1659,6 +1664,9 @@ static int __rbd_update_snaps(struct rbd_device *rbd_dev) if (ret < 0) return ret; + /* resized? */ + set_capacity(rbd_dev->disk, h.image_size / 512ULL); + down_write(&rbd_dev->header.snap_rwsem); snap_seq = rbd_dev->header.snapc->seq; @@ -1716,7 +1724,8 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) if (!disk) goto out; - sprintf(disk->disk_name, DRV_NAME "%d", rbd_dev->id); + snprintf(disk->disk_name, sizeof(disk->disk_name), DRV_NAME "%d", + rbd_dev->id); disk->major = rbd_dev->major; disk->first_minor = 0; disk->fops = &rbd_bd_ops; diff --git a/drivers/block/xen-blkback/Makefile b/drivers/block/xen-blkback/Makefile new file mode 100644 index 00000000000..e491c1b7687 --- /dev/null +++ b/drivers/block/xen-blkback/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_XEN_BLKDEV_BACKEND) := xen-blkback.o + +xen-blkback-y := blkback.o xenbus.o diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c new file mode 100644 index 00000000000..c73910cc28c --- /dev/null +++ b/drivers/block/xen-blkback/blkback.c @@ -0,0 +1,824 @@ +/****************************************************************************** + * + * Back-end of the driver for virtual block devices. This portion of the + * driver exports a 'unified' block-device interface that can be accessed + * by any operating system that implements a compatible front end. 
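The loop driver hunks further up change the minor-number layout so that each loop device owns a contiguous block of 1 << part_shift minors; the device index therefore becomes MINOR(dev) >> part_shift, and the range reserved with the block layer has to be scaled the same way. A small userspace model of that arithmetic; the constants are stand-ins for the kernel's.

#include <stdio.h>

#define MINORBITS      20
#define DISK_MAX_PARTS 256          /* placeholder for the kernel constant */

static int part_shift_ok(int part_shift)
{
    return (1UL << part_shift) <= DISK_MAX_PARTS;   /* mirrors the new -EINVAL check */
}

static int loop_index_from_minor(unsigned int minor, int part_shift)
{
    return minor >> part_shift;                     /* which loop device owns this minor */
}

static unsigned long loop_minor_range(unsigned long max_loop, int part_shift)
{
    /* with an explicit max_loop, the reserved range must cover partitions too */
    return max_loop ? max_loop << part_shift : 1UL << MINORBITS;
}

int main(void)
{
    int part_shift = 4;                             /* e.g. max_part = 15 */

    if (!part_shift_ok(part_shift))
        return 1;
    printf("minor 37 -> loop%d\n", loop_index_from_minor(37, part_shift));
    printf("range for max_loop=8: %lu\n", loop_minor_range(8, part_shift));
    return 0;
}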
A + * reference front-end implementation can be found in: + * drivers/block/xen-blkfront.c + * + * Copyright (c) 2003-2004, Keir Fraser & Steve Hand + * Copyright (c) 2005, Christopher Clark + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation; or, when distributed + * separately from the Linux kernel or incorporated into other + * software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include <linux/spinlock.h> +#include <linux/kthread.h> +#include <linux/list.h> +#include <linux/delay.h> +#include <linux/freezer.h> + +#include <xen/events.h> +#include <xen/page.h> +#include <asm/xen/hypervisor.h> +#include <asm/xen/hypercall.h> +#include "common.h" + +/* + * These are rather arbitrary. They are fairly large because adjacent requests + * pulled from a communication ring are quite likely to end up being part of + * the same scatter/gather request at the disc. + * + * ** TRY INCREASING 'xen_blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW ** + * + * This will increase the chances of being able to write whole tracks. + * 64 should be enough to keep us competitive with Linux. + */ +static int xen_blkif_reqs = 64; +module_param_named(reqs, xen_blkif_reqs, int, 0); +MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate"); + +/* Run-time switchable: /sys/module/blkback/parameters/ */ +static unsigned int log_stats; +module_param(log_stats, int, 0644); + +/* + * Each outstanding request that we've passed to the lower device layers has a + * 'pending_req' allocated to it. Each buffer_head that completes decrements + * the pendcnt towards zero. When it hits zero, the specified domain has a + * response queued for it, with the saved 'id' passed back. + */ +struct pending_req { + struct xen_blkif *blkif; + u64 id; + int nr_pages; + atomic_t pendcnt; + unsigned short operation; + int status; + struct list_head free_list; +}; + +#define BLKBACK_INVALID_HANDLE (~0) + +struct xen_blkbk { + struct pending_req *pending_reqs; + /* List of all 'pending_req' available */ + struct list_head pending_free; + /* And its spinlock. */ + spinlock_t pending_free_lock; + wait_queue_head_t pending_free_wq; + /* The list of all pages that are available. */ + struct page **pending_pages; + /* And the grant handles that are available. 
*/ + grant_handle_t *pending_grant_handles; +}; + +static struct xen_blkbk *blkbk; + +/* + * Little helpful macro to figure out the index and virtual address of the + * pending_pages[..]. For each 'pending_req' we have have up to + * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through + * 10 and would index in the pending_pages[..]. + */ +static inline int vaddr_pagenr(struct pending_req *req, int seg) +{ + return (req - blkbk->pending_reqs) * + BLKIF_MAX_SEGMENTS_PER_REQUEST + seg; +} + +#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)] + +static inline unsigned long vaddr(struct pending_req *req, int seg) +{ + unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg)); + return (unsigned long)pfn_to_kaddr(pfn); +} + +#define pending_handle(_req, _seg) \ + (blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)]) + + +static int do_block_io_op(struct xen_blkif *blkif); +static int dispatch_rw_block_io(struct xen_blkif *blkif, + struct blkif_request *req, + struct pending_req *pending_req); +static void make_response(struct xen_blkif *blkif, u64 id, + unsigned short op, int st); + +/* + * Retrieve from the 'pending_reqs' a free pending_req structure to be used. + */ +static struct pending_req *alloc_req(void) +{ + struct pending_req *req = NULL; + unsigned long flags; + + spin_lock_irqsave(&blkbk->pending_free_lock, flags); + if (!list_empty(&blkbk->pending_free)) { + req = list_entry(blkbk->pending_free.next, struct pending_req, + free_list); + list_del(&req->free_list); + } + spin_unlock_irqrestore(&blkbk->pending_free_lock, flags); + return req; +} + +/* + * Return the 'pending_req' structure back to the freepool. We also + * wake up the thread if it was waiting for a free page. + */ +static void free_req(struct pending_req *req) +{ + unsigned long flags; + int was_empty; + + spin_lock_irqsave(&blkbk->pending_free_lock, flags); + was_empty = list_empty(&blkbk->pending_free); + list_add(&req->free_list, &blkbk->pending_free); + spin_unlock_irqrestore(&blkbk->pending_free_lock, flags); + if (was_empty) + wake_up(&blkbk->pending_free_wq); +} + +/* + * Routines for managing virtual block devices (vbds). + */ +static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif, + int operation) +{ + struct xen_vbd *vbd = &blkif->vbd; + int rc = -EACCES; + + if ((operation != READ) && vbd->readonly) + goto out; + + if (likely(req->nr_sects)) { + blkif_sector_t end = req->sector_number + req->nr_sects; + + if (unlikely(end < req->sector_number)) + goto out; + if (unlikely(end > vbd_sz(vbd))) + goto out; + } + + req->dev = vbd->pdevice; + req->bdev = vbd->bdev; + rc = 0; + + out: + return rc; +} + +static void xen_vbd_resize(struct xen_blkif *blkif) +{ + struct xen_vbd *vbd = &blkif->vbd; + struct xenbus_transaction xbt; + int err; + struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be); + unsigned long long new_size = vbd_sz(vbd); + + pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n", + blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice)); + pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size); + vbd->size = new_size; +again: + err = xenbus_transaction_start(&xbt); + if (err) { + pr_warn(DRV_PFX "Error starting transaction"); + return; + } + err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu", + (unsigned long long)vbd_sz(vbd)); + if (err) { + pr_warn(DRV_PFX "Error writing new size"); + goto abort; + } + /* + * Write the current state; we will use this to synchronize + * the front-end. 
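alloc_req() and free_req() above implement a fixed-size pool of pending_req slots guarded by a spinlock, with a wakeup only when the pool goes from empty to non-empty. The kernel version returns NULL on exhaustion and lets the scheduler thread sleep on pending_free_wq; in the userspace analogue below the wait is folded into the allocator for brevity, and all names are invented.

#include <pthread.h>
#include <stddef.h>

struct slot {                       /* invented stand-in for pending_req */
    struct slot *next;
};

static struct slot *free_list;
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  pool_refilled = PTHREAD_COND_INITIALIZER;

static void pool_free(struct slot *s)
{
    int was_empty;

    pthread_mutex_lock(&pool_lock);
    was_empty = (free_list == NULL);
    s->next = free_list;
    free_list = s;
    pthread_mutex_unlock(&pool_lock);
    if (was_empty)                  /* only wake when the pool was drained */
        pthread_cond_signal(&pool_refilled);
}

static struct slot *pool_alloc(void)
{
    struct slot *s;

    pthread_mutex_lock(&pool_lock);
    while (free_list == NULL)       /* the kernel returns NULL here and waits elsewhere */
        pthread_cond_wait(&pool_refilled, &pool_lock);
    s = free_list;
    free_list = s->next;
    pthread_mutex_unlock(&pool_lock);
    return s;
}

static void pool_init(struct slot *slots, int n)
{
    for (int i = 0; i < n; i++)
        pool_free(&slots[i]);
}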
If the current state is "connected" the + * front-end will get the new size information online. + */ + err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state); + if (err) { + pr_warn(DRV_PFX "Error writing the state"); + goto abort; + } + + err = xenbus_transaction_end(xbt, 0); + if (err == -EAGAIN) + goto again; + if (err) + pr_warn(DRV_PFX "Error ending transaction"); + return; +abort: + xenbus_transaction_end(xbt, 1); +} + +/* + * Notification from the guest OS. + */ +static void blkif_notify_work(struct xen_blkif *blkif) +{ + blkif->waiting_reqs = 1; + wake_up(&blkif->wq); +} + +irqreturn_t xen_blkif_be_int(int irq, void *dev_id) +{ + blkif_notify_work(dev_id); + return IRQ_HANDLED; +} + +/* + * SCHEDULER FUNCTIONS + */ + +static void print_stats(struct xen_blkif *blkif) +{ + pr_info("xen-blkback (%s): oo %3d | rd %4d | wr %4d | f %4d\n", + current->comm, blkif->st_oo_req, + blkif->st_rd_req, blkif->st_wr_req, blkif->st_f_req); + blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000); + blkif->st_rd_req = 0; + blkif->st_wr_req = 0; + blkif->st_oo_req = 0; +} + +int xen_blkif_schedule(void *arg) +{ + struct xen_blkif *blkif = arg; + struct xen_vbd *vbd = &blkif->vbd; + + xen_blkif_get(blkif); + + while (!kthread_should_stop()) { + if (try_to_freeze()) + continue; + if (unlikely(vbd->size != vbd_sz(vbd))) + xen_vbd_resize(blkif); + + wait_event_interruptible( + blkif->wq, + blkif->waiting_reqs || kthread_should_stop()); + wait_event_interruptible( + blkbk->pending_free_wq, + !list_empty(&blkbk->pending_free) || + kthread_should_stop()); + + blkif->waiting_reqs = 0; + smp_mb(); /* clear flag *before* checking for work */ + + if (do_block_io_op(blkif)) + blkif->waiting_reqs = 1; + + if (log_stats && time_after(jiffies, blkif->st_print)) + print_stats(blkif); + } + + if (log_stats) + print_stats(blkif); + + blkif->xenblkd = NULL; + xen_blkif_put(blkif); + + return 0; +} + +struct seg_buf { + unsigned long buf; + unsigned int nsec; +}; +/* + * Unmap the grant references, and also remove the M2P over-rides + * used in the 'pending_req'. + */ +static void xen_blkbk_unmap(struct pending_req *req) +{ + struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; + unsigned int i, invcount = 0; + grant_handle_t handle; + int ret; + + for (i = 0; i < req->nr_pages; i++) { + handle = pending_handle(req, i); + if (handle == BLKBACK_INVALID_HANDLE) + continue; + gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i), + GNTMAP_host_map, handle); + pending_handle(req, i) = BLKBACK_INVALID_HANDLE; + invcount++; + } + + ret = HYPERVISOR_grant_table_op( + GNTTABOP_unmap_grant_ref, unmap, invcount); + BUG_ON(ret); + /* + * Note, we use invcount, so nr->pages, so we can't index + * using vaddr(req, i). + */ + for (i = 0; i < invcount; i++) { + ret = m2p_remove_override( + virt_to_page(unmap[i].host_addr), false); + if (ret) { + pr_alert(DRV_PFX "Failed to remove M2P override for %lx\n", + (unsigned long)unmap[i].host_addr); + continue; + } + } +} + +static int xen_blkbk_map(struct blkif_request *req, + struct pending_req *pending_req, + struct seg_buf seg[]) +{ + struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST]; + int i; + int nseg = req->nr_segments; + int ret = 0; + + /* + * Fill out preq.nr_sects with proper amount of sectors, and setup + * assign map[..] with the PFN of the page in our domain with the + * corresponding grant reference for each page. 
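The xen_blkif_schedule() loop above relies on a classic lost-wakeup guard: the notification path sets waiting_reqs and wakes the thread, while the worker clears the flag, issues a full barrier, and only then scans the ring, so a request that arrives mid-scan simply re-arms the flag for the next pass. A userspace model with C11 atomics; the sleep/wake machinery itself is elided and all names are invented.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool waiting_reqs;

/* notification side: mark work pending, then wake the worker (wakeup elided) */
static void notify_work(void)
{
    atomic_store(&waiting_reqs, true);
}

/* one pass of the worker loop */
static void worker_pass(bool (*process_ring)(void))
{
    atomic_store(&waiting_reqs, false);
    atomic_thread_fence(memory_order_seq_cst);  /* clear the flag *before* scanning */

    if (process_ring())                         /* true: requests were left behind */
        atomic_store(&waiting_reqs, true);
}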
+ */ + for (i = 0; i < nseg; i++) { + uint32_t flags; + + flags = GNTMAP_host_map; + if (pending_req->operation != BLKIF_OP_READ) + flags |= GNTMAP_readonly; + gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags, + req->u.rw.seg[i].gref, + pending_req->blkif->domid); + } + + ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg); + BUG_ON(ret); + + /* + * Now swizzle the MFN in our domain with the MFN from the other domain + * so that when we access vaddr(pending_req,i) it has the contents of + * the page from the other domain. + */ + for (i = 0; i < nseg; i++) { + if (unlikely(map[i].status != 0)) { + pr_debug(DRV_PFX "invalid buffer -- could not remap it\n"); + map[i].handle = BLKBACK_INVALID_HANDLE; + ret |= 1; + } + + pending_handle(pending_req, i) = map[i].handle; + + if (ret) + continue; + + ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr), + blkbk->pending_page(pending_req, i), false); + if (ret) { + pr_alert(DRV_PFX "Failed to install M2P override for %lx (ret: %d)\n", + (unsigned long)map[i].dev_bus_addr, ret); + /* We could switch over to GNTTABOP_copy */ + continue; + } + + seg[i].buf = map[i].dev_bus_addr | + (req->u.rw.seg[i].first_sect << 9); + } + return ret; +} + +/* + * Completion callback on the bio's. Called as bh->b_end_io() + */ + +static void __end_block_io_op(struct pending_req *pending_req, int error) +{ + /* An error fails the entire request. */ + if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) && + (error == -EOPNOTSUPP)) { + pr_debug(DRV_PFX "flush diskcache op failed, not supported\n"); + xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0); + pending_req->status = BLKIF_RSP_EOPNOTSUPP; + } else if (error) { + pr_debug(DRV_PFX "Buffer not up-to-date at end of operation," + " error=%d\n", error); + pending_req->status = BLKIF_RSP_ERROR; + } + + /* + * If all of the bio's have completed it is time to unmap + * the grant references associated with 'request' and provide + * the proper response on the ring. + */ + if (atomic_dec_and_test(&pending_req->pendcnt)) { + xen_blkbk_unmap(pending_req); + make_response(pending_req->blkif, pending_req->id, + pending_req->operation, pending_req->status); + xen_blkif_put(pending_req->blkif); + free_req(pending_req); + } +} + +/* + * bio callback. + */ +static void end_block_io_op(struct bio *bio, int error) +{ + __end_block_io_op(bio->bi_private, error); + bio_put(bio); +} + + + +/* + * Function to copy the from the ring buffer the 'struct blkif_request' + * (which has the sectors we want, number of them, grant references, etc), + * and transmute it to the block API to hand it over to the proper block disk. + */ +static int do_block_io_op(struct xen_blkif *blkif) +{ + union blkif_back_rings *blk_rings = &blkif->blk_rings; + struct blkif_request req; + struct pending_req *pending_req; + RING_IDX rc, rp; + int more_to_do = 0; + + rc = blk_rings->common.req_cons; + rp = blk_rings->common.sring->req_prod; + rmb(); /* Ensure we see queued requests up to 'rp'. 
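The do_block_io_op() loop that starts above consumes a shared ring: it snapshots the producer index (the rmb() supplies the acquire ordering), copies each request into a private structure, and validates only that private copy so the frontend cannot change fields between check and use. A simplified single-consumer model with an invented ring layout:

#include <stdatomic.h>
#include <string.h>

#define RING_SIZE 64                 /* invented; the real ring is sized by the page */

struct toy_request {
    unsigned long id;
    unsigned int nr_segments;
};

struct toy_ring {
    _Atomic unsigned int req_prod;   /* advanced by the frontend */
    unsigned int req_cons;           /* private to the backend */
    struct toy_request ring[RING_SIZE];
};

/* returns non-zero if unconsumed requests remain (the dispatcher asked us to stop) */
static int consume(struct toy_ring *r,
                   int (*dispatch)(const struct toy_request *))
{
    unsigned int rp = atomic_load_explicit(&r->req_prod, memory_order_acquire);

    while (r->req_cons != rp) {
        struct toy_request req;

        /* validate and dispatch a private copy, never the shared slot,
         * so the frontend cannot change it between check and use */
        memcpy(&req, &r->ring[r->req_cons % RING_SIZE], sizeof(req));
        r->req_cons++;

        if (dispatch(&req))
            break;
    }
    return r->req_cons != rp;
}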
*/ + + while (rc != rp) { + + if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc)) + break; + + if (kthread_should_stop()) { + more_to_do = 1; + break; + } + + pending_req = alloc_req(); + if (NULL == pending_req) { + blkif->st_oo_req++; + more_to_do = 1; + break; + } + + switch (blkif->blk_protocol) { + case BLKIF_PROTOCOL_NATIVE: + memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req)); + break; + case BLKIF_PROTOCOL_X86_32: + blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc)); + break; + case BLKIF_PROTOCOL_X86_64: + blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc)); + break; + default: + BUG(); + } + blk_rings->common.req_cons = ++rc; /* before make_response() */ + + /* Apply all sanity checks to /private copy/ of request. */ + barrier(); + + if (dispatch_rw_block_io(blkif, &req, pending_req)) + break; + + /* Yield point for this unbounded loop. */ + cond_resched(); + } + + return more_to_do; +} + +/* + * Transmutation of the 'struct blkif_request' to a proper 'struct bio' + * and call the 'submit_bio' to pass it to the underlying storage. + */ +static int dispatch_rw_block_io(struct xen_blkif *blkif, + struct blkif_request *req, + struct pending_req *pending_req) +{ + struct phys_req preq; + struct seg_buf seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; + unsigned int nseg; + struct bio *bio = NULL; + struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST]; + int i, nbio = 0; + int operation; + struct blk_plug plug; + + switch (req->operation) { + case BLKIF_OP_READ: + blkif->st_rd_req++; + operation = READ; + break; + case BLKIF_OP_WRITE: + blkif->st_wr_req++; + operation = WRITE_ODIRECT; + break; + case BLKIF_OP_FLUSH_DISKCACHE: + blkif->st_f_req++; + operation = WRITE_FLUSH; + break; + case BLKIF_OP_WRITE_BARRIER: + default: + operation = 0; /* make gcc happy */ + goto fail_response; + break; + } + + /* Check that the number of segments is sane. */ + nseg = req->nr_segments; + if (unlikely(nseg == 0 && operation != WRITE_FLUSH) || + unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) { + pr_debug(DRV_PFX "Bad number of segments in request (%d)\n", + nseg); + /* Haven't submitted any bio's yet. */ + goto fail_response; + } + + preq.dev = req->handle; + preq.sector_number = req->u.rw.sector_number; + preq.nr_sects = 0; + + pending_req->blkif = blkif; + pending_req->id = req->id; + pending_req->operation = req->operation; + pending_req->status = BLKIF_RSP_OKAY; + pending_req->nr_pages = nseg; + + for (i = 0; i < nseg; i++) { + seg[i].nsec = req->u.rw.seg[i].last_sect - + req->u.rw.seg[i].first_sect + 1; + if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) || + (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect)) + goto fail_response; + preq.nr_sects += seg[i].nsec; + + } + + if (xen_vbd_translate(&preq, blkif, operation) != 0) { + pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n", + operation == READ ? "read" : "write", + preq.sector_number, + preq.sector_number + preq.nr_sects, preq.dev); + goto fail_response; + } + + /* + * This check _MUST_ be done after xen_vbd_translate as the preq.bdev + * is set there. 
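dispatch_rw_block_io() above treats the request as untrusted input: the segment count must be within bounds, every segment must lie inside a single page with first_sect <= last_sect, and the resulting sector range must fit the virtual disk (xen_vbd_translate also rejects wrap-around). A self-contained sketch of those checks, with placeholder limits and invented structure names:

#include <stdbool.h>
#include <stdint.h>

#define MAX_SEGMENTS     11                     /* BLKIF_MAX_SEGMENTS_PER_REQUEST */
#define SECTORS_PER_PAGE (4096 / 512)

struct toy_seg {
    uint8_t first_sect, last_sect;
};

struct toy_blk_req {
    unsigned int nr_segments;
    uint64_t sector_number;
    struct toy_seg seg[MAX_SEGMENTS];
};

static bool request_is_sane(const struct toy_blk_req *req, uint64_t dev_sectors)
{
    uint64_t nr_sects = 0;

    /* the real code additionally allows zero segments for a pure cache flush */
    if (req->nr_segments == 0 || req->nr_segments > MAX_SEGMENTS)
        return false;

    for (unsigned int i = 0; i < req->nr_segments; i++) {
        const struct toy_seg *s = &req->seg[i];

        /* each segment must stay inside one page and be well ordered */
        if (s->last_sect >= SECTORS_PER_PAGE || s->last_sect < s->first_sect)
            return false;
        nr_sects += s->last_sect - s->first_sect + 1u;
    }

    /* reject wrap-around and anything past the end of the virtual disk */
    return req->sector_number + nr_sects >= req->sector_number &&
           req->sector_number + nr_sects <= dev_sectors;
}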
+ */ + for (i = 0; i < nseg; i++) { + if (((int)preq.sector_number|(int)seg[i].nsec) & + ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) { + pr_debug(DRV_PFX "Misaligned I/O request from domain %d", + blkif->domid); + goto fail_response; + } + } + + /* + * If we have failed at this point, we need to undo the M2P override, + * set gnttab_set_unmap_op on all of the grant references and perform + * the hypercall to unmap the grants - that is all done in + * xen_blkbk_unmap. + */ + if (xen_blkbk_map(req, pending_req, seg)) + goto fail_flush; + + /* This corresponding xen_blkif_put is done in __end_block_io_op */ + xen_blkif_get(blkif); + + for (i = 0; i < nseg; i++) { + while ((bio == NULL) || + (bio_add_page(bio, + blkbk->pending_page(pending_req, i), + seg[i].nsec << 9, + seg[i].buf & ~PAGE_MASK) == 0)) { + + bio = bio_alloc(GFP_KERNEL, nseg-i); + if (unlikely(bio == NULL)) + goto fail_put_bio; + + biolist[nbio++] = bio; + bio->bi_bdev = preq.bdev; + bio->bi_private = pending_req; + bio->bi_end_io = end_block_io_op; + bio->bi_sector = preq.sector_number; + } + + preq.sector_number += seg[i].nsec; + } + + /* This will be hit if the operation was a flush. */ + if (!bio) { + BUG_ON(operation != WRITE_FLUSH); + + bio = bio_alloc(GFP_KERNEL, 0); + if (unlikely(bio == NULL)) + goto fail_put_bio; + + biolist[nbio++] = bio; + bio->bi_bdev = preq.bdev; + bio->bi_private = pending_req; + bio->bi_end_io = end_block_io_op; + } + + /* + * We set it one so that the last submit_bio does not have to call + * atomic_inc. + */ + atomic_set(&pending_req->pendcnt, nbio); + + /* Get a reference count for the disk queue and start sending I/O */ + blk_start_plug(&plug); + + for (i = 0; i < nbio; i++) + submit_bio(operation, biolist[i]); + + /* Let the I/Os go.. */ + blk_finish_plug(&plug); + + if (operation == READ) + blkif->st_rd_sect += preq.nr_sects; + else if (operation == WRITE || operation == WRITE_FLUSH) + blkif->st_wr_sect += preq.nr_sects; + + return 0; + + fail_flush: + xen_blkbk_unmap(pending_req); + fail_response: + /* Haven't submitted any bio's yet. */ + make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR); + free_req(pending_req); + msleep(1); /* back off a bit */ + return -EIO; + + fail_put_bio: + for (i = 0; i < nbio; i++) + bio_put(biolist[i]); + __end_block_io_op(pending_req, -EINVAL); + msleep(1); /* back off a bit */ + return -EIO; +} + + + +/* + * Put a response on the ring on how the operation fared. + */ +static void make_response(struct xen_blkif *blkif, u64 id, + unsigned short op, int st) +{ + struct blkif_response resp; + unsigned long flags; + union blkif_back_rings *blk_rings = &blkif->blk_rings; + int more_to_do = 0; + int notify; + + resp.id = id; + resp.operation = op; + resp.status = st; + + spin_lock_irqsave(&blkif->blk_ring_lock, flags); + /* Place on the response ring for the relevant domain. 
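Because one ring request may be split across several bios, the code above sets pendcnt to the final bio count before submission and produces the response only once the last bio completes; errors are sticky, so the first failure decides the status. A small model of that accounting with C11 atomics; the names are invented.

#include <stdatomic.h>
#include <stdbool.h>

struct toy_pending {
    atomic_int pendcnt;      /* sub-I/Os still in flight */
    atomic_int status;       /* 0 = OK, first non-zero error wins */
};

static void toy_submit_all(struct toy_pending *p, int nbio,
                           void (*submit_one)(struct toy_pending *, int))
{
    /* publish the full count before any submission, so a completion that
     * races with this loop never sees a partially built counter */
    atomic_store(&p->status, 0);
    atomic_store(&p->pendcnt, nbio);
    for (int i = 0; i < nbio; i++)
        submit_one(p, i);
}

/* returns true on the completion that should produce the response */
static bool toy_complete_one(struct toy_pending *p, int error)
{
    int expected = 0;

    if (error)
        atomic_compare_exchange_strong(&p->status, &expected, error);
    return atomic_fetch_sub(&p->pendcnt, 1) == 1;
}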
*/ + switch (blkif->blk_protocol) { + case BLKIF_PROTOCOL_NATIVE: + memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt), + &resp, sizeof(resp)); + break; + case BLKIF_PROTOCOL_X86_32: + memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt), + &resp, sizeof(resp)); + break; + case BLKIF_PROTOCOL_X86_64: + memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt), + &resp, sizeof(resp)); + break; + default: + BUG(); + } + blk_rings->common.rsp_prod_pvt++; + RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify); + if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) { + /* + * Tail check for pending requests. Allows frontend to avoid + * notifications if requests are already in flight (lower + * overheads and promotes batching). + */ + RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do); + + } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) { + more_to_do = 1; + } + + spin_unlock_irqrestore(&blkif->blk_ring_lock, flags); + + if (more_to_do) + blkif_notify_work(blkif); + if (notify) + notify_remote_via_irq(blkif->irq); +} + +static int __init xen_blkif_init(void) +{ + int i, mmap_pages; + int rc = 0; + + if (!xen_pv_domain()) + return -ENODEV; + + blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL); + if (!blkbk) { + pr_alert(DRV_PFX "%s: out of memory!\n", __func__); + return -ENOMEM; + } + + mmap_pages = xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST; + + blkbk->pending_reqs = kmalloc(sizeof(blkbk->pending_reqs[0]) * + xen_blkif_reqs, GFP_KERNEL); + blkbk->pending_grant_handles = kzalloc(sizeof(blkbk->pending_grant_handles[0]) * + mmap_pages, GFP_KERNEL); + blkbk->pending_pages = kzalloc(sizeof(blkbk->pending_pages[0]) * + mmap_pages, GFP_KERNEL); + + if (!blkbk->pending_reqs || !blkbk->pending_grant_handles || + !blkbk->pending_pages) { + rc = -ENOMEM; + goto out_of_memory; + } + + for (i = 0; i < mmap_pages; i++) { + blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE; + blkbk->pending_pages[i] = alloc_page(GFP_KERNEL); + if (blkbk->pending_pages[i] == NULL) { + rc = -ENOMEM; + goto out_of_memory; + } + } + rc = xen_blkif_interface_init(); + if (rc) + goto failed_init; + + memset(blkbk->pending_reqs, 0, sizeof(blkbk->pending_reqs)); + + INIT_LIST_HEAD(&blkbk->pending_free); + spin_lock_init(&blkbk->pending_free_lock); + init_waitqueue_head(&blkbk->pending_free_wq); + + for (i = 0; i < xen_blkif_reqs; i++) + list_add_tail(&blkbk->pending_reqs[i].free_list, + &blkbk->pending_free); + + rc = xen_blkif_xenbus_init(); + if (rc) + goto failed_init; + + return 0; + + out_of_memory: + pr_alert(DRV_PFX "%s: out of memory\n", __func__); + failed_init: + kfree(blkbk->pending_reqs); + kfree(blkbk->pending_grant_handles); + for (i = 0; i < mmap_pages; i++) { + if (blkbk->pending_pages[i]) + __free_page(blkbk->pending_pages[i]); + } + kfree(blkbk->pending_pages); + kfree(blkbk); + blkbk = NULL; + return rc; +} + +module_init(xen_blkif_init); + +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h new file mode 100644 index 00000000000..9e40b283a46 --- /dev/null +++ b/drivers/block/xen-blkback/common.h @@ -0,0 +1,233 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation; or, when distributed + * separately from the Linux kernel or incorporated into other + * software packages, subject 
to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#ifndef __XEN_BLKIF__BACKEND__COMMON_H__ +#define __XEN_BLKIF__BACKEND__COMMON_H__ + +#include <linux/version.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/slab.h> +#include <linux/blkdev.h> +#include <linux/vmalloc.h> +#include <linux/wait.h> +#include <linux/io.h> +#include <asm/setup.h> +#include <asm/pgalloc.h> +#include <asm/hypervisor.h> +#include <xen/grant_table.h> +#include <xen/xenbus.h> +#include <xen/interface/io/ring.h> +#include <xen/interface/io/blkif.h> +#include <xen/interface/io/protocols.h> + +#define DRV_PFX "xen-blkback:" +#define DPRINTK(fmt, args...) \ + pr_debug(DRV_PFX "(%s:%d) " fmt ".\n", \ + __func__, __LINE__, ##args) + + +/* Not a real protocol. Used to generate ring structs which contain + * the elements common to all protocols only. This way we get a + * compiler-checkable way to use common struct elements, so we can + * avoid using switch(protocol) in a number of places. */ +struct blkif_common_request { + char dummy; +}; +struct blkif_common_response { + char dummy; +}; + +/* i386 protocol version */ +#pragma pack(push, 4) +struct blkif_x86_32_request { + uint8_t operation; /* BLKIF_OP_??? */ + uint8_t nr_segments; /* number of segments */ + blkif_vdev_t handle; /* only for read/write requests */ + uint64_t id; /* private guest value, echoed in resp */ + blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ + struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; +}; +struct blkif_x86_32_response { + uint64_t id; /* copied from request */ + uint8_t operation; /* copied from request */ + int16_t status; /* BLKIF_RSP_??? */ +}; +#pragma pack(pop) + +/* x86_64 protocol version */ +struct blkif_x86_64_request { + uint8_t operation; /* BLKIF_OP_??? */ + uint8_t nr_segments; /* number of segments */ + blkif_vdev_t handle; /* only for read/write requests */ + uint64_t __attribute__((__aligned__(8))) id; + blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ + struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; +}; +struct blkif_x86_64_response { + uint64_t __attribute__((__aligned__(8))) id; + uint8_t operation; /* copied from request */ + int16_t status; /* BLKIF_RSP_??? 
*/ +}; + +DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, + struct blkif_common_response); +DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, + struct blkif_x86_32_response); +DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, + struct blkif_x86_64_response); + +union blkif_back_rings { + struct blkif_back_ring native; + struct blkif_common_back_ring common; + struct blkif_x86_32_back_ring x86_32; + struct blkif_x86_64_back_ring x86_64; +}; + +enum blkif_protocol { + BLKIF_PROTOCOL_NATIVE = 1, + BLKIF_PROTOCOL_X86_32 = 2, + BLKIF_PROTOCOL_X86_64 = 3, +}; + +struct xen_vbd { + /* What the domain refers to this vbd as. */ + blkif_vdev_t handle; + /* Non-zero -> read-only */ + unsigned char readonly; + /* VDISK_xxx */ + unsigned char type; + /* phys device that this vbd maps to. */ + u32 pdevice; + struct block_device *bdev; + /* Cached size parameter. */ + sector_t size; + bool flush_support; +}; + +struct backend_info; + +struct xen_blkif { + /* Unique identifier for this interface. */ + domid_t domid; + unsigned int handle; + /* Physical parameters of the comms window. */ + unsigned int irq; + /* Comms information. */ + enum blkif_protocol blk_protocol; + union blkif_back_rings blk_rings; + struct vm_struct *blk_ring_area; + /* The VBD attached to this interface. */ + struct xen_vbd vbd; + /* Back pointer to the backend_info. */ + struct backend_info *be; + /* Private fields. */ + spinlock_t blk_ring_lock; + atomic_t refcnt; + + wait_queue_head_t wq; + /* One thread per one blkif. */ + struct task_struct *xenblkd; + unsigned int waiting_reqs; + + /* statistics */ + unsigned long st_print; + int st_rd_req; + int st_wr_req; + int st_oo_req; + int st_f_req; + int st_rd_sect; + int st_wr_sect; + + wait_queue_head_t waiting_to_free; + + grant_handle_t shmem_handle; + grant_ref_t shmem_ref; +}; + + +#define vbd_sz(_v) ((_v)->bdev->bd_part ? 
\ + (_v)->bdev->bd_part->nr_sects : \ + get_capacity((_v)->bdev->bd_disk)) + +#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt)) +#define xen_blkif_put(_b) \ + do { \ + if (atomic_dec_and_test(&(_b)->refcnt)) \ + wake_up(&(_b)->waiting_to_free);\ + } while (0) + +struct phys_req { + unsigned short dev; + unsigned short nr_sects; + struct block_device *bdev; + blkif_sector_t sector_number; +}; +int xen_blkif_interface_init(void); + +int xen_blkif_xenbus_init(void); + +irqreturn_t xen_blkif_be_int(int irq, void *dev_id); +int xen_blkif_schedule(void *arg); + +int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt, + struct backend_info *be, int state); + +struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be); + +static inline void blkif_get_x86_32_req(struct blkif_request *dst, + struct blkif_x86_32_request *src) +{ + int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST; + dst->operation = src->operation; + dst->nr_segments = src->nr_segments; + dst->handle = src->handle; + dst->id = src->id; + dst->u.rw.sector_number = src->sector_number; + barrier(); + if (n > dst->nr_segments) + n = dst->nr_segments; + for (i = 0; i < n; i++) + dst->u.rw.seg[i] = src->seg[i]; +} + +static inline void blkif_get_x86_64_req(struct blkif_request *dst, + struct blkif_x86_64_request *src) +{ + int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST; + dst->operation = src->operation; + dst->nr_segments = src->nr_segments; + dst->handle = src->handle; + dst->id = src->id; + dst->u.rw.sector_number = src->sector_number; + barrier(); + if (n > dst->nr_segments) + n = dst->nr_segments; + for (i = 0; i < n; i++) + dst->u.rw.seg[i] = src->seg[i]; +} + +#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */ diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c new file mode 100644 index 00000000000..34570823355 --- /dev/null +++ b/drivers/block/xen-blkback/xenbus.c @@ -0,0 +1,768 @@ +/* Xenbus code for blkif backend + Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au> + Copyright (C) 2005 XenSource Ltd + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ +*/ + +#include <stdarg.h> +#include <linux/module.h> +#include <linux/kthread.h> +#include <xen/events.h> +#include <xen/grant_table.h> +#include "common.h" + +struct backend_info { + struct xenbus_device *dev; + struct xen_blkif *blkif; + struct xenbus_watch backend_watch; + unsigned major; + unsigned minor; + char *mode; +}; + +static struct kmem_cache *xen_blkif_cachep; +static void connect(struct backend_info *); +static int connect_ring(struct backend_info *); +static void backend_changed(struct xenbus_watch *, const char **, + unsigned int); + +struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be) +{ + return be->dev; +} + +static int blkback_name(struct xen_blkif *blkif, char *buf) +{ + char *devpath, *devname; + struct xenbus_device *dev = blkif->be->dev; + + devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL); + if (IS_ERR(devpath)) + return PTR_ERR(devpath); + + devname = strstr(devpath, "/dev/"); + if (devname != NULL) + devname += strlen("/dev/"); + else + devname = devpath; + + snprintf(buf, TASK_COMM_LEN, "blkback.%d.%s", blkif->domid, devname); + kfree(devpath); + + return 0; +} + +static void xen_update_blkif_status(struct xen_blkif *blkif) +{ + int err; + char name[TASK_COMM_LEN]; + + /* Not ready to connect? */ + if (!blkif->irq || !blkif->vbd.bdev) + return; + + /* Already connected? */ + if (blkif->be->dev->state == XenbusStateConnected) + return; + + /* Attempt to connect: exit if we fail to. */ + connect(blkif->be); + if (blkif->be->dev->state != XenbusStateConnected) + return; + + err = blkback_name(blkif, name); + if (err) { + xenbus_dev_error(blkif->be->dev, err, "get blkback dev name"); + return; + } + + err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping); + if (err) { + xenbus_dev_error(blkif->be->dev, err, "block flush"); + return; + } + invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping); + + blkif->xenblkd = kthread_run(xen_blkif_schedule, blkif, name); + if (IS_ERR(blkif->xenblkd)) { + err = PTR_ERR(blkif->xenblkd); + blkif->xenblkd = NULL; + xenbus_dev_error(blkif->be->dev, err, "start xenblkd"); + } +} + +static struct xen_blkif *xen_blkif_alloc(domid_t domid) +{ + struct xen_blkif *blkif; + + blkif = kmem_cache_alloc(xen_blkif_cachep, GFP_KERNEL); + if (!blkif) + return ERR_PTR(-ENOMEM); + + memset(blkif, 0, sizeof(*blkif)); + blkif->domid = domid; + spin_lock_init(&blkif->blk_ring_lock); + atomic_set(&blkif->refcnt, 1); + init_waitqueue_head(&blkif->wq); + blkif->st_print = jiffies; + init_waitqueue_head(&blkif->waiting_to_free); + + return blkif; +} + +static int map_frontend_page(struct xen_blkif *blkif, unsigned long shared_page) +{ + struct gnttab_map_grant_ref op; + + gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr, + GNTMAP_host_map, shared_page, blkif->domid); + + if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1)) + BUG(); + + if (op.status) { + DPRINTK("Grant table operation failure !\n"); + return op.status; + } + + blkif->shmem_ref = shared_page; + blkif->shmem_handle = op.handle; + + return 0; +} + +static void unmap_frontend_page(struct xen_blkif *blkif) +{ + struct gnttab_unmap_grant_ref op; + + gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr, + GNTMAP_host_map, blkif->shmem_handle); + + if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) + BUG(); +} + +static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page, + unsigned int evtchn) +{ + int err; + + /* Already connected through? 
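blkback_name() above derives the per-device kernel thread's name from the xenstore "dev" node: anything after a "/dev/" prefix is kept and formatted as blkback.<domid>.<name> into a TASK_COMM_LEN buffer. A userspace sketch of the same string handling; NAME_LEN stands in for TASK_COMM_LEN.

#include <stdio.h>
#include <string.h>

#define NAME_LEN 16                   /* stands in for TASK_COMM_LEN */

static void backend_thread_name(char *buf, size_t len, int domid,
                                const char *devpath)
{
    const char *devname = strstr(devpath, "/dev/");

    devname = devname ? devname + strlen("/dev/") : devpath;
    snprintf(buf, len, "blkback.%d.%s", domid, devname);
}

int main(void)
{
    char name[NAME_LEN];

    backend_thread_name(name, sizeof(name), 3, "/dev/xvda1");
    puts(name);                       /* prints "blkback.3.xvda1" */
    return 0;
}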
*/ + if (blkif->irq) + return 0; + + blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE); + if (!blkif->blk_ring_area) + return -ENOMEM; + + err = map_frontend_page(blkif, shared_page); + if (err) { + free_vm_area(blkif->blk_ring_area); + return err; + } + + switch (blkif->blk_protocol) { + case BLKIF_PROTOCOL_NATIVE: + { + struct blkif_sring *sring; + sring = (struct blkif_sring *)blkif->blk_ring_area->addr; + BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE); + break; + } + case BLKIF_PROTOCOL_X86_32: + { + struct blkif_x86_32_sring *sring_x86_32; + sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring_area->addr; + BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE); + break; + } + case BLKIF_PROTOCOL_X86_64: + { + struct blkif_x86_64_sring *sring_x86_64; + sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring_area->addr; + BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE); + break; + } + default: + BUG(); + } + + err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn, + xen_blkif_be_int, 0, + "blkif-backend", blkif); + if (err < 0) { + unmap_frontend_page(blkif); + free_vm_area(blkif->blk_ring_area); + blkif->blk_rings.common.sring = NULL; + return err; + } + blkif->irq = err; + + return 0; +} + +static void xen_blkif_disconnect(struct xen_blkif *blkif) +{ + if (blkif->xenblkd) { + kthread_stop(blkif->xenblkd); + blkif->xenblkd = NULL; + } + + atomic_dec(&blkif->refcnt); + wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0); + atomic_inc(&blkif->refcnt); + + if (blkif->irq) { + unbind_from_irqhandler(blkif->irq, blkif); + blkif->irq = 0; + } + + if (blkif->blk_rings.common.sring) { + unmap_frontend_page(blkif); + free_vm_area(blkif->blk_ring_area); + blkif->blk_rings.common.sring = NULL; + } +} + +void xen_blkif_free(struct xen_blkif *blkif) +{ + if (!atomic_dec_and_test(&blkif->refcnt)) + BUG(); + kmem_cache_free(xen_blkif_cachep, blkif); +} + +int __init xen_blkif_interface_init(void) +{ + xen_blkif_cachep = kmem_cache_create("blkif_cache", + sizeof(struct xen_blkif), + 0, 0, NULL); + if (!xen_blkif_cachep) + return -ENOMEM; + + return 0; +} + +/* + * sysfs interface for VBD I/O requests + */ + +#define VBD_SHOW(name, format, args...) 
\ + static ssize_t show_##name(struct device *_dev, \ + struct device_attribute *attr, \ + char *buf) \ + { \ + struct xenbus_device *dev = to_xenbus_device(_dev); \ + struct backend_info *be = dev_get_drvdata(&dev->dev); \ + \ + return sprintf(buf, format, ##args); \ + } \ + static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) + +VBD_SHOW(oo_req, "%d\n", be->blkif->st_oo_req); +VBD_SHOW(rd_req, "%d\n", be->blkif->st_rd_req); +VBD_SHOW(wr_req, "%d\n", be->blkif->st_wr_req); +VBD_SHOW(f_req, "%d\n", be->blkif->st_f_req); +VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect); +VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect); + +static struct attribute *xen_vbdstat_attrs[] = { + &dev_attr_oo_req.attr, + &dev_attr_rd_req.attr, + &dev_attr_wr_req.attr, + &dev_attr_f_req.attr, + &dev_attr_rd_sect.attr, + &dev_attr_wr_sect.attr, + NULL +}; + +static struct attribute_group xen_vbdstat_group = { + .name = "statistics", + .attrs = xen_vbdstat_attrs, +}; + +VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor); +VBD_SHOW(mode, "%s\n", be->mode); + +int xenvbd_sysfs_addif(struct xenbus_device *dev) +{ + int error; + + error = device_create_file(&dev->dev, &dev_attr_physical_device); + if (error) + goto fail1; + + error = device_create_file(&dev->dev, &dev_attr_mode); + if (error) + goto fail2; + + error = sysfs_create_group(&dev->dev.kobj, &xen_vbdstat_group); + if (error) + goto fail3; + + return 0; + +fail3: sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group); +fail2: device_remove_file(&dev->dev, &dev_attr_mode); +fail1: device_remove_file(&dev->dev, &dev_attr_physical_device); + return error; +} + +void xenvbd_sysfs_delif(struct xenbus_device *dev) +{ + sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group); + device_remove_file(&dev->dev, &dev_attr_mode); + device_remove_file(&dev->dev, &dev_attr_physical_device); +} + + +static void xen_vbd_free(struct xen_vbd *vbd) +{ + if (vbd->bdev) + blkdev_put(vbd->bdev, vbd->readonly ? FMODE_READ : FMODE_WRITE); + vbd->bdev = NULL; +} + +static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle, + unsigned major, unsigned minor, int readonly, + int cdrom) +{ + struct xen_vbd *vbd; + struct block_device *bdev; + struct request_queue *q; + + vbd = &blkif->vbd; + vbd->handle = handle; + vbd->readonly = readonly; + vbd->type = 0; + + vbd->pdevice = MKDEV(major, minor); + + bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ? 
+ FMODE_READ : FMODE_WRITE, NULL); + + if (IS_ERR(bdev)) { + DPRINTK("xen_vbd_create: device %08x could not be opened.\n", + vbd->pdevice); + return -ENOENT; + } + + vbd->bdev = bdev; + vbd->size = vbd_sz(vbd); + + if (vbd->bdev->bd_disk == NULL) { + DPRINTK("xen_vbd_create: device %08x doesn't exist.\n", + vbd->pdevice); + xen_vbd_free(vbd); + return -ENOENT; + } + + if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom) + vbd->type |= VDISK_CDROM; + if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE) + vbd->type |= VDISK_REMOVABLE; + + q = bdev_get_queue(bdev); + if (q && q->flush_flags) + vbd->flush_support = true; + + DPRINTK("Successful creation of handle=%04x (dom=%u)\n", + handle, blkif->domid); + return 0; +} +static int xen_blkbk_remove(struct xenbus_device *dev) +{ + struct backend_info *be = dev_get_drvdata(&dev->dev); + + DPRINTK(""); + + if (be->major || be->minor) + xenvbd_sysfs_delif(dev); + + if (be->backend_watch.node) { + unregister_xenbus_watch(&be->backend_watch); + kfree(be->backend_watch.node); + be->backend_watch.node = NULL; + } + + if (be->blkif) { + xen_blkif_disconnect(be->blkif); + xen_vbd_free(&be->blkif->vbd); + xen_blkif_free(be->blkif); + be->blkif = NULL; + } + + kfree(be); + dev_set_drvdata(&dev->dev, NULL); + return 0; +} + +int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt, + struct backend_info *be, int state) +{ + struct xenbus_device *dev = be->dev; + int err; + + err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache", + "%d", state); + if (err) + xenbus_dev_fatal(dev, err, "writing feature-flush-cache"); + + return err; +} + +/* + * Entry point to this code when a new device is created. Allocate the basic + * structures, and watch the store waiting for the hotplug scripts to tell us + * the device's physical major and minor numbers. Switch to InitWait. + */ +static int xen_blkbk_probe(struct xenbus_device *dev, + const struct xenbus_device_id *id) +{ + int err; + struct backend_info *be = kzalloc(sizeof(struct backend_info), + GFP_KERNEL); + if (!be) { + xenbus_dev_fatal(dev, -ENOMEM, + "allocating backend structure"); + return -ENOMEM; + } + be->dev = dev; + dev_set_drvdata(&dev->dev, be); + + be->blkif = xen_blkif_alloc(dev->otherend_id); + if (IS_ERR(be->blkif)) { + err = PTR_ERR(be->blkif); + be->blkif = NULL; + xenbus_dev_fatal(dev, err, "creating block interface"); + goto fail; + } + + /* setup back pointer */ + be->blkif->be = be; + + err = xenbus_watch_pathfmt(dev, &be->backend_watch, backend_changed, + "%s/%s", dev->nodename, "physical-device"); + if (err) + goto fail; + + err = xenbus_switch_state(dev, XenbusStateInitWait); + if (err) + goto fail; + + return 0; + +fail: + DPRINTK("failed"); + xen_blkbk_remove(dev); + return err; +} + + +/* + * Callback received when the hotplug scripts have placed the physical-device + * node. Read it and the mode node, and create a vbd. If the frontend is + * ready, connect. + */ +static void backend_changed(struct xenbus_watch *watch, + const char **vec, unsigned int len) +{ + int err; + unsigned major; + unsigned minor; + struct backend_info *be + = container_of(watch, struct backend_info, backend_watch); + struct xenbus_device *dev = be->dev; + int cdrom = 0; + char *device_type; + + DPRINTK(""); + + err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x", + &major, &minor); + if (XENBUS_EXIST_ERR(err)) { + /* + * Since this watch will fire once immediately after it is + * registered, we expect this. Ignore it, and wait for the + * hotplug scripts. 
+ */ + return; + } + if (err != 2) { + xenbus_dev_fatal(dev, err, "reading physical-device"); + return; + } + + if ((be->major || be->minor) && + ((be->major != major) || (be->minor != minor))) { + pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n", + be->major, be->minor, major, minor); + return; + } + + be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL); + if (IS_ERR(be->mode)) { + err = PTR_ERR(be->mode); + be->mode = NULL; + xenbus_dev_fatal(dev, err, "reading mode"); + return; + } + + device_type = xenbus_read(XBT_NIL, dev->otherend, "device-type", NULL); + if (!IS_ERR(device_type)) { + cdrom = strcmp(device_type, "cdrom") == 0; + kfree(device_type); + } + + if (be->major == 0 && be->minor == 0) { + /* Front end dir is a number, which is used as the handle. */ + + char *p = strrchr(dev->otherend, '/') + 1; + long handle; + err = strict_strtoul(p, 0, &handle); + if (err) + return; + + be->major = major; + be->minor = minor; + + err = xen_vbd_create(be->blkif, handle, major, minor, + (NULL == strchr(be->mode, 'w')), cdrom); + if (err) { + be->major = 0; + be->minor = 0; + xenbus_dev_fatal(dev, err, "creating vbd structure"); + return; + } + + err = xenvbd_sysfs_addif(dev); + if (err) { + xen_vbd_free(&be->blkif->vbd); + be->major = 0; + be->minor = 0; + xenbus_dev_fatal(dev, err, "creating sysfs entries"); + return; + } + + /* We're potentially connected now */ + xen_update_blkif_status(be->blkif); + } +} + + +/* + * Callback received when the frontend's state changes. + */ +static void frontend_changed(struct xenbus_device *dev, + enum xenbus_state frontend_state) +{ + struct backend_info *be = dev_get_drvdata(&dev->dev); + int err; + + DPRINTK("%s", xenbus_strstate(frontend_state)); + + switch (frontend_state) { + case XenbusStateInitialising: + if (dev->state == XenbusStateClosed) { + pr_info(DRV_PFX "%s: prepare for reconnect\n", + dev->nodename); + xenbus_switch_state(dev, XenbusStateInitWait); + } + break; + + case XenbusStateInitialised: + case XenbusStateConnected: + /* + * Ensure we connect even when two watches fire in + * close succession and we miss the intermediate value + * of frontend_state. + */ + if (dev->state == XenbusStateConnected) + break; + + /* + * Enforce precondition before potential leak point. + * blkif_disconnect() is idempotent. + */ + xen_blkif_disconnect(be->blkif); + + err = connect_ring(be); + if (err) + break; + xen_update_blkif_status(be->blkif); + break; + + case XenbusStateClosing: + xen_blkif_disconnect(be->blkif); + xenbus_switch_state(dev, XenbusStateClosing); + break; + + case XenbusStateClosed: + xenbus_switch_state(dev, XenbusStateClosed); + if (xenbus_dev_is_online(dev)) + break; + /* fall through if not online */ + case XenbusStateUnknown: + /* implies blkif_disconnect() via blkback_remove() */ + device_unregister(&dev->dev); + break; + + default: + xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend", + frontend_state); + break; + } +} + + +/* ** Connection ** */ + + +/* + * Write the physical details regarding the block device to the store, and + * switch to Connected state. 
+ */ +static void connect(struct backend_info *be) +{ + struct xenbus_transaction xbt; + int err; + struct xenbus_device *dev = be->dev; + + DPRINTK("%s", dev->otherend); + + /* Supply the information about the device the frontend needs */ +again: + err = xenbus_transaction_start(&xbt); + if (err) { + xenbus_dev_fatal(dev, err, "starting transaction"); + return; + } + + err = xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support); + if (err) + goto abort; + + err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu", + (unsigned long long)vbd_sz(&be->blkif->vbd)); + if (err) { + xenbus_dev_fatal(dev, err, "writing %s/sectors", + dev->nodename); + goto abort; + } + + /* FIXME: use a typename instead */ + err = xenbus_printf(xbt, dev->nodename, "info", "%u", + be->blkif->vbd.type | + (be->blkif->vbd.readonly ? VDISK_READONLY : 0)); + if (err) { + xenbus_dev_fatal(dev, err, "writing %s/info", + dev->nodename); + goto abort; + } + err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu", + (unsigned long) + bdev_logical_block_size(be->blkif->vbd.bdev)); + if (err) { + xenbus_dev_fatal(dev, err, "writing %s/sector-size", + dev->nodename); + goto abort; + } + + err = xenbus_transaction_end(xbt, 0); + if (err == -EAGAIN) + goto again; + if (err) + xenbus_dev_fatal(dev, err, "ending transaction"); + + err = xenbus_switch_state(dev, XenbusStateConnected); + if (err) + xenbus_dev_fatal(dev, err, "%s: switching to Connected state", + dev->nodename); + + return; + abort: + xenbus_transaction_end(xbt, 1); +} + + +static int connect_ring(struct backend_info *be) +{ + struct xenbus_device *dev = be->dev; + unsigned long ring_ref; + unsigned int evtchn; + char protocol[64] = ""; + int err; + + DPRINTK("%s", dev->otherend); + + err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu", + &ring_ref, "event-channel", "%u", &evtchn, NULL); + if (err) { + xenbus_dev_fatal(dev, err, + "reading %s/ring-ref and event-channel", + dev->otherend); + return err; + } + + be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE; + err = xenbus_gather(XBT_NIL, dev->otherend, "protocol", + "%63s", protocol, NULL); + if (err) + strcpy(protocol, "unspecified, assuming native"); + else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE)) + be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE; + else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32)) + be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32; + else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64)) + be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64; + else { + xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol); + return -1; + } + pr_info(DRV_PFX "ring-ref %ld, event-channel %d, protocol %d (%s)\n", + ring_ref, evtchn, be->blkif->blk_protocol, protocol); + + /* Map the shared frame, irq etc. 
*/ + err = xen_blkif_map(be->blkif, ring_ref, evtchn); + if (err) { + xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u", + ring_ref, evtchn); + return err; + } + + return 0; +} + + +/* ** Driver Registration ** */ + + +static const struct xenbus_device_id xen_blkbk_ids[] = { + { "vbd" }, + { "" } +}; + + +static struct xenbus_driver xen_blkbk = { + .name = "vbd", + .owner = THIS_MODULE, + .ids = xen_blkbk_ids, + .probe = xen_blkbk_probe, + .remove = xen_blkbk_remove, + .otherend_changed = frontend_changed +}; + + +int xen_blkif_xenbus_init(void) +{ + return xenbus_register_backend(&xen_blkbk); +} diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 9cb8668ff5f..b536a9cef91 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -97,6 +97,7 @@ struct blkfront_info struct blk_shadow shadow[BLK_RING_SIZE]; unsigned long shadow_free; unsigned int feature_flush; + unsigned int flush_op; int is_ready; }; @@ -250,8 +251,7 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode, /* * Generate a Xen blkfront IO request from a blk layer request. Reads - * and writes are handled as expected. Since we lack a loose flush - * request, we map flushes into a full ordered barrier. + * and writes are handled as expected. * * @req: a request struct */ @@ -293,14 +293,13 @@ static int blkif_queue_request(struct request *req) if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) { /* - * Ideally we could just do an unordered - * flush-to-disk, but all we have is a full write - * barrier at the moment. However, a barrier write is + * Ideally we can do an unordered flush-to-disk. In case the + * backend only supports barriers, use that. A barrier request is * a superset of FUA, so we can implement it the same * way. (It's also a FLUSH+FUA, since it is * guaranteed ordered WRT previous writes.) */ - ring_req->operation = BLKIF_OP_WRITE_BARRIER; + ring_req->operation = info->flush_op; } ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); @@ -433,8 +432,11 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) static void xlvbd_flush(struct blkfront_info *info) { blk_queue_flush(info->rq, info->feature_flush); - printk(KERN_INFO "blkfront: %s: barriers %s\n", + printk(KERN_INFO "blkfront: %s: %s: %s\n", info->gd->disk_name, + info->flush_op == BLKIF_OP_WRITE_BARRIER ? + "barrier" : (info->flush_op == BLKIF_OP_FLUSH_DISKCACHE ? + "flush diskcache" : "barrier or flush"), info->feature_flush ? "enabled" : "disabled"); } @@ -720,15 +722,20 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO; switch (bret->operation) { + case BLKIF_OP_FLUSH_DISKCACHE: case BLKIF_OP_WRITE_BARRIER: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { - printk(KERN_WARNING "blkfront: %s: write barrier op failed\n", + printk(KERN_WARNING "blkfront: %s: write %s op failed\n", + info->flush_op == BLKIF_OP_WRITE_BARRIER ? + "barrier" : "flush disk cache", info->gd->disk_name); error = -EOPNOTSUPP; } if (unlikely(bret->status == BLKIF_RSP_ERROR && info->shadow[id].req.nr_segments == 0)) { - printk(KERN_WARNING "blkfront: %s: empty write barrier op failed\n", + printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n", + info->flush_op == BLKIF_OP_WRITE_BARRIER ? 
+ "barrier" : "flush disk cache", info->gd->disk_name); error = -EOPNOTSUPP; } @@ -736,6 +743,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) if (error == -EOPNOTSUPP) error = 0; info->feature_flush = 0; + info->flush_op = 0; xlvbd_flush(info); } /* fall through */ @@ -1100,7 +1108,7 @@ static void blkfront_connect(struct blkfront_info *info) unsigned long sector_size; unsigned int binfo; int err; - int barrier; + int barrier, flush; switch (info->connected) { case BLKIF_STATE_CONNECTED: @@ -1140,8 +1148,11 @@ static void blkfront_connect(struct blkfront_info *info) return; } + info->feature_flush = 0; + info->flush_op = 0; + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "feature-barrier", "%lu", &barrier, + "feature-barrier", "%d", &barrier, NULL); /* @@ -1151,11 +1162,23 @@ static void blkfront_connect(struct blkfront_info *info) * * If there are barriers, then we use flush. */ - info->feature_flush = 0; - - if (!err && barrier) + if (!err && barrier) { info->feature_flush = REQ_FLUSH | REQ_FUA; + info->flush_op = BLKIF_OP_WRITE_BARRIER; + } + /* + * And if there is "feature-flush-cache" use that above + * barriers. + */ + err = xenbus_gather(XBT_NIL, info->xbdev->otherend, + "feature-flush-cache", "%d", &flush, + NULL); + if (!err && flush) { + info->feature_flush = REQ_FLUSH; + info->flush_op = BLKIF_OP_FLUSH_DISKCACHE; + } + err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size); if (err) { xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s", diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c index e427fbe4599..ae15a4ddaa9 100644 --- a/drivers/cdrom/viocd.c +++ b/drivers/cdrom/viocd.c @@ -625,7 +625,9 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id) blk_queue_max_hw_sectors(q, 4096 / 512); gendisk->queue = q; gendisk->fops = &viocd_fops; - gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE; + gendisk->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE | + GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE; + gendisk->events = DISK_EVENT_MEDIA_CHANGE; set_capacity(gendisk, 0); gendisk->private_data = d; d->viocd_disk = gendisk; diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c index d72433f2d31..6e40072fbf6 100644 --- a/drivers/char/i8k.c +++ b/drivers/char/i8k.c @@ -5,6 +5,9 @@ * * Copyright (C) 2001 Massimo Dal Zotto <dz@debian.org> * + * Hwmon integration: + * Copyright (C) 2011 Jean Delvare <khali@linux-fr.org> + * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any @@ -24,6 +27,8 @@ #include <linux/dmi.h> #include <linux/capability.h> #include <linux/mutex.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> #include <asm/uaccess.h> #include <asm/io.h> @@ -58,6 +63,7 @@ static DEFINE_MUTEX(i8k_mutex); static char bios_version[4]; +static struct device *i8k_hwmon_dev; MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)"); MODULE_DESCRIPTION("Driver for accessing SMM BIOS on Dell laptops"); @@ -139,8 +145,8 @@ static int i8k_smm(struct smm_regs *regs) "movl %%edi,20(%%rax)\n\t" "popq %%rdx\n\t" "movl %%edx,0(%%rax)\n\t" - "lahf\n\t" - "shrl $8,%%eax\n\t" + "pushfq\n\t" + "popq %%rax\n\t" "andl $1,%%eax\n" :"=a"(rc) : "a"(regs) @@ -455,6 +461,152 @@ static int i8k_open_fs(struct inode *inode, struct file *file) return single_open(file, i8k_proc_show, NULL); } + +/* + * Hwmon interface + */ + +static ssize_t i8k_hwmon_show_temp(struct device *dev, + struct device_attribute 
*devattr, + char *buf) +{ + int cpu_temp; + + cpu_temp = i8k_get_temp(0); + if (cpu_temp < 0) + return cpu_temp; + return sprintf(buf, "%d\n", cpu_temp * 1000); +} + +static ssize_t i8k_hwmon_show_fan(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + int index = to_sensor_dev_attr(devattr)->index; + int fan_speed; + + fan_speed = i8k_get_fan_speed(index); + if (fan_speed < 0) + return fan_speed; + return sprintf(buf, "%d\n", fan_speed); +} + +static ssize_t i8k_hwmon_show_label(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + static const char *labels[4] = { + "i8k", + "CPU", + "Left Fan", + "Right Fan", + }; + int index = to_sensor_dev_attr(devattr)->index; + + return sprintf(buf, "%s\n", labels[index]); +} + +static DEVICE_ATTR(temp1_input, S_IRUGO, i8k_hwmon_show_temp, NULL); +static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, i8k_hwmon_show_fan, NULL, + I8K_FAN_LEFT); +static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, i8k_hwmon_show_fan, NULL, + I8K_FAN_RIGHT); +static SENSOR_DEVICE_ATTR(name, S_IRUGO, i8k_hwmon_show_label, NULL, 0); +static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, i8k_hwmon_show_label, NULL, 1); +static SENSOR_DEVICE_ATTR(fan1_label, S_IRUGO, i8k_hwmon_show_label, NULL, 2); +static SENSOR_DEVICE_ATTR(fan2_label, S_IRUGO, i8k_hwmon_show_label, NULL, 3); + +static void i8k_hwmon_remove_files(struct device *dev) +{ + device_remove_file(dev, &dev_attr_temp1_input); + device_remove_file(dev, &sensor_dev_attr_fan1_input.dev_attr); + device_remove_file(dev, &sensor_dev_attr_fan2_input.dev_attr); + device_remove_file(dev, &sensor_dev_attr_temp1_label.dev_attr); + device_remove_file(dev, &sensor_dev_attr_fan1_label.dev_attr); + device_remove_file(dev, &sensor_dev_attr_fan2_label.dev_attr); + device_remove_file(dev, &sensor_dev_attr_name.dev_attr); +} + +static int __init i8k_init_hwmon(void) +{ + int err; + + i8k_hwmon_dev = hwmon_device_register(NULL); + if (IS_ERR(i8k_hwmon_dev)) { + err = PTR_ERR(i8k_hwmon_dev); + i8k_hwmon_dev = NULL; + printk(KERN_ERR "i8k: hwmon registration failed (%d)\n", err); + return err; + } + + /* Required name attribute */ + err = device_create_file(i8k_hwmon_dev, + &sensor_dev_attr_name.dev_attr); + if (err) + goto exit_unregister; + + /* CPU temperature attributes, if temperature reading is OK */ + err = i8k_get_temp(0); + if (err < 0) { + dev_dbg(i8k_hwmon_dev, + "Not creating temperature attributes (%d)\n", err); + } else { + err = device_create_file(i8k_hwmon_dev, &dev_attr_temp1_input); + if (err) + goto exit_remove_files; + err = device_create_file(i8k_hwmon_dev, + &sensor_dev_attr_temp1_label.dev_attr); + if (err) + goto exit_remove_files; + } + + /* Left fan attributes, if left fan is present */ + err = i8k_get_fan_status(I8K_FAN_LEFT); + if (err < 0) { + dev_dbg(i8k_hwmon_dev, + "Not creating %s fan attributes (%d)\n", "left", err); + } else { + err = device_create_file(i8k_hwmon_dev, + &sensor_dev_attr_fan1_input.dev_attr); + if (err) + goto exit_remove_files; + err = device_create_file(i8k_hwmon_dev, + &sensor_dev_attr_fan1_label.dev_attr); + if (err) + goto exit_remove_files; + } + + /* Right fan attributes, if right fan is present */ + err = i8k_get_fan_status(I8K_FAN_RIGHT); + if (err < 0) { + dev_dbg(i8k_hwmon_dev, + "Not creating %s fan attributes (%d)\n", "right", err); + } else { + err = device_create_file(i8k_hwmon_dev, + &sensor_dev_attr_fan2_input.dev_attr); + if (err) + goto exit_remove_files; + err = device_create_file(i8k_hwmon_dev, + &sensor_dev_attr_fan2_label.dev_attr); + if 
(err) + goto exit_remove_files; + } + + return 0; + + exit_remove_files: + i8k_hwmon_remove_files(i8k_hwmon_dev); + exit_unregister: + hwmon_device_unregister(i8k_hwmon_dev); + return err; +} + +static void __exit i8k_exit_hwmon(void) +{ + i8k_hwmon_remove_files(i8k_hwmon_dev); + hwmon_device_unregister(i8k_hwmon_dev); +} + static struct dmi_system_id __initdata i8k_dmi_table[] = { { .ident = "Dell Inspiron", @@ -580,6 +732,7 @@ static int __init i8k_probe(void) static int __init i8k_init(void) { struct proc_dir_entry *proc_i8k; + int err; /* Are we running on an supported laptop? */ if (i8k_probe()) @@ -590,15 +743,24 @@ static int __init i8k_init(void) if (!proc_i8k) return -ENOENT; + err = i8k_init_hwmon(); + if (err) + goto exit_remove_proc; + printk(KERN_INFO "Dell laptop SMM driver v%s Massimo Dal Zotto (dz@debian.org)\n", I8K_VERSION); return 0; + + exit_remove_proc: + remove_proc_entry("i8k", NULL); + return err; } static void __exit i8k_exit(void) { + i8k_exit_hwmon(); remove_proc_entry("i8k", NULL); } diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index c7f1a6f16b6..e2fc2d21fa6 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -39,3 +39,5 @@ obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o ##################################################################################d +# ARM SoC drivers +obj-$(CONFIG_UX500_SOC_DB8500) += db8500-cpufreq.o diff --git a/drivers/cpufreq/db8500-cpufreq.c b/drivers/cpufreq/db8500-cpufreq.c new file mode 100644 index 00000000000..d90456a809f --- /dev/null +++ b/drivers/cpufreq/db8500-cpufreq.c @@ -0,0 +1,169 @@ +/* + * Copyright (C) STMicroelectronics 2009 + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License v2 + * Author: Sundar Iyer <sundar.iyer@stericsson.com> + * Author: Martin Persson <martin.persson@stericsson.com> + * Author: Jonas Aaberg <jonas.aberg@stericsson.com> + * + */ +#include <linux/kernel.h> +#include <linux/cpufreq.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/mfd/db8500-prcmu.h> +#include <mach/id.h> + +static struct cpufreq_frequency_table freq_table[] = { + [0] = { + .index = 0, + .frequency = 300000, + }, + [1] = { + .index = 1, + .frequency = 600000, + }, + [2] = { + /* Used for MAX_OPP, if available */ + .index = 2, + .frequency = CPUFREQ_TABLE_END, + }, + [3] = { + .index = 3, + .frequency = CPUFREQ_TABLE_END, + }, +}; + +static enum arm_opp idx2opp[] = { + ARM_50_OPP, + ARM_100_OPP, + ARM_MAX_OPP +}; + +static struct freq_attr *db8500_cpufreq_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, +}; + +static int db8500_cpufreq_verify_speed(struct cpufreq_policy *policy) +{ + return cpufreq_frequency_table_verify(policy, freq_table); +} + +static int db8500_cpufreq_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + struct cpufreq_freqs freqs; + unsigned int idx; + + /* scale the target frequency to one of the extremes supported */ + if (target_freq < policy->cpuinfo.min_freq) + target_freq = policy->cpuinfo.min_freq; + if (target_freq > policy->cpuinfo.max_freq) + target_freq = policy->cpuinfo.max_freq; + + /* Lookup the next frequency */ + if (cpufreq_frequency_table_target + (policy, freq_table, target_freq, relation, &idx)) { + return -EINVAL; + } + + freqs.old = policy->cur; + freqs.new = freq_table[idx].frequency; + freqs.cpu = policy->cpu; + + if (freqs.old == freqs.new) + return 0; + + /* pre-change notification */ + cpufreq_notify_transition(&freqs, 
CPUFREQ_PRECHANGE); + + /* request the PRCM unit for opp change */ + if (prcmu_set_arm_opp(idx2opp[idx])) { + pr_err("db8500-cpufreq: Failed to set OPP level\n"); + return -EINVAL; + } + + /* post change notification */ + cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); + + return 0; +} + +static unsigned int db8500_cpufreq_getspeed(unsigned int cpu) +{ + int i; + /* request the prcm to get the current ARM opp */ + for (i = 0; prcmu_get_arm_opp() != idx2opp[i]; i++) + ; + return freq_table[i].frequency; +} + +static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy) +{ + int res; + int i; + + BUILD_BUG_ON(ARRAY_SIZE(idx2opp) + 1 != ARRAY_SIZE(freq_table)); + + if (cpu_is_u8500v2() && !prcmu_is_u8400()) { + freq_table[0].frequency = 400000; + freq_table[1].frequency = 800000; + if (prcmu_has_arm_maxopp()) + freq_table[2].frequency = 1000000; + } + + /* get policy fields based on the table */ + res = cpufreq_frequency_table_cpuinfo(policy, freq_table); + if (!res) + cpufreq_frequency_table_get_attr(freq_table, policy->cpu); + else { + pr_err("db8500-cpufreq : Failed to read policy table\n"); + return res; + } + + policy->min = policy->cpuinfo.min_freq; + policy->max = policy->cpuinfo.max_freq; + policy->cur = db8500_cpufreq_getspeed(policy->cpu); + + for (i = 0; freq_table[i].frequency != policy->cur; i++) + ; + + policy->governor = CPUFREQ_DEFAULT_GOVERNOR; + + /* + * FIXME : Need to take time measurement across the target() + * function with no/some/all drivers in the notification + * list. + */ + policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */ + + /* policy sharing between dual CPUs */ + cpumask_copy(policy->cpus, &cpu_present_map); + + policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; + + return 0; +} + +static struct cpufreq_driver db8500_cpufreq_driver = { + .flags = CPUFREQ_STICKY, + .verify = db8500_cpufreq_verify_speed, + .target = db8500_cpufreq_target, + .get = db8500_cpufreq_getspeed, + .init = db8500_cpufreq_init, + .name = "DB8500", + .attr = db8500_cpufreq_attr, +}; + +static int __init db8500_cpufreq_register(void) +{ + if (!cpu_is_u8500v20_or_later()) + return -ENODEV; + + pr_info("cpufreq for DB8500 started\n"); + return cpufreq_register_driver(&db8500_cpufreq_driver); +} +device_initcall(db8500_cpufreq_register); diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index dcc1b2139ff..636e40925b1 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c @@ -213,12 +213,17 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) struct sh_dmae_device, common); struct sh_dmae_pdata *pdata = shdev->pdata; const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id]; - u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16); + u16 __iomem *addr = shdev->dmars; int shift = chan_pdata->dmars_bit; if (dmae_is_busy(sh_chan)) return -EBUSY; + /* in the case of a missing DMARS resource use first memory window */ + if (!addr) + addr = (u16 __iomem *)shdev->chan_reg; + addr += chan_pdata->dmars / sizeof(u16); + __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift), addr); @@ -1078,7 +1083,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev) unsigned long irqflags = IRQF_DISABLED, chan_flag[SH_DMAC_MAX_CHANNELS] = {}; int errirq, chan_irq[SH_DMAC_MAX_CHANNELS]; - int err, i, irq_cnt = 0, irqres = 0; + int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0; struct sh_dmae_device *shdev; struct resource *chan, *dmars, *errirq_res, *chanirq_res; @@ -1087,7 +1092,7 @@ static int __init 
sh_dmae_probe(struct platform_device *pdev) return -ENODEV; chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); - /* DMARS area is optional, if absent, this controller cannot do slave DMA */ + /* DMARS area is optional */ dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1); /* * IRQ resources: @@ -1154,7 +1159,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev) INIT_LIST_HEAD(&shdev->common.channels); dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); - if (dmars) + if (pdata->slave && pdata->slave_num) dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); shdev->common.device_alloc_chan_resources @@ -1203,8 +1208,13 @@ static int __init sh_dmae_probe(struct platform_device *pdev) !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) { /* Special case - all multiplexed */ for (; irq_cnt < pdata->channel_num; irq_cnt++) { - chan_irq[irq_cnt] = chanirq_res->start; - chan_flag[irq_cnt] = IRQF_SHARED; + if (irq_cnt < SH_DMAC_MAX_CHANNELS) { + chan_irq[irq_cnt] = chanirq_res->start; + chan_flag[irq_cnt] = IRQF_SHARED; + } else { + irq_cap = 1; + break; + } } } else { do { @@ -1218,22 +1228,32 @@ static int __init sh_dmae_probe(struct platform_device *pdev) "Found IRQ %d for channel %d\n", i, irq_cnt); chan_irq[irq_cnt++] = i; + + if (irq_cnt >= SH_DMAC_MAX_CHANNELS) + break; + } + + if (irq_cnt >= SH_DMAC_MAX_CHANNELS) { + irq_cap = 1; + break; } chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, ++irqres); } while (irq_cnt < pdata->channel_num && chanirq_res); } - if (irq_cnt < pdata->channel_num) - goto eirqres; - /* Create DMA Channel */ - for (i = 0; i < pdata->channel_num; i++) { + for (i = 0; i < irq_cnt; i++) { err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]); if (err) goto chan_probe_err; } + if (irq_cap) + dev_notice(&pdev->dev, "Attempting to register %d DMA " + "channels when a maximum of %d are supported.\n", + pdata->channel_num, SH_DMAC_MAX_CHANNELS); + pm_runtime_put(&pdev->dev); platform_set_drvdata(pdev, shdev); @@ -1243,7 +1263,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev) chan_probe_err: sh_dmae_chan_remove(shdev); -eirqres: + #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) free_irq(errirq, shdev); eirq_err: diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h index 3f9d3cd0658..5ae9fc51218 100644 --- a/drivers/dma/shdma.h +++ b/drivers/dma/shdma.h @@ -17,7 +17,7 @@ #include <linux/interrupt.h> #include <linux/list.h> -#define SH_DMAC_MAX_CHANNELS 6 +#define SH_DMAC_MAX_CHANNELS 20 #define SH_DMA_SLAVE_NUMBER 256 #define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */ diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c index d41f9002da4..aa08497a075 100644 --- a/drivers/edac/i3200_edac.c +++ b/drivers/edac/i3200_edac.c @@ -101,6 +101,19 @@ struct i3200_priv { static int nr_channels; +#ifndef readq +static inline __u64 readq(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + low = readl(p); + high = readl(p + 1); + + return low + ((u64)high << 32); +} +#endif + static int how_many_channels(struct pci_dev *pdev) { unsigned char capid0_8b; /* 8th byte of CAPID0 */ diff --git a/drivers/gpio/ml_ioh_gpio.c b/drivers/gpio/ml_ioh_gpio.c index 0a775f7987c..1bc621ac353 100644 --- a/drivers/gpio/ml_ioh_gpio.c +++ b/drivers/gpio/ml_ioh_gpio.c @@ -15,6 +15,7 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 
*/ #include <linux/kernel.h> +#include <linux/slab.h> #include <linux/pci.h> #include <linux/gpio.h> @@ -138,6 +139,7 @@ static int ioh_gpio_direction_input(struct gpio_chip *gpio, unsigned nr) return 0; } +#ifdef CONFIG_PM /* * Save register configuration and disable interrupts. */ @@ -157,6 +159,7 @@ static void ioh_gpio_restore_reg_conf(struct ioh_gpio *chip) /* to store contents of PM register */ iowrite32(chip->ioh_gpio_reg.pm_reg, &chip->reg->regs[chip->ch].pm); } +#endif static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port) { diff --git a/drivers/gpio/vx855_gpio.c b/drivers/gpio/vx855_gpio.c index 8a98ee5d5f6..ef5aabd8b8b 100644 --- a/drivers/gpio/vx855_gpio.c +++ b/drivers/gpio/vx855_gpio.c @@ -26,6 +26,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/gpio.h> +#include <linux/slab.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/pci.h> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c6289034e29..0b2e167d2bc 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -56,9 +56,7 @@ static int i915_gem_phys_pwrite(struct drm_device *dev, static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj); static int i915_gem_inactive_shrink(struct shrinker *shrinker, - int nr_to_scan, - gfp_t gfp_mask); - + struct shrink_control *sc); /* some bookkeeping */ static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, @@ -4092,9 +4090,7 @@ i915_gpu_is_active(struct drm_device *dev) } static int -i915_gem_inactive_shrink(struct shrinker *shrinker, - int nr_to_scan, - gfp_t gfp_mask) +i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) { struct drm_i915_private *dev_priv = container_of(shrinker, @@ -4102,6 +4098,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, mm.inactive_shrinker); struct drm_device *dev = dev_priv->dev; struct drm_i915_gem_object *obj, *next; + int nr_to_scan = sc->nr_to_scan; int cnt; if (!mutex_trylock(&dev->struct_mutex)) diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 9d9d92945f8..d948575717b 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -395,12 +395,14 @@ static int ttm_pool_get_num_unused_pages(void) /** * Callback for mm to request pool to reduce number of page held. */ -static int ttm_pool_mm_shrink(struct shrinker *shrink, int shrink_pages, gfp_t gfp_mask) +static int ttm_pool_mm_shrink(struct shrinker *shrink, + struct shrink_control *sc) { static atomic_t start_pool = ATOMIC_INIT(0); unsigned i; unsigned pool_offset = atomic_add_return(1, &start_pool); struct ttm_page_pool *pool; + int shrink_pages = sc->nr_to_scan; pool_offset = pool_offset % NUM_POOLS; /* select start pool in round robin fashion */ diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 43221beb9e9..16db83c83c8 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -41,7 +41,7 @@ comment "Native drivers" config SENSORS_ABITUGURU tristate "Abit uGuru (rev 1 & 2)" - depends on X86 && EXPERIMENTAL + depends on X86 && DMI && EXPERIMENTAL help If you say yes here you get support for the sensor part of the first and second revision of the Abit uGuru chip. 
The voltage and frequency @@ -56,7 +56,7 @@ config SENSORS_ABITUGURU config SENSORS_ABITUGURU3 tristate "Abit uGuru (rev 3)" - depends on X86 && EXPERIMENTAL + depends on X86 && DMI && EXPERIMENTAL help If you say yes here you get support for the sensor part of the third revision of the Abit uGuru chip. Only reading the sensors @@ -213,7 +213,7 @@ config SENSORS_ADT7475 config SENSORS_ASC7621 tristate "Andigilog aSC7621" - depends on HWMON && I2C + depends on I2C help If you say yes here you get support for the aSC7621 family of SMBus sensors chip found on most Intel X38, X48, X58, @@ -237,17 +237,27 @@ config SENSORS_K8TEMP will be called k8temp. config SENSORS_K10TEMP - tristate "AMD Family 10h/11h/12h/14h temperature sensor" + tristate "AMD Family 10h+ temperature sensor" depends on X86 && PCI help If you say yes here you get support for the temperature sensor(s) inside your CPU. Supported are later revisions of the AMD Family 10h and all revisions of the AMD Family 11h, - 12h (Llano), and 14h (Brazos) microarchitectures. + 12h (Llano), 14h (Brazos) and 15h (Bulldozer) microarchitectures. This driver can also be built as a module. If so, the module will be called k10temp. +config SENSORS_FAM15H_POWER + tristate "AMD Family 15h processor power" + depends on X86 && PCI + help + If you say yes here you get support for processor power + information of your AMD family 15h CPU. + + This driver can also be built as a module. If so, the module + will be called fam15h_power. + config SENSORS_ASB100 tristate "Asus ASB100 Bach" depends on X86 && I2C && EXPERIMENTAL @@ -319,7 +329,7 @@ config SENSORS_F71882FG If you say yes here you get support for hardware monitoring features of many Fintek Super-I/O (LPC) chips. The currently supported chips are: - F71808E + F71808E/A F71858FG F71862FG F71863FG @@ -978,6 +988,16 @@ config SENSORS_EMC2103 This driver can also be built as a module. If so, the module will be called emc2103. +config SENSORS_EMC6W201 + tristate "SMSC EMC6W201" + depends on I2C + help + If you say yes here you get support for the SMSC EMC6W201 + hardware monitoring chip. + + This driver can also be built as a module. If so, the module + will be called emc6w201. + config SENSORS_SMSC47M1 tristate "SMSC LPC47M10x and compatibles" help @@ -1341,6 +1361,16 @@ if ACPI comment "ACPI drivers" +config SENSORS_ACPI_POWER + tristate "ACPI 4.0 power meter" + help + This driver exposes ACPI 4.0 power meters as hardware monitoring + devices. Say Y (or M) if you have a computer with ACPI 4.0 firmware + and a power meter. + + To compile this driver as a module, choose M here: + the module will be called acpi_power_meter. 
+ config SENSORS_ATK0110 tristate "ASUS ATK0110" depends on X86 && EXPERIMENTAL diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 28e8d52f637..28061cfa0cd 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_HWMON) += hwmon.o obj-$(CONFIG_HWMON_VID) += hwmon-vid.o # APCI drivers +obj-$(CONFIG_SENSORS_ACPI_POWER) += acpi_power_meter.o obj-$(CONFIG_SENSORS_ATK0110) += asus_atk0110.o # Native drivers @@ -45,9 +46,11 @@ obj-$(CONFIG_SENSORS_DS620) += ds620.o obj-$(CONFIG_SENSORS_DS1621) += ds1621.o obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o obj-$(CONFIG_SENSORS_EMC2103) += emc2103.o +obj-$(CONFIG_SENSORS_EMC6W201) += emc6w201.o obj-$(CONFIG_SENSORS_F71805F) += f71805f.o obj-$(CONFIG_SENSORS_F71882FG) += f71882fg.o obj-$(CONFIG_SENSORS_F75375S) += f75375s.o +obj-$(CONFIG_SENSORS_FAM15H_POWER) += fam15h_power.o obj-$(CONFIG_SENSORS_FSCHMD) += fschmd.o obj-$(CONFIG_SENSORS_G760A) += g760a.o obj-$(CONFIG_SENSORS_GL518SM) += gl518sm.o diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c index e7d4c4687f0..65a35cf5b3c 100644 --- a/drivers/hwmon/abituguru.c +++ b/drivers/hwmon/abituguru.c @@ -1448,15 +1448,12 @@ static int __init abituguru_init(void) { int address, err; struct resource res = { .flags = IORESOURCE_IO }; - -#ifdef CONFIG_DMI const char *board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR); /* safety check, refuse to load on non Abit motherboards */ if (!force && (!board_vendor || strcmp(board_vendor, "http://www.abit.com.tw/"))) return -ENODEV; -#endif address = abituguru_detect(); if (address < 0) diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c index e89d572e332..d30855a7578 100644 --- a/drivers/hwmon/abituguru3.c +++ b/drivers/hwmon/abituguru3.c @@ -1119,8 +1119,6 @@ static struct platform_driver abituguru3_driver = { .resume = abituguru3_resume }; -#ifdef CONFIG_DMI - static int __init abituguru3_dmi_detect(void) { const char *board_vendor, *board_name; @@ -1159,15 +1157,6 @@ static int __init abituguru3_dmi_detect(void) return 1; } -#else /* !CONFIG_DMI */ - -static inline int abituguru3_dmi_detect(void) -{ - return 1; -} - -#endif /* CONFIG_DMI */ - /* FIXME: Manual detection should die eventually; we need to collect stable * DMI model names first before we can rely entirely on CONFIG_DMI. */ @@ -1216,10 +1205,8 @@ static int __init abituguru3_init(void) if (err) return err; -#ifdef CONFIG_DMI pr_warn("this motherboard was not detected using DMI. 
" "Please send the output of \"dmidecode\" to the abituguru3 maintainer (see MAINTAINERS)\n"); -#endif } err = platform_driver_register(&abituguru3_driver); diff --git a/drivers/acpi/power_meter.c b/drivers/hwmon/acpi_power_meter.c index 66f67293341..66f67293341 100644 --- a/drivers/acpi/power_meter.c +++ b/drivers/hwmon/acpi_power_meter.c diff --git a/drivers/hwmon/adcxx.c b/drivers/hwmon/adcxx.c index fbdc7655303..b2cacbe707a 100644 --- a/drivers/hwmon/adcxx.c +++ b/drivers/hwmon/adcxx.c @@ -62,7 +62,7 @@ static ssize_t adcxx_read(struct device *dev, { struct spi_device *spi = to_spi_device(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); - struct adcxx *adc = dev_get_drvdata(&spi->dev); + struct adcxx *adc = spi_get_drvdata(spi); u8 tx_buf[2]; u8 rx_buf[2]; int status; @@ -105,7 +105,7 @@ static ssize_t adcxx_show_max(struct device *dev, struct device_attribute *devattr, char *buf) { struct spi_device *spi = to_spi_device(dev); - struct adcxx *adc = dev_get_drvdata(&spi->dev); + struct adcxx *adc = spi_get_drvdata(spi); u32 reference; if (mutex_lock_interruptible(&adc->lock)) @@ -122,7 +122,7 @@ static ssize_t adcxx_set_max(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct spi_device *spi = to_spi_device(dev); - struct adcxx *adc = dev_get_drvdata(&spi->dev); + struct adcxx *adc = spi_get_drvdata(spi); unsigned long value; if (strict_strtoul(buf, 10, &value)) @@ -142,7 +142,7 @@ static ssize_t adcxx_show_name(struct device *dev, struct device_attribute *devattr, char *buf) { struct spi_device *spi = to_spi_device(dev); - struct adcxx *adc = dev_get_drvdata(&spi->dev); + struct adcxx *adc = spi_get_drvdata(spi); return sprintf(buf, "adcxx%ds\n", adc->channels); } @@ -182,7 +182,7 @@ static int __devinit adcxx_probe(struct spi_device *spi) mutex_lock(&adc->lock); - dev_set_drvdata(&spi->dev, adc); + spi_set_drvdata(spi, adc); for (i = 0; i < 3 + adc->channels; i++) { status = device_create_file(&spi->dev, &ad_input[i].dev_attr); @@ -206,7 +206,7 @@ out_err: for (i--; i >= 0; i--) device_remove_file(&spi->dev, &ad_input[i].dev_attr); - dev_set_drvdata(&spi->dev, NULL); + spi_set_drvdata(spi, NULL); mutex_unlock(&adc->lock); kfree(adc); return status; @@ -214,7 +214,7 @@ out_err: static int __devexit adcxx_remove(struct spi_device *spi) { - struct adcxx *adc = dev_get_drvdata(&spi->dev); + struct adcxx *adc = spi_get_drvdata(spi); int i; mutex_lock(&adc->lock); @@ -222,7 +222,7 @@ static int __devexit adcxx_remove(struct spi_device *spi) for (i = 0; i < 3 + adc->channels; i++) device_remove_file(&spi->dev, &ad_input[i].dev_attr); - dev_set_drvdata(&spi->dev, NULL); + spi_set_drvdata(spi, NULL); mutex_unlock(&adc->lock); kfree(adc); diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c new file mode 100644 index 00000000000..e0ef32378ac --- /dev/null +++ b/drivers/hwmon/emc6w201.c @@ -0,0 +1,539 @@ +/* + * emc6w201.c - Hardware monitoring driver for the SMSC EMC6W201 + * Copyright (C) 2011 Jean Delvare <khali@linux-fr.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/jiffies.h> +#include <linux/i2c.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> +#include <linux/err.h> +#include <linux/mutex.h> + +/* + * Addresses to scan + */ + +static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; + +/* + * The EMC6W201 registers + */ + +#define EMC6W201_REG_IN(nr) (0x20 + (nr)) +#define EMC6W201_REG_TEMP(nr) (0x26 + (nr)) +#define EMC6W201_REG_FAN(nr) (0x2C + (nr) * 2) +#define EMC6W201_REG_COMPANY 0x3E +#define EMC6W201_REG_VERSTEP 0x3F +#define EMC6W201_REG_CONFIG 0x40 +#define EMC6W201_REG_IN_LOW(nr) (0x4A + (nr) * 2) +#define EMC6W201_REG_IN_HIGH(nr) (0x4B + (nr) * 2) +#define EMC6W201_REG_TEMP_LOW(nr) (0x56 + (nr) * 2) +#define EMC6W201_REG_TEMP_HIGH(nr) (0x57 + (nr) * 2) +#define EMC6W201_REG_FAN_MIN(nr) (0x62 + (nr) * 2) + +enum { input, min, max } subfeature; + +/* + * Per-device data + */ + +struct emc6w201_data { + struct device *hwmon_dev; + struct mutex update_lock; + char valid; /* zero until following fields are valid */ + unsigned long last_updated; /* in jiffies */ + + /* registers values */ + u8 in[3][6]; + s8 temp[3][6]; + u16 fan[2][5]; +}; + +/* + * Combine LSB and MSB registers in a single value + * Locking: must be called with data->update_lock held + */ +static u16 emc6w201_read16(struct i2c_client *client, u8 reg) +{ + int lsb, msb; + + lsb = i2c_smbus_read_byte_data(client, reg); + msb = i2c_smbus_read_byte_data(client, reg + 1); + if (lsb < 0 || msb < 0) { + dev_err(&client->dev, "16-bit read failed at 0x%02x\n", reg); + return 0xFFFF; /* Arbitrary value */ + } + + return (msb << 8) | lsb; +} + +/* + * Write 16-bit value to LSB and MSB registers + * Locking: must be called with data->update_lock held + */ +static int emc6w201_write16(struct i2c_client *client, u8 reg, u16 val) +{ + int err; + + err = i2c_smbus_write_byte_data(client, reg, val & 0xff); + if (!err) + err = i2c_smbus_write_byte_data(client, reg + 1, val >> 8); + if (err < 0) + dev_err(&client->dev, "16-bit write failed at 0x%02x\n", reg); + + return err; +} + +static struct emc6w201_data *emc6w201_update_device(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct emc6w201_data *data = i2c_get_clientdata(client); + int nr; + + mutex_lock(&data->update_lock); + + if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { + for (nr = 0; nr < 6; nr++) { + data->in[input][nr] = + i2c_smbus_read_byte_data(client, + EMC6W201_REG_IN(nr)); + data->in[min][nr] = + i2c_smbus_read_byte_data(client, + EMC6W201_REG_IN_LOW(nr)); + data->in[max][nr] = + i2c_smbus_read_byte_data(client, + EMC6W201_REG_IN_HIGH(nr)); + } + + for (nr = 0; nr < 6; nr++) { + data->temp[input][nr] = + i2c_smbus_read_byte_data(client, + EMC6W201_REG_TEMP(nr)); + data->temp[min][nr] = + i2c_smbus_read_byte_data(client, + EMC6W201_REG_TEMP_LOW(nr)); + data->temp[max][nr] = + i2c_smbus_read_byte_data(client, + EMC6W201_REG_TEMP_HIGH(nr)); + } + + for (nr = 0; nr < 5; nr++) { + data->fan[input][nr] = + emc6w201_read16(client, + EMC6W201_REG_FAN(nr)); + data->fan[min][nr] = + emc6w201_read16(client, + EMC6W201_REG_FAN_MIN(nr)); + } + + data->last_updated = jiffies; + 
data->valid = 1; + } + + mutex_unlock(&data->update_lock); + + return data; +} + +/* + * Sysfs callback functions + */ + +static const u16 nominal_mv[6] = { 2500, 1500, 3300, 5000, 1500, 1500 }; + +static ssize_t show_in(struct device *dev, struct device_attribute *devattr, + char *buf) +{ + struct emc6w201_data *data = emc6w201_update_device(dev); + int sf = to_sensor_dev_attr_2(devattr)->index; + int nr = to_sensor_dev_attr_2(devattr)->nr; + + return sprintf(buf, "%u\n", + (unsigned)data->in[sf][nr] * nominal_mv[nr] / 0xC0); +} + +static ssize_t set_in(struct device *dev, struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct emc6w201_data *data = i2c_get_clientdata(client); + int sf = to_sensor_dev_attr_2(devattr)->index; + int nr = to_sensor_dev_attr_2(devattr)->nr; + int err; + long val; + u8 reg; + + err = strict_strtol(buf, 10, &val); + if (err < 0) + return err; + + val = DIV_ROUND_CLOSEST(val * 0xC0, nominal_mv[nr]); + reg = (sf == min) ? EMC6W201_REG_IN_LOW(nr) + : EMC6W201_REG_IN_HIGH(nr); + + mutex_lock(&data->update_lock); + data->in[sf][nr] = SENSORS_LIMIT(val, 0, 255); + err = i2c_smbus_write_byte_data(client, reg, data->in[sf][nr]); + mutex_unlock(&data->update_lock); + + return err < 0 ? err : count; +} + +static ssize_t show_temp(struct device *dev, struct device_attribute *devattr, + char *buf) +{ + struct emc6w201_data *data = emc6w201_update_device(dev); + int sf = to_sensor_dev_attr_2(devattr)->index; + int nr = to_sensor_dev_attr_2(devattr)->nr; + + return sprintf(buf, "%d\n", (int)data->temp[sf][nr] * 1000); +} + +static ssize_t set_temp(struct device *dev, struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct emc6w201_data *data = i2c_get_clientdata(client); + int sf = to_sensor_dev_attr_2(devattr)->index; + int nr = to_sensor_dev_attr_2(devattr)->nr; + int err; + long val; + u8 reg; + + err = strict_strtol(buf, 10, &val); + if (err < 0) + return err; + + val /= 1000; + reg = (sf == min) ? EMC6W201_REG_TEMP_LOW(nr) + : EMC6W201_REG_TEMP_HIGH(nr); + + mutex_lock(&data->update_lock); + data->temp[sf][nr] = SENSORS_LIMIT(val, -127, 128); + err = i2c_smbus_write_byte_data(client, reg, data->temp[sf][nr]); + mutex_unlock(&data->update_lock); + + return err < 0 ? 
err : count; +} + +static ssize_t show_fan(struct device *dev, struct device_attribute *devattr, + char *buf) +{ + struct emc6w201_data *data = emc6w201_update_device(dev); + int sf = to_sensor_dev_attr_2(devattr)->index; + int nr = to_sensor_dev_attr_2(devattr)->nr; + unsigned rpm; + + if (data->fan[sf][nr] == 0 || data->fan[sf][nr] == 0xFFFF) + rpm = 0; + else + rpm = 5400000U / data->fan[sf][nr]; + + return sprintf(buf, "%u\n", rpm); +} + +static ssize_t set_fan(struct device *dev, struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct emc6w201_data *data = i2c_get_clientdata(client); + int sf = to_sensor_dev_attr_2(devattr)->index; + int nr = to_sensor_dev_attr_2(devattr)->nr; + int err; + unsigned long val; + + err = strict_strtoul(buf, 10, &val); + if (err < 0) + return err; + + if (val == 0) { + val = 0xFFFF; + } else { + val = DIV_ROUND_CLOSEST(5400000U, val); + val = SENSORS_LIMIT(val, 0, 0xFFFE); + } + + mutex_lock(&data->update_lock); + data->fan[sf][nr] = val; + err = emc6w201_write16(client, EMC6W201_REG_FAN_MIN(nr), + data->fan[sf][nr]); + mutex_unlock(&data->update_lock); + + return err < 0 ? err : count; +} + +static SENSOR_DEVICE_ATTR_2(in0_input, S_IRUGO, show_in, NULL, 0, input); +static SENSOR_DEVICE_ATTR_2(in0_min, S_IRUGO | S_IWUSR, show_in, set_in, + 0, min); +static SENSOR_DEVICE_ATTR_2(in0_max, S_IRUGO | S_IWUSR, show_in, set_in, + 0, max); +static SENSOR_DEVICE_ATTR_2(in1_input, S_IRUGO, show_in, NULL, 1, input); +static SENSOR_DEVICE_ATTR_2(in1_min, S_IRUGO | S_IWUSR, show_in, set_in, + 1, min); +static SENSOR_DEVICE_ATTR_2(in1_max, S_IRUGO | S_IWUSR, show_in, set_in, + 1, max); +static SENSOR_DEVICE_ATTR_2(in2_input, S_IRUGO, show_in, NULL, 2, input); +static SENSOR_DEVICE_ATTR_2(in2_min, S_IRUGO | S_IWUSR, show_in, set_in, + 2, min); +static SENSOR_DEVICE_ATTR_2(in2_max, S_IRUGO | S_IWUSR, show_in, set_in, + 2, max); +static SENSOR_DEVICE_ATTR_2(in3_input, S_IRUGO, show_in, NULL, 3, input); +static SENSOR_DEVICE_ATTR_2(in3_min, S_IRUGO | S_IWUSR, show_in, set_in, + 3, min); +static SENSOR_DEVICE_ATTR_2(in3_max, S_IRUGO | S_IWUSR, show_in, set_in, + 3, max); +static SENSOR_DEVICE_ATTR_2(in4_input, S_IRUGO, show_in, NULL, 4, input); +static SENSOR_DEVICE_ATTR_2(in4_min, S_IRUGO | S_IWUSR, show_in, set_in, + 4, min); +static SENSOR_DEVICE_ATTR_2(in4_max, S_IRUGO | S_IWUSR, show_in, set_in, + 4, max); +static SENSOR_DEVICE_ATTR_2(in5_input, S_IRUGO, show_in, NULL, 5, input); +static SENSOR_DEVICE_ATTR_2(in5_min, S_IRUGO | S_IWUSR, show_in, set_in, + 5, min); +static SENSOR_DEVICE_ATTR_2(in5_max, S_IRUGO | S_IWUSR, show_in, set_in, + 5, max); + +static SENSOR_DEVICE_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, input); +static SENSOR_DEVICE_ATTR_2(temp1_min, S_IRUGO | S_IWUSR, show_temp, set_temp, + 0, min); +static SENSOR_DEVICE_ATTR_2(temp1_max, S_IRUGO | S_IWUSR, show_temp, set_temp, + 0, max); +static SENSOR_DEVICE_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 1, input); +static SENSOR_DEVICE_ATTR_2(temp2_min, S_IRUGO | S_IWUSR, show_temp, set_temp, + 1, min); +static SENSOR_DEVICE_ATTR_2(temp2_max, S_IRUGO | S_IWUSR, show_temp, set_temp, + 1, max); +static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 2, input); +static SENSOR_DEVICE_ATTR_2(temp3_min, S_IRUGO | S_IWUSR, show_temp, set_temp, + 2, min); +static SENSOR_DEVICE_ATTR_2(temp3_max, S_IRUGO | S_IWUSR, show_temp, set_temp, + 2, max); +static SENSOR_DEVICE_ATTR_2(temp4_input, S_IRUGO, show_temp, NULL, 3, input); 
+static SENSOR_DEVICE_ATTR_2(temp4_min, S_IRUGO | S_IWUSR, show_temp, set_temp, + 3, min); +static SENSOR_DEVICE_ATTR_2(temp4_max, S_IRUGO | S_IWUSR, show_temp, set_temp, + 3, max); +static SENSOR_DEVICE_ATTR_2(temp5_input, S_IRUGO, show_temp, NULL, 4, input); +static SENSOR_DEVICE_ATTR_2(temp5_min, S_IRUGO | S_IWUSR, show_temp, set_temp, + 4, min); +static SENSOR_DEVICE_ATTR_2(temp5_max, S_IRUGO | S_IWUSR, show_temp, set_temp, + 4, max); +static SENSOR_DEVICE_ATTR_2(temp6_input, S_IRUGO, show_temp, NULL, 5, input); +static SENSOR_DEVICE_ATTR_2(temp6_min, S_IRUGO | S_IWUSR, show_temp, set_temp, + 5, min); +static SENSOR_DEVICE_ATTR_2(temp6_max, S_IRUGO | S_IWUSR, show_temp, set_temp, + 5, max); + +static SENSOR_DEVICE_ATTR_2(fan1_input, S_IRUGO, show_fan, NULL, 0, input); +static SENSOR_DEVICE_ATTR_2(fan1_min, S_IRUGO | S_IWUSR, show_fan, set_fan, + 0, min); +static SENSOR_DEVICE_ATTR_2(fan2_input, S_IRUGO, show_fan, NULL, 1, input); +static SENSOR_DEVICE_ATTR_2(fan2_min, S_IRUGO | S_IWUSR, show_fan, set_fan, + 1, min); +static SENSOR_DEVICE_ATTR_2(fan3_input, S_IRUGO, show_fan, NULL, 2, input); +static SENSOR_DEVICE_ATTR_2(fan3_min, S_IRUGO | S_IWUSR, show_fan, set_fan, + 2, min); +static SENSOR_DEVICE_ATTR_2(fan4_input, S_IRUGO, show_fan, NULL, 3, input); +static SENSOR_DEVICE_ATTR_2(fan4_min, S_IRUGO | S_IWUSR, show_fan, set_fan, + 3, min); +static SENSOR_DEVICE_ATTR_2(fan5_input, S_IRUGO, show_fan, NULL, 4, input); +static SENSOR_DEVICE_ATTR_2(fan5_min, S_IRUGO | S_IWUSR, show_fan, set_fan, + 4, min); + +static struct attribute *emc6w201_attributes[] = { + &sensor_dev_attr_in0_input.dev_attr.attr, + &sensor_dev_attr_in0_min.dev_attr.attr, + &sensor_dev_attr_in0_max.dev_attr.attr, + &sensor_dev_attr_in1_input.dev_attr.attr, + &sensor_dev_attr_in1_min.dev_attr.attr, + &sensor_dev_attr_in1_max.dev_attr.attr, + &sensor_dev_attr_in2_input.dev_attr.attr, + &sensor_dev_attr_in2_min.dev_attr.attr, + &sensor_dev_attr_in2_max.dev_attr.attr, + &sensor_dev_attr_in3_input.dev_attr.attr, + &sensor_dev_attr_in3_min.dev_attr.attr, + &sensor_dev_attr_in3_max.dev_attr.attr, + &sensor_dev_attr_in4_input.dev_attr.attr, + &sensor_dev_attr_in4_min.dev_attr.attr, + &sensor_dev_attr_in4_max.dev_attr.attr, + &sensor_dev_attr_in5_input.dev_attr.attr, + &sensor_dev_attr_in5_min.dev_attr.attr, + &sensor_dev_attr_in5_max.dev_attr.attr, + + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_min.dev_attr.attr, + &sensor_dev_attr_temp1_max.dev_attr.attr, + &sensor_dev_attr_temp2_input.dev_attr.attr, + &sensor_dev_attr_temp2_min.dev_attr.attr, + &sensor_dev_attr_temp2_max.dev_attr.attr, + &sensor_dev_attr_temp3_input.dev_attr.attr, + &sensor_dev_attr_temp3_min.dev_attr.attr, + &sensor_dev_attr_temp3_max.dev_attr.attr, + &sensor_dev_attr_temp4_input.dev_attr.attr, + &sensor_dev_attr_temp4_min.dev_attr.attr, + &sensor_dev_attr_temp4_max.dev_attr.attr, + &sensor_dev_attr_temp5_input.dev_attr.attr, + &sensor_dev_attr_temp5_min.dev_attr.attr, + &sensor_dev_attr_temp5_max.dev_attr.attr, + &sensor_dev_attr_temp6_input.dev_attr.attr, + &sensor_dev_attr_temp6_min.dev_attr.attr, + &sensor_dev_attr_temp6_max.dev_attr.attr, + + &sensor_dev_attr_fan1_input.dev_attr.attr, + &sensor_dev_attr_fan1_min.dev_attr.attr, + &sensor_dev_attr_fan2_input.dev_attr.attr, + &sensor_dev_attr_fan2_min.dev_attr.attr, + &sensor_dev_attr_fan3_input.dev_attr.attr, + &sensor_dev_attr_fan3_min.dev_attr.attr, + &sensor_dev_attr_fan4_input.dev_attr.attr, + &sensor_dev_attr_fan4_min.dev_attr.attr, + 
&sensor_dev_attr_fan5_input.dev_attr.attr, + &sensor_dev_attr_fan5_min.dev_attr.attr, + NULL +}; + +static const struct attribute_group emc6w201_group = { + .attrs = emc6w201_attributes, +}; + +/* + * Driver interface + */ + +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int emc6w201_detect(struct i2c_client *client, + struct i2c_board_info *info) +{ + struct i2c_adapter *adapter = client->adapter; + int company, verstep, config; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return -ENODEV; + + /* Identification */ + company = i2c_smbus_read_byte_data(client, EMC6W201_REG_COMPANY); + if (company != 0x5C) + return -ENODEV; + verstep = i2c_smbus_read_byte_data(client, EMC6W201_REG_VERSTEP); + if (verstep < 0 || (verstep & 0xF0) != 0xB0) + return -ENODEV; + if ((verstep & 0x0F) > 2) { + dev_dbg(&client->dev, "Unknown EMC6W201 stepping %d\n", + verstep & 0x0F); + return -ENODEV; + } + + /* Check configuration */ + config = i2c_smbus_read_byte_data(client, EMC6W201_REG_CONFIG); + if ((config & 0xF4) != 0x04) + return -ENODEV; + if (!(config & 0x01)) { + dev_err(&client->dev, "Monitoring not enabled\n"); + return -ENODEV; + } + + strlcpy(info->type, "emc6w201", I2C_NAME_SIZE); + + return 0; +} + +static int emc6w201_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct emc6w201_data *data; + int err; + + data = kzalloc(sizeof(struct emc6w201_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + + i2c_set_clientdata(client, data); + mutex_init(&data->update_lock); + + /* Create sysfs attributes */ + err = sysfs_create_group(&client->dev.kobj, &emc6w201_group); + if (err) + goto exit_free; + + /* Expose as a hwmon device */ + data->hwmon_dev = hwmon_device_register(&client->dev); + if (IS_ERR(data->hwmon_dev)) { + err = PTR_ERR(data->hwmon_dev); + goto exit_remove; + } + + return 0; + + exit_remove: + sysfs_remove_group(&client->dev.kobj, &emc6w201_group); + exit_free: + kfree(data); + exit: + return err; +} + +static int emc6w201_remove(struct i2c_client *client) +{ + struct emc6w201_data *data = i2c_get_clientdata(client); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&client->dev.kobj, &emc6w201_group); + kfree(data); + + return 0; +} + +static const struct i2c_device_id emc6w201_id[] = { + { "emc6w201", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, emc6w201_id); + +static struct i2c_driver emc6w201_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "emc6w201", + }, + .probe = emc6w201_probe, + .remove = emc6w201_remove, + .id_table = emc6w201_id, + .detect = emc6w201_detect, + .address_list = normal_i2c, +}; + +static int __init sensors_emc6w201_init(void) +{ + return i2c_add_driver(&emc6w201_driver); +} +module_init(sensors_emc6w201_init); + +static void __exit sensors_emc6w201_exit(void) +{ + i2c_del_driver(&emc6w201_driver); +} +module_exit(sensors_emc6w201_exit); + +MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>"); +MODULE_DESCRIPTION("SMSC EMC6W201 hardware monitoring driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c index ca07a32447c..a4a94a096c9 100644 --- a/drivers/hwmon/f71882fg.c +++ b/drivers/hwmon/f71882fg.c @@ -48,6 +48,7 @@ #define SIO_FINTEK_ID 0x1934 /* Manufacturers ID */ #define SIO_F71808E_ID 0x0901 /* Chipset ID */ +#define SIO_F71808A_ID 0x1001 /* Chipset ID */ #define SIO_F71858_ID 0x0507 /* Chipset ID */ #define SIO_F71862_ID 0x0601 /* Chipset ID */ #define SIO_F71869_ID 0x0814 /* Chipset ID */ @@ -107,11 
+108,12 @@ static unsigned short force_id; module_param(force_id, ushort, 0); MODULE_PARM_DESC(force_id, "Override the detected device ID"); -enum chips { f71808e, f71858fg, f71862fg, f71869, f71882fg, f71889fg, +enum chips { f71808e, f71808a, f71858fg, f71862fg, f71869, f71882fg, f71889fg, f71889ed, f71889a, f8000, f81865f }; static const char *f71882fg_names[] = { "f71808e", + "f71808a", "f71858fg", "f71862fg", "f71869", /* Both f71869f and f71869e, reg. compatible and same id */ @@ -125,6 +127,7 @@ static const char *f71882fg_names[] = { static const char f71882fg_has_in[][F71882FG_MAX_INS] = { [f71808e] = { 1, 1, 1, 1, 1, 1, 0, 1, 1 }, + [f71808a] = { 1, 1, 1, 1, 0, 0, 0, 1, 1 }, [f71858fg] = { 1, 1, 1, 0, 0, 0, 0, 0, 0 }, [f71862fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, [f71869] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 }, @@ -138,6 +141,7 @@ static const char f71882fg_has_in[][F71882FG_MAX_INS] = { static const char f71882fg_has_in1_alarm[] = { [f71808e] = 0, + [f71808a] = 0, [f71858fg] = 0, [f71862fg] = 0, [f71869] = 0, @@ -149,8 +153,9 @@ static const char f71882fg_has_in1_alarm[] = { [f81865f] = 1, }; -static const char f71882fg_has_beep[] = { +static const char f71882fg_fan_has_beep[] = { [f71808e] = 0, + [f71808a] = 0, [f71858fg] = 0, [f71862fg] = 1, [f71869] = 1, @@ -164,6 +169,7 @@ static const char f71882fg_has_beep[] = { static const char f71882fg_nr_fans[] = { [f71808e] = 3, + [f71808a] = 2, /* +1 fan which is monitor + simple pwm only */ [f71858fg] = 3, [f71862fg] = 3, [f71869] = 3, @@ -171,12 +177,27 @@ static const char f71882fg_nr_fans[] = { [f71889fg] = 3, [f71889ed] = 3, [f71889a] = 3, - [f8000] = 3, + [f8000] = 3, /* +1 fan which is monitor only */ [f81865f] = 2, }; +static const char f71882fg_temp_has_beep[] = { + [f71808e] = 0, + [f71808a] = 1, + [f71858fg] = 0, + [f71862fg] = 1, + [f71869] = 1, + [f71882fg] = 1, + [f71889fg] = 1, + [f71889ed] = 1, + [f71889a] = 1, + [f8000] = 0, + [f81865f] = 1, +}; + static const char f71882fg_nr_temps[] = { [f71808e] = 2, + [f71808a] = 2, [f71858fg] = 3, [f71862fg] = 3, [f71869] = 3, @@ -301,6 +322,10 @@ static ssize_t show_pwm(struct device *dev, struct device_attribute *devattr, char *buf); static ssize_t store_pwm(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count); +static ssize_t show_simple_pwm(struct device *dev, + struct device_attribute *devattr, char *buf); +static ssize_t store_simple_pwm(struct device *dev, + struct device_attribute *devattr, const char *buf, size_t count); static ssize_t show_pwm_enable(struct device *dev, struct device_attribute *devattr, char *buf); static ssize_t store_pwm_enable(struct device *dev, @@ -550,6 +575,14 @@ static struct sensor_device_attribute_2 fxxxx_fan_attr[4][6] = { { show_pwm_interpolate, store_pwm_interpolate, 0, 3), } }; +/* Attr for the third fan of the f71808a, which only has manual pwm */ +static struct sensor_device_attribute_2 f71808a_fan3_attr[] = { + SENSOR_ATTR_2(fan3_input, S_IRUGO, show_fan, NULL, 0, 2), + SENSOR_ATTR_2(fan3_alarm, S_IRUGO, show_fan_alarm, NULL, 0, 2), + SENSOR_ATTR_2(pwm3, S_IRUGO|S_IWUSR, + show_simple_pwm, store_simple_pwm, 0, 2), +}; + /* Attr for models which can beep on Fan alarm */ static struct sensor_device_attribute_2 fxxxx_fan_beep_attr[] = { SENSOR_ATTR_2(fan1_beep, S_IRUGO|S_IWUSR, show_fan_beep, @@ -1146,12 +1179,13 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev) data->temp_type[3] = (reg & 0x08) ? 
2 : 4; } - if (f71882fg_has_beep[data->type]) { + if (f71882fg_fan_has_beep[data->type]) data->fan_beep = f71882fg_read8(data, F71882FG_REG_FAN_BEEP); + + if (f71882fg_temp_has_beep[data->type]) data->temp_beep = f71882fg_read8(data, F71882FG_REG_TEMP_BEEP); - } data->pwm_enable = f71882fg_read8(data, F71882FG_REG_PWM_ENABLE); @@ -1232,7 +1266,13 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev) data->pwm[nr] = f71882fg_read8(data, F71882FG_REG_PWM(nr)); } - /* The f8000 can monitor 1 more fan, but has no pwm for it */ + /* Some models have 1 more fan with limited capabilities */ + if (data->type == f71808a) { + data->fan[2] = f71882fg_read16(data, + F71882FG_REG_FAN(2)); + data->pwm[2] = f71882fg_read8(data, + F71882FG_REG_PWM(2)); + } if (data->type == f8000) data->fan[3] = f71882fg_read16(data, F71882FG_REG_FAN(3)); @@ -1722,6 +1762,38 @@ leave: return count; } +static ssize_t show_simple_pwm(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + struct f71882fg_data *data = f71882fg_update_device(dev); + int val, nr = to_sensor_dev_attr_2(devattr)->index; + + val = data->pwm[nr]; + return sprintf(buf, "%d\n", val); +} + +static ssize_t store_simple_pwm(struct device *dev, + struct device_attribute *devattr, + const char *buf, size_t count) +{ + struct f71882fg_data *data = dev_get_drvdata(dev); + int err, nr = to_sensor_dev_attr_2(devattr)->index; + long val; + + err = strict_strtol(buf, 10, &val); + if (err) + return err; + + val = SENSORS_LIMIT(val, 0, 255); + + mutex_lock(&data->update_lock); + f71882fg_write8(data, F71882FG_REG_PWM(nr), val); + data->pwm[nr] = val; + mutex_unlock(&data->update_lock); + + return count; +} + static ssize_t show_pwm_enable(struct device *dev, struct device_attribute *devattr, char *buf) { @@ -2140,7 +2212,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) if (err) goto exit_unregister_sysfs; - if (f71882fg_has_beep[data->type]) { + if (f71882fg_temp_has_beep[data->type]) { err = f71882fg_create_sysfs_files(pdev, &fxxxx_temp_beep_attr[0][0], ARRAY_SIZE(fxxxx_temp_beep_attr[0]) @@ -2169,6 +2241,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) if (start_reg & 0x02) { switch (data->type) { case f71808e: + case f71808a: case f71869: /* These always have signed auto point temps */ data->auto_point_temp_signed = 1; @@ -2221,7 +2294,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) if (err) goto exit_unregister_sysfs; - if (f71882fg_has_beep[data->type]) { + if (f71882fg_fan_has_beep[data->type]) { err = f71882fg_create_sysfs_files(pdev, fxxxx_fan_beep_attr, nr_fans); if (err) @@ -2230,6 +2303,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) switch (data->type) { case f71808e: + case f71808a: case f71869: case f71889fg: case f71889ed: @@ -2255,6 +2329,16 @@ static int __devinit f71882fg_probe(struct platform_device *pdev) } switch (data->type) { + case f71808a: + err = f71882fg_create_sysfs_files(pdev, + &fxxxx_auto_pwm_attr[0][0], + ARRAY_SIZE(fxxxx_auto_pwm_attr[0]) * nr_fans); + if (err) + goto exit_unregister_sysfs; + err = f71882fg_create_sysfs_files(pdev, + f71808a_fan3_attr, + ARRAY_SIZE(f71808a_fan3_attr)); + break; case f71862fg: err = f71882fg_create_sysfs_files(pdev, f71862fg_auto_pwm_attr, @@ -2343,7 +2427,7 @@ static int f71882fg_remove(struct platform_device *pdev) &fxxxx_temp_attr[0][0], ARRAY_SIZE(fxxxx_temp_attr[0]) * nr_temps); } - if (f71882fg_has_beep[data->type]) { + if (f71882fg_temp_has_beep[data->type]) { 
f71882fg_remove_sysfs_files(pdev, &fxxxx_temp_beep_attr[0][0], ARRAY_SIZE(fxxxx_temp_beep_attr[0]) * nr_temps); @@ -2366,12 +2450,20 @@ static int f71882fg_remove(struct platform_device *pdev) f71882fg_remove_sysfs_files(pdev, &fxxxx_fan_attr[0][0], ARRAY_SIZE(fxxxx_fan_attr[0]) * nr_fans); - if (f71882fg_has_beep[data->type]) { + if (f71882fg_fan_has_beep[data->type]) { f71882fg_remove_sysfs_files(pdev, fxxxx_fan_beep_attr, nr_fans); } switch (data->type) { + case f71808a: + f71882fg_remove_sysfs_files(pdev, + &fxxxx_auto_pwm_attr[0][0], + ARRAY_SIZE(fxxxx_auto_pwm_attr[0]) * nr_fans); + f71882fg_remove_sysfs_files(pdev, + f71808a_fan3_attr, + ARRAY_SIZE(f71808a_fan3_attr)); + break; case f71862fg: f71882fg_remove_sysfs_files(pdev, f71862fg_auto_pwm_attr, @@ -2424,6 +2516,9 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address, case SIO_F71808E_ID: sio_data->type = f71808e; break; + case SIO_F71808A_ID: + sio_data->type = f71808a; + break; case SIO_F71858_ID: sio_data->type = f71858fg; break; diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c new file mode 100644 index 00000000000..523f8fb9e7d --- /dev/null +++ b/drivers/hwmon/fam15h_power.c @@ -0,0 +1,229 @@ +/* + * fam15h_power.c - AMD Family 15h processor power monitoring + * + * Copyright (c) 2011 Advanced Micro Devices, Inc. + * Author: Andreas Herrmann <andreas.herrmann3@amd.com> + * + * + * This driver is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License; either + * version 2 of the License, or (at your option) any later version. + * + * This driver is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this driver; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/err.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/bitops.h> +#include <asm/processor.h> + +MODULE_DESCRIPTION("AMD Family 15h CPU processor power monitor"); +MODULE_AUTHOR("Andreas Herrmann <andreas.herrmann3@amd.com>"); +MODULE_LICENSE("GPL"); + +/* D18F3 */ +#define REG_NORTHBRIDGE_CAP 0xe8 + +/* D18F4 */ +#define REG_PROCESSOR_TDP 0x1b8 + +/* D18F5 */ +#define REG_TDP_RUNNING_AVERAGE 0xe0 +#define REG_TDP_LIMIT3 0xe8 + +struct fam15h_power_data { + struct device *hwmon_dev; + unsigned int tdp_to_watts; + unsigned int base_tdp; + unsigned int processor_pwr_watts; +}; + +static ssize_t show_power(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u32 val, tdp_limit, running_avg_range; + s32 running_avg_capture; + u64 curr_pwr_watts; + struct pci_dev *f4 = to_pci_dev(dev); + struct fam15h_power_data *data = dev_get_drvdata(dev); + + pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5), + REG_TDP_RUNNING_AVERAGE, &val); + running_avg_capture = (val >> 4) & 0x3fffff; + running_avg_capture = sign_extend32(running_avg_capture, 22); + running_avg_range = val & 0xf; + + pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5), + REG_TDP_LIMIT3, &val); + + tdp_limit = val >> 16; + curr_pwr_watts = tdp_limit + data->base_tdp - + (s32)(running_avg_capture >> (running_avg_range + 1)); + curr_pwr_watts *= data->tdp_to_watts; + + /* + * Convert to microWatt + * + * power is in Watt provided as fixed point integer with + * scaling factor 1/(2^16). For conversion we use + * (10^6)/(2^16) = 15625/(2^10) + */ + curr_pwr_watts = (curr_pwr_watts * 15625) >> 10; + return sprintf(buf, "%u\n", (unsigned int) curr_pwr_watts); +} +static DEVICE_ATTR(power1_input, S_IRUGO, show_power, NULL); + +static ssize_t show_power_crit(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct fam15h_power_data *data = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", data->processor_pwr_watts); +} +static DEVICE_ATTR(power1_crit, S_IRUGO, show_power_crit, NULL); + +static ssize_t show_name(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "fam15h_power\n"); +} +static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); + +static struct attribute *fam15h_power_attrs[] = { + &dev_attr_power1_input.attr, + &dev_attr_power1_crit.attr, + &dev_attr_name.attr, + NULL +}; + +static const struct attribute_group fam15h_power_attr_group = { + .attrs = fam15h_power_attrs, +}; + +static bool __devinit fam15h_power_is_internal_node0(struct pci_dev *f4) +{ + u32 val; + + pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 3), + REG_NORTHBRIDGE_CAP, &val); + if ((val & BIT(29)) && ((val >> 30) & 3)) + return false; + + return true; +} + +static void __devinit fam15h_power_init_data(struct pci_dev *f4, + struct fam15h_power_data *data) +{ + u32 val; + u64 tmp; + + pci_read_config_dword(f4, REG_PROCESSOR_TDP, &val); + data->base_tdp = val >> 16; + tmp = val & 0xffff; + + pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5), + REG_TDP_LIMIT3, &val); + + data->tdp_to_watts = ((val & 0x3ff) << 6) | ((val >> 10) & 0x3f); + tmp *= data->tdp_to_watts; + + /* result not allowed to be >= 256W */ + if ((tmp >> 16) >= 256) + dev_warn(&f4->dev, "Bogus value for ProcessorPwrWatts " + "(processor_pwr_watts>=%u)\n", + (unsigned int) (tmp >> 16)); + + /* convert to microWatt */ + 
data->processor_pwr_watts = (tmp * 15625) >> 10; +} + +static int __devinit fam15h_power_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct fam15h_power_data *data; + struct device *dev; + int err; + + if (!fam15h_power_is_internal_node0(pdev)) { + err = -ENODEV; + goto exit; + } + + data = kzalloc(sizeof(struct fam15h_power_data), GFP_KERNEL); + if (!data) { + err = -ENOMEM; + goto exit; + } + fam15h_power_init_data(pdev, data); + dev = &pdev->dev; + + dev_set_drvdata(dev, data); + err = sysfs_create_group(&dev->kobj, &fam15h_power_attr_group); + if (err) + goto exit_free_data; + + data->hwmon_dev = hwmon_device_register(dev); + if (IS_ERR(data->hwmon_dev)) { + err = PTR_ERR(data->hwmon_dev); + goto exit_remove_group; + } + + return 0; + +exit_remove_group: + sysfs_remove_group(&dev->kobj, &fam15h_power_attr_group); +exit_free_data: + kfree(data); +exit: + return err; +} + +static void __devexit fam15h_power_remove(struct pci_dev *pdev) +{ + struct device *dev; + struct fam15h_power_data *data; + + dev = &pdev->dev; + data = dev_get_drvdata(dev); + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&dev->kobj, &fam15h_power_attr_group); + dev_set_drvdata(dev, NULL); + kfree(data); +} + +static DEFINE_PCI_DEVICE_TABLE(fam15h_power_id_table) = { + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }, + {} +}; +MODULE_DEVICE_TABLE(pci, fam15h_power_id_table); + +static struct pci_driver fam15h_power_driver = { + .name = "fam15h_power", + .id_table = fam15h_power_id_table, + .probe = fam15h_power_probe, + .remove = __devexit_p(fam15h_power_remove), +}; + +static int __init fam15h_power_init(void) +{ + return pci_register_driver(&fam15h_power_driver); +} + +static void __exit fam15h_power_exit(void) +{ + pci_unregister_driver(&fam15h_power_driver); +} + +module_init(fam15h_power_init) +module_exit(fam15h_power_exit) diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c index bc6e2ab3a36..537409d07ee 100644 --- a/drivers/hwmon/ibmaem.c +++ b/drivers/hwmon/ibmaem.c @@ -523,7 +523,7 @@ static void aem_delete(struct aem_data *data) aem_remove_sensors(data); hwmon_device_unregister(data->hwmon_dev); ipmi_destroy_user(data->ipmi.user); - dev_set_drvdata(&data->pdev->dev, NULL); + platform_set_drvdata(data->pdev, NULL); platform_device_unregister(data->pdev); aem_idr_put(data->id); kfree(data); @@ -594,7 +594,7 @@ static int aem_init_aem1_inst(struct aem_ipmi_data *probe, u8 module_handle) if (res) goto ipmi_err; - dev_set_drvdata(&data->pdev->dev, data); + platform_set_drvdata(data->pdev, data); /* Set up IPMI interface */ if (aem_init_ipmi_data(&data->ipmi, probe->interface, @@ -630,7 +630,7 @@ sensor_err: hwmon_reg_err: ipmi_destroy_user(data->ipmi.user); ipmi_err: - dev_set_drvdata(&data->pdev->dev, NULL); + platform_set_drvdata(data->pdev, NULL); platform_device_unregister(data->pdev); dev_err: aem_idr_put(data->id); @@ -727,7 +727,7 @@ static int aem_init_aem2_inst(struct aem_ipmi_data *probe, if (res) goto ipmi_err; - dev_set_drvdata(&data->pdev->dev, data); + platform_set_drvdata(data->pdev, data); /* Set up IPMI interface */ if (aem_init_ipmi_data(&data->ipmi, probe->interface, @@ -763,7 +763,7 @@ sensor_err: hwmon_reg_err: ipmi_destroy_user(data->ipmi.user); ipmi_err: - dev_set_drvdata(&data->pdev->dev, NULL); + platform_set_drvdata(data->pdev, NULL); platform_device_unregister(data->pdev); dev_err: aem_idr_put(data->id); diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c index 316b64823f7..bb6405b9200 100644 --- a/drivers/hwmon/it87.c +++ 
b/drivers/hwmon/it87.c @@ -77,15 +77,13 @@ static struct platform_device *pdev; #define DEVID 0x20 /* Register: Device ID */ #define DEVREV 0x22 /* Register: Device Revision */ -static inline int -superio_inb(int reg) +static inline int superio_inb(int reg) { outb(reg, REG); return inb(VAL); } -static inline void -superio_outb(int reg, int val) +static inline void superio_outb(int reg, int val) { outb(reg, REG); outb(val, VAL); @@ -101,27 +99,32 @@ static int superio_inw(int reg) return val; } -static inline void -superio_select(int ldn) +static inline void superio_select(int ldn) { outb(DEV, REG); outb(ldn, VAL); } -static inline void -superio_enter(void) +static inline int superio_enter(void) { + /* + * Try to reserve REG and REG + 1 for exclusive access. + */ + if (!request_muxed_region(REG, 2, DRVNAME)) + return -EBUSY; + outb(0x87, REG); outb(0x01, REG); outb(0x55, REG); outb(0x55, REG); + return 0; } -static inline void -superio_exit(void) +static inline void superio_exit(void) { outb(0x02, REG); outb(0x02, VAL); + release_region(REG, 2); } /* Logical device 4 registers */ @@ -1542,11 +1545,15 @@ static const struct attribute_group it87_group_label = { static int __init it87_find(unsigned short *address, struct it87_sio_data *sio_data) { - int err = -ENODEV; + int err; u16 chip_type; const char *board_vendor, *board_name; - superio_enter(); + err = superio_enter(); + if (err) + return err; + + err = -ENODEV; chip_type = force_id ? force_id : superio_inw(DEVID); switch (chip_type) { diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c index 93499123706..02cebb74e20 100644 --- a/drivers/hwmon/jc42.c +++ b/drivers/hwmon/jc42.c @@ -213,7 +213,7 @@ static const struct dev_pm_ops jc42_dev_pm_ops = { /* This is the driver that will be inserted */ static struct i2c_driver jc42_driver = { - .class = I2C_CLASS_HWMON, + .class = I2C_CLASS_SPD, .driver = { .name = "jc42", .pm = JC42_DEV_PM_OPS, diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index 82bf65aa296..41aa6a31987 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -1,5 +1,5 @@ /* - * k10temp.c - AMD Family 10h/11h/12h/14h processor hardware monitoring + * k10temp.c - AMD Family 10h/11h/12h/14h/15h processor hardware monitoring * * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de> * @@ -25,7 +25,7 @@ #include <linux/pci.h> #include <asm/processor.h> -MODULE_DESCRIPTION("AMD Family 10h/11h/12h/14h CPU core temperature monitor"); +MODULE_DESCRIPTION("AMD Family 10h+ CPU core temperature monitor"); MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); MODULE_LICENSE("GPL"); @@ -173,7 +173,7 @@ static int __devinit k10temp_probe(struct pci_dev *pdev, err = PTR_ERR(hwmon_dev); goto exit_remove; } - dev_set_drvdata(&pdev->dev, hwmon_dev); + pci_set_drvdata(pdev, hwmon_dev); if (unreliable && force) dev_warn(&pdev->dev, @@ -194,7 +194,7 @@ exit: static void __devexit k10temp_remove(struct pci_dev *pdev) { - hwmon_device_unregister(dev_get_drvdata(&pdev->dev)); + hwmon_device_unregister(pci_get_drvdata(pdev)); device_remove_file(&pdev->dev, &dev_attr_name); device_remove_file(&pdev->dev, &dev_attr_temp1_input); device_remove_file(&pdev->dev, &dev_attr_temp1_max); @@ -202,13 +202,14 @@ static void __devexit k10temp_remove(struct pci_dev *pdev) &sensor_dev_attr_temp1_crit.dev_attr); device_remove_file(&pdev->dev, &sensor_dev_attr_temp1_crit_hyst.dev_attr); - dev_set_drvdata(&pdev->dev, NULL); + pci_set_drvdata(pdev, NULL); } static const struct pci_device_id k10temp_id_table[] = { { PCI_VDEVICE(AMD, 
PCI_DEVICE_ID_AMD_10H_NB_MISC) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) }, {} }; MODULE_DEVICE_TABLE(pci, k10temp_id_table); diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c index 418496f1302..b923bc2307a 100644 --- a/drivers/hwmon/k8temp.c +++ b/drivers/hwmon/k8temp.c @@ -252,7 +252,7 @@ static int __devinit k8temp_probe(struct pci_dev *pdev, data->name = "k8temp"; mutex_init(&data->update_lock); - dev_set_drvdata(&pdev->dev, data); + pci_set_drvdata(pdev, data); /* Register sysfs hooks */ err = device_create_file(&pdev->dev, @@ -307,7 +307,7 @@ exit_remove: &sensor_dev_attr_temp4_input.dev_attr); device_remove_file(&pdev->dev, &dev_attr_name); exit_free: - dev_set_drvdata(&pdev->dev, NULL); + pci_set_drvdata(pdev, NULL); kfree(data); exit: return err; @@ -315,7 +315,7 @@ exit: static void __devexit k8temp_remove(struct pci_dev *pdev) { - struct k8temp_data *data = dev_get_drvdata(&pdev->dev); + struct k8temp_data *data = pci_get_drvdata(pdev); hwmon_device_unregister(data->hwmon_dev); device_remove_file(&pdev->dev, @@ -327,7 +327,7 @@ static void __devexit k8temp_remove(struct pci_dev *pdev) device_remove_file(&pdev->dev, &sensor_dev_attr_temp4_input.dev_attr); device_remove_file(&pdev->dev, &dev_attr_name); - dev_set_drvdata(&pdev->dev, NULL); + pci_set_drvdata(pdev, NULL); kfree(data); } diff --git a/drivers/hwmon/lm70.c b/drivers/hwmon/lm70.c index 3b84fb50305..c274ea25d89 100644 --- a/drivers/hwmon/lm70.c +++ b/drivers/hwmon/lm70.c @@ -58,7 +58,7 @@ static ssize_t lm70_sense_temp(struct device *dev, int status, val = 0; u8 rxbuf[2]; s16 raw=0; - struct lm70 *p_lm70 = dev_get_drvdata(&spi->dev); + struct lm70 *p_lm70 = spi_get_drvdata(spi); if (mutex_lock_interruptible(&p_lm70->lock)) return -ERESTARTSYS; @@ -163,7 +163,7 @@ static int __devinit lm70_probe(struct spi_device *spi) status = PTR_ERR(p_lm70->hwmon_dev); goto out_dev_reg_failed; } - dev_set_drvdata(&spi->dev, p_lm70); + spi_set_drvdata(spi, p_lm70); if ((status = device_create_file(&spi->dev, &dev_attr_temp1_input)) || (status = device_create_file(&spi->dev, &dev_attr_name))) { @@ -177,19 +177,19 @@ out_dev_create_file_failed: device_remove_file(&spi->dev, &dev_attr_temp1_input); hwmon_device_unregister(p_lm70->hwmon_dev); out_dev_reg_failed: - dev_set_drvdata(&spi->dev, NULL); + spi_set_drvdata(spi, NULL); kfree(p_lm70); return status; } static int __devexit lm70_remove(struct spi_device *spi) { - struct lm70 *p_lm70 = dev_get_drvdata(&spi->dev); + struct lm70 *p_lm70 = spi_get_drvdata(spi); device_remove_file(&spi->dev, &dev_attr_temp1_input); device_remove_file(&spi->dev, &dev_attr_name); hwmon_device_unregister(p_lm70->hwmon_dev); - dev_set_drvdata(&spi->dev, NULL); + spi_set_drvdata(spi, NULL); kfree(p_lm70); return 0; diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c index 9a11532ecae..ece3aafa54b 100644 --- a/drivers/hwmon/max6650.c +++ b/drivers/hwmon/max6650.c @@ -41,13 +41,6 @@ #include <linux/err.h> /* - * Addresses to scan. There are four disjoint possibilities, by pin config. 
- */ - -static const unsigned short normal_i2c[] = {0x1b, 0x1f, 0x48, 0x4b, - I2C_CLIENT_END}; - -/* * Insmod parameters */ @@ -114,8 +107,6 @@ module_param(clock, int, S_IRUGO); static int max6650_probe(struct i2c_client *client, const struct i2c_device_id *id); -static int max6650_detect(struct i2c_client *client, - struct i2c_board_info *info); static int max6650_init_client(struct i2c_client *client); static int max6650_remove(struct i2c_client *client); static struct max6650_data *max6650_update_device(struct device *dev); @@ -125,21 +116,19 @@ static struct max6650_data *max6650_update_device(struct device *dev); */ static const struct i2c_device_id max6650_id[] = { - { "max6650", 0 }, + { "max6650", 1 }, + { "max6651", 4 }, { } }; MODULE_DEVICE_TABLE(i2c, max6650_id); static struct i2c_driver max6650_driver = { - .class = I2C_CLASS_HWMON, .driver = { .name = "max6650", }, .probe = max6650_probe, .remove = max6650_remove, .id_table = max6650_id, - .detect = max6650_detect, - .address_list = normal_i2c, }; /* @@ -150,6 +139,7 @@ struct max6650_data { struct device *hwmon_dev; struct mutex update_lock; + int nr_fans; char valid; /* zero until following fields are valid */ unsigned long last_updated; /* in jiffies */ @@ -501,9 +491,6 @@ static mode_t max6650_attrs_visible(struct kobject *kobj, struct attribute *a, static struct attribute *max6650_attrs[] = { &sensor_dev_attr_fan1_input.dev_attr.attr, - &sensor_dev_attr_fan2_input.dev_attr.attr, - &sensor_dev_attr_fan3_input.dev_attr.attr, - &sensor_dev_attr_fan4_input.dev_attr.attr, &dev_attr_fan1_target.attr, &dev_attr_fan1_div.attr, &dev_attr_pwm1_enable.attr, @@ -521,42 +508,21 @@ static struct attribute_group max6650_attr_grp = { .is_visible = max6650_attrs_visible, }; +static struct attribute *max6651_attrs[] = { + &sensor_dev_attr_fan2_input.dev_attr.attr, + &sensor_dev_attr_fan3_input.dev_attr.attr, + &sensor_dev_attr_fan4_input.dev_attr.attr, + NULL +}; + +static const struct attribute_group max6651_attr_grp = { + .attrs = max6651_attrs, +}; + /* * Real code */ -/* Return 0 if detection is successful, -ENODEV otherwise */ -static int max6650_detect(struct i2c_client *client, - struct i2c_board_info *info) -{ - struct i2c_adapter *adapter = client->adapter; - int address = client->addr; - - dev_dbg(&adapter->dev, "max6650_detect called\n"); - - if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { - dev_dbg(&adapter->dev, "max6650: I2C bus doesn't support " - "byte read mode, skipping.\n"); - return -ENODEV; - } - - if (((i2c_smbus_read_byte_data(client, MAX6650_REG_CONFIG) & 0xC0) - ||(i2c_smbus_read_byte_data(client, MAX6650_REG_GPIO_STAT) & 0xE0) - ||(i2c_smbus_read_byte_data(client, MAX6650_REG_ALARM_EN) & 0xE0) - ||(i2c_smbus_read_byte_data(client, MAX6650_REG_ALARM) & 0xE0) - ||(i2c_smbus_read_byte_data(client, MAX6650_REG_COUNT) & 0xFC))) { - dev_dbg(&adapter->dev, - "max6650: detection failed at 0x%02x.\n", address); - return -ENODEV; - } - - dev_info(&adapter->dev, "max6650: chip found at 0x%02x.\n", address); - - strlcpy(info->type, "max6650", I2C_NAME_SIZE); - - return 0; -} - static int max6650_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -570,6 +536,7 @@ static int max6650_probe(struct i2c_client *client, i2c_set_clientdata(client, data); mutex_init(&data->update_lock); + data->nr_fans = id->driver_data; /* * Initialize the max6650 chip @@ -581,6 +548,12 @@ static int max6650_probe(struct i2c_client *client, err = sysfs_create_group(&client->dev.kobj, &max6650_attr_grp); if (err) 
goto err_free; + /* 3 additional fan inputs for the MAX6651 */ + if (data->nr_fans == 4) { + err = sysfs_create_group(&client->dev.kobj, &max6651_attr_grp); + if (err) + goto err_remove; + } data->hwmon_dev = hwmon_device_register(&client->dev); if (!IS_ERR(data->hwmon_dev)) @@ -588,6 +561,9 @@ static int max6650_probe(struct i2c_client *client, err = PTR_ERR(data->hwmon_dev); dev_err(&client->dev, "error registering hwmon device.\n"); + if (data->nr_fans == 4) + sysfs_remove_group(&client->dev.kobj, &max6651_attr_grp); +err_remove: sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp); err_free: kfree(data); @@ -598,8 +574,10 @@ static int max6650_remove(struct i2c_client *client) { struct max6650_data *data = i2c_get_clientdata(client); - sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp); hwmon_device_unregister(data->hwmon_dev); + if (data->nr_fans == 4) + sysfs_remove_group(&client->dev.kobj, &max6651_attr_grp); + sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp); kfree(data); return 0; } @@ -712,7 +690,7 @@ static struct max6650_data *max6650_update_device(struct device *dev) MAX6650_REG_SPEED); data->config = i2c_smbus_read_byte_data(client, MAX6650_REG_CONFIG); - for (i = 0; i < 4; i++) { + for (i = 0; i < data->nr_fans; i++) { data->tach[i] = i2c_smbus_read_byte_data(client, tach_reg[i]); } diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c index 9a51dcca9b0..020c87273ea 100644 --- a/drivers/hwmon/sch5627.c +++ b/drivers/hwmon/sch5627.c @@ -52,6 +52,9 @@ #define SCH5627_COMPANY_ID 0x5c #define SCH5627_PRIMARY_ID 0xa0 +#define SCH5627_CMD_READ 0x02 +#define SCH5627_CMD_WRITE 0x03 + #define SCH5627_REG_BUILD_CODE 0x39 #define SCH5627_REG_BUILD_ID 0x3a #define SCH5627_REG_HWMON_ID 0x3c @@ -94,11 +97,13 @@ static const char * const SCH5627_IN_LABELS[SCH5627_NO_IN] = { struct sch5627_data { unsigned short addr; struct device *hwmon_dev; + u8 control; u8 temp_max[SCH5627_NO_TEMPS]; u8 temp_crit[SCH5627_NO_TEMPS]; u16 fan_min[SCH5627_NO_FANS]; struct mutex update_lock; + unsigned long last_battery; /* In jiffies */ char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ u16 temp[SCH5627_NO_TEMPS]; @@ -140,7 +145,7 @@ static inline void superio_exit(int base) release_region(base, 2); } -static int sch5627_read_virtual_reg(struct sch5627_data *data, u16 reg) +static int sch5627_send_cmd(struct sch5627_data *data, u8 cmd, u16 reg, u8 v) { u8 val; int i; @@ -163,10 +168,14 @@ static int sch5627_read_virtual_reg(struct sch5627_data *data, u16 reg) outb(0x80, data->addr + 3); /* Write Request Packet Header */ - outb(0x02, data->addr + 4); /* Access Type: VREG read */ + outb(cmd, data->addr + 4); /* VREG Access Type read:0x02 write:0x03 */ outb(0x01, data->addr + 5); /* # of Entries: 1 Byte (8-bit) */ outb(0x04, data->addr + 2); /* Mailbox AP to first data entry loc. */ + /* Write Value field */ + if (cmd == SCH5627_CMD_WRITE) + outb(v, data->addr + 4); + /* Write Address field */ outb(reg & 0xff, data->addr + 6); outb(reg >> 8, data->addr + 7); @@ -224,8 +233,22 @@ static int sch5627_read_virtual_reg(struct sch5627_data *data, u16 reg) * But if we do that things don't work, so let's not. 
*/ - /* Read Data from Mailbox */ - return inb(data->addr + 4); + /* Read Value field */ + if (cmd == SCH5627_CMD_READ) + return inb(data->addr + 4); + + return 0; +} + +static int sch5627_read_virtual_reg(struct sch5627_data *data, u16 reg) +{ + return sch5627_send_cmd(data, SCH5627_CMD_READ, reg, 0); +} + +static int sch5627_write_virtual_reg(struct sch5627_data *data, + u16 reg, u8 val) +{ + return sch5627_send_cmd(data, SCH5627_CMD_WRITE, reg, val); } static int sch5627_read_virtual_reg16(struct sch5627_data *data, u16 reg) @@ -272,6 +295,13 @@ static struct sch5627_data *sch5627_update_device(struct device *dev) mutex_lock(&data->update_lock); + /* Trigger a Vbat voltage measurement every 5 minutes */ + if (time_after(jiffies, data->last_battery + 300 * HZ)) { + sch5627_write_virtual_reg(data, SCH5627_REG_CTRL, + data->control | 0x10); + data->last_battery = jiffies; + } + /* Cache the values for 1 second */ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { for (i = 0; i < SCH5627_NO_TEMPS; i++) { @@ -696,11 +726,17 @@ static int __devinit sch5627_probe(struct platform_device *pdev) err = val; goto error; } - if (!(val & 0x01)) { + data->control = val; + if (!(data->control & 0x01)) { pr_err("hardware monitoring not enabled\n"); err = -ENODEV; goto error; } + /* Trigger a Vbat voltage measurement, so that we get a valid reading + the first time we read Vbat */ + sch5627_write_virtual_reg(data, SCH5627_REG_CTRL, + data->control | 0x10); + data->last_battery = jiffies; /* * Read limits, we do this only once as reading a register on diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c index 1f36c635d93..27a62711e0a 100644 --- a/drivers/hwmon/ultra45_env.c +++ b/drivers/hwmon/ultra45_env.c @@ -258,7 +258,7 @@ static int __devinit env_probe(struct platform_device *op) goto out_sysfs_remove_group; } - dev_set_drvdata(&op->dev, p); + platform_set_drvdata(op, p); err = 0; out: @@ -277,7 +277,7 @@ out_free: static int __devexit env_remove(struct platform_device *op) { - struct env *p = dev_get_drvdata(&op->dev); + struct env *p = platform_get_drvdata(op); if (p) { sysfs_remove_group(&op->dev.kobj, &env_group); diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 428d098c35e..646068e5100 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -673,15 +673,19 @@ config I2C_XILINX will be called xilinx_i2c. config I2C_EG20T - tristate "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH" + tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223)" depends on PCI help This driver is for PCH(Platform controller Hub) I2C of EG20T which is an IOH(Input/Output Hub) for x86 embedded processor. This driver can access PCH I2C bus device. - This driver also supports the ML7213, a companion chip for the - Atom E6xx series and compatible with the Intel EG20T PCH. + This driver also can be used for OKI SEMICONDUCTOR IOH(Input/ + Output Hub), ML7213 and ML7223. + ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is + for MP(Media Phone) use. + ML7213/ML7223 is companion chip for Intel Atom E6xx series. + ML7213/ML7223 is completely compatible for Intel EG20T PCH. 
comment "External I2C/SMBus adapter drivers" diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c index 878a12026af..8abfa4a03ce 100644 --- a/drivers/i2c/busses/i2c-eg20t.c +++ b/drivers/i2c/busses/i2c-eg20t.c @@ -182,10 +182,12 @@ static DEFINE_MUTEX(pch_mutex); /* Definition for ML7213 by OKI SEMICONDUCTOR */ #define PCI_VENDOR_ID_ROHM 0x10DB #define PCI_DEVICE_ID_ML7213_I2C 0x802D +#define PCI_DEVICE_ID_ML7223_I2C 0x8010 static struct pci_device_id __devinitdata pch_pcidev_id[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_I2C), 1, }, { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_I2C), 2, }, + { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_I2C), 1, }, {0,} }; diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c index e10e5cf3751..0c731ca69f1 100644 --- a/drivers/i2c/busses/i2c-nomadik.c +++ b/drivers/i2c/busses/i2c-nomadik.c @@ -15,13 +15,14 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/platform_device.h> -#include <linux/delay.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/i2c.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> +#include <linux/regulator/consumer.h> +#include <linux/pm_runtime.h> #include <plat/i2c.h> @@ -103,9 +104,6 @@ /* maximum threshold value */ #define MAX_I2C_FIFO_THRESHOLD 15 -/* per-transfer delay, required for the hardware to stabilize */ -#define I2C_DELAY 150 - enum i2c_status { I2C_NOP, I2C_ON_GOING, @@ -120,9 +118,6 @@ enum i2c_operation { I2C_READ = 0x01 }; -/* controller response timeout in ms */ -#define I2C_TIMEOUT_MS 2000 - /** * struct i2c_nmk_client - client specific data * @slave_adr: 7-bit slave address @@ -151,6 +146,7 @@ struct i2c_nmk_client { * @stop: stop condition * @xfer_complete: acknowledge completion for a I2C message * @result: controller propogated result + * @busy: Busy doing transfer */ struct nmk_i2c_dev { struct platform_device *pdev; @@ -163,6 +159,8 @@ struct nmk_i2c_dev { int stop; struct completion xfer_complete; int result; + struct regulator *regulator; + bool busy; }; /* controller's abort causes */ @@ -209,7 +207,7 @@ static int flush_i2c_fifo(struct nmk_i2c_dev *dev) writel((I2C_CR_FTX | I2C_CR_FRX), dev->virtbase + I2C_CR); for (i = 0; i < LOOP_ATTEMPTS; i++) { - timeout = jiffies + msecs_to_jiffies(I2C_TIMEOUT_MS); + timeout = jiffies + dev->adap.timeout; while (!time_after(jiffies, timeout)) { if ((readl(dev->virtbase + I2C_CR) & @@ -253,11 +251,9 @@ static int init_hw(struct nmk_i2c_dev *dev) { int stat; - clk_enable(dev->clk); - stat = flush_i2c_fifo(dev); if (stat) - return stat; + goto exit; /* disable the controller */ i2c_clr_bit(dev->virtbase + I2C_CR , I2C_CR_PE); @@ -268,10 +264,8 @@ static int init_hw(struct nmk_i2c_dev *dev) dev->cli.operation = I2C_NO_OPERATION; - clk_disable(dev->clk); - - udelay(I2C_DELAY); - return 0; +exit: + return stat; } /* enable peripheral, master mode operation */ @@ -424,7 +418,7 @@ static int read_i2c(struct nmk_i2c_dev *dev) dev->virtbase + I2C_IMSCR); timeout = wait_for_completion_interruptible_timeout( - &dev->xfer_complete, msecs_to_jiffies(I2C_TIMEOUT_MS)); + &dev->xfer_complete, dev->adap.timeout); if (timeout < 0) { dev_err(&dev->pdev->dev, @@ -434,14 +428,32 @@ static int read_i2c(struct nmk_i2c_dev *dev) } if (timeout == 0) { - /* controller has timedout, re-init the h/w */ - dev_err(&dev->pdev->dev, "controller timed out, re-init h/w\n"); - (void) init_hw(dev); + /* Controller timed out */ + dev_err(&dev->pdev->dev, "read from slave 0x%x timed out\n", + 
dev->cli.slave_adr); status = -ETIMEDOUT; } return status; } +static void fill_tx_fifo(struct nmk_i2c_dev *dev, int no_bytes) +{ + int count; + + for (count = (no_bytes - 2); + (count > 0) && + (dev->cli.count != 0); + count--) { + /* write to the Tx FIFO */ + writeb(*dev->cli.buffer, + dev->virtbase + I2C_TFR); + dev->cli.buffer++; + dev->cli.count--; + dev->cli.xfer_bytes++; + } + +} + /** * write_i2c() - Write data to I2C client. * @dev: private data of I2C Driver @@ -469,8 +481,13 @@ static int write_i2c(struct nmk_i2c_dev *dev) init_completion(&dev->xfer_complete); /* enable interrupts by settings the masks */ - irq_mask = (I2C_IT_TXFNE | I2C_IT_TXFOVR | - I2C_IT_MAL | I2C_IT_BERR); + irq_mask = (I2C_IT_TXFOVR | I2C_IT_MAL | I2C_IT_BERR); + + /* Fill the TX FIFO with transmit data */ + fill_tx_fifo(dev, MAX_I2C_FIFO_THRESHOLD); + + if (dev->cli.count != 0) + irq_mask |= I2C_IT_TXFNE; /* * check if we want to transfer a single or multiple bytes, if so @@ -488,7 +505,7 @@ static int write_i2c(struct nmk_i2c_dev *dev) dev->virtbase + I2C_IMSCR); timeout = wait_for_completion_interruptible_timeout( - &dev->xfer_complete, msecs_to_jiffies(I2C_TIMEOUT_MS)); + &dev->xfer_complete, dev->adap.timeout); if (timeout < 0) { dev_err(&dev->pdev->dev, @@ -498,9 +515,9 @@ static int write_i2c(struct nmk_i2c_dev *dev) } if (timeout == 0) { - /* controller has timedout, re-init the h/w */ - dev_err(&dev->pdev->dev, "controller timed out, re-init h/w\n"); - (void) init_hw(dev); + /* Controller timed out */ + dev_err(&dev->pdev->dev, "write to slave 0x%x timed out\n", + dev->cli.slave_adr); status = -ETIMEDOUT; } @@ -508,6 +525,51 @@ static int write_i2c(struct nmk_i2c_dev *dev) } /** + * nmk_i2c_xfer_one() - transmit a single I2C message + * @dev: device with a message encoded into it + * @flags: message flags + */ +static int nmk_i2c_xfer_one(struct nmk_i2c_dev *dev, u16 flags) +{ + int status; + + if (flags & I2C_M_RD) { + /* read operation */ + dev->cli.operation = I2C_READ; + status = read_i2c(dev); + } else { + /* write operation */ + dev->cli.operation = I2C_WRITE; + status = write_i2c(dev); + } + + if (status || (dev->result)) { + u32 i2c_sr; + u32 cause; + + i2c_sr = readl(dev->virtbase + I2C_SR); + /* + * Check if the controller I2C operation status + * is set to ABORT(11b). + */ + if (((i2c_sr >> 2) & 0x3) == 0x3) { + /* get the abort cause */ + cause = (i2c_sr >> 4) & 0x7; + dev_err(&dev->pdev->dev, "%s\n", cause + >= ARRAY_SIZE(abort_causes) ? + "unknown reason" : + abort_causes[cause]); + } + + (void) init_hw(dev); + + status = status ? status : dev->result; + } + + return status; +} + +/** * nmk_i2c_xfer() - I2C transfer function used by kernel framework * @i2c_adap: Adapter pointer to the controller * @msgs: Pointer to data to be written. 
@@ -559,53 +621,55 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap, { int status; int i; - u32 cause; struct nmk_i2c_dev *dev = i2c_get_adapdata(i2c_adap); + int j; + + dev->busy = true; + + if (dev->regulator) + regulator_enable(dev->regulator); + pm_runtime_get_sync(&dev->pdev->dev); + + clk_enable(dev->clk); status = init_hw(dev); if (status) - return status; + goto out; - clk_enable(dev->clk); + /* Attempt three times to send the message queue */ + for (j = 0; j < 3; j++) { + /* setup the i2c controller */ + setup_i2c_controller(dev); - /* setup the i2c controller */ - setup_i2c_controller(dev); + for (i = 0; i < num_msgs; i++) { + if (unlikely(msgs[i].flags & I2C_M_TEN)) { + dev_err(&dev->pdev->dev, "10 bit addressing" + "not supported\n"); - for (i = 0; i < num_msgs; i++) { - if (unlikely(msgs[i].flags & I2C_M_TEN)) { - dev_err(&dev->pdev->dev, "10 bit addressing" - "not supported\n"); - return -EINVAL; - } - dev->cli.slave_adr = msgs[i].addr; - dev->cli.buffer = msgs[i].buf; - dev->cli.count = msgs[i].len; - dev->stop = (i < (num_msgs - 1)) ? 0 : 1; - dev->result = 0; - - if (msgs[i].flags & I2C_M_RD) { - /* it is a read operation */ - dev->cli.operation = I2C_READ; - status = read_i2c(dev); - } else { - /* write operation */ - dev->cli.operation = I2C_WRITE; - status = write_i2c(dev); - } - if (status || (dev->result)) { - /* get the abort cause */ - cause = (readl(dev->virtbase + I2C_SR) >> 4) & 0x7; - dev_err(&dev->pdev->dev, "error during I2C" - "message xfer: %d\n", cause); - dev_err(&dev->pdev->dev, "%s\n", - cause >= ARRAY_SIZE(abort_causes) - ? "unknown reason" : abort_causes[cause]); - clk_disable(dev->clk); - return status; + status = -EINVAL; + goto out; + } + dev->cli.slave_adr = msgs[i].addr; + dev->cli.buffer = msgs[i].buf; + dev->cli.count = msgs[i].len; + dev->stop = (i < (num_msgs - 1)) ? 0 : 1; + dev->result = 0; + + status = nmk_i2c_xfer_one(dev, msgs[i].flags); + if (status != 0) + break; } - udelay(I2C_DELAY); + if (status == 0) + break; } + +out: clk_disable(dev->clk); + pm_runtime_put_sync(&dev->pdev->dev); + if (dev->regulator) + regulator_disable(dev->regulator); + + dev->busy = false; /* return the no. 
messages processed */ if (status) @@ -666,17 +730,7 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg) */ disable_interrupts(dev, I2C_IT_TXFNE); } else { - for (count = (MAX_I2C_FIFO_THRESHOLD - tft - 2); - (count > 0) && - (dev->cli.count != 0); - count--) { - /* write to the Tx FIFO */ - writeb(*dev->cli.buffer, - dev->virtbase + I2C_TFR); - dev->cli.buffer++; - dev->cli.count--; - dev->cli.xfer_bytes++; - } + fill_tx_fifo(dev, (MAX_I2C_FIFO_THRESHOLD - tft)); /* * if done, close the transfer by disabling the * corresponding TXFNE interrupt @@ -729,16 +783,11 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg) } } - i2c_set_bit(dev->virtbase + I2C_ICR, I2C_IT_MTD); - i2c_set_bit(dev->virtbase + I2C_ICR, I2C_IT_MTDWS); - - disable_interrupts(dev, - (I2C_IT_TXFNE | I2C_IT_TXFE | I2C_IT_TXFF - | I2C_IT_TXFOVR | I2C_IT_RXFNF - | I2C_IT_RXFF | I2C_IT_RXFE)); + disable_all_interrupts(dev); + clear_all_interrupts(dev); if (dev->cli.count) { - dev->result = -1; + dev->result = -EIO; dev_err(&dev->pdev->dev, "%lu bytes still remain to be" "xfered\n", dev->cli.count); (void) init_hw(dev); @@ -749,7 +798,7 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg) /* Master Arbitration lost interrupt */ case I2C_IT_MAL: - dev->result = -1; + dev->result = -EIO; (void) init_hw(dev); i2c_set_bit(dev->virtbase + I2C_ICR, I2C_IT_MAL); @@ -763,7 +812,7 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg) * during the transaction. */ case I2C_IT_BERR: - dev->result = -1; + dev->result = -EIO; /* get the status */ if (((readl(dev->virtbase + I2C_SR) >> 2) & 0x3) == I2C_ABORT) (void) init_hw(dev); @@ -779,7 +828,7 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg) * the Tx FIFO is full. */ case I2C_IT_TXFOVR: - dev->result = -1; + dev->result = -EIO; (void) init_hw(dev); dev_err(&dev->pdev->dev, "Tx Fifo Over run\n"); @@ -805,6 +854,38 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg) return IRQ_HANDLED; } + +#ifdef CONFIG_PM +static int nmk_i2c_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct nmk_i2c_dev *nmk_i2c = platform_get_drvdata(pdev); + + if (nmk_i2c->busy) + return -EBUSY; + + return 0; +} + +static int nmk_i2c_resume(struct device *dev) +{ + return 0; +} +#else +#define nmk_i2c_suspend NULL +#define nmk_i2c_resume NULL +#endif + +/* + * We use noirq so that we suspend late and resume before the wakeup interrupt + * to ensure that we do the !pm_runtime_suspended() check in resume before + * there has been a regular pm runtime resume (via pm_runtime_get_sync()). 
+ */ +static const struct dev_pm_ops nmk_i2c_pm = { + .suspend_noirq = nmk_i2c_suspend, + .resume_noirq = nmk_i2c_resume, +}; + static unsigned int nmk_i2c_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; @@ -830,7 +911,7 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev) ret = -ENOMEM; goto err_no_mem; } - + dev->busy = false; dev->pdev = pdev; platform_set_drvdata(pdev, dev); @@ -860,6 +941,15 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev) goto err_irq; } + dev->regulator = regulator_get(&pdev->dev, "v-i2c"); + if (IS_ERR(dev->regulator)) { + dev_warn(&pdev->dev, "could not get i2c regulator\n"); + dev->regulator = NULL; + } + + pm_suspend_ignore_children(&pdev->dev, true); + pm_runtime_enable(&pdev->dev); + dev->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(dev->clk)) { dev_err(&pdev->dev, "could not get i2c clock\n"); @@ -872,6 +962,8 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev) adap->owner = THIS_MODULE; adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; adap->algo = &nmk_i2c_algo; + adap->timeout = pdata->timeout ? msecs_to_jiffies(pdata->timeout) : + msecs_to_jiffies(20000); snprintf(adap->name, sizeof(adap->name), "Nomadik I2C%d at %lx", pdev->id, (unsigned long)res->start); @@ -887,12 +979,6 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev) i2c_set_adapdata(adap, dev); - ret = init_hw(dev); - if (ret != 0) { - dev_err(&pdev->dev, "error in initializing i2c hardware\n"); - goto err_init_hw; - } - dev_info(&pdev->dev, "initialize %s on virtual " "base %p\n", adap->name, dev->virtbase); @@ -904,10 +990,12 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev) return 0; - err_init_hw: err_add_adap: clk_put(dev->clk); err_no_clk: + if (dev->regulator) + regulator_put(dev->regulator); + pm_runtime_disable(&pdev->dev); free_irq(dev->irq, dev); err_irq: iounmap(dev->virtbase); @@ -938,6 +1026,9 @@ static int __devexit nmk_i2c_remove(struct platform_device *pdev) if (res) release_mem_region(res->start, resource_size(res)); clk_put(dev->clk); + if (dev->regulator) + regulator_put(dev->regulator); + pm_runtime_disable(&pdev->dev); platform_set_drvdata(pdev, NULL); kfree(dev); @@ -948,6 +1039,7 @@ static struct platform_driver nmk_i2c_driver = { .driver = { .owner = THIS_MODULE, .name = DRIVER_NAME, + .pm = &nmk_i2c_pm, }, .probe = nmk_i2c_probe, .remove = __devexit_p(nmk_i2c_remove), diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 81ccd787562..f633a53b6db 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -32,6 +32,7 @@ #include <linux/clk.h> #include <linux/io.h> #include <linux/slab.h> +#include <linux/i2c/i2c-sh_mobile.h> /* Transmit operation: */ /* */ @@ -117,7 +118,7 @@ struct sh_mobile_i2c_data { struct device *dev; void __iomem *reg; struct i2c_adapter adap; - + unsigned long bus_speed; struct clk *clk; u_int8_t icic; u_int8_t iccl; @@ -205,7 +206,7 @@ static void activate_ch(struct sh_mobile_i2c_data *pd) * We also round off the result. 
*/ num = i2c_clk * 5; - denom = NORMAL_SPEED * 9; + denom = pd->bus_speed * 9; tmp = num * 10 / denom; if (tmp % 10 >= 5) pd->iccl = (u_int8_t)((num/denom) + 1); @@ -574,10 +575,10 @@ static int sh_mobile_i2c_hook_irqs(struct platform_device *dev, int hook) static int sh_mobile_i2c_probe(struct platform_device *dev) { + struct i2c_sh_mobile_platform_data *pdata = dev->dev.platform_data; struct sh_mobile_i2c_data *pd; struct i2c_adapter *adap; struct resource *res; - char clk_name[8]; int size; int ret; @@ -587,10 +588,9 @@ static int sh_mobile_i2c_probe(struct platform_device *dev) return -ENOMEM; } - snprintf(clk_name, sizeof(clk_name), "i2c%d", dev->id); - pd->clk = clk_get(&dev->dev, clk_name); + pd->clk = clk_get(&dev->dev, NULL); if (IS_ERR(pd->clk)) { - dev_err(&dev->dev, "cannot get clock \"%s\"\n", clk_name); + dev_err(&dev->dev, "cannot get clock\n"); ret = PTR_ERR(pd->clk); goto err; } @@ -620,6 +620,11 @@ static int sh_mobile_i2c_probe(struct platform_device *dev) goto err_irq; } + /* Use platform data bus speed or NORMAL_SPEED */ + pd->bus_speed = NORMAL_SPEED; + if (pdata && pdata->bus_speed) + pd->bus_speed = pdata->bus_speed; + /* The IIC blocks on SH-Mobile ARM processors * come with two new bits in ICIC. */ @@ -660,6 +665,8 @@ static int sh_mobile_i2c_probe(struct platform_device *dev) goto err_all; } + dev_info(&dev->dev, "I2C adapter %d with bus speed %lu Hz\n", + adap->nr, pd->bus_speed); return 0; err_all: diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index b4ab39b741e..4d9319665e3 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -35,8 +35,10 @@ #define BYTES_PER_FIFO_WORD 4 #define I2C_CNFG 0x000 +#define I2C_CNFG_DEBOUNCE_CNT_SHIFT 12 #define I2C_CNFG_PACKET_MODE_EN (1<<10) #define I2C_CNFG_NEW_MASTER_FSM (1<<11) +#define I2C_STATUS 0x01C #define I2C_SL_CNFG 0x020 #define I2C_SL_CNFG_NEWSL (1<<2) #define I2C_SL_ADDR1 0x02c @@ -77,6 +79,7 @@ #define I2C_ERR_NONE 0x00 #define I2C_ERR_NO_ACK 0x01 #define I2C_ERR_ARBITRATION_LOST 0x02 +#define I2C_ERR_UNKNOWN_INTERRUPT 0x04 #define PACKET_HEADER0_HEADER_SIZE_SHIFT 28 #define PACKET_HEADER0_PACKET_ID_SHIFT 16 @@ -121,6 +124,7 @@ struct tegra_i2c_dev { void __iomem *base; int cont_id; int irq; + bool irq_disabled; int is_dvc; struct completion msg_complete; int msg_err; @@ -325,11 +329,17 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev) if (i2c_dev->is_dvc) tegra_dvc_init(i2c_dev); - val = I2C_CNFG_NEW_MASTER_FSM | I2C_CNFG_PACKET_MODE_EN; + val = I2C_CNFG_NEW_MASTER_FSM | I2C_CNFG_PACKET_MODE_EN | + (0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT); i2c_writel(i2c_dev, val, I2C_CNFG); i2c_writel(i2c_dev, 0, I2C_INT_MASK); clk_set_rate(i2c_dev->clk, i2c_dev->bus_clk_rate * 8); + if (!i2c_dev->is_dvc) { + u32 sl_cfg = i2c_readl(i2c_dev, I2C_SL_CNFG); + i2c_writel(i2c_dev, sl_cfg | I2C_SL_CNFG_NEWSL, I2C_SL_CNFG); + } + val = 7 << I2C_FIFO_CONTROL_TX_TRIG_SHIFT | 0 << I2C_FIFO_CONTROL_RX_TRIG_SHIFT; i2c_writel(i2c_dev, val, I2C_FIFO_CONTROL); @@ -338,6 +348,12 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev) err = -ETIMEDOUT; clk_disable(i2c_dev->clk); + + if (i2c_dev->irq_disabled) { + i2c_dev->irq_disabled = 0; + enable_irq(i2c_dev->irq); + } + return err; } @@ -350,8 +366,19 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id) status = i2c_readl(i2c_dev, I2C_INT_STATUS); if (status == 0) { - dev_warn(i2c_dev->dev, "interrupt with no status\n"); - return IRQ_NONE; + dev_warn(i2c_dev->dev, "irq status 0 %08x %08x %08x\n", + i2c_readl(i2c_dev, 
I2C_PACKET_TRANSFER_STATUS), + i2c_readl(i2c_dev, I2C_STATUS), + i2c_readl(i2c_dev, I2C_CNFG)); + i2c_dev->msg_err |= I2C_ERR_UNKNOWN_INTERRUPT; + + if (!i2c_dev->irq_disabled) { + disable_irq_nosync(i2c_dev->irq); + i2c_dev->irq_disabled = 1; + } + + complete(&i2c_dev->msg_complete); + goto err; } if (unlikely(status & status_err)) { @@ -391,6 +418,8 @@ err: I2C_INT_PACKET_XFER_COMPLETE | I2C_INT_TX_FIFO_DATA_REQ | I2C_INT_RX_FIFO_DATA_REQ); i2c_writel(i2c_dev, status, I2C_INT_STATUS); + if (i2c_dev->is_dvc) + dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS); return IRQ_HANDLED; } @@ -424,12 +453,12 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, packet_header = msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT; packet_header |= I2C_HEADER_IE_ENABLE; + if (!stop) + packet_header |= I2C_HEADER_REPEAT_START; if (msg->flags & I2C_M_TEN) packet_header |= I2C_HEADER_10BIT_ADDR; if (msg->flags & I2C_M_IGNORE_NAK) packet_header |= I2C_HEADER_CONT_ON_NAK; - if (msg->flags & I2C_M_NOSTART) - packet_header |= I2C_HEADER_REPEAT_START; if (msg->flags & I2C_M_RD) packet_header |= I2C_HEADER_READ; i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO); diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index a5ec5a7cb38..6e5123b1d34 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -1781,7 +1781,8 @@ static int ide_cd_probe(ide_drive_t *drive) ide_cd_read_toc(drive, &sense); g->fops = &idecd_ops; - g->flags |= GENHD_FL_REMOVABLE; + g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE; + g->events = DISK_EVENT_MEDIA_CHANGE; add_disk(g); return 0; diff --git a/drivers/input/input-compat.h b/drivers/input/input-compat.h index 4d8ea32e8a0..22be27b424d 100644 --- a/drivers/input/input-compat.h +++ b/drivers/input/input-compat.h @@ -19,7 +19,7 @@ /* Note to the author of this code: did it ever occur to you why the ifdefs are needed? Think about it again. -AK */ -#ifdef CONFIG_X86_64 +#if defined(CONFIG_X86_64) || defined(CONFIG_TILE) # define INPUT_COMPAT_TEST is_compat_task() #elif defined(CONFIG_S390) # define INPUT_COMPAT_TEST test_thread_flag(TIF_31BIT) diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c index 54ae71a907f..db25b6b2ae3 100644 --- a/drivers/isdn/hardware/mISDN/netjet.c +++ b/drivers/isdn/hardware/mISDN/netjet.c @@ -1072,6 +1072,12 @@ nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return -ENODEV; } + if (pdev->subsystem_vendor == 0xb100 && + pdev->subsystem_device == 0x0003 ) { + pr_notice("Netjet: Digium TDM400P not handled yet\n"); + return -ENODEV; + } + card = kzalloc(sizeof(struct tiger_hw), GFP_ATOMIC); if (!card) { pr_info("No kmem for Netjet\n"); diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 9bec8699b8a..1d027b475b2 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -14,6 +14,13 @@ config LEDS_CLASS This option enables the led sysfs class in /sys/class/leds. You'll need this to do anything useful with LEDs. If unsure, say N. +config LEDS_GPIO_REGISTER + bool + help + This option provides the function gpio_led_register_device. + As this function is used by arch code it must not be compiled as a + module. + if NEW_LEDS comment "LED drivers" @@ -115,13 +122,6 @@ config LEDS_ALIX2 This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs. You have to set leds-alix2.force=1 for boards with Award BIOS. 
-config LEDS_H1940 - tristate "LED Support for iPAQ H1940 device" - depends on LEDS_CLASS - depends on ARCH_H1940 - help - This option enables support for the LEDs on the h1940. - config LEDS_COBALT_QUBE tristate "LED Support for the Cobalt Qube series front LED" depends on LEDS_CLASS @@ -162,6 +162,16 @@ config LEDS_PCA9532 LED controller. It is generally only useful as a platform driver +config LEDS_PCA9532_GPIO + bool "Enable GPIO support for PCA9532" + depends on LEDS_PCA9532 + depends on GPIOLIB + help + Allow unused pins on PCA9532 to be used as gpio. + + To use a pin as gpio pca9532_type in pca9532_platform data needs to + set to PCA9532_TYPE_GPIO. + config LEDS_GPIO tristate "LED Support for GPIO connected LEDs" depends on LEDS_CLASS diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index 39c80fca84d..bccb96c9bb4 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -17,11 +17,11 @@ obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o obj-$(CONFIG_LEDS_NET5501) += leds-net5501.o obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o obj-$(CONFIG_LEDS_ALIX2) += leds-alix2.o -obj-$(CONFIG_LEDS_H1940) += leds-h1940.o obj-$(CONFIG_LEDS_COBALT_QUBE) += leds-cobalt-qube.o obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o +obj-$(CONFIG_LEDS_GPIO_REGISTER) += leds-gpio-register.o obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o obj-$(CONFIG_LEDS_LP5521) += leds-lp5521.o diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index d5a4ade8899..dc3d3d83191 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -131,7 +131,8 @@ static void led_set_software_blink(struct led_classdev *led_cdev, if (!led_cdev->blink_brightness) led_cdev->blink_brightness = led_cdev->max_brightness; - if (delay_on == led_cdev->blink_delay_on && + if (led_get_trigger_data(led_cdev) && + delay_on == led_cdev->blink_delay_on && delay_off == led_cdev->blink_delay_off) return; diff --git a/drivers/leds/leds-gpio-register.c b/drivers/leds/leds-gpio-register.c new file mode 100644 index 00000000000..1c4ed5510f3 --- /dev/null +++ b/drivers/leds/leds-gpio-register.c @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2011 Pengutronix + * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de> + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. + */ +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/leds.h> + +/** + * gpio_led_register_device - register a gpio-led device + * @pdata: the platform data used for the new device + * + * Makes a copy of pdata and pdata->leds and registers a new leds-gpio device + * with the result. This allows to have pdata and pdata-leds in .init.rodata + * and so saves some bytes compared to a static struct platform_device with + * static platform data. + * + * Returns the registered device or an error pointer. 
+ */ +struct platform_device *__init gpio_led_register_device( + int id, const struct gpio_led_platform_data *pdata) +{ + struct platform_device *ret; + struct gpio_led_platform_data _pdata = *pdata; + + _pdata.leds = kmemdup(pdata->leds, + pdata->num_leds * sizeof(*pdata->leds), GFP_KERNEL); + if (!_pdata.leds) + return ERR_PTR(-ENOMEM); + + ret = platform_device_register_resndata(NULL, "leds-gpio", id, + NULL, 0, &_pdata, sizeof(_pdata)); + if (IS_ERR(ret)) + kfree(_pdata.leds); + + return ret; +} diff --git a/drivers/leds/leds-h1940.c b/drivers/leds/leds-h1940.c deleted file mode 100644 index 173d104d9ff..00000000000 --- a/drivers/leds/leds-h1940.c +++ /dev/null @@ -1,170 +0,0 @@ -/* - * drivers/leds/leds-h1940.c - * Copyright (c) Arnaud Patard <arnaud.patard@rtp-net.org> - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file COPYING in the main directory of this archive for - * more details. - * - * H1940 leds driver - * - */ - -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/delay.h> -#include <linux/string.h> -#include <linux/ctype.h> -#include <linux/leds.h> -#include <linux/gpio.h> - -#include <mach/regs-gpio.h> -#include <mach/hardware.h> -#include <mach/h1940-latch.h> - -/* - * Green led. - */ -static void h1940_greenled_set(struct led_classdev *led_dev, - enum led_brightness value) -{ - switch (value) { - case LED_HALF: - h1940_latch_control(0, H1940_LATCH_LED_FLASH); - s3c2410_gpio_setpin(S3C2410_GPA7, 1); - break; - case LED_FULL: - h1940_latch_control(0, H1940_LATCH_LED_GREEN); - s3c2410_gpio_setpin(S3C2410_GPA7, 1); - break; - default: - case LED_OFF: - h1940_latch_control(H1940_LATCH_LED_FLASH, 0); - h1940_latch_control(H1940_LATCH_LED_GREEN, 0); - s3c2410_gpio_setpin(S3C2410_GPA7, 0); - break; - } -} - -static struct led_classdev h1940_greenled = { - .name = "h1940:green", - .brightness_set = h1940_greenled_set, - .default_trigger = "h1940-charger", -}; - -/* - * Red led. - */ -static void h1940_redled_set(struct led_classdev *led_dev, - enum led_brightness value) -{ - switch (value) { - case LED_HALF: - h1940_latch_control(0, H1940_LATCH_LED_FLASH); - s3c2410_gpio_setpin(S3C2410_GPA1, 1); - break; - case LED_FULL: - h1940_latch_control(0, H1940_LATCH_LED_RED); - s3c2410_gpio_setpin(S3C2410_GPA1, 1); - break; - default: - case LED_OFF: - h1940_latch_control(H1940_LATCH_LED_FLASH, 0); - h1940_latch_control(H1940_LATCH_LED_RED, 0); - s3c2410_gpio_setpin(S3C2410_GPA1, 0); - break; - } -} - -static struct led_classdev h1940_redled = { - .name = "h1940:red", - .brightness_set = h1940_redled_set, - .default_trigger = "h1940-charger", -}; - -/* - * Blue led. 
- * (it can only be blue flashing led) - */ -static void h1940_blueled_set(struct led_classdev *led_dev, - enum led_brightness value) -{ - if (value) { - /* flashing Blue */ - h1940_latch_control(0, H1940_LATCH_LED_FLASH); - s3c2410_gpio_setpin(S3C2410_GPA3, 1); - } else { - h1940_latch_control(H1940_LATCH_LED_FLASH, 0); - s3c2410_gpio_setpin(S3C2410_GPA3, 0); - } - -} - -static struct led_classdev h1940_blueled = { - .name = "h1940:blue", - .brightness_set = h1940_blueled_set, - .default_trigger = "h1940-bluetooth", -}; - -static int __devinit h1940leds_probe(struct platform_device *pdev) -{ - int ret; - - ret = led_classdev_register(&pdev->dev, &h1940_greenled); - if (ret) - goto err_green; - - ret = led_classdev_register(&pdev->dev, &h1940_redled); - if (ret) - goto err_red; - - ret = led_classdev_register(&pdev->dev, &h1940_blueled); - if (ret) - goto err_blue; - - return 0; - -err_blue: - led_classdev_unregister(&h1940_redled); -err_red: - led_classdev_unregister(&h1940_greenled); -err_green: - return ret; -} - -static int h1940leds_remove(struct platform_device *pdev) -{ - led_classdev_unregister(&h1940_greenled); - led_classdev_unregister(&h1940_redled); - led_classdev_unregister(&h1940_blueled); - return 0; -} - - -static struct platform_driver h1940leds_driver = { - .driver = { - .name = "h1940-leds", - .owner = THIS_MODULE, - }, - .probe = h1940leds_probe, - .remove = h1940leds_remove, -}; - - -static int __init h1940leds_init(void) -{ - return platform_driver_register(&h1940leds_driver); -} - -static void __exit h1940leds_exit(void) -{ - platform_driver_unregister(&h1940leds_driver); -} - -module_init(h1940leds_init); -module_exit(h1940leds_exit); - -MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>"); -MODULE_DESCRIPTION("LED driver for the iPAQ H1940"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:h1940-leds"); diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c index b37e6186d0f..4d7ce7631ac 100644 --- a/drivers/leds/leds-lm3530.c +++ b/drivers/leds/leds-lm3530.c @@ -17,6 +17,7 @@ #include <linux/input.h> #include <linux/led-lm3530.h> #include <linux/types.h> +#include <linux/regulator/consumer.h> #define LM3530_LED_DEV "lcd-backlight" #define LM3530_NAME "lm3530-led" @@ -96,12 +97,18 @@ static struct lm3530_mode_map mode_map[] = { * @client: i2c client * @pdata: LM3530 platform data * @mode: mode of operation - manual, ALS, PWM + * @regulator: regulator + * @brighness: previous brightness value + * @enable: regulator is enabled */ struct lm3530_data { struct led_classdev led_dev; struct i2c_client *client; struct lm3530_platform_data *pdata; enum lm3530_mode mode; + struct regulator *regulator; + enum led_brightness brightness; + bool enable; }; static const u8 lm3530_reg[LM3530_REG_MAX] = { @@ -172,7 +179,10 @@ static int lm3530_init_registers(struct lm3530_data *drvdata) brt_ramp = (pltfm->brt_ramp_fall << LM3530_BRT_RAMP_FALL_SHIFT) | (pltfm->brt_ramp_rise << LM3530_BRT_RAMP_RISE_SHIFT); - brightness = pltfm->brt_val; + if (drvdata->brightness) + brightness = drvdata->brightness; + else + brightness = drvdata->brightness = pltfm->brt_val; reg_val[0] = gen_config; /* LM3530_GEN_CONFIG */ reg_val[1] = als_config; /* LM3530_ALS_CONFIG */ @@ -190,6 +200,16 @@ static int lm3530_init_registers(struct lm3530_data *drvdata) reg_val[13] = LM3530_DEF_ZT_3; /* LM3530_ALS_Z3T_REG */ reg_val[14] = LM3530_DEF_ZT_4; /* LM3530_ALS_Z4T_REG */ + if (!drvdata->enable) { + ret = regulator_enable(drvdata->regulator); + if (ret) { + dev_err(&drvdata->client->dev, + 
"Enable regulator failed\n"); + return ret; + } + drvdata->enable = true; + } + for (i = 0; i < LM3530_REG_MAX; i++) { ret = i2c_smbus_write_byte_data(client, lm3530_reg[i], reg_val[i]); @@ -210,12 +230,31 @@ static void lm3530_brightness_set(struct led_classdev *led_cdev, switch (drvdata->mode) { case LM3530_BL_MODE_MANUAL: + if (!drvdata->enable) { + err = lm3530_init_registers(drvdata); + if (err) { + dev_err(&drvdata->client->dev, + "Register Init failed: %d\n", err); + break; + } + } + /* set the brightness in brightness control register*/ err = i2c_smbus_write_byte_data(drvdata->client, LM3530_BRT_CTRL_REG, brt_val / 2); if (err) dev_err(&drvdata->client->dev, "Unable to set brightness: %d\n", err); + else + drvdata->brightness = brt_val / 2; + + if (brt_val == 0) { + err = regulator_disable(drvdata->regulator); + if (err) + dev_err(&drvdata->client->dev, + "Disable regulator failed\n"); + drvdata->enable = false; + } break; case LM3530_BL_MODE_ALS: break; @@ -297,20 +336,31 @@ static int __devinit lm3530_probe(struct i2c_client *client, drvdata->mode = pdata->mode; drvdata->client = client; drvdata->pdata = pdata; + drvdata->brightness = LED_OFF; + drvdata->enable = false; drvdata->led_dev.name = LM3530_LED_DEV; drvdata->led_dev.brightness_set = lm3530_brightness_set; i2c_set_clientdata(client, drvdata); - err = lm3530_init_registers(drvdata); - if (err < 0) { - dev_err(&client->dev, "Register Init failed: %d\n", err); - err = -ENODEV; - goto err_reg_init; + drvdata->regulator = regulator_get(&client->dev, "vin"); + if (IS_ERR(drvdata->regulator)) { + dev_err(&client->dev, "regulator get failed\n"); + err = PTR_ERR(drvdata->regulator); + drvdata->regulator = NULL; + goto err_regulator_get; } - err = led_classdev_register((struct device *) - &client->dev, &drvdata->led_dev); + if (drvdata->pdata->brt_val) { + err = lm3530_init_registers(drvdata); + if (err < 0) { + dev_err(&client->dev, + "Register Init failed: %d\n", err); + err = -ENODEV; + goto err_reg_init; + } + } + err = led_classdev_register(&client->dev, &drvdata->led_dev); if (err < 0) { dev_err(&client->dev, "Register led class failed: %d\n", err); err = -ENODEV; @@ -330,6 +380,9 @@ err_create_file: led_classdev_unregister(&drvdata->led_dev); err_class_register: err_reg_init: + regulator_put(drvdata->regulator); +err_regulator_get: + i2c_set_clientdata(client, NULL); kfree(drvdata); err_out: return err; @@ -340,6 +393,10 @@ static int __devexit lm3530_remove(struct i2c_client *client) struct lm3530_data *drvdata = i2c_get_clientdata(client); device_remove_file(drvdata->led_dev.dev, &dev_attr_mode); + + if (drvdata->enable) + regulator_disable(drvdata->regulator); + regulator_put(drvdata->regulator); led_classdev_unregister(&drvdata->led_dev); kfree(drvdata); return 0; diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c index 5bf63af09dd..d8d3a1e910a 100644 --- a/drivers/leds/leds-pca9532.c +++ b/drivers/leds/leds-pca9532.c @@ -1,13 +1,14 @@ /* * pca9532.c - 16-bit Led dimmer * + * Copyright (C) 2011 Jan Weitzel * Copyright (C) 2008 Riku Voipio * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. 
* - * Datasheet: http://www.nxp.com/acrobat/datasheets/PCA9532_3.pdf + * Datasheet: http://www.nxp.com/documents/data_sheet/PCA9532.pdf * */ @@ -19,21 +20,32 @@ #include <linux/mutex.h> #include <linux/workqueue.h> #include <linux/leds-pca9532.h> +#include <linux/gpio.h> -#define PCA9532_REG_PSC(i) (0x2+(i)*2) -#define PCA9532_REG_PWM(i) (0x3+(i)*2) -#define PCA9532_REG_LS0 0x6 -#define LED_REG(led) ((led>>2)+PCA9532_REG_LS0) -#define LED_NUM(led) (led & 0x3) +/* m = num_leds*/ +#define PCA9532_REG_INPUT(i) ((i) >> 3) +#define PCA9532_REG_OFFSET(m) ((m) >> 4) +#define PCA9532_REG_PSC(m, i) (PCA9532_REG_OFFSET(m) + 0x1 + (i) * 2) +#define PCA9532_REG_PWM(m, i) (PCA9532_REG_OFFSET(m) + 0x2 + (i) * 2) +#define LED_REG(m, led) (PCA9532_REG_OFFSET(m) + 0x5 + (led >> 2)) +#define LED_NUM(led) (led & 0x3) #define ldev_to_led(c) container_of(c, struct pca9532_led, ldev) +struct pca9532_chip_info { + u8 num_leds; +}; + struct pca9532_data { struct i2c_client *client; struct pca9532_led leds[16]; struct mutex update_lock; struct input_dev *idev; struct work_struct work; +#ifdef CONFIG_LEDS_PCA9532_GPIO + struct gpio_chip gpio; +#endif + const struct pca9532_chip_info *chip_info; u8 pwm[2]; u8 psc[2]; }; @@ -42,16 +54,41 @@ static int pca9532_probe(struct i2c_client *client, const struct i2c_device_id *id); static int pca9532_remove(struct i2c_client *client); +enum { + pca9530, + pca9531, + pca9532, + pca9533, +}; + static const struct i2c_device_id pca9532_id[] = { - { "pca9532", 0 }, + { "pca9530", pca9530 }, + { "pca9531", pca9531 }, + { "pca9532", pca9532 }, + { "pca9533", pca9533 }, { } }; MODULE_DEVICE_TABLE(i2c, pca9532_id); +static const struct pca9532_chip_info pca9532_chip_info_tbl[] = { + [pca9530] = { + .num_leds = 2, + }, + [pca9531] = { + .num_leds = 8, + }, + [pca9532] = { + .num_leds = 16, + }, + [pca9533] = { + .num_leds = 4, + }, +}; + static struct i2c_driver pca9532_driver = { .driver = { - .name = "pca9532", + .name = "pca953x", }, .probe = pca9532_probe, .remove = pca9532_remove, @@ -68,7 +105,7 @@ static int pca9532_calcpwm(struct i2c_client *client, int pwm, int blink, { int a = 0, b = 0, i = 0; struct pca9532_data *data = i2c_get_clientdata(client); - for (i = 0; i < 16; i++) { + for (i = 0; i < data->chip_info->num_leds; i++) { if (data->leds[i].type == PCA9532_TYPE_LED && data->leds[i].state == PCA9532_PWM0+pwm) { a++; @@ -92,10 +129,12 @@ static int pca9532_calcpwm(struct i2c_client *client, int pwm, int blink, static int pca9532_setpwm(struct i2c_client *client, int pwm) { struct pca9532_data *data = i2c_get_clientdata(client); + u8 maxleds = data->chip_info->num_leds; + mutex_lock(&data->update_lock); - i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(pwm), + i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(maxleds, pwm), data->pwm[pwm]); - i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(pwm), + i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(maxleds, pwm), data->psc[pwm]); mutex_unlock(&data->update_lock); return 0; @@ -106,15 +145,16 @@ static void pca9532_setled(struct pca9532_led *led) { struct i2c_client *client = led->client; struct pca9532_data *data = i2c_get_clientdata(client); + u8 maxleds = data->chip_info->num_leds; char reg; mutex_lock(&data->update_lock); - reg = i2c_smbus_read_byte_data(client, LED_REG(led->id)); + reg = i2c_smbus_read_byte_data(client, LED_REG(maxleds, led->id)); /* zero led bits */ reg = reg & ~(0x3<<LED_NUM(led->id)*2); /* set the new value */ reg = reg | (led->state << LED_NUM(led->id)*2); - i2c_smbus_write_byte_data(client, 
LED_REG(led->id), reg); + i2c_smbus_write_byte_data(client, LED_REG(maxleds, led->id), reg); mutex_unlock(&data->update_lock); } @@ -183,10 +223,12 @@ static int pca9532_event(struct input_dev *dev, unsigned int type, static void pca9532_input_work(struct work_struct *work) { - struct pca9532_data *data; - data = container_of(work, struct pca9532_data, work); + struct pca9532_data *data = + container_of(work, struct pca9532_data, work); + u8 maxleds = data->chip_info->num_leds; + mutex_lock(&data->update_lock); - i2c_smbus_write_byte_data(data->client, PCA9532_REG_PWM(1), + i2c_smbus_write_byte_data(data->client, PCA9532_REG_PWM(maxleds, 1), data->pwm[1]); mutex_unlock(&data->update_lock); } @@ -200,16 +242,68 @@ static void pca9532_led_work(struct work_struct *work) pca9532_setled(led); } -static void pca9532_destroy_devices(struct pca9532_data *data, int n_devs) +#ifdef CONFIG_LEDS_PCA9532_GPIO +static int pca9532_gpio_request_pin(struct gpio_chip *gc, unsigned offset) +{ + struct pca9532_data *data = container_of(gc, struct pca9532_data, gpio); + struct pca9532_led *led = &data->leds[offset]; + + if (led->type == PCA9532_TYPE_GPIO) + return 0; + + return -EBUSY; +} + +static void pca9532_gpio_set_value(struct gpio_chip *gc, unsigned offset, int val) +{ + struct pca9532_data *data = container_of(gc, struct pca9532_data, gpio); + struct pca9532_led *led = &data->leds[offset]; + + if (val) + led->state = PCA9532_ON; + else + led->state = PCA9532_OFF; + + pca9532_setled(led); +} + +static int pca9532_gpio_get_value(struct gpio_chip *gc, unsigned offset) +{ + struct pca9532_data *data = container_of(gc, struct pca9532_data, gpio); + unsigned char reg; + + reg = i2c_smbus_read_byte_data(data->client, PCA9532_REG_INPUT(offset)); + + return !!(reg & (1 << (offset % 8))); +} + +static int pca9532_gpio_direction_input(struct gpio_chip *gc, unsigned offset) +{ + /* To use as input ensure pin is not driven */ + pca9532_gpio_set_value(gc, offset, 0); + + return 0; +} + +static int pca9532_gpio_direction_output(struct gpio_chip *gc, unsigned offset, int val) +{ + pca9532_gpio_set_value(gc, offset, val); + + return 0; +} +#endif /* CONFIG_LEDS_PCA9532_GPIO */ + +static int pca9532_destroy_devices(struct pca9532_data *data, int n_devs) { int i = n_devs; if (!data) - return; + return -EINVAL; while (--i >= 0) { switch (data->leds[i].type) { case PCA9532_TYPE_NONE: + case PCA9532_TYPE_GPIO: break; case PCA9532_TYPE_LED: led_classdev_unregister(&data->leds[i].ldev); @@ -224,23 +318,38 @@ static void pca9532_destroy_devices(struct pca9532_data *data, int n_devs) break; } } + +#ifdef CONFIG_LEDS_PCA9532_GPIO + if (data->gpio.dev) { + int err = gpiochip_remove(&data->gpio); + if (err) { + dev_err(&data->client->dev, "%s failed, %d\n", + "gpiochip_remove()", err); + return err; + } + } +#endif + + return 0; } static int pca9532_configure(struct i2c_client *client, struct pca9532_data *data, struct pca9532_platform_data *pdata) { int i, err = 0; + int gpios = 0; + u8 maxleds = data->chip_info->num_leds; for (i = 0; i < 2; i++) { data->pwm[i] = pdata->pwm[i]; data->psc[i] = pdata->psc[i]; - i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(i), + i2c_smbus_write_byte_data(client, PCA9532_REG_PWM(maxleds, i), data->pwm[i]); - i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(i), + i2c_smbus_write_byte_data(client, PCA9532_REG_PSC(maxleds, i), data->psc[i]); } - for (i = 0; i < 16; i++) { + for (i = 0; i < data->chip_info->num_leds; i++) { struct pca9532_led *led = &data->leds[i]; struct pca9532_led *pled = 
&pdata->leds[i]; led->client = client; @@ -249,6 +358,9 @@ static int pca9532_configure(struct i2c_client *client, switch (led->type) { case PCA9532_TYPE_NONE: break; + case PCA9532_TYPE_GPIO: + gpios++; + break; case PCA9532_TYPE_LED: led->state = pled->state; led->name = pled->name; @@ -297,6 +409,34 @@ static int pca9532_configure(struct i2c_client *client, break; } } + +#ifdef CONFIG_LEDS_PCA9532_GPIO + if (gpios) { + data->gpio.label = "gpio-pca9532"; + data->gpio.direction_input = pca9532_gpio_direction_input; + data->gpio.direction_output = pca9532_gpio_direction_output; + data->gpio.set = pca9532_gpio_set_value; + data->gpio.get = pca9532_gpio_get_value; + data->gpio.request = pca9532_gpio_request_pin; + data->gpio.can_sleep = 1; + data->gpio.base = pdata->gpio_base; + data->gpio.ngpio = data->chip_info->num_leds; + data->gpio.dev = &client->dev; + data->gpio.owner = THIS_MODULE; + + err = gpiochip_add(&data->gpio); + if (err) { + /* Use data->gpio.dev as a flag for freeing gpiochip */ + data->gpio.dev = NULL; + dev_warn(&client->dev, "could not add gpiochip\n"); + } else { + dev_info(&client->dev, "gpios %i...%i\n", + data->gpio.base, data->gpio.base + + data->gpio.ngpio - 1); + } + } +#endif + return 0; exit: @@ -322,6 +462,8 @@ static int pca9532_probe(struct i2c_client *client, if (!data) return -ENOMEM; + data->chip_info = &pca9532_chip_info_tbl[id->driver_data]; + dev_info(&client->dev, "setting platform data\n"); i2c_set_clientdata(client, data); data->client = client; @@ -337,7 +479,12 @@ static int pca9532_probe(struct i2c_client *client, static int pca9532_remove(struct i2c_client *client) { struct pca9532_data *data = i2c_get_clientdata(client); - pca9532_destroy_devices(data, 16); + int err; + + err = pca9532_destroy_devices(data, data->chip_info->num_leds); + if (err) + return err; + kfree(data); return 0; } diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h index 2dd8ecbfdc3..e77c7f8dcdd 100644 --- a/drivers/leds/leds.h +++ b/drivers/leds/leds.h @@ -40,10 +40,17 @@ void led_trigger_set_default(struct led_classdev *led_cdev); void led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger); void led_trigger_remove(struct led_classdev *led_cdev); + +static inline void *led_get_trigger_data(struct led_classdev *led_cdev) +{ + return led_cdev->trigger_data; +} + #else #define led_trigger_set_default(x) do {} while (0) #define led_trigger_set(x, y) do {} while (0) #define led_trigger_remove(x) do {} while (0) +#define led_get_trigger_data(x) (NULL) #endif ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr, diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c index b09bcbeade9..d87c9d02f78 100644 --- a/drivers/leds/ledtrig-timer.c +++ b/drivers/leds/ledtrig-timer.c @@ -91,6 +91,9 @@ static void timer_trig_activate(struct led_classdev *led_cdev) if (rc) goto err_out_delayon; + led_blink_set(led_cdev, &led_cdev->blink_delay_on, + &led_cdev->blink_delay_off); + led_cdev->trigger_data = (void *)1; return; diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c index d4fe7bc92a1..4ada9be1d43 100644 --- a/drivers/media/video/omap/omap_vout.c +++ b/drivers/media/video/omap/omap_vout.c @@ -47,7 +47,7 @@ #include <plat/dma.h> #include <plat/vram.h> #include <plat/vrfb.h> -#include <plat/display.h> +#include <video/omapdss.h> #include "omap_voutlib.h" #include "omap_voutdef.h" diff --git a/drivers/media/video/omap/omap_voutdef.h b/drivers/media/video/omap/omap_voutdef.h index 
ea3a047f8bc..659497b8499 100644 --- a/drivers/media/video/omap/omap_voutdef.h +++ b/drivers/media/video/omap/omap_voutdef.h @@ -11,7 +11,7 @@ #ifndef OMAP_VOUTDEF_H #define OMAP_VOUTDEF_H -#include <plat/display.h> +#include <video/omapdss.h> #define YUYV_BPP 2 #define RGB565_BPP 2 diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 3ed3ff06be5..481770ab271 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -538,7 +538,7 @@ config AB8500_CORE config AB8500_I2C_CORE bool "AB8500 register access via PRCMU I2C" - depends on AB8500_CORE && UX500_SOC_DB8500 + depends on AB8500_CORE && MFD_DB8500_PRCMU default y help This enables register access to the AB8500 chip via PRCMU I2C. @@ -575,6 +575,26 @@ config AB3550_CORE LEDs, vibrator, system power and temperature, power management and ALSA sound. +config MFD_DB8500_PRCMU + bool "ST-Ericsson DB8500 Power Reset Control Management Unit" + depends on UX500_SOC_DB8500 + select MFD_CORE + help + Select this option to enable support for the DB8500 Power Reset + and Control Management Unit. This is basically an autonomous + system controller running an XP70 microprocessor, which is accessed + through a register map. + +config MFD_DB5500_PRCMU + bool "ST-Ericsson DB5500 Power Reset Control Management Unit" + depends on UX500_SOC_DB5500 + select MFD_CORE + help + Select this option to enable support for the DB5500 Power Reset + and Control Management Unit. This is basically an autonomous + system controller running an XP70 microprocessor, which is accessed + through a register map. + config MFD_CS5535 tristate "Support for CS5535 and CS5536 southbridge core functions" select MFD_CORE diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 419caa9d7dc..24aa44448da 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -74,9 +74,12 @@ obj-$(CONFIG_AB3100_CORE) += ab3100-core.o obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o obj-$(CONFIG_AB3550_CORE) += ab3550-core.o obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o -obj-$(CONFIG_AB8500_I2C_CORE) += ab8500-i2c.o obj-$(CONFIG_AB8500_DEBUG) += ab8500-debugfs.o obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o +obj-$(CONFIG_MFD_DB8500_PRCMU) += db8500-prcmu.o +# ab8500-i2c need to come after db8500-prcmu (which provides the channel) +obj-$(CONFIG_AB8500_I2C_CORE) += ab8500-i2c.o +obj-$(CONFIG_MFD_DB5500_PRCMU) += db5500-prcmu.o obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o obj-$(CONFIG_PMIC_ADP5520) += adp5520.o obj-$(CONFIG_LPC_SCH) += lpc_sch.o diff --git a/drivers/mfd/ab8500-i2c.c b/drivers/mfd/ab8500-i2c.c index 821e6b86afd..9be541c6b00 100644 --- a/drivers/mfd/ab8500-i2c.c +++ b/drivers/mfd/ab8500-i2c.c @@ -11,8 +11,7 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/mfd/ab8500.h> - -#include <mach/prcmu.h> +#include <linux/mfd/db8500-prcmu.h> static int ab8500_i2c_write(struct ab8500 *ab8500, u16 addr, u8 data) { diff --git a/drivers/mfd/db5500-prcmu-regs.h b/drivers/mfd/db5500-prcmu-regs.h new file mode 100644 index 00000000000..9a8e9e4ddd3 --- /dev/null +++ b/drivers/mfd/db5500-prcmu-regs.h @@ -0,0 +1,115 @@ +/* + * Copyright (C) STMicroelectronics 2009 + * Copyright (C) ST-Ericsson SA 2010 + * + * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com> + * Author: Sundar Iyer <sundar.iyer@stericsson.com> + * + * License Terms: GNU General Public License v2 + * + * PRCM Unit registers + */ + +#ifndef __MACH_PRCMU_REGS_H +#define __MACH_PRCMU_REGS_H + +#include <mach/hardware.h> + +#define PRCM_ARM_PLLDIVPS (_PRCMU_BASE + 0x118) +#define 
PRCM_ARM_PLLDIVPS_ARM_BRM_RATE 0x3f +#define PRCM_ARM_PLLDIVPS_MAX_MASK 0xf + +#define PRCM_PLLARM_LOCKP (_PRCMU_BASE + 0x0a8) +#define PRCM_PLLARM_LOCKP_PRCM_PLLARM_LOCKP3 0x2 + +#define PRCM_ARM_CHGCLKREQ (_PRCMU_BASE + 0x114) +#define PRCM_ARM_CHGCLKREQ_PRCM_ARM_CHGCLKREQ 0x1 + +#define PRCM_PLLARM_ENABLE (_PRCMU_BASE + 0x98) +#define PRCM_PLLARM_ENABLE_PRCM_PLLARM_ENABLE 0x1 +#define PRCM_PLLARM_ENABLE_PRCM_PLLARM_COUNTON 0x100 + +#define PRCM_ARMCLKFIX_MGT (_PRCMU_BASE + 0x0) +#define PRCM_A9_RESETN_CLR (_PRCMU_BASE + 0x1f4) +#define PRCM_A9_RESETN_SET (_PRCMU_BASE + 0x1f0) +#define PRCM_ARM_LS_CLAMP (_PRCMU_BASE + 0x30c) +#define PRCM_SRAM_A9 (_PRCMU_BASE + 0x308) + +/* ARM WFI Standby signal register */ +#define PRCM_ARM_WFI_STANDBY (_PRCMU_BASE + 0x130) +#define PRCM_IOCR (_PRCMU_BASE + 0x310) +#define PRCM_IOCR_IOFORCE 0x1 + +/* CPU mailbox registers */ +#define PRCM_MBOX_CPU_VAL (_PRCMU_BASE + 0x0fc) +#define PRCM_MBOX_CPU_SET (_PRCMU_BASE + 0x100) +#define PRCM_MBOX_CPU_CLR (_PRCMU_BASE + 0x104) + +/* Dual A9 core interrupt management unit registers */ +#define PRCM_A9_MASK_REQ (_PRCMU_BASE + 0x328) +#define PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ 0x1 + +#define PRCM_A9_MASK_ACK (_PRCMU_BASE + 0x32c) +#define PRCM_ARMITMSK31TO0 (_PRCMU_BASE + 0x11c) +#define PRCM_ARMITMSK63TO32 (_PRCMU_BASE + 0x120) +#define PRCM_ARMITMSK95TO64 (_PRCMU_BASE + 0x124) +#define PRCM_ARMITMSK127TO96 (_PRCMU_BASE + 0x128) +#define PRCM_POWER_STATE_VAL (_PRCMU_BASE + 0x25C) +#define PRCM_ARMITVAL31TO0 (_PRCMU_BASE + 0x260) +#define PRCM_ARMITVAL63TO32 (_PRCMU_BASE + 0x264) +#define PRCM_ARMITVAL95TO64 (_PRCMU_BASE + 0x268) +#define PRCM_ARMITVAL127TO96 (_PRCMU_BASE + 0x26C) + +#define PRCM_HOSTACCESS_REQ (_PRCMU_BASE + 0x334) +#define ARM_WAKEUP_MODEM 0x1 + +#define PRCM_ARM_IT1_CLEAR (_PRCMU_BASE + 0x48C) +#define PRCM_ARM_IT1_VAL (_PRCMU_BASE + 0x494) +#define PRCM_HOLD_EVT (_PRCMU_BASE + 0x174) + +#define PRCM_ITSTATUS0 (_PRCMU_BASE + 0x148) +#define PRCM_ITSTATUS1 (_PRCMU_BASE + 0x150) +#define PRCM_ITSTATUS2 (_PRCMU_BASE + 0x158) +#define PRCM_ITSTATUS3 (_PRCMU_BASE + 0x160) +#define PRCM_ITSTATUS4 (_PRCMU_BASE + 0x168) +#define PRCM_ITSTATUS5 (_PRCMU_BASE + 0x484) +#define PRCM_ITCLEAR5 (_PRCMU_BASE + 0x488) +#define PRCM_ARMIT_MASKXP70_IT (_PRCMU_BASE + 0x1018) + +/* System reset register */ +#define PRCM_APE_SOFTRST (_PRCMU_BASE + 0x228) + +/* Level shifter and clamp control registers */ +#define PRCM_MMIP_LS_CLAMP_SET (_PRCMU_BASE + 0x420) +#define PRCM_MMIP_LS_CLAMP_CLR (_PRCMU_BASE + 0x424) + +/* PRCMU clock/PLL/reset registers */ +#define PRCM_PLLDSI_FREQ (_PRCMU_BASE + 0x500) +#define PRCM_PLLDSI_ENABLE (_PRCMU_BASE + 0x504) +#define PRCM_PLLDSI_LOCKP (_PRCMU_BASE + 0x508) +#define PRCM_LCDCLK_MGT (_PRCMU_BASE + 0x044) +#define PRCM_MCDECLK_MGT (_PRCMU_BASE + 0x064) +#define PRCM_HDMICLK_MGT (_PRCMU_BASE + 0x058) +#define PRCM_TVCLK_MGT (_PRCMU_BASE + 0x07c) +#define PRCM_DSI_PLLOUT_SEL (_PRCMU_BASE + 0x530) +#define PRCM_DSITVCLK_DIV (_PRCMU_BASE + 0x52C) +#define PRCM_PLLDSI_LOCKP (_PRCMU_BASE + 0x508) +#define PRCM_APE_RESETN_SET (_PRCMU_BASE + 0x1E4) +#define PRCM_APE_RESETN_CLR (_PRCMU_BASE + 0x1E8) +#define PRCM_CLKOCR (_PRCMU_BASE + 0x1CC) + +/* ePOD and memory power signal control registers */ +#define PRCM_EPOD_C_SET (_PRCMU_BASE + 0x410) +#define PRCM_SRAM_LS_SLEEP (_PRCMU_BASE + 0x304) + +/* Debug power control unit registers */ +#define PRCM_POWER_STATE_SET (_PRCMU_BASE + 0x254) + +/* Miscellaneous unit registers */ +#define PRCM_DSI_SW_RESET (_PRCMU_BASE + 0x324) +#define 
PRCM_GPIOCR (_PRCMU_BASE + 0x138) +#define PRCM_GPIOCR_DBG_STM_MOD_CMD1 0x800 +#define PRCM_GPIOCR_DBG_UARTMOD_CMD0 0x1 + + +#endif /* __MACH_PRCMU__REGS_H */ diff --git a/drivers/mfd/db5500-prcmu.c b/drivers/mfd/db5500-prcmu.c new file mode 100644 index 00000000000..9dbb3cab4a6 --- /dev/null +++ b/drivers/mfd/db5500-prcmu.c @@ -0,0 +1,448 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License v2 + * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com> + * + * U5500 PRCM Unit interface driver + */ +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/spinlock.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/completion.h> +#include <linux/irq.h> +#include <linux/jiffies.h> +#include <linux/bitops.h> +#include <linux/interrupt.h> +#include <linux/mfd/db5500-prcmu.h> +#include <mach/hardware.h> +#include <mach/irqs.h> +#include <mach/db5500-regs.h> +#include "db5500-prcmu-regs.h" + +#define _PRCM_MB_HEADER (tcdm_base + 0xFE8) +#define PRCM_REQ_MB0_HEADER (_PRCM_MB_HEADER + 0x0) +#define PRCM_REQ_MB1_HEADER (_PRCM_MB_HEADER + 0x1) +#define PRCM_REQ_MB2_HEADER (_PRCM_MB_HEADER + 0x2) +#define PRCM_REQ_MB3_HEADER (_PRCM_MB_HEADER + 0x3) +#define PRCM_REQ_MB4_HEADER (_PRCM_MB_HEADER + 0x4) +#define PRCM_REQ_MB5_HEADER (_PRCM_MB_HEADER + 0x5) +#define PRCM_REQ_MB6_HEADER (_PRCM_MB_HEADER + 0x6) +#define PRCM_REQ_MB7_HEADER (_PRCM_MB_HEADER + 0x7) +#define PRCM_ACK_MB0_HEADER (_PRCM_MB_HEADER + 0x8) +#define PRCM_ACK_MB1_HEADER (_PRCM_MB_HEADER + 0x9) +#define PRCM_ACK_MB2_HEADER (_PRCM_MB_HEADER + 0xa) +#define PRCM_ACK_MB3_HEADER (_PRCM_MB_HEADER + 0xb) +#define PRCM_ACK_MB4_HEADER (_PRCM_MB_HEADER + 0xc) +#define PRCM_ACK_MB5_HEADER (_PRCM_MB_HEADER + 0xd) +#define PRCM_ACK_MB6_HEADER (_PRCM_MB_HEADER + 0xe) +#define PRCM_ACK_MB7_HEADER (_PRCM_MB_HEADER + 0xf) + +/* Req Mailboxes */ +#define PRCM_REQ_MB0 (tcdm_base + 0xFD8) +#define PRCM_REQ_MB1 (tcdm_base + 0xFCC) +#define PRCM_REQ_MB2 (tcdm_base + 0xFC4) +#define PRCM_REQ_MB3 (tcdm_base + 0xFC0) +#define PRCM_REQ_MB4 (tcdm_base + 0xF98) +#define PRCM_REQ_MB5 (tcdm_base + 0xF90) +#define PRCM_REQ_MB6 (tcdm_base + 0xF8C) +#define PRCM_REQ_MB7 (tcdm_base + 0xF84) + +/* Ack Mailboxes */ +#define PRCM_ACK_MB0 (tcdm_base + 0xF38) +#define PRCM_ACK_MB1 (tcdm_base + 0xF30) +#define PRCM_ACK_MB2 (tcdm_base + 0xF24) +#define PRCM_ACK_MB3 (tcdm_base + 0xF20) +#define PRCM_ACK_MB4 (tcdm_base + 0xF1C) +#define PRCM_ACK_MB5 (tcdm_base + 0xF14) +#define PRCM_ACK_MB6 (tcdm_base + 0xF0C) +#define PRCM_ACK_MB7 (tcdm_base + 0xF08) + +enum mb_return_code { + RC_SUCCESS, + RC_FAIL, +}; + +/* Mailbox 0 headers. */ +enum mb0_header { + /* request */ + RMB0H_PWR_STATE_TRANS = 1, + RMB0H_WAKE_UP_CFG, + RMB0H_RD_WAKE_UP_ACK, + /* acknowledge */ + AMB0H_WAKE_UP = 1, +}; + +/* Mailbox 5 headers. */ +enum mb5_header { + MB5H_I2C_WRITE = 1, + MB5H_I2C_READ, +}; + +/* Request mailbox 5 fields. */ +#define PRCM_REQ_MB5_I2C_SLAVE (PRCM_REQ_MB5 + 0) +#define PRCM_REQ_MB5_I2C_REG (PRCM_REQ_MB5 + 1) +#define PRCM_REQ_MB5_I2C_SIZE (PRCM_REQ_MB5 + 2) +#define PRCM_REQ_MB5_I2C_DATA (PRCM_REQ_MB5 + 4) + +/* Acknowledge mailbox 5 fields. 
*/ +#define PRCM_ACK_MB5_RETURN_CODE (PRCM_ACK_MB5 + 0) +#define PRCM_ACK_MB5_I2C_DATA (PRCM_ACK_MB5 + 4) + +#define NUM_MB 8 +#define MBOX_BIT BIT +#define ALL_MBOX_BITS (MBOX_BIT(NUM_MB) - 1) + +/* +* Used by MCDE to setup all necessary PRCMU registers +*/ +#define PRCMU_RESET_DSIPLL 0x00004000 +#define PRCMU_UNCLAMP_DSIPLL 0x00400800 + +/* HDMI CLK MGT PLLSW=001 (PLLSOC0), PLLDIV=0x8, = 50 Mhz*/ +#define PRCMU_DSI_CLOCK_SETTING 0x00000128 +/* TVCLK_MGT PLLSW=001 (PLLSOC0) PLLDIV=0x13, = 19.05 MHZ */ +#define PRCMU_DSI_LP_CLOCK_SETTING 0x00000135 +#define PRCMU_PLLDSI_FREQ_SETTING 0x0004013C +#define PRCMU_DSI_PLLOUT_SEL_SETTING 0x00000002 +#define PRCMU_ENABLE_ESCAPE_CLOCK_DIV 0x03000101 +#define PRCMU_DISABLE_ESCAPE_CLOCK_DIV 0x00000101 + +#define PRCMU_ENABLE_PLLDSI 0x00000001 +#define PRCMU_DISABLE_PLLDSI 0x00000000 + +#define PRCMU_DSI_RESET_SW 0x00000003 + +#define PRCMU_PLLDSI_LOCKP_LOCKED 0x3 + +/* + * mb0_transfer - state needed for mailbox 0 communication. + * @lock: The transaction lock. + */ +static struct { + spinlock_t lock; +} mb0_transfer; + +/* + * mb5_transfer - state needed for mailbox 5 communication. + * @lock: The transaction lock. + * @work: The transaction completion structure. + * @ack: Reply ("acknowledge") data. + */ +static struct { + struct mutex lock; + struct completion work; + struct { + u8 header; + u8 status; + u8 value[4]; + } ack; +} mb5_transfer; + +/* PRCMU TCDM base IO address. */ +static __iomem void *tcdm_base; + +/** + * db5500_prcmu_abb_read() - Read register value(s) from the ABB. + * @slave: The I2C slave address. + * @reg: The (start) register address. + * @value: The read out value(s). + * @size: The number of registers to read. + * + * Reads register value(s) from the ABB. + * @size has to be <= 4. + */ +int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size) +{ + int r; + + if ((size < 1) || (4 < size)) + return -EINVAL; + + mutex_lock(&mb5_transfer.lock); + + while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5)) + cpu_relax(); + writeb(slave, PRCM_REQ_MB5_I2C_SLAVE); + writeb(reg, PRCM_REQ_MB5_I2C_REG); + writeb(size, PRCM_REQ_MB5_I2C_SIZE); + writeb(MB5H_I2C_READ, PRCM_REQ_MB5_HEADER); + + writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET); + wait_for_completion(&mb5_transfer.work); + + r = 0; + if ((mb5_transfer.ack.header == MB5H_I2C_READ) && + (mb5_transfer.ack.status == RC_SUCCESS)) + memcpy(value, mb5_transfer.ack.value, (size_t)size); + else + r = -EIO; + + mutex_unlock(&mb5_transfer.lock); + + return r; +} + +/** + * db5500_prcmu_abb_write() - Write register value(s) to the ABB. + * @slave: The I2C slave address. + * @reg: The (start) register address. + * @value: The value(s) to write. + * @size: The number of registers to write. + * + * Writes register value(s) to the ABB. + * @size has to be <= 4. 
+ */ +int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size) +{ + int r; + + if ((size < 1) || (4 < size)) + return -EINVAL; + + mutex_lock(&mb5_transfer.lock); + + while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5)) + cpu_relax(); + writeb(slave, PRCM_REQ_MB5_I2C_SLAVE); + writeb(reg, PRCM_REQ_MB5_I2C_REG); + writeb(size, PRCM_REQ_MB5_I2C_SIZE); + memcpy_toio(PRCM_REQ_MB5_I2C_DATA, value, size); + writeb(MB5H_I2C_WRITE, PRCM_REQ_MB5_HEADER); + + writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET); + wait_for_completion(&mb5_transfer.work); + + if ((mb5_transfer.ack.header == MB5H_I2C_WRITE) && + (mb5_transfer.ack.status == RC_SUCCESS)) + r = 0; + else + r = -EIO; + + mutex_unlock(&mb5_transfer.lock); + + return r; +} + +int db5500_prcmu_enable_dsipll(void) +{ + int i; + + /* Enable DSIPLL_RESETN resets */ + writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_CLR); + /* Unclamp DSIPLL in/out */ + writel(PRCMU_UNCLAMP_DSIPLL, PRCM_MMIP_LS_CLAMP_CLR); + /* Set DSI PLL FREQ */ + writel(PRCMU_PLLDSI_FREQ_SETTING, PRCM_PLLDSI_FREQ); + writel(PRCMU_DSI_PLLOUT_SEL_SETTING, + PRCM_DSI_PLLOUT_SEL); + /* Enable Escape clocks */ + writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV); + + /* Start DSI PLL */ + writel(PRCMU_ENABLE_PLLDSI, PRCM_PLLDSI_ENABLE); + /* Reset DSI PLL */ + writel(PRCMU_DSI_RESET_SW, PRCM_DSI_SW_RESET); + for (i = 0; i < 10; i++) { + if ((readl(PRCM_PLLDSI_LOCKP) & + PRCMU_PLLDSI_LOCKP_LOCKED) == PRCMU_PLLDSI_LOCKP_LOCKED) + break; + udelay(100); + } + /* Release DSIPLL_RESETN */ + writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_SET); + return 0; +} + +int db5500_prcmu_disable_dsipll(void) +{ + /* Disable dsi pll */ + writel(PRCMU_DISABLE_PLLDSI, PRCM_PLLDSI_ENABLE); + /* Disable escapeclock */ + writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV); + return 0; +} + +int db5500_prcmu_set_display_clocks(void) +{ + /* HDMI and TVCLK Should be handled somewhere else */ + /* PLLDIV=8, PLLSW=2, CLKEN=1 */ + writel(PRCMU_DSI_CLOCK_SETTING, PRCM_HDMICLK_MGT); + /* PLLDIV=14, PLLSW=2, CLKEN=1 */ + writel(PRCMU_DSI_LP_CLOCK_SETTING, PRCM_TVCLK_MGT); + return 0; +} + +static void ack_dbb_wakeup(void) +{ + unsigned long flags; + + spin_lock_irqsave(&mb0_transfer.lock, flags); + + while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0)) + cpu_relax(); + + writeb(RMB0H_RD_WAKE_UP_ACK, PRCM_REQ_MB0_HEADER); + writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET); + + spin_unlock_irqrestore(&mb0_transfer.lock, flags); +} + +static inline void print_unknown_header_warning(u8 n, u8 header) +{ + pr_warning("prcmu: Unknown message header (%d) in mailbox %d.\n", + header, n); +} + +static bool read_mailbox_0(void) +{ + bool r; + u8 header; + + header = readb(PRCM_ACK_MB0_HEADER); + switch (header) { + case AMB0H_WAKE_UP: + r = true; + break; + default: + print_unknown_header_warning(0, header); + r = false; + break; + } + writel(MBOX_BIT(0), PRCM_ARM_IT1_CLEAR); + return r; +} + +static bool read_mailbox_1(void) +{ + writel(MBOX_BIT(1), PRCM_ARM_IT1_CLEAR); + return false; +} + +static bool read_mailbox_2(void) +{ + writel(MBOX_BIT(2), PRCM_ARM_IT1_CLEAR); + return false; +} + +static bool read_mailbox_3(void) +{ + writel(MBOX_BIT(3), PRCM_ARM_IT1_CLEAR); + return false; +} + +static bool read_mailbox_4(void) +{ + writel(MBOX_BIT(4), PRCM_ARM_IT1_CLEAR); + return false; +} + +static bool read_mailbox_5(void) +{ + u8 header; + + header = readb(PRCM_ACK_MB5_HEADER); + switch (header) { + case MB5H_I2C_READ: + memcpy_fromio(mb5_transfer.ack.value, PRCM_ACK_MB5_I2C_DATA, 4); + case MB5H_I2C_WRITE: + mb5_transfer.ack.header = header; + 
mb5_transfer.ack.status = readb(PRCM_ACK_MB5_RETURN_CODE); + complete(&mb5_transfer.work); + break; + default: + print_unknown_header_warning(5, header); + break; + } + writel(MBOX_BIT(5), PRCM_ARM_IT1_CLEAR); + return false; +} + +static bool read_mailbox_6(void) +{ + writel(MBOX_BIT(6), PRCM_ARM_IT1_CLEAR); + return false; +} + +static bool read_mailbox_7(void) +{ + writel(MBOX_BIT(7), PRCM_ARM_IT1_CLEAR); + return false; +} + +static bool (* const read_mailbox[NUM_MB])(void) = { + read_mailbox_0, + read_mailbox_1, + read_mailbox_2, + read_mailbox_3, + read_mailbox_4, + read_mailbox_5, + read_mailbox_6, + read_mailbox_7 +}; + +static irqreturn_t prcmu_irq_handler(int irq, void *data) +{ + u32 bits; + u8 n; + irqreturn_t r; + + bits = (readl(PRCM_ARM_IT1_VAL) & ALL_MBOX_BITS); + if (unlikely(!bits)) + return IRQ_NONE; + + r = IRQ_HANDLED; + for (n = 0; bits; n++) { + if (bits & MBOX_BIT(n)) { + bits -= MBOX_BIT(n); + if (read_mailbox[n]()) + r = IRQ_WAKE_THREAD; + } + } + return r; +} + +static irqreturn_t prcmu_irq_thread_fn(int irq, void *data) +{ + ack_dbb_wakeup(); + return IRQ_HANDLED; +} + +void __init db5500_prcmu_early_init(void) +{ + tcdm_base = __io_address(U5500_PRCMU_TCDM_BASE); + spin_lock_init(&mb0_transfer.lock); + mutex_init(&mb5_transfer.lock); + init_completion(&mb5_transfer.work); +} + +/** + * prcmu_fw_init - arch init call for the Linux PRCMU fw init logic + * + */ +int __init db5500_prcmu_init(void) +{ + int r = 0; + + if (ux500_is_svp() || !cpu_is_u5500()) + return -ENODEV; + + /* Clean up the mailbox interrupts after pre-kernel code. */ + writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLEAR); + + r = request_threaded_irq(IRQ_DB5500_PRCMU1, prcmu_irq_handler, + prcmu_irq_thread_fn, 0, "prcmu", NULL); + if (r < 0) { + pr_err("prcmu: Failed to allocate IRQ_DB5500_PRCMU1.\n"); + return -EBUSY; + } + return 0; +} + +arch_initcall(db5500_prcmu_init); diff --git a/drivers/mfd/db8500-prcmu-regs.h b/drivers/mfd/db8500-prcmu-regs.h new file mode 100644 index 00000000000..3bbf04d5804 --- /dev/null +++ b/drivers/mfd/db8500-prcmu-regs.h @@ -0,0 +1,166 @@ +/* + * Copyright (C) STMicroelectronics 2009 + * Copyright (C) ST-Ericsson SA 2010 + * + * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com> + * Author: Sundar Iyer <sundar.iyer@stericsson.com> + * + * License Terms: GNU General Public License v2 + * + * PRCM Unit registers + */ +#ifndef __DB8500_PRCMU_REGS_H +#define __DB8500_PRCMU_REGS_H + +#include <linux/bitops.h> +#include <mach/hardware.h> + +#define BITS(_start, _end) ((BIT(_end) - BIT(_start)) + BIT(_end)) + +#define PRCM_ARM_PLLDIVPS 0x118 +#define PRCM_ARM_PLLDIVPS_ARM_BRM_RATE BITS(0, 5) +#define PRCM_ARM_PLLDIVPS_MAX_MASK 0xF + +#define PRCM_PLLARM_LOCKP 0x0A8 +#define PRCM_PLLARM_LOCKP_PRCM_PLLARM_LOCKP3 BIT(1) + +#define PRCM_ARM_CHGCLKREQ 0x114 +#define PRCM_ARM_CHGCLKREQ_PRCM_ARM_CHGCLKREQ BIT(0) + +#define PRCM_PLLARM_ENABLE 0x98 +#define PRCM_PLLARM_ENABLE_PRCM_PLLARM_ENABLE BIT(0) +#define PRCM_PLLARM_ENABLE_PRCM_PLLARM_COUNTON BIT(8) + +#define PRCM_ARMCLKFIX_MGT 0x0 +#define PRCM_A9_RESETN_CLR 0x1f4 +#define PRCM_A9_RESETN_SET 0x1f0 +#define PRCM_ARM_LS_CLAMP 0x30C +#define PRCM_SRAM_A9 0x308 + +/* ARM WFI Standby signal register */ +#define PRCM_ARM_WFI_STANDBY 0x130 +#define PRCM_IOCR 0x310 +#define PRCM_IOCR_IOFORCE BIT(0) + +/* CPU mailbox registers */ +#define PRCM_MBOX_CPU_VAL 0x0FC +#define PRCM_MBOX_CPU_SET 0x100 + +/* Dual A9 core interrupt management unit registers */ +#define PRCM_A9_MASK_REQ 0x328 +#define PRCM_A9_MASK_REQ_PRCM_A9_MASK_REQ BIT(0) + 
+#define PRCM_A9_MASK_ACK 0x32C +#define PRCM_ARMITMSK31TO0 0x11C +#define PRCM_ARMITMSK63TO32 0x120 +#define PRCM_ARMITMSK95TO64 0x124 +#define PRCM_ARMITMSK127TO96 0x128 +#define PRCM_POWER_STATE_VAL 0x25C +#define PRCM_ARMITVAL31TO0 0x260 +#define PRCM_ARMITVAL63TO32 0x264 +#define PRCM_ARMITVAL95TO64 0x268 +#define PRCM_ARMITVAL127TO96 0x26C + +#define PRCM_HOSTACCESS_REQ 0x334 +#define PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ BIT(0) + +#define PRCM_ARM_IT1_CLR 0x48C +#define PRCM_ARM_IT1_VAL 0x494 + +#define PRCM_ITSTATUS0 0x148 +#define PRCM_ITSTATUS1 0x150 +#define PRCM_ITSTATUS2 0x158 +#define PRCM_ITSTATUS3 0x160 +#define PRCM_ITSTATUS4 0x168 +#define PRCM_ITSTATUS5 0x484 +#define PRCM_ITCLEAR5 0x488 +#define PRCM_ARMIT_MASKXP70_IT 0x1018 + +/* System reset register */ +#define PRCM_APE_SOFTRST 0x228 + +/* Level shifter and clamp control registers */ +#define PRCM_MMIP_LS_CLAMP_SET 0x420 +#define PRCM_MMIP_LS_CLAMP_CLR 0x424 + +/* PRCMU HW semaphore */ +#define PRCM_SEM 0x400 +#define PRCM_SEM_PRCM_SEM BIT(0) + +/* PRCMU clock/PLL/reset registers */ +#define PRCM_PLLDSI_FREQ 0x500 +#define PRCM_PLLDSI_ENABLE 0x504 +#define PRCM_PLLDSI_LOCKP 0x508 +#define PRCM_DSI_PLLOUT_SEL 0x530 +#define PRCM_DSITVCLK_DIV 0x52C +#define PRCM_APE_RESETN_SET 0x1E4 +#define PRCM_APE_RESETN_CLR 0x1E8 + +#define PRCM_TCR 0x1C8 +#define PRCM_TCR_TENSEL_MASK BITS(0, 7) +#define PRCM_TCR_STOP_TIMERS BIT(16) +#define PRCM_TCR_DOZE_MODE BIT(17) + +#define PRCM_CLKOCR 0x1CC +#define PRCM_CLKOCR_CLKODIV0_SHIFT 0 +#define PRCM_CLKOCR_CLKODIV0_MASK BITS(0, 5) +#define PRCM_CLKOCR_CLKOSEL0_SHIFT 6 +#define PRCM_CLKOCR_CLKOSEL0_MASK BITS(6, 8) +#define PRCM_CLKOCR_CLKODIV1_SHIFT 16 +#define PRCM_CLKOCR_CLKODIV1_MASK BITS(16, 21) +#define PRCM_CLKOCR_CLKOSEL1_SHIFT 22 +#define PRCM_CLKOCR_CLKOSEL1_MASK BITS(22, 24) +#define PRCM_CLKOCR_CLK1TYPE BIT(28) + +#define PRCM_SGACLK_MGT 0x014 +#define PRCM_UARTCLK_MGT 0x018 +#define PRCM_MSP02CLK_MGT 0x01C +#define PRCM_MSP1CLK_MGT 0x288 +#define PRCM_I2CCLK_MGT 0x020 +#define PRCM_SDMMCCLK_MGT 0x024 +#define PRCM_SLIMCLK_MGT 0x028 +#define PRCM_PER1CLK_MGT 0x02C +#define PRCM_PER2CLK_MGT 0x030 +#define PRCM_PER3CLK_MGT 0x034 +#define PRCM_PER5CLK_MGT 0x038 +#define PRCM_PER6CLK_MGT 0x03C +#define PRCM_PER7CLK_MGT 0x040 +#define PRCM_LCDCLK_MGT 0x044 +#define PRCM_BMLCLK_MGT 0x04C +#define PRCM_HSITXCLK_MGT 0x050 +#define PRCM_HSIRXCLK_MGT 0x054 +#define PRCM_HDMICLK_MGT 0x058 +#define PRCM_APEATCLK_MGT 0x05C +#define PRCM_APETRACECLK_MGT 0x060 +#define PRCM_MCDECLK_MGT 0x064 +#define PRCM_IPI2CCLK_MGT 0x068 +#define PRCM_DSIALTCLK_MGT 0x06C +#define PRCM_DMACLK_MGT 0x074 +#define PRCM_B2R2CLK_MGT 0x078 +#define PRCM_TVCLK_MGT 0x07C +#define PRCM_UNIPROCLK_MGT 0x278 +#define PRCM_SSPCLK_MGT 0x280 +#define PRCM_RNGCLK_MGT 0x284 +#define PRCM_UICCCLK_MGT 0x27C + +#define PRCM_CLK_MGT_CLKPLLDIV_MASK BITS(0, 4) +#define PRCM_CLK_MGT_CLKPLLSW_MASK BITS(5, 7) +#define PRCM_CLK_MGT_CLKEN BIT(8) + +/* ePOD and memory power signal control registers */ +#define PRCM_EPOD_C_SET 0x410 +#define PRCM_SRAM_LS_SLEEP 0x304 + +/* Debug power control unit registers */ +#define PRCM_POWER_STATE_SET 0x254 + +/* Miscellaneous unit registers */ +#define PRCM_DSI_SW_RESET 0x324 +#define PRCM_GPIOCR 0x138 + +/* GPIOCR register */ +#define PRCM_GPIOCR_SPI2_SELECT BIT(23) + +#define PRCM_DDR_SUBSYS_APE_MINBW 0x438 + +#endif /* __DB8500_PRCMU_REGS_H */ diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c new file mode 100644 index 00000000000..e63782107e2 --- /dev/null +++ 
b/drivers/mfd/db8500-prcmu.c @@ -0,0 +1,2069 @@ +/* + * Copyright (C) STMicroelectronics 2009 + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License v2 + * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com> + * Author: Sundar Iyer <sundar.iyer@stericsson.com> + * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com> + * + * U8500 PRCM Unit interface driver + * + */ +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/spinlock.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/mutex.h> +#include <linux/completion.h> +#include <linux/irq.h> +#include <linux/jiffies.h> +#include <linux/bitops.h> +#include <linux/fs.h> +#include <linux/platform_device.h> +#include <linux/uaccess.h> +#include <linux/mfd/core.h> +#include <linux/mfd/db8500-prcmu.h> +#include <linux/regulator/db8500-prcmu.h> +#include <linux/regulator/machine.h> +#include <mach/hardware.h> +#include <mach/irqs.h> +#include <mach/db8500-regs.h> +#include <mach/id.h> +#include "db8500-prcmu-regs.h" + +/* Offset for the firmware version within the TCPM */ +#define PRCMU_FW_VERSION_OFFSET 0xA4 + +/* PRCMU project numbers, defined by PRCMU FW */ +#define PRCMU_PROJECT_ID_8500V1_0 1 +#define PRCMU_PROJECT_ID_8500V2_0 2 +#define PRCMU_PROJECT_ID_8400V2_0 3 + +/* Index of different voltages to be used when accessing AVSData */ +#define PRCM_AVS_BASE 0x2FC +#define PRCM_AVS_VBB_RET (PRCM_AVS_BASE + 0x0) +#define PRCM_AVS_VBB_MAX_OPP (PRCM_AVS_BASE + 0x1) +#define PRCM_AVS_VBB_100_OPP (PRCM_AVS_BASE + 0x2) +#define PRCM_AVS_VBB_50_OPP (PRCM_AVS_BASE + 0x3) +#define PRCM_AVS_VARM_MAX_OPP (PRCM_AVS_BASE + 0x4) +#define PRCM_AVS_VARM_100_OPP (PRCM_AVS_BASE + 0x5) +#define PRCM_AVS_VARM_50_OPP (PRCM_AVS_BASE + 0x6) +#define PRCM_AVS_VARM_RET (PRCM_AVS_BASE + 0x7) +#define PRCM_AVS_VAPE_100_OPP (PRCM_AVS_BASE + 0x8) +#define PRCM_AVS_VAPE_50_OPP (PRCM_AVS_BASE + 0x9) +#define PRCM_AVS_VMOD_100_OPP (PRCM_AVS_BASE + 0xA) +#define PRCM_AVS_VMOD_50_OPP (PRCM_AVS_BASE + 0xB) +#define PRCM_AVS_VSAFE (PRCM_AVS_BASE + 0xC) + +#define PRCM_AVS_VOLTAGE 0 +#define PRCM_AVS_VOLTAGE_MASK 0x3f +#define PRCM_AVS_ISSLOWSTARTUP 6 +#define PRCM_AVS_ISSLOWSTARTUP_MASK (1 << PRCM_AVS_ISSLOWSTARTUP) +#define PRCM_AVS_ISMODEENABLE 7 +#define PRCM_AVS_ISMODEENABLE_MASK (1 << PRCM_AVS_ISMODEENABLE) + +#define PRCM_BOOT_STATUS 0xFFF +#define PRCM_ROMCODE_A2P 0xFFE +#define PRCM_ROMCODE_P2A 0xFFD +#define PRCM_XP70_CUR_PWR_STATE 0xFFC /* 4 BYTES */ + +#define PRCM_SW_RST_REASON 0xFF8 /* 2 bytes */ + +#define _PRCM_MBOX_HEADER 0xFE8 /* 16 bytes */ +#define PRCM_MBOX_HEADER_REQ_MB0 (_PRCM_MBOX_HEADER + 0x0) +#define PRCM_MBOX_HEADER_REQ_MB1 (_PRCM_MBOX_HEADER + 0x1) +#define PRCM_MBOX_HEADER_REQ_MB2 (_PRCM_MBOX_HEADER + 0x2) +#define PRCM_MBOX_HEADER_REQ_MB3 (_PRCM_MBOX_HEADER + 0x3) +#define PRCM_MBOX_HEADER_REQ_MB4 (_PRCM_MBOX_HEADER + 0x4) +#define PRCM_MBOX_HEADER_REQ_MB5 (_PRCM_MBOX_HEADER + 0x5) +#define PRCM_MBOX_HEADER_ACK_MB0 (_PRCM_MBOX_HEADER + 0x8) + +/* Req Mailboxes */ +#define PRCM_REQ_MB0 0xFDC /* 12 bytes */ +#define PRCM_REQ_MB1 0xFD0 /* 12 bytes */ +#define PRCM_REQ_MB2 0xFC0 /* 16 bytes */ +#define PRCM_REQ_MB3 0xE4C /* 372 bytes */ +#define PRCM_REQ_MB4 0xE48 /* 4 bytes */ +#define PRCM_REQ_MB5 0xE44 /* 4 bytes */ + +/* Ack Mailboxes */ +#define PRCM_ACK_MB0 0xE08 /* 52 bytes */ +#define PRCM_ACK_MB1 0xE04 /* 4 bytes */ +#define PRCM_ACK_MB2 0xE00 /* 4 bytes */ +#define PRCM_ACK_MB3 0xDFC /* 4 bytes */ 
+#define PRCM_ACK_MB4 0xDF8 /* 4 bytes */ +#define PRCM_ACK_MB5 0xDF4 /* 4 bytes */ + +/* Mailbox 0 headers */ +#define MB0H_POWER_STATE_TRANS 0 +#define MB0H_CONFIG_WAKEUPS_EXE 1 +#define MB0H_READ_WAKEUP_ACK 3 +#define MB0H_CONFIG_WAKEUPS_SLEEP 4 + +#define MB0H_WAKEUP_EXE 2 +#define MB0H_WAKEUP_SLEEP 5 + +/* Mailbox 0 REQs */ +#define PRCM_REQ_MB0_AP_POWER_STATE (PRCM_REQ_MB0 + 0x0) +#define PRCM_REQ_MB0_AP_PLL_STATE (PRCM_REQ_MB0 + 0x1) +#define PRCM_REQ_MB0_ULP_CLOCK_STATE (PRCM_REQ_MB0 + 0x2) +#define PRCM_REQ_MB0_DO_NOT_WFI (PRCM_REQ_MB0 + 0x3) +#define PRCM_REQ_MB0_WAKEUP_8500 (PRCM_REQ_MB0 + 0x4) +#define PRCM_REQ_MB0_WAKEUP_4500 (PRCM_REQ_MB0 + 0x8) + +/* Mailbox 0 ACKs */ +#define PRCM_ACK_MB0_AP_PWRSTTR_STATUS (PRCM_ACK_MB0 + 0x0) +#define PRCM_ACK_MB0_READ_POINTER (PRCM_ACK_MB0 + 0x1) +#define PRCM_ACK_MB0_WAKEUP_0_8500 (PRCM_ACK_MB0 + 0x4) +#define PRCM_ACK_MB0_WAKEUP_0_4500 (PRCM_ACK_MB0 + 0x8) +#define PRCM_ACK_MB0_WAKEUP_1_8500 (PRCM_ACK_MB0 + 0x1C) +#define PRCM_ACK_MB0_WAKEUP_1_4500 (PRCM_ACK_MB0 + 0x20) +#define PRCM_ACK_MB0_EVENT_4500_NUMBERS 20 + +/* Mailbox 1 headers */ +#define MB1H_ARM_APE_OPP 0x0 +#define MB1H_RESET_MODEM 0x2 +#define MB1H_REQUEST_APE_OPP_100_VOLT 0x3 +#define MB1H_RELEASE_APE_OPP_100_VOLT 0x4 +#define MB1H_RELEASE_USB_WAKEUP 0x5 + +/* Mailbox 1 Requests */ +#define PRCM_REQ_MB1_ARM_OPP (PRCM_REQ_MB1 + 0x0) +#define PRCM_REQ_MB1_APE_OPP (PRCM_REQ_MB1 + 0x1) +#define PRCM_REQ_MB1_APE_OPP_100_RESTORE (PRCM_REQ_MB1 + 0x4) +#define PRCM_REQ_MB1_ARM_OPP_100_RESTORE (PRCM_REQ_MB1 + 0x8) + +/* Mailbox 1 ACKs */ +#define PRCM_ACK_MB1_CURRENT_ARM_OPP (PRCM_ACK_MB1 + 0x0) +#define PRCM_ACK_MB1_CURRENT_APE_OPP (PRCM_ACK_MB1 + 0x1) +#define PRCM_ACK_MB1_APE_VOLTAGE_STATUS (PRCM_ACK_MB1 + 0x2) +#define PRCM_ACK_MB1_DVFS_STATUS (PRCM_ACK_MB1 + 0x3) + +/* Mailbox 2 headers */ +#define MB2H_DPS 0x0 +#define MB2H_AUTO_PWR 0x1 + +/* Mailbox 2 REQs */ +#define PRCM_REQ_MB2_SVA_MMDSP (PRCM_REQ_MB2 + 0x0) +#define PRCM_REQ_MB2_SVA_PIPE (PRCM_REQ_MB2 + 0x1) +#define PRCM_REQ_MB2_SIA_MMDSP (PRCM_REQ_MB2 + 0x2) +#define PRCM_REQ_MB2_SIA_PIPE (PRCM_REQ_MB2 + 0x3) +#define PRCM_REQ_MB2_SGA (PRCM_REQ_MB2 + 0x4) +#define PRCM_REQ_MB2_B2R2_MCDE (PRCM_REQ_MB2 + 0x5) +#define PRCM_REQ_MB2_ESRAM12 (PRCM_REQ_MB2 + 0x6) +#define PRCM_REQ_MB2_ESRAM34 (PRCM_REQ_MB2 + 0x7) +#define PRCM_REQ_MB2_AUTO_PM_SLEEP (PRCM_REQ_MB2 + 0x8) +#define PRCM_REQ_MB2_AUTO_PM_IDLE (PRCM_REQ_MB2 + 0xC) + +/* Mailbox 2 ACKs */ +#define PRCM_ACK_MB2_DPS_STATUS (PRCM_ACK_MB2 + 0x0) +#define HWACC_PWR_ST_OK 0xFE + +/* Mailbox 3 headers */ +#define MB3H_ANC 0x0 +#define MB3H_SIDETONE 0x1 +#define MB3H_SYSCLK 0xE + +/* Mailbox 3 Requests */ +#define PRCM_REQ_MB3_ANC_FIR_COEFF (PRCM_REQ_MB3 + 0x0) +#define PRCM_REQ_MB3_ANC_IIR_COEFF (PRCM_REQ_MB3 + 0x20) +#define PRCM_REQ_MB3_ANC_SHIFTER (PRCM_REQ_MB3 + 0x60) +#define PRCM_REQ_MB3_ANC_WARP (PRCM_REQ_MB3 + 0x64) +#define PRCM_REQ_MB3_SIDETONE_FIR_GAIN (PRCM_REQ_MB3 + 0x68) +#define PRCM_REQ_MB3_SIDETONE_FIR_COEFF (PRCM_REQ_MB3 + 0x6C) +#define PRCM_REQ_MB3_SYSCLK_MGT (PRCM_REQ_MB3 + 0x16C) + +/* Mailbox 4 headers */ +#define MB4H_DDR_INIT 0x0 +#define MB4H_MEM_ST 0x1 +#define MB4H_HOTDOG 0x12 +#define MB4H_HOTMON 0x13 +#define MB4H_HOT_PERIOD 0x14 + +/* Mailbox 4 Requests */ +#define PRCM_REQ_MB4_DDR_ST_AP_SLEEP_IDLE (PRCM_REQ_MB4 + 0x0) +#define PRCM_REQ_MB4_DDR_ST_AP_DEEP_IDLE (PRCM_REQ_MB4 + 0x1) +#define PRCM_REQ_MB4_ESRAM0_ST (PRCM_REQ_MB4 + 0x3) +#define PRCM_REQ_MB4_HOTDOG_THRESHOLD (PRCM_REQ_MB4 + 0x0) +#define PRCM_REQ_MB4_HOTMON_LOW (PRCM_REQ_MB4 + 
0x0) +#define PRCM_REQ_MB4_HOTMON_HIGH (PRCM_REQ_MB4 + 0x1) +#define PRCM_REQ_MB4_HOTMON_CONFIG (PRCM_REQ_MB4 + 0x2) +#define PRCM_REQ_MB4_HOT_PERIOD (PRCM_REQ_MB4 + 0x0) +#define HOTMON_CONFIG_LOW BIT(0) +#define HOTMON_CONFIG_HIGH BIT(1) + +/* Mailbox 5 Requests */ +#define PRCM_REQ_MB5_I2C_SLAVE_OP (PRCM_REQ_MB5 + 0x0) +#define PRCM_REQ_MB5_I2C_HW_BITS (PRCM_REQ_MB5 + 0x1) +#define PRCM_REQ_MB5_I2C_REG (PRCM_REQ_MB5 + 0x2) +#define PRCM_REQ_MB5_I2C_VAL (PRCM_REQ_MB5 + 0x3) +#define PRCMU_I2C_WRITE(slave) \ + (((slave) << 1) | (cpu_is_u8500v2() ? BIT(6) : 0)) +#define PRCMU_I2C_READ(slave) \ + (((slave) << 1) | BIT(0) | (cpu_is_u8500v2() ? BIT(6) : 0)) +#define PRCMU_I2C_STOP_EN BIT(3) + +/* Mailbox 5 ACKs */ +#define PRCM_ACK_MB5_I2C_STATUS (PRCM_ACK_MB5 + 0x1) +#define PRCM_ACK_MB5_I2C_VAL (PRCM_ACK_MB5 + 0x3) +#define I2C_WR_OK 0x1 +#define I2C_RD_OK 0x2 + +#define NUM_MB 8 +#define MBOX_BIT BIT +#define ALL_MBOX_BITS (MBOX_BIT(NUM_MB) - 1) + +/* + * Wakeups/IRQs + */ + +#define WAKEUP_BIT_RTC BIT(0) +#define WAKEUP_BIT_RTT0 BIT(1) +#define WAKEUP_BIT_RTT1 BIT(2) +#define WAKEUP_BIT_HSI0 BIT(3) +#define WAKEUP_BIT_HSI1 BIT(4) +#define WAKEUP_BIT_CA_WAKE BIT(5) +#define WAKEUP_BIT_USB BIT(6) +#define WAKEUP_BIT_ABB BIT(7) +#define WAKEUP_BIT_ABB_FIFO BIT(8) +#define WAKEUP_BIT_SYSCLK_OK BIT(9) +#define WAKEUP_BIT_CA_SLEEP BIT(10) +#define WAKEUP_BIT_AC_WAKE_ACK BIT(11) +#define WAKEUP_BIT_SIDE_TONE_OK BIT(12) +#define WAKEUP_BIT_ANC_OK BIT(13) +#define WAKEUP_BIT_SW_ERROR BIT(14) +#define WAKEUP_BIT_AC_SLEEP_ACK BIT(15) +#define WAKEUP_BIT_ARM BIT(17) +#define WAKEUP_BIT_HOTMON_LOW BIT(18) +#define WAKEUP_BIT_HOTMON_HIGH BIT(19) +#define WAKEUP_BIT_MODEM_SW_RESET_REQ BIT(20) +#define WAKEUP_BIT_GPIO0 BIT(23) +#define WAKEUP_BIT_GPIO1 BIT(24) +#define WAKEUP_BIT_GPIO2 BIT(25) +#define WAKEUP_BIT_GPIO3 BIT(26) +#define WAKEUP_BIT_GPIO4 BIT(27) +#define WAKEUP_BIT_GPIO5 BIT(28) +#define WAKEUP_BIT_GPIO6 BIT(29) +#define WAKEUP_BIT_GPIO7 BIT(30) +#define WAKEUP_BIT_GPIO8 BIT(31) + +/* + * This vector maps irq numbers to the bits in the bit field used in + * communication with the PRCMU firmware. + * + * The reason for having this is to keep the irq numbers contiguous even though + * the bits in the bit field are not. (The bits also have a tendency to move + * around, to further complicate matters.) + */ +#define IRQ_INDEX(_name) ((IRQ_PRCMU_##_name) - IRQ_PRCMU_BASE) +#define IRQ_ENTRY(_name)[IRQ_INDEX(_name)] = (WAKEUP_BIT_##_name) +static u32 prcmu_irq_bit[NUM_PRCMU_WAKEUPS] = { + IRQ_ENTRY(RTC), + IRQ_ENTRY(RTT0), + IRQ_ENTRY(RTT1), + IRQ_ENTRY(HSI0), + IRQ_ENTRY(HSI1), + IRQ_ENTRY(CA_WAKE), + IRQ_ENTRY(USB), + IRQ_ENTRY(ABB), + IRQ_ENTRY(ABB_FIFO), + IRQ_ENTRY(CA_SLEEP), + IRQ_ENTRY(ARM), + IRQ_ENTRY(HOTMON_LOW), + IRQ_ENTRY(HOTMON_HIGH), + IRQ_ENTRY(MODEM_SW_RESET_REQ), + IRQ_ENTRY(GPIO0), + IRQ_ENTRY(GPIO1), + IRQ_ENTRY(GPIO2), + IRQ_ENTRY(GPIO3), + IRQ_ENTRY(GPIO4), + IRQ_ENTRY(GPIO5), + IRQ_ENTRY(GPIO6), + IRQ_ENTRY(GPIO7), + IRQ_ENTRY(GPIO8) +}; + +#define VALID_WAKEUPS (BIT(NUM_PRCMU_WAKEUP_INDICES) - 1) +#define WAKEUP_ENTRY(_name)[PRCMU_WAKEUP_INDEX_##_name] = (WAKEUP_BIT_##_name) +static u32 prcmu_wakeup_bit[NUM_PRCMU_WAKEUP_INDICES] = { + WAKEUP_ENTRY(RTC), + WAKEUP_ENTRY(RTT0), + WAKEUP_ENTRY(RTT1), + WAKEUP_ENTRY(HSI0), + WAKEUP_ENTRY(HSI1), + WAKEUP_ENTRY(USB), + WAKEUP_ENTRY(ABB), + WAKEUP_ENTRY(ABB_FIFO), + WAKEUP_ENTRY(ARM) +}; + +/* + * mb0_transfer - state needed for mailbox 0 communication. + * @lock: The transaction lock. 
+ * @dbb_irqs_lock: A lock used to handle concurrent access to (parts of) + * the request data. + * @mask_work: Work structure used for (un)masking wakeup interrupts. + * @ac_wake_lock: Serializes modem (AC) wake and sleep requests. + * @ac_wake_work: Completion signalled when the modem acknowledges a wake or sleep request. + * @req: Request data that need to persist between requests. + */ +static struct { + spinlock_t lock; + spinlock_t dbb_irqs_lock; + struct work_struct mask_work; + struct mutex ac_wake_lock; + struct completion ac_wake_work; + struct { + u32 dbb_irqs; + u32 dbb_wakeups; + u32 abb_events; + } req; +} mb0_transfer; + +/* + * mb1_transfer - state needed for mailbox 1 communication. + * @lock: The transaction lock. + * @work: The transaction completion structure. + * @ack: Reply ("acknowledge") data. + */ +static struct { + struct mutex lock; + struct completion work; + struct { + u8 header; + u8 arm_opp; + u8 ape_opp; + u8 ape_voltage_status; + } ack; +} mb1_transfer; + +/* + * mb2_transfer - state needed for mailbox 2 communication. + * @lock: The transaction lock. + * @work: The transaction completion structure. + * @auto_pm_lock: The autonomous power management configuration lock. + * @auto_pm_enabled: A flag indicating whether autonomous PM is enabled. + * @ack: Reply ("acknowledge") data. + */ +static struct { + struct mutex lock; + struct completion work; + spinlock_t auto_pm_lock; + bool auto_pm_enabled; + struct { + u8 status; + } ack; +} mb2_transfer; + +/* + * mb3_transfer - state needed for mailbox 3 communication. + * @lock: The request lock. + * @sysclk_lock: A lock used to handle concurrent sysclk requests. + * @sysclk_work: Completion used to signal that a sysclk request has been serviced. + */ +static struct { + spinlock_t lock; + struct mutex sysclk_lock; + struct completion sysclk_work; +} mb3_transfer; + +/* + * mb4_transfer - state needed for mailbox 4 communication. + * @lock: The transaction lock. + * @work: The transaction completion structure. + */ +static struct { + struct mutex lock; + struct completion work; +} mb4_transfer; + +/* + * mb5_transfer - state needed for mailbox 5 communication. + * @lock: The transaction lock. + * @work: The transaction completion structure. + * @ack: Reply ("acknowledge") data.
+ */ +static struct { + struct mutex lock; + struct completion work; + struct { + u8 status; + u8 value; + } ack; +} mb5_transfer; + +static atomic_t ac_wake_req_state = ATOMIC_INIT(0); + +/* Spinlocks */ +static DEFINE_SPINLOCK(clkout_lock); +static DEFINE_SPINLOCK(gpiocr_lock); + +/* Global var to runtime determine TCDM base for v2 or v1 */ +static __iomem void *tcdm_base; + +struct clk_mgt { + unsigned int offset; + u32 pllsw; +}; + +static DEFINE_SPINLOCK(clk_mgt_lock); + +#define CLK_MGT_ENTRY(_name)[PRCMU_##_name] = { (PRCM_##_name##_MGT), 0 } +struct clk_mgt clk_mgt[PRCMU_NUM_REG_CLOCKS] = { + CLK_MGT_ENTRY(SGACLK), + CLK_MGT_ENTRY(UARTCLK), + CLK_MGT_ENTRY(MSP02CLK), + CLK_MGT_ENTRY(MSP1CLK), + CLK_MGT_ENTRY(I2CCLK), + CLK_MGT_ENTRY(SDMMCCLK), + CLK_MGT_ENTRY(SLIMCLK), + CLK_MGT_ENTRY(PER1CLK), + CLK_MGT_ENTRY(PER2CLK), + CLK_MGT_ENTRY(PER3CLK), + CLK_MGT_ENTRY(PER5CLK), + CLK_MGT_ENTRY(PER6CLK), + CLK_MGT_ENTRY(PER7CLK), + CLK_MGT_ENTRY(LCDCLK), + CLK_MGT_ENTRY(BMLCLK), + CLK_MGT_ENTRY(HSITXCLK), + CLK_MGT_ENTRY(HSIRXCLK), + CLK_MGT_ENTRY(HDMICLK), + CLK_MGT_ENTRY(APEATCLK), + CLK_MGT_ENTRY(APETRACECLK), + CLK_MGT_ENTRY(MCDECLK), + CLK_MGT_ENTRY(IPI2CCLK), + CLK_MGT_ENTRY(DSIALTCLK), + CLK_MGT_ENTRY(DMACLK), + CLK_MGT_ENTRY(B2R2CLK), + CLK_MGT_ENTRY(TVCLK), + CLK_MGT_ENTRY(SSPCLK), + CLK_MGT_ENTRY(RNGCLK), + CLK_MGT_ENTRY(UICCCLK), +}; + +/* +* Used by MCDE to setup all necessary PRCMU registers +*/ +#define PRCMU_RESET_DSIPLL 0x00004000 +#define PRCMU_UNCLAMP_DSIPLL 0x00400800 + +#define PRCMU_CLK_PLL_DIV_SHIFT 0 +#define PRCMU_CLK_PLL_SW_SHIFT 5 +#define PRCMU_CLK_38 (1 << 9) +#define PRCMU_CLK_38_SRC (1 << 10) +#define PRCMU_CLK_38_DIV (1 << 11) + +/* PLLDIV=12, PLLSW=4 (PLLDDR) */ +#define PRCMU_DSI_CLOCK_SETTING 0x0000008C + +/* PLLDIV=8, PLLSW=4 (PLLDDR) */ +#define PRCMU_DSI_CLOCK_SETTING_U8400 0x00000088 + +/* DPI 50000000 Hz */ +#define PRCMU_DPI_CLOCK_SETTING ((1 << PRCMU_CLK_PLL_SW_SHIFT) | \ + (16 << PRCMU_CLK_PLL_DIV_SHIFT)) +#define PRCMU_DSI_LP_CLOCK_SETTING 0x00000E00 + +/* D=101, N=1, R=4, SELDIV2=0 */ +#define PRCMU_PLLDSI_FREQ_SETTING 0x00040165 + +/* D=70, N=1, R=3, SELDIV2=0 */ +#define PRCMU_PLLDSI_FREQ_SETTING_U8400 0x00030146 + +#define PRCMU_ENABLE_PLLDSI 0x00000001 +#define PRCMU_DISABLE_PLLDSI 0x00000000 +#define PRCMU_RELEASE_RESET_DSS 0x0000400C +#define PRCMU_DSI_PLLOUT_SEL_SETTING 0x00000202 +/* ESC clk, div0=1, div1=1, div2=3 */ +#define PRCMU_ENABLE_ESCAPE_CLOCK_DIV 0x07030101 +#define PRCMU_DISABLE_ESCAPE_CLOCK_DIV 0x00030101 +#define PRCMU_DSI_RESET_SW 0x00000007 + +#define PRCMU_PLLDSI_LOCKP_LOCKED 0x3 + +static struct { + u8 project_number; + u8 api_version; + u8 func_version; + u8 errata; +} prcmu_version; + + +int prcmu_enable_dsipll(void) +{ + int i; + unsigned int plldsifreq; + + /* Clear DSIPLL_RESETN */ + writel(PRCMU_RESET_DSIPLL, (_PRCMU_BASE + PRCM_APE_RESETN_CLR)); + /* Unclamp DSIPLL in/out */ + writel(PRCMU_UNCLAMP_DSIPLL, (_PRCMU_BASE + PRCM_MMIP_LS_CLAMP_CLR)); + + if (prcmu_is_u8400()) + plldsifreq = PRCMU_PLLDSI_FREQ_SETTING_U8400; + else + plldsifreq = PRCMU_PLLDSI_FREQ_SETTING; + /* Set DSI PLL FREQ */ + writel(plldsifreq, (_PRCMU_BASE + PRCM_PLLDSI_FREQ)); + writel(PRCMU_DSI_PLLOUT_SEL_SETTING, + (_PRCMU_BASE + PRCM_DSI_PLLOUT_SEL)); + /* Enable Escape clocks */ + writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV, + (_PRCMU_BASE + PRCM_DSITVCLK_DIV)); + + /* Start DSI PLL */ + writel(PRCMU_ENABLE_PLLDSI, (_PRCMU_BASE + PRCM_PLLDSI_ENABLE)); + /* Reset DSI PLL */ + writel(PRCMU_DSI_RESET_SW, (_PRCMU_BASE + PRCM_DSI_SW_RESET)); + for (i = 
0; i < 10; i++) { + if ((readl(_PRCMU_BASE + PRCM_PLLDSI_LOCKP) & + PRCMU_PLLDSI_LOCKP_LOCKED) + == PRCMU_PLLDSI_LOCKP_LOCKED) + break; + udelay(100); + } + /* Set DSIPLL_RESETN */ + writel(PRCMU_RESET_DSIPLL, (_PRCMU_BASE + PRCM_APE_RESETN_SET)); + return 0; +} + +int prcmu_disable_dsipll(void) +{ + /* Disable dsi pll */ + writel(PRCMU_DISABLE_PLLDSI, (_PRCMU_BASE + PRCM_PLLDSI_ENABLE)); + /* Disable escapeclock */ + writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV, + (_PRCMU_BASE + PRCM_DSITVCLK_DIV)); + return 0; +} + +int prcmu_set_display_clocks(void) +{ + unsigned long flags; + unsigned int dsiclk; + + if (prcmu_is_u8400()) + dsiclk = PRCMU_DSI_CLOCK_SETTING_U8400; + else + dsiclk = PRCMU_DSI_CLOCK_SETTING; + + spin_lock_irqsave(&clk_mgt_lock, flags); + + /* Grab the HW semaphore. */ + while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0) + cpu_relax(); + + writel(dsiclk, (_PRCMU_BASE + PRCM_HDMICLK_MGT)); + writel(PRCMU_DSI_LP_CLOCK_SETTING, (_PRCMU_BASE + PRCM_TVCLK_MGT)); + writel(PRCMU_DPI_CLOCK_SETTING, (_PRCMU_BASE + PRCM_LCDCLK_MGT)); + + /* Release the HW semaphore. */ + writel(0, (_PRCMU_BASE + PRCM_SEM)); + + spin_unlock_irqrestore(&clk_mgt_lock, flags); + + return 0; +} + +/** + * prcmu_enable_spi2 - Enables pin muxing for SPI2 on OtherAlternateC1. + */ +void prcmu_enable_spi2(void) +{ + u32 reg; + unsigned long flags; + + spin_lock_irqsave(&gpiocr_lock, flags); + reg = readl(_PRCMU_BASE + PRCM_GPIOCR); + writel(reg | PRCM_GPIOCR_SPI2_SELECT, _PRCMU_BASE + PRCM_GPIOCR); + spin_unlock_irqrestore(&gpiocr_lock, flags); +} + +/** + * prcmu_disable_spi2 - Disables pin muxing for SPI2 on OtherAlternateC1. + */ +void prcmu_disable_spi2(void) +{ + u32 reg; + unsigned long flags; + + spin_lock_irqsave(&gpiocr_lock, flags); + reg = readl(_PRCMU_BASE + PRCM_GPIOCR); + writel(reg & ~PRCM_GPIOCR_SPI2_SELECT, _PRCMU_BASE + PRCM_GPIOCR); + spin_unlock_irqrestore(&gpiocr_lock, flags); +} + +bool prcmu_has_arm_maxopp(void) +{ + return (readb(tcdm_base + PRCM_AVS_VARM_MAX_OPP) & + PRCM_AVS_ISMODEENABLE_MASK) == PRCM_AVS_ISMODEENABLE_MASK; +} + +bool prcmu_is_u8400(void) +{ + return prcmu_version.project_number == PRCMU_PROJECT_ID_8400V2_0; +} + +/** + * prcmu_get_boot_status - PRCMU boot status checking + * Returns: the current PRCMU boot status + */ +int prcmu_get_boot_status(void) +{ + return readb(tcdm_base + PRCM_BOOT_STATUS); +} + +/** + * prcmu_set_rc_a2p - This function is used to run few power state sequences + * @val: Value to be set, i.e. 
transition requested + * Returns: 0 on success, -EINVAL on invalid argument + * + * This function is used to run the following power state sequences - + * any state to ApReset, ApDeepSleep to ApExecute, ApExecute to ApDeepSleep + */ +int prcmu_set_rc_a2p(enum romcode_write val) +{ + if (val < RDY_2_DS || val > RDY_2_XP70_RST) + return -EINVAL; + writeb(val, (tcdm_base + PRCM_ROMCODE_A2P)); + return 0; +} + +/** + * prcmu_get_rc_p2a - This function is used to get power state sequences + * Returns: the power transition that has last happened + * + * This function can return the following transitions- + * any state to ApReset, ApDeepSleep to ApExecute, ApExecute to ApDeepSleep + */ +enum romcode_read prcmu_get_rc_p2a(void) +{ + return readb(tcdm_base + PRCM_ROMCODE_P2A); +} + +/** + * prcmu_get_current_mode - Return the current XP70 power mode + * Returns: Returns the current AP(ARM) power mode: init, + * apBoot, apExecute, apDeepSleep, apSleep, apIdle, apReset + */ +enum ap_pwrst prcmu_get_xp70_current_state(void) +{ + return readb(tcdm_base + PRCM_XP70_CUR_PWR_STATE); +} + +/** + * prcmu_config_clkout - Configure one of the programmable clock outputs. + * @clkout: The CLKOUT number (0 or 1). + * @source: The clock to be used (one of the PRCMU_CLKSRC_*). + * @div: The divider to be applied. + * + * Configures one of the programmable clock outputs (CLKOUTs). + * @div should be in the range [1,63] to request a configuration, or 0 to + * inform that the configuration is no longer requested. + */ +int prcmu_config_clkout(u8 clkout, u8 source, u8 div) +{ + static int requests[2]; + int r = 0; + unsigned long flags; + u32 val; + u32 bits; + u32 mask; + u32 div_mask; + + BUG_ON(clkout > 1); + BUG_ON(div > 63); + BUG_ON((clkout == 0) && (source > PRCMU_CLKSRC_CLK009)); + + if (!div && !requests[clkout]) + return -EINVAL; + + switch (clkout) { + case 0: + div_mask = PRCM_CLKOCR_CLKODIV0_MASK; + mask = (PRCM_CLKOCR_CLKODIV0_MASK | PRCM_CLKOCR_CLKOSEL0_MASK); + bits = ((source << PRCM_CLKOCR_CLKOSEL0_SHIFT) | + (div << PRCM_CLKOCR_CLKODIV0_SHIFT)); + break; + case 1: + div_mask = PRCM_CLKOCR_CLKODIV1_MASK; + mask = (PRCM_CLKOCR_CLKODIV1_MASK | PRCM_CLKOCR_CLKOSEL1_MASK | + PRCM_CLKOCR_CLK1TYPE); + bits = ((source << PRCM_CLKOCR_CLKOSEL1_SHIFT) | + (div << PRCM_CLKOCR_CLKODIV1_SHIFT)); + break; + } + bits &= mask; + + spin_lock_irqsave(&clkout_lock, flags); + + val = readl(_PRCMU_BASE + PRCM_CLKOCR); + if (val & div_mask) { + if (div) { + if ((val & mask) != bits) { + r = -EBUSY; + goto unlock_and_return; + } + } else { + if ((val & mask & ~div_mask) != bits) { + r = -EINVAL; + goto unlock_and_return; + } + } + } + writel((bits | (val & ~mask)), (_PRCMU_BASE + PRCM_CLKOCR)); + requests[clkout] += (div ? 1 : -1); + +unlock_and_return: + spin_unlock_irqrestore(&clkout_lock, flags); + + return r; +} + +int prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll) +{ + unsigned long flags; + + BUG_ON((state < PRCMU_AP_SLEEP) || (PRCMU_AP_DEEP_IDLE < state)); + + spin_lock_irqsave(&mb0_transfer.lock, flags); + + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0)) + cpu_relax(); + + writeb(MB0H_POWER_STATE_TRANS, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0)); + writeb(state, (tcdm_base + PRCM_REQ_MB0_AP_POWER_STATE)); + writeb((keep_ap_pll ? 1 : 0), (tcdm_base + PRCM_REQ_MB0_AP_PLL_STATE)); + writeb((keep_ulp_clk ? 
1 : 0), + (tcdm_base + PRCM_REQ_MB0_ULP_CLOCK_STATE)); + writeb(0, (tcdm_base + PRCM_REQ_MB0_DO_NOT_WFI)); + writel(MBOX_BIT(0), (_PRCMU_BASE + PRCM_MBOX_CPU_SET)); + + spin_unlock_irqrestore(&mb0_transfer.lock, flags); + + return 0; +} + +/* This function should only be called while mb0_transfer.lock is held. */ +static void config_wakeups(void) +{ + const u8 header[2] = { + MB0H_CONFIG_WAKEUPS_EXE, + MB0H_CONFIG_WAKEUPS_SLEEP + }; + static u32 last_dbb_events; + static u32 last_abb_events; + u32 dbb_events; + u32 abb_events; + unsigned int i; + + dbb_events = mb0_transfer.req.dbb_irqs | mb0_transfer.req.dbb_wakeups; + dbb_events |= (WAKEUP_BIT_AC_WAKE_ACK | WAKEUP_BIT_AC_SLEEP_ACK); + + abb_events = mb0_transfer.req.abb_events; + + if ((dbb_events == last_dbb_events) && (abb_events == last_abb_events)) + return; + + for (i = 0; i < 2; i++) { + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0)) + cpu_relax(); + writel(dbb_events, (tcdm_base + PRCM_REQ_MB0_WAKEUP_8500)); + writel(abb_events, (tcdm_base + PRCM_REQ_MB0_WAKEUP_4500)); + writeb(header[i], (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0)); + writel(MBOX_BIT(0), (_PRCMU_BASE + PRCM_MBOX_CPU_SET)); + } + last_dbb_events = dbb_events; + last_abb_events = abb_events; +} + +void prcmu_enable_wakeups(u32 wakeups) +{ + unsigned long flags; + u32 bits; + int i; + + BUG_ON(wakeups != (wakeups & VALID_WAKEUPS)); + + for (i = 0, bits = 0; i < NUM_PRCMU_WAKEUP_INDICES; i++) { + if (wakeups & BIT(i)) + bits |= prcmu_wakeup_bit[i]; + } + + spin_lock_irqsave(&mb0_transfer.lock, flags); + + mb0_transfer.req.dbb_wakeups = bits; + config_wakeups(); + + spin_unlock_irqrestore(&mb0_transfer.lock, flags); +} + +void prcmu_config_abb_event_readout(u32 abb_events) +{ + unsigned long flags; + + spin_lock_irqsave(&mb0_transfer.lock, flags); + + mb0_transfer.req.abb_events = abb_events; + config_wakeups(); + + spin_unlock_irqrestore(&mb0_transfer.lock, flags); +} + +void prcmu_get_abb_event_buffer(void __iomem **buf) +{ + if (readb(tcdm_base + PRCM_ACK_MB0_READ_POINTER) & 1) + *buf = (tcdm_base + PRCM_ACK_MB0_WAKEUP_1_4500); + else + *buf = (tcdm_base + PRCM_ACK_MB0_WAKEUP_0_4500); +} + +/** + * prcmu_set_arm_opp - set the appropriate ARM OPP + * @opp: The new ARM operating point to which transition is to be made + * Returns: 0 on success, non-zero on failure + * + * This function sets the operating point of the ARM.
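All of the blocking PRCMU services below (prcmu_set_arm_opp(), prcmu_set_ape_opp(), prcmu_abb_read() and friends) follow the same request/acknowledge pattern against the TCDM mailboxes. The following condensed sketch is illustrative only (the helper name is invented for the example); the registers, offsets and transfer state it touches are the ones defined earlier in this file:

static int example_mb1_request(u8 header, u8 arm_opp, u8 ape_opp)
{
	int r = 0;

	mutex_lock(&mb1_transfer.lock);

	/* Wait until the PRCMU has consumed any earlier mailbox 1 request. */
	while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
		cpu_relax();

	/* Place the request in TCDM, then ring the doorbell bit. */
	writeb(header, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
	writeb(arm_opp, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
	writeb(ape_opp, (tcdm_base + PRCM_REQ_MB1_APE_OPP));
	writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET));

	/* read_mailbox_1() fills mb1_transfer.ack and completes this. */
	wait_for_completion(&mb1_transfer.work);
	if (mb1_transfer.ack.header != header)
		r = -EIO;

	mutex_unlock(&mb1_transfer.lock);
	return r;
}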
+ */ +int prcmu_set_arm_opp(u8 opp) +{ + int r; + + if (opp < ARM_NO_CHANGE || opp > ARM_EXTCLK) + return -EINVAL; + + r = 0; + + mutex_lock(&mb1_transfer.lock); + + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1)) + cpu_relax(); + + writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1)); + writeb(opp, (tcdm_base + PRCM_REQ_MB1_ARM_OPP)); + writeb(APE_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_APE_OPP)); + + writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET)); + wait_for_completion(&mb1_transfer.work); + + if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) || + (mb1_transfer.ack.arm_opp != opp)) + r = -EIO; + + mutex_unlock(&mb1_transfer.lock); + + return r; +} + +/** + * prcmu_get_arm_opp - get the current ARM OPP + * + * Returns: the current ARM OPP + */ +int prcmu_get_arm_opp(void) +{ + return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_ARM_OPP); +} + +/** + * prcmu_get_ddr_opp - get the current DDR OPP + * + * Returns: the current DDR OPP + */ +int prcmu_get_ddr_opp(void) +{ + return readb(_PRCMU_BASE + PRCM_DDR_SUBSYS_APE_MINBW); +} + +/** + * prcmu_set_ddr_opp - set the appropriate DDR OPP + * @opp: The new DDR operating point to which transition is to be made + * Returns: 0 on success, non-zero on failure + * + * This function sets the operating point of the DDR. + */ +int prcmu_set_ddr_opp(u8 opp) +{ + if (opp < DDR_100_OPP || opp > DDR_25_OPP) + return -EINVAL; + /* Changing the DDR OPP can hang the hardware pre-v21 */ + if (cpu_is_u8500v20_or_later() && !cpu_is_u8500v20()) + writeb(opp, (_PRCMU_BASE + PRCM_DDR_SUBSYS_APE_MINBW)); + + return 0; +} +/** + * prcmu_set_ape_opp - set the appropriate APE OPP + * @opp: The new APE operating point to which transition is to be made + * Returns: 0 on success, non-zero on failure + * + * This function sets the operating point of the APE. + */ +int prcmu_set_ape_opp(u8 opp) +{ + int r = 0; + + mutex_lock(&mb1_transfer.lock); + + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1)) + cpu_relax(); + + writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1)); + writeb(ARM_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_ARM_OPP)); + writeb(opp, (tcdm_base + PRCM_REQ_MB1_APE_OPP)); + + writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET)); + wait_for_completion(&mb1_transfer.work); + + if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) || + (mb1_transfer.ack.ape_opp != opp)) + r = -EIO; + + mutex_unlock(&mb1_transfer.lock); + + return r; +} + +/** + * prcmu_get_ape_opp - get the current APE OPP + * + * Returns: the current APE OPP + */ +int prcmu_get_ape_opp(void) +{ + return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_APE_OPP); +} + +/** + * prcmu_request_ape_opp_100_voltage - Request APE OPP 100% voltage + * @enable: true to request the higher voltage, false to drop a request. + * + * Calls to this function to enable and disable requests must be balanced.
+ */ +int prcmu_request_ape_opp_100_voltage(bool enable) +{ + int r = 0; + u8 header; + static unsigned int requests; + + mutex_lock(&mb1_transfer.lock); + + if (enable) { + if (0 != requests++) + goto unlock_and_return; + header = MB1H_REQUEST_APE_OPP_100_VOLT; + } else { + if (requests == 0) { + r = -EIO; + goto unlock_and_return; + } else if (1 != requests--) { + goto unlock_and_return; + } + header = MB1H_RELEASE_APE_OPP_100_VOLT; + } + + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1)) + cpu_relax(); + + writeb(header, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1)); + + writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET)); + wait_for_completion(&mb1_transfer.work); + + if ((mb1_transfer.ack.header != header) || + ((mb1_transfer.ack.ape_voltage_status & BIT(0)) != 0)) + r = -EIO; + +unlock_and_return: + mutex_unlock(&mb1_transfer.lock); + + return r; +} + +/** + * prcmu_release_usb_wakeup_state - release the state required by a USB wakeup + * + * This function releases the power state requirements of a USB wakeup. + */ +int prcmu_release_usb_wakeup_state(void) +{ + int r = 0; + + mutex_lock(&mb1_transfer.lock); + + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1)) + cpu_relax(); + + writeb(MB1H_RELEASE_USB_WAKEUP, + (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1)); + + writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET)); + wait_for_completion(&mb1_transfer.work); + + if ((mb1_transfer.ack.header != MB1H_RELEASE_USB_WAKEUP) || + ((mb1_transfer.ack.ape_voltage_status & BIT(0)) != 0)) + r = -EIO; + + mutex_unlock(&mb1_transfer.lock); + + return r; +} + +/** + * prcmu_set_epod - set the state of a EPOD (power domain) + * @epod_id: The EPOD to set + * @epod_state: The new EPOD state + * + * This function sets the state of a EPOD (power domain). It may not be called + * from interrupt context. + */ +int prcmu_set_epod(u16 epod_id, u8 epod_state) +{ + int r = 0; + bool ram_retention = false; + int i; + + /* check argument */ + BUG_ON(epod_id >= NUM_EPOD_ID); + + /* set flag if retention is possible */ + switch (epod_id) { + case EPOD_ID_SVAMMDSP: + case EPOD_ID_SIAMMDSP: + case EPOD_ID_ESRAM12: + case EPOD_ID_ESRAM34: + ram_retention = true; + break; + } + + /* check argument */ + BUG_ON(epod_state > EPOD_STATE_ON); + BUG_ON(epod_state == EPOD_STATE_RAMRET && !ram_retention); + + /* get lock */ + mutex_lock(&mb2_transfer.lock); + + /* wait for mailbox */ + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(2)) + cpu_relax(); + + /* fill in mailbox */ + for (i = 0; i < NUM_EPOD_ID; i++) + writeb(EPOD_STATE_NO_CHANGE, (tcdm_base + PRCM_REQ_MB2 + i)); + writeb(epod_state, (tcdm_base + PRCM_REQ_MB2 + epod_id)); + + writeb(MB2H_DPS, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB2)); + + writel(MBOX_BIT(2), (_PRCMU_BASE + PRCM_MBOX_CPU_SET)); + + /* + * The current firmware version does not handle errors correctly, + * and we cannot recover if there is an error. + * This is expected to change when the firmware is updated. + */ + if (!wait_for_completion_timeout(&mb2_transfer.work, + msecs_to_jiffies(20000))) { + pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n", + __func__); + r = -EIO; + goto unlock_and_return; + } + + if (mb2_transfer.ack.status != HWACC_PWR_ST_OK) + r = -EIO; + +unlock_and_return: + mutex_unlock(&mb2_transfer.lock); + return r; +} + +/** + * prcmu_configure_auto_pm - Configure autonomous power management. + * @sleep: Configuration for ApSleep. + * @idle: Configuration for ApIdle. 
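The two 32-bit words that prcmu_configure_auto_pm() below writes to PRCM_REQ_MB2_AUTO_PM_SLEEP and PRCM_REQ_MB2_AUTO_PM_IDLE each pack six fields. The helper below is an illustrative rewrite of that shift/or chain (it is not part of the driver) and doubles as documentation of the resulting bit layout:

/*
 * Resulting layout: [31:28] sva_auto_pm_enable, [27:24] sia_auto_pm_enable,
 * [23:16] sva_power_on, [15:8] sia_power_on, [7:4] sva_policy, [3:0] sia_policy.
 */
static u32 example_pack_auto_pm(const struct prcmu_auto_pm_config *cfg)
{
	return ((u32)(cfg->sva_auto_pm_enable & 0xF) << 28) |
	       ((u32)(cfg->sia_auto_pm_enable & 0xF) << 24) |
	       ((u32)(cfg->sva_power_on & 0xFF) << 16) |
	       ((u32)(cfg->sia_power_on & 0xFF) << 8) |
	       ((cfg->sva_policy & 0xF) << 4) |
	       (cfg->sia_policy & 0xF);
}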
+ */ +void prcmu_configure_auto_pm(struct prcmu_auto_pm_config *sleep, + struct prcmu_auto_pm_config *idle) +{ + u32 sleep_cfg; + u32 idle_cfg; + unsigned long flags; + + BUG_ON((sleep == NULL) || (idle == NULL)); + + sleep_cfg = (sleep->sva_auto_pm_enable & 0xF); + sleep_cfg = ((sleep_cfg << 4) | (sleep->sia_auto_pm_enable & 0xF)); + sleep_cfg = ((sleep_cfg << 8) | (sleep->sva_power_on & 0xFF)); + sleep_cfg = ((sleep_cfg << 8) | (sleep->sia_power_on & 0xFF)); + sleep_cfg = ((sleep_cfg << 4) | (sleep->sva_policy & 0xF)); + sleep_cfg = ((sleep_cfg << 4) | (sleep->sia_policy & 0xF)); + + idle_cfg = (idle->sva_auto_pm_enable & 0xF); + idle_cfg = ((idle_cfg << 4) | (idle->sia_auto_pm_enable & 0xF)); + idle_cfg = ((idle_cfg << 8) | (idle->sva_power_on & 0xFF)); + idle_cfg = ((idle_cfg << 8) | (idle->sia_power_on & 0xFF)); + idle_cfg = ((idle_cfg << 4) | (idle->sva_policy & 0xF)); + idle_cfg = ((idle_cfg << 4) | (idle->sia_policy & 0xF)); + + spin_lock_irqsave(&mb2_transfer.auto_pm_lock, flags); + + /* + * The autonomous power management configuration is done through + * fields in mailbox 2, but these fields are only used as shared + * variables - i.e. there is no need to send a message. + */ + writel(sleep_cfg, (tcdm_base + PRCM_REQ_MB2_AUTO_PM_SLEEP)); + writel(idle_cfg, (tcdm_base + PRCM_REQ_MB2_AUTO_PM_IDLE)); + + mb2_transfer.auto_pm_enabled = + ((sleep->sva_auto_pm_enable == PRCMU_AUTO_PM_ON) || + (sleep->sia_auto_pm_enable == PRCMU_AUTO_PM_ON) || + (idle->sva_auto_pm_enable == PRCMU_AUTO_PM_ON) || + (idle->sia_auto_pm_enable == PRCMU_AUTO_PM_ON)); + + spin_unlock_irqrestore(&mb2_transfer.auto_pm_lock, flags); +} +EXPORT_SYMBOL(prcmu_configure_auto_pm); + +bool prcmu_is_auto_pm_enabled(void) +{ + return mb2_transfer.auto_pm_enabled; +} + +static int request_sysclk(bool enable) +{ + int r; + unsigned long flags; + + r = 0; + + mutex_lock(&mb3_transfer.sysclk_lock); + + spin_lock_irqsave(&mb3_transfer.lock, flags); + + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(3)) + cpu_relax(); + + writeb((enable ? ON : OFF), (tcdm_base + PRCM_REQ_MB3_SYSCLK_MGT)); + + writeb(MB3H_SYSCLK, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB3)); + writel(MBOX_BIT(3), (_PRCMU_BASE + PRCM_MBOX_CPU_SET)); + + spin_unlock_irqrestore(&mb3_transfer.lock, flags); + + /* + * The firmware only sends an ACK if we want to enable the + * SysClk, and it succeeds. + */ + if (enable && !wait_for_completion_timeout(&mb3_transfer.sysclk_work, + msecs_to_jiffies(20000))) { + pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n", + __func__); + r = -EIO; + } + + mutex_unlock(&mb3_transfer.sysclk_lock); + + return r; +} + +static int request_timclk(bool enable) +{ + u32 val = (PRCM_TCR_DOZE_MODE | PRCM_TCR_TENSEL_MASK); + + if (!enable) + val |= PRCM_TCR_STOP_TIMERS; + writel(val, (_PRCMU_BASE + PRCM_TCR)); + + return 0; +} + +static int request_reg_clock(u8 clock, bool enable) +{ + u32 val; + unsigned long flags; + + spin_lock_irqsave(&clk_mgt_lock, flags); + + /* Grab the HW semaphore. */ + while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0) + cpu_relax(); + + val = readl(_PRCMU_BASE + clk_mgt[clock].offset); + if (enable) { + val |= (PRCM_CLK_MGT_CLKEN | clk_mgt[clock].pllsw); + } else { + clk_mgt[clock].pllsw = (val & PRCM_CLK_MGT_CLKPLLSW_MASK); + val &= ~(PRCM_CLK_MGT_CLKEN | PRCM_CLK_MGT_CLKPLLSW_MASK); + } + writel(val, (_PRCMU_BASE + clk_mgt[clock].offset)); + + /* Release the HW semaphore. 
*/ + writel(0, (_PRCMU_BASE + PRCM_SEM)); + + spin_unlock_irqrestore(&clk_mgt_lock, flags); + + return 0; +} + +/** + * prcmu_request_clock() - Request for a clock to be enabled or disabled. + * @clock: The clock for which the request is made. + * @enable: Whether the clock should be enabled (true) or disabled (false). + * + * This function should only be used by the clock implementation. + * Do not use it from any other place! + */ +int prcmu_request_clock(u8 clock, bool enable) +{ + if (clock < PRCMU_NUM_REG_CLOCKS) + return request_reg_clock(clock, enable); + else if (clock == PRCMU_TIMCLK) + return request_timclk(enable); + else if (clock == PRCMU_SYSCLK) + return request_sysclk(enable); + else + return -EINVAL; +} + +int prcmu_config_esram0_deep_sleep(u8 state) +{ + if ((state > ESRAM0_DEEP_SLEEP_STATE_RET) || + (state < ESRAM0_DEEP_SLEEP_STATE_OFF)) + return -EINVAL; + + mutex_lock(&mb4_transfer.lock); + + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4)) + cpu_relax(); + + writeb(MB4H_MEM_ST, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4)); + writeb(((DDR_PWR_STATE_OFFHIGHLAT << 4) | DDR_PWR_STATE_ON), + (tcdm_base + PRCM_REQ_MB4_DDR_ST_AP_SLEEP_IDLE)); + writeb(DDR_PWR_STATE_ON, + (tcdm_base + PRCM_REQ_MB4_DDR_ST_AP_DEEP_IDLE)); + writeb(state, (tcdm_base + PRCM_REQ_MB4_ESRAM0_ST)); + + writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET)); + wait_for_completion(&mb4_transfer.work); + + mutex_unlock(&mb4_transfer.lock); + + return 0; +} + +int prcmu_config_hotdog(u8 threshold) +{ + mutex_lock(&mb4_transfer.lock); + + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4)) + cpu_relax(); + + writeb(threshold, (tcdm_base + PRCM_REQ_MB4_HOTDOG_THRESHOLD)); + writeb(MB4H_HOTDOG, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4)); + + writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET)); + wait_for_completion(&mb4_transfer.work); + + mutex_unlock(&mb4_transfer.lock); + + return 0; +} + +int prcmu_config_hotmon(u8 low, u8 high) +{ + mutex_lock(&mb4_transfer.lock); + + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4)) + cpu_relax(); + + writeb(low, (tcdm_base + PRCM_REQ_MB4_HOTMON_LOW)); + writeb(high, (tcdm_base + PRCM_REQ_MB4_HOTMON_HIGH)); + writeb((HOTMON_CONFIG_LOW | HOTMON_CONFIG_HIGH), + (tcdm_base + PRCM_REQ_MB4_HOTMON_CONFIG)); + writeb(MB4H_HOTMON, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4)); + + writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET)); + wait_for_completion(&mb4_transfer.work); + + mutex_unlock(&mb4_transfer.lock); + + return 0; +} + +static int config_hot_period(u16 val) +{ + mutex_lock(&mb4_transfer.lock); + + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(4)) + cpu_relax(); + + writew(val, (tcdm_base + PRCM_REQ_MB4_HOT_PERIOD)); + writeb(MB4H_HOT_PERIOD, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4)); + + writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_MBOX_CPU_SET)); + wait_for_completion(&mb4_transfer.work); + + mutex_unlock(&mb4_transfer.lock); + + return 0; +} + +int prcmu_start_temp_sense(u16 cycles32k) +{ + if (cycles32k == 0xFFFF) + return -EINVAL; + + return config_hot_period(cycles32k); +} + +int prcmu_stop_temp_sense(void) +{ + return config_hot_period(0xFFFF); +} + +/** + * prcmu_set_clock_divider() - Configure the clock divider. + * @clock: The clock for which the request is made. + * @divider: The clock divider. (< 32) + * + * This function should only be used by the clock implementation. + * Do not use it from any other place! 
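The thermal interface above (prcmu_config_hotmon(), prcmu_start_temp_sense(), prcmu_stop_temp_sense()) pairs with the IRQ_PRCMU_HOTMON_LOW/HIGH wakeups dispatched from read_mailbox_0(). A hypothetical client might wire it up roughly as follows; the handler, the device name and the threshold values are invented for this sketch, and the threshold units are whatever the PRCMU firmware expects:

static irqreturn_t example_hotmon_high_handler(int irq, void *data)
{
	/* Temperature passed the high threshold: throttle, notify, etc. */
	return IRQ_HANDLED;
}

static int example_enable_thermal_monitoring(void)
{
	int err;

	err = request_threaded_irq(IRQ_PRCMU_HOTMON_HIGH, NULL,
				   example_hotmon_high_handler, IRQF_ONESHOT,
				   "example-hotmon", NULL);
	if (err)
		return err;

	err = prcmu_config_hotmon(50, 70);	/* illustrative thresholds */
	if (err)
		return err;

	/* Measurement period in 32 kHz cycles; 0xFFFF is reserved for "stop". */
	return prcmu_start_temp_sense(0x1000);
}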
+ */ +int prcmu_set_clock_divider(u8 clock, u8 divider) +{ + u32 val; + unsigned long flags; + + if ((clock >= PRCMU_NUM_REG_CLOCKS) || (divider < 1) || (31 < divider)) + return -EINVAL; + + spin_lock_irqsave(&clk_mgt_lock, flags); + + /* Grab the HW semaphore. */ + while ((readl(_PRCMU_BASE + PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0) + cpu_relax(); + + val = readl(_PRCMU_BASE + clk_mgt[clock].offset); + val &= ~(PRCM_CLK_MGT_CLKPLLDIV_MASK); + val |= (u32)divider; + writel(val, (_PRCMU_BASE + clk_mgt[clock].offset)); + + /* Release the HW semaphore. */ + writel(0, (_PRCMU_BASE + PRCM_SEM)); + + spin_unlock_irqrestore(&clk_mgt_lock, flags); + + return 0; +} + +/** + * prcmu_abb_read() - Read register value(s) from the ABB. + * @slave: The I2C slave address. + * @reg: The (start) register address. + * @value: The read out value(s). + * @size: The number of registers to read. + * + * Reads register value(s) from the ABB. + * @size has to be 1 for the current firmware version. + */ +int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size) +{ + int r; + + if (size != 1) + return -EINVAL; + + mutex_lock(&mb5_transfer.lock); + + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(5)) + cpu_relax(); + + writeb(PRCMU_I2C_READ(slave), (tcdm_base + PRCM_REQ_MB5_I2C_SLAVE_OP)); + writeb(PRCMU_I2C_STOP_EN, (tcdm_base + PRCM_REQ_MB5_I2C_HW_BITS)); + writeb(reg, (tcdm_base + PRCM_REQ_MB5_I2C_REG)); + writeb(0, (tcdm_base + PRCM_REQ_MB5_I2C_VAL)); + + writel(MBOX_BIT(5), (_PRCMU_BASE + PRCM_MBOX_CPU_SET)); + + if (!wait_for_completion_timeout(&mb5_transfer.work, + msecs_to_jiffies(20000))) { + pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n", + __func__); + r = -EIO; + } else { + r = ((mb5_transfer.ack.status == I2C_RD_OK) ? 0 : -EIO); + } + + if (!r) + *value = mb5_transfer.ack.value; + + mutex_unlock(&mb5_transfer.lock); + + return r; +} + +/** + * prcmu_abb_write() - Write register value(s) to the ABB. + * @slave: The I2C slave address. + * @reg: The (start) register address. + * @value: The value(s) to write. + * @size: The number of registers to write. + * + * Writes register value(s) to the ABB. + * @size has to be 1 for the current firmware version. + */ +int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size) +{ + int r; + + if (size != 1) + return -EINVAL; + + mutex_lock(&mb5_transfer.lock); + + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(5)) + cpu_relax(); + + writeb(PRCMU_I2C_WRITE(slave), (tcdm_base + PRCM_REQ_MB5_I2C_SLAVE_OP)); + writeb(PRCMU_I2C_STOP_EN, (tcdm_base + PRCM_REQ_MB5_I2C_HW_BITS)); + writeb(reg, (tcdm_base + PRCM_REQ_MB5_I2C_REG)); + writeb(*value, (tcdm_base + PRCM_REQ_MB5_I2C_VAL)); + + writel(MBOX_BIT(5), (_PRCMU_BASE + PRCM_MBOX_CPU_SET)); + + if (!wait_for_completion_timeout(&mb5_transfer.work, + msecs_to_jiffies(20000))) { + pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n", + __func__); + r = -EIO; + } else { + r = ((mb5_transfer.ack.status == I2C_WR_OK) ?
0 : -EIO); + } + + mutex_unlock(&mb5_transfer.lock); + + return r; +} + +/** + * prcmu_ac_wake_req - should be called whenever the ARM wants to wake up the modem + */ +void prcmu_ac_wake_req(void) +{ + u32 val; + + mutex_lock(&mb0_transfer.ac_wake_lock); + + val = readl(_PRCMU_BASE + PRCM_HOSTACCESS_REQ); + if (val & PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ) + goto unlock_and_return; + + atomic_set(&ac_wake_req_state, 1); + + writel((val | PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ), + (_PRCMU_BASE + PRCM_HOSTACCESS_REQ)); + + if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work, + msecs_to_jiffies(20000))) { + pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n", + __func__); + } + +unlock_and_return: + mutex_unlock(&mb0_transfer.ac_wake_lock); +} + +/** + * prcmu_ac_sleep_req - called when the ARM no longer needs to talk to the modem + */ +void prcmu_ac_sleep_req(void) +{ + u32 val; + + mutex_lock(&mb0_transfer.ac_wake_lock); + + val = readl(_PRCMU_BASE + PRCM_HOSTACCESS_REQ); + if (!(val & PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ)) + goto unlock_and_return; + + writel((val & ~PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ), + (_PRCMU_BASE + PRCM_HOSTACCESS_REQ)); + + if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work, + msecs_to_jiffies(20000))) { + pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n", + __func__); + } + + atomic_set(&ac_wake_req_state, 0); + +unlock_and_return: + mutex_unlock(&mb0_transfer.ac_wake_lock); +} + +bool prcmu_is_ac_wake_requested(void) +{ + return (atomic_read(&ac_wake_req_state) != 0); +} + +/** + * prcmu_system_reset - System reset + * + * Saves the reset reason code and then sets the APE_SOFTRST register, which + * fires an interrupt to the firmware + */ +void prcmu_system_reset(u16 reset_code) +{ + writew(reset_code, (tcdm_base + PRCM_SW_RST_REASON)); + writel(1, (_PRCMU_BASE + PRCM_APE_SOFTRST)); +} + +/** + * prcmu_modem_reset - ask the PRCMU to reset the modem + */ +void prcmu_modem_reset(void) +{ + mutex_lock(&mb1_transfer.lock); + + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(1)) + cpu_relax(); + + writeb(MB1H_RESET_MODEM, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1)); + writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_MBOX_CPU_SET)); + wait_for_completion(&mb1_transfer.work); + + /* + * No need to check the return from the PRCMU, as the modem should go + * into the reset state. This state is already managed by the upper layer. + */ + + mutex_unlock(&mb1_transfer.lock); +} + +static void ack_dbb_wakeup(void) +{ + unsigned long flags; + + spin_lock_irqsave(&mb0_transfer.lock, flags); + + while (readl(_PRCMU_BASE + PRCM_MBOX_CPU_VAL) & MBOX_BIT(0)) + cpu_relax(); + + writeb(MB0H_READ_WAKEUP_ACK, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0)); + writel(MBOX_BIT(0), (_PRCMU_BASE + PRCM_MBOX_CPU_SET)); + + spin_unlock_irqrestore(&mb0_transfer.lock, flags); +} + +static inline void print_unknown_header_warning(u8 n, u8 header) +{ + pr_warning("prcmu: Unknown message header (%d) in mailbox %d.\n", + header, n); +} + +static bool read_mailbox_0(void) +{ + bool r; + u32 ev; + unsigned int n; + u8 header; + + header = readb(tcdm_base + PRCM_MBOX_HEADER_ACK_MB0); + switch (header) { + case MB0H_WAKEUP_EXE: + case MB0H_WAKEUP_SLEEP: + if (readb(tcdm_base + PRCM_ACK_MB0_READ_POINTER) & 1) + ev = readl(tcdm_base + PRCM_ACK_MB0_WAKEUP_1_8500); + else + ev = readl(tcdm_base + PRCM_ACK_MB0_WAKEUP_0_8500); + + if (ev & (WAKEUP_BIT_AC_WAKE_ACK | WAKEUP_BIT_AC_SLEEP_ACK)) + complete(&mb0_transfer.ac_wake_work); + if (ev & WAKEUP_BIT_SYSCLK_OK) + complete(&mb3_transfer.sysclk_work); + + ev &= mb0_transfer.req.dbb_irqs; + + for (n = 0; n <
NUM_PRCMU_WAKEUPS; n++) { + if (ev & prcmu_irq_bit[n]) + generic_handle_irq(IRQ_PRCMU_BASE + n); + } + r = true; + break; + default: + print_unknown_header_warning(0, header); + r = false; + break; + } + writel(MBOX_BIT(0), (_PRCMU_BASE + PRCM_ARM_IT1_CLR)); + return r; +} + +static bool read_mailbox_1(void) +{ + mb1_transfer.ack.header = readb(tcdm_base + PRCM_MBOX_HEADER_REQ_MB1); + mb1_transfer.ack.arm_opp = readb(tcdm_base + + PRCM_ACK_MB1_CURRENT_ARM_OPP); + mb1_transfer.ack.ape_opp = readb(tcdm_base + + PRCM_ACK_MB1_CURRENT_APE_OPP); + mb1_transfer.ack.ape_voltage_status = readb(tcdm_base + + PRCM_ACK_MB1_APE_VOLTAGE_STATUS); + writel(MBOX_BIT(1), (_PRCMU_BASE + PRCM_ARM_IT1_CLR)); + complete(&mb1_transfer.work); + return false; +} + +static bool read_mailbox_2(void) +{ + mb2_transfer.ack.status = readb(tcdm_base + PRCM_ACK_MB2_DPS_STATUS); + writel(MBOX_BIT(2), (_PRCMU_BASE + PRCM_ARM_IT1_CLR)); + complete(&mb2_transfer.work); + return false; +} + +static bool read_mailbox_3(void) +{ + writel(MBOX_BIT(3), (_PRCMU_BASE + PRCM_ARM_IT1_CLR)); + return false; +} + +static bool read_mailbox_4(void) +{ + u8 header; + bool do_complete = true; + + header = readb(tcdm_base + PRCM_MBOX_HEADER_REQ_MB4); + switch (header) { + case MB4H_MEM_ST: + case MB4H_HOTDOG: + case MB4H_HOTMON: + case MB4H_HOT_PERIOD: + break; + default: + print_unknown_header_warning(4, header); + do_complete = false; + break; + } + + writel(MBOX_BIT(4), (_PRCMU_BASE + PRCM_ARM_IT1_CLR)); + + if (do_complete) + complete(&mb4_transfer.work); + + return false; +} + +static bool read_mailbox_5(void) +{ + mb5_transfer.ack.status = readb(tcdm_base + PRCM_ACK_MB5_I2C_STATUS); + mb5_transfer.ack.value = readb(tcdm_base + PRCM_ACK_MB5_I2C_VAL); + writel(MBOX_BIT(5), (_PRCMU_BASE + PRCM_ARM_IT1_CLR)); + complete(&mb5_transfer.work); + return false; +} + +static bool read_mailbox_6(void) +{ + writel(MBOX_BIT(6), (_PRCMU_BASE + PRCM_ARM_IT1_CLR)); + return false; +} + +static bool read_mailbox_7(void) +{ + writel(MBOX_BIT(7), (_PRCMU_BASE + PRCM_ARM_IT1_CLR)); + return false; +} + +static bool (* const read_mailbox[NUM_MB])(void) = { + read_mailbox_0, + read_mailbox_1, + read_mailbox_2, + read_mailbox_3, + read_mailbox_4, + read_mailbox_5, + read_mailbox_6, + read_mailbox_7 +}; + +static irqreturn_t prcmu_irq_handler(int irq, void *data) +{ + u32 bits; + u8 n; + irqreturn_t r; + + bits = (readl(_PRCMU_BASE + PRCM_ARM_IT1_VAL) & ALL_MBOX_BITS); + if (unlikely(!bits)) + return IRQ_NONE; + + r = IRQ_HANDLED; + for (n = 0; bits; n++) { + if (bits & MBOX_BIT(n)) { + bits -= MBOX_BIT(n); + if (read_mailbox[n]()) + r = IRQ_WAKE_THREAD; + } + } + return r; +} + +static irqreturn_t prcmu_irq_thread_fn(int irq, void *data) +{ + ack_dbb_wakeup(); + return IRQ_HANDLED; +} + +static void prcmu_mask_work(struct work_struct *work) +{ + unsigned long flags; + + spin_lock_irqsave(&mb0_transfer.lock, flags); + + config_wakeups(); + + spin_unlock_irqrestore(&mb0_transfer.lock, flags); +} + +static void prcmu_irq_mask(struct irq_data *d) +{ + unsigned long flags; + + spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags); + + mb0_transfer.req.dbb_irqs &= ~prcmu_irq_bit[d->irq - IRQ_PRCMU_BASE]; + + spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags); + + if (d->irq != IRQ_PRCMU_CA_SLEEP) + schedule_work(&mb0_transfer.mask_work); +} + +static void prcmu_irq_unmask(struct irq_data *d) +{ + unsigned long flags; + + spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags); + + mb0_transfer.req.dbb_irqs |= prcmu_irq_bit[d->irq - IRQ_PRCMU_BASE]; + + 
spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags); + + if (d->irq != IRQ_PRCMU_CA_SLEEP) + schedule_work(&mb0_transfer.mask_work); +} + +static void noop(struct irq_data *d) +{ +} + +static struct irq_chip prcmu_irq_chip = { + .name = "prcmu", + .irq_disable = prcmu_irq_mask, + .irq_ack = noop, + .irq_mask = prcmu_irq_mask, + .irq_unmask = prcmu_irq_unmask, +}; + +void __init prcmu_early_init(void) +{ + unsigned int i; + + if (cpu_is_u8500v1()) { + tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE_V1); + } else if (cpu_is_u8500v2()) { + void *tcpm_base = ioremap_nocache(U8500_PRCMU_TCPM_BASE, SZ_4K); + + if (tcpm_base != NULL) { + int version; + version = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET); + prcmu_version.project_number = version & 0xFF; + prcmu_version.api_version = (version >> 8) & 0xFF; + prcmu_version.func_version = (version >> 16) & 0xFF; + prcmu_version.errata = (version >> 24) & 0xFF; + pr_info("PRCMU firmware version %d.%d.%d\n", + (version >> 8) & 0xFF, (version >> 16) & 0xFF, + (version >> 24) & 0xFF); + iounmap(tcpm_base); + } + + tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE); + } else { + pr_err("prcmu: Unsupported chip version\n"); + BUG(); + } + + spin_lock_init(&mb0_transfer.lock); + spin_lock_init(&mb0_transfer.dbb_irqs_lock); + mutex_init(&mb0_transfer.ac_wake_lock); + init_completion(&mb0_transfer.ac_wake_work); + mutex_init(&mb1_transfer.lock); + init_completion(&mb1_transfer.work); + mutex_init(&mb2_transfer.lock); + init_completion(&mb2_transfer.work); + spin_lock_init(&mb2_transfer.auto_pm_lock); + spin_lock_init(&mb3_transfer.lock); + mutex_init(&mb3_transfer.sysclk_lock); + init_completion(&mb3_transfer.sysclk_work); + mutex_init(&mb4_transfer.lock); + init_completion(&mb4_transfer.work); + mutex_init(&mb5_transfer.lock); + init_completion(&mb5_transfer.work); + + INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work); + + /* Initialize irqs.
*/ + for (i = 0; i < NUM_PRCMU_WAKEUPS; i++) { + unsigned int irq; + + irq = IRQ_PRCMU_BASE + i; + irq_set_chip_and_handler(irq, &prcmu_irq_chip, + handle_simple_irq); + set_irq_flags(irq, IRQF_VALID); + } +} + +/* + * Power domain switches (ePODs) modeled as regulators for the DB8500 SoC + */ +static struct regulator_consumer_supply db8500_vape_consumers[] = { + REGULATOR_SUPPLY("v-ape", NULL), + REGULATOR_SUPPLY("v-i2c", "nmk-i2c.0"), + REGULATOR_SUPPLY("v-i2c", "nmk-i2c.1"), + REGULATOR_SUPPLY("v-i2c", "nmk-i2c.2"), + REGULATOR_SUPPLY("v-i2c", "nmk-i2c.3"), + /* "v-mmc" changed to "vcore" in the mainline kernel */ + REGULATOR_SUPPLY("vcore", "sdi0"), + REGULATOR_SUPPLY("vcore", "sdi1"), + REGULATOR_SUPPLY("vcore", "sdi2"), + REGULATOR_SUPPLY("vcore", "sdi3"), + REGULATOR_SUPPLY("vcore", "sdi4"), + REGULATOR_SUPPLY("v-dma", "dma40.0"), + REGULATOR_SUPPLY("v-ape", "ab8500-usb.0"), + /* "v-uart" changed to "vcore" in the mainline kernel */ + REGULATOR_SUPPLY("vcore", "uart0"), + REGULATOR_SUPPLY("vcore", "uart1"), + REGULATOR_SUPPLY("vcore", "uart2"), + REGULATOR_SUPPLY("v-ape", "nmk-ske-keypad.0"), +}; + +static struct regulator_consumer_supply db8500_vsmps2_consumers[] = { + /* CG2900 and CW1200 power to off-chip peripherals */ + REGULATOR_SUPPLY("gbf_1v8", "cg2900-uart.0"), + REGULATOR_SUPPLY("wlan_1v8", "cw1200.0"), + REGULATOR_SUPPLY("musb_1v8", "ab8500-usb.0"), + /* AV8100 regulator */ + REGULATOR_SUPPLY("hdmi_1v8", "0-0070"), +}; + +static struct regulator_consumer_supply db8500_b2r2_mcde_consumers[] = { + REGULATOR_SUPPLY("vsupply", "b2r2.0"), + REGULATOR_SUPPLY("vsupply", "mcde.0"), +}; + +static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = { + [DB8500_REGULATOR_VAPE] = { + .constraints = { + .name = "db8500-vape", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .consumer_supplies = db8500_vape_consumers, + .num_consumer_supplies = ARRAY_SIZE(db8500_vape_consumers), + }, + [DB8500_REGULATOR_VARM] = { + .constraints = { + .name = "db8500-varm", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + }, + [DB8500_REGULATOR_VMODEM] = { + .constraints = { + .name = "db8500-vmodem", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + }, + [DB8500_REGULATOR_VPLL] = { + .constraints = { + .name = "db8500-vpll", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + }, + [DB8500_REGULATOR_VSMPS1] = { + .constraints = { + .name = "db8500-vsmps1", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + }, + [DB8500_REGULATOR_VSMPS2] = { + .constraints = { + .name = "db8500-vsmps2", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .consumer_supplies = db8500_vsmps2_consumers, + .num_consumer_supplies = ARRAY_SIZE(db8500_vsmps2_consumers), + }, + [DB8500_REGULATOR_VSMPS3] = { + .constraints = { + .name = "db8500-vsmps3", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + }, + [DB8500_REGULATOR_VRF1] = { + .constraints = { + .name = "db8500-vrf1", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + }, + [DB8500_REGULATOR_SWITCH_SVAMMDSP] = { + .supply_regulator = "db8500-vape", + .constraints = { + .name = "db8500-sva-mmdsp", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + }, + [DB8500_REGULATOR_SWITCH_SVAMMDSPRET] = { + .constraints = { + /* "ret" means "retention" */ + .name = "db8500-sva-mmdsp-ret", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + }, + [DB8500_REGULATOR_SWITCH_SVAPIPE] = { + .supply_regulator = "db8500-vape", + .constraints = { + .name = "db8500-sva-pipe", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + }, + 
[DB8500_REGULATOR_SWITCH_SIAMMDSP] = { + .supply_regulator = "db8500-vape", + .constraints = { + .name = "db8500-sia-mmdsp", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + }, + [DB8500_REGULATOR_SWITCH_SIAMMDSPRET] = { + .constraints = { + .name = "db8500-sia-mmdsp-ret", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + }, + [DB8500_REGULATOR_SWITCH_SIAPIPE] = { + .supply_regulator = "db8500-vape", + .constraints = { + .name = "db8500-sia-pipe", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + }, + [DB8500_REGULATOR_SWITCH_SGA] = { + .supply_regulator = "db8500-vape", + .constraints = { + .name = "db8500-sga", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + }, + [DB8500_REGULATOR_SWITCH_B2R2_MCDE] = { + .supply_regulator = "db8500-vape", + .constraints = { + .name = "db8500-b2r2-mcde", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + .consumer_supplies = db8500_b2r2_mcde_consumers, + .num_consumer_supplies = ARRAY_SIZE(db8500_b2r2_mcde_consumers), + }, + [DB8500_REGULATOR_SWITCH_ESRAM12] = { + .supply_regulator = "db8500-vape", + .constraints = { + .name = "db8500-esram12", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + }, + [DB8500_REGULATOR_SWITCH_ESRAM12RET] = { + .constraints = { + .name = "db8500-esram12-ret", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + }, + [DB8500_REGULATOR_SWITCH_ESRAM34] = { + .supply_regulator = "db8500-vape", + .constraints = { + .name = "db8500-esram34", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + }, + [DB8500_REGULATOR_SWITCH_ESRAM34RET] = { + .constraints = { + .name = "db8500-esram34-ret", + .valid_ops_mask = REGULATOR_CHANGE_STATUS, + }, + }, +}; + +static struct mfd_cell db8500_prcmu_devs[] = { + { + .name = "db8500-prcmu-regulators", + .mfd_data = &db8500_regulators, + }, + { + .name = "cpufreq-u8500", + }, +}; + +/** + * prcmu_fw_init - arch init call for the Linux PRCMU fw init logic + * + */ +static int __init db8500_prcmu_probe(struct platform_device *pdev) +{ + int err = 0; + + if (ux500_is_svp()) + return -ENODEV; + + /* Clean up the mailbox interrupts after pre-kernel code. 
*/ + writel(ALL_MBOX_BITS, (_PRCMU_BASE + PRCM_ARM_IT1_CLR)); + + err = request_threaded_irq(IRQ_DB8500_PRCMU1, prcmu_irq_handler, + prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL); + if (err < 0) { + pr_err("prcmu: Failed to allocate IRQ_DB8500_PRCMU1.\n"); + err = -EBUSY; + goto no_irq_return; + } + + if (cpu_is_u8500v20_or_later()) + prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET); + + err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs, + ARRAY_SIZE(db8500_prcmu_devs), NULL, + 0); + + if (err) + pr_err("prcmu: Failed to add subdevices\n"); + else + pr_info("DB8500 PRCMU initialized\n"); + +no_irq_return: + return err; +} + +static struct platform_driver db8500_prcmu_driver = { + .driver = { + .name = "db8500-prcmu", + .owner = THIS_MODULE, + }, +}; + +static int __init db8500_prcmu_init(void) +{ + return platform_driver_probe(&db8500_prcmu_driver, db8500_prcmu_probe); +} + +arch_initcall(db8500_prcmu_init); + +MODULE_AUTHOR("Mattias Nilsson <mattias.i.nilsson@stericsson.com>"); +MODULE_DESCRIPTION("DB8500 PRCM Unit driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 61d233a7c11..71da5641e25 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -31,7 +31,11 @@ #include <linux/mutex.h> #include <linux/scatterlist.h> #include <linux/string_helpers.h> +#include <linux/delay.h> +#include <linux/capability.h> +#include <linux/compat.h> +#include <linux/mmc/ioctl.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> #include <linux/mmc/mmc.h> @@ -48,6 +52,13 @@ MODULE_ALIAS("mmc:block"); #endif #define MODULE_PARAM_PREFIX "mmcblk." +#define INAND_CMD38_ARG_EXT_CSD 113 +#define INAND_CMD38_ARG_ERASE 0x00 +#define INAND_CMD38_ARG_TRIM 0x01 +#define INAND_CMD38_ARG_SECERASE 0x80 +#define INAND_CMD38_ARG_SECTRIM1 0x81 +#define INAND_CMD38_ARG_SECTRIM2 0x88 + static DEFINE_MUTEX(block_mutex); /* @@ -64,6 +75,7 @@ static int max_devices; /* 256 minors, so at most 256 separate devices */ static DECLARE_BITMAP(dev_use, 256); +static DECLARE_BITMAP(name_use, 256); /* * There is one mmc_blk_data per slot. @@ -72,9 +84,24 @@ struct mmc_blk_data { spinlock_t lock; struct gendisk *disk; struct mmc_queue queue; + struct list_head part; + + unsigned int flags; +#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */ +#define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */ unsigned int usage; unsigned int read_only; + unsigned int part_type; + unsigned int name_idx; + + /* + * Only set in main mmc_blk_data associated + * with mmc_card with mmc_set_drvdata, and keeps + * track of the current selected device partition. 
+ */ + unsigned int part_curr; + struct device_attribute force_ro; }; static DEFINE_MUTEX(open_lock); @@ -97,17 +124,22 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) return md; } +static inline int mmc_get_devidx(struct gendisk *disk) +{ + int devmaj = MAJOR(disk_devt(disk)); + int devidx = MINOR(disk_devt(disk)) / perdev_minors; + + if (!devmaj) + devidx = disk->first_minor / perdev_minors; + return devidx; +} + static void mmc_blk_put(struct mmc_blk_data *md) { mutex_lock(&open_lock); md->usage--; if (md->usage == 0) { - int devmaj = MAJOR(disk_devt(md->disk)); - int devidx = MINOR(disk_devt(md->disk)) / perdev_minors; - - if (!devmaj) - devidx = md->disk->first_minor / perdev_minors; - + int devidx = mmc_get_devidx(md->disk); blk_cleanup_queue(md->queue.queue); __clear_bit(devidx, dev_use); @@ -118,6 +150,38 @@ static void mmc_blk_put(struct mmc_blk_data *md) mutex_unlock(&open_lock); } +static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret; + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); + + ret = snprintf(buf, PAGE_SIZE, "%d", + get_disk_ro(dev_to_disk(dev)) ^ + md->read_only); + mmc_blk_put(md); + return ret; +} + +static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int ret; + char *end; + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); + unsigned long set = simple_strtoul(buf, &end, 0); + if (end == buf) { + ret = -EINVAL; + goto out; + } + + set_disk_ro(dev_to_disk(dev), set || md->read_only); + ret = count; +out: + mmc_blk_put(md); + return ret; +} + static int mmc_blk_open(struct block_device *bdev, fmode_t mode) { struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk); @@ -158,35 +222,255 @@ mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) return 0; } +struct mmc_blk_ioc_data { + struct mmc_ioc_cmd ic; + unsigned char *buf; + u64 buf_bytes; +}; + +static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user( + struct mmc_ioc_cmd __user *user) +{ + struct mmc_blk_ioc_data *idata; + int err; + + idata = kzalloc(sizeof(*idata), GFP_KERNEL); + if (!idata) { + err = -ENOMEM; + goto out; + } + + if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) { + err = -EFAULT; + goto idata_err; + } + + idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks; + if (idata->buf_bytes > MMC_IOC_MAX_BYTES) { + err = -EOVERFLOW; + goto idata_err; + } + + idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL); + if (!idata->buf) { + err = -ENOMEM; + goto idata_err; + } + + if (copy_from_user(idata->buf, (void __user *)(unsigned long) + idata->ic.data_ptr, idata->buf_bytes)) { + err = -EFAULT; + goto copy_err; + } + + return idata; + +copy_err: + kfree(idata->buf); +idata_err: + kfree(idata); +out: + return ERR_PTR(err); +} + +static int mmc_blk_ioctl_cmd(struct block_device *bdev, + struct mmc_ioc_cmd __user *ic_ptr) +{ + struct mmc_blk_ioc_data *idata; + struct mmc_blk_data *md; + struct mmc_card *card; + struct mmc_command cmd = {0}; + struct mmc_data data = {0}; + struct mmc_request mrq = {0}; + struct scatterlist sg; + int err; + + /* + * The caller must have CAP_SYS_RAWIO, and must be calling this on the + * whole block device, not on a partition. This prevents overspray + * between sibling partitions. 
+ */ + if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains)) + return -EPERM; + + idata = mmc_blk_ioctl_copy_from_user(ic_ptr); + if (IS_ERR(idata)) + return PTR_ERR(idata); + + cmd.opcode = idata->ic.opcode; + cmd.arg = idata->ic.arg; + cmd.flags = idata->ic.flags; + + data.sg = &sg; + data.sg_len = 1; + data.blksz = idata->ic.blksz; + data.blocks = idata->ic.blocks; + + sg_init_one(data.sg, idata->buf, idata->buf_bytes); + + if (idata->ic.write_flag) + data.flags = MMC_DATA_WRITE; + else + data.flags = MMC_DATA_READ; + + mrq.cmd = &cmd; + mrq.data = &data; + + md = mmc_blk_get(bdev->bd_disk); + if (!md) { + err = -EINVAL; + goto cmd_done; + } + + card = md->queue.card; + if (IS_ERR(card)) { + err = PTR_ERR(card); + goto cmd_done; + } + + mmc_claim_host(card->host); + + if (idata->ic.is_acmd) { + err = mmc_app_cmd(card->host, card); + if (err) + goto cmd_rel_host; + } + + /* data.flags must already be set before doing this. */ + mmc_set_data_timeout(&data, card); + /* Allow overriding the timeout_ns for empirical tuning. */ + if (idata->ic.data_timeout_ns) + data.timeout_ns = idata->ic.data_timeout_ns; + + if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { + /* + * Pretend this is a data transfer and rely on the host driver + * to compute timeout. When all host drivers support + * cmd.cmd_timeout for R1B, this can be changed to: + * + * mrq.data = NULL; + * cmd.cmd_timeout = idata->ic.cmd_timeout_ms; + */ + data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000; + } + + mmc_wait_for_req(card->host, &mrq); + + if (cmd.error) { + dev_err(mmc_dev(card->host), "%s: cmd error %d\n", + __func__, cmd.error); + err = cmd.error; + goto cmd_rel_host; + } + if (data.error) { + dev_err(mmc_dev(card->host), "%s: data error %d\n", + __func__, data.error); + err = data.error; + goto cmd_rel_host; + } + + /* + * According to the SD specs, some commands require a delay after + * issuing the command. 
+ */ + if (idata->ic.postsleep_min_us) + usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us); + + if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) { + err = -EFAULT; + goto cmd_rel_host; + } + + if (!idata->ic.write_flag) { + if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr, + idata->buf, idata->buf_bytes)) { + err = -EFAULT; + goto cmd_rel_host; + } + } + +cmd_rel_host: + mmc_release_host(card->host); + +cmd_done: + mmc_blk_put(md); + kfree(idata->buf); + kfree(idata); + return err; +} + +static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + int ret = -EINVAL; + if (cmd == MMC_IOC_CMD) + ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg); + return ret; +} + +#ifdef CONFIG_COMPAT +static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg)); +} +#endif + static const struct block_device_operations mmc_bdops = { .open = mmc_blk_open, .release = mmc_blk_release, .getgeo = mmc_blk_getgeo, .owner = THIS_MODULE, + .ioctl = mmc_blk_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = mmc_blk_compat_ioctl, +#endif }; struct mmc_blk_request { struct mmc_request mrq; + struct mmc_command sbc; struct mmc_command cmd; struct mmc_command stop; struct mmc_data data; }; +static inline int mmc_blk_part_switch(struct mmc_card *card, + struct mmc_blk_data *md) +{ + int ret; + struct mmc_blk_data *main_md = mmc_get_drvdata(card); + if (main_md->part_curr == md->part_type) + return 0; + + if (mmc_card_mmc(card)) { + card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; + card->ext_csd.part_config |= md->part_type; + + ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_PART_CONFIG, card->ext_csd.part_config, + card->ext_csd.part_time); + if (ret) + return ret; +} + + main_md->part_curr = md->part_type; + return 0; +} + static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) { int err; u32 result; __be32 *blocks; - struct mmc_request mrq; - struct mmc_command cmd; - struct mmc_data data; + struct mmc_request mrq = {0}; + struct mmc_command cmd = {0}; + struct mmc_data data = {0}; unsigned int timeout_us; struct scatterlist sg; - memset(&cmd, 0, sizeof(struct mmc_command)); - cmd.opcode = MMC_APP_CMD; cmd.arg = card->rca << 16; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; @@ -203,8 +487,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) cmd.arg = 0; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; - memset(&data, 0, sizeof(struct mmc_data)); - data.timeout_ns = card->csd.tacc_ns * 100; data.timeout_clks = card->csd.tacc_clks * 100; @@ -223,8 +505,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) data.sg = &sg; data.sg_len = 1; - memset(&mrq, 0, sizeof(struct mmc_request)); - mrq.cmd = &cmd; mrq.data = &data; @@ -247,10 +527,9 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) static u32 get_card_status(struct mmc_card *card, struct request *req) { - struct mmc_command cmd; + struct mmc_command cmd = {0}; int err; - memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = MMC_SEND_STATUS; if (!mmc_host_is_spi(card->host)) cmd.arg = card->rca << 16; @@ -269,8 +548,6 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) unsigned int from, nr, arg; int err = 0; - mmc_claim_host(card->host); - if (!mmc_can_erase(card)) { err = -EOPNOTSUPP; goto out; @@ -284,14 +561,22 @@ static int 
mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) else arg = MMC_ERASE_ARG; + if (card->quirks & MMC_QUIRK_INAND_CMD38) { + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + INAND_CMD38_ARG_EXT_CSD, + arg == MMC_TRIM_ARG ? + INAND_CMD38_ARG_TRIM : + INAND_CMD38_ARG_ERASE, + 0); + if (err) + goto out; + } err = mmc_erase(card, from, nr, arg); out: spin_lock_irq(&md->lock); __blk_end_request(req, err, blk_rq_bytes(req)); spin_unlock_irq(&md->lock); - mmc_release_host(card->host); - return err ? 0 : 1; } @@ -303,8 +588,6 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, unsigned int from, nr, arg; int err = 0; - mmc_claim_host(card->host); - if (!mmc_can_secure_erase_trim(card)) { err = -EOPNOTSUPP; goto out; @@ -318,19 +601,74 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, else arg = MMC_SECURE_ERASE_ARG; + if (card->quirks & MMC_QUIRK_INAND_CMD38) { + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + INAND_CMD38_ARG_EXT_CSD, + arg == MMC_SECURE_TRIM1_ARG ? + INAND_CMD38_ARG_SECTRIM1 : + INAND_CMD38_ARG_SECERASE, + 0); + if (err) + goto out; + } err = mmc_erase(card, from, nr, arg); - if (!err && arg == MMC_SECURE_TRIM1_ARG) + if (!err && arg == MMC_SECURE_TRIM1_ARG) { + if (card->quirks & MMC_QUIRK_INAND_CMD38) { + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + INAND_CMD38_ARG_EXT_CSD, + INAND_CMD38_ARG_SECTRIM2, + 0); + if (err) + goto out; + } err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); + } out: spin_lock_irq(&md->lock); __blk_end_request(req, err, blk_rq_bytes(req)); spin_unlock_irq(&md->lock); - mmc_release_host(card->host); - return err ? 0 : 1; } +static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) +{ + struct mmc_blk_data *md = mq->data; + + /* + * No-op, only service this because we need REQ_FUA for reliable + * writes. + */ + spin_lock_irq(&md->lock); + __blk_end_request_all(req, 0); + spin_unlock_irq(&md->lock); + + return 1; +} + +/* + * Reformat current write as a reliable write, supporting + * both legacy and the enhanced reliable write MMC cards. + * In each transfer we'll handle only as much as a single + * reliable write can handle, thus finish the request in + * partial completions. + */ +static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq, + struct mmc_card *card, + struct request *req) +{ + if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) { + /* Legacy mode imposes restrictions on transfers. */ + if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors)) + brq->data.blocks = 1; + + if (brq->data.blocks > card->ext_csd.rel_sectors) + brq->data.blocks = card->ext_csd.rel_sectors; + else if (brq->data.blocks < card->ext_csd.rel_sectors) + brq->data.blocks = 1; + } +} + static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->data; @@ -338,10 +676,17 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req) struct mmc_blk_request brq; int ret = 1, disable_multi = 0; - mmc_claim_host(card->host); + /* + * Reliable writes are used to implement Forced Unit Access and + * REQ_META accesses, and are supported only on MMCs. 
+ */ + bool do_rel_wr = ((req->cmd_flags & REQ_FUA) || + (req->cmd_flags & REQ_META)) && + (rq_data_dir(req) == WRITE) && + (md->flags & MMC_BLK_REL_WR); do { - struct mmc_command cmd; + struct mmc_command cmd = {0}; u32 readcmd, writecmd, status = 0; memset(&brq, 0, sizeof(struct mmc_blk_request)); @@ -374,12 +719,12 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req) if (disable_multi && brq.data.blocks > 1) brq.data.blocks = 1; - if (brq.data.blocks > 1) { + if (brq.data.blocks > 1 || do_rel_wr) { /* SPI multiblock writes terminate using a special * token, not a STOP_TRANSMISSION request. */ - if (!mmc_host_is_spi(card->host) - || rq_data_dir(req) == READ) + if (!mmc_host_is_spi(card->host) || + rq_data_dir(req) == READ) brq.mrq.stop = &brq.stop; readcmd = MMC_READ_MULTIPLE_BLOCK; writecmd = MMC_WRITE_MULTIPLE_BLOCK; @@ -396,6 +741,38 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req) brq.data.flags |= MMC_DATA_WRITE; } + if (do_rel_wr) + mmc_apply_rel_rw(&brq, card, req); + + /* + * Pre-defined multi-block transfers are preferable to + * open ended-ones (and necessary for reliable writes). + * However, it is not sufficient to just send CMD23, + * and avoid the final CMD12, as on an error condition + * CMD12 (stop) needs to be sent anyway. This, coupled + * with Auto-CMD23 enhancements provided by some + * hosts, means that the complexity of dealing + * with this is best left to the host. If CMD23 is + * supported by card and host, we'll fill sbc in and let + * the host deal with handling it correctly. This means + * that for hosts that don't expose MMC_CAP_CMD23, no + * change of behavior will be observed. + * + * N.B: Some MMC cards experience perf degradation. + * We'll avoid using CMD23-bounded multiblock writes for + * these, while retaining features like reliable writes. + */ + + if ((md->flags & MMC_BLK_CMD23) && + mmc_op_multi(brq.cmd.opcode) && + (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) { + brq.sbc.opcode = MMC_SET_BLOCK_COUNT; + brq.sbc.arg = brq.data.blocks | + (do_rel_wr ? (1 << 31) : 0); + brq.sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; + brq.mrq.sbc = &brq.sbc; + } + mmc_set_data_timeout(&brq.data, card); brq.data.sg = mq->sg; @@ -431,7 +808,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req) * until later as we need to wait for the card to leave * programming mode even when things go wrong. 
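The brq.sbc setup above encodes the whole pre-defined transfer in the CMD23 argument: block count in the low bits, bit 31 set when a reliable write is requested. A minimal sketch of that encoding:

#include <stdio.h>
#include <stdint.h>

/* SET_BLOCK_COUNT (CMD23) argument as built for brq.sbc above. */
static uint32_t set_block_count_arg(uint32_t blocks, int reliable_write)
{
        return blocks | (reliable_write ? (1u << 31) : 0);
}

int main(void)
{
        printf("%#010x\n", set_block_count_arg(16, 1));  /* 0x80000010 */
        printf("%#010x\n", set_block_count_arg(16, 0));  /* 0x00000010 */
        return 0;
}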
*/ - if (brq.cmd.error || brq.data.error || brq.stop.error) { + if (brq.sbc.error || brq.cmd.error || + brq.data.error || brq.stop.error) { if (brq.data.blocks > 1 && rq_data_dir(req) == READ) { /* Redo read one sector at a time */ printk(KERN_WARNING "%s: retrying using single " @@ -442,6 +820,13 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req) status = get_card_status(card, req); } + if (brq.sbc.error) { + printk(KERN_ERR "%s: error %d sending SET_BLOCK_COUNT " + "command, response %#x, card status %#x\n", + req->rq_disk->disk_name, brq.sbc.error, + brq.sbc.resp[0], status); + } + if (brq.cmd.error) { printk(KERN_ERR "%s: error %d sending read/write " "command, response %#x, card status %#x\n", @@ -520,8 +905,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req) spin_unlock_irq(&md->lock); } while (ret); - mmc_release_host(card->host); - return 1; cmd_err: @@ -548,8 +931,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req) spin_unlock_irq(&md->lock); } - mmc_release_host(card->host); - spin_lock_irq(&md->lock); while (ret) ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); @@ -560,14 +941,31 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req) static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) { + int ret; + struct mmc_blk_data *md = mq->data; + struct mmc_card *card = md->queue.card; + + mmc_claim_host(card->host); + ret = mmc_blk_part_switch(card, md); + if (ret) { + ret = 0; + goto out; + } + if (req->cmd_flags & REQ_DISCARD) { if (req->cmd_flags & REQ_SECURE) - return mmc_blk_issue_secdiscard_rq(mq, req); + ret = mmc_blk_issue_secdiscard_rq(mq, req); else - return mmc_blk_issue_discard_rq(mq, req); + ret = mmc_blk_issue_discard_rq(mq, req); + } else if (req->cmd_flags & REQ_FLUSH) { + ret = mmc_blk_issue_flush(mq, req); } else { - return mmc_blk_issue_rw_rq(mq, req); + ret = mmc_blk_issue_rw_rq(mq, req); } + +out: + mmc_release_host(card->host); + return ret; } static inline int mmc_blk_readonly(struct mmc_card *card) @@ -576,7 +974,11 @@ static inline int mmc_blk_readonly(struct mmc_card *card) !(card->csd.cmdclass & CCC_BLOCK_WRITE); } -static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) +static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, + struct device *parent, + sector_t size, + bool default_ro, + const char *subname) { struct mmc_blk_data *md; int devidx, ret; @@ -592,6 +994,19 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) goto out; } + /* + * !subname implies we are creating main mmc_blk_data that will be + * associated with mmc_card with mmc_set_drvdata. Due to device + * partitions, devidx will not coincide with a per-physical card + * index anymore so we keep track of a name index. 
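Stepping back to the MMC_IOC_CMD ioctl wired up earlier in this file: it can be driven from user space roughly as below. This is a sketch that assumes the struct mmc_ioc_cmd layout and MMC_IOC_CMD definition this series exports in <linux/mmc/ioctl.h>; the MMC_RSP_*/MMC_CMD_* bits are not exported, so their values are copied here from the kernel's include/linux/mmc/core.h and should be treated as assumptions.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/major.h>
#include <linux/mmc/ioctl.h>

/* Values mirrored from the (non-exported) kernel headers. */
#define MMC_RSP_PRESENT         (1 << 0)
#define MMC_RSP_CRC             (1 << 2)
#define MMC_RSP_OPCODE          (1 << 4)
#define MMC_CMD_ADTC            (1 << 5)
#define MMC_RSP_R1              (MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
#define MMC_READ_SINGLE_BLOCK   17

int main(void)
{
        unsigned char buf[512];
        struct mmc_ioc_cmd ic;
        int fd = open("/dev/mmcblk0", O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(&ic, 0, sizeof(ic));
        ic.write_flag = 0;                      /* read */
        ic.opcode = MMC_READ_SINGLE_BLOCK;      /* CMD17 */
        ic.arg = 0;                             /* block 0 on a block-addressed card */
        ic.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
        ic.blksz = 512;
        ic.blocks = 1;
        mmc_ioc_cmd_set_data(ic, buf);

        if (ioctl(fd, MMC_IOC_CMD, &ic) < 0)
                perror("MMC_IOC_CMD");
        else
                printf("CMD17 response: %08x\n", ic.response[0]);

        close(fd);
        return 0;
}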
+ */ + if (!subname) { + md->name_idx = find_first_zero_bit(name_use, max_devices); + __set_bit(md->name_idx, name_use); + } + else + md->name_idx = ((struct mmc_blk_data *) + dev_to_disk(parent)->private_data)->name_idx; /* * Set the read-only status based on the supported commands @@ -606,6 +1021,7 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) } spin_lock_init(&md->lock); + INIT_LIST_HEAD(&md->part); md->usage = 1; ret = mmc_init_queue(&md->queue, card, &md->lock); @@ -620,8 +1036,8 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) md->disk->fops = &mmc_bdops; md->disk->private_data = md; md->disk->queue = md->queue.queue; - md->disk->driverfs_dev = &card->dev; - set_disk_ro(md->disk, md->read_only); + md->disk->driverfs_dev = parent; + set_disk_ro(md->disk, md->read_only || default_ro); /* * As discussed on lkml, GENHD_FL_REMOVABLE should: @@ -636,32 +1052,107 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) */ snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), - "mmcblk%d", devidx); + "mmcblk%d%s", md->name_idx, subname ? subname : ""); blk_queue_logical_block_size(md->queue.queue, 512); + set_capacity(md->disk, size); + + if (mmc_host_cmd23(card->host)) { + if (mmc_card_mmc(card) || + (mmc_card_sd(card) && + card->scr.cmds & SD_SCR_CMD23_SUPPORT)) + md->flags |= MMC_BLK_CMD23; + } + + if (mmc_card_mmc(card) && + md->flags & MMC_BLK_CMD23 && + ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) || + card->ext_csd.rel_sectors)) { + md->flags |= MMC_BLK_REL_WR; + blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA); + } + + return md; + + err_putdisk: + put_disk(md->disk); + err_kfree: + kfree(md); + out: + return ERR_PTR(ret); +} + +static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) +{ + sector_t size; + struct mmc_blk_data *md; if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { /* * The EXT_CSD sector count is in number or 512 byte * sectors. */ - set_capacity(md->disk, card->ext_csd.sectors); + size = card->ext_csd.sectors; } else { /* * The CSD capacity field is in units of read_blkbits. * set_capacity takes units of 512 bytes. 
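The comment above is a plain unit conversion: set_capacity() wants 512-byte sectors, the EXT_CSD sector count already is one, and the CSD capacity is counted in read_blkbits-sized blocks. A worked sketch of the calculation applied just below:

#include <stdio.h>

typedef unsigned long long sector_t;

static sector_t mmc_capacity_sectors(int block_addressed,
                                     unsigned int ext_csd_sectors,
                                     unsigned int csd_capacity,
                                     unsigned int read_blkbits)
{
        if (block_addressed)
                return ext_csd_sectors;         /* already 512-byte sectors */
        return (sector_t)csd_capacity << (read_blkbits - 9);
}

int main(void)
{
        /* a byte-addressed card: capacity = 1048576 blocks of 1 KiB each */
        printf("%llu sectors\n",
               mmc_capacity_sectors(0, 0, 1048576, 10));        /* 2097152 */
        return 0;
}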
*/ - set_capacity(md->disk, - card->csd.capacity << (card->csd.read_blkbits - 9)); + size = card->csd.capacity << (card->csd.read_blkbits - 9); } + + md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL); return md; +} - err_putdisk: - put_disk(md->disk); - err_kfree: - kfree(md); - out: - return ERR_PTR(ret); +static int mmc_blk_alloc_part(struct mmc_card *card, + struct mmc_blk_data *md, + unsigned int part_type, + sector_t size, + bool default_ro, + const char *subname) +{ + char cap_str[10]; + struct mmc_blk_data *part_md; + + part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro, + subname); + if (IS_ERR(part_md)) + return PTR_ERR(part_md); + part_md->part_type = part_type; + list_add(&part_md->part, &md->part); + + string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2, + cap_str, sizeof(cap_str)); + printk(KERN_INFO "%s: %s %s partition %u %s\n", + part_md->disk->disk_name, mmc_card_id(card), + mmc_card_name(card), part_md->part_type, cap_str); + return 0; +} + +static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) +{ + int ret = 0; + + if (!mmc_card_mmc(card)) + return 0; + + if (card->ext_csd.boot_size) { + ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT0, + card->ext_csd.boot_size >> 9, + true, + "boot0"); + if (ret) + return ret; + ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT1, + card->ext_csd.boot_size >> 9, + true, + "boot1"); + if (ret) + return ret; + } + + return ret; } static int @@ -682,9 +1173,81 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card) return 0; } +static void mmc_blk_remove_req(struct mmc_blk_data *md) +{ + if (md) { + if (md->disk->flags & GENHD_FL_UP) { + device_remove_file(disk_to_dev(md->disk), &md->force_ro); + + /* Stop new requests from getting into the queue */ + del_gendisk(md->disk); + } + + /* Then flush out any already in there */ + mmc_cleanup_queue(&md->queue); + mmc_blk_put(md); + } +} + +static void mmc_blk_remove_parts(struct mmc_card *card, + struct mmc_blk_data *md) +{ + struct list_head *pos, *q; + struct mmc_blk_data *part_md; + + __clear_bit(md->name_idx, name_use); + list_for_each_safe(pos, q, &md->part) { + part_md = list_entry(pos, struct mmc_blk_data, part); + list_del(pos); + mmc_blk_remove_req(part_md); + } +} + +static int mmc_add_disk(struct mmc_blk_data *md) +{ + int ret; + + add_disk(md->disk); + md->force_ro.show = force_ro_show; + md->force_ro.store = force_ro_store; + sysfs_attr_init(&md->force_ro.attr); + md->force_ro.attr.name = "force_ro"; + md->force_ro.attr.mode = S_IRUGO | S_IWUSR; + ret = device_create_file(disk_to_dev(md->disk), &md->force_ro); + if (ret) + del_gendisk(md->disk); + + return ret; +} + +static const struct mmc_fixup blk_fixups[] = +{ + MMC_FIXUP("SEM02G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), + MMC_FIXUP("SEM04G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), + MMC_FIXUP("SEM08G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), + MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), + MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38), + + /* + * Some MMC cards experience performance degradation with CMD23 + * instead of CMD12-bounded multiblock transfers. For now we'll + * black list what's bad... + * - Certain Toshiba cards. + * + * N.B. This doesn't affect SD cards. 
+ */ + MMC_FIXUP("MMC08G", 0x11, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_BLK_NO_CMD23), + MMC_FIXUP("MMC16G", 0x11, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_BLK_NO_CMD23), + MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_BLK_NO_CMD23), + END_FIXUP +}; + static int mmc_blk_probe(struct mmc_card *card) { - struct mmc_blk_data *md; + struct mmc_blk_data *md, *part_md; int err; char cap_str[10]; @@ -708,14 +1271,24 @@ static int mmc_blk_probe(struct mmc_card *card) md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), cap_str, md->read_only ? "(ro)" : ""); + if (mmc_blk_alloc_parts(card, md)) + goto out; + mmc_set_drvdata(card, md); - add_disk(md->disk); + mmc_fixup_device(card, blk_fixups); + + if (mmc_add_disk(md)) + goto out; + + list_for_each_entry(part_md, &md->part, part) { + if (mmc_add_disk(part_md)) + goto out; + } return 0; out: - mmc_cleanup_queue(&md->queue); - mmc_blk_put(md); - + mmc_blk_remove_parts(card, md); + mmc_blk_remove_req(md); return err; } @@ -723,36 +1296,43 @@ static void mmc_blk_remove(struct mmc_card *card) { struct mmc_blk_data *md = mmc_get_drvdata(card); - if (md) { - /* Stop new requests from getting into the queue */ - del_gendisk(md->disk); - - /* Then flush out any already in there */ - mmc_cleanup_queue(&md->queue); - - mmc_blk_put(md); - } + mmc_blk_remove_parts(card, md); + mmc_blk_remove_req(md); mmc_set_drvdata(card, NULL); } #ifdef CONFIG_PM static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state) { + struct mmc_blk_data *part_md; struct mmc_blk_data *md = mmc_get_drvdata(card); if (md) { mmc_queue_suspend(&md->queue); + list_for_each_entry(part_md, &md->part, part) { + mmc_queue_suspend(&part_md->queue); + } } return 0; } static int mmc_blk_resume(struct mmc_card *card) { + struct mmc_blk_data *part_md; struct mmc_blk_data *md = mmc_get_drvdata(card); if (md) { mmc_blk_set_blksize(md, card); + + /* + * Resume involves the card going into idle state, + * so current partition is always the main one. 
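mmc_blk_part_switch() earlier in this file (and the resume note just above, after which part_curr is reset because the card comes out of idle in its main area) only rewrites the low PARTITION_ACCESS bits of EXT_CSD_PART_CONFIG. A sketch of that byte manipulation, assuming the usual 0x7 access mask (EXT_CSD_PART_CONFIG_ACC_MASK in the kernel headers):

#include <stdio.h>

#define PART_CONFIG_ACC_MASK    0x7

static unsigned char part_config_for(unsigned char part_config,
                                     unsigned char part_type)
{
        part_config &= ~PART_CONFIG_ACC_MASK;   /* clear PARTITION_ACCESS */
        part_config |= part_type;               /* 0 = user, 1/2 = boot areas */
        return part_config;
}

int main(void)
{
        /* switch access from the user area to the first boot partition while
         * leaving the boot-enable bits of the byte untouched */
        printf("%#x\n", part_config_for(0x48, 0x1));    /* 0x49 */
        return 0;
}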
+ */ + md->part_curr = md->part_type; mmc_queue_resume(&md->queue); + list_for_each_entry(part_md, &md->part, part) { + mmc_queue_resume(&part_md->queue); + } } return 0; } diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index abc1a63bcc5..233cdfae92f 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c @@ -212,7 +212,7 @@ static int mmc_test_busy(struct mmc_command *cmd) static int mmc_test_wait_busy(struct mmc_test_card *test) { int ret, busy; - struct mmc_command cmd; + struct mmc_command cmd = {0}; busy = 0; do { @@ -246,18 +246,13 @@ static int mmc_test_buffer_transfer(struct mmc_test_card *test, { int ret; - struct mmc_request mrq; - struct mmc_command cmd; - struct mmc_command stop; - struct mmc_data data; + struct mmc_request mrq = {0}; + struct mmc_command cmd = {0}; + struct mmc_command stop = {0}; + struct mmc_data data = {0}; struct scatterlist sg; - memset(&mrq, 0, sizeof(struct mmc_request)); - memset(&cmd, 0, sizeof(struct mmc_command)); - memset(&data, 0, sizeof(struct mmc_data)); - memset(&stop, 0, sizeof(struct mmc_command)); - mrq.cmd = &cmd; mrq.data = &data; mrq.stop = &stop; @@ -731,15 +726,10 @@ static int mmc_test_simple_transfer(struct mmc_test_card *test, struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, unsigned blocks, unsigned blksz, int write) { - struct mmc_request mrq; - struct mmc_command cmd; - struct mmc_command stop; - struct mmc_data data; - - memset(&mrq, 0, sizeof(struct mmc_request)); - memset(&cmd, 0, sizeof(struct mmc_command)); - memset(&data, 0, sizeof(struct mmc_data)); - memset(&stop, 0, sizeof(struct mmc_command)); + struct mmc_request mrq = {0}; + struct mmc_command cmd = {0}; + struct mmc_command stop = {0}; + struct mmc_data data = {0}; mrq.cmd = &cmd; mrq.data = &data; @@ -761,18 +751,13 @@ static int mmc_test_simple_transfer(struct mmc_test_card *test, static int mmc_test_broken_transfer(struct mmc_test_card *test, unsigned blocks, unsigned blksz, int write) { - struct mmc_request mrq; - struct mmc_command cmd; - struct mmc_command stop; - struct mmc_data data; + struct mmc_request mrq = {0}; + struct mmc_command cmd = {0}; + struct mmc_command stop = {0}; + struct mmc_data data = {0}; struct scatterlist sg; - memset(&mrq, 0, sizeof(struct mmc_request)); - memset(&cmd, 0, sizeof(struct mmc_command)); - memset(&data, 0, sizeof(struct mmc_data)); - memset(&stop, 0, sizeof(struct mmc_command)); - mrq.cmd = &cmd; mrq.data = &data; mrq.stop = &stop; @@ -1401,8 +1386,9 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz, */ static int mmc_test_area_fill(struct mmc_test_card *test) { - return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr, - 1, 0, 0); + struct mmc_test_area *t = &test->area; + + return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0); } /* @@ -1415,7 +1401,7 @@ static int mmc_test_area_erase(struct mmc_test_card *test) if (!mmc_can_erase(test->card)) return 0; - return mmc_erase(test->card, t->dev_addr, test->area.max_sz >> 9, + return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9, MMC_ERASE_ARG); } @@ -1542,8 +1528,10 @@ static int mmc_test_area_prepare_fill(struct mmc_test_card *test) static int mmc_test_best_performance(struct mmc_test_card *test, int write, int max_scatter) { - return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr, - write, max_scatter, 1); + struct mmc_test_area *t = &test->area; + + return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write, + max_scatter, 1); } /* @@ -1583,18 
+1571,19 @@ static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test) */ static int mmc_test_profile_read_perf(struct mmc_test_card *test) { + struct mmc_test_area *t = &test->area; unsigned long sz; unsigned int dev_addr; int ret; - for (sz = 512; sz < test->area.max_tfr; sz <<= 1) { - dev_addr = test->area.dev_addr + (sz >> 9); + for (sz = 512; sz < t->max_tfr; sz <<= 1) { + dev_addr = t->dev_addr + (sz >> 9); ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1); if (ret) return ret; } - sz = test->area.max_tfr; - dev_addr = test->area.dev_addr; + sz = t->max_tfr; + dev_addr = t->dev_addr; return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1); } @@ -1603,6 +1592,7 @@ static int mmc_test_profile_read_perf(struct mmc_test_card *test) */ static int mmc_test_profile_write_perf(struct mmc_test_card *test) { + struct mmc_test_area *t = &test->area; unsigned long sz; unsigned int dev_addr; int ret; @@ -1610,8 +1600,8 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test) ret = mmc_test_area_erase(test); if (ret) return ret; - for (sz = 512; sz < test->area.max_tfr; sz <<= 1) { - dev_addr = test->area.dev_addr + (sz >> 9); + for (sz = 512; sz < t->max_tfr; sz <<= 1) { + dev_addr = t->dev_addr + (sz >> 9); ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1); if (ret) return ret; @@ -1619,8 +1609,8 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test) ret = mmc_test_area_erase(test); if (ret) return ret; - sz = test->area.max_tfr; - dev_addr = test->area.dev_addr; + sz = t->max_tfr; + dev_addr = t->dev_addr; return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1); } @@ -1629,6 +1619,7 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test) */ static int mmc_test_profile_trim_perf(struct mmc_test_card *test) { + struct mmc_test_area *t = &test->area; unsigned long sz; unsigned int dev_addr; struct timespec ts1, ts2; @@ -1640,8 +1631,8 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test) if (!mmc_can_erase(test->card)) return RESULT_UNSUP_HOST; - for (sz = 512; sz < test->area.max_sz; sz <<= 1) { - dev_addr = test->area.dev_addr + (sz >> 9); + for (sz = 512; sz < t->max_sz; sz <<= 1) { + dev_addr = t->dev_addr + (sz >> 9); getnstimeofday(&ts1); ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); if (ret) @@ -1649,7 +1640,7 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test) getnstimeofday(&ts2); mmc_test_print_rate(test, sz, &ts1, &ts2); } - dev_addr = test->area.dev_addr; + dev_addr = t->dev_addr; getnstimeofday(&ts1); ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG); if (ret) @@ -1661,12 +1652,13 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test) static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz) { + struct mmc_test_area *t = &test->area; unsigned int dev_addr, i, cnt; struct timespec ts1, ts2; int ret; - cnt = test->area.max_sz / sz; - dev_addr = test->area.dev_addr; + cnt = t->max_sz / sz; + dev_addr = t->dev_addr; getnstimeofday(&ts1); for (i = 0; i < cnt; i++) { ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0); @@ -1684,20 +1676,22 @@ static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz) */ static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test) { + struct mmc_test_area *t = &test->area; unsigned long sz; int ret; - for (sz = 512; sz < test->area.max_tfr; sz <<= 1) { + for (sz = 512; sz < t->max_tfr; sz <<= 1) { ret = mmc_test_seq_read_perf(test, sz); if (ret) return ret; } - sz = 
test->area.max_tfr; + sz = t->max_tfr; return mmc_test_seq_read_perf(test, sz); } static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz) { + struct mmc_test_area *t = &test->area; unsigned int dev_addr, i, cnt; struct timespec ts1, ts2; int ret; @@ -1705,8 +1699,8 @@ static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz) ret = mmc_test_area_erase(test); if (ret) return ret; - cnt = test->area.max_sz / sz; - dev_addr = test->area.dev_addr; + cnt = t->max_sz / sz; + dev_addr = t->dev_addr; getnstimeofday(&ts1); for (i = 0; i < cnt; i++) { ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0); @@ -1724,15 +1718,16 @@ static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz) */ static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test) { + struct mmc_test_area *t = &test->area; unsigned long sz; int ret; - for (sz = 512; sz < test->area.max_tfr; sz <<= 1) { + for (sz = 512; sz < t->max_tfr; sz <<= 1) { ret = mmc_test_seq_write_perf(test, sz); if (ret) return ret; } - sz = test->area.max_tfr; + sz = t->max_tfr; return mmc_test_seq_write_perf(test, sz); } @@ -1741,6 +1736,7 @@ static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test) */ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test) { + struct mmc_test_area *t = &test->area; unsigned long sz; unsigned int dev_addr, i, cnt; struct timespec ts1, ts2; @@ -1752,15 +1748,15 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test) if (!mmc_can_erase(test->card)) return RESULT_UNSUP_HOST; - for (sz = 512; sz <= test->area.max_sz; sz <<= 1) { + for (sz = 512; sz <= t->max_sz; sz <<= 1) { ret = mmc_test_area_erase(test); if (ret) return ret; ret = mmc_test_area_fill(test); if (ret) return ret; - cnt = test->area.max_sz / sz; - dev_addr = test->area.dev_addr; + cnt = t->max_sz / sz; + dev_addr = t->dev_addr; getnstimeofday(&ts1); for (i = 0; i < cnt; i++) { ret = mmc_erase(test->card, dev_addr, sz >> 9, @@ -1823,11 +1819,12 @@ static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print, static int mmc_test_random_perf(struct mmc_test_card *test, int write) { + struct mmc_test_area *t = &test->area; unsigned int next; unsigned long sz; int ret; - for (sz = 512; sz < test->area.max_tfr; sz <<= 1) { + for (sz = 512; sz < t->max_tfr; sz <<= 1) { /* * When writing, try to get more consistent results by running * the test twice with exactly the same I/O but outputting the @@ -1844,7 +1841,7 @@ static int mmc_test_random_perf(struct mmc_test_card *test, int write) if (ret) return ret; } - sz = test->area.max_tfr; + sz = t->max_tfr; if (write) { next = rnd_next; ret = mmc_test_rnd_perf(test, write, 0, sz); @@ -1874,17 +1871,18 @@ static int mmc_test_random_write_perf(struct mmc_test_card *test) static int mmc_test_seq_perf(struct mmc_test_card *test, int write, unsigned int tot_sz, int max_scatter) { + struct mmc_test_area *t = &test->area; unsigned int dev_addr, i, cnt, sz, ssz; struct timespec ts1, ts2; int ret; - sz = test->area.max_tfr; + sz = t->max_tfr; + /* * In the case of a maximally scattered transfer, the maximum transfer * size is further limited by using PAGE_SIZE segments. 
*/ if (max_scatter) { - struct mmc_test_area *t = &test->area; unsigned long max_tfr; if (t->max_seg_sz >= PAGE_SIZE) diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index 2ae727568df..c07322c2658 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c @@ -343,18 +343,14 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq) */ void mmc_queue_bounce_pre(struct mmc_queue *mq) { - unsigned long flags; - if (!mq->bounce_buf) return; if (rq_data_dir(mq->req) != WRITE) return; - local_irq_save(flags); sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len, mq->bounce_buf, mq->sg[0].length); - local_irq_restore(flags); } /* @@ -363,17 +359,13 @@ void mmc_queue_bounce_pre(struct mmc_queue *mq) */ void mmc_queue_bounce_post(struct mmc_queue *mq) { - unsigned long flags; - if (!mq->bounce_buf) return; if (rq_data_dir(mq->req) != READ) return; - local_irq_save(flags); sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len, mq->bounce_buf, mq->sg[0].length); - local_irq_restore(flags); } diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index d6d62fd07ee..393d817ed04 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -274,8 +274,12 @@ int mmc_add_card(struct mmc_card *card) break; case MMC_TYPE_SD: type = "SD"; - if (mmc_card_blockaddr(card)) - type = "SDHC"; + if (mmc_card_blockaddr(card)) { + if (mmc_card_ext_capacity(card)) + type = "SDXC"; + else + type = "SDHC"; + } break; case MMC_TYPE_SDIO: type = "SDIO"; @@ -299,7 +303,8 @@ int mmc_add_card(struct mmc_card *card) } else { printk(KERN_INFO "%s: new %s%s%s card at address %04x\n", mmc_hostname(card->host), - mmc_card_highspeed(card) ? "high speed " : "", + mmc_sd_card_uhs(card) ? "ultra high speed " : + (mmc_card_highspeed(card) ? "high speed " : ""), mmc_card_ddr_mode(card) ? "DDR " : "", type, card->rca); } diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 1f453acc868..68091dda3f3 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -236,12 +236,10 @@ EXPORT_SYMBOL(mmc_wait_for_req); */ int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries) { - struct mmc_request mrq; + struct mmc_request mrq = {0}; WARN_ON(!host->claimed); - memset(&mrq, 0, sizeof(struct mmc_request)); - memset(cmd->resp, 0, sizeof(cmd->resp)); cmd->retries = retries; @@ -720,22 +718,12 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) } /* - * Change data bus width and DDR mode of a host. - */ -void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width, - unsigned int ddr) -{ - host->ios.bus_width = width; - host->ios.ddr = ddr; - mmc_set_ios(host); -} - -/* * Change data bus width of a host. */ void mmc_set_bus_width(struct mmc_host *host, unsigned int width) { - mmc_set_bus_width_ddr(host, width, MMC_SDR_MODE); + host->ios.bus_width = width; + mmc_set_ios(host); } /** @@ -944,6 +932,38 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr) return ocr; } +int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11) +{ + struct mmc_command cmd = {0}; + int err = 0; + + BUG_ON(!host); + + /* + * Send CMD11 only if the request is to switch the card to + * 1.8V signalling. 
+ */ + if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) { + cmd.opcode = SD_SWITCH_VOLTAGE; + cmd.arg = 0; + cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; + + err = mmc_wait_for_cmd(host, &cmd, 0); + if (err) + return err; + + if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) + return -EIO; + } + + host->ios.signal_voltage = signal_voltage; + + if (host->ops->start_signal_voltage_switch) + err = host->ops->start_signal_voltage_switch(host, &host->ios); + + return err; +} + /* * Select timing parameters for host. */ @@ -954,6 +974,15 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing) } /* + * Select appropriate driver type for host. + */ +void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type) +{ + host->ios.drv_type = drv_type; + mmc_set_ios(host); +} + +/* * Apply power to the MMC stack. This is a two-stage process. * First, we enable power to the card without the clock running. * We then wait a bit for the power to stabilise. Finally, @@ -1187,9 +1216,8 @@ void mmc_init_erase(struct mmc_card *card) } } -static void mmc_set_mmc_erase_timeout(struct mmc_card *card, - struct mmc_command *cmd, - unsigned int arg, unsigned int qty) +static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card, + unsigned int arg, unsigned int qty) { unsigned int erase_timeout; @@ -1246,44 +1274,48 @@ static void mmc_set_mmc_erase_timeout(struct mmc_card *card, if (mmc_host_is_spi(card->host) && erase_timeout < 1000) erase_timeout = 1000; - cmd->erase_timeout = erase_timeout; + return erase_timeout; } -static void mmc_set_sd_erase_timeout(struct mmc_card *card, - struct mmc_command *cmd, unsigned int arg, - unsigned int qty) +static unsigned int mmc_sd_erase_timeout(struct mmc_card *card, + unsigned int arg, + unsigned int qty) { + unsigned int erase_timeout; + if (card->ssr.erase_timeout) { /* Erase timeout specified in SD Status Register (SSR) */ - cmd->erase_timeout = card->ssr.erase_timeout * qty + - card->ssr.erase_offset; + erase_timeout = card->ssr.erase_timeout * qty + + card->ssr.erase_offset; } else { /* * Erase timeout not specified in SD Status Register (SSR) so * use 250ms per write block. 
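The rule stated above and applied right below, as a standalone sketch: trust the SSR erase figures when the card reports them, otherwise budget 250 ms per write block, and never return less than one second.

#include <stdio.h>

static unsigned int sd_erase_timeout_ms(unsigned int ssr_erase_timeout,
                                        unsigned int ssr_erase_offset,
                                        unsigned int qty)
{
        unsigned int timeout;

        if (ssr_erase_timeout)
                timeout = ssr_erase_timeout * qty + ssr_erase_offset;
        else
                timeout = 250 * qty;            /* 250 ms per write block */

        return timeout < 1000 ? 1000 : timeout; /* 1 s floor */
}

int main(void)
{
        printf("%u ms\n", sd_erase_timeout_ms(0, 0, 2));        /* 1000 */
        printf("%u ms\n", sd_erase_timeout_ms(0, 0, 100));      /* 25000 */
        return 0;
}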
*/ - cmd->erase_timeout = 250 * qty; + erase_timeout = 250 * qty; } /* Must not be less than 1 second */ - if (cmd->erase_timeout < 1000) - cmd->erase_timeout = 1000; + if (erase_timeout < 1000) + erase_timeout = 1000; + + return erase_timeout; } -static void mmc_set_erase_timeout(struct mmc_card *card, - struct mmc_command *cmd, unsigned int arg, - unsigned int qty) +static unsigned int mmc_erase_timeout(struct mmc_card *card, + unsigned int arg, + unsigned int qty) { if (mmc_card_sd(card)) - mmc_set_sd_erase_timeout(card, cmd, arg, qty); + return mmc_sd_erase_timeout(card, arg, qty); else - mmc_set_mmc_erase_timeout(card, cmd, arg, qty); + return mmc_mmc_erase_timeout(card, arg, qty); } static int mmc_do_erase(struct mmc_card *card, unsigned int from, unsigned int to, unsigned int arg) { - struct mmc_command cmd; + struct mmc_command cmd = {0}; unsigned int qty = 0; int err; @@ -1317,7 +1349,6 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from, to <<= 9; } - memset(&cmd, 0, sizeof(struct mmc_command)); if (mmc_card_sd(card)) cmd.opcode = SD_ERASE_WR_BLK_START; else @@ -1351,7 +1382,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from, cmd.opcode = MMC_ERASE; cmd.arg = arg; cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; - mmc_set_erase_timeout(card, &cmd, arg, qty); + cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty); err = mmc_wait_for_cmd(card->host, &cmd, 0); if (err) { printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n", @@ -1487,12 +1518,11 @@ EXPORT_SYMBOL(mmc_erase_group_aligned); int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen) { - struct mmc_command cmd; + struct mmc_command cmd = {0}; if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card)) return 0; - memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = MMC_SET_BLOCKLEN; cmd.arg = blocklen; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; @@ -1578,7 +1608,7 @@ void mmc_rescan(struct work_struct *work) for (i = 0; i < ARRAY_SIZE(freqs); i++) { if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) break; - if (freqs[i] < host->f_min) + if (freqs[i] <= host->f_min) break; } mmc_release_host(host); @@ -1746,7 +1776,7 @@ int mmc_suspend_host(struct mmc_host *host) } mmc_bus_put(host); - if (!err && !(host->pm_flags & MMC_PM_KEEP_POWER)) + if (!err && !mmc_card_keep_power(host)) mmc_power_off(host); return err; @@ -1764,7 +1794,7 @@ int mmc_resume_host(struct mmc_host *host) mmc_bus_get(host); if (host->bus_ops && !host->bus_dead) { - if (!(host->pm_flags & MMC_PM_KEEP_POWER)) { + if (!mmc_card_keep_power(host)) { mmc_power_up(host); mmc_select_voltage(host, host->ocr); /* @@ -1789,6 +1819,7 @@ int mmc_resume_host(struct mmc_host *host) err = 0; } } + host->pm_flags &= ~MMC_PM_KEEP_POWER; mmc_bus_put(host); return err; diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index 20b1c0831ea..d9411ed2a39 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -38,10 +38,11 @@ void mmc_ungate_clock(struct mmc_host *host); void mmc_set_ungated(struct mmc_host *host); void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode); void mmc_set_bus_width(struct mmc_host *host, unsigned int width); -void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width, - unsigned int ddr); u32 mmc_select_voltage(struct mmc_host *host, u32 ocr); +int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, + bool cmd11); void mmc_set_timing(struct mmc_host *host, unsigned int timing); +void mmc_set_driver_type(struct 
mmc_host *host, unsigned int drv_type); static inline void mmc_delay(unsigned int ms) { @@ -61,8 +62,6 @@ int mmc_attach_mmc(struct mmc_host *host); int mmc_attach_sd(struct mmc_host *host); int mmc_attach_sdio(struct mmc_host *host); -void mmc_fixup_device(struct mmc_card *card); - /* Module parameters */ extern int use_spi_crc; diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 461e6a17fb9..b29d3e8fd3a 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -325,12 +325,12 @@ int mmc_add_host(struct mmc_host *host) WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) && !host->ops->enable_sdio_irq); - led_trigger_register_simple(dev_name(&host->class_dev), &host->led); - err = device_add(&host->class_dev); if (err) return err; + led_trigger_register_simple(dev_name(&host->class_dev), &host->led); + #ifdef CONFIG_DEBUG_FS mmc_add_host_debugfs(host); #endif diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 772d0d0a541..2a7e43bc796 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -20,6 +20,7 @@ #include "core.h" #include "bus.h" #include "mmc_ops.h" +#include "sd_ops.h" static const unsigned int tran_exp[] = { 10000, 100000, 1000000, 10000000, @@ -173,14 +174,17 @@ static int mmc_decode_csd(struct mmc_card *card) } /* - * Read and decode extended CSD. + * Read extended CSD. */ -static int mmc_read_ext_csd(struct mmc_card *card) +static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd) { int err; u8 *ext_csd; BUG_ON(!card); + BUG_ON(!new_ext_csd); + + *new_ext_csd = NULL; if (card->csd.mmca_vsn < CSD_SPEC_VER_4) return 0; @@ -198,12 +202,15 @@ static int mmc_read_ext_csd(struct mmc_card *card) err = mmc_send_ext_csd(card, ext_csd); if (err) { + kfree(ext_csd); + *new_ext_csd = NULL; + /* If the host or the card can't do the switch, * fail more gracefully. */ if ((err != -EINVAL) && (err != -ENOSYS) && (err != -EFAULT)) - goto out; + return err; /* * High capacity cards should have this "magic" size @@ -221,9 +228,23 @@ static int mmc_read_ext_csd(struct mmc_card *card) mmc_hostname(card->host)); err = 0; } + } else + *new_ext_csd = ext_csd; - goto out; - } + return err; +} + +/* + * Decode extended CSD. + */ +static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) +{ + int err = 0; + + BUG_ON(!card); + + if (!ext_csd) + return 0; /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */ if (card->csd.structure == 3) { @@ -288,6 +309,10 @@ static int mmc_read_ext_csd(struct mmc_card *card) if (card->ext_csd.rev >= 3) { u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT]; + card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG]; + + /* EXT_CSD value is in units of 10ms, but we store in ms */ + card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME]; /* Sleep / awake timeout in 100ns units */ if (sa_shift > 0 && sa_shift <= 0x17) @@ -299,6 +324,14 @@ static int mmc_read_ext_csd(struct mmc_card *card) ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]; card->ext_csd.hc_erase_size = ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10; + + card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C]; + + /* + * There are two boot regions of equal size, defined in + * multiples of 128K. 
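A worked example of the 128 KiB granularity mentioned above: the shift by 17 that follows turns the EXT_CSD_BOOT_MULT byte into bytes, and mmc_blk_alloc_parts() later shifts right by 9 to expose each boot region in 512-byte sectors.

#include <stdio.h>

int main(void)
{
        unsigned int boot_mult = 8;                     /* EXT_CSD_BOOT_MULT byte */
        unsigned int boot_size = boot_mult << 17;       /* 8 * 128 KiB = 1 MiB */
        unsigned int boot_sectors = boot_size >> 9;     /* 512-byte sectors */

        printf("%u bytes, %u sectors per boot region\n",
               boot_size, boot_sectors);                /* 1048576, 2048 */
        return 0;
}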
+ */ + card->ext_csd.boot_size = ext_csd[EXT_CSD_BOOT_MULT] << 17; } if (card->ext_csd.rev >= 4) { @@ -350,14 +383,78 @@ static int mmc_read_ext_csd(struct mmc_card *card) ext_csd[EXT_CSD_TRIM_MULT]; } + if (card->ext_csd.rev >= 5) + card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM]; + if (ext_csd[EXT_CSD_ERASED_MEM_CONT]) card->erased_byte = 0xFF; else card->erased_byte = 0x0; out: + return err; +} + +static inline void mmc_free_ext_csd(u8 *ext_csd) +{ kfree(ext_csd); +} + + +static int mmc_compare_ext_csds(struct mmc_card *card, u8 *ext_csd, + unsigned bus_width) +{ + u8 *bw_ext_csd; + int err; + + err = mmc_get_ext_csd(card, &bw_ext_csd); + if (err) + return err; + + if ((ext_csd == NULL || bw_ext_csd == NULL)) { + if (bus_width != MMC_BUS_WIDTH_1) + err = -EINVAL; + goto out; + } + if (bus_width == MMC_BUS_WIDTH_1) + goto out; + + /* only compare read only fields */ + err = (!(ext_csd[EXT_CSD_PARTITION_SUPPORT] == + bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) && + (ext_csd[EXT_CSD_ERASED_MEM_CONT] == + bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) && + (ext_csd[EXT_CSD_REV] == + bw_ext_csd[EXT_CSD_REV]) && + (ext_csd[EXT_CSD_STRUCTURE] == + bw_ext_csd[EXT_CSD_STRUCTURE]) && + (ext_csd[EXT_CSD_CARD_TYPE] == + bw_ext_csd[EXT_CSD_CARD_TYPE]) && + (ext_csd[EXT_CSD_S_A_TIMEOUT] == + bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) && + (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] == + bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) && + (ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT] == + bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) && + (ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] == + bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) && + (ext_csd[EXT_CSD_SEC_TRIM_MULT] == + bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) && + (ext_csd[EXT_CSD_SEC_ERASE_MULT] == + bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) && + (ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] == + bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) && + (ext_csd[EXT_CSD_TRIM_MULT] == + bw_ext_csd[EXT_CSD_TRIM_MULT]) && + memcmp(&ext_csd[EXT_CSD_SEC_CNT], + &bw_ext_csd[EXT_CSD_SEC_CNT], + 4) != 0); + if (err) + err = -EINVAL; + +out: + mmc_free_ext_csd(bw_ext_csd); return err; } @@ -422,6 +519,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, u32 cid[4]; unsigned int max_dtr; u32 rocr; + u8 *ext_csd = NULL; BUG_ON(!host); WARN_ON(!host->claimed); @@ -520,7 +618,11 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, /* * Fetch and process extended CSD. 
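On hosts without MMC_CAP_BUS_WIDTH_TEST, the new mmc_compare_ext_csds() above validates a wider bus by re-reading EXT_CSD and checking that read-only fields come back unchanged. A simplified user-space sketch of that idea; the offsets used here are placeholders, not the exact set of bytes the kernel compares.

#include <stdio.h>
#include <string.h>

/* Returns 1 when the selected read-only bytes match in both EXT_CSD reads. */
static int ext_csd_matches(const unsigned char *narrow,
                           const unsigned char *wide,
                           const unsigned int *offsets, unsigned int n)
{
        unsigned int i;

        for (i = 0; i < n; i++)
                if (narrow[offsets[i]] != wide[offsets[i]])
                        return 0;
        return 1;
}

int main(void)
{
        unsigned char a[512], b[512];
        /* hypothetical offsets standing in for the read-only EXT_CSD fields */
        static const unsigned int ro[] = { 192, 194, 196 };

        memset(a, 0, sizeof(a));
        memcpy(b, a, sizeof(b));
        printf("%d\n", ext_csd_matches(a, b, ro, 3));   /* 1: wide bus looks OK */
        b[196] ^= 0xff;                                 /* a corrupted re-read */
        printf("%d\n", ext_csd_matches(a, b, ro, 3));   /* 0 */
        return 0;
}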
*/ - err = mmc_read_ext_csd(card); + + err = mmc_get_ext_csd(card, &ext_csd); + if (err) + goto free_card; + err = mmc_read_ext_csd(card, ext_csd); if (err) goto free_card; @@ -542,7 +644,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, */ if (card->ext_csd.enhanced_area_en) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_ERASE_GROUP_DEF, 1); + EXT_CSD_ERASE_GROUP_DEF, 1, 0); if (err && err != -EBADMSG) goto free_card; @@ -568,12 +670,24 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, } /* + * Ensure eMMC user default partition is enabled + */ + if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) { + card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG, + card->ext_csd.part_config, + card->ext_csd.part_time); + if (err && err != -EBADMSG) + goto free_card; + } + + /* * Activate high speed (if supported) */ if ((card->ext_csd.hs_max_dtr != 0) && (host->caps & MMC_CAP_MMC_HIGHSPEED)) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_HS_TIMING, 1); + EXT_CSD_HS_TIMING, 1, 0); if (err && err != -EBADMSG) goto free_card; @@ -606,10 +720,14 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, */ if (mmc_card_highspeed(card)) { if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) - && (host->caps & (MMC_CAP_1_8V_DDR))) + && ((host->caps & (MMC_CAP_1_8V_DDR | + MMC_CAP_UHS_DDR50)) + == (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50))) ddr = MMC_1_8V_DDR_MODE; else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) - && (host->caps & (MMC_CAP_1_2V_DDR))) + && ((host->caps & (MMC_CAP_1_2V_DDR | + MMC_CAP_UHS_DDR50)) + == (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50))) ddr = MMC_1_2V_DDR_MODE; } @@ -640,18 +758,22 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, ddr = 0; /* no DDR for 1-bit width */ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH, - ext_csd_bits[idx][0]); + ext_csd_bits[idx][0], + 0); if (!err) { - mmc_set_bus_width_ddr(card->host, - bus_width, MMC_SDR_MODE); + mmc_set_bus_width(card->host, bus_width); + /* * If controller can't handle bus width test, - * use the highest bus width to maintain - * compatibility with previous MMC behavior. + * compare ext_csd previously read in 1 bit mode + * against ext_csd at new bus width */ if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST)) - break; - err = mmc_bus_test(card, bus_width); + err = mmc_compare_ext_csds(card, + ext_csd, + bus_width); + else + err = mmc_bus_test(card, bus_width); if (!err) break; } @@ -659,8 +781,9 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, if (!err && ddr) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_BUS_WIDTH, - ext_csd_bits[idx][1]); + EXT_CSD_BUS_WIDTH, + ext_csd_bits[idx][1], + 0); } if (err) { printk(KERN_WARNING "%s: switch to bus width %d ddr %d " @@ -668,20 +791,43 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, 1 << bus_width, ddr); goto free_card; } else if (ddr) { + /* + * eMMC cards can support 3.3V to 1.2V i/o (vccq) + * signaling. + * + * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq. + * + * 1.8V vccq at 3.3V core voltage (vcc) is not required + * in the JEDEC spec for DDR. + * + * Do not force change in vccq since we are obviously + * working and no change to vccq is needed. 
+ * + * WARNING: eMMC rules are NOT the same as SD DDR + */ + if (ddr == EXT_CSD_CARD_TYPE_DDR_1_2V) { + err = mmc_set_signal_voltage(host, + MMC_SIGNAL_VOLTAGE_120, 0); + if (err) + goto err; + } mmc_card_set_ddr_mode(card); - mmc_set_bus_width_ddr(card->host, bus_width, ddr); + mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50); + mmc_set_bus_width(card->host, bus_width); } } if (!oldcard) host->card = card; + mmc_free_ext_csd(ext_csd); return 0; free_card: if (!oldcard) mmc_remove_card(card); err: + mmc_free_ext_csd(ext_csd); return err; } diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index f3b22bf89cc..845ce7c533b 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -23,12 +23,10 @@ static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card) { int err; - struct mmc_command cmd; + struct mmc_command cmd = {0}; BUG_ON(!host); - memset(&cmd, 0, sizeof(struct mmc_command)); - cmd.opcode = MMC_SELECT_CARD; if (card) { @@ -60,15 +58,13 @@ int mmc_deselect_cards(struct mmc_host *host) int mmc_card_sleepawake(struct mmc_host *host, int sleep) { - struct mmc_command cmd; + struct mmc_command cmd = {0}; struct mmc_card *card = host->card; int err; if (sleep) mmc_deselect_cards(host); - memset(&cmd, 0, sizeof(struct mmc_command)); - cmd.opcode = MMC_SLEEP_AWAKE; cmd.arg = card->rca << 16; if (sleep) @@ -97,7 +93,7 @@ int mmc_card_sleepawake(struct mmc_host *host, int sleep) int mmc_go_idle(struct mmc_host *host) { int err; - struct mmc_command cmd; + struct mmc_command cmd = {0}; /* * Non-SPI hosts need to prevent chipselect going active during @@ -113,8 +109,6 @@ int mmc_go_idle(struct mmc_host *host) mmc_delay(1); } - memset(&cmd, 0, sizeof(struct mmc_command)); - cmd.opcode = MMC_GO_IDLE_STATE; cmd.arg = 0; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC; @@ -135,13 +129,11 @@ int mmc_go_idle(struct mmc_host *host) int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) { - struct mmc_command cmd; + struct mmc_command cmd = {0}; int i, err = 0; BUG_ON(!host); - memset(&cmd, 0, sizeof(struct mmc_command)); - cmd.opcode = MMC_SEND_OP_COND; cmd.arg = mmc_host_is_spi(host) ? 
0 : ocr; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR; @@ -178,13 +170,11 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) int mmc_all_send_cid(struct mmc_host *host, u32 *cid) { int err; - struct mmc_command cmd; + struct mmc_command cmd = {0}; BUG_ON(!host); BUG_ON(!cid); - memset(&cmd, 0, sizeof(struct mmc_command)); - cmd.opcode = MMC_ALL_SEND_CID; cmd.arg = 0; cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR; @@ -201,13 +191,11 @@ int mmc_all_send_cid(struct mmc_host *host, u32 *cid) int mmc_set_relative_addr(struct mmc_card *card) { int err; - struct mmc_command cmd; + struct mmc_command cmd = {0}; BUG_ON(!card); BUG_ON(!card->host); - memset(&cmd, 0, sizeof(struct mmc_command)); - cmd.opcode = MMC_SET_RELATIVE_ADDR; cmd.arg = card->rca << 16; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; @@ -223,13 +211,11 @@ static int mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode) { int err; - struct mmc_command cmd; + struct mmc_command cmd = {0}; BUG_ON(!host); BUG_ON(!cxd); - memset(&cmd, 0, sizeof(struct mmc_command)); - cmd.opcode = opcode; cmd.arg = arg; cmd.flags = MMC_RSP_R2 | MMC_CMD_AC; @@ -247,9 +233,9 @@ static int mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, u32 opcode, void *buf, unsigned len) { - struct mmc_request mrq; - struct mmc_command cmd; - struct mmc_data data; + struct mmc_request mrq = {0}; + struct mmc_command cmd = {0}; + struct mmc_data data = {0}; struct scatterlist sg; void *data_buf; @@ -260,10 +246,6 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, if (data_buf == NULL) return -ENOMEM; - memset(&mrq, 0, sizeof(struct mmc_request)); - memset(&cmd, 0, sizeof(struct mmc_command)); - memset(&data, 0, sizeof(struct mmc_data)); - mrq.cmd = &cmd; mrq.data = &data; @@ -355,11 +337,9 @@ int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd) int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp) { - struct mmc_command cmd; + struct mmc_command cmd = {0}; int err; - memset(&cmd, 0, sizeof(struct mmc_command)); - cmd.opcode = MMC_SPI_READ_OCR; cmd.arg = highcap ? (1 << 30) : 0; cmd.flags = MMC_RSP_SPI_R3; @@ -372,11 +352,9 @@ int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp) int mmc_spi_set_crc(struct mmc_host *host, int use_crc) { - struct mmc_command cmd; + struct mmc_command cmd = {0}; int err; - memset(&cmd, 0, sizeof(struct mmc_command)); - cmd.opcode = MMC_SPI_CRC_ON_OFF; cmd.flags = MMC_RSP_SPI_R1; cmd.arg = use_crc; @@ -387,23 +365,34 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc) return err; } -int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value) +/** + * mmc_switch - modify EXT_CSD register + * @card: the MMC card associated with the data transfer + * @set: cmd set values + * @index: EXT_CSD register index + * @value: value to program into EXT_CSD register + * @timeout_ms: timeout (ms) for operation performed by register write, + * timeout of zero implies maximum possible timeout + * + * Modifies the EXT_CSD register for selected card. 
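For reference while reading mmc_switch() below: the CMD6 argument packs access mode, EXT_CSD byte index, value and command set into one word. MMC_SWITCH_MODE_WRITE_BYTE (0x03) and EXT_CSD_HS_TIMING (byte 185) are quoted from the kernel headers and should be read as assumptions of this sketch.

#include <stdio.h>

#define MMC_SWITCH_MODE_WRITE_BYTE      0x03

static unsigned int mmc_switch_arg(unsigned char index, unsigned char value,
                                   unsigned char set)
{
        return (MMC_SWITCH_MODE_WRITE_BYTE << 24) | (index << 16) |
               (value << 8) | set;
}

int main(void)
{
        /* e.g. set HS_TIMING (EXT_CSD byte 185) to 1 in command set 0 */
        printf("%#010x\n", mmc_switch_arg(185, 1, 0));  /* 0x03b90100 */
        return 0;
}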
+ */ +int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, + unsigned int timeout_ms) { int err; - struct mmc_command cmd; + struct mmc_command cmd = {0}; u32 status; BUG_ON(!card); BUG_ON(!card->host); - memset(&cmd, 0, sizeof(struct mmc_command)); - cmd.opcode = MMC_SWITCH; cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | (index << 16) | (value << 8) | set; cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; + cmd.cmd_timeout_ms = timeout_ms; err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); if (err) @@ -433,17 +422,16 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value) return 0; } +EXPORT_SYMBOL_GPL(mmc_switch); int mmc_send_status(struct mmc_card *card, u32 *status) { int err; - struct mmc_command cmd; + struct mmc_command cmd = {0}; BUG_ON(!card); BUG_ON(!card->host); - memset(&cmd, 0, sizeof(struct mmc_command)); - cmd.opcode = MMC_SEND_STATUS; if (!mmc_host_is_spi(card->host)) cmd.arg = card->rca << 16; @@ -466,9 +454,9 @@ static int mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode, u8 len) { - struct mmc_request mrq; - struct mmc_command cmd; - struct mmc_data data; + struct mmc_request mrq = {0}; + struct mmc_command cmd = {0}; + struct mmc_data data = {0}; struct scatterlist sg; u8 *data_buf; u8 *test_buf; @@ -497,10 +485,6 @@ mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode, if (opcode == MMC_BUS_TEST_W) memcpy(data_buf, test_buf, len); - memset(&mrq, 0, sizeof(struct mmc_request)); - memset(&cmd, 0, sizeof(struct mmc_command)); - memset(&data, 0, sizeof(struct mmc_data)); - mrq.cmd = &cmd; mrq.data = &data; cmd.opcode = opcode; diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h index e6d44b8a18d..9276946fa5b 100644 --- a/drivers/mmc/core/mmc_ops.h +++ b/drivers/mmc/core/mmc_ops.h @@ -20,7 +20,6 @@ int mmc_all_send_cid(struct mmc_host *host, u32 *cid); int mmc_set_relative_addr(struct mmc_card *card); int mmc_send_csd(struct mmc_card *card, u32 *csd); int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd); -int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value); int mmc_send_status(struct mmc_card *card, u32 *status); int mmc_send_cid(struct mmc_host *host, u32 *cid); int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp); diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c index 11118b74eb2..3a596217029 100644 --- a/drivers/mmc/core/quirks.c +++ b/drivers/mmc/core/quirks.c @@ -1,7 +1,8 @@ /* - * This file contains work-arounds for many known sdio hardware - * bugs. + * This file contains work-arounds for many known SD/MMC + * and SDIO hardware bugs. * + * Copyright (c) 2011 Andrei Warkentin <andreiw@motorola.com> * Copyright (c) 2011 Pierre Tardy <tardyp@gmail.com> * Inspired from pci fixup code: * Copyright (c) 1999 Martin Mares <mj@ucw.cz> @@ -11,34 +12,14 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/mmc/card.h> -#include <linux/mod_devicetable.h> -/* - * The world is not perfect and supplies us with broken mmc/sdio devices. 
- * For at least a part of these bugs we need a work-around - */ - -struct mmc_fixup { - u16 vendor, device; /* You can use SDIO_ANY_ID here of course */ - void (*vendor_fixup)(struct mmc_card *card, int data); - int data; -}; - -/* - * This hook just adds a quirk unconditionnally - */ -static void __maybe_unused add_quirk(struct mmc_card *card, int data) -{ - card->quirks |= data; -} +#ifndef SDIO_VENDOR_ID_TI +#define SDIO_VENDOR_ID_TI 0x0097 +#endif -/* - * This hook just removes a quirk unconditionnally - */ -static void __maybe_unused remove_quirk(struct mmc_card *card, int data) -{ - card->quirks &= ~data; -} +#ifndef SDIO_DEVICE_ID_TI_WL1271 +#define SDIO_DEVICE_ID_TI_WL1271 0x4076 +#endif /* * This hook just adds a quirk for all sdio devices @@ -49,33 +30,47 @@ static void add_quirk_for_sdio_devices(struct mmc_card *card, int data) card->quirks |= data; } -#ifndef SDIO_VENDOR_ID_TI -#define SDIO_VENDOR_ID_TI 0x0097 -#endif - -#ifndef SDIO_DEVICE_ID_TI_WL1271 -#define SDIO_DEVICE_ID_TI_WL1271 0x4076 -#endif - static const struct mmc_fixup mmc_fixup_methods[] = { /* by default sdio devices are considered CLK_GATING broken */ /* good cards will be whitelisted as they are tested */ - { SDIO_ANY_ID, SDIO_ANY_ID, - add_quirk_for_sdio_devices, MMC_QUIRK_BROKEN_CLK_GATING }, - { SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271, - remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING }, - { 0 } + SDIO_FIXUP(SDIO_ANY_ID, SDIO_ANY_ID, + add_quirk_for_sdio_devices, + MMC_QUIRK_BROKEN_CLK_GATING), + + SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271, + remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING), + + SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271, + add_quirk, MMC_QUIRK_NONSTD_FUNC_IF), + + SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271, + add_quirk, MMC_QUIRK_DISABLE_CD), + + END_FIXUP }; -void mmc_fixup_device(struct mmc_card *card) +void mmc_fixup_device(struct mmc_card *card, const struct mmc_fixup *table) { const struct mmc_fixup *f; + u64 rev = cid_rev_card(card); + + /* Non-core specific workarounds. */ + if (!table) + table = mmc_fixup_methods; - for (f = mmc_fixup_methods; f->vendor_fixup; f++) { - if ((f->vendor == card->cis.vendor - || f->vendor == (u16) SDIO_ANY_ID) && - (f->device == card->cis.device - || f->device == (u16) SDIO_ANY_ID)) { + for (f = table; f->vendor_fixup; f++) { + if ((f->manfid == CID_MANFID_ANY || + f->manfid == card->cid.manfid) && + (f->oemid == CID_OEMID_ANY || + f->oemid == card->cid.oemid) && + (f->name == CID_NAME_ANY || + !strncmp(f->name, card->cid.prod_name, + sizeof(card->cid.prod_name))) && + (f->cis_vendor == card->cis.vendor || + f->cis_vendor == (u16) SDIO_ANY_ID) && + (f->cis_device == card->cis.device || + f->cis_device == (u16) SDIO_ANY_ID) && + rev >= f->rev_start && rev <= f->rev_end) { dev_dbg(&card->dev, "calling %pF\n", f->vendor_fixup); f->vendor_fixup(card, f->data); } diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 6dac89fe053..ff2774128aa 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -130,7 +130,7 @@ static int mmc_decode_csd(struct mmc_card *card) break; case 1: /* - * This is a block-addressed SDHC card. Most + * This is a block-addressed SDHC or SDXC card. Most * interesting fields are unused and have fixed * values. To avoid getting tripped by buggy cards, * we assume those fixed values ourselves. 
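Back to the reworked mmc_fixup_device() above: an entry now matches on CID product name, manufacturer/OEM ids and a revision window as well as the SDIO ids, with *_ANY wildcards. A user-space sketch of that matching scheme; the struct layout and field names below are illustrative, not the kernel's struct mmc_fixup.

#include <stdio.h>
#include <string.h>

#define ANY_ID  (~0u)

struct card_id {
        const char *name;
        unsigned int manfid, oemid;
        unsigned long long rev;
};

struct fixup {
        const char *name;                       /* NULL = any product name */
        unsigned int manfid, oemid;             /* ANY_ID = wildcard */
        unsigned long long rev_start, rev_end;
        unsigned int quirk;
};

static unsigned int apply_fixups(const struct card_id *c,
                                 const struct fixup *f, int n)
{
        unsigned int quirks = 0;
        int i;

        for (i = 0; i < n; i++, f++) {
                if (f->name && strcmp(f->name, c->name))
                        continue;
                if (f->manfid != ANY_ID && f->manfid != c->manfid)
                        continue;
                if (f->oemid != ANY_ID && f->oemid != c->oemid)
                        continue;
                if (c->rev < f->rev_start || c->rev > f->rev_end)
                        continue;
                quirks |= f->quirk;             /* entry matches: apply it */
        }
        return quirks;
}

int main(void)
{
        static const struct fixup table[] = {
                { "SEM04G", 0x2, 0x100, 0, ~0ULL, 0x1 /* e.g. a CMD38 quirk */ },
        };
        struct card_id card = { "SEM04G", 0x2, 0x100, 0 };

        printf("quirks=%#x\n", apply_fixups(&card, table, 1));  /* 0x1 */
        return 0;
}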
@@ -144,6 +144,11 @@ static int mmc_decode_csd(struct mmc_card *card) e = UNSTUFF_BITS(resp, 96, 3); csd->max_dtr = tran_exp[e] * tran_mant[m]; csd->cmdclass = UNSTUFF_BITS(resp, 84, 12); + csd->c_size = UNSTUFF_BITS(resp, 48, 22); + + /* SDXC cards have a minimum C_SIZE of 0x00FFFF */ + if (csd->c_size >= 0xFFFF) + mmc_card_set_ext_capacity(card); m = UNSTUFF_BITS(resp, 48, 22); csd->capacity = (1 + m) << 10; @@ -189,12 +194,17 @@ static int mmc_decode_scr(struct mmc_card *card) scr->sda_vsn = UNSTUFF_BITS(resp, 56, 4); scr->bus_widths = UNSTUFF_BITS(resp, 48, 4); + if (scr->sda_vsn == SCR_SPEC_VER_2) + /* Check if Physical Layer Spec v3.0 is supported */ + scr->sda_spec3 = UNSTUFF_BITS(resp, 47, 1); if (UNSTUFF_BITS(resp, 55, 1)) card->erased_byte = 0xFF; else card->erased_byte = 0x0; + if (scr->sda_spec3) + scr->cmds = UNSTUFF_BITS(resp, 32, 2); return 0; } @@ -274,29 +284,74 @@ static int mmc_read_switch(struct mmc_card *card) status = kmalloc(64, GFP_KERNEL); if (!status) { printk(KERN_ERR "%s: could not allocate a buffer for " - "switch capabilities.\n", mmc_hostname(card->host)); + "switch capabilities.\n", + mmc_hostname(card->host)); return -ENOMEM; } + /* Find out the supported Bus Speed Modes. */ err = mmc_sd_switch(card, 0, 0, 1, status); if (err) { - /* If the host or the card can't do the switch, - * fail more gracefully. */ - if ((err != -EINVAL) - && (err != -ENOSYS) - && (err != -EFAULT)) + /* + * If the host or the card can't do the switch, + * fail more gracefully. + */ + if (err != -EINVAL && err != -ENOSYS && err != -EFAULT) goto out; - printk(KERN_WARNING "%s: problem reading switch " - "capabilities, performance might suffer.\n", + printk(KERN_WARNING "%s: problem reading Bus Speed modes.\n", mmc_hostname(card->host)); err = 0; goto out; } - if (status[13] & 0x02) - card->sw_caps.hs_max_dtr = 50000000; + if (card->scr.sda_spec3) { + card->sw_caps.sd3_bus_mode = status[13]; + + /* Find out Driver Strengths supported by the card */ + err = mmc_sd_switch(card, 0, 2, 1, status); + if (err) { + /* + * If the host or the card can't do the switch, + * fail more gracefully. + */ + if (err != -EINVAL && err != -ENOSYS && err != -EFAULT) + goto out; + + printk(KERN_WARNING "%s: problem reading " + "Driver Strength.\n", + mmc_hostname(card->host)); + err = 0; + + goto out; + } + + card->sw_caps.sd3_drv_type = status[9]; + + /* Find out Current Limits supported by the card */ + err = mmc_sd_switch(card, 0, 3, 1, status); + if (err) { + /* + * If the host or the card can't do the switch, + * fail more gracefully. + */ + if (err != -EINVAL && err != -ENOSYS && err != -EFAULT) + goto out; + + printk(KERN_WARNING "%s: problem reading " + "Current Limit.\n", + mmc_hostname(card->host)); + err = 0; + + goto out; + } + + card->sw_caps.sd3_curr_limit = status[7]; + } else { + if (status[13] & 0x02) + card->sw_caps.hs_max_dtr = 50000000; + } out: kfree(status); @@ -352,6 +407,232 @@ out: return err; } +static int sd_select_driver_type(struct mmc_card *card, u8 *status) +{ + int host_drv_type = 0, card_drv_type = 0; + int err; + + /* + * If the host doesn't support any of the Driver Types A,C or D, + * default Driver Type B is used. 
+ */ + if (!(card->host->caps & (MMC_CAP_DRIVER_TYPE_A | MMC_CAP_DRIVER_TYPE_C + | MMC_CAP_DRIVER_TYPE_D))) + return 0; + + if (card->host->caps & MMC_CAP_DRIVER_TYPE_A) { + host_drv_type = MMC_SET_DRIVER_TYPE_A; + if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A) + card_drv_type = MMC_SET_DRIVER_TYPE_A; + else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_B) + card_drv_type = MMC_SET_DRIVER_TYPE_B; + else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C) + card_drv_type = MMC_SET_DRIVER_TYPE_C; + } else if (card->host->caps & MMC_CAP_DRIVER_TYPE_C) { + host_drv_type = MMC_SET_DRIVER_TYPE_C; + if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C) + card_drv_type = MMC_SET_DRIVER_TYPE_C; + } else if (!(card->host->caps & MMC_CAP_DRIVER_TYPE_D)) { + /* + * If we are here, that means only the default driver type + * B is supported by the host. + */ + host_drv_type = MMC_SET_DRIVER_TYPE_B; + if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_B) + card_drv_type = MMC_SET_DRIVER_TYPE_B; + else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C) + card_drv_type = MMC_SET_DRIVER_TYPE_C; + } + + err = mmc_sd_switch(card, 1, 2, card_drv_type, status); + if (err) + return err; + + if ((status[15] & 0xF) != card_drv_type) { + printk(KERN_WARNING "%s: Problem setting driver strength!\n", + mmc_hostname(card->host)); + return 0; + } + + mmc_set_driver_type(card->host, host_drv_type); + + return 0; +} + +static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status) +{ + unsigned int bus_speed = 0, timing = 0; + int err; + + /* + * If the host doesn't support any of the UHS-I modes, fallback on + * default speed. + */ + if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | + MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) + return 0; + + if ((card->host->caps & MMC_CAP_UHS_SDR104) && + (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) { + bus_speed = UHS_SDR104_BUS_SPEED; + timing = MMC_TIMING_UHS_SDR104; + card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR; + } else if ((card->host->caps & MMC_CAP_UHS_DDR50) && + (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) { + bus_speed = UHS_DDR50_BUS_SPEED; + timing = MMC_TIMING_UHS_DDR50; + card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR; + } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | + MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode & + SD_MODE_UHS_SDR50)) { + bus_speed = UHS_SDR50_BUS_SPEED; + timing = MMC_TIMING_UHS_SDR50; + card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR; + } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | + MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) && + (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) { + bus_speed = UHS_SDR25_BUS_SPEED; + timing = MMC_TIMING_UHS_SDR25; + card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR; + } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | + MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 | + MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode & + SD_MODE_UHS_SDR12)) { + bus_speed = UHS_SDR12_BUS_SPEED; + timing = MMC_TIMING_UHS_SDR12; + card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR; + } + + card->sd_bus_speed = bus_speed; + err = mmc_sd_switch(card, 1, 0, bus_speed, status); + if (err) + return err; + + if ((status[16] & 0xF) != bus_speed) + printk(KERN_WARNING "%s: Problem setting bus speed mode!\n", + mmc_hostname(card->host)); + else { + mmc_set_timing(card->host, timing); + mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr); + } + + return 0; +} + +static int sd_set_current_limit(struct mmc_card *card, u8 *status) +{ + int current_limit = 0; + int err; + + /* + * Current limit 
switch is only defined for SDR50, SDR104, and DDR50 + * bus speed modes. For other bus speed modes, we set the default + * current limit of 200mA. + */ + if ((card->sd_bus_speed == UHS_SDR50_BUS_SPEED) || + (card->sd_bus_speed == UHS_SDR104_BUS_SPEED) || + (card->sd_bus_speed == UHS_DDR50_BUS_SPEED)) { + if (card->host->caps & MMC_CAP_MAX_CURRENT_800) { + if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_800) + current_limit = SD_SET_CURRENT_LIMIT_800; + else if (card->sw_caps.sd3_curr_limit & + SD_MAX_CURRENT_600) + current_limit = SD_SET_CURRENT_LIMIT_600; + else if (card->sw_caps.sd3_curr_limit & + SD_MAX_CURRENT_400) + current_limit = SD_SET_CURRENT_LIMIT_400; + else if (card->sw_caps.sd3_curr_limit & + SD_MAX_CURRENT_200) + current_limit = SD_SET_CURRENT_LIMIT_200; + } else if (card->host->caps & MMC_CAP_MAX_CURRENT_600) { + if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_600) + current_limit = SD_SET_CURRENT_LIMIT_600; + else if (card->sw_caps.sd3_curr_limit & + SD_MAX_CURRENT_400) + current_limit = SD_SET_CURRENT_LIMIT_400; + else if (card->sw_caps.sd3_curr_limit & + SD_MAX_CURRENT_200) + current_limit = SD_SET_CURRENT_LIMIT_200; + } else if (card->host->caps & MMC_CAP_MAX_CURRENT_400) { + if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_400) + current_limit = SD_SET_CURRENT_LIMIT_400; + else if (card->sw_caps.sd3_curr_limit & + SD_MAX_CURRENT_200) + current_limit = SD_SET_CURRENT_LIMIT_200; + } else if (card->host->caps & MMC_CAP_MAX_CURRENT_200) { + if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_200) + current_limit = SD_SET_CURRENT_LIMIT_200; + } + } else + current_limit = SD_SET_CURRENT_LIMIT_200; + + err = mmc_sd_switch(card, 1, 3, current_limit, status); + if (err) + return err; + + if (((status[15] >> 4) & 0x0F) != current_limit) + printk(KERN_WARNING "%s: Problem setting current limit!\n", + mmc_hostname(card->host)); + + return 0; +} + +/* + * UHS-I specific initialization procedure + */ +static int mmc_sd_init_uhs_card(struct mmc_card *card) +{ + int err; + u8 *status; + + if (!card->scr.sda_spec3) + return 0; + + if (!(card->csd.cmdclass & CCC_SWITCH)) + return 0; + + status = kmalloc(64, GFP_KERNEL); + if (!status) { + printk(KERN_ERR "%s: could not allocate a buffer for " + "switch capabilities.\n", mmc_hostname(card->host)); + return -ENOMEM; + } + + /* Set 4-bit bus width */ + if ((card->host->caps & MMC_CAP_4_BIT_DATA) && + (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) { + err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4); + if (err) + goto out; + + mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4); + } + + /* Set the driver strength for the card */ + err = sd_select_driver_type(card, status); + if (err) + goto out; + + /* Set bus speed mode of the card */ + err = sd_set_bus_speed_mode(card, status); + if (err) + goto out; + + /* Set current limit for the card */ + err = sd_set_current_limit(card, status); + if (err) + goto out; + + /* SPI mode doesn't define CMD19 */ + if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning) + err = card->host->ops->execute_tuning(card->host); + +out: + kfree(status); + + return err; +} + MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1], card->raw_cid[2], card->raw_cid[3]); MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1], @@ -400,7 +681,7 @@ struct device_type sd_type = { /* * Fetch CID from card. 
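The if/else cascade in sd_set_current_limit() above amounts to "for SDR50/SDR104/DDR50, pick the highest current limit that both the host and the card advertise, otherwise fall back to 200mA". A compilable sketch of that selection, assuming the SD_SET_CURRENT_LIMIT_* codes are ordered 200 < 400 < 600 < 800 starting at 0 (the enum and names below are local stand-ins, not the kernel's definitions):

#include <stdio.h>

/* Order mirrors SD_SET_CURRENT_LIMIT_200..800; 0 is the 200 mA default. */
enum { LIM_200, LIM_400, LIM_600, LIM_800 };

static int pick_current_limit(int host_max, unsigned card_mask)
{
	/* card_mask bit n set => card supports limit n */
	for (int lim = host_max; lim > LIM_200; lim--)
		if (card_mask & (1u << lim))
			return lim;
	return LIM_200;				/* default 200 mA */
}

int main(void)
{
	/* host tops out at 600 mA, card offers 200 and 800 mA -> 200 mA wins */
	printf("limit = %d\n",
	       pick_current_limit(LIM_600, (1u << LIM_200) | (1u << LIM_800)));
	return 0;
}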
*/ -int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid) +int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr) { int err; @@ -420,12 +701,39 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid) */ err = mmc_send_if_cond(host, ocr); if (!err) - ocr |= 1 << 30; + ocr |= SD_OCR_CCS; + + /* + * If the host supports one of UHS-I modes, request the card + * to switch to 1.8V signaling level. + */ + if (host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | + MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)) + ocr |= SD_OCR_S18R; + + /* If the host can supply more than 150mA, XPC should be set to 1. */ + if (host->caps & (MMC_CAP_SET_XPC_330 | MMC_CAP_SET_XPC_300 | + MMC_CAP_SET_XPC_180)) + ocr |= SD_OCR_XPC; - err = mmc_send_app_op_cond(host, ocr, NULL); +try_again: + err = mmc_send_app_op_cond(host, ocr, rocr); if (err) return err; + /* + * In case CCS and S18A in the response is set, start Signal Voltage + * Switch procedure. SPI mode doesn't support CMD11. + */ + if (!mmc_host_is_spi(host) && rocr && + ((*rocr & 0x41000000) == 0x41000000)) { + err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, true); + if (err) { + ocr &= ~SD_OCR_S18R; + goto try_again; + } + } + if (mmc_host_is_spi(host)) err = mmc_send_cid(host, cid); else @@ -553,11 +861,12 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, struct mmc_card *card; int err; u32 cid[4]; + u32 rocr = 0; BUG_ON(!host); WARN_ON(!host->claimed); - err = mmc_sd_get_cid(host, ocr, cid); + err = mmc_sd_get_cid(host, ocr, cid, &rocr); if (err) return err; @@ -610,30 +919,47 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, if (err) goto free_card; - /* - * Attempt to change to high-speed (if supported) - */ - err = mmc_sd_switch_hs(card); - if (err > 0) - mmc_sd_go_highspeed(card); - else if (err) - goto free_card; + /* Initialization sequence for UHS-I cards */ + if (rocr & SD_ROCR_S18A) { + err = mmc_sd_init_uhs_card(card); + if (err) + goto free_card; - /* - * Set bus speed. - */ - mmc_set_clock(host, mmc_sd_get_max_clock(card)); + /* Card is an ultra-high-speed card */ + mmc_sd_card_set_uhs(card); - /* - * Switch to wider bus (if supported). - */ - if ((host->caps & MMC_CAP_4_BIT_DATA) && - (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) { - err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4); - if (err) + /* + * Since initialization is now complete, enable preset + * value registers for UHS-I cards. + */ + if (host->ops->enable_preset_value) + host->ops->enable_preset_value(host, true); + } else { + /* + * Attempt to change to high-speed (if supported) + */ + err = mmc_sd_switch_hs(card); + if (err > 0) + mmc_sd_go_highspeed(card); + else if (err) goto free_card; - mmc_set_bus_width(host, MMC_BUS_WIDTH_4); + /* + * Set bus speed. + */ + mmc_set_clock(host, mmc_sd_get_max_clock(card)); + + /* + * Switch to wider bus (if supported). 
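For reference, the 0x41000000 test added to mmc_sd_get_cid() above is a combined check of two bits in the R3 (OCR) response: CCS in bit 30 and S18A in bit 24. A small decoding sketch (macro names are local stand-ins; bit positions per the SD spec):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define OCR_CCS  (1u << 30)	/* card capacity status (SDHC/SDXC) */
#define OCR_S18A (1u << 24)	/* card accepted the 1.8V switch request */

static bool start_voltage_switch(uint32_t rocr)
{
	/* same condition as (rocr & 0x41000000) == 0x41000000 */
	return (rocr & (OCR_CCS | OCR_S18A)) == (OCR_CCS | OCR_S18A);
}

int main(void)
{
	printf("%d\n", start_voltage_switch(0x41000000));	/* 1: run CMD11 */
	printf("%d\n", start_voltage_switch(0x40000000));	/* 0: CCS only, stay at 3.3V */
	return 0;
}

If the subsequent CMD11 voltage switch fails, the patch clears SD_OCR_S18R and retries ACMD41, which is why the check is driven from the returned rocr rather than from the requested ocr.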
+ */ + if ((host->caps & MMC_CAP_4_BIT_DATA) && + (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) { + err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4); + if (err) + goto free_card; + + mmc_set_bus_width(host, MMC_BUS_WIDTH_4); + } } host->card = card; @@ -773,6 +1099,15 @@ int mmc_attach_sd(struct mmc_host *host) BUG_ON(!host); WARN_ON(!host->claimed); + /* Make sure we are at 3.3V signalling voltage */ + err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, false); + if (err) + return err; + + /* Disable preset value enable if already set since last time */ + if (host->ops->enable_preset_value) + host->ops->enable_preset_value(host, false); + err = mmc_send_app_op_cond(host, 0, &ocr); if (err) return err; diff --git a/drivers/mmc/core/sd.h b/drivers/mmc/core/sd.h index 3d8800fa760..4b34b24f3f7 100644 --- a/drivers/mmc/core/sd.h +++ b/drivers/mmc/core/sd.h @@ -5,7 +5,7 @@ extern struct device_type sd_type; -int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid); +int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr); int mmc_sd_get_csd(struct mmc_host *host, struct mmc_card *card); void mmc_decode_cid(struct mmc_card *card); int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card, diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c index 76af349c14b..021fed15380 100644 --- a/drivers/mmc/core/sd_ops.c +++ b/drivers/mmc/core/sd_ops.c @@ -21,10 +21,10 @@ #include "core.h" #include "sd_ops.h" -static int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card) +int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card) { int err; - struct mmc_command cmd; + struct mmc_command cmd = {0}; BUG_ON(!host); BUG_ON(card && (card->host != host)); @@ -49,6 +49,7 @@ static int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card) return 0; } +EXPORT_SYMBOL_GPL(mmc_app_cmd); /** * mmc_wait_for_app_cmd - start an application command and wait for @@ -66,7 +67,7 @@ static int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card) int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card, struct mmc_command *cmd, int retries) { - struct mmc_request mrq; + struct mmc_request mrq = {0}; int i, err; @@ -119,13 +120,11 @@ EXPORT_SYMBOL(mmc_wait_for_app_cmd); int mmc_app_set_bus_width(struct mmc_card *card, int width) { int err; - struct mmc_command cmd; + struct mmc_command cmd = {0}; BUG_ON(!card); BUG_ON(!card->host); - memset(&cmd, 0, sizeof(struct mmc_command)); - cmd.opcode = SD_APP_SET_BUS_WIDTH; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; @@ -149,13 +148,11 @@ int mmc_app_set_bus_width(struct mmc_card *card, int width) int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) { - struct mmc_command cmd; + struct mmc_command cmd = {0}; int i, err = 0; BUG_ON(!host); - memset(&cmd, 0, sizeof(struct mmc_command)); - cmd.opcode = SD_APP_OP_COND; if (mmc_host_is_spi(host)) cmd.arg = ocr & (1 << 30); /* SPI only defines one bit */ @@ -194,7 +191,7 @@ int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) int mmc_send_if_cond(struct mmc_host *host, u32 ocr) { - struct mmc_command cmd; + struct mmc_command cmd = {0}; int err; static const u8 test_pattern = 0xAA; u8 result_pattern; @@ -226,13 +223,11 @@ int mmc_send_if_cond(struct mmc_host *host, u32 ocr) int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca) { int err; - struct mmc_command cmd; + struct mmc_command cmd = {0}; BUG_ON(!host); BUG_ON(!rca); - memset(&cmd, 0, sizeof(struct mmc_command)); - cmd.opcode = SD_SEND_RELATIVE_ADDR; cmd.arg = 0; 
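The sd_ops.c hunks above (and the sdio_ops.c ones further down) replace explicit memset() calls with zero-initializers. A minimal illustration of why the two are interchangeable for these on-stack structures (plain C, not kernel code):

#include <stdio.h>
#include <string.h>

struct cmd {			/* stand-in for struct mmc_command */
	unsigned opcode;
	unsigned arg;
	unsigned flags;
};

int main(void)
{
	struct cmd a = {0};		/* every member zero-initialized */
	struct cmd b;
	memset(&b, 0, sizeof(b));	/* the idiom being removed */

	printf("%u %u %u\n", a.opcode, a.arg, a.flags);	/* 0 0 0 */
	printf("%u %u %u\n", b.opcode, b.arg, b.flags);	/* 0 0 0 */
	return 0;
}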
cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR; @@ -249,9 +244,9 @@ int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca) int mmc_app_send_scr(struct mmc_card *card, u32 *scr) { int err; - struct mmc_request mrq; - struct mmc_command cmd; - struct mmc_data data; + struct mmc_request mrq = {0}; + struct mmc_command cmd = {0}; + struct mmc_data data = {0}; struct scatterlist sg; void *data_buf; @@ -272,10 +267,6 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr) if (data_buf == NULL) return -ENOMEM; - memset(&mrq, 0, sizeof(struct mmc_request)); - memset(&cmd, 0, sizeof(struct mmc_command)); - memset(&data, 0, sizeof(struct mmc_data)); - mrq.cmd = &cmd; mrq.data = &data; @@ -312,9 +303,9 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr) int mmc_sd_switch(struct mmc_card *card, int mode, int group, u8 value, u8 *resp) { - struct mmc_request mrq; - struct mmc_command cmd; - struct mmc_data data; + struct mmc_request mrq = {0}; + struct mmc_command cmd = {0}; + struct mmc_data data = {0}; struct scatterlist sg; BUG_ON(!card); @@ -325,10 +316,6 @@ int mmc_sd_switch(struct mmc_card *card, int mode, int group, mode = !!mode; value &= 0xF; - memset(&mrq, 0, sizeof(struct mmc_request)); - memset(&cmd, 0, sizeof(struct mmc_command)); - memset(&data, 0, sizeof(struct mmc_data)); - mrq.cmd = &cmd; mrq.data = &data; @@ -361,9 +348,9 @@ int mmc_sd_switch(struct mmc_card *card, int mode, int group, int mmc_app_sd_status(struct mmc_card *card, void *ssr) { int err; - struct mmc_request mrq; - struct mmc_command cmd; - struct mmc_data data; + struct mmc_request mrq = {0}; + struct mmc_command cmd = {0}; + struct mmc_data data = {0}; struct scatterlist sg; BUG_ON(!card); @@ -376,10 +363,6 @@ int mmc_app_sd_status(struct mmc_card *card, void *ssr) if (err) return err; - memset(&mrq, 0, sizeof(struct mmc_request)); - memset(&cmd, 0, sizeof(struct mmc_command)); - memset(&data, 0, sizeof(struct mmc_data)); - mrq.cmd = &cmd; mrq.data = &data; diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index db0f0b44d68..4d0c15bfa51 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -16,6 +16,7 @@ #include <linux/mmc/card.h> #include <linux/mmc/sdio.h> #include <linux/mmc/sdio_func.h> +#include <linux/mmc/sdio_ids.h> #include "core.h" #include "bus.h" @@ -31,6 +32,11 @@ static int sdio_read_fbr(struct sdio_func *func) int ret; unsigned char data; + if (mmc_card_nonstd_func_interface(func->card)) { + func->class = SDIO_CLASS_NONE; + return 0; + } + ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_FBR_BASE(func->num) + SDIO_FBR_STD_IF, 0, &data); if (ret) @@ -181,7 +187,7 @@ static int sdio_disable_cd(struct mmc_card *card) int ret; u8 ctrl; - if (!card->cccr.disable_cd) + if (!mmc_card_disable_cd(card)) return 0; ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IF, 0, &ctrl); @@ -363,8 +369,8 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, goto err; } - if (ocr & R4_MEMORY_PRESENT - && mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid) == 0) { + if ((ocr & R4_MEMORY_PRESENT) && + mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid, NULL) == 0) { card->type = MMC_TYPE_SD_COMBO; if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO || @@ -466,7 +472,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, card = oldcard; } - mmc_fixup_device(card); + mmc_fixup_device(card, NULL); if (card->type == MMC_TYPE_SD_COMBO) { err = mmc_sd_setup_card(host, card, oldcard != NULL); @@ -625,7 +631,7 @@ static int mmc_sdio_suspend(struct mmc_host *host) } } - if (!err && 
host->pm_flags & MMC_PM_KEEP_POWER) { + if (!err && mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) { mmc_claim_host(host); sdio_disable_wide(host->card); mmc_release_host(host); @@ -645,10 +651,10 @@ static int mmc_sdio_resume(struct mmc_host *host) mmc_claim_host(host); /* No need to reinitialize powered-resumed nonremovable cards */ - if (mmc_card_is_removable(host) || !mmc_card_is_powered_resumed(host)) + if (mmc_card_is_removable(host) || !mmc_card_keep_power(host)) err = mmc_sdio_init_card(host, host->ocr, host->card, - (host->pm_flags & MMC_PM_KEEP_POWER)); - else if (mmc_card_is_powered_resumed(host)) { + mmc_card_keep_power(host)); + else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) { /* We may have switched to 1-bit mode during suspend */ err = sdio_enable_4bit_bus(host->card); if (err > 0) { @@ -691,7 +697,7 @@ static int mmc_sdio_power_restore(struct mmc_host *host) mmc_claim_host(host); ret = mmc_sdio_init_card(host, host->ocr, host->card, - (host->pm_flags & MMC_PM_KEEP_POWER)); + mmc_card_keep_power(host)); if (!ret && host->sdio_irqs) mmc_signal_sdio_irq(host); mmc_release_host(host); diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c index b3001617e67..03ead028d2c 100644 --- a/drivers/mmc/core/sdio_irq.c +++ b/drivers/mmc/core/sdio_irq.c @@ -31,6 +31,17 @@ static int process_sdio_pending_irqs(struct mmc_card *card) { int i, ret, count; unsigned char pending; + struct sdio_func *func; + + /* + * Optimization, if there is only 1 function interrupt registered + * call irq handler directly + */ + func = card->sdio_single_irq; + if (func) { + func->irq_handler(func); + return 1; + } ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTx, 0, &pending); if (ret) { @@ -42,7 +53,7 @@ static int process_sdio_pending_irqs(struct mmc_card *card) count = 0; for (i = 1; i <= 7; i++) { if (pending & (1 << i)) { - struct sdio_func *func = card->sdio_func[i - 1]; + func = card->sdio_func[i - 1]; if (!func) { printk(KERN_WARNING "%s: pending IRQ for " "non-existent function\n", @@ -186,6 +197,24 @@ static int sdio_card_irq_put(struct mmc_card *card) return 0; } +/* If there is only 1 function registered set sdio_single_irq */ +static void sdio_single_irq_set(struct mmc_card *card) +{ + struct sdio_func *func; + int i; + + card->sdio_single_irq = NULL; + if ((card->host->caps & MMC_CAP_SDIO_IRQ) && + card->host->sdio_irqs == 1) + for (i = 0; i < card->sdio_funcs; i++) { + func = card->sdio_func[i]; + if (func && func->irq_handler) { + card->sdio_single_irq = func; + break; + } + } +} + /** * sdio_claim_irq - claim the IRQ for a SDIO function * @func: SDIO function @@ -227,6 +256,7 @@ int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler) ret = sdio_card_irq_get(func->card); if (ret) func->irq_handler = NULL; + sdio_single_irq_set(func->card); return ret; } @@ -251,6 +281,7 @@ int sdio_release_irq(struct sdio_func *func) if (func->irq_handler) { func->irq_handler = NULL; sdio_card_irq_put(func->card); + sdio_single_irq_set(func->card); } ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, ®); diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c index dea36d9c22e..f087d876c57 100644 --- a/drivers/mmc/core/sdio_ops.c +++ b/drivers/mmc/core/sdio_ops.c @@ -21,13 +21,11 @@ int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) { - struct mmc_command cmd; + struct mmc_command cmd = {0}; int i, err = 0; BUG_ON(!host); - memset(&cmd, 0, sizeof(struct mmc_command)); - cmd.opcode = SD_IO_SEND_OP_COND; 
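The sdio_single_irq changes above cache the lone registered handler so process_sdio_pending_irqs() can skip the SDIO_CCCR_INTx read when only one function has claimed an IRQ. A condensed, compilable sketch of that fast path (all types and names below are local simplifications; the real code additionally requires MMC_CAP_SDIO_IRQ and checks host->sdio_irqs == 1):

#include <stdio.h>

#define MAX_FUNCS 7

struct func {
	void (*irq_handler)(struct func *);
	int num;
};

struct card {
	struct func *funcs[MAX_FUNCS];
	struct func *single_irq;	/* non-NULL => direct dispatch */
	int claimed_irqs;
};

static void single_irq_set(struct card *card)
{
	card->single_irq = NULL;
	if (card->claimed_irqs != 1)
		return;
	for (int i = 0; i < MAX_FUNCS; i++)
		if (card->funcs[i] && card->funcs[i]->irq_handler) {
			card->single_irq = card->funcs[i];
			break;
		}
}

static void wlan_irq(struct func *f)
{
	printf("IRQ dispatched to function %d\n", f->num);
}

int main(void)
{
	struct card card = {0};
	struct func wlan = { .irq_handler = wlan_irq, .num = 1 };

	card.funcs[0] = &wlan;
	card.claimed_irqs = 1;
	single_irq_set(&card);

	if (card.single_irq)		/* fast path: no pending-IRQ register read */
		card.single_irq->irq_handler(card.single_irq);
	return 0;
}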
cmd.arg = ocr; cmd.flags = MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR; @@ -70,7 +68,7 @@ int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr) static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn, unsigned addr, u8 in, u8 *out) { - struct mmc_command cmd; + struct mmc_command cmd = {0}; int err; BUG_ON(!host); @@ -80,8 +78,6 @@ static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn, if (addr & ~0x1FFFF) return -EINVAL; - memset(&cmd, 0, sizeof(struct mmc_command)); - cmd.opcode = SD_IO_RW_DIRECT; cmd.arg = write ? 0x80000000 : 0x00000000; cmd.arg |= fn << 28; @@ -125,9 +121,9 @@ int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn, int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn, unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz) { - struct mmc_request mrq; - struct mmc_command cmd; - struct mmc_data data; + struct mmc_request mrq = {0}; + struct mmc_command cmd = {0}; + struct mmc_data data = {0}; struct scatterlist sg; BUG_ON(!card); @@ -140,10 +136,6 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn, if (addr & ~0x1FFFF) return -EINVAL; - memset(&mrq, 0, sizeof(struct mmc_request)); - memset(&cmd, 0, sizeof(struct mmc_command)); - memset(&data, 0, sizeof(struct mmc_data)); - mrq.cmd = &cmd; mrq.data = &data; diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 94df40531c3..56dbf3f6ad0 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -154,7 +154,7 @@ config MMC_SDHCI_DOVE If unsure, say N. config MMC_SDHCI_TEGRA - tristate "SDHCI platform support for the Tegra SD/MMC Controller" + bool "SDHCI platform support for the Tegra SD/MMC Controller" depends on MMC_SDHCI_PLTFM && ARCH_TEGRA select MMC_SDHCI_IO_ACCESSORS help @@ -535,6 +535,37 @@ config MMC_JZ4740 If you have a board based on such a SoC and with a SD/MMC slot, say Y or M here. +config MMC_VUB300 + tristate "VUB300 USB to SDIO/SD/MMC Host Controller support" + depends on USB + help + This selects support for Elan Digital Systems' VUB300 chip. + + The VUB300 is a USB-SDIO Host Controller Interface chip + that enables the host computer to use SDIO/SD/MMC cards + via a USB 2.0 or USB 1.1 host. + + The VUB300 chip will be found in both physically separate + USB to SDIO/SD/MMC adapters and embedded on some motherboards. + + The VUB300 chip supports SD and MMC memory cards in addition + to single and multifunction SDIO cards. + + Some SDIO cards will need a firmware file to be loaded and + sent to VUB300 chip in order to achieve better data throughput. + Download these "Offload Pseudocode" from Elan Digital Systems' + web-site http://www.elandigitalsystems.com/support/downloads.php + and put them in /lib/firmware. Note that without these additional + firmware files the VUB300 chip will still function, but not at + the best obtainable data rate. + + To compile this mmc host controller driver as a module, + choose M here: the module will be called vub300. + + If you have a computer with an embedded VUB300 chip + or if you intend connecting a USB adapter based on a + VUB300 chip say Y or M here. 
+ config MMC_USHC tristate "USB SD Host Controller (USHC) support" depends on USB diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index 4f1df0aae57..58a5cf73d6e 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -41,6 +41,7 @@ obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o obj-$(CONFIG_MMC_DW) += dw_mmc.o obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o +obj-$(CONFIG_MMC_VUB300) += vub300.o obj-$(CONFIG_MMC_USHC) += ushc.o obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-platform.o diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 87e1f57ec9b..66dcddb9c20 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -1769,9 +1769,6 @@ static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg) int i, ret; struct dw_mci *host = platform_get_drvdata(pdev); - if (host->vmmc) - regulator_enable(host->vmmc); - for (i = 0; i < host->num_slots; i++) { struct dw_mci_slot *slot = host->slot[i]; if (!slot) @@ -1798,6 +1795,9 @@ static int dw_mci_resume(struct platform_device *pdev) int i, ret; struct dw_mci *host = platform_get_drvdata(pdev); + if (host->vmmc) + regulator_enable(host->vmmc); + if (host->dma_ops->init) host->dma_ops->init(host); diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index f8b5f37007b..936bbca19c0 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c @@ -18,11 +18,9 @@ #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/device.h> - #include <linux/mmc/host.h> - -#include <asm/scatterlist.h> -#include <asm/io.h> +#include <linux/scatterlist.h> +#include <linux/io.h> #include "sdhci.h" @@ -46,14 +44,14 @@ struct sdhci_pci_slot; struct sdhci_pci_fixes { unsigned int quirks; - int (*probe)(struct sdhci_pci_chip*); + int (*probe) (struct sdhci_pci_chip *); - int (*probe_slot)(struct sdhci_pci_slot*); - void (*remove_slot)(struct sdhci_pci_slot*, int); + int (*probe_slot) (struct sdhci_pci_slot *); + void (*remove_slot) (struct sdhci_pci_slot *, int); - int (*suspend)(struct sdhci_pci_chip*, + int (*suspend) (struct sdhci_pci_chip *, pm_message_t); - int (*resume)(struct sdhci_pci_chip*); + int (*resume) (struct sdhci_pci_chip *); }; struct sdhci_pci_slot { @@ -329,6 +327,11 @@ static int jmicron_probe(struct sdhci_pci_chip *chip) return ret; } + /* quirk for unsable RO-detection on JM388 chips */ + if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD || + chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) + chip->quirks |= SDHCI_QUIRK_UNSTABLE_RO_DETECT; + return 0; } @@ -402,7 +405,7 @@ static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state) if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) { - for (i = 0;i < chip->num_slots;i++) + for (i = 0; i < chip->num_slots; i++) jmicron_enable_mmc(chip->slots[i]->host, 0); } @@ -415,7 +418,7 @@ static int jmicron_resume(struct sdhci_pci_chip *chip) if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) { - for (i = 0;i < chip->num_slots;i++) + for (i = 0; i < chip->num_slots; i++) jmicron_enable_mmc(chip->slots[i]->host, 1); } @@ -798,7 +801,7 @@ static struct sdhci_ops sdhci_pci_ops = { #ifdef CONFIG_PM -static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state) +static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state) { struct sdhci_pci_chip *chip; struct sdhci_pci_slot *slot; @@ 
-810,7 +813,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state) if (!chip) return 0; - for (i = 0;i < chip->num_slots;i++) { + for (i = 0; i < chip->num_slots; i++) { slot = chip->slots[i]; if (!slot) continue; @@ -818,7 +821,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state) ret = sdhci_suspend_host(slot->host, state); if (ret) { - for (i--;i >= 0;i--) + for (i--; i >= 0; i--) sdhci_resume_host(chip->slots[i]->host); return ret; } @@ -833,7 +836,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state) if (chip->fixes && chip->fixes->suspend) { ret = chip->fixes->suspend(chip, state); if (ret) { - for (i = chip->num_slots - 1;i >= 0;i--) + for (i = chip->num_slots - 1; i >= 0; i--) sdhci_resume_host(chip->slots[i]->host); return ret; } @@ -855,7 +858,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state) return 0; } -static int sdhci_pci_resume (struct pci_dev *pdev) +static int sdhci_pci_resume(struct pci_dev *pdev) { struct sdhci_pci_chip *chip; struct sdhci_pci_slot *slot; @@ -877,7 +880,7 @@ static int sdhci_pci_resume (struct pci_dev *pdev) return ret; } - for (i = 0;i < chip->num_slots;i++) { + for (i = 0; i < chip->num_slots; i++) { slot = chip->slots[i]; if (!slot) continue; @@ -1059,7 +1062,7 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev, } chip->pdev = pdev; - chip->fixes = (const struct sdhci_pci_fixes*)ent->driver_data; + chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data; if (chip->fixes) chip->quirks = chip->fixes->quirks; chip->num_slots = slots; @@ -1074,10 +1077,10 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev, slots = chip->num_slots; /* Quirk may have changed this */ - for (i = 0;i < slots;i++) { + for (i = 0; i < slots; i++) { slot = sdhci_pci_probe_slot(pdev, chip, first_bar + i); if (IS_ERR(slot)) { - for (i--;i >= 0;i--) + for (i--; i >= 0; i--) sdhci_pci_remove_slot(chip->slots[i]); ret = PTR_ERR(slot); goto free; @@ -1105,7 +1108,7 @@ static void __devexit sdhci_pci_remove(struct pci_dev *pdev) chip = pci_get_drvdata(pdev); if (chip) { - for (i = 0;i < chip->num_slots; i++) + for (i = 0; i < chip->num_slots; i++) sdhci_pci_remove_slot(chip->slots[i]); pci_set_drvdata(pdev, NULL); @@ -1116,9 +1119,9 @@ static void __devexit sdhci_pci_remove(struct pci_dev *pdev) } static struct pci_driver sdhci_driver = { - .name = "sdhci-pci", + .name = "sdhci-pci", .id_table = pci_ids, - .probe = sdhci_pci_probe, + .probe = sdhci_pci_probe, .remove = __devexit_p(sdhci_pci_remove), .suspend = sdhci_pci_suspend, .resume = sdhci_pci_resume, diff --git a/drivers/mmc/host/sdhci-pxa.c b/drivers/mmc/host/sdhci-pxa.c index 5a61208cbc6..089c9a68b7b 100644 --- a/drivers/mmc/host/sdhci-pxa.c +++ b/drivers/mmc/host/sdhci-pxa.c @@ -69,7 +69,45 @@ static void set_clock(struct sdhci_host *host, unsigned int clock) } } +static int set_uhs_signaling(struct sdhci_host *host, unsigned int uhs) +{ + u16 ctrl_2; + + /* + * Set V18_EN -- UHS modes do not work without this. 
+ * does not change signaling voltage + */ + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + + /* Select Bus Speed Mode for host */ + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; + switch (uhs) { + case MMC_TIMING_UHS_SDR12: + ctrl_2 |= SDHCI_CTRL_UHS_SDR12; + break; + case MMC_TIMING_UHS_SDR25: + ctrl_2 |= SDHCI_CTRL_UHS_SDR25; + break; + case MMC_TIMING_UHS_SDR50: + ctrl_2 |= SDHCI_CTRL_UHS_SDR50 | SDHCI_CTRL_VDD_180; + break; + case MMC_TIMING_UHS_SDR104: + ctrl_2 |= SDHCI_CTRL_UHS_SDR104 | SDHCI_CTRL_VDD_180; + break; + case MMC_TIMING_UHS_DDR50: + ctrl_2 |= SDHCI_CTRL_UHS_DDR50 | SDHCI_CTRL_VDD_180; + break; + } + + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); + pr_debug("%s:%s uhs = %d, ctrl_2 = %04X\n", + __func__, mmc_hostname(host->mmc), uhs, ctrl_2); + + return 0; +} + static struct sdhci_ops sdhci_pxa_ops = { + .set_uhs_signaling = set_uhs_signaling, .set_clock = set_clock, }; @@ -136,11 +174,19 @@ static int __devinit sdhci_pxa_probe(struct platform_device *pdev) host->hw_name = "MMC"; host->ops = &sdhci_pxa_ops; host->irq = irq; - host->quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; + host->quirks = SDHCI_QUIRK_BROKEN_ADMA + | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL + | SDHCI_QUIRK_32BIT_DMA_ADDR + | SDHCI_QUIRK_32BIT_DMA_SIZE + | SDHCI_QUIRK_32BIT_ADMA_SIZE + | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC; if (pdata->quirks) host->quirks |= pdata->quirks; + /* enable 1/8V DDR capable */ + host->mmc->caps |= MMC_CAP_1_8V_DDR; + /* If slot design supports 8 bit data, indicate this to MMC. */ if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT) host->mmc->caps |= MMC_CAP_8_BIT_DATA; diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index f7e1f964395..343c97edba3 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -184,6 +184,8 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host, clk_enable(clk); pltfm_host->clk = clk; + host->mmc->pm_caps = plat->pm_flags; + if (plat->is_8bit) host->mmc->caps |= MMC_CAP_8_BIT_DATA; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 5d20661bc35..58d5436ff64 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -38,13 +38,16 @@ #define SDHCI_USE_LEDS_CLASS #endif +#define MAX_TUNING_LOOP 40 + static unsigned int debug_quirks = 0; -static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *); static void sdhci_finish_data(struct sdhci_host *); static void sdhci_send_command(struct sdhci_host *, struct mmc_command *); static void sdhci_finish_command(struct sdhci_host *); +static int sdhci_execute_tuning(struct mmc_host *mmc); +static void sdhci_tuning_timer(unsigned long data); static void sdhci_dumpregs(struct sdhci_host *host) { @@ -84,6 +87,8 @@ static void sdhci_dumpregs(struct sdhci_host *host) printk(KERN_DEBUG DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n", sdhci_readw(host, SDHCI_COMMAND), sdhci_readl(host, SDHCI_MAX_CURRENT)); + printk(KERN_DEBUG DRIVER_NAME ": Host ctl2: 0x%08x\n", + sdhci_readw(host, SDHCI_HOST_CONTROL2)); if (host->flags & SDHCI_USE_ADMA) printk(KERN_DEBUG DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", @@ -157,6 +162,9 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask) if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) ier = sdhci_readl(host, SDHCI_INT_ENABLE); + if (host->ops->platform_reset_enter) + host->ops->platform_reset_enter(host, mask); + sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); if (mask & SDHCI_RESET_ALL) @@ -177,6 +185,9 @@ static void sdhci_reset(struct sdhci_host 
*host, u8 mask) mdelay(1); } + if (host->ops->platform_reset_exit) + host->ops->platform_reset_exit(host, mask); + if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier); } @@ -591,9 +602,10 @@ static void sdhci_adma_table_post(struct sdhci_host *host, data->sg_len, direction); } -static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data) +static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd) { u8 count; + struct mmc_data *data = cmd->data; unsigned target_timeout, current_timeout; /* @@ -605,9 +617,16 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data) if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) return 0xE; + /* Unspecified timeout, assume max */ + if (!data && !cmd->cmd_timeout_ms) + return 0xE; + /* timeout in us */ - target_timeout = data->timeout_ns / 1000 + - data->timeout_clks / host->clock; + if (!data) + target_timeout = cmd->cmd_timeout_ms * 1000; + else + target_timeout = data->timeout_ns / 1000 + + data->timeout_clks / host->clock; if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) host->timeout_clk = host->clock / 1000; @@ -622,6 +641,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data) * => * (1) / (2) > 2^6 */ + BUG_ON(!host->timeout_clk); count = 0; current_timeout = (1 << 13) * 1000 / host->timeout_clk; while (current_timeout < target_timeout) { @@ -632,8 +652,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data) } if (count >= 0xF) { - printk(KERN_WARNING "%s: Too large timeout requested!\n", - mmc_hostname(host->mmc)); + printk(KERN_WARNING "%s: Too large timeout requested for CMD%d!\n", + mmc_hostname(host->mmc), cmd->opcode); count = 0xE; } @@ -651,15 +671,21 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host) sdhci_clear_set_irqs(host, dma_irqs, pio_irqs); } -static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) +static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) { u8 count; u8 ctrl; + struct mmc_data *data = cmd->data; int ret; WARN_ON(host->data); - if (data == NULL) + if (data || (cmd->flags & MMC_RSP_BUSY)) { + count = sdhci_calc_timeout(host, cmd); + sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); + } + + if (!data) return; /* Sanity checks */ @@ -669,9 +695,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) host->data = data; host->data_early = 0; - - count = sdhci_calc_timeout(host, data); - sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); + host->data->bytes_xfered = 0; if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) host->flags |= SDHCI_REQ_USE_DMA; @@ -807,15 +831,17 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) sdhci_set_transfer_irqs(host); - /* We do not handle DMA boundaries, so set it to max (512 KiB) */ - sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, data->blksz), SDHCI_BLOCK_SIZE); + /* Set the DMA boundary value and block size */ + sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG, + data->blksz), SDHCI_BLOCK_SIZE); sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); } static void sdhci_set_transfer_mode(struct sdhci_host *host, - struct mmc_data *data) + struct mmc_command *cmd) { u16 mode; + struct mmc_data *data = cmd->data; if (data == NULL) return; @@ -823,12 +849,20 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host, WARN_ON(!host->data); mode = SDHCI_TRNS_BLK_CNT_EN; - if (data->blocks > 1) { 
- if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) - mode |= SDHCI_TRNS_MULTI | SDHCI_TRNS_ACMD12; - else - mode |= SDHCI_TRNS_MULTI; + if (mmc_op_multi(cmd->opcode) || data->blocks > 1) { + mode |= SDHCI_TRNS_MULTI; + /* + * If we are sending CMD23, CMD12 never gets sent + * on successful completion (so no Auto-CMD12). + */ + if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) + mode |= SDHCI_TRNS_AUTO_CMD12; + else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) { + mode |= SDHCI_TRNS_AUTO_CMD23; + sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2); + } } + if (data->flags & MMC_DATA_READ) mode |= SDHCI_TRNS_READ; if (host->flags & SDHCI_REQ_USE_DMA) @@ -868,7 +902,15 @@ static void sdhci_finish_data(struct sdhci_host *host) else data->bytes_xfered = data->blksz * data->blocks; - if (data->stop) { + /* + * Need to send CMD12 if - + * a) open-ended multiblock transfer (no CMD23) + * b) error in multiblock transfer + */ + if (data->stop && + (data->error || + !host->mrq->sbc)) { + /* * The controller needs a reset of internal state machines * upon error conditions. @@ -920,11 +962,11 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) host->cmd = cmd; - sdhci_prepare_data(host, cmd->data); + sdhci_prepare_data(host, cmd); sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT); - sdhci_set_transfer_mode(host, cmd->data); + sdhci_set_transfer_mode(host, cmd); if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) { printk(KERN_ERR "%s: Unsupported response type!\n", @@ -947,7 +989,9 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) flags |= SDHCI_CMD_CRC; if (cmd->flags & MMC_RSP_OPCODE) flags |= SDHCI_CMD_INDEX; - if (cmd->data) + + /* CMD19 is special in that the Data Present Select should be set */ + if (cmd->data || (cmd->opcode == MMC_SEND_TUNING_BLOCK)) flags |= SDHCI_CMD_DATA; sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND); @@ -977,19 +1021,27 @@ static void sdhci_finish_command(struct sdhci_host *host) host->cmd->error = 0; - if (host->data && host->data_early) - sdhci_finish_data(host); + /* Finished CMD23, now send actual command. */ + if (host->cmd == host->mrq->sbc) { + host->cmd = NULL; + sdhci_send_command(host, host->mrq->cmd); + } else { - if (!host->cmd->data) - tasklet_schedule(&host->finish_tasklet); + /* Processed actual command. */ + if (host->data && host->data_early) + sdhci_finish_data(host); - host->cmd = NULL; + if (!host->cmd->data) + tasklet_schedule(&host->finish_tasklet); + + host->cmd = NULL; + } } static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) { - int div; - u16 clk; + int div = 0; /* Initialized for compiler warning */ + u16 clk = 0; unsigned long timeout; if (clock == host->clock) @@ -1007,14 +1059,45 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) goto out; if (host->version >= SDHCI_SPEC_300) { - /* Version 3.00 divisors must be a multiple of 2. */ - if (host->max_clk <= clock) - div = 1; - else { - for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; div += 2) { - if ((host->max_clk / div) <= clock) - break; + /* + * Check if the Host Controller supports Programmable Clock + * Mode. + */ + if (host->clk_mul) { + u16 ctrl; + + /* + * We need to figure out whether the Host Driver needs + * to select Programmable Clock Mode, or the value can + * be set automatically by the Host Controller based on + * the Preset Value registers. 
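The reworked sdhci_set_transfer_mode() above chooses between Auto-CMD12 and Auto-CMD23 depending on whether the request carries a CMD23 (mrq->sbc) and on which feature the controller supports. A sketch of that decision as a plain function, using the SDHCI_TRNS_* values from the header update later in this patch (the helper and its boolean parameters are illustrative, not the driver's API):

#include <stdbool.h>
#include <stdio.h>

#define TRNS_BLK_CNT_EN  0x02
#define TRNS_AUTO_CMD12  0x04
#define TRNS_AUTO_CMD23  0x08
#define TRNS_MULTI       0x20

static unsigned transfer_mode(bool multi, bool have_sbc,
			      bool auto_cmd12, bool auto_cmd23)
{
	unsigned mode = TRNS_BLK_CNT_EN;

	if (!multi)
		return mode;

	mode |= TRNS_MULTI;
	if (!have_sbc && auto_cmd12)
		mode |= TRNS_AUTO_CMD12;	/* open-ended: controller issues CMD12 */
	else if (have_sbc && auto_cmd23)
		mode |= TRNS_AUTO_CMD23;	/* pre-set count: controller issues CMD23 */
	return mode;
}

int main(void)
{
	printf("0x%02x\n", transfer_mode(true, true, true, true));	/* 0x2a */
	printf("0x%02x\n", transfer_mode(true, false, true, false));	/* 0x26 */
	return 0;
}

When Auto-CMD23 is used the block count argument also has to land in SDHCI_ARGUMENT2, which is why the hunk writes host->mrq->sbc->arg there.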
+ */ + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + if (!(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) { + for (div = 1; div <= 1024; div++) { + if (((host->max_clk * host->clk_mul) / + div) <= clock) + break; + } + /* + * Set Programmable Clock Mode in the Clock + * Control register. + */ + clk = SDHCI_PROG_CLOCK_MODE; + div--; } + } else { + /* Version 3.00 divisors must be a multiple of 2. */ + if (host->max_clk <= clock) + div = 1; + else { + for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; + div += 2) { + if ((host->max_clk / div) <= clock) + break; + } + } + div >>= 1; } } else { /* Version 2.00 divisors must be a power of 2. */ @@ -1022,10 +1105,10 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) if ((host->max_clk / div) <= clock) break; } + div >>= 1; } - div >>= 1; - clk = (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT; + clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT; clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN) << SDHCI_DIVIDER_HI_SHIFT; clk |= SDHCI_CLOCK_INT_EN; @@ -1131,7 +1214,12 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) #ifndef SDHCI_USE_LEDS_CLASS sdhci_activate_led(host); #endif - if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) { + + /* + * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED + * requests if Auto-CMD12 is enabled. + */ + if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) { if (mrq->stop) { mrq->data->stop = NULL; mrq->stop = NULL; @@ -1150,8 +1238,30 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) if (!present || host->flags & SDHCI_DEVICE_DEAD) { host->mrq->cmd->error = -ENOMEDIUM; tasklet_schedule(&host->finish_tasklet); - } else - sdhci_send_command(host, mrq->cmd); + } else { + u32 present_state; + + present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); + /* + * Check if the re-tuning timer has already expired and there + * is no on-going data transfer. If so, we need to execute + * tuning procedure before sending command. + */ + if ((host->flags & SDHCI_NEEDS_RETUNING) && + !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) { + spin_unlock_irqrestore(&host->lock, flags); + sdhci_execute_tuning(mmc); + spin_lock_irqsave(&host->lock, flags); + + /* Restore original mmc_request structure */ + host->mrq = mrq; + } + + if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23)) + sdhci_send_command(host, mrq->sbc); + else + sdhci_send_command(host, mrq->cmd); + } mmiowb(); spin_unlock_irqrestore(&host->lock, flags); @@ -1222,7 +1332,84 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) else ctrl &= ~SDHCI_CTRL_HISPD; - sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); + if (host->version >= SDHCI_SPEC_300) { + u16 clk, ctrl_2; + unsigned int clock; + + /* In case of UHS-I modes, set High Speed Enable */ + if ((ios->timing == MMC_TIMING_UHS_SDR50) || + (ios->timing == MMC_TIMING_UHS_SDR104) || + (ios->timing == MMC_TIMING_UHS_DDR50) || + (ios->timing == MMC_TIMING_UHS_SDR25) || + (ios->timing == MMC_TIMING_UHS_SDR12)) + ctrl |= SDHCI_CTRL_HISPD; + + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + if (!(ctrl_2 & SDHCI_CTRL_PRESET_VAL_ENABLE)) { + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); + /* + * We only need to set Driver Strength if the + * preset value enable is not set. 
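The v3.00 branch added to sdhci_set_clock() above searches for a divisor in one of two ways: with a clock multiplier it tries div = 1..1024 against max_clk * clk_mul (programmable clock mode), otherwise it walks the even divisors of the base clock. A standalone sketch of that search, assuming SDHCI_MAX_DIV_SPEC_300 is 2046 and ignoring the preset-value shortcut; the real code then encodes the result as div - 1 (programmable mode) or div >> 1 (divided mode) before writing SDHCI_CLOCK_CONTROL:

#include <stdio.h>

#define MAX_DIV_SPEC_300 2046		/* assumed value of SDHCI_MAX_DIV_SPEC_300 */

/* clk_mul mirrors host->clk_mul; 0 means no programmable clock support */
static unsigned pick_div(unsigned max_clk, unsigned clk_mul, unsigned target)
{
	unsigned div;

	if (clk_mul) {
		/* programmable clock: f = (max_clk * clk_mul) / div */
		for (div = 1; div <= 1024; div++)
			if ((max_clk * clk_mul) / div <= target)
				break;
		return div;
	}

	/* divided clock: f = max_clk / div, div = 1 or an even value */
	if (max_clk <= target)
		return 1;
	for (div = 2; div < MAX_DIV_SPEC_300; div += 2)
		if (max_clk / div <= target)
			break;
	return div;
}

int main(void)
{
	/* 100 MHz base clock, no multiplier, 25 MHz requested -> div 4 */
	unsigned div = pick_div(100000000, 0, 25000000);
	printf("div=%u -> %u Hz\n", div, 100000000 / div);
	return 0;
}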
+ */ + ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK; + if (ios->drv_type == MMC_SET_DRIVER_TYPE_A) + ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A; + else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C) + ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C; + + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); + } else { + /* + * According to SDHC Spec v3.00, if the Preset Value + * Enable in the Host Control 2 register is set, we + * need to reset SD Clock Enable before changing High + * Speed Enable to avoid generating clock gliches. + */ + + /* Reset SD Clock Enable */ + clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + clk &= ~SDHCI_CLOCK_CARD_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); + + /* Re-enable SD Clock */ + clock = host->clock; + host->clock = 0; + sdhci_set_clock(host, clock); + } + + + /* Reset SD Clock Enable */ + clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + clk &= ~SDHCI_CLOCK_CARD_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + + if (host->ops->set_uhs_signaling) + host->ops->set_uhs_signaling(host, ios->timing); + else { + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + /* Select Bus Speed Mode for host */ + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; + if (ios->timing == MMC_TIMING_UHS_SDR12) + ctrl_2 |= SDHCI_CTRL_UHS_SDR12; + else if (ios->timing == MMC_TIMING_UHS_SDR25) + ctrl_2 |= SDHCI_CTRL_UHS_SDR25; + else if (ios->timing == MMC_TIMING_UHS_SDR50) + ctrl_2 |= SDHCI_CTRL_UHS_SDR50; + else if (ios->timing == MMC_TIMING_UHS_SDR104) + ctrl_2 |= SDHCI_CTRL_UHS_SDR104; + else if (ios->timing == MMC_TIMING_UHS_DDR50) + ctrl_2 |= SDHCI_CTRL_UHS_DDR50; + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); + } + + /* Re-enable SD Clock */ + clock = host->clock; + host->clock = 0; + sdhci_set_clock(host, clock); + } else + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); /* * Some (ENE) controllers go apeshit on some ios operation, @@ -1237,14 +1424,11 @@ out: spin_unlock_irqrestore(&host->lock, flags); } -static int sdhci_get_ro(struct mmc_host *mmc) +static int check_ro(struct sdhci_host *host) { - struct sdhci_host *host; unsigned long flags; int is_readonly; - host = mmc_priv(mmc); - spin_lock_irqsave(&host->lock, flags); if (host->flags & SDHCI_DEVICE_DEAD) @@ -1262,6 +1446,29 @@ static int sdhci_get_ro(struct mmc_host *mmc) !is_readonly : is_readonly; } +#define SAMPLE_COUNT 5 + +static int sdhci_get_ro(struct mmc_host *mmc) +{ + struct sdhci_host *host; + int i, ro_count; + + host = mmc_priv(mmc); + + if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT)) + return check_ro(host); + + ro_count = 0; + for (i = 0; i < SAMPLE_COUNT; i++) { + if (check_ro(host)) { + if (++ro_count > SAMPLE_COUNT / 2) + return 1; + } + msleep(30); + } + return 0; +} + static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) { struct sdhci_host *host; @@ -1284,11 +1491,322 @@ out: spin_unlock_irqrestore(&host->lock, flags); } +static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + struct sdhci_host *host; + u8 pwr; + u16 clk, ctrl; + u32 present_state; + + host = mmc_priv(mmc); + + /* + * Signal Voltage Switching is only applicable for Host Controllers + * v3.00 and above. + */ + if (host->version < SDHCI_SPEC_300) + return 0; + + /* + * We first check whether the request is to set signalling voltage + * to 3.3V. If so, we change the voltage to 3.3V and return quickly. 
+ */ + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) { + /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ + ctrl &= ~SDHCI_CTRL_VDD_180; + sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); + + /* Wait for 5ms */ + usleep_range(5000, 5500); + + /* 3.3V regulator output should be stable within 5 ms */ + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + if (!(ctrl & SDHCI_CTRL_VDD_180)) + return 0; + else { + printk(KERN_INFO DRIVER_NAME ": Switching to 3.3V " + "signalling voltage failed\n"); + return -EIO; + } + } else if (!(ctrl & SDHCI_CTRL_VDD_180) && + (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)) { + /* Stop SDCLK */ + clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + clk &= ~SDHCI_CLOCK_CARD_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + + /* Check whether DAT[3:0] is 0000 */ + present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); + if (!((present_state & SDHCI_DATA_LVL_MASK) >> + SDHCI_DATA_LVL_SHIFT)) { + /* + * Enable 1.8V Signal Enable in the Host Control2 + * register + */ + ctrl |= SDHCI_CTRL_VDD_180; + sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); + + /* Wait for 5ms */ + usleep_range(5000, 5500); + + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + if (ctrl & SDHCI_CTRL_VDD_180) { + /* Provide SDCLK again and wait for 1ms*/ + clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + clk |= SDHCI_CLOCK_CARD_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + usleep_range(1000, 1500); + + /* + * If DAT[3:0] level is 1111b, then the card + * was successfully switched to 1.8V signaling. + */ + present_state = sdhci_readl(host, + SDHCI_PRESENT_STATE); + if ((present_state & SDHCI_DATA_LVL_MASK) == + SDHCI_DATA_LVL_MASK) + return 0; + } + } + + /* + * If we are here, that means the switch to 1.8V signaling + * failed. We power cycle the card, and retry initialization + * sequence by setting S18R to 0. + */ + pwr = sdhci_readb(host, SDHCI_POWER_CONTROL); + pwr &= ~SDHCI_POWER_ON; + sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); + + /* Wait for 1ms as per the spec */ + usleep_range(1000, 1500); + pwr |= SDHCI_POWER_ON; + sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); + + printk(KERN_INFO DRIVER_NAME ": Switching to 1.8V signalling " + "voltage failed, retrying with S18R set to 0\n"); + return -EAGAIN; + } else + /* No signal voltage switch required */ + return 0; +} + +static int sdhci_execute_tuning(struct mmc_host *mmc) +{ + struct sdhci_host *host; + u16 ctrl; + u32 ier; + int tuning_loop_counter = MAX_TUNING_LOOP; + unsigned long timeout; + int err = 0; + + host = mmc_priv(mmc); + + disable_irq(host->irq); + spin_lock(&host->lock); + + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + + /* + * Host Controller needs tuning only in case of SDR104 mode + * and for SDR50 mode when Use Tuning for SDR50 is set in + * Capabilities register. + */ + if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) || + (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) && + (host->flags & SDHCI_SDR50_NEEDS_TUNING))) + ctrl |= SDHCI_CTRL_EXEC_TUNING; + else { + spin_unlock(&host->lock); + enable_irq(host->irq); + return 0; + } + + sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); + + /* + * As per the Host Controller spec v3.00, tuning command + * generates Buffer Read Ready interrupt, so enable that. + * + * Note: The spec clearly says that when tuning sequence + * is being performed, the controller does not generate + * interrupts other than Buffer Read Ready interrupt. 
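The 1.8V branch above gates the switch on the DAT[3:0] line levels: they must read 0000b with the clock stopped before 1.8V is enabled, and 1111b once the clock is restarted. A tiny helper pair showing just those two checks (local names; DAT[3:0] sits in bits 23:20 of the Present State register per the SD Host Controller spec):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DATA_LVL_SHIFT 20
#define DATA_LVL_MASK  (0xFu << DATA_LVL_SHIFT)

static bool dat_lines_low(uint32_t present_state)	/* precondition for 1.8V enable */
{
	return (present_state & DATA_LVL_MASK) == 0;
}

static bool dat_lines_high(uint32_t present_state)	/* card now signalling at 1.8V */
{
	return (present_state & DATA_LVL_MASK) == DATA_LVL_MASK;
}

int main(void)
{
	printf("%d %d\n", dat_lines_low(0x00000000), dat_lines_high(0x00F00000));	/* 1 1 */
	return 0;
}

If the second check fails, the code power-cycles the card and retries the whole initialization with S18R cleared, as the comment above describes.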
But + * to make sure we don't hit a controller bug, we _only_ + * enable Buffer Read Ready interrupt here. + */ + ier = sdhci_readl(host, SDHCI_INT_ENABLE); + sdhci_clear_set_irqs(host, ier, SDHCI_INT_DATA_AVAIL); + + /* + * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number + * of loops reaches 40 times or a timeout of 150ms occurs. + */ + timeout = 150; + do { + struct mmc_command cmd = {0}; + struct mmc_request mrq = {0}; + + if (!tuning_loop_counter && !timeout) + break; + + cmd.opcode = MMC_SEND_TUNING_BLOCK; + cmd.arg = 0; + cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + cmd.retries = 0; + cmd.data = NULL; + cmd.error = 0; + + mrq.cmd = &cmd; + host->mrq = &mrq; + + /* + * In response to CMD19, the card sends 64 bytes of tuning + * block to the Host Controller. So we set the block size + * to 64 here. + */ + sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE); + + /* + * The tuning block is sent by the card to the host controller. + * So we set the TRNS_READ bit in the Transfer Mode register. + * This also takes care of setting DMA Enable and Multi Block + * Select in the same register to 0. + */ + sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE); + + sdhci_send_command(host, &cmd); + + host->cmd = NULL; + host->mrq = NULL; + + spin_unlock(&host->lock); + enable_irq(host->irq); + + /* Wait for Buffer Read Ready interrupt */ + wait_event_interruptible_timeout(host->buf_ready_int, + (host->tuning_done == 1), + msecs_to_jiffies(50)); + disable_irq(host->irq); + spin_lock(&host->lock); + + if (!host->tuning_done) { + printk(KERN_INFO DRIVER_NAME ": Timeout waiting for " + "Buffer Read Ready interrupt during tuning " + "procedure, falling back to fixed sampling " + "clock\n"); + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + ctrl &= ~SDHCI_CTRL_TUNED_CLK; + ctrl &= ~SDHCI_CTRL_EXEC_TUNING; + sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); + + err = -EIO; + goto out; + } + + host->tuning_done = 0; + + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + tuning_loop_counter--; + timeout--; + mdelay(1); + } while (ctrl & SDHCI_CTRL_EXEC_TUNING); + + /* + * The Host Driver has exhausted the maximum number of loops allowed, + * so use fixed sampling frequency. + */ + if (!tuning_loop_counter || !timeout) { + ctrl &= ~SDHCI_CTRL_TUNED_CLK; + sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); + } else { + if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) { + printk(KERN_INFO DRIVER_NAME ": Tuning procedure" + " failed, falling back to fixed sampling" + " clock\n"); + err = -EIO; + } + } + +out: + /* + * If this is the very first time we are here, we start the retuning + * timer. Since only during the first time, SDHCI_NEEDS_RETUNING + * flag won't be set, we check this condition before actually starting + * the timer. + */ + if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count && + (host->tuning_mode == SDHCI_TUNING_MODE_1)) { + mod_timer(&host->tuning_timer, jiffies + + host->tuning_count * HZ); + /* Tuning mode 1 limits the maximum data length to 4MB */ + mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size; + } else { + host->flags &= ~SDHCI_NEEDS_RETUNING; + /* Reload the new initial value for timer */ + if (host->tuning_mode == SDHCI_TUNING_MODE_1) + mod_timer(&host->tuning_timer, jiffies + + host->tuning_count * HZ); + } + + /* + * In case tuning fails, host controllers which support re-tuning can + * try tuning again at a later time, when the re-tuning timer expires. + * So for these controllers, we return 0. 
Since there might be other + * controllers who do not have this capability, we return error for + * them. + */ + if (err && host->tuning_count && + host->tuning_mode == SDHCI_TUNING_MODE_1) + err = 0; + + sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier); + spin_unlock(&host->lock); + enable_irq(host->irq); + + return err; +} + +static void sdhci_enable_preset_value(struct mmc_host *mmc, bool enable) +{ + struct sdhci_host *host; + u16 ctrl; + unsigned long flags; + + host = mmc_priv(mmc); + + /* Host Controller v3.00 defines preset value registers */ + if (host->version < SDHCI_SPEC_300) + return; + + spin_lock_irqsave(&host->lock, flags); + + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + + /* + * We only enable or disable Preset Value if they are not already + * enabled or disabled respectively. Otherwise, we bail out. + */ + if (enable && !(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) { + ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; + sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); + } else if (!enable && (ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) { + ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; + sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); + } + + spin_unlock_irqrestore(&host->lock, flags); +} + static const struct mmc_host_ops sdhci_ops = { .request = sdhci_request, .set_ios = sdhci_set_ios, .get_ro = sdhci_get_ro, .enable_sdio_irq = sdhci_enable_sdio_irq, + .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, + .execute_tuning = sdhci_execute_tuning, + .enable_preset_value = sdhci_enable_preset_value, }; /*****************************************************************************\ @@ -1345,6 +1863,9 @@ static void sdhci_tasklet_finish(unsigned long param) del_timer(&host->timer); + if (host->version >= SDHCI_SPEC_300) + del_timer(&host->tuning_timer); + mrq = host->mrq; /* @@ -1418,6 +1939,20 @@ static void sdhci_timeout_timer(unsigned long data) spin_unlock_irqrestore(&host->lock, flags); } +static void sdhci_tuning_timer(unsigned long data) +{ + struct sdhci_host *host; + unsigned long flags; + + host = (struct sdhci_host *)data; + + spin_lock_irqsave(&host->lock, flags); + + host->flags |= SDHCI_NEEDS_RETUNING; + + spin_unlock_irqrestore(&host->lock, flags); +} + /*****************************************************************************\ * * * Interrupt handling * @@ -1506,6 +2041,16 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) { BUG_ON(intmask == 0); + /* CMD19 generates _only_ Buffer Read Ready interrupt */ + if (intmask & SDHCI_INT_DATA_AVAIL) { + if (SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) == + MMC_SEND_TUNING_BLOCK) { + host->tuning_done = 1; + wake_up(&host->buf_ready_int); + return; + } + } + if (!host->data) { /* * The "data complete" interrupt is also used to @@ -1551,10 +2096,28 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) * We currently don't do anything fancy with DMA * boundaries, but as we can't disable the feature * we need to at least restart the transfer. + * + * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS) + * should return a valid address to continue from, but as + * some controllers are faulty, don't trust them. */ - if (intmask & SDHCI_INT_DMA_END) - sdhci_writel(host, sdhci_readl(host, SDHCI_DMA_ADDRESS), - SDHCI_DMA_ADDRESS); + if (intmask & SDHCI_INT_DMA_END) { + u32 dmastart, dmanow; + dmastart = sg_dma_address(host->data->sg); + dmanow = dmastart + host->data->bytes_xfered; + /* + * Force update to the next DMA block boundary. 
+ */ + dmanow = (dmanow & + ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + + SDHCI_DEFAULT_BOUNDARY_SIZE; + host->data->bytes_xfered = dmanow - dmastart; + DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes," + " next 0x%08x\n", + mmc_hostname(host->mmc), dmastart, + host->data->bytes_xfered, dmanow); + sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS); + } if (intmask & SDHCI_INT_DATA_END) { if (host->cmd) { @@ -1664,6 +2227,14 @@ int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state) sdhci_disable_card_detection(host); + /* Disable tuning since we are suspending */ + if (host->version >= SDHCI_SPEC_300 && host->tuning_count && + host->tuning_mode == SDHCI_TUNING_MODE_1) { + host->flags &= ~SDHCI_NEEDS_RETUNING; + mod_timer(&host->tuning_timer, jiffies + + host->tuning_count * HZ); + } + ret = mmc_suspend_host(host->mmc); if (ret) return ret; @@ -1705,6 +2276,11 @@ int sdhci_resume_host(struct sdhci_host *host) ret = mmc_resume_host(host->mmc); sdhci_enable_card_detection(host); + /* Set the re-tuning expiration flag */ + if ((host->version >= SDHCI_SPEC_300) && host->tuning_count && + (host->tuning_mode == SDHCI_TUNING_MODE_1)) + host->flags |= SDHCI_NEEDS_RETUNING; + return ret; } @@ -1751,7 +2327,9 @@ EXPORT_SYMBOL_GPL(sdhci_alloc_host); int sdhci_add_host(struct sdhci_host *host) { struct mmc_host *mmc; - unsigned int caps, ocr_avail; + u32 caps[2]; + u32 max_current_caps; + unsigned int ocr_avail; int ret; WARN_ON(host == NULL); @@ -1774,12 +2352,15 @@ int sdhci_add_host(struct sdhci_host *host) host->version); } - caps = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps : + caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps : sdhci_readl(host, SDHCI_CAPABILITIES); + caps[1] = (host->version >= SDHCI_SPEC_300) ? + sdhci_readl(host, SDHCI_CAPABILITIES_1) : 0; + if (host->quirks & SDHCI_QUIRK_FORCE_DMA) host->flags |= SDHCI_USE_SDMA; - else if (!(caps & SDHCI_CAN_DO_SDMA)) + else if (!(caps[0] & SDHCI_CAN_DO_SDMA)) DBG("Controller doesn't have SDMA capability\n"); else host->flags |= SDHCI_USE_SDMA; @@ -1790,7 +2371,8 @@ int sdhci_add_host(struct sdhci_host *host) host->flags &= ~SDHCI_USE_SDMA; } - if ((host->version >= SDHCI_SPEC_200) && (caps & SDHCI_CAN_DO_ADMA2)) + if ((host->version >= SDHCI_SPEC_200) && + (caps[0] & SDHCI_CAN_DO_ADMA2)) host->flags |= SDHCI_USE_ADMA; if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && @@ -1840,10 +2422,10 @@ int sdhci_add_host(struct sdhci_host *host) } if (host->version >= SDHCI_SPEC_300) - host->max_clk = (caps & SDHCI_CLOCK_V3_BASE_MASK) + host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; else - host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) + host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; host->max_clk *= 1000000; @@ -1859,7 +2441,7 @@ int sdhci_add_host(struct sdhci_host *host) } host->timeout_clk = - (caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; + (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; if (host->timeout_clk == 0) { if (host->ops->get_timeout_clock) { host->timeout_clk = host->ops->get_timeout_clock(host); @@ -1871,22 +2453,55 @@ int sdhci_add_host(struct sdhci_host *host) return -ENODEV; } } - if (caps & SDHCI_TIMEOUT_CLK_UNIT) + if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT) host->timeout_clk *= 1000; /* + * In case of Host Controller v3.00, find out whether clock + * multiplier is supported. 
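The DMA-restart arithmetic above rounds the current position up to the next boundary rather than trusting the address register on faulty controllers. Standalone illustration, assuming the default boundary is still the 512 KiB maximum mentioned in the removed comment:

#include <stdio.h>

#define BOUNDARY_SIZE (512u * 1024)	/* must be a power of two */

static unsigned next_boundary(unsigned dmastart, unsigned bytes_xfered)
{
	unsigned dmanow = dmastart + bytes_xfered;

	/* round down to the containing boundary, then step to the next one */
	return (dmanow & ~(BOUNDARY_SIZE - 1)) + BOUNDARY_SIZE;
}

int main(void)
{
	/* transfer started at 0x10010000 and had moved 0x3000 bytes */
	printf("restart at 0x%08x\n", next_boundary(0x10010000u, 0x3000u));	/* 0x10080000 */
	return 0;
}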
+ */ + host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >> + SDHCI_CLOCK_MUL_SHIFT; + + /* + * In case the value in Clock Multiplier is 0, then programmable + * clock mode is not supported, otherwise the actual clock + * multiplier is one more than the value of Clock Multiplier + * in the Capabilities Register. + */ + if (host->clk_mul) + host->clk_mul += 1; + + /* * Set host parameters. */ mmc->ops = &sdhci_ops; + mmc->f_max = host->max_clk; if (host->ops->get_min_clock) mmc->f_min = host->ops->get_min_clock(host); - else if (host->version >= SDHCI_SPEC_300) - mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; - else + else if (host->version >= SDHCI_SPEC_300) { + if (host->clk_mul) { + mmc->f_min = (host->max_clk * host->clk_mul) / 1024; + mmc->f_max = host->max_clk * host->clk_mul; + } else + mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; + } else mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; - mmc->f_max = host->max_clk; - mmc->caps |= MMC_CAP_SDIO_IRQ; + mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23; + + if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) + host->flags |= SDHCI_AUTO_CMD12; + + /* Auto-CMD23 stuff only works in ADMA or PIO. */ + if ((host->version >= SDHCI_SPEC_300) && + ((host->flags & SDHCI_USE_ADMA) || + !(host->flags & SDHCI_USE_SDMA))) { + host->flags |= SDHCI_AUTO_CMD23; + DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc)); + } else { + DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc)); + } /* * A controller may support 8-bit width, but the board itself @@ -1898,21 +2513,113 @@ int sdhci_add_host(struct sdhci_host *host) if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) mmc->caps |= MMC_CAP_4_BIT_DATA; - if (caps & SDHCI_CAN_DO_HISPD) + if (caps[0] & SDHCI_CAN_DO_HISPD) mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && mmc_card_is_removable(mmc)) mmc->caps |= MMC_CAP_NEEDS_POLL; + /* UHS-I mode(s) supported by the host controller. */ + if (host->version >= SDHCI_SPEC_300) + mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; + + /* SDR104 supports also implies SDR50 support */ + if (caps[1] & SDHCI_SUPPORT_SDR104) + mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50; + else if (caps[1] & SDHCI_SUPPORT_SDR50) + mmc->caps |= MMC_CAP_UHS_SDR50; + + if (caps[1] & SDHCI_SUPPORT_DDR50) + mmc->caps |= MMC_CAP_UHS_DDR50; + + /* Does the host needs tuning for SDR50? */ + if (caps[1] & SDHCI_USE_SDR50_TUNING) + host->flags |= SDHCI_SDR50_NEEDS_TUNING; + + /* Driver Type(s) (A, C, D) supported by the host */ + if (caps[1] & SDHCI_DRIVER_TYPE_A) + mmc->caps |= MMC_CAP_DRIVER_TYPE_A; + if (caps[1] & SDHCI_DRIVER_TYPE_C) + mmc->caps |= MMC_CAP_DRIVER_TYPE_C; + if (caps[1] & SDHCI_DRIVER_TYPE_D) + mmc->caps |= MMC_CAP_DRIVER_TYPE_D; + + /* Initial value for re-tuning timer count */ + host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >> + SDHCI_RETUNING_TIMER_COUNT_SHIFT; + + /* + * In case Re-tuning Timer is not disabled, the actual value of + * re-tuning timer will be 2 ^ (n - 1). + */ + if (host->tuning_count) + host->tuning_count = 1 << (host->tuning_count - 1); + + /* Re-tuning mode supported by the Host Controller */ + host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >> + SDHCI_RETUNING_MODE_SHIFT; + ocr_avail = 0; - if (caps & SDHCI_CAN_VDD_330) + /* + * According to SD Host Controller spec v3.00, if the Host System + * can afford more than 150mA, Host Driver should set XPC to 1. 
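Note the two-step decode of the re-tuning fields just above: the raw Re-Tuning Timer Count is an exponent, so a register value of n means the timer fires every 2^(n-1) seconds, and n = 0 disables it. A minimal sketch of that conversion, with hypothetical field values:

#include <stdio.h>

/* Re-Tuning Timer Count (CAPABILITIES_1 bits 11:8) -> interval in seconds. */
static unsigned int retuning_interval_secs(unsigned int count_field)
{
	return count_field ? 1u << (count_field - 1) : 0;
}

int main(void)
{
	unsigned int n;

	for (n = 0; n <= 4; n++)	/* hypothetical register values */
		printf("count field %u -> %u s\n", n, retuning_interval_secs(n));
	return 0;
}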
Also + * the value is meaningful only if Voltage Support in the Capabilities + * register is set. The actual current value is 4 times the register + * value. + */ + max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT); + + if (caps[0] & SDHCI_CAN_VDD_330) { + int max_current_330; + ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34; - if (caps & SDHCI_CAN_VDD_300) + + max_current_330 = ((max_current_caps & + SDHCI_MAX_CURRENT_330_MASK) >> + SDHCI_MAX_CURRENT_330_SHIFT) * + SDHCI_MAX_CURRENT_MULTIPLIER; + + if (max_current_330 > 150) + mmc->caps |= MMC_CAP_SET_XPC_330; + } + if (caps[0] & SDHCI_CAN_VDD_300) { + int max_current_300; + ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31; - if (caps & SDHCI_CAN_VDD_180) + + max_current_300 = ((max_current_caps & + SDHCI_MAX_CURRENT_300_MASK) >> + SDHCI_MAX_CURRENT_300_SHIFT) * + SDHCI_MAX_CURRENT_MULTIPLIER; + + if (max_current_300 > 150) + mmc->caps |= MMC_CAP_SET_XPC_300; + } + if (caps[0] & SDHCI_CAN_VDD_180) { + int max_current_180; + ocr_avail |= MMC_VDD_165_195; + max_current_180 = ((max_current_caps & + SDHCI_MAX_CURRENT_180_MASK) >> + SDHCI_MAX_CURRENT_180_SHIFT) * + SDHCI_MAX_CURRENT_MULTIPLIER; + + if (max_current_180 > 150) + mmc->caps |= MMC_CAP_SET_XPC_180; + + /* Maximum current capabilities of the host at 1.8V */ + if (max_current_180 >= 800) + mmc->caps |= MMC_CAP_MAX_CURRENT_800; + else if (max_current_180 >= 600) + mmc->caps |= MMC_CAP_MAX_CURRENT_600; + else if (max_current_180 >= 400) + mmc->caps |= MMC_CAP_MAX_CURRENT_400; + else + mmc->caps |= MMC_CAP_MAX_CURRENT_200; + } + mmc->ocr_avail = ocr_avail; mmc->ocr_avail_sdio = ocr_avail; if (host->ocr_avail_sdio) @@ -1972,7 +2679,7 @@ int sdhci_add_host(struct sdhci_host *host) if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) { mmc->max_blk_size = 2; } else { - mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >> + mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >> SDHCI_MAX_BLOCK_SHIFT; if (mmc->max_blk_size >= 3) { printk(KERN_WARNING "%s: Invalid maximum block size, " @@ -1998,6 +2705,15 @@ int sdhci_add_host(struct sdhci_host *host) setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host); + if (host->version >= SDHCI_SPEC_300) { + init_waitqueue_head(&host->buf_ready_int); + + /* Initialize re-tuning timer */ + init_timer(&host->tuning_timer); + host->tuning_timer.data = (unsigned long)host; + host->tuning_timer.function = sdhci_tuning_timer; + } + ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, mmc_hostname(mmc), host); if (ret) @@ -2091,6 +2807,8 @@ void sdhci_remove_host(struct sdhci_host *host, int dead) free_irq(host->irq, host); del_timer_sync(&host->timer); + if (host->version >= SDHCI_SPEC_300) + del_timer_sync(&host->tuning_timer); tasklet_kill(&host->card_tasklet); tasklet_kill(&host->finish_tasklet); diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 25e8bde600d..745c42fa41e 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -25,6 +25,7 @@ */ #define SDHCI_DMA_ADDRESS 0x00 +#define SDHCI_ARGUMENT2 SDHCI_DMA_ADDRESS #define SDHCI_BLOCK_SIZE 0x04 #define SDHCI_MAKE_BLKSZ(dma, blksz) (((dma & 0x7) << 12) | (blksz & 0xFFF)) @@ -36,7 +37,8 @@ #define SDHCI_TRANSFER_MODE 0x0C #define SDHCI_TRNS_DMA 0x01 #define SDHCI_TRNS_BLK_CNT_EN 0x02 -#define SDHCI_TRNS_ACMD12 0x04 +#define SDHCI_TRNS_AUTO_CMD12 0x04 +#define SDHCI_TRNS_AUTO_CMD23 0x08 #define SDHCI_TRNS_READ 0x10 #define SDHCI_TRNS_MULTI 0x20 @@ -68,8 +70,10 @@ #define SDHCI_DATA_AVAILABLE 0x00000800 #define SDHCI_CARD_PRESENT 0x00010000 #define 
SDHCI_WRITE_PROTECT 0x00080000 +#define SDHCI_DATA_LVL_MASK 0x00F00000 +#define SDHCI_DATA_LVL_SHIFT 20 -#define SDHCI_HOST_CONTROL 0x28 +#define SDHCI_HOST_CONTROL 0x28 #define SDHCI_CTRL_LED 0x01 #define SDHCI_CTRL_4BITBUS 0x02 #define SDHCI_CTRL_HISPD 0x04 @@ -99,6 +103,7 @@ #define SDHCI_DIV_MASK 0xFF #define SDHCI_DIV_MASK_LEN 8 #define SDHCI_DIV_HI_MASK 0x300 +#define SDHCI_PROG_CLOCK_MODE 0x0020 #define SDHCI_CLOCK_CARD_EN 0x0004 #define SDHCI_CLOCK_INT_STABLE 0x0002 #define SDHCI_CLOCK_INT_EN 0x0001 @@ -146,7 +151,22 @@ #define SDHCI_ACMD12_ERR 0x3C -/* 3E-3F reserved */ +#define SDHCI_HOST_CONTROL2 0x3E +#define SDHCI_CTRL_UHS_MASK 0x0007 +#define SDHCI_CTRL_UHS_SDR12 0x0000 +#define SDHCI_CTRL_UHS_SDR25 0x0001 +#define SDHCI_CTRL_UHS_SDR50 0x0002 +#define SDHCI_CTRL_UHS_SDR104 0x0003 +#define SDHCI_CTRL_UHS_DDR50 0x0004 +#define SDHCI_CTRL_VDD_180 0x0008 +#define SDHCI_CTRL_DRV_TYPE_MASK 0x0030 +#define SDHCI_CTRL_DRV_TYPE_B 0x0000 +#define SDHCI_CTRL_DRV_TYPE_A 0x0010 +#define SDHCI_CTRL_DRV_TYPE_C 0x0020 +#define SDHCI_CTRL_DRV_TYPE_D 0x0030 +#define SDHCI_CTRL_EXEC_TUNING 0x0040 +#define SDHCI_CTRL_TUNED_CLK 0x0080 +#define SDHCI_CTRL_PRESET_VAL_ENABLE 0x8000 #define SDHCI_CAPABILITIES 0x40 #define SDHCI_TIMEOUT_CLK_MASK 0x0000003F @@ -167,9 +187,30 @@ #define SDHCI_CAN_VDD_180 0x04000000 #define SDHCI_CAN_64BIT 0x10000000 +#define SDHCI_SUPPORT_SDR50 0x00000001 +#define SDHCI_SUPPORT_SDR104 0x00000002 +#define SDHCI_SUPPORT_DDR50 0x00000004 +#define SDHCI_DRIVER_TYPE_A 0x00000010 +#define SDHCI_DRIVER_TYPE_C 0x00000020 +#define SDHCI_DRIVER_TYPE_D 0x00000040 +#define SDHCI_RETUNING_TIMER_COUNT_MASK 0x00000F00 +#define SDHCI_RETUNING_TIMER_COUNT_SHIFT 8 +#define SDHCI_USE_SDR50_TUNING 0x00002000 +#define SDHCI_RETUNING_MODE_MASK 0x0000C000 +#define SDHCI_RETUNING_MODE_SHIFT 14 +#define SDHCI_CLOCK_MUL_MASK 0x00FF0000 +#define SDHCI_CLOCK_MUL_SHIFT 16 + #define SDHCI_CAPABILITIES_1 0x44 -#define SDHCI_MAX_CURRENT 0x48 +#define SDHCI_MAX_CURRENT 0x48 +#define SDHCI_MAX_CURRENT_330_MASK 0x0000FF +#define SDHCI_MAX_CURRENT_330_SHIFT 0 +#define SDHCI_MAX_CURRENT_300_MASK 0x00FF00 +#define SDHCI_MAX_CURRENT_300_SHIFT 8 +#define SDHCI_MAX_CURRENT_180_MASK 0xFF0000 +#define SDHCI_MAX_CURRENT_180_SHIFT 16 +#define SDHCI_MAX_CURRENT_MULTIPLIER 4 /* 4C-4F reserved for more max current */ @@ -202,6 +243,12 @@ #define SDHCI_MAX_DIV_SPEC_200 256 #define SDHCI_MAX_DIV_SPEC_300 2046 +/* + * Host SDMA buffer boundary. Valid values from 4K to 512K in powers of 2. 
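The MAX_CURRENT decode earlier in sdhci_add_host() follows directly from the register layout just defined: each 8-bit field is in units of 4 mA, and XPC is advertised once the budget on a rail exceeds 150 mA. A stand-alone restatement of that conversion for the 3.3 V field (not driver code):

#include <stdio.h>

#define MAX_CURRENT_330_MASK	0x0000FF	/* mirrors SDHCI_MAX_CURRENT_330_MASK */
#define MAX_CURRENT_330_SHIFT	0
#define MAX_CURRENT_MULTIPLIER	4		/* register units are 4 mA */

static unsigned int max_current_330_ma(unsigned int max_current_reg)
{
	return ((max_current_reg & MAX_CURRENT_330_MASK) >>
		MAX_CURRENT_330_SHIFT) * MAX_CURRENT_MULTIPLIER;
}

int main(void)
{
	unsigned int reg = 0x96;	/* hypothetical register value: 150 * 4 mA */
	unsigned int ma = max_current_330_ma(reg);

	printf("3.3 V budget %u mA -> XPC %s\n", ma, ma > 150 ? "set" : "not set");
	return 0;
}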
+ */ +#define SDHCI_DEFAULT_BOUNDARY_SIZE (512 * 1024) +#define SDHCI_DEFAULT_BOUNDARY_ARG (ilog2(SDHCI_DEFAULT_BOUNDARY_SIZE) - 12) + struct sdhci_ops { #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS u32 (*read_l)(struct sdhci_host *host, int reg); @@ -223,6 +270,10 @@ struct sdhci_ops { void (*platform_send_init_74_clocks)(struct sdhci_host *host, u8 power_mode); unsigned int (*get_ro)(struct sdhci_host *host); + void (*platform_reset_enter)(struct sdhci_host *host, u8 mask); + void (*platform_reset_exit)(struct sdhci_host *host, u8 mask); + int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs); + }; #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index af97015a2fc..14f8edbaa19 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -29,6 +29,8 @@ #include <linux/mmc/sh_mmcif.h> #include <linux/pagemap.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/spinlock.h> #define DRIVER_NAME "sh_mmcif" #define DRIVER_VERSION "2010-04-28" @@ -153,6 +155,12 @@ #define CLKDEV_MMC_DATA 20000000 /* 20MHz */ #define CLKDEV_INIT 400000 /* 400 KHz */ +enum mmcif_state { + STATE_IDLE, + STATE_REQUEST, + STATE_IOS, +}; + struct sh_mmcif_host { struct mmc_host *mmc; struct mmc_data *data; @@ -164,6 +172,9 @@ struct sh_mmcif_host { long timeout; void __iomem *addr; struct completion intr_wait; + enum mmcif_state state; + spinlock_t lock; + bool power; /* DMA support */ struct dma_chan *chan_rx; @@ -798,17 +809,31 @@ static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host, static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct sh_mmcif_host *host = mmc_priv(mmc); + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + if (host->state != STATE_IDLE) { + spin_unlock_irqrestore(&host->lock, flags); + mrq->cmd->error = -EAGAIN; + mmc_request_done(mmc, mrq); + return; + } + + host->state = STATE_REQUEST; + spin_unlock_irqrestore(&host->lock, flags); switch (mrq->cmd->opcode) { /* MMCIF does not support SD/SDIO command */ case SD_IO_SEND_OP_COND: case MMC_APP_CMD: + host->state = STATE_IDLE; mrq->cmd->error = -ETIMEDOUT; mmc_request_done(mmc, mrq); return; case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */ if (!mrq->data) { /* send_if_cond cmd (not support) */ + host->state = STATE_IDLE; mrq->cmd->error = -ETIMEDOUT; mmc_request_done(mmc, mrq); return; @@ -830,12 +855,9 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) sh_mmcif_start_cmd(host, mrq, mrq->cmd); host->data = NULL; - if (mrq->cmd->error != 0) { - mmc_request_done(mmc, mrq); - return; - } - if (mrq->stop) + if (!mrq->cmd->error && mrq->stop) sh_mmcif_stop_cmd(host, mrq, mrq->stop); + host->state = STATE_IDLE; mmc_request_done(mmc, mrq); } @@ -843,15 +865,39 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct sh_mmcif_host *host = mmc_priv(mmc); struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + if (host->state != STATE_IDLE) { + spin_unlock_irqrestore(&host->lock, flags); + return; + } + + host->state = STATE_IOS; + spin_unlock_irqrestore(&host->lock, flags); if (ios->power_mode == MMC_POWER_UP) { if (p->set_pwr) p->set_pwr(host->pd, ios->power_mode); + if (!host->power) { + /* See if we also get DMA */ + sh_mmcif_request_dma(host, host->pd->dev.platform_data); + pm_runtime_get_sync(&host->pd->dev); + host->power = true; + } } else if (ios->power_mode == 
MMC_POWER_OFF || !ios->clock) { /* clock stop */ sh_mmcif_clock_control(host, 0); - if (ios->power_mode == MMC_POWER_OFF && p->down_pwr) - p->down_pwr(host->pd); + if (ios->power_mode == MMC_POWER_OFF) { + if (host->power) { + pm_runtime_put(&host->pd->dev); + sh_mmcif_release_dma(host); + host->power = false; + } + if (p->down_pwr) + p->down_pwr(host->pd); + } + host->state = STATE_IDLE; return; } @@ -859,6 +905,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) sh_mmcif_clock_control(host, ios->clock); host->bus_width = ios->bus_width; + host->state = STATE_IDLE; } static int sh_mmcif_get_cd(struct mmc_host *mmc) @@ -925,7 +972,7 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id) sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); err = 1; } else { - dev_dbg(&host->pd->dev, "Not support int\n"); + dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state); sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state); sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); err = 1; @@ -996,6 +1043,7 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev) host->pd = pdev; init_completion(&host->intr_wait); + spin_lock_init(&host->lock); mmc->ops = &sh_mmcif_ops; mmc->f_max = host->clk; @@ -1020,24 +1068,29 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev) sh_mmcif_sync_reset(host); platform_set_drvdata(pdev, host); - /* See if we also get DMA */ - sh_mmcif_request_dma(host, pd); + pm_runtime_enable(&pdev->dev); + host->power = false; + + ret = pm_runtime_resume(&pdev->dev); + if (ret < 0) + goto clean_up2; mmc_add_host(mmc); + sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); + ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host); if (ret) { dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n"); - goto clean_up2; + goto clean_up3; } ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host); if (ret) { free_irq(irq[0], host); dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n"); - goto clean_up2; + goto clean_up3; } - sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); sh_mmcif_detect(host->mmc); dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION); @@ -1045,7 +1098,11 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev) sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff); return ret; +clean_up3: + mmc_remove_host(mmc); + pm_runtime_suspend(&pdev->dev); clean_up2: + pm_runtime_disable(&pdev->dev); clk_disable(host->hclk); clean_up1: mmc_free_host(mmc); @@ -1060,14 +1117,14 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev) struct sh_mmcif_host *host = platform_get_drvdata(pdev); int irq[2]; + pm_runtime_get_sync(&pdev->dev); + mmc_remove_host(host->mmc); - sh_mmcif_release_dma(host); + sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); if (host->addr) iounmap(host->addr); - sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); - irq[0] = platform_get_irq(pdev, 0); irq[1] = platform_get_irq(pdev, 1); @@ -1078,15 +1135,52 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev) clk_disable(host->hclk); mmc_free_host(host->mmc); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); return 0; } +#ifdef CONFIG_PM +static int sh_mmcif_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct sh_mmcif_host *host = platform_get_drvdata(pdev); + int ret = mmc_suspend_host(host->mmc); + + if (!ret) { + sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); + clk_disable(host->hclk); 
+ } + + return ret; +} + +static int sh_mmcif_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct sh_mmcif_host *host = platform_get_drvdata(pdev); + + clk_enable(host->hclk); + + return mmc_resume_host(host->mmc); +} +#else +#define sh_mmcif_suspend NULL +#define sh_mmcif_resume NULL +#endif /* CONFIG_PM */ + +static const struct dev_pm_ops sh_mmcif_dev_pm_ops = { + .suspend = sh_mmcif_suspend, + .resume = sh_mmcif_resume, +}; + static struct platform_driver sh_mmcif_driver = { .probe = sh_mmcif_probe, .remove = sh_mmcif_remove, .driver = { .name = DRIVER_NAME, + .pm = &sh_mmcif_dev_pm_ops, }, }; diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c index cc701236d16..b3654293017 100644 --- a/drivers/mmc/host/sh_mobile_sdhi.c +++ b/drivers/mmc/host/sh_mobile_sdhi.c @@ -62,7 +62,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; struct tmio_mmc_host *host; char clk_name[8]; - int ret; + int i, irq, ret; priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL); if (priv == NULL) { @@ -71,6 +71,7 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) } mmc_data = &priv->mmc_data; + p->pdata = mmc_data; snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id); priv->clk = clk_get(&pdev->dev, clk_name); @@ -116,11 +117,36 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) if (ret < 0) goto eprobe; - pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), - (unsigned long)host->ctl, host->irq); + for (i = 0; i < 3; i++) { + irq = platform_get_irq(pdev, i); + if (irq < 0) { + if (i) { + continue; + } else { + ret = irq; + goto eirq; + } + } + ret = request_irq(irq, tmio_mmc_irq, 0, + dev_name(&pdev->dev), host); + if (ret) { + while (i--) { + irq = platform_get_irq(pdev, i); + if (irq >= 0) + free_irq(irq, host); + } + goto eirq; + } + } + dev_info(&pdev->dev, "%s base at 0x%08lx clock rate %u MHz\n", + mmc_hostname(host->mmc), (unsigned long) + (platform_get_resource(pdev,IORESOURCE_MEM, 0)->start), + mmc_data->hclk / 1000000); return ret; +eirq: + tmio_mmc_host_remove(host); eprobe: clk_disable(priv->clk); clk_put(priv->clk); @@ -134,6 +160,16 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev) struct mmc_host *mmc = platform_get_drvdata(pdev); struct tmio_mmc_host *host = mmc_priv(mmc); struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data); + struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; + int i, irq; + + p->pdata = NULL; + + for (i = 0; i < 3; i++) { + irq = platform_get_irq(pdev, i); + if (irq >= 0) + free_irq(irq, host); + } tmio_mmc_host_remove(host); clk_disable(priv->clk); @@ -143,10 +179,18 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev) return 0; } +static const struct dev_pm_ops tmio_mmc_dev_pm_ops = { + .suspend = tmio_mmc_host_suspend, + .resume = tmio_mmc_host_resume, + .runtime_suspend = tmio_mmc_host_runtime_suspend, + .runtime_resume = tmio_mmc_host_runtime_resume, +}; + static struct platform_driver sh_mobile_sdhi_driver = { .driver = { .name = "sh_mobile_sdhi", .owner = THIS_MODULE, + .pm = &tmio_mmc_dev_pm_ops, }, .probe = sh_mobile_sdhi_probe, .remove = __devexit_p(sh_mobile_sdhi_remove), diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index 79c568461d5..14479f9ef53 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c @@ -30,7 +30,7 @@ static int 
tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) struct mmc_host *mmc = platform_get_drvdata(dev); int ret; - ret = mmc_suspend_host(mmc); + ret = tmio_mmc_host_suspend(&dev->dev); /* Tell MFD core it can disable us now.*/ if (!ret && cell->disable) @@ -46,15 +46,12 @@ static int tmio_mmc_resume(struct platform_device *dev) int ret = 0; /* Tell the MFD core we are ready to be enabled */ - if (cell->resume) { + if (cell->resume) ret = cell->resume(dev); - if (ret) - goto out; - } - mmc_resume_host(mmc); + if (!ret) + ret = tmio_mmc_host_resume(&dev->dev); -out: return ret; } #else @@ -67,7 +64,7 @@ static int __devinit tmio_mmc_probe(struct platform_device *pdev) const struct mfd_cell *cell = mfd_get_cell(pdev); struct tmio_mmc_data *pdata; struct tmio_mmc_host *host; - int ret = -EINVAL; + int ret = -EINVAL, irq; if (pdev->num_resources != 2) goto out; @@ -76,6 +73,12 @@ static int __devinit tmio_mmc_probe(struct platform_device *pdev) if (!pdata || !pdata->hclk) goto out; + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + goto out; + } + /* Tell the MFD core we are ready to be enabled */ if (cell->enable) { ret = cell->enable(pdev); @@ -87,11 +90,18 @@ static int __devinit tmio_mmc_probe(struct platform_device *pdev) if (ret) goto cell_disable; + ret = request_irq(irq, tmio_mmc_irq, IRQF_DISABLED | + IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), host); + if (ret) + goto host_remove; + pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), - (unsigned long)host->ctl, host->irq); + (unsigned long)host->ctl, irq); return 0; +host_remove: + tmio_mmc_host_remove(host); cell_disable: if (cell->disable) cell->disable(pdev); @@ -107,7 +117,9 @@ static int __devexit tmio_mmc_remove(struct platform_device *pdev) platform_set_drvdata(pdev, NULL); if (mmc) { - tmio_mmc_host_remove(mmc_priv(mmc)); + struct tmio_mmc_host *host = mmc_priv(mmc); + free_irq(platform_get_irq(pdev, 0), host); + tmio_mmc_host_remove(host); if (cell->disable) cell->disable(pdev); } diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index 099ed49a259..8260bc2c34e 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -19,6 +19,7 @@ #include <linux/highmem.h> #include <linux/mmc/tmio.h> #include <linux/pagemap.h> +#include <linux/spinlock.h> /* Definitions for values the CTRL_SDIO_STATUS register can take. 
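The sh_mobile_sdhi_probe() loop above and the simpler single-IRQ path here share one cleanup rule: on failure, free exactly the IRQs already requested, newest first, and treat only the first IRQ as mandatory. A condensed stand-alone sketch of that unwind idiom, with stub helpers standing in for platform_get_irq()/request_irq()/free_irq() (the stub names and behaviour are made up for the example):

#include <stdio.h>

#define NIRQS 3

/* Stubs standing in for platform_get_irq()/request_irq()/free_irq(). */
static int get_irq(int i)	{ return i == 2 ? -1 : 10 + i; }  /* third IRQ absent */
static int request(int irq)	{ printf("request %d\n", irq); return 0; }
static void release(int irq)	{ printf("free %d\n", irq); }

static int probe(void)
{
	int i, irq, ret;

	for (i = 0; i < NIRQS; i++) {
		irq = get_irq(i);
		if (irq < 0) {
			if (i)
				continue;	/* extra IRQs are optional */
			return irq;		/* the first one is mandatory */
		}
		ret = request(irq);
		if (ret) {
			while (i--) {		/* unwind, newest first */
				irq = get_irq(i);
				if (irq >= 0)
					release(irq);
			}
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	return probe() ? 1 : 0;
}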
*/ #define TMIO_SDIO_STAT_IOIRQ 0x0001 @@ -44,13 +45,14 @@ struct tmio_mmc_host { struct mmc_request *mrq; struct mmc_data *data; struct mmc_host *mmc; - int irq; unsigned int sdio_irq_enabled; /* Callbacks for clock / power control */ void (*set_pwr)(struct platform_device *host, int state); void (*set_clk_div)(struct platform_device *host, int state); + int pm_error; + /* pio related stuff */ struct scatterlist *sg_ptr; struct scatterlist *sg_orig; @@ -83,6 +85,7 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host); void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i); void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i); +irqreturn_t tmio_mmc_irq(int irq, void *devid); static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags) @@ -120,4 +123,15 @@ static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host) } #endif +#ifdef CONFIG_PM +int tmio_mmc_host_suspend(struct device *dev); +int tmio_mmc_host_resume(struct device *dev); +#else +#define tmio_mmc_host_suspend NULL +#define tmio_mmc_host_resume NULL +#endif + +int tmio_mmc_host_runtime_suspend(struct device *dev); +int tmio_mmc_host_runtime_resume(struct device *dev); + #endif diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c index d3de74ab633..25f1ad6cbe0 100644 --- a/drivers/mmc/host/tmio_mmc_dma.c +++ b/drivers/mmc/host/tmio_mmc_dma.c @@ -256,7 +256,10 @@ static bool tmio_mmc_filter(struct dma_chan *chan, void *arg) void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata) { /* We can only either use DMA for both Tx and Rx or not use it at all */ - if (pdata->dma) { + if (!pdata->dma) + return; + + if (!host->chan_tx && !host->chan_rx) { dma_cap_mask_t mask; dma_cap_zero(mask); @@ -284,18 +287,18 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host); tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host); + } - tmio_mmc_enable_dma(host, true); + tmio_mmc_enable_dma(host, true); + + return; - return; ebouncebuf: - dma_release_channel(host->chan_rx); - host->chan_rx = NULL; + dma_release_channel(host->chan_rx); + host->chan_rx = NULL; ereqrx: - dma_release_channel(host->chan_tx); - host->chan_tx = NULL; - return; - } + dma_release_channel(host->chan_tx); + host->chan_tx = NULL; } void tmio_mmc_release_dma(struct tmio_mmc_host *host) diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c index 710339a85c8..ad6347bb02d 100644 --- a/drivers/mmc/host/tmio_mmc_pio.c +++ b/drivers/mmc/host/tmio_mmc_pio.c @@ -39,6 +39,7 @@ #include <linux/module.h> #include <linux/pagemap.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/scatterlist.h> #include <linux/workqueue.h> #include <linux/spinlock.h> @@ -243,8 +244,12 @@ static void tmio_mmc_reset_work(struct work_struct *work) spin_lock_irqsave(&host->lock, flags); mrq = host->mrq; - /* request already finished */ - if (!mrq + /* + * is request already finished? 
Since we use a non-blocking + * cancel_delayed_work(), it can happen, that a .set_ios() call preempts + * us, so, have to check for IS_ERR(host->mrq) + */ + if (IS_ERR_OR_NULL(mrq) || time_is_after_jiffies(host->last_req_ts + msecs_to_jiffies(2000))) { spin_unlock_irqrestore(&host->lock, flags); @@ -264,16 +269,19 @@ static void tmio_mmc_reset_work(struct work_struct *work) host->cmd = NULL; host->data = NULL; - host->mrq = NULL; host->force_pio = false; spin_unlock_irqrestore(&host->lock, flags); tmio_mmc_reset(host); + /* Ready for new calls */ + host->mrq = NULL; + mmc_request_done(host->mmc, mrq); } +/* called with host->lock held, interrupts disabled */ static void tmio_mmc_finish_request(struct tmio_mmc_host *host) { struct mmc_request *mrq = host->mrq; @@ -281,13 +289,15 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host) if (!mrq) return; - host->mrq = NULL; host->cmd = NULL; host->data = NULL; host->force_pio = false; cancel_delayed_work(&host->delayed_reset_work); + host->mrq = NULL; + + /* FIXME: mmc_request_done() can schedule! */ mmc_request_done(host->mmc, mrq); } @@ -554,7 +564,7 @@ out: spin_unlock(&host->lock); } -static irqreturn_t tmio_mmc_irq(int irq, void *devid) +irqreturn_t tmio_mmc_irq(int irq, void *devid) { struct tmio_mmc_host *host = devid; struct tmio_mmc_data *pdata = host->pdata; @@ -649,6 +659,7 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid) out: return IRQ_HANDLED; } +EXPORT_SYMBOL(tmio_mmc_irq); static int tmio_mmc_start_data(struct tmio_mmc_host *host, struct mmc_data *data) @@ -685,15 +696,27 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host, static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct tmio_mmc_host *host = mmc_priv(mmc); + unsigned long flags; int ret; - if (host->mrq) + spin_lock_irqsave(&host->lock, flags); + + if (host->mrq) { pr_debug("request not null\n"); + if (IS_ERR(host->mrq)) { + spin_unlock_irqrestore(&host->lock, flags); + mrq->cmd->error = -EAGAIN; + mmc_request_done(mmc, mrq); + return; + } + } host->last_req_ts = jiffies; wmb(); host->mrq = mrq; + spin_unlock_irqrestore(&host->lock, flags); + if (mrq->data) { ret = tmio_mmc_start_data(host, mrq->data); if (ret) @@ -708,8 +731,8 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) } fail: - host->mrq = NULL; host->force_pio = false; + host->mrq = NULL; mrq->cmd->error = ret; mmc_request_done(mmc, mrq); } @@ -723,19 +746,54 @@ fail: static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct tmio_mmc_host *host = mmc_priv(mmc); + struct tmio_mmc_data *pdata = host->pdata; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + if (host->mrq) { + if (IS_ERR(host->mrq)) { + dev_dbg(&host->pdev->dev, + "%s.%d: concurrent .set_ios(), clk %u, mode %u\n", + current->comm, task_pid_nr(current), + ios->clock, ios->power_mode); + host->mrq = ERR_PTR(-EINTR); + } else { + dev_dbg(&host->pdev->dev, + "%s.%d: CMD%u active since %lu, now %lu!\n", + current->comm, task_pid_nr(current), + host->mrq->cmd->opcode, host->last_req_ts, jiffies); + } + spin_unlock_irqrestore(&host->lock, flags); + return; + } + + host->mrq = ERR_PTR(-EBUSY); + + spin_unlock_irqrestore(&host->lock, flags); if (ios->clock) tmio_mmc_set_clock(host, ios->clock); /* Power sequence - OFF -> UP -> ON */ if (ios->power_mode == MMC_POWER_UP) { + if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) && !pdata->power) { + pm_runtime_get_sync(&host->pdev->dev); + pdata->power = true; + } /* power up SD bus */ if 
(host->set_pwr) host->set_pwr(host->pdev, 1); } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { /* power down SD bus */ - if (ios->power_mode == MMC_POWER_OFF && host->set_pwr) - host->set_pwr(host->pdev, 0); + if (ios->power_mode == MMC_POWER_OFF) { + if (host->set_pwr) + host->set_pwr(host->pdev, 0); + if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) && + pdata->power) { + pdata->power = false; + pm_runtime_put(&host->pdev->dev); + } + } tmio_mmc_clk_stop(host); } else { /* start bus clock */ @@ -753,6 +811,12 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) /* Let things settle. delay taken from winCE driver */ udelay(140); + if (PTR_ERR(host->mrq) == -EINTR) + dev_dbg(&host->pdev->dev, + "%s.%d: IOS interrupted: clk %u, mode %u", + current->comm, task_pid_nr(current), + ios->clock, ios->power_mode); + host->mrq = NULL; } static int tmio_mmc_get_ro(struct mmc_host *mmc) @@ -801,6 +865,7 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, if (!mmc) return -ENOMEM; + pdata->dev = &pdev->dev; _host = mmc_priv(mmc); _host->pdata = pdata; _host->mmc = mmc; @@ -834,24 +899,19 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, else mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; - tmio_mmc_clk_stop(_host); - tmio_mmc_reset(_host); - - ret = platform_get_irq(pdev, 0); + pdata->power = false; + pm_runtime_enable(&pdev->dev); + ret = pm_runtime_resume(&pdev->dev); if (ret < 0) - goto unmap_ctl; + goto pm_disable; - _host->irq = ret; + tmio_mmc_clk_stop(_host); + tmio_mmc_reset(_host); tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL); if (pdata->flags & TMIO_MMC_SDIO_IRQ) tmio_mmc_enable_sdio_irq(mmc, 0); - ret = request_irq(_host->irq, tmio_mmc_irq, IRQF_DISABLED | - IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), _host); - if (ret) - goto unmap_ctl; - spin_lock_init(&_host->lock); /* Init delayed work for request timeouts */ @@ -860,6 +920,10 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, /* See if we also get DMA */ tmio_mmc_request_dma(_host, pdata); + /* We have to keep the device powered for its card detection to work */ + if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD)) + pm_runtime_get_noresume(&pdev->dev); + mmc_add_host(mmc); /* Unmask the IRQs we want to know about */ @@ -874,7 +938,8 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, return 0; -unmap_ctl: +pm_disable: + pm_runtime_disable(&pdev->dev); iounmap(_host->ctl); host_free: mmc_free_host(mmc); @@ -885,13 +950,88 @@ EXPORT_SYMBOL(tmio_mmc_host_probe); void tmio_mmc_host_remove(struct tmio_mmc_host *host) { + struct platform_device *pdev = host->pdev; + + /* + * We don't have to manipulate pdata->power here: if there is a card in + * the slot, the runtime PM is active and our .runtime_resume() will not + * be run. If there is no card in the slot and the platform can suspend + * the controller, the runtime PM is suspended and pdata->power == false, + * so, our .runtime_resume() will not try to detect a card in the slot. 
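Both the .request() and .set_ios() changes above rely on one idiom: host->mrq is NULL when idle, a real request while a command runs, or an ERR_PTR() value used as a busy/interrupted marker while .set_ios() reconfigures the host, so a single pointer carries the whole state. A stand-alone sketch of the idiom (locking omitted; in the driver these checks run under host->lock, and the ERR_PTR helpers below are local copies mirroring linux/err.h so the example compiles on its own):

#include <stdio.h>
#include <errno.h>

/* Local equivalents of the linux/err.h helpers, for a self-contained example. */
#define MAX_ERRNO 4095
static void *ERR_PTR(long error)	{ return (void *)error; }
static long PTR_ERR(const void *ptr)	{ return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct req { int opcode; };

static struct req *cur;			/* plays the role of host->mrq */

/* .set_ios()-style path: mark the host busy while reconfiguring it. */
static void set_ios(void)
{
	if (cur) {
		if (IS_ERR(cur))
			cur = ERR_PTR(-EINTR);	/* a concurrent set_ios ran */
		return;				/* a real request is active */
	}
	cur = ERR_PTR(-EBUSY);
	/* ... clock / power reconfiguration would happen here ... */
	if (PTR_ERR(cur) == -EINTR)
		printf("set_ios was interrupted\n");
	cur = NULL;
}

/* .request()-style path: refuse new work while the busy marker is set. */
static int submit(struct req *r)
{
	if (cur && IS_ERR(cur))
		return -EAGAIN;
	cur = r;
	return 0;
}

int main(void)
{
	struct req r = { .opcode = 17 };

	set_ios();
	return submit(&r) ? 1 : 0;
}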
+ */ + if (host->pdata->flags & TMIO_MMC_HAS_COLD_CD) + pm_runtime_get_sync(&pdev->dev); + mmc_remove_host(host->mmc); cancel_delayed_work_sync(&host->delayed_reset_work); tmio_mmc_release_dma(host); - free_irq(host->irq, host); + + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + iounmap(host->ctl); mmc_free_host(host->mmc); } EXPORT_SYMBOL(tmio_mmc_host_remove); +#ifdef CONFIG_PM +int tmio_mmc_host_suspend(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct tmio_mmc_host *host = mmc_priv(mmc); + int ret = mmc_suspend_host(mmc); + + if (!ret) + tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL); + + host->pm_error = pm_runtime_put_sync(dev); + + return ret; +} +EXPORT_SYMBOL(tmio_mmc_host_suspend); + +int tmio_mmc_host_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct tmio_mmc_host *host = mmc_priv(mmc); + + /* The MMC core will perform the complete set up */ + host->pdata->power = false; + + if (!host->pm_error) + pm_runtime_get_sync(dev); + + tmio_mmc_reset(mmc_priv(mmc)); + tmio_mmc_request_dma(host, host->pdata); + + return mmc_resume_host(mmc); +} +EXPORT_SYMBOL(tmio_mmc_host_resume); + +#endif /* CONFIG_PM */ + +int tmio_mmc_host_runtime_suspend(struct device *dev) +{ + return 0; +} +EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend); + +int tmio_mmc_host_runtime_resume(struct device *dev) +{ + struct mmc_host *mmc = dev_get_drvdata(dev); + struct tmio_mmc_host *host = mmc_priv(mmc); + struct tmio_mmc_data *pdata = host->pdata; + + tmio_mmc_reset(host); + + if (pdata->power) { + /* Only entered after a card-insert interrupt */ + tmio_mmc_set_ios(mmc, &mmc->ios); + mmc_detect_change(mmc, msecs_to_jiffies(100)); + } + + return 0; +} +EXPORT_SYMBOL(tmio_mmc_host_runtime_resume); + MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c new file mode 100644 index 00000000000..cbb03305b77 --- /dev/null +++ b/drivers/mmc/host/vub300.c @@ -0,0 +1,2506 @@ +/* + * Remote VUB300 SDIO/SDmem Host Controller Driver + * + * Copyright (C) 2010 Elan Digital Systems Limited + * + * based on USB Skeleton driver - 2.2 + * + * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 + * + * VUB300: is a USB 2.0 client device with a single SDIO/SDmem/MMC slot + * Any SDIO/SDmem/MMC device plugged into the VUB300 will appear, + * by virtue of this driver, to have been plugged into a local + * SDIO host controller, similar to, say, a PCI Ricoh controller + * This is because this kernel device driver is both a USB 2.0 + * client device driver AND an MMC host controller driver. Thus + * if there is an existing driver for the inserted SDIO/SDmem/MMC + * device then that driver will be used by the kernel to manage + * the device in exactly the same fashion as if it had been + * directly plugged into, say, a local pci bus Ricoh controller + * + * RANT: this driver was written using a display 128x48 - converting it + * to a line width of 80 makes it very difficult to support. In + * particular functions have been broken down into sub functions + * and the original meaningful names have been shortened into + * cryptic ones. + * The problem is that executing a fragment of code subject to + * two conditions means an indentation of 24, thus leaving only + * 56 characters for a C statement. 
And that is quite ridiculous! + * + * Data types: data passed to/from the VUB300 is fixed to a number of + * bits and driver data fields reflect that limit by using + * u8, u16, u32 + */ +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/kref.h> +#include <linux/uaccess.h> +#include <linux/usb.h> +#include <linux/mutex.h> +#include <linux/mmc/host.h> +#include <linux/mmc/card.h> +#include <linux/mmc/sdio_func.h> +#include <linux/mmc/sdio_ids.h> +#include <linux/workqueue.h> +#include <linux/ctype.h> +#include <linux/firmware.h> +#include <linux/scatterlist.h> + +struct host_controller_info { + u8 info_size; + u16 firmware_version; + u8 number_of_ports; +} __packed; + +#define FIRMWARE_BLOCK_BOUNDARY 1024 +struct sd_command_header { + u8 header_size; + u8 header_type; + u8 port_number; + u8 command_type; /* Bit7 - Rd/Wr */ + u8 command_index; + u8 transfer_size[4]; /* ReadSize + ReadSize */ + u8 response_type; + u8 arguments[4]; + u8 block_count[2]; + u8 block_size[2]; + u8 block_boundary[2]; + u8 reserved[44]; /* to pad out to 64 bytes */ +} __packed; + +struct sd_irqpoll_header { + u8 header_size; + u8 header_type; + u8 port_number; + u8 command_type; /* Bit7 - Rd/Wr */ + u8 padding[16]; /* don't ask why !! */ + u8 poll_timeout_msb; + u8 poll_timeout_lsb; + u8 reserved[42]; /* to pad out to 64 bytes */ +} __packed; + +struct sd_common_header { + u8 header_size; + u8 header_type; + u8 port_number; +} __packed; + +struct sd_response_header { + u8 header_size; + u8 header_type; + u8 port_number; + u8 command_type; + u8 command_index; + u8 command_response[0]; +} __packed; + +struct sd_status_header { + u8 header_size; + u8 header_type; + u8 port_number; + u16 port_flags; + u32 sdio_clock; + u16 host_header_size; + u16 func_header_size; + u16 ctrl_header_size; +} __packed; + +struct sd_error_header { + u8 header_size; + u8 header_type; + u8 port_number; + u8 error_code; +} __packed; + +struct sd_interrupt_header { + u8 header_size; + u8 header_type; + u8 port_number; +} __packed; + +struct offload_registers_access { + u8 command_byte[4]; + u8 Respond_Byte[4]; +} __packed; + +#define INTERRUPT_REGISTER_ACCESSES 15 +struct sd_offloaded_interrupt { + u8 header_size; + u8 header_type; + u8 port_number; + struct offload_registers_access reg[INTERRUPT_REGISTER_ACCESSES]; +} __packed; + +struct sd_register_header { + u8 header_size; + u8 header_type; + u8 port_number; + u8 command_type; + u8 command_index; + u8 command_response[6]; +} __packed; + +#define PIGGYBACK_REGISTER_ACCESSES 14 +struct sd_offloaded_piggyback { + struct sd_register_header sdio; + struct offload_registers_access reg[PIGGYBACK_REGISTER_ACCESSES]; +} __packed; + +union sd_response { + struct sd_common_header common; + struct sd_status_header status; + struct sd_error_header error; + struct sd_interrupt_header interrupt; + struct sd_response_header response; + struct sd_offloaded_interrupt irq; + struct sd_offloaded_piggyback pig; +} __packed; + +union sd_command { + struct sd_command_header head; + struct sd_irqpoll_header poll; +} __packed; + +enum SD_RESPONSE_TYPE { + SDRT_UNSPECIFIED = 0, + SDRT_NONE, + SDRT_1, + SDRT_1B, + SDRT_2, + SDRT_3, + SDRT_4, + SDRT_5, + SDRT_5B, + SDRT_6, + SDRT_7, +}; + +#define RESPONSE_INTERRUPT 0x01 +#define RESPONSE_ERROR 0x02 +#define RESPONSE_STATUS 0x03 +#define RESPONSE_IRQ_DISABLED 0x05 +#define RESPONSE_IRQ_ENABLED 0x06 +#define RESPONSE_PIGGYBACKED 0x07 +#define RESPONSE_NO_INTERRUPT 0x08 
+#define RESPONSE_PIG_DISABLED 0x09 +#define RESPONSE_PIG_ENABLED 0x0A +#define SD_ERROR_1BIT_TIMEOUT 0x01 +#define SD_ERROR_4BIT_TIMEOUT 0x02 +#define SD_ERROR_1BIT_CRC_WRONG 0x03 +#define SD_ERROR_4BIT_CRC_WRONG 0x04 +#define SD_ERROR_1BIT_CRC_ERROR 0x05 +#define SD_ERROR_4BIT_CRC_ERROR 0x06 +#define SD_ERROR_NO_CMD_ENDBIT 0x07 +#define SD_ERROR_NO_1BIT_DATEND 0x08 +#define SD_ERROR_NO_4BIT_DATEND 0x09 +#define SD_ERROR_1BIT_UNEXPECTED_TIMEOUT 0x0A +#define SD_ERROR_4BIT_UNEXPECTED_TIMEOUT 0x0B +#define SD_ERROR_ILLEGAL_COMMAND 0x0C +#define SD_ERROR_NO_DEVICE 0x0D +#define SD_ERROR_TRANSFER_LENGTH 0x0E +#define SD_ERROR_1BIT_DATA_TIMEOUT 0x0F +#define SD_ERROR_4BIT_DATA_TIMEOUT 0x10 +#define SD_ERROR_ILLEGAL_STATE 0x11 +#define SD_ERROR_UNKNOWN_ERROR 0x12 +#define SD_ERROR_RESERVED_ERROR 0x13 +#define SD_ERROR_INVALID_FUNCTION 0x14 +#define SD_ERROR_OUT_OF_RANGE 0x15 +#define SD_ERROR_STAT_CMD 0x16 +#define SD_ERROR_STAT_DATA 0x17 +#define SD_ERROR_STAT_CMD_TIMEOUT 0x18 +#define SD_ERROR_SDCRDY_STUCK 0x19 +#define SD_ERROR_UNHANDLED 0x1A +#define SD_ERROR_OVERRUN 0x1B +#define SD_ERROR_PIO_TIMEOUT 0x1C + +#define FUN(c) (0x000007 & (c->arg>>28)) +#define REG(c) (0x01FFFF & (c->arg>>9)) + +static int limit_speed_to_24_MHz; +module_param(limit_speed_to_24_MHz, bool, 0644); +MODULE_PARM_DESC(limit_speed_to_24_MHz, "Limit Max SDIO Clock Speed to 24 MHz"); + +static int pad_input_to_usb_pkt; +module_param(pad_input_to_usb_pkt, bool, 0644); +MODULE_PARM_DESC(pad_input_to_usb_pkt, + "Pad USB data input transfers to whole USB Packet"); + +static int disable_offload_processing; +module_param(disable_offload_processing, bool, 0644); +MODULE_PARM_DESC(disable_offload_processing, "Disable Offload Processing"); + +static int force_1_bit_data_xfers; +module_param(force_1_bit_data_xfers, bool, 0644); +MODULE_PARM_DESC(force_1_bit_data_xfers, + "Force SDIO Data Transfers to 1-bit Mode"); + +static int force_polling_for_irqs; +module_param(force_polling_for_irqs, bool, 0644); +MODULE_PARM_DESC(force_polling_for_irqs, "Force Polling for SDIO interrupts"); + +static int firmware_irqpoll_timeout = 1024; +module_param(firmware_irqpoll_timeout, int, 0644); +MODULE_PARM_DESC(firmware_irqpoll_timeout, "VUB300 firmware irqpoll timeout"); + +static int force_max_req_size = 128; +module_param(force_max_req_size, int, 0644); +MODULE_PARM_DESC(force_max_req_size, "set max request size in kBytes"); + +#ifdef SMSC_DEVELOPMENT_BOARD +static int firmware_rom_wait_states = 0x04; +#else +static int firmware_rom_wait_states = 0x1C; +#endif + +module_param(firmware_rom_wait_states, bool, 0644); +MODULE_PARM_DESC(firmware_rom_wait_states, + "ROM wait states byte=RRRIIEEE (Reserved Internal External)"); + +#define ELAN_VENDOR_ID 0x2201 +#define VUB300_VENDOR_ID 0x0424 +#define VUB300_PRODUCT_ID 0x012C +static struct usb_device_id vub300_table[] = { + {USB_DEVICE(ELAN_VENDOR_ID, VUB300_PRODUCT_ID)}, + {USB_DEVICE(VUB300_VENDOR_ID, VUB300_PRODUCT_ID)}, + {} /* Terminating entry */ +}; +MODULE_DEVICE_TABLE(usb, vub300_table); + +static struct workqueue_struct *cmndworkqueue; +static struct workqueue_struct *pollworkqueue; +static struct workqueue_struct *deadworkqueue; + +static inline int interface_to_InterfaceNumber(struct usb_interface *interface) +{ + if (!interface) + return -1; + if (!interface->cur_altsetting) + return -1; + return interface->cur_altsetting->desc.bInterfaceNumber; +} + +struct sdio_register { + unsigned func_num:3; + unsigned sdio_reg:17; + unsigned activate:1; + unsigned prepared:1; + unsigned 
regvalue:8; + unsigned response:8; + unsigned sparebit:26; +}; + +struct vub300_mmc_host { + struct usb_device *udev; + struct usb_interface *interface; + struct kref kref; + struct mutex cmd_mutex; + struct mutex irq_mutex; + char vub_name[3 + (9 * 8) + 4 + 1]; /* max of 7 sdio fn's */ + u8 cmnd_out_ep; /* EndPoint for commands */ + u8 cmnd_res_ep; /* EndPoint for responses */ + u8 data_out_ep; /* EndPoint for out data */ + u8 data_inp_ep; /* EndPoint for inp data */ + bool card_powered; + bool card_present; + bool read_only; + bool large_usb_packets; + bool app_spec; /* ApplicationSpecific */ + bool irq_enabled; /* by the MMC CORE */ + bool irq_disabled; /* in the firmware */ + unsigned bus_width:4; + u8 total_offload_count; + u8 dynamic_register_count; + u8 resp_len; + u32 datasize; + int errors; + int usb_transport_fail; + int usb_timed_out; + int irqs_queued; + struct sdio_register sdio_register[16]; + struct offload_interrupt_function_register { +#define MAXREGBITS 4 +#define MAXREGS (1<<MAXREGBITS) +#define MAXREGMASK (MAXREGS-1) + u8 offload_count; + u32 offload_point; + struct offload_registers_access reg[MAXREGS]; + } fn[8]; + u16 fbs[8]; /* Function Block Size */ + struct mmc_command *cmd; + struct mmc_request *req; + struct mmc_data *data; + struct mmc_host *mmc; + struct urb *urb; + struct urb *command_out_urb; + struct urb *command_res_urb; + struct completion command_complete; + struct completion irqpoll_complete; + union sd_command cmnd; + union sd_response resp; + struct timer_list sg_transfer_timer; + struct usb_sg_request sg_request; + struct timer_list inactivity_timer; + struct work_struct deadwork; + struct work_struct cmndwork; + struct delayed_work pollwork; + struct host_controller_info hc_info; + struct sd_status_header system_port_status; + u8 padded_buffer[64]; +}; + +#define kref_to_vub300_mmc_host(d) container_of(d, struct vub300_mmc_host, kref) +#define SET_TRANSFER_PSEUDOCODE 21 +#define SET_INTERRUPT_PSEUDOCODE 20 +#define SET_FAILURE_MODE 18 +#define SET_ROM_WAIT_STATES 16 +#define SET_IRQ_ENABLE 13 +#define SET_CLOCK_SPEED 11 +#define SET_FUNCTION_BLOCK_SIZE 9 +#define SET_SD_DATA_MODE 6 +#define SET_SD_POWER 4 +#define ENTER_DFU_MODE 3 +#define GET_HC_INF0 1 +#define GET_SYSTEM_PORT_STATUS 0 + +static void vub300_delete(struct kref *kref) +{ /* kref callback - softirq */ + struct vub300_mmc_host *vub300 = kref_to_vub300_mmc_host(kref); + struct mmc_host *mmc = vub300->mmc; + usb_free_urb(vub300->command_out_urb); + vub300->command_out_urb = NULL; + usb_free_urb(vub300->command_res_urb); + vub300->command_res_urb = NULL; + usb_put_dev(vub300->udev); + mmc_free_host(mmc); + /* + * and hence also frees vub300 + * which is contained at the end of struct mmc + */ +} + +static void vub300_queue_cmnd_work(struct vub300_mmc_host *vub300) +{ + kref_get(&vub300->kref); + if (queue_work(cmndworkqueue, &vub300->cmndwork)) { + /* + * then the cmndworkqueue was not previously + * running and the above get ref is obvious + * required and will be put when the thread + * terminates by a specific call + */ + } else { + /* + * the cmndworkqueue was already running from + * a previous invocation and thus to keep the + * kref counts correct we must undo the get + */ + kref_put(&vub300->kref, vub300_delete); + } +} + +static void vub300_queue_poll_work(struct vub300_mmc_host *vub300, int delay) +{ + kref_get(&vub300->kref); + if (queue_delayed_work(pollworkqueue, &vub300->pollwork, delay)) { + /* + * then the pollworkqueue was not previously + * running and the above get ref 
is obvious + * required and will be put when the thread + * terminates by a specific call + */ + } else { + /* + * the pollworkqueue was already running from + * a previous invocation and thus to keep the + * kref counts correct we must undo the get + */ + kref_put(&vub300->kref, vub300_delete); + } +} + +static void vub300_queue_dead_work(struct vub300_mmc_host *vub300) +{ + kref_get(&vub300->kref); + if (queue_work(deadworkqueue, &vub300->deadwork)) { + /* + * then the deadworkqueue was not previously + * running and the above get ref is obvious + * required and will be put when the thread + * terminates by a specific call + */ + } else { + /* + * the deadworkqueue was already running from + * a previous invocation and thus to keep the + * kref counts correct we must undo the get + */ + kref_put(&vub300->kref, vub300_delete); + } +} + +static void irqpoll_res_completed(struct urb *urb) +{ /* urb completion handler - hardirq */ + struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context; + if (urb->status) + vub300->usb_transport_fail = urb->status; + complete(&vub300->irqpoll_complete); +} + +static void irqpoll_out_completed(struct urb *urb) +{ /* urb completion handler - hardirq */ + struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context; + if (urb->status) { + vub300->usb_transport_fail = urb->status; + complete(&vub300->irqpoll_complete); + return; + } else { + int ret; + unsigned int pipe = + usb_rcvbulkpipe(vub300->udev, vub300->cmnd_res_ep); + usb_fill_bulk_urb(vub300->command_res_urb, vub300->udev, pipe, + &vub300->resp, sizeof(vub300->resp), + irqpoll_res_completed, vub300); + vub300->command_res_urb->actual_length = 0; + ret = usb_submit_urb(vub300->command_res_urb, GFP_ATOMIC); + if (ret) { + vub300->usb_transport_fail = ret; + complete(&vub300->irqpoll_complete); + } + return; + } +} + +static void send_irqpoll(struct vub300_mmc_host *vub300) +{ + /* cmd_mutex is held by vub300_pollwork_thread */ + int retval; + int timeout = 0xFFFF & (0x0001FFFF - firmware_irqpoll_timeout); + vub300->cmnd.poll.header_size = 22; + vub300->cmnd.poll.header_type = 1; + vub300->cmnd.poll.port_number = 0; + vub300->cmnd.poll.command_type = 2; + vub300->cmnd.poll.poll_timeout_lsb = 0xFF & (unsigned)timeout; + vub300->cmnd.poll.poll_timeout_msb = 0xFF & (unsigned)(timeout >> 8); + usb_fill_bulk_urb(vub300->command_out_urb, vub300->udev, + usb_sndbulkpipe(vub300->udev, vub300->cmnd_out_ep) + , &vub300->cmnd, sizeof(vub300->cmnd) + , irqpoll_out_completed, vub300); + retval = usb_submit_urb(vub300->command_out_urb, GFP_KERNEL); + if (0 > retval) { + vub300->usb_transport_fail = retval; + vub300_queue_poll_work(vub300, 1); + complete(&vub300->irqpoll_complete); + return; + } else { + return; + } +} + +static void new_system_port_status(struct vub300_mmc_host *vub300) +{ + int old_card_present = vub300->card_present; + int new_card_present = + (0x0001 & vub300->system_port_status.port_flags) ? 1 : 0; + vub300->read_only = + (0x0010 & vub300->system_port_status.port_flags) ? 
1 : 0; + if (new_card_present && !old_card_present) { + dev_info(&vub300->udev->dev, "card just inserted\n"); + vub300->card_present = 1; + vub300->bus_width = 0; + if (disable_offload_processing) + strncpy(vub300->vub_name, "EMPTY Processing Disabled", + sizeof(vub300->vub_name)); + else + vub300->vub_name[0] = 0; + mmc_detect_change(vub300->mmc, 1); + } else if (!new_card_present && old_card_present) { + dev_info(&vub300->udev->dev, "card just ejected\n"); + vub300->card_present = 0; + mmc_detect_change(vub300->mmc, 0); + } else { + /* no change */ + } +} + +static void __add_offloaded_reg_to_fifo(struct vub300_mmc_host *vub300, + struct offload_registers_access + *register_access, u8 func) +{ + u8 r = vub300->fn[func].offload_point + vub300->fn[func].offload_count; + memcpy(&vub300->fn[func].reg[MAXREGMASK & r], register_access, + sizeof(struct offload_registers_access)); + vub300->fn[func].offload_count += 1; + vub300->total_offload_count += 1; +} + +static void add_offloaded_reg(struct vub300_mmc_host *vub300, + struct offload_registers_access *register_access) +{ + u32 Register = ((0x03 & register_access->command_byte[0]) << 15) + | ((0xFF & register_access->command_byte[1]) << 7) + | ((0xFE & register_access->command_byte[2]) >> 1); + u8 func = ((0x70 & register_access->command_byte[0]) >> 4); + u8 regs = vub300->dynamic_register_count; + u8 i = 0; + while (0 < regs-- && 1 == vub300->sdio_register[i].activate) { + if (vub300->sdio_register[i].func_num == func && + vub300->sdio_register[i].sdio_reg == Register) { + if (vub300->sdio_register[i].prepared == 0) + vub300->sdio_register[i].prepared = 1; + vub300->sdio_register[i].response = + register_access->Respond_Byte[2]; + vub300->sdio_register[i].regvalue = + register_access->Respond_Byte[3]; + return; + } else { + i += 1; + continue; + } + }; + __add_offloaded_reg_to_fifo(vub300, register_access, func); +} + +static void check_vub300_port_status(struct vub300_mmc_host *vub300) +{ + /* + * cmd_mutex is held by vub300_pollwork_thread, + * vub300_deadwork_thread or vub300_cmndwork_thread + */ + int retval; + retval = + usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0), + GET_SYSTEM_PORT_STATUS, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0x0000, 0x0000, &vub300->system_port_status, + sizeof(vub300->system_port_status), HZ); + if (sizeof(vub300->system_port_status) == retval) + new_system_port_status(vub300); +} + +static void __vub300_irqpoll_response(struct vub300_mmc_host *vub300) +{ + /* cmd_mutex is held by vub300_pollwork_thread */ + if (vub300->command_res_urb->actual_length == 0) + return; + + switch (vub300->resp.common.header_type) { + case RESPONSE_INTERRUPT: + mutex_lock(&vub300->irq_mutex); + if (vub300->irq_enabled) + mmc_signal_sdio_irq(vub300->mmc); + else + vub300->irqs_queued += 1; + vub300->irq_disabled = 1; + mutex_unlock(&vub300->irq_mutex); + break; + case RESPONSE_ERROR: + if (vub300->resp.error.error_code == SD_ERROR_NO_DEVICE) + check_vub300_port_status(vub300); + break; + case RESPONSE_STATUS: + vub300->system_port_status = vub300->resp.status; + new_system_port_status(vub300); + if (!vub300->card_present) + vub300_queue_poll_work(vub300, HZ / 5); + break; + case RESPONSE_IRQ_DISABLED: + { + int offloaded_data_length = vub300->resp.common.header_size - 3; + int register_count = offloaded_data_length >> 3; + int ri = 0; + while (register_count--) { + add_offloaded_reg(vub300, &vub300->resp.irq.reg[ri]); + ri += 1; + } + mutex_lock(&vub300->irq_mutex); + if (vub300->irq_enabled) + 
mmc_signal_sdio_irq(vub300->mmc); + else + vub300->irqs_queued += 1; + vub300->irq_disabled = 1; + mutex_unlock(&vub300->irq_mutex); + break; + } + case RESPONSE_IRQ_ENABLED: + { + int offloaded_data_length = vub300->resp.common.header_size - 3; + int register_count = offloaded_data_length >> 3; + int ri = 0; + while (register_count--) { + add_offloaded_reg(vub300, &vub300->resp.irq.reg[ri]); + ri += 1; + } + mutex_lock(&vub300->irq_mutex); + if (vub300->irq_enabled) + mmc_signal_sdio_irq(vub300->mmc); + else if (vub300->irqs_queued) + vub300->irqs_queued += 1; + else + vub300->irqs_queued += 1; + vub300->irq_disabled = 0; + mutex_unlock(&vub300->irq_mutex); + break; + } + case RESPONSE_NO_INTERRUPT: + vub300_queue_poll_work(vub300, 1); + break; + default: + break; + } +} + +static void __do_poll(struct vub300_mmc_host *vub300) +{ + /* cmd_mutex is held by vub300_pollwork_thread */ + long commretval; + mod_timer(&vub300->inactivity_timer, jiffies + HZ); + init_completion(&vub300->irqpoll_complete); + send_irqpoll(vub300); + commretval = wait_for_completion_timeout(&vub300->irqpoll_complete, + msecs_to_jiffies(500)); + if (vub300->usb_transport_fail) { + /* no need to do anything */ + } else if (commretval == 0) { + vub300->usb_timed_out = 1; + usb_kill_urb(vub300->command_out_urb); + usb_kill_urb(vub300->command_res_urb); + } else if (commretval < 0) { + vub300_queue_poll_work(vub300, 1); + } else { /* commretval > 0 */ + __vub300_irqpoll_response(vub300); + } +} + +/* this thread runs only when the driver + * is trying to poll the device for an IRQ + */ +static void vub300_pollwork_thread(struct work_struct *work) +{ /* NOT irq */ + struct vub300_mmc_host *vub300 = container_of(work, + struct vub300_mmc_host, pollwork.work); + if (!vub300->interface) { + kref_put(&vub300->kref, vub300_delete); + return; + } + mutex_lock(&vub300->cmd_mutex); + if (vub300->cmd) { + vub300_queue_poll_work(vub300, 1); + } else if (!vub300->card_present) { + /* no need to do anything */ + } else { /* vub300->card_present */ + mutex_lock(&vub300->irq_mutex); + if (!vub300->irq_enabled) { + mutex_unlock(&vub300->irq_mutex); + } else if (vub300->irqs_queued) { + vub300->irqs_queued -= 1; + mmc_signal_sdio_irq(vub300->mmc); + mod_timer(&vub300->inactivity_timer, jiffies + HZ); + mutex_unlock(&vub300->irq_mutex); + } else { /* NOT vub300->irqs_queued */ + mutex_unlock(&vub300->irq_mutex); + __do_poll(vub300); + } + } + mutex_unlock(&vub300->cmd_mutex); + kref_put(&vub300->kref, vub300_delete); +} + +static void vub300_deadwork_thread(struct work_struct *work) +{ /* NOT irq */ + struct vub300_mmc_host *vub300 = + container_of(work, struct vub300_mmc_host, deadwork); + if (!vub300->interface) { + kref_put(&vub300->kref, vub300_delete); + return; + } + mutex_lock(&vub300->cmd_mutex); + if (vub300->cmd) { + /* + * a command got in as the inactivity + * timer expired - so we just let the + * processing of the command show if + * the device is dead + */ + } else if (vub300->card_present) { + check_vub300_port_status(vub300); + } else if (vub300->mmc && vub300->mmc->card && + mmc_card_present(vub300->mmc->card)) { + /* + * the MMC core must not have responded + * to the previous indication - lets + * hope that it eventually does so we + * will just ignore this for now + */ + } else { + check_vub300_port_status(vub300); + } + mod_timer(&vub300->inactivity_timer, jiffies + HZ); + mutex_unlock(&vub300->cmd_mutex); + kref_put(&vub300->kref, vub300_delete); +} + +static void vub300_inactivity_timer_expired(unsigned long data) 
+{ /* softirq */ + struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data; + if (!vub300->interface) { + kref_put(&vub300->kref, vub300_delete); + } else if (vub300->cmd) { + mod_timer(&vub300->inactivity_timer, jiffies + HZ); + } else { + vub300_queue_dead_work(vub300); + mod_timer(&vub300->inactivity_timer, jiffies + HZ); + } +} + +static int vub300_response_error(u8 error_code) +{ + switch (error_code) { + case SD_ERROR_PIO_TIMEOUT: + case SD_ERROR_1BIT_TIMEOUT: + case SD_ERROR_4BIT_TIMEOUT: + return -ETIMEDOUT; + case SD_ERROR_STAT_DATA: + case SD_ERROR_OVERRUN: + case SD_ERROR_STAT_CMD: + case SD_ERROR_STAT_CMD_TIMEOUT: + case SD_ERROR_SDCRDY_STUCK: + case SD_ERROR_UNHANDLED: + case SD_ERROR_1BIT_CRC_WRONG: + case SD_ERROR_4BIT_CRC_WRONG: + case SD_ERROR_1BIT_CRC_ERROR: + case SD_ERROR_4BIT_CRC_ERROR: + case SD_ERROR_NO_CMD_ENDBIT: + case SD_ERROR_NO_1BIT_DATEND: + case SD_ERROR_NO_4BIT_DATEND: + case SD_ERROR_1BIT_DATA_TIMEOUT: + case SD_ERROR_4BIT_DATA_TIMEOUT: + case SD_ERROR_1BIT_UNEXPECTED_TIMEOUT: + case SD_ERROR_4BIT_UNEXPECTED_TIMEOUT: + return -EILSEQ; + case 33: + return -EILSEQ; + case SD_ERROR_ILLEGAL_COMMAND: + return -EINVAL; + case SD_ERROR_NO_DEVICE: + return -ENOMEDIUM; + default: + return -ENODEV; + } +} + +static void command_res_completed(struct urb *urb) +{ /* urb completion handler - hardirq */ + struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context; + if (urb->status) { + /* we have to let the initiator handle the error */ + } else if (vub300->command_res_urb->actual_length == 0) { + /* + * we have seen this happen once or twice and + * we suspect a buggy USB host controller + */ + } else if (!vub300->data) { + /* this means that the command (typically CMD52) suceeded */ + } else if (vub300->resp.common.header_type != 0x02) { + /* + * this is an error response from the VUB300 chip + * and we let the initiator handle it + */ + } else if (vub300->urb) { + vub300->cmd->error = + vub300_response_error(vub300->resp.error.error_code); + usb_unlink_urb(vub300->urb); + } else { + vub300->cmd->error = + vub300_response_error(vub300->resp.error.error_code); + usb_sg_cancel(&vub300->sg_request); + } + complete(&vub300->command_complete); /* got_response_in */ +} + +static void command_out_completed(struct urb *urb) +{ /* urb completion handler - hardirq */ + struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context; + if (urb->status) { + complete(&vub300->command_complete); + } else { + int ret; + unsigned int pipe = + usb_rcvbulkpipe(vub300->udev, vub300->cmnd_res_ep); + usb_fill_bulk_urb(vub300->command_res_urb, vub300->udev, pipe, + &vub300->resp, sizeof(vub300->resp), + command_res_completed, vub300); + vub300->command_res_urb->actual_length = 0; + ret = usb_submit_urb(vub300->command_res_urb, GFP_ATOMIC); + if (ret == 0) { + /* + * the urb completion handler will call + * our completion handler + */ + } else { + /* + * and thus we only call it directly + * when it will not be called + */ + complete(&vub300->command_complete); + } + } +} + +/* + * the STUFF bits are masked out for the comparisons + */ +static void snoop_block_size_and_bus_width(struct vub300_mmc_host *vub300, + u32 cmd_arg) +{ + if ((0xFBFFFE00 & cmd_arg) == 0x80022200) + vub300->fbs[1] = (cmd_arg << 8) | (0x00FF & vub300->fbs[1]); + else if ((0xFBFFFE00 & cmd_arg) == 0x80022000) + vub300->fbs[1] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[1]); + else if ((0xFBFFFE00 & cmd_arg) == 0x80042200) + vub300->fbs[2] = (cmd_arg << 8) | (0x00FF & vub300->fbs[2]); + else 
if ((0xFBFFFE00 & cmd_arg) == 0x80042000) + vub300->fbs[2] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[2]); + else if ((0xFBFFFE00 & cmd_arg) == 0x80062200) + vub300->fbs[3] = (cmd_arg << 8) | (0x00FF & vub300->fbs[3]); + else if ((0xFBFFFE00 & cmd_arg) == 0x80062000) + vub300->fbs[3] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[3]); + else if ((0xFBFFFE00 & cmd_arg) == 0x80082200) + vub300->fbs[4] = (cmd_arg << 8) | (0x00FF & vub300->fbs[4]); + else if ((0xFBFFFE00 & cmd_arg) == 0x80082000) + vub300->fbs[4] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[4]); + else if ((0xFBFFFE00 & cmd_arg) == 0x800A2200) + vub300->fbs[5] = (cmd_arg << 8) | (0x00FF & vub300->fbs[5]); + else if ((0xFBFFFE00 & cmd_arg) == 0x800A2000) + vub300->fbs[5] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[5]); + else if ((0xFBFFFE00 & cmd_arg) == 0x800C2200) + vub300->fbs[6] = (cmd_arg << 8) | (0x00FF & vub300->fbs[6]); + else if ((0xFBFFFE00 & cmd_arg) == 0x800C2000) + vub300->fbs[6] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[6]); + else if ((0xFBFFFE00 & cmd_arg) == 0x800E2200) + vub300->fbs[7] = (cmd_arg << 8) | (0x00FF & vub300->fbs[7]); + else if ((0xFBFFFE00 & cmd_arg) == 0x800E2000) + vub300->fbs[7] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[7]); + else if ((0xFBFFFE03 & cmd_arg) == 0x80000E00) + vub300->bus_width = 1; + else if ((0xFBFFFE03 & cmd_arg) == 0x80000E02) + vub300->bus_width = 4; +} + +static void send_command(struct vub300_mmc_host *vub300) +{ + /* cmd_mutex is held by vub300_cmndwork_thread */ + struct mmc_command *cmd = vub300->cmd; + struct mmc_data *data = vub300->data; + int retval; + int i; + u8 response_type; + if (vub300->app_spec) { + switch (cmd->opcode) { + case 6: + response_type = SDRT_1; + vub300->resp_len = 6; + if (0x00000000 == (0x00000003 & cmd->arg)) + vub300->bus_width = 1; + else if (0x00000002 == (0x00000003 & cmd->arg)) + vub300->bus_width = 4; + else + dev_err(&vub300->udev->dev, + "unexpected ACMD6 bus_width=%d\n", + 0x00000003 & cmd->arg); + break; + case 13: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 22: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 23: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 41: + response_type = SDRT_3; + vub300->resp_len = 6; + break; + case 42: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 51: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 55: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + default: + vub300->resp_len = 0; + cmd->error = -EINVAL; + complete(&vub300->command_complete); + return; + } + vub300->app_spec = 0; + } else { + switch (cmd->opcode) { + case 0: + response_type = SDRT_NONE; + vub300->resp_len = 0; + break; + case 1: + response_type = SDRT_3; + vub300->resp_len = 6; + break; + case 2: + response_type = SDRT_2; + vub300->resp_len = 17; + break; + case 3: + response_type = SDRT_6; + vub300->resp_len = 6; + break; + case 4: + response_type = SDRT_NONE; + vub300->resp_len = 0; + break; + case 5: + response_type = SDRT_4; + vub300->resp_len = 6; + break; + case 6: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 7: + response_type = SDRT_1B; + vub300->resp_len = 6; + break; + case 8: + response_type = SDRT_7; + vub300->resp_len = 6; + break; + case 9: + response_type = SDRT_2; + vub300->resp_len = 17; + break; + case 10: + response_type = SDRT_2; + vub300->resp_len = 17; + break; + case 12: + response_type = SDRT_1B; + vub300->resp_len = 6; + break; + case 13: + response_type = SDRT_1; + vub300->resp_len = 
6; + break; + case 15: + response_type = SDRT_NONE; + vub300->resp_len = 0; + break; + case 16: + for (i = 0; i < ARRAY_SIZE(vub300->fbs); i++) + vub300->fbs[i] = 0xFFFF & cmd->arg; + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 17: + case 18: + case 24: + case 25: + case 27: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 28: + case 29: + response_type = SDRT_1B; + vub300->resp_len = 6; + break; + case 30: + case 32: + case 33: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 38: + response_type = SDRT_1B; + vub300->resp_len = 6; + break; + case 42: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + case 52: + response_type = SDRT_5; + vub300->resp_len = 6; + snoop_block_size_and_bus_width(vub300, cmd->arg); + break; + case 53: + response_type = SDRT_5; + vub300->resp_len = 6; + break; + case 55: + response_type = SDRT_1; + vub300->resp_len = 6; + vub300->app_spec = 1; + break; + case 56: + response_type = SDRT_1; + vub300->resp_len = 6; + break; + default: + vub300->resp_len = 0; + cmd->error = -EINVAL; + complete(&vub300->command_complete); + return; + } + } + /* + * it is a shame that we can not use "sizeof(struct sd_command_header)" + * this is because the packet _must_ be padded to 64 bytes + */ + vub300->cmnd.head.header_size = 20; + vub300->cmnd.head.header_type = 0x00; + vub300->cmnd.head.port_number = 0; /* "0" means port 1 */ + vub300->cmnd.head.command_type = 0x00; /* standard read command */ + vub300->cmnd.head.response_type = response_type; + vub300->cmnd.head.command_index = cmd->opcode; + vub300->cmnd.head.arguments[0] = cmd->arg >> 24; + vub300->cmnd.head.arguments[1] = cmd->arg >> 16; + vub300->cmnd.head.arguments[2] = cmd->arg >> 8; + vub300->cmnd.head.arguments[3] = cmd->arg >> 0; + if (cmd->opcode == 52) { + int fn = 0x7 & (cmd->arg >> 28); + vub300->cmnd.head.block_count[0] = 0; + vub300->cmnd.head.block_count[1] = 0; + vub300->cmnd.head.block_size[0] = (vub300->fbs[fn] >> 8) & 0xFF; + vub300->cmnd.head.block_size[1] = (vub300->fbs[fn] >> 0) & 0xFF; + vub300->cmnd.head.command_type = 0x00; + vub300->cmnd.head.transfer_size[0] = 0; + vub300->cmnd.head.transfer_size[1] = 0; + vub300->cmnd.head.transfer_size[2] = 0; + vub300->cmnd.head.transfer_size[3] = 0; + } else if (!data) { + vub300->cmnd.head.block_count[0] = 0; + vub300->cmnd.head.block_count[1] = 0; + vub300->cmnd.head.block_size[0] = (vub300->fbs[0] >> 8) & 0xFF; + vub300->cmnd.head.block_size[1] = (vub300->fbs[0] >> 0) & 0xFF; + vub300->cmnd.head.command_type = 0x00; + vub300->cmnd.head.transfer_size[0] = 0; + vub300->cmnd.head.transfer_size[1] = 0; + vub300->cmnd.head.transfer_size[2] = 0; + vub300->cmnd.head.transfer_size[3] = 0; + } else if (cmd->opcode == 53) { + int fn = 0x7 & (cmd->arg >> 28); + if (0x08 & vub300->cmnd.head.arguments[0]) { /* BLOCK MODE */ + vub300->cmnd.head.block_count[0] = + (data->blocks >> 8) & 0xFF; + vub300->cmnd.head.block_count[1] = + (data->blocks >> 0) & 0xFF; + vub300->cmnd.head.block_size[0] = + (data->blksz >> 8) & 0xFF; + vub300->cmnd.head.block_size[1] = + (data->blksz >> 0) & 0xFF; + } else { /* BYTE MODE */ + vub300->cmnd.head.block_count[0] = 0; + vub300->cmnd.head.block_count[1] = 0; + vub300->cmnd.head.block_size[0] = + (vub300->datasize >> 8) & 0xFF; + vub300->cmnd.head.block_size[1] = + (vub300->datasize >> 0) & 0xFF; + } + vub300->cmnd.head.command_type = + (MMC_DATA_READ & data->flags) ? 
0x00 : 0x80; + vub300->cmnd.head.transfer_size[0] = + (vub300->datasize >> 24) & 0xFF; + vub300->cmnd.head.transfer_size[1] = + (vub300->datasize >> 16) & 0xFF; + vub300->cmnd.head.transfer_size[2] = + (vub300->datasize >> 8) & 0xFF; + vub300->cmnd.head.transfer_size[3] = + (vub300->datasize >> 0) & 0xFF; + if (vub300->datasize < vub300->fbs[fn]) { + vub300->cmnd.head.block_count[0] = 0; + vub300->cmnd.head.block_count[1] = 0; + } + } else { + vub300->cmnd.head.block_count[0] = (data->blocks >> 8) & 0xFF; + vub300->cmnd.head.block_count[1] = (data->blocks >> 0) & 0xFF; + vub300->cmnd.head.block_size[0] = (data->blksz >> 8) & 0xFF; + vub300->cmnd.head.block_size[1] = (data->blksz >> 0) & 0xFF; + vub300->cmnd.head.command_type = + (MMC_DATA_READ & data->flags) ? 0x00 : 0x80; + vub300->cmnd.head.transfer_size[0] = + (vub300->datasize >> 24) & 0xFF; + vub300->cmnd.head.transfer_size[1] = + (vub300->datasize >> 16) & 0xFF; + vub300->cmnd.head.transfer_size[2] = + (vub300->datasize >> 8) & 0xFF; + vub300->cmnd.head.transfer_size[3] = + (vub300->datasize >> 0) & 0xFF; + if (vub300->datasize < vub300->fbs[0]) { + vub300->cmnd.head.block_count[0] = 0; + vub300->cmnd.head.block_count[1] = 0; + } + } + if (vub300->cmnd.head.block_size[0] || vub300->cmnd.head.block_size[1]) { + u16 block_size = vub300->cmnd.head.block_size[1] | + (vub300->cmnd.head.block_size[0] << 8); + u16 block_boundary = FIRMWARE_BLOCK_BOUNDARY - + (FIRMWARE_BLOCK_BOUNDARY % block_size); + vub300->cmnd.head.block_boundary[0] = + (block_boundary >> 8) & 0xFF; + vub300->cmnd.head.block_boundary[1] = + (block_boundary >> 0) & 0xFF; + } else { + vub300->cmnd.head.block_boundary[0] = 0; + vub300->cmnd.head.block_boundary[1] = 0; + } + usb_fill_bulk_urb(vub300->command_out_urb, vub300->udev, + usb_sndbulkpipe(vub300->udev, vub300->cmnd_out_ep), + &vub300->cmnd, sizeof(vub300->cmnd), + command_out_completed, vub300); + retval = usb_submit_urb(vub300->command_out_urb, GFP_KERNEL); + if (retval < 0) { + cmd->error = retval; + complete(&vub300->command_complete); + return; + } else { + return; + } +} + +/* + * timer callback runs in atomic mode + * so it cannot call usb_kill_urb() + */ +static void vub300_sg_timed_out(unsigned long data) +{ + struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data; + vub300->usb_timed_out = 1; + usb_sg_cancel(&vub300->sg_request); + usb_unlink_urb(vub300->command_out_urb); + usb_unlink_urb(vub300->command_res_urb); +} + +static u16 roundup_to_multiple_of_64(u16 number) +{ + return 0xFFC0 & (0x3F + number); +} + +/* + * this is a separate function to solve the 80 column width restriction + */ +static void __download_offload_pseudocode(struct vub300_mmc_host *vub300, + const struct firmware *fw) +{ + u8 register_count = 0; + u16 ts = 0; + u16 interrupt_size = 0; + const u8 *data = fw->data; + int size = fw->size; + u8 c; + dev_info(&vub300->udev->dev, "using %s for SDIO offload processing\n", + vub300->vub_name); + do { + c = *data++; + } while (size-- && c); /* skip comment */ + dev_info(&vub300->udev->dev, "using offload firmware %s %s\n", fw->data, + vub300->vub_name); + if (size < 4) { + dev_err(&vub300->udev->dev, + "corrupt offload pseudocode in firmware %s\n", + vub300->vub_name); + strncpy(vub300->vub_name, "corrupt offload pseudocode", + sizeof(vub300->vub_name)); + return; + } + interrupt_size += *data++; + size -= 1; + interrupt_size <<= 8; + interrupt_size += *data++; + size -= 1; + if (interrupt_size < size) { + u16 xfer_length = roundup_to_multiple_of_64(interrupt_size); + u8 
*xfer_buffer = kmalloc(xfer_length, GFP_KERNEL); + if (xfer_buffer) { + int retval; + memcpy(xfer_buffer, data, interrupt_size); + memset(xfer_buffer + interrupt_size, 0, + xfer_length - interrupt_size); + size -= interrupt_size; + data += interrupt_size; + retval = + usb_control_msg(vub300->udev, + usb_sndctrlpipe(vub300->udev, 0), + SET_INTERRUPT_PSEUDOCODE, + USB_DIR_OUT | USB_TYPE_VENDOR | + USB_RECIP_DEVICE, 0x0000, 0x0000, + xfer_buffer, xfer_length, HZ); + kfree(xfer_buffer); + if (retval < 0) { + strncpy(vub300->vub_name, + "SDIO pseudocode download failed", + sizeof(vub300->vub_name)); + return; + } + } else { + dev_err(&vub300->udev->dev, + "not enough memory for xfer buffer to send" + " INTERRUPT_PSEUDOCODE for %s %s\n", fw->data, + vub300->vub_name); + strncpy(vub300->vub_name, + "SDIO interrupt pseudocode download failed", + sizeof(vub300->vub_name)); + return; + } + } else { + dev_err(&vub300->udev->dev, + "corrupt interrupt pseudocode in firmware %s %s\n", + fw->data, vub300->vub_name); + strncpy(vub300->vub_name, "corrupt interrupt pseudocode", + sizeof(vub300->vub_name)); + return; + } + ts += *data++; + size -= 1; + ts <<= 8; + ts += *data++; + size -= 1; + if (ts < size) { + u16 xfer_length = roundup_to_multiple_of_64(ts); + u8 *xfer_buffer = kmalloc(xfer_length, GFP_KERNEL); + if (xfer_buffer) { + int retval; + memcpy(xfer_buffer, data, ts); + memset(xfer_buffer + ts, 0, + xfer_length - ts); + size -= ts; + data += ts; + retval = + usb_control_msg(vub300->udev, + usb_sndctrlpipe(vub300->udev, 0), + SET_TRANSFER_PSEUDOCODE, + USB_DIR_OUT | USB_TYPE_VENDOR | + USB_RECIP_DEVICE, 0x0000, 0x0000, + xfer_buffer, xfer_length, HZ); + kfree(xfer_buffer); + if (retval < 0) { + strncpy(vub300->vub_name, + "SDIO pseudocode download failed", + sizeof(vub300->vub_name)); + return; + } + } else { + dev_err(&vub300->udev->dev, + "not enough memory for xfer buffer to send" + " TRANSFER_PSEUDOCODE for %s %s\n", fw->data, + vub300->vub_name); + strncpy(vub300->vub_name, + "SDIO transfer pseudocode download failed", + sizeof(vub300->vub_name)); + return; + } + } else { + dev_err(&vub300->udev->dev, + "corrupt transfer pseudocode in firmware %s %s\n", + fw->data, vub300->vub_name); + strncpy(vub300->vub_name, "corrupt transfer pseudocode", + sizeof(vub300->vub_name)); + return; + } + register_count += *data++; + size -= 1; + if (register_count * 4 == size) { + int I = vub300->dynamic_register_count = register_count; + int i = 0; + while (I--) { + unsigned int func_num = 0; + vub300->sdio_register[i].func_num = *data++; + size -= 1; + func_num += *data++; + size -= 1; + func_num <<= 8; + func_num += *data++; + size -= 1; + func_num <<= 8; + func_num += *data++; + size -= 1; + vub300->sdio_register[i].sdio_reg = func_num; + vub300->sdio_register[i].activate = 1; + vub300->sdio_register[i].prepared = 0; + i += 1; + } + dev_info(&vub300->udev->dev, + "initialized %d dynamic pseudocode registers\n", + vub300->dynamic_register_count); + return; + } else { + dev_err(&vub300->udev->dev, + "corrupt dynamic registers in firmware %s\n", + vub300->vub_name); + strncpy(vub300->vub_name, "corrupt dynamic registers", + sizeof(vub300->vub_name)); + return; + } +} + +/* + * if the binary containing the EMPTY PseudoCode can not be found + * vub300->vub_name is set anyway in order to prevent an automatic retry + */ +static void download_offload_pseudocode(struct vub300_mmc_host *vub300) +{ + struct mmc_card *card = vub300->mmc->card; + int sdio_funcs = card->sdio_funcs; + const struct firmware *fw = NULL; + 
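The firmware look-up that follows builds its file name from the card's CIS vendor/device pair plus one vendor/device pair per SDIO function, and falls back to vub_default.bin when no card-specific image is installed. A minimal sketch of that naming scheme, with the helper name and example IDs invented purely for illustration:

#include <stdio.h>

/* illustrative only: assemble "vub_VVVVDDDD[_VVVVDDDD...].bin" */
static void build_vub_firmware_name(char *buf, size_t len,
				    unsigned int card_vendor, unsigned int card_device,
				    const unsigned int *fn_vendor, const unsigned int *fn_device,
				    int sdio_funcs)
{
	int n;
	int l = snprintf(buf, len, "vub_%04X%04X", card_vendor, card_device);

	for (n = 0; n < sdio_funcs && l < (int)len; n++)
		l += snprintf(buf + l, len - l, "_%04X%04X",
			      fn_vendor[n], fn_device[n]);
	if (l < (int)len)
		snprintf(buf + l, len - l, ".bin");
}

/*
 * e.g. a card 0x0296:0x5347 with one function 0x0296:0x5348 would request
 * "vub_02965347_02965348.bin" (IDs invented for the example).
 */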
int l = snprintf(vub300->vub_name, sizeof(vub300->vub_name), + "vub_%04X%04X", card->cis.vendor, card->cis.device); + int n = 0; + int retval; + for (n = 0; n < sdio_funcs; n++) { + struct sdio_func *sf = card->sdio_func[n]; + l += snprintf(vub300->vub_name + l, + sizeof(vub300->vub_name) - l, "_%04X%04X", + sf->vendor, sf->device); + }; + snprintf(vub300->vub_name + l, sizeof(vub300->vub_name) - l, ".bin"); + dev_info(&vub300->udev->dev, "requesting offload firmware %s\n", + vub300->vub_name); + retval = request_firmware(&fw, vub300->vub_name, &card->dev); + if (retval < 0) { + strncpy(vub300->vub_name, "vub_default.bin", + sizeof(vub300->vub_name)); + retval = request_firmware(&fw, vub300->vub_name, &card->dev); + if (retval < 0) { + strncpy(vub300->vub_name, + "no SDIO offload firmware found", + sizeof(vub300->vub_name)); + } else { + __download_offload_pseudocode(vub300, fw); + release_firmware(fw); + } + } else { + __download_offload_pseudocode(vub300, fw); + release_firmware(fw); + } +} + +static void vub300_usb_bulk_msg_completion(struct urb *urb) +{ /* urb completion handler - hardirq */ + complete((struct completion *)urb->context); +} + +static int vub300_usb_bulk_msg(struct vub300_mmc_host *vub300, + unsigned int pipe, void *data, int len, + int *actual_length, int timeout_msecs) +{ + /* cmd_mutex is held by vub300_cmndwork_thread */ + struct usb_device *usb_dev = vub300->udev; + struct completion done; + int retval; + vub300->urb = usb_alloc_urb(0, GFP_KERNEL); + if (!vub300->urb) + return -ENOMEM; + usb_fill_bulk_urb(vub300->urb, usb_dev, pipe, data, len, + vub300_usb_bulk_msg_completion, NULL); + init_completion(&done); + vub300->urb->context = &done; + vub300->urb->actual_length = 0; + retval = usb_submit_urb(vub300->urb, GFP_KERNEL); + if (unlikely(retval)) + goto out; + if (!wait_for_completion_timeout + (&done, msecs_to_jiffies(timeout_msecs))) { + retval = -ETIMEDOUT; + usb_kill_urb(vub300->urb); + } else { + retval = vub300->urb->status; + } +out: + *actual_length = vub300->urb->actual_length; + usb_free_urb(vub300->urb); + vub300->urb = NULL; + return retval; +} + +static int __command_read_data(struct vub300_mmc_host *vub300, + struct mmc_command *cmd, struct mmc_data *data) +{ + /* cmd_mutex is held by vub300_cmndwork_thread */ + int linear_length = vub300->datasize; + int padded_length = vub300->large_usb_packets ? 
+ ((511 + linear_length) >> 9) << 9 : + ((63 + linear_length) >> 6) << 6; + if ((padded_length == linear_length) || !pad_input_to_usb_pkt) { + int result; + unsigned pipe; + pipe = usb_rcvbulkpipe(vub300->udev, vub300->data_inp_ep); + result = usb_sg_init(&vub300->sg_request, vub300->udev, + pipe, 0, data->sg, + data->sg_len, 0, GFP_KERNEL); + if (result < 0) { + usb_unlink_urb(vub300->command_out_urb); + usb_unlink_urb(vub300->command_res_urb); + cmd->error = result; + data->bytes_xfered = 0; + return 0; + } else { + vub300->sg_transfer_timer.expires = + jiffies + msecs_to_jiffies(2000 + + (linear_length / 16384)); + add_timer(&vub300->sg_transfer_timer); + usb_sg_wait(&vub300->sg_request); + del_timer(&vub300->sg_transfer_timer); + if (vub300->sg_request.status < 0) { + cmd->error = vub300->sg_request.status; + data->bytes_xfered = 0; + return 0; + } else { + data->bytes_xfered = vub300->datasize; + return linear_length; + } + } + } else { + u8 *buf = kmalloc(padded_length, GFP_KERNEL); + if (buf) { + int result; + unsigned pipe = usb_rcvbulkpipe(vub300->udev, + vub300->data_inp_ep); + int actual_length = 0; + result = vub300_usb_bulk_msg(vub300, pipe, buf, + padded_length, &actual_length, + 2000 + (padded_length / 16384)); + if (result < 0) { + cmd->error = result; + data->bytes_xfered = 0; + kfree(buf); + return 0; + } else if (actual_length < linear_length) { + cmd->error = -EREMOTEIO; + data->bytes_xfered = 0; + kfree(buf); + return 0; + } else { + sg_copy_from_buffer(data->sg, data->sg_len, buf, + linear_length); + kfree(buf); + data->bytes_xfered = vub300->datasize; + return linear_length; + } + } else { + cmd->error = -ENOMEM; + data->bytes_xfered = 0; + return 0; + } + } +} + +static int __command_write_data(struct vub300_mmc_host *vub300, + struct mmc_command *cmd, struct mmc_data *data) +{ + /* cmd_mutex is held by vub300_cmndwork_thread */ + unsigned pipe = usb_sndbulkpipe(vub300->udev, vub300->data_out_ep); + int linear_length = vub300->datasize; + int modulo_64_length = linear_length & 0x003F; + int modulo_512_length = linear_length & 0x01FF; + if (linear_length < 64) { + int result; + int actual_length; + sg_copy_to_buffer(data->sg, data->sg_len, + vub300->padded_buffer, + sizeof(vub300->padded_buffer)); + memset(vub300->padded_buffer + linear_length, 0, + sizeof(vub300->padded_buffer) - linear_length); + result = vub300_usb_bulk_msg(vub300, pipe, vub300->padded_buffer, + sizeof(vub300->padded_buffer), + &actual_length, 2000 + + (sizeof(vub300->padded_buffer) / + 16384)); + if (result < 0) { + cmd->error = result; + data->bytes_xfered = 0; + } else { + data->bytes_xfered = vub300->datasize; + } + } else if ((!vub300->large_usb_packets && (0 < modulo_64_length)) || + (vub300->large_usb_packets && (64 > modulo_512_length)) + ) { /* don't you just love these work-rounds */ + int padded_length = ((63 + linear_length) >> 6) << 6; + u8 *buf = kmalloc(padded_length, GFP_KERNEL); + if (buf) { + int result; + int actual_length; + sg_copy_to_buffer(data->sg, data->sg_len, buf, + padded_length); + memset(buf + linear_length, 0, + padded_length - linear_length); + result = + vub300_usb_bulk_msg(vub300, pipe, buf, + padded_length, &actual_length, + 2000 + padded_length / 16384); + kfree(buf); + if (result < 0) { + cmd->error = result; + data->bytes_xfered = 0; + } else { + data->bytes_xfered = vub300->datasize; + } + } else { + cmd->error = -ENOMEM; + data->bytes_xfered = 0; + } + } else { /* no data padding required */ + int result; + unsigned char buf[64 * 4]; + 
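Both data paths in this hunk round every transfer up to the USB bulk packet size, 64 bytes normally or 512 bytes when the VUB300 endpoints report large packets, which is what roundup_to_multiple_of_64() and the modulo_64/modulo_512 tests express. The same rule as one hedged helper (the function name is illustrative, not part of the driver):

#include <stdio.h>

/* round a transfer length up to the bulk packet boundary */
static unsigned int pad_to_usb_packet(unsigned int len, int large_usb_packets)
{
	return large_usb_packets ? (len + 511) & ~511u : (len + 63) & ~63u;
}

int main(void)
{
	/* prints "64 1024": 24 bytes pad to one small packet, 600 bytes to two large ones */
	printf("%u %u\n", pad_to_usb_packet(24, 0), pad_to_usb_packet(600, 1));
	return 0;
}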
sg_copy_to_buffer(data->sg, data->sg_len, buf, sizeof(buf));
+		result = usb_sg_init(&vub300->sg_request, vub300->udev,
+				     pipe, 0, data->sg,
+				     data->sg_len, 0, GFP_KERNEL);
+		if (result < 0) {
+			usb_unlink_urb(vub300->command_out_urb);
+			usb_unlink_urb(vub300->command_res_urb);
+			cmd->error = result;
+			data->bytes_xfered = 0;
+		} else {
+			vub300->sg_transfer_timer.expires =
+				jiffies + msecs_to_jiffies(2000 +
+							   linear_length / 16384);
+			add_timer(&vub300->sg_transfer_timer);
+			usb_sg_wait(&vub300->sg_request);
+			if (cmd->error) {
+				data->bytes_xfered = 0;
+			} else {
+				del_timer(&vub300->sg_transfer_timer);
+				if (vub300->sg_request.status < 0) {
+					cmd->error = vub300->sg_request.status;
+					data->bytes_xfered = 0;
+				} else {
+					data->bytes_xfered = vub300->datasize;
+				}
+			}
+		}
+	}
+	return linear_length;
+}
+
+static void __vub300_command_response(struct vub300_mmc_host *vub300,
+				      struct mmc_command *cmd,
+				      struct mmc_data *data, int data_length)
+{
+	/* cmd_mutex is held by vub300_cmndwork_thread */
+	long respretval;
+	int msec_timeout = 1000 + data_length / 4;
+	respretval =
+		wait_for_completion_timeout(&vub300->command_complete,
+					    msecs_to_jiffies(msec_timeout));
+	if (respretval == 0) { /* TIMED OUT */
+		/* we don't know which of "out" and "res" if any failed */
+		int result;
+		vub300->usb_timed_out = 1;
+		usb_kill_urb(vub300->command_out_urb);
+		usb_kill_urb(vub300->command_res_urb);
+		cmd->error = -ETIMEDOUT;
+		result = usb_lock_device_for_reset(vub300->udev,
+						   vub300->interface);
+		if (result == 0) {
+			result = usb_reset_device(vub300->udev);
+			usb_unlock_device(vub300->udev);
+		}
+	} else if (respretval < 0) {
+		/* we don't know which of "out" and "res" if any failed */
+		usb_kill_urb(vub300->command_out_urb);
+		usb_kill_urb(vub300->command_res_urb);
+		cmd->error = respretval;
+	} else if (cmd->error) {
+		/*
+		 * the error occurred sending the command
+		 * or receiving the response
+		 */
+	} else if (vub300->command_out_urb->status) {
+		vub300->usb_transport_fail = vub300->command_out_urb->status;
+		cmd->error = -EPROTO == vub300->command_out_urb->status ?
+			-ESHUTDOWN : vub300->command_out_urb->status;
+	} else if (vub300->command_res_urb->status) {
+		vub300->usb_transport_fail = vub300->command_res_urb->status;
+		cmd->error = -EPROTO == vub300->command_res_urb->status ?
+ -ESHUTDOWN : vub300->command_res_urb->status; + } else if (vub300->resp.common.header_type == 0x00) { + /* + * the command completed successfully + * and there was no piggybacked data + */ + } else if (vub300->resp.common.header_type == RESPONSE_ERROR) { + cmd->error = + vub300_response_error(vub300->resp.error.error_code); + if (vub300->data) + usb_sg_cancel(&vub300->sg_request); + } else if (vub300->resp.common.header_type == RESPONSE_PIGGYBACKED) { + int offloaded_data_length = + vub300->resp.common.header_size - + sizeof(struct sd_register_header); + int register_count = offloaded_data_length >> 3; + int ri = 0; + while (register_count--) { + add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]); + ri += 1; + } + vub300->resp.common.header_size = + sizeof(struct sd_register_header); + vub300->resp.common.header_type = 0x00; + cmd->error = 0; + } else if (vub300->resp.common.header_type == RESPONSE_PIG_DISABLED) { + int offloaded_data_length = + vub300->resp.common.header_size - + sizeof(struct sd_register_header); + int register_count = offloaded_data_length >> 3; + int ri = 0; + while (register_count--) { + add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]); + ri += 1; + } + mutex_lock(&vub300->irq_mutex); + if (vub300->irqs_queued) { + vub300->irqs_queued += 1; + } else if (vub300->irq_enabled) { + vub300->irqs_queued += 1; + vub300_queue_poll_work(vub300, 0); + } else { + vub300->irqs_queued += 1; + } + vub300->irq_disabled = 1; + mutex_unlock(&vub300->irq_mutex); + vub300->resp.common.header_size = + sizeof(struct sd_register_header); + vub300->resp.common.header_type = 0x00; + cmd->error = 0; + } else if (vub300->resp.common.header_type == RESPONSE_PIG_ENABLED) { + int offloaded_data_length = + vub300->resp.common.header_size - + sizeof(struct sd_register_header); + int register_count = offloaded_data_length >> 3; + int ri = 0; + while (register_count--) { + add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]); + ri += 1; + } + mutex_lock(&vub300->irq_mutex); + if (vub300->irqs_queued) { + vub300->irqs_queued += 1; + } else if (vub300->irq_enabled) { + vub300->irqs_queued += 1; + vub300_queue_poll_work(vub300, 0); + } else { + vub300->irqs_queued += 1; + } + vub300->irq_disabled = 0; + mutex_unlock(&vub300->irq_mutex); + vub300->resp.common.header_size = + sizeof(struct sd_register_header); + vub300->resp.common.header_type = 0x00; + cmd->error = 0; + } else { + cmd->error = -EINVAL; + } +} + +static void construct_request_response(struct vub300_mmc_host *vub300, + struct mmc_command *cmd) +{ + int resp_len = vub300->resp_len; + int less_cmd = (17 == resp_len) ? 
resp_len : resp_len - 1; + int bytes = 3 & less_cmd; + int words = less_cmd >> 2; + u8 *r = vub300->resp.response.command_response; + if (bytes == 3) { + cmd->resp[words] = (r[1 + (words << 2)] << 24) + | (r[2 + (words << 2)] << 16) + | (r[3 + (words << 2)] << 8); + } else if (bytes == 2) { + cmd->resp[words] = (r[1 + (words << 2)] << 24) + | (r[2 + (words << 2)] << 16); + } else if (bytes == 1) { + cmd->resp[words] = (r[1 + (words << 2)] << 24); + } + while (words-- > 0) { + cmd->resp[words] = (r[1 + (words << 2)] << 24) + | (r[2 + (words << 2)] << 16) + | (r[3 + (words << 2)] << 8) + | (r[4 + (words << 2)] << 0); + } + if ((cmd->opcode == 53) && (0x000000FF & cmd->resp[0])) + cmd->resp[0] &= 0xFFFFFF00; +} + +/* this thread runs only when there is an upper level command req outstanding */ +static void vub300_cmndwork_thread(struct work_struct *work) +{ + struct vub300_mmc_host *vub300 = + container_of(work, struct vub300_mmc_host, cmndwork); + if (!vub300->interface) { + kref_put(&vub300->kref, vub300_delete); + return; + } else { + struct mmc_request *req = vub300->req; + struct mmc_command *cmd = vub300->cmd; + struct mmc_data *data = vub300->data; + int data_length; + mutex_lock(&vub300->cmd_mutex); + init_completion(&vub300->command_complete); + if (likely(vub300->vub_name[0]) || !vub300->mmc->card || + !mmc_card_present(vub300->mmc->card)) { + /* + * the name of the EMPTY Pseudo firmware file + * is used as a flag to indicate that the file + * has been already downloaded to the VUB300 chip + */ + } else if (0 == vub300->mmc->card->sdio_funcs) { + strncpy(vub300->vub_name, "SD memory device", + sizeof(vub300->vub_name)); + } else { + download_offload_pseudocode(vub300); + } + send_command(vub300); + if (!data) + data_length = 0; + else if (MMC_DATA_READ & data->flags) + data_length = __command_read_data(vub300, cmd, data); + else + data_length = __command_write_data(vub300, cmd, data); + __vub300_command_response(vub300, cmd, data, data_length); + vub300->req = NULL; + vub300->cmd = NULL; + vub300->data = NULL; + if (cmd->error) { + if (cmd->error == -ENOMEDIUM) + check_vub300_port_status(vub300); + mutex_unlock(&vub300->cmd_mutex); + mmc_request_done(vub300->mmc, req); + kref_put(&vub300->kref, vub300_delete); + return; + } else { + construct_request_response(vub300, cmd); + vub300->resp_len = 0; + mutex_unlock(&vub300->cmd_mutex); + kref_put(&vub300->kref, vub300_delete); + mmc_request_done(vub300->mmc, req); + return; + } + } +} + +static int examine_cyclic_buffer(struct vub300_mmc_host *vub300, + struct mmc_command *cmd, u8 Function) +{ + /* cmd_mutex is held by vub300_mmc_request */ + u8 cmd0 = 0xFF & (cmd->arg >> 24); + u8 cmd1 = 0xFF & (cmd->arg >> 16); + u8 cmd2 = 0xFF & (cmd->arg >> 8); + u8 cmd3 = 0xFF & (cmd->arg >> 0); + int first = MAXREGMASK & vub300->fn[Function].offload_point; + struct offload_registers_access *rf = &vub300->fn[Function].reg[first]; + if (cmd0 == rf->command_byte[0] && + cmd1 == rf->command_byte[1] && + cmd2 == rf->command_byte[2] && + cmd3 == rf->command_byte[3]) { + u8 checksum = 0x00; + cmd->resp[1] = checksum << 24; + cmd->resp[0] = (rf->Respond_Byte[0] << 24) + | (rf->Respond_Byte[1] << 16) + | (rf->Respond_Byte[2] << 8) + | (rf->Respond_Byte[3] << 0); + vub300->fn[Function].offload_point += 1; + vub300->fn[Function].offload_count -= 1; + vub300->total_offload_count -= 1; + return 1; + } else { + int delta = 1; /* because it does not match the first one */ + u8 register_count = vub300->fn[Function].offload_count - 1; + u32 register_point = 
vub300->fn[Function].offload_point + 1; + while (0 < register_count) { + int point = MAXREGMASK & register_point; + struct offload_registers_access *r = + &vub300->fn[Function].reg[point]; + if (cmd0 == r->command_byte[0] && + cmd1 == r->command_byte[1] && + cmd2 == r->command_byte[2] && + cmd3 == r->command_byte[3]) { + u8 checksum = 0x00; + cmd->resp[1] = checksum << 24; + cmd->resp[0] = (r->Respond_Byte[0] << 24) + | (r->Respond_Byte[1] << 16) + | (r->Respond_Byte[2] << 8) + | (r->Respond_Byte[3] << 0); + vub300->fn[Function].offload_point += delta; + vub300->fn[Function].offload_count -= delta; + vub300->total_offload_count -= delta; + return 1; + } else { + register_point += 1; + register_count -= 1; + delta += 1; + continue; + } + } + return 0; + } +} + +static int satisfy_request_from_offloaded_data(struct vub300_mmc_host *vub300, + struct mmc_command *cmd) +{ + /* cmd_mutex is held by vub300_mmc_request */ + u8 regs = vub300->dynamic_register_count; + u8 i = 0; + u8 func = FUN(cmd); + u32 reg = REG(cmd); + while (0 < regs--) { + if ((vub300->sdio_register[i].func_num == func) && + (vub300->sdio_register[i].sdio_reg == reg)) { + if (!vub300->sdio_register[i].prepared) { + return 0; + } else if ((0x80000000 & cmd->arg) == 0x80000000) { + /* + * a write to a dynamic register + * nullifies our offloaded value + */ + vub300->sdio_register[i].prepared = 0; + return 0; + } else { + u8 checksum = 0x00; + u8 rsp0 = 0x00; + u8 rsp1 = 0x00; + u8 rsp2 = vub300->sdio_register[i].response; + u8 rsp3 = vub300->sdio_register[i].regvalue; + vub300->sdio_register[i].prepared = 0; + cmd->resp[1] = checksum << 24; + cmd->resp[0] = (rsp0 << 24) + | (rsp1 << 16) + | (rsp2 << 8) + | (rsp3 << 0); + return 1; + } + } else { + i += 1; + continue; + } + }; + if (vub300->total_offload_count == 0) + return 0; + else if (vub300->fn[func].offload_count == 0) + return 0; + else + return examine_cyclic_buffer(vub300, cmd, func); +} + +static void vub300_mmc_request(struct mmc_host *mmc, struct mmc_request *req) +{ /* NOT irq */ + struct mmc_command *cmd = req->cmd; + struct vub300_mmc_host *vub300 = mmc_priv(mmc); + if (!vub300->interface) { + cmd->error = -ESHUTDOWN; + mmc_request_done(mmc, req); + return; + } else { + struct mmc_data *data = req->data; + if (!vub300->card_powered) { + cmd->error = -ENOMEDIUM; + mmc_request_done(mmc, req); + return; + } + if (!vub300->card_present) { + cmd->error = -ENOMEDIUM; + mmc_request_done(mmc, req); + return; + } + if (vub300->usb_transport_fail) { + cmd->error = vub300->usb_transport_fail; + mmc_request_done(mmc, req); + return; + } + if (!vub300->interface) { + cmd->error = -ENODEV; + mmc_request_done(mmc, req); + return; + } + kref_get(&vub300->kref); + mutex_lock(&vub300->cmd_mutex); + mod_timer(&vub300->inactivity_timer, jiffies + HZ); + /* + * for performance we have to return immediately + * if the requested data has been offloaded + */ + if (cmd->opcode == 52 && + satisfy_request_from_offloaded_data(vub300, cmd)) { + cmd->error = 0; + mutex_unlock(&vub300->cmd_mutex); + kref_put(&vub300->kref, vub300_delete); + mmc_request_done(mmc, req); + return; + } else { + vub300->cmd = cmd; + vub300->req = req; + vub300->data = data; + if (data) + vub300->datasize = data->blksz * data->blocks; + else + vub300->datasize = 0; + vub300_queue_cmnd_work(vub300); + mutex_unlock(&vub300->cmd_mutex); + kref_put(&vub300->kref, vub300_delete); + /* + * the kernel lock diagnostics complain + * if the cmd_mutex * is "passed on" + * to the cmndwork thread, + * so we must release it now + 
* and re-acquire it in the cmndwork thread + */ + } + } +} + +static void __set_clock_speed(struct vub300_mmc_host *vub300, u8 buf[8], + struct mmc_ios *ios) +{ + int buf_array_size = 8; /* ARRAY_SIZE(buf) does not work !!! */ + int retval; + u32 kHzClock; + if (ios->clock >= 48000000) + kHzClock = 48000; + else if (ios->clock >= 24000000) + kHzClock = 24000; + else if (ios->clock >= 20000000) + kHzClock = 20000; + else if (ios->clock >= 15000000) + kHzClock = 15000; + else if (ios->clock >= 200000) + kHzClock = 200; + else + kHzClock = 0; + { + int i; + u64 c = kHzClock; + for (i = 0; i < buf_array_size; i++) { + buf[i] = c; + c >>= 8; + } + } + retval = + usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0), + SET_CLOCK_SPEED, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0x00, 0x00, buf, buf_array_size, HZ); + if (retval != 8) { + dev_err(&vub300->udev->dev, "SET_CLOCK_SPEED" + " %dkHz failed with retval=%d\n", kHzClock, retval); + } else { + dev_dbg(&vub300->udev->dev, "SET_CLOCK_SPEED" + " %dkHz\n", kHzClock); + } +} + +static void vub300_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ /* NOT irq */ + struct vub300_mmc_host *vub300 = mmc_priv(mmc); + if (!vub300->interface) + return; + kref_get(&vub300->kref); + mutex_lock(&vub300->cmd_mutex); + if ((ios->power_mode == MMC_POWER_OFF) && vub300->card_powered) { + vub300->card_powered = 0; + usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0), + SET_SD_POWER, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0x0000, 0x0000, NULL, 0, HZ); + /* must wait for the VUB300 u-proc to boot up */ + msleep(600); + } else if ((ios->power_mode == MMC_POWER_UP) && !vub300->card_powered) { + usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0), + SET_SD_POWER, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0x0001, 0x0000, NULL, 0, HZ); + msleep(600); + vub300->card_powered = 1; + } else if (ios->power_mode == MMC_POWER_ON) { + u8 *buf = kmalloc(8, GFP_KERNEL); + if (buf) { + __set_clock_speed(vub300, buf, ios); + kfree(buf); + } + } else { + /* this should mean no change of state */ + } + mutex_unlock(&vub300->cmd_mutex); + kref_put(&vub300->kref, vub300_delete); +} + +static int vub300_mmc_get_ro(struct mmc_host *mmc) +{ + struct vub300_mmc_host *vub300 = mmc_priv(mmc); + return vub300->read_only; +} + +static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ /* NOT irq */ + struct vub300_mmc_host *vub300 = mmc_priv(mmc); + if (!vub300->interface) + return; + kref_get(&vub300->kref); + if (enable) { + mutex_lock(&vub300->irq_mutex); + if (vub300->irqs_queued) { + vub300->irqs_queued -= 1; + mmc_signal_sdio_irq(vub300->mmc); + } else if (vub300->irq_disabled) { + vub300->irq_disabled = 0; + vub300->irq_enabled = 1; + vub300_queue_poll_work(vub300, 0); + } else if (vub300->irq_enabled) { + /* this should not happen, so we will just ignore it */ + } else { + vub300->irq_enabled = 1; + vub300_queue_poll_work(vub300, 0); + } + mutex_unlock(&vub300->irq_mutex); + } else { + vub300->irq_enabled = 0; + } + kref_put(&vub300->kref, vub300_delete); +} + +void vub300_init_card(struct mmc_host *mmc, struct mmc_card *card) +{ /* NOT irq */ + struct vub300_mmc_host *vub300 = mmc_priv(mmc); + dev_info(&vub300->udev->dev, "NO host QUIRKS for this card\n"); +} + +static struct mmc_host_ops vub300_mmc_ops = { + .request = vub300_mmc_request, + .set_ios = vub300_mmc_set_ios, + .get_ro = vub300_mmc_get_ro, + .enable_sdio_irq = vub300_enable_sdio_irq, + .init_card = vub300_init_card, +}; + +static 
int vub300_probe(struct usb_interface *interface, + const struct usb_device_id *id) +{ /* NOT irq */ + struct vub300_mmc_host *vub300 = NULL; + struct usb_host_interface *iface_desc; + struct usb_device *udev = usb_get_dev(interface_to_usbdev(interface)); + int i; + int retval = -ENOMEM; + struct urb *command_out_urb; + struct urb *command_res_urb; + struct mmc_host *mmc; + char manufacturer[48]; + char product[32]; + char serial_number[32]; + usb_string(udev, udev->descriptor.iManufacturer, manufacturer, + sizeof(manufacturer)); + usb_string(udev, udev->descriptor.iProduct, product, sizeof(product)); + usb_string(udev, udev->descriptor.iSerialNumber, serial_number, + sizeof(serial_number)); + dev_info(&udev->dev, "probing VID:PID(%04X:%04X) %s %s %s\n", + udev->descriptor.idVendor, udev->descriptor.idProduct, + manufacturer, product, serial_number); + command_out_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!command_out_urb) { + retval = -ENOMEM; + dev_err(&vub300->udev->dev, + "not enough memory for the command_out_urb\n"); + goto error0; + } + command_res_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!command_res_urb) { + retval = -ENOMEM; + dev_err(&vub300->udev->dev, + "not enough memory for the command_res_urb\n"); + goto error1; + } + /* this also allocates memory for our VUB300 mmc host device */ + mmc = mmc_alloc_host(sizeof(struct vub300_mmc_host), &udev->dev); + if (!mmc) { + retval = -ENOMEM; + dev_err(&vub300->udev->dev, + "not enough memory for the mmc_host\n"); + goto error4; + } + /* MMC core transfer sizes tunable parameters */ + mmc->caps = 0; + if (!force_1_bit_data_xfers) + mmc->caps |= MMC_CAP_4_BIT_DATA; + if (!force_polling_for_irqs) + mmc->caps |= MMC_CAP_SDIO_IRQ; + mmc->caps &= ~MMC_CAP_NEEDS_POLL; + /* + * MMC_CAP_NEEDS_POLL causes core.c:mmc_rescan() to poll + * for devices which results in spurious CMD7's being + * issued which stops some SDIO cards from working + */ + if (limit_speed_to_24_MHz) { + mmc->caps |= MMC_CAP_MMC_HIGHSPEED; + mmc->caps |= MMC_CAP_SD_HIGHSPEED; + mmc->f_max = 24000000; + dev_info(&udev->dev, "limiting SDIO speed to 24_MHz\n"); + } else { + mmc->caps |= MMC_CAP_MMC_HIGHSPEED; + mmc->caps |= MMC_CAP_SD_HIGHSPEED; + mmc->f_max = 48000000; + } + mmc->f_min = 200000; + mmc->max_blk_count = 511; + mmc->max_blk_size = 512; + mmc->max_segs = 128; + if (force_max_req_size) + mmc->max_req_size = force_max_req_size * 1024; + else + mmc->max_req_size = 64 * 1024; + mmc->max_seg_size = mmc->max_req_size; + mmc->ocr_avail = 0; + mmc->ocr_avail |= MMC_VDD_165_195; + mmc->ocr_avail |= MMC_VDD_20_21; + mmc->ocr_avail |= MMC_VDD_21_22; + mmc->ocr_avail |= MMC_VDD_22_23; + mmc->ocr_avail |= MMC_VDD_23_24; + mmc->ocr_avail |= MMC_VDD_24_25; + mmc->ocr_avail |= MMC_VDD_25_26; + mmc->ocr_avail |= MMC_VDD_26_27; + mmc->ocr_avail |= MMC_VDD_27_28; + mmc->ocr_avail |= MMC_VDD_28_29; + mmc->ocr_avail |= MMC_VDD_29_30; + mmc->ocr_avail |= MMC_VDD_30_31; + mmc->ocr_avail |= MMC_VDD_31_32; + mmc->ocr_avail |= MMC_VDD_32_33; + mmc->ocr_avail |= MMC_VDD_33_34; + mmc->ocr_avail |= MMC_VDD_34_35; + mmc->ocr_avail |= MMC_VDD_35_36; + mmc->ops = &vub300_mmc_ops; + vub300 = mmc_priv(mmc); + vub300->mmc = mmc; + vub300->card_powered = 0; + vub300->bus_width = 0; + vub300->cmnd.head.block_size[0] = 0x00; + vub300->cmnd.head.block_size[1] = 0x00; + vub300->app_spec = 0; + mutex_init(&vub300->cmd_mutex); + mutex_init(&vub300->irq_mutex); + vub300->command_out_urb = command_out_urb; + vub300->command_res_urb = command_res_urb; + vub300->usb_timed_out = 0; + 
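__set_clock_speed() above picks the highest rate the VUB300 supports that does not exceed ios->clock and serialises it, least significant byte first, into the 8-byte buffer sent with the SET_CLOCK_SPEED vendor request. A hedged sketch of that selection and encoding, with helper names invented for illustration:

#include <stdint.h>

/* map a requested rate in Hz onto the VUB300's supported kHz steps */
static uint32_t vub300_khz_for(uint32_t hz)
{
	if (hz >= 48000000)
		return 48000;
	if (hz >= 24000000)
		return 24000;
	if (hz >= 20000000)
		return 20000;
	if (hz >= 15000000)
		return 15000;
	if (hz >= 200000)
		return 200;
	return 0;
}

/* little-endian encoding of the chosen rate into the control buffer */
static void encode_clock_speed(uint8_t buf[8], uint32_t khz)
{
	uint64_t c = khz;
	int i;

	for (i = 0; i < 8; i++) {
		buf[i] = (uint8_t)c;
		c >>= 8;
	}
}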
vub300->dynamic_register_count = 0; + + for (i = 0; i < ARRAY_SIZE(vub300->fn); i++) { + vub300->fn[i].offload_point = 0; + vub300->fn[i].offload_count = 0; + } + + vub300->total_offload_count = 0; + vub300->irq_enabled = 0; + vub300->irq_disabled = 0; + vub300->irqs_queued = 0; + + for (i = 0; i < ARRAY_SIZE(vub300->sdio_register); i++) + vub300->sdio_register[i++].activate = 0; + + vub300->udev = udev; + vub300->interface = interface; + vub300->cmnd_res_ep = 0; + vub300->cmnd_out_ep = 0; + vub300->data_inp_ep = 0; + vub300->data_out_ep = 0; + + for (i = 0; i < ARRAY_SIZE(vub300->fbs); i++) + vub300->fbs[i] = 512; + + /* + * set up the endpoint information + * + * use the first pair of bulk-in and bulk-out + * endpoints for Command/Response+Interrupt + * + * use the second pair of bulk-in and bulk-out + * endpoints for Data In/Out + */ + vub300->large_usb_packets = 0; + iface_desc = interface->cur_altsetting; + for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { + struct usb_endpoint_descriptor *endpoint = + &iface_desc->endpoint[i].desc; + dev_info(&vub300->udev->dev, + "vub300 testing %s EndPoint(%d) %02X\n", + usb_endpoint_is_bulk_in(endpoint) ? "BULK IN" : + usb_endpoint_is_bulk_out(endpoint) ? "BULK OUT" : + "UNKNOWN", i, endpoint->bEndpointAddress); + if (endpoint->wMaxPacketSize > 64) + vub300->large_usb_packets = 1; + if (usb_endpoint_is_bulk_in(endpoint)) { + if (!vub300->cmnd_res_ep) { + vub300->cmnd_res_ep = + endpoint->bEndpointAddress; + } else if (!vub300->data_inp_ep) { + vub300->data_inp_ep = + endpoint->bEndpointAddress; + } else { + dev_warn(&vub300->udev->dev, + "ignoring" + " unexpected bulk_in endpoint"); + } + } else if (usb_endpoint_is_bulk_out(endpoint)) { + if (!vub300->cmnd_out_ep) { + vub300->cmnd_out_ep = + endpoint->bEndpointAddress; + } else if (!vub300->data_out_ep) { + vub300->data_out_ep = + endpoint->bEndpointAddress; + } else { + dev_warn(&vub300->udev->dev, + "ignoring" + " unexpected bulk_out endpoint"); + } + } else { + dev_warn(&vub300->udev->dev, + "vub300 ignoring EndPoint(%d) %02X", i, + endpoint->bEndpointAddress); + } + } + if (vub300->cmnd_res_ep && vub300->cmnd_out_ep && + vub300->data_inp_ep && vub300->data_out_ep) { + dev_info(&vub300->udev->dev, + "vub300 %s packets" + " using EndPoints %02X %02X %02X %02X\n", + vub300->large_usb_packets ? "LARGE" : "SMALL", + vub300->cmnd_out_ep, vub300->cmnd_res_ep, + vub300->data_out_ep, vub300->data_inp_ep); + /* we have the expected EndPoints */ + } else { + dev_err(&vub300->udev->dev, + "Could not find two sets of bulk-in/out endpoint pairs\n"); + retval = -EINVAL; + goto error5; + } + retval = + usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0), + GET_HC_INF0, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0x0000, 0x0000, &vub300->hc_info, + sizeof(vub300->hc_info), HZ); + if (retval < 0) + goto error5; + retval = + usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0), + SET_ROM_WAIT_STATES, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + firmware_rom_wait_states, 0x0000, NULL, 0, HZ); + if (retval < 0) + goto error5; + dev_info(&vub300->udev->dev, + "operating_mode = %s %s %d MHz %s %d byte USB packets\n", + (mmc->caps & MMC_CAP_SDIO_IRQ) ? "IRQs" : "POLL", + (mmc->caps & MMC_CAP_4_BIT_DATA) ? "4-bit" : "1-bit", + mmc->f_max / 1000000, + pad_input_to_usb_pkt ? "padding input data to" : "with", + vub300->large_usb_packets ? 
512 : 64); + retval = + usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0), + GET_SYSTEM_PORT_STATUS, + USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0x0000, 0x0000, &vub300->system_port_status, + sizeof(vub300->system_port_status), HZ); + if (retval < 0) { + goto error4; + } else if (sizeof(vub300->system_port_status) == retval) { + vub300->card_present = + (0x0001 & vub300->system_port_status.port_flags) ? 1 : 0; + vub300->read_only = + (0x0010 & vub300->system_port_status.port_flags) ? 1 : 0; + } else { + goto error4; + } + usb_set_intfdata(interface, vub300); + INIT_DELAYED_WORK(&vub300->pollwork, vub300_pollwork_thread); + INIT_WORK(&vub300->cmndwork, vub300_cmndwork_thread); + INIT_WORK(&vub300->deadwork, vub300_deadwork_thread); + kref_init(&vub300->kref); + init_timer(&vub300->sg_transfer_timer); + vub300->sg_transfer_timer.data = (unsigned long)vub300; + vub300->sg_transfer_timer.function = vub300_sg_timed_out; + kref_get(&vub300->kref); + init_timer(&vub300->inactivity_timer); + vub300->inactivity_timer.data = (unsigned long)vub300; + vub300->inactivity_timer.function = vub300_inactivity_timer_expired; + vub300->inactivity_timer.expires = jiffies + HZ; + add_timer(&vub300->inactivity_timer); + if (vub300->card_present) + dev_info(&vub300->udev->dev, + "USB vub300 remote SDIO host controller[%d]" + "connected with SD/SDIO card inserted\n", + interface_to_InterfaceNumber(interface)); + else + dev_info(&vub300->udev->dev, + "USB vub300 remote SDIO host controller[%d]" + "connected with no SD/SDIO card inserted\n", + interface_to_InterfaceNumber(interface)); + mmc_add_host(mmc); + return 0; +error5: + mmc_free_host(mmc); + /* + * and hence also frees vub300 + * which is contained at the end of struct mmc + */ +error4: + usb_free_urb(command_out_urb); +error1: + usb_free_urb(command_res_urb); +error0: + return retval; +} + +static void vub300_disconnect(struct usb_interface *interface) +{ /* NOT irq */ + struct vub300_mmc_host *vub300 = usb_get_intfdata(interface); + if (!vub300 || !vub300->mmc) { + return; + } else { + struct mmc_host *mmc = vub300->mmc; + if (!vub300->mmc) { + return; + } else { + int ifnum = interface_to_InterfaceNumber(interface); + usb_set_intfdata(interface, NULL); + /* prevent more I/O from starting */ + vub300->interface = NULL; + kref_put(&vub300->kref, vub300_delete); + mmc_remove_host(mmc); + pr_info("USB vub300 remote SDIO host controller[%d]" + " now disconnected", ifnum); + return; + } + } +} + +#ifdef CONFIG_PM +static int vub300_suspend(struct usb_interface *intf, pm_message_t message) +{ + struct vub300_mmc_host *vub300 = usb_get_intfdata(intf); + if (!vub300 || !vub300->mmc) { + return 0; + } else { + struct mmc_host *mmc = vub300->mmc; + mmc_suspend_host(mmc); + return 0; + } +} + +static int vub300_resume(struct usb_interface *intf) +{ + struct vub300_mmc_host *vub300 = usb_get_intfdata(intf); + if (!vub300 || !vub300->mmc) { + return 0; + } else { + struct mmc_host *mmc = vub300->mmc; + mmc_resume_host(mmc); + return 0; + } +} +#else +#define vub300_suspend NULL +#define vub300_resume NULL +#endif +static int vub300_pre_reset(struct usb_interface *intf) +{ /* NOT irq */ + struct vub300_mmc_host *vub300 = usb_get_intfdata(intf); + mutex_lock(&vub300->cmd_mutex); + return 0; +} + +static int vub300_post_reset(struct usb_interface *intf) +{ /* NOT irq */ + struct vub300_mmc_host *vub300 = usb_get_intfdata(intf); + /* we are sure no URBs are active - no locking needed */ + vub300->errors = -EPIPE; + mutex_unlock(&vub300->cmd_mutex); + 
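vub300_pre_reset() and vub300_post_reset() bracket a USB port reset: pre_reset() takes cmd_mutex so no new command work can start, and post_reset() flags the interrupted state with -EPIPE before releasing the mutex. The reset itself is requested by the driver's command-timeout path; a hedged sketch of that caller-side pattern, mirroring the timeout branch of __vub300_command_response() rather than adding new behaviour (assumes <linux/usb.h> and the driver's own state structure):

/* sketch: escalate a timed-out command/response exchange to a device reset */
static void vub300_reset_after_timeout(struct vub300_mmc_host *vub300)
{
	int rc = usb_lock_device_for_reset(vub300->udev, vub300->interface);

	if (rc == 0) {
		/* the USB core calls .pre_reset and .post_reset around this */
		usb_reset_device(vub300->udev);
		usb_unlock_device(vub300->udev);
	}
}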
return 0; +} + +static struct usb_driver vub300_driver = { + .name = "vub300", + .probe = vub300_probe, + .disconnect = vub300_disconnect, + .suspend = vub300_suspend, + .resume = vub300_resume, + .pre_reset = vub300_pre_reset, + .post_reset = vub300_post_reset, + .id_table = vub300_table, + .supports_autosuspend = 1, +}; + +static int __init vub300_init(void) +{ /* NOT irq */ + int result; + + pr_info("VUB300 Driver rom wait states = %02X irqpoll timeout = %04X", + firmware_rom_wait_states, 0x0FFFF & firmware_irqpoll_timeout); + cmndworkqueue = create_singlethread_workqueue("kvub300c"); + if (!cmndworkqueue) { + pr_err("not enough memory for the REQUEST workqueue"); + result = -ENOMEM; + goto out1; + } + pollworkqueue = create_singlethread_workqueue("kvub300p"); + if (!pollworkqueue) { + pr_err("not enough memory for the IRQPOLL workqueue"); + result = -ENOMEM; + goto out2; + } + deadworkqueue = create_singlethread_workqueue("kvub300d"); + if (!deadworkqueue) { + pr_err("not enough memory for the EXPIRED workqueue"); + result = -ENOMEM; + goto out3; + } + result = usb_register(&vub300_driver); + if (result) { + pr_err("usb_register failed. Error number %d", result); + goto out4; + } + return 0; +out4: + destroy_workqueue(deadworkqueue); +out3: + destroy_workqueue(pollworkqueue); +out2: + destroy_workqueue(cmndworkqueue); +out1: + return result; +} + +static void __exit vub300_exit(void) +{ + usb_deregister(&vub300_driver); + flush_workqueue(cmndworkqueue); + flush_workqueue(pollworkqueue); + flush_workqueue(deadworkqueue); + destroy_workqueue(cmndworkqueue); + destroy_workqueue(pollworkqueue); + destroy_workqueue(deadworkqueue); +} + +module_init(vub300_init); +module_exit(vub300_exit); + +MODULE_AUTHOR("Tony Olech <tony.olech@elandigitalsystems.com>"); +MODULE_DESCRIPTION("VUB300 USB to SD/MMC/SDIO adapter driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 209fbb70619..776a478e629 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -31,6 +31,7 @@ obj-$(CONFIG_ATL2) += atlx/ obj-$(CONFIG_ATL1E) += atl1e/ obj-$(CONFIG_ATL1C) += atl1c/ obj-$(CONFIG_GIANFAR) += gianfar_driver.o +obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o obj-$(CONFIG_TEHUTI) += tehuti.o obj-$(CONFIG_ENIC) += enic/ obj-$(CONFIG_JME) += jme.o diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c index 9eb9b98a7ae..de51e8453c1 100644 --- a/drivers/net/arm/ixp4xx_eth.c +++ b/drivers/net/arm/ixp4xx_eth.c @@ -30,9 +30,12 @@ #include <linux/etherdevice.h> #include <linux/io.h> #include <linux/kernel.h> +#include <linux/net_tstamp.h> #include <linux/phy.h> #include <linux/platform_device.h> +#include <linux/ptp_classify.h> #include <linux/slab.h> +#include <mach/ixp46x_ts.h> #include <mach/npe.h> #include <mach/qmgr.h> @@ -67,6 +70,10 @@ #define RXFREE_QUEUE(port_id) (NPE_ID(port_id) + 26) #define TXDONE_QUEUE 31 +#define PTP_SLAVE_MODE 1 +#define PTP_MASTER_MODE 2 +#define PORT2CHANNEL(p) NPE_ID(p->id) + /* TX Control Registers */ #define TX_CNTRL0_TX_EN 0x01 #define TX_CNTRL0_HALFDUPLEX 0x02 @@ -171,6 +178,8 @@ struct port { int id; /* logical port ID */ int speed, duplex; u8 firmware[4]; + int hwts_tx_en; + int hwts_rx_en; }; /* NPE message structure */ @@ -246,6 +255,172 @@ static int ports_open; static struct port *npe_port_tab[MAX_NPES]; static struct dma_pool *dma_pool; +static struct sock_filter ptp_filter[] = { + PTP_FILTER +}; + +static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid) +{ + u8 *data = skb->data; 
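For context on the receive and transmit timestamp hooks added to this driver: user space enables them with the standard SIOCSHWTSTAMP ioctl, which ends up in the hwtstamp_ioctl() handler further down in this hunk. A minimal user-space sketch, assuming a PTPv1 slave; the socket type, interface handling and error handling are illustrative only:

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_ptp_timestamps(const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);	/* any socket works for the ioctl */

	if (fd < 0)
		return -1;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;			/* timestamp outgoing PTP frames */
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;	/* slave mode: stamp V1 SYNC messages */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	ret = ioctl(fd, SIOCSHWTSTAMP, &ifr);		/* serviced by hwtstamp_ioctl() */
	close(fd);
	return ret;
}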
+ unsigned int offset; + u16 *hi, *id; + u32 lo; + + if (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4) + return 0; + + offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; + + if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid)) + return 0; + + hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID); + id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID); + + memcpy(&lo, &hi[1], sizeof(lo)); + + return (uid_hi == ntohs(*hi) && + uid_lo == ntohl(lo) && + seqid == ntohs(*id)); +} + +static void ixp_rx_timestamp(struct port *port, struct sk_buff *skb) +{ + struct skb_shared_hwtstamps *shhwtstamps; + struct ixp46x_ts_regs *regs; + u64 ns; + u32 ch, hi, lo, val; + u16 uid, seq; + + if (!port->hwts_rx_en) + return; + + ch = PORT2CHANNEL(port); + + regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; + + val = __raw_readl(®s->channel[ch].ch_event); + + if (!(val & RX_SNAPSHOT_LOCKED)) + return; + + lo = __raw_readl(®s->channel[ch].src_uuid_lo); + hi = __raw_readl(®s->channel[ch].src_uuid_hi); + + uid = hi & 0xffff; + seq = (hi >> 16) & 0xffff; + + if (!ixp_ptp_match(skb, htons(uid), htonl(lo), htons(seq))) + goto out; + + lo = __raw_readl(®s->channel[ch].rx_snap_lo); + hi = __raw_readl(®s->channel[ch].rx_snap_hi); + ns = ((u64) hi) << 32; + ns |= lo; + ns <<= TICKS_NS_SHIFT; + + shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + shhwtstamps->hwtstamp = ns_to_ktime(ns); +out: + __raw_writel(RX_SNAPSHOT_LOCKED, ®s->channel[ch].ch_event); +} + +static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb) +{ + struct skb_shared_hwtstamps shhwtstamps; + struct ixp46x_ts_regs *regs; + struct skb_shared_info *shtx; + u64 ns; + u32 ch, cnt, hi, lo, val; + + shtx = skb_shinfo(skb); + if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && port->hwts_tx_en)) + shtx->tx_flags |= SKBTX_IN_PROGRESS; + else + return; + + ch = PORT2CHANNEL(port); + + regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; + + /* + * This really stinks, but we have to poll for the Tx time stamp. + * Usually, the time stamp is ready after 4 to 6 microseconds. 
+ */ + for (cnt = 0; cnt < 100; cnt++) { + val = __raw_readl(®s->channel[ch].ch_event); + if (val & TX_SNAPSHOT_LOCKED) + break; + udelay(1); + } + if (!(val & TX_SNAPSHOT_LOCKED)) { + shtx->tx_flags &= ~SKBTX_IN_PROGRESS; + return; + } + + lo = __raw_readl(®s->channel[ch].tx_snap_lo); + hi = __raw_readl(®s->channel[ch].tx_snap_hi); + ns = ((u64) hi) << 32; + ns |= lo; + ns <<= TICKS_NS_SHIFT; + + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(skb, &shhwtstamps); + + __raw_writel(TX_SNAPSHOT_LOCKED, ®s->channel[ch].ch_event); +} + +static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct hwtstamp_config cfg; + struct ixp46x_ts_regs *regs; + struct port *port = netdev_priv(netdev); + int ch; + + if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) + return -EFAULT; + + if (cfg.flags) /* reserved for future extensions */ + return -EINVAL; + + ch = PORT2CHANNEL(port); + regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; + + switch (cfg.tx_type) { + case HWTSTAMP_TX_OFF: + port->hwts_tx_en = 0; + break; + case HWTSTAMP_TX_ON: + port->hwts_tx_en = 1; + break; + default: + return -ERANGE; + } + + switch (cfg.rx_filter) { + case HWTSTAMP_FILTER_NONE: + port->hwts_rx_en = 0; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + port->hwts_rx_en = PTP_SLAVE_MODE; + __raw_writel(0, ®s->channel[ch].ch_control); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + port->hwts_rx_en = PTP_MASTER_MODE; + __raw_writel(MASTER_MODE, ®s->channel[ch].ch_control); + break; + default: + return -ERANGE; + } + + /* Clear out any old time stamps. */ + __raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED, + ®s->channel[ch].ch_event); + + return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? 
-EFAULT : 0; +} static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location, int write, u16 cmd) @@ -573,6 +748,7 @@ static int eth_poll(struct napi_struct *napi, int budget) debug_pkt(dev, "eth_poll", skb->data, skb->len); + ixp_rx_timestamp(port, skb); skb->protocol = eth_type_trans(skb, dev); dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; @@ -679,14 +855,12 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4); - dev_kfree_skb(skb); #endif phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE); if (dma_mapping_error(&dev->dev, phys)) { -#ifdef __ARMEB__ dev_kfree_skb(skb); -#else +#ifndef __ARMEB__ kfree(mem); #endif dev->stats.tx_dropped++; @@ -728,6 +902,13 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev) #if DEBUG_TX printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name); #endif + + ixp_tx_timestamp(port, skb); + skb_tx_timestamp(skb); + +#ifndef __ARMEB__ + dev_kfree_skb(skb); +#endif return NETDEV_TX_OK; } @@ -783,6 +964,9 @@ static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd) if (!netif_running(dev)) return -EINVAL; + if (cpu_is_ixp46x() && cmd == SIOCSHWTSTAMP) + return hwtstamp_ioctl(dev, req, cmd); + return phy_mii_ioctl(port->phydev, req, cmd); } @@ -1171,6 +1355,11 @@ static int __devinit eth_init_one(struct platform_device *pdev) char phy_id[MII_BUS_ID_SIZE + 3]; int err; + if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) { + pr_err("ixp4xx_eth: bad ptp filter\n"); + return -EINVAL; + } + if (!(dev = alloc_etherdev(sizeof(struct port)))) return -ENOMEM; diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index 2463b1c9792..81654ae16c6 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c @@ -1703,7 +1703,8 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size) { struct be_mcc_wrb *wrb; struct be_cmd_req_rss_config *req; - u32 myhash[10]; + u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF, + 0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF}; int status; if (mutex_lock_interruptible(&adapter->mbox_lock)) diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c index d5bd35b7f2e..289044332ed 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.c +++ b/drivers/net/bnx2x/bnx2x_cmn.c @@ -2675,7 +2675,7 @@ alloc_mem_err: * Min size diferent for TPA and non-TPA queues */ if (ring_size < (fp->disable_tpa ? - MIN_RX_SIZE_TPA : MIN_RX_SIZE_NONTPA)) { + MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) { /* release memory allocated for this queue */ bnx2x_free_fp_mem_at(bp, index); return -ENOMEM; diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index a97d9be331d..4b70311a11e 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c @@ -2222,12 +2222,13 @@ static void bnx2x_pmf_update(struct bnx2x *bp) u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param) { int mb_idx = BP_FW_MB_IDX(bp); - u32 seq = ++bp->fw_seq; + u32 seq; u32 rc = 0; u32 cnt = 1; u8 delay = CHIP_REV_IS_SLOW(bp) ? 
100 : 10; mutex_lock(&bp->fw_mb_mutex); + seq = ++bp->fw_seq; SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param); SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq)); diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 8f2d2e7c70e..2df9276720a 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -163,8 +163,6 @@ static int tlb_initialize(struct bonding *bond) struct tlb_client_info *new_hashtbl; int i; - spin_lock_init(&(bond_info->tx_hashtbl_lock)); - new_hashtbl = kzalloc(size, GFP_KERNEL); if (!new_hashtbl) { pr_err("%s: Error: Failed to allocate TLB hash table\n", @@ -747,8 +745,6 @@ static int rlb_initialize(struct bonding *bond) int size = RLB_HASH_TABLE_SIZE * sizeof(struct rlb_client_info); int i; - spin_lock_init(&(bond_info->rx_hashtbl_lock)); - new_hashtbl = kmalloc(size, GFP_KERNEL); if (!new_hashtbl) { pr_err("%s: Error: Failed to allocate RLB hash table\n", diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 6dc42846154..6141667c5fb 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -852,7 +852,7 @@ static void bond_resend_igmp_join_requests(struct bonding *bond) static void bond_resend_igmp_join_requests_delayed(struct work_struct *work) { struct bonding *bond = container_of(work, struct bonding, - mcast_work.work); + mcast_work.work); bond_resend_igmp_join_requests(bond); } @@ -1172,10 +1172,12 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) } /* resend IGMP joins since active slave has changed or - * all were sent on curr_active_slave */ - if (((USES_PRIMARY(bond->params.mode) && new_active) || - bond->params.mode == BOND_MODE_ROUNDROBIN) && - netif_running(bond->dev)) { + * all were sent on curr_active_slave. + * resend only if bond is brought up with the affected + * bonding modes and the retransmission is enabled */ + if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) && + ((USES_PRIMARY(bond->params.mode) && new_active) || + bond->params.mode == BOND_MODE_ROUNDROBIN)) { bond->igmp_retrans = bond->params.resend_igmp; queue_delayed_work(bond->wq, &bond->mcast_work, 0); } @@ -1542,12 +1544,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) bond_dev->name, slave_dev->name); } - /* bond must be initialized by bond_open() before enslaving */ - if (!(bond_dev->flags & IFF_UP)) { - pr_warning("%s: master_dev is not up in bond_enslave\n", - bond_dev->name); - } - /* already enslaved */ if (slave_dev->flags & IFF_SLAVE) { pr_debug("Error, Device was already enslaved\n"); @@ -4834,9 +4830,19 @@ static int bond_init(struct net_device *bond_dev) { struct bonding *bond = netdev_priv(bond_dev); struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id); + struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond)); pr_debug("Begin bond_init for %s\n", bond_dev->name); + /* + * Initialize locks that may be required during + * en/deslave operations. 
All of the bond_open work + * (of which this is part) should really be moved to + * a phase prior to dev_open + */ + spin_lock_init(&(bond_info->tx_hashtbl_lock)); + spin_lock_init(&(bond_info->rx_hashtbl_lock)); + bond->wq = create_singlethread_workqueue(bond_dev->name); if (!bond->wq) return -ENOMEM; diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c index 4059bfc73db..88fcb25e554 100644 --- a/drivers/net/bonding/bond_sysfs.c +++ b/drivers/net/bonding/bond_sysfs.c @@ -227,12 +227,6 @@ static ssize_t bonding_store_slaves(struct device *d, struct net_device *dev; struct bonding *bond = to_bond(d); - /* Quick sanity check -- is the bond interface up? */ - if (!(bond->dev->flags & IFF_UP)) { - pr_warning("%s: doing slave updates when interface is down.\n", - bond->dev->name); - } - if (!rtnl_trylock()) return restart_syscall(); @@ -1539,8 +1533,8 @@ static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR, * Show and set the number of IGMP membership reports to send on link failure */ static ssize_t bonding_show_resend_igmp(struct device *d, - struct device_attribute *attr, - char *buf) + struct device_attribute *attr, + char *buf) { struct bonding *bond = to_bond(d); @@ -1548,8 +1542,8 @@ static ssize_t bonding_show_resend_igmp(struct device *d, } static ssize_t bonding_store_resend_igmp(struct device *d, - struct device_attribute *attr, - const char *buf, size_t count) + struct device_attribute *attr, + const char *buf, size_t count) { int new_value, ret = count; struct bonding *bond = to_bond(d); @@ -1561,7 +1555,7 @@ static ssize_t bonding_store_resend_igmp(struct device *d, goto out; } - if (new_value < 0) { + if (new_value < 0 || new_value > 255) { pr_err("%s: Invalid resend_igmp value %d not in range 0-255; rejected.\n", bond->dev->name, new_value); ret = -EINVAL; diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c index 6a0a8fca62b..3fd5a240034 100644 --- a/drivers/net/ehea/ehea_main.c +++ b/drivers/net/ehea/ehea_main.c @@ -2083,7 +2083,7 @@ static void ehea_set_multicast_list(struct net_device *dev) struct netdev_hw_addr *ha; int ret; - if (dev->flags & IFF_PROMISC) { + if (port->promisc) { ehea_promiscuous(dev, 1); return; } diff --git a/drivers/net/gianfar_ptp.c b/drivers/net/gianfar_ptp.c new file mode 100644 index 00000000000..d8e175382d1 --- /dev/null +++ b/drivers/net/gianfar_ptp.c @@ -0,0 +1,588 @@ +/* + * PTP 1588 clock using the eTSEC + * + * Copyright (C) 2010 OMICRON electronics GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ +#include <linux/device.h> +#include <linux/hrtimer.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/timex.h> +#include <linux/io.h> + +#include <linux/ptp_clock_kernel.h> + +#include "gianfar.h" + +/* + * gianfar ptp registers + * Generated by regen.tcl on Thu May 13 01:38:57 PM CEST 2010 + */ +struct gianfar_ptp_registers { + u32 tmr_ctrl; /* Timer control register */ + u32 tmr_tevent; /* Timestamp event register */ + u32 tmr_temask; /* Timer event mask register */ + u32 tmr_pevent; /* Timestamp event register */ + u32 tmr_pemask; /* Timer event mask register */ + u32 tmr_stat; /* Timestamp status register */ + u32 tmr_cnt_h; /* Timer counter high register */ + u32 tmr_cnt_l; /* Timer counter low register */ + u32 tmr_add; /* Timer drift compensation addend register */ + u32 tmr_acc; /* Timer accumulator register */ + u32 tmr_prsc; /* Timer prescale */ + u8 res1[4]; + u32 tmroff_h; /* Timer offset high */ + u32 tmroff_l; /* Timer offset low */ + u8 res2[8]; + u32 tmr_alarm1_h; /* Timer alarm 1 high register */ + u32 tmr_alarm1_l; /* Timer alarm 1 high register */ + u32 tmr_alarm2_h; /* Timer alarm 2 high register */ + u32 tmr_alarm2_l; /* Timer alarm 2 high register */ + u8 res3[48]; + u32 tmr_fiper1; /* Timer fixed period interval */ + u32 tmr_fiper2; /* Timer fixed period interval */ + u32 tmr_fiper3; /* Timer fixed period interval */ + u8 res4[20]; + u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */ + u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */ + u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */ + u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */ +}; + +/* Bit definitions for the TMR_CTRL register */ +#define ALM1P (1<<31) /* Alarm1 output polarity */ +#define ALM2P (1<<30) /* Alarm2 output polarity */ +#define FS (1<<28) /* FIPER start indication */ +#define PP1L (1<<27) /* Fiper1 pulse loopback mode enabled. */ +#define PP2L (1<<26) /* Fiper2 pulse loopback mode enabled. */ +#define TCLK_PERIOD_SHIFT (16) /* 1588 timer reference clock period. */ +#define TCLK_PERIOD_MASK (0x3ff) +#define RTPE (1<<15) /* Record Tx Timestamp to PAL Enable. */ +#define FRD (1<<14) /* FIPER Realignment Disable */ +#define ESFDP (1<<11) /* External Tx/Rx SFD Polarity. */ +#define ESFDE (1<<10) /* External Tx/Rx SFD Enable. */ +#define ETEP2 (1<<9) /* External trigger 2 edge polarity */ +#define ETEP1 (1<<8) /* External trigger 1 edge polarity */ +#define COPH (1<<7) /* Generated clock output phase. */ +#define CIPH (1<<6) /* External oscillator input clock phase */ +#define TMSR (1<<5) /* Timer soft reset. */ +#define BYP (1<<3) /* Bypass drift compensated clock */ +#define TE (1<<2) /* 1588 timer enable. 
*/ +#define CKSEL_SHIFT (0) /* 1588 Timer reference clock source */ +#define CKSEL_MASK (0x3) + +/* Bit definitions for the TMR_TEVENT register */ +#define ETS2 (1<<25) /* External trigger 2 timestamp sampled */ +#define ETS1 (1<<24) /* External trigger 1 timestamp sampled */ +#define ALM2 (1<<17) /* Current time = alarm time register 2 */ +#define ALM1 (1<<16) /* Current time = alarm time register 1 */ +#define PP1 (1<<7) /* periodic pulse generated on FIPER1 */ +#define PP2 (1<<6) /* periodic pulse generated on FIPER2 */ +#define PP3 (1<<5) /* periodic pulse generated on FIPER3 */ + +/* Bit definitions for the TMR_TEMASK register */ +#define ETS2EN (1<<25) /* External trigger 2 timestamp enable */ +#define ETS1EN (1<<24) /* External trigger 1 timestamp enable */ +#define ALM2EN (1<<17) /* Timer ALM2 event enable */ +#define ALM1EN (1<<16) /* Timer ALM1 event enable */ +#define PP1EN (1<<7) /* Periodic pulse event 1 enable */ +#define PP2EN (1<<6) /* Periodic pulse event 2 enable */ + +/* Bit definitions for the TMR_PEVENT register */ +#define TXP2 (1<<9) /* PTP transmitted timestamp im TXTS2 */ +#define TXP1 (1<<8) /* PTP transmitted timestamp in TXTS1 */ +#define RXP (1<<0) /* PTP frame has been received */ + +/* Bit definitions for the TMR_PEMASK register */ +#define TXP2EN (1<<9) /* Transmit PTP packet event 2 enable */ +#define TXP1EN (1<<8) /* Transmit PTP packet event 1 enable */ +#define RXPEN (1<<0) /* Receive PTP packet event enable */ + +/* Bit definitions for the TMR_STAT register */ +#define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */ +#define STAT_VEC_MASK (0x3f) + +/* Bit definitions for the TMR_PRSC register */ +#define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */ +#define PRSC_OCK_MASK (0xffff) + + +#define DRIVER "gianfar_ptp" +#define DEFAULT_CKSEL 1 +#define N_ALARM 1 /* first alarm is used internally to reset fipers */ +#define N_EXT_TS 2 +#define REG_SIZE sizeof(struct gianfar_ptp_registers) + +struct etsects { + struct gianfar_ptp_registers *regs; + spinlock_t lock; /* protects regs */ + struct ptp_clock *clock; + struct ptp_clock_info caps; + struct resource *rsrc; + int irq; + u64 alarm_interval; /* for periodic alarm */ + u64 alarm_value; + u32 tclk_period; /* nanoseconds */ + u32 tmr_prsc; + u32 tmr_add; + u32 cksel; + u32 tmr_fiper1; + u32 tmr_fiper2; +}; + +/* + * Register access functions + */ + +/* Caller must hold etsects->lock. */ +static u64 tmr_cnt_read(struct etsects *etsects) +{ + u64 ns; + u32 lo, hi; + + lo = gfar_read(&etsects->regs->tmr_cnt_l); + hi = gfar_read(&etsects->regs->tmr_cnt_h); + ns = ((u64) hi) << 32; + ns |= lo; + return ns; +} + +/* Caller must hold etsects->lock. */ +static void tmr_cnt_write(struct etsects *etsects, u64 ns) +{ + u32 hi = ns >> 32; + u32 lo = ns & 0xffffffff; + + gfar_write(&etsects->regs->tmr_cnt_l, lo); + gfar_write(&etsects->regs->tmr_cnt_h, hi); +} + +/* Caller must hold etsects->lock. */ +static void set_alarm(struct etsects *etsects) +{ + u64 ns; + u32 lo, hi; + + ns = tmr_cnt_read(etsects) + 1500000000ULL; + ns = div_u64(ns, 1000000000UL) * 1000000000ULL; + ns -= etsects->tclk_period; + hi = ns >> 32; + lo = ns & 0xffffffff; + gfar_write(&etsects->regs->tmr_alarm1_l, lo); + gfar_write(&etsects->regs->tmr_alarm1_h, hi); +} + +/* Caller must hold etsects->lock. 
*/ +static void set_fipers(struct etsects *etsects) +{ + u32 tmr_ctrl = gfar_read(&etsects->regs->tmr_ctrl); + + gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl & (~TE)); + gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc); + gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); + gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); + set_alarm(etsects); + gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|TE); +} + +/* + * Interrupt service routine + */ + +static irqreturn_t isr(int irq, void *priv) +{ + struct etsects *etsects = priv; + struct ptp_clock_event event; + u64 ns; + u32 ack = 0, lo, hi, mask, val; + + val = gfar_read(&etsects->regs->tmr_tevent); + + if (val & ETS1) { + ack |= ETS1; + hi = gfar_read(&etsects->regs->tmr_etts1_h); + lo = gfar_read(&etsects->regs->tmr_etts1_l); + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = ((u64) hi) << 32; + event.timestamp |= lo; + ptp_clock_event(etsects->clock, &event); + } + + if (val & ETS2) { + ack |= ETS2; + hi = gfar_read(&etsects->regs->tmr_etts2_h); + lo = gfar_read(&etsects->regs->tmr_etts2_l); + event.type = PTP_CLOCK_EXTTS; + event.index = 1; + event.timestamp = ((u64) hi) << 32; + event.timestamp |= lo; + ptp_clock_event(etsects->clock, &event); + } + + if (val & ALM2) { + ack |= ALM2; + if (etsects->alarm_value) { + event.type = PTP_CLOCK_ALARM; + event.index = 0; + event.timestamp = etsects->alarm_value; + ptp_clock_event(etsects->clock, &event); + } + if (etsects->alarm_interval) { + ns = etsects->alarm_value + etsects->alarm_interval; + hi = ns >> 32; + lo = ns & 0xffffffff; + spin_lock(&etsects->lock); + gfar_write(&etsects->regs->tmr_alarm2_l, lo); + gfar_write(&etsects->regs->tmr_alarm2_h, hi); + spin_unlock(&etsects->lock); + etsects->alarm_value = ns; + } else { + gfar_write(&etsects->regs->tmr_tevent, ALM2); + spin_lock(&etsects->lock); + mask = gfar_read(&etsects->regs->tmr_temask); + mask &= ~ALM2EN; + gfar_write(&etsects->regs->tmr_temask, mask); + spin_unlock(&etsects->lock); + etsects->alarm_value = 0; + etsects->alarm_interval = 0; + } + } + + if (val & PP1) { + ack |= PP1; + event.type = PTP_CLOCK_PPS; + ptp_clock_event(etsects->clock, &event); + } + + if (ack) { + gfar_write(&etsects->regs->tmr_tevent, ack); + return IRQ_HANDLED; + } else + return IRQ_NONE; +} + +/* + * PTP clock operations + */ + +static int ptp_gianfar_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + u64 adj; + u32 diff, tmr_add; + int neg_adj = 0; + struct etsects *etsects = container_of(ptp, struct etsects, caps); + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + tmr_add = etsects->tmr_add; + adj = tmr_add; + adj *= ppb; + diff = div_u64(adj, 1000000000ULL); + + tmr_add = neg_adj ? 
tmr_add - diff : tmr_add + diff; + + gfar_write(&etsects->regs->tmr_add, tmr_add); + + return 0; +} + +static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + s64 now; + unsigned long flags; + struct etsects *etsects = container_of(ptp, struct etsects, caps); + + spin_lock_irqsave(&etsects->lock, flags); + + now = tmr_cnt_read(etsects); + now += delta; + tmr_cnt_write(etsects, now); + + spin_unlock_irqrestore(&etsects->lock, flags); + + set_fipers(etsects); + + return 0; +} + +static int ptp_gianfar_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +{ + u64 ns; + u32 remainder; + unsigned long flags; + struct etsects *etsects = container_of(ptp, struct etsects, caps); + + spin_lock_irqsave(&etsects->lock, flags); + + ns = tmr_cnt_read(etsects); + + spin_unlock_irqrestore(&etsects->lock, flags); + + ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); + ts->tv_nsec = remainder; + return 0; +} + +static int ptp_gianfar_settime(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + u64 ns; + unsigned long flags; + struct etsects *etsects = container_of(ptp, struct etsects, caps); + + ns = ts->tv_sec * 1000000000ULL; + ns += ts->tv_nsec; + + spin_lock_irqsave(&etsects->lock, flags); + + tmr_cnt_write(etsects, ns); + set_fipers(etsects); + + spin_unlock_irqrestore(&etsects->lock, flags); + + return 0; +} + +static int ptp_gianfar_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct etsects *etsects = container_of(ptp, struct etsects, caps); + unsigned long flags; + u32 bit, mask; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + switch (rq->extts.index) { + case 0: + bit = ETS1EN; + break; + case 1: + bit = ETS2EN; + break; + default: + return -EINVAL; + } + spin_lock_irqsave(&etsects->lock, flags); + mask = gfar_read(&etsects->regs->tmr_temask); + if (on) + mask |= bit; + else + mask &= ~bit; + gfar_write(&etsects->regs->tmr_temask, mask); + spin_unlock_irqrestore(&etsects->lock, flags); + return 0; + + case PTP_CLK_REQ_PPS: + spin_lock_irqsave(&etsects->lock, flags); + mask = gfar_read(&etsects->regs->tmr_temask); + if (on) + mask |= PP1EN; + else + mask &= ~PP1EN; + gfar_write(&etsects->regs->tmr_temask, mask); + spin_unlock_irqrestore(&etsects->lock, flags); + return 0; + + default: + break; + } + + return -EOPNOTSUPP; +} + +static struct ptp_clock_info ptp_gianfar_caps = { + .owner = THIS_MODULE, + .name = "gianfar clock", + .max_adj = 512000, + .n_alarm = N_ALARM, + .n_ext_ts = N_EXT_TS, + .n_per_out = 0, + .pps = 1, + .adjfreq = ptp_gianfar_adjfreq, + .adjtime = ptp_gianfar_adjtime, + .gettime = ptp_gianfar_gettime, + .settime = ptp_gianfar_settime, + .enable = ptp_gianfar_enable, +}; + +/* OF device tree */ + +static int get_of_u32(struct device_node *node, char *str, u32 *val) +{ + int plen; + const u32 *prop = of_get_property(node, str, &plen); + + if (!prop || plen != sizeof(*prop)) + return -1; + *val = *prop; + return 0; +} + +static int gianfar_ptp_probe(struct platform_device *dev) +{ + struct device_node *node = dev->dev.of_node; + struct etsects *etsects; + struct timespec now; + int err = -ENOMEM; + u32 tmr_ctrl; + unsigned long flags; + + etsects = kzalloc(sizeof(*etsects), GFP_KERNEL); + if (!etsects) + goto no_memory; + + err = -ENODEV; + + etsects->caps = ptp_gianfar_caps; + etsects->cksel = DEFAULT_CKSEL; + + if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) || + get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) || + get_of_u32(node, "fsl,tmr-add", &etsects->tmr_add) || + 
get_of_u32(node, "fsl,tmr-fiper1", &etsects->tmr_fiper1) || + get_of_u32(node, "fsl,tmr-fiper2", &etsects->tmr_fiper2) || + get_of_u32(node, "fsl,max-adj", &etsects->caps.max_adj)) { + pr_err("device tree node missing required elements\n"); + goto no_node; + } + + etsects->irq = platform_get_irq(dev, 0); + + if (etsects->irq == NO_IRQ) { + pr_err("irq not in device tree\n"); + goto no_node; + } + if (request_irq(etsects->irq, isr, 0, DRIVER, etsects)) { + pr_err("request_irq failed\n"); + goto no_node; + } + + etsects->rsrc = platform_get_resource(dev, IORESOURCE_MEM, 0); + if (!etsects->rsrc) { + pr_err("no resource\n"); + goto no_resource; + } + if (request_resource(&ioport_resource, etsects->rsrc)) { + pr_err("resource busy\n"); + goto no_resource; + } + + spin_lock_init(&etsects->lock); + + etsects->regs = ioremap(etsects->rsrc->start, + 1 + etsects->rsrc->end - etsects->rsrc->start); + if (!etsects->regs) { + pr_err("ioremap ptp registers failed\n"); + goto no_ioremap; + } + getnstimeofday(&now); + ptp_gianfar_settime(&etsects->caps, &now); + + tmr_ctrl = + (etsects->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT | + (etsects->cksel & CKSEL_MASK) << CKSEL_SHIFT; + + spin_lock_irqsave(&etsects->lock, flags); + + gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl); + gfar_write(&etsects->regs->tmr_add, etsects->tmr_add); + gfar_write(&etsects->regs->tmr_prsc, etsects->tmr_prsc); + gfar_write(&etsects->regs->tmr_fiper1, etsects->tmr_fiper1); + gfar_write(&etsects->regs->tmr_fiper2, etsects->tmr_fiper2); + set_alarm(etsects); + gfar_write(&etsects->regs->tmr_ctrl, tmr_ctrl|FS|RTPE|TE); + + spin_unlock_irqrestore(&etsects->lock, flags); + + etsects->clock = ptp_clock_register(&etsects->caps); + if (IS_ERR(etsects->clock)) { + err = PTR_ERR(etsects->clock); + goto no_clock; + } + + dev_set_drvdata(&dev->dev, etsects); + + return 0; + +no_clock: +no_ioremap: + release_resource(etsects->rsrc); +no_resource: + free_irq(etsects->irq, etsects); +no_node: + kfree(etsects); +no_memory: + return err; +} + +static int gianfar_ptp_remove(struct platform_device *dev) +{ + struct etsects *etsects = dev_get_drvdata(&dev->dev); + + gfar_write(&etsects->regs->tmr_temask, 0); + gfar_write(&etsects->regs->tmr_ctrl, 0); + + ptp_clock_unregister(etsects->clock); + iounmap(etsects->regs); + release_resource(etsects->rsrc); + free_irq(etsects->irq, etsects); + kfree(etsects); + + return 0; +} + +static struct of_device_id match_table[] = { + { .compatible = "fsl,etsec-ptp" }, + {}, +}; + +static struct platform_driver gianfar_ptp_driver = { + .driver = { + .name = "gianfar_ptp", + .of_match_table = match_table, + .owner = THIS_MODULE, + }, + .probe = gianfar_ptp_probe, + .remove = gianfar_ptp_remove, +}; + +/* module operations */ + +static int __init ptp_gianfar_init(void) +{ + return platform_driver_register(&gianfar_ptp_driver); +} + +module_init(ptp_gianfar_init); + +static void __exit ptp_gianfar_exit(void) +{ + platform_driver_unregister(&gianfar_ptp_driver); +} + +module_exit(ptp_gianfar_exit); + +MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>"); +MODULE_DESCRIPTION("PTP clock using the eTSEC"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c index 96c95617195..32f07f868d8 100644 --- a/drivers/net/ioc3-eth.c +++ b/drivers/net/ioc3-eth.c @@ -915,7 +915,7 @@ static void ioc3_alloc_rings(struct net_device *dev) skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); if (!skb) { - show_free_areas(); + show_free_areas(0); continue; } diff --git 
a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c index f940dfa1f7f..9d4ce1aba10 100644 --- a/drivers/net/irda/bfin_sir.c +++ b/drivers/net/irda/bfin_sir.c @@ -67,27 +67,27 @@ static void bfin_sir_stop_tx(struct bfin_sir_port *port) disable_dma(port->tx_dma_channel); #endif - while (!(SIR_UART_GET_LSR(port) & THRE)) { + while (!(UART_GET_LSR(port) & THRE)) { cpu_relax(); continue; } - SIR_UART_STOP_TX(port); + UART_CLEAR_IER(port, ETBEI); } static void bfin_sir_enable_tx(struct bfin_sir_port *port) { - SIR_UART_ENABLE_TX(port); + UART_SET_IER(port, ETBEI); } static void bfin_sir_stop_rx(struct bfin_sir_port *port) { - SIR_UART_STOP_RX(port); + UART_CLEAR_IER(port, ERBFI); } static void bfin_sir_enable_rx(struct bfin_sir_port *port) { - SIR_UART_ENABLE_RX(port); + UART_SET_IER(port, ERBFI); } static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed) @@ -116,7 +116,7 @@ static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed) do { udelay(utime); - lsr = SIR_UART_GET_LSR(port); + lsr = UART_GET_LSR(port); } while (!(lsr & TEMT) && count--); /* The useconds for 1 bits to transmit */ @@ -125,27 +125,27 @@ static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed) /* Clear UCEN bit to reset the UART state machine * and control registers */ - val = SIR_UART_GET_GCTL(port); + val = UART_GET_GCTL(port); val &= ~UCEN; - SIR_UART_PUT_GCTL(port, val); + UART_PUT_GCTL(port, val); /* Set DLAB in LCR to Access THR RBR IER */ - SIR_UART_SET_DLAB(port); + UART_SET_DLAB(port); SSYNC(); - SIR_UART_PUT_DLL(port, quot & 0xFF); - SIR_UART_PUT_DLH(port, (quot >> 8) & 0xFF); + UART_PUT_DLL(port, quot & 0xFF); + UART_PUT_DLH(port, (quot >> 8) & 0xFF); SSYNC(); /* Clear DLAB in LCR */ - SIR_UART_CLEAR_DLAB(port); + UART_CLEAR_DLAB(port); SSYNC(); - SIR_UART_PUT_LCR(port, lcr); + UART_PUT_LCR(port, lcr); - val = SIR_UART_GET_GCTL(port); + val = UART_GET_GCTL(port); val |= UCEN; - SIR_UART_PUT_GCTL(port, val); + UART_PUT_GCTL(port, val); ret = 0; break; @@ -154,12 +154,12 @@ static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed) break; } - val = SIR_UART_GET_GCTL(port); + val = UART_GET_GCTL(port); /* If not add the 'RPOLC', we can't catch the receive interrupt. * It's related with the HW layout and the IR transiver. 
*/ val |= IREN | RPOLC; - SIR_UART_PUT_GCTL(port, val); + UART_PUT_GCTL(port, val); return ret; } @@ -168,7 +168,7 @@ static int bfin_sir_is_receiving(struct net_device *dev) struct bfin_sir_self *self = netdev_priv(dev); struct bfin_sir_port *port = self->sir_port; - if (!(SIR_UART_GET_IER(port) & ERBFI)) + if (!(UART_GET_IER(port) & ERBFI)) return 0; return self->rx_buff.state != OUTSIDE_FRAME; } @@ -182,7 +182,7 @@ static void bfin_sir_tx_chars(struct net_device *dev) if (self->tx_buff.len != 0) { chr = *(self->tx_buff.data); - SIR_UART_PUT_CHAR(port, chr); + UART_PUT_CHAR(port, chr); self->tx_buff.data++; self->tx_buff.len--; } else { @@ -206,8 +206,8 @@ static void bfin_sir_rx_chars(struct net_device *dev) struct bfin_sir_port *port = self->sir_port; unsigned char ch; - SIR_UART_CLEAR_LSR(port); - ch = SIR_UART_GET_CHAR(port); + UART_CLEAR_LSR(port); + ch = UART_GET_CHAR(port); async_unwrap_char(dev, &self->stats, &self->rx_buff, ch); dev->last_rx = jiffies; } @@ -219,7 +219,7 @@ static irqreturn_t bfin_sir_rx_int(int irq, void *dev_id) struct bfin_sir_port *port = self->sir_port; spin_lock(&self->lock); - while ((SIR_UART_GET_LSR(port) & DR)) + while ((UART_GET_LSR(port) & DR)) bfin_sir_rx_chars(dev); spin_unlock(&self->lock); @@ -233,7 +233,7 @@ static irqreturn_t bfin_sir_tx_int(int irq, void *dev_id) struct bfin_sir_port *port = self->sir_port; spin_lock(&self->lock); - if (SIR_UART_GET_LSR(port) & THRE) + if (UART_GET_LSR(port) & THRE) bfin_sir_tx_chars(dev); spin_unlock(&self->lock); @@ -312,7 +312,7 @@ static void bfin_sir_dma_rx_chars(struct net_device *dev) struct bfin_sir_port *port = self->sir_port; int i; - SIR_UART_CLEAR_LSR(port); + UART_CLEAR_LSR(port); for (i = port->rx_dma_buf.head; i < port->rx_dma_buf.tail; i++) async_unwrap_char(dev, &self->stats, &self->rx_buff, port->rx_dma_buf.buf[i]); @@ -430,11 +430,10 @@ static void bfin_sir_shutdown(struct bfin_sir_port *port, struct net_device *dev unsigned short val; bfin_sir_stop_rx(port); - SIR_UART_DISABLE_INTS(port); - val = SIR_UART_GET_GCTL(port); + val = UART_GET_GCTL(port); val &= ~(UCEN | IREN | RPOLC); - SIR_UART_PUT_GCTL(port, val); + UART_PUT_GCTL(port, val); #ifdef CONFIG_SIR_BFIN_DMA disable_dma(port->tx_dma_channel); @@ -518,12 +517,12 @@ static void bfin_sir_send_work(struct work_struct *work) * sending data. We also can set the speed, which will * reset all the UART. 
*/ - val = SIR_UART_GET_GCTL(port); + val = UART_GET_GCTL(port); val &= ~(IREN | RPOLC); - SIR_UART_PUT_GCTL(port, val); + UART_PUT_GCTL(port, val); SSYNC(); val |= IREN | RPOLC; - SIR_UART_PUT_GCTL(port, val); + UART_PUT_GCTL(port, val); SSYNC(); /* bfin_sir_set_speed(port, self->speed); */ diff --git a/drivers/net/irda/bfin_sir.h b/drivers/net/irda/bfin_sir.h index e3b285a6773..29cbde8501e 100644 --- a/drivers/net/irda/bfin_sir.h +++ b/drivers/net/irda/bfin_sir.h @@ -26,7 +26,6 @@ #include <asm/cacheflush.h> #include <asm/dma.h> #include <asm/portmux.h> -#include <mach/bfin_serial_5xx.h> #undef DRIVER_NAME #ifdef CONFIG_SIR_BFIN_DMA @@ -83,64 +82,10 @@ struct bfin_sir_self { #define DRIVER_NAME "bfin_sir" -#define SIR_UART_GET_CHAR(port) bfin_read16((port)->membase + OFFSET_RBR) -#define SIR_UART_GET_DLL(port) bfin_read16((port)->membase + OFFSET_DLL) -#define SIR_UART_GET_DLH(port) bfin_read16((port)->membase + OFFSET_DLH) -#define SIR_UART_GET_LCR(port) bfin_read16((port)->membase + OFFSET_LCR) -#define SIR_UART_GET_GCTL(port) bfin_read16((port)->membase + OFFSET_GCTL) - -#define SIR_UART_PUT_CHAR(port, v) bfin_write16(((port)->membase + OFFSET_THR), v) -#define SIR_UART_PUT_DLL(port, v) bfin_write16(((port)->membase + OFFSET_DLL), v) -#define SIR_UART_PUT_DLH(port, v) bfin_write16(((port)->membase + OFFSET_DLH), v) -#define SIR_UART_PUT_LCR(port, v) bfin_write16(((port)->membase + OFFSET_LCR), v) -#define SIR_UART_PUT_GCTL(port, v) bfin_write16(((port)->membase + OFFSET_GCTL), v) - -#ifdef CONFIG_BF54x -#define SIR_UART_GET_LSR(port) bfin_read16((port)->membase + OFFSET_LSR) -#define SIR_UART_GET_IER(port) bfin_read16((port)->membase + OFFSET_IER_SET) -#define SIR_UART_SET_IER(port, v) bfin_write16(((port)->membase + OFFSET_IER_SET), v) -#define SIR_UART_CLEAR_IER(port, v) bfin_write16(((port)->membase + OFFSET_IER_CLEAR), v) -#define SIR_UART_PUT_LSR(port, v) bfin_write16(((port)->membase + OFFSET_LSR), v) -#define SIR_UART_CLEAR_LSR(port) bfin_write16(((port)->membase + OFFSET_LSR), -1) - -#define SIR_UART_SET_DLAB(port) -#define SIR_UART_CLEAR_DLAB(port) - -#define SIR_UART_ENABLE_INTS(port, v) SIR_UART_SET_IER(port, v) -#define SIR_UART_DISABLE_INTS(port) SIR_UART_CLEAR_IER(port, 0xF) -#define SIR_UART_STOP_TX(port) do { SIR_UART_PUT_LSR(port, TFI); SIR_UART_CLEAR_IER(port, ETBEI); } while (0) -#define SIR_UART_ENABLE_TX(port) do { SIR_UART_SET_IER(port, ETBEI); } while (0) -#define SIR_UART_STOP_RX(port) do { SIR_UART_CLEAR_IER(port, ERBFI); } while (0) -#define SIR_UART_ENABLE_RX(port) do { SIR_UART_SET_IER(port, ERBFI); } while (0) -#else - -#define SIR_UART_GET_IIR(port) bfin_read16((port)->membase + OFFSET_IIR) -#define SIR_UART_GET_IER(port) bfin_read16((port)->membase + OFFSET_IER) -#define SIR_UART_PUT_IER(port, v) bfin_write16(((port)->membase + OFFSET_IER), v) - -#define SIR_UART_SET_DLAB(port) do { SIR_UART_PUT_LCR(port, SIR_UART_GET_LCR(port) | DLAB); } while (0) -#define SIR_UART_CLEAR_DLAB(port) do { SIR_UART_PUT_LCR(port, SIR_UART_GET_LCR(port) & ~DLAB); } while (0) - -#define SIR_UART_ENABLE_INTS(port, v) SIR_UART_PUT_IER(port, v) -#define SIR_UART_DISABLE_INTS(port) SIR_UART_PUT_IER(port, 0) -#define SIR_UART_STOP_TX(port) do { SIR_UART_PUT_IER(port, SIR_UART_GET_IER(port) & ~ETBEI); } while (0) -#define SIR_UART_ENABLE_TX(port) do { SIR_UART_PUT_IER(port, SIR_UART_GET_IER(port) | ETBEI); } while (0) -#define SIR_UART_STOP_RX(port) do { SIR_UART_PUT_IER(port, SIR_UART_GET_IER(port) & ~ERBFI); } while (0) -#define SIR_UART_ENABLE_RX(port) do { 
SIR_UART_PUT_IER(port, SIR_UART_GET_IER(port) | ERBFI); } while (0) - -static inline unsigned int SIR_UART_GET_LSR(struct bfin_sir_port *port) -{ - unsigned int lsr = bfin_read16(port->membase + OFFSET_LSR); - port->lsr |= (lsr & (BI|FE|PE|OE)); - return lsr | port->lsr; -} - -static inline void SIR_UART_CLEAR_LSR(struct bfin_sir_port *port) -{ - port->lsr = 0; - bfin_read16(port->membase + OFFSET_LSR); -} -#endif +#define port_membase(port) (((struct bfin_sir_port *)(port))->membase) +#define get_lsr_cache(port) (((struct bfin_sir_port *)(port))->lsr) +#define put_lsr_cache(port, v) (((struct bfin_sir_port *)(port))->lsr = (v)) +#include <asm/bfin_serial.h> static const unsigned short per[][4] = { /* rx pin tx pin NULL uart_number */ diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 13bebab65d0..2333215bbb3 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_FIXED_PHY) += fixed.o obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o obj-$(CONFIG_NATIONAL_PHY) += national.o +obj-$(CONFIG_DP83640_PHY) += dp83640.o obj-$(CONFIG_STE10XP) += ste10Xp.o obj-$(CONFIG_MICREL_PHY) += micrel.o obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c new file mode 100644 index 00000000000..b0c9522bb53 --- /dev/null +++ b/drivers/net/phy/dp83640.c @@ -0,0 +1,1100 @@ +/* + * Driver for the National Semiconductor DP83640 PHYTER + * + * Copyright (C) 2010 OMICRON electronics GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ +#include <linux/ethtool.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/mii.h> +#include <linux/module.h> +#include <linux/net_tstamp.h> +#include <linux/netdevice.h> +#include <linux/phy.h> +#include <linux/ptp_classify.h> +#include <linux/ptp_clock_kernel.h> + +#include "dp83640_reg.h" + +#define DP83640_PHY_ID 0x20005ce1 +#define PAGESEL 0x13 +#define LAYER4 0x02 +#define LAYER2 0x01 +#define MAX_RXTS 4 +#define MAX_TXTS 4 +#define N_EXT_TS 1 +#define PSF_PTPVER 2 +#define PSF_EVNT 0x4000 +#define PSF_RX 0x2000 +#define PSF_TX 0x1000 +#define EXT_EVENT 1 +#define EXT_GPIO 1 +#define CAL_EVENT 2 +#define CAL_GPIO 9 +#define CAL_TRIGGER 2 + +/* phyter seems to miss the mark by 16 ns */ +#define ADJTIME_FIX 16 + +#if defined(__BIG_ENDIAN) +#define ENDIAN_FLAG 0 +#elif defined(__LITTLE_ENDIAN) +#define ENDIAN_FLAG PSF_ENDIAN +#endif + +#define SKB_PTP_TYPE(__skb) (*(unsigned int *)((__skb)->cb)) + +struct phy_rxts { + u16 ns_lo; /* ns[15:0] */ + u16 ns_hi; /* overflow[1:0], ns[29:16] */ + u16 sec_lo; /* sec[15:0] */ + u16 sec_hi; /* sec[31:16] */ + u16 seqid; /* sequenceId[15:0] */ + u16 msgtype; /* messageType[3:0], hash[11:0] */ +}; + +struct phy_txts { + u16 ns_lo; /* ns[15:0] */ + u16 ns_hi; /* overflow[1:0], ns[29:16] */ + u16 sec_lo; /* sec[15:0] */ + u16 sec_hi; /* sec[31:16] */ +}; + +struct rxts { + struct list_head list; + unsigned long tmo; + u64 ns; + u16 seqid; + u8 msgtype; + u16 hash; +}; + +struct dp83640_clock; + +struct dp83640_private { + struct list_head list; + struct dp83640_clock *clock; + struct phy_device *phydev; + struct work_struct ts_work; + int hwts_tx_en; + int hwts_rx_en; + int layer; + int version; + /* remember state of cfg0 during calibration */ + int cfg0; + /* remember the last event time stamp */ + struct phy_txts edata; + /* list of rx timestamps */ + struct list_head rxts; + struct list_head rxpool; + struct rxts rx_pool_data[MAX_RXTS]; + /* protects above three fields from concurrent access */ + spinlock_t rx_lock; + /* queues of incoming and outgoing packets */ + struct sk_buff_head rx_queue; + struct sk_buff_head tx_queue; +}; + +struct dp83640_clock { + /* keeps the instance in the 'phyter_clocks' list */ + struct list_head list; + /* we create one clock instance per MII bus */ + struct mii_bus *bus; + /* protects extended registers from concurrent access */ + struct mutex extreg_lock; + /* remembers which page was last selected */ + int page; + /* our advertised capabilities */ + struct ptp_clock_info caps; + /* protects the three fields below from concurrent access */ + struct mutex clock_lock; + /* the one phyter from which we shall read */ + struct dp83640_private *chosen; + /* list of the other attached phyters, not chosen */ + struct list_head phylist; + /* reference to our PTP hardware clock */ + struct ptp_clock *ptp_clock; +}; + +/* globals */ + +static int chosen_phy = -1; +static ushort cal_gpio = 4; + +module_param(chosen_phy, int, 0444); +module_param(cal_gpio, ushort, 0444); + +MODULE_PARM_DESC(chosen_phy, \ + "The address of the PHY to use for the ancillary clock features"); +MODULE_PARM_DESC(cal_gpio, \ + "Which GPIO line to use for synchronizing multiple PHYs"); + +/* a list of clocks and a mutex to protect it */ +static LIST_HEAD(phyter_clocks); +static DEFINE_MUTEX(phyter_clocks_lock); + +static void rx_timestamp_work(struct work_struct *work); + +/* extended register access functions */ + +#define BROADCAST_ADDR 31 + +static inline int broadcast_write(struct mii_bus *bus, u32 regnum, u16 val) +{ + 
return mdiobus_write(bus, BROADCAST_ADDR, regnum, val); +} + +/* Caller must hold extreg_lock. */ +static int ext_read(struct phy_device *phydev, int page, u32 regnum) +{ + struct dp83640_private *dp83640 = phydev->priv; + int val; + + if (dp83640->clock->page != page) { + broadcast_write(phydev->bus, PAGESEL, page); + dp83640->clock->page = page; + } + val = phy_read(phydev, regnum); + + return val; +} + +/* Caller must hold extreg_lock. */ +static void ext_write(int broadcast, struct phy_device *phydev, + int page, u32 regnum, u16 val) +{ + struct dp83640_private *dp83640 = phydev->priv; + + if (dp83640->clock->page != page) { + broadcast_write(phydev->bus, PAGESEL, page); + dp83640->clock->page = page; + } + if (broadcast) + broadcast_write(phydev->bus, regnum, val); + else + phy_write(phydev, regnum, val); +} + +/* Caller must hold extreg_lock. */ +static int tdr_write(int bc, struct phy_device *dev, + const struct timespec *ts, u16 cmd) +{ + ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_nsec & 0xffff);/* ns[15:0] */ + ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_nsec >> 16); /* ns[31:16] */ + ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_sec & 0xffff); /* sec[15:0] */ + ext_write(bc, dev, PAGE4, PTP_TDR, ts->tv_sec >> 16); /* sec[31:16]*/ + + ext_write(bc, dev, PAGE4, PTP_CTL, cmd); + + return 0; +} + +/* convert phy timestamps into driver timestamps */ + +static void phy2rxts(struct phy_rxts *p, struct rxts *rxts) +{ + u32 sec; + + sec = p->sec_lo; + sec |= p->sec_hi << 16; + + rxts->ns = p->ns_lo; + rxts->ns |= (p->ns_hi & 0x3fff) << 16; + rxts->ns += ((u64)sec) * 1000000000ULL; + rxts->seqid = p->seqid; + rxts->msgtype = (p->msgtype >> 12) & 0xf; + rxts->hash = p->msgtype & 0x0fff; + rxts->tmo = jiffies + HZ; +} + +static u64 phy2txts(struct phy_txts *p) +{ + u64 ns; + u32 sec; + + sec = p->sec_lo; + sec |= p->sec_hi << 16; + + ns = p->ns_lo; + ns |= (p->ns_hi & 0x3fff) << 16; + ns += ((u64)sec) * 1000000000ULL; + + return ns; +} + +/* ptp clock methods */ + +static int ptp_dp83640_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct dp83640_clock *clock = + container_of(ptp, struct dp83640_clock, caps); + struct phy_device *phydev = clock->chosen->phydev; + u64 rate; + int neg_adj = 0; + u16 hi, lo; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + rate = ppb; + rate <<= 26; + rate = div_u64(rate, 1953125); + + hi = (rate >> 16) & PTP_RATE_HI_MASK; + if (neg_adj) + hi |= PTP_RATE_DIR; + + lo = rate & 0xffff; + + mutex_lock(&clock->extreg_lock); + + ext_write(1, phydev, PAGE4, PTP_RATEH, hi); + ext_write(1, phydev, PAGE4, PTP_RATEL, lo); + + mutex_unlock(&clock->extreg_lock); + + return 0; +} + +static int ptp_dp83640_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct dp83640_clock *clock = + container_of(ptp, struct dp83640_clock, caps); + struct phy_device *phydev = clock->chosen->phydev; + struct timespec ts; + int err; + + delta += ADJTIME_FIX; + + ts = ns_to_timespec(delta); + + mutex_lock(&clock->extreg_lock); + + err = tdr_write(1, phydev, &ts, PTP_STEP_CLK); + + mutex_unlock(&clock->extreg_lock); + + return err; +} + +static int ptp_dp83640_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +{ + struct dp83640_clock *clock = + container_of(ptp, struct dp83640_clock, caps); + struct phy_device *phydev = clock->chosen->phydev; + unsigned int val[4]; + + mutex_lock(&clock->extreg_lock); + + ext_write(0, phydev, PAGE4, PTP_CTL, PTP_RD_CLK); + + val[0] = ext_read(phydev, PAGE4, PTP_TDR); /* ns[15:0] */ + val[1] = ext_read(phydev, PAGE4, PTP_TDR); /* ns[31:16] */ + 
val[2] = ext_read(phydev, PAGE4, PTP_TDR); /* sec[15:0] */ + val[3] = ext_read(phydev, PAGE4, PTP_TDR); /* sec[31:16] */ + + mutex_unlock(&clock->extreg_lock); + + ts->tv_nsec = val[0] | (val[1] << 16); + ts->tv_sec = val[2] | (val[3] << 16); + + return 0; +} + +static int ptp_dp83640_settime(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + struct dp83640_clock *clock = + container_of(ptp, struct dp83640_clock, caps); + struct phy_device *phydev = clock->chosen->phydev; + int err; + + mutex_lock(&clock->extreg_lock); + + err = tdr_write(1, phydev, ts, PTP_LOAD_CLK); + + mutex_unlock(&clock->extreg_lock); + + return err; +} + +static int ptp_dp83640_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct dp83640_clock *clock = + container_of(ptp, struct dp83640_clock, caps); + struct phy_device *phydev = clock->chosen->phydev; + u16 evnt; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + if (rq->extts.index != 0) + return -EINVAL; + evnt = EVNT_WR | (EXT_EVENT & EVNT_SEL_MASK) << EVNT_SEL_SHIFT; + if (on) { + evnt |= (EXT_GPIO & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT; + evnt |= EVNT_RISE; + } + ext_write(0, phydev, PAGE5, PTP_EVNT, evnt); + return 0; + default: + break; + } + + return -EOPNOTSUPP; +} + +static u8 status_frame_dst[6] = { 0x01, 0x1B, 0x19, 0x00, 0x00, 0x00 }; +static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F }; + +static void enable_status_frames(struct phy_device *phydev, bool on) +{ + u16 cfg0 = 0, ver; + + if (on) + cfg0 = PSF_EVNT_EN | PSF_RXTS_EN | PSF_TXTS_EN | ENDIAN_FLAG; + + ver = (PSF_PTPVER & VERSIONPTP_MASK) << VERSIONPTP_SHIFT; + + ext_write(0, phydev, PAGE5, PSF_CFG0, cfg0); + ext_write(0, phydev, PAGE6, PSF_CFG1, ver); + + if (!phydev->attached_dev) { + pr_warning("dp83640: expected to find an attached netdevice\n"); + return; + } + + if (on) { + if (dev_mc_add(phydev->attached_dev, status_frame_dst)) + pr_warning("dp83640: failed to add mc address\n"); + } else { + if (dev_mc_del(phydev->attached_dev, status_frame_dst)) + pr_warning("dp83640: failed to delete mc address\n"); + } +} + +static bool is_status_frame(struct sk_buff *skb, int type) +{ + struct ethhdr *h = eth_hdr(skb); + + if (PTP_CLASS_V2_L2 == type && + !memcmp(h->h_source, status_frame_src, sizeof(status_frame_src))) + return true; + else + return false; +} + +static int expired(struct rxts *rxts) +{ + return time_after(jiffies, rxts->tmo); +} + +/* Caller must hold rx_lock. 
*/ +static void prune_rx_ts(struct dp83640_private *dp83640) +{ + struct list_head *this, *next; + struct rxts *rxts; + + list_for_each_safe(this, next, &dp83640->rxts) { + rxts = list_entry(this, struct rxts, list); + if (expired(rxts)) { + list_del_init(&rxts->list); + list_add(&rxts->list, &dp83640->rxpool); + } + } +} + +/* synchronize the phyters so they act as one clock */ + +static void enable_broadcast(struct phy_device *phydev, int init_page, int on) +{ + int val; + phy_write(phydev, PAGESEL, 0); + val = phy_read(phydev, PHYCR2); + if (on) + val |= BC_WRITE; + else + val &= ~BC_WRITE; + phy_write(phydev, PHYCR2, val); + phy_write(phydev, PAGESEL, init_page); +} + +static void recalibrate(struct dp83640_clock *clock) +{ + s64 now, diff; + struct phy_txts event_ts; + struct timespec ts; + struct list_head *this; + struct dp83640_private *tmp; + struct phy_device *master = clock->chosen->phydev; + u16 cfg0, evnt, ptp_trig, trigger, val; + + trigger = CAL_TRIGGER; + + mutex_lock(&clock->extreg_lock); + + /* + * enable broadcast, disable status frames, enable ptp clock + */ + list_for_each(this, &clock->phylist) { + tmp = list_entry(this, struct dp83640_private, list); + enable_broadcast(tmp->phydev, clock->page, 1); + tmp->cfg0 = ext_read(tmp->phydev, PAGE5, PSF_CFG0); + ext_write(0, tmp->phydev, PAGE5, PSF_CFG0, 0); + ext_write(0, tmp->phydev, PAGE4, PTP_CTL, PTP_ENABLE); + } + enable_broadcast(master, clock->page, 1); + cfg0 = ext_read(master, PAGE5, PSF_CFG0); + ext_write(0, master, PAGE5, PSF_CFG0, 0); + ext_write(0, master, PAGE4, PTP_CTL, PTP_ENABLE); + + /* + * enable an event timestamp + */ + evnt = EVNT_WR | EVNT_RISE | EVNT_SINGLE; + evnt |= (CAL_EVENT & EVNT_SEL_MASK) << EVNT_SEL_SHIFT; + evnt |= (cal_gpio & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT; + + list_for_each(this, &clock->phylist) { + tmp = list_entry(this, struct dp83640_private, list); + ext_write(0, tmp->phydev, PAGE5, PTP_EVNT, evnt); + } + ext_write(0, master, PAGE5, PTP_EVNT, evnt); + + /* + * configure a trigger + */ + ptp_trig = TRIG_WR | TRIG_IF_LATE | TRIG_PULSE; + ptp_trig |= (trigger & TRIG_CSEL_MASK) << TRIG_CSEL_SHIFT; + ptp_trig |= (cal_gpio & TRIG_GPIO_MASK) << TRIG_GPIO_SHIFT; + ext_write(0, master, PAGE5, PTP_TRIG, ptp_trig); + + /* load trigger */ + val = (trigger & TRIG_SEL_MASK) << TRIG_SEL_SHIFT; + val |= TRIG_LOAD; + ext_write(0, master, PAGE4, PTP_CTL, val); + + /* enable trigger */ + val &= ~TRIG_LOAD; + val |= TRIG_EN; + ext_write(0, master, PAGE4, PTP_CTL, val); + + /* disable trigger */ + val = (trigger & TRIG_SEL_MASK) << TRIG_SEL_SHIFT; + val |= TRIG_DIS; + ext_write(0, master, PAGE4, PTP_CTL, val); + + /* + * read out and correct offsets + */ + val = ext_read(master, PAGE4, PTP_STS); + pr_info("master PTP_STS 0x%04hx", val); + val = ext_read(master, PAGE4, PTP_ESTS); + pr_info("master PTP_ESTS 0x%04hx", val); + event_ts.ns_lo = ext_read(master, PAGE4, PTP_EDATA); + event_ts.ns_hi = ext_read(master, PAGE4, PTP_EDATA); + event_ts.sec_lo = ext_read(master, PAGE4, PTP_EDATA); + event_ts.sec_hi = ext_read(master, PAGE4, PTP_EDATA); + now = phy2txts(&event_ts); + + list_for_each(this, &clock->phylist) { + tmp = list_entry(this, struct dp83640_private, list); + val = ext_read(tmp->phydev, PAGE4, PTP_STS); + pr_info("slave PTP_STS 0x%04hx", val); + val = ext_read(tmp->phydev, PAGE4, PTP_ESTS); + pr_info("slave PTP_ESTS 0x%04hx", val); + event_ts.ns_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA); + event_ts.ns_hi = ext_read(tmp->phydev, PAGE4, PTP_EDATA); + event_ts.sec_lo = ext_read(tmp->phydev, 
PAGE4, PTP_EDATA); + event_ts.sec_hi = ext_read(tmp->phydev, PAGE4, PTP_EDATA); + diff = now - (s64) phy2txts(&event_ts); + pr_info("slave offset %lld nanoseconds\n", diff); + diff += ADJTIME_FIX; + ts = ns_to_timespec(diff); + tdr_write(0, tmp->phydev, &ts, PTP_STEP_CLK); + } + + /* + * restore status frames + */ + list_for_each(this, &clock->phylist) { + tmp = list_entry(this, struct dp83640_private, list); + ext_write(0, tmp->phydev, PAGE5, PSF_CFG0, tmp->cfg0); + } + ext_write(0, master, PAGE5, PSF_CFG0, cfg0); + + mutex_unlock(&clock->extreg_lock); +} + +/* time stamping methods */ + +static void decode_evnt(struct dp83640_private *dp83640, + struct phy_txts *phy_txts, u16 ests) +{ + struct ptp_clock_event event; + int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK; + + switch (words) { /* fall through in every case */ + case 3: + dp83640->edata.sec_hi = phy_txts->sec_hi; + case 2: + dp83640->edata.sec_lo = phy_txts->sec_lo; + case 1: + dp83640->edata.ns_hi = phy_txts->ns_hi; + case 0: + dp83640->edata.ns_lo = phy_txts->ns_lo; + } + + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = phy2txts(&dp83640->edata); + + ptp_clock_event(dp83640->clock->ptp_clock, &event); +} + +static void decode_rxts(struct dp83640_private *dp83640, + struct phy_rxts *phy_rxts) +{ + struct rxts *rxts; + unsigned long flags; + + spin_lock_irqsave(&dp83640->rx_lock, flags); + + prune_rx_ts(dp83640); + + if (list_empty(&dp83640->rxpool)) { + pr_warning("dp83640: rx timestamp pool is empty\n"); + goto out; + } + rxts = list_first_entry(&dp83640->rxpool, struct rxts, list); + list_del_init(&rxts->list); + phy2rxts(phy_rxts, rxts); + list_add_tail(&rxts->list, &dp83640->rxts); +out: + spin_unlock_irqrestore(&dp83640->rx_lock, flags); +} + +static void decode_txts(struct dp83640_private *dp83640, + struct phy_txts *phy_txts) +{ + struct skb_shared_hwtstamps shhwtstamps; + struct sk_buff *skb; + u64 ns; + + /* We must already have the skb that triggered this. 
*/ + + skb = skb_dequeue(&dp83640->tx_queue); + + if (!skb) { + pr_warning("dp83640: have timestamp but tx_queue empty\n"); + return; + } + ns = phy2txts(phy_txts); + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = ns_to_ktime(ns); + skb_complete_tx_timestamp(skb, &shhwtstamps); +} + +static void decode_status_frame(struct dp83640_private *dp83640, + struct sk_buff *skb) +{ + struct phy_rxts *phy_rxts; + struct phy_txts *phy_txts; + u8 *ptr; + int len, size; + u16 ests, type; + + ptr = skb->data + 2; + + for (len = skb_headlen(skb) - 2; len > sizeof(type); len -= size) { + + type = *(u16 *)ptr; + ests = type & 0x0fff; + type = type & 0xf000; + len -= sizeof(type); + ptr += sizeof(type); + + if (PSF_RX == type && len >= sizeof(*phy_rxts)) { + + phy_rxts = (struct phy_rxts *) ptr; + decode_rxts(dp83640, phy_rxts); + size = sizeof(*phy_rxts); + + } else if (PSF_TX == type && len >= sizeof(*phy_txts)) { + + phy_txts = (struct phy_txts *) ptr; + decode_txts(dp83640, phy_txts); + size = sizeof(*phy_txts); + + } else if (PSF_EVNT == type && len >= sizeof(*phy_txts)) { + + phy_txts = (struct phy_txts *) ptr; + decode_evnt(dp83640, phy_txts, ests); + size = sizeof(*phy_txts); + + } else { + size = 0; + break; + } + ptr += size; + } +} + +static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts) +{ + u16 *seqid; + unsigned int offset; + u8 *msgtype, *data = skb_mac_header(skb); + + /* check sequenceID, messageType, 12 bit hash of offset 20-29 */ + + switch (type) { + case PTP_CLASS_V1_IPV4: + case PTP_CLASS_V2_IPV4: + offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; + break; + case PTP_CLASS_V1_IPV6: + case PTP_CLASS_V2_IPV6: + offset = OFF_PTP6; + break; + case PTP_CLASS_V2_L2: + offset = ETH_HLEN; + break; + case PTP_CLASS_V2_VLAN: + offset = ETH_HLEN + VLAN_HLEN; + break; + default: + return 0; + } + + if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid)) + return 0; + + if (unlikely(type & PTP_CLASS_V1)) + msgtype = data + offset + OFF_PTP_CONTROL; + else + msgtype = data + offset; + + seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID); + + return (rxts->msgtype == (*msgtype & 0xf) && + rxts->seqid == ntohs(*seqid)); +} + +static void dp83640_free_clocks(void) +{ + struct dp83640_clock *clock; + struct list_head *this, *next; + + mutex_lock(&phyter_clocks_lock); + + list_for_each_safe(this, next, &phyter_clocks) { + clock = list_entry(this, struct dp83640_clock, list); + if (!list_empty(&clock->phylist)) { + pr_warning("phy list non-empty while unloading"); + BUG(); + } + list_del(&clock->list); + mutex_destroy(&clock->extreg_lock); + mutex_destroy(&clock->clock_lock); + put_device(&clock->bus->dev); + kfree(clock); + } + + mutex_unlock(&phyter_clocks_lock); +} + +static void dp83640_clock_init(struct dp83640_clock *clock, struct mii_bus *bus) +{ + INIT_LIST_HEAD(&clock->list); + clock->bus = bus; + mutex_init(&clock->extreg_lock); + mutex_init(&clock->clock_lock); + INIT_LIST_HEAD(&clock->phylist); + clock->caps.owner = THIS_MODULE; + sprintf(clock->caps.name, "dp83640 timer"); + clock->caps.max_adj = 1953124; + clock->caps.n_alarm = 0; + clock->caps.n_ext_ts = N_EXT_TS; + clock->caps.n_per_out = 0; + clock->caps.pps = 0; + clock->caps.adjfreq = ptp_dp83640_adjfreq; + clock->caps.adjtime = ptp_dp83640_adjtime; + clock->caps.gettime = ptp_dp83640_gettime; + clock->caps.settime = ptp_dp83640_settime; + clock->caps.enable = ptp_dp83640_enable; + /* + * Get a reference to this bus instance. 
+ */ + get_device(&bus->dev); +} + +static int choose_this_phy(struct dp83640_clock *clock, + struct phy_device *phydev) +{ + if (chosen_phy == -1 && !clock->chosen) + return 1; + + if (chosen_phy == phydev->addr) + return 1; + + return 0; +} + +static struct dp83640_clock *dp83640_clock_get(struct dp83640_clock *clock) +{ + if (clock) + mutex_lock(&clock->clock_lock); + return clock; +} + +/* + * Look up and lock a clock by bus instance. + * If there is no clock for this bus, then create it first. + */ +static struct dp83640_clock *dp83640_clock_get_bus(struct mii_bus *bus) +{ + struct dp83640_clock *clock = NULL, *tmp; + struct list_head *this; + + mutex_lock(&phyter_clocks_lock); + + list_for_each(this, &phyter_clocks) { + tmp = list_entry(this, struct dp83640_clock, list); + if (tmp->bus == bus) { + clock = tmp; + break; + } + } + if (clock) + goto out; + + clock = kzalloc(sizeof(struct dp83640_clock), GFP_KERNEL); + if (!clock) + goto out; + + dp83640_clock_init(clock, bus); + list_add_tail(&phyter_clocks, &clock->list); +out: + mutex_unlock(&phyter_clocks_lock); + + return dp83640_clock_get(clock); +} + +static void dp83640_clock_put(struct dp83640_clock *clock) +{ + mutex_unlock(&clock->clock_lock); +} + +static int dp83640_probe(struct phy_device *phydev) +{ + struct dp83640_clock *clock; + struct dp83640_private *dp83640; + int err = -ENOMEM, i; + + if (phydev->addr == BROADCAST_ADDR) + return 0; + + clock = dp83640_clock_get_bus(phydev->bus); + if (!clock) + goto no_clock; + + dp83640 = kzalloc(sizeof(struct dp83640_private), GFP_KERNEL); + if (!dp83640) + goto no_memory; + + dp83640->phydev = phydev; + INIT_WORK(&dp83640->ts_work, rx_timestamp_work); + + INIT_LIST_HEAD(&dp83640->rxts); + INIT_LIST_HEAD(&dp83640->rxpool); + for (i = 0; i < MAX_RXTS; i++) + list_add(&dp83640->rx_pool_data[i].list, &dp83640->rxpool); + + phydev->priv = dp83640; + + spin_lock_init(&dp83640->rx_lock); + skb_queue_head_init(&dp83640->rx_queue); + skb_queue_head_init(&dp83640->tx_queue); + + dp83640->clock = clock; + + if (choose_this_phy(clock, phydev)) { + clock->chosen = dp83640; + clock->ptp_clock = ptp_clock_register(&clock->caps); + if (IS_ERR(clock->ptp_clock)) { + err = PTR_ERR(clock->ptp_clock); + goto no_register; + } + } else + list_add_tail(&dp83640->list, &clock->phylist); + + if (clock->chosen && !list_empty(&clock->phylist)) + recalibrate(clock); + else + enable_broadcast(dp83640->phydev, clock->page, 1); + + dp83640_clock_put(clock); + return 0; + +no_register: + clock->chosen = NULL; + kfree(dp83640); +no_memory: + dp83640_clock_put(clock); +no_clock: + return err; +} + +static void dp83640_remove(struct phy_device *phydev) +{ + struct dp83640_clock *clock; + struct list_head *this, *next; + struct dp83640_private *tmp, *dp83640 = phydev->priv; + + if (phydev->addr == BROADCAST_ADDR) + return; + + enable_status_frames(phydev, false); + cancel_work_sync(&dp83640->ts_work); + + clock = dp83640_clock_get(dp83640->clock); + + if (dp83640 == clock->chosen) { + ptp_clock_unregister(clock->ptp_clock); + clock->chosen = NULL; + } else { + list_for_each_safe(this, next, &clock->phylist) { + tmp = list_entry(this, struct dp83640_private, list); + if (tmp == dp83640) { + list_del_init(&tmp->list); + break; + } + } + } + + dp83640_clock_put(clock); + kfree(dp83640); +} + +static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr) +{ + struct dp83640_private *dp83640 = phydev->priv; + struct hwtstamp_config cfg; + u16 txcfg0, rxcfg0; + + if (copy_from_user(&cfg, ifr->ifr_data, 
sizeof(cfg))) + return -EFAULT; + + if (cfg.flags) /* reserved for future extensions */ + return -EINVAL; + + switch (cfg.tx_type) { + case HWTSTAMP_TX_OFF: + dp83640->hwts_tx_en = 0; + break; + case HWTSTAMP_TX_ON: + dp83640->hwts_tx_en = 1; + break; + default: + return -ERANGE; + } + + switch (cfg.rx_filter) { + case HWTSTAMP_FILTER_NONE: + dp83640->hwts_rx_en = 0; + dp83640->layer = 0; + dp83640->version = 0; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + dp83640->hwts_rx_en = 1; + dp83640->layer = LAYER4; + dp83640->version = 1; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + dp83640->hwts_rx_en = 1; + dp83640->layer = LAYER4; + dp83640->version = 2; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + dp83640->hwts_rx_en = 1; + dp83640->layer = LAYER2; + dp83640->version = 2; + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + dp83640->hwts_rx_en = 1; + dp83640->layer = LAYER4|LAYER2; + dp83640->version = 2; + break; + default: + return -ERANGE; + } + + txcfg0 = (dp83640->version & TX_PTP_VER_MASK) << TX_PTP_VER_SHIFT; + rxcfg0 = (dp83640->version & TX_PTP_VER_MASK) << TX_PTP_VER_SHIFT; + + if (dp83640->layer & LAYER2) { + txcfg0 |= TX_L2_EN; + rxcfg0 |= RX_L2_EN; + } + if (dp83640->layer & LAYER4) { + txcfg0 |= TX_IPV6_EN | TX_IPV4_EN; + rxcfg0 |= RX_IPV6_EN | RX_IPV4_EN; + } + + if (dp83640->hwts_tx_en) + txcfg0 |= TX_TS_EN; + + if (dp83640->hwts_rx_en) + rxcfg0 |= RX_TS_EN; + + mutex_lock(&dp83640->clock->extreg_lock); + + if (dp83640->hwts_tx_en || dp83640->hwts_rx_en) { + enable_status_frames(phydev, true); + ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE); + } + + ext_write(0, phydev, PAGE5, PTP_TXCFG0, txcfg0); + ext_write(0, phydev, PAGE5, PTP_RXCFG0, rxcfg0); + + mutex_unlock(&dp83640->clock->extreg_lock); + + return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; +} + +static void rx_timestamp_work(struct work_struct *work) +{ + struct dp83640_private *dp83640 = + container_of(work, struct dp83640_private, ts_work); + struct list_head *this, *next; + struct rxts *rxts; + struct skb_shared_hwtstamps *shhwtstamps; + struct sk_buff *skb; + unsigned int type; + unsigned long flags; + + /* Deliver each deferred packet, with or without a time stamp. */ + + while ((skb = skb_dequeue(&dp83640->rx_queue)) != NULL) { + type = SKB_PTP_TYPE(skb); + spin_lock_irqsave(&dp83640->rx_lock, flags); + list_for_each_safe(this, next, &dp83640->rxts) { + rxts = list_entry(this, struct rxts, list); + if (match(skb, type, rxts)) { + shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + shhwtstamps->hwtstamp = ns_to_ktime(rxts->ns); + list_del_init(&rxts->list); + list_add(&rxts->list, &dp83640->rxpool); + break; + } + } + spin_unlock_irqrestore(&dp83640->rx_lock, flags); + netif_rx(skb); + } + + /* Clear out expired time stamps. 
*/ + + spin_lock_irqsave(&dp83640->rx_lock, flags); + prune_rx_ts(dp83640); + spin_unlock_irqrestore(&dp83640->rx_lock, flags); +} + +static bool dp83640_rxtstamp(struct phy_device *phydev, + struct sk_buff *skb, int type) +{ + struct dp83640_private *dp83640 = phydev->priv; + + if (!dp83640->hwts_rx_en) + return false; + + if (is_status_frame(skb, type)) { + decode_status_frame(dp83640, skb); + /* Let the stack drop this frame. */ + return false; + } + + SKB_PTP_TYPE(skb) = type; + skb_queue_tail(&dp83640->rx_queue, skb); + schedule_work(&dp83640->ts_work); + + return true; +} + +static void dp83640_txtstamp(struct phy_device *phydev, + struct sk_buff *skb, int type) +{ + struct dp83640_private *dp83640 = phydev->priv; + + if (!dp83640->hwts_tx_en) { + kfree_skb(skb); + return; + } + skb_queue_tail(&dp83640->tx_queue, skb); + schedule_work(&dp83640->ts_work); +} + +static struct phy_driver dp83640_driver = { + .phy_id = DP83640_PHY_ID, + .phy_id_mask = 0xfffffff0, + .name = "NatSemi DP83640", + .features = PHY_BASIC_FEATURES, + .flags = 0, + .probe = dp83640_probe, + .remove = dp83640_remove, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .hwtstamp = dp83640_hwtstamp, + .rxtstamp = dp83640_rxtstamp, + .txtstamp = dp83640_txtstamp, + .driver = {.owner = THIS_MODULE,} +}; + +static int __init dp83640_init(void) +{ + return phy_driver_register(&dp83640_driver); +} + +static void __exit dp83640_exit(void) +{ + dp83640_free_clocks(); + phy_driver_unregister(&dp83640_driver); +} + +MODULE_DESCRIPTION("National Semiconductor DP83640 PHY driver"); +MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>"); +MODULE_LICENSE("GPL"); + +module_init(dp83640_init); +module_exit(dp83640_exit); + +static struct mdio_device_id __maybe_unused dp83640_tbl[] = { + { DP83640_PHY_ID, 0xfffffff0 }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, dp83640_tbl); diff --git a/drivers/net/phy/dp83640_reg.h b/drivers/net/phy/dp83640_reg.h new file mode 100644 index 00000000000..e7fe4111700 --- /dev/null +++ b/drivers/net/phy/dp83640_reg.h @@ -0,0 +1,267 @@ +/* dp83640_reg.h + * Generated by regen.tcl on Thu Feb 17 10:02:48 AM CET 2011 + */ +#ifndef HAVE_DP83640_REGISTERS +#define HAVE_DP83640_REGISTERS + +#define PAGE0 0x0000 +#define PHYCR2 0x001c /* PHY Control Register 2 */ + +#define PAGE4 0x0004 +#define PTP_CTL 0x0014 /* PTP Control Register */ +#define PTP_TDR 0x0015 /* PTP Time Data Register */ +#define PTP_STS 0x0016 /* PTP Status Register */ +#define PTP_TSTS 0x0017 /* PTP Trigger Status Register */ +#define PTP_RATEL 0x0018 /* PTP Rate Low Register */ +#define PTP_RATEH 0x0019 /* PTP Rate High Register */ +#define PTP_RDCKSUM 0x001a /* PTP Read Checksum */ +#define PTP_WRCKSUM 0x001b /* PTP Write Checksum */ +#define PTP_TXTS 0x001c /* PTP Transmit Timestamp Register, in four 16-bit reads */ +#define PTP_RXTS 0x001d /* PTP Receive Timestamp Register, in six? 
16-bit reads */ +#define PTP_ESTS 0x001e /* PTP Event Status Register */ +#define PTP_EDATA 0x001f /* PTP Event Data Register */ + +#define PAGE5 0x0005 +#define PTP_TRIG 0x0014 /* PTP Trigger Configuration Register */ +#define PTP_EVNT 0x0015 /* PTP Event Configuration Register */ +#define PTP_TXCFG0 0x0016 /* PTP Transmit Configuration Register 0 */ +#define PTP_TXCFG1 0x0017 /* PTP Transmit Configuration Register 1 */ +#define PSF_CFG0 0x0018 /* PHY Status Frame Configuration Register 0 */ +#define PTP_RXCFG0 0x0019 /* PTP Receive Configuration Register 0 */ +#define PTP_RXCFG1 0x001a /* PTP Receive Configuration Register 1 */ +#define PTP_RXCFG2 0x001b /* PTP Receive Configuration Register 2 */ +#define PTP_RXCFG3 0x001c /* PTP Receive Configuration Register 3 */ +#define PTP_RXCFG4 0x001d /* PTP Receive Configuration Register 4 */ +#define PTP_TRDL 0x001e /* PTP Temporary Rate Duration Low Register */ +#define PTP_TRDH 0x001f /* PTP Temporary Rate Duration High Register */ + +#define PAGE6 0x0006 +#define PTP_COC 0x0014 /* PTP Clock Output Control Register */ +#define PSF_CFG1 0x0015 /* PHY Status Frame Configuration Register 1 */ +#define PSF_CFG2 0x0016 /* PHY Status Frame Configuration Register 2 */ +#define PSF_CFG3 0x0017 /* PHY Status Frame Configuration Register 3 */ +#define PSF_CFG4 0x0018 /* PHY Status Frame Configuration Register 4 */ +#define PTP_SFDCFG 0x0019 /* PTP SFD Configuration Register */ +#define PTP_INTCTL 0x001a /* PTP Interrupt Control Register */ +#define PTP_CLKSRC 0x001b /* PTP Clock Source Register */ +#define PTP_ETR 0x001c /* PTP Ethernet Type Register */ +#define PTP_OFF 0x001d /* PTP Offset Register */ +#define PTP_GPIOMON 0x001e /* PTP GPIO Monitor Register */ +#define PTP_RXHASH 0x001f /* PTP Receive Hash Register */ + +/* Bit definitions for the PHYCR2 register */ +#define BC_WRITE (1<<11) /* Broadcast Write Enable */ + +/* Bit definitions for the PTP_CTL register */ +#define TRIG_SEL_SHIFT (10) /* PTP Trigger Select */ +#define TRIG_SEL_MASK (0x7) +#define TRIG_DIS (1<<9) /* Disable PTP Trigger */ +#define TRIG_EN (1<<8) /* Enable PTP Trigger */ +#define TRIG_READ (1<<7) /* Read PTP Trigger */ +#define TRIG_LOAD (1<<6) /* Load PTP Trigger */ +#define PTP_RD_CLK (1<<5) /* Read PTP Clock */ +#define PTP_LOAD_CLK (1<<4) /* Load PTP Clock */ +#define PTP_STEP_CLK (1<<3) /* Step PTP Clock */ +#define PTP_ENABLE (1<<2) /* Enable PTP Clock */ +#define PTP_DISABLE (1<<1) /* Disable PTP Clock */ +#define PTP_RESET (1<<0) /* Reset PTP Clock */ + +/* Bit definitions for the PTP_STS register */ +#define TXTS_RDY (1<<11) /* Transmit Timestamp Ready */ +#define RXTS_RDY (1<<10) /* Receive Timestamp Ready */ +#define TRIG_DONE (1<<9) /* PTP Trigger Done */ +#define EVENT_RDY (1<<8) /* PTP Event Timestamp Ready */ +#define TXTS_IE (1<<3) /* Transmit Timestamp Interrupt Enable */ +#define RXTS_IE (1<<2) /* Receive Timestamp Interrupt Enable */ +#define TRIG_IE (1<<1) /* Trigger Interrupt Enable */ +#define EVENT_IE (1<<0) /* Event Interrupt Enable */ + +/* Bit definitions for the PTP_TSTS register */ +#define TRIG7_ERROR (1<<15) /* Trigger 7 Error */ +#define TRIG7_ACTIVE (1<<14) /* Trigger 7 Active */ +#define TRIG6_ERROR (1<<13) /* Trigger 6 Error */ +#define TRIG6_ACTIVE (1<<12) /* Trigger 6 Active */ +#define TRIG5_ERROR (1<<11) /* Trigger 5 Error */ +#define TRIG5_ACTIVE (1<<10) /* Trigger 5 Active */ +#define TRIG4_ERROR (1<<9) /* Trigger 4 Error */ +#define TRIG4_ACTIVE (1<<8) /* Trigger 4 Active */ +#define TRIG3_ERROR (1<<7) /* Trigger 3 Error */ +#define 
TRIG3_ACTIVE (1<<6) /* Trigger 3 Active */ +#define TRIG2_ERROR (1<<5) /* Trigger 2 Error */ +#define TRIG2_ACTIVE (1<<4) /* Trigger 2 Active */ +#define TRIG1_ERROR (1<<3) /* Trigger 1 Error */ +#define TRIG1_ACTIVE (1<<2) /* Trigger 1 Active */ +#define TRIG0_ERROR (1<<1) /* Trigger 0 Error */ +#define TRIG0_ACTIVE (1<<0) /* Trigger 0 Active */ + +/* Bit definitions for the PTP_RATEH register */ +#define PTP_RATE_DIR (1<<15) /* PTP Rate Direction */ +#define PTP_TMP_RATE (1<<14) /* PTP Temporary Rate */ +#define PTP_RATE_HI_SHIFT (0) /* PTP Rate High 10-bits */ +#define PTP_RATE_HI_MASK (0x3ff) + +/* Bit definitions for the PTP_ESTS register */ +#define EVNTS_MISSED_SHIFT (8) /* Indicates number of events missed */ +#define EVNTS_MISSED_MASK (0x7) +#define EVNT_TS_LEN_SHIFT (6) /* Indicates length of the Timestamp field in 16-bit words minus 1 */ +#define EVNT_TS_LEN_MASK (0x3) +#define EVNT_RF (1<<5) /* Indicates whether the event is a rise or falling event */ +#define EVNT_NUM_SHIFT (2) /* Indicates Event Timestamp Unit which detected an event */ +#define EVNT_NUM_MASK (0x7) +#define MULT_EVNT (1<<1) /* Indicates multiple events were detected at the same time */ +#define EVENT_DET (1<<0) /* PTP Event Detected */ + +/* Bit definitions for the PTP_EDATA register */ +#define E7_RISE (1<<15) /* Indicates direction of Event 7 */ +#define E7_DET (1<<14) /* Indicates Event 7 detected */ +#define E6_RISE (1<<13) /* Indicates direction of Event 6 */ +#define E6_DET (1<<12) /* Indicates Event 6 detected */ +#define E5_RISE (1<<11) /* Indicates direction of Event 5 */ +#define E5_DET (1<<10) /* Indicates Event 5 detected */ +#define E4_RISE (1<<9) /* Indicates direction of Event 4 */ +#define E4_DET (1<<8) /* Indicates Event 4 detected */ +#define E3_RISE (1<<7) /* Indicates direction of Event 3 */ +#define E3_DET (1<<6) /* Indicates Event 3 detected */ +#define E2_RISE (1<<5) /* Indicates direction of Event 2 */ +#define E2_DET (1<<4) /* Indicates Event 2 detected */ +#define E1_RISE (1<<3) /* Indicates direction of Event 1 */ +#define E1_DET (1<<2) /* Indicates Event 1 detected */ +#define E0_RISE (1<<1) /* Indicates direction of Event 0 */ +#define E0_DET (1<<0) /* Indicates Event 0 detected */ + +/* Bit definitions for the PTP_TRIG register */ +#define TRIG_PULSE (1<<15) /* generate a Pulse rather than a single edge */ +#define TRIG_PER (1<<14) /* generate a periodic signal */ +#define TRIG_IF_LATE (1<<13) /* trigger immediately if already past */ +#define TRIG_NOTIFY (1<<12) /* Trigger Notification Enable */ +#define TRIG_GPIO_SHIFT (8) /* Trigger GPIO Connection, value 1-12 */ +#define TRIG_GPIO_MASK (0xf) +#define TRIG_TOGGLE (1<<7) /* Trigger Toggle Mode Enable */ +#define TRIG_CSEL_SHIFT (1) /* Trigger Configuration Select */ +#define TRIG_CSEL_MASK (0x7) +#define TRIG_WR (1<<0) /* Trigger Configuration Write */ + +/* Bit definitions for the PTP_EVNT register */ +#define EVNT_RISE (1<<14) /* Event Rise Detect Enable */ +#define EVNT_FALL (1<<13) /* Event Fall Detect Enable */ +#define EVNT_SINGLE (1<<12) /* enable single event capture operation */ +#define EVNT_GPIO_SHIFT (8) /* Event GPIO Connection, value 1-12 */ +#define EVNT_GPIO_MASK (0xf) +#define EVNT_SEL_SHIFT (1) /* Event Select */ +#define EVNT_SEL_MASK (0x7) +#define EVNT_WR (1<<0) /* Event Configuration Write */ + +/* Bit definitions for the PTP_TXCFG0 register */ +#define SYNC_1STEP (1<<15) /* insert timestamp into transmit Sync Messages */ +#define DR_INSERT (1<<13) /* Insert Delay_Req Timestamp in Delay_Resp (dangerous) */ 
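
The TX_PTP_VER, TX_L2_EN and TX_IPV4_EN/TX_IPV6_EN style bits defined in this header are exactly what dp83640_hwtstamp() programs into PTP_TXCFG0/PTP_RXCFG0 when userspace issues the SIOCSHWTSTAMP ioctl handled earlier in this patch. The following is a minimal userspace sketch of that configuration path, not part of the patch: it assumes the MAC driver forwards SIOCSHWTSTAMP to the PHY (for example via phy_mii_ioctl()), and the interface name "eth0" is only an example.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>	/* SIOCSHWTSTAMP */
#include <linux/net_tstamp.h>	/* struct hwtstamp_config, HWTSTAMP_*, SOF_* */

#ifndef SO_TIMESTAMPING
#define SO_TIMESTAMPING 37	/* fallback if the libc headers lack it */
#endif

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd, flags;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return 1;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;			/* -> dp83640->hwts_tx_en = 1 */
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;	/* -> LAYER4|LAYER2, version 2 */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* example interface name */
	ifr.ifr_data = (void *)&cfg;
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		return 1;
	}

	/* Ask the stack to hand back the stamps that dp83640_rxtstamp() and
	 * dp83640_txtstamp() attach to the skbs. */
	flags = SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags)) < 0)
		perror("SO_TIMESTAMPING");

	close(fd);
	return 0;
}

With this in place, the hardware stamps reach the application as SCM_TIMESTAMPING control messages on the socket.
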
+#define NTP_TS_EN (1<<12) /* Enable Timestamping of NTP Packets */ +#define IGNORE_2STEP (1<<11) /* Ignore Two_Step flag for One-Step operation */ +#define CRC_1STEP (1<<10) /* Disable checking of CRC for One-Step operation */ +#define CHK_1STEP (1<<9) /* Enable UDP Checksum correction for One-Step Operation */ +#define IP1588_EN (1<<8) /* Enable IEEE 1588 defined IP address filter */ +#define TX_L2_EN (1<<7) /* Layer2 Timestamp Enable */ +#define TX_IPV6_EN (1<<6) /* IPv6 Timestamp Enable */ +#define TX_IPV4_EN (1<<5) /* IPv4 Timestamp Enable */ +#define TX_PTP_VER_SHIFT (1) /* Enable Timestamp capture for IEEE 1588 version X */ +#define TX_PTP_VER_MASK (0xf) +#define TX_TS_EN (1<<0) /* Transmit Timestamp Enable */ + +/* Bit definitions for the PTP_TXCFG1 register */ +#define BYTE0_MASK_SHIFT (8) /* Bit mask to be used for matching Byte0 of the PTP Message */ +#define BYTE0_MASK_MASK (0xff) +#define BYTE0_DATA_SHIFT (0) /* Data to be used for matching Byte0 of the PTP Message */ +#define BYTE0_DATA_MASK (0xff) + +/* Bit definitions for the PSF_CFG0 register */ +#define MAC_SRC_ADD_SHIFT (11) /* Status Frame Mac Source Address */ +#define MAC_SRC_ADD_MASK (0x3) +#define MIN_PRE_SHIFT (8) /* Status Frame Minimum Preamble */ +#define MIN_PRE_MASK (0x7) +#define PSF_ENDIAN (1<<7) /* Status Frame Endian Control */ +#define PSF_IPV4 (1<<6) /* Status Frame IPv4 Enable */ +#define PSF_PCF_RD (1<<5) /* Control Frame Read PHY Status Frame Enable */ +#define PSF_ERR_EN (1<<4) /* Error PHY Status Frame Enable */ +#define PSF_TXTS_EN (1<<3) /* Transmit Timestamp PHY Status Frame Enable */ +#define PSF_RXTS_EN (1<<2) /* Receive Timestamp PHY Status Frame Enable */ +#define PSF_TRIG_EN (1<<1) /* Trigger PHY Status Frame Enable */ +#define PSF_EVNT_EN (1<<0) /* Event PHY Status Frame Enable */ + +/* Bit definitions for the PTP_RXCFG0 register */ +#define DOMAIN_EN (1<<15) /* Domain Match Enable */ +#define ALT_MAST_DIS (1<<14) /* Alternate Master Timestamp Disable */ +#define USER_IP_SEL (1<<13) /* Selects portion of IP address accessible thru PTP_RXCFG2 */ +#define USER_IP_EN (1<<12) /* Enable User-programmed IP address filter */ +#define RX_SLAVE (1<<11) /* Receive Slave Only */ +#define IP1588_EN_SHIFT (8) /* Enable IEEE 1588 defined IP address filters */ +#define IP1588_EN_MASK (0xf) +#define RX_L2_EN (1<<7) /* Layer2 Timestamp Enable */ +#define RX_IPV6_EN (1<<6) /* IPv6 Timestamp Enable */ +#define RX_IPV4_EN (1<<5) /* IPv4 Timestamp Enable */ +#define RX_PTP_VER_SHIFT (1) /* Enable Timestamp capture for IEEE 1588 version X */ +#define RX_PTP_VER_MASK (0xf) +#define RX_TS_EN (1<<0) /* Receive Timestamp Enable */ + +/* Bit definitions for the PTP_RXCFG1 register */ +#define BYTE0_MASK_SHIFT (8) /* Bit mask to be used for matching Byte0 of the PTP Message */ +#define BYTE0_MASK_MASK (0xff) +#define BYTE0_DATA_SHIFT (0) /* Data to be used for matching Byte0 of the PTP Message */ +#define BYTE0_DATA_MASK (0xff) + +/* Bit definitions for the PTP_RXCFG3 register */ +#define TS_MIN_IFG_SHIFT (12) /* Minimum Inter-frame Gap */ +#define TS_MIN_IFG_MASK (0xf) +#define ACC_UDP (1<<11) /* Record Timestamp if UDP Checksum Error */ +#define ACC_CRC (1<<10) /* Record Timestamp if CRC Error */ +#define TS_APPEND (1<<9) /* Append Timestamp for L2 */ +#define TS_INSERT (1<<8) /* Enable Timestamp Insertion */ +#define PTP_DOMAIN_SHIFT (0) /* PTP Message domainNumber field */ +#define PTP_DOMAIN_MASK (0xff) + +/* Bit definitions for the PTP_RXCFG4 register */ +#define IPV4_UDP_MOD (1<<15) /* Enable IPV4 UDP 
Modification */ +#define TS_SEC_EN (1<<14) /* Enable Timestamp Seconds */ +#define TS_SEC_LEN_SHIFT (12) /* Inserted Timestamp Seconds Length */ +#define TS_SEC_LEN_MASK (0x3) +#define RXTS_NS_OFF_SHIFT (6) /* Receive Timestamp Nanoseconds offset */ +#define RXTS_NS_OFF_MASK (0x3f) +#define RXTS_SEC_OFF_SHIFT (0) /* Receive Timestamp Seconds offset */ +#define RXTS_SEC_OFF_MASK (0x3f) + +/* Bit definitions for the PTP_COC register */ +#define PTP_CLKOUT_EN (1<<15) /* PTP Clock Output Enable */ +#define PTP_CLKOUT_SEL (1<<14) /* PTP Clock Output Source Select */ +#define PTP_CLKOUT_SPEEDSEL (1<<13) /* PTP Clock Output I/O Speed Select */ +#define PTP_CLKDIV_SHIFT (0) /* PTP Clock Divide-by Value */ +#define PTP_CLKDIV_MASK (0xff) + +/* Bit definitions for the PSF_CFG1 register */ +#define PTPRESERVED_SHIFT (12) /* PTP v2 reserved field */ +#define PTPRESERVED_MASK (0xf) +#define VERSIONPTP_SHIFT (8) /* PTP v2 versionPTP field */ +#define VERSIONPTP_MASK (0xf) +#define TRANSPORT_SPECIFIC_SHIFT (4) /* PTP v2 Header transportSpecific field */ +#define TRANSPORT_SPECIFIC_MASK (0xf) +#define MESSAGETYPE_SHIFT (0) /* PTP v2 messageType field */ +#define MESSAGETYPE_MASK (0xf) + +/* Bit definitions for the PTP_SFDCFG register */ +#define TX_SFD_GPIO_SHIFT (4) /* TX SFD GPIO Select, value 1-12 */ +#define TX_SFD_GPIO_MASK (0xf) +#define RX_SFD_GPIO_SHIFT (0) /* RX SFD GPIO Select, value 1-12 */ +#define RX_SFD_GPIO_MASK (0xf) + +/* Bit definitions for the PTP_INTCTL register */ +#define PTP_INT_GPIO_SHIFT (0) /* PTP Interrupt GPIO Select */ +#define PTP_INT_GPIO_MASK (0xf) + +/* Bit definitions for the PTP_CLKSRC register */ +#define CLK_SRC_SHIFT (14) /* PTP Clock Source Select */ +#define CLK_SRC_MASK (0x3) +#define CLK_SRC_PER_SHIFT (0) /* PTP Clock Source Period */ +#define CLK_SRC_PER_MASK (0x7f) + +/* Bit definitions for the PTP_OFF register */ +#define PTP_OFFSET_SHIFT (0) /* PTP Message offset from preceding header */ +#define PTP_OFFSET_MASK (0xff) + +#endif diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index db19332a7d8..f4b01c638a3 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -292,6 +292,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)}, {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)}, {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)}, + {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */ {} }; diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c index 1e980fdd9d7..1e2af96fc29 100644 --- a/drivers/net/tile/tilepro.c +++ b/drivers/net/tile/tilepro.c @@ -1658,11 +1658,9 @@ static int tile_net_stop(struct net_device *dev) while (tile_net_lepp_free_comps(dev, true)) /* loop */; - /* Wipe the EPP queue. */ + /* Wipe the EPP queue, and wait till the stores hit the EPP. */ memset(priv->eq, 0, sizeof(lepp_queue_t)); - - /* Evict the EPP queue. 
*/ - finv_buffer(priv->eq, EQ_SIZE); + mb(); return 0; } @@ -2398,7 +2396,7 @@ static void tile_net_cleanup(void) struct net_device *dev = tile_net_devs[i]; struct tile_net_priv *priv = netdev_priv(dev); unregister_netdev(dev); - finv_buffer(priv->eq, EQ_SIZE); + finv_buffer_remote(priv->eq, EQ_SIZE, 0); __free_pages(priv->eq_pages, EQ_ORDER); free_netdev(dev); } diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 4ab557d0287..cdd3ae48610 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -54,7 +54,7 @@ #include <linux/usb/usbnet.h> #include <linux/usb/cdc.h> -#define DRIVER_VERSION "06-May-2011" +#define DRIVER_VERSION "24-May-2011" /* CDC NCM subclass 3.2.1 */ #define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10 @@ -134,8 +134,6 @@ struct cdc_ncm_ctx { u16 tx_ndp_modulus; u16 tx_seq; u16 connected; - u8 data_claimed; - u8 control_claimed; }; static void cdc_ncm_tx_timeout(unsigned long arg); @@ -460,17 +458,6 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx) del_timer_sync(&ctx->tx_timer); - if (ctx->data_claimed) { - usb_set_intfdata(ctx->data, NULL); - usb_driver_release_interface(driver_of(ctx->intf), ctx->data); - } - - if (ctx->control_claimed) { - usb_set_intfdata(ctx->control, NULL); - usb_driver_release_interface(driver_of(ctx->intf), - ctx->control); - } - if (ctx->tx_rem_skb != NULL) { dev_kfree_skb_any(ctx->tx_rem_skb); ctx->tx_rem_skb = NULL; @@ -495,7 +482,7 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); if (ctx == NULL) - goto error; + return -ENODEV; memset(ctx, 0, sizeof(*ctx)); @@ -568,46 +555,36 @@ advance: /* check if we got everything */ if ((ctx->control == NULL) || (ctx->data == NULL) || - (ctx->ether_desc == NULL)) + (ctx->ether_desc == NULL) || (ctx->control != intf)) goto error; /* claim interfaces, if any */ - if (ctx->data != intf) { - temp = usb_driver_claim_interface(driver, ctx->data, dev); - if (temp) - goto error; - ctx->data_claimed = 1; - } - - if (ctx->control != intf) { - temp = usb_driver_claim_interface(driver, ctx->control, dev); - if (temp) - goto error; - ctx->control_claimed = 1; - } + temp = usb_driver_claim_interface(driver, ctx->data, dev); + if (temp) + goto error; iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber; /* reset data interface */ temp = usb_set_interface(dev->udev, iface_no, 0); if (temp) - goto error; + goto error2; /* initialize data interface */ if (cdc_ncm_setup(ctx)) - goto error; + goto error2; /* configure data interface */ temp = usb_set_interface(dev->udev, iface_no, 1); if (temp) - goto error; + goto error2; cdc_ncm_find_endpoints(ctx, ctx->data); cdc_ncm_find_endpoints(ctx, ctx->control); if ((ctx->in_ep == NULL) || (ctx->out_ep == NULL) || (ctx->status_ep == NULL)) - goto error; + goto error2; dev->net->ethtool_ops = &cdc_ncm_ethtool_ops; @@ -617,7 +594,7 @@ advance: temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress); if (temp) - goto error; + goto error2; dev_info(&dev->udev->dev, "MAC-Address: " "0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n", @@ -642,38 +619,38 @@ advance: ctx->tx_speed = ctx->rx_speed = 0; return 0; +error2: + usb_set_intfdata(ctx->control, NULL); + usb_set_intfdata(ctx->data, NULL); + usb_driver_release_interface(driver, ctx->data); error: cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]); dev->data[0] = 0; - dev_info(&dev->udev->dev, "Descriptor failure\n"); + dev_info(&dev->udev->dev, "bind() failure\n"); return -ENODEV; } static void cdc_ncm_unbind(struct usbnet *dev, 
struct usb_interface *intf) { struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; - struct usb_driver *driver; + struct usb_driver *driver = driver_of(intf); if (ctx == NULL) return; /* no setup */ - driver = driver_of(intf); - - usb_set_intfdata(ctx->data, NULL); - usb_set_intfdata(ctx->control, NULL); - usb_set_intfdata(ctx->intf, NULL); - - /* release interfaces, if any */ - if (ctx->data_claimed) { + /* disconnect master --> disconnect slave */ + if (intf == ctx->control && ctx->data) { + usb_set_intfdata(ctx->data, NULL); usb_driver_release_interface(driver, ctx->data); - ctx->data_claimed = 0; - } + ctx->data = NULL; - if (ctx->control_claimed) { + } else if (intf == ctx->data && ctx->control) { + usb_set_intfdata(ctx->control, NULL); usb_driver_release_interface(driver, ctx->control); - ctx->control_claimed = 0; + ctx->control = NULL; } + usb_set_intfdata(ctx->intf, NULL); cdc_ncm_free(ctx); } diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h index d7227539484..0f1f05f6c4f 100644 --- a/drivers/net/via-velocity.h +++ b/drivers/net/via-velocity.h @@ -1096,7 +1096,7 @@ struct mac_regs { volatile __le16 PatternCRC[8]; /* 0xB0 */ volatile __le32 ByteMask[4][4]; /* 0xC0 */ -} __packed; +}; enum hw_mib { diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index a70c512f05d..55cf71fbffe 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -4501,17 +4501,15 @@ static int setup_proc_entry( struct net_device *dev, struct proc_dir_entry *entry; /* First setup the device directory */ strcpy(apriv->proc_name,dev->name); - apriv->proc_entry = create_proc_entry(apriv->proc_name, - S_IFDIR|airo_perm, - airo_entry); + apriv->proc_entry = proc_mkdir_mode(apriv->proc_name, airo_perm, + airo_entry); if (!apriv->proc_entry) goto fail; apriv->proc_entry->uid = proc_uid; apriv->proc_entry->gid = proc_gid; /* Setup the StatsDelta */ - entry = proc_create_data("StatsDelta", - S_IFREG | (S_IRUGO&proc_perm), + entry = proc_create_data("StatsDelta", S_IRUGO & proc_perm, apriv->proc_entry, &proc_statsdelta_ops, dev); if (!entry) goto fail_stats_delta; @@ -4519,8 +4517,7 @@ static int setup_proc_entry( struct net_device *dev, entry->gid = proc_gid; /* Setup the Stats */ - entry = proc_create_data("Stats", - S_IFREG | (S_IRUGO&proc_perm), + entry = proc_create_data("Stats", S_IRUGO & proc_perm, apriv->proc_entry, &proc_stats_ops, dev); if (!entry) goto fail_stats; @@ -4528,8 +4525,7 @@ static int setup_proc_entry( struct net_device *dev, entry->gid = proc_gid; /* Setup the Status */ - entry = proc_create_data("Status", - S_IFREG | (S_IRUGO&proc_perm), + entry = proc_create_data("Status", S_IRUGO & proc_perm, apriv->proc_entry, &proc_status_ops, dev); if (!entry) goto fail_status; @@ -4537,8 +4533,7 @@ static int setup_proc_entry( struct net_device *dev, entry->gid = proc_gid; /* Setup the Config */ - entry = proc_create_data("Config", - S_IFREG | proc_perm, + entry = proc_create_data("Config", proc_perm, apriv->proc_entry, &proc_config_ops, dev); if (!entry) goto fail_config; @@ -4546,8 +4541,7 @@ static int setup_proc_entry( struct net_device *dev, entry->gid = proc_gid; /* Setup the SSID */ - entry = proc_create_data("SSID", - S_IFREG | proc_perm, + entry = proc_create_data("SSID", proc_perm, apriv->proc_entry, &proc_SSID_ops, dev); if (!entry) goto fail_ssid; @@ -4555,8 +4549,7 @@ static int setup_proc_entry( struct net_device *dev, entry->gid = proc_gid; /* Setup the APList */ - entry = proc_create_data("APList", - S_IFREG | proc_perm, + 
entry = proc_create_data("APList", proc_perm, apriv->proc_entry, &proc_APList_ops, dev); if (!entry) goto fail_aplist; @@ -4564,8 +4557,7 @@ static int setup_proc_entry( struct net_device *dev, entry->gid = proc_gid; /* Setup the BSSList */ - entry = proc_create_data("BSSList", - S_IFREG | proc_perm, + entry = proc_create_data("BSSList", proc_perm, apriv->proc_entry, &proc_BSSList_ops, dev); if (!entry) goto fail_bsslist; @@ -4573,8 +4565,7 @@ static int setup_proc_entry( struct net_device *dev, entry->gid = proc_gid; /* Setup the WepKey */ - entry = proc_create_data("WepKey", - S_IFREG | proc_perm, + entry = proc_create_data("WepKey", proc_perm, apriv->proc_entry, &proc_wepkey_ops, dev); if (!entry) goto fail_wepkey; @@ -5706,9 +5697,7 @@ static int __init airo_init_module( void ) { int i; - airo_entry = create_proc_entry("driver/aironet", - S_IFDIR | airo_perm, - NULL); + airo_entry = proc_mkdir_mode("driver/aironet", airo_perm, NULL); if (airo_entry) { airo_entry->uid = proc_uid; diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c index 61956392f2d..5b49cd03bfd 100644 --- a/drivers/net/wireless/ath/ath9k/ahb.c +++ b/drivers/net/wireless/ath/ath9k/ahb.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org> * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org> * diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c index 5a1f4f511bc..bfb6481f01f 100644 --- a/drivers/net/wireless/ath/ath9k/ani.c +++ b/drivers/net/wireless/ath/ath9k/ani.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2010 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h index 0cd6783de88..dbab5b9ce49 100644 --- a/drivers/net/wireless/ath/ath9k/ani.h +++ b/drivers/net/wireless/ath/ath9k/ani.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h index 36f7d0639db..234617c948a 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Atheros Communications Inc. + * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c index 4bf9dab4f2b..441bb33f17a 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2010 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h index 69a94c7e45c..6d2e2f3303f 100644 --- a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Atheros Communications Inc. + * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c index cb611b287b3..015d9743993 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2010 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c index f44c84ab5dc..f344cc2b3d5 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2010 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h index 6203eed860d..7573257731b 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Atheros Communications Inc. + * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c index 7a332f16b79..077e8a6983f 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c index a57e963cf0d..2fe0a34cbab 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2010 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. 
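
The airo.c hunks above replace create_proc_entry(S_IFDIR | ...) with proc_mkdir_mode() and drop the redundant S_IFREG from the proc_create_data() calls, since both helpers take plain permission bits. The following is a minimal, self-contained sketch of that pair of procfs helpers with hypothetical "driver/example" entries; it is illustrative only and not the airo code.

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static struct proc_dir_entry *example_dir;

static int example_show(struct seq_file *m, void *v)
{
	seq_printf(m, "hello\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, NULL);
}

static const struct file_operations example_fops = {
	.owner   = THIS_MODULE,
	.open    = example_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init example_init(void)
{
	/* Directory: mode only, no S_IFDIR needed. */
	example_dir = proc_mkdir_mode("driver/example", 0555, NULL);
	if (!example_dir)
		return -ENOMEM;

	/* File: likewise plain permission bits, no S_IFREG. */
	if (!proc_create_data("status", 0444, example_dir,
			      &example_fops, NULL)) {
		remove_proc_entry("driver/example", NULL);
		return -ENOMEM;
	}
	return 0;
}

static void __exit example_exit(void)
{
	remove_proc_entry("status", example_dir);
	remove_proc_entry("driver/example", NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
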
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h index 47780ef1c89..453af6dc514 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2010 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h index f915a3dbfca..e8ac70da5ac 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Atheros Communications Inc. + * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c index f276cb922b4..f48051c5009 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Atheros Communications Inc. + * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index d985841ff40..0ca7635d066 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Atheros Communications Inc. + * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h index afb0b5ee186..ab21a491598 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h @@ -1,3 +1,19 @@ +/* + * Copyright (c) 2010-2011 Atheros Communications Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + #ifndef AR9003_EEPROM_H #define AR9003_EEPROM_H diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c index a55eddbb258..392bf0f8ff1 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2010 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c index be6adec33dd..10d71f7d3fc 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Atheros Communications Inc. + * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.h b/drivers/net/wireless/ath/ath9k/ar9003_mac.h index 45cc7e80436..c50449387bf 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Atheros Communications Inc. + * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c index 356d2fd7882..e4d6a87ec53 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Atheros Communications Inc. + * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 25f3c2fdf2b..eee23ecd118 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Atheros Communications Inc. + * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h index c7505b48e5c..443090d278e 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2002-2010 Atheros Communications, Inc. + * Copyright (c) 2010-2011 Atheros Communications, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h index fbdde29f0ab..611ea6ce850 100644 --- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Atheros Communications Inc. 
+ * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 03b37d7be1c..f75068b4b31 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -397,6 +397,9 @@ struct ath_beacon { struct ath_descdma bdma; struct ath_txq *cabq; struct list_head bbuf; + + bool tx_processed; + bool tx_last; }; void ath_beacon_tasklet(unsigned long data); diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c index 637dbc5f7b6..d4d8ceced89 100644 --- a/drivers/net/wireless/ath/ath9k/beacon.c +++ b/drivers/net/wireless/ath/ath9k/beacon.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -18,6 +18,12 @@ #define FUDGE 2 +static void ath9k_reset_beacon_status(struct ath_softc *sc) +{ + sc->beacon.tx_processed = false; + sc->beacon.tx_last = false; +} + /* * This function will modify certain transmit queue properties depending on * the operating mode of the station (AP or AdHoc). Parameters are AIFS @@ -72,6 +78,8 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp, struct ieee80211_supported_band *sband; u8 rate = 0; + ath9k_reset_beacon_status(sc); + ds = bf->bf_desc; flags = ATH9K_TXDESC_NOACK; @@ -134,6 +142,8 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw, struct ieee80211_tx_info *info; int cabq_depth; + ath9k_reset_beacon_status(sc); + avp = (void *)vif->drv_priv; cabq = sc->beacon.cabq; @@ -351,9 +361,7 @@ void ath_beacon_tasklet(unsigned long data) struct ath_buf *bf = NULL; struct ieee80211_vif *vif; int slot; - u32 bfaddr, bc = 0, tsftu; - u64 tsf; - u16 intval; + u32 bfaddr, bc = 0; /* * Check if the previous beacon has gone out. If @@ -388,17 +396,27 @@ void ath_beacon_tasklet(unsigned long data) * on the tsf to safeguard against missing an swba. */ - intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL; - tsf = ath9k_hw_gettsf64(ah); - tsf += TU_TO_USEC(ah->config.sw_beacon_response_time); - tsftu = TSF_TO_TU((tsf * ATH_BCBUF) >>32, tsf * ATH_BCBUF); - slot = (tsftu % (intval * ATH_BCBUF)) / intval; - vif = sc->beacon.bslot[slot]; + if (ah->opmode == NL80211_IFTYPE_AP) { + u16 intval; + u32 tsftu; + u64 tsf; + + intval = cur_conf->beacon_interval ? 
: ATH_DEFAULT_BINTVAL; + tsf = ath9k_hw_gettsf64(ah); + tsf += TU_TO_USEC(ah->config.sw_beacon_response_time); + tsftu = TSF_TO_TU((tsf * ATH_BCBUF) >>32, tsf * ATH_BCBUF); + slot = (tsftu % (intval * ATH_BCBUF)) / intval; + vif = sc->beacon.bslot[slot]; + + ath_dbg(common, ATH_DBG_BEACON, + "slot %d [tsf %llu tsftu %u intval %u] vif %p\n", + slot, tsf, tsftu / ATH_BCBUF, intval, vif); + } else { + slot = 0; + vif = sc->beacon.bslot[slot]; + } - ath_dbg(common, ATH_DBG_BEACON, - "slot %d [tsf %llu tsftu %u intval %u] vif %p\n", - slot, tsf, tsftu / ATH_BCBUF, intval, vif); bfaddr = 0; if (vif) { @@ -636,6 +654,8 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc, struct ath_common *common = ath9k_hw_common(ah); u32 tsf, delta, intval, nexttbtt; + ath9k_reset_beacon_status(sc); + tsf = ath9k_hw_gettsf32(ah) + TU_TO_USEC(FUDGE); intval = TU_TO_USEC(conf->beacon_interval & ATH9K_BEACON_PERIOD); @@ -646,7 +666,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc, delta = (tsf - sc->beacon.bc_tstamp); else delta = (tsf + 1 + (~0U - sc->beacon.bc_tstamp)); - nexttbtt = tsf + roundup(delta, intval); + nexttbtt = tsf + intval - (delta % intval); } ath_dbg(common, ATH_DBG_BEACON, diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c index 23f15a7ca7f..41ce0b13988 100644 --- a/drivers/net/wireless/ath/ath9k/btcoex.c +++ b/drivers/net/wireless/ath/ath9k/btcoex.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009 Atheros Communications Inc. + * Copyright (c) 2009-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h index a9efca83d67..234f77689b1 100644 --- a/drivers/net/wireless/ath/ath9k/btcoex.h +++ b/drivers/net/wireless/ath/ath9k/btcoex.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009 Atheros Communications Inc. + * Copyright (c) 2009-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c index 558b228a717..a1250c586e4 100644 --- a/drivers/net/wireless/ath/ath9k/calib.c +++ b/drivers/net/wireless/ath/ath9k/calib.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h index 4420780fa3b..1bef41d1b1f 100644 --- a/drivers/net/wireless/ath/ath9k/calib.h +++ b/drivers/net/wireless/ath/ath9k/calib.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c index 74535e6dfb8..fa6bd2d189e 100644 --- a/drivers/net/wireless/ath/ath9k/common.c +++ b/drivers/net/wireless/ath/ath9k/common.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009 Atheros Communications Inc. 
+ * Copyright (c) 2009-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h index 5124f1420b3..77ec288b5a7 100644 --- a/drivers/net/wireless/ath/ath9k/common.h +++ b/drivers/net/wireless/ath/ath9k/common.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2009 Atheros Communications Inc. + * Copyright (c) 2009-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c index bad1a87249b..d55ffd7d4bd 100644 --- a/drivers/net/wireless/ath/ath9k/debug.c +++ b/drivers/net/wireless/ath/ath9k/debug.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -435,6 +435,7 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf, conf->channel_type, channel_type_str(conf->channel_type)); + ath9k_ps_wakeup(sc); put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_STA_ID0), addr); put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_STA_ID1) & 0xffff, addr + 4); len += snprintf(buf + len, sizeof(buf) - len, @@ -444,6 +445,7 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf, len += snprintf(buf + len, sizeof(buf) - len, "addrmask: %pM\n", addr); tmp = ath9k_hw_getrxfilter(sc->sc_ah); + ath9k_ps_restore(sc); len += snprintf(buf + len, sizeof(buf) - len, "rfilt: 0x%x", tmp); if (tmp & ATH9K_RX_FILTER_UCAST) @@ -725,6 +727,7 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf, break; } + ath9k_ps_wakeup(sc); len += snprintf(buf + len, size - len, "curbssid: %pM\n" "OP-Mode: %s(%i)\n" @@ -734,6 +737,7 @@ static ssize_t read_file_misc(struct file *file, char __user *user_buf, REG_READ(ah, AR_BEACON_PERIOD)); reg = REG_READ(ah, AR_TIMER_MODE); + ath9k_ps_restore(sc); len += snprintf(buf + len, size - len, "Timer-Mode-Register: 0x%x (", reg); if (reg & AR_TBTT_TIMER_EN) @@ -1050,7 +1054,9 @@ static ssize_t read_file_regval(struct file *file, char __user *user_buf, unsigned int len; u32 regval; + ath9k_ps_wakeup(sc); regval = REG_READ_D(ah, sc->debug.regidx); + ath9k_ps_restore(sc); len = sprintf(buf, "0x%08x\n", regval); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -1072,7 +1078,9 @@ static ssize_t write_file_regval(struct file *file, const char __user *user_buf, if (strict_strtoul(buf, 0, ®val)) return -EINVAL; + ath9k_ps_wakeup(sc); REG_WRITE_D(ah, sc->debug.regidx, regval); + ath9k_ps_restore(sc); return count; } diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h index 5488a324cc1..8ce6ad80f4e 100644 --- a/drivers/net/wireless/ath/ath9k/debug.h +++ b/drivers/net/wireless/ath/ath9k/debug.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. 
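
The ad-hoc beacon hunk above replaces nexttbtt = tsf + roundup(delta, intval) with nexttbtt = tsf + intval - (delta % intval), where delta = tsf - bc_tstamp. The new expression keeps the next target beacon time on the TBTT grid anchored at the last received beacon timestamp, which the old form generally did not. A tiny stand-alone illustration with made-up numbers (not part of the patch):

#include <stdio.h>

/* Same definition as the kernel's roundup() macro. */
#define ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
	/* Made-up values: beacon interval 100, last beacon seen at 1000,
	 * current TSF 1234; the units do not matter for the comparison. */
	unsigned int bc_tstamp = 1000, tsf = 1234, intval = 100;
	unsigned int delta = tsf - bc_tstamp;			/* 234 */

	unsigned int old_tbtt = tsf + ROUNDUP(delta, intval);	/* 1534: 534 past bc_tstamp, off the grid */
	unsigned int new_tbtt = tsf + intval - (delta % intval);/* 1300 = bc_tstamp + 3 * intval */

	printf("old=%u new=%u\n", old_tbtt, new_tbtt);
	return 0;
}
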
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c index 8c18bed3a55..e61404dda8c 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.c +++ b/drivers/net/wireless/ath/ath9k/eeprom.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h index 3e316133f11..de99c0da52e 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.h +++ b/drivers/net/wireless/ath/ath9k/eeprom.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c index 6f714dd7236..5b1e894f3d6 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c index b87db476309..7856f0d4512 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c index c031854b569..17f0a680620 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_def.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c index 0349b3a1cc5..bc713fc2819 100644 --- a/drivers/net/wireless/ath/ath9k/gpio.c +++ b/drivers/net/wireless/ath/ath9k/gpio.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 2e3a33a5340..260f1f37a60 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Atheros Communications Inc. 
+ * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h index 2bdcdbc14b1..794f63094e5 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.h +++ b/drivers/net/wireless/ath/ath9k/hif_usb.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Atheros Communications Inc. + * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -18,7 +18,7 @@ #define HTC_USB_H #define MAJOR_VERSION_REQ 1 -#define MINOR_VERSION_REQ 2 +#define MINOR_VERSION_REQ 3 #define IS_AR7010_DEVICE(_v) (((_v) == AR9280_USB) || ((_v) == AR9287_USB)) diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h index dfc7a982fc7..5bc022087e6 100644 --- a/drivers/net/wireless/ath/ath9k/htc.h +++ b/drivers/net/wireless/ath/ath9k/htc.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Atheros Communications Inc. + * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -46,15 +46,8 @@ extern struct ieee80211_ops ath9k_htc_ops; extern int htc_modparam_nohwcrypt; enum htc_phymode { - HTC_MODE_AUTO = 0, - HTC_MODE_11A = 1, - HTC_MODE_11B = 2, - HTC_MODE_11G = 3, - HTC_MODE_FH = 4, - HTC_MODE_TURBO_A = 5, - HTC_MODE_TURBO_G = 6, - HTC_MODE_11NA = 7, - HTC_MODE_11NG = 8 + HTC_MODE_11NA = 0, + HTC_MODE_11NG = 1 }; enum htc_opmode { @@ -123,18 +116,13 @@ struct ath9k_htc_target_vif { u8 pad; } __packed; -#define ATH_HTC_STA_AUTH 0x0001 -#define ATH_HTC_STA_QOS 0x0002 -#define ATH_HTC_STA_ERP 0x0004 -#define ATH_HTC_STA_HT 0x0008 - struct ath9k_htc_target_sta { u8 macaddr[ETH_ALEN]; u8 bssid[ETH_ALEN]; u8 sta_index; u8 vif_index; u8 is_vif_sta; - __be16 flags; /* ATH_HTC_STA_* */ + __be16 flags; __be16 htcap; __be16 maxampdu; u8 pad; @@ -285,9 +273,9 @@ struct ath9k_htc_rx { }; #define ATH9K_HTC_TX_CLEANUP_INTERVAL 50 /* ms */ -#define ATH9K_HTC_TX_TIMEOUT_INTERVAL 2500 /* ms */ +#define ATH9K_HTC_TX_TIMEOUT_INTERVAL 3000 /* ms */ #define ATH9K_HTC_TX_RESERVE 10 -#define ATH9K_HTC_TX_TIMEOUT_COUNT 20 +#define ATH9K_HTC_TX_TIMEOUT_COUNT 40 #define ATH9K_HTC_TX_THRESHOLD (MAX_TX_BUF_NUM - ATH9K_HTC_TX_RESERVE) #define ATH9K_HTC_OP_TX_QUEUES_STOP BIT(0) @@ -450,6 +438,7 @@ struct ath9k_htc_priv { u8 vif_sta_pos[ATH9K_HTC_MAX_VIF]; u8 num_ibss_vif; u8 num_sta_vif; + u8 num_sta_assoc_vif; u8 num_ap_vif; u16 op_flags; diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c index 0ded2c66d5f..aa6a7311870 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Atheros Communications Inc. + * Copyright (c) 2010-2011 Atheros Communications Inc. 
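
The ath9k_htc_target_sta structure above keeps its multi-byte members as __be16/__be32 because these command blocks are handed to the target firmware in big-endian byte order, which is why the callers fill them with cpu_to_be16()/cpu_to_be32(). A short sketch of the pattern using a hypothetical command layout (not an actual HTC structure):

#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical command layout, for illustration only. */
struct example_target_cmd {
	u8     sta_index;
	u8     vif_index;
	__be16 flags;		/* big-endian on the wire */
	__be32 ampdu_limit;
} __packed;

void example_fill_cmd(struct example_target_cmd *cmd,
		      u16 flags, u32 ampdu_limit)
{
	cmd->flags = cpu_to_be16(flags);		/* host order -> big endian */
	cmd->ampdu_limit = cpu_to_be32(ampdu_limit);
}
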
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c index af57fe5aab9..db2352e5cc0 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Atheros Communications Inc. + * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index bfdc8a88718..61e6d395071 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Atheros Communications Inc. + * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -258,7 +258,7 @@ static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid, */ if (IS_AR7010_DEVICE(drv_info)) - priv->htc->credits = 48; + priv->htc->credits = 45; else priv->htc->credits = 33; @@ -769,11 +769,6 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv, hw->channel_change_time = 5000; hw->max_listen_interval = 10; - if (AR_SREV_9271(priv->ah)) - hw->max_tx_aggregation_subframes = MAX_TX_AMPDU_SUBFRAMES_9271; - else - hw->max_tx_aggregation_subframes = MAX_TX_AMPDU_SUBFRAMES_7010; - hw->vif_data_size = sizeof(struct ath9k_htc_vif); hw->sta_data_size = sizeof(struct ath9k_htc_sta); diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index 5aa104fe7ee..7b779689543 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Atheros Communications Inc. + * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -26,7 +26,7 @@ static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv, { enum htc_phymode mode; - mode = HTC_MODE_AUTO; + mode = -EINVAL; switch (ichan->chanmode) { case CHANNEL_G: @@ -45,6 +45,8 @@ static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv, break; } + WARN_ON(mode < 0); + return mode; } @@ -500,9 +502,6 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv, tsta.maxampdu = cpu_to_be16(maxampdu); } - if (sta && sta->ht_cap.ht_supported) - tsta.flags = cpu_to_be16(ATH_HTC_STA_HT); - WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta); if (ret) { if (sta) @@ -582,7 +581,7 @@ int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv, memset(&tcap, 0, sizeof(struct ath9k_htc_cap_target)); tcap.ampdu_limit = cpu_to_be32(0xffff); - tcap.ampdu_subframes = priv->hw->max_tx_aggregation_subframes; + tcap.ampdu_subframes = 0xff; tcap.enable_coex = enable_coex; tcap.tx_chainmask = priv->ah->caps.tx_chainmask; @@ -1165,6 +1164,8 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw, ath9k_htc_set_opmode(priv); + ath9k_htc_set_bssid_mask(priv, vif); + /* * Stop ANI only if there are no associated station interfaces. 
*/ @@ -1435,6 +1436,37 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw, return ret; } +static void ath9k_htc_set_bssid(struct ath9k_htc_priv *priv) +{ + struct ath_common *common = ath9k_hw_common(priv->ah); + + ath9k_hw_write_associd(priv->ah); + ath_dbg(common, ATH_DBG_CONFIG, + "BSSID: %pM aid: 0x%x\n", + common->curbssid, common->curaid); +} + +static void ath9k_htc_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif) +{ + struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data; + struct ath_common *common = ath9k_hw_common(priv->ah); + struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + + if ((vif->type == NL80211_IFTYPE_STATION) && bss_conf->assoc) { + common->curaid = bss_conf->aid; + memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN); + } +} + +static void ath9k_htc_choose_set_bssid(struct ath9k_htc_priv *priv) +{ + if (priv->num_sta_assoc_vif == 1) { + ieee80211_iterate_active_interfaces_atomic(priv->hw, + ath9k_htc_bss_iter, priv); + ath9k_htc_set_bssid(priv); + } +} + static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, @@ -1443,43 +1475,32 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw, struct ath9k_htc_priv *priv = hw->priv; struct ath_hw *ah = priv->ah; struct ath_common *common = ath9k_hw_common(ah); - bool set_assoc; mutex_lock(&priv->mutex); ath9k_htc_ps_wakeup(priv); - /* - * Set the HW AID/BSSID only for the first station interface - * or in IBSS mode. - */ - set_assoc = !!((priv->ah->opmode == NL80211_IFTYPE_ADHOC) || - ((priv->ah->opmode == NL80211_IFTYPE_STATION) && - (priv->num_sta_vif == 1))); - - if (changed & BSS_CHANGED_ASSOC) { - if (set_assoc) { - ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n", - bss_conf->assoc); + ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n", + bss_conf->assoc); - common->curaid = bss_conf->assoc ? - bss_conf->aid : 0; + bss_conf->assoc ? + priv->num_sta_assoc_vif++ : priv->num_sta_assoc_vif--; - if (bss_conf->assoc) + if (priv->ah->opmode == NL80211_IFTYPE_STATION) { + if (bss_conf->assoc && (priv->num_sta_assoc_vif == 1)) ath9k_htc_start_ani(priv); - else + else if (priv->num_sta_assoc_vif == 0) ath9k_htc_stop_ani(priv); } } if (changed & BSS_CHANGED_BSSID) { - if (set_assoc) { + if (priv->ah->opmode == NL80211_IFTYPE_ADHOC) { + common->curaid = bss_conf->aid; memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN); - ath9k_hw_write_associd(ah); - - ath_dbg(common, ATH_DBG_CONFIG, - "BSSID: %pM aid: 0x%x\n", - common->curbssid, common->curaid); + ath9k_htc_set_bssid(priv); + } else if (priv->ah->opmode == NL80211_IFTYPE_STATION) { + ath9k_htc_choose_set_bssid(priv); } } diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index a898dac2233..2d81c700e20 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Atheros Communications Inc. + * Copyright (c) 2010-2011 Atheros Communications Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -875,6 +875,7 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv) rfilt |= ATH9K_RX_FILTER_CONTROL; if ((ah->opmode == NL80211_IFTYPE_STATION) && + (priv->nvifs <= 1) && !(priv->rxfilter & FIF_BCN_PRBRESP_PROMISC)) rfilt |= ATH9K_RX_FILTER_MYBEACON; else @@ -888,6 +889,9 @@ u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv) if (priv->rxfilter & FIF_PSPOLL) rfilt |= ATH9K_RX_FILTER_PSPOLL; + if (priv->nvifs > 1) + rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL; + return rfilt; #undef RX_FILTER_PRESERVE diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index cee970fdf65..1b90ed8795c 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Atheros Communications Inc. + * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.h b/drivers/net/wireless/ath/ath9k/htc_hst.h index 91a5305db95..e1ffbb6bd63 100644 --- a/drivers/net/wireless/ath/ath9k/htc_hst.h +++ b/drivers/net/wireless/ath/ath9k/htc_hst.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Atheros Communications Inc. + * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h index 8b8f0445aef..2f3e07263fc 100644 --- a/drivers/net/wireless/ath/ath9k/hw-ops.h +++ b/drivers/net/wireless/ath/ath9k/hw-ops.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Atheros Communications Inc. + * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index b75b5dca4e2..72543ce8f61 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2010 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index 7af2773d2bf..57435ce6279 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2010 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index b172d150951..45c585a337e 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index bd6d2b9d736..c2091f1f409 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h index b60c130917f..8e848c4d16b 100644 --- a/drivers/net/wireless/ath/ath9k/mac.h +++ b/drivers/net/wireless/ath/ath9k/mac.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 17ebdf1e8b7..a198ee374b0 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -2332,6 +2332,45 @@ static bool ath9k_tx_frames_pending(struct ieee80211_hw *hw) return false; } +int ath9k_tx_last_beacon(struct ieee80211_hw *hw) +{ + struct ath_softc *sc = hw->priv; + struct ath_hw *ah = sc->sc_ah; + struct ieee80211_vif *vif; + struct ath_vif *avp; + struct ath_buf *bf; + struct ath_tx_status ts; + int status; + + vif = sc->beacon.bslot[0]; + if (!vif) + return 0; + + avp = (void *)vif->drv_priv; + if (!avp->is_bslot_active) + return 0; + + if (!sc->beacon.tx_processed) { + tasklet_disable(&sc->bcon_tasklet); + + bf = avp->av_bcbuf; + if (!bf || !bf->bf_mpdu) + goto skip; + + status = ath9k_hw_txprocdesc(ah, bf->bf_desc, &ts); + if (status == -EINPROGRESS) + goto skip; + + sc->beacon.tx_processed = true; + sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK); + +skip: + tasklet_enable(&sc->bcon_tasklet); + } + + return sc->beacon.tx_last; +} + struct ieee80211_ops ath9k_ops = { .tx = ath9k_tx, .start = ath9k_start, @@ -2356,4 +2395,5 @@ struct ieee80211_ops ath9k_ops = { .set_coverage_class = ath9k_set_coverage_class, .flush = ath9k_flush, .tx_frames_pending = ath9k_tx_frames_pending, + .tx_last_beacon = ath9k_tx_last_beacon, }; diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 9c65459be10..b8cbfc70721 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. 
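The main.c hunk above adds ath9k_tx_last_beacon(), which reads the beacon TX descriptor at most once, caches the outcome in tx_processed/tx_last, and keeps the beacon tasklet out of the way while it does so. A rough user-space analogue of that poll-once-then-cache shape, with a mutex standing in for tasklet_disable()/tasklet_enable() and a made-up hw_beacon_status() in place of ath9k_hw_txprocdesc():

#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>

#define STATUS_IN_PROGRESS (-115)   /* stand-in for -EINPROGRESS */

struct beacon_state {
	pthread_mutex_t lock;   /* stands in for tasklet_disable()/tasklet_enable() */
	bool tx_processed;      /* descriptor already read?                */
	bool tx_last;           /* cached result: last beacon went out OK  */
};

/* Made-up hardware query; the driver reads the beacon TX descriptor here. */
static int hw_beacon_status(void)
{
	return 0;               /* 0 == completed without errors */
}

static int tx_last_beacon(struct beacon_state *bs)
{
	if (!bs->tx_processed) {
		pthread_mutex_lock(&bs->lock);  /* keep the completion path out */
		int status = hw_beacon_status();
		if (status != STATUS_IN_PROGRESS) {
			bs->tx_processed = true;
			bs->tx_last = (status == 0);
		}
		pthread_mutex_unlock(&bs->lock);
	}
	return bs->tx_last;
}

int main(void)
{
	struct beacon_state bs = { PTHREAD_MUTEX_INITIALIZER, false, false };

	printf("last beacon ok: %d\n", tx_last_beacon(&bs));  /* polls the "hardware" */
	printf("last beacon ok: %d\n", tx_last_beacon(&bs));  /* answered from cache  */
	return 0;
}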
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h index 9441bf8ca2f..8b380305b0f 100644 --- a/drivers/net/wireless/ath/ath9k/phy.h +++ b/drivers/net/wireless/ath/ath9k/phy.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c index 4ccbf2ddb55..17542214c93 100644 --- a/drivers/net/wireless/ath/ath9k/rc.c +++ b/drivers/net/wireless/ath/ath9k/rc.c @@ -1,6 +1,6 @@ /* * Copyright (c) 2004 Video54 Technologies, Inc. - * Copyright (c) 2004-2009 Atheros Communications, Inc. + * Copyright (c) 2004-2011 Atheros Communications, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h index 5d984b8acdb..c3d850207be 100644 --- a/drivers/net/wireless/ath/ath9k/rc.h +++ b/drivers/net/wireless/ath/ath9k/rc.h @@ -1,7 +1,7 @@ /* * Copyright (c) 2004 Sam Leffler, Errno Consulting * Copyright (c) 2004 Video54 Technologies, Inc. - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 4f52e0429f9..07e35e59c9e 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h index 456f3ec20fe..c18ee9921fb 100644 --- a/drivers/net/wireless/ath/ath9k/reg.h +++ b/drivers/net/wireless/ath/ath9k/reg.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c index f9b1eb4853c..35422fc1f2c 100644 --- a/drivers/net/wireless/ath/ath9k/wmi.c +++ b/drivers/net/wireless/ath/ath9k/wmi.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Atheros Communications Inc. + * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h index 6095eeb6e02..fde6da619f3 100644 --- a/drivers/net/wireless/ath/ath9k/wmi.h +++ b/drivers/net/wireless/ath/ath9k/wmi.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2010 Atheros Communications Inc. 
+ * Copyright (c) 2010-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 97dd1fac98b..3779b8977d4 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2008-2009 Atheros Communications Inc. + * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h index bb578690935..4da01a9f568 100644 --- a/drivers/net/wireless/ath/carl9170/carl9170.h +++ b/drivers/net/wireless/ath/carl9170/carl9170.h @@ -286,6 +286,10 @@ struct ar9170 { unsigned int tx_seq_table; } fw; + /* interface configuration combinations */ + struct ieee80211_iface_limit if_comb_limits[1]; + struct ieee80211_iface_combination if_combs[1]; + /* reset / stuck frames/queue detection */ struct work_struct restart_work; struct work_struct ping_work; diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c index 9517ede9e2d..221957c5d37 100644 --- a/drivers/net/wireless/ath/carl9170/fw.c +++ b/drivers/net/wireless/ath/carl9170/fw.c @@ -151,6 +151,7 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) const struct carl9170fw_chk_desc *chk_desc; const struct carl9170fw_last_desc *last_desc; const struct carl9170fw_txsq_desc *txsq_desc; + u16 if_comb_types; last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC, sizeof(*last_desc), CARL9170FW_LAST_DESC_CUR_VER); @@ -268,6 +269,9 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) if (SUPP(CARL9170FW_WOL)) device_set_wakeup_enable(&ar->udev->dev, true); + if_comb_types = BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_P2P_CLIENT); + ar->fw.vif_num = otus_desc->vif_num; ar->fw.cmd_bufs = otus_desc->cmd_bufs; ar->fw.address = le32_to_cpu(otus_desc->fw_address); @@ -294,12 +298,25 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len) ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); if (SUPP(CARL9170FW_WLANTX_CAB)) { - ar->hw->wiphy->interface_modes |= + if_comb_types |= BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_GO); } } + ar->if_comb_limits[0].max = ar->fw.vif_num; + ar->if_comb_limits[0].types = if_comb_types; + + ar->if_combs[0].num_different_channels = 1; + ar->if_combs[0].max_interfaces = ar->fw.vif_num; + ar->if_combs[0].limits = ar->if_comb_limits; + ar->if_combs[0].n_limits = ARRAY_SIZE(ar->if_comb_limits); + + ar->hw->wiphy->iface_combinations = ar->if_combs; + ar->hw->wiphy->n_iface_combinations = ARRAY_SIZE(ar->if_combs); + + ar->hw->wiphy->interface_modes |= if_comb_types; + txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC, sizeof(*txsq_desc), CARL9170FW_TXSQ_DESC_CUR_VER); diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index 7d5c65ea94e..54d093c2ab4 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c @@ -1570,14 +1570,8 @@ void *carl9170_alloc(size_t priv_size) INIT_LIST_HEAD(&ar->vif_list); init_completion(&ar->tx_flush); - /* - * Note: - * IBSS/ADHOC and AP mode are only enabled, if the firmware - * supports these 
modes. The code which will add the - * additional interface_modes is in fw.c. - */ - hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_P2P_CLIENT); + /* firmware decides which modes we support */ + hw->wiphy->interface_modes = 0; hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS | IEEE80211_HW_REPORTS_TX_ACK_STATUS | diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c index cc11d66f15b..3f508e59f14 100644 --- a/drivers/net/wireless/ath/hw.c +++ b/drivers/net/wireless/ath/hw.c @@ -43,7 +43,7 @@ * set of ~ ( MAC XOR BSSID ) for all bssids we handle. * * When you do this you are essentially computing the common bits of all your - * BSSes. Later it is assumed the harware will "and" (&) the BSSID mask with + * BSSes. Later it is assumed the hardware will "and" (&) the BSSID mask with * the MAC address to obtain the relevant bits and compare the result with * (frame's BSSID & mask) to see if they match. * @@ -71,8 +71,8 @@ * On loop iteration for BSSID-02: * bssid_mask &= ~(0001 ^ 1001) * bssid_mask = (1010) & ~(0001 ^ 1001) - * bssid_mask = (1010) & ~(1001) - * bssid_mask = (1010) & (0110) + * bssid_mask = (1010) & ~(1000) + * bssid_mask = (1010) & (0111) * bssid_mask = 0010 * * A bssid_mask of 0010 means "only pay attention to the second least @@ -102,11 +102,9 @@ * * IFRAME-02: 0001 (we should allow) * - * allow = (0001 & 1010) == 1010 - * * allow = (IFRAME-02 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0; * --> allow = (0001 & 0010) == (0010 & 0001) ? 1 :0; - * --> allow = (0010) == (0010) + * --> allow = (0000) == (0000) * --> allow = 1 * * Other examples: diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h index ebc93c1bb5e..25a78cfb7d1 100644 --- a/drivers/net/wireless/b43/b43.h +++ b/drivers/net/wireless/b43/b43.h @@ -567,6 +567,8 @@ struct b43_dma { struct b43_dmaring *tx_ring_mcast; /* Multicast */ struct b43_dmaring *rx_ring; + + u32 translation; /* Routing bits */ }; struct b43_pio_txqueue; @@ -705,7 +707,7 @@ enum { /* Data structure for one wireless device (802.11 core) */ struct b43_wldev { - struct ssb_device *dev; + struct ssb_device *sdev; struct b43_wl *wl; /* The device initialization status. 
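The ath/hw.c hunk above fixes the worked arithmetic in the BSSID-mask comment: the mask is accumulated as bssid_mask &= ~(MAC ^ BSSID), and a frame is accepted when (frame BSSID & mask) == (MAC & mask). The following standalone program reproduces the corrected numbers using the same 4-bit toy values as that comment:

#include <stdio.h>

int main(void)
{
	/* Same toy values as the comment: MAC = 0001, BSSIDs 0100 and 1001. */
	unsigned int mac = 0x1;
	unsigned int bssids[] = { 0x4, 0x9 };
	unsigned int mask = 0xf;                 /* start with every bit significant */

	for (int i = 0; i < 2; i++)
		mask &= ~(mac ^ bssids[i]);      /* keep bits on which MAC and all BSSIDs agree */

	printf("bssid_mask = 0x%x\n", mask & 0xf); /* 0x2, i.e. 0010 as in the corrected comment */

	/* Accept a frame when its BSSID matches the MAC on the significant bits. */
	unsigned int frames[] = { 0x9, 0x1, 0x6 };
	for (int i = 0; i < 3; i++)
		printf("frame 0x%x -> allow = %d\n", frames[i],
		       (frames[i] & mask) == (mac & mask));
	return 0;
}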
@@ -879,22 +881,34 @@ static inline enum ieee80211_band b43_current_band(struct b43_wl *wl) static inline u16 b43_read16(struct b43_wldev *dev, u16 offset) { - return ssb_read16(dev->dev, offset); + return ssb_read16(dev->sdev, offset); } static inline void b43_write16(struct b43_wldev *dev, u16 offset, u16 value) { - ssb_write16(dev->dev, offset, value); + ssb_write16(dev->sdev, offset, value); } static inline u32 b43_read32(struct b43_wldev *dev, u16 offset) { - return ssb_read32(dev->dev, offset); + return ssb_read32(dev->sdev, offset); } static inline void b43_write32(struct b43_wldev *dev, u16 offset, u32 value) { - ssb_write32(dev->dev, offset, value); + ssb_write32(dev->sdev, offset, value); +} + +static inline void b43_block_read(struct b43_wldev *dev, void *buffer, + size_t count, u16 offset, u8 reg_width) +{ + ssb_block_read(dev->sdev, buffer, count, offset, reg_width); +} + +static inline void b43_block_write(struct b43_wldev *dev, const void *buffer, + size_t count, u16 offset, u8 reg_width) +{ + ssb_block_write(dev->sdev, buffer, count, offset, reg_width); } static inline bool b43_using_pio_transfers(struct b43_wldev *dev) diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index ff0f5ba14b2..47d44bcff37 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c @@ -80,7 +80,7 @@ static void op32_fill_descriptor(struct b43_dmaring *ring, addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK); addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK) >> SSB_DMA_TRANSLATION_SHIFT; - addr |= ssb_dma_translation(ring->dev->dev); + addr |= ring->dev->dma.translation; ctl = bufsize & B43_DMA32_DCTL_BYTECNT; if (slot == ring->nr_slots - 1) ctl |= B43_DMA32_DCTL_DTABLEEND; @@ -174,7 +174,7 @@ static void op64_fill_descriptor(struct b43_dmaring *ring, addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK); addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK) >> SSB_DMA_TRANSLATION_SHIFT; - addrhi |= (ssb_dma_translation(ring->dev->dev) << 1); + addrhi |= (ring->dev->dma.translation << 1); if (slot == ring->nr_slots - 1) ctl0 |= B43_DMA64_DCTL0_DTABLEEND; if (start) @@ -333,10 +333,10 @@ static inline dma_addr_t dmaaddr; if (tx) { - dmaaddr = dma_map_single(ring->dev->dev->dma_dev, + dmaaddr = dma_map_single(ring->dev->sdev->dma_dev, buf, len, DMA_TO_DEVICE); } else { - dmaaddr = dma_map_single(ring->dev->dev->dma_dev, + dmaaddr = dma_map_single(ring->dev->sdev->dma_dev, buf, len, DMA_FROM_DEVICE); } @@ -348,10 +348,10 @@ static inline dma_addr_t addr, size_t len, int tx) { if (tx) { - dma_unmap_single(ring->dev->dev->dma_dev, + dma_unmap_single(ring->dev->sdev->dma_dev, addr, len, DMA_TO_DEVICE); } else { - dma_unmap_single(ring->dev->dev->dma_dev, + dma_unmap_single(ring->dev->sdev->dma_dev, addr, len, DMA_FROM_DEVICE); } } @@ -361,7 +361,7 @@ static inline dma_addr_t addr, size_t len) { B43_WARN_ON(ring->tx); - dma_sync_single_for_cpu(ring->dev->dev->dma_dev, + dma_sync_single_for_cpu(ring->dev->sdev->dma_dev, addr, len, DMA_FROM_DEVICE); } @@ -370,7 +370,7 @@ static inline dma_addr_t addr, size_t len) { B43_WARN_ON(ring->tx); - dma_sync_single_for_device(ring->dev->dev->dma_dev, + dma_sync_single_for_device(ring->dev->sdev->dma_dev, addr, len, DMA_FROM_DEVICE); } @@ -401,7 +401,7 @@ static int alloc_ringmemory(struct b43_dmaring *ring) */ if (ring->type == B43_DMA_64BIT) flags |= GFP_DMA; - ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, + ring->descbase = dma_alloc_coherent(ring->dev->sdev->dma_dev, 
B43_DMA_RINGMEMSIZE, &(ring->dmabase), flags); if (!ring->descbase) { @@ -415,7 +415,7 @@ static int alloc_ringmemory(struct b43_dmaring *ring) static void free_ringmemory(struct b43_dmaring *ring) { - dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE, + dma_free_coherent(ring->dev->sdev->dma_dev, B43_DMA_RINGMEMSIZE, ring->descbase, ring->dmabase); } @@ -523,7 +523,7 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring, dma_addr_t addr, size_t buffersize, bool dma_to_device) { - if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr))) + if (unlikely(dma_mapping_error(ring->dev->sdev->dma_dev, addr))) return 1; switch (ring->type) { @@ -658,7 +658,7 @@ static int dmacontroller_setup(struct b43_dmaring *ring) int err = 0; u32 value; u32 addrext; - u32 trans = ssb_dma_translation(ring->dev->dev); + u32 trans = ring->dev->dma.translation; if (ring->tx) { if (ring->type == B43_DMA_64BIT) { @@ -869,7 +869,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, goto err_kfree_meta; /* test for ability to dma to txhdr_cache */ - dma_test = dma_map_single(dev->dev->dma_dev, + dma_test = dma_map_single(dev->sdev->dma_dev, ring->txhdr_cache, b43_txhdr_size(dev), DMA_TO_DEVICE); @@ -884,7 +884,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, if (!ring->txhdr_cache) goto err_kfree_meta; - dma_test = dma_map_single(dev->dev->dma_dev, + dma_test = dma_map_single(dev->sdev->dma_dev, ring->txhdr_cache, b43_txhdr_size(dev), DMA_TO_DEVICE); @@ -898,7 +898,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, } } - dma_unmap_single(dev->dev->dma_dev, + dma_unmap_single(dev->sdev->dma_dev, dma_test, b43_txhdr_size(dev), DMA_TO_DEVICE); } @@ -1013,9 +1013,9 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask) /* Try to set the DMA mask. If it fails, try falling back to a * lower mask, as we can always also support a lower one. */ while (1) { - err = dma_set_mask(dev->dev->dma_dev, mask); + err = dma_set_mask(dev->sdev->dma_dev, mask); if (!err) { - err = dma_set_coherent_mask(dev->dev->dma_dev, mask); + err = dma_set_coherent_mask(dev->sdev->dma_dev, mask); if (!err) break; } @@ -1055,6 +1055,7 @@ int b43_dma_init(struct b43_wldev *dev) err = b43_dma_set_mask(dev, dmamask); if (err) return err; + dma->translation = ssb_dma_translation(dev->sdev); err = -ENOMEM; /* setup TX DMA channels. */ @@ -1084,7 +1085,7 @@ int b43_dma_init(struct b43_wldev *dev) goto err_destroy_mcast; /* No support for the TX status DMA ring. 
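The dma.c changes above stop calling ssb_dma_translation() in the descriptor fill paths and instead cache the routing bits once in dma->translation during b43_dma_init(), ORing them into each descriptor address. A small user-space sketch of that address composition; the mask and shift values here are assumptions for illustration only, the real constants are the SSB_DMA_TRANSLATION_MASK/SHIFT definitions in the ssb headers:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* Assumed values, for illustration only. */
#define DMA_TRANSLATION_MASK  UINT32_C(0xC0000000)
#define DMA_TRANSLATION_SHIFT 30

struct dma_ctx {
	uint32_t translation;   /* routing bits, queried once (cf. dma->translation) */
};

/* Compose a 32-bit descriptor address the way op32_fill_descriptor() does:
 * strip the translation field from the bus address, report it separately
 * as "addrext", and OR in the cached routing bits. */
static void fill_desc32(const struct dma_ctx *dma, uint32_t dmaaddr,
			uint32_t *addr, uint32_t *addrext)
{
	*addr = (dmaaddr & ~DMA_TRANSLATION_MASK) | dma->translation;
	*addrext = (dmaaddr & DMA_TRANSLATION_MASK) >> DMA_TRANSLATION_SHIFT;
}

int main(void)
{
	struct dma_ctx dma = { .translation = UINT32_C(0x40000000) }; /* hypothetical routing bits */
	uint32_t addr, addrext;

	fill_desc32(&dma, UINT32_C(0x8012a000), &addr, &addrext);
	printf("addr=0x%08" PRIx32 " addrext=%" PRIu32 "\n", addr, addrext);
	return 0;
}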
*/ - B43_WARN_ON(dev->dev->id.revision < 5); + B43_WARN_ON(dev->sdev->id.revision < 5); b43dbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type); diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/b43/leds.c index c587115dd2b..0cafafe368a 100644 --- a/drivers/net/wireless/b43/leds.c +++ b/drivers/net/wireless/b43/leds.c @@ -138,7 +138,7 @@ static int b43_register_led(struct b43_wldev *dev, struct b43_led *led, led->led_dev.default_trigger = default_trigger; led->led_dev.brightness_set = b43_led_brightness_set; - err = led_classdev_register(dev->dev->dev, &led->led_dev); + err = led_classdev_register(dev->sdev->dev, &led->led_dev); if (err) { b43warn(dev->wl, "LEDs: Failed to register %s\n", name); led->wl = NULL; @@ -215,7 +215,7 @@ static void b43_led_get_sprominfo(struct b43_wldev *dev, enum b43_led_behaviour *behaviour, bool *activelow) { - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; u8 sprom[4]; sprom[0] = bus->sprom.gpio0; diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/b43/lo.c index 94e4f1378fc..2ef7d4b3854 100644 --- a/drivers/net/wireless/b43/lo.c +++ b/drivers/net/wireless/b43/lo.c @@ -98,7 +98,7 @@ static u16 lo_measure_feedthrough(struct b43_wldev *dev, rfover |= pga; rfover |= lna; rfover |= trsw_rx; - if ((dev->dev->bus->sprom.boardflags_lo & B43_BFL_EXTLNA) + if ((dev->sdev->bus->sprom.boardflags_lo & B43_BFL_EXTLNA) && phy->rev > 6) rfover |= B43_PHY_RFOVERVAL_EXTLNA; @@ -387,7 +387,7 @@ struct lo_g_saved_values { static void lo_measure_setup(struct b43_wldev *dev, struct lo_g_saved_values *sav) { - struct ssb_sprom *sprom = &dev->dev->bus->sprom; + struct ssb_sprom *sprom = &dev->sdev->bus->sprom; struct b43_phy *phy = &dev->phy; struct b43_phy_g *gphy = phy->g; struct b43_txpower_lo_control *lo = gphy->lo_control; diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 5a43984bdce..eb415968698 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -548,7 +548,7 @@ void b43_tsf_read(struct b43_wldev *dev, u64 *tsf) { u32 low, high; - B43_WARN_ON(dev->dev->id.revision < 3); + B43_WARN_ON(dev->sdev->id.revision < 3); /* The hardware guarantees us an atomic read, if we * read the low register first. */ @@ -586,7 +586,7 @@ static void b43_tsf_write_locked(struct b43_wldev *dev, u64 tsf) { u32 low, high; - B43_WARN_ON(dev->dev->id.revision < 3); + B43_WARN_ON(dev->sdev->id.revision < 3); low = tsf; high = (tsf >> 32); @@ -714,7 +714,7 @@ void b43_dummy_transmission(struct b43_wldev *dev, bool ofdm, bool pa_on) b43_ram_write(dev, i * 4, buffer[i]); b43_write16(dev, 0x0568, 0x0000); - if (dev->dev->id.revision < 11) + if (dev->sdev->id.revision < 11) b43_write16(dev, 0x07C0, 0x0000); else b43_write16(dev, 0x07C0, 0x0100); @@ -1132,7 +1132,7 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags) b43_write32(dev, B43_MMIO_MACCTL, macctl); /* Commit write */ b43_read32(dev, B43_MMIO_MACCTL); - if (awake && dev->dev->id.revision >= 5) { + if (awake && dev->sdev->id.revision >= 5) { /* Wait for the microcode to wake up. 
*/ for (i = 0; i < 100; i++) { ucstat = b43_shm_read16(dev, B43_SHM_SHARED, @@ -1144,29 +1144,35 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags) } } -void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags) +static void b43_ssb_wireless_core_reset(struct b43_wldev *dev, u32 flags) { u32 tmslow; - u32 macctl; flags |= B43_TMSLOW_PHYCLKEN; flags |= B43_TMSLOW_PHYRESET; if (dev->phy.type == B43_PHYTYPE_N) flags |= B43_TMSLOW_PHY_BANDWIDTH_20MHZ; /* Make 20 MHz def */ - ssb_device_enable(dev->dev, flags); + ssb_device_enable(dev->sdev, flags); msleep(2); /* Wait for the PLL to turn on. */ /* Now take the PHY out of Reset again */ - tmslow = ssb_read32(dev->dev, SSB_TMSLOW); + tmslow = ssb_read32(dev->sdev, SSB_TMSLOW); tmslow |= SSB_TMSLOW_FGC; tmslow &= ~B43_TMSLOW_PHYRESET; - ssb_write32(dev->dev, SSB_TMSLOW, tmslow); - ssb_read32(dev->dev, SSB_TMSLOW); /* flush */ + ssb_write32(dev->sdev, SSB_TMSLOW, tmslow); + ssb_read32(dev->sdev, SSB_TMSLOW); /* flush */ msleep(1); tmslow &= ~SSB_TMSLOW_FGC; - ssb_write32(dev->dev, SSB_TMSLOW, tmslow); - ssb_read32(dev->dev, SSB_TMSLOW); /* flush */ + ssb_write32(dev->sdev, SSB_TMSLOW, tmslow); + ssb_read32(dev->sdev, SSB_TMSLOW); /* flush */ msleep(1); +} + +void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags) +{ + u32 macctl; + + b43_ssb_wireless_core_reset(dev, flags); /* Turn Analog ON, but only if we already know the PHY-type. * This protects against very early setup where we don't know the @@ -1215,7 +1221,7 @@ static void drain_txstatus_queue(struct b43_wldev *dev) { u32 dummy; - if (dev->dev->id.revision < 5) + if (dev->sdev->id.revision < 5) return; /* Read all entries from the microcode TXstatus FIFO * and throw them away. @@ -1421,9 +1427,9 @@ u8 b43_ieee80211_antenna_sanitize(struct b43_wldev *dev, /* Get the mask of available antennas. */ if (dev->phy.gmode) - antenna_mask = dev->dev->bus->sprom.ant_available_bg; + antenna_mask = dev->sdev->bus->sprom.ant_available_bg; else - antenna_mask = dev->dev->bus->sprom.ant_available_a; + antenna_mask = dev->sdev->bus->sprom.ant_available_a; if (!(antenna_mask & (1 << (antenna_nr - 1)))) { /* This antenna is not available. Fall back to default. */ @@ -1638,7 +1644,7 @@ static void b43_beacon_update_trigger_work(struct work_struct *work) mutex_lock(&wl->mutex); dev = wl->current_dev; if (likely(dev && (b43_status(dev) >= B43_STAT_INITIALIZED))) { - if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) { + if (dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO) { /* wl->mutex is enough. 
*/ b43_do_beacon_update_trigger_work(dev); mmiowb(); @@ -1683,7 +1689,7 @@ static void b43_update_templates(struct b43_wl *wl) static void b43_set_beacon_int(struct b43_wldev *dev, u16 beacon_int) { b43_time_lock(dev); - if (dev->dev->id.revision >= 3) { + if (dev->sdev->id.revision >= 3) { b43_write32(dev, B43_MMIO_TSF_CFP_REP, (beacon_int << 16)); b43_write32(dev, B43_MMIO_TSF_CFP_START, (beacon_int << 10)); } else { @@ -2057,7 +2063,7 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx, B43_WARN_ON(1); return -ENOSYS; } - err = request_firmware(&blob, ctx->fwname, ctx->dev->dev->dev); + err = request_firmware(&blob, ctx->fwname, ctx->dev->sdev->dev); if (err == -ENOENT) { snprintf(ctx->errors[ctx->req_type], sizeof(ctx->errors[ctx->req_type]), @@ -2107,13 +2113,12 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx) { struct b43_wldev *dev = ctx->dev; struct b43_firmware *fw = &ctx->dev->fw; - const u8 rev = ctx->dev->dev->id.revision; + const u8 rev = ctx->dev->sdev->id.revision; const char *filename; u32 tmshigh; int err; /* Get microcode */ - tmshigh = ssb_read32(dev->dev, SSB_TMSHIGH); if ((rev >= 5) && (rev <= 10)) filename = "ucode5"; else if ((rev >= 11) && (rev <= 12)) @@ -2152,6 +2157,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx) switch (dev->phy.type) { case B43_PHYTYPE_A: if ((rev >= 5) && (rev <= 10)) { + tmshigh = ssb_read32(dev->sdev, SSB_TMSHIGH); if (tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY) filename = "a0g1initvals5"; else @@ -2196,6 +2202,7 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx) switch (dev->phy.type) { case B43_PHYTYPE_A: if ((rev >= 5) && (rev <= 10)) { + tmshigh = ssb_read32(dev->sdev, SSB_TMSHIGH); if (tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY) filename = "a0g1bsinitvals5"; else @@ -2441,7 +2448,7 @@ static int b43_upload_microcode(struct b43_wldev *dev) snprintf(wiphy->fw_version, sizeof(wiphy->fw_version), "%u.%u", dev->fw.rev, dev->fw.patch); - wiphy->hw_version = dev->dev->id.coreid; + wiphy->hw_version = dev->sdev->id.coreid; if (b43_is_old_txhdr_format(dev)) { /* We're over the deadline, but we keep support for old fw @@ -2557,10 +2564,20 @@ out: /* Initialize the GPIOs * http://bcm-specs.sipsolutions.net/GPIO */ +static struct ssb_device *b43_ssb_gpio_dev(struct b43_wldev *dev) +{ + struct ssb_bus *bus = dev->sdev->bus; + +#ifdef CONFIG_SSB_DRIVER_PCICORE + return (bus->chipco.dev ? bus->chipco.dev : bus->pcicore.dev); +#else + return bus->chipco.dev; +#endif +} + static int b43_gpio_init(struct b43_wldev *dev) { - struct ssb_bus *bus = dev->dev->bus; - struct ssb_device *gpiodev, *pcidev = NULL; + struct ssb_device *gpiodev; u32 mask, set; b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL) @@ -2571,7 +2588,7 @@ static int b43_gpio_init(struct b43_wldev *dev) mask = 0x0000001F; set = 0x0000000F; - if (dev->dev->bus->chip_id == 0x4301) { + if (dev->sdev->bus->chip_id == 0x4301) { mask |= 0x0060; set |= 0x0060; } @@ -2582,25 +2599,21 @@ static int b43_gpio_init(struct b43_wldev *dev) mask |= 0x0180; set |= 0x0180; } - if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL) { + if (dev->sdev->bus->sprom.boardflags_lo & B43_BFL_PACTRL) { b43_write16(dev, B43_MMIO_GPIO_MASK, b43_read16(dev, B43_MMIO_GPIO_MASK) | 0x0200); mask |= 0x0200; set |= 0x0200; } - if (dev->dev->id.revision >= 2) + if (dev->sdev->id.revision >= 2) mask |= 0x0010; /* FIXME: This is redundant. */ -#ifdef CONFIG_SSB_DRIVER_PCICORE - pcidev = bus->pcicore.dev; -#endif - gpiodev = bus->chipco.dev ? 
: pcidev; - if (!gpiodev) - return 0; - ssb_write32(gpiodev, B43_GPIO_CONTROL, - (ssb_read32(gpiodev, B43_GPIO_CONTROL) - & mask) | set); + gpiodev = b43_ssb_gpio_dev(dev); + if (gpiodev) + ssb_write32(gpiodev, B43_GPIO_CONTROL, + (ssb_read32(gpiodev, B43_GPIO_CONTROL) + & mask) | set); return 0; } @@ -2608,16 +2621,11 @@ static int b43_gpio_init(struct b43_wldev *dev) /* Turn off all GPIO stuff. Call this on module unload, for example. */ static void b43_gpio_cleanup(struct b43_wldev *dev) { - struct ssb_bus *bus = dev->dev->bus; - struct ssb_device *gpiodev, *pcidev = NULL; + struct ssb_device *gpiodev; -#ifdef CONFIG_SSB_DRIVER_PCICORE - pcidev = bus->pcicore.dev; -#endif - gpiodev = bus->chipco.dev ? : pcidev; - if (!gpiodev) - return; - ssb_write32(gpiodev, B43_GPIO_CONTROL, 0); + gpiodev = b43_ssb_gpio_dev(dev); + if (gpiodev) + ssb_write32(gpiodev, B43_GPIO_CONTROL, 0); } /* http://bcm-specs.sipsolutions.net/EnableMac */ @@ -2689,12 +2697,12 @@ out: /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MacPhyClkSet */ void b43_mac_phy_clock_set(struct b43_wldev *dev, bool on) { - u32 tmslow = ssb_read32(dev->dev, SSB_TMSLOW); + u32 tmslow = ssb_read32(dev->sdev, SSB_TMSLOW); if (on) tmslow |= B43_TMSLOW_MACPHYCLKEN; else tmslow &= ~B43_TMSLOW_MACPHYCLKEN; - ssb_write32(dev->dev, SSB_TMSLOW, tmslow); + ssb_write32(dev->sdev, SSB_TMSLOW, tmslow); } static void b43_adjust_opmode(struct b43_wldev *dev) @@ -2733,15 +2741,15 @@ static void b43_adjust_opmode(struct b43_wldev *dev) /* Workaround: On old hardware the HW-MAC-address-filter * doesn't work properly, so always run promisc in filter * it in software. */ - if (dev->dev->id.revision <= 4) + if (dev->sdev->id.revision <= 4) ctl |= B43_MACCTL_PROMISC; b43_write32(dev, B43_MMIO_MACCTL, ctl); cfp_pretbtt = 2; if ((ctl & B43_MACCTL_INFRA) && !(ctl & B43_MACCTL_AP)) { - if (dev->dev->bus->chip_id == 0x4306 && - dev->dev->bus->chip_rev == 3) + if (dev->sdev->bus->chip_id == 0x4306 && + dev->sdev->bus->chip_rev == 3) cfp_pretbtt = 100; else cfp_pretbtt = 50; @@ -2899,7 +2907,7 @@ static int b43_chip_init(struct b43_wldev *dev) b43_write16(dev, 0x005E, value16); } b43_write32(dev, 0x0100, 0x01000000); - if (dev->dev->id.revision < 5) + if (dev->sdev->id.revision < 5) b43_write32(dev, 0x010C, 0x01000000); b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL) @@ -2914,7 +2922,7 @@ static int b43_chip_init(struct b43_wldev *dev) /* Initially set the wireless operation mode. */ b43_adjust_opmode(dev); - if (dev->dev->id.revision < 3) { + if (dev->sdev->id.revision < 3) { b43_write16(dev, 0x060E, 0x0000); b43_write16(dev, 0x0610, 0x8000); b43_write16(dev, 0x0604, 0x0000); @@ -2934,7 +2942,7 @@ static int b43_chip_init(struct b43_wldev *dev) b43_mac_phy_clock_set(dev, true); b43_write16(dev, B43_MMIO_POWERUP_DELAY, - dev->dev->bus->chipco.fast_pwrup_delay); + dev->sdev->bus->chipco.fast_pwrup_delay); err = 0; b43dbg(dev->wl, "Chip initialized\n"); @@ -3097,7 +3105,7 @@ static int b43_validate_chipaccess(struct b43_wldev *dev) b43_shm_write32(dev, B43_SHM_SHARED, 0, backup0); b43_shm_write32(dev, B43_SHM_SHARED, 4, backup4); - if ((dev->dev->id.revision >= 3) && (dev->dev->id.revision <= 10)) { + if ((dev->sdev->id.revision >= 3) && (dev->sdev->id.revision <= 10)) { /* The 32bit register shadows the two 16bit registers * with update sideeffects. Validate this. 
*/ b43_write16(dev, B43_MMIO_TSF_CFP_START, 0xAAAA); @@ -3450,7 +3458,7 @@ static void b43_op_set_tsf(struct ieee80211_hw *hw, u64 tsf) static void b43_put_phy_into_reset(struct b43_wldev *dev) { - struct ssb_device *sdev = dev->dev; + struct ssb_device *sdev = dev->sdev; u32 tmslow; tmslow = ssb_read32(sdev, SSB_TMSLOW); @@ -3946,7 +3954,7 @@ redo: /* Disable interrupts on the device. */ b43_set_status(dev, B43_STAT_INITIALIZED); - if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) { + if (dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO) { /* wl->mutex is locked. That is enough. */ b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, 0); b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); /* Flush */ @@ -3959,11 +3967,11 @@ redo: /* Synchronize and free the interrupt handlers. Unlock to avoid deadlocks. */ orig_dev = dev; mutex_unlock(&wl->mutex); - if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) { + if (dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO) { b43_sdio_free_irq(dev); } else { - synchronize_irq(dev->dev->irq); - free_irq(dev->dev->irq, dev); + synchronize_irq(dev->sdev->irq); + free_irq(dev->sdev->irq, dev); } mutex_lock(&wl->mutex); dev = wl->current_dev; @@ -3996,18 +4004,19 @@ static int b43_wireless_core_start(struct b43_wldev *dev) B43_WARN_ON(b43_status(dev) != B43_STAT_INITIALIZED); drain_txstatus_queue(dev); - if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) { + if (dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO) { err = b43_sdio_request_irq(dev, b43_sdio_interrupt_handler); if (err) { b43err(dev->wl, "Cannot request SDIO IRQ\n"); goto out; } } else { - err = request_threaded_irq(dev->dev->irq, b43_interrupt_handler, + err = request_threaded_irq(dev->sdev->irq, b43_interrupt_handler, b43_interrupt_thread_handler, IRQF_SHARED, KBUILD_MODNAME, dev); if (err) { - b43err(dev->wl, "Cannot request IRQ-%d\n", dev->dev->irq); + b43err(dev->wl, "Cannot request IRQ-%d\n", + dev->sdev->irq); goto out; } } @@ -4087,10 +4096,10 @@ static int b43_phy_versioning(struct b43_wldev *dev) analog_type, phy_type, phy_rev); /* Get RADIO versioning */ - if (dev->dev->bus->chip_id == 0x4317) { - if (dev->dev->bus->chip_rev == 0) + if (dev->sdev->bus->chip_id == 0x4317) { + if (dev->sdev->bus->chip_rev == 0) tmp = 0x3205017F; - else if (dev->dev->bus->chip_rev == 1) + else if (dev->sdev->bus->chip_rev == 1) tmp = 0x4205017F; else tmp = 0x5205017F; @@ -4195,7 +4204,7 @@ static void setup_struct_wldev_for_init(struct b43_wldev *dev) static void b43_bluetooth_coext_enable(struct b43_wldev *dev) { - struct ssb_sprom *sprom = &dev->dev->bus->sprom; + struct ssb_sprom *sprom = &dev->sdev->bus->sprom; u64 hf; if (!modparam_btcoex) @@ -4222,16 +4231,16 @@ static void b43_bluetooth_coext_disable(struct b43_wldev *dev) static void b43_imcfglo_timeouts_workaround(struct b43_wldev *dev) { - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; u32 tmp; if ((bus->chip_id == 0x4311 && bus->chip_rev == 2) || (bus->chip_id == 0x4312)) { - tmp = ssb_read32(dev->dev, SSB_IMCFGLO); + tmp = ssb_read32(dev->sdev, SSB_IMCFGLO); tmp &= ~SSB_IMCFGLO_REQTO; tmp &= ~SSB_IMCFGLO_SERTO; tmp |= 0x3; - ssb_write32(dev->dev, SSB_IMCFGLO, tmp); + ssb_write32(dev->sdev, SSB_IMCFGLO, tmp); ssb_commit_settings(bus); } } @@ -4301,14 +4310,14 @@ static void b43_wireless_core_exit(struct b43_wldev *dev) dev->wl->current_beacon = NULL; } - ssb_device_disable(dev->dev, 0); - ssb_bus_may_powerdown(dev->dev->bus); + ssb_device_disable(dev->sdev, 0); + ssb_bus_may_powerdown(dev->sdev->bus); } /* Initialize a wireless core */ static int 
b43_wireless_core_init(struct b43_wldev *dev) { - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; struct ssb_sprom *sprom = &bus->sprom; struct b43_phy *phy = &dev->phy; int err; @@ -4320,7 +4329,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev) err = ssb_bus_powerup(bus, 0); if (err) goto out; - if (!ssb_device_is_enabled(dev->dev)) { + if (!ssb_device_is_enabled(dev->sdev)) { tmp = phy->gmode ? B43_TMSLOW_GMODE : 0; b43_wireless_core_reset(dev, tmp); } @@ -4330,7 +4339,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev) phy->ops->prepare_structs(dev); /* Enable IRQ routing to this device. */ - ssb_pcicore_dev_irqvecs_enable(&bus->pcicore, dev->dev); + ssb_pcicore_dev_irqvecs_enable(&bus->pcicore, dev->sdev); b43_imcfglo_timeouts_workaround(dev); b43_bluetooth_coext_disable(dev); @@ -4343,7 +4352,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev) if (err) goto err_busdown; b43_shm_write16(dev, B43_SHM_SHARED, - B43_SHM_SH_WLCOREREV, dev->dev->id.revision); + B43_SHM_SH_WLCOREREV, dev->sdev->id.revision); hf = b43_hf_read(dev); if (phy->type == B43_PHYTYPE_G) { hf |= B43_HF_SYMW; @@ -4390,8 +4399,8 @@ static int b43_wireless_core_init(struct b43_wldev *dev) /* Maximum Contention Window */ b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MAXCONT, 0x3FF); - if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) || - (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) || + if ((dev->sdev->bus->bustype == SSB_BUSTYPE_PCMCIA) || + (dev->sdev->bus->bustype == SSB_BUSTYPE_SDIO) || dev->use_pio) { dev->__using_pio_transfers = 1; err = b43_pio_init(dev); @@ -4728,7 +4737,7 @@ static void b43_wireless_core_detach(struct b43_wldev *dev) static int b43_wireless_core_attach(struct b43_wldev *dev) { struct b43_wl *wl = dev->wl; - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; struct pci_dev *pdev = (bus->bustype == SSB_BUSTYPE_PCI) ? bus->host_pci : NULL; int err; bool have_2ghz_phy = 0, have_5ghz_phy = 0; @@ -4747,10 +4756,10 @@ static int b43_wireless_core_attach(struct b43_wldev *dev) goto out; } /* Get the PHY type. 
*/ - if (dev->dev->id.revision >= 5) { + if (dev->sdev->id.revision >= 5) { u32 tmshigh; - tmshigh = ssb_read32(dev->dev, SSB_TMSHIGH); + tmshigh = ssb_read32(dev->sdev, SSB_TMSHIGH); have_2ghz_phy = !!(tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY); have_5ghz_phy = !!(tmshigh & B43_TMSHIGH_HAVE_5GHZ_PHY); } else @@ -4823,7 +4832,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev) INIT_WORK(&dev->restart_work, b43_chip_reset); dev->phy.ops->switch_analog(dev, 0); - ssb_device_disable(dev->dev, 0); + ssb_device_disable(dev->sdev, 0); ssb_bus_may_powerdown(bus); out: @@ -4864,7 +4873,7 @@ static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl) goto out; wldev->use_pio = b43_modparam_pio; - wldev->dev = dev; + wldev->sdev = dev; wldev->wl = wl; b43_set_status(wldev, B43_STAT_UNINIT); wldev->bad_frames_preempt = modparam_bad_frames_preempt; @@ -4925,19 +4934,16 @@ static void b43_wireless_exit(struct ssb_device *dev, struct b43_wl *wl) ieee80211_free_hw(hw); } -static int b43_wireless_init(struct ssb_device *dev) +static struct b43_wl *b43_wireless_init(struct ssb_device *dev) { struct ssb_sprom *sprom = &dev->bus->sprom; struct ieee80211_hw *hw; struct b43_wl *wl; - int err = -ENOMEM; - - b43_sprom_fixup(dev->bus); hw = ieee80211_alloc_hw(sizeof(*wl), &b43_hw_ops); if (!hw) { b43err(NULL, "Could not allocate ieee80211 device\n"); - goto out; + return ERR_PTR(-ENOMEM); } wl = hw_to_b43_wl(hw); @@ -4971,12 +4977,9 @@ static int b43_wireless_init(struct ssb_device *dev) INIT_WORK(&wl->tx_work, b43_tx_work); skb_queue_head_init(&wl->tx_queue); - ssb_set_devtypedata(dev, wl); b43info(wl, "Broadcom %04X WLAN found (core revision %u)\n", dev->bus->chip_id, dev->id.revision); - err = 0; -out: - return err; + return wl; } static int b43_ssb_probe(struct ssb_device *dev, const struct ssb_device_id *id) @@ -4989,11 +4992,14 @@ static int b43_ssb_probe(struct ssb_device *dev, const struct ssb_device_id *id) if (!wl) { /* Probing the first core. Must setup common struct b43_wl */ first = 1; - err = b43_wireless_init(dev); - if (err) + b43_sprom_fixup(dev->bus); + wl = b43_wireless_init(dev); + if (IS_ERR(wl)) { + err = PTR_ERR(wl); goto out; - wl = ssb_get_devtypedata(dev); - B43_WARN_ON(!wl); + } + ssb_set_devtypedata(dev, wl); + B43_WARN_ON(ssb_get_devtypedata(dev) != wl); } err = b43_one_core_attach(dev, wl); if (err) diff --git a/drivers/net/wireless/b43/phy_a.c b/drivers/net/wireless/b43/phy_a.c index b6428ec16dd..b01c8ced57c 100644 --- a/drivers/net/wireless/b43/phy_a.c +++ b/drivers/net/wireless/b43/phy_a.c @@ -265,7 +265,7 @@ static void hardware_pctl_init_aphy(struct b43_wldev *dev) void b43_phy_inita(struct b43_wldev *dev) { - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; struct b43_phy *phy = &dev->phy; /* This lowlevel A-PHY init is also called from G-PHY init. 
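The last main.c hunk above converts b43_wireless_init() from returning an int to returning either the new struct b43_wl or an ERR_PTR()-encoded errno, which b43_ssb_probe() then unwraps with IS_ERR()/PTR_ERR(). A simplified user-space analogue of that pattern (the real helpers live in include/linux/err.h; the names below are local stand-ins):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* Simplified analogues of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR():
 * a small negative errno value is smuggled inside the returned pointer. */
static inline void *err_ptr(intptr_t err)  { return (void *)err; }
static inline int   is_err(const void *p)  { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }
static inline long  ptr_err(const void *p) { return (long)(intptr_t)p; }

struct wl { int chip_id; };

/* Allocate-and-initialise helper in the style of the reworked b43_wireless_init():
 * hand back the object on success, an encoded errno on failure. */
static struct wl *wireless_init(int chip_id)
{
	struct wl *wl = malloc(sizeof(*wl));
	if (!wl)
		return err_ptr(-ENOMEM);
	wl->chip_id = chip_id;
	return wl;
}

int main(void)
{
	struct wl *wl = wireless_init(0x4306);

	if (is_err(wl)) {
		fprintf(stderr, "init failed: %ld\n", ptr_err(wl));
		return 1;
	}
	printf("chip 0x%04x ready\n", wl->chip_id);
	free(wl);
	return 0;
}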
@@ -311,7 +311,7 @@ void b43_phy_inita(struct b43_wldev *dev) } if ((phy->type == B43_PHYTYPE_G) && - (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)) { + (dev->sdev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)) { b43_phy_maskset(dev, B43_PHY_OFDM(0x6E), 0xE000, 0x3CF); } } @@ -323,17 +323,17 @@ static int b43_aphy_init_tssi2dbm_table(struct b43_wldev *dev) struct b43_phy_a *aphy = phy->a; s16 pab0, pab1, pab2; - pab0 = (s16) (dev->dev->bus->sprom.pa1b0); - pab1 = (s16) (dev->dev->bus->sprom.pa1b1); - pab2 = (s16) (dev->dev->bus->sprom.pa1b2); + pab0 = (s16) (dev->sdev->bus->sprom.pa1b0); + pab1 = (s16) (dev->sdev->bus->sprom.pa1b1); + pab2 = (s16) (dev->sdev->bus->sprom.pa1b2); if (pab0 != 0 && pab1 != 0 && pab2 != 0 && pab0 != -1 && pab1 != -1 && pab2 != -1) { /* The pabX values are set in SPROM. Use them. */ - if ((s8) dev->dev->bus->sprom.itssi_a != 0 && - (s8) dev->dev->bus->sprom.itssi_a != -1) + if ((s8) dev->sdev->bus->sprom.itssi_a != 0 && + (s8) dev->sdev->bus->sprom.itssi_a != -1) aphy->tgt_idle_tssi = - (s8) (dev->dev->bus->sprom.itssi_a); + (s8) (dev->sdev->bus->sprom.itssi_a); else aphy->tgt_idle_tssi = 62; aphy->tssi2dbm = b43_generate_dyn_tssi2dbm_tab(dev, pab0, diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c index b5c5ce94d3f..e46b2f4f092 100644 --- a/drivers/net/wireless/b43/phy_common.c +++ b/drivers/net/wireless/b43/phy_common.c @@ -168,7 +168,7 @@ void b43_phy_lock(struct b43_wldev *dev) B43_WARN_ON(dev->phy.phy_locked); dev->phy.phy_locked = 1; #endif - B43_WARN_ON(dev->dev->id.revision < 3); + B43_WARN_ON(dev->sdev->id.revision < 3); if (!b43_is_mode(dev->wl, NL80211_IFTYPE_AP)) b43_power_saving_ctl_bits(dev, B43_PS_AWAKE); @@ -180,7 +180,7 @@ void b43_phy_unlock(struct b43_wldev *dev) B43_WARN_ON(!dev->phy.phy_locked); dev->phy.phy_locked = 0; #endif - B43_WARN_ON(dev->dev->id.revision < 3); + B43_WARN_ON(dev->sdev->id.revision < 3); if (!b43_is_mode(dev->wl, NL80211_IFTYPE_AP)) b43_power_saving_ctl_bits(dev, 0); @@ -368,8 +368,8 @@ void b43_phy_txpower_check(struct b43_wldev *dev, unsigned int flags) /* The next check will be needed in two seconds, or later. 
*/ phy->next_txpwr_check_time = round_jiffies(now + (HZ * 2)); - if ((dev->dev->bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) && - (dev->dev->bus->boardinfo.type == SSB_BOARD_BU4306)) + if ((dev->sdev->bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) && + (dev->sdev->bus->boardinfo.type == SSB_BOARD_BU4306)) return; /* No software txpower adjustment needed */ result = phy->ops->recalc_txpower(dev, !!(flags & B43_TXPWR_IGNORE_TSSI)); diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c index be482816701..1758a282f91 100644 --- a/drivers/net/wireless/b43/phy_g.c +++ b/drivers/net/wireless/b43/phy_g.c @@ -718,7 +718,7 @@ static void b43_calc_nrssi_threshold(struct b43_wldev *dev) B43_WARN_ON(phy->type != B43_PHYTYPE_G); if (!phy->gmode || - !(dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) { + !(dev->sdev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) { tmp16 = b43_nrssi_hw_read(dev, 0x20); if (tmp16 >= 0x20) tmp16 -= 0x40; @@ -1114,7 +1114,7 @@ static u16 radio2050_rfover_val(struct b43_wldev *dev, { struct b43_phy *phy = &dev->phy; struct b43_phy_g *gphy = phy->g; - struct ssb_sprom *sprom = &(dev->dev->bus->sprom); + struct ssb_sprom *sprom = &(dev->sdev->bus->sprom); if (!phy->gmode) return 0; @@ -1491,7 +1491,7 @@ static u16 b43_radio_init2050(struct b43_wldev *dev) static void b43_phy_initb5(struct b43_wldev *dev) { - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; struct b43_phy *phy = &dev->phy; struct b43_phy_g *gphy = phy->g; u16 offset, value; @@ -1620,7 +1620,7 @@ static void b43_phy_initb6(struct b43_wldev *dev) b43_radio_write16(dev, 0x5A, 0x88); b43_radio_write16(dev, 0x5B, 0x6B); b43_radio_write16(dev, 0x5C, 0x0F); - if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_ALTIQ) { + if (dev->sdev->bus->sprom.boardflags_lo & B43_BFL_ALTIQ) { b43_radio_write16(dev, 0x5D, 0xFA); b43_radio_write16(dev, 0x5E, 0xD8); } else { @@ -1787,7 +1787,7 @@ static void b43_calc_loopback_gain(struct b43_wldev *dev) b43_phy_set(dev, B43_PHY_RFOVER, 0x0100); b43_phy_mask(dev, B43_PHY_RFOVERVAL, 0xCFFF); - if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_EXTLNA) { + if (dev->sdev->bus->sprom.boardflags_lo & B43_BFL_EXTLNA) { if (phy->rev >= 7) { b43_phy_set(dev, B43_PHY_RFOVER, 0x0800); b43_phy_set(dev, B43_PHY_RFOVERVAL, 0x8000); @@ -1922,7 +1922,7 @@ static void b43_hardware_pctl_init_gphy(struct b43_wldev *dev) /* Initialize B/G PHY power control */ static void b43_phy_init_pctl(struct b43_wldev *dev) { - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; struct b43_phy *phy = &dev->phy; struct b43_phy_g *gphy = phy->g; struct b43_rfatt old_rfatt; @@ -2053,7 +2053,7 @@ static void b43_phy_initg(struct b43_wldev *dev) if (phy->rev >= 6) { b43_phy_maskset(dev, B43_PHY_CCK(0x36), 0x0FFF, (gphy->lo_control->tx_bias << 12)); } - if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL) + if (dev->sdev->bus->sprom.boardflags_lo & B43_BFL_PACTRL) b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x8075); else b43_phy_write(dev, B43_PHY_CCK(0x2E), 0x807F); @@ -2066,7 +2066,7 @@ static void b43_phy_initg(struct b43_wldev *dev) b43_phy_write(dev, B43_PHY_LO_MASK, 0x8078); } - if (!(dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) { + if (!(dev->sdev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) { /* The specs state to update the NRSSI LT with * the value 0x7FFFFFFF here. I think that is some weird * compiler optimization in the original driver. 
@@ -2088,8 +2088,8 @@ static void b43_phy_initg(struct b43_wldev *dev) /* FIXME: The spec says in the following if, the 0 should be replaced 'if OFDM may not be used in the current locale' but OFDM is legal everywhere */ - if ((dev->dev->bus->chip_id == 0x4306 - && dev->dev->bus->chip_package == 2) || 0) { + if ((dev->sdev->bus->chip_id == 0x4306 + && dev->sdev->bus->chip_package == 2) || 0) { b43_phy_mask(dev, B43_PHY_CRS0, 0xBFFF); b43_phy_mask(dev, B43_PHY_OFDM(0xC3), 0x7FFF); } @@ -2105,7 +2105,7 @@ void b43_gphy_channel_switch(struct b43_wldev *dev, b43_write16(dev, B43_MMIO_CHANNEL, channel2freq_bg(channel)); if (channel == 14) { - if (dev->dev->bus->sprom.country_code == + if (dev->sdev->bus->sprom.country_code == SSB_SPROM1CCODE_JAPAN) b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_ACPR); @@ -2136,7 +2136,7 @@ static void default_baseband_attenuation(struct b43_wldev *dev, static void default_radio_attenuation(struct b43_wldev *dev, struct b43_rfatt *rf) { - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; struct b43_phy *phy = &dev->phy; rf->with_padmix = 0; @@ -2384,11 +2384,11 @@ static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev) struct b43_phy_g *gphy = phy->g; s16 pab0, pab1, pab2; - pab0 = (s16) (dev->dev->bus->sprom.pa0b0); - pab1 = (s16) (dev->dev->bus->sprom.pa0b1); - pab2 = (s16) (dev->dev->bus->sprom.pa0b2); + pab0 = (s16) (dev->sdev->bus->sprom.pa0b0); + pab1 = (s16) (dev->sdev->bus->sprom.pa0b1); + pab2 = (s16) (dev->sdev->bus->sprom.pa0b2); - B43_WARN_ON((dev->dev->bus->chip_id == 0x4301) && + B43_WARN_ON((dev->sdev->bus->chip_id == 0x4301) && (phy->radio_ver != 0x2050)); /* Not supported anymore */ gphy->dyn_tssi_tbl = 0; @@ -2396,10 +2396,10 @@ static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev) if (pab0 != 0 && pab1 != 0 && pab2 != 0 && pab0 != -1 && pab1 != -1 && pab2 != -1) { /* The pabX values are set in SPROM. Use them. */ - if ((s8) dev->dev->bus->sprom.itssi_bg != 0 && - (s8) dev->dev->bus->sprom.itssi_bg != -1) { + if ((s8) dev->sdev->bus->sprom.itssi_bg != 0 && + (s8) dev->sdev->bus->sprom.itssi_bg != -1) { gphy->tgt_idle_tssi = - (s8) (dev->dev->bus->sprom.itssi_bg); + (s8) (dev->sdev->bus->sprom.itssi_bg); } else gphy->tgt_idle_tssi = 62; gphy->tssi2dbm = b43_generate_dyn_tssi2dbm_tab(dev, pab0, @@ -2840,7 +2840,7 @@ static void b43_gphy_op_adjust_txpower(struct b43_wldev *dev) B43_TXCTL_TXMIX; rfatt += 2; bbatt += 2; - } else if (dev->dev->bus->sprom. + } else if (dev->sdev->bus->sprom. 
boardflags_lo & B43_BFL_PACTRL) { bbatt += 4 * (rfatt - 2); @@ -2914,14 +2914,14 @@ static enum b43_txpwr_result b43_gphy_op_recalc_txpower(struct b43_wldev *dev, estimated_pwr = b43_gphy_estimate_power_out(dev, average_tssi); B43_WARN_ON(phy->type != B43_PHYTYPE_G); - max_pwr = dev->dev->bus->sprom.maxpwr_bg; - if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL) + max_pwr = dev->sdev->bus->sprom.maxpwr_bg; + if (dev->sdev->bus->sprom.boardflags_lo & B43_BFL_PACTRL) max_pwr -= 3; /* minus 0.75 */ if (unlikely(max_pwr >= INT_TO_Q52(30/*dBm*/))) { b43warn(dev->wl, "Invalid max-TX-power value in SPROM.\n"); max_pwr = INT_TO_Q52(20); /* fake it */ - dev->dev->bus->sprom.maxpwr_bg = max_pwr; + dev->sdev->bus->sprom.maxpwr_bg = max_pwr; } /* Get desired power (in Q5.2) */ @@ -3014,7 +3014,7 @@ static void b43_gphy_op_pwork_60sec(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; - if (!(dev->dev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) + if (!(dev->sdev->bus->sprom.boardflags_lo & B43_BFL_RSSI)) return; b43_mac_suspend(dev); diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c index fd50eb11624..012c8da2f94 100644 --- a/drivers/net/wireless/b43/phy_lp.c +++ b/drivers/net/wireless/b43/phy_lp.c @@ -86,7 +86,7 @@ static void b43_lpphy_op_free(struct b43_wldev *dev) static void lpphy_read_band_sprom(struct b43_wldev *dev) { struct b43_phy_lp *lpphy = dev->phy.lp; - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; u16 cckpo, maxpwr; u32 ofdmpo; int i; @@ -214,7 +214,7 @@ static void lpphy_table_init(struct b43_wldev *dev) static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev) { - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; struct b43_phy_lp *lpphy = dev->phy.lp; u16 tmp, tmp2; @@ -412,7 +412,7 @@ static void lpphy_restore_dig_flt_state(struct b43_wldev *dev) static void lpphy_baseband_rev2plus_init(struct b43_wldev *dev) { - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; struct b43_phy_lp *lpphy = dev->phy.lp; b43_phy_write(dev, B43_LPPHY_AFE_DAC_CTL, 0x50); @@ -519,7 +519,7 @@ struct b2062_freqdata { static void lpphy_2062_init(struct b43_wldev *dev) { struct b43_phy_lp *lpphy = dev->phy.lp; - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; u32 crystalfreq, tmp, ref; unsigned int i; const struct b2062_freqdata *fd = NULL; @@ -697,7 +697,7 @@ static void lpphy_radio_init(struct b43_wldev *dev) lpphy_sync_stx(dev); b43_phy_write(dev, B43_PHY_OFDM(0xF0), 0x5F80); b43_phy_write(dev, B43_PHY_OFDM(0xF1), 0); - if (dev->dev->bus->chip_id == 0x4325) { + if (dev->sdev->bus->chip_id == 0x4325) { // TODO SSB PMU recalibration } } @@ -1289,7 +1289,7 @@ finish: static void lpphy_rev2plus_rc_calib(struct b43_wldev *dev) { - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000; u8 tmp = b43_radio_read(dev, B2063_RX_BB_SP8) & 0xFF; int i; @@ -1840,7 +1840,7 @@ static void lpphy_papd_cal(struct b43_wldev *dev, struct lpphy_tx_gains gains, static void lpphy_papd_cal_txpwr(struct b43_wldev *dev) { struct b43_phy_lp *lpphy = dev->phy.lp; - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; struct lpphy_tx_gains gains, oldgains; int old_txpctl, old_afe_ovr, old_rf, old_bbmult; @@ -1870,7 +1870,7 @@ static int lpphy_rx_iq_cal(struct b43_wldev *dev, bool noise, bool tx, bool rx, bool pa, struct lpphy_tx_gains *gains) { struct b43_phy_lp *lpphy = 
dev->phy.lp; - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; const struct lpphy_rx_iq_comp *iqcomp = NULL; struct lpphy_tx_gains nogains, oldgains; u16 tmp; @@ -2408,7 +2408,7 @@ static const struct b206x_channel b2063_chantbl[] = { static void lpphy_b2062_reset_pll_bias(struct b43_wldev *dev) { - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; b43_radio_write(dev, B2062_S_RFPLL_CTL2, 0xFF); udelay(20); @@ -2432,7 +2432,7 @@ static int lpphy_b2062_tune(struct b43_wldev *dev, unsigned int channel) { struct b43_phy_lp *lpphy = dev->phy.lp; - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; const struct b206x_channel *chandata = NULL; u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000; u32 tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8, tmp9; @@ -2522,7 +2522,7 @@ static void lpphy_b2063_vco_calib(struct b43_wldev *dev) static int lpphy_b2063_tune(struct b43_wldev *dev, unsigned int channel) { - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; static const struct b206x_channel *chandata = NULL; u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000; diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c index b075a3f82a4..9ed65157bef 100644 --- a/drivers/net/wireless/b43/phy_n.c +++ b/drivers/net/wireless/b43/phy_n.c @@ -299,7 +299,7 @@ static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable) static void b43_nphy_tx_power_fix(struct b43_wldev *dev) { struct b43_phy_n *nphy = dev->phy.n; - struct ssb_sprom *sprom = &(dev->dev->bus->sprom); + struct ssb_sprom *sprom = &(dev->sdev->bus->sprom); u8 txpi[2], bbmult, i; u16 tmp, radio_gain, dac_gain; @@ -423,8 +423,8 @@ static void b43_radio_init2055_pre(struct b43_wldev *dev) static void b43_radio_init2055_post(struct b43_wldev *dev) { struct b43_phy_n *nphy = dev->phy.n; - struct ssb_sprom *sprom = &(dev->dev->bus->sprom); - struct ssb_boardinfo *binfo = &(dev->dev->bus->boardinfo); + struct ssb_sprom *sprom = &(dev->sdev->bus->sprom); + struct ssb_boardinfo *binfo = &(dev->sdev->bus->boardinfo); int i; u16 val; bool workaround = false; @@ -609,12 +609,12 @@ static void b43_nphy_bmac_clock_fgc(struct b43_wldev *dev, bool force) if (dev->phy.type != B43_PHYTYPE_N) return; - tmslow = ssb_read32(dev->dev, SSB_TMSLOW); + tmslow = ssb_read32(dev->sdev, SSB_TMSLOW); if (force) tmslow |= SSB_TMSLOW_FGC; else tmslow &= ~SSB_TMSLOW_FGC; - ssb_write32(dev->dev, SSB_TMSLOW, tmslow); + ssb_write32(dev->sdev, SSB_TMSLOW, tmslow); } /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */ @@ -959,7 +959,7 @@ static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init) b43_phy_write(dev, B43_NPHY_GPIO_LOOEN, 0); b43_phy_write(dev, B43_NPHY_GPIO_HIOEN, 0); - ssb_chipco_gpio_control(&dev->dev->bus->chipco, 0xFC00, + ssb_chipco_gpio_control(&dev->sdev->bus->chipco, 0xFC00, 0xFC00); b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL) & @@ -983,7 +983,7 @@ static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val) { u16 tmp; - if (dev->dev->id.revision == 16) + if (dev->sdev->id.revision == 16) b43_mac_suspend(dev); tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL); @@ -993,7 +993,7 @@ static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val) tmp |= (val & mask); b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp); - if (dev->dev->id.revision == 16) + if (dev->sdev->id.revision == 16) b43_mac_enable(dev); return tmp; @@ -1168,7 +1168,7 @@ static void 
b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev) static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev) { struct b43_phy_n *nphy = dev->phy.n; - struct ssb_sprom *sprom = &(dev->dev->bus->sprom); + struct ssb_sprom *sprom = &(dev->sdev->bus->sprom); /* PHY rev 0, 1, 2 */ u8 i, j; @@ -1373,7 +1373,7 @@ static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev) /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */ static void b43_nphy_workarounds(struct b43_wldev *dev) { - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; struct b43_phy *phy = &dev->phy; struct b43_phy_n *nphy = phy->n; @@ -3586,7 +3586,7 @@ static void b43_nphy_set_rx_core_state(struct b43_wldev *dev, u8 mask) */ int b43_phy_initn(struct b43_wldev *dev) { - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; struct b43_phy *phy = &dev->phy; struct b43_phy_n *nphy = phy->n; u8 tx_pwr_state; @@ -3601,7 +3601,7 @@ int b43_phy_initn(struct b43_wldev *dev) if ((dev->phy.rev >= 3) && (bus->sprom.boardflags_lo & B43_BFL_EXTLNA) && (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) { - chipco_set32(&dev->dev->bus->chipco, SSB_CHIPCO_CHIPCTL, 0x40); + chipco_set32(&dev->sdev->bus->chipco, SSB_CHIPCO_CHIPCTL, 0x40); } nphy->deaf_count = 0; b43_nphy_tables_init(dev); diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c index aa12273ae71..72ab94df756 100644 --- a/drivers/net/wireless/b43/pio.c +++ b/drivers/net/wireless/b43/pio.c @@ -111,7 +111,7 @@ static u16 index_to_pioqueue_base(struct b43_wldev *dev, B43_MMIO_PIO11_BASE5, }; - if (dev->dev->id.revision >= 11) { + if (dev->sdev->id.revision >= 11) { B43_WARN_ON(index >= ARRAY_SIZE(bases_rev11)); return bases_rev11[index]; } @@ -121,14 +121,14 @@ static u16 index_to_pioqueue_base(struct b43_wldev *dev, static u16 pio_txqueue_offset(struct b43_wldev *dev) { - if (dev->dev->id.revision >= 11) + if (dev->sdev->id.revision >= 11) return 0x18; return 0; } static u16 pio_rxqueue_offset(struct b43_wldev *dev) { - if (dev->dev->id.revision >= 11) + if (dev->sdev->id.revision >= 11) return 0x38; return 8; } @@ -144,7 +144,7 @@ static struct b43_pio_txqueue *b43_setup_pioqueue_tx(struct b43_wldev *dev, if (!q) return NULL; q->dev = dev; - q->rev = dev->dev->id.revision; + q->rev = dev->sdev->id.revision; q->mmio_base = index_to_pioqueue_base(dev, index) + pio_txqueue_offset(dev); q->index = index; @@ -178,7 +178,7 @@ static struct b43_pio_rxqueue *b43_setup_pioqueue_rx(struct b43_wldev *dev, if (!q) return NULL; q->dev = dev; - q->rev = dev->dev->id.revision; + q->rev = dev->sdev->id.revision; q->mmio_base = index_to_pioqueue_base(dev, index) + pio_rxqueue_offset(dev); @@ -339,7 +339,7 @@ static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q, ctl |= B43_PIO_TXCTL_WRITELO | B43_PIO_TXCTL_WRITEHI; b43_piotx_write16(q, B43_PIO_TXCTL, ctl); - ssb_block_write(dev->dev, data, (data_len & ~1), + b43_block_write(dev, data, (data_len & ~1), q->mmio_base + B43_PIO_TXDATA, sizeof(u16)); if (data_len & 1) { @@ -351,7 +351,7 @@ static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q, b43_piotx_write16(q, B43_PIO_TXCTL, ctl); tail[0] = data[data_len - 1]; tail[1] = 0; - ssb_block_write(dev->dev, tail, 2, + b43_block_write(dev, tail, 2, q->mmio_base + B43_PIO_TXDATA, sizeof(u16)); } @@ -393,7 +393,7 @@ static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q, B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_24_31; b43_piotx_write32(q, B43_PIO8_TXCTL, ctl); - ssb_block_write(dev->dev, data, (data_len & 
~3), + b43_block_write(dev, data, (data_len & ~3), q->mmio_base + B43_PIO8_TXDATA, sizeof(u32)); if (data_len & 3) { @@ -421,7 +421,7 @@ static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q, break; } b43_piotx_write32(q, B43_PIO8_TXCTL, ctl); - ssb_block_write(dev->dev, tail, 4, + b43_block_write(dev, tail, 4, q->mmio_base + B43_PIO8_TXDATA, sizeof(u32)); } @@ -657,11 +657,11 @@ data_ready: /* Get the preamble (RX header) */ if (q->rev >= 8) { - ssb_block_read(dev->dev, rxhdr, sizeof(*rxhdr), + b43_block_read(dev, rxhdr, sizeof(*rxhdr), q->mmio_base + B43_PIO8_RXDATA, sizeof(u32)); } else { - ssb_block_read(dev->dev, rxhdr, sizeof(*rxhdr), + b43_block_read(dev, rxhdr, sizeof(*rxhdr), q->mmio_base + B43_PIO_RXDATA, sizeof(u16)); } @@ -697,7 +697,7 @@ data_ready: skb_reserve(skb, 2); skb_put(skb, len + padding); if (q->rev >= 8) { - ssb_block_read(dev->dev, skb->data + padding, (len & ~3), + b43_block_read(dev, skb->data + padding, (len & ~3), q->mmio_base + B43_PIO8_RXDATA, sizeof(u32)); if (len & 3) { @@ -705,7 +705,7 @@ data_ready: BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4); /* Read the last few bytes. */ - ssb_block_read(dev->dev, tail, 4, + b43_block_read(dev, tail, 4, q->mmio_base + B43_PIO8_RXDATA, sizeof(u32)); switch (len & 3) { @@ -724,7 +724,7 @@ data_ready: } } } else { - ssb_block_read(dev->dev, skb->data + padding, (len & ~1), + b43_block_read(dev, skb->data + padding, (len & ~1), q->mmio_base + B43_PIO_RXDATA, sizeof(u16)); if (len & 1) { @@ -732,7 +732,7 @@ data_ready: BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2); /* Read the last byte. */ - ssb_block_read(dev->dev, tail, 2, + b43_block_read(dev, tail, 2, q->mmio_base + B43_PIO_RXDATA, sizeof(u16)); skb->data[len + padding - 1] = tail[0]; diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c index 86bc0a0f735..a617efe3828 100644 --- a/drivers/net/wireless/b43/rfkill.c +++ b/drivers/net/wireless/b43/rfkill.c @@ -37,7 +37,7 @@ void b43_rfkill_poll(struct ieee80211_hw *hw) { struct b43_wl *wl = hw_to_b43_wl(hw); struct b43_wldev *dev = wl->current_dev; - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; bool enabled; bool brought_up = false; @@ -47,7 +47,7 @@ void b43_rfkill_poll(struct ieee80211_hw *hw) mutex_unlock(&wl->mutex); return; } - ssb_device_enable(dev->dev, 0); + ssb_device_enable(dev->sdev, 0); brought_up = true; } @@ -63,7 +63,7 @@ void b43_rfkill_poll(struct ieee80211_hw *hw) } if (brought_up) { - ssb_device_disable(dev->dev, 0); + ssb_device_disable(dev->sdev, 0); ssb_bus_may_powerdown(bus); } diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c index 09e2dfd7b17..808e25b7970 100644 --- a/drivers/net/wireless/b43/sdio.c +++ b/drivers/net/wireless/b43/sdio.c @@ -66,7 +66,7 @@ static void b43_sdio_interrupt_dispatcher(struct sdio_func *func) int b43_sdio_request_irq(struct b43_wldev *dev, void (*handler)(struct b43_wldev *dev)) { - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; struct sdio_func *func = bus->host_sdio; struct b43_sdio *sdio = sdio_get_drvdata(func); int err; @@ -82,7 +82,7 @@ int b43_sdio_request_irq(struct b43_wldev *dev, void b43_sdio_free_irq(struct b43_wldev *dev) { - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; struct sdio_func *func = bus->host_sdio; struct b43_sdio *sdio = sdio_get_drvdata(func); diff --git a/drivers/net/wireless/b43/sysfs.c b/drivers/net/wireless/b43/sysfs.c index f1ae4e05a32..57af619725c 100644 --- 
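/*
 * Illustrative aside (not part of the patch): the pio.c hunks above swap the
 * direct ssb_block_read()/ssb_block_write() calls for b43_block_read()/
 * b43_block_write() wrappers, and "dev->dev" becomes "dev->sdev" throughout
 * the driver.  The wrapper bodies are not shown in this section; assuming the
 * device still sits on an SSB bus, a minimal forwarding sketch could look
 * like this (names with a _sketch suffix are hypothetical):
 */
static inline void b43_block_read_sketch(struct b43_wldev *dev, void *buffer,
					 size_t count, u16 offset, u8 reg_width)
{
	/* Forward to the SSB core; a later bus abstraction could branch here. */
	ssb_block_read(dev->sdev, buffer, count, offset, reg_width);
}

static inline void b43_block_write_sketch(struct b43_wldev *dev,
					  const void *buffer, size_t count,
					  u16 offset, u8 reg_width)
{
	ssb_block_write(dev->sdev, buffer, count, offset, reg_width);
}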
a/drivers/net/wireless/b43/sysfs.c +++ b/drivers/net/wireless/b43/sysfs.c @@ -140,7 +140,7 @@ static DEVICE_ATTR(interference, 0644, int b43_sysfs_register(struct b43_wldev *wldev) { - struct device *dev = wldev->dev->dev; + struct device *dev = wldev->sdev->dev; B43_WARN_ON(b43_status(wldev) != B43_STAT_INITIALIZED); @@ -149,7 +149,7 @@ int b43_sysfs_register(struct b43_wldev *wldev) void b43_sysfs_unregister(struct b43_wldev *wldev) { - struct device *dev = wldev->dev->dev; + struct device *dev = wldev->sdev->dev; device_remove_file(dev, &dev_attr_interference); } diff --git a/drivers/net/wireless/b43/tables_lpphy.c b/drivers/net/wireless/b43/tables_lpphy.c index 61027ee84fb..59df3c64af6 100644 --- a/drivers/net/wireless/b43/tables_lpphy.c +++ b/drivers/net/wireless/b43/tables_lpphy.c @@ -2304,7 +2304,7 @@ void lpphy_rev0_1_table_init(struct b43_wldev *dev) void lpphy_rev2plus_table_init(struct b43_wldev *dev) { - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; int i; B43_WARN_ON(dev->phy.rev < 2); @@ -2416,7 +2416,7 @@ void lpphy_write_gain_table_bulk(struct b43_wldev *dev, int offset, int count, void lpphy_init_tx_gain_table(struct b43_wldev *dev) { - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; switch (dev->phy.rev) { case 0: diff --git a/drivers/net/wireless/b43/wa.c b/drivers/net/wireless/b43/wa.c index 9a335da65b4..8f4db448ec3 100644 --- a/drivers/net/wireless/b43/wa.c +++ b/drivers/net/wireless/b43/wa.c @@ -458,7 +458,7 @@ static void b43_wa_rssi_adc(struct b43_wldev *dev) static void b43_wa_boards_a(struct b43_wldev *dev) { - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; if (bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM && bus->boardinfo.type == SSB_BOARD_BU4306 && @@ -486,7 +486,7 @@ static void b43_wa_boards_a(struct b43_wldev *dev) static void b43_wa_boards_g(struct b43_wldev *dev) { - struct ssb_bus *bus = dev->dev->bus; + struct ssb_bus *bus = dev->sdev->bus; struct b43_phy *phy = &dev->phy; if (bus->boardinfo.vendor != SSB_BOARDVENDOR_BCM || diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c index e5be381c17b..c8f99aebe01 100644 --- a/drivers/net/wireless/b43/xmit.c +++ b/drivers/net/wireless/b43/xmit.c @@ -547,7 +547,7 @@ static s8 b43_rssi_postprocess(struct b43_wldev *dev, else tmp -= 3; } else { - if (dev->dev->bus->sprom. + if (dev->sdev->bus->sprom. 
boardflags_lo & B43_BFL_RSSI) { if (in_rssi > 63) in_rssi = 63; diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c index b4c81931e13..61d4a11f566 100644 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c @@ -171,10 +171,6 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv) static struct iwl_lib_ops iwl1000_lib = { .set_hw_params = iwl1000_hw_set_hw_params, - .txq_set_sched = iwlagn_txq_set_sched, - .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, - .txq_free_tfd = iwl_hw_txq_free_tfd, - .txq_init = iwl_hw_tx_queue_init, .rx_handler_setup = iwlagn_rx_handler_setup, .setup_deferred_work = iwlagn_setup_deferred_work, .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c index 89b8da7a6c8..86feec86d13 100644 --- a/drivers/net/wireless/iwlwifi/iwl-2000.c +++ b/drivers/net/wireless/iwlwifi/iwl-2000.c @@ -195,9 +195,9 @@ static int iwl2030_hw_channel_switch(struct iwl_priv *priv, struct ieee80211_vif *vif = ctx->vif; struct iwl_host_cmd hcmd = { .id = REPLY_CHANNEL_SWITCH, - .len = sizeof(cmd), + .len = { sizeof(cmd), }, .flags = CMD_SYNC, - .data = &cmd, + .data = { &cmd, }, }; cmd.band = priv->band == IEEE80211_BAND_2GHZ; @@ -252,10 +252,6 @@ static int iwl2030_hw_channel_switch(struct iwl_priv *priv, static struct iwl_lib_ops iwl2000_lib = { .set_hw_params = iwl2000_hw_set_hw_params, - .txq_set_sched = iwlagn_txq_set_sched, - .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, - .txq_free_tfd = iwl_hw_txq_free_tfd, - .txq_init = iwl_hw_tx_queue_init, .rx_handler_setup = iwlagn_rx_handler_setup, .setup_deferred_work = iwlagn_bt_setup_deferred_work, .cancel_deferred_work = iwlagn_bt_cancel_deferred_work, diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index 98f81df166e..a70b8cfafda 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -282,9 +282,9 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv, struct ieee80211_vif *vif = ctx->vif; struct iwl_host_cmd hcmd = { .id = REPLY_CHANNEL_SWITCH, - .len = sizeof(cmd), + .len = { sizeof(cmd), }, .flags = CMD_SYNC, - .data = &cmd, + .data = { &cmd, }, }; cmd.band = priv->band == IEEE80211_BAND_2GHZ; @@ -339,10 +339,6 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv, static struct iwl_lib_ops iwl5000_lib = { .set_hw_params = iwl5000_hw_set_hw_params, - .txq_set_sched = iwlagn_txq_set_sched, - .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, - .txq_free_tfd = iwl_hw_txq_free_tfd, - .txq_init = iwl_hw_tx_queue_init, .rx_handler_setup = iwlagn_rx_handler_setup, .setup_deferred_work = iwlagn_setup_deferred_work, .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, @@ -374,10 +370,6 @@ static struct iwl_lib_ops iwl5000_lib = { static struct iwl_lib_ops iwl5150_lib = { .set_hw_params = iwl5150_hw_set_hw_params, - .txq_set_sched = iwlagn_txq_set_sched, - .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, - .txq_free_tfd = iwl_hw_txq_free_tfd, - .txq_init = iwl_hw_tx_queue_init, .rx_handler_setup = iwlagn_rx_handler_setup, .setup_deferred_work = iwlagn_setup_deferred_work, .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index a7921f9a03c..f8c710db6e6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ 
b/drivers/net/wireless/iwlwifi/iwl-6000.c @@ -221,9 +221,9 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv, struct ieee80211_vif *vif = ctx->vif; struct iwl_host_cmd hcmd = { .id = REPLY_CHANNEL_SWITCH, - .len = sizeof(cmd), + .len = { sizeof(cmd), }, .flags = CMD_SYNC, - .data = &cmd, + .data = { &cmd, }, }; cmd.band = priv->band == IEEE80211_BAND_2GHZ; @@ -278,10 +278,6 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv, static struct iwl_lib_ops iwl6000_lib = { .set_hw_params = iwl6000_hw_set_hw_params, - .txq_set_sched = iwlagn_txq_set_sched, - .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, - .txq_free_tfd = iwl_hw_txq_free_tfd, - .txq_init = iwl_hw_tx_queue_init, .rx_handler_setup = iwlagn_rx_handler_setup, .setup_deferred_work = iwlagn_setup_deferred_work, .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr, @@ -314,10 +310,6 @@ static struct iwl_lib_ops iwl6000_lib = { static struct iwl_lib_ops iwl6030_lib = { .set_hw_params = iwl6000_hw_set_hw_params, - .txq_set_sched = iwlagn_txq_set_sched, - .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, - .txq_free_tfd = iwl_hw_txq_free_tfd, - .txq_init = iwl_hw_tx_queue_init, .rx_handler_setup = iwlagn_bt_rx_handler_setup, .setup_deferred_work = iwlagn_bt_setup_deferred_work, .cancel_deferred_work = iwlagn_bt_cancel_deferred_work, diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c index 39d1e47a097..c9255def108 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c @@ -87,14 +87,14 @@ int iwl_send_calib_results(struct iwl_priv *priv) struct iwl_host_cmd hcmd = { .id = REPLY_PHY_CALIBRATION_CMD, - .flags = CMD_SIZE_HUGE, }; for (i = 0; i < IWL_CALIB_MAX; i++) { if ((BIT(i) & priv->hw_params.calib_init_cfg) && priv->calib_results[i].buf) { - hcmd.len = priv->calib_results[i].buf_len; - hcmd.data = priv->calib_results[i].buf; + hcmd.len[0] = priv->calib_results[i].buf_len; + hcmd.data[0] = priv->calib_results[i].buf; + hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; ret = iwl_send_cmd_sync(priv, &hcmd); if (ret) { IWL_ERR(priv, "Error %d iteration %d\n", @@ -456,9 +456,9 @@ static int iwl_sensitivity_write(struct iwl_priv *priv) struct iwl_sensitivity_data *data = NULL; struct iwl_host_cmd cmd_out = { .id = SENSITIVITY_CMD, - .len = sizeof(struct iwl_sensitivity_cmd), + .len = { sizeof(struct iwl_sensitivity_cmd), }, .flags = CMD_ASYNC, - .data = &cmd, + .data = { &cmd, }, }; data = &(priv->sensitivity_data); @@ -491,9 +491,9 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv) struct iwl_sensitivity_data *data = NULL; struct iwl_host_cmd cmd_out = { .id = SENSITIVITY_CMD, - .len = sizeof(struct iwl_enhance_sensitivity_cmd), + .len = { sizeof(struct iwl_enhance_sensitivity_cmd), }, .flags = CMD_ASYNC, - .data = &cmd, + .data = { &cmd, }, }; data = &(priv->sensitivity_data); diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c index 8e79653aed9..f803fb62f8b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c @@ -1140,8 +1140,7 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) { struct iwl_host_cmd cmd = { .id = REPLY_SCAN_CMD, - .len = sizeof(struct iwl_scan_cmd), - .flags = CMD_SIZE_HUGE, + .len = { sizeof(struct iwl_scan_cmd), }, }; struct iwl_scan_cmd *scan; struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; @@ -1425,10 +1424,11 @@ int 
iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) return -EIO; } - cmd.len += le16_to_cpu(scan->tx_cmd.len) + + cmd.len[0] += le16_to_cpu(scan->tx_cmd.len) + scan->channel_count * sizeof(struct iwl_scan_channel); - cmd.data = scan; - scan->len = cpu_to_le16(cmd.len); + cmd.data[0] = scan; + cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; + scan->len = cpu_to_le16(cmd.len[0]); /* set scan bit here for PAN params */ set_bit(STATUS_SCAN_HW, &priv->status); @@ -1520,9 +1520,9 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control) struct iwl_txfifo_flush_cmd flush_cmd; struct iwl_host_cmd cmd = { .id = REPLY_TXFIFO_FLUSH, - .len = sizeof(struct iwl_txfifo_flush_cmd), + .len = { sizeof(struct iwl_txfifo_flush_cmd), }, .flags = CMD_SYNC, - .data = &flush_cmd, + .data = { &flush_cmd, }, }; might_sleep(); diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c index 91f26556ac2..592b0cfcf71 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c @@ -335,6 +335,32 @@ static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data, return tid; } +#ifdef CONFIG_MAC80211_DEBUGFS +static void rs_program_fix_rate(struct iwl_priv *priv, + struct iwl_lq_sta *lq_sta) +{ + struct iwl_station_priv *sta_priv = + container_of(lq_sta, struct iwl_station_priv, lq_sta); + struct iwl_rxon_context *ctx = sta_priv->common.ctx; + + lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */ + lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ + lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ + lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ + + lq_sta->dbg_fixed_rate = priv->dbg_fixed_rate; + + IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n", + lq_sta->lq.sta_id, priv->dbg_fixed_rate); + + if (priv->dbg_fixed_rate) { + rs_fill_link_cmd(NULL, lq_sta, priv->dbg_fixed_rate); + iwl_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC, + false); + } +} +#endif + /* get the traffic load value for tid */ @@ -1046,7 +1072,10 @@ done: /* See if there's a better rate or modulation mode to try. 
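/*
 * Illustrative aside: struct iwl_host_cmd now carries up to IWL_MAX_CMD_TFDS
 * (two) fragments instead of a single data/len pair, and a fragment flagged
 * IWL_HCMD_DFL_NOCOPY is referenced in place via its own TFD entry rather
 * than being copied into a "huge" command buffer -- which is why the scan,
 * calibration and beacon hunks in this series drop CMD_SIZE_HUGE.  A generic
 * caller of the new API could look roughly like this (the helper name is
 * hypothetical):
 */
static int send_two_part_cmd_sketch(struct iwl_priv *priv, u8 cmd_id,
				    const void *hdr, u16 hdr_len,
				    const void *payload, u16 payload_len)
{
	struct iwl_host_cmd cmd = {
		.id = cmd_id,
		.flags = CMD_SYNC,
	};

	/* Small, fixed-size part: copied into the command queue as before. */
	cmd.data[0] = hdr;
	cmd.len[0] = hdr_len;

	/* Large payload: mapped in place through a second TFD entry. */
	cmd.data[1] = payload;
	cmd.len[1] = payload_len;
	cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;

	return iwl_send_cmd_sync(priv, &cmd);
}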
*/ if (sta && sta->supp_rates[sband->band]) rs_rate_scale_perform(priv, skb, sta, lq_sta); - +#ifdef CONFIG_MAC80211_DEBUGFS + if (priv->dbg_fixed_rate != lq_sta->dbg_fixed_rate) + rs_program_fix_rate(priv, lq_sta); +#endif if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist) rs_bt_update_lq(priv, ctx, lq_sta); } @@ -2170,11 +2199,11 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search) * setup rate table in uCode * return rate_n_flags as used in the table */ -static u32 rs_update_rate_tbl(struct iwl_priv *priv, - struct iwl_rxon_context *ctx, - struct iwl_lq_sta *lq_sta, - struct iwl_scale_tbl_info *tbl, - int index, u8 is_green) +static void rs_update_rate_tbl(struct iwl_priv *priv, + struct iwl_rxon_context *ctx, + struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, + int index, u8 is_green) { u32 rate; @@ -2182,8 +2211,6 @@ static u32 rs_update_rate_tbl(struct iwl_priv *priv, rate = rate_n_flags_from_tbl(priv, tbl, index, is_green); rs_fill_link_cmd(priv, lq_sta, rate); iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false); - - return rate; } /* @@ -2212,7 +2239,6 @@ static void rs_rate_scale_perform(struct iwl_priv *priv, u8 update_lq = 0; struct iwl_scale_tbl_info *tbl, *tbl1; u16 rate_scale_index_msk = 0; - u32 rate; u8 is_green = 0; u8 active_tbl = 0; u8 done_search = 0; @@ -2299,8 +2325,8 @@ static void rs_rate_scale_perform(struct iwl_priv *priv, tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); /* get "active" rate info */ index = iwl_hwrate_to_plcp_idx(tbl->current_rate); - rate = rs_update_rate_tbl(priv, ctx, lq_sta, - tbl, index, is_green); + rs_update_rate_tbl(priv, ctx, lq_sta, tbl, + index, is_green); } return; } @@ -2541,8 +2567,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv, lq_update: /* Replace uCode's rate table for the destination station. 
*/ if (update_lq) - rate = rs_update_rate_tbl(priv, ctx, lq_sta, - tbl, index, is_green); + rs_update_rate_tbl(priv, ctx, lq_sta, tbl, index, is_green); if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_MULTI) { /* Should we stay with this modulation mode, @@ -2871,6 +2896,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; lq_sta->is_agg = 0; + priv->dbg_fixed_rate = 0; #ifdef CONFIG_MAC80211_DEBUGFS lq_sta->dbg_fixed_rate = 0; #endif @@ -3045,7 +3071,6 @@ static void rs_free_sta(void *priv_r, struct ieee80211_sta *sta, IWL_DEBUG_RATE(priv, "leave\n"); } - #ifdef CONFIG_MAC80211_DEBUGFS static int open_file_generic(struct inode *inode, struct file *file) { @@ -3070,6 +3095,7 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta, IWL_DEBUG_RATE(priv, "Fixed rate ON\n"); } else { lq_sta->dbg_fixed_rate = 0; + priv->dbg_fixed_rate = 0; IWL_ERR(priv, "Invalid antenna selection 0x%X, Valid is 0x%X\n", ant_sel_tx, valid_tx_ant); @@ -3088,9 +3114,7 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file, char buf[64]; size_t buf_size; u32 parsed_rate; - struct iwl_station_priv *sta_priv = - container_of(lq_sta, struct iwl_station_priv, lq_sta); - struct iwl_rxon_context *ctx = sta_priv->common.ctx; + priv = lq_sta->drv; memset(buf, 0, sizeof(buf)); @@ -3099,23 +3123,11 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file, return -EFAULT; if (sscanf(buf, "%x", &parsed_rate) == 1) - lq_sta->dbg_fixed_rate = parsed_rate; + priv->dbg_fixed_rate = lq_sta->dbg_fixed_rate = parsed_rate; else - lq_sta->dbg_fixed_rate = 0; + priv->dbg_fixed_rate = lq_sta->dbg_fixed_rate = 0; - lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */ - lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ - lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ - lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */ - - IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n", - lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate); - - if (lq_sta->dbg_fixed_rate) { - rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate); - iwl_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC, - false); - } + rs_program_fix_rate(priv, lq_sta); return count; } @@ -3143,7 +3155,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, lq_sta->total_failed, lq_sta->total_success, lq_sta->active_legacy_rate); desc += sprintf(buff+desc, "fixed rate 0x%X\n", - lq_sta->dbg_fixed_rate); + priv->dbg_fixed_rate); desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n", (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "", (priv->hw_params.valid_tx_ant & ANT_B) ? 
"ANT_B," : "", @@ -3254,14 +3266,10 @@ static const struct file_operations rs_sta_dbgfs_stats_table_ops = { static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { - char buff[120]; - int desc = 0; - struct iwl_lq_sta *lq_sta = file->private_data; - struct iwl_priv *priv; struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl]; - - priv = lq_sta->drv; + char buff[120]; + int desc = 0; if (is_Ht(tbl->lq_type)) desc += sprintf(buff+desc, diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c index 02387430f7f..a95ad84c537 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c @@ -289,7 +289,6 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) /* cast away the const for active_rxon in this function */ struct iwl_rxon_cmd *active = (void *)&ctx->active; bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK); - bool old_assoc = !!(ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK); int ret; lockdep_assert_held(&priv->mutex); @@ -389,11 +388,9 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) * AP station must be done after the BSSID is set to correctly * set up filters in the device. */ - if ((old_assoc && new_assoc) || !new_assoc) { - ret = iwlagn_rxon_disconn(priv, ctx); - if (ret) - return ret; - } + ret = iwlagn_rxon_disconn(priv, ctx); + if (ret) + return ret; if (new_assoc) return iwlagn_rxon_connect(priv, ctx); diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c index 079275f2c64..0bd722cee5a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c @@ -144,7 +144,7 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, size_t cmd_size = sizeof(struct iwl_wep_cmd); struct iwl_host_cmd cmd = { .id = ctx->wep_key_cmd, - .data = wep_cmd, + .data = { wep_cmd, }, .flags = CMD_SYNC, }; @@ -172,7 +172,7 @@ static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX; - cmd.len = cmd_size; + cmd.len[0] = cmd_size; if (not_empty || send_if_empty) return iwl_send_cmd(priv, &cmd); diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c index 342de780a36..4974cd7837c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c @@ -755,12 +755,10 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) spin_unlock(&priv->sta_lock); /* Attach buffers to TFD */ - priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, - txcmd_phys, firstlen, 1, 0); + iwlagn_txq_attach_buf_to_tfd(priv, txq, txcmd_phys, firstlen, 1); if (secondlen > 0) - priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, - phys_addr, secondlen, - 0, 0); + iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, + secondlen, 0); scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + offsetof(struct iwl_tx_cmd, scratch); @@ -916,7 +914,7 @@ int iwlagn_txq_ctx_alloc(struct iwl_priv *priv) spin_lock_irqsave(&priv->lock, flags); /* Turn off all Tx DMA fifos */ - priv->cfg->ops->lib->txq_set_sched(priv, 0); + iwlagn_txq_set_sched(priv, 0); /* Tell NIC where to find the "keep warm" buffer */ iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); @@ -954,7 +952,7 @@ void iwlagn_txq_ctx_reset(struct iwl_priv *priv) 
spin_lock_irqsave(&priv->lock, flags); /* Turn off all Tx DMA fifos */ - priv->cfg->ops->lib->txq_set_sched(priv, 0); + iwlagn_txq_set_sched(priv, 0); /* Tell NIC where to find the "keep warm" buffer */ iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4); @@ -980,7 +978,7 @@ void iwlagn_txq_ctx_stop(struct iwl_priv *priv) /* Turn off all Tx DMA fifos */ spin_lock_irqsave(&priv->lock, flags); - priv->cfg->ops->lib->txq_set_sched(priv, 0); + iwlagn_txq_set_sched(priv, 0); /* Stop each Tx DMA channel, and wait for it to be idle */ for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) { @@ -1263,7 +1261,7 @@ int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) iwlagn_txq_inval_byte_cnt_tbl(priv, txq); - priv->cfg->ops->lib->txq_free_tfd(priv, txq); + iwlagn_txq_free_tfd(priv, txq); } return nfreed; } diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c index 8bda0e8d666..97de5d9de67 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c @@ -217,8 +217,8 @@ static int iwlagn_send_calib_cfg(struct iwl_priv *priv) struct iwl_calib_cfg_cmd calib_cfg_cmd; struct iwl_host_cmd cmd = { .id = CALIBRATION_CFG_CMD, - .len = sizeof(struct iwl_calib_cfg_cmd), - .data = &calib_cfg_cmd, + .len = { sizeof(struct iwl_calib_cfg_cmd), }, + .data = { &calib_cfg_cmd, }, }; memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd)); @@ -440,7 +440,7 @@ static int iwlagn_alive_notify(struct iwl_priv *priv) IWL_MASK(0, priv->hw_params.max_txq_num)); /* Activate all Tx DMA/FIFO channels */ - priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7)); + iwlagn_txq_set_sched(priv, IWL_MASK(0, 7)); /* map queues to FIFOs */ if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)) diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 08e3cae4fa5..11c6c1169e7 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -134,12 +134,10 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv) struct iwl_tx_beacon_cmd *tx_beacon_cmd; struct iwl_host_cmd cmd = { .id = REPLY_TX_BEACON, - .flags = CMD_SIZE_HUGE, }; u32 frame_size; u32 rate_flags; u32 rate; - int err; /* * We have to set up the TX command, the TX Beacon command, and the @@ -156,17 +154,15 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv) if (WARN_ON(!priv->beacon_skb)) return -EINVAL; - /* Allocate beacon memory */ - tx_beacon_cmd = kzalloc(sizeof(*tx_beacon_cmd) + priv->beacon_skb->len, - GFP_KERNEL); + /* Allocate beacon command */ + if (!priv->beacon_cmd) + priv->beacon_cmd = kzalloc(sizeof(*tx_beacon_cmd), GFP_KERNEL); + tx_beacon_cmd = priv->beacon_cmd; if (!tx_beacon_cmd) return -ENOMEM; frame_size = priv->beacon_skb->len; - /* Set up TX beacon contents */ - memcpy(tx_beacon_cmd->frame, priv->beacon_skb->data, frame_size); - /* Set up TX command fields */ tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size); tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id; @@ -175,7 +171,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv) TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK; /* Set up TX beacon command fields */ - iwl_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame, + iwl_set_beacon_tim(priv, tx_beacon_cmd, priv->beacon_skb->data, frame_size); /* Set up packet rate and flags */ @@ -189,164 +185,14 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv) rate_flags); /* Submit command */ - cmd.len = sizeof(*tx_beacon_cmd) + frame_size; - 
cmd.data = tx_beacon_cmd; - - err = iwl_send_cmd_sync(priv, &cmd); - - /* Free temporary storage */ - kfree(tx_beacon_cmd); - - return err; -} - -static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) -{ - struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + cmd.len[0] = sizeof(*tx_beacon_cmd); + cmd.data[0] = tx_beacon_cmd; + cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; + cmd.len[1] = frame_size; + cmd.data[1] = priv->beacon_skb->data; + cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY; - dma_addr_t addr = get_unaligned_le32(&tb->lo); - if (sizeof(dma_addr_t) > sizeof(u32)) - addr |= - ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16; - - return addr; -} - -static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx) -{ - struct iwl_tfd_tb *tb = &tfd->tbs[idx]; - - return le16_to_cpu(tb->hi_n_len) >> 4; -} - -static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx, - dma_addr_t addr, u16 len) -{ - struct iwl_tfd_tb *tb = &tfd->tbs[idx]; - u16 hi_n_len = len << 4; - - put_unaligned_le32(addr, &tb->lo); - if (sizeof(dma_addr_t) > sizeof(u32)) - hi_n_len |= ((addr >> 16) >> 16) & 0xF; - - tb->hi_n_len = cpu_to_le16(hi_n_len); - - tfd->num_tbs = idx + 1; -} - -static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd) -{ - return tfd->num_tbs & 0x1f; -} - -/** - * iwl_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] - * @priv - driver private data - * @txq - tx queue - * - * Does NOT advance any TFD circular buffer read/write indexes - * Does NOT free the TFD itself (which is within circular buffer) - */ -void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) -{ - struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds; - struct iwl_tfd *tfd; - struct pci_dev *dev = priv->pci_dev; - int index = txq->q.read_ptr; - int i; - int num_tbs; - - tfd = &tfd_tmp[index]; - - /* Sanity check on number of chunks */ - num_tbs = iwl_tfd_get_num_tbs(tfd); - - if (num_tbs >= IWL_NUM_OF_TBS) { - IWL_ERR(priv, "Too many chunks: %i\n", num_tbs); - /* @todo issue fatal error, it is quite serious situation */ - return; - } - - /* Unmap tx_cmd */ - if (num_tbs) - pci_unmap_single(dev, - dma_unmap_addr(&txq->meta[index], mapping), - dma_unmap_len(&txq->meta[index], len), - PCI_DMA_BIDIRECTIONAL); - - /* Unmap chunks, if any. 
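/*
 * Illustrative aside on the TFD helpers being removed here (they reappear in
 * iwl-tx.c later in this patch): each iwl_tfd_tb packs a 36-bit DMA address
 * and a 12-bit length into 48 bits -- "lo" holds address bits 31:0, while the
 * 16-bit "hi_n_len" keeps address bits 35:32 in its low nibble and the length
 * in its upper 12 bits.  A hypothetical standalone encoder for that layout:
 */
static inline void tfd_tb_pack_sketch(__le32 *lo, __le16 *hi_n_len,
				      u64 addr, u16 len)
{
	u16 hi = ((addr >> 32) & 0xF) | (len << 4);

	*lo = cpu_to_le32(lower_32_bits(addr));	/* address bits 31:0      */
	*hi_n_len = cpu_to_le16(hi);		/* addr[35:32] | len << 4 */
}

static inline u16 tfd_tb_len_sketch(__le16 hi_n_len)
{
	return le16_to_cpu(hi_n_len) >> 4;	/* mirrors iwl_tfd_tb_get_len() */
}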
*/ - for (i = 1; i < num_tbs; i++) - pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i), - iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE); - - /* free SKB */ - if (txq->txb) { - struct sk_buff *skb; - - skb = txq->txb[txq->q.read_ptr].skb; - - /* can be called from irqs-disabled context */ - if (skb) { - dev_kfree_skb_any(skb); - txq->txb[txq->q.read_ptr].skb = NULL; - } - } -} - -int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, - struct iwl_tx_queue *txq, - dma_addr_t addr, u16 len, - u8 reset, u8 pad) -{ - struct iwl_queue *q; - struct iwl_tfd *tfd, *tfd_tmp; - u32 num_tbs; - - q = &txq->q; - tfd_tmp = (struct iwl_tfd *)txq->tfds; - tfd = &tfd_tmp[q->write_ptr]; - - if (reset) - memset(tfd, 0, sizeof(*tfd)); - - num_tbs = iwl_tfd_get_num_tbs(tfd); - - /* Each TFD can point to a maximum 20 Tx buffers */ - if (num_tbs >= IWL_NUM_OF_TBS) { - IWL_ERR(priv, "Error can not send more than %d chunks\n", - IWL_NUM_OF_TBS); - return -EINVAL; - } - - if (WARN_ON(addr & ~DMA_BIT_MASK(36))) - return -EINVAL; - - if (unlikely(addr & ~IWL_TX_DMA_MASK)) - IWL_ERR(priv, "Unaligned address = %llx\n", - (unsigned long long)addr); - - iwl_tfd_set_tb(tfd, num_tbs, addr, len); - - return 0; -} - -/* - * Tell nic where to find circular buffer of Tx Frame Descriptors for - * given Tx queue, and enable the DMA channel used for that queue. - * - * supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA - * channels supported in hardware. - */ -int iwl_hw_tx_queue_init(struct iwl_priv *priv, - struct iwl_tx_queue *txq) -{ - int txq_id = txq->q.id; - - /* Circular buffer (TFD queue in DRAM) physical base address */ - iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id), - txq->q.dma_addr >> 8); - - return 0; + return iwl_send_cmd_sync(priv, &cmd); } static void iwl_bg_beacon_update(struct work_struct *work) @@ -1776,10 +1622,7 @@ static const char *desc_lookup(u32 num) void iwl_dump_nic_error_log(struct iwl_priv *priv) { - u32 data2, line; - u32 desc, time, count, base, data1; - u32 blink1, blink2, ilink1, ilink2; - u32 pc, hcmd; + u32 base; struct iwl_error_event_table table; base = priv->device_pointers.error_event_table; @@ -1802,37 +1645,40 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv) iwl_read_targ_mem_words(priv, base, &table, sizeof(table)); - count = table.valid; - - if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { + if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { IWL_ERR(priv, "Start IWL Error Log Dump:\n"); IWL_ERR(priv, "Status: 0x%08lX, count: %d\n", - priv->status, count); - } - - desc = table.error_id; - priv->isr_stats.err_code = desc; - pc = table.pc; - blink1 = table.blink1; - blink2 = table.blink2; - ilink1 = table.ilink1; - ilink2 = table.ilink2; - data1 = table.data1; - data2 = table.data2; - line = table.line; - time = table.tsf_low; - hcmd = table.hcmd; - - trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, data2, line, - blink1, blink2, ilink1, ilink2); - - IWL_ERR(priv, "Desc Time " - "data1 data2 line\n"); - IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n", - desc_lookup(desc), desc, time, data1, data2, line); - IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n"); - IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", - pc, blink1, blink2, ilink1, ilink2, hcmd); + priv->status, table.valid); + } + + priv->isr_stats.err_code = table.error_id; + + trace_iwlwifi_dev_ucode_error(priv, table.error_id, table.tsf_low, + table.data1, table.data2, table.line, + table.blink1, table.blink2, table.ilink1, + table.ilink2, table.bcon_time, table.gp1, 
+ table.gp2, table.gp3, table.ucode_ver, + table.hw_ver, table.brd_ver); + IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id, + desc_lookup(table.error_id)); + IWL_ERR(priv, "0x%08X | uPc\n", table.pc); + IWL_ERR(priv, "0x%08X | branchlink1\n", table.blink1); + IWL_ERR(priv, "0x%08X | branchlink2\n", table.blink2); + IWL_ERR(priv, "0x%08X | interruptlink1\n", table.ilink1); + IWL_ERR(priv, "0x%08X | interruptlink2\n", table.ilink2); + IWL_ERR(priv, "0x%08X | data1\n", table.data1); + IWL_ERR(priv, "0x%08X | data2\n", table.data2); + IWL_ERR(priv, "0x%08X | line\n", table.line); + IWL_ERR(priv, "0x%08X | beacon time\n", table.bcon_time); + IWL_ERR(priv, "0x%08X | tsf low\n", table.tsf_low); + IWL_ERR(priv, "0x%08X | tsf hi\n", table.tsf_hi); + IWL_ERR(priv, "0x%08X | time gp1\n", table.gp1); + IWL_ERR(priv, "0x%08X | time gp2\n", table.gp2); + IWL_ERR(priv, "0x%08X | time gp3\n", table.gp3); + IWL_ERR(priv, "0x%08X | uCode version\n", table.ucode_ver); + IWL_ERR(priv, "0x%08X | hw version\n", table.hw_ver); + IWL_ERR(priv, "0x%08X | board version\n", table.brd_ver); + IWL_ERR(priv, "0x%08X | hcmd\n", table.hcmd); } #define EVENT_START_OFFSET (4 * sizeof(u32)) @@ -2114,8 +1960,8 @@ static int iwlagn_send_calib_cfg_rt(struct iwl_priv *priv, u32 cfg) struct iwl_calib_cfg_cmd calib_cfg_cmd; struct iwl_host_cmd cmd = { .id = CALIBRATION_CFG_CMD, - .len = sizeof(struct iwl_calib_cfg_cmd), - .data = &calib_cfg_cmd, + .len = { sizeof(struct iwl_calib_cfg_cmd), }, + .data = { &calib_cfg_cmd, }, }; memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd)); @@ -3395,6 +3241,7 @@ static void iwl_uninit_drv(struct iwl_priv *priv) iwlcore_free_geos(priv); iwl_free_channel_map(priv); kfree(priv->scan_cmd); + kfree(priv->beacon_cmd); } struct ieee80211_ops iwlagn_hw_ops = { @@ -3812,6 +3659,7 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev) */ set_bit(STATUS_EXIT_PENDING, &priv->status); + iwl_testmode_cleanup(priv); iwl_leds_exit(priv); if (priv->mac80211_registered) { diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h index fe33fe8aa41..2495fe7a58c 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.h +++ b/drivers/net/wireless/iwlwifi/iwl-agn.h @@ -191,12 +191,10 @@ int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); void iwl_setup_rx_handlers(struct iwl_priv *priv); /* tx */ -void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); -int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, +void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); +int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq, - dma_addr_t addr, u16 len, u8 reset, u8 pad); -int iwl_hw_tx_queue_init(struct iwl_priv *priv, - struct iwl_tx_queue *txq); + dma_addr_t addr, u16 len, u8 reset); void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags, struct ieee80211_tx_info *info); int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb); @@ -345,6 +343,7 @@ extern int iwl_alive_start(struct iwl_priv *priv); #ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL extern int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len); extern void iwl_testmode_init(struct iwl_priv *priv); +extern void iwl_testmode_cleanup(struct iwl_priv *priv); #else static inline int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len) @@ -355,6 +354,10 @@ static inline void iwl_testmode_init(struct iwl_priv *priv) { } +static inline +void iwl_testmode_cleanup(struct iwl_priv *priv) +{ +} #endif #endif /* 
__iwl_agn_h__ */ diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h index 5fdad653211..6ee5f1aa555 100644 --- a/drivers/net/wireless/iwlwifi/iwl-commands.h +++ b/drivers/net/wireless/iwlwifi/iwl-commands.h @@ -205,7 +205,6 @@ enum { #define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8) #define SEQ_TO_INDEX(s) ((s) & 0xff) #define INDEX_TO_SEQ(i) ((i) & 0xff) -#define SEQ_HUGE_FRAME cpu_to_le16(0x4000) #define SEQ_RX_FRAME cpu_to_le16(0x8000) /** @@ -234,9 +233,7 @@ struct iwl_cmd_header { * * 0:7 tfd index - position within TX queue * 8:12 TX queue id - * 13 reserved - * 14 huge - driver sets this to indicate command is in the - * 'huge' storage at the end of the command buffers + * 13:14 reserved * 15 unsolicited RX or uCode-originated notification */ __le16 sequence; diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h index 5b5b0cce4a5..3bb76f6ea41 100644 --- a/drivers/net/wireless/iwlwifi/iwl-core.h +++ b/drivers/net/wireless/iwlwifi/iwl-core.h @@ -127,16 +127,6 @@ struct iwl_temp_ops { struct iwl_lib_ops { /* set hw dependent parameters */ int (*set_hw_params)(struct iwl_priv *priv); - /* Handling TX */ - void (*txq_set_sched)(struct iwl_priv *priv, u32 mask); - int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv, - struct iwl_tx_queue *txq, - dma_addr_t addr, - u16 len, u8 reset, u8 pad); - void (*txq_free_tfd)(struct iwl_priv *priv, - struct iwl_tx_queue *txq); - int (*txq_init)(struct iwl_priv *priv, - struct iwl_tx_queue *txq); /* setup Rx handler */ void (*rx_handler_setup)(struct iwl_priv *priv); /* setup deferred work */ diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h index 214e4658c49..22a6e3ec709 100644 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h @@ -48,8 +48,6 @@ #include "iwl-agn-rs.h" #include "iwl-agn-tt.h" -#define U32_PAD(n) ((4-(n))&0x3) - struct iwl_tx_queue; /* CT-KILL constants */ @@ -83,7 +81,7 @@ struct iwl_tx_queue; #define MAX_RTS_THRESHOLD 2347U #define MAX_MSDU_SIZE 2304U #define MAX_MPDU_SIZE 2346U -#define DEFAULT_BEACON_INTERVAL 100U +#define DEFAULT_BEACON_INTERVAL 200U #define DEFAULT_SHORT_RETRY_LIMIT 7U #define DEFAULT_LONG_RETRY_LIMIT 4U @@ -112,8 +110,6 @@ struct iwl_cmd_meta { struct iwl_device_cmd *cmd, struct iwl_rx_packet *pkt); - /* The CMD_SIZE_HUGE flag bit indicates that the command - * structure is stored at the end of the shared queue memory. */ u32 flags; DEFINE_DMA_UNMAP_ADDR(mapping); @@ -123,7 +119,23 @@ struct iwl_cmd_meta { /* * Generic queue structure * - * Contains common data for Rx and Tx queues + * Contains common data for Rx and Tx queues. + * + * Note the difference between n_bd and n_window: the hardware + * always assumes 256 descriptors, so n_bd is always 256 (unless + * there might be HW changes in the future). For the normal TX + * queues, n_window, which is the size of the software queue data + * is also 256; however, for the command queue, n_window is only + * 32 since we don't need so many commands pending. Since the HW + * still uses 256 BDs for DMA though, n_bd stays 256. As a result, + * the software buffers (in the variables @meta, @txb in struct + * iwl_tx_queue) only have 32 entries, while the HW buffers (@tfds + * in the same struct) have 256. + * This means that we end up with the following: + * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 | + * SW entries: | 0 | ... | 31 | + * where N is a number between 0 and 7. 
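/*
 * Illustrative aside on the n_bd/n_window note in this hunk: the hardware
 * ring always has 256 descriptors, while the command queue keeps only 32
 * software entries, so a hardware index is folded into the software window
 * with a power-of-two mask -- exactly what the reworked get_cmd_index()
 * does now that the "huge" slot is gone.
 */
static inline u32 cmd_window_index_sketch(u32 hw_index, u32 n_window)
{
	/* n_window must be a power of two (32 for the command queue). */
	return hw_index & (n_window - 1);
}
/* e.g. hardware index 200 with n_window 32 lands in software slot 200 & 31 = 8 */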
This means that the SW + * data is a window overlayed over the HW queue. */ struct iwl_queue { int n_bd; /* number of BDs in this queue */ @@ -165,7 +177,7 @@ struct iwl_tx_info { struct iwl_tx_queue { struct iwl_queue q; - void *tfds; + struct iwl_tfd *tfds; struct iwl_device_cmd **cmd; struct iwl_cmd_meta *meta; struct iwl_tx_info *txb; @@ -247,7 +259,6 @@ enum { CMD_SYNC = 0, CMD_SIZE_NORMAL = 0, CMD_NO_SKB = 0, - CMD_SIZE_HUGE = (1 << 0), CMD_ASYNC = (1 << 1), CMD_WANT_SKB = (1 << 2), CMD_MAPPED = (1 << 3), @@ -259,8 +270,8 @@ enum { * struct iwl_device_cmd * * For allocation of the command and tx queues, this establishes the overall - * size of the largest command we send to uCode, except for a scan command - * (which is relatively huge; space is allocated separately). + * size of the largest command we send to uCode, except for commands that + * aren't fully copied and use other TFD space. */ struct iwl_device_cmd { struct iwl_cmd_header hdr; /* uCode API */ @@ -277,15 +288,21 @@ struct iwl_device_cmd { #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) +#define IWL_MAX_CMD_TFDS 2 + +enum iwl_hcmd_dataflag { + IWL_HCMD_DFL_NOCOPY = BIT(0), +}; struct iwl_host_cmd { - const void *data; + const void *data[IWL_MAX_CMD_TFDS]; unsigned long reply_page; void (*callback)(struct iwl_priv *priv, struct iwl_device_cmd *cmd, struct iwl_rx_packet *pkt); u32 flags; - u16 len; + u16 len[IWL_MAX_CMD_TFDS]; + u8 dataflags[IWL_MAX_CMD_TFDS]; u8 id; }; @@ -688,17 +705,8 @@ static inline int iwl_queue_used(const struct iwl_queue *q, int i) } -static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge) +static inline u8 get_cmd_index(struct iwl_queue *q, u32 index) { - /* - * This is for init calibration result and scan command which - * required buffer > TFD_MAX_PAYLOAD_SIZE, - * the big buffer at end of command array - */ - if (is_huge) - return q->n_window; /* must be power of 2 */ - - /* Otherwise, use normal size buffers */ return index & (q->n_window - 1); } @@ -1171,6 +1179,14 @@ enum iwl_scan_type { IWL_SCAN_OFFCH_TX, }; +#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL +struct iwl_testmode_trace { + u8 *cpu_addr; + u8 *trace_addr; + dma_addr_t dma_addr; + bool trace_enabled; +}; +#endif struct iwl_priv { /* ieee device used by generic ieee processing code */ @@ -1452,6 +1468,7 @@ struct iwl_priv { struct work_struct beacon_update; struct iwl_rxon_context *beacon_ctx; struct sk_buff *beacon_skb; + void *beacon_cmd; struct work_struct tt_work; struct work_struct ct_enter; @@ -1501,6 +1518,11 @@ struct iwl_priv { struct led_classdev led; unsigned long blink_on, blink_off; bool led_registered; +#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL + struct iwl_testmode_trace testmode_trace; +#endif + u32 dbg_fixed_rate; + }; /*iwl_priv */ static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id) diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h index f00172cb8a6..2c84ba95afc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h +++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h @@ -137,20 +137,27 @@ TRACE_EVENT(iwlwifi_dev_ucode_wrap_event, #define TRACE_SYSTEM iwlwifi TRACE_EVENT(iwlwifi_dev_hcmd, - TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags), - TP_ARGS(priv, hcmd, len, flags), + TP_PROTO(struct iwl_priv *priv, u32 flags, + const void *hcmd0, size_t len0, + const void *hcmd1, size_t len1, + const void *hcmd2, size_t len2), + TP_ARGS(priv, flags, hcmd0, len0, hcmd1, len1, hcmd2, len2), TP_STRUCT__entry( 
PRIV_ENTRY - __dynamic_array(u8, hcmd, len) + __dynamic_array(u8, hcmd0, len0) + __dynamic_array(u8, hcmd1, len1) + __dynamic_array(u8, hcmd2, len2) __field(u32, flags) ), TP_fast_assign( PRIV_ASSIGN; - memcpy(__get_dynamic_array(hcmd), hcmd, len); + memcpy(__get_dynamic_array(hcmd0), hcmd0, len0); + memcpy(__get_dynamic_array(hcmd1), hcmd1, len1); + memcpy(__get_dynamic_array(hcmd2), hcmd2, len2); __entry->flags = flags; ), TP_printk("[%p] hcmd %#.2x (%ssync)", - __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0], + __entry->priv, ((u8 *)__get_dynamic_array(hcmd0))[0], __entry->flags & CMD_ASYNC ? "a" : "") ); @@ -202,15 +209,18 @@ TRACE_EVENT(iwlwifi_dev_tx, ); TRACE_EVENT(iwlwifi_dev_ucode_error, - TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time, + TP_PROTO(struct iwl_priv *priv, u32 desc, u32 tsf_low, u32 data1, u32 data2, u32 line, u32 blink1, - u32 blink2, u32 ilink1, u32 ilink2), - TP_ARGS(priv, desc, time, data1, data2, line, - blink1, blink2, ilink1, ilink2), + u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time, + u32 gp1, u32 gp2, u32 gp3, u32 ucode_ver, u32 hw_ver, + u32 brd_ver), + TP_ARGS(priv, desc, tsf_low, data1, data2, line, + blink1, blink2, ilink1, ilink2, bcon_time, gp1, gp2, + gp3, ucode_ver, hw_ver, brd_ver), TP_STRUCT__entry( PRIV_ENTRY __field(u32, desc) - __field(u32, time) + __field(u32, tsf_low) __field(u32, data1) __field(u32, data2) __field(u32, line) @@ -218,11 +228,18 @@ TRACE_EVENT(iwlwifi_dev_ucode_error, __field(u32, blink2) __field(u32, ilink1) __field(u32, ilink2) + __field(u32, bcon_time) + __field(u32, gp1) + __field(u32, gp2) + __field(u32, gp3) + __field(u32, ucode_ver) + __field(u32, hw_ver) + __field(u32, brd_ver) ), TP_fast_assign( PRIV_ASSIGN; __entry->desc = desc; - __entry->time = time; + __entry->tsf_low = tsf_low; __entry->data1 = data1; __entry->data2 = data2; __entry->line = line; @@ -230,12 +247,25 @@ TRACE_EVENT(iwlwifi_dev_ucode_error, __entry->blink2 = blink2; __entry->ilink1 = ilink1; __entry->ilink2 = ilink2; + __entry->bcon_time = bcon_time; + __entry->gp1 = gp1; + __entry->gp2 = gp2; + __entry->gp3 = gp3; + __entry->ucode_ver = ucode_ver; + __entry->hw_ver = hw_ver; + __entry->brd_ver = brd_ver; ), TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, " - "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X", - __entry->priv, __entry->desc, __entry->time, __entry->data1, + "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X " + "bcon_tm %010u gp 0x%08X 0x%08X 0x%08X uCode 0x%08X " + "hw 0x%08X brd 0x%08X", + __entry->priv, __entry->desc, __entry->tsf_low, + __entry->data1, __entry->data2, __entry->line, __entry->blink1, - __entry->blink2, __entry->ilink1, __entry->ilink2) + __entry->blink2, __entry->ilink1, __entry->ilink2, + __entry->bcon_time, __entry->gp1, __entry->gp2, + __entry->gp3, __entry->ucode_ver, __entry->hw_ver, + __entry->brd_ver) ); TRACE_EVENT(iwlwifi_dev_ucode_event, diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c index c8397962632..47a56bc1cd1 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c @@ -216,15 +216,14 @@ static int iwl_eeprom_verify_signature(struct iwl_priv *priv) static void iwl_set_otp_access(struct iwl_priv *priv, enum iwl_access_mode mode) { - u32 otpgp; + iwl_read32(priv, CSR_OTP_GP_REG); - otpgp = iwl_read32(priv, CSR_OTP_GP_REG); if (mode == IWL_OTP_ACCESS_ABSOLUTE) iwl_clear_bit(priv, CSR_OTP_GP_REG, - CSR_OTP_GP_REG_OTP_ACCESS_MODE); + CSR_OTP_GP_REG_OTP_ACCESS_MODE); else iwl_set_bit(priv, CSR_OTP_GP_REG, - 
CSR_OTP_GP_REG_OTP_ACCESS_MODE); + CSR_OTP_GP_REG_OTP_ACCESS_MODE); } static int iwlcore_get_nvm_type(struct iwl_priv *priv, u32 hw_rev) diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c index 8f0beb992cc..76f99662314 100644 --- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c +++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c @@ -188,6 +188,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd) cmd_idx = iwl_enqueue_hcmd(priv, cmd); if (cmd_idx < 0) { ret = cmd_idx; + clear_bit(STATUS_HCMD_ACTIVE, &priv->status); IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n", get_cmd_string(cmd->id), ret); return ret; @@ -264,8 +265,8 @@ int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data) { struct iwl_host_cmd cmd = { .id = id, - .len = len, - .data = data, + .len = { len, }, + .data = { data, }, }; return iwl_send_cmd_sync(priv, &cmd); @@ -279,8 +280,8 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv, { struct iwl_host_cmd cmd = { .id = id, - .len = len, - .data = data, + .len = { len, }, + .data = { data, }, }; cmd.flags |= CMD_ASYNC; diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c index 439187f903c..7c23beb49d7 100644 --- a/drivers/net/wireless/iwlwifi/iwl-led.c +++ b/drivers/net/wireless/iwlwifi/iwl-led.c @@ -107,8 +107,8 @@ static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd) { struct iwl_host_cmd cmd = { .id = REPLY_LEDS_CMD, - .len = sizeof(struct iwl_led_cmd), - .data = led_cmd, + .len = { sizeof(struct iwl_led_cmd), }, + .data = { led_cmd, }, .flags = CMD_ASYNC, .callback = NULL, }; diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c index 3c8cebde16c..7df2814fd4f 100644 --- a/drivers/net/wireless/iwlwifi/iwl-sta.c +++ b/drivers/net/wireless/iwlwifi/iwl-sta.c @@ -141,7 +141,7 @@ int iwl_send_add_sta(struct iwl_priv *priv, struct iwl_host_cmd cmd = { .id = REPLY_ADD_STA, .flags = flags, - .data = data, + .data = { data, }, }; u8 sta_id __maybe_unused = sta->sta.sta_id; @@ -155,7 +155,7 @@ int iwl_send_add_sta(struct iwl_priv *priv, might_sleep(); } - cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data); + cmd.len[0] = priv->cfg->ops->utils->build_addsta_hcmd(sta, data); ret = iwl_send_cmd(priv, &cmd); if (ret || (flags & CMD_ASYNC)) @@ -401,9 +401,9 @@ static int iwl_send_remove_station(struct iwl_priv *priv, struct iwl_host_cmd cmd = { .id = REPLY_REMOVE_STA, - .len = sizeof(struct iwl_rem_sta_cmd), + .len = { sizeof(struct iwl_rem_sta_cmd), }, .flags = CMD_SYNC, - .data = &rm_sta_cmd, + .data = { &rm_sta_cmd, }, }; memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd)); @@ -760,9 +760,9 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct iwl_host_cmd cmd = { .id = REPLY_TX_LINK_QUALITY_CMD, - .len = sizeof(struct iwl_link_quality_cmd), + .len = { sizeof(struct iwl_link_quality_cmd), }, .flags = flags, - .data = lq, + .data = { lq, }, }; if (WARN_ON(lq->sta_id == IWL_INVALID_STATION)) diff --git a/drivers/net/wireless/iwlwifi/iwl-sv-open.c b/drivers/net/wireless/iwlwifi/iwl-sv-open.c index 89b6696622c..69b7e6bf2d6 100644 --- a/drivers/net/wireless/iwlwifi/iwl-sv-open.c +++ b/drivers/net/wireless/iwlwifi/iwl-sv-open.c @@ -97,6 +97,13 @@ struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = { [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, }, [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, }, + + [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, }, + + 
[IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, }, + [IWL_TM_ATTR_TRACE_DATA] = { .type = NLA_UNSPEC, }, + + [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, }, }; /* @@ -167,6 +174,31 @@ nla_put_failure: void iwl_testmode_init(struct iwl_priv *priv) { priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt; + priv->testmode_trace.trace_enabled = false; +} + +static void iwl_trace_cleanup(struct iwl_priv *priv) +{ + struct device *dev = &priv->pci_dev->dev; + + if (priv->testmode_trace.trace_enabled) { + if (priv->testmode_trace.cpu_addr && + priv->testmode_trace.dma_addr) + dma_free_coherent(dev, + TRACE_TOTAL_SIZE, + priv->testmode_trace.cpu_addr, + priv->testmode_trace.dma_addr); + priv->testmode_trace.trace_enabled = false; + priv->testmode_trace.cpu_addr = NULL; + priv->testmode_trace.trace_addr = NULL; + priv->testmode_trace.dma_addr = 0; + } +} + + +void iwl_testmode_cleanup(struct iwl_priv *priv) +{ + iwl_trace_cleanup(priv); } /* @@ -198,10 +230,11 @@ static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb) } cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]); - cmd.data = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]); - cmd.len = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]); + cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]); + cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]); + cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; IWL_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x," - " len %d\n", cmd.id, cmd.flags, cmd.len); + " len %d\n", cmd.id, cmd.flags, cmd.len[0]); /* ok, let's submit the command to ucode */ return iwl_send_cmd(priv, &cmd); } @@ -388,6 +421,38 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) "Error starting the device: %d\n", status); break; + case IWL_TM_CMD_APP2DEV_GET_EEPROM: + if (priv->eeprom) { + skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, + priv->cfg->base_params->eeprom_size + 20); + if (!skb) { + IWL_DEBUG_INFO(priv, + "Error allocating memory\n"); + return -ENOMEM; + } + NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, + IWL_TM_CMD_DEV2APP_EEPROM_RSP); + NLA_PUT(skb, IWL_TM_ATTR_EEPROM, + priv->cfg->base_params->eeprom_size, + priv->eeprom); + status = cfg80211_testmode_reply(skb); + if (status < 0) + IWL_DEBUG_INFO(priv, + "Error sending msg : %d\n", + status); + } else + return -EFAULT; + break; + + case IWL_TM_CMD_APP2DEV_FIXRATE_REQ: + if (!tb[IWL_TM_ATTR_FIXRATE]) { + IWL_DEBUG_INFO(priv, + "Error finding fixrate setting\n"); + return -ENOMSG; + } + priv->dbg_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]); + break; + default: IWL_DEBUG_INFO(priv, "Unknown testmode driver command ID\n"); return -ENOSYS; @@ -399,6 +464,102 @@ nla_put_failure: return -EMSGSIZE; } + +/* + * This function handles the user application commands for uCode trace + * + * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the + * handlers respectively. + * + * If it's an unknown commdn ID, -ENOSYS is replied; otherwise, the returned + * value of the actual command execution is replied to the user application. 
+ * + * @hw: ieee80211_hw object that represents the device + * @tb: gnl message fields from the user space + */ +static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb) +{ + struct iwl_priv *priv = hw->priv; + struct sk_buff *skb; + int status = 0; + struct device *dev = &priv->pci_dev->dev; + + switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { + case IWL_TM_CMD_APP2DEV_BEGIN_TRACE: + if (priv->testmode_trace.trace_enabled) + return -EBUSY; + + priv->testmode_trace.cpu_addr = + dma_alloc_coherent(dev, + TRACE_TOTAL_SIZE, + &priv->testmode_trace.dma_addr, + GFP_KERNEL); + if (!priv->testmode_trace.cpu_addr) + return -ENOMEM; + priv->testmode_trace.trace_enabled = true; + priv->testmode_trace.trace_addr = (u8 *)PTR_ALIGN( + priv->testmode_trace.cpu_addr, 0x100); + memset(priv->testmode_trace.trace_addr, 0x03B, + TRACE_BUFF_SIZE); + skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, + sizeof(priv->testmode_trace.dma_addr) + 20); + if (!skb) { + IWL_DEBUG_INFO(priv, + "Error allocating memory\n"); + iwl_trace_cleanup(priv); + return -ENOMEM; + } + NLA_PUT(skb, IWL_TM_ATTR_TRACE_ADDR, + sizeof(priv->testmode_trace.dma_addr), + (u64 *)&priv->testmode_trace.dma_addr); + status = cfg80211_testmode_reply(skb); + if (status < 0) { + IWL_DEBUG_INFO(priv, + "Error sending msg : %d\n", + status); + } + break; + + case IWL_TM_CMD_APP2DEV_END_TRACE: + iwl_trace_cleanup(priv); + break; + + case IWL_TM_CMD_APP2DEV_READ_TRACE: + if (priv->testmode_trace.trace_enabled && + priv->testmode_trace.trace_addr) { + skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, + 20 + TRACE_BUFF_SIZE); + if (skb == NULL) { + IWL_DEBUG_INFO(priv, + "Error allocating memory\n"); + return -ENOMEM; + } + NLA_PUT(skb, IWL_TM_ATTR_TRACE_DATA, + TRACE_BUFF_SIZE, + priv->testmode_trace.trace_addr); + status = cfg80211_testmode_reply(skb); + if (status < 0) { + IWL_DEBUG_INFO(priv, + "Error sending msg : %d\n", status); + } + } else + return -EFAULT; + break; + + default: + IWL_DEBUG_INFO(priv, "Unknown testmode mem command ID\n"); + return -ENOSYS; + } + return status; + +nla_put_failure: + kfree_skb(skb); + if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) == + IWL_TM_CMD_APP2DEV_BEGIN_TRACE) + iwl_trace_cleanup(priv); + return -EMSGSIZE; +} + /* The testmode gnl message handler that takes the gnl message from the * user space and parses it per the policy iwl_testmode_gnl_msg_policy, then * invoke the corresponding handlers. 
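The BEGIN_TRACE handler above allocates TRACE_TOTAL_SIZE bytes of coherent memory and then rounds the CPU address up to a 0x100 boundary with PTR_ALIGN() before using TRACE_BUFF_SIZE bytes of it, which is why the total size carries extra padding. A minimal userspace-style sketch of that arithmetic, reusing the TRACE_* defines added in iwl-testmode.h (align_up() and main() are illustrative stand-ins, not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define TRACE_BUFF_SIZE  0x20000
#define TRACE_BUFF_PADD  0x2000
#define TRACE_TOTAL_SIZE (TRACE_BUFF_SIZE + TRACE_BUFF_PADD)

/* stand-in for the kernel's PTR_ALIGN(): round p up to a multiple of a */
static uintptr_t align_up(uintptr_t p, uintptr_t a)
{
        return (p + a - 1) & ~(a - 1);
}

int main(void)
{
        uintptr_t cpu_addr = 0x12345601;        /* arbitrary unaligned start */
        uintptr_t trace = align_up(cpu_addr, 0x100);
        uintptr_t skipped = trace - cpu_addr;

        /* at most 0xff bytes are lost to alignment, well under the padding */
        printf("skipped %#lx bytes, spare headroom %#lx bytes\n",
               (unsigned long)skipped,
               (unsigned long)(TRACE_TOTAL_SIZE - TRACE_BUFF_SIZE - skipped));
        return 0;
}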
@@ -455,9 +616,19 @@ int iwl_testmode_cmd(struct ieee80211_hw *hw, void *data, int len) case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW: case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB: case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW: + case IWL_TM_CMD_APP2DEV_GET_EEPROM: + case IWL_TM_CMD_APP2DEV_FIXRATE_REQ: IWL_DEBUG_INFO(priv, "testmode cmd to driver\n"); result = iwl_testmode_driver(hw, tb); break; + + case IWL_TM_CMD_APP2DEV_BEGIN_TRACE: + case IWL_TM_CMD_APP2DEV_END_TRACE: + case IWL_TM_CMD_APP2DEV_READ_TRACE: + IWL_DEBUG_INFO(priv, "testmode uCode trace cmd to driver\n"); + result = iwl_testmode_trace(hw, tb); + break; + default: IWL_DEBUG_INFO(priv, "Unknown testmode command\n"); result = -ENOSYS; diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.h b/drivers/net/wireless/iwlwifi/iwl-testmode.h index 31f8949f280..a88085e9b36 100644 --- a/drivers/net/wireless/iwlwifi/iwl-testmode.h +++ b/drivers/net/wireless/iwlwifi/iwl-testmode.h @@ -88,9 +88,15 @@ enum iwl_tm_cmd_t { IWL_TM_CMD_APP2DEV_LOAD_INIT_FW, IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB, IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW, + IWL_TM_CMD_APP2DEV_GET_EEPROM, + IWL_TM_CMD_APP2DEV_FIXRATE_REQ, /* if there is other new command for the driver layer operation, * append them here */ + /* commands fom user space for uCode trace operations */ + IWL_TM_CMD_APP2DEV_BEGIN_TRACE, + IWL_TM_CMD_APP2DEV_END_TRACE, + IWL_TM_CMD_APP2DEV_READ_TRACE, /* commands from kernel space to carry the synchronous response * to user application */ @@ -99,6 +105,11 @@ enum iwl_tm_cmd_t { /* commands from kernel space to multicast the spontaneous messages * to user application */ IWL_TM_CMD_DEV2APP_UCODE_RX_PKT, + + /* commands from kernel space to carry the eeprom response + * to user application */ + IWL_TM_CMD_DEV2APP_EEPROM_RSP, + IWL_TM_CMD_MAX, }; @@ -144,8 +155,31 @@ enum iwl_tm_attr_t { * application */ IWL_TM_ATTR_UCODE_RX_PKT, + /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_DEV2APP_EEPROM, + * The mandatory fields are: + * IWL_TM_ATTR_EEPROM for the data content responging to the user + * application */ + IWL_TM_ATTR_EEPROM, + + /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_XXX_TRACE, + * The mandatory fields are: + * IWL_TM_ATTR_MEM_TRACE_ADDR for the trace address + */ + IWL_TM_ATTR_TRACE_ADDR, + IWL_TM_ATTR_TRACE_DATA, + + /* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_FIXRATE_REQ, + * The mandatory fields are: + * IWL_TM_ATTR_FIXRATE for the fixed rate + */ + IWL_TM_ATTR_FIXRATE, + IWL_TM_ATTR_MAX, }; +/* uCode trace buffer */ +#define TRACE_BUFF_SIZE 0x20000 +#define TRACE_BUFF_PADD 0x2000 +#define TRACE_TOTAL_SIZE (TRACE_BUFF_SIZE + TRACE_BUFF_PADD) #endif diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c index e69597ea43e..686e176b5eb 100644 --- a/drivers/net/wireless/iwlwifi/iwl-tx.c +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c @@ -32,6 +32,7 @@ #include <linux/slab.h> #include <net/mac80211.h> #include "iwl-eeprom.h" +#include "iwl-agn.h" #include "iwl-dev.h" #include "iwl-core.h" #include "iwl-sta.h" @@ -85,6 +86,158 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq) txq->need_update = 0; } +static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) +{ + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + + dma_addr_t addr = get_unaligned_le32(&tb->lo); + if (sizeof(dma_addr_t) > sizeof(u32)) + addr |= + ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16; + + return addr; +} + +static inline u16 iwl_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx) +{ + struct iwl_tfd_tb *tb = 
&tfd->tbs[idx]; + + return le16_to_cpu(tb->hi_n_len) >> 4; +} + +static inline void iwl_tfd_set_tb(struct iwl_tfd *tfd, u8 idx, + dma_addr_t addr, u16 len) +{ + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + u16 hi_n_len = len << 4; + + put_unaligned_le32(addr, &tb->lo); + if (sizeof(dma_addr_t) > sizeof(u32)) + hi_n_len |= ((addr >> 16) >> 16) & 0xF; + + tb->hi_n_len = cpu_to_le16(hi_n_len); + + tfd->num_tbs = idx + 1; +} + +static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd) +{ + return tfd->num_tbs & 0x1f; +} + +static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta, + struct iwl_tfd *tfd) +{ + struct pci_dev *dev = priv->pci_dev; + int i; + int num_tbs; + + /* Sanity check on number of chunks */ + num_tbs = iwl_tfd_get_num_tbs(tfd); + + if (num_tbs >= IWL_NUM_OF_TBS) { + IWL_ERR(priv, "Too many chunks: %i\n", num_tbs); + /* @todo issue fatal error, it is quite serious situation */ + return; + } + + /* Unmap tx_cmd */ + if (num_tbs) + pci_unmap_single(dev, + dma_unmap_addr(meta, mapping), + dma_unmap_len(meta, len), + PCI_DMA_BIDIRECTIONAL); + + /* Unmap chunks, if any. */ + for (i = 1; i < num_tbs; i++) + pci_unmap_single(dev, iwl_tfd_tb_get_addr(tfd, i), + iwl_tfd_tb_get_len(tfd, i), PCI_DMA_TODEVICE); +} + +/** + * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] + * @priv - driver private data + * @txq - tx queue + * + * Does NOT advance any TFD circular buffer read/write indexes + * Does NOT free the TFD itself (which is within circular buffer) + */ +void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + struct iwl_tfd *tfd_tmp = txq->tfds; + int index = txq->q.read_ptr; + + iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index]); + + /* free SKB */ + if (txq->txb) { + struct sk_buff *skb; + + skb = txq->txb[txq->q.read_ptr].skb; + + /* can be called from irqs-disabled context */ + if (skb) { + dev_kfree_skb_any(skb); + txq->txb[txq->q.read_ptr].skb = NULL; + } + } +} + +int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv, + struct iwl_tx_queue *txq, + dma_addr_t addr, u16 len, + u8 reset) +{ + struct iwl_queue *q; + struct iwl_tfd *tfd, *tfd_tmp; + u32 num_tbs; + + q = &txq->q; + tfd_tmp = txq->tfds; + tfd = &tfd_tmp[q->write_ptr]; + + if (reset) + memset(tfd, 0, sizeof(*tfd)); + + num_tbs = iwl_tfd_get_num_tbs(tfd); + + /* Each TFD can point to a maximum 20 Tx buffers */ + if (num_tbs >= IWL_NUM_OF_TBS) { + IWL_ERR(priv, "Error can not send more than %d chunks\n", + IWL_NUM_OF_TBS); + return -EINVAL; + } + + if (WARN_ON(addr & ~DMA_BIT_MASK(36))) + return -EINVAL; + + if (unlikely(addr & ~IWL_TX_DMA_MASK)) + IWL_ERR(priv, "Unaligned address = %llx\n", + (unsigned long long)addr); + + iwl_tfd_set_tb(tfd, num_tbs, addr, len); + + return 0; +} + +/* + * Tell nic where to find circular buffer of Tx Frame Descriptors for + * given Tx queue, and enable the DMA channel used for that queue. + * + * supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA + * channels supported in hardware. 
+ */ +static int iwlagn_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq) +{ + int txq_id = txq->q.id; + + /* Circular buffer (TFD queue in DRAM) physical base address */ + iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id), + txq->q.dma_addr >> 8); + + return 0; +} + /** * iwl_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's */ @@ -97,7 +250,7 @@ void iwl_tx_queue_unmap(struct iwl_priv *priv, int txq_id) return; while (q->write_ptr != q->read_ptr) { - priv->cfg->ops->lib->txq_free_tfd(priv, txq); + iwlagn_txq_free_tfd(priv, txq); q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); } } @@ -154,7 +307,7 @@ void iwl_cmd_queue_unmap(struct iwl_priv *priv) return; while (q->read_ptr != q->write_ptr) { - i = get_cmd_index(q, q->read_ptr, 0); + i = get_cmd_index(q, q->read_ptr); if (txq->meta[i].flags & CMD_MAPPED) { pci_unmap_single(priv->pci_dev, @@ -166,15 +319,6 @@ void iwl_cmd_queue_unmap(struct iwl_priv *priv) q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); } - - i = q->n_window; - if (txq->meta[i].flags & CMD_MAPPED) { - pci_unmap_single(priv->pci_dev, - dma_unmap_addr(&txq->meta[i], mapping), - dma_unmap_len(&txq->meta[i], len), - PCI_DMA_BIDIRECTIONAL); - txq->meta[i].flags = 0; - } } /** @@ -194,7 +338,7 @@ void iwl_cmd_queue_free(struct iwl_priv *priv) iwl_cmd_queue_unmap(priv); /* De-alloc array of command/tx buffers */ - for (i = 0; i <= TFD_CMD_SLOTS; i++) + for (i = 0; i < TFD_CMD_SLOTS; i++) kfree(txq->cmd[i]); /* De-alloc circular buffer of TFDs */ @@ -334,33 +478,17 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, { int i, len; int ret; - int actual_slots = slots_num; - - /* - * Alloc buffer array for commands (Tx or other types of commands). - * For the command queue (#4/#9), allocate command space + one big - * command for scan, since scan command is very huge; the system will - * not have two scans at the same time, so only one is needed. - * For normal Tx queues (all other queues), no super-size command - * space is needed. 
- */ - if (txq_id == priv->cmd_queue) - actual_slots++; - txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots, + txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * slots_num, GFP_KERNEL); - txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots, + txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * slots_num, GFP_KERNEL); if (!txq->meta || !txq->cmd) goto out_free_arrays; len = sizeof(struct iwl_device_cmd); - for (i = 0; i < actual_slots; i++) { - /* only happens for cmd queue */ - if (i == slots_num) - len = IWL_MAX_CMD_SIZE; - + for (i = 0; i < slots_num; i++) { txq->cmd[i] = kmalloc(len, GFP_KERNEL); if (!txq->cmd[i]) goto err; @@ -391,11 +519,11 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq, return ret; /* Tell device where to find queue */ - priv->cfg->ops->lib->txq_init(priv, txq); + iwlagn_tx_queue_init(priv, txq); return 0; err: - for (i = 0; i < actual_slots; i++) + for (i = 0; i < slots_num; i++) kfree(txq->cmd[i]); out_free_arrays: kfree(txq->meta); @@ -420,7 +548,7 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); /* Tell device where to find queue */ - priv->cfg->ops->lib->txq_init(priv, txq); + iwlagn_tx_queue_init(priv, txq); } /*************** HOST COMMAND QUEUE FUNCTIONS *****/ @@ -443,23 +571,49 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) dma_addr_t phys_addr; unsigned long flags; u32 idx; - u16 fix_size; + u16 copy_size, cmd_size; bool is_ct_kill = false; + bool had_nocopy = false; + int i; + u8 *cmd_dest; +#ifdef CONFIG_IWLWIFI_DEVICE_TRACING + const void *trace_bufs[IWL_MAX_CMD_TFDS + 1] = {}; + int trace_lens[IWL_MAX_CMD_TFDS + 1] = {}; + int trace_idx; +#endif + + if (test_bit(STATUS_FW_ERROR, &priv->status)) { + IWL_WARN(priv, "fw recovery, no hcmd send\n"); + return -EIO; + } - fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr)); + copy_size = sizeof(out_cmd->hdr); + cmd_size = sizeof(out_cmd->hdr); + + /* need one for the header if the first is NOCOPY */ + BUILD_BUG_ON(IWL_MAX_CMD_TFDS > IWL_NUM_OF_TBS - 1); + + for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { + if (!cmd->len[i]) + continue; + if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { + had_nocopy = true; + } else { + /* NOCOPY must not be followed by normal! */ + if (WARN_ON(had_nocopy)) + return -EINVAL; + copy_size += cmd->len[i]; + } + cmd_size += cmd->len[i]; + } /* * If any of the command structures end up being larger than - * the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then - * we will need to increase the size of the TFD entries - * Also, check to see if command buffer should not exceed the size - * of device_cmd and max_cmd_size. + * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically + * allocated into separate TFDs, then we will need to + * increase the size of the buffers. */ - if (WARN_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && - !(cmd->flags & CMD_SIZE_HUGE))) - return -EINVAL; - - if (WARN_ON(fix_size > IWL_MAX_CMD_SIZE)) + if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE)) return -EINVAL; if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) { @@ -468,14 +622,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) return -EIO; } - /* - * As we only have a single huge buffer, check that the command - * is synchronous (otherwise buffers could end up being reused). 
- */ - - if (WARN_ON((cmd->flags & CMD_ASYNC) && (cmd->flags & CMD_SIZE_HUGE))) - return -EINVAL; - spin_lock_irqsave(&priv->hcmd_lock, flags); if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { @@ -490,7 +636,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) return -ENOSPC; } - idx = get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE); + idx = get_cmd_index(q, q->write_ptr); out_cmd = txq->cmd[idx]; out_meta = &txq->meta[idx]; @@ -505,57 +651,84 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) if (cmd->flags & CMD_ASYNC) out_meta->callback = cmd->callback; - out_cmd->hdr.cmd = cmd->id; - memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len); - - /* At this point, the out_cmd now has all of the incoming cmd - * information */ + /* set up the header */ + out_cmd->hdr.cmd = cmd->id; out_cmd->hdr.flags = 0; out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) | - INDEX_TO_SEQ(q->write_ptr)); - if (cmd->flags & CMD_SIZE_HUGE) - out_cmd->hdr.sequence |= SEQ_HUGE_FRAME; - -#ifdef CONFIG_IWLWIFI_DEBUG - switch (out_cmd->hdr.cmd) { - case REPLY_TX_LINK_QUALITY_CMD: - case SENSITIVITY_CMD: - IWL_DEBUG_HC_DUMP(priv, "Sending command %s (#%x), seq: 0x%04X, " - "%d bytes at %d[%d]:%d\n", - get_cmd_string(out_cmd->hdr.cmd), - out_cmd->hdr.cmd, - le16_to_cpu(out_cmd->hdr.sequence), fix_size, - q->write_ptr, idx, priv->cmd_queue); - break; - default: - IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, " - "%d bytes at %d[%d]:%d\n", - get_cmd_string(out_cmd->hdr.cmd), - out_cmd->hdr.cmd, - le16_to_cpu(out_cmd->hdr.sequence), fix_size, - q->write_ptr, idx, priv->cmd_queue); + INDEX_TO_SEQ(q->write_ptr)); + + /* and copy the data that needs to be copied */ + + cmd_dest = &out_cmd->cmd.payload[0]; + for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { + if (!cmd->len[i]) + continue; + if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) + break; + memcpy(cmd_dest, cmd->data[i], cmd->len[i]); + cmd_dest += cmd->len[i]; } -#endif + + IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, " + "%d bytes at %d[%d]:%d\n", + get_cmd_string(out_cmd->hdr.cmd), + out_cmd->hdr.cmd, + le16_to_cpu(out_cmd->hdr.sequence), cmd_size, + q->write_ptr, idx, priv->cmd_queue); + phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr, - fix_size, PCI_DMA_BIDIRECTIONAL); + copy_size, PCI_DMA_BIDIRECTIONAL); if (unlikely(pci_dma_mapping_error(priv->pci_dev, phys_addr))) { idx = -ENOMEM; goto out; } dma_unmap_addr_set(out_meta, mapping, phys_addr); - dma_unmap_len_set(out_meta, len, fix_size); + dma_unmap_len_set(out_meta, len, copy_size); + + iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, copy_size, 1); +#ifdef CONFIG_IWLWIFI_DEVICE_TRACING + trace_bufs[0] = &out_cmd->hdr; + trace_lens[0] = copy_size; + trace_idx = 1; +#endif + + for (i = 0; i < IWL_MAX_CMD_TFDS; i++) { + if (!cmd->len[i]) + continue; + if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY)) + continue; + phys_addr = pci_map_single(priv->pci_dev, (void *)cmd->data[i], + cmd->len[i], PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(priv->pci_dev, phys_addr)) { + iwlagn_unmap_tfd(priv, out_meta, + &txq->tfds[q->write_ptr]); + idx = -ENOMEM; + goto out; + } + + iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, + cmd->len[i], 0); +#ifdef CONFIG_IWLWIFI_DEVICE_TRACING + trace_bufs[trace_idx] = cmd->data[i]; + trace_lens[trace_idx] = cmd->len[i]; + trace_idx++; +#endif + } out_meta->flags = cmd->flags | CMD_MAPPED; txq->need_update = 1; - trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags); 
- - priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, - phys_addr, fix_size, 1, - U32_PAD(cmd->len)); + /* check that tracing gets all possible blocks */ + BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3); +#ifdef CONFIG_IWLWIFI_DEVICE_TRACING + trace_iwlwifi_dev_hcmd(priv, cmd->flags, + trace_bufs[0], trace_lens[0], + trace_bufs[1], trace_lens[1], + trace_bufs[2], trace_lens[2]); +#endif /* Increment and update queue's write index */ q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); @@ -573,8 +746,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) * need to be reclaimed. As result, some free space forms. If there is * enough free space (> low mark), wake the stack that feeds us. */ -static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, - int idx, int cmd_idx) +static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx) { struct iwl_tx_queue *txq = &priv->txq[txq_id]; struct iwl_queue *q = &txq->q; @@ -614,7 +786,6 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) int txq_id = SEQ_TO_QUEUE(sequence); int index = SEQ_TO_INDEX(sequence); int cmd_index; - bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME); struct iwl_device_cmd *cmd; struct iwl_cmd_meta *meta; struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue]; @@ -632,14 +803,11 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) return; } - cmd_index = get_cmd_index(&txq->q, index, huge); + cmd_index = get_cmd_index(&txq->q, index); cmd = txq->cmd[cmd_index]; meta = &txq->meta[cmd_index]; - pci_unmap_single(priv->pci_dev, - dma_unmap_addr(meta, mapping), - dma_unmap_len(meta, len), - PCI_DMA_BIDIRECTIONAL); + iwlagn_unmap_tfd(priv, meta, &txq->tfds[index]); /* Input error checking is done when commands are added to queue. 
*/ if (meta->flags & CMD_WANT_SKB) { @@ -650,7 +818,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) spin_lock_irqsave(&priv->hcmd_lock, flags); - iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index); + iwl_hcmd_queue_reclaim(priv, txq_id, index); if (!(meta->flags & CMD_ASYNC)) { clear_bit(STATUS_HCMD_ACTIVE, &priv->status); diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c index 5665a1a9b99..a414768f40f 100644 --- a/drivers/net/wireless/iwmc3200wifi/rx.c +++ b/drivers/net/wireless/iwmc3200wifi/rx.c @@ -565,7 +565,7 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf, if (!test_and_clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status) && iwm->conf.mode == UMAC_MODE_BSS) { cancel_delayed_work(&iwm->disconnect); - cfg80211_roamed(iwm_to_ndev(iwm), + cfg80211_roamed(iwm_to_ndev(iwm), NULL, complete->bssid, iwm->req_ie, iwm->req_ie_len, iwm->resp_ie, iwm->resp_ie_len, @@ -586,7 +586,7 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf, WLAN_STATUS_SUCCESS, GFP_KERNEL); else - cfg80211_roamed(iwm_to_ndev(iwm), + cfg80211_roamed(iwm_to_ndev(iwm), NULL, complete->bssid, iwm->req_ie, iwm->req_ie_len, iwm->resp_ie, iwm->resp_ie_len, diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c index d3d5e0853c4..f807447e4d9 100644 --- a/drivers/net/wireless/mwifiex/11n_aggr.c +++ b/drivers/net/wireless/mwifiex/11n_aggr.c @@ -196,6 +196,8 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, if (skb_src) pra_list->total_pkts_size -= skb_src->len; + atomic_dec(&priv->wmm.tx_pkts_queued); + spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); mwifiex_11n_form_amsdu_pkt(skb_aggr, skb_src, &pad); @@ -257,6 +259,8 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv, pra_list->total_pkts_size += skb_aggr->len; + atomic_inc(&priv->wmm.tx_pkts_queued); + tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT; spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h index 672701dc272..8316b3cd92c 100644 --- a/drivers/net/wireless/mwifiex/main.h +++ b/drivers/net/wireless/mwifiex/main.h @@ -69,7 +69,8 @@ struct mwifiex_drv_mode { #define MWIFIEX_TIMER_10S 10000 #define MWIFIEX_TIMER_1S 1000 -#define MAX_TX_PENDING 60 +#define MAX_TX_PENDING 100 +#define LOW_TX_PENDING 80 #define MWIFIEX_UPLD_SIZE (2312) @@ -202,6 +203,7 @@ struct mwifiex_tid_tbl { #define WMM_HIGHEST_PRIORITY 7 #define HIGH_PRIO_TID 7 #define LOW_PRIO_TID 0 +#define NO_PKT_PRIO_TID (-1) struct mwifiex_wmm_desc { struct mwifiex_tid_tbl tid_tbl_ptr[MAX_NUM_TID]; @@ -213,7 +215,10 @@ struct mwifiex_wmm_desc { u32 drv_pkt_delay_max; u8 queue_priority[IEEE80211_MAX_QUEUES]; u32 user_pri_pkt_tx_ctrl[WMM_HIGHEST_PRIORITY + 1]; /* UP: 0 to 7 */ - + /* Number of transmit packets queued */ + atomic_t tx_pkts_queued; + /* Tracks highest priority with a packet queued */ + atomic_t highest_queued_prio; }; struct mwifiex_802_11_security { diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c index 210120889df..aaa50c07419 100644 --- a/drivers/net/wireless/mwifiex/txrx.c +++ b/drivers/net/wireless/mwifiex/txrx.c @@ -140,7 +140,9 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter, } else { priv->stats.tx_errors++; } - atomic_dec(&adapter->tx_pending); + + if (atomic_dec_return(&adapter->tx_pending) >= LOW_TX_PENDING) + goto done; for (i = 0; i < 
adapter->priv_num; i++) { diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index faa09e32902..91634daec30 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c @@ -177,14 +177,20 @@ static void mwifiex_wmm_default_queue_priorities(struct mwifiex_private *priv) * This function map ACs to TIDs. */ static void -mwifiex_wmm_queue_priorities_tid(u8 queue_priority[]) +mwifiex_wmm_queue_priorities_tid(struct mwifiex_wmm_desc *wmm) { + u8 *queue_priority = wmm->queue_priority; int i; for (i = 0; i < 4; ++i) { tos_to_tid[7 - (i * 2)] = ac_to_tid[queue_priority[i]][1]; tos_to_tid[6 - (i * 2)] = ac_to_tid[queue_priority[i]][0]; } + + for (i = 0; i < MAX_NUM_TID; ++i) + tos_to_tid_inv[tos_to_tid[i]] = (u8)i; + + atomic_set(&wmm->highest_queued_prio, HIGH_PRIO_TID); } /* @@ -246,7 +252,7 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv, } } - mwifiex_wmm_queue_priorities_tid(priv->wmm.queue_priority); + mwifiex_wmm_queue_priorities_tid(&priv->wmm); } /* @@ -399,6 +405,9 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter) priv->add_ba_param.timeout = MWIFIEX_DEFAULT_BLOCK_ACK_TIMEOUT; priv->add_ba_param.tx_win_size = MWIFIEX_AMPDU_DEF_TXWINSIZE; priv->add_ba_param.rx_win_size = MWIFIEX_AMPDU_DEF_RXWINSIZE; + + atomic_set(&priv->wmm.tx_pkts_queued, 0); + atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID); } } @@ -408,17 +417,13 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter) int mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter) { - int i, j; + int i; struct mwifiex_private *priv; - for (j = 0; j < adapter->priv_num; ++j) { - priv = adapter->priv[j]; - if (priv) { - for (i = 0; i < MAX_NUM_TID; i++) - if (!mwifiex_wmm_is_ra_list_empty( - &priv->wmm.tid_tbl_ptr[i].ra_list)) - return false; - } + for (i = 0; i < adapter->priv_num; ++i) { + priv = adapter->priv[i]; + if (priv && atomic_read(&priv->wmm.tx_pkts_queued)) + return false; } return true; @@ -468,6 +473,9 @@ static void mwifiex_wmm_cleanup_queues(struct mwifiex_private *priv) for (i = 0; i < MAX_NUM_TID; i++) mwifiex_wmm_del_pkts_in_ralist(priv, &priv->wmm.tid_tbl_ptr[i]. ra_list); + + atomic_set(&priv->wmm.tx_pkts_queued, 0); + atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID); } /* @@ -638,6 +646,13 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_adapter *adapter, ra_list->total_pkts_size += skb->len; + atomic_inc(&priv->wmm.tx_pkts_queued); + + if (atomic_read(&priv->wmm.highest_queued_prio) < + tos_to_tid_inv[tid_down]) + atomic_set(&priv->wmm.highest_queued_prio, + tos_to_tid_inv[tid_down]); + spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); } @@ -863,9 +878,14 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter, } do { + atomic_t *hqp; + spinlock_t *lock; + priv_tmp = bssprio_node->priv; + hqp = &priv_tmp->wmm.highest_queued_prio; + lock = &priv_tmp->wmm.ra_list_spinlock; - for (i = HIGH_PRIO_TID; i >= LOW_PRIO_TID; --i) { + for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) { tid_ptr = &(priv_tmp)->wmm. 
tid_tbl_ptr[tos_to_tid[i]]; @@ -903,6 +923,11 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter, is_list_empty = skb_queue_empty(&ptr->skb_head); if (!is_list_empty) { + spin_lock_irqsave(lock, flags); + if (atomic_read(hqp) > i) + atomic_set(hqp, i); + spin_unlock_irqrestore(lock, + flags); *priv = priv_tmp; *tid = tos_to_tid[i]; return ptr; @@ -921,6 +946,12 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter, } while (ptr != head); } + /* No packet at any TID for this priv. Mark as such + * to skip checking TIDs for this priv (until pkt is + * added). + */ + atomic_set(hqp, NO_PKT_PRIO_TID); + /* Get next bss priority node */ bssprio_node = list_first_entry(&bssprio_node->list, struct mwifiex_bss_prio_node, @@ -1028,6 +1059,7 @@ mwifiex_send_single_packet(struct mwifiex_private *priv, .bss_prio_cur->list, struct mwifiex_bss_prio_node, list); + atomic_dec(&priv->wmm.tx_pkts_queued); spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); } @@ -1134,6 +1166,7 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv, .bss_prio_cur->list, struct mwifiex_bss_prio_node, list); + atomic_dec(&priv->wmm.tx_pkts_queued); spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); } @@ -1227,5 +1260,5 @@ mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter) if (mwifiex_dequeue_tx_packet(adapter)) break; - } while (true); + } while (!mwifiex_wmm_lists_empty(adapter)); } diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index e18358725b6..a8f3bc740df 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c @@ -82,6 +82,7 @@ static struct usb_device_id p54u_table[] __devinitdata = { {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ + {USB_DEVICE(0x083a, 0xc501)}, /* Zoom Wireless-G 4410 */ {USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */ {USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */ {USB_DEVICE(0x0915, 0x2000)}, /* Cohiba Proto board */ diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 518542b4bf9..29f93893066 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -2830,7 +2830,8 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev) req_ie_len, resp_ie, resp_ie_len, 0, GFP_KERNEL); else - cfg80211_roamed(usbdev->net, bssid, req_ie, req_ie_len, + cfg80211_roamed(usbdev->net, NULL, bssid, + req_ie, req_ie_len, resp_ie, resp_ie_len, GFP_KERNEL); } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC) cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL); diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c index 2bb71195e97..39b0297ce92 100644 --- a/drivers/net/wireless/rtlwifi/ps.c +++ b/drivers/net/wireless/rtlwifi/ps.c @@ -190,7 +190,7 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw) ppsc->swrf_processing = true; - if (ppsc->inactive_pwrstate == ERFOFF && + if (ppsc->inactive_pwrstate == ERFON && rtlhal->interface == INTF_PCI) { if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) && RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM) && diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c index c5424cad43c..d2cc81586a6 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c +++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 
@@ -728,7 +728,7 @@ void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw, return; rtlphy->set_bwmode_inprogress = true; if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) { - rtlphy->set_bwmode_inprogress = false; + rtlpriv->cfg->ops->phy_set_bw_mode_callback(hw); } else { RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("FALSE driver sleep or unload\n")); diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c index 73ae8a43184..abe0fcc7536 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c @@ -366,6 +366,75 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, return true; } +void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_phy *rtlphy = &(rtlpriv->phy); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + u8 reg_bw_opmode; + u8 reg_prsr_rsc; + + RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, + ("Switch to %s bandwidth\n", + rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ? + "20MHz" : "40MHz")) + + if (is_hal_stop(rtlhal)) { + rtlphy->set_bwmode_inprogress = false; + return; + } + + reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE); + reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2); + + switch (rtlphy->current_chan_bw) { + case HT_CHANNEL_WIDTH_20: + reg_bw_opmode |= BW_OPMODE_20MHZ; + rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode); + break; + case HT_CHANNEL_WIDTH_20_40: + reg_bw_opmode &= ~BW_OPMODE_20MHZ; + rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode); + reg_prsr_rsc = + (reg_prsr_rsc & 0x90) | (mac->cur_40_prime_sc << 5); + rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw)); + break; + } + + switch (rtlphy->current_chan_bw) { + case HT_CHANNEL_WIDTH_20: + rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x0); + rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0); + rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1); + break; + case HT_CHANNEL_WIDTH_20_40: + rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x1); + rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1); + + rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCK_SIDEBAND, + (mac->cur_40_prime_sc >> 1)); + rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, mac->cur_40_prime_sc); + rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 0); + + rtl_set_bbreg(hw, 0x818, (BIT(26) | BIT(27)), + (mac->cur_40_prime_sc == + HAL_PRIME_CHNL_OFFSET_LOWER) ? 
2 : 1); + break; + default: + RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw)); + break; + } + rtl92ce_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw); + rtlphy->set_bwmode_inprogress = false; + RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n")); +} + void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) { u8 tmpreg; diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h index ad580852cc7..be2c92adef3 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h @@ -257,5 +257,6 @@ bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw, u8 configtype); bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw, u8 configtype); +void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw); #endif diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c index 390bbb5ee11..373dc78af1d 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c @@ -232,6 +232,7 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = { .config_bb_with_headerfile = _rtl92ce_phy_config_bb_with_headerfile, .config_bb_with_pgheaderfile = _rtl92ce_phy_config_bb_with_pgheaderfile, .phy_lc_calibrate = _rtl92ce_phy_lc_calibrate, + .phy_set_bw_mode_callback = rtl92ce_phy_set_bw_mode_callback, .dm_dynamic_txpower = rtl92ce_dm_dynamic_txpower, }; diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index db9a763aaa7..d29365a232a 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -1581,7 +1581,9 @@ static int xennet_connect(struct net_device *dev) if (err) return err; + rtnl_lock(); netdev_update_features(dev); + rtnl_unlock(); spin_lock_bh(&np->rx_lock); spin_lock_irq(&np->tx_lock); diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c index 94a114aa8e2..b1396e5b295 100644 --- a/drivers/platform/x86/ibm_rtl.c +++ b/drivers/platform/x86/ibm_rtl.c @@ -81,6 +81,19 @@ static void __iomem *rtl_cmd_addr; static u8 rtl_cmd_type; static u8 rtl_cmd_width; +#ifndef readq +static inline __u64 readq(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + low = readl(p); + high = readl(p + 1); + + return low + ((u64)high << 32); +} +#endif + static void __iomem *rtl_port_map(phys_addr_t addr, unsigned long len) { if (rtl_cmd_type == RTL_ADDR_TYPE_MMIO) diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c index 85c8ad43c0c..5ffe7c39814 100644 --- a/drivers/platform/x86/intel_ips.c +++ b/drivers/platform/x86/intel_ips.c @@ -344,6 +344,19 @@ struct ips_driver { static bool ips_gpu_turbo_enabled(struct ips_driver *ips); +#ifndef readq +static inline __u64 readq(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + low = readl(p); + high = readl(p + 1); + + return low + ((u64)high << 32); +} +#endif + /** * ips_cpu_busy - is CPU busy? * @ips: IPS driver struct diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig new file mode 100644 index 00000000000..68d72010229 --- /dev/null +++ b/drivers/ptp/Kconfig @@ -0,0 +1,75 @@ +# +# PTP clock support configuration +# + +menu "PTP clock support" + +comment "Enable Device Drivers -> PPS to see the PTP clock options." 
+ depends on PPS=n + +config PTP_1588_CLOCK + tristate "PTP clock support" + depends on EXPERIMENTAL + depends on PPS + help + The IEEE 1588 standard defines a method to precisely + synchronize distributed clocks over Ethernet networks. The + standard defines a Precision Time Protocol (PTP), which can + be used to achieve synchronization within a few dozen + microseconds. In addition, with the help of special hardware + time stamping units, it can be possible to achieve + synchronization to within a few hundred nanoseconds. + + This driver adds support for PTP clocks as character + devices. If you want to use a PTP clock, then you should + also enable at least one clock driver as well. + + To compile this driver as a module, choose M here: the module + will be called ptp. + +config PTP_1588_CLOCK_GIANFAR + tristate "Freescale eTSEC as PTP clock" + depends on PTP_1588_CLOCK + depends on GIANFAR + help + This driver adds support for using the eTSEC as a PTP + clock. This clock is only useful if your PTP programs are + getting hardware time stamps on the PTP Ethernet packets + using the SO_TIMESTAMPING API. + + To compile this driver as a module, choose M here: the module + will be called gianfar_ptp. + +config PTP_1588_CLOCK_IXP46X + tristate "Intel IXP46x as PTP clock" + depends on PTP_1588_CLOCK + depends on IXP4XX_ETH + help + This driver adds support for using the IXP46X as a PTP + clock. This clock is only useful if your PTP programs are + getting hardware time stamps on the PTP Ethernet packets + using the SO_TIMESTAMPING API. + + To compile this driver as a module, choose M here: the module + will be called ptp_ixp46x. + +comment "Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks." + depends on PTP_1588_CLOCK && (PHYLIB=n || NETWORK_PHY_TIMESTAMPING=n) + +config DP83640_PHY + tristate "Driver for the National Semiconductor DP83640 PHYTER" + depends on PTP_1588_CLOCK + depends on NETWORK_PHY_TIMESTAMPING + depends on PHYLIB + ---help--- + Supports the DP83640 PHYTER with IEEE 1588 features. + + This driver adds support for using the DP83640 as a PTP + clock. This clock is only useful if your PTP programs are + getting hardware time stamps on the PTP Ethernet packets + using the SO_TIMESTAMPING API. + + In order for this to work, your MAC driver must also + implement the skb_tx_timetamp() function. + +endmenu diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile new file mode 100644 index 00000000000..f6933e83de7 --- /dev/null +++ b/drivers/ptp/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for PTP 1588 clock support. +# + +ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o +obj-$(CONFIG_PTP_1588_CLOCK) += ptp.o +obj-$(CONFIG_PTP_1588_CLOCK_IXP46X) += ptp_ixp46x.o diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c new file mode 100644 index 00000000000..a8d03aeb405 --- /dev/null +++ b/drivers/ptp/ptp_chardev.c @@ -0,0 +1,159 @@ +/* + * PTP 1588 clock support - character device implementation. + * + * Copyright (C) 2010 OMICRON electronics GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +#include <linux/module.h> +#include <linux/posix-clock.h> +#include <linux/poll.h> +#include <linux/sched.h> + +#include "ptp_private.h" + +int ptp_open(struct posix_clock *pc, fmode_t fmode) +{ + return 0; +} + +long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) +{ + struct ptp_clock_caps caps; + struct ptp_clock_request req; + struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); + struct ptp_clock_info *ops = ptp->info; + int enable, err = 0; + + switch (cmd) { + + case PTP_CLOCK_GETCAPS: + memset(&caps, 0, sizeof(caps)); + caps.max_adj = ptp->info->max_adj; + caps.n_alarm = ptp->info->n_alarm; + caps.n_ext_ts = ptp->info->n_ext_ts; + caps.n_per_out = ptp->info->n_per_out; + caps.pps = ptp->info->pps; + err = copy_to_user((void __user *)arg, &caps, sizeof(caps)); + break; + + case PTP_EXTTS_REQUEST: + if (copy_from_user(&req.extts, (void __user *)arg, + sizeof(req.extts))) { + err = -EFAULT; + break; + } + if (req.extts.index >= ops->n_ext_ts) { + err = -EINVAL; + break; + } + req.type = PTP_CLK_REQ_EXTTS; + enable = req.extts.flags & PTP_ENABLE_FEATURE ? 1 : 0; + err = ops->enable(ops, &req, enable); + break; + + case PTP_PEROUT_REQUEST: + if (copy_from_user(&req.perout, (void __user *)arg, + sizeof(req.perout))) { + err = -EFAULT; + break; + } + if (req.perout.index >= ops->n_per_out) { + err = -EINVAL; + break; + } + req.type = PTP_CLK_REQ_PEROUT; + enable = req.perout.period.sec || req.perout.period.nsec; + err = ops->enable(ops, &req, enable); + break; + + case PTP_ENABLE_PPS: + if (!capable(CAP_SYS_TIME)) + return -EPERM; + req.type = PTP_CLK_REQ_PPS; + enable = arg ? 1 : 0; + err = ops->enable(ops, &req, enable); + break; + + default: + err = -ENOTTY; + break; + } + return err; +} + +unsigned int ptp_poll(struct posix_clock *pc, struct file *fp, poll_table *wait) +{ + struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); + + poll_wait(fp, &ptp->tsev_wq, wait); + + return queue_cnt(&ptp->tsevq) ? 
POLLIN : 0; +} + +ssize_t ptp_read(struct posix_clock *pc, + uint rdflags, char __user *buf, size_t cnt) +{ + struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); + struct timestamp_event_queue *queue = &ptp->tsevq; + struct ptp_extts_event event[PTP_BUF_TIMESTAMPS]; + unsigned long flags; + size_t qcnt, i; + + if (cnt % sizeof(struct ptp_extts_event) != 0) + return -EINVAL; + + if (cnt > sizeof(event)) + cnt = sizeof(event); + + cnt = cnt / sizeof(struct ptp_extts_event); + + if (mutex_lock_interruptible(&ptp->tsevq_mux)) + return -ERESTARTSYS; + + if (wait_event_interruptible(ptp->tsev_wq, + ptp->defunct || queue_cnt(queue))) { + mutex_unlock(&ptp->tsevq_mux); + return -ERESTARTSYS; + } + + if (ptp->defunct) + return -ENODEV; + + spin_lock_irqsave(&queue->lock, flags); + + qcnt = queue_cnt(queue); + + if (cnt > qcnt) + cnt = qcnt; + + for (i = 0; i < cnt; i++) { + event[i] = queue->buf[queue->head]; + queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS; + } + + spin_unlock_irqrestore(&queue->lock, flags); + + cnt = cnt * sizeof(struct ptp_extts_event); + + mutex_unlock(&ptp->tsevq_mux); + + if (copy_to_user(buf, event, cnt)) { + mutex_unlock(&ptp->tsevq_mux); + return -EFAULT; + } + + return cnt; +} diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c new file mode 100644 index 00000000000..cf3f9997546 --- /dev/null +++ b/drivers/ptp/ptp_clock.c @@ -0,0 +1,343 @@ +/* + * PTP 1588 clock support + * + * Copyright (C) 2010 OMICRON electronics GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ +#include <linux/bitops.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/posix-clock.h> +#include <linux/pps_kernel.h> +#include <linux/slab.h> +#include <linux/syscalls.h> +#include <linux/uaccess.h> + +#include "ptp_private.h" + +#define PTP_MAX_ALARMS 4 +#define PTP_MAX_CLOCKS 8 +#define PTP_PPS_DEFAULTS (PPS_CAPTUREASSERT | PPS_OFFSETASSERT) +#define PTP_PPS_EVENT PPS_CAPTUREASSERT +#define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC) + +/* private globals */ + +static dev_t ptp_devt; +static struct class *ptp_class; + +static DECLARE_BITMAP(ptp_clocks_map, PTP_MAX_CLOCKS); +static DEFINE_MUTEX(ptp_clocks_mutex); /* protects 'ptp_clocks_map' */ + +/* time stamp event queue operations */ + +static inline int queue_free(struct timestamp_event_queue *q) +{ + return PTP_MAX_TIMESTAMPS - queue_cnt(q) - 1; +} + +static void enqueue_external_timestamp(struct timestamp_event_queue *queue, + struct ptp_clock_event *src) +{ + struct ptp_extts_event *dst; + unsigned long flags; + s64 seconds; + u32 remainder; + + seconds = div_u64_rem(src->timestamp, 1000000000, &remainder); + + spin_lock_irqsave(&queue->lock, flags); + + dst = &queue->buf[queue->tail]; + dst->index = src->index; + dst->t.sec = seconds; + dst->t.nsec = remainder; + + if (!queue_free(queue)) + queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS; + + queue->tail = (queue->tail + 1) % PTP_MAX_TIMESTAMPS; + + spin_unlock_irqrestore(&queue->lock, flags); +} + +static s32 scaled_ppm_to_ppb(long ppm) +{ + /* + * The 'freq' field in the 'struct timex' is in parts per + * million, but with a 16 bit binary fractional field. + * + * We want to calculate + * + * ppb = scaled_ppm * 1000 / 2^16 + * + * which simplifies to + * + * ppb = scaled_ppm * 125 / 2^13 + */ + s64 ppb = 1 + ppm; + ppb *= 125; + ppb >>= 13; + return (s32) ppb; +} + +/* posix clock implementation */ + +static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp) +{ + return 1; /* always round timer functions to one nanosecond */ +} + +static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp) +{ + struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); + return ptp->info->settime(ptp->info, tp); +} + +static int ptp_clock_gettime(struct posix_clock *pc, struct timespec *tp) +{ + struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); + return ptp->info->gettime(ptp->info, tp); +} + +static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx) +{ + struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); + struct ptp_clock_info *ops; + int err = -EOPNOTSUPP; + + ops = ptp->info; + + if (tx->modes & ADJ_SETOFFSET) { + struct timespec ts; + ktime_t kt; + s64 delta; + + ts.tv_sec = tx->time.tv_sec; + ts.tv_nsec = tx->time.tv_usec; + + if (!(tx->modes & ADJ_NANO)) + ts.tv_nsec *= 1000; + + if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC) + return -EINVAL; + + kt = timespec_to_ktime(ts); + delta = ktime_to_ns(kt); + err = ops->adjtime(ops, delta); + + } else if (tx->modes & ADJ_FREQUENCY) { + + err = ops->adjfreq(ops, scaled_ppm_to_ppb(tx->freq)); + } + + return err; +} + +static struct posix_clock_operations ptp_clock_ops = { + .owner = THIS_MODULE, + .clock_adjtime = ptp_clock_adjtime, + .clock_gettime = ptp_clock_gettime, + .clock_getres = ptp_clock_getres, + .clock_settime = ptp_clock_settime, + .ioctl = ptp_ioctl, + .open = ptp_open, + .poll = ptp_poll, + .read = ptp_read, +}; 
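The scaled_ppm_to_ppb() helper above converts the 'freq' value from struct timex, which is parts per million with a 16-bit binary fraction, into integer parts per billion for the driver's adjfreq() callback. A short worked example of the same arithmetic (the conversion mirrors the function added in ptp_clock.c; the standalone main() is only illustrative):

#include <stdint.h>
#include <stdio.h>

/* same arithmetic as scaled_ppm_to_ppb() in ptp_clock.c */
static int32_t scaled_ppm_to_ppb(long ppm)
{
        int64_t ppb = 1 + ppm;  /* +1 biases the truncation, as in the driver */

        ppb *= 125;             /* 1000 / 2^16 == 125 / 2^13 */
        ppb >>= 13;
        return (int32_t)ppb;
}

int main(void)
{
        /* 16-bit fractional ppm: 65536 == 1.0 ppm == 1000 ppb */
        printf("%d %d %d\n",
               scaled_ppm_to_ppb(65536),        /* -> 1000 */
               scaled_ppm_to_ppb(32768),        /* ->  500 */
               scaled_ppm_to_ppb(0));           /* ->    0 */
        return 0;
}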
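With ptp_clock_ops wired up as above, each registered clock is reachable both as a dynamic posix clock and through its character device, whose ioctl path is implemented in ptp_chardev.c. A hedged userspace sketch of querying the capabilities reported by ptp_ioctl(), assuming udev exposes the first clock as /dev/ptp0 (the ioctl name and struct ptp_clock_caps fields come from linux/ptp_clock.h):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
        struct ptp_clock_caps caps;
        int fd = open("/dev/ptp0", O_RDWR);     /* device node name assumed */

        if (fd < 0) {
                perror("open /dev/ptp0");
                return 1;
        }
        if (ioctl(fd, PTP_CLOCK_GETCAPS, &caps)) {
                perror("PTP_CLOCK_GETCAPS");
                close(fd);
                return 1;
        }
        printf("max_adj %d ppb, %d alarms, %d ext ts, %d per out, pps %d\n",
               caps.max_adj, caps.n_alarm, caps.n_ext_ts,
               caps.n_per_out, caps.pps);
        close(fd);
        return 0;
}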
+ +static void delete_ptp_clock(struct posix_clock *pc) +{ + struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); + + mutex_destroy(&ptp->tsevq_mux); + + /* Remove the clock from the bit map. */ + mutex_lock(&ptp_clocks_mutex); + clear_bit(ptp->index, ptp_clocks_map); + mutex_unlock(&ptp_clocks_mutex); + + kfree(ptp); +} + +/* public interface */ + +struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info) +{ + struct ptp_clock *ptp; + int err = 0, index, major = MAJOR(ptp_devt); + + if (info->n_alarm > PTP_MAX_ALARMS) + return ERR_PTR(-EINVAL); + + /* Find a free clock slot and reserve it. */ + err = -EBUSY; + mutex_lock(&ptp_clocks_mutex); + index = find_first_zero_bit(ptp_clocks_map, PTP_MAX_CLOCKS); + if (index < PTP_MAX_CLOCKS) + set_bit(index, ptp_clocks_map); + else + goto no_slot; + + /* Initialize a clock structure. */ + err = -ENOMEM; + ptp = kzalloc(sizeof(struct ptp_clock), GFP_KERNEL); + if (ptp == NULL) + goto no_memory; + + ptp->clock.ops = ptp_clock_ops; + ptp->clock.release = delete_ptp_clock; + ptp->info = info; + ptp->devid = MKDEV(major, index); + ptp->index = index; + spin_lock_init(&ptp->tsevq.lock); + mutex_init(&ptp->tsevq_mux); + init_waitqueue_head(&ptp->tsev_wq); + + /* Create a new device in our class. */ + ptp->dev = device_create(ptp_class, NULL, ptp->devid, ptp, + "ptp%d", ptp->index); + if (IS_ERR(ptp->dev)) + goto no_device; + + dev_set_drvdata(ptp->dev, ptp); + + err = ptp_populate_sysfs(ptp); + if (err) + goto no_sysfs; + + /* Register a new PPS source. */ + if (info->pps) { + struct pps_source_info pps; + memset(&pps, 0, sizeof(pps)); + snprintf(pps.name, PPS_MAX_NAME_LEN, "ptp%d", index); + pps.mode = PTP_PPS_MODE; + pps.owner = info->owner; + ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS); + if (!ptp->pps_source) { + pr_err("failed to register pps source\n"); + goto no_pps; + } + } + + /* Create a posix clock. */ + err = posix_clock_register(&ptp->clock, ptp->devid); + if (err) { + pr_err("failed to create posix clock\n"); + goto no_clock; + } + + mutex_unlock(&ptp_clocks_mutex); + return ptp; + +no_clock: + if (ptp->pps_source) + pps_unregister_source(ptp->pps_source); +no_pps: + ptp_cleanup_sysfs(ptp); +no_sysfs: + device_destroy(ptp_class, ptp->devid); +no_device: + mutex_destroy(&ptp->tsevq_mux); + kfree(ptp); +no_memory: + clear_bit(index, ptp_clocks_map); +no_slot: + mutex_unlock(&ptp_clocks_mutex); + return ERR_PTR(err); +} +EXPORT_SYMBOL(ptp_clock_register); + +int ptp_clock_unregister(struct ptp_clock *ptp) +{ + ptp->defunct = 1; + wake_up_interruptible(&ptp->tsev_wq); + + /* Release the clock's resources. 
*/ + if (ptp->pps_source) + pps_unregister_source(ptp->pps_source); + ptp_cleanup_sysfs(ptp); + device_destroy(ptp_class, ptp->devid); + + posix_clock_unregister(&ptp->clock); + return 0; +} +EXPORT_SYMBOL(ptp_clock_unregister); + +void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event) +{ + struct pps_event_time evt; + + switch (event->type) { + + case PTP_CLOCK_ALARM: + break; + + case PTP_CLOCK_EXTTS: + enqueue_external_timestamp(&ptp->tsevq, event); + wake_up_interruptible(&ptp->tsev_wq); + break; + + case PTP_CLOCK_PPS: + pps_get_ts(&evt); + pps_event(ptp->pps_source, &evt, PTP_PPS_EVENT, NULL); + break; + } +} +EXPORT_SYMBOL(ptp_clock_event); + +/* module operations */ + +static void __exit ptp_exit(void) +{ + class_destroy(ptp_class); + unregister_chrdev_region(ptp_devt, PTP_MAX_CLOCKS); +} + +static int __init ptp_init(void) +{ + int err; + + ptp_class = class_create(THIS_MODULE, "ptp"); + if (IS_ERR(ptp_class)) { + pr_err("ptp: failed to allocate class\n"); + return PTR_ERR(ptp_class); + } + + err = alloc_chrdev_region(&ptp_devt, 0, PTP_MAX_CLOCKS, "ptp"); + if (err < 0) { + pr_err("ptp: failed to allocate device region\n"); + goto no_region; + } + + ptp_class->dev_attrs = ptp_dev_attrs; + pr_info("PTP clock support registered\n"); + return 0; + +no_region: + class_destroy(ptp_class); + return err; +} + +subsys_initcall(ptp_init); +module_exit(ptp_exit); + +MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>"); +MODULE_DESCRIPTION("PTP clocks support"); +MODULE_LICENSE("GPL"); diff --git a/drivers/ptp/ptp_ixp46x.c b/drivers/ptp/ptp_ixp46x.c new file mode 100644 index 00000000000..803d665b15e --- /dev/null +++ b/drivers/ptp/ptp_ixp46x.c @@ -0,0 +1,332 @@ +/* + * PTP 1588 clock using the IXP46X + * + * Copyright (C) 2010 OMICRON electronics GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ +#include <linux/device.h> +#include <linux/err.h> +#include <linux/gpio.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/module.h> + +#include <linux/ptp_clock_kernel.h> +#include <mach/ixp46x_ts.h> + +#define DRIVER "ptp_ixp46x" +#define N_EXT_TS 2 +#define MASTER_GPIO 8 +#define MASTER_IRQ 25 +#define SLAVE_GPIO 7 +#define SLAVE_IRQ 24 + +struct ixp_clock { + struct ixp46x_ts_regs *regs; + struct ptp_clock *ptp_clock; + struct ptp_clock_info caps; + int exts0_enabled; + int exts1_enabled; +}; + +DEFINE_SPINLOCK(register_lock); + +/* + * Register access functions + */ + +static u64 ixp_systime_read(struct ixp46x_ts_regs *regs) +{ + u64 ns; + u32 lo, hi; + + lo = __raw_readl(®s->systime_lo); + hi = __raw_readl(®s->systime_hi); + + ns = ((u64) hi) << 32; + ns |= lo; + ns <<= TICKS_NS_SHIFT; + + return ns; +} + +static void ixp_systime_write(struct ixp46x_ts_regs *regs, u64 ns) +{ + u32 hi, lo; + + ns >>= TICKS_NS_SHIFT; + hi = ns >> 32; + lo = ns & 0xffffffff; + + __raw_writel(lo, ®s->systime_lo); + __raw_writel(hi, ®s->systime_hi); +} + +/* + * Interrupt service routine + */ + +static irqreturn_t isr(int irq, void *priv) +{ + struct ixp_clock *ixp_clock = priv; + struct ixp46x_ts_regs *regs = ixp_clock->regs; + struct ptp_clock_event event; + u32 ack = 0, lo, hi, val; + + val = __raw_readl(®s->event); + + if (val & TSER_SNS) { + ack |= TSER_SNS; + if (ixp_clock->exts0_enabled) { + hi = __raw_readl(®s->asms_hi); + lo = __raw_readl(®s->asms_lo); + event.type = PTP_CLOCK_EXTTS; + event.index = 0; + event.timestamp = ((u64) hi) << 32; + event.timestamp |= lo; + event.timestamp <<= TICKS_NS_SHIFT; + ptp_clock_event(ixp_clock->ptp_clock, &event); + } + } + + if (val & TSER_SNM) { + ack |= TSER_SNM; + if (ixp_clock->exts1_enabled) { + hi = __raw_readl(®s->amms_hi); + lo = __raw_readl(®s->amms_lo); + event.type = PTP_CLOCK_EXTTS; + event.index = 1; + event.timestamp = ((u64) hi) << 32; + event.timestamp |= lo; + event.timestamp <<= TICKS_NS_SHIFT; + ptp_clock_event(ixp_clock->ptp_clock, &event); + } + } + + if (val & TTIPEND) + ack |= TTIPEND; /* this bit seems to be always set */ + + if (ack) { + __raw_writel(ack, ®s->event); + return IRQ_HANDLED; + } else + return IRQ_NONE; +} + +/* + * PTP clock operations + */ + +static int ptp_ixp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + u64 adj; + u32 diff, addend; + int neg_adj = 0; + struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps); + struct ixp46x_ts_regs *regs = ixp_clock->regs; + + if (ppb < 0) { + neg_adj = 1; + ppb = -ppb; + } + addend = DEFAULT_ADDEND; + adj = addend; + adj *= ppb; + diff = div_u64(adj, 1000000000ULL); + + addend = neg_adj ? 
addend - diff : addend + diff; + + __raw_writel(addend, ®s->addend); + + return 0; +} + +static int ptp_ixp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + s64 now; + unsigned long flags; + struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps); + struct ixp46x_ts_regs *regs = ixp_clock->regs; + + spin_lock_irqsave(®ister_lock, flags); + + now = ixp_systime_read(regs); + now += delta; + ixp_systime_write(regs, now); + + spin_unlock_irqrestore(®ister_lock, flags); + + return 0; +} + +static int ptp_ixp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +{ + u64 ns; + u32 remainder; + unsigned long flags; + struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps); + struct ixp46x_ts_regs *regs = ixp_clock->regs; + + spin_lock_irqsave(®ister_lock, flags); + + ns = ixp_systime_read(regs); + + spin_unlock_irqrestore(®ister_lock, flags); + + ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); + ts->tv_nsec = remainder; + return 0; +} + +static int ptp_ixp_settime(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + u64 ns; + unsigned long flags; + struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps); + struct ixp46x_ts_regs *regs = ixp_clock->regs; + + ns = ts->tv_sec * 1000000000ULL; + ns += ts->tv_nsec; + + spin_lock_irqsave(®ister_lock, flags); + + ixp_systime_write(regs, ns); + + spin_unlock_irqrestore(®ister_lock, flags); + + return 0; +} + +static int ptp_ixp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps); + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + switch (rq->extts.index) { + case 0: + ixp_clock->exts0_enabled = on ? 1 : 0; + break; + case 1: + ixp_clock->exts1_enabled = on ? 
1 : 0; + break; + default: + return -EINVAL; + } + return 0; + default: + break; + } + + return -EOPNOTSUPP; +} + +static struct ptp_clock_info ptp_ixp_caps = { + .owner = THIS_MODULE, + .name = "IXP46X timer", + .max_adj = 66666655, + .n_ext_ts = N_EXT_TS, + .pps = 0, + .adjfreq = ptp_ixp_adjfreq, + .adjtime = ptp_ixp_adjtime, + .gettime = ptp_ixp_gettime, + .settime = ptp_ixp_settime, + .enable = ptp_ixp_enable, +}; + +/* module operations */ + +static struct ixp_clock ixp_clock; + +static int setup_interrupt(int gpio) +{ + int irq; + + gpio_line_config(gpio, IXP4XX_GPIO_IN); + + irq = gpio_to_irq(gpio); + + if (NO_IRQ == irq) + return NO_IRQ; + + if (irq_set_irq_type(irq, IRQF_TRIGGER_FALLING)) { + pr_err("cannot set trigger type for irq %d\n", irq); + return NO_IRQ; + } + + if (request_irq(irq, isr, 0, DRIVER, &ixp_clock)) { + pr_err("request_irq failed for irq %d\n", irq); + return NO_IRQ; + } + + return irq; +} + +static void __exit ptp_ixp_exit(void) +{ + free_irq(MASTER_IRQ, &ixp_clock); + free_irq(SLAVE_IRQ, &ixp_clock); + ptp_clock_unregister(ixp_clock.ptp_clock); +} + +static int __init ptp_ixp_init(void) +{ + if (!cpu_is_ixp46x()) + return -ENODEV; + + ixp_clock.regs = + (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; + + ixp_clock.caps = ptp_ixp_caps; + + ixp_clock.ptp_clock = ptp_clock_register(&ixp_clock.caps); + + if (IS_ERR(ixp_clock.ptp_clock)) + return PTR_ERR(ixp_clock.ptp_clock); + + __raw_writel(DEFAULT_ADDEND, &ixp_clock.regs->addend); + __raw_writel(1, &ixp_clock.regs->trgt_lo); + __raw_writel(0, &ixp_clock.regs->trgt_hi); + __raw_writel(TTIPEND, &ixp_clock.regs->event); + + if (MASTER_IRQ != setup_interrupt(MASTER_GPIO)) { + pr_err("failed to setup gpio %d as irq\n", MASTER_GPIO); + goto no_master; + } + if (SLAVE_IRQ != setup_interrupt(SLAVE_GPIO)) { + pr_err("failed to setup gpio %d as irq\n", SLAVE_GPIO); + goto no_slave; + } + + return 0; +no_slave: + free_irq(MASTER_IRQ, &ixp_clock); +no_master: + ptp_clock_unregister(ixp_clock.ptp_clock); + return -ENODEV; +} + +module_init(ptp_ixp_init); +module_exit(ptp_ixp_exit); + +MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>"); +MODULE_DESCRIPTION("PTP clock using the IXP46X timer"); +MODULE_LICENSE("GPL"); diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h new file mode 100644 index 00000000000..4d5b5082c3b --- /dev/null +++ b/drivers/ptp/ptp_private.h @@ -0,0 +1,92 @@ +/* + * PTP 1588 clock support - private declarations for the core module. + * + * Copyright (C) 2010 OMICRON electronics GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ +#ifndef _PTP_PRIVATE_H_ +#define _PTP_PRIVATE_H_ + +#include <linux/cdev.h> +#include <linux/device.h> +#include <linux/mutex.h> +#include <linux/posix-clock.h> +#include <linux/ptp_clock.h> +#include <linux/ptp_clock_kernel.h> +#include <linux/time.h> + +#define PTP_MAX_TIMESTAMPS 128 +#define PTP_BUF_TIMESTAMPS 30 + +struct timestamp_event_queue { + struct ptp_extts_event buf[PTP_MAX_TIMESTAMPS]; + int head; + int tail; + spinlock_t lock; +}; + +struct ptp_clock { + struct posix_clock clock; + struct device *dev; + struct ptp_clock_info *info; + dev_t devid; + int index; /* index into clocks.map */ + struct pps_device *pps_source; + struct timestamp_event_queue tsevq; /* simple fifo for time stamps */ + struct mutex tsevq_mux; /* one process at a time reading the fifo */ + wait_queue_head_t tsev_wq; + int defunct; /* tells readers to go away when clock is being removed */ +}; + +/* + * The function queue_cnt() is safe for readers to call without + * holding q->lock. Readers use this function to verify that the queue + * is nonempty before proceeding with a dequeue operation. The fact + * that a writer might concurrently increment the tail does not + * matter, since the queue remains nonempty nonetheless. + */ +static inline int queue_cnt(struct timestamp_event_queue *q) +{ + int cnt = q->tail - q->head; + return cnt < 0 ? PTP_MAX_TIMESTAMPS + cnt : cnt; +} + +/* + * see ptp_chardev.c + */ + +long ptp_ioctl(struct posix_clock *pc, + unsigned int cmd, unsigned long arg); + +int ptp_open(struct posix_clock *pc, fmode_t fmode); + +ssize_t ptp_read(struct posix_clock *pc, + uint flags, char __user *buf, size_t cnt); + +uint ptp_poll(struct posix_clock *pc, + struct file *fp, poll_table *wait); + +/* + * see ptp_sysfs.c + */ + +extern struct device_attribute ptp_dev_attrs[]; + +int ptp_cleanup_sysfs(struct ptp_clock *ptp); + +int ptp_populate_sysfs(struct ptp_clock *ptp); + +#endif diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c new file mode 100644 index 00000000000..2f93926ac97 --- /dev/null +++ b/drivers/ptp/ptp_sysfs.c @@ -0,0 +1,230 @@ +/* + * PTP 1588 clock support - sysfs interface. + * + * Copyright (C) 2010 OMICRON electronics GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ +#include <linux/capability.h> + +#include "ptp_private.h" + +static ssize_t clock_name_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct ptp_clock *ptp = dev_get_drvdata(dev); + return snprintf(page, PAGE_SIZE-1, "%s\n", ptp->info->name); +} + +#define PTP_SHOW_INT(name) \ +static ssize_t name##_show(struct device *dev, \ + struct device_attribute *attr, char *page) \ +{ \ + struct ptp_clock *ptp = dev_get_drvdata(dev); \ + return snprintf(page, PAGE_SIZE-1, "%d\n", ptp->info->name); \ +} + +PTP_SHOW_INT(max_adj); +PTP_SHOW_INT(n_alarm); +PTP_SHOW_INT(n_ext_ts); +PTP_SHOW_INT(n_per_out); +PTP_SHOW_INT(pps); + +#define PTP_RO_ATTR(_var, _name) { \ + .attr = { .name = __stringify(_name), .mode = 0444 }, \ + .show = _var##_show, \ +} + +struct device_attribute ptp_dev_attrs[] = { + PTP_RO_ATTR(clock_name, clock_name), + PTP_RO_ATTR(max_adj, max_adjustment), + PTP_RO_ATTR(n_alarm, n_alarms), + PTP_RO_ATTR(n_ext_ts, n_external_timestamps), + PTP_RO_ATTR(n_per_out, n_periodic_outputs), + PTP_RO_ATTR(pps, pps_available), + __ATTR_NULL, +}; + +static ssize_t extts_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ptp_clock *ptp = dev_get_drvdata(dev); + struct ptp_clock_info *ops = ptp->info; + struct ptp_clock_request req = { .type = PTP_CLK_REQ_EXTTS }; + int cnt, enable; + int err = -EINVAL; + + cnt = sscanf(buf, "%u %d", &req.extts.index, &enable); + if (cnt != 2) + goto out; + if (req.extts.index >= ops->n_ext_ts) + goto out; + + err = ops->enable(ops, &req, enable ? 1 : 0); + if (err) + goto out; + + return count; +out: + return err; +} + +static ssize_t extts_fifo_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct ptp_clock *ptp = dev_get_drvdata(dev); + struct timestamp_event_queue *queue = &ptp->tsevq; + struct ptp_extts_event event; + unsigned long flags; + size_t qcnt; + int cnt = 0; + + memset(&event, 0, sizeof(event)); + + if (mutex_lock_interruptible(&ptp->tsevq_mux)) + return -ERESTARTSYS; + + spin_lock_irqsave(&queue->lock, flags); + qcnt = queue_cnt(queue); + if (qcnt) { + event = queue->buf[queue->head]; + queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS; + } + spin_unlock_irqrestore(&queue->lock, flags); + + if (!qcnt) + goto out; + + cnt = snprintf(page, PAGE_SIZE, "%u %lld %u\n", + event.index, event.t.sec, event.t.nsec); +out: + mutex_unlock(&ptp->tsevq_mux); + return cnt; +} + +static ssize_t period_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ptp_clock *ptp = dev_get_drvdata(dev); + struct ptp_clock_info *ops = ptp->info; + struct ptp_clock_request req = { .type = PTP_CLK_REQ_PEROUT }; + int cnt, enable, err = -EINVAL; + + cnt = sscanf(buf, "%u %lld %u %lld %u", &req.perout.index, + &req.perout.start.sec, &req.perout.start.nsec, + &req.perout.period.sec, &req.perout.period.nsec); + if (cnt != 5) + goto out; + if (req.perout.index >= ops->n_per_out) + goto out; + + enable = req.perout.period.sec || req.perout.period.nsec; + err = ops->enable(ops, &req, enable); + if (err) + goto out; + + return count; +out: + return err; +} + +static ssize_t pps_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ptp_clock *ptp = dev_get_drvdata(dev); + struct ptp_clock_info *ops = ptp->info; + struct ptp_clock_request req = { .type = PTP_CLK_REQ_PPS }; + int cnt, enable; + int err = -EINVAL; + + if (!capable(CAP_SYS_TIME)) + return -EPERM; + + 
cnt = sscanf(buf, "%d", &enable); + if (cnt != 1) + goto out; + + err = ops->enable(ops, &req, enable ? 1 : 0); + if (err) + goto out; + + return count; +out: + return err; +} + +static DEVICE_ATTR(extts_enable, 0220, NULL, extts_enable_store); +static DEVICE_ATTR(fifo, 0444, extts_fifo_show, NULL); +static DEVICE_ATTR(period, 0220, NULL, period_store); +static DEVICE_ATTR(pps_enable, 0220, NULL, pps_enable_store); + +int ptp_cleanup_sysfs(struct ptp_clock *ptp) +{ + struct device *dev = ptp->dev; + struct ptp_clock_info *info = ptp->info; + + if (info->n_ext_ts) { + device_remove_file(dev, &dev_attr_extts_enable); + device_remove_file(dev, &dev_attr_fifo); + } + if (info->n_per_out) + device_remove_file(dev, &dev_attr_period); + + if (info->pps) + device_remove_file(dev, &dev_attr_pps_enable); + + return 0; +} + +int ptp_populate_sysfs(struct ptp_clock *ptp) +{ + struct device *dev = ptp->dev; + struct ptp_clock_info *info = ptp->info; + int err; + + if (info->n_ext_ts) { + err = device_create_file(dev, &dev_attr_extts_enable); + if (err) + goto out1; + err = device_create_file(dev, &dev_attr_fifo); + if (err) + goto out2; + } + if (info->n_per_out) { + err = device_create_file(dev, &dev_attr_period); + if (err) + goto out3; + } + if (info->pps) { + err = device_create_file(dev, &dev_attr_pps_enable); + if (err) + goto out4; + } + return 0; +out4: + if (info->n_per_out) + device_remove_file(dev, &dev_attr_period); +out3: + if (info->n_ext_ts) + device_remove_file(dev, &dev_attr_fifo); +out2: + if (info->n_ext_ts) + device_remove_file(dev, &dev_attr_extts_enable); +out1: + return err; +} diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index b9f29e0d429..f0b13a0d185 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -274,6 +274,13 @@ config REGULATOR_AB8500 This driver supports the regulators found on the ST-Ericsson mixed signal AB8500 PMIC +config REGULATOR_DB8500_PRCMU + bool "ST-Ericsson DB8500 Voltage Domain Regulators" + depends on MFD_DB8500_PRCMU + help + This driver supports the voltage domain regulators controlled by the + DB8500 PRCMU + config REGULATOR_TPS6586X tristate "TI TPS6586X Power regulators" depends on MFD_TPS6586X diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index d72a4275677..165ff5371e9 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -41,5 +41,6 @@ obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o +obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c new file mode 100644 index 00000000000..1089a961616 --- /dev/null +++ b/drivers/regulator/db8500-prcmu.c @@ -0,0 +1,558 @@ +/* + * Copyright (C) ST-Ericsson SA 2010 + * + * License Terms: GNU General Public License v2 + * Authors: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson + * Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson + * + * Power domain regulators on DB8500 + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/spinlock.h> +#include <linux/platform_device.h> +#include <linux/mfd/core.h> +#include <linux/mfd/db8500-prcmu.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/machine.h> +#include <linux/regulator/db8500-prcmu.h> + +/* + * power state reference 
count + */ +static int power_state_active_cnt; /* will initialize to zero */ +static DEFINE_SPINLOCK(power_state_active_lock); + +static void power_state_active_enable(void) +{ + unsigned long flags; + + spin_lock_irqsave(&power_state_active_lock, flags); + power_state_active_cnt++; + spin_unlock_irqrestore(&power_state_active_lock, flags); +} + +static int power_state_active_disable(void) +{ + int ret = 0; + unsigned long flags; + + spin_lock_irqsave(&power_state_active_lock, flags); + if (power_state_active_cnt <= 0) { + pr_err("power state: unbalanced enable/disable calls\n"); + ret = -EINVAL; + goto out; + } + + power_state_active_cnt--; +out: + spin_unlock_irqrestore(&power_state_active_lock, flags); + return ret; +} + +/* + * Exported interface for CPUIdle only. This function is called when interrupts + * are turned off. Hence, no locking. + */ +int power_state_active_is_enabled(void) +{ + return (power_state_active_cnt > 0); +} + +/** + * struct db8500_regulator_info - db8500 regulator information + * @dev: device pointer + * @desc: regulator description + * @rdev: regulator device pointer + * @is_enabled: status of the regulator + * @epod_id: id for EPOD (power domain) + * @is_ramret: RAM retention switch for EPOD (power domain) + * @operating_point: operating point (only for vape, to be removed) + * + */ +struct db8500_regulator_info { + struct device *dev; + struct regulator_desc desc; + struct regulator_dev *rdev; + bool is_enabled; + u16 epod_id; + bool is_ramret; + bool exclude_from_power_state; + unsigned int operating_point; +}; + +static int db8500_regulator_enable(struct regulator_dev *rdev) +{ + struct db8500_regulator_info *info = rdev_get_drvdata(rdev); + + if (info == NULL) + return -EINVAL; + + dev_vdbg(rdev_get_dev(rdev), "regulator-%s-enable\n", + info->desc.name); + + info->is_enabled = true; + if (!info->exclude_from_power_state) + power_state_active_enable(); + + return 0; +} + +static int db8500_regulator_disable(struct regulator_dev *rdev) +{ + struct db8500_regulator_info *info = rdev_get_drvdata(rdev); + int ret = 0; + + if (info == NULL) + return -EINVAL; + + dev_vdbg(rdev_get_dev(rdev), "regulator-%s-disable\n", + info->desc.name); + + info->is_enabled = false; + if (!info->exclude_from_power_state) + ret = power_state_active_disable(); + + return ret; +} + +static int db8500_regulator_is_enabled(struct regulator_dev *rdev) +{ + struct db8500_regulator_info *info = rdev_get_drvdata(rdev); + + if (info == NULL) + return -EINVAL; + + dev_vdbg(rdev_get_dev(rdev), "regulator-%s-is_enabled (is_enabled):" + " %i\n", info->desc.name, info->is_enabled); + + return info->is_enabled; +} + +/* db8500 regulator operations */ +static struct regulator_ops db8500_regulator_ops = { + .enable = db8500_regulator_enable, + .disable = db8500_regulator_disable, + .is_enabled = db8500_regulator_is_enabled, +}; + +/* + * EPOD control + */ +static bool epod_on[NUM_EPOD_ID]; +static bool epod_ramret[NUM_EPOD_ID]; + +static int enable_epod(u16 epod_id, bool ramret) +{ + int ret; + + if (ramret) { + if (!epod_on[epod_id]) { + ret = prcmu_set_epod(epod_id, EPOD_STATE_RAMRET); + if (ret < 0) + return ret; + } + epod_ramret[epod_id] = true; + } else { + ret = prcmu_set_epod(epod_id, EPOD_STATE_ON); + if (ret < 0) + return ret; + epod_on[epod_id] = true; + } + + return 0; +} + +static int disable_epod(u16 epod_id, bool ramret) +{ + int ret; + + if (ramret) { + if (!epod_on[epod_id]) { + ret = prcmu_set_epod(epod_id, EPOD_STATE_OFF); + if (ret < 0) + return ret; + } + epod_ramret[epod_id] 
= false; + } else { + if (epod_ramret[epod_id]) { + ret = prcmu_set_epod(epod_id, EPOD_STATE_RAMRET); + if (ret < 0) + return ret; + } else { + ret = prcmu_set_epod(epod_id, EPOD_STATE_OFF); + if (ret < 0) + return ret; + } + epod_on[epod_id] = false; + } + + return 0; +} + +/* + * Regulator switch + */ +static int db8500_regulator_switch_enable(struct regulator_dev *rdev) +{ + struct db8500_regulator_info *info = rdev_get_drvdata(rdev); + int ret; + + if (info == NULL) + return -EINVAL; + + dev_vdbg(rdev_get_dev(rdev), "regulator-switch-%s-enable\n", + info->desc.name); + + ret = enable_epod(info->epod_id, info->is_ramret); + if (ret < 0) { + dev_err(rdev_get_dev(rdev), + "regulator-switch-%s-enable: prcmu call failed\n", + info->desc.name); + goto out; + } + + info->is_enabled = true; +out: + return ret; +} + +static int db8500_regulator_switch_disable(struct regulator_dev *rdev) +{ + struct db8500_regulator_info *info = rdev_get_drvdata(rdev); + int ret; + + if (info == NULL) + return -EINVAL; + + dev_vdbg(rdev_get_dev(rdev), "regulator-switch-%s-disable\n", + info->desc.name); + + ret = disable_epod(info->epod_id, info->is_ramret); + if (ret < 0) { + dev_err(rdev_get_dev(rdev), + "regulator_switch-%s-disable: prcmu call failed\n", + info->desc.name); + goto out; + } + + info->is_enabled = 0; +out: + return ret; +} + +static int db8500_regulator_switch_is_enabled(struct regulator_dev *rdev) +{ + struct db8500_regulator_info *info = rdev_get_drvdata(rdev); + + if (info == NULL) + return -EINVAL; + + dev_vdbg(rdev_get_dev(rdev), + "regulator-switch-%s-is_enabled (is_enabled): %i\n", + info->desc.name, info->is_enabled); + + return info->is_enabled; +} + +static struct regulator_ops db8500_regulator_switch_ops = { + .enable = db8500_regulator_switch_enable, + .disable = db8500_regulator_switch_disable, + .is_enabled = db8500_regulator_switch_is_enabled, +}; + +/* + * Regulator information + */ +static struct db8500_regulator_info + db8500_regulator_info[DB8500_NUM_REGULATORS] = { + [DB8500_REGULATOR_VAPE] = { + .desc = { + .name = "db8500-vape", + .id = DB8500_REGULATOR_VAPE, + .ops = &db8500_regulator_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + }, + }, + [DB8500_REGULATOR_VARM] = { + .desc = { + .name = "db8500-varm", + .id = DB8500_REGULATOR_VARM, + .ops = &db8500_regulator_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + }, + }, + [DB8500_REGULATOR_VMODEM] = { + .desc = { + .name = "db8500-vmodem", + .id = DB8500_REGULATOR_VMODEM, + .ops = &db8500_regulator_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + }, + }, + [DB8500_REGULATOR_VPLL] = { + .desc = { + .name = "db8500-vpll", + .id = DB8500_REGULATOR_VPLL, + .ops = &db8500_regulator_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + }, + }, + [DB8500_REGULATOR_VSMPS1] = { + .desc = { + .name = "db8500-vsmps1", + .id = DB8500_REGULATOR_VSMPS1, + .ops = &db8500_regulator_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + }, + }, + [DB8500_REGULATOR_VSMPS2] = { + .desc = { + .name = "db8500-vsmps2", + .id = DB8500_REGULATOR_VSMPS2, + .ops = &db8500_regulator_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + }, + .exclude_from_power_state = true, + }, + [DB8500_REGULATOR_VSMPS3] = { + .desc = { + .name = "db8500-vsmps3", + .id = DB8500_REGULATOR_VSMPS3, + .ops = &db8500_regulator_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + }, + }, + [DB8500_REGULATOR_VRF1] = { + .desc = { + .name = "db8500-vrf1", + .id = DB8500_REGULATOR_VRF1, + .ops = 
&db8500_regulator_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + }, + }, + [DB8500_REGULATOR_SWITCH_SVAMMDSP] = { + .desc = { + .name = "db8500-sva-mmdsp", + .id = DB8500_REGULATOR_SWITCH_SVAMMDSP, + .ops = &db8500_regulator_switch_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + }, + .epod_id = EPOD_ID_SVAMMDSP, + }, + [DB8500_REGULATOR_SWITCH_SVAMMDSPRET] = { + .desc = { + .name = "db8500-sva-mmdsp-ret", + .id = DB8500_REGULATOR_SWITCH_SVAMMDSPRET, + .ops = &db8500_regulator_switch_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + }, + .epod_id = EPOD_ID_SVAMMDSP, + .is_ramret = true, + }, + [DB8500_REGULATOR_SWITCH_SVAPIPE] = { + .desc = { + .name = "db8500-sva-pipe", + .id = DB8500_REGULATOR_SWITCH_SVAPIPE, + .ops = &db8500_regulator_switch_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + }, + .epod_id = EPOD_ID_SVAPIPE, + }, + [DB8500_REGULATOR_SWITCH_SIAMMDSP] = { + .desc = { + .name = "db8500-sia-mmdsp", + .id = DB8500_REGULATOR_SWITCH_SIAMMDSP, + .ops = &db8500_regulator_switch_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + }, + .epod_id = EPOD_ID_SIAMMDSP, + }, + [DB8500_REGULATOR_SWITCH_SIAMMDSPRET] = { + .desc = { + .name = "db8500-sia-mmdsp-ret", + .id = DB8500_REGULATOR_SWITCH_SIAMMDSPRET, + .ops = &db8500_regulator_switch_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + }, + .epod_id = EPOD_ID_SIAMMDSP, + .is_ramret = true, + }, + [DB8500_REGULATOR_SWITCH_SIAPIPE] = { + .desc = { + .name = "db8500-sia-pipe", + .id = DB8500_REGULATOR_SWITCH_SIAPIPE, + .ops = &db8500_regulator_switch_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + }, + .epod_id = EPOD_ID_SIAPIPE, + }, + [DB8500_REGULATOR_SWITCH_SGA] = { + .desc = { + .name = "db8500-sga", + .id = DB8500_REGULATOR_SWITCH_SGA, + .ops = &db8500_regulator_switch_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + }, + .epod_id = EPOD_ID_SGA, + }, + [DB8500_REGULATOR_SWITCH_B2R2_MCDE] = { + .desc = { + .name = "db8500-b2r2-mcde", + .id = DB8500_REGULATOR_SWITCH_B2R2_MCDE, + .ops = &db8500_regulator_switch_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + }, + .epod_id = EPOD_ID_B2R2_MCDE, + }, + [DB8500_REGULATOR_SWITCH_ESRAM12] = { + .desc = { + .name = "db8500-esram12", + .id = DB8500_REGULATOR_SWITCH_ESRAM12, + .ops = &db8500_regulator_switch_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + }, + .epod_id = EPOD_ID_ESRAM12, + .is_enabled = true, + }, + [DB8500_REGULATOR_SWITCH_ESRAM12RET] = { + .desc = { + .name = "db8500-esram12-ret", + .id = DB8500_REGULATOR_SWITCH_ESRAM12RET, + .ops = &db8500_regulator_switch_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + }, + .epod_id = EPOD_ID_ESRAM12, + .is_ramret = true, + }, + [DB8500_REGULATOR_SWITCH_ESRAM34] = { + .desc = { + .name = "db8500-esram34", + .id = DB8500_REGULATOR_SWITCH_ESRAM34, + .ops = &db8500_regulator_switch_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + }, + .epod_id = EPOD_ID_ESRAM34, + .is_enabled = true, + }, + [DB8500_REGULATOR_SWITCH_ESRAM34RET] = { + .desc = { + .name = "db8500-esram34-ret", + .id = DB8500_REGULATOR_SWITCH_ESRAM34RET, + .ops = &db8500_regulator_switch_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + }, + .epod_id = EPOD_ID_ESRAM34, + .is_ramret = true, + }, +}; + +static int __devinit db8500_regulator_probe(struct platform_device *pdev) +{ + struct regulator_init_data *db8500_init_data = mfd_get_data(pdev); + int i, err; + + /* register all regulators */ + for (i = 0; i < 
ARRAY_SIZE(db8500_regulator_info); i++) { + struct db8500_regulator_info *info; + struct regulator_init_data *init_data = &db8500_init_data[i]; + + /* assign per-regulator data */ + info = &db8500_regulator_info[i]; + info->dev = &pdev->dev; + + /* register with the regulator framework */ + info->rdev = regulator_register(&info->desc, &pdev->dev, + init_data, info); + if (IS_ERR(info->rdev)) { + err = PTR_ERR(info->rdev); + dev_err(&pdev->dev, "failed to register %s: err %i\n", + info->desc.name, err); + + /* if failing, unregister all earlier regulators */ + i--; + while (i >= 0) { + info = &db8500_regulator_info[i]; + regulator_unregister(info->rdev); + i--; + } + return err; + } + + dev_dbg(rdev_get_dev(info->rdev), + "regulator-%s-probed\n", info->desc.name); + } + + return 0; +} + +static int __exit db8500_regulator_remove(struct platform_device *pdev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(db8500_regulator_info); i++) { + struct db8500_regulator_info *info; + info = &db8500_regulator_info[i]; + + dev_vdbg(rdev_get_dev(info->rdev), + "regulator-%s-remove\n", info->desc.name); + + regulator_unregister(info->rdev); + } + + return 0; +} + +static struct platform_driver db8500_regulator_driver = { + .driver = { + .name = "db8500-prcmu-regulators", + .owner = THIS_MODULE, + }, + .probe = db8500_regulator_probe, + .remove = __exit_p(db8500_regulator_remove), +}; + +static int __init db8500_regulator_init(void) +{ + int ret; + + ret = platform_driver_register(&db8500_regulator_driver); + if (ret < 0) + return -ENODEV; + + return 0; +} + +static void __exit db8500_regulator_exit(void) +{ + platform_driver_unregister(&db8500_regulator_driver); +} + +arch_initcall(db8500_regulator_init); +module_exit(db8500_regulator_exit); + +MODULE_AUTHOR("STMicroelectronics/ST-Ericsson"); +MODULE_DESCRIPTION("DB8500 regulator driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 42891726ea7..b8f4e9e66cd 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -992,4 +992,11 @@ config RTC_DRV_TEGRA This drive can also be built as a module. If so, the module will be called rtc-tegra. +config RTC_DRV_TILE + tristate "Tilera hypervisor RTC support" + depends on TILE + help + Enable support for the Linux driver side of the Tilera + hypervisor's real-time clock interface. + endif # RTC_CLASS diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index ca91c3c42e9..9574748d1c7 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -93,6 +93,7 @@ obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o obj-$(CONFIG_RTC_DRV_TEGRA) += rtc-tegra.o obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o +obj-$(CONFIG_RTC_DRV_TILE) += rtc-tile.o obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o obj-$(CONFIG_RTC_DRV_TX4939) += rtc-tx4939.o obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o diff --git a/drivers/rtc/rtc-tile.c b/drivers/rtc/rtc-tile.c new file mode 100644 index 00000000000..eb65dafee66 --- /dev/null +++ b/drivers/rtc/rtc-tile.c @@ -0,0 +1,162 @@ +/* + * Copyright 2011 Tilera Corporation. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. 
See the GNU General Public License for + * more details. + * + * Tilera-specific RTC driver. + */ + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/rtc.h> +#include <linux/platform_device.h> + +/* Platform device pointer. */ +static struct platform_device *tile_rtc_platform_device; + +/* + * RTC read routine. Gets time info from RTC chip via hypervisor syscall. + */ +static int read_rtc_time(struct device *dev, struct rtc_time *tm) +{ + HV_RTCTime hvtm = hv_get_rtc(); + + tm->tm_sec = hvtm.tm_sec; + tm->tm_min = hvtm.tm_min; + tm->tm_hour = hvtm.tm_hour; + tm->tm_mday = hvtm.tm_mday; + tm->tm_mon = hvtm.tm_mon; + tm->tm_year = hvtm.tm_year; + tm->tm_wday = 0; + tm->tm_yday = 0; + tm->tm_isdst = 0; + + if (rtc_valid_tm(tm) < 0) + dev_warn(dev, "Read invalid date/time from RTC\n"); + + return 0; +} + +/* + * RTC write routine. Sends time info to hypervisor via syscall, to be + * written to RTC chip. + */ +static int set_rtc_time(struct device *dev, struct rtc_time *tm) +{ + HV_RTCTime hvtm; + + hvtm.tm_sec = tm->tm_sec; + hvtm.tm_min = tm->tm_min; + hvtm.tm_hour = tm->tm_hour; + hvtm.tm_mday = tm->tm_mday; + hvtm.tm_mon = tm->tm_mon; + hvtm.tm_year = tm->tm_year; + + hv_set_rtc(hvtm); + + return 0; +} + +/* + * RTC read/write ops. + */ +static const struct rtc_class_ops tile_rtc_ops = { + .read_time = read_rtc_time, + .set_time = set_rtc_time, +}; + +/* + * Device probe routine. + */ +static int __devinit tile_rtc_probe(struct platform_device *dev) +{ + struct rtc_device *rtc; + + rtc = rtc_device_register("tile", + &dev->dev, &tile_rtc_ops, THIS_MODULE); + + if (IS_ERR(rtc)) + return PTR_ERR(rtc); + + platform_set_drvdata(dev, rtc); + + return 0; +} + +/* + * Device cleanup routine. + */ +static int __devexit tile_rtc_remove(struct platform_device *dev) +{ + struct rtc_device *rtc = platform_get_drvdata(dev); + + if (rtc) + rtc_device_unregister(rtc); + + platform_set_drvdata(dev, NULL); + + return 0; +} + +static struct platform_driver tile_rtc_platform_driver = { + .driver = { + .name = "rtc-tile", + .owner = THIS_MODULE, + }, + .probe = tile_rtc_probe, + .remove = __devexit_p(tile_rtc_remove), +}; + +/* + * Driver init routine. + */ +static int __init tile_rtc_driver_init(void) +{ + int err; + + err = platform_driver_register(&tile_rtc_platform_driver); + if (err) + return err; + + tile_rtc_platform_device = platform_device_alloc("rtc-tile", 0); + if (tile_rtc_platform_device == NULL) { + err = -ENOMEM; + goto exit_driver_unregister; + } + + err = platform_device_add(tile_rtc_platform_device); + if (err) + goto exit_device_put; + + return 0; + +exit_device_put: + platform_device_put(tile_rtc_platform_device); + +exit_driver_unregister: + platform_driver_unregister(&tile_rtc_platform_driver); + return err; +} + +/* + * Driver cleanup routine. 
+ */ +static void __exit tile_rtc_driver_exit(void) +{ + platform_driver_unregister(&tile_rtc_platform_driver); +} + +module_init(tile_rtc_driver_init); +module_exit(tile_rtc_driver_exit); + +MODULE_DESCRIPTION("Tilera-specific Real Time Clock Driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:rtc-tile"); diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index 35381cb0936..03e522b2fe0 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c @@ -655,6 +655,27 @@ static int qla4_8xxx_pci_is_same_window(struct scsi_qla_host *ha, return 0; } +#ifndef readq +static inline __u64 readq(const volatile void __iomem *addr) +{ + const volatile u32 __iomem *p = addr; + u32 low, high; + + low = readl(p); + high = readl(p + 1); + + return low + ((u64)high << 32); +} +#endif + +#ifndef writeq +static inline void writeq(__u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val >> 32, addr+4); +} +#endif + static int qla4_8xxx_pci_mem_read_direct(struct scsi_qla_host *ha, u64 off, void *data, int size) { diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 95019c747cc..4778e270716 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -636,7 +636,7 @@ static int sr_probe(struct device *dev) disk->first_minor = minor; sprintf(disk->disk_name, "sr%d", minor); disk->fops = &sr_bdops; - disk->flags = GENHD_FL_CD; + disk->flags = GENHD_FL_CD | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE; disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST; blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT); diff --git a/drivers/staging/ath6kl/os/linux/cfg80211.c b/drivers/staging/ath6kl/os/linux/cfg80211.c index 31d7ba8299e..77dfb4070c1 100644 --- a/drivers/staging/ath6kl/os/linux/cfg80211.c +++ b/drivers/staging/ath6kl/os/linux/cfg80211.c @@ -587,7 +587,7 @@ ar6k_cfg80211_connect_event(struct ar6_softc *ar, u16 channel, WLAN_STATUS_SUCCESS, GFP_KERNEL); } else { /* inform roam event to cfg80211 */ - cfg80211_roamed(ar->arNetDev, bssid, + cfg80211_roamed(ar->arNetDev, ibss_channel, bssid, assocReqIe, assocReqLen, assocRespIe, assocRespLen, GFP_KERNEL); diff --git a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c index e3b409bb984..1827b0bf920 100644 --- a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c +++ b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c @@ -2869,7 +2869,7 @@ wl_bss_roaming_done(struct wl_priv *wl, struct net_device *ndev, wl_update_prof(wl, NULL, &e->addr, WL_PROF_BSSID); wl_update_bss_info(wl); - cfg80211_roamed(ndev, + cfg80211_roamed(ndev, NULL, (u8 *)wl_read_prof(wl, WL_PROF_BSSID), conn_info->req_ie, conn_info->req_ie_len, conn_info->resp_ie, conn_info->resp_ie_len, GFP_KERNEL); diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c index 76378397b76..fb466f4c92e 100644 --- a/drivers/staging/wlan-ng/cfg80211.c +++ b/drivers/staging/wlan-ng/cfg80211.c @@ -695,7 +695,7 @@ void prism2_disconnected(wlandevice_t *wlandev) void prism2_roamed(wlandevice_t *wlandev) { - cfg80211_roamed(wlandev->netdev, wlandev->bssid, + cfg80211_roamed(wlandev->netdev, NULL, wlandev->bssid, NULL, 0, NULL, 0, GFP_KERNEL); } diff --git a/drivers/staging/zcache/zcache.c b/drivers/staging/zcache/zcache.c index b8a2b30a157..77ac2d4d3ef 100644 --- a/drivers/staging/zcache/zcache.c +++ b/drivers/staging/zcache/zcache.c @@ -1181,9 +1181,12 @@ static bool zcache_freeze; /* * zcache shrinker interface (only useful for ephemeral pages, so zbud only) */ -static int 
shrink_zcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) +static int shrink_zcache_memory(struct shrinker *shrink, + struct shrink_control *sc) { int ret = -1; + int nr = sc->nr_to_scan; + gfp_t gfp_mask = sc->gfp_mask; if (nr >= 0) { if (!(gfp_mask & __GFP_FS)) diff --git a/drivers/tty/serial/68328serial.c b/drivers/tty/serial/68328serial.c index d5bfd41707e..e0a77540b8c 100644 --- a/drivers/tty/serial/68328serial.c +++ b/drivers/tty/serial/68328serial.c @@ -281,7 +281,7 @@ static void receive_chars(struct m68k_serial *info, unsigned short rx) #ifdef CONFIG_MAGIC_SYSRQ } else if (ch == 0x10) { /* ^P */ show_state(); - show_free_areas(); + show_free_areas(0); show_buffers(); /* show_net_buffers(); */ return; diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index c63d0d152af..f2cb7503fcb 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c @@ -15,6 +15,7 @@ *Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/serial_reg.h> +#include <linux/slab.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/serial_core.h> diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index f9916ca5ca4..549b960667c 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -1460,6 +1460,14 @@ config FB_S3 ---help--- Driver for graphics boards with S3 Trio / S3 Virge chip. +config FB_S3_DDC + bool "DDC for S3 support" + depends on FB_S3 + select FB_DDC + default y + help + Say Y here if you want DDC support for your S3 graphics card. + config FB_SAVAGE tristate "S3 Savage support" depends on FB && PCI && EXPERIMENTAL @@ -1983,6 +1991,18 @@ config FB_SH_MOBILE_HDMI ---help--- Driver for the on-chip SH-Mobile HDMI controller. +config FB_SH_MOBILE_MERAM + tristate "SuperH Mobile MERAM read ahead support for LCDC" + depends on FB_SH_MOBILE_LCDC + default y + ---help--- + Enable MERAM support for the SH-Mobile LCD controller. + + This will allow for caching of the framebuffer to provide more + reliable access under heavy main memory bus traffic situations. + Up to 4 memory channels can be configured, allowing 4 RGB or + 2 YCbCr framebuffers to be configured. + config FB_TMIO tristate "Toshiba Mobile IO FrameBuffer support" depends on FB && MFD_CORE @@ -2246,29 +2266,43 @@ config FB_METRONOME config FB_MB862XX tristate "Fujitsu MB862xx GDC support" depends on FB + depends on PCI || (OF && PPC) select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT ---help--- Frame buffer driver for Fujitsu Carmine/Coral-P(A)/Lime controllers. +choice + prompt "GDC variant" + depends on FB_MB862XX + config FB_MB862XX_PCI_GDC bool "Carmine/Coral-P(A) GDC" - depends on PCI && FB_MB862XX + depends on PCI ---help--- This enables framebuffer support for Fujitsu Carmine/Coral-P(A) PCI graphics controller devices. config FB_MB862XX_LIME bool "Lime GDC" - depends on FB_MB862XX - depends on OF && !FB_MB862XX_PCI_GDC - depends on PPC + depends on OF && PPC select FB_FOREIGN_ENDIAN select FB_LITTLE_ENDIAN ---help--- Framebuffer support for Fujitsu Lime GDC on host CPU bus. +endchoice + +config FB_MB862XX_I2C + bool "Support I2C bus on MB862XX GDC" + depends on FB_MB862XX && I2C + default y + help + Selecting this option adds Coral-P(A)/Lime GDC I2C bus adapter + driver to support accessing I2C devices on controller's I2C bus. + These are usually some video decoder chips. 
+ config FB_EP93XX tristate "EP93XX frame buffer support" depends on FB && ARCH_EP93XX diff --git a/drivers/video/Makefile b/drivers/video/Makefile index 2ea44b6625f..8b83129e209 100644 --- a/drivers/video/Makefile +++ b/drivers/video/Makefile @@ -130,6 +130,7 @@ obj-$(CONFIG_FB_UDL) += udlfb.o obj-$(CONFIG_FB_XILINX) += xilinxfb.o obj-$(CONFIG_SH_MIPI_DSI) += sh_mipi_dsi.o obj-$(CONFIG_FB_SH_MOBILE_HDMI) += sh_mobile_hdmi.o +obj-$(CONFIG_FB_SH_MOBILE_MERAM) += sh_mobile_meram.o obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o obj-$(CONFIG_FB_OMAP) += omap/ obj-y += omap2/ diff --git a/drivers/video/amifb.c b/drivers/video/amifb.c index e5d6b56d444..5ea6596dd82 100644 --- a/drivers/video/amifb.c +++ b/drivers/video/amifb.c @@ -2224,22 +2224,23 @@ static int amifb_ioctl(struct fb_info *info, * Allocate, Clear and Align a Block of Chip Memory */ -static u_long unaligned_chipptr = 0; +static void *aligned_chipptr; static inline u_long __init chipalloc(u_long size) { - size += PAGE_SIZE-1; - if (!(unaligned_chipptr = (u_long)amiga_chip_alloc(size, - "amifb [RAM]"))) - panic("No Chip RAM for frame buffer"); - memset((void *)unaligned_chipptr, 0, size); - return PAGE_ALIGN(unaligned_chipptr); + aligned_chipptr = amiga_chip_alloc(size, "amifb [RAM]"); + if (!aligned_chipptr) { + pr_err("amifb: No Chip RAM for frame buffer"); + return 0; + } + memset(aligned_chipptr, 0, size); + return (u_long)aligned_chipptr; } static inline void chipfree(void) { - if (unaligned_chipptr) - amiga_chip_free((void *)unaligned_chipptr); + if (aligned_chipptr) + amiga_chip_free(aligned_chipptr); } @@ -2295,7 +2296,7 @@ default_chipset: defmode = amiga_vblank == 50 ? DEFMODE_PAL : DEFMODE_NTSC; if (amiga_chip_avail()-CHIPRAM_SAFETY_LIMIT > - VIDEOMEMSIZE_ECS_1M) + VIDEOMEMSIZE_ECS_2M) fb_info.fix.smem_len = VIDEOMEMSIZE_ECS_2M; else fb_info.fix.smem_len = VIDEOMEMSIZE_ECS_1M; @@ -2312,7 +2313,7 @@ default_chipset: maxfmode = TAG_FMODE_4; defmode = DEFMODE_AGA; if (amiga_chip_avail()-CHIPRAM_SAFETY_LIMIT > - VIDEOMEMSIZE_AGA_1M) + VIDEOMEMSIZE_AGA_2M) fb_info.fix.smem_len = VIDEOMEMSIZE_AGA_2M; else fb_info.fix.smem_len = VIDEOMEMSIZE_AGA_1M; @@ -2385,6 +2386,10 @@ default_chipset: DUMMYSPRITEMEMSIZE+ COPINITSIZE+ 4*COPLISTSIZE); + if (!chipptr) { + err = -ENOMEM; + goto amifb_error; + } assignchunk(videomemory, u_long, chipptr, fb_info.fix.smem_len); assignchunk(spritememory, u_long, chipptr, SPRITEMEMSIZE); diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c index af3119707db..d1aee730d7d 100644 --- a/drivers/video/backlight/adp5520_bl.c +++ b/drivers/video/backlight/adp5520_bl.c @@ -211,8 +211,12 @@ static ssize_t adp5520_bl_daylight_max_store(struct device *dev, const char *buf, size_t count) { struct adp5520_bl *data = dev_get_drvdata(dev); + int ret; + + ret = strict_strtoul(buf, 10, &data->cached_daylight_max); + if (ret < 0) + return ret; - strict_strtoul(buf, 10, &data->cached_daylight_max); return adp5520_store(dev, buf, count, ADP5520_DAYLIGHT_MAX); } static DEVICE_ATTR(daylight_max, 0664, adp5520_bl_daylight_max_show, diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c index 8b7d47386f3..fcdac872522 100644 --- a/drivers/video/da8xx-fb.c +++ b/drivers/video/da8xx-fb.c @@ -899,7 +899,7 @@ static struct fb_ops da8xx_fb_ops = { .fb_blank = cfb_blank, }; -static int __init fb_probe(struct platform_device *device) +static int __devinit fb_probe(struct platform_device *device) { struct da8xx_lcdc_platform_data *fb_pdata = device->dev.platform_data; @@ -1165,7 
+1165,7 @@ static int fb_resume(struct platform_device *dev) static struct platform_driver da8xx_fb_driver = { .probe = fb_probe, - .remove = fb_remove, + .remove = __devexit_p(fb_remove), .suspend = fb_suspend, .resume = fb_resume, .driver = { diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c index 4eb38db36e4..fb205843c2c 100644 --- a/drivers/video/efifb.c +++ b/drivers/video/efifb.c @@ -242,9 +242,9 @@ static int set_system(const struct dmi_system_id *id) return 0; } - printk(KERN_INFO "efifb: dmi detected %s - framebuffer at %p " + printk(KERN_INFO "efifb: dmi detected %s - framebuffer at 0x%08x " "(%dx%d, stride %d)\n", id->ident, - (void *)screen_info.lfb_base, screen_info.lfb_width, + screen_info.lfb_base, screen_info.lfb_width, screen_info.lfb_height, screen_info.lfb_linelength); diff --git a/drivers/video/mb862xx/Makefile b/drivers/video/mb862xx/Makefile index d7777714166..5707ed0e31a 100644 --- a/drivers/video/mb862xx/Makefile +++ b/drivers/video/mb862xx/Makefile @@ -2,4 +2,7 @@ # Makefile for the MB862xx framebuffer driver # -obj-$(CONFIG_FB_MB862XX) := mb862xxfb.o mb862xxfb_accel.o +obj-$(CONFIG_FB_MB862XX) += mb862xxfb.o + +mb862xxfb-y := mb862xxfbdrv.o mb862xxfb_accel.o +mb862xxfb-$(CONFIG_FB_MB862XX_I2C) += mb862xx-i2c.o diff --git a/drivers/video/mb862xx/mb862xx-i2c.c b/drivers/video/mb862xx/mb862xx-i2c.c new file mode 100644 index 00000000000..b953099edd8 --- /dev/null +++ b/drivers/video/mb862xx/mb862xx-i2c.c @@ -0,0 +1,178 @@ +/* + * Coral-P(A)/Lime I2C adapter driver + * + * (C) 2011 DENX Software Engineering, Anatolij Gustschin <agust@denx.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include <linux/fb.h> +#include <linux/i2c.h> +#include <linux/io.h> +#include <linux/delay.h> + +#include "mb862xxfb.h" +#include "mb862xx_reg.h" + +static int mb862xx_i2c_wait_event(struct i2c_adapter *adap) +{ + struct mb862xxfb_par *par = adap->algo_data; + u32 reg; + + do { + udelay(1); + reg = inreg(i2c, GC_I2C_BCR); + if (reg & (I2C_INT | I2C_BER)) + break; + } while (1); + + return (reg & I2C_BER) ? 0 : 1; +} + +static int mb862xx_i2c_do_address(struct i2c_adapter *adap, int addr) +{ + struct mb862xxfb_par *par = adap->algo_data; + + outreg(i2c, GC_I2C_DAR, addr); + outreg(i2c, GC_I2C_CCR, I2C_CLOCK_AND_ENABLE); + outreg(i2c, GC_I2C_BCR, par->i2c_rs ? I2C_REPEATED_START : I2C_START); + if (!mb862xx_i2c_wait_event(adap)) + return -EIO; + par->i2c_rs = !(inreg(i2c, GC_I2C_BSR) & I2C_LRB); + return par->i2c_rs; +} + +static int mb862xx_i2c_write_byte(struct i2c_adapter *adap, u8 byte) +{ + struct mb862xxfb_par *par = adap->algo_data; + + outreg(i2c, GC_I2C_DAR, byte); + outreg(i2c, GC_I2C_BCR, I2C_START); + if (!mb862xx_i2c_wait_event(adap)) + return -EIO; + return !(inreg(i2c, GC_I2C_BSR) & I2C_LRB); +} + +static int mb862xx_i2c_read_byte(struct i2c_adapter *adap, u8 *byte, int last) +{ + struct mb862xxfb_par *par = adap->algo_data; + + outreg(i2c, GC_I2C_BCR, I2C_START | (last ? 
0 : I2C_ACK)); + if (!mb862xx_i2c_wait_event(adap)) + return 0; + *byte = inreg(i2c, GC_I2C_DAR); + return 1; +} + +void mb862xx_i2c_stop(struct i2c_adapter *adap) +{ + struct mb862xxfb_par *par = adap->algo_data; + + outreg(i2c, GC_I2C_BCR, I2C_STOP); + outreg(i2c, GC_I2C_CCR, I2C_DISABLE); + par->i2c_rs = 0; +} + +static int mb862xx_i2c_read(struct i2c_adapter *adap, struct i2c_msg *m) +{ + int i, ret = 0; + int last = m->len - 1; + + for (i = 0; i < m->len; i++) { + if (!mb862xx_i2c_read_byte(adap, &m->buf[i], i == last)) { + ret = -EIO; + break; + } + } + return ret; +} + +static int mb862xx_i2c_write(struct i2c_adapter *adap, struct i2c_msg *m) +{ + int i, ret = 0; + + for (i = 0; i < m->len; i++) { + if (!mb862xx_i2c_write_byte(adap, m->buf[i])) { + ret = -EIO; + break; + } + } + return ret; +} + +static int mb862xx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, + int num) +{ + struct mb862xxfb_par *par = adap->algo_data; + struct i2c_msg *m; + int addr; + int i = 0, err = 0; + + dev_dbg(par->dev, "%s: %d msgs\n", __func__, num); + + for (i = 0; i < num; i++) { + m = &msgs[i]; + if (!m->len) { + dev_dbg(par->dev, "%s: null msgs\n", __func__); + continue; + } + addr = m->addr; + if (m->flags & I2C_M_RD) + addr |= 1; + + err = mb862xx_i2c_do_address(adap, addr); + if (err < 0) + break; + if (m->flags & I2C_M_RD) + err = mb862xx_i2c_read(adap, m); + else + err = mb862xx_i2c_write(adap, m); + } + + if (i) + mb862xx_i2c_stop(adap); + + return (err < 0) ? err : i; +} + +static u32 mb862xx_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_SMBUS_BYTE_DATA; +} + +static const struct i2c_algorithm mb862xx_algo = { + .master_xfer = mb862xx_xfer, + .functionality = mb862xx_func, +}; + +static struct i2c_adapter mb862xx_i2c_adapter = { + .name = "MB862xx I2C adapter", + .algo = &mb862xx_algo, + .owner = THIS_MODULE, +}; + +int mb862xx_i2c_init(struct mb862xxfb_par *par) +{ + int ret; + + mb862xx_i2c_adapter.algo_data = par; + par->adap = &mb862xx_i2c_adapter; + + ret = i2c_add_adapter(par->adap); + if (ret < 0) { + dev_err(par->dev, "failed to add %s\n", + mb862xx_i2c_adapter.name); + } + return ret; +} + +void mb862xx_i2c_exit(struct mb862xxfb_par *par) +{ + if (par->adap) { + i2c_del_adapter(par->adap); + par->adap = NULL; + } +} diff --git a/drivers/video/mb862xx/mb862xx_reg.h b/drivers/video/mb862xx/mb862xx_reg.h index 2ba65e11850..9df48b8edc9 100644 --- a/drivers/video/mb862xx/mb862xx_reg.h +++ b/drivers/video/mb862xx/mb862xx_reg.h @@ -5,11 +5,8 @@ #ifndef _MB862XX_REG_H #define _MB862XX_REG_H -#ifdef MB862XX_MMIO_BOTTOM -#define MB862XX_MMIO_BASE 0x03fc0000 -#else #define MB862XX_MMIO_BASE 0x01fc0000 -#endif +#define MB862XX_MMIO_HIGH_BASE 0x03fc0000 #define MB862XX_I2C_BASE 0x0000c000 #define MB862XX_DISP_BASE 0x00010000 #define MB862XX_CAP_BASE 0x00018000 @@ -23,6 +20,7 @@ #define GC_IMASK 0x00000024 #define GC_SRST 0x0000002c #define GC_CCF 0x00000038 +#define GC_RSW 0x0000005c #define GC_CID 0x000000f0 #define GC_REVISION 0x00000084 @@ -53,10 +51,16 @@ #define GC_L0OA0 0x00000024 #define GC_L0DA0 0x00000028 #define GC_L0DY_L0DX 0x0000002c +#define GC_L1M 0x00000030 +#define GC_L1DA 0x00000034 #define GC_DCM1 0x00000100 #define GC_L0EM 0x00000110 #define GC_L0WY_L0WX 0x00000114 #define GC_L0WH_L0WW 0x00000118 +#define GC_L1EM 0x00000120 +#define GC_L1WY_L1WX 0x00000124 +#define GC_L1WH_L1WW 0x00000128 +#define GC_DLS 0x00000180 #define GC_DCM2 0x00000104 #define GC_DCM3 0x00000108 #define GC_CPM_CUTC 0x000000a0 @@ -68,6 +72,11 @@ #define GC_CPM_CEN0 0x00100000 #define GC_CPM_CEN1 
0x00200000 +#define GC_DCM1_DEN 0x80000000 +#define GC_DCM1_L1E 0x00020000 +#define GC_L1M_16 0x80000000 +#define GC_L1M_YC 0x40000000 +#define GC_L1M_CS 0x20000000 #define GC_DCM01_ESY 0x00000004 #define GC_DCM01_SC 0x00003f00 @@ -79,9 +88,50 @@ #define GC_L0M_L0C_16 0x80000000 #define GC_L0EM_L0EC_24 0x40000000 #define GC_L0M_L0W_UNIT 64 +#define GC_L1EM_DM 0x02000000 #define GC_DISP_REFCLK_400 400 +/* I2C */ +#define GC_I2C_BSR 0x00000000 /* BSR */ +#define GC_I2C_BCR 0x00000004 /* BCR */ +#define GC_I2C_CCR 0x00000008 /* CCR */ +#define GC_I2C_ADR 0x0000000C /* ADR */ +#define GC_I2C_DAR 0x00000010 /* DAR */ + +#define I2C_DISABLE 0x00000000 +#define I2C_STOP 0x00000000 +#define I2C_START 0x00000010 +#define I2C_REPEATED_START 0x00000030 +#define I2C_CLOCK_AND_ENABLE 0x0000003f +#define I2C_READY 0x01 +#define I2C_INT 0x01 +#define I2C_INTE 0x02 +#define I2C_ACK 0x08 +#define I2C_BER 0x80 +#define I2C_BEIE 0x40 +#define I2C_TRX 0x80 +#define I2C_LRB 0x10 + +/* Capture registers and bits */ +#define GC_CAP_VCM 0x00000000 +#define GC_CAP_CSC 0x00000004 +#define GC_CAP_VCS 0x00000008 +#define GC_CAP_CBM 0x00000010 +#define GC_CAP_CBOA 0x00000014 +#define GC_CAP_CBLA 0x00000018 +#define GC_CAP_IMG_START 0x0000001C +#define GC_CAP_IMG_END 0x00000020 +#define GC_CAP_CMSS 0x00000048 +#define GC_CAP_CMDS 0x0000004C + +#define GC_VCM_VIE 0x80000000 +#define GC_VCM_CM 0x03000000 +#define GC_VCM_VS_PAL 0x00000002 +#define GC_CBM_OO 0x80000000 +#define GC_CBM_HRV 0x00000010 +#define GC_CBM_CBST 0x00000001 + /* Carmine specific */ #define MB86297_DRAW_BASE 0x00020000 #define MB86297_DISP0_BASE 0x00100000 diff --git a/drivers/video/mb862xx/mb862xxfb.h b/drivers/video/mb862xx/mb862xxfb.h index d7e7cb76bbf..8550630c1e0 100644 --- a/drivers/video/mb862xx/mb862xxfb.h +++ b/drivers/video/mb862xx/mb862xxfb.h @@ -1,6 +1,26 @@ #ifndef __MB862XX_H__ #define __MB862XX_H__ +struct mb862xx_l1_cfg { + unsigned short sx; + unsigned short sy; + unsigned short sw; + unsigned short sh; + unsigned short dx; + unsigned short dy; + unsigned short dw; + unsigned short dh; + int mirror; +}; + +#define MB862XX_BASE 'M' +#define MB862XX_L1_GET_CFG _IOR(MB862XX_BASE, 0, struct mb862xx_l1_cfg*) +#define MB862XX_L1_SET_CFG _IOW(MB862XX_BASE, 1, struct mb862xx_l1_cfg*) +#define MB862XX_L1_ENABLE _IOW(MB862XX_BASE, 2, int) +#define MB862XX_L1_CAP_CTL _IOW(MB862XX_BASE, 3, int) + +#ifdef __KERNEL__ + #define PCI_VENDOR_ID_FUJITSU_LIMITED 0x10cf #define PCI_DEVICE_ID_FUJITSU_CORALP 0x2019 #define PCI_DEVICE_ID_FUJITSU_CORALPA 0x201e @@ -38,6 +58,8 @@ struct mb862xxfb_par { void __iomem *mmio_base; /* remapped registers */ size_t mapped_vram; /* length of remapped vram */ size_t mmio_len; /* length of register region */ + unsigned long cap_buf; /* capture buffers offset */ + size_t cap_len; /* length of capture buffers */ void __iomem *host; /* relocatable reg. bases */ void __iomem *i2c; @@ -57,11 +79,23 @@ struct mb862xxfb_par { unsigned int refclk; /* disp. 
reference clock */ struct mb862xx_gc_mode *gc_mode; /* GDC mode init data */ int pre_init; /* don't init display if 1 */ + struct i2c_adapter *adap; /* GDC I2C bus adapter */ + int i2c_rs; + + struct mb862xx_l1_cfg l1_cfg; + int l1_stride; u32 pseudo_palette[16]; }; extern void mb862xxfb_init_accel(struct fb_info *info, int xres); +#ifdef CONFIG_FB_MB862XX_I2C +extern int mb862xx_i2c_init(struct mb862xxfb_par *par); +extern void mb862xx_i2c_exit(struct mb862xxfb_par *par); +#else +static inline int mb862xx_i2c_init(struct mb862xxfb_par *par) { return 0; } +static inline void mb862xx_i2c_exit(struct mb862xxfb_par *par) { } +#endif #if defined(CONFIG_FB_MB862XX_LIME) && defined(CONFIG_FB_MB862XX_PCI_GDC) #error "Select Lime GDC or CoralP/Carmine support, but not both together" @@ -82,4 +116,6 @@ extern void mb862xxfb_init_accel(struct fb_info *info, int xres); #define pack(a, b) (((a) << 16) | (b)) +#endif /* __KERNEL__ */ + #endif diff --git a/drivers/video/mb862xx/mb862xxfb.c b/drivers/video/mb862xx/mb862xxfbdrv.c index c76e663a6cd..ea39336addf 100644 --- a/drivers/video/mb862xx/mb862xxfb.c +++ b/drivers/video/mb862xx/mb862xxfbdrv.c @@ -27,7 +27,7 @@ #define NR_PALETTE 256 #define MB862XX_MEM_SIZE 0x1000000 -#define CORALP_MEM_SIZE 0x4000000 +#define CORALP_MEM_SIZE 0x2000000 #define CARMINE_MEM_SIZE 0x8000000 #define DRV_NAME "mb862xxfb" @@ -309,6 +309,97 @@ static int mb862xxfb_blank(int mode, struct fb_info *fbi) return 0; } +static int mb862xxfb_ioctl(struct fb_info *fbi, unsigned int cmd, + unsigned long arg) +{ + struct mb862xxfb_par *par = fbi->par; + struct mb862xx_l1_cfg *l1_cfg = &par->l1_cfg; + void __user *argp = (void __user *)arg; + int *enable; + u32 l1em = 0; + + switch (cmd) { + case MB862XX_L1_GET_CFG: + if (copy_to_user(argp, l1_cfg, sizeof(*l1_cfg))) + return -EFAULT; + break; + case MB862XX_L1_SET_CFG: + if (copy_from_user(l1_cfg, argp, sizeof(*l1_cfg))) + return -EFAULT; + if ((l1_cfg->sw >= l1_cfg->dw) && (l1_cfg->sh >= l1_cfg->dh)) { + /* downscaling */ + outreg(cap, GC_CAP_CSC, + pack((l1_cfg->sh << 11) / l1_cfg->dh, + (l1_cfg->sw << 11) / l1_cfg->dw)); + l1em = inreg(disp, GC_L1EM); + l1em &= ~GC_L1EM_DM; + } else if ((l1_cfg->sw <= l1_cfg->dw) && + (l1_cfg->sh <= l1_cfg->dh)) { + /* upscaling */ + outreg(cap, GC_CAP_CSC, + pack((l1_cfg->sh << 11) / l1_cfg->dh, + (l1_cfg->sw << 11) / l1_cfg->dw)); + outreg(cap, GC_CAP_CMSS, + pack(l1_cfg->sw >> 1, l1_cfg->sh)); + outreg(cap, GC_CAP_CMDS, + pack(l1_cfg->dw >> 1, l1_cfg->dh)); + l1em = inreg(disp, GC_L1EM); + l1em |= GC_L1EM_DM; + } + + if (l1_cfg->mirror) { + outreg(cap, GC_CAP_CBM, + inreg(cap, GC_CAP_CBM) | GC_CBM_HRV); + l1em |= l1_cfg->dw * 2 - 8; + } else { + outreg(cap, GC_CAP_CBM, + inreg(cap, GC_CAP_CBM) & ~GC_CBM_HRV); + l1em &= 0xffff0000; + } + outreg(disp, GC_L1EM, l1em); + break; + case MB862XX_L1_ENABLE: + enable = (int *)arg; + if (*enable) { + outreg(disp, GC_L1DA, par->cap_buf); + outreg(cap, GC_CAP_IMG_START, + pack(l1_cfg->sy >> 1, l1_cfg->sx)); + outreg(cap, GC_CAP_IMG_END, + pack(l1_cfg->sh, l1_cfg->sw)); + outreg(disp, GC_L1M, GC_L1M_16 | GC_L1M_YC | GC_L1M_CS | + (par->l1_stride << 16)); + outreg(disp, GC_L1WY_L1WX, + pack(l1_cfg->dy, l1_cfg->dx)); + outreg(disp, GC_L1WH_L1WW, + pack(l1_cfg->dh - 1, l1_cfg->dw)); + outreg(disp, GC_DLS, 1); + outreg(cap, GC_CAP_VCM, + GC_VCM_VIE | GC_VCM_CM | GC_VCM_VS_PAL); + outreg(disp, GC_DCM1, inreg(disp, GC_DCM1) | + GC_DCM1_DEN | GC_DCM1_L1E); + } else { + outreg(cap, GC_CAP_VCM, + inreg(cap, GC_CAP_VCM) & ~GC_VCM_VIE); + outreg(disp, GC_DCM1, + 
inreg(disp, GC_DCM1) & ~GC_DCM1_L1E); + } + break; + case MB862XX_L1_CAP_CTL: + enable = (int *)arg; + if (*enable) { + outreg(cap, GC_CAP_VCM, + inreg(cap, GC_CAP_VCM) | GC_VCM_VIE); + } else { + outreg(cap, GC_CAP_VCM, + inreg(cap, GC_CAP_VCM) & ~GC_VCM_VIE); + } + break; + default: + return -EINVAL; + } + return 0; +} + /* framebuffer ops */ static struct fb_ops mb862xxfb_ops = { .owner = THIS_MODULE, @@ -320,6 +411,7 @@ static struct fb_ops mb862xxfb_ops = { .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, + .fb_ioctl = mb862xxfb_ioctl, }; /* initialize fb_info data */ @@ -328,6 +420,7 @@ static int mb862xxfb_init_fbinfo(struct fb_info *fbi) struct mb862xxfb_par *par = fbi->par; struct mb862xx_gc_mode *mode = par->gc_mode; unsigned long reg; + int stride; fbi->fbops = &mb862xxfb_ops; fbi->pseudo_palette = par->pseudo_palette; @@ -336,7 +429,6 @@ static int mb862xxfb_init_fbinfo(struct fb_info *fbi) strcpy(fbi->fix.id, DRV_NAME); fbi->fix.smem_start = (unsigned long)par->fb_base_phys; - fbi->fix.smem_len = par->mapped_vram; fbi->fix.mmio_start = (unsigned long)par->mmio_base_phys; fbi->fix.mmio_len = par->mmio_len; fbi->fix.accel = FB_ACCEL_NONE; @@ -420,6 +512,28 @@ static int mb862xxfb_init_fbinfo(struct fb_info *fbi) FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; fbi->fix.line_length = (fbi->var.xres_virtual * fbi->var.bits_per_pixel) / 8; + fbi->fix.smem_len = fbi->fix.line_length * fbi->var.yres_virtual; + + /* + * reserve space for capture buffers and two cursors + * at the end of vram: 720x576 * 2 * 2.2 + 64x64 * 16. + */ + par->cap_buf = par->mapped_vram - 0x1bd800 - 0x10000; + par->cap_len = 0x1bd800; + par->l1_cfg.sx = 0; + par->l1_cfg.sy = 0; + par->l1_cfg.sw = 720; + par->l1_cfg.sh = 576; + par->l1_cfg.dx = 0; + par->l1_cfg.dy = 0; + par->l1_cfg.dw = 720; + par->l1_cfg.dh = 576; + stride = par->l1_cfg.sw * (fbi->var.bits_per_pixel / 8); + par->l1_stride = stride / 64 + ((stride % 64) ? 1 : 0); + outreg(cap, GC_CAP_CBM, GC_CBM_OO | GC_CBM_CBST | + (par->l1_stride << 16)); + outreg(cap, GC_CAP_CBOA, par->cap_buf); + outreg(cap, GC_CAP_CBLA, par->cap_buf + par->cap_len); return 0; } @@ -742,22 +856,38 @@ static int coralp_init(struct mb862xxfb_par *par) par->refclk = GC_DISP_REFCLK_400; + if (par->mapped_vram >= 0x2000000) { + /* relocate gdc registers space */ + writel(1, par->fb_base + MB862XX_MMIO_BASE + GC_RSW); + udelay(1); /* wait at least 20 bus cycles */ + } + ver = inreg(host, GC_CID); cn = (ver & GC_CID_CNAME_MSK) >> 8; ver = ver & GC_CID_VERSION_MSK; if (cn == 3) { + unsigned long reg; + dev_info(par->dev, "Fujitsu Coral-%s GDC Rev.%d found\n",\ (ver == 6) ? "P" : (ver == 8) ? 
"PA" : "?", par->pdev->revision); - outreg(host, GC_CCF, GC_CCF_CGE_166 | GC_CCF_COT_133); - udelay(200); - outreg(host, GC_MMR, GC_MMR_CORALP_EVB_VAL); - udelay(10); + reg = inreg(disp, GC_DCM1); + if (reg & GC_DCM01_DEN && reg & GC_DCM01_L0E) + par->pre_init = 1; + + if (!par->pre_init) { + outreg(host, GC_CCF, GC_CCF_CGE_166 | GC_CCF_COT_133); + udelay(200); + outreg(host, GC_MMR, GC_MMR_CORALP_EVB_VAL); + udelay(10); + } /* Clear interrupt status */ outreg(host, GC_IST, 0); } else { return -ENODEV; } + + mb862xx_i2c_init(par); return 0; } @@ -899,7 +1029,13 @@ static int __devinit mb862xx_pci_probe(struct pci_dev *pdev, case PCI_DEVICE_ID_FUJITSU_CORALPA: par->fb_base_phys = pci_resource_start(par->pdev, 0); par->mapped_vram = CORALP_MEM_SIZE; - par->mmio_base_phys = par->fb_base_phys + MB862XX_MMIO_BASE; + if (par->mapped_vram >= 0x2000000) { + par->mmio_base_phys = par->fb_base_phys + + MB862XX_MMIO_HIGH_BASE; + } else { + par->mmio_base_phys = par->fb_base_phys + + MB862XX_MMIO_BASE; + } par->mmio_len = MB862XX_MMIO_SIZE; par->type = BT_CORALP; break; @@ -1009,6 +1145,8 @@ static void __devexit mb862xx_pci_remove(struct pci_dev *pdev) outreg(host, GC_IMASK, 0); } + mb862xx_i2c_exit(par); + device_remove_file(&pdev->dev, &dev_attr_dispregs); pci_set_drvdata(pdev, NULL); diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c index 529483467ab..0ccd7adf47b 100644 --- a/drivers/video/omap/dispc.c +++ b/drivers/video/omap/dispc.c @@ -922,14 +922,14 @@ static int get_dss_clocks(void) return PTR_ERR(dispc.dss_ick); } - dispc.dss1_fck = clk_get(&dispc.fbdev->dssdev->dev, "dss1_fck"); + dispc.dss1_fck = clk_get(&dispc.fbdev->dssdev->dev, "fck"); if (IS_ERR(dispc.dss1_fck)) { dev_err(dispc.fbdev->dev, "can't get dss1_fck\n"); clk_put(dispc.dss_ick); return PTR_ERR(dispc.dss1_fck); } - dispc.dss_54m_fck = clk_get(&dispc.fbdev->dssdev->dev, "tv_fck"); + dispc.dss_54m_fck = clk_get(&dispc.fbdev->dssdev->dev, "tv_clk"); if (IS_ERR(dispc.dss_54m_fck)) { dev_err(dispc.fbdev->dev, "can't get tv_fck\n"); clk_put(dispc.dss_ick); diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c index e264efd0278..b3ddd743d8a 100644 --- a/drivers/video/omap/omapfb_main.c +++ b/drivers/video/omap/omapfb_main.c @@ -90,7 +90,7 @@ static void omapdss_release(struct device *dev) /* dummy device for clocks */ static struct platform_device omapdss_device = { - .name = "omapdss", + .name = "omapdss_dss", .id = -1, .dev = { .release = omapdss_release, diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c index eada9f12efc..0c6981f1a4a 100644 --- a/drivers/video/omap/rfbi.c +++ b/drivers/video/omap/rfbi.c @@ -90,7 +90,7 @@ static int rfbi_get_clocks(void) return PTR_ERR(rfbi.dss_ick); } - rfbi.dss1_fck = clk_get(&rfbi.fbdev->dssdev->dev, "dss1_fck"); + rfbi.dss1_fck = clk_get(&rfbi.fbdev->dssdev->dev, "fck"); if (IS_ERR(rfbi.dss1_fck)) { dev_err(rfbi.fbdev->dev, "can't get dss1_fck\n"); clk_put(rfbi.dss_ick); diff --git a/drivers/video/omap2/Makefile b/drivers/video/omap2/Makefile index d853d05dad3..5ddef129f79 100644 --- a/drivers/video/omap2/Makefile +++ b/drivers/video/omap2/Makefile @@ -1,6 +1,6 @@ obj-$(CONFIG_OMAP2_VRAM) += vram.o obj-$(CONFIG_OMAP2_VRFB) += vrfb.o -obj-y += dss/ -obj-y += omapfb/ +obj-$(CONFIG_OMAP2_DSS) += dss/ +obj-$(CONFIG_FB_OMAP2) += omapfb/ obj-y += displays/ diff --git a/drivers/video/omap2/displays/Kconfig b/drivers/video/omap2/displays/Kconfig index d18ad6b2372..609a2807317 100644 --- a/drivers/video/omap2/displays/Kconfig +++ 
b/drivers/video/omap2/displays/Kconfig @@ -3,6 +3,7 @@ menu "OMAP2/3 Display Device Drivers" config PANEL_GENERIC_DPI tristate "Generic DPI Panel" + depends on OMAP2_DSS_DPI help Generic DPI panel driver. Supports DVI output for Beagle and OMAP3 SDP. @@ -11,20 +12,20 @@ config PANEL_GENERIC_DPI config PANEL_LGPHILIPS_LB035Q02 tristate "LG.Philips LB035Q02 LCD Panel" - depends on OMAP2_DSS && SPI + depends on OMAP2_DSS_DPI && SPI help LCD Panel used on the Gumstix Overo Palo35 config PANEL_SHARP_LS037V7DW01 tristate "Sharp LS037V7DW01 LCD Panel" - depends on OMAP2_DSS + depends on OMAP2_DSS_DPI select BACKLIGHT_CLASS_DEVICE help LCD Panel used in TI's SDP3430 and EVM boards config PANEL_NEC_NL8048HL11_01B tristate "NEC NL8048HL11-01B Panel" - depends on OMAP2_DSS + depends on OMAP2_DSS_DPI help This NEC NL8048HL11-01B panel is TFT LCD used in the Zoom2/3/3630 sdp boards. @@ -37,7 +38,7 @@ config PANEL_TAAL config PANEL_TPO_TD043MTEA1 tristate "TPO TD043MTEA1 LCD Panel" - depends on OMAP2_DSS && SPI + depends on OMAP2_DSS_DPI && SPI help LCD Panel used in OMAP3 Pandora diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c index 7e04c921aa2..dbd59b8e5b3 100644 --- a/drivers/video/omap2/displays/panel-acx565akm.c +++ b/drivers/video/omap2/displays/panel-acx565akm.c @@ -30,7 +30,7 @@ #include <linux/backlight.h> #include <linux/fb.h> -#include <plat/display.h> +#include <video/omapdss.h> #define MIPID_CMD_READ_DISP_ID 0x04 #define MIPID_CMD_READ_RED 0x06 diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c index 4a9b9ff5946..9c90f75653f 100644 --- a/drivers/video/omap2/displays/panel-generic-dpi.c +++ b/drivers/video/omap2/displays/panel-generic-dpi.c @@ -33,8 +33,9 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/slab.h> +#include <video/omapdss.h> -#include <plat/panel-generic-dpi.h> +#include <video/omap-panel-generic-dpi.h> struct panel_config { struct omap_video_timings timings; @@ -181,6 +182,56 @@ static struct panel_config generic_dpi_panels[] = { .power_off_delay = 0, .name = "samsung_lte430wq_f0c", }, + + /* Seiko 70WVW1TZ3Z3 */ + { + { + .x_res = 800, + .y_res = 480, + + .pixel_clock = 33000, + + .hsw = 128, + .hfp = 10, + .hbp = 10, + + .vsw = 2, + .vfp = 4, + .vbp = 11, + }, + .acbi = 0x0, + .acb = 0x0, + .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | + OMAP_DSS_LCD_IHS, + .power_on_delay = 0, + .power_off_delay = 0, + .name = "seiko_70wvw1tz3", + }, + + /* Powertip PH480272T */ + { + { + .x_res = 480, + .y_res = 272, + + .pixel_clock = 9000, + + .hsw = 40, + .hfp = 2, + .hbp = 2, + + .vsw = 10, + .vfp = 2, + .vbp = 2, + }, + .acbi = 0x0, + .acb = 0x0, + .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | + OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IEO, + .power_on_delay = 0, + .power_off_delay = 0, + .name = "powertip_ph480272t", + }, }; struct panel_drv_data { @@ -285,7 +336,7 @@ static int generic_dpi_panel_probe(struct omap_dss_device *dssdev) return 0; } -static void generic_dpi_panel_remove(struct omap_dss_device *dssdev) +static void __exit generic_dpi_panel_remove(struct omap_dss_device *dssdev) { struct panel_drv_data *drv_data = dev_get_drvdata(&dssdev->dev); @@ -358,7 +409,7 @@ static int generic_dpi_panel_check_timings(struct omap_dss_device *dssdev, static struct omap_dss_driver dpi_driver = { .probe = generic_dpi_panel_probe, - .remove = generic_dpi_panel_remove, + .remove = __exit_p(generic_dpi_panel_remove), .enable = generic_dpi_panel_enable, 
.disable = generic_dpi_panel_disable, diff --git a/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c index 271324db243..e0eb35be303 100644 --- a/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c +++ b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c @@ -21,7 +21,7 @@ #include <linux/spi/spi.h> #include <linux/mutex.h> -#include <plat/display.h> +#include <video/omapdss.h> struct lb035q02_data { struct mutex lock; diff --git a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c index 925e0fadff5..2ba9d0ca187 100644 --- a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c +++ b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c @@ -22,7 +22,7 @@ #include <linux/backlight.h> #include <linux/fb.h> -#include <plat/display.h> +#include <video/omapdss.h> #define LCD_XRES 800 #define LCD_YRES 480 diff --git a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c index d2b35d2df2a..ba38b3ad17d 100644 --- a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c @@ -25,7 +25,7 @@ #include <linux/err.h> #include <linux/slab.h> -#include <plat/display.h> +#include <video/omapdss.h> struct sharp_data { struct backlight_device *bl; @@ -120,7 +120,7 @@ static int sharp_ls_panel_probe(struct omap_dss_device *dssdev) return 0; } -static void sharp_ls_panel_remove(struct omap_dss_device *dssdev) +static void __exit sharp_ls_panel_remove(struct omap_dss_device *dssdev) { struct sharp_data *sd = dev_get_drvdata(&dssdev->dev); struct backlight_device *bl = sd->bl; @@ -205,7 +205,7 @@ static int sharp_ls_panel_resume(struct omap_dss_device *dssdev) static struct omap_dss_driver sharp_ls_driver = { .probe = sharp_ls_panel_probe, - .remove = sharp_ls_panel_remove, + .remove = __exit_p(sharp_ls_panel_remove), .enable = sharp_ls_panel_enable, .disable = sharp_ls_panel_disable, diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c index adc9900458e..fdd5d4ae437 100644 --- a/drivers/video/omap2/displays/panel-taal.c +++ b/drivers/video/omap2/displays/panel-taal.c @@ -33,8 +33,8 @@ #include <linux/regulator/consumer.h> #include <linux/mutex.h> -#include <plat/display.h> -#include <plat/nokia-dsi-panel.h> +#include <video/omapdss.h> +#include <video/omap-panel-nokia-dsi.h> /* DSI Virtual channel. Hardcoded for now. 
*/ #define TCH 0 @@ -63,12 +63,12 @@ #define DCS_GET_ID2 0xdb #define DCS_GET_ID3 0xdc -#define TAAL_ESD_CHECK_PERIOD msecs_to_jiffies(5000) - static irqreturn_t taal_te_isr(int irq, void *data); static void taal_te_timeout_work_callback(struct work_struct *work); static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable); +static int taal_panel_reset(struct omap_dss_device *dssdev); + struct panel_regulator { struct regulator *regulator; const char *name; @@ -229,8 +229,14 @@ struct taal_data { bool intro_printed; - struct workqueue_struct *esd_wq; + struct workqueue_struct *workqueue; + struct delayed_work esd_work; + unsigned esd_interval; + + bool ulps_enabled; + unsigned ulps_timeout; + struct delayed_work ulps_work; struct panel_config *panel_config; }; @@ -242,6 +248,7 @@ static inline struct nokia_dsi_panel_data } static void taal_esd_work(struct work_struct *work); +static void taal_ulps_work(struct work_struct *work); static void hw_guard_start(struct taal_data *td, int guard_msec) { @@ -264,7 +271,7 @@ static int taal_dcs_read_1(struct taal_data *td, u8 dcs_cmd, u8 *data) int r; u8 buf[1]; - r = dsi_vc_dcs_read(td->channel, dcs_cmd, buf, 1); + r = dsi_vc_dcs_read(td->dssdev, td->channel, dcs_cmd, buf, 1); if (r < 0) return r; @@ -276,7 +283,7 @@ static int taal_dcs_read_1(struct taal_data *td, u8 dcs_cmd, u8 *data) static int taal_dcs_write_0(struct taal_data *td, u8 dcs_cmd) { - return dsi_vc_dcs_write(td->channel, &dcs_cmd, 1); + return dsi_vc_dcs_write(td->dssdev, td->channel, &dcs_cmd, 1); } static int taal_dcs_write_1(struct taal_data *td, u8 dcs_cmd, u8 param) @@ -284,7 +291,7 @@ static int taal_dcs_write_1(struct taal_data *td, u8 dcs_cmd, u8 param) u8 buf[2]; buf[0] = dcs_cmd; buf[1] = param; - return dsi_vc_dcs_write(td->channel, buf, 2); + return dsi_vc_dcs_write(td->dssdev, td->channel, buf, 2); } static int taal_sleep_in(struct taal_data *td) @@ -296,7 +303,7 @@ static int taal_sleep_in(struct taal_data *td) hw_guard_wait(td); cmd = DCS_SLEEP_IN; - r = dsi_vc_dcs_write_nosync(td->channel, &cmd, 1); + r = dsi_vc_dcs_write_nosync(td->dssdev, td->channel, &cmd, 1); if (r) return r; @@ -402,7 +409,7 @@ static int taal_set_update_window(struct taal_data *td, buf[3] = (x2 >> 8) & 0xff; buf[4] = (x2 >> 0) & 0xff; - r = dsi_vc_dcs_write_nosync(td->channel, buf, sizeof(buf)); + r = dsi_vc_dcs_write_nosync(td->dssdev, td->channel, buf, sizeof(buf)); if (r) return r; @@ -412,15 +419,132 @@ static int taal_set_update_window(struct taal_data *td, buf[3] = (y2 >> 8) & 0xff; buf[4] = (y2 >> 0) & 0xff; - r = dsi_vc_dcs_write_nosync(td->channel, buf, sizeof(buf)); + r = dsi_vc_dcs_write_nosync(td->dssdev, td->channel, buf, sizeof(buf)); if (r) return r; - dsi_vc_send_bta_sync(td->channel); + dsi_vc_send_bta_sync(td->dssdev, td->channel); return r; } +static void taal_queue_esd_work(struct omap_dss_device *dssdev) +{ + struct taal_data *td = dev_get_drvdata(&dssdev->dev); + + if (td->esd_interval > 0) + queue_delayed_work(td->workqueue, &td->esd_work, + msecs_to_jiffies(td->esd_interval)); +} + +static void taal_cancel_esd_work(struct omap_dss_device *dssdev) +{ + struct taal_data *td = dev_get_drvdata(&dssdev->dev); + + cancel_delayed_work(&td->esd_work); +} + +static void taal_queue_ulps_work(struct omap_dss_device *dssdev) +{ + struct taal_data *td = dev_get_drvdata(&dssdev->dev); + + if (td->ulps_timeout > 0) + queue_delayed_work(td->workqueue, &td->ulps_work, + msecs_to_jiffies(td->ulps_timeout)); +} + +static void taal_cancel_ulps_work(struct omap_dss_device 
*dssdev) +{ + struct taal_data *td = dev_get_drvdata(&dssdev->dev); + + cancel_delayed_work(&td->ulps_work); +} + +static int taal_enter_ulps(struct omap_dss_device *dssdev) +{ + struct taal_data *td = dev_get_drvdata(&dssdev->dev); + struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); + int r; + + if (td->ulps_enabled) + return 0; + + taal_cancel_ulps_work(dssdev); + + r = _taal_enable_te(dssdev, false); + if (r) + goto err; + + disable_irq(gpio_to_irq(panel_data->ext_te_gpio)); + + omapdss_dsi_display_disable(dssdev, false, true); + + td->ulps_enabled = true; + + return 0; + +err: + dev_err(&dssdev->dev, "enter ULPS failed"); + taal_panel_reset(dssdev); + + td->ulps_enabled = false; + + taal_queue_ulps_work(dssdev); + + return r; +} + +static int taal_exit_ulps(struct omap_dss_device *dssdev) +{ + struct taal_data *td = dev_get_drvdata(&dssdev->dev); + struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); + int r; + + if (!td->ulps_enabled) + return 0; + + r = omapdss_dsi_display_enable(dssdev); + if (r) + goto err; + + omapdss_dsi_vc_enable_hs(dssdev, td->channel, true); + + r = _taal_enable_te(dssdev, true); + if (r) + goto err; + + enable_irq(gpio_to_irq(panel_data->ext_te_gpio)); + + taal_queue_ulps_work(dssdev); + + td->ulps_enabled = false; + + return 0; + +err: + dev_err(&dssdev->dev, "exit ULPS failed"); + r = taal_panel_reset(dssdev); + + enable_irq(gpio_to_irq(panel_data->ext_te_gpio)); + td->ulps_enabled = false; + + taal_queue_ulps_work(dssdev); + + return r; +} + +static int taal_wake_up(struct omap_dss_device *dssdev) +{ + struct taal_data *td = dev_get_drvdata(&dssdev->dev); + + if (td->ulps_enabled) + return taal_exit_ulps(dssdev); + + taal_cancel_ulps_work(dssdev); + taal_queue_ulps_work(dssdev); + return 0; +} + static int taal_bl_update_status(struct backlight_device *dev) { struct omap_dss_device *dssdev = dev_get_drvdata(&dev->dev); @@ -441,9 +565,13 @@ static int taal_bl_update_status(struct backlight_device *dev) if (td->use_dsi_bl) { if (td->enabled) { - dsi_bus_lock(); - r = taal_dcs_write_1(td, DCS_BRIGHTNESS, level); - dsi_bus_unlock(); + dsi_bus_lock(dssdev); + + r = taal_wake_up(dssdev); + if (!r) + r = taal_dcs_write_1(td, DCS_BRIGHTNESS, level); + + dsi_bus_unlock(dssdev); } else { r = 0; } @@ -504,9 +632,13 @@ static ssize_t taal_num_errors_show(struct device *dev, mutex_lock(&td->lock); if (td->enabled) { - dsi_bus_lock(); - r = taal_dcs_read_1(td, DCS_READ_NUM_ERRORS, &errors); - dsi_bus_unlock(); + dsi_bus_lock(dssdev); + + r = taal_wake_up(dssdev); + if (!r) + r = taal_dcs_read_1(td, DCS_READ_NUM_ERRORS, &errors); + + dsi_bus_unlock(dssdev); } else { r = -ENODEV; } @@ -530,9 +662,13 @@ static ssize_t taal_hw_revision_show(struct device *dev, mutex_lock(&td->lock); if (td->enabled) { - dsi_bus_lock(); - r = taal_get_id(td, &id1, &id2, &id3); - dsi_bus_unlock(); + dsi_bus_lock(dssdev); + + r = taal_wake_up(dssdev); + if (!r) + r = taal_get_id(td, &id1, &id2, &id3); + + dsi_bus_unlock(dssdev); } else { r = -ENODEV; } @@ -579,6 +715,7 @@ static ssize_t store_cabc_mode(struct device *dev, struct omap_dss_device *dssdev = to_dss_device(dev); struct taal_data *td = dev_get_drvdata(&dssdev->dev); int i; + int r; for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) { if (sysfs_streq(cabc_modes[i], buf)) @@ -591,10 +728,19 @@ static ssize_t store_cabc_mode(struct device *dev, mutex_lock(&td->lock); if (td->enabled) { - dsi_bus_lock(); - if (!td->cabc_broken) - taal_dcs_write_1(td, DCS_WRITE_CABC, i); - dsi_bus_unlock(); + dsi_bus_lock(dssdev); 
+ + if (!td->cabc_broken) { + r = taal_wake_up(dssdev); + if (r) + goto err; + + r = taal_dcs_write_1(td, DCS_WRITE_CABC, i); + if (r) + goto err; + } + + dsi_bus_unlock(dssdev); } td->cabc_mode = i; @@ -602,6 +748,10 @@ static ssize_t store_cabc_mode(struct device *dev, mutex_unlock(&td->lock); return count; +err: + dsi_bus_unlock(dssdev); + mutex_unlock(&td->lock); + return r; } static ssize_t show_cabc_available_modes(struct device *dev, @@ -620,18 +770,161 @@ static ssize_t show_cabc_available_modes(struct device *dev, return len < PAGE_SIZE ? len : PAGE_SIZE - 1; } +static ssize_t taal_store_esd_interval(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + struct taal_data *td = dev_get_drvdata(&dssdev->dev); + + unsigned long t; + int r; + + r = strict_strtoul(buf, 10, &t); + if (r) + return r; + + mutex_lock(&td->lock); + taal_cancel_esd_work(dssdev); + td->esd_interval = t; + if (td->enabled) + taal_queue_esd_work(dssdev); + mutex_unlock(&td->lock); + + return count; +} + +static ssize_t taal_show_esd_interval(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + struct taal_data *td = dev_get_drvdata(&dssdev->dev); + unsigned t; + + mutex_lock(&td->lock); + t = td->esd_interval; + mutex_unlock(&td->lock); + + return snprintf(buf, PAGE_SIZE, "%u\n", t); +} + +static ssize_t taal_store_ulps(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + struct taal_data *td = dev_get_drvdata(&dssdev->dev); + unsigned long t; + int r; + + r = strict_strtoul(buf, 10, &t); + if (r) + return r; + + mutex_lock(&td->lock); + + if (td->enabled) { + dsi_bus_lock(dssdev); + + if (t) + r = taal_enter_ulps(dssdev); + else + r = taal_wake_up(dssdev); + + dsi_bus_unlock(dssdev); + } + + mutex_unlock(&td->lock); + + if (r) + return r; + + return count; +} + +static ssize_t taal_show_ulps(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + struct taal_data *td = dev_get_drvdata(&dssdev->dev); + unsigned t; + + mutex_lock(&td->lock); + t = td->ulps_enabled; + mutex_unlock(&td->lock); + + return snprintf(buf, PAGE_SIZE, "%u\n", t); +} + +static ssize_t taal_store_ulps_timeout(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + struct taal_data *td = dev_get_drvdata(&dssdev->dev); + unsigned long t; + int r; + + r = strict_strtoul(buf, 10, &t); + if (r) + return r; + + mutex_lock(&td->lock); + td->ulps_timeout = t; + + if (td->enabled) { + /* taal_wake_up will restart the timer */ + dsi_bus_lock(dssdev); + r = taal_wake_up(dssdev); + dsi_bus_unlock(dssdev); + } + + mutex_unlock(&td->lock); + + if (r) + return r; + + return count; +} + +static ssize_t taal_show_ulps_timeout(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct omap_dss_device *dssdev = to_dss_device(dev); + struct taal_data *td = dev_get_drvdata(&dssdev->dev); + unsigned t; + + mutex_lock(&td->lock); + t = td->ulps_timeout; + mutex_unlock(&td->lock); + + return snprintf(buf, PAGE_SIZE, "%u\n", t); +} + static DEVICE_ATTR(num_dsi_errors, S_IRUGO, taal_num_errors_show, NULL); static DEVICE_ATTR(hw_revision, S_IRUGO, taal_hw_revision_show, NULL); static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR, 
show_cabc_mode, store_cabc_mode); static DEVICE_ATTR(cabc_available_modes, S_IRUGO, show_cabc_available_modes, NULL); +static DEVICE_ATTR(esd_interval, S_IRUGO | S_IWUSR, + taal_show_esd_interval, taal_store_esd_interval); +static DEVICE_ATTR(ulps, S_IRUGO | S_IWUSR, + taal_show_ulps, taal_store_ulps); +static DEVICE_ATTR(ulps_timeout, S_IRUGO | S_IWUSR, + taal_show_ulps_timeout, taal_store_ulps_timeout); static struct attribute *taal_attrs[] = { &dev_attr_num_dsi_errors.attr, &dev_attr_hw_revision.attr, &dev_attr_cabc_mode.attr, &dev_attr_cabc_available_modes.attr, + &dev_attr_esd_interval.attr, + &dev_attr_ulps.attr, + &dev_attr_ulps_timeout.attr, NULL, }; @@ -700,6 +993,9 @@ static int taal_probe(struct omap_dss_device *dssdev) } td->dssdev = dssdev; td->panel_config = panel_config; + td->esd_interval = panel_data->esd_interval; + td->ulps_enabled = false; + td->ulps_timeout = panel_data->ulps_timeout; mutex_init(&td->lock); @@ -710,13 +1006,14 @@ static int taal_probe(struct omap_dss_device *dssdev) if (r) goto err_reg; - td->esd_wq = create_singlethread_workqueue("taal_esd"); - if (td->esd_wq == NULL) { + td->workqueue = create_singlethread_workqueue("taal_esd"); + if (td->workqueue == NULL) { dev_err(&dssdev->dev, "can't create ESD workqueue\n"); r = -ENOMEM; goto err_wq; } INIT_DELAYED_WORK_DEFERRABLE(&td->esd_work, taal_esd_work); + INIT_DELAYED_WORK(&td->ulps_work, taal_ulps_work); dev_set_drvdata(&dssdev->dev, td); @@ -734,8 +1031,8 @@ static int taal_probe(struct omap_dss_device *dssdev) props.max_brightness = 127; props.type = BACKLIGHT_RAW; - bldev = backlight_device_register("taal", &dssdev->dev, dssdev, - &taal_bl_ops, &props); + bldev = backlight_device_register(dev_name(&dssdev->dev), &dssdev->dev, + dssdev, &taal_bl_ops, &props); if (IS_ERR(bldev)) { r = PTR_ERR(bldev); goto err_bl; @@ -810,7 +1107,7 @@ err_irq: err_gpio: backlight_device_unregister(bldev); err_bl: - destroy_workqueue(td->esd_wq); + destroy_workqueue(td->workqueue); err_wq: free_regulators(panel_config->regulators, panel_config->num_regulators); err_reg: @@ -819,7 +1116,7 @@ err: return r; } -static void taal_remove(struct omap_dss_device *dssdev) +static void __exit taal_remove(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); @@ -841,8 +1138,9 @@ static void taal_remove(struct omap_dss_device *dssdev) taal_bl_update_status(bldev); backlight_device_unregister(bldev); - cancel_delayed_work(&td->esd_work); - destroy_workqueue(td->esd_wq); + taal_cancel_ulps_work(dssdev); + taal_cancel_esd_work(dssdev); + destroy_workqueue(td->workqueue); /* reset, to be sure that the panel is in a valid state */ taal_hw_reset(dssdev); @@ -867,7 +1165,7 @@ static int taal_power_on(struct omap_dss_device *dssdev) taal_hw_reset(dssdev); - omapdss_dsi_vc_enable_hs(td->channel, false); + omapdss_dsi_vc_enable_hs(dssdev, td->channel, false); r = taal_sleep_out(td); if (r) @@ -924,7 +1222,7 @@ static int taal_power_on(struct omap_dss_device *dssdev) td->intro_printed = true; } - omapdss_dsi_vc_enable_hs(td->channel, true); + omapdss_dsi_vc_enable_hs(dssdev, td->channel, true); return 0; err: @@ -932,7 +1230,7 @@ err: taal_hw_reset(dssdev); - omapdss_dsi_display_disable(dssdev); + omapdss_dsi_display_disable(dssdev, true, false); err0: return r; } @@ -955,15 +1253,23 @@ static void taal_power_off(struct omap_dss_device *dssdev) taal_hw_reset(dssdev); } - omapdss_dsi_display_disable(dssdev); + omapdss_dsi_display_disable(dssdev, 
true, false); td->enabled = 0; } +static int taal_panel_reset(struct omap_dss_device *dssdev) +{ + dev_err(&dssdev->dev, "performing LCD reset\n"); + + taal_power_off(dssdev); + taal_hw_reset(dssdev); + return taal_power_on(dssdev); +} + static int taal_enable(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); - struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); int r; dev_dbg(&dssdev->dev, "enable\n"); @@ -975,18 +1281,16 @@ static int taal_enable(struct omap_dss_device *dssdev) goto err; } - dsi_bus_lock(); + dsi_bus_lock(dssdev); r = taal_power_on(dssdev); - dsi_bus_unlock(); + dsi_bus_unlock(dssdev); if (r) goto err; - if (panel_data->use_esd_check) - queue_delayed_work(td->esd_wq, &td->esd_work, - TAAL_ESD_CHECK_PERIOD); + taal_queue_esd_work(dssdev); dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; @@ -1007,14 +1311,17 @@ static void taal_disable(struct omap_dss_device *dssdev) mutex_lock(&td->lock); - cancel_delayed_work(&td->esd_work); + taal_cancel_ulps_work(dssdev); + taal_cancel_esd_work(dssdev); - dsi_bus_lock(); + dsi_bus_lock(dssdev); - if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) + if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { + taal_wake_up(dssdev); taal_power_off(dssdev); + } - dsi_bus_unlock(); + dsi_bus_unlock(dssdev); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; @@ -1035,13 +1342,16 @@ static int taal_suspend(struct omap_dss_device *dssdev) goto err; } - cancel_delayed_work(&td->esd_work); + taal_cancel_ulps_work(dssdev); + taal_cancel_esd_work(dssdev); - dsi_bus_lock(); + dsi_bus_lock(dssdev); - taal_power_off(dssdev); + r = taal_wake_up(dssdev); + if (!r) + taal_power_off(dssdev); - dsi_bus_unlock(); + dsi_bus_unlock(dssdev); dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED; @@ -1056,7 +1366,6 @@ err: static int taal_resume(struct omap_dss_device *dssdev) { struct taal_data *td = dev_get_drvdata(&dssdev->dev); - struct nokia_dsi_panel_data *panel_data = get_panel_data(dssdev); int r; dev_dbg(&dssdev->dev, "resume\n"); @@ -1068,19 +1377,17 @@ static int taal_resume(struct omap_dss_device *dssdev) goto err; } - dsi_bus_lock(); + dsi_bus_lock(dssdev); r = taal_power_on(dssdev); - dsi_bus_unlock(); + dsi_bus_unlock(dssdev); if (r) { dssdev->state = OMAP_DSS_DISPLAY_DISABLED; } else { dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - if (panel_data->use_esd_check) - queue_delayed_work(td->esd_wq, &td->esd_work, - TAAL_ESD_CHECK_PERIOD); + taal_queue_esd_work(dssdev); } mutex_unlock(&td->lock); @@ -1095,7 +1402,7 @@ static void taal_framedone_cb(int err, void *data) { struct omap_dss_device *dssdev = data; dev_dbg(&dssdev->dev, "framedone, err %d\n", err); - dsi_bus_unlock(); + dsi_bus_unlock(dssdev); } static irqreturn_t taal_te_isr(int irq, void *data) @@ -1123,7 +1430,7 @@ static irqreturn_t taal_te_isr(int irq, void *data) return IRQ_HANDLED; err: dev_err(&dssdev->dev, "start update failed\n"); - dsi_bus_unlock(); + dsi_bus_unlock(dssdev); return IRQ_HANDLED; } @@ -1136,7 +1443,7 @@ static void taal_te_timeout_work_callback(struct work_struct *work) dev_err(&dssdev->dev, "TE not received for 250ms!\n"); atomic_set(&td->do_update, 0); - dsi_bus_unlock(); + dsi_bus_unlock(dssdev); } static int taal_update(struct omap_dss_device *dssdev, @@ -1149,7 +1456,11 @@ static int taal_update(struct omap_dss_device *dssdev, dev_dbg(&dssdev->dev, "update %d, %d, %d x %d\n", x, y, w, h); mutex_lock(&td->lock); - dsi_bus_lock(); + dsi_bus_lock(dssdev); + + r = taal_wake_up(dssdev); + if (r) + goto err; if (!td->enabled) { r = 0; @@ -1184,7 +1495,7 @@ 
static int taal_update(struct omap_dss_device *dssdev, mutex_unlock(&td->lock); return 0; err: - dsi_bus_unlock(); + dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); return r; } @@ -1196,8 +1507,8 @@ static int taal_sync(struct omap_dss_device *dssdev) dev_dbg(&dssdev->dev, "sync\n"); mutex_lock(&td->lock); - dsi_bus_lock(); - dsi_bus_unlock(); + dsi_bus_lock(dssdev); + dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); dev_dbg(&dssdev->dev, "sync done\n"); @@ -1235,9 +1546,13 @@ static int taal_enable_te(struct omap_dss_device *dssdev, bool enable) if (td->te_enabled == enable) goto end; - dsi_bus_lock(); + dsi_bus_lock(dssdev); if (td->enabled) { + r = taal_wake_up(dssdev); + if (r) + goto err; + r = _taal_enable_te(dssdev, enable); if (r) goto err; @@ -1245,13 +1560,13 @@ static int taal_enable_te(struct omap_dss_device *dssdev, bool enable) td->te_enabled = enable; - dsi_bus_unlock(); + dsi_bus_unlock(dssdev); end: mutex_unlock(&td->lock); return 0; err: - dsi_bus_unlock(); + dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); return r; @@ -1281,9 +1596,13 @@ static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate) if (td->rotate == rotate) goto end; - dsi_bus_lock(); + dsi_bus_lock(dssdev); if (td->enabled) { + r = taal_wake_up(dssdev); + if (r) + goto err; + r = taal_set_addr_mode(td, rotate, td->mirror); if (r) goto err; @@ -1291,12 +1610,12 @@ static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate) td->rotate = rotate; - dsi_bus_unlock(); + dsi_bus_unlock(dssdev); end: mutex_unlock(&td->lock); return 0; err: - dsi_bus_unlock(); + dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); return r; } @@ -1325,8 +1644,12 @@ static int taal_mirror(struct omap_dss_device *dssdev, bool enable) if (td->mirror == enable) goto end; - dsi_bus_lock(); + dsi_bus_lock(dssdev); if (td->enabled) { + r = taal_wake_up(dssdev); + if (r) + goto err; + r = taal_set_addr_mode(td, td->rotate, enable); if (r) goto err; @@ -1334,12 +1657,12 @@ static int taal_mirror(struct omap_dss_device *dssdev, bool enable) td->mirror = enable; - dsi_bus_unlock(); + dsi_bus_unlock(dssdev); end: mutex_unlock(&td->lock); return 0; err: - dsi_bus_unlock(); + dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); return r; } @@ -1369,7 +1692,11 @@ static int taal_run_test(struct omap_dss_device *dssdev, int test_num) goto err1; } - dsi_bus_lock(); + dsi_bus_lock(dssdev); + + r = taal_wake_up(dssdev); + if (r) + goto err2; r = taal_dcs_read_1(td, DCS_GET_ID1, &id1); if (r) @@ -1381,11 +1708,11 @@ static int taal_run_test(struct omap_dss_device *dssdev, int test_num) if (r) goto err2; - dsi_bus_unlock(); + dsi_bus_unlock(dssdev); mutex_unlock(&td->lock); return 0; err2: - dsi_bus_unlock(); + dsi_bus_unlock(dssdev); err1: mutex_unlock(&td->lock); return r; @@ -1415,7 +1742,11 @@ static int taal_memory_read(struct omap_dss_device *dssdev, dssdev->panel.timings.x_res * dssdev->panel.timings.y_res * 3); - dsi_bus_lock(); + dsi_bus_lock(dssdev); + + r = taal_wake_up(dssdev); + if (r) + goto err2; /* plen 1 or 2 goes into short packet. until checksum error is fixed, * use short packets. plen 32 works, but bigger packets seem to cause @@ -1427,7 +1758,7 @@ static int taal_memory_read(struct omap_dss_device *dssdev, taal_set_update_window(td, x, y, w, h); - r = dsi_vc_set_max_rx_packet_size(td->channel, plen); + r = dsi_vc_set_max_rx_packet_size(dssdev, td->channel, plen); if (r) goto err2; @@ -1435,7 +1766,7 @@ static int taal_memory_read(struct omap_dss_device *dssdev, u8 dcs_cmd = first ? 
0x2e : 0x3e; first = 0; - r = dsi_vc_dcs_read(td->channel, dcs_cmd, + r = dsi_vc_dcs_read(dssdev, td->channel, dcs_cmd, buf + buf_used, size - buf_used); if (r < 0) { @@ -1461,14 +1792,35 @@ static int taal_memory_read(struct omap_dss_device *dssdev, r = buf_used; err3: - dsi_vc_set_max_rx_packet_size(td->channel, 1); + dsi_vc_set_max_rx_packet_size(dssdev, td->channel, 1); err2: - dsi_bus_unlock(); + dsi_bus_unlock(dssdev); err1: mutex_unlock(&td->lock); return r; } +static void taal_ulps_work(struct work_struct *work) +{ + struct taal_data *td = container_of(work, struct taal_data, + ulps_work.work); + struct omap_dss_device *dssdev = td->dssdev; + + mutex_lock(&td->lock); + + if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE || !td->enabled) { + mutex_unlock(&td->lock); + return; + } + + dsi_bus_lock(dssdev); + + taal_enter_ulps(dssdev); + + dsi_bus_unlock(dssdev); + mutex_unlock(&td->lock); +} + static void taal_esd_work(struct work_struct *work) { struct taal_data *td = container_of(work, struct taal_data, @@ -1485,7 +1837,13 @@ static void taal_esd_work(struct work_struct *work) return; } - dsi_bus_lock(); + dsi_bus_lock(dssdev); + + r = taal_wake_up(dssdev); + if (r) { + dev_err(&dssdev->dev, "failed to exit ULPS\n"); + goto err; + } r = taal_dcs_read_1(td, DCS_RDDSDR, &state1); if (r) { @@ -1521,22 +1879,20 @@ static void taal_esd_work(struct work_struct *work) goto err; } - dsi_bus_unlock(); + dsi_bus_unlock(dssdev); - queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD); + taal_queue_esd_work(dssdev); mutex_unlock(&td->lock); return; err: dev_err(&dssdev->dev, "performing LCD reset\n"); - taal_power_off(dssdev); - taal_hw_reset(dssdev); - taal_power_on(dssdev); + taal_panel_reset(dssdev); - dsi_bus_unlock(); + dsi_bus_unlock(dssdev); - queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD); + taal_queue_esd_work(dssdev); mutex_unlock(&td->lock); } @@ -1557,7 +1913,7 @@ static enum omap_dss_update_mode taal_get_update_mode( static struct omap_dss_driver taal_driver = { .probe = taal_probe, - .remove = taal_remove, + .remove = __exit_p(taal_remove), .enable = taal_enable, .disable = taal_disable, diff --git a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c index dbe9d43b485..2462b9ec666 100644 --- a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c +++ b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c @@ -17,7 +17,7 @@ #include <linux/err.h> #include <linux/slab.h> -#include <plat/display.h> +#include <video/omapdss.h> #define TPO_R02_MODE(x) ((x) & 7) #define TPO_R02_MODE_800x480 7 @@ -144,13 +144,15 @@ static ssize_t tpo_td043_vmirror_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev); - long val; + int val; int ret; - ret = strict_strtol(buf, 0, &val); + ret = kstrtoint(buf, 0, &val); if (ret < 0) return ret; + val = !!val; + ret = tpo_td043_write_mirror(tpo_td043->spi, tpo_td043->hmirror, val); if (ret < 0) return ret; @@ -175,7 +177,7 @@ static ssize_t tpo_td043_mode_store(struct device *dev, long val; int ret; - ret = strict_strtol(buf, 0, &val); + ret = kstrtol(buf, 0, &val); if (ret != 0 || val & ~7) return -EINVAL; diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig index bfc5da0e970..6b3e2da1141 100644 --- a/drivers/video/omap2/dss/Kconfig +++ b/drivers/video/omap2/dss/Kconfig @@ -80,7 +80,7 @@ config OMAP2_DSS_SDI config OMAP2_DSS_DSI bool "DSI support" - 
depends on ARCH_OMAP3 + depends on ARCH_OMAP3 || ARCH_OMAP4 default n help MIPI DSI (Display Serial Interface) support. @@ -90,14 +90,6 @@ config OMAP2_DSS_DSI See http://www.mipi.org/ for DSI specifications. -config OMAP2_DSS_USE_DSI_PLL - bool "Use DSI PLL for PCLK (EXPERIMENTAL)" - default n - depends on OMAP2_DSS_DSI - help - Use DSI PLL to generate pixel clock. Currently only for DPI output. - DSI PLL can be used to generate higher and more precise pixel clocks. - config OMAP2_DSS_FAKE_VSYNC bool "Fake VSYNC irq from manual update displays" default n @@ -125,4 +117,27 @@ config OMAP2_DSS_MIN_FCK_PER_PCK Max FCK is 173MHz, so this doesn't work if your PCK is very high. +config OMAP2_DSS_SLEEP_BEFORE_RESET + bool "Sleep 50ms before DSS reset" + default y + help + For some unknown reason we may get SYNC_LOST errors from the display + subsystem at initialization time if we don't sleep before resetting + the DSS. See the source (dss.c) for more comments. + + However, 50ms is quite a long time to sleep, and with some + configurations the SYNC_LOST may never happen, so the sleep can + be disabled here. + +config OMAP2_DSS_SLEEP_AFTER_VENC_RESET + bool "Sleep 20ms after VENC reset" + default y + help + There is a 20ms sleep after VENC reset which seemed to fix the + reset. The reason for the bug is unclear, and it's also unclear + on what platforms this happens. + + This option enables the sleep, and is enabled by default. You can + disable the sleep if it doesn't cause problems on your platform. + endif diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c index 1aa2ed1e786..3da426719dd 100644 --- a/drivers/video/omap2/dss/core.c +++ b/drivers/video/omap2/dss/core.c @@ -33,7 +33,7 @@ #include <linux/device.h> #include <linux/regulator/consumer.h> -#include <plat/display.h> +#include <video/omapdss.h> #include "dss.h" #include "dss_features.h" @@ -54,6 +54,9 @@ unsigned int dss_debug; module_param_named(debug, dss_debug, bool, 0644); #endif +static int omap_dss_register_device(struct omap_dss_device *); +static void omap_dss_unregister_device(struct omap_dss_device *); + /* REGULATORS */ struct regulator *dss_get_vdds_dsi(void) @@ -124,8 +127,7 @@ static int dss_initialize_debugfs(void) #endif #if defined(CONFIG_OMAP2_DSS_DSI) && defined(CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS) - debugfs_create_file("dsi_irq", S_IRUGO, dss_debugfs_dir, - &dsi_dump_irqs, &dss_debug_fops); + dsi_create_debugfs_files_irq(dss_debugfs_dir, &dss_debug_fops); #endif debugfs_create_file("dss", S_IRUGO, dss_debugfs_dir, @@ -137,8 +139,7 @@ static int dss_initialize_debugfs(void) &rfbi_dump_regs, &dss_debug_fops); #endif #ifdef CONFIG_OMAP2_DSS_DSI - debugfs_create_file("dsi", S_IRUGO, dss_debugfs_dir, - &dsi_dump_regs, &dss_debug_fops); + dsi_create_debugfs_files_reg(dss_debugfs_dir, &dss_debug_fops); #endif #ifdef CONFIG_OMAP2_DSS_VENC debugfs_create_file("venc", S_IRUGO, dss_debugfs_dir, @@ -480,7 +481,7 @@ static void omap_dss_dev_release(struct device *dev) reset_device(dev, 0); } -int omap_dss_register_device(struct omap_dss_device *dssdev) +static int omap_dss_register_device(struct omap_dss_device *dssdev) { static int dev_num; @@ -494,7 +495,7 @@ int omap_dss_register_device(struct omap_dss_device *dssdev) return device_register(&dssdev->dev); } -void omap_dss_unregister_device(struct omap_dss_device *dssdev) +static void omap_dss_unregister_device(struct omap_dss_device *dssdev) { device_unregister(&dssdev->dev); } diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c 
index 7804779c9da..7a9a2e7d968 100644 --- a/drivers/video/omap2/dss/dispc.c +++ b/drivers/video/omap2/dss/dispc.c @@ -37,99 +37,15 @@ #include <plat/sram.h> #include <plat/clock.h> -#include <plat/display.h> +#include <video/omapdss.h> #include "dss.h" #include "dss_features.h" +#include "dispc.h" /* DISPC */ #define DISPC_SZ_REGS SZ_4K -struct dispc_reg { u16 idx; }; - -#define DISPC_REG(idx) ((const struct dispc_reg) { idx }) - -/* - * DISPC common registers and - * DISPC channel registers , ch = 0 for LCD, ch = 1 for - * DIGIT, and ch = 2 for LCD2 - */ -#define DISPC_REVISION DISPC_REG(0x0000) -#define DISPC_SYSCONFIG DISPC_REG(0x0010) -#define DISPC_SYSSTATUS DISPC_REG(0x0014) -#define DISPC_IRQSTATUS DISPC_REG(0x0018) -#define DISPC_IRQENABLE DISPC_REG(0x001C) -#define DISPC_CONTROL DISPC_REG(0x0040) -#define DISPC_CONTROL2 DISPC_REG(0x0238) -#define DISPC_CONFIG DISPC_REG(0x0044) -#define DISPC_CONFIG2 DISPC_REG(0x0620) -#define DISPC_CAPABLE DISPC_REG(0x0048) -#define DISPC_DEFAULT_COLOR(ch) DISPC_REG(ch == 0 ? 0x004C : \ - (ch == 1 ? 0x0050 : 0x03AC)) -#define DISPC_TRANS_COLOR(ch) DISPC_REG(ch == 0 ? 0x0054 : \ - (ch == 1 ? 0x0058 : 0x03B0)) -#define DISPC_LINE_STATUS DISPC_REG(0x005C) -#define DISPC_LINE_NUMBER DISPC_REG(0x0060) -#define DISPC_TIMING_H(ch) DISPC_REG(ch != 2 ? 0x0064 : 0x0400) -#define DISPC_TIMING_V(ch) DISPC_REG(ch != 2 ? 0x0068 : 0x0404) -#define DISPC_POL_FREQ(ch) DISPC_REG(ch != 2 ? 0x006C : 0x0408) -#define DISPC_DIVISORo(ch) DISPC_REG(ch != 2 ? 0x0070 : 0x040C) -#define DISPC_GLOBAL_ALPHA DISPC_REG(0x0074) -#define DISPC_SIZE_DIG DISPC_REG(0x0078) -#define DISPC_SIZE_LCD(ch) DISPC_REG(ch != 2 ? 0x007C : 0x03CC) - -/* DISPC GFX plane */ -#define DISPC_GFX_BA0 DISPC_REG(0x0080) -#define DISPC_GFX_BA1 DISPC_REG(0x0084) -#define DISPC_GFX_POSITION DISPC_REG(0x0088) -#define DISPC_GFX_SIZE DISPC_REG(0x008C) -#define DISPC_GFX_ATTRIBUTES DISPC_REG(0x00A0) -#define DISPC_GFX_FIFO_THRESHOLD DISPC_REG(0x00A4) -#define DISPC_GFX_FIFO_SIZE_STATUS DISPC_REG(0x00A8) -#define DISPC_GFX_ROW_INC DISPC_REG(0x00AC) -#define DISPC_GFX_PIXEL_INC DISPC_REG(0x00B0) -#define DISPC_GFX_WINDOW_SKIP DISPC_REG(0x00B4) -#define DISPC_GFX_TABLE_BA DISPC_REG(0x00B8) - -#define DISPC_DATA_CYCLE1(ch) DISPC_REG(ch != 2 ? 0x01D4 : 0x03C0) -#define DISPC_DATA_CYCLE2(ch) DISPC_REG(ch != 2 ? 0x01D8 : 0x03C4) -#define DISPC_DATA_CYCLE3(ch) DISPC_REG(ch != 2 ? 0x01DC : 0x03C8) -#define DISPC_CPR_COEF_R(ch) DISPC_REG(ch != 2 ? 0x0220 : 0x03BC) -#define DISPC_CPR_COEF_G(ch) DISPC_REG(ch != 2 ? 0x0224 : 0x03B8) -#define DISPC_CPR_COEF_B(ch) DISPC_REG(ch != 2 ? 
0x0228 : 0x03B4) - -#define DISPC_GFX_PRELOAD DISPC_REG(0x022C) - -/* DISPC Video plane, n = 0 for VID1 and n = 1 for VID2 */ -#define DISPC_VID_REG(n, idx) DISPC_REG(0x00BC + (n)*0x90 + idx) - -#define DISPC_VID_BA0(n) DISPC_VID_REG(n, 0x0000) -#define DISPC_VID_BA1(n) DISPC_VID_REG(n, 0x0004) -#define DISPC_VID_POSITION(n) DISPC_VID_REG(n, 0x0008) -#define DISPC_VID_SIZE(n) DISPC_VID_REG(n, 0x000C) -#define DISPC_VID_ATTRIBUTES(n) DISPC_VID_REG(n, 0x0010) -#define DISPC_VID_FIFO_THRESHOLD(n) DISPC_VID_REG(n, 0x0014) -#define DISPC_VID_FIFO_SIZE_STATUS(n) DISPC_VID_REG(n, 0x0018) -#define DISPC_VID_ROW_INC(n) DISPC_VID_REG(n, 0x001C) -#define DISPC_VID_PIXEL_INC(n) DISPC_VID_REG(n, 0x0020) -#define DISPC_VID_FIR(n) DISPC_VID_REG(n, 0x0024) -#define DISPC_VID_PICTURE_SIZE(n) DISPC_VID_REG(n, 0x0028) -#define DISPC_VID_ACCU0(n) DISPC_VID_REG(n, 0x002C) -#define DISPC_VID_ACCU1(n) DISPC_VID_REG(n, 0x0030) - -/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */ -#define DISPC_VID_FIR_COEF_H(n, i) DISPC_REG(0x00F0 + (n)*0x90 + (i)*0x8) -/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */ -#define DISPC_VID_FIR_COEF_HV(n, i) DISPC_REG(0x00F4 + (n)*0x90 + (i)*0x8) -/* coef index i = {0, 1, 2, 3, 4} */ -#define DISPC_VID_CONV_COEF(n, i) DISPC_REG(0x0130 + (n)*0x90 + (i)*0x4) -/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */ -#define DISPC_VID_FIR_COEF_V(n, i) DISPC_REG(0x01E0 + (n)*0x20 + (i)*0x4) - -#define DISPC_VID_PRELOAD(n) DISPC_REG(0x230 + (n)*0x04) - -#define DISPC_DIVISOR DISPC_REG(0x0804) - #define DISPC_IRQ_MASK_ERROR (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \ DISPC_IRQ_OCP_ERR | \ DISPC_IRQ_VID1_FIFO_UNDERFLOW | \ @@ -167,10 +83,6 @@ struct dispc_v_coef { #define REG_FLD_MOD(idx, val, start, end) \ dispc_write_reg(idx, FLD_MOD(dispc_read_reg(idx), val, start, end)) -static const struct dispc_reg dispc_reg_att[] = { DISPC_GFX_ATTRIBUTES, - DISPC_VID_ATTRIBUTES(0), - DISPC_VID_ATTRIBUTES(1) }; - struct dispc_irq_stats { unsigned long last_reset; unsigned irq_count; @@ -198,25 +110,38 @@ static struct { #endif } dispc; +enum omap_color_component { + /* used for all color formats for OMAP3 and earlier + * and for RGB and Y color component on OMAP4 + */ + DISPC_COLOR_COMPONENT_RGB_Y = 1 << 0, + /* used for UV component for + * OMAP_DSS_COLOR_YUV2, OMAP_DSS_COLOR_UYVY, OMAP_DSS_COLOR_NV12 + * color formats on OMAP4 + */ + DISPC_COLOR_COMPONENT_UV = 1 << 1, +}; + static void _omap_dispc_set_irqs(void); -static inline void dispc_write_reg(const struct dispc_reg idx, u32 val) +static inline void dispc_write_reg(const u16 idx, u32 val) { - __raw_writel(val, dispc.base + idx.idx); + __raw_writel(val, dispc.base + idx); } -static inline u32 dispc_read_reg(const struct dispc_reg idx) +static inline u32 dispc_read_reg(const u16 idx) { - return __raw_readl(dispc.base + idx.idx); + return __raw_readl(dispc.base + idx); } #define SR(reg) \ - dispc.ctx[(DISPC_##reg).idx / sizeof(u32)] = dispc_read_reg(DISPC_##reg) + dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg) #define RR(reg) \ - dispc_write_reg(DISPC_##reg, dispc.ctx[(DISPC_##reg).idx / sizeof(u32)]) + dispc_write_reg(DISPC_##reg, dispc.ctx[DISPC_##reg / sizeof(u32)]) void dispc_save_context(void) { + int i; if (cpu_is_omap24xx()) return; @@ -224,157 +149,153 @@ void dispc_save_context(void) SR(IRQENABLE); SR(CONTROL); SR(CONFIG); - SR(DEFAULT_COLOR(0)); - SR(DEFAULT_COLOR(1)); - SR(TRANS_COLOR(0)); - SR(TRANS_COLOR(1)); + SR(DEFAULT_COLOR(OMAP_DSS_CHANNEL_LCD)); + SR(DEFAULT_COLOR(OMAP_DSS_CHANNEL_DIGIT)); + SR(TRANS_COLOR(OMAP_DSS_CHANNEL_LCD)); + 
SR(TRANS_COLOR(OMAP_DSS_CHANNEL_DIGIT)); SR(LINE_NUMBER); - SR(TIMING_H(0)); - SR(TIMING_V(0)); - SR(POL_FREQ(0)); - SR(DIVISORo(0)); + SR(TIMING_H(OMAP_DSS_CHANNEL_LCD)); + SR(TIMING_V(OMAP_DSS_CHANNEL_LCD)); + SR(POL_FREQ(OMAP_DSS_CHANNEL_LCD)); + SR(DIVISORo(OMAP_DSS_CHANNEL_LCD)); SR(GLOBAL_ALPHA); - SR(SIZE_DIG); - SR(SIZE_LCD(0)); + SR(SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT)); + SR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD)); if (dss_has_feature(FEAT_MGR_LCD2)) { SR(CONTROL2); - SR(DEFAULT_COLOR(2)); - SR(TRANS_COLOR(2)); - SR(SIZE_LCD(2)); - SR(TIMING_H(2)); - SR(TIMING_V(2)); - SR(POL_FREQ(2)); - SR(DIVISORo(2)); + SR(DEFAULT_COLOR(OMAP_DSS_CHANNEL_LCD2)); + SR(TRANS_COLOR(OMAP_DSS_CHANNEL_LCD2)); + SR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD2)); + SR(TIMING_H(OMAP_DSS_CHANNEL_LCD2)); + SR(TIMING_V(OMAP_DSS_CHANNEL_LCD2)); + SR(POL_FREQ(OMAP_DSS_CHANNEL_LCD2)); + SR(DIVISORo(OMAP_DSS_CHANNEL_LCD2)); SR(CONFIG2); } - SR(GFX_BA0); - SR(GFX_BA1); - SR(GFX_POSITION); - SR(GFX_SIZE); - SR(GFX_ATTRIBUTES); - SR(GFX_FIFO_THRESHOLD); - SR(GFX_ROW_INC); - SR(GFX_PIXEL_INC); - SR(GFX_WINDOW_SKIP); - SR(GFX_TABLE_BA); - - SR(DATA_CYCLE1(0)); - SR(DATA_CYCLE2(0)); - SR(DATA_CYCLE3(0)); - - SR(CPR_COEF_R(0)); - SR(CPR_COEF_G(0)); - SR(CPR_COEF_B(0)); + SR(OVL_BA0(OMAP_DSS_GFX)); + SR(OVL_BA1(OMAP_DSS_GFX)); + SR(OVL_POSITION(OMAP_DSS_GFX)); + SR(OVL_SIZE(OMAP_DSS_GFX)); + SR(OVL_ATTRIBUTES(OMAP_DSS_GFX)); + SR(OVL_FIFO_THRESHOLD(OMAP_DSS_GFX)); + SR(OVL_ROW_INC(OMAP_DSS_GFX)); + SR(OVL_PIXEL_INC(OMAP_DSS_GFX)); + SR(OVL_WINDOW_SKIP(OMAP_DSS_GFX)); + SR(OVL_TABLE_BA(OMAP_DSS_GFX)); + + SR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD)); + SR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD)); + SR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD)); + + SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD)); + SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD)); + SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD)); if (dss_has_feature(FEAT_MGR_LCD2)) { - SR(CPR_COEF_B(2)); - SR(CPR_COEF_G(2)); - SR(CPR_COEF_R(2)); + SR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2)); + SR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2)); + SR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2)); - SR(DATA_CYCLE1(2)); - SR(DATA_CYCLE2(2)); - SR(DATA_CYCLE3(2)); + SR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2)); + SR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2)); + SR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2)); } - SR(GFX_PRELOAD); + SR(OVL_PRELOAD(OMAP_DSS_GFX)); /* VID1 */ - SR(VID_BA0(0)); - SR(VID_BA1(0)); - SR(VID_POSITION(0)); - SR(VID_SIZE(0)); - SR(VID_ATTRIBUTES(0)); - SR(VID_FIFO_THRESHOLD(0)); - SR(VID_ROW_INC(0)); - SR(VID_PIXEL_INC(0)); - SR(VID_FIR(0)); - SR(VID_PICTURE_SIZE(0)); - SR(VID_ACCU0(0)); - SR(VID_ACCU1(0)); - - SR(VID_FIR_COEF_H(0, 0)); - SR(VID_FIR_COEF_H(0, 1)); - SR(VID_FIR_COEF_H(0, 2)); - SR(VID_FIR_COEF_H(0, 3)); - SR(VID_FIR_COEF_H(0, 4)); - SR(VID_FIR_COEF_H(0, 5)); - SR(VID_FIR_COEF_H(0, 6)); - SR(VID_FIR_COEF_H(0, 7)); - - SR(VID_FIR_COEF_HV(0, 0)); - SR(VID_FIR_COEF_HV(0, 1)); - SR(VID_FIR_COEF_HV(0, 2)); - SR(VID_FIR_COEF_HV(0, 3)); - SR(VID_FIR_COEF_HV(0, 4)); - SR(VID_FIR_COEF_HV(0, 5)); - SR(VID_FIR_COEF_HV(0, 6)); - SR(VID_FIR_COEF_HV(0, 7)); - - SR(VID_CONV_COEF(0, 0)); - SR(VID_CONV_COEF(0, 1)); - SR(VID_CONV_COEF(0, 2)); - SR(VID_CONV_COEF(0, 3)); - SR(VID_CONV_COEF(0, 4)); - - SR(VID_FIR_COEF_V(0, 0)); - SR(VID_FIR_COEF_V(0, 1)); - SR(VID_FIR_COEF_V(0, 2)); - SR(VID_FIR_COEF_V(0, 3)); - SR(VID_FIR_COEF_V(0, 4)); - SR(VID_FIR_COEF_V(0, 5)); - SR(VID_FIR_COEF_V(0, 6)); - SR(VID_FIR_COEF_V(0, 7)); - - SR(VID_PRELOAD(0)); + SR(OVL_BA0(OMAP_DSS_VIDEO1)); + SR(OVL_BA1(OMAP_DSS_VIDEO1)); + SR(OVL_POSITION(OMAP_DSS_VIDEO1)); + SR(OVL_SIZE(OMAP_DSS_VIDEO1)); + 
SR(OVL_ATTRIBUTES(OMAP_DSS_VIDEO1)); + SR(OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO1)); + SR(OVL_ROW_INC(OMAP_DSS_VIDEO1)); + SR(OVL_PIXEL_INC(OMAP_DSS_VIDEO1)); + SR(OVL_FIR(OMAP_DSS_VIDEO1)); + SR(OVL_PICTURE_SIZE(OMAP_DSS_VIDEO1)); + SR(OVL_ACCU0(OMAP_DSS_VIDEO1)); + SR(OVL_ACCU1(OMAP_DSS_VIDEO1)); + + for (i = 0; i < 8; i++) + SR(OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, i)); + + for (i = 0; i < 8; i++) + SR(OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, i)); + + for (i = 0; i < 5; i++) + SR(OVL_CONV_COEF(OMAP_DSS_VIDEO1, i)); + + for (i = 0; i < 8; i++) + SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i)); + + if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { + SR(OVL_BA0_UV(OMAP_DSS_VIDEO1)); + SR(OVL_BA1_UV(OMAP_DSS_VIDEO1)); + SR(OVL_FIR2(OMAP_DSS_VIDEO1)); + SR(OVL_ACCU2_0(OMAP_DSS_VIDEO1)); + SR(OVL_ACCU2_1(OMAP_DSS_VIDEO1)); + + for (i = 0; i < 8; i++) + SR(OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, i)); + + for (i = 0; i < 8; i++) + SR(OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, i)); + + for (i = 0; i < 8; i++) + SR(OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, i)); + } + if (dss_has_feature(FEAT_ATTR2)) + SR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1)); + + SR(OVL_PRELOAD(OMAP_DSS_VIDEO1)); /* VID2 */ - SR(VID_BA0(1)); - SR(VID_BA1(1)); - SR(VID_POSITION(1)); - SR(VID_SIZE(1)); - SR(VID_ATTRIBUTES(1)); - SR(VID_FIFO_THRESHOLD(1)); - SR(VID_ROW_INC(1)); - SR(VID_PIXEL_INC(1)); - SR(VID_FIR(1)); - SR(VID_PICTURE_SIZE(1)); - SR(VID_ACCU0(1)); - SR(VID_ACCU1(1)); - - SR(VID_FIR_COEF_H(1, 0)); - SR(VID_FIR_COEF_H(1, 1)); - SR(VID_FIR_COEF_H(1, 2)); - SR(VID_FIR_COEF_H(1, 3)); - SR(VID_FIR_COEF_H(1, 4)); - SR(VID_FIR_COEF_H(1, 5)); - SR(VID_FIR_COEF_H(1, 6)); - SR(VID_FIR_COEF_H(1, 7)); - - SR(VID_FIR_COEF_HV(1, 0)); - SR(VID_FIR_COEF_HV(1, 1)); - SR(VID_FIR_COEF_HV(1, 2)); - SR(VID_FIR_COEF_HV(1, 3)); - SR(VID_FIR_COEF_HV(1, 4)); - SR(VID_FIR_COEF_HV(1, 5)); - SR(VID_FIR_COEF_HV(1, 6)); - SR(VID_FIR_COEF_HV(1, 7)); - - SR(VID_CONV_COEF(1, 0)); - SR(VID_CONV_COEF(1, 1)); - SR(VID_CONV_COEF(1, 2)); - SR(VID_CONV_COEF(1, 3)); - SR(VID_CONV_COEF(1, 4)); - - SR(VID_FIR_COEF_V(1, 0)); - SR(VID_FIR_COEF_V(1, 1)); - SR(VID_FIR_COEF_V(1, 2)); - SR(VID_FIR_COEF_V(1, 3)); - SR(VID_FIR_COEF_V(1, 4)); - SR(VID_FIR_COEF_V(1, 5)); - SR(VID_FIR_COEF_V(1, 6)); - SR(VID_FIR_COEF_V(1, 7)); - - SR(VID_PRELOAD(1)); + SR(OVL_BA0(OMAP_DSS_VIDEO2)); + SR(OVL_BA1(OMAP_DSS_VIDEO2)); + SR(OVL_POSITION(OMAP_DSS_VIDEO2)); + SR(OVL_SIZE(OMAP_DSS_VIDEO2)); + SR(OVL_ATTRIBUTES(OMAP_DSS_VIDEO2)); + SR(OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO2)); + SR(OVL_ROW_INC(OMAP_DSS_VIDEO2)); + SR(OVL_PIXEL_INC(OMAP_DSS_VIDEO2)); + SR(OVL_FIR(OMAP_DSS_VIDEO2)); + SR(OVL_PICTURE_SIZE(OMAP_DSS_VIDEO2)); + SR(OVL_ACCU0(OMAP_DSS_VIDEO2)); + SR(OVL_ACCU1(OMAP_DSS_VIDEO2)); + + for (i = 0; i < 8; i++) + SR(OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, i)); + + for (i = 0; i < 8; i++) + SR(OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, i)); + + for (i = 0; i < 5; i++) + SR(OVL_CONV_COEF(OMAP_DSS_VIDEO2, i)); + + for (i = 0; i < 8; i++) + SR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i)); + + if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { + SR(OVL_BA0_UV(OMAP_DSS_VIDEO2)); + SR(OVL_BA1_UV(OMAP_DSS_VIDEO2)); + SR(OVL_FIR2(OMAP_DSS_VIDEO2)); + SR(OVL_ACCU2_0(OMAP_DSS_VIDEO2)); + SR(OVL_ACCU2_1(OMAP_DSS_VIDEO2)); + + for (i = 0; i < 8; i++) + SR(OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, i)); + + for (i = 0; i < 8; i++) + SR(OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, i)); + + for (i = 0; i < 8; i++) + SR(OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, i)); + } + if (dss_has_feature(FEAT_ATTR2)) + SR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2)); + + SR(OVL_PRELOAD(OMAP_DSS_VIDEO2)); if 
(dss_has_feature(FEAT_CORE_CLK_DIV)) SR(DIVISOR); @@ -382,160 +303,158 @@ void dispc_save_context(void) void dispc_restore_context(void) { + int i; RR(SYSCONFIG); /*RR(IRQENABLE);*/ /*RR(CONTROL);*/ RR(CONFIG); - RR(DEFAULT_COLOR(0)); - RR(DEFAULT_COLOR(1)); - RR(TRANS_COLOR(0)); - RR(TRANS_COLOR(1)); + RR(DEFAULT_COLOR(OMAP_DSS_CHANNEL_LCD)); + RR(DEFAULT_COLOR(OMAP_DSS_CHANNEL_DIGIT)); + RR(TRANS_COLOR(OMAP_DSS_CHANNEL_LCD)); + RR(TRANS_COLOR(OMAP_DSS_CHANNEL_DIGIT)); RR(LINE_NUMBER); - RR(TIMING_H(0)); - RR(TIMING_V(0)); - RR(POL_FREQ(0)); - RR(DIVISORo(0)); + RR(TIMING_H(OMAP_DSS_CHANNEL_LCD)); + RR(TIMING_V(OMAP_DSS_CHANNEL_LCD)); + RR(POL_FREQ(OMAP_DSS_CHANNEL_LCD)); + RR(DIVISORo(OMAP_DSS_CHANNEL_LCD)); RR(GLOBAL_ALPHA); - RR(SIZE_DIG); - RR(SIZE_LCD(0)); + RR(SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT)); + RR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD)); if (dss_has_feature(FEAT_MGR_LCD2)) { - RR(DEFAULT_COLOR(2)); - RR(TRANS_COLOR(2)); - RR(SIZE_LCD(2)); - RR(TIMING_H(2)); - RR(TIMING_V(2)); - RR(POL_FREQ(2)); - RR(DIVISORo(2)); + RR(DEFAULT_COLOR(OMAP_DSS_CHANNEL_LCD2)); + RR(TRANS_COLOR(OMAP_DSS_CHANNEL_LCD2)); + RR(SIZE_MGR(OMAP_DSS_CHANNEL_LCD2)); + RR(TIMING_H(OMAP_DSS_CHANNEL_LCD2)); + RR(TIMING_V(OMAP_DSS_CHANNEL_LCD2)); + RR(POL_FREQ(OMAP_DSS_CHANNEL_LCD2)); + RR(DIVISORo(OMAP_DSS_CHANNEL_LCD2)); RR(CONFIG2); } - RR(GFX_BA0); - RR(GFX_BA1); - RR(GFX_POSITION); - RR(GFX_SIZE); - RR(GFX_ATTRIBUTES); - RR(GFX_FIFO_THRESHOLD); - RR(GFX_ROW_INC); - RR(GFX_PIXEL_INC); - RR(GFX_WINDOW_SKIP); - RR(GFX_TABLE_BA); - - RR(DATA_CYCLE1(0)); - RR(DATA_CYCLE2(0)); - RR(DATA_CYCLE3(0)); - - RR(CPR_COEF_R(0)); - RR(CPR_COEF_G(0)); - RR(CPR_COEF_B(0)); + RR(OVL_BA0(OMAP_DSS_GFX)); + RR(OVL_BA1(OMAP_DSS_GFX)); + RR(OVL_POSITION(OMAP_DSS_GFX)); + RR(OVL_SIZE(OMAP_DSS_GFX)); + RR(OVL_ATTRIBUTES(OMAP_DSS_GFX)); + RR(OVL_FIFO_THRESHOLD(OMAP_DSS_GFX)); + RR(OVL_ROW_INC(OMAP_DSS_GFX)); + RR(OVL_PIXEL_INC(OMAP_DSS_GFX)); + RR(OVL_WINDOW_SKIP(OMAP_DSS_GFX)); + RR(OVL_TABLE_BA(OMAP_DSS_GFX)); + + + RR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD)); + RR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD)); + RR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD)); + + RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD)); + RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD)); + RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD)); if (dss_has_feature(FEAT_MGR_LCD2)) { - RR(DATA_CYCLE1(2)); - RR(DATA_CYCLE2(2)); - RR(DATA_CYCLE3(2)); + RR(DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2)); + RR(DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2)); + RR(DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2)); - RR(CPR_COEF_B(2)); - RR(CPR_COEF_G(2)); - RR(CPR_COEF_R(2)); + RR(CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2)); + RR(CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2)); + RR(CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2)); } - RR(GFX_PRELOAD); + RR(OVL_PRELOAD(OMAP_DSS_GFX)); /* VID1 */ - RR(VID_BA0(0)); - RR(VID_BA1(0)); - RR(VID_POSITION(0)); - RR(VID_SIZE(0)); - RR(VID_ATTRIBUTES(0)); - RR(VID_FIFO_THRESHOLD(0)); - RR(VID_ROW_INC(0)); - RR(VID_PIXEL_INC(0)); - RR(VID_FIR(0)); - RR(VID_PICTURE_SIZE(0)); - RR(VID_ACCU0(0)); - RR(VID_ACCU1(0)); - - RR(VID_FIR_COEF_H(0, 0)); - RR(VID_FIR_COEF_H(0, 1)); - RR(VID_FIR_COEF_H(0, 2)); - RR(VID_FIR_COEF_H(0, 3)); - RR(VID_FIR_COEF_H(0, 4)); - RR(VID_FIR_COEF_H(0, 5)); - RR(VID_FIR_COEF_H(0, 6)); - RR(VID_FIR_COEF_H(0, 7)); - - RR(VID_FIR_COEF_HV(0, 0)); - RR(VID_FIR_COEF_HV(0, 1)); - RR(VID_FIR_COEF_HV(0, 2)); - RR(VID_FIR_COEF_HV(0, 3)); - RR(VID_FIR_COEF_HV(0, 4)); - RR(VID_FIR_COEF_HV(0, 5)); - RR(VID_FIR_COEF_HV(0, 6)); - RR(VID_FIR_COEF_HV(0, 7)); - - RR(VID_CONV_COEF(0, 0)); - RR(VID_CONV_COEF(0, 1)); - RR(VID_CONV_COEF(0, 2)); - RR(VID_CONV_COEF(0, 
3)); - RR(VID_CONV_COEF(0, 4)); - - RR(VID_FIR_COEF_V(0, 0)); - RR(VID_FIR_COEF_V(0, 1)); - RR(VID_FIR_COEF_V(0, 2)); - RR(VID_FIR_COEF_V(0, 3)); - RR(VID_FIR_COEF_V(0, 4)); - RR(VID_FIR_COEF_V(0, 5)); - RR(VID_FIR_COEF_V(0, 6)); - RR(VID_FIR_COEF_V(0, 7)); - - RR(VID_PRELOAD(0)); + RR(OVL_BA0(OMAP_DSS_VIDEO1)); + RR(OVL_BA1(OMAP_DSS_VIDEO1)); + RR(OVL_POSITION(OMAP_DSS_VIDEO1)); + RR(OVL_SIZE(OMAP_DSS_VIDEO1)); + RR(OVL_ATTRIBUTES(OMAP_DSS_VIDEO1)); + RR(OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO1)); + RR(OVL_ROW_INC(OMAP_DSS_VIDEO1)); + RR(OVL_PIXEL_INC(OMAP_DSS_VIDEO1)); + RR(OVL_FIR(OMAP_DSS_VIDEO1)); + RR(OVL_PICTURE_SIZE(OMAP_DSS_VIDEO1)); + RR(OVL_ACCU0(OMAP_DSS_VIDEO1)); + RR(OVL_ACCU1(OMAP_DSS_VIDEO1)); + + for (i = 0; i < 8; i++) + RR(OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, i)); + + for (i = 0; i < 8; i++) + RR(OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, i)); + + for (i = 0; i < 5; i++) + RR(OVL_CONV_COEF(OMAP_DSS_VIDEO1, i)); + + for (i = 0; i < 8; i++) + RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, i)); + + if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { + RR(OVL_BA0_UV(OMAP_DSS_VIDEO1)); + RR(OVL_BA1_UV(OMAP_DSS_VIDEO1)); + RR(OVL_FIR2(OMAP_DSS_VIDEO1)); + RR(OVL_ACCU2_0(OMAP_DSS_VIDEO1)); + RR(OVL_ACCU2_1(OMAP_DSS_VIDEO1)); + + for (i = 0; i < 8; i++) + RR(OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, i)); + + for (i = 0; i < 8; i++) + RR(OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, i)); + + for (i = 0; i < 8; i++) + RR(OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, i)); + } + if (dss_has_feature(FEAT_ATTR2)) + RR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1)); + + RR(OVL_PRELOAD(OMAP_DSS_VIDEO1)); /* VID2 */ - RR(VID_BA0(1)); - RR(VID_BA1(1)); - RR(VID_POSITION(1)); - RR(VID_SIZE(1)); - RR(VID_ATTRIBUTES(1)); - RR(VID_FIFO_THRESHOLD(1)); - RR(VID_ROW_INC(1)); - RR(VID_PIXEL_INC(1)); - RR(VID_FIR(1)); - RR(VID_PICTURE_SIZE(1)); - RR(VID_ACCU0(1)); - RR(VID_ACCU1(1)); - - RR(VID_FIR_COEF_H(1, 0)); - RR(VID_FIR_COEF_H(1, 1)); - RR(VID_FIR_COEF_H(1, 2)); - RR(VID_FIR_COEF_H(1, 3)); - RR(VID_FIR_COEF_H(1, 4)); - RR(VID_FIR_COEF_H(1, 5)); - RR(VID_FIR_COEF_H(1, 6)); - RR(VID_FIR_COEF_H(1, 7)); - - RR(VID_FIR_COEF_HV(1, 0)); - RR(VID_FIR_COEF_HV(1, 1)); - RR(VID_FIR_COEF_HV(1, 2)); - RR(VID_FIR_COEF_HV(1, 3)); - RR(VID_FIR_COEF_HV(1, 4)); - RR(VID_FIR_COEF_HV(1, 5)); - RR(VID_FIR_COEF_HV(1, 6)); - RR(VID_FIR_COEF_HV(1, 7)); - - RR(VID_CONV_COEF(1, 0)); - RR(VID_CONV_COEF(1, 1)); - RR(VID_CONV_COEF(1, 2)); - RR(VID_CONV_COEF(1, 3)); - RR(VID_CONV_COEF(1, 4)); - - RR(VID_FIR_COEF_V(1, 0)); - RR(VID_FIR_COEF_V(1, 1)); - RR(VID_FIR_COEF_V(1, 2)); - RR(VID_FIR_COEF_V(1, 3)); - RR(VID_FIR_COEF_V(1, 4)); - RR(VID_FIR_COEF_V(1, 5)); - RR(VID_FIR_COEF_V(1, 6)); - RR(VID_FIR_COEF_V(1, 7)); - - RR(VID_PRELOAD(1)); + RR(OVL_BA0(OMAP_DSS_VIDEO2)); + RR(OVL_BA1(OMAP_DSS_VIDEO2)); + RR(OVL_POSITION(OMAP_DSS_VIDEO2)); + RR(OVL_SIZE(OMAP_DSS_VIDEO2)); + RR(OVL_ATTRIBUTES(OMAP_DSS_VIDEO2)); + RR(OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO2)); + RR(OVL_ROW_INC(OMAP_DSS_VIDEO2)); + RR(OVL_PIXEL_INC(OMAP_DSS_VIDEO2)); + RR(OVL_FIR(OMAP_DSS_VIDEO2)); + RR(OVL_PICTURE_SIZE(OMAP_DSS_VIDEO2)); + RR(OVL_ACCU0(OMAP_DSS_VIDEO2)); + RR(OVL_ACCU1(OMAP_DSS_VIDEO2)); + + for (i = 0; i < 8; i++) + RR(OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, i)); + + for (i = 0; i < 8; i++) + RR(OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, i)); + + for (i = 0; i < 5; i++) + RR(OVL_CONV_COEF(OMAP_DSS_VIDEO2, i)); + + for (i = 0; i < 8; i++) + RR(OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, i)); + + if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { + RR(OVL_BA0_UV(OMAP_DSS_VIDEO2)); + RR(OVL_BA1_UV(OMAP_DSS_VIDEO2)); + RR(OVL_FIR2(OMAP_DSS_VIDEO2)); + 
RR(OVL_ACCU2_0(OMAP_DSS_VIDEO2)); + RR(OVL_ACCU2_1(OMAP_DSS_VIDEO2)); + + for (i = 0; i < 8; i++) + RR(OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, i)); + + for (i = 0; i < 8; i++) + RR(OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, i)); + + for (i = 0; i < 8; i++) + RR(OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, i)); + } + if (dss_has_feature(FEAT_ATTR2)) + RR(OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2)); + + RR(OVL_PRELOAD(OMAP_DSS_VIDEO2)); if (dss_has_feature(FEAT_CORE_CLK_DIV)) RR(DIVISOR); @@ -632,27 +551,43 @@ end: static void _dispc_write_firh_reg(enum omap_plane plane, int reg, u32 value) { + dispc_write_reg(DISPC_OVL_FIR_COEF_H(plane, reg), value); +} + +static void _dispc_write_firhv_reg(enum omap_plane plane, int reg, u32 value) +{ + dispc_write_reg(DISPC_OVL_FIR_COEF_HV(plane, reg), value); +} + +static void _dispc_write_firv_reg(enum omap_plane plane, int reg, u32 value) +{ + dispc_write_reg(DISPC_OVL_FIR_COEF_V(plane, reg), value); +} + +static void _dispc_write_firh2_reg(enum omap_plane plane, int reg, u32 value) +{ BUG_ON(plane == OMAP_DSS_GFX); - dispc_write_reg(DISPC_VID_FIR_COEF_H(plane-1, reg), value); + dispc_write_reg(DISPC_OVL_FIR_COEF_H2(plane, reg), value); } -static void _dispc_write_firhv_reg(enum omap_plane plane, int reg, u32 value) +static void _dispc_write_firhv2_reg(enum omap_plane plane, int reg, u32 value) { BUG_ON(plane == OMAP_DSS_GFX); - dispc_write_reg(DISPC_VID_FIR_COEF_HV(plane-1, reg), value); + dispc_write_reg(DISPC_OVL_FIR_COEF_HV2(plane, reg), value); } -static void _dispc_write_firv_reg(enum omap_plane plane, int reg, u32 value) +static void _dispc_write_firv2_reg(enum omap_plane plane, int reg, u32 value) { BUG_ON(plane == OMAP_DSS_GFX); - dispc_write_reg(DISPC_VID_FIR_COEF_V(plane-1, reg), value); + dispc_write_reg(DISPC_OVL_FIR_COEF_V2(plane, reg), value); } static void _dispc_set_scale_coef(enum omap_plane plane, int hscaleup, - int vscaleup, int five_taps) + int vscaleup, int five_taps, + enum omap_color_component color_comp) { /* Coefficients for horizontal up-sampling */ static const struct dispc_h_coef coef_hup[8] = { @@ -750,8 +685,14 @@ static void _dispc_set_scale_coef(enum omap_plane plane, int hscaleup, | FLD_VAL(v_coef[i].vc1, 23, 16) | FLD_VAL(v_coef[i].vc2, 31, 24); - _dispc_write_firh_reg(plane, i, h); - _dispc_write_firhv_reg(plane, i, hv); + if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y) { + _dispc_write_firh_reg(plane, i, h); + _dispc_write_firhv_reg(plane, i, hv); + } else { + _dispc_write_firh2_reg(plane, i, h); + _dispc_write_firhv2_reg(plane, i, hv); + } + } if (five_taps) { @@ -759,7 +700,10 @@ static void _dispc_set_scale_coef(enum omap_plane plane, int hscaleup, u32 v; v = FLD_VAL(v_coef[i].vc00, 7, 0) | FLD_VAL(v_coef[i].vc22, 15, 8); - _dispc_write_firv_reg(plane, i, v); + if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y) + _dispc_write_firv_reg(plane, i, v); + else + _dispc_write_firv2_reg(plane, i, v); } } } @@ -779,72 +723,83 @@ static void _dispc_setup_color_conv_coef(void) ct = &ctbl_bt601_5; - dispc_write_reg(DISPC_VID_CONV_COEF(0, 0), CVAL(ct->rcr, ct->ry)); - dispc_write_reg(DISPC_VID_CONV_COEF(0, 1), CVAL(ct->gy, ct->rcb)); - dispc_write_reg(DISPC_VID_CONV_COEF(0, 2), CVAL(ct->gcb, ct->gcr)); - dispc_write_reg(DISPC_VID_CONV_COEF(0, 3), CVAL(ct->bcr, ct->by)); - dispc_write_reg(DISPC_VID_CONV_COEF(0, 4), CVAL(0, ct->bcb)); - - dispc_write_reg(DISPC_VID_CONV_COEF(1, 0), CVAL(ct->rcr, ct->ry)); - dispc_write_reg(DISPC_VID_CONV_COEF(1, 1), CVAL(ct->gy, ct->rcb)); - dispc_write_reg(DISPC_VID_CONV_COEF(1, 2), CVAL(ct->gcb, ct->gcr)); - 
dispc_write_reg(DISPC_VID_CONV_COEF(1, 3), CVAL(ct->bcr, ct->by)); - dispc_write_reg(DISPC_VID_CONV_COEF(1, 4), CVAL(0, ct->bcb)); + dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 0), + CVAL(ct->rcr, ct->ry)); + dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 1), + CVAL(ct->gy, ct->rcb)); + dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 2), + CVAL(ct->gcb, ct->gcr)); + dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 3), + CVAL(ct->bcr, ct->by)); + dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 4), + CVAL(0, ct->bcb)); + + dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 0), + CVAL(ct->rcr, ct->ry)); + dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 1), + CVAL(ct->gy, ct->rcb)); + dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 2), + CVAL(ct->gcb, ct->gcr)); + dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 3), + CVAL(ct->bcr, ct->by)); + dispc_write_reg(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 4), + CVAL(0, ct->bcb)); #undef CVAL - REG_FLD_MOD(DISPC_VID_ATTRIBUTES(0), ct->full_range, 11, 11); - REG_FLD_MOD(DISPC_VID_ATTRIBUTES(1), ct->full_range, 11, 11); + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(OMAP_DSS_VIDEO1), + ct->full_range, 11, 11); + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(OMAP_DSS_VIDEO2), + ct->full_range, 11, 11); } static void _dispc_set_plane_ba0(enum omap_plane plane, u32 paddr) { - const struct dispc_reg ba0_reg[] = { DISPC_GFX_BA0, - DISPC_VID_BA0(0), - DISPC_VID_BA0(1) }; - - dispc_write_reg(ba0_reg[plane], paddr); + dispc_write_reg(DISPC_OVL_BA0(plane), paddr); } static void _dispc_set_plane_ba1(enum omap_plane plane, u32 paddr) { - const struct dispc_reg ba1_reg[] = { DISPC_GFX_BA1, - DISPC_VID_BA1(0), - DISPC_VID_BA1(1) }; + dispc_write_reg(DISPC_OVL_BA1(plane), paddr); +} - dispc_write_reg(ba1_reg[plane], paddr); +static void _dispc_set_plane_ba0_uv(enum omap_plane plane, u32 paddr) +{ + dispc_write_reg(DISPC_OVL_BA0_UV(plane), paddr); } -static void _dispc_set_plane_pos(enum omap_plane plane, int x, int y) +static void _dispc_set_plane_ba1_uv(enum omap_plane plane, u32 paddr) { - const struct dispc_reg pos_reg[] = { DISPC_GFX_POSITION, - DISPC_VID_POSITION(0), - DISPC_VID_POSITION(1) }; + dispc_write_reg(DISPC_OVL_BA1_UV(plane), paddr); +} +static void _dispc_set_plane_pos(enum omap_plane plane, int x, int y) +{ u32 val = FLD_VAL(y, 26, 16) | FLD_VAL(x, 10, 0); - dispc_write_reg(pos_reg[plane], val); + + dispc_write_reg(DISPC_OVL_POSITION(plane), val); } static void _dispc_set_pic_size(enum omap_plane plane, int width, int height) { - const struct dispc_reg siz_reg[] = { DISPC_GFX_SIZE, - DISPC_VID_PICTURE_SIZE(0), - DISPC_VID_PICTURE_SIZE(1) }; u32 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); - dispc_write_reg(siz_reg[plane], val); + + if (plane == OMAP_DSS_GFX) + dispc_write_reg(DISPC_OVL_SIZE(plane), val); + else + dispc_write_reg(DISPC_OVL_PICTURE_SIZE(plane), val); } static void _dispc_set_vid_size(enum omap_plane plane, int width, int height) { u32 val; - const struct dispc_reg vsi_reg[] = { DISPC_VID_SIZE(0), - DISPC_VID_SIZE(1) }; BUG_ON(plane == OMAP_DSS_GFX); val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); - dispc_write_reg(vsi_reg[plane-1], val); + + dispc_write_reg(DISPC_OVL_SIZE(plane), val); } static void _dispc_set_pre_mult_alpha(enum omap_plane plane, bool enable) @@ -856,7 +811,7 @@ static void _dispc_set_pre_mult_alpha(enum omap_plane plane, bool enable) plane == OMAP_DSS_VIDEO1) return; - REG_FLD_MOD(dispc_reg_att[plane], enable ? 
1 : 0, 28, 28); + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 28, 28); } static void _dispc_setup_global_alpha(enum omap_plane plane, u8 global_alpha) @@ -876,61 +831,93 @@ static void _dispc_setup_global_alpha(enum omap_plane plane, u8 global_alpha) static void _dispc_set_pix_inc(enum omap_plane plane, s32 inc) { - const struct dispc_reg ri_reg[] = { DISPC_GFX_PIXEL_INC, - DISPC_VID_PIXEL_INC(0), - DISPC_VID_PIXEL_INC(1) }; - - dispc_write_reg(ri_reg[plane], inc); + dispc_write_reg(DISPC_OVL_PIXEL_INC(plane), inc); } static void _dispc_set_row_inc(enum omap_plane plane, s32 inc) { - const struct dispc_reg ri_reg[] = { DISPC_GFX_ROW_INC, - DISPC_VID_ROW_INC(0), - DISPC_VID_ROW_INC(1) }; - - dispc_write_reg(ri_reg[plane], inc); + dispc_write_reg(DISPC_OVL_ROW_INC(plane), inc); } static void _dispc_set_color_mode(enum omap_plane plane, enum omap_color_mode color_mode) { u32 m = 0; - - switch (color_mode) { - case OMAP_DSS_COLOR_CLUT1: - m = 0x0; break; - case OMAP_DSS_COLOR_CLUT2: - m = 0x1; break; - case OMAP_DSS_COLOR_CLUT4: - m = 0x2; break; - case OMAP_DSS_COLOR_CLUT8: - m = 0x3; break; - case OMAP_DSS_COLOR_RGB12U: - m = 0x4; break; - case OMAP_DSS_COLOR_ARGB16: - m = 0x5; break; - case OMAP_DSS_COLOR_RGB16: - m = 0x6; break; - case OMAP_DSS_COLOR_RGB24U: - m = 0x8; break; - case OMAP_DSS_COLOR_RGB24P: - m = 0x9; break; - case OMAP_DSS_COLOR_YUV2: - m = 0xa; break; - case OMAP_DSS_COLOR_UYVY: - m = 0xb; break; - case OMAP_DSS_COLOR_ARGB32: - m = 0xc; break; - case OMAP_DSS_COLOR_RGBA32: - m = 0xd; break; - case OMAP_DSS_COLOR_RGBX32: - m = 0xe; break; - default: - BUG(); break; + if (plane != OMAP_DSS_GFX) { + switch (color_mode) { + case OMAP_DSS_COLOR_NV12: + m = 0x0; break; + case OMAP_DSS_COLOR_RGB12U: + m = 0x1; break; + case OMAP_DSS_COLOR_RGBA16: + m = 0x2; break; + case OMAP_DSS_COLOR_RGBX16: + m = 0x4; break; + case OMAP_DSS_COLOR_ARGB16: + m = 0x5; break; + case OMAP_DSS_COLOR_RGB16: + m = 0x6; break; + case OMAP_DSS_COLOR_ARGB16_1555: + m = 0x7; break; + case OMAP_DSS_COLOR_RGB24U: + m = 0x8; break; + case OMAP_DSS_COLOR_RGB24P: + m = 0x9; break; + case OMAP_DSS_COLOR_YUV2: + m = 0xa; break; + case OMAP_DSS_COLOR_UYVY: + m = 0xb; break; + case OMAP_DSS_COLOR_ARGB32: + m = 0xc; break; + case OMAP_DSS_COLOR_RGBA32: + m = 0xd; break; + case OMAP_DSS_COLOR_RGBX32: + m = 0xe; break; + case OMAP_DSS_COLOR_XRGB16_1555: + m = 0xf; break; + default: + BUG(); break; + } + } else { + switch (color_mode) { + case OMAP_DSS_COLOR_CLUT1: + m = 0x0; break; + case OMAP_DSS_COLOR_CLUT2: + m = 0x1; break; + case OMAP_DSS_COLOR_CLUT4: + m = 0x2; break; + case OMAP_DSS_COLOR_CLUT8: + m = 0x3; break; + case OMAP_DSS_COLOR_RGB12U: + m = 0x4; break; + case OMAP_DSS_COLOR_ARGB16: + m = 0x5; break; + case OMAP_DSS_COLOR_RGB16: + m = 0x6; break; + case OMAP_DSS_COLOR_ARGB16_1555: + m = 0x7; break; + case OMAP_DSS_COLOR_RGB24U: + m = 0x8; break; + case OMAP_DSS_COLOR_RGB24P: + m = 0x9; break; + case OMAP_DSS_COLOR_YUV2: + m = 0xa; break; + case OMAP_DSS_COLOR_UYVY: + m = 0xb; break; + case OMAP_DSS_COLOR_ARGB32: + m = 0xc; break; + case OMAP_DSS_COLOR_RGBA32: + m = 0xd; break; + case OMAP_DSS_COLOR_RGBX32: + m = 0xe; break; + case OMAP_DSS_COLOR_XRGB16_1555: + m = 0xf; break; + default: + BUG(); break; + } } - REG_FLD_MOD(dispc_reg_att[plane], m, 4, 1); + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1); } static void _dispc_set_channel_out(enum omap_plane plane, @@ -953,7 +940,7 @@ static void _dispc_set_channel_out(enum omap_plane plane, return; } - val = 
dispc_read_reg(dispc_reg_att[plane]); + val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane)); if (dss_has_feature(FEAT_MGR_LCD2)) { switch (channel) { case OMAP_DSS_CHANNEL_LCD: @@ -977,7 +964,7 @@ static void _dispc_set_channel_out(enum omap_plane plane, } else { val = FLD_MOD(val, channel, shift, shift); } - dispc_write_reg(dispc_reg_att[plane], val); + dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val); } void dispc_set_burst_size(enum omap_plane plane, @@ -1001,9 +988,9 @@ void dispc_set_burst_size(enum omap_plane plane, return; } - val = dispc_read_reg(dispc_reg_att[plane]); + val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane)); val = FLD_MOD(val, burst_size, shift+1, shift); - dispc_write_reg(dispc_reg_att[plane], val); + dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val); enable_clocks(0); } @@ -1028,9 +1015,9 @@ static void _dispc_set_vid_color_conv(enum omap_plane plane, bool enable) BUG_ON(plane == OMAP_DSS_GFX); - val = dispc_read_reg(dispc_reg_att[plane]); + val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane)); val = FLD_MOD(val, enable, 9, 9); - dispc_write_reg(dispc_reg_att[plane], val); + dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val); } void dispc_enable_replication(enum omap_plane plane, bool enable) @@ -1043,7 +1030,7 @@ void dispc_enable_replication(enum omap_plane plane, bool enable) bit = 10; enable_clocks(1); - REG_FLD_MOD(dispc_reg_att[plane], enable, bit, bit); + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, bit, bit); enable_clocks(0); } @@ -1053,7 +1040,7 @@ void dispc_set_lcd_size(enum omap_channel channel, u16 width, u16 height) BUG_ON((width > (1 << 11)) || (height > (1 << 11))); val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); enable_clocks(1); - dispc_write_reg(DISPC_SIZE_LCD(channel), val); + dispc_write_reg(DISPC_SIZE_MGR(channel), val); enable_clocks(0); } @@ -1063,15 +1050,12 @@ void dispc_set_digit_size(u16 width, u16 height) BUG_ON((width > (1 << 11)) || (height > (1 << 11))); val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); enable_clocks(1); - dispc_write_reg(DISPC_SIZE_DIG, val); + dispc_write_reg(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT), val); enable_clocks(0); } static void dispc_read_plane_fifo_sizes(void) { - const struct dispc_reg fsz_reg[] = { DISPC_GFX_FIFO_SIZE_STATUS, - DISPC_VID_FIFO_SIZE_STATUS(0), - DISPC_VID_FIFO_SIZE_STATUS(1) }; u32 size; int plane; u8 start, end; @@ -1081,7 +1065,8 @@ static void dispc_read_plane_fifo_sizes(void) dss_feat_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end); for (plane = 0; plane < ARRAY_SIZE(dispc.fifo_size); ++plane) { - size = FLD_GET(dispc_read_reg(fsz_reg[plane]), start, end); + size = FLD_GET(dispc_read_reg(DISPC_OVL_FIFO_SIZE_STATUS(plane)), + start, end); dispc.fifo_size[plane] = size; } @@ -1095,23 +1080,22 @@ u32 dispc_get_plane_fifo_size(enum omap_plane plane) void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high) { - const struct dispc_reg ftrs_reg[] = { DISPC_GFX_FIFO_THRESHOLD, - DISPC_VID_FIFO_THRESHOLD(0), - DISPC_VID_FIFO_THRESHOLD(1) }; u8 hi_start, hi_end, lo_start, lo_end; + dss_feat_get_reg_field(FEAT_REG_FIFOHIGHTHRESHOLD, &hi_start, &hi_end); + dss_feat_get_reg_field(FEAT_REG_FIFOLOWTHRESHOLD, &lo_start, &lo_end); + enable_clocks(1); DSSDBG("fifo(%d) low/high old %u/%u, new %u/%u\n", plane, - REG_GET(ftrs_reg[plane], 11, 0), - REG_GET(ftrs_reg[plane], 27, 16), + REG_GET(DISPC_OVL_FIFO_THRESHOLD(plane), + lo_start, lo_end), + REG_GET(DISPC_OVL_FIFO_THRESHOLD(plane), + hi_start, hi_end), low, high); - dss_feat_get_reg_field(FEAT_REG_FIFOHIGHTHRESHOLD, 
&hi_start, &hi_end); - dss_feat_get_reg_field(FEAT_REG_FIFOLOWTHRESHOLD, &lo_start, &lo_end); - - dispc_write_reg(ftrs_reg[plane], + dispc_write_reg(DISPC_OVL_FIFO_THRESHOLD(plane), FLD_VAL(high, hi_start, hi_end) | FLD_VAL(low, lo_start, lo_end)); @@ -1128,106 +1112,120 @@ void dispc_enable_fifomerge(bool enable) enable_clocks(0); } -static void _dispc_set_fir(enum omap_plane plane, int hinc, int vinc) +static void _dispc_set_fir(enum omap_plane plane, + int hinc, int vinc, + enum omap_color_component color_comp) { u32 val; - const struct dispc_reg fir_reg[] = { DISPC_VID_FIR(0), - DISPC_VID_FIR(1) }; - u8 hinc_start, hinc_end, vinc_start, vinc_end; - - BUG_ON(plane == OMAP_DSS_GFX); - dss_feat_get_reg_field(FEAT_REG_FIRHINC, &hinc_start, &hinc_end); - dss_feat_get_reg_field(FEAT_REG_FIRVINC, &vinc_start, &vinc_end); + if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y) { + u8 hinc_start, hinc_end, vinc_start, vinc_end; - val = FLD_VAL(vinc, vinc_start, vinc_end) | - FLD_VAL(hinc, hinc_start, hinc_end); + dss_feat_get_reg_field(FEAT_REG_FIRHINC, + &hinc_start, &hinc_end); + dss_feat_get_reg_field(FEAT_REG_FIRVINC, + &vinc_start, &vinc_end); + val = FLD_VAL(vinc, vinc_start, vinc_end) | + FLD_VAL(hinc, hinc_start, hinc_end); - dispc_write_reg(fir_reg[plane-1], val); + dispc_write_reg(DISPC_OVL_FIR(plane), val); + } else { + val = FLD_VAL(vinc, 28, 16) | FLD_VAL(hinc, 12, 0); + dispc_write_reg(DISPC_OVL_FIR2(plane), val); + } } static void _dispc_set_vid_accu0(enum omap_plane plane, int haccu, int vaccu) { u32 val; - const struct dispc_reg ac0_reg[] = { DISPC_VID_ACCU0(0), - DISPC_VID_ACCU0(1) }; u8 hor_start, hor_end, vert_start, vert_end; - BUG_ON(plane == OMAP_DSS_GFX); - dss_feat_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end); dss_feat_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end); val = FLD_VAL(vaccu, vert_start, vert_end) | FLD_VAL(haccu, hor_start, hor_end); - dispc_write_reg(ac0_reg[plane-1], val); + dispc_write_reg(DISPC_OVL_ACCU0(plane), val); } static void _dispc_set_vid_accu1(enum omap_plane plane, int haccu, int vaccu) { u32 val; - const struct dispc_reg ac1_reg[] = { DISPC_VID_ACCU1(0), - DISPC_VID_ACCU1(1) }; u8 hor_start, hor_end, vert_start, vert_end; - BUG_ON(plane == OMAP_DSS_GFX); - dss_feat_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end); dss_feat_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end); val = FLD_VAL(vaccu, vert_start, vert_end) | FLD_VAL(haccu, hor_start, hor_end); - dispc_write_reg(ac1_reg[plane-1], val); + dispc_write_reg(DISPC_OVL_ACCU1(plane), val); +} + +static void _dispc_set_vid_accu2_0(enum omap_plane plane, int haccu, int vaccu) +{ + u32 val; + + val = FLD_VAL(vaccu, 26, 16) | FLD_VAL(haccu, 10, 0); + dispc_write_reg(DISPC_OVL_ACCU2_0(plane), val); } +static void _dispc_set_vid_accu2_1(enum omap_plane plane, int haccu, int vaccu) +{ + u32 val; -static void _dispc_set_scaling(enum omap_plane plane, + val = FLD_VAL(vaccu, 26, 16) | FLD_VAL(haccu, 10, 0); + dispc_write_reg(DISPC_OVL_ACCU2_1(plane), val); +} + +static void _dispc_set_scale_param(enum omap_plane plane, u16 orig_width, u16 orig_height, u16 out_width, u16 out_height, - bool ilace, bool five_taps, - bool fieldmode) + bool five_taps, u8 rotation, + enum omap_color_component color_comp) { - int fir_hinc; - int fir_vinc; + int fir_hinc, fir_vinc; int hscaleup, vscaleup; - int accu0 = 0; - int accu1 = 0; - u32 l; - - BUG_ON(plane == OMAP_DSS_GFX); hscaleup = orig_width <= out_width; vscaleup = orig_height <= out_height; - 
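For orientation, the FIR increments computed just below are plain ratios scaled by 1024: an increment of 1024 means no resizing, larger values decimate, smaller values interpolate. A worked example with illustrative numbers, not taken from the patch:

	/* 1920x1080 source scaled down to a 1280x720 window */
	u16 orig_width = 1920, orig_height = 1080;
	u16 out_width = 1280, out_height = 720;

	int fir_hinc = 1024 * orig_width / out_width;	/* 1536: 1.5x horizontal decimation */
	int fir_vinc = 1024 * orig_height / out_height;	/* 1536: 1.5x vertical decimation */

	/* RGB/Y increments go to DISPC_OVL_FIR(plane); with NV12 the chroma
	 * pass repeats the calculation on the halved UV dimensions and writes
	 * DISPC_OVL_FIR2(plane) instead. */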
_dispc_set_scale_coef(plane, hscaleup, vscaleup, five_taps); + _dispc_set_scale_coef(plane, hscaleup, vscaleup, five_taps, color_comp); - if (!orig_width || orig_width == out_width) - fir_hinc = 0; - else - fir_hinc = 1024 * orig_width / out_width; + fir_hinc = 1024 * orig_width / out_width; + fir_vinc = 1024 * orig_height / out_height; - if (!orig_height || orig_height == out_height) - fir_vinc = 0; - else - fir_vinc = 1024 * orig_height / out_height; + _dispc_set_fir(plane, fir_hinc, fir_vinc, color_comp); +} - _dispc_set_fir(plane, fir_hinc, fir_vinc); +static void _dispc_set_scaling_common(enum omap_plane plane, + u16 orig_width, u16 orig_height, + u16 out_width, u16 out_height, + bool ilace, bool five_taps, + bool fieldmode, enum omap_color_mode color_mode, + u8 rotation) +{ + int accu0 = 0; + int accu1 = 0; + u32 l; - l = dispc_read_reg(dispc_reg_att[plane]); + _dispc_set_scale_param(plane, orig_width, orig_height, + out_width, out_height, five_taps, + rotation, DISPC_COLOR_COMPONENT_RGB_Y); + l = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane)); /* RESIZEENABLE and VERTICALTAPS */ l &= ~((0x3 << 5) | (0x1 << 21)); - l |= fir_hinc ? (1 << 5) : 0; - l |= fir_vinc ? (1 << 6) : 0; + l |= (orig_width != out_width) ? (1 << 5) : 0; + l |= (orig_height != out_height) ? (1 << 6) : 0; l |= five_taps ? (1 << 21) : 0; /* VRESIZECONF and HRESIZECONF */ if (dss_has_feature(FEAT_RESIZECONF)) { l &= ~(0x3 << 7); - l |= hscaleup ? 0 : (1 << 7); - l |= vscaleup ? 0 : (1 << 8); + l |= (orig_width <= out_width) ? 0 : (1 << 7); + l |= (orig_height <= out_height) ? 0 : (1 << 8); } /* LINEBUFFERSPLIT */ @@ -1236,7 +1234,7 @@ static void _dispc_set_scaling(enum omap_plane plane, l |= five_taps ? (1 << 22) : 0; } - dispc_write_reg(dispc_reg_att[plane], l); + dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), l); /* * field 0 = even field = bottom field @@ -1244,7 +1242,7 @@ static void _dispc_set_scaling(enum omap_plane plane, */ if (ilace && !fieldmode) { accu1 = 0; - accu0 = (fir_vinc / 2) & 0x3ff; + accu0 = ((1024 * orig_height / out_height) / 2) & 0x3ff; if (accu0 >= 1024/2) { accu1 = 1024/2; accu0 -= accu1; @@ -1255,6 +1253,93 @@ static void _dispc_set_scaling(enum omap_plane plane, _dispc_set_vid_accu1(plane, 0, accu1); } +static void _dispc_set_scaling_uv(enum omap_plane plane, + u16 orig_width, u16 orig_height, + u16 out_width, u16 out_height, + bool ilace, bool five_taps, + bool fieldmode, enum omap_color_mode color_mode, + u8 rotation) +{ + int scale_x = out_width != orig_width; + int scale_y = out_height != orig_height; + + if (!dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) + return; + if ((color_mode != OMAP_DSS_COLOR_YUV2 && + color_mode != OMAP_DSS_COLOR_UYVY && + color_mode != OMAP_DSS_COLOR_NV12)) { + /* reset chroma resampling for RGB formats */ + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 8, 8); + return; + } + switch (color_mode) { + case OMAP_DSS_COLOR_NV12: + /* UV is subsampled by 2 vertically*/ + orig_height >>= 1; + /* UV is subsampled by 2 horz.*/ + orig_width >>= 1; + break; + case OMAP_DSS_COLOR_YUV2: + case OMAP_DSS_COLOR_UYVY: + /*For YUV422 with 90/270 rotation, + *we don't upsample chroma + */ + if (rotation == OMAP_DSS_ROT_0 || + rotation == OMAP_DSS_ROT_180) + /* UV is subsampled by 2 hrz*/ + orig_width >>= 1; + /* must use FIR for YUV422 if rotated */ + if (rotation != OMAP_DSS_ROT_0) + scale_x = scale_y = true; + break; + default: + BUG(); + } + + if (out_width != orig_width) + scale_x = true; + if (out_height != orig_height) + scale_y = true; + + _dispc_set_scale_param(plane, 
orig_width, orig_height, + out_width, out_height, five_taps, + rotation, DISPC_COLOR_COMPONENT_UV); + + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), + (scale_x || scale_y) ? 1 : 0, 8, 8); + /* set H scaling */ + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_x ? 1 : 0, 5, 5); + /* set V scaling */ + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_y ? 1 : 0, 6, 6); + + _dispc_set_vid_accu2_0(plane, 0x80, 0); + _dispc_set_vid_accu2_1(plane, 0x80, 0); +} + +static void _dispc_set_scaling(enum omap_plane plane, + u16 orig_width, u16 orig_height, + u16 out_width, u16 out_height, + bool ilace, bool five_taps, + bool fieldmode, enum omap_color_mode color_mode, + u8 rotation) +{ + BUG_ON(plane == OMAP_DSS_GFX); + + _dispc_set_scaling_common(plane, + orig_width, orig_height, + out_width, out_height, + ilace, five_taps, + fieldmode, color_mode, + rotation); + + _dispc_set_scaling_uv(plane, + orig_width, orig_height, + out_width, out_height, + ilace, five_taps, + fieldmode, color_mode, + rotation); +} + static void _dispc_set_rotation_attrs(enum omap_plane plane, u8 rotation, bool mirroring, enum omap_color_mode color_mode) { @@ -1302,9 +1387,10 @@ static void _dispc_set_rotation_attrs(enum omap_plane plane, u8 rotation, row_repeat = false; } - REG_FLD_MOD(dispc_reg_att[plane], vidrot, 13, 12); + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), vidrot, 13, 12); if (dss_has_feature(FEAT_ROWREPEATENABLE)) - REG_FLD_MOD(dispc_reg_att[plane], row_repeat ? 1 : 0, 18, 18); + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), + row_repeat ? 1 : 0, 18, 18); } static int color_mode_to_bpp(enum omap_color_mode color_mode) @@ -1317,12 +1403,17 @@ static int color_mode_to_bpp(enum omap_color_mode color_mode) case OMAP_DSS_COLOR_CLUT4: return 4; case OMAP_DSS_COLOR_CLUT8: + case OMAP_DSS_COLOR_NV12: return 8; case OMAP_DSS_COLOR_RGB12U: case OMAP_DSS_COLOR_RGB16: case OMAP_DSS_COLOR_ARGB16: case OMAP_DSS_COLOR_YUV2: case OMAP_DSS_COLOR_UYVY: + case OMAP_DSS_COLOR_RGBA16: + case OMAP_DSS_COLOR_RGBX16: + case OMAP_DSS_COLOR_ARGB16_1555: + case OMAP_DSS_COLOR_XRGB16_1555: return 16; case OMAP_DSS_COLOR_RGB24P: return 24; @@ -1655,7 +1746,7 @@ static int _dispc_setup_plane(enum omap_plane plane, enum omap_dss_rotation_type rotation_type, u8 rotation, int mirror, u8 global_alpha, u8 pre_mult_alpha, - enum omap_channel channel) + enum omap_channel channel, u32 puv_addr) { const int maxdownscale = cpu_is_omap34xx() ? 4 : 2; bool five_taps = 0; @@ -1704,7 +1795,8 @@ static int _dispc_setup_plane(enum omap_plane plane, return -EINVAL; if (color_mode == OMAP_DSS_COLOR_YUV2 || - color_mode == OMAP_DSS_COLOR_UYVY) + color_mode == OMAP_DSS_COLOR_UYVY || + color_mode == OMAP_DSS_COLOR_NV12) cconv = 1; /* Must use 5-tap filter? 
*/ @@ -1778,6 +1870,12 @@ static int _dispc_setup_plane(enum omap_plane plane, _dispc_set_plane_ba0(plane, paddr + offset0); _dispc_set_plane_ba1(plane, paddr + offset1); + if (OMAP_DSS_COLOR_NV12 == color_mode) { + _dispc_set_plane_ba0_uv(plane, puv_addr + offset0); + _dispc_set_plane_ba1_uv(plane, puv_addr + offset1); + } + + _dispc_set_row_inc(plane, row_inc); _dispc_set_pix_inc(plane, pix_inc); @@ -1791,7 +1889,8 @@ static int _dispc_setup_plane(enum omap_plane plane, if (plane != OMAP_DSS_GFX) { _dispc_set_scaling(plane, width, height, out_width, out_height, - ilace, five_taps, fieldmode); + ilace, five_taps, fieldmode, + color_mode, rotation); _dispc_set_vid_size(plane, out_width, out_height); _dispc_set_vid_color_conv(plane, cconv); } @@ -1806,7 +1905,7 @@ static int _dispc_setup_plane(enum omap_plane plane, static void _dispc_enable_plane(enum omap_plane plane, bool enable) { - REG_FLD_MOD(dispc_reg_att[plane], enable ? 1 : 0, 0, 0); + REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 0, 0); } static void dispc_disable_isr(void *data, u32 mask) @@ -2353,14 +2452,20 @@ static void dispc_get_lcd_divisor(enum omap_channel channel, int *lck_div, unsigned long dispc_fclk_rate(void) { + struct platform_device *dsidev; unsigned long r = 0; switch (dss_get_dispc_clk_source()) { - case DSS_CLK_SRC_FCK: + case OMAP_DSS_CLK_SRC_FCK: r = dss_clk_get_rate(DSS_CLK_FCK); break; - case DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: - r = dsi_get_pll_hsdiv_dispc_rate(); + case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: + dsidev = dsi_get_dsidev_from_id(0); + r = dsi_get_pll_hsdiv_dispc_rate(dsidev); + break; + case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: + dsidev = dsi_get_dsidev_from_id(1); + r = dsi_get_pll_hsdiv_dispc_rate(dsidev); break; default: BUG(); @@ -2371,6 +2476,7 @@ unsigned long dispc_fclk_rate(void) unsigned long dispc_lclk_rate(enum omap_channel channel) { + struct platform_device *dsidev; int lcd; unsigned long r; u32 l; @@ -2380,11 +2486,16 @@ unsigned long dispc_lclk_rate(enum omap_channel channel) lcd = FLD_GET(l, 23, 16); switch (dss_get_lcd_clk_source(channel)) { - case DSS_CLK_SRC_FCK: + case OMAP_DSS_CLK_SRC_FCK: r = dss_clk_get_rate(DSS_CLK_FCK); break; - case DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: - r = dsi_get_pll_hsdiv_dispc_rate(); + case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: + dsidev = dsi_get_dsidev_from_id(0); + r = dsi_get_pll_hsdiv_dispc_rate(dsidev); + break; + case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: + dsidev = dsi_get_dsidev_from_id(1); + r = dsi_get_pll_hsdiv_dispc_rate(dsidev); break; default: BUG(); @@ -2412,8 +2523,8 @@ void dispc_dump_clocks(struct seq_file *s) { int lcd, pcd; u32 l; - enum dss_clk_source dispc_clk_src = dss_get_dispc_clk_source(); - enum dss_clk_source lcd_clk_src; + enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source(); + enum omap_dss_clk_source lcd_clk_src; enable_clocks(1); @@ -2516,7 +2627,7 @@ void dispc_dump_irqs(struct seq_file *s) void dispc_dump_regs(struct seq_file *s) { -#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dispc_read_reg(r)) +#define DUMPREG(r) seq_printf(s, "%-50s %08x\n", #r, dispc_read_reg(r)) dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); @@ -2528,152 +2639,227 @@ void dispc_dump_regs(struct seq_file *s) DUMPREG(DISPC_CONTROL); DUMPREG(DISPC_CONFIG); DUMPREG(DISPC_CAPABLE); - DUMPREG(DISPC_DEFAULT_COLOR(0)); - DUMPREG(DISPC_DEFAULT_COLOR(1)); - DUMPREG(DISPC_TRANS_COLOR(0)); - DUMPREG(DISPC_TRANS_COLOR(1)); + DUMPREG(DISPC_DEFAULT_COLOR(OMAP_DSS_CHANNEL_LCD)); + 
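A note on the new puv_addr argument used a few hunks above: NV12 is a two-plane format, a full-resolution Y plane followed by an interleaved CbCr plane subsampled by two in both directions, which is why the UV scaling path halves orig_width and orig_height. A hedged sketch of how a caller could derive the chroma address for a tightly packed buffer; the layout assumptions are in the comments and the helper name is made up:

	/* Assumes a contiguous NV12 buffer with no row padding: a Y plane of
	 * width * height bytes, then CbCr at half resolution in both axes but
	 * two bytes per sample, i.e. width * height / 2 bytes. */
	static u32 example_nv12_uv_addr(u32 paddr, u16 width, u16 height)
	{
		return paddr + (u32)width * height;	/* chroma follows luma */
	}

	/* e.g. 1920x1080: Y plane is 2073600 bytes, so puv_addr = paddr + 0x1FA400 */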
DUMPREG(DISPC_DEFAULT_COLOR(OMAP_DSS_CHANNEL_DIGIT)); + DUMPREG(DISPC_TRANS_COLOR(OMAP_DSS_CHANNEL_LCD)); + DUMPREG(DISPC_TRANS_COLOR(OMAP_DSS_CHANNEL_DIGIT)); DUMPREG(DISPC_LINE_STATUS); DUMPREG(DISPC_LINE_NUMBER); - DUMPREG(DISPC_TIMING_H(0)); - DUMPREG(DISPC_TIMING_V(0)); - DUMPREG(DISPC_POL_FREQ(0)); - DUMPREG(DISPC_DIVISORo(0)); + DUMPREG(DISPC_TIMING_H(OMAP_DSS_CHANNEL_LCD)); + DUMPREG(DISPC_TIMING_V(OMAP_DSS_CHANNEL_LCD)); + DUMPREG(DISPC_POL_FREQ(OMAP_DSS_CHANNEL_LCD)); + DUMPREG(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD)); DUMPREG(DISPC_GLOBAL_ALPHA); - DUMPREG(DISPC_SIZE_DIG); - DUMPREG(DISPC_SIZE_LCD(0)); + DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT)); + DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_LCD)); if (dss_has_feature(FEAT_MGR_LCD2)) { DUMPREG(DISPC_CONTROL2); DUMPREG(DISPC_CONFIG2); - DUMPREG(DISPC_DEFAULT_COLOR(2)); - DUMPREG(DISPC_TRANS_COLOR(2)); - DUMPREG(DISPC_TIMING_H(2)); - DUMPREG(DISPC_TIMING_V(2)); - DUMPREG(DISPC_POL_FREQ(2)); - DUMPREG(DISPC_DIVISORo(2)); - DUMPREG(DISPC_SIZE_LCD(2)); - } - - DUMPREG(DISPC_GFX_BA0); - DUMPREG(DISPC_GFX_BA1); - DUMPREG(DISPC_GFX_POSITION); - DUMPREG(DISPC_GFX_SIZE); - DUMPREG(DISPC_GFX_ATTRIBUTES); - DUMPREG(DISPC_GFX_FIFO_THRESHOLD); - DUMPREG(DISPC_GFX_FIFO_SIZE_STATUS); - DUMPREG(DISPC_GFX_ROW_INC); - DUMPREG(DISPC_GFX_PIXEL_INC); - DUMPREG(DISPC_GFX_WINDOW_SKIP); - DUMPREG(DISPC_GFX_TABLE_BA); - - DUMPREG(DISPC_DATA_CYCLE1(0)); - DUMPREG(DISPC_DATA_CYCLE2(0)); - DUMPREG(DISPC_DATA_CYCLE3(0)); - - DUMPREG(DISPC_CPR_COEF_R(0)); - DUMPREG(DISPC_CPR_COEF_G(0)); - DUMPREG(DISPC_CPR_COEF_B(0)); + DUMPREG(DISPC_DEFAULT_COLOR(OMAP_DSS_CHANNEL_LCD2)); + DUMPREG(DISPC_TRANS_COLOR(OMAP_DSS_CHANNEL_LCD2)); + DUMPREG(DISPC_TIMING_H(OMAP_DSS_CHANNEL_LCD2)); + DUMPREG(DISPC_TIMING_V(OMAP_DSS_CHANNEL_LCD2)); + DUMPREG(DISPC_POL_FREQ(OMAP_DSS_CHANNEL_LCD2)); + DUMPREG(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD2)); + DUMPREG(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_LCD2)); + } + + DUMPREG(DISPC_OVL_BA0(OMAP_DSS_GFX)); + DUMPREG(DISPC_OVL_BA1(OMAP_DSS_GFX)); + DUMPREG(DISPC_OVL_POSITION(OMAP_DSS_GFX)); + DUMPREG(DISPC_OVL_SIZE(OMAP_DSS_GFX)); + DUMPREG(DISPC_OVL_ATTRIBUTES(OMAP_DSS_GFX)); + DUMPREG(DISPC_OVL_FIFO_THRESHOLD(OMAP_DSS_GFX)); + DUMPREG(DISPC_OVL_FIFO_SIZE_STATUS(OMAP_DSS_GFX)); + DUMPREG(DISPC_OVL_ROW_INC(OMAP_DSS_GFX)); + DUMPREG(DISPC_OVL_PIXEL_INC(OMAP_DSS_GFX)); + DUMPREG(DISPC_OVL_WINDOW_SKIP(OMAP_DSS_GFX)); + DUMPREG(DISPC_OVL_TABLE_BA(OMAP_DSS_GFX)); + + DUMPREG(DISPC_DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD)); + DUMPREG(DISPC_DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD)); + DUMPREG(DISPC_DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD)); + + DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD)); + DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD)); + DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD)); if (dss_has_feature(FEAT_MGR_LCD2)) { - DUMPREG(DISPC_DATA_CYCLE1(2)); - DUMPREG(DISPC_DATA_CYCLE2(2)); - DUMPREG(DISPC_DATA_CYCLE3(2)); - - DUMPREG(DISPC_CPR_COEF_R(2)); - DUMPREG(DISPC_CPR_COEF_G(2)); - DUMPREG(DISPC_CPR_COEF_B(2)); - } - - DUMPREG(DISPC_GFX_PRELOAD); - - DUMPREG(DISPC_VID_BA0(0)); - DUMPREG(DISPC_VID_BA1(0)); - DUMPREG(DISPC_VID_POSITION(0)); - DUMPREG(DISPC_VID_SIZE(0)); - DUMPREG(DISPC_VID_ATTRIBUTES(0)); - DUMPREG(DISPC_VID_FIFO_THRESHOLD(0)); - DUMPREG(DISPC_VID_FIFO_SIZE_STATUS(0)); - DUMPREG(DISPC_VID_ROW_INC(0)); - DUMPREG(DISPC_VID_PIXEL_INC(0)); - DUMPREG(DISPC_VID_FIR(0)); - DUMPREG(DISPC_VID_PICTURE_SIZE(0)); - DUMPREG(DISPC_VID_ACCU0(0)); - DUMPREG(DISPC_VID_ACCU1(0)); - - DUMPREG(DISPC_VID_BA0(1)); - DUMPREG(DISPC_VID_BA1(1)); - DUMPREG(DISPC_VID_POSITION(1)); 
- DUMPREG(DISPC_VID_SIZE(1)); - DUMPREG(DISPC_VID_ATTRIBUTES(1)); - DUMPREG(DISPC_VID_FIFO_THRESHOLD(1)); - DUMPREG(DISPC_VID_FIFO_SIZE_STATUS(1)); - DUMPREG(DISPC_VID_ROW_INC(1)); - DUMPREG(DISPC_VID_PIXEL_INC(1)); - DUMPREG(DISPC_VID_FIR(1)); - DUMPREG(DISPC_VID_PICTURE_SIZE(1)); - DUMPREG(DISPC_VID_ACCU0(1)); - DUMPREG(DISPC_VID_ACCU1(1)); - - DUMPREG(DISPC_VID_FIR_COEF_H(0, 0)); - DUMPREG(DISPC_VID_FIR_COEF_H(0, 1)); - DUMPREG(DISPC_VID_FIR_COEF_H(0, 2)); - DUMPREG(DISPC_VID_FIR_COEF_H(0, 3)); - DUMPREG(DISPC_VID_FIR_COEF_H(0, 4)); - DUMPREG(DISPC_VID_FIR_COEF_H(0, 5)); - DUMPREG(DISPC_VID_FIR_COEF_H(0, 6)); - DUMPREG(DISPC_VID_FIR_COEF_H(0, 7)); - DUMPREG(DISPC_VID_FIR_COEF_HV(0, 0)); - DUMPREG(DISPC_VID_FIR_COEF_HV(0, 1)); - DUMPREG(DISPC_VID_FIR_COEF_HV(0, 2)); - DUMPREG(DISPC_VID_FIR_COEF_HV(0, 3)); - DUMPREG(DISPC_VID_FIR_COEF_HV(0, 4)); - DUMPREG(DISPC_VID_FIR_COEF_HV(0, 5)); - DUMPREG(DISPC_VID_FIR_COEF_HV(0, 6)); - DUMPREG(DISPC_VID_FIR_COEF_HV(0, 7)); - DUMPREG(DISPC_VID_CONV_COEF(0, 0)); - DUMPREG(DISPC_VID_CONV_COEF(0, 1)); - DUMPREG(DISPC_VID_CONV_COEF(0, 2)); - DUMPREG(DISPC_VID_CONV_COEF(0, 3)); - DUMPREG(DISPC_VID_CONV_COEF(0, 4)); - DUMPREG(DISPC_VID_FIR_COEF_V(0, 0)); - DUMPREG(DISPC_VID_FIR_COEF_V(0, 1)); - DUMPREG(DISPC_VID_FIR_COEF_V(0, 2)); - DUMPREG(DISPC_VID_FIR_COEF_V(0, 3)); - DUMPREG(DISPC_VID_FIR_COEF_V(0, 4)); - DUMPREG(DISPC_VID_FIR_COEF_V(0, 5)); - DUMPREG(DISPC_VID_FIR_COEF_V(0, 6)); - DUMPREG(DISPC_VID_FIR_COEF_V(0, 7)); - - DUMPREG(DISPC_VID_FIR_COEF_H(1, 0)); - DUMPREG(DISPC_VID_FIR_COEF_H(1, 1)); - DUMPREG(DISPC_VID_FIR_COEF_H(1, 2)); - DUMPREG(DISPC_VID_FIR_COEF_H(1, 3)); - DUMPREG(DISPC_VID_FIR_COEF_H(1, 4)); - DUMPREG(DISPC_VID_FIR_COEF_H(1, 5)); - DUMPREG(DISPC_VID_FIR_COEF_H(1, 6)); - DUMPREG(DISPC_VID_FIR_COEF_H(1, 7)); - DUMPREG(DISPC_VID_FIR_COEF_HV(1, 0)); - DUMPREG(DISPC_VID_FIR_COEF_HV(1, 1)); - DUMPREG(DISPC_VID_FIR_COEF_HV(1, 2)); - DUMPREG(DISPC_VID_FIR_COEF_HV(1, 3)); - DUMPREG(DISPC_VID_FIR_COEF_HV(1, 4)); - DUMPREG(DISPC_VID_FIR_COEF_HV(1, 5)); - DUMPREG(DISPC_VID_FIR_COEF_HV(1, 6)); - DUMPREG(DISPC_VID_FIR_COEF_HV(1, 7)); - DUMPREG(DISPC_VID_CONV_COEF(1, 0)); - DUMPREG(DISPC_VID_CONV_COEF(1, 1)); - DUMPREG(DISPC_VID_CONV_COEF(1, 2)); - DUMPREG(DISPC_VID_CONV_COEF(1, 3)); - DUMPREG(DISPC_VID_CONV_COEF(1, 4)); - DUMPREG(DISPC_VID_FIR_COEF_V(1, 0)); - DUMPREG(DISPC_VID_FIR_COEF_V(1, 1)); - DUMPREG(DISPC_VID_FIR_COEF_V(1, 2)); - DUMPREG(DISPC_VID_FIR_COEF_V(1, 3)); - DUMPREG(DISPC_VID_FIR_COEF_V(1, 4)); - DUMPREG(DISPC_VID_FIR_COEF_V(1, 5)); - DUMPREG(DISPC_VID_FIR_COEF_V(1, 6)); - DUMPREG(DISPC_VID_FIR_COEF_V(1, 7)); - - DUMPREG(DISPC_VID_PRELOAD(0)); - DUMPREG(DISPC_VID_PRELOAD(1)); + DUMPREG(DISPC_DATA_CYCLE1(OMAP_DSS_CHANNEL_LCD2)); + DUMPREG(DISPC_DATA_CYCLE2(OMAP_DSS_CHANNEL_LCD2)); + DUMPREG(DISPC_DATA_CYCLE3(OMAP_DSS_CHANNEL_LCD2)); + + DUMPREG(DISPC_CPR_COEF_R(OMAP_DSS_CHANNEL_LCD2)); + DUMPREG(DISPC_CPR_COEF_G(OMAP_DSS_CHANNEL_LCD2)); + DUMPREG(DISPC_CPR_COEF_B(OMAP_DSS_CHANNEL_LCD2)); + } + + DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_GFX)); + + DUMPREG(DISPC_OVL_BA0(OMAP_DSS_VIDEO1)); + DUMPREG(DISPC_OVL_BA1(OMAP_DSS_VIDEO1)); + DUMPREG(DISPC_OVL_POSITION(OMAP_DSS_VIDEO1)); + DUMPREG(DISPC_OVL_SIZE(OMAP_DSS_VIDEO1)); + DUMPREG(DISPC_OVL_ATTRIBUTES(OMAP_DSS_VIDEO1)); + DUMPREG(DISPC_OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO1)); + DUMPREG(DISPC_OVL_FIFO_SIZE_STATUS(OMAP_DSS_VIDEO1)); + DUMPREG(DISPC_OVL_ROW_INC(OMAP_DSS_VIDEO1)); + DUMPREG(DISPC_OVL_PIXEL_INC(OMAP_DSS_VIDEO1)); + DUMPREG(DISPC_OVL_FIR(OMAP_DSS_VIDEO1)); + 
DUMPREG(DISPC_OVL_PICTURE_SIZE(OMAP_DSS_VIDEO1)); + DUMPREG(DISPC_OVL_ACCU0(OMAP_DSS_VIDEO1)); + DUMPREG(DISPC_OVL_ACCU1(OMAP_DSS_VIDEO1)); + + DUMPREG(DISPC_OVL_BA0(OMAP_DSS_VIDEO2)); + DUMPREG(DISPC_OVL_BA1(OMAP_DSS_VIDEO2)); + DUMPREG(DISPC_OVL_POSITION(OMAP_DSS_VIDEO2)); + DUMPREG(DISPC_OVL_SIZE(OMAP_DSS_VIDEO2)); + DUMPREG(DISPC_OVL_ATTRIBUTES(OMAP_DSS_VIDEO2)); + DUMPREG(DISPC_OVL_FIFO_THRESHOLD(OMAP_DSS_VIDEO2)); + DUMPREG(DISPC_OVL_FIFO_SIZE_STATUS(OMAP_DSS_VIDEO2)); + DUMPREG(DISPC_OVL_ROW_INC(OMAP_DSS_VIDEO2)); + DUMPREG(DISPC_OVL_PIXEL_INC(OMAP_DSS_VIDEO2)); + DUMPREG(DISPC_OVL_FIR(OMAP_DSS_VIDEO2)); + DUMPREG(DISPC_OVL_PICTURE_SIZE(OMAP_DSS_VIDEO2)); + DUMPREG(DISPC_OVL_ACCU0(OMAP_DSS_VIDEO2)); + DUMPREG(DISPC_OVL_ACCU1(OMAP_DSS_VIDEO2)); + + DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 0)); + DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 1)); + DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 2)); + DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 3)); + DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 4)); + DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 5)); + DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 6)); + DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO1, 7)); + DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 0)); + DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 1)); + DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 2)); + DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 3)); + DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 4)); + DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 5)); + DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 6)); + DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO1, 7)); + DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 0)); + DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 1)); + DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 2)); + DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 3)); + DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO1, 4)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 0)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 1)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 2)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 3)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 4)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 5)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 6)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO1, 7)); + + if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { + DUMPREG(DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO1)); + DUMPREG(DISPC_OVL_BA1_UV(OMAP_DSS_VIDEO1)); + DUMPREG(DISPC_OVL_FIR2(OMAP_DSS_VIDEO1)); + DUMPREG(DISPC_OVL_ACCU2_0(OMAP_DSS_VIDEO1)); + DUMPREG(DISPC_OVL_ACCU2_1(OMAP_DSS_VIDEO1)); + + DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 0)); + DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 1)); + DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 2)); + DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 3)); + DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 4)); + DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 5)); + DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 6)); + DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO1, 7)); + + DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 0)); + DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 1)); + DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 2)); + DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 3)); + DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 4)); + DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 5)); + DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 6)); + DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO1, 7)); + + DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 0)); + 
DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 1)); + DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 2)); + DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 3)); + DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 4)); + DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 5)); + DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 6)); + DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO1, 7)); + } + if (dss_has_feature(FEAT_ATTR2)) + DUMPREG(DISPC_OVL_ATTRIBUTES2(OMAP_DSS_VIDEO1)); + + + DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 0)); + DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 1)); + DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 2)); + DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 3)); + DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 4)); + DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 5)); + DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 6)); + DUMPREG(DISPC_OVL_FIR_COEF_H(OMAP_DSS_VIDEO2, 7)); + DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 0)); + DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 1)); + DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 2)); + DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 3)); + DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 4)); + DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 5)); + DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 6)); + DUMPREG(DISPC_OVL_FIR_COEF_HV(OMAP_DSS_VIDEO2, 7)); + DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 0)); + DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 1)); + DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 2)); + DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 3)); + DUMPREG(DISPC_OVL_CONV_COEF(OMAP_DSS_VIDEO2, 4)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 0)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 1)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 2)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 3)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 4)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 5)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 6)); + DUMPREG(DISPC_OVL_FIR_COEF_V(OMAP_DSS_VIDEO2, 7)); + + if (dss_has_feature(FEAT_HANDLE_UV_SEPARATE)) { + DUMPREG(DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO2)); + DUMPREG(DISPC_OVL_BA1_UV(OMAP_DSS_VIDEO2)); + DUMPREG(DISPC_OVL_FIR2(OMAP_DSS_VIDEO2)); + DUMPREG(DISPC_OVL_ACCU2_0(OMAP_DSS_VIDEO2)); + DUMPREG(DISPC_OVL_ACCU2_1(OMAP_DSS_VIDEO2)); + + DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 0)); + DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 1)); + DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 2)); + DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 3)); + DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 4)); + DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 5)); + DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 6)); + DUMPREG(DISPC_OVL_FIR_COEF_H2(OMAP_DSS_VIDEO2, 7)); + + DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 0)); + DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 1)); + DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 2)); + DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 3)); + DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 4)); + DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 5)); + DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 6)); + DUMPREG(DISPC_OVL_FIR_COEF_HV2(OMAP_DSS_VIDEO2, 7)); + + DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 0)); + DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 1)); + DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 2)); + DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 3)); + DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 4)); + DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 5)); + DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 6)); + 
DUMPREG(DISPC_OVL_FIR_COEF_V2(OMAP_DSS_VIDEO2, 7)); + } + if (dss_has_feature(FEAT_ATTR2)) + DUMPREG(DISPC_OVL_ATTRIBUTES2(OMAP_DSS_VIDEO2)); + + DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO1)); + DUMPREG(DISPC_OVL_PRELOAD(OMAP_DSS_VIDEO2)); dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); #undef DUMPREG @@ -3388,11 +3574,12 @@ int dispc_setup_plane(enum omap_plane plane, bool ilace, enum omap_dss_rotation_type rotation_type, u8 rotation, bool mirror, u8 global_alpha, - u8 pre_mult_alpha, enum omap_channel channel) + u8 pre_mult_alpha, enum omap_channel channel, + u32 puv_addr) { int r = 0; - DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d,%d, %dx%d -> " + DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d, %d, %dx%d -> " "%dx%d, ilace %d, cmode %x, rot %d, mir %d chan %d\n", plane, paddr, screen_width, pos_x, pos_y, width, height, @@ -3411,7 +3598,8 @@ int dispc_setup_plane(enum omap_plane plane, rotation_type, rotation, mirror, global_alpha, - pre_mult_alpha, channel); + pre_mult_alpha, + channel, puv_addr); enable_clocks(0); diff --git a/drivers/video/omap2/dss/dispc.h b/drivers/video/omap2/dss/dispc.h new file mode 100644 index 00000000000..6c9ee0a0efb --- /dev/null +++ b/drivers/video/omap2/dss/dispc.h @@ -0,0 +1,691 @@ +/* + * linux/drivers/video/omap2/dss/dispc.h + * + * Copyright (C) 2011 Texas Instruments + * Author: Archit Taneja <archit@ti.com> + * + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef __OMAP2_DISPC_REG_H +#define __OMAP2_DISPC_REG_H + +/* DISPC common registers */ +#define DISPC_REVISION 0x0000 +#define DISPC_SYSCONFIG 0x0010 +#define DISPC_SYSSTATUS 0x0014 +#define DISPC_IRQSTATUS 0x0018 +#define DISPC_IRQENABLE 0x001C +#define DISPC_CONTROL 0x0040 +#define DISPC_CONFIG 0x0044 +#define DISPC_CAPABLE 0x0048 +#define DISPC_LINE_STATUS 0x005C +#define DISPC_LINE_NUMBER 0x0060 +#define DISPC_GLOBAL_ALPHA 0x0074 +#define DISPC_CONTROL2 0x0238 +#define DISPC_CONFIG2 0x0620 +#define DISPC_DIVISOR 0x0804 + +/* DISPC overlay registers */ +#define DISPC_OVL_BA0(n) (DISPC_OVL_BASE(n) + \ + DISPC_BA0_OFFSET(n)) +#define DISPC_OVL_BA1(n) (DISPC_OVL_BASE(n) + \ + DISPC_BA1_OFFSET(n)) +#define DISPC_OVL_BA0_UV(n) (DISPC_OVL_BASE(n) + \ + DISPC_BA0_UV_OFFSET(n)) +#define DISPC_OVL_BA1_UV(n) (DISPC_OVL_BASE(n) + \ + DISPC_BA1_UV_OFFSET(n)) +#define DISPC_OVL_POSITION(n) (DISPC_OVL_BASE(n) + \ + DISPC_POS_OFFSET(n)) +#define DISPC_OVL_SIZE(n) (DISPC_OVL_BASE(n) + \ + DISPC_SIZE_OFFSET(n)) +#define DISPC_OVL_ATTRIBUTES(n) (DISPC_OVL_BASE(n) + \ + DISPC_ATTR_OFFSET(n)) +#define DISPC_OVL_ATTRIBUTES2(n) (DISPC_OVL_BASE(n) + \ + DISPC_ATTR2_OFFSET(n)) +#define DISPC_OVL_FIFO_THRESHOLD(n) (DISPC_OVL_BASE(n) + \ + DISPC_FIFO_THRESH_OFFSET(n)) +#define DISPC_OVL_FIFO_SIZE_STATUS(n) (DISPC_OVL_BASE(n) + \ + DISPC_FIFO_SIZE_STATUS_OFFSET(n)) +#define DISPC_OVL_ROW_INC(n) (DISPC_OVL_BASE(n) + \ + DISPC_ROW_INC_OFFSET(n)) +#define DISPC_OVL_PIXEL_INC(n) (DISPC_OVL_BASE(n) + \ + DISPC_PIX_INC_OFFSET(n)) +#define DISPC_OVL_WINDOW_SKIP(n) (DISPC_OVL_BASE(n) + \ + DISPC_WINDOW_SKIP_OFFSET(n)) +#define DISPC_OVL_TABLE_BA(n) (DISPC_OVL_BASE(n) + \ + DISPC_TABLE_BA_OFFSET(n)) +#define DISPC_OVL_FIR(n) (DISPC_OVL_BASE(n) + \ + DISPC_FIR_OFFSET(n)) +#define DISPC_OVL_FIR2(n) (DISPC_OVL_BASE(n) + \ + DISPC_FIR2_OFFSET(n)) +#define DISPC_OVL_PICTURE_SIZE(n) (DISPC_OVL_BASE(n) + \ + DISPC_PIC_SIZE_OFFSET(n)) +#define DISPC_OVL_ACCU0(n) (DISPC_OVL_BASE(n) + \ + DISPC_ACCU0_OFFSET(n)) +#define DISPC_OVL_ACCU1(n) (DISPC_OVL_BASE(n) + \ + DISPC_ACCU1_OFFSET(n)) +#define DISPC_OVL_ACCU2_0(n) (DISPC_OVL_BASE(n) + \ + DISPC_ACCU2_0_OFFSET(n)) +#define DISPC_OVL_ACCU2_1(n) (DISPC_OVL_BASE(n) + \ + DISPC_ACCU2_1_OFFSET(n)) +#define DISPC_OVL_FIR_COEF_H(n, i) (DISPC_OVL_BASE(n) + \ + DISPC_FIR_COEF_H_OFFSET(n, i)) +#define DISPC_OVL_FIR_COEF_HV(n, i) (DISPC_OVL_BASE(n) + \ + DISPC_FIR_COEF_HV_OFFSET(n, i)) +#define DISPC_OVL_FIR_COEF_H2(n, i) (DISPC_OVL_BASE(n) + \ + DISPC_FIR_COEF_H2_OFFSET(n, i)) +#define DISPC_OVL_FIR_COEF_HV2(n, i) (DISPC_OVL_BASE(n) + \ + DISPC_FIR_COEF_HV2_OFFSET(n, i)) +#define DISPC_OVL_CONV_COEF(n, i) (DISPC_OVL_BASE(n) + \ + DISPC_CONV_COEF_OFFSET(n, i)) +#define DISPC_OVL_FIR_COEF_V(n, i) (DISPC_OVL_BASE(n) + \ + DISPC_FIR_COEF_V_OFFSET(n, i)) +#define DISPC_OVL_FIR_COEF_V2(n, i) (DISPC_OVL_BASE(n) + \ + DISPC_FIR_COEF_V2_OFFSET(n, i)) +#define DISPC_OVL_PRELOAD(n) (DISPC_OVL_BASE(n) + \ + DISPC_PRELOAD_OFFSET(n)) + +/* DISPC manager/channel specific registers */ +static inline u16 DISPC_DEFAULT_COLOR(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0x004C; + case OMAP_DSS_CHANNEL_DIGIT: + return 0x0050; + case OMAP_DSS_CHANNEL_LCD2: + return 0x03AC; + default: + BUG(); + } +} + +static inline u16 DISPC_TRANS_COLOR(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0x0054; + case OMAP_DSS_CHANNEL_DIGIT: + return 0x0058; + case OMAP_DSS_CHANNEL_LCD2: + return 0x03B0; + default: + BUG(); + } +} + 
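Each DISPC_OVL_* accessor above resolves to a per-plane base plus a per-register offset, both encoded by the inline helpers in this header, so dispc.c no longer needs per-plane register arrays. Two addresses worked out purely from the values defined in this header:

	/*
	 * DISPC_OVL_FIR(OMAP_DSS_VIDEO1)
	 *	= DISPC_OVL_BASE(OMAP_DSS_VIDEO1) + DISPC_FIR_OFFSET(OMAP_DSS_VIDEO1)
	 *	= 0x00BC + 0x0024 = 0x00E0
	 *
	 * DISPC_OVL_BA0_UV(OMAP_DSS_VIDEO2)
	 *	= DISPC_OVL_BASE(OMAP_DSS_VIDEO2) + DISPC_BA0_UV_OFFSET(OMAP_DSS_VIDEO2)
	 *	= 0x014C + 0x04BC = 0x0608
	 */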
+static inline u16 DISPC_TIMING_H(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0x0064; + case OMAP_DSS_CHANNEL_DIGIT: + BUG(); + case OMAP_DSS_CHANNEL_LCD2: + return 0x0400; + default: + BUG(); + } +} + +static inline u16 DISPC_TIMING_V(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0x0068; + case OMAP_DSS_CHANNEL_DIGIT: + BUG(); + case OMAP_DSS_CHANNEL_LCD2: + return 0x0404; + default: + BUG(); + } +} + +static inline u16 DISPC_POL_FREQ(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0x006C; + case OMAP_DSS_CHANNEL_DIGIT: + BUG(); + case OMAP_DSS_CHANNEL_LCD2: + return 0x0408; + default: + BUG(); + } +} + +static inline u16 DISPC_DIVISORo(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0x0070; + case OMAP_DSS_CHANNEL_DIGIT: + BUG(); + case OMAP_DSS_CHANNEL_LCD2: + return 0x040C; + default: + BUG(); + } +} + +/* Named as DISPC_SIZE_LCD, DISPC_SIZE_DIGIT and DISPC_SIZE_LCD2 in TRM */ +static inline u16 DISPC_SIZE_MGR(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0x007C; + case OMAP_DSS_CHANNEL_DIGIT: + return 0x0078; + case OMAP_DSS_CHANNEL_LCD2: + return 0x03CC; + default: + BUG(); + } +} + +static inline u16 DISPC_DATA_CYCLE1(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0x01D4; + case OMAP_DSS_CHANNEL_DIGIT: + BUG(); + case OMAP_DSS_CHANNEL_LCD2: + return 0x03C0; + default: + BUG(); + } +} + +static inline u16 DISPC_DATA_CYCLE2(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0x01D8; + case OMAP_DSS_CHANNEL_DIGIT: + BUG(); + case OMAP_DSS_CHANNEL_LCD2: + return 0x03C4; + default: + BUG(); + } +} + +static inline u16 DISPC_DATA_CYCLE3(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0x01DC; + case OMAP_DSS_CHANNEL_DIGIT: + BUG(); + case OMAP_DSS_CHANNEL_LCD2: + return 0x03C8; + default: + BUG(); + } +} + +static inline u16 DISPC_CPR_COEF_R(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0x0220; + case OMAP_DSS_CHANNEL_DIGIT: + BUG(); + case OMAP_DSS_CHANNEL_LCD2: + return 0x03BC; + default: + BUG(); + } +} + +static inline u16 DISPC_CPR_COEF_G(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0x0224; + case OMAP_DSS_CHANNEL_DIGIT: + BUG(); + case OMAP_DSS_CHANNEL_LCD2: + return 0x03B8; + default: + BUG(); + } +} + +static inline u16 DISPC_CPR_COEF_B(enum omap_channel channel) +{ + switch (channel) { + case OMAP_DSS_CHANNEL_LCD: + return 0x0228; + case OMAP_DSS_CHANNEL_DIGIT: + BUG(); + case OMAP_DSS_CHANNEL_LCD2: + return 0x03B4; + default: + BUG(); + } +} + +/* DISPC overlay register base addresses */ +static inline u16 DISPC_OVL_BASE(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + return 0x0080; + case OMAP_DSS_VIDEO1: + return 0x00BC; + case OMAP_DSS_VIDEO2: + return 0x014C; + default: + BUG(); + } +} + +/* DISPC overlay register offsets */ +static inline u16 DISPC_BA0_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x0000; + default: + BUG(); + } +} + +static inline u16 DISPC_BA1_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x0004; + default: + BUG(); + } +} + +static inline u16 
DISPC_BA0_UV_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + case OMAP_DSS_VIDEO1: + return 0x0544; + case OMAP_DSS_VIDEO2: + return 0x04BC; + default: + BUG(); + } +} + +static inline u16 DISPC_BA1_UV_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + case OMAP_DSS_VIDEO1: + return 0x0548; + case OMAP_DSS_VIDEO2: + return 0x04C0; + default: + BUG(); + } +} + +static inline u16 DISPC_POS_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x0008; + default: + BUG(); + } +} + +static inline u16 DISPC_SIZE_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x000C; + default: + BUG(); + } +} + +static inline u16 DISPC_ATTR_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + return 0x0020; + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x0010; + default: + BUG(); + } +} + +static inline u16 DISPC_ATTR2_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + case OMAP_DSS_VIDEO1: + return 0x0568; + case OMAP_DSS_VIDEO2: + return 0x04DC; + default: + BUG(); + } +} + +static inline u16 DISPC_FIFO_THRESH_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + return 0x0024; + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x0014; + default: + BUG(); + } +} + +static inline u16 DISPC_FIFO_SIZE_STATUS_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + return 0x0028; + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x0018; + default: + BUG(); + } +} + +static inline u16 DISPC_ROW_INC_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + return 0x002C; + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x001C; + default: + BUG(); + } +} + +static inline u16 DISPC_PIX_INC_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + return 0x0030; + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x0020; + default: + BUG(); + } +} + +static inline u16 DISPC_WINDOW_SKIP_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + return 0x0034; + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + BUG(); + default: + BUG(); + } +} + +static inline u16 DISPC_TABLE_BA_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + return 0x0038; + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + BUG(); + default: + BUG(); + } +} + +static inline u16 DISPC_FIR_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x0024; + default: + BUG(); + } +} + +static inline u16 DISPC_FIR2_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + case OMAP_DSS_VIDEO1: + return 0x0580; + case OMAP_DSS_VIDEO2: + return 0x055C; + default: + BUG(); + } +} + +static inline u16 DISPC_PIC_SIZE_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x0028; + default: + BUG(); + } +} + + +static inline u16 DISPC_ACCU0_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x002C; + default: + BUG(); + } +} + +static inline u16 DISPC_ACCU2_0_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + case OMAP_DSS_VIDEO1: + 
return 0x0584; + case OMAP_DSS_VIDEO2: + return 0x0560; + default: + BUG(); + } +} + +static inline u16 DISPC_ACCU1_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x0030; + default: + BUG(); + } +} + +static inline u16 DISPC_ACCU2_1_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + case OMAP_DSS_VIDEO1: + return 0x0588; + case OMAP_DSS_VIDEO2: + return 0x0564; + default: + BUG(); + } +} + +/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */ +static inline u16 DISPC_FIR_COEF_H_OFFSET(enum omap_plane plane, u16 i) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x0034 + i * 0x8; + default: + BUG(); + } +} + +/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */ +static inline u16 DISPC_FIR_COEF_H2_OFFSET(enum omap_plane plane, u16 i) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + case OMAP_DSS_VIDEO1: + return 0x058C + i * 0x8; + case OMAP_DSS_VIDEO2: + return 0x0568 + i * 0x8; + default: + BUG(); + } +} + +/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */ +static inline u16 DISPC_FIR_COEF_HV_OFFSET(enum omap_plane plane, u16 i) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x0038 + i * 0x8; + default: + BUG(); + } +} + +/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */ +static inline u16 DISPC_FIR_COEF_HV2_OFFSET(enum omap_plane plane, u16 i) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + case OMAP_DSS_VIDEO1: + return 0x0590 + i * 8; + case OMAP_DSS_VIDEO2: + return 0x056C + i * 0x8; + default: + BUG(); + } +} + +/* coef index i = {0, 1, 2, 3, 4,} */ +static inline u16 DISPC_CONV_COEF_OFFSET(enum omap_plane plane, u16 i) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + case OMAP_DSS_VIDEO1: + case OMAP_DSS_VIDEO2: + return 0x0074 + i * 0x4; + default: + BUG(); + } +} + +/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */ +static inline u16 DISPC_FIR_COEF_V_OFFSET(enum omap_plane plane, u16 i) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + case OMAP_DSS_VIDEO1: + return 0x0124 + i * 0x4; + case OMAP_DSS_VIDEO2: + return 0x00B4 + i * 0x4; + default: + BUG(); + } +} + +/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */ +static inline u16 DISPC_FIR_COEF_V2_OFFSET(enum omap_plane plane, u16 i) +{ + switch (plane) { + case OMAP_DSS_GFX: + BUG(); + case OMAP_DSS_VIDEO1: + return 0x05CC + i * 0x4; + case OMAP_DSS_VIDEO2: + return 0x05A8 + i * 0x4; + default: + BUG(); + } +} + +static inline u16 DISPC_PRELOAD_OFFSET(enum omap_plane plane) +{ + switch (plane) { + case OMAP_DSS_GFX: + return 0x01AC; + case OMAP_DSS_VIDEO1: + return 0x0174; + case OMAP_DSS_VIDEO2: + return 0x00E8; + default: + BUG(); + } +} +#endif diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c index a85a6f38b40..c2dfc8c5005 100644 --- a/drivers/video/omap2/dss/display.c +++ b/drivers/video/omap2/dss/display.c @@ -27,7 +27,7 @@ #include <linux/jiffies.h> #include <linux/platform_device.h> -#include <plat/display.h> +#include <video/omapdss.h> #include "dss.h" static ssize_t display_enabled_show(struct device *dev, @@ -44,9 +44,13 @@ static ssize_t display_enabled_store(struct device *dev, const char *buf, size_t size) { struct omap_dss_device *dssdev = to_dss_device(dev); - bool enabled, r; + int r, enabled; - enabled = simple_strtoul(buf, NULL, 10); + r = kstrtoint(buf, 0, &enabled); + if (r) + return r; + + enabled = !!enabled; if (enabled != (dssdev->state 
!= OMAP_DSS_DISPLAY_DISABLED)) { if (enabled) { @@ -82,7 +86,9 @@ static ssize_t display_upd_mode_store(struct device *dev, if (!dssdev->driver->set_update_mode) return -EINVAL; - val = simple_strtoul(buf, NULL, 10); + r = kstrtoint(buf, 0, &val); + if (r) + return r; switch (val) { case OMAP_DSS_UPDATE_DISABLED: @@ -114,13 +120,16 @@ static ssize_t display_tear_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct omap_dss_device *dssdev = to_dss_device(dev); - unsigned long te; - int r; + int te, r; if (!dssdev->driver->enable_te || !dssdev->driver->get_te) return -ENOENT; - te = simple_strtoul(buf, NULL, 0); + r = kstrtoint(buf, 0, &te); + if (r) + return r; + + te = !!te; r = dssdev->driver->enable_te(dssdev, te); if (r) @@ -196,13 +205,14 @@ static ssize_t display_rotate_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct omap_dss_device *dssdev = to_dss_device(dev); - unsigned long rot; - int r; + int rot, r; if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate) return -ENOENT; - rot = simple_strtoul(buf, NULL, 0); + r = kstrtoint(buf, 0, &rot); + if (r) + return r; r = dssdev->driver->set_rotate(dssdev, rot); if (r) @@ -226,13 +236,16 @@ static ssize_t display_mirror_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct omap_dss_device *dssdev = to_dss_device(dev); - unsigned long mirror; - int r; + int mirror, r; if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror) return -ENOENT; - mirror = simple_strtoul(buf, NULL, 0); + r = kstrtoint(buf, 0, &mirror); + if (r) + return r; + + mirror = !!mirror; r = dssdev->driver->set_mirror(dssdev, mirror); if (r) @@ -259,14 +272,15 @@ static ssize_t display_wss_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct omap_dss_device *dssdev = to_dss_device(dev); - unsigned long wss; + u32 wss; int r; if (!dssdev->driver->get_wss || !dssdev->driver->set_wss) return -ENOENT; - if (strict_strtoul(buf, 0, &wss)) - return -EINVAL; + r = kstrtou32(buf, 0, &wss); + if (r) + return r; if (wss > 0xfffff) return -EINVAL; diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c index 2d3ca4ca4a0..ff6bd30132d 100644 --- a/drivers/video/omap2/dss/dpi.c +++ b/drivers/video/omap2/dss/dpi.c @@ -30,16 +30,40 @@ #include <linux/platform_device.h> #include <linux/regulator/consumer.h> -#include <plat/display.h> +#include <video/omapdss.h> #include <plat/cpu.h> #include "dss.h" static struct { struct regulator *vdds_dsi_reg; + struct platform_device *dsidev; } dpi; -#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL +static struct platform_device *dpi_get_dsidev(enum omap_dss_clk_source clk) +{ + int dsi_module; + + dsi_module = clk == OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC ? 
0 : 1; + + return dsi_get_dsidev_from_id(dsi_module); +} + +static bool dpi_use_dsi_pll(struct omap_dss_device *dssdev) +{ + if (dssdev->clocks.dispc.dispc_fclk_src == + OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC || + dssdev->clocks.dispc.dispc_fclk_src == + OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC || + dssdev->clocks.dispc.channel.lcd_clk_src == + OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC || + dssdev->clocks.dispc.channel.lcd_clk_src == + OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC) + return true; + else + return false; +} + static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft, unsigned long pck_req, unsigned long *fck, int *lck_div, int *pck_div) @@ -48,16 +72,16 @@ static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft, struct dispc_clock_info dispc_cinfo; int r; - r = dsi_pll_calc_clock_div_pck(is_tft, pck_req, &dsi_cinfo, - &dispc_cinfo); + r = dsi_pll_calc_clock_div_pck(dpi.dsidev, is_tft, pck_req, + &dsi_cinfo, &dispc_cinfo); if (r) return r; - r = dsi_pll_set_clock_div(&dsi_cinfo); + r = dsi_pll_set_clock_div(dpi.dsidev, &dsi_cinfo); if (r) return r; - dss_select_dispc_clk_source(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC); + dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src); r = dispc_set_clock_div(dssdev->manager->id, &dispc_cinfo); if (r) @@ -69,7 +93,7 @@ static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft, return 0; } -#else + static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, bool is_tft, unsigned long pck_req, unsigned long *fck, int *lck_div, int *pck_div) @@ -96,13 +120,12 @@ static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, bool is_tft, return 0; } -#endif static int dpi_set_mode(struct omap_dss_device *dssdev) { struct omap_video_timings *t = &dssdev->panel.timings; - int lck_div, pck_div; - unsigned long fck; + int lck_div = 0, pck_div = 0; + unsigned long fck = 0; unsigned long pck; bool is_tft; int r = 0; @@ -114,13 +137,12 @@ static int dpi_set_mode(struct omap_dss_device *dssdev) is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0; -#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL - r = dpi_set_dsi_clk(dssdev, is_tft, t->pixel_clock * 1000, &fck, - &lck_div, &pck_div); -#else - r = dpi_set_dispc_clk(dssdev, is_tft, t->pixel_clock * 1000, &fck, - &lck_div, &pck_div); -#endif + if (dpi_use_dsi_pll(dssdev)) + r = dpi_set_dsi_clk(dssdev, is_tft, t->pixel_clock * 1000, + &fck, &lck_div, &pck_div); + else + r = dpi_set_dispc_clk(dssdev, is_tft, t->pixel_clock * 1000, + &fck, &lck_div, &pck_div); if (r) goto err0; @@ -179,12 +201,13 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev) if (r) goto err2; -#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL - dss_clk_enable(DSS_CLK_SYSCK); - r = dsi_pll_init(dssdev, 0, 1); - if (r) - goto err3; -#endif + if (dpi_use_dsi_pll(dssdev)) { + dss_clk_enable(DSS_CLK_SYSCK); + r = dsi_pll_init(dpi.dsidev, 0, 1); + if (r) + goto err3; + } + r = dpi_set_mode(dssdev); if (r) goto err4; @@ -196,11 +219,11 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev) return 0; err4: -#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL - dsi_pll_uninit(); + if (dpi_use_dsi_pll(dssdev)) + dsi_pll_uninit(dpi.dsidev, true); err3: - dss_clk_disable(DSS_CLK_SYSCK); -#endif + if (dpi_use_dsi_pll(dssdev)) + dss_clk_disable(DSS_CLK_SYSCK); err2: dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); if (cpu_is_omap34xx()) @@ -216,11 +239,11 @@ void omapdss_dpi_display_disable(struct omap_dss_device *dssdev) { dssdev->manager->disable(dssdev->manager); -#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL - 
dss_select_dispc_clk_source(DSS_CLK_SRC_FCK); - dsi_pll_uninit(); - dss_clk_disable(DSS_CLK_SYSCK); -#endif + if (dpi_use_dsi_pll(dssdev)) { + dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); + dsi_pll_uninit(dpi.dsidev, true); + dss_clk_disable(DSS_CLK_SYSCK); + } dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); @@ -251,6 +274,7 @@ int dpi_check_timings(struct omap_dss_device *dssdev, int lck_div, pck_div; unsigned long fck; unsigned long pck; + struct dispc_clock_info dispc_cinfo; if (!dispc_lcd_timings_ok(timings)) return -EINVAL; @@ -260,11 +284,9 @@ int dpi_check_timings(struct omap_dss_device *dssdev, is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0; -#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL - { + if (dpi_use_dsi_pll(dssdev)) { struct dsi_clock_info dsi_cinfo; - struct dispc_clock_info dispc_cinfo; - r = dsi_pll_calc_clock_div_pck(is_tft, + r = dsi_pll_calc_clock_div_pck(dpi.dsidev, is_tft, timings->pixel_clock * 1000, &dsi_cinfo, &dispc_cinfo); @@ -272,13 +294,8 @@ int dpi_check_timings(struct omap_dss_device *dssdev, return r; fck = dsi_cinfo.dsi_pll_hsdiv_dispc_clk; - lck_div = dispc_cinfo.lck_div; - pck_div = dispc_cinfo.pck_div; - } -#else - { + } else { struct dss_clock_info dss_cinfo; - struct dispc_clock_info dispc_cinfo; r = dss_calc_clock_div(is_tft, timings->pixel_clock * 1000, &dss_cinfo, &dispc_cinfo); @@ -286,10 +303,10 @@ int dpi_check_timings(struct omap_dss_device *dssdev, return r; fck = dss_cinfo.fck; - lck_div = dispc_cinfo.lck_div; - pck_div = dispc_cinfo.pck_div; } -#endif + + lck_div = dispc_cinfo.lck_div; + pck_div = dispc_cinfo.pck_div; pck = fck / lck_div / pck_div / 1000; @@ -316,6 +333,12 @@ int dpi_init_display(struct omap_dss_device *dssdev) dpi.vdds_dsi_reg = vdds_dsi; } + if (dpi_use_dsi_pll(dssdev)) { + enum omap_dss_clk_source dispc_fclk_src = + dssdev->clocks.dispc.dispc_fclk_src; + dpi.dsidev = dpi_get_dsidev(dispc_fclk_src); + } + return 0; } diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c index 0a7f1a47f8e..345757cfcbe 100644 --- a/drivers/video/omap2/dss/dsi.c +++ b/drivers/video/omap2/dss/dsi.c @@ -33,8 +33,11 @@ #include <linux/regulator/consumer.h> #include <linux/wait.h> #include <linux/workqueue.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/debugfs.h> -#include <plat/display.h> +#include <video/omapdss.h> #include <plat/clock.h> #include "dss.h" @@ -56,6 +59,7 @@ struct dsi_reg { u16 idx; }; #define DSI_IRQSTATUS DSI_REG(0x0018) #define DSI_IRQENABLE DSI_REG(0x001C) #define DSI_CTRL DSI_REG(0x0040) +#define DSI_GNQ DSI_REG(0x0044) #define DSI_COMPLEXIO_CFG1 DSI_REG(0x0048) #define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(0x004C) #define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(0x0050) @@ -90,6 +94,7 @@ struct dsi_reg { u16 idx; }; #define DSI_DSIPHY_CFG1 DSI_REG(0x200 + 0x0004) #define DSI_DSIPHY_CFG2 DSI_REG(0x200 + 0x0008) #define DSI_DSIPHY_CFG5 DSI_REG(0x200 + 0x0014) +#define DSI_DSIPHY_CFG10 DSI_REG(0x200 + 0x0028) /* DSI_PLL_CTRL_SCP */ @@ -99,11 +104,11 @@ struct dsi_reg { u16 idx; }; #define DSI_PLL_CONFIGURATION1 DSI_REG(0x300 + 0x000C) #define DSI_PLL_CONFIGURATION2 DSI_REG(0x300 + 0x0010) -#define REG_GET(idx, start, end) \ - FLD_GET(dsi_read_reg(idx), start, end) +#define REG_GET(dsidev, idx, start, end) \ + FLD_GET(dsi_read_reg(dsidev, idx), start, end) -#define REG_FLD_MOD(idx, val, start, end) \ - dsi_write_reg(idx, FLD_MOD(dsi_read_reg(idx), val, start, end)) +#define REG_FLD_MOD(dsidev, idx, val, start, end) \ + dsi_write_reg(dsidev, idx, FLD_MOD(dsi_read_reg(dsidev, idx), val, start, 
end)) /* Global interrupts */ #define DSI_IRQ_VC0 (1 << 0) @@ -147,31 +152,50 @@ struct dsi_reg { u16 idx; }; #define DSI_CIO_IRQ_ERRSYNCESC1 (1 << 0) #define DSI_CIO_IRQ_ERRSYNCESC2 (1 << 1) #define DSI_CIO_IRQ_ERRSYNCESC3 (1 << 2) +#define DSI_CIO_IRQ_ERRSYNCESC4 (1 << 3) +#define DSI_CIO_IRQ_ERRSYNCESC5 (1 << 4) #define DSI_CIO_IRQ_ERRESC1 (1 << 5) #define DSI_CIO_IRQ_ERRESC2 (1 << 6) #define DSI_CIO_IRQ_ERRESC3 (1 << 7) +#define DSI_CIO_IRQ_ERRESC4 (1 << 8) +#define DSI_CIO_IRQ_ERRESC5 (1 << 9) #define DSI_CIO_IRQ_ERRCONTROL1 (1 << 10) #define DSI_CIO_IRQ_ERRCONTROL2 (1 << 11) #define DSI_CIO_IRQ_ERRCONTROL3 (1 << 12) +#define DSI_CIO_IRQ_ERRCONTROL4 (1 << 13) +#define DSI_CIO_IRQ_ERRCONTROL5 (1 << 14) #define DSI_CIO_IRQ_STATEULPS1 (1 << 15) #define DSI_CIO_IRQ_STATEULPS2 (1 << 16) #define DSI_CIO_IRQ_STATEULPS3 (1 << 17) +#define DSI_CIO_IRQ_STATEULPS4 (1 << 18) +#define DSI_CIO_IRQ_STATEULPS5 (1 << 19) #define DSI_CIO_IRQ_ERRCONTENTIONLP0_1 (1 << 20) #define DSI_CIO_IRQ_ERRCONTENTIONLP1_1 (1 << 21) #define DSI_CIO_IRQ_ERRCONTENTIONLP0_2 (1 << 22) #define DSI_CIO_IRQ_ERRCONTENTIONLP1_2 (1 << 23) #define DSI_CIO_IRQ_ERRCONTENTIONLP0_3 (1 << 24) #define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25) +#define DSI_CIO_IRQ_ERRCONTENTIONLP0_4 (1 << 26) +#define DSI_CIO_IRQ_ERRCONTENTIONLP1_4 (1 << 27) +#define DSI_CIO_IRQ_ERRCONTENTIONLP0_5 (1 << 28) +#define DSI_CIO_IRQ_ERRCONTENTIONLP1_5 (1 << 29) #define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30) #define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31) #define DSI_CIO_IRQ_ERROR_MASK \ (DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \ - DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \ - DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRCONTROL1 | \ - DSI_CIO_IRQ_ERRCONTROL2 | DSI_CIO_IRQ_ERRCONTROL3 | \ + DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRSYNCESC4 | \ + DSI_CIO_IRQ_ERRSYNCESC5 | \ + DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \ + DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRESC4 | \ + DSI_CIO_IRQ_ERRESC5 | \ + DSI_CIO_IRQ_ERRCONTROL1 | DSI_CIO_IRQ_ERRCONTROL2 | \ + DSI_CIO_IRQ_ERRCONTROL3 | DSI_CIO_IRQ_ERRCONTROL4 | \ + DSI_CIO_IRQ_ERRCONTROL5 | \ DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \ DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \ - DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3) + DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3 | \ + DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \ + DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5) #define DSI_DT_DCS_SHORT_WRITE_0 0x05 #define DSI_DT_DCS_SHORT_WRITE_1 0x15 @@ -208,6 +232,19 @@ enum dsi_vc_mode { DSI_VC_MODE_VP, }; +enum dsi_lane { + DSI_CLK_P = 1 << 0, + DSI_CLK_N = 1 << 1, + DSI_DATA1_P = 1 << 2, + DSI_DATA1_N = 1 << 3, + DSI_DATA2_P = 1 << 4, + DSI_DATA2_N = 1 << 5, + DSI_DATA3_P = 1 << 6, + DSI_DATA3_N = 1 << 7, + DSI_DATA4_P = 1 << 8, + DSI_DATA4_N = 1 << 9, +}; + struct dsi_update_region { u16 x, y, w, h; struct omap_dss_device *device; @@ -227,14 +264,16 @@ struct dsi_isr_tables { struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS]; }; -static struct -{ +struct dsi_data { struct platform_device *pdev; void __iomem *base; int irq; + void (*dsi_mux_pads)(bool enable); + struct dsi_clock_info current_cinfo; + bool vdds_dsi_enabled; struct regulator *vdds_dsi_reg; struct { @@ -258,8 +297,7 @@ static struct struct dsi_update_region update_region; bool te_enabled; - - struct workqueue_struct *workqueue; + bool ulps_enabled; void (*framedone_callback)(int, void *); void 
*framedone_data; @@ -292,21 +330,63 @@ static struct unsigned long regm_dispc_max, regm_dsi_max; unsigned long fint_min, fint_max; unsigned long lpdiv_max; -} dsi; + + int num_data_lanes; + + unsigned scp_clk_refcount; +}; + +struct dsi_packet_sent_handler_data { + struct platform_device *dsidev; + struct completion *completion; +}; + +static struct platform_device *dsi_pdev_map[MAX_NUM_DSI]; #ifdef DEBUG static unsigned int dsi_perf; module_param_named(dsi_perf, dsi_perf, bool, 0644); #endif -static inline void dsi_write_reg(const struct dsi_reg idx, u32 val) +static inline struct dsi_data *dsi_get_dsidrv_data(struct platform_device *dsidev) { - __raw_writel(val, dsi.base + idx.idx); + return dev_get_drvdata(&dsidev->dev); } -static inline u32 dsi_read_reg(const struct dsi_reg idx) +static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss_device *dssdev) { - return __raw_readl(dsi.base + idx.idx); + return dsi_pdev_map[dssdev->phy.dsi.module]; +} + +struct platform_device *dsi_get_dsidev_from_id(int module) +{ + return dsi_pdev_map[module]; +} + +static int dsi_get_dsidev_id(struct platform_device *dsidev) +{ + /* TEMP: Pass 0 as the dsi module index till the time the dsi platform + * device names aren't changed to the form "omapdss_dsi.0", + * "omapdss_dsi.1" and so on */ + BUG_ON(dsidev->id != -1); + + return 0; +} + +static inline void dsi_write_reg(struct platform_device *dsidev, + const struct dsi_reg idx, u32 val) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + __raw_writel(val, dsi->base + idx.idx); +} + +static inline u32 dsi_read_reg(struct platform_device *dsidev, + const struct dsi_reg idx) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + return __raw_readl(dsi->base + idx.idx); } @@ -318,21 +398,29 @@ void dsi_restore_context(void) { } -void dsi_bus_lock(void) +void dsi_bus_lock(struct omap_dss_device *dssdev) { - down(&dsi.bus_lock); + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + down(&dsi->bus_lock); } EXPORT_SYMBOL(dsi_bus_lock); -void dsi_bus_unlock(void) +void dsi_bus_unlock(struct omap_dss_device *dssdev) { - up(&dsi.bus_lock); + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + up(&dsi->bus_lock); } EXPORT_SYMBOL(dsi_bus_unlock); -static bool dsi_bus_is_locked(void) +static bool dsi_bus_is_locked(struct platform_device *dsidev) { - return dsi.bus_lock.count == 0; + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + return dsi->bus_lock.count == 0; } static void dsi_completion_handler(void *data, u32 mask) @@ -340,12 +428,12 @@ static void dsi_completion_handler(void *data, u32 mask) complete((struct completion *)data); } -static inline int wait_for_bit_change(const struct dsi_reg idx, int bitnum, - int value) +static inline int wait_for_bit_change(struct platform_device *dsidev, + const struct dsi_reg idx, int bitnum, int value) { int t = 100000; - while (REG_GET(idx, bitnum, bitnum) != value) { + while (REG_GET(dsidev, idx, bitnum, bitnum) != value) { if (--t == 0) return !value; } @@ -354,18 +442,21 @@ static inline int wait_for_bit_change(const struct dsi_reg idx, int bitnum, } #ifdef DEBUG -static void dsi_perf_mark_setup(void) +static void dsi_perf_mark_setup(struct platform_device *dsidev) { - dsi.perf_setup_time = ktime_get(); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + dsi->perf_setup_time = ktime_get(); } -static void 
dsi_perf_mark_start(void) +static void dsi_perf_mark_start(struct platform_device *dsidev) { - dsi.perf_start_time = ktime_get(); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + dsi->perf_start_time = ktime_get(); } -static void dsi_perf_show(const char *name) +static void dsi_perf_show(struct platform_device *dsidev, const char *name) { + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); ktime_t t, setup_time, trans_time; u32 total_bytes; u32 setup_us, trans_us, total_us; @@ -375,21 +466,21 @@ static void dsi_perf_show(const char *name) t = ktime_get(); - setup_time = ktime_sub(dsi.perf_start_time, dsi.perf_setup_time); + setup_time = ktime_sub(dsi->perf_start_time, dsi->perf_setup_time); setup_us = (u32)ktime_to_us(setup_time); if (setup_us == 0) setup_us = 1; - trans_time = ktime_sub(t, dsi.perf_start_time); + trans_time = ktime_sub(t, dsi->perf_start_time); trans_us = (u32)ktime_to_us(trans_time); if (trans_us == 0) trans_us = 1; total_us = setup_us + trans_us; - total_bytes = dsi.update_region.w * - dsi.update_region.h * - dsi.update_region.device->ctrl.pixel_size / 8; + total_bytes = dsi->update_region.w * + dsi->update_region.h * + dsi->update_region.device->ctrl.pixel_size / 8; printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), " "%u bytes, %u kbytes/sec\n", @@ -402,9 +493,9 @@ static void dsi_perf_show(const char *name) total_bytes * 1000 / total_us); } #else -#define dsi_perf_mark_setup() -#define dsi_perf_mark_start() -#define dsi_perf_show(x) +#define dsi_perf_mark_setup(x) +#define dsi_perf_mark_start(x) +#define dsi_perf_show(x, y) #endif static void print_irq_status(u32 status) @@ -510,38 +601,42 @@ static void print_irq_status_cio(u32 status) } #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS -static void dsi_collect_irq_stats(u32 irqstatus, u32 *vcstatus, u32 ciostatus) +static void dsi_collect_irq_stats(struct platform_device *dsidev, u32 irqstatus, + u32 *vcstatus, u32 ciostatus) { + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); int i; - spin_lock(&dsi.irq_stats_lock); + spin_lock(&dsi->irq_stats_lock); - dsi.irq_stats.irq_count++; - dss_collect_irq_stats(irqstatus, dsi.irq_stats.dsi_irqs); + dsi->irq_stats.irq_count++; + dss_collect_irq_stats(irqstatus, dsi->irq_stats.dsi_irqs); for (i = 0; i < 4; ++i) - dss_collect_irq_stats(vcstatus[i], dsi.irq_stats.vc_irqs[i]); + dss_collect_irq_stats(vcstatus[i], dsi->irq_stats.vc_irqs[i]); - dss_collect_irq_stats(ciostatus, dsi.irq_stats.cio_irqs); + dss_collect_irq_stats(ciostatus, dsi->irq_stats.cio_irqs); - spin_unlock(&dsi.irq_stats_lock); + spin_unlock(&dsi->irq_stats_lock); } #else -#define dsi_collect_irq_stats(irqstatus, vcstatus, ciostatus) +#define dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus) #endif static int debug_irq; -static void dsi_handle_irq_errors(u32 irqstatus, u32 *vcstatus, u32 ciostatus) +static void dsi_handle_irq_errors(struct platform_device *dsidev, u32 irqstatus, + u32 *vcstatus, u32 ciostatus) { + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); int i; if (irqstatus & DSI_IRQ_ERROR_MASK) { DSSERR("DSI error, irqstatus %x\n", irqstatus); print_irq_status(irqstatus); - spin_lock(&dsi.errors_lock); - dsi.errors |= irqstatus & DSI_IRQ_ERROR_MASK; - spin_unlock(&dsi.errors_lock); + spin_lock(&dsi->errors_lock); + dsi->errors |= irqstatus & DSI_IRQ_ERROR_MASK; + spin_unlock(&dsi->errors_lock); } else if (debug_irq) { print_irq_status(irqstatus); } @@ -602,22 +697,27 @@ static void dsi_handle_isrs(struct dsi_isr_tables *isr_tables, static irqreturn_t omap_dsi_irq_handler(int 
irq, void *arg) { + struct platform_device *dsidev; + struct dsi_data *dsi; u32 irqstatus, vcstatus[4], ciostatus; int i; - spin_lock(&dsi.irq_lock); + dsidev = (struct platform_device *) arg; + dsi = dsi_get_dsidrv_data(dsidev); + + spin_lock(&dsi->irq_lock); - irqstatus = dsi_read_reg(DSI_IRQSTATUS); + irqstatus = dsi_read_reg(dsidev, DSI_IRQSTATUS); /* IRQ is not for us */ if (!irqstatus) { - spin_unlock(&dsi.irq_lock); + spin_unlock(&dsi->irq_lock); return IRQ_NONE; } - dsi_write_reg(DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK); + dsi_write_reg(dsidev, DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK); /* flush posted write */ - dsi_read_reg(DSI_IRQSTATUS); + dsi_read_reg(dsidev, DSI_IRQSTATUS); for (i = 0; i < 4; ++i) { if ((irqstatus & (1 << i)) == 0) { @@ -625,45 +725,47 @@ static irqreturn_t omap_dsi_irq_handler(int irq, void *arg) continue; } - vcstatus[i] = dsi_read_reg(DSI_VC_IRQSTATUS(i)); + vcstatus[i] = dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i)); - dsi_write_reg(DSI_VC_IRQSTATUS(i), vcstatus[i]); + dsi_write_reg(dsidev, DSI_VC_IRQSTATUS(i), vcstatus[i]); /* flush posted write */ - dsi_read_reg(DSI_VC_IRQSTATUS(i)); + dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i)); } if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) { - ciostatus = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS); + ciostatus = dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS); - dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, ciostatus); + dsi_write_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS, ciostatus); /* flush posted write */ - dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS); + dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS); } else { ciostatus = 0; } #ifdef DSI_CATCH_MISSING_TE if (irqstatus & DSI_IRQ_TE_TRIGGER) - del_timer(&dsi.te_timer); + del_timer(&dsi->te_timer); #endif /* make a copy and unlock, so that isrs can unregister * themselves */ - memcpy(&dsi.isr_tables_copy, &dsi.isr_tables, sizeof(dsi.isr_tables)); + memcpy(&dsi->isr_tables_copy, &dsi->isr_tables, + sizeof(dsi->isr_tables)); - spin_unlock(&dsi.irq_lock); + spin_unlock(&dsi->irq_lock); - dsi_handle_isrs(&dsi.isr_tables_copy, irqstatus, vcstatus, ciostatus); + dsi_handle_isrs(&dsi->isr_tables_copy, irqstatus, vcstatus, ciostatus); - dsi_handle_irq_errors(irqstatus, vcstatus, ciostatus); + dsi_handle_irq_errors(dsidev, irqstatus, vcstatus, ciostatus); - dsi_collect_irq_stats(irqstatus, vcstatus, ciostatus); + dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus); return IRQ_HANDLED; } -/* dsi.irq_lock has to be locked by the caller */ -static void _omap_dsi_configure_irqs(struct dsi_isr_data *isr_array, +/* dsi->irq_lock has to be locked by the caller */ +static void _omap_dsi_configure_irqs(struct platform_device *dsidev, + struct dsi_isr_data *isr_array, unsigned isr_array_size, u32 default_mask, const struct dsi_reg enable_reg, const struct dsi_reg status_reg) @@ -684,61 +786,67 @@ static void _omap_dsi_configure_irqs(struct dsi_isr_data *isr_array, mask |= isr_data->mask; } - old_mask = dsi_read_reg(enable_reg); + old_mask = dsi_read_reg(dsidev, enable_reg); /* clear the irqstatus for newly enabled irqs */ - dsi_write_reg(status_reg, (mask ^ old_mask) & mask); - dsi_write_reg(enable_reg, mask); + dsi_write_reg(dsidev, status_reg, (mask ^ old_mask) & mask); + dsi_write_reg(dsidev, enable_reg, mask); /* flush posted writes */ - dsi_read_reg(enable_reg); - dsi_read_reg(status_reg); + dsi_read_reg(dsidev, enable_reg); + dsi_read_reg(dsidev, status_reg); } -/* dsi.irq_lock has to be locked by the caller */ -static void _omap_dsi_set_irqs(void) +/* dsi->irq_lock has to be locked by 
the caller */ +static void _omap_dsi_set_irqs(struct platform_device *dsidev) { + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); u32 mask = DSI_IRQ_ERROR_MASK; #ifdef DSI_CATCH_MISSING_TE mask |= DSI_IRQ_TE_TRIGGER; #endif - _omap_dsi_configure_irqs(dsi.isr_tables.isr_table, - ARRAY_SIZE(dsi.isr_tables.isr_table), mask, + _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table, + ARRAY_SIZE(dsi->isr_tables.isr_table), mask, DSI_IRQENABLE, DSI_IRQSTATUS); } -/* dsi.irq_lock has to be locked by the caller */ -static void _omap_dsi_set_irqs_vc(int vc) +/* dsi->irq_lock has to be locked by the caller */ +static void _omap_dsi_set_irqs_vc(struct platform_device *dsidev, int vc) { - _omap_dsi_configure_irqs(dsi.isr_tables.isr_table_vc[vc], - ARRAY_SIZE(dsi.isr_tables.isr_table_vc[vc]), + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_vc[vc], + ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]), DSI_VC_IRQ_ERROR_MASK, DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc)); } -/* dsi.irq_lock has to be locked by the caller */ -static void _omap_dsi_set_irqs_cio(void) +/* dsi->irq_lock has to be locked by the caller */ +static void _omap_dsi_set_irqs_cio(struct platform_device *dsidev) { - _omap_dsi_configure_irqs(dsi.isr_tables.isr_table_cio, - ARRAY_SIZE(dsi.isr_tables.isr_table_cio), + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_cio, + ARRAY_SIZE(dsi->isr_tables.isr_table_cio), DSI_CIO_IRQ_ERROR_MASK, DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS); } -static void _dsi_initialize_irq(void) +static void _dsi_initialize_irq(struct platform_device *dsidev) { + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); unsigned long flags; int vc; - spin_lock_irqsave(&dsi.irq_lock, flags); + spin_lock_irqsave(&dsi->irq_lock, flags); - memset(&dsi.isr_tables, 0, sizeof(dsi.isr_tables)); + memset(&dsi->isr_tables, 0, sizeof(dsi->isr_tables)); - _omap_dsi_set_irqs(); + _omap_dsi_set_irqs(dsidev); for (vc = 0; vc < 4; ++vc) - _omap_dsi_set_irqs_vc(vc); - _omap_dsi_set_irqs_cio(); + _omap_dsi_set_irqs_vc(dsidev, vc); + _omap_dsi_set_irqs_cio(dsidev); - spin_unlock_irqrestore(&dsi.irq_lock, flags); + spin_unlock_irqrestore(&dsi->irq_lock, flags); } static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask, @@ -797,126 +905,137 @@ static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask, return -EINVAL; } -static int dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask) +static int dsi_register_isr(struct platform_device *dsidev, omap_dsi_isr_t isr, + void *arg, u32 mask) { + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); unsigned long flags; int r; - spin_lock_irqsave(&dsi.irq_lock, flags); + spin_lock_irqsave(&dsi->irq_lock, flags); - r = _dsi_register_isr(isr, arg, mask, dsi.isr_tables.isr_table, - ARRAY_SIZE(dsi.isr_tables.isr_table)); + r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table, + ARRAY_SIZE(dsi->isr_tables.isr_table)); if (r == 0) - _omap_dsi_set_irqs(); + _omap_dsi_set_irqs(dsidev); - spin_unlock_irqrestore(&dsi.irq_lock, flags); + spin_unlock_irqrestore(&dsi->irq_lock, flags); return r; } -static int dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask) +static int dsi_unregister_isr(struct platform_device *dsidev, + omap_dsi_isr_t isr, void *arg, u32 mask) { + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); unsigned long flags; int r; - spin_lock_irqsave(&dsi.irq_lock, flags); + 
spin_lock_irqsave(&dsi->irq_lock, flags); - r = _dsi_unregister_isr(isr, arg, mask, dsi.isr_tables.isr_table, - ARRAY_SIZE(dsi.isr_tables.isr_table)); + r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table, + ARRAY_SIZE(dsi->isr_tables.isr_table)); if (r == 0) - _omap_dsi_set_irqs(); + _omap_dsi_set_irqs(dsidev); - spin_unlock_irqrestore(&dsi.irq_lock, flags); + spin_unlock_irqrestore(&dsi->irq_lock, flags); return r; } -static int dsi_register_isr_vc(int channel, omap_dsi_isr_t isr, void *arg, - u32 mask) +static int dsi_register_isr_vc(struct platform_device *dsidev, int channel, + omap_dsi_isr_t isr, void *arg, u32 mask) { + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); unsigned long flags; int r; - spin_lock_irqsave(&dsi.irq_lock, flags); + spin_lock_irqsave(&dsi->irq_lock, flags); r = _dsi_register_isr(isr, arg, mask, - dsi.isr_tables.isr_table_vc[channel], - ARRAY_SIZE(dsi.isr_tables.isr_table_vc[channel])); + dsi->isr_tables.isr_table_vc[channel], + ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel])); if (r == 0) - _omap_dsi_set_irqs_vc(channel); + _omap_dsi_set_irqs_vc(dsidev, channel); - spin_unlock_irqrestore(&dsi.irq_lock, flags); + spin_unlock_irqrestore(&dsi->irq_lock, flags); return r; } -static int dsi_unregister_isr_vc(int channel, omap_dsi_isr_t isr, void *arg, - u32 mask) +static int dsi_unregister_isr_vc(struct platform_device *dsidev, int channel, + omap_dsi_isr_t isr, void *arg, u32 mask) { + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); unsigned long flags; int r; - spin_lock_irqsave(&dsi.irq_lock, flags); + spin_lock_irqsave(&dsi->irq_lock, flags); r = _dsi_unregister_isr(isr, arg, mask, - dsi.isr_tables.isr_table_vc[channel], - ARRAY_SIZE(dsi.isr_tables.isr_table_vc[channel])); + dsi->isr_tables.isr_table_vc[channel], + ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel])); if (r == 0) - _omap_dsi_set_irqs_vc(channel); + _omap_dsi_set_irqs_vc(dsidev, channel); - spin_unlock_irqrestore(&dsi.irq_lock, flags); + spin_unlock_irqrestore(&dsi->irq_lock, flags); return r; } -static int dsi_register_isr_cio(omap_dsi_isr_t isr, void *arg, u32 mask) +static int dsi_register_isr_cio(struct platform_device *dsidev, + omap_dsi_isr_t isr, void *arg, u32 mask) { + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); unsigned long flags; int r; - spin_lock_irqsave(&dsi.irq_lock, flags); + spin_lock_irqsave(&dsi->irq_lock, flags); - r = _dsi_register_isr(isr, arg, mask, dsi.isr_tables.isr_table_cio, - ARRAY_SIZE(dsi.isr_tables.isr_table_cio)); + r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio, + ARRAY_SIZE(dsi->isr_tables.isr_table_cio)); if (r == 0) - _omap_dsi_set_irqs_cio(); + _omap_dsi_set_irqs_cio(dsidev); - spin_unlock_irqrestore(&dsi.irq_lock, flags); + spin_unlock_irqrestore(&dsi->irq_lock, flags); return r; } -static int dsi_unregister_isr_cio(omap_dsi_isr_t isr, void *arg, u32 mask) +static int dsi_unregister_isr_cio(struct platform_device *dsidev, + omap_dsi_isr_t isr, void *arg, u32 mask) { + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); unsigned long flags; int r; - spin_lock_irqsave(&dsi.irq_lock, flags); + spin_lock_irqsave(&dsi->irq_lock, flags); - r = _dsi_unregister_isr(isr, arg, mask, dsi.isr_tables.isr_table_cio, - ARRAY_SIZE(dsi.isr_tables.isr_table_cio)); + r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio, + ARRAY_SIZE(dsi->isr_tables.isr_table_cio)); if (r == 0) - _omap_dsi_set_irqs_cio(); + _omap_dsi_set_irqs_cio(dsidev); - spin_unlock_irqrestore(&dsi.irq_lock, flags); + 
spin_unlock_irqrestore(&dsi->irq_lock, flags); return r; } -static u32 dsi_get_errors(void) +static u32 dsi_get_errors(struct platform_device *dsidev) { + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); unsigned long flags; u32 e; - spin_lock_irqsave(&dsi.errors_lock, flags); - e = dsi.errors; - dsi.errors = 0; - spin_unlock_irqrestore(&dsi.errors_lock, flags); + spin_lock_irqsave(&dsi->errors_lock, flags); + e = dsi->errors; + dsi->errors = 0; + spin_unlock_irqrestore(&dsi->errors_lock, flags); return e; } @@ -930,23 +1049,27 @@ static inline void enable_clocks(bool enable) } /* source clock for DSI PLL. this could also be PCLKFREE */ -static inline void dsi_enable_pll_clock(bool enable) +static inline void dsi_enable_pll_clock(struct platform_device *dsidev, + bool enable) { + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + if (enable) dss_clk_enable(DSS_CLK_SYSCK); else dss_clk_disable(DSS_CLK_SYSCK); - if (enable && dsi.pll_locked) { - if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1) + if (enable && dsi->pll_locked) { + if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1) DSSERR("cannot lock PLL when enabling clocks\n"); } } #ifdef DEBUG -static void _dsi_print_reset_status(void) +static void _dsi_print_reset_status(struct platform_device *dsidev) { u32 l; + int b0, b1, b2; if (!dss_debug) return; @@ -954,35 +1077,47 @@ static void _dsi_print_reset_status(void) /* A dummy read using the SCP interface to any DSIPHY register is * required after DSIPHY reset to complete the reset of the DSI complex * I/O. */ - l = dsi_read_reg(DSI_DSIPHY_CFG5); + l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5); printk(KERN_DEBUG "DSI resets: "); - l = dsi_read_reg(DSI_PLL_STATUS); + l = dsi_read_reg(dsidev, DSI_PLL_STATUS); printk("PLL (%d) ", FLD_GET(l, 0, 0)); - l = dsi_read_reg(DSI_COMPLEXIO_CFG1); + l = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1); printk("CIO (%d) ", FLD_GET(l, 29, 29)); - l = dsi_read_reg(DSI_DSIPHY_CFG5); - printk("PHY (%x, %d, %d, %d)\n", - FLD_GET(l, 28, 26), + if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) { + b0 = 28; + b1 = 27; + b2 = 26; + } else { + b0 = 24; + b1 = 25; + b2 = 26; + } + + l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5); + printk("PHY (%x%x%x, %d, %d, %d)\n", + FLD_GET(l, b0, b0), + FLD_GET(l, b1, b1), + FLD_GET(l, b2, b2), FLD_GET(l, 29, 29), FLD_GET(l, 30, 30), FLD_GET(l, 31, 31)); } #else -#define _dsi_print_reset_status() +#define _dsi_print_reset_status(x) #endif -static inline int dsi_if_enable(bool enable) +static inline int dsi_if_enable(struct platform_device *dsidev, bool enable) { DSSDBG("dsi_if_enable(%d)\n", enable); enable = enable ? 
1 : 0; - REG_FLD_MOD(DSI_CTRL, enable, 0, 0); /* IF_EN */ + REG_FLD_MOD(dsidev, DSI_CTRL, enable, 0, 0); /* IF_EN */ - if (wait_for_bit_change(DSI_CTRL, 0, enable) != enable) { + if (wait_for_bit_change(dsidev, DSI_CTRL, 0, enable) != enable) { DSSERR("Failed to set dsi_if_enable to %d\n", enable); return -EIO; } @@ -990,31 +1125,38 @@ static inline int dsi_if_enable(bool enable) return 0; } -unsigned long dsi_get_pll_hsdiv_dispc_rate(void) +unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev) { - return dsi.current_cinfo.dsi_pll_hsdiv_dispc_clk; + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + return dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk; } -static unsigned long dsi_get_pll_hsdiv_dsi_rate(void) +static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct platform_device *dsidev) { - return dsi.current_cinfo.dsi_pll_hsdiv_dsi_clk; + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + return dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk; } -static unsigned long dsi_get_txbyteclkhs(void) +static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev) { - return dsi.current_cinfo.clkin4ddr / 16; + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + return dsi->current_cinfo.clkin4ddr / 16; } -static unsigned long dsi_fclk_rate(void) +static unsigned long dsi_fclk_rate(struct platform_device *dsidev) { unsigned long r; + int dsi_module = dsi_get_dsidev_id(dsidev); - if (dss_get_dsi_clk_source() == DSS_CLK_SRC_FCK) { + if (dss_get_dsi_clk_source(dsi_module) == OMAP_DSS_CLK_SRC_FCK) { /* DSI FCLK source is DSS_CLK_FCK */ r = dss_clk_get_rate(DSS_CLK_FCK); } else { /* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */ - r = dsi_get_pll_hsdiv_dsi_rate(); + r = dsi_get_pll_hsdiv_dsi_rate(dsidev); } return r; @@ -1022,31 +1164,50 @@ static unsigned long dsi_fclk_rate(void) static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev) { + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); unsigned long dsi_fclk; unsigned lp_clk_div; unsigned long lp_clk; - lp_clk_div = dssdev->phy.dsi.div.lp_clk_div; + lp_clk_div = dssdev->clocks.dsi.lp_clk_div; - if (lp_clk_div == 0 || lp_clk_div > dsi.lpdiv_max) + if (lp_clk_div == 0 || lp_clk_div > dsi->lpdiv_max) return -EINVAL; - dsi_fclk = dsi_fclk_rate(); + dsi_fclk = dsi_fclk_rate(dsidev); lp_clk = dsi_fclk / 2 / lp_clk_div; DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk); - dsi.current_cinfo.lp_clk = lp_clk; - dsi.current_cinfo.lp_clk_div = lp_clk_div; + dsi->current_cinfo.lp_clk = lp_clk; + dsi->current_cinfo.lp_clk_div = lp_clk_div; - REG_FLD_MOD(DSI_CLK_CTRL, lp_clk_div, 12, 0); /* LP_CLK_DIVISOR */ + /* LP_CLK_DIVISOR */ + REG_FLD_MOD(dsidev, DSI_CLK_CTRL, lp_clk_div, 12, 0); - REG_FLD_MOD(DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, - 21, 21); /* LP_RX_SYNCHRO_ENABLE */ + /* LP_RX_SYNCHRO_ENABLE */ + REG_FLD_MOD(dsidev, DSI_CLK_CTRL, dsi_fclk > 30000000 ? 
1 : 0, 21, 21); return 0; } +static void dsi_enable_scp_clk(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + if (dsi->scp_clk_refcount++ == 0) + REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */ +} + +static void dsi_disable_scp_clk(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + WARN_ON(dsi->scp_clk_refcount == 0); + if (--dsi->scp_clk_refcount == 0) + REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */ +} enum dsi_pll_power_state { DSI_PLL_POWER_OFF = 0x0, @@ -1055,14 +1216,21 @@ enum dsi_pll_power_state { DSI_PLL_POWER_ON_DIV = 0x3, }; -static int dsi_pll_power(enum dsi_pll_power_state state) +static int dsi_pll_power(struct platform_device *dsidev, + enum dsi_pll_power_state state) { int t = 0; - REG_FLD_MOD(DSI_CLK_CTRL, state, 31, 30); /* PLL_PWR_CMD */ + /* DSI-PLL power command 0x3 is not working */ + if (dss_has_feature(FEAT_DSI_PLL_PWR_BUG) && + state == DSI_PLL_POWER_ON_DIV) + state = DSI_PLL_POWER_ON_ALL; + + /* PLL_PWR_CMD */ + REG_FLD_MOD(dsidev, DSI_CLK_CTRL, state, 31, 30); /* PLL_PWR_STATUS */ - while (FLD_GET(dsi_read_reg(DSI_CLK_CTRL), 29, 28) != state) { + while (FLD_GET(dsi_read_reg(dsidev, DSI_CLK_CTRL), 29, 28) != state) { if (++t > 1000) { DSSERR("Failed to set DSI PLL power mode to %d\n", state); @@ -1078,16 +1246,19 @@ static int dsi_pll_power(enum dsi_pll_power_state state) static int dsi_calc_clock_rates(struct omap_dss_device *dssdev, struct dsi_clock_info *cinfo) { - if (cinfo->regn == 0 || cinfo->regn > dsi.regn_max) + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + if (cinfo->regn == 0 || cinfo->regn > dsi->regn_max) return -EINVAL; - if (cinfo->regm == 0 || cinfo->regm > dsi.regm_max) + if (cinfo->regm == 0 || cinfo->regm > dsi->regm_max) return -EINVAL; - if (cinfo->regm_dispc > dsi.regm_dispc_max) + if (cinfo->regm_dispc > dsi->regm_dispc_max) return -EINVAL; - if (cinfo->regm_dsi > dsi.regm_dsi_max) + if (cinfo->regm_dsi > dsi->regm_dsi_max) return -EINVAL; if (cinfo->use_sys_clk) { @@ -1106,7 +1277,7 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev, cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 
2 : 1)); - if (cinfo->fint > dsi.fint_max || cinfo->fint < dsi.fint_min) + if (cinfo->fint > dsi->fint_max || cinfo->fint < dsi->fint_min) return -EINVAL; cinfo->clkin4ddr = 2 * cinfo->regm * cinfo->fint; @@ -1129,10 +1300,11 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev, return 0; } -int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck, - struct dsi_clock_info *dsi_cinfo, +int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft, + unsigned long req_pck, struct dsi_clock_info *dsi_cinfo, struct dispc_clock_info *dispc_cinfo) { + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); struct dsi_clock_info cur, best; struct dispc_clock_info best_dispc; int min_fck_per_pck; @@ -1143,10 +1315,10 @@ int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck, max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK); - if (req_pck == dsi.cache_req_pck && - dsi.cache_cinfo.clkin == dss_sys_clk) { + if (req_pck == dsi->cache_req_pck && + dsi->cache_cinfo.clkin == dss_sys_clk) { DSSDBG("DSI clock info found from cache\n"); - *dsi_cinfo = dsi.cache_cinfo; + *dsi_cinfo = dsi->cache_cinfo; dispc_find_clk_divs(is_tft, req_pck, dsi_cinfo->dsi_pll_hsdiv_dispc_clk, dispc_cinfo); return 0; @@ -1176,17 +1348,17 @@ retry: /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */ /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */ /* To reduce PLL lock time, keep Fint high (around 2 MHz) */ - for (cur.regn = 1; cur.regn < dsi.regn_max; ++cur.regn) { + for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) { if (cur.highfreq == 0) cur.fint = cur.clkin / cur.regn; else cur.fint = cur.clkin / (2 * cur.regn); - if (cur.fint > dsi.fint_max || cur.fint < dsi.fint_min) + if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min) continue; /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */ - for (cur.regm = 1; cur.regm < dsi.regm_max; ++cur.regm) { + for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) { unsigned long a, b; a = 2 * cur.regm * (cur.clkin/1000); @@ -1198,8 +1370,8 @@ retry: /* dsi_pll_hsdiv_dispc_clk(MHz) = * DSIPHY(MHz) / regm_dispc < 173MHz/186Mhz */ - for (cur.regm_dispc = 1; cur.regm_dispc < dsi.regm_dispc_max; - ++cur.regm_dispc) { + for (cur.regm_dispc = 1; cur.regm_dispc < + dsi->regm_dispc_max; ++cur.regm_dispc) { struct dispc_clock_info cur_dispc; cur.dsi_pll_hsdiv_dispc_clk = cur.clkin4ddr / cur.regm_dispc; @@ -1259,34 +1431,39 @@ found: if (dispc_cinfo) *dispc_cinfo = best_dispc; - dsi.cache_req_pck = req_pck; - dsi.cache_clk_freq = 0; - dsi.cache_cinfo = best; + dsi->cache_req_pck = req_pck; + dsi->cache_clk_freq = 0; + dsi->cache_cinfo = best; return 0; } -int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo) +int dsi_pll_set_clock_div(struct platform_device *dsidev, + struct dsi_clock_info *cinfo) { + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); int r = 0; u32 l; - int f; + int f = 0; u8 regn_start, regn_end, regm_start, regm_end; u8 regm_dispc_start, regm_dispc_end, regm_dsi_start, regm_dsi_end; DSSDBGF(); - dsi.current_cinfo.fint = cinfo->fint; - dsi.current_cinfo.clkin4ddr = cinfo->clkin4ddr; - dsi.current_cinfo.dsi_pll_hsdiv_dispc_clk = + dsi->current_cinfo.use_sys_clk = cinfo->use_sys_clk; + dsi->current_cinfo.highfreq = cinfo->highfreq; + + dsi->current_cinfo.fint = cinfo->fint; + dsi->current_cinfo.clkin4ddr = cinfo->clkin4ddr; + dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk = cinfo->dsi_pll_hsdiv_dispc_clk; - dsi.current_cinfo.dsi_pll_hsdiv_dsi_clk = + dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk = 
cinfo->dsi_pll_hsdiv_dsi_clk; - dsi.current_cinfo.regn = cinfo->regn; - dsi.current_cinfo.regm = cinfo->regm; - dsi.current_cinfo.regm_dispc = cinfo->regm_dispc; - dsi.current_cinfo.regm_dsi = cinfo->regm_dsi; + dsi->current_cinfo.regn = cinfo->regn; + dsi->current_cinfo.regm = cinfo->regm; + dsi->current_cinfo.regm_dispc = cinfo->regm_dispc; + dsi->current_cinfo.regm_dsi = cinfo->regm_dsi; DSSDBG("DSI Fint %ld\n", cinfo->fint); @@ -1309,12 +1486,12 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo) DSSDBG("Clock lane freq %ld Hz\n", cinfo->clkin4ddr / 4); DSSDBG("regm_dispc = %d, %s (%s) = %lu\n", cinfo->regm_dispc, - dss_get_generic_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC), - dss_feat_get_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC), + dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC), + dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC), cinfo->dsi_pll_hsdiv_dispc_clk); DSSDBG("regm_dsi = %d, %s (%s) = %lu\n", cinfo->regm_dsi, - dss_get_generic_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI), - dss_feat_get_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI), + dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI), + dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI), cinfo->dsi_pll_hsdiv_dsi_clk); dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGN, &regn_start, &regn_end); @@ -1324,9 +1501,10 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo) dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DSI, &regm_dsi_start, &regm_dsi_end); - REG_FLD_MOD(DSI_PLL_CONTROL, 0, 0, 0); /* DSI_PLL_AUTOMODE = manual */ + /* DSI_PLL_AUTOMODE = manual */ + REG_FLD_MOD(dsidev, DSI_PLL_CONTROL, 0, 0, 0); - l = dsi_read_reg(DSI_PLL_CONFIGURATION1); + l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION1); l = FLD_MOD(l, 1, 0, 0); /* DSI_PLL_STOPMODE */ /* DSI_PLL_REGN */ l = FLD_MOD(l, cinfo->regn - 1, regn_start, regn_end); @@ -1338,22 +1516,22 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo) /* DSIPROTO_CLOCK_DIV */ l = FLD_MOD(l, cinfo->regm_dsi > 0 ? cinfo->regm_dsi - 1 : 0, regm_dsi_start, regm_dsi_end); - dsi_write_reg(DSI_PLL_CONFIGURATION1, l); - - BUG_ON(cinfo->fint < dsi.fint_min || cinfo->fint > dsi.fint_max); - if (cinfo->fint < 1000000) - f = 0x3; - else if (cinfo->fint < 1250000) - f = 0x4; - else if (cinfo->fint < 1500000) - f = 0x5; - else if (cinfo->fint < 1750000) - f = 0x6; - else - f = 0x7; + dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION1, l); + + BUG_ON(cinfo->fint < dsi->fint_min || cinfo->fint > dsi->fint_max); + + if (dss_has_feature(FEAT_DSI_PLL_FREQSEL)) { + f = cinfo->fint < 1000000 ? 0x3 : + cinfo->fint < 1250000 ? 0x4 : + cinfo->fint < 1500000 ? 0x5 : + cinfo->fint < 1750000 ? 0x6 : + 0x7; + } + + l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2); - l = dsi_read_reg(DSI_PLL_CONFIGURATION2); - l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */ + if (dss_has_feature(FEAT_DSI_PLL_FREQSEL)) + l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */ l = FLD_MOD(l, cinfo->use_sys_clk ?
0 : 1, 11, 11); /* DSI_PLL_CLKSEL */ l = FLD_MOD(l, cinfo->highfreq, @@ -1361,25 +1539,25 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo) l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */ l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */ l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */ - dsi_write_reg(DSI_PLL_CONFIGURATION2, l); + dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l); - REG_FLD_MOD(DSI_PLL_GO, 1, 0, 0); /* DSI_PLL_GO */ + REG_FLD_MOD(dsidev, DSI_PLL_GO, 1, 0, 0); /* DSI_PLL_GO */ - if (wait_for_bit_change(DSI_PLL_GO, 0, 0) != 0) { + if (wait_for_bit_change(dsidev, DSI_PLL_GO, 0, 0) != 0) { DSSERR("dsi pll go bit not going down.\n"); r = -EIO; goto err; } - if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1) { + if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1) { DSSERR("cannot lock PLL\n"); r = -EIO; goto err; } - dsi.pll_locked = 1; + dsi->pll_locked = 1; - l = dsi_read_reg(DSI_PLL_CONFIGURATION2); + l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2); l = FLD_MOD(l, 0, 0, 0); /* DSI_PLL_IDLE */ l = FLD_MOD(l, 0, 5, 5); /* DSI_PLL_PLLLPMODE */ l = FLD_MOD(l, 0, 6, 6); /* DSI_PLL_LOWCURRSTBY */ @@ -1394,52 +1572,53 @@ int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo) l = FLD_MOD(l, 1, 18, 18); /* DSI_PROTO_CLOCK_EN */ l = FLD_MOD(l, 0, 19, 19); /* DSI_PROTO_CLOCK_PWDN */ l = FLD_MOD(l, 0, 20, 20); /* DSI_HSDIVBYPASS */ - dsi_write_reg(DSI_PLL_CONFIGURATION2, l); + dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l); DSSDBG("PLL config done\n"); err: return r; } -int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk, +int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk, bool enable_hsdiv) { + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); int r = 0; enum dsi_pll_power_state pwstate; DSSDBG("PLL init\n"); -#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL - /* - * HACK: this is just a quick hack to get the USE_DSI_PLL - * option working. USE_DSI_PLL is itself a big hack, and - * should be removed. - */ - if (dsi.vdds_dsi_reg == NULL) { + if (dsi->vdds_dsi_reg == NULL) { struct regulator *vdds_dsi; - vdds_dsi = regulator_get(&dsi.pdev->dev, "vdds_dsi"); + vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi"); if (IS_ERR(vdds_dsi)) { DSSERR("can't get VDDS_DSI regulator\n"); return PTR_ERR(vdds_dsi); } - dsi.vdds_dsi_reg = vdds_dsi; + dsi->vdds_dsi_reg = vdds_dsi; } -#endif enable_clocks(1); - dsi_enable_pll_clock(1); + dsi_enable_pll_clock(dsidev, 1); + /* + * Note: SCP CLK is not required on OMAP3, but it is required on OMAP4. + */ + dsi_enable_scp_clk(dsidev); - r = regulator_enable(dsi.vdds_dsi_reg); - if (r) - goto err0; + if (!dsi->vdds_dsi_enabled) { + r = regulator_enable(dsi->vdds_dsi_reg); + if (r) + goto err0; + dsi->vdds_dsi_enabled = true; + } /* XXX PLL does not come out of reset without this... 
*/ dispc_pck_free_enable(1); - if (wait_for_bit_change(DSI_PLL_STATUS, 0, 1) != 1) { + if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 0, 1) != 1) { DSSERR("PLL not coming out of reset.\n"); r = -ENODEV; dispc_pck_free_enable(0); @@ -1459,7 +1638,7 @@ int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk, else pwstate = DSI_PLL_POWER_OFF; - r = dsi_pll_power(pwstate); + r = dsi_pll_power(dsidev, pwstate); if (r) goto err1; @@ -1468,42 +1647,53 @@ int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk, return 0; err1: - regulator_disable(dsi.vdds_dsi_reg); + if (dsi->vdds_dsi_enabled) { + regulator_disable(dsi->vdds_dsi_reg); + dsi->vdds_dsi_enabled = false; + } err0: + dsi_disable_scp_clk(dsidev); enable_clocks(0); - dsi_enable_pll_clock(0); + dsi_enable_pll_clock(dsidev, 0); return r; } -void dsi_pll_uninit(void) +void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes) { + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + dsi->pll_locked = 0; + dsi_pll_power(dsidev, DSI_PLL_POWER_OFF); + if (disconnect_lanes) { + WARN_ON(!dsi->vdds_dsi_enabled); + regulator_disable(dsi->vdds_dsi_reg); + dsi->vdds_dsi_enabled = false; + } + + dsi_disable_scp_clk(dsidev); enable_clocks(0); - dsi_enable_pll_clock(0); + dsi_enable_pll_clock(dsidev, 0); - dsi.pll_locked = 0; - dsi_pll_power(DSI_PLL_POWER_OFF); - regulator_disable(dsi.vdds_dsi_reg); DSSDBG("PLL uninit done\n"); } -void dsi_dump_clocks(struct seq_file *s) +static void dsi_dump_dsidev_clocks(struct platform_device *dsidev, + struct seq_file *s) { - int clksel; - struct dsi_clock_info *cinfo = &dsi.current_cinfo; - enum dss_clk_source dispc_clk_src, dsi_clk_src; + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + struct dsi_clock_info *cinfo = &dsi->current_cinfo; + enum omap_dss_clk_source dispc_clk_src, dsi_clk_src; + int dsi_module = dsi_get_dsidev_id(dsidev); dispc_clk_src = dss_get_dispc_clk_source(); - dsi_clk_src = dss_get_dsi_clk_source(); + dsi_clk_src = dss_get_dsi_clk_source(dsi_module); enable_clocks(1); - clksel = REG_GET(DSI_PLL_CONFIGURATION2, 11, 11); - - seq_printf(s, "- DSI PLL -\n"); + seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1); seq_printf(s, "dsi pll source = %s\n", - clksel == 0 ? - "dss_sys_clk" : "pclkfree"); + cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree"); seq_printf(s, "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn); @@ -1515,7 +1705,7 @@ void dsi_dump_clocks(struct seq_file *s) dss_feat_get_clk_source_name(dispc_clk_src), cinfo->dsi_pll_hsdiv_dispc_clk, cinfo->regm_dispc, - dispc_clk_src == DSS_CLK_SRC_FCK ? + dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ? "off" : "on"); seq_printf(s, "%s (%s)\t%-16luregm_dsi %u\t(%s)\n", @@ -1523,45 +1713,55 @@ void dsi_dump_clocks(struct seq_file *s) dss_feat_get_clk_source_name(dsi_clk_src), cinfo->dsi_pll_hsdiv_dsi_clk, cinfo->regm_dsi, - dsi_clk_src == DSS_CLK_SRC_FCK ? + dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ? 
"off" : "on"); - seq_printf(s, "- DSI -\n"); + seq_printf(s, "- DSI%d -\n", dsi_module + 1); seq_printf(s, "dsi fclk source = %s (%s)\n", dss_get_generic_clk_source_name(dsi_clk_src), dss_feat_get_clk_source_name(dsi_clk_src)); - seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate()); + seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev)); seq_printf(s, "DDR_CLK\t\t%lu\n", cinfo->clkin4ddr / 4); - seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs()); + seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsidev)); seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk); - seq_printf(s, "VP_CLK\t\t%lu\n" - "VP_PCLK\t\t%lu\n", - dispc_lclk_rate(OMAP_DSS_CHANNEL_LCD), - dispc_pclk_rate(OMAP_DSS_CHANNEL_LCD)); - enable_clocks(0); } +void dsi_dump_clocks(struct seq_file *s) +{ + struct platform_device *dsidev; + int i; + + for (i = 0; i < MAX_NUM_DSI; i++) { + dsidev = dsi_get_dsidev_from_id(i); + if (dsidev) + dsi_dump_dsidev_clocks(dsidev, s); + } +} + #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS -void dsi_dump_irqs(struct seq_file *s) +static void dsi_dump_dsidev_irqs(struct platform_device *dsidev, + struct seq_file *s) { + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); unsigned long flags; struct dsi_irq_stats stats; + int dsi_module = dsi_get_dsidev_id(dsidev); - spin_lock_irqsave(&dsi.irq_stats_lock, flags); + spin_lock_irqsave(&dsi->irq_stats_lock, flags); - stats = dsi.irq_stats; - memset(&dsi.irq_stats, 0, sizeof(dsi.irq_stats)); - dsi.irq_stats.last_reset = jiffies; + stats = dsi->irq_stats; + memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats)); + dsi->irq_stats.last_reset = jiffies; - spin_unlock_irqrestore(&dsi.irq_stats_lock, flags); + spin_unlock_irqrestore(&dsi->irq_stats_lock, flags); seq_printf(s, "period %u ms\n", jiffies_to_msecs(jiffies - stats.last_reset)); @@ -1570,7 +1770,7 @@ void dsi_dump_irqs(struct seq_file *s) #define PIS(x) \ seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]); - seq_printf(s, "-- DSI interrupts --\n"); + seq_printf(s, "-- DSI%d interrupts --\n", dsi_module + 1); PIS(VC0); PIS(VC1); PIS(VC2); @@ -1636,13 +1836,45 @@ void dsi_dump_irqs(struct seq_file *s) PIS(ULPSACTIVENOT_ALL1); #undef PIS } + +static void dsi1_dump_irqs(struct seq_file *s) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_id(0); + + dsi_dump_dsidev_irqs(dsidev, s); +} + +static void dsi2_dump_irqs(struct seq_file *s) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_id(1); + + dsi_dump_dsidev_irqs(dsidev, s); +} + +void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir, + const struct file_operations *debug_fops) +{ + struct platform_device *dsidev; + + dsidev = dsi_get_dsidev_from_id(0); + if (dsidev) + debugfs_create_file("dsi1_irqs", S_IRUGO, debugfs_dir, + &dsi1_dump_irqs, debug_fops); + + dsidev = dsi_get_dsidev_from_id(1); + if (dsidev) + debugfs_create_file("dsi2_irqs", S_IRUGO, debugfs_dir, + &dsi2_dump_irqs, debug_fops); +} #endif -void dsi_dump_regs(struct seq_file *s) +static void dsi_dump_dsidev_regs(struct platform_device *dsidev, + struct seq_file *s) { -#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(r)) +#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r)) dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); + dsi_enable_scp_clk(dsidev); DUMPREG(DSI_REVISION); DUMPREG(DSI_SYSCONFIG); @@ -1714,25 +1946,57 @@ void dsi_dump_regs(struct seq_file *s) DUMPREG(DSI_PLL_CONFIGURATION1); DUMPREG(DSI_PLL_CONFIGURATION2); + dsi_disable_scp_clk(dsidev); dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); #undef 
DUMPREG } -enum dsi_complexio_power_state { +static void dsi1_dump_regs(struct seq_file *s) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_id(0); + + dsi_dump_dsidev_regs(dsidev, s); +} + +static void dsi2_dump_regs(struct seq_file *s) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_id(1); + + dsi_dump_dsidev_regs(dsidev, s); +} + +void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir, + const struct file_operations *debug_fops) +{ + struct platform_device *dsidev; + + dsidev = dsi_get_dsidev_from_id(0); + if (dsidev) + debugfs_create_file("dsi1_regs", S_IRUGO, debugfs_dir, + &dsi1_dump_regs, debug_fops); + + dsidev = dsi_get_dsidev_from_id(1); + if (dsidev) + debugfs_create_file("dsi2_regs", S_IRUGO, debugfs_dir, + &dsi2_dump_regs, debug_fops); +} +enum dsi_cio_power_state { DSI_COMPLEXIO_POWER_OFF = 0x0, DSI_COMPLEXIO_POWER_ON = 0x1, DSI_COMPLEXIO_POWER_ULPS = 0x2, }; -static int dsi_complexio_power(enum dsi_complexio_power_state state) +static int dsi_cio_power(struct platform_device *dsidev, + enum dsi_cio_power_state state) { int t = 0; /* PWR_CMD */ - REG_FLD_MOD(DSI_COMPLEXIO_CFG1, state, 28, 27); + REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG1, state, 28, 27); /* PWR_STATUS */ - while (FLD_GET(dsi_read_reg(DSI_COMPLEXIO_CFG1), 26, 25) != state) { + while (FLD_GET(dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1), + 26, 25) != state) { if (++t > 1000) { DSSERR("failed to set complexio power state to " "%d\n", state); @@ -1744,9 +2008,70 @@ static int dsi_complexio_power(enum dsi_complexio_power_state state) return 0; } -static void dsi_complexio_config(struct omap_dss_device *dssdev) +/* Number of data lanes present on DSI interface */ +static inline int dsi_get_num_data_lanes(struct platform_device *dsidev) { + /* DSI on OMAP3 doesn't have register DSI_GNQ, set number + * of data lanes as 2 by default */ + if (dss_has_feature(FEAT_DSI_GNQ)) + return REG_GET(dsidev, DSI_GNQ, 11, 9); /* NB_DATA_LANES */ + else + return 2; +} + +/* Number of data lanes used by the dss device */ +static inline int dsi_get_num_data_lanes_dssdev(struct omap_dss_device *dssdev) +{ + int num_data_lanes = 0; + + if (dssdev->phy.dsi.data1_lane != 0) + num_data_lanes++; + if (dssdev->phy.dsi.data2_lane != 0) + num_data_lanes++; + if (dssdev->phy.dsi.data3_lane != 0) + num_data_lanes++; + if (dssdev->phy.dsi.data4_lane != 0) + num_data_lanes++; + + return num_data_lanes; +} + +static unsigned dsi_get_line_buf_size(struct platform_device *dsidev) +{ + int val; + + /* line buffer on OMAP3 is 1024 x 24bits */ + /* XXX: for some reason using full buffer size causes + * considerable TX slowdown with update sizes that fill the + * whole buffer */ + if (!dss_has_feature(FEAT_DSI_GNQ)) + return 1023 * 3; + + val = REG_GET(dsidev, DSI_GNQ, 14, 12); /* VP1_LINE_BUFFER_SIZE */ + + switch (val) { + case 1: + return 512 * 3; /* 512x24 bits */ + case 2: + return 682 * 3; /* 682x24 bits */ + case 3: + return 853 * 3; /* 853x24 bits */ + case 4: + return 1024 * 3; /* 1024x24 bits */ + case 5: + return 1194 * 3; /* 1194x24 bits */ + case 6: + return 1365 * 3; /* 1365x24 bits */ + default: + BUG(); + } +} + +static void dsi_set_lane_config(struct omap_dss_device *dssdev) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); u32 r; + int num_data_lanes_dssdev = dsi_get_num_data_lanes_dssdev(dssdev); int clk_lane = dssdev->phy.dsi.clk_lane; int data1_lane = dssdev->phy.dsi.data1_lane; @@ -1755,14 +2080,28 @@ static void dsi_complexio_config(struct omap_dss_device *dssdev) int data1_pol = 
dssdev->phy.dsi.data1_pol; int data2_pol = dssdev->phy.dsi.data2_pol; - r = dsi_read_reg(DSI_COMPLEXIO_CFG1); + r = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1); r = FLD_MOD(r, clk_lane, 2, 0); r = FLD_MOD(r, clk_pol, 3, 3); r = FLD_MOD(r, data1_lane, 6, 4); r = FLD_MOD(r, data1_pol, 7, 7); r = FLD_MOD(r, data2_lane, 10, 8); r = FLD_MOD(r, data2_pol, 11, 11); - dsi_write_reg(DSI_COMPLEXIO_CFG1, r); + if (num_data_lanes_dssdev > 2) { + int data3_lane = dssdev->phy.dsi.data3_lane; + int data3_pol = dssdev->phy.dsi.data3_pol; + + r = FLD_MOD(r, data3_lane, 14, 12); + r = FLD_MOD(r, data3_pol, 15, 15); + } + if (num_data_lanes_dssdev > 3) { + int data4_lane = dssdev->phy.dsi.data4_lane; + int data4_pol = dssdev->phy.dsi.data4_pol; + + r = FLD_MOD(r, data4_lane, 18, 16); + r = FLD_MOD(r, data4_pol, 19, 19); + } + dsi_write_reg(dsidev, DSI_COMPLEXIO_CFG1, r); /* The configuration of the DSI complex I/O (number of data lanes, position, differential order) should not be changed while @@ -1776,27 +2115,31 @@ static void dsi_complexio_config(struct omap_dss_device *dssdev) DSI complex I/O configuration is unknown. */ /* - REG_FLD_MOD(DSI_CTRL, 1, 0, 0); - REG_FLD_MOD(DSI_CTRL, 0, 0, 0); - REG_FLD_MOD(DSI_CLK_CTRL, 1, 20, 20); - REG_FLD_MOD(DSI_CTRL, 1, 0, 0); + REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0); + REG_FLD_MOD(dsidev, DSI_CTRL, 0, 0, 0); + REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20); + REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0); */ } -static inline unsigned ns2ddr(unsigned ns) +static inline unsigned ns2ddr(struct platform_device *dsidev, unsigned ns) { + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + /* convert time in ns to ddr ticks, rounding up */ - unsigned long ddr_clk = dsi.current_cinfo.clkin4ddr / 4; + unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4; return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000; } -static inline unsigned ddr2ns(unsigned ddr) +static inline unsigned ddr2ns(struct platform_device *dsidev, unsigned ddr) { - unsigned long ddr_clk = dsi.current_cinfo.clkin4ddr / 4; + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4; return ddr * 1000 * 1000 / (ddr_clk / 1000); } -static void dsi_complexio_timings(void) +static void dsi_cio_timings(struct platform_device *dsidev) { u32 r; u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit; @@ -1808,139 +2151,323 @@ static void dsi_complexio_timings(void) /* 1 * DDR_CLK = 2 * UI */ /* min 40ns + 4*UI max 85ns + 6*UI */ - ths_prepare = ns2ddr(70) + 2; + ths_prepare = ns2ddr(dsidev, 70) + 2; /* min 145ns + 10*UI */ - ths_prepare_ths_zero = ns2ddr(175) + 2; + ths_prepare_ths_zero = ns2ddr(dsidev, 175) + 2; /* min max(8*UI, 60ns+4*UI) */ - ths_trail = ns2ddr(60) + 5; + ths_trail = ns2ddr(dsidev, 60) + 5; /* min 100ns */ - ths_exit = ns2ddr(145); + ths_exit = ns2ddr(dsidev, 145); /* tlpx min 50n */ - tlpx_half = ns2ddr(25); + tlpx_half = ns2ddr(dsidev, 25); /* min 60ns */ - tclk_trail = ns2ddr(60) + 2; + tclk_trail = ns2ddr(dsidev, 60) + 2; /* min 38ns, max 95ns */ - tclk_prepare = ns2ddr(65); + tclk_prepare = ns2ddr(dsidev, 65); /* min tclk-prepare + tclk-zero = 300ns */ - tclk_zero = ns2ddr(260); + tclk_zero = ns2ddr(dsidev, 260); DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n", - ths_prepare, ddr2ns(ths_prepare), - ths_prepare_ths_zero, ddr2ns(ths_prepare_ths_zero)); + ths_prepare, ddr2ns(dsidev, ths_prepare), + ths_prepare_ths_zero, ddr2ns(dsidev, ths_prepare_ths_zero)); DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n", - ths_trail, 
ddr2ns(ths_trail), - ths_exit, ddr2ns(ths_exit)); + ths_trail, ddr2ns(dsidev, ths_trail), + ths_exit, ddr2ns(dsidev, ths_exit)); DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), " "tclk_zero %u (%uns)\n", - tlpx_half, ddr2ns(tlpx_half), - tclk_trail, ddr2ns(tclk_trail), - tclk_zero, ddr2ns(tclk_zero)); + tlpx_half, ddr2ns(dsidev, tlpx_half), + tclk_trail, ddr2ns(dsidev, tclk_trail), + tclk_zero, ddr2ns(dsidev, tclk_zero)); DSSDBG("tclk_prepare %u (%uns)\n", - tclk_prepare, ddr2ns(tclk_prepare)); + tclk_prepare, ddr2ns(dsidev, tclk_prepare)); /* program timings */ - r = dsi_read_reg(DSI_DSIPHY_CFG0); + r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0); r = FLD_MOD(r, ths_prepare, 31, 24); r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16); r = FLD_MOD(r, ths_trail, 15, 8); r = FLD_MOD(r, ths_exit, 7, 0); - dsi_write_reg(DSI_DSIPHY_CFG0, r); + dsi_write_reg(dsidev, DSI_DSIPHY_CFG0, r); - r = dsi_read_reg(DSI_DSIPHY_CFG1); + r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1); r = FLD_MOD(r, tlpx_half, 22, 16); r = FLD_MOD(r, tclk_trail, 15, 8); r = FLD_MOD(r, tclk_zero, 7, 0); - dsi_write_reg(DSI_DSIPHY_CFG1, r); + dsi_write_reg(dsidev, DSI_DSIPHY_CFG1, r); - r = dsi_read_reg(DSI_DSIPHY_CFG2); + r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2); r = FLD_MOD(r, tclk_prepare, 7, 0); - dsi_write_reg(DSI_DSIPHY_CFG2, r); + dsi_write_reg(dsidev, DSI_DSIPHY_CFG2, r); } +static void dsi_cio_enable_lane_override(struct omap_dss_device *dssdev, + enum dsi_lane lanes) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + int clk_lane = dssdev->phy.dsi.clk_lane; + int data1_lane = dssdev->phy.dsi.data1_lane; + int data2_lane = dssdev->phy.dsi.data2_lane; + int data3_lane = dssdev->phy.dsi.data3_lane; + int data4_lane = dssdev->phy.dsi.data4_lane; + int clk_pol = dssdev->phy.dsi.clk_pol; + int data1_pol = dssdev->phy.dsi.data1_pol; + int data2_pol = dssdev->phy.dsi.data2_pol; + int data3_pol = dssdev->phy.dsi.data3_pol; + int data4_pol = dssdev->phy.dsi.data4_pol; + + u32 l = 0; + u8 lptxscp_start = dsi->num_data_lanes == 2 ? 22 : 26; + + if (lanes & DSI_CLK_P) + l |= 1 << ((clk_lane - 1) * 2 + (clk_pol ? 0 : 1)); + if (lanes & DSI_CLK_N) + l |= 1 << ((clk_lane - 1) * 2 + (clk_pol ? 1 : 0)); + + if (lanes & DSI_DATA1_P) + l |= 1 << ((data1_lane - 1) * 2 + (data1_pol ? 0 : 1)); + if (lanes & DSI_DATA1_N) + l |= 1 << ((data1_lane - 1) * 2 + (data1_pol ? 1 : 0)); + + if (lanes & DSI_DATA2_P) + l |= 1 << ((data2_lane - 1) * 2 + (data2_pol ? 0 : 1)); + if (lanes & DSI_DATA2_N) + l |= 1 << ((data2_lane - 1) * 2 + (data2_pol ? 1 : 0)); + + if (lanes & DSI_DATA3_P) + l |= 1 << ((data3_lane - 1) * 2 + (data3_pol ? 0 : 1)); + if (lanes & DSI_DATA3_N) + l |= 1 << ((data3_lane - 1) * 2 + (data3_pol ? 1 : 0)); + + if (lanes & DSI_DATA4_P) + l |= 1 << ((data4_lane - 1) * 2 + (data4_pol ? 0 : 1)); + if (lanes & DSI_DATA4_N) + l |= 1 << ((data4_lane - 1) * 2 + (data4_pol ? 
1 : 0)); + /* + * Bits in REGLPTXSCPDAT4TO0DXDY: + * 17: DY0 18: DX0 + * 19: DY1 20: DX1 + * 21: DY2 22: DX2 + * 23: DY3 24: DX3 + * 25: DY4 26: DX4 + */ + + /* Set the lane override configuration */ + + /* REGLPTXSCPDAT4TO0DXDY */ + REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, l, lptxscp_start, 17); -static int dsi_complexio_init(struct omap_dss_device *dssdev) + /* Enable lane override */ + + /* ENLPTXSCPDAT */ + REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 1, 27, 27); +} + +static void dsi_cio_disable_lane_override(struct platform_device *dsidev) { - int r = 0; + /* Disable lane override */ + REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */ + /* Reset the lane override configuration */ + /* REGLPTXSCPDAT4TO0DXDY */ + REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 22, 17); +} + +static int dsi_cio_wait_tx_clk_esc_reset(struct omap_dss_device *dssdev) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + int t; + int bits[3]; + bool in_use[3]; + + if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) { + bits[0] = 28; + bits[1] = 27; + bits[2] = 26; + } else { + bits[0] = 24; + bits[1] = 25; + bits[2] = 26; + } + + in_use[0] = false; + in_use[1] = false; + in_use[2] = false; + + if (dssdev->phy.dsi.clk_lane != 0) + in_use[dssdev->phy.dsi.clk_lane - 1] = true; + if (dssdev->phy.dsi.data1_lane != 0) + in_use[dssdev->phy.dsi.data1_lane - 1] = true; + if (dssdev->phy.dsi.data2_lane != 0) + in_use[dssdev->phy.dsi.data2_lane - 1] = true; + + t = 100000; + while (true) { + u32 l; + int i; + int ok; + + l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5); + + ok = 0; + for (i = 0; i < 3; ++i) { + if (!in_use[i] || (l & (1 << bits[i]))) + ok++; + } + + if (ok == 3) + break; + + if (--t == 0) { + for (i = 0; i < 3; ++i) { + if (!in_use[i] || (l & (1 << bits[i]))) + continue; + + DSSERR("CIO TXCLKESC%d domain not coming " \ + "out of reset\n", i); + } + return -EIO; + } + } + + return 0; +} + +static int dsi_cio_init(struct omap_dss_device *dssdev) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + int r; + int num_data_lanes_dssdev = dsi_get_num_data_lanes_dssdev(dssdev); + u32 l; - DSSDBG("dsi_complexio_init\n"); + DSSDBGF(); - /* CIO_CLK_ICG, enable L3 clk to CIO */ - REG_FLD_MOD(DSI_CLK_CTRL, 1, 14, 14); + if (dsi->dsi_mux_pads) + dsi->dsi_mux_pads(true); + + dsi_enable_scp_clk(dsidev); /* A dummy read using the SCP interface to any DSIPHY register is * required after DSIPHY reset to complete the reset of the DSI complex * I/O. 
*/ - dsi_read_reg(DSI_DSIPHY_CFG5); + dsi_read_reg(dsidev, DSI_DSIPHY_CFG5); - if (wait_for_bit_change(DSI_DSIPHY_CFG5, 30, 1) != 1) { - DSSERR("ComplexIO PHY not coming out of reset.\n"); - r = -ENODEV; - goto err; + if (wait_for_bit_change(dsidev, DSI_DSIPHY_CFG5, 30, 1) != 1) { + DSSERR("CIO SCP Clock domain not coming out of reset.\n"); + r = -EIO; + goto err_scp_clk_dom; } - dsi_complexio_config(dssdev); + dsi_set_lane_config(dssdev); + + /* set TX STOP MODE timer to maximum for this operation */ + l = dsi_read_reg(dsidev, DSI_TIMING1); + l = FLD_MOD(l, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */ + l = FLD_MOD(l, 1, 14, 14); /* STOP_STATE_X16_IO */ + l = FLD_MOD(l, 1, 13, 13); /* STOP_STATE_X4_IO */ + l = FLD_MOD(l, 0x1fff, 12, 0); /* STOP_STATE_COUNTER_IO */ + dsi_write_reg(dsidev, DSI_TIMING1, l); - r = dsi_complexio_power(DSI_COMPLEXIO_POWER_ON); + if (dsi->ulps_enabled) { + u32 lane_mask = DSI_CLK_P | DSI_DATA1_P | DSI_DATA2_P; + DSSDBG("manual ulps exit\n"); + + /* ULPS is exited by Mark-1 state for 1ms, followed by + * stop state. DSS HW cannot do this via the normal + * ULPS exit sequence, as after reset the DSS HW thinks + * that we are not in ULPS mode, and refuses to send the + * sequence. So we need to send the ULPS exit sequence + * manually. + */ + + if (num_data_lanes_dssdev > 2) + lane_mask |= DSI_DATA3_P; + + if (num_data_lanes_dssdev > 3) + lane_mask |= DSI_DATA4_P; + + dsi_cio_enable_lane_override(dssdev, lane_mask); + } + + r = dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ON); if (r) - goto err; + goto err_cio_pwr; - if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 29, 1) != 1) { - DSSERR("ComplexIO not coming out of reset.\n"); + if (wait_for_bit_change(dsidev, DSI_COMPLEXIO_CFG1, 29, 1) != 1) { + DSSERR("CIO PWR clock domain not coming out of reset.\n"); r = -ENODEV; - goto err; + goto err_cio_pwr_dom; } - if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 21, 1) != 1) { - DSSERR("ComplexIO LDO power down.\n"); - r = -ENODEV; - goto err; + dsi_if_enable(dsidev, true); + dsi_if_enable(dsidev, false); + REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */ + + r = dsi_cio_wait_tx_clk_esc_reset(dssdev); + if (r) + goto err_tx_clk_esc_rst; + + if (dsi->ulps_enabled) { + /* Keep Mark-1 state for 1ms (as per DSI spec) */ + ktime_t wait = ns_to_ktime(1000 * 1000); + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_hrtimeout(&wait, HRTIMER_MODE_REL); + + /* Disable the override. The lanes should be set to Mark-11 + * state by the HW */ + dsi_cio_disable_lane_override(dsidev); } - dsi_complexio_timings(); + /* FORCE_TX_STOP_MODE_IO */ + REG_FLD_MOD(dsidev, DSI_TIMING1, 0, 15, 15); - /* - The configuration of the DSI complex I/O (number of data lanes, - position, differential order) should not be changed while - DSS.DSI_CLK_CRTRL[20] LP_CLK_ENABLE bit is set to 1. For the - hardware to recognize a new configuration of the complex I/O (done - in DSS.DSI_COMPLEXIO_CFG1 register), it is recommended to follow - this sequence: First set the DSS.DSI_CTRL[0] IF_EN bit to 1, next - reset the DSS.DSI_CTRL[0] IF_EN to 0, then set DSS.DSI_CLK_CTRL[20] - LP_CLK_ENABLE to 1, and finally, set again the DSS.DSI_CTRL[0] IF_EN - bit to 1. If the sequence is not followed, the DSi complex I/O - configuration is undetermined. 
- */ - dsi_if_enable(1); - dsi_if_enable(0); - REG_FLD_MOD(DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */ - dsi_if_enable(1); - dsi_if_enable(0); + dsi_cio_timings(dsidev); + + dsi->ulps_enabled = false; DSSDBG("CIO init done\n"); -err: + + return 0; + +err_tx_clk_esc_rst: + REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 20, 20); /* LP_CLK_ENABLE */ +err_cio_pwr_dom: + dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF); +err_cio_pwr: + if (dsi->ulps_enabled) + dsi_cio_disable_lane_override(dsidev); +err_scp_clk_dom: + dsi_disable_scp_clk(dsidev); + if (dsi->dsi_mux_pads) + dsi->dsi_mux_pads(false); return r; } -static void dsi_complexio_uninit(void) +static void dsi_cio_uninit(struct platform_device *dsidev) { - dsi_complexio_power(DSI_COMPLEXIO_POWER_OFF); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF); + dsi_disable_scp_clk(dsidev); + if (dsi->dsi_mux_pads) + dsi->dsi_mux_pads(false); } -static int _dsi_wait_reset(void) +static int _dsi_wait_reset(struct platform_device *dsidev) { int t = 0; - while (REG_GET(DSI_SYSSTATUS, 0, 0) == 0) { + while (REG_GET(dsidev, DSI_SYSSTATUS, 0, 0) == 0) { if (++t > 5) { DSSERR("soft reset failed\n"); return -ENODEV; @@ -1951,28 +2478,30 @@ static int _dsi_wait_reset(void) return 0; } -static int _dsi_reset(void) +static int _dsi_reset(struct platform_device *dsidev) { /* Soft reset */ - REG_FLD_MOD(DSI_SYSCONFIG, 1, 1, 1); - return _dsi_wait_reset(); + REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 1, 1); + return _dsi_wait_reset(dsidev); } -static void dsi_config_tx_fifo(enum fifo_size size1, enum fifo_size size2, +static void dsi_config_tx_fifo(struct platform_device *dsidev, + enum fifo_size size1, enum fifo_size size2, enum fifo_size size3, enum fifo_size size4) { + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); u32 r = 0; int add = 0; int i; - dsi.vc[0].fifo_size = size1; - dsi.vc[1].fifo_size = size2; - dsi.vc[2].fifo_size = size3; - dsi.vc[3].fifo_size = size4; + dsi->vc[0].fifo_size = size1; + dsi->vc[1].fifo_size = size2; + dsi->vc[2].fifo_size = size3; + dsi->vc[3].fifo_size = size4; for (i = 0; i < 4; i++) { u8 v; - int size = dsi.vc[i].fifo_size; + int size = dsi->vc[i].fifo_size; if (add + size > 4) { DSSERR("Illegal FIFO configuration\n"); @@ -1985,24 +2514,26 @@ static void dsi_config_tx_fifo(enum fifo_size size1, enum fifo_size size2, add += size; } - dsi_write_reg(DSI_TX_FIFO_VC_SIZE, r); + dsi_write_reg(dsidev, DSI_TX_FIFO_VC_SIZE, r); } -static void dsi_config_rx_fifo(enum fifo_size size1, enum fifo_size size2, +static void dsi_config_rx_fifo(struct platform_device *dsidev, + enum fifo_size size1, enum fifo_size size2, enum fifo_size size3, enum fifo_size size4) { + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); u32 r = 0; int add = 0; int i; - dsi.vc[0].fifo_size = size1; - dsi.vc[1].fifo_size = size2; - dsi.vc[2].fifo_size = size3; - dsi.vc[3].fifo_size = size4; + dsi->vc[0].fifo_size = size1; + dsi->vc[1].fifo_size = size2; + dsi->vc[2].fifo_size = size3; + dsi->vc[3].fifo_size = size4; for (i = 0; i < 4; i++) { u8 v; - int size = dsi.vc[i].fifo_size; + int size = dsi->vc[i].fifo_size; if (add + size > 4) { DSSERR("Illegal FIFO configuration\n"); @@ -2015,18 +2546,18 @@ static void dsi_config_rx_fifo(enum fifo_size size1, enum fifo_size size2, add += size; } - dsi_write_reg(DSI_RX_FIFO_VC_SIZE, r); + dsi_write_reg(dsidev, DSI_RX_FIFO_VC_SIZE, r); } -static int dsi_force_tx_stop_mode_io(void) +static int dsi_force_tx_stop_mode_io(struct platform_device *dsidev) { u32 r; - r = 
dsi_read_reg(DSI_TIMING1); + r = dsi_read_reg(dsidev, DSI_TIMING1); r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */ - dsi_write_reg(DSI_TIMING1, r); + dsi_write_reg(dsidev, DSI_TIMING1, r); - if (wait_for_bit_change(DSI_TIMING1, 15, 0) != 0) { + if (wait_for_bit_change(dsidev, DSI_TIMING1, 15, 0) != 0) { DSSERR("TX_STOP bit not going down\n"); return -EIO; } @@ -2034,16 +2565,135 @@ static int dsi_force_tx_stop_mode_io(void) return 0; } -static int dsi_vc_enable(int channel, bool enable) +static bool dsi_vc_is_enabled(struct platform_device *dsidev, int channel) +{ + return REG_GET(dsidev, DSI_VC_CTRL(channel), 0, 0); +} + +static void dsi_packet_sent_handler_vp(void *data, u32 mask) +{ + struct dsi_packet_sent_handler_data *vp_data = + (struct dsi_packet_sent_handler_data *) data; + struct dsi_data *dsi = dsi_get_dsidrv_data(vp_data->dsidev); + const int channel = dsi->update_channel; + u8 bit = dsi->te_enabled ? 30 : 31; + + if (REG_GET(vp_data->dsidev, DSI_VC_TE(channel), bit, bit) == 0) + complete(vp_data->completion); +} + +static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + DECLARE_COMPLETION_ONSTACK(completion); + struct dsi_packet_sent_handler_data vp_data = { dsidev, &completion }; + int r = 0; + u8 bit; + + bit = dsi->te_enabled ? 30 : 31; + + r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp, + &vp_data, DSI_VC_IRQ_PACKET_SENT); + if (r) + goto err0; + + /* Wait for completion only if TE_EN/TE_START is still set */ + if (REG_GET(dsidev, DSI_VC_TE(channel), bit, bit)) { + if (wait_for_completion_timeout(&completion, + msecs_to_jiffies(10)) == 0) { + DSSERR("Failed to complete previous frame transfer\n"); + r = -EIO; + goto err1; + } + } + + dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp, + &vp_data, DSI_VC_IRQ_PACKET_SENT); + + return 0; +err1: + dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp, + &vp_data, DSI_VC_IRQ_PACKET_SENT); +err0: + return r; +} + +static void dsi_packet_sent_handler_l4(void *data, u32 mask) +{ + struct dsi_packet_sent_handler_data *l4_data = + (struct dsi_packet_sent_handler_data *) data; + struct dsi_data *dsi = dsi_get_dsidrv_data(l4_data->dsidev); + const int channel = dsi->update_channel; + + if (REG_GET(l4_data->dsidev, DSI_VC_CTRL(channel), 5, 5) == 0) + complete(l4_data->completion); +} + +static int dsi_sync_vc_l4(struct platform_device *dsidev, int channel) +{ + DECLARE_COMPLETION_ONSTACK(completion); + struct dsi_packet_sent_handler_data l4_data = { dsidev, &completion }; + int r = 0; + + r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4, + &l4_data, DSI_VC_IRQ_PACKET_SENT); + if (r) + goto err0; + + /* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */ + if (REG_GET(dsidev, DSI_VC_CTRL(channel), 5, 5)) { + if (wait_for_completion_timeout(&completion, + msecs_to_jiffies(10)) == 0) { + DSSERR("Failed to complete previous l4 transfer\n"); + r = -EIO; + goto err1; + } + } + + dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4, + &l4_data, DSI_VC_IRQ_PACKET_SENT); + + return 0; +err1: + dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4, + &l4_data, DSI_VC_IRQ_PACKET_SENT); +err0: + return r; +} + +static int dsi_sync_vc(struct platform_device *dsidev, int channel) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + WARN_ON(!dsi_bus_is_locked(dsidev)); + + WARN_ON(in_interrupt()); + + if (!dsi_vc_is_enabled(dsidev, channel)) + return 0; + + 
switch (dsi->vc[channel].mode) { + case DSI_VC_MODE_VP: + return dsi_sync_vc_vp(dsidev, channel); + case DSI_VC_MODE_L4: + return dsi_sync_vc_l4(dsidev, channel); + default: + BUG(); + } +} + +static int dsi_vc_enable(struct platform_device *dsidev, int channel, + bool enable) { DSSDBG("dsi_vc_enable channel %d, enable %d\n", channel, enable); enable = enable ? 1 : 0; - REG_FLD_MOD(DSI_VC_CTRL(channel), enable, 0, 0); + REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 0, 0); - if (wait_for_bit_change(DSI_VC_CTRL(channel), 0, enable) != enable) { + if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), + 0, enable) != enable) { DSSERR("Failed to set dsi_vc_enable to %d\n", enable); return -EIO; } @@ -2051,13 +2701,13 @@ static int dsi_vc_enable(int channel, bool enable) return 0; } -static void dsi_vc_initial_config(int channel) +static void dsi_vc_initial_config(struct platform_device *dsidev, int channel) { u32 r; DSSDBGF("%d", channel); - r = dsi_read_reg(DSI_VC_CTRL(channel)); + r = dsi_read_reg(dsidev, DSI_VC_CTRL(channel)); if (FLD_GET(r, 15, 15)) /* VC_BUSY */ DSSERR("VC(%d) busy when trying to configure it!\n", @@ -2070,85 +2720,107 @@ static void dsi_vc_initial_config(int channel) r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */ r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */ r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */ + if (dss_has_feature(FEAT_DSI_VC_OCP_WIDTH)) + r = FLD_MOD(r, 3, 11, 10); /* OCP_WIDTH = 32 bit */ r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */ r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */ - dsi_write_reg(DSI_VC_CTRL(channel), r); + dsi_write_reg(dsidev, DSI_VC_CTRL(channel), r); } -static int dsi_vc_config_l4(int channel) +static int dsi_vc_config_l4(struct platform_device *dsidev, int channel) { - if (dsi.vc[channel].mode == DSI_VC_MODE_L4) + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + if (dsi->vc[channel].mode == DSI_VC_MODE_L4) return 0; DSSDBGF("%d", channel); - dsi_vc_enable(channel, 0); + dsi_sync_vc(dsidev, channel); + + dsi_vc_enable(dsidev, channel, 0); /* VC_BUSY */ - if (wait_for_bit_change(DSI_VC_CTRL(channel), 15, 0) != 0) { + if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0) != 0) { DSSERR("vc(%d) busy when trying to config for L4\n", channel); return -EIO; } - REG_FLD_MOD(DSI_VC_CTRL(channel), 0, 1, 1); /* SOURCE, 0 = L4 */ + REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 1, 1); /* SOURCE, 0 = L4 */ + + /* DCS_CMD_ENABLE */ + if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) + REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 30, 30); - dsi_vc_enable(channel, 1); + dsi_vc_enable(dsidev, channel, 1); - dsi.vc[channel].mode = DSI_VC_MODE_L4; + dsi->vc[channel].mode = DSI_VC_MODE_L4; return 0; } -static int dsi_vc_config_vp(int channel) +static int dsi_vc_config_vp(struct platform_device *dsidev, int channel) { - if (dsi.vc[channel].mode == DSI_VC_MODE_VP) + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + if (dsi->vc[channel].mode == DSI_VC_MODE_VP) return 0; DSSDBGF("%d", channel); - dsi_vc_enable(channel, 0); + dsi_sync_vc(dsidev, channel); + + dsi_vc_enable(dsidev, channel, 0); /* VC_BUSY */ - if (wait_for_bit_change(DSI_VC_CTRL(channel), 15, 0) != 0) { + if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0) != 0) { DSSERR("vc(%d) busy when trying to config for VP\n", channel); return -EIO; } - REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 1, 1); /* SOURCE, 1 = video port */ + /* SOURCE, 1 = video port */ + REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 1, 1); + + /* DCS_CMD_ENABLE */ + if 
(dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) + REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 30, 30); - dsi_vc_enable(channel, 1); + dsi_vc_enable(dsidev, channel, 1); - dsi.vc[channel].mode = DSI_VC_MODE_VP; + dsi->vc[channel].mode = DSI_VC_MODE_VP; return 0; } -void omapdss_dsi_vc_enable_hs(int channel, bool enable) +void omapdss_dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel, + bool enable) { + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable); - WARN_ON(!dsi_bus_is_locked()); + WARN_ON(!dsi_bus_is_locked(dsidev)); - dsi_vc_enable(channel, 0); - dsi_if_enable(0); + dsi_vc_enable(dsidev, channel, 0); + dsi_if_enable(dsidev, 0); - REG_FLD_MOD(DSI_VC_CTRL(channel), enable, 9, 9); + REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 9, 9); - dsi_vc_enable(channel, 1); - dsi_if_enable(1); + dsi_vc_enable(dsidev, channel, 1); + dsi_if_enable(dsidev, 1); - dsi_force_tx_stop_mode_io(); + dsi_force_tx_stop_mode_io(dsidev); } EXPORT_SYMBOL(omapdss_dsi_vc_enable_hs); -static void dsi_vc_flush_long_data(int channel) +static void dsi_vc_flush_long_data(struct platform_device *dsidev, int channel) { - while (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { + while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) { u32 val; - val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel)); + val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel)); DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n", (val >> 0) & 0xff, (val >> 8) & 0xff, @@ -2194,13 +2866,14 @@ static void dsi_show_rx_ack_with_err(u16 err) DSSERR("\t\tDSI Protocol Violation\n"); } -static u16 dsi_vc_flush_receive_data(int channel) +static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev, + int channel) { /* RX_FIFO_NOT_EMPTY */ - while (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { + while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) { u32 val; u8 dt; - val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel)); + val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel)); DSSERR("\trawval %#08x\n", val); dt = FLD_GET(val, 5, 0); if (dt == DSI_DT_RX_ACK_WITH_ERR) { @@ -2215,7 +2888,7 @@ static u16 dsi_vc_flush_receive_data(int channel) } else if (dt == DSI_DT_RX_DCS_LONG_READ) { DSSERR("\tDCS long response, len %d\n", FLD_GET(val, 23, 8)); - dsi_vc_flush_long_data(channel); + dsi_vc_flush_long_data(dsidev, channel); } else { DSSERR("\tunknown datatype 0x%02x\n", dt); } @@ -2223,40 +2896,44 @@ static u16 dsi_vc_flush_receive_data(int channel) return 0; } -static int dsi_vc_send_bta(int channel) +static int dsi_vc_send_bta(struct platform_device *dsidev, int channel) { - if (dsi.debug_write || dsi.debug_read) + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + if (dsi->debug_write || dsi->debug_read) DSSDBG("dsi_vc_send_bta %d\n", channel); - WARN_ON(!dsi_bus_is_locked()); + WARN_ON(!dsi_bus_is_locked(dsidev)); - if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { /* RX_FIFO_NOT_EMPTY */ + /* RX_FIFO_NOT_EMPTY */ + if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) { DSSERR("rx fifo not empty when sending BTA, dumping data:\n"); - dsi_vc_flush_receive_data(channel); + dsi_vc_flush_receive_data(dsidev, channel); } - REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */ + REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */ return 0; } -int dsi_vc_send_bta_sync(int channel) +int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel) { + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 
DECLARE_COMPLETION_ONSTACK(completion); int r = 0; u32 err; - r = dsi_register_isr_vc(channel, dsi_completion_handler, + r = dsi_register_isr_vc(dsidev, channel, dsi_completion_handler, &completion, DSI_VC_IRQ_BTA); if (r) goto err0; - r = dsi_register_isr(dsi_completion_handler, &completion, + r = dsi_register_isr(dsidev, dsi_completion_handler, &completion, DSI_IRQ_ERROR_MASK); if (r) goto err1; - r = dsi_vc_send_bta(channel); + r = dsi_vc_send_bta(dsidev, channel); if (r) goto err2; @@ -2267,41 +2944,42 @@ int dsi_vc_send_bta_sync(int channel) goto err2; } - err = dsi_get_errors(); + err = dsi_get_errors(dsidev); if (err) { DSSERR("Error while sending BTA: %x\n", err); r = -EIO; goto err2; } err2: - dsi_unregister_isr(dsi_completion_handler, &completion, + dsi_unregister_isr(dsidev, dsi_completion_handler, &completion, DSI_IRQ_ERROR_MASK); err1: - dsi_unregister_isr_vc(channel, dsi_completion_handler, + dsi_unregister_isr_vc(dsidev, channel, dsi_completion_handler, &completion, DSI_VC_IRQ_BTA); err0: return r; } EXPORT_SYMBOL(dsi_vc_send_bta_sync); -static inline void dsi_vc_write_long_header(int channel, u8 data_type, - u16 len, u8 ecc) +static inline void dsi_vc_write_long_header(struct platform_device *dsidev, + int channel, u8 data_type, u16 len, u8 ecc) { + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); u32 val; u8 data_id; - WARN_ON(!dsi_bus_is_locked()); + WARN_ON(!dsi_bus_is_locked(dsidev)); - data_id = data_type | dsi.vc[channel].vc_id << 6; + data_id = data_type | dsi->vc[channel].vc_id << 6; val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) | FLD_VAL(ecc, 31, 24); - dsi_write_reg(DSI_VC_LONG_PACKET_HEADER(channel), val); + dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_HEADER(channel), val); } -static inline void dsi_vc_write_long_payload(int channel, - u8 b1, u8 b2, u8 b3, u8 b4) +static inline void dsi_vc_write_long_payload(struct platform_device *dsidev, + int channel, u8 b1, u8 b2, u8 b3, u8 b4) { u32 val; @@ -2310,34 +2988,35 @@ static inline void dsi_vc_write_long_payload(int channel, /* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n", b1, b2, b3, b4, val); */ - dsi_write_reg(DSI_VC_LONG_PACKET_PAYLOAD(channel), val); + dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(channel), val); } -static int dsi_vc_send_long(int channel, u8 data_type, u8 *data, u16 len, - u8 ecc) +static int dsi_vc_send_long(struct platform_device *dsidev, int channel, + u8 data_type, u8 *data, u16 len, u8 ecc) { /*u32 val; */ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); int i; u8 *p; int r = 0; u8 b1, b2, b3, b4; - if (dsi.debug_write) + if (dsi->debug_write) DSSDBG("dsi_vc_send_long, %d bytes\n", len); /* len + header */ - if (dsi.vc[channel].fifo_size * 32 * 4 < len + 4) { + if (dsi->vc[channel].fifo_size * 32 * 4 < len + 4) { DSSERR("unable to send long packet: packet too long.\n"); return -EINVAL; } - dsi_vc_config_l4(channel); + dsi_vc_config_l4(dsidev, channel); - dsi_vc_write_long_header(channel, data_type, len, ecc); + dsi_vc_write_long_header(dsidev, channel, data_type, len, ecc); p = data; for (i = 0; i < len >> 2; i++) { - if (dsi.debug_write) + if (dsi->debug_write) DSSDBG("\tsending full packet %d\n", i); b1 = *p++; @@ -2345,14 +3024,14 @@ static int dsi_vc_send_long(int channel, u8 data_type, u8 *data, u16 len, b3 = *p++; b4 = *p++; - dsi_vc_write_long_payload(channel, b1, b2, b3, b4); + dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, b4); } i = len % 4; if (i) { b1 = 0; b2 = 0; b3 = 0; - if (dsi.debug_write) + if (dsi->debug_write) DSSDBG("\tsending remainder 
bytes %d\n", i); switch (i) { @@ -2370,62 +3049,69 @@ static int dsi_vc_send_long(int channel, u8 data_type, u8 *data, u16 len, break; } - dsi_vc_write_long_payload(channel, b1, b2, b3, 0); + dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, 0); } return r; } -static int dsi_vc_send_short(int channel, u8 data_type, u16 data, u8 ecc) +static int dsi_vc_send_short(struct platform_device *dsidev, int channel, + u8 data_type, u16 data, u8 ecc) { + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); u32 r; u8 data_id; - WARN_ON(!dsi_bus_is_locked()); + WARN_ON(!dsi_bus_is_locked(dsidev)); - if (dsi.debug_write) + if (dsi->debug_write) DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n", channel, data_type, data & 0xff, (data >> 8) & 0xff); - dsi_vc_config_l4(channel); + dsi_vc_config_l4(dsidev, channel); - if (FLD_GET(dsi_read_reg(DSI_VC_CTRL(channel)), 16, 16)) { + if (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(channel)), 16, 16)) { DSSERR("ERROR FIFO FULL, aborting transfer\n"); return -EINVAL; } - data_id = data_type | dsi.vc[channel].vc_id << 6; + data_id = data_type | dsi->vc[channel].vc_id << 6; r = (data_id << 0) | (data << 8) | (ecc << 24); - dsi_write_reg(DSI_VC_SHORT_PACKET_HEADER(channel), r); + dsi_write_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel), r); return 0; } -int dsi_vc_send_null(int channel) +int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel) { + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); u8 nullpkg[] = {0, 0, 0, 0}; - return dsi_vc_send_long(channel, DSI_DT_NULL_PACKET, nullpkg, 4, 0); + + return dsi_vc_send_long(dsidev, channel, DSI_DT_NULL_PACKET, nullpkg, + 4, 0); } EXPORT_SYMBOL(dsi_vc_send_null); -int dsi_vc_dcs_write_nosync(int channel, u8 *data, int len) +int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel, + u8 *data, int len) { + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); int r; BUG_ON(len == 0); if (len == 1) { - r = dsi_vc_send_short(channel, DSI_DT_DCS_SHORT_WRITE_0, + r = dsi_vc_send_short(dsidev, channel, DSI_DT_DCS_SHORT_WRITE_0, data[0], 0); } else if (len == 2) { - r = dsi_vc_send_short(channel, DSI_DT_DCS_SHORT_WRITE_1, + r = dsi_vc_send_short(dsidev, channel, DSI_DT_DCS_SHORT_WRITE_1, data[0] | (data[1] << 8), 0); } else { /* 0x39 = DCS Long Write */ - r = dsi_vc_send_long(channel, DSI_DT_DCS_LONG_WRITE, + r = dsi_vc_send_long(dsidev, channel, DSI_DT_DCS_LONG_WRITE, data, len, 0); } @@ -2433,21 +3119,24 @@ int dsi_vc_dcs_write_nosync(int channel, u8 *data, int len) } EXPORT_SYMBOL(dsi_vc_dcs_write_nosync); -int dsi_vc_dcs_write(int channel, u8 *data, int len) +int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data, + int len) { + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); int r; - r = dsi_vc_dcs_write_nosync(channel, data, len); + r = dsi_vc_dcs_write_nosync(dssdev, channel, data, len); if (r) goto err; - r = dsi_vc_send_bta_sync(channel); + r = dsi_vc_send_bta_sync(dssdev, channel); if (r) goto err; - if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { /* RX_FIFO_NOT_EMPTY */ + /* RX_FIFO_NOT_EMPTY */ + if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) { DSSERR("rx fifo not empty after write, dumping data:\n"); - dsi_vc_flush_receive_data(channel); + dsi_vc_flush_receive_data(dsidev, channel); r = -EIO; goto err; } @@ -2460,47 +3149,51 @@ err: } EXPORT_SYMBOL(dsi_vc_dcs_write); -int dsi_vc_dcs_write_0(int channel, u8 dcs_cmd) +int dsi_vc_dcs_write_0(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd) { - return 
dsi_vc_dcs_write(channel, &dcs_cmd, 1); + return dsi_vc_dcs_write(dssdev, channel, &dcs_cmd, 1); } EXPORT_SYMBOL(dsi_vc_dcs_write_0); -int dsi_vc_dcs_write_1(int channel, u8 dcs_cmd, u8 param) +int dsi_vc_dcs_write_1(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd, + u8 param) { u8 buf[2]; buf[0] = dcs_cmd; buf[1] = param; - return dsi_vc_dcs_write(channel, buf, 2); + return dsi_vc_dcs_write(dssdev, channel, buf, 2); } EXPORT_SYMBOL(dsi_vc_dcs_write_1); -int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen) +int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd, + u8 *buf, int buflen) { + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); u32 val; u8 dt; int r; - if (dsi.debug_read) + if (dsi->debug_read) DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %x)\n", channel, dcs_cmd); - r = dsi_vc_send_short(channel, DSI_DT_DCS_READ, dcs_cmd, 0); + r = dsi_vc_send_short(dsidev, channel, DSI_DT_DCS_READ, dcs_cmd, 0); if (r) goto err; - r = dsi_vc_send_bta_sync(channel); + r = dsi_vc_send_bta_sync(dssdev, channel); if (r) goto err; /* RX_FIFO_NOT_EMPTY */ - if (REG_GET(DSI_VC_CTRL(channel), 20, 20) == 0) { + if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20) == 0) { DSSERR("RX fifo empty when trying to read.\n"); r = -EIO; goto err; } - val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel)); - if (dsi.debug_read) + val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel)); + if (dsi->debug_read) DSSDBG("\theader: %08x\n", val); dt = FLD_GET(val, 5, 0); if (dt == DSI_DT_RX_ACK_WITH_ERR) { @@ -2511,7 +3204,7 @@ int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen) } else if (dt == DSI_DT_RX_SHORT_READ_1) { u8 data = FLD_GET(val, 15, 8); - if (dsi.debug_read) + if (dsi->debug_read) DSSDBG("\tDCS short response, 1 byte: %02x\n", data); if (buflen < 1) { @@ -2524,7 +3217,7 @@ int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen) return 1; } else if (dt == DSI_DT_RX_SHORT_READ_2) { u16 data = FLD_GET(val, 23, 8); - if (dsi.debug_read) + if (dsi->debug_read) DSSDBG("\tDCS short response, 2 byte: %04x\n", data); if (buflen < 2) { @@ -2539,7 +3232,7 @@ int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen) } else if (dt == DSI_DT_RX_DCS_LONG_READ) { int w; int len = FLD_GET(val, 23, 8); - if (dsi.debug_read) + if (dsi->debug_read) DSSDBG("\tDCS long response, len %d\n", len); if (len > buflen) { @@ -2550,8 +3243,9 @@ int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen) /* two byte checksum ends the packet, not included in len */ for (w = 0; w < len + 2;) { int b; - val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel)); - if (dsi.debug_read) + val = dsi_read_reg(dsidev, + DSI_VC_SHORT_PACKET_HEADER(channel)); + if (dsi->debug_read) DSSDBG("\t\t%02x %02x %02x %02x\n", (val >> 0) & 0xff, (val >> 8) & 0xff, @@ -2582,11 +3276,12 @@ err: } EXPORT_SYMBOL(dsi_vc_dcs_read); -int dsi_vc_dcs_read_1(int channel, u8 dcs_cmd, u8 *data) +int dsi_vc_dcs_read_1(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd, + u8 *data) { int r; - r = dsi_vc_dcs_read(channel, dcs_cmd, data, 1); + r = dsi_vc_dcs_read(dssdev, channel, dcs_cmd, data, 1); if (r < 0) return r; @@ -2598,12 +3293,13 @@ int dsi_vc_dcs_read_1(int channel, u8 dcs_cmd, u8 *data) } EXPORT_SYMBOL(dsi_vc_dcs_read_1); -int dsi_vc_dcs_read_2(int channel, u8 dcs_cmd, u8 *data1, u8 *data2) +int dsi_vc_dcs_read_2(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd, + u8 *data1, u8 *data2) { u8 buf[2]; int 
r; - r = dsi_vc_dcs_read(channel, dcs_cmd, buf, 2); + r = dsi_vc_dcs_read(dssdev, channel, dcs_cmd, buf, 2); if (r < 0) return r; @@ -2618,14 +3314,94 @@ int dsi_vc_dcs_read_2(int channel, u8 dcs_cmd, u8 *data1, u8 *data2) } EXPORT_SYMBOL(dsi_vc_dcs_read_2); -int dsi_vc_set_max_rx_packet_size(int channel, u16 len) +int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel, + u16 len) { - return dsi_vc_send_short(channel, DSI_DT_SET_MAX_RET_PKG_SIZE, + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + + return dsi_vc_send_short(dsidev, channel, DSI_DT_SET_MAX_RET_PKG_SIZE, len, 0); } EXPORT_SYMBOL(dsi_vc_set_max_rx_packet_size); -static void dsi_set_lp_rx_timeout(unsigned ticks, bool x4, bool x16) +static int dsi_enter_ulps(struct platform_device *dsidev) +{ + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + DECLARE_COMPLETION_ONSTACK(completion); + int r; + + DSSDBGF(); + + WARN_ON(!dsi_bus_is_locked(dsidev)); + + WARN_ON(dsi->ulps_enabled); + + if (dsi->ulps_enabled) + return 0; + + if (REG_GET(dsidev, DSI_CLK_CTRL, 13, 13)) { + DSSERR("DDR_CLK_ALWAYS_ON enabled when entering ULPS\n"); + return -EIO; + } + + dsi_sync_vc(dsidev, 0); + dsi_sync_vc(dsidev, 1); + dsi_sync_vc(dsidev, 2); + dsi_sync_vc(dsidev, 3); + + dsi_force_tx_stop_mode_io(dsidev); + + dsi_vc_enable(dsidev, 0, false); + dsi_vc_enable(dsidev, 1, false); + dsi_vc_enable(dsidev, 2, false); + dsi_vc_enable(dsidev, 3, false); + + if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 16, 16)) { /* HS_BUSY */ + DSSERR("HS busy when enabling ULPS\n"); + return -EIO; + } + + if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 17, 17)) { /* LP_BUSY */ + DSSERR("LP busy when enabling ULPS\n"); + return -EIO; + } + + r = dsi_register_isr_cio(dsidev, dsi_completion_handler, &completion, + DSI_CIO_IRQ_ULPSACTIVENOT_ALL0); + if (r) + return r; + + /* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */ + /* LANEx_ULPS_SIG2 */ + REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, (1 << 0) | (1 << 1) | (1 << 2), + 7, 5); + + if (wait_for_completion_timeout(&completion, + msecs_to_jiffies(1000)) == 0) { + DSSERR("ULPS enable timeout\n"); + r = -EIO; + goto err; + } + + dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion, + DSI_CIO_IRQ_ULPSACTIVENOT_ALL0); + + dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS); + + dsi_if_enable(dsidev, false); + + dsi->ulps_enabled = true; + + return 0; + +err: + dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion, + DSI_CIO_IRQ_ULPSACTIVENOT_ALL0); + return r; +} + +static void dsi_set_lp_rx_timeout(struct platform_device *dsidev, + unsigned ticks, bool x4, bool x16) { unsigned long fck; unsigned long total_ticks; @@ -2634,14 +3410,14 @@ static void dsi_set_lp_rx_timeout(unsigned ticks, bool x4, bool x16) BUG_ON(ticks > 0x1fff); /* ticks in DSI_FCK */ - fck = dsi_fclk_rate(); + fck = dsi_fclk_rate(dsidev); - r = dsi_read_reg(DSI_TIMING2); + r = dsi_read_reg(dsidev, DSI_TIMING2); r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */ r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* LP_RX_TO_X16 */ r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* LP_RX_TO_X4 */ r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */ - dsi_write_reg(DSI_TIMING2, r); + dsi_write_reg(dsidev, DSI_TIMING2, r); total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 
4 : 1); @@ -2651,7 +3427,8 @@ static void dsi_set_lp_rx_timeout(unsigned ticks, bool x4, bool x16) (total_ticks * 1000) / (fck / 1000 / 1000)); } -static void dsi_set_ta_timeout(unsigned ticks, bool x8, bool x16) +static void dsi_set_ta_timeout(struct platform_device *dsidev, unsigned ticks, + bool x8, bool x16) { unsigned long fck; unsigned long total_ticks; @@ -2660,14 +3437,14 @@ static void dsi_set_ta_timeout(unsigned ticks, bool x8, bool x16) BUG_ON(ticks > 0x1fff); /* ticks in DSI_FCK */ - fck = dsi_fclk_rate(); + fck = dsi_fclk_rate(dsidev); - r = dsi_read_reg(DSI_TIMING1); + r = dsi_read_reg(dsidev, DSI_TIMING1); r = FLD_MOD(r, 1, 31, 31); /* TA_TO */ r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* TA_TO_X16 */ r = FLD_MOD(r, x8 ? 1 : 0, 29, 29); /* TA_TO_X8 */ r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */ - dsi_write_reg(DSI_TIMING1, r); + dsi_write_reg(dsidev, DSI_TIMING1, r); total_ticks = ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1); @@ -2677,7 +3454,8 @@ static void dsi_set_ta_timeout(unsigned ticks, bool x8, bool x16) (total_ticks * 1000) / (fck / 1000 / 1000)); } -static void dsi_set_stop_state_counter(unsigned ticks, bool x4, bool x16) +static void dsi_set_stop_state_counter(struct platform_device *dsidev, + unsigned ticks, bool x4, bool x16) { unsigned long fck; unsigned long total_ticks; @@ -2686,14 +3464,14 @@ static void dsi_set_stop_state_counter(unsigned ticks, bool x4, bool x16) BUG_ON(ticks > 0x1fff); /* ticks in DSI_FCK */ - fck = dsi_fclk_rate(); + fck = dsi_fclk_rate(dsidev); - r = dsi_read_reg(DSI_TIMING1); + r = dsi_read_reg(dsidev, DSI_TIMING1); r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */ r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* STOP_STATE_X16_IO */ r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* STOP_STATE_X4_IO */ r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */ - dsi_write_reg(DSI_TIMING1, r); + dsi_write_reg(dsidev, DSI_TIMING1, r); total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1); @@ -2703,7 +3481,8 @@ static void dsi_set_stop_state_counter(unsigned ticks, bool x4, bool x16) (total_ticks * 1000) / (fck / 1000 / 1000)); } -static void dsi_set_hs_tx_timeout(unsigned ticks, bool x4, bool x16) +static void dsi_set_hs_tx_timeout(struct platform_device *dsidev, + unsigned ticks, bool x4, bool x16) { unsigned long fck; unsigned long total_ticks; @@ -2712,14 +3491,14 @@ static void dsi_set_hs_tx_timeout(unsigned ticks, bool x4, bool x16) BUG_ON(ticks > 0x1fff); /* ticks in TxByteClkHS */ - fck = dsi_get_txbyteclkhs(); + fck = dsi_get_txbyteclkhs(dsidev); - r = dsi_read_reg(DSI_TIMING2); + r = dsi_read_reg(dsidev, DSI_TIMING2); r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */ r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* HS_TX_TO_X16 */ r = FLD_MOD(r, x4 ? 1 : 0, 29, 29); /* HS_TX_TO_X8 (4 really) */ r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */ - dsi_write_reg(DSI_TIMING2, r); + dsi_write_reg(dsidev, DSI_TIMING2, r); total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1); @@ -2730,24 +3509,25 @@ static void dsi_set_hs_tx_timeout(unsigned ticks, bool x4, bool x16) } static int dsi_proto_config(struct omap_dss_device *dssdev) { + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); u32 r; int buswidth = 0; - dsi_config_tx_fifo(DSI_FIFO_SIZE_32, + dsi_config_tx_fifo(dsidev, DSI_FIFO_SIZE_32, DSI_FIFO_SIZE_32, DSI_FIFO_SIZE_32, DSI_FIFO_SIZE_32); - dsi_config_rx_fifo(DSI_FIFO_SIZE_32, + dsi_config_rx_fifo(dsidev, DSI_FIFO_SIZE_32, DSI_FIFO_SIZE_32, DSI_FIFO_SIZE_32, DSI_FIFO_SIZE_32); /* XXX what values for the timeouts? 
*/ - dsi_set_stop_state_counter(0x1000, false, false); - dsi_set_ta_timeout(0x1fff, true, true); - dsi_set_lp_rx_timeout(0x1fff, true, true); - dsi_set_hs_tx_timeout(0x1fff, true, true); + dsi_set_stop_state_counter(dsidev, 0x1000, false, false); + dsi_set_ta_timeout(dsidev, 0x1fff, true, true); + dsi_set_lp_rx_timeout(dsidev, 0x1fff, true, true); + dsi_set_hs_tx_timeout(dsidev, 0x1fff, true, true); switch (dssdev->ctrl.pixel_size) { case 16: @@ -2763,7 +3543,7 @@ static int dsi_proto_config(struct omap_dss_device *dssdev) BUG(); } - r = dsi_read_reg(DSI_CTRL); + r = dsi_read_reg(dsidev, DSI_CTRL); r = FLD_MOD(r, 1, 1, 1); /* CS_RX_EN */ r = FLD_MOD(r, 1, 2, 2); /* ECC_RX_EN */ r = FLD_MOD(r, 1, 3, 3); /* TX_FIFO_ARBITRATION */ @@ -2773,21 +3553,25 @@ static int dsi_proto_config(struct omap_dss_device *dssdev) r = FLD_MOD(r, 2, 13, 12); /* LINE_BUFFER, 2 lines */ r = FLD_MOD(r, 1, 14, 14); /* TRIGGER_RESET_MODE */ r = FLD_MOD(r, 1, 19, 19); /* EOT_ENABLE */ - r = FLD_MOD(r, 1, 24, 24); /* DCS_CMD_ENABLE */ - r = FLD_MOD(r, 0, 25, 25); /* DCS_CMD_CODE, 1=start, 0=continue */ + if (!dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) { + r = FLD_MOD(r, 1, 24, 24); /* DCS_CMD_ENABLE */ + /* DCS_CMD_CODE, 1=start, 0=continue */ + r = FLD_MOD(r, 0, 25, 25); + } - dsi_write_reg(DSI_CTRL, r); + dsi_write_reg(dsidev, DSI_CTRL, r); - dsi_vc_initial_config(0); - dsi_vc_initial_config(1); - dsi_vc_initial_config(2); - dsi_vc_initial_config(3); + dsi_vc_initial_config(dsidev, 0); + dsi_vc_initial_config(dsidev, 1); + dsi_vc_initial_config(dsidev, 2); + dsi_vc_initial_config(dsidev, 3); return 0; } static void dsi_proto_timings(struct omap_dss_device *dssdev) { + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail; unsigned tclk_pre, tclk_post; unsigned ths_prepare, ths_prepare_ths_zero, ths_zero; @@ -2797,32 +3581,27 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev) unsigned ths_eot; u32 r; - r = dsi_read_reg(DSI_DSIPHY_CFG0); + r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0); ths_prepare = FLD_GET(r, 31, 24); ths_prepare_ths_zero = FLD_GET(r, 23, 16); ths_zero = ths_prepare_ths_zero - ths_prepare; ths_trail = FLD_GET(r, 15, 8); ths_exit = FLD_GET(r, 7, 0); - r = dsi_read_reg(DSI_DSIPHY_CFG1); + r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1); tlpx = FLD_GET(r, 22, 16) * 2; tclk_trail = FLD_GET(r, 15, 8); tclk_zero = FLD_GET(r, 7, 0); - r = dsi_read_reg(DSI_DSIPHY_CFG2); + r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2); tclk_prepare = FLD_GET(r, 7, 0); /* min 8*UI */ tclk_pre = 20; /* min 60ns + 52*UI */ - tclk_post = ns2ddr(60) + 26; + tclk_post = ns2ddr(dsidev, 60) + 26; - /* ths_eot is 2 for 2 datalanes and 4 for 1 datalane */ - if (dssdev->phy.dsi.data1_lane != 0 && - dssdev->phy.dsi.data2_lane != 0) - ths_eot = 2; - else - ths_eot = 4; + ths_eot = DIV_ROUND_UP(4, dsi_get_num_data_lanes_dssdev(dssdev)); ddr_clk_pre = DIV_ROUND_UP(tclk_pre + tlpx + tclk_zero + tclk_prepare, 4); @@ -2831,10 +3610,10 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev) BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255); BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255); - r = dsi_read_reg(DSI_CLK_TIMING); + r = dsi_read_reg(dsidev, DSI_CLK_TIMING); r = FLD_MOD(r, ddr_clk_pre, 15, 8); r = FLD_MOD(r, ddr_clk_post, 7, 0); - dsi_write_reg(DSI_CLK_TIMING, r); + dsi_write_reg(dsidev, DSI_CLK_TIMING, r); DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n", ddr_clk_pre, @@ -2848,7 +3627,7 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev) r = 
FLD_VAL(enter_hs_mode_lat, 31, 16) | FLD_VAL(exit_hs_mode_lat, 15, 0); - dsi_write_reg(DSI_VM_TIMING7, r); + dsi_write_reg(dsidev, DSI_VM_TIMING7, r); DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n", enter_hs_mode_lat, exit_hs_mode_lat); @@ -2858,25 +3637,27 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev) #define DSI_DECL_VARS \ int __dsi_cb = 0; u32 __dsi_cv = 0; -#define DSI_FLUSH(ch) \ +#define DSI_FLUSH(dsidev, ch) \ if (__dsi_cb > 0) { \ /*DSSDBG("sending long packet %#010x\n", __dsi_cv);*/ \ - dsi_write_reg(DSI_VC_LONG_PACKET_PAYLOAD(ch), __dsi_cv); \ + dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(ch), __dsi_cv); \ __dsi_cb = __dsi_cv = 0; \ } -#define DSI_PUSH(ch, data) \ +#define DSI_PUSH(dsidev, ch, data) \ do { \ __dsi_cv |= (data) << (__dsi_cb * 8); \ /*DSSDBG("cv = %#010x, cb = %d\n", __dsi_cv, __dsi_cb);*/ \ if (++__dsi_cb > 3) \ - DSI_FLUSH(ch); \ + DSI_FLUSH(dsidev, ch); \ } while (0) static int dsi_update_screen_l4(struct omap_dss_device *dssdev, int x, int y, int w, int h) { /* Note: supports only 24bit colors in 32bit container */ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); int first = 1; int fifo_stalls = 0; int max_dsi_packet_size; @@ -2915,7 +3696,7 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev, * in fifo */ /* When using CPU, max long packet size is TX buffer size */ - max_dsi_packet_size = dsi.vc[0].fifo_size * 32 * 4; + max_dsi_packet_size = dsi->vc[0].fifo_size * 32 * 4; /* we seem to get better perf if we divide the tx fifo to half, and while the other half is being sent, we fill the other half @@ -2944,35 +3725,36 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev, #if 1 /* using fifo not empty */ /* TX_FIFO_NOT_EMPTY */ - while (FLD_GET(dsi_read_reg(DSI_VC_CTRL(0)), 5, 5)) { + while (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(0)), 5, 5)) { fifo_stalls++; if (fifo_stalls > 0xfffff) { DSSERR("fifo stalls overflow, pixels left %d\n", pixels_left); - dsi_if_enable(0); + dsi_if_enable(dsidev, 0); return -EIO; } udelay(1); } #elif 1 /* using fifo emptiness */ - while ((REG_GET(DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 < + while ((REG_GET(dsidev, DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 < max_dsi_packet_size) { fifo_stalls++; if (fifo_stalls > 0xfffff) { DSSERR("fifo stalls overflow, pixels left %d\n", pixels_left); - dsi_if_enable(0); + dsi_if_enable(dsidev, 0); return -EIO; } } #else - while ((REG_GET(DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 == 0) { + while ((REG_GET(dsidev, DSI_TX_FIFO_VC_EMPTINESS, + 7, 0) + 1) * 4 == 0) { fifo_stalls++; if (fifo_stalls > 0xfffff) { DSSERR("fifo stalls overflow, pixels left %d\n", pixels_left); - dsi_if_enable(0); + dsi_if_enable(dsidev, 0); return -EIO; } } @@ -2981,17 +3763,17 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev, pixels_left -= pixels; - dsi_vc_write_long_header(0, DSI_DT_DCS_LONG_WRITE, + dsi_vc_write_long_header(dsidev, 0, DSI_DT_DCS_LONG_WRITE, 1 + pixels * bytespp, 0); - DSI_PUSH(0, dcs_cmd); + DSI_PUSH(dsidev, 0, dcs_cmd); while (pixels-- > 0) { u32 pix = __raw_readl(data++); - DSI_PUSH(0, (pix >> 16) & 0xff); - DSI_PUSH(0, (pix >> 8) & 0xff); - DSI_PUSH(0, (pix >> 0) & 0xff); + DSI_PUSH(dsidev, 0, (pix >> 16) & 0xff); + DSI_PUSH(dsidev, 0, (pix >> 8) & 0xff); + DSI_PUSH(dsidev, 0, (pix >> 0) & 0xff); current_x++; if (current_x == x+w) { @@ -3000,7 +3782,7 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev, } } - DSI_FLUSH(0); + DSI_FLUSH(dsidev, 0); } 
return 0; @@ -3009,6 +3791,8 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev, static void dsi_update_screen_dispc(struct omap_dss_device *dssdev, u16 x, u16 y, u16 w, u16 h) { + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); unsigned bytespp; unsigned bytespl; unsigned bytespf; @@ -3017,16 +3801,13 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev, unsigned packet_len; u32 l; int r; - const unsigned channel = dsi.update_channel; - /* line buffer is 1024 x 24bits */ - /* XXX: for some reason using full buffer size causes considerable TX - * slowdown with update sizes that fill the whole buffer */ - const unsigned line_buf_size = 1023 * 3; + const unsigned channel = dsi->update_channel; + const unsigned line_buf_size = dsi_get_line_buf_size(dsidev); DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n", x, y, w, h); - dsi_vc_config_vp(channel); + dsi_vc_config_vp(dsidev, channel); bytespp = dssdev->ctrl.pixel_size / 8; bytespl = w * bytespp; @@ -3047,15 +3828,16 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev, total_len += (bytespf % packet_payload) + 1; l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */ - dsi_write_reg(DSI_VC_TE(channel), l); + dsi_write_reg(dsidev, DSI_VC_TE(channel), l); - dsi_vc_write_long_header(channel, DSI_DT_DCS_LONG_WRITE, packet_len, 0); + dsi_vc_write_long_header(dsidev, channel, DSI_DT_DCS_LONG_WRITE, + packet_len, 0); - if (dsi.te_enabled) + if (dsi->te_enabled) l = FLD_MOD(l, 1, 30, 30); /* TE_EN */ else l = FLD_MOD(l, 1, 31, 31); /* TE_START */ - dsi_write_reg(DSI_VC_TE(channel), l); + dsi_write_reg(dsidev, DSI_VC_TE(channel), l); /* We put SIDLEMODE to no-idle for the duration of the transfer, * because DSS interrupts are not capable of waking up the CPU and the @@ -3065,23 +3847,23 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev, */ dispc_disable_sidle(); - dsi_perf_mark_start(); + dsi_perf_mark_start(dsidev); - r = queue_delayed_work(dsi.workqueue, &dsi.framedone_timeout_work, - msecs_to_jiffies(250)); + r = schedule_delayed_work(&dsi->framedone_timeout_work, + msecs_to_jiffies(250)); BUG_ON(r == 0); dss_start_update(dssdev); - if (dsi.te_enabled) { + if (dsi->te_enabled) { /* disable LP_RX_TO, so that we can receive TE. 
Time to wait * for TE is longer than the timer allows */ - REG_FLD_MOD(DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */ + REG_FLD_MOD(dsidev, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */ - dsi_vc_send_bta(channel); + dsi_vc_send_bta(dsidev, channel); #ifdef DSI_CATCH_MISSING_TE - mod_timer(&dsi.te_timer, jiffies + msecs_to_jiffies(250)); + mod_timer(&dsi->te_timer, jiffies + msecs_to_jiffies(250)); #endif } } @@ -3093,41 +3875,28 @@ static void dsi_te_timeout(unsigned long arg) } #endif -static void dsi_framedone_bta_callback(void *data, u32 mask); - -static void dsi_handle_framedone(int error) +static void dsi_handle_framedone(struct platform_device *dsidev, int error) { - const int channel = dsi.update_channel; - - dsi_unregister_isr_vc(channel, dsi_framedone_bta_callback, - NULL, DSI_VC_IRQ_BTA); - - cancel_delayed_work(&dsi.framedone_timeout_work); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); /* SIDLEMODE back to smart-idle */ dispc_enable_sidle(); - if (dsi.te_enabled) { + if (dsi->te_enabled) { /* enable LP_RX_TO again after the TE */ - REG_FLD_MOD(DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */ + REG_FLD_MOD(dsidev, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */ } - /* RX_FIFO_NOT_EMPTY */ - if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { - DSSERR("Received error during frame transfer:\n"); - dsi_vc_flush_receive_data(channel); - if (!error) - error = -EIO; - } - - dsi.framedone_callback(error, dsi.framedone_data); + dsi->framedone_callback(error, dsi->framedone_data); if (!error) - dsi_perf_show("DISPC"); + dsi_perf_show(dsidev, "DISPC"); } static void dsi_framedone_timeout_work_callback(struct work_struct *work) { + struct dsi_data *dsi = container_of(work, struct dsi_data, + framedone_timeout_work.work); /* XXX While extremely unlikely, we could get FRAMEDONE interrupt after * 250ms which would conflict with this timeout work. What should be * done is first cancel the transfer on the HW, and then cancel the @@ -3137,70 +3906,34 @@ static void dsi_framedone_timeout_work_callback(struct work_struct *work) DSSERR("Framedone not received for 250ms!\n"); - dsi_handle_framedone(-ETIMEDOUT); -} - -static void dsi_framedone_bta_callback(void *data, u32 mask) -{ - dsi_handle_framedone(0); - -#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC - dispc_fake_vsync_irq(); -#endif + dsi_handle_framedone(dsi->pdev, -ETIMEDOUT); } static void dsi_framedone_irq_callback(void *data, u32 mask) { - const int channel = dsi.update_channel; - int r; + struct omap_dss_device *dssdev = (struct omap_dss_device *) data; + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); /* Note: We get FRAMEDONE when DISPC has finished sending pixels and * turns itself off. However, DSI still has the pixels in its buffers, * and is sending the data. */ - if (dsi.te_enabled) { - /* enable LP_RX_TO again after the TE */ - REG_FLD_MOD(DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */ - } - - /* Send BTA after the frame. We need this for the TE to work, as TE - * trigger is only sent for BTAs without preceding packet. Thus we need - * to BTA after the pixel packets so that next BTA will cause TE - * trigger. - * - * This is not needed when TE is not in use, but we do it anyway to - * make sure that the transfer has been completed. It would be more - * optimal, but more complex, to wait only just before starting next - * transfer. - * - * Also, as there's no interrupt telling when the transfer has been - * done and the channel could be reconfigured, the only way is to - * busyloop until TE_SIZE is zero. 
With BTA we can do this - * asynchronously. - * */ - - r = dsi_register_isr_vc(channel, dsi_framedone_bta_callback, - NULL, DSI_VC_IRQ_BTA); - if (r) { - DSSERR("Failed to register BTA ISR\n"); - dsi_handle_framedone(-EIO); - return; - } + __cancel_delayed_work(&dsi->framedone_timeout_work); - r = dsi_vc_send_bta(channel); - if (r) { - DSSERR("BTA after framedone failed\n"); - dsi_unregister_isr_vc(channel, dsi_framedone_bta_callback, - NULL, DSI_VC_IRQ_BTA); - dsi_handle_framedone(-EIO); - } + dsi_handle_framedone(dsidev, 0); + +#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC + dispc_fake_vsync_irq(); +#endif } int omap_dsi_prepare_update(struct omap_dss_device *dssdev, u16 *x, u16 *y, u16 *w, u16 *h, bool enlarge_update_area) { + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); u16 dw, dh; dssdev->driver->get_resolution(dssdev, &dw, &dh); @@ -3220,7 +3953,7 @@ int omap_dsi_prepare_update(struct omap_dss_device *dssdev, if (*w == 0 || *h == 0) return -EINVAL; - dsi_perf_mark_setup(); + dsi_perf_mark_setup(dsidev); if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) { dss_setup_partial_planes(dssdev, x, y, w, h, @@ -3237,7 +3970,10 @@ int omap_dsi_update(struct omap_dss_device *dssdev, u16 x, u16 y, u16 w, u16 h, void (*callback)(int, void *), void *data) { - dsi.update_channel = channel; + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + dsi->update_channel = channel; /* OMAP DSS cannot send updates of odd widths. * omap_dsi_prepare_update() makes the widths even, but add a BUG_ON @@ -3246,14 +3982,14 @@ int omap_dsi_update(struct omap_dss_device *dssdev, BUG_ON(x % 2 == 1); if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) { - dsi.framedone_callback = callback; - dsi.framedone_data = data; + dsi->framedone_callback = callback; + dsi->framedone_data = data; - dsi.update_region.x = x; - dsi.update_region.y = y; - dsi.update_region.w = w; - dsi.update_region.h = h; - dsi.update_region.device = dssdev; + dsi->update_region.x = x; + dsi->update_region.y = y; + dsi->update_region.w = w; + dsi->update_region.h = h; + dsi->update_region.device = dssdev; dsi_update_screen_dispc(dssdev, x, y, w, h); } else { @@ -3263,7 +3999,7 @@ int omap_dsi_update(struct omap_dss_device *dssdev, if (r) return r; - dsi_perf_show("L4"); + dsi_perf_show(dsidev, "L4"); callback(0, data); } @@ -3276,9 +4012,13 @@ EXPORT_SYMBOL(omap_dsi_update); static int dsi_display_init_dispc(struct omap_dss_device *dssdev) { int r; + u32 irq; + + irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ? + DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2; - r = omap_dispc_register_isr(dsi_framedone_irq_callback, NULL, - DISPC_IRQ_FRAMEDONE); + r = omap_dispc_register_isr(dsi_framedone_irq_callback, (void *) dssdev, + irq); if (r) { DSSERR("can't get FRAMEDONE irq\n"); return r; @@ -3311,28 +4051,34 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev) static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev) { - omap_dispc_unregister_isr(dsi_framedone_irq_callback, NULL, - DISPC_IRQ_FRAMEDONE); + u32 irq; + + irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ? 
+ DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2; + + omap_dispc_unregister_isr(dsi_framedone_irq_callback, (void *) dssdev, + irq); } static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev) { + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); struct dsi_clock_info cinfo; int r; /* we always use DSS_CLK_SYSCK as input clock */ cinfo.use_sys_clk = true; - cinfo.regn = dssdev->phy.dsi.div.regn; - cinfo.regm = dssdev->phy.dsi.div.regm; - cinfo.regm_dispc = dssdev->phy.dsi.div.regm_dispc; - cinfo.regm_dsi = dssdev->phy.dsi.div.regm_dsi; + cinfo.regn = dssdev->clocks.dsi.regn; + cinfo.regm = dssdev->clocks.dsi.regm; + cinfo.regm_dispc = dssdev->clocks.dsi.regm_dispc; + cinfo.regm_dsi = dssdev->clocks.dsi.regm_dsi; r = dsi_calc_clock_rates(dssdev, &cinfo); if (r) { DSSERR("Failed to calc dsi clocks\n"); return r; } - r = dsi_pll_set_clock_div(&cinfo); + r = dsi_pll_set_clock_div(dsidev, &cinfo); if (r) { DSSERR("Failed to set dsi clocks\n"); return r; @@ -3343,14 +4089,15 @@ static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev) static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev) { + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); struct dispc_clock_info dispc_cinfo; int r; unsigned long long fck; - fck = dsi_get_pll_hsdiv_dispc_rate(); + fck = dsi_get_pll_hsdiv_dispc_rate(dsidev); - dispc_cinfo.lck_div = dssdev->phy.dsi.div.lck_div; - dispc_cinfo.pck_div = dssdev->phy.dsi.div.pck_div; + dispc_cinfo.lck_div = dssdev->clocks.dispc.channel.lck_div; + dispc_cinfo.pck_div = dssdev->clocks.dispc.channel.pck_div; r = dispc_calc_clock_rates(fck, &dispc_cinfo); if (r) { @@ -3369,11 +4116,11 @@ static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev) static int dsi_display_init_dsi(struct omap_dss_device *dssdev) { + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + int dsi_module = dsi_get_dsidev_id(dsidev); int r; - _dsi_print_reset_status(); - - r = dsi_pll_init(dssdev, true, true); + r = dsi_pll_init(dsidev, true, true); if (r) goto err0; @@ -3381,8 +4128,10 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev) if (r) goto err1; - dss_select_dispc_clk_source(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC); - dss_select_dsi_clk_source(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI); + dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src); + dss_select_dsi_clk_source(dsi_module, dssdev->clocks.dsi.dsi_fclk_src); + dss_select_lcd_clk_source(dssdev->manager->id, + dssdev->clocks.dispc.channel.lcd_clk_src); DSSDBG("PLL OK\n"); @@ -3390,82 +4139,92 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev) if (r) goto err2; - r = dsi_complexio_init(dssdev); + r = dsi_cio_init(dssdev); if (r) goto err2; - _dsi_print_reset_status(); + _dsi_print_reset_status(dsidev); dsi_proto_timings(dssdev); dsi_set_lp_clk_divisor(dssdev); if (1) - _dsi_print_reset_status(); + _dsi_print_reset_status(dsidev); r = dsi_proto_config(dssdev); if (r) goto err3; /* enable interface */ - dsi_vc_enable(0, 1); - dsi_vc_enable(1, 1); - dsi_vc_enable(2, 1); - dsi_vc_enable(3, 1); - dsi_if_enable(1); - dsi_force_tx_stop_mode_io(); + dsi_vc_enable(dsidev, 0, 1); + dsi_vc_enable(dsidev, 1, 1); + dsi_vc_enable(dsidev, 2, 1); + dsi_vc_enable(dsidev, 3, 1); + dsi_if_enable(dsidev, 1); + dsi_force_tx_stop_mode_io(dsidev); return 0; err3: - dsi_complexio_uninit(); + dsi_cio_uninit(dsidev); err2: - dss_select_dispc_clk_source(DSS_CLK_SRC_FCK); - dss_select_dsi_clk_source(DSS_CLK_SRC_FCK); + 
dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); + dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK); err1: - dsi_pll_uninit(); + dsi_pll_uninit(dsidev, true); err0: return r; } -static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev) +static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev, + bool disconnect_lanes, bool enter_ulps) { - /* disable interface */ - dsi_if_enable(0); - dsi_vc_enable(0, 0); - dsi_vc_enable(1, 0); - dsi_vc_enable(2, 0); - dsi_vc_enable(3, 0); + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + int dsi_module = dsi_get_dsidev_id(dsidev); - dss_select_dispc_clk_source(DSS_CLK_SRC_FCK); - dss_select_dsi_clk_source(DSS_CLK_SRC_FCK); - dsi_complexio_uninit(); - dsi_pll_uninit(); + if (enter_ulps && !dsi->ulps_enabled) + dsi_enter_ulps(dsidev); + + /* disable interface */ + dsi_if_enable(dsidev, 0); + dsi_vc_enable(dsidev, 0, 0); + dsi_vc_enable(dsidev, 1, 0); + dsi_vc_enable(dsidev, 2, 0); + dsi_vc_enable(dsidev, 3, 0); + + dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); + dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK); + dsi_cio_uninit(dsidev); + dsi_pll_uninit(dsidev, disconnect_lanes); } -static int dsi_core_init(void) +static int dsi_core_init(struct platform_device *dsidev) { /* Autoidle */ - REG_FLD_MOD(DSI_SYSCONFIG, 1, 0, 0); + REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 0, 0); /* ENWAKEUP */ - REG_FLD_MOD(DSI_SYSCONFIG, 1, 2, 2); + REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 1, 2, 2); /* SIDLEMODE smart-idle */ - REG_FLD_MOD(DSI_SYSCONFIG, 2, 4, 3); + REG_FLD_MOD(dsidev, DSI_SYSCONFIG, 2, 4, 3); - _dsi_initialize_irq(); + _dsi_initialize_irq(dsidev); return 0; } int omapdss_dsi_display_enable(struct omap_dss_device *dssdev) { + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); int r = 0; DSSDBG("dsi_display_enable\n"); - WARN_ON(!dsi_bus_is_locked()); + WARN_ON(!dsi_bus_is_locked(dsidev)); - mutex_lock(&dsi.lock); + mutex_lock(&dsi->lock); r = omap_dss_start_device(dssdev); if (r) { @@ -3474,13 +4233,13 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev) } enable_clocks(1); - dsi_enable_pll_clock(1); + dsi_enable_pll_clock(dsidev, 1); - r = _dsi_reset(); + r = _dsi_reset(dsidev); if (r) goto err1; - dsi_core_init(); + dsi_core_init(dsidev); r = dsi_display_init_dispc(dssdev); if (r) @@ -3490,7 +4249,7 @@ int omapdss_dsi_display_enable(struct omap_dss_device *dssdev) if (r) goto err2; - mutex_unlock(&dsi.lock); + mutex_unlock(&dsi->lock); return 0; @@ -3498,39 +4257,46 @@ err2: dsi_display_uninit_dispc(dssdev); err1: enable_clocks(0); - dsi_enable_pll_clock(0); + dsi_enable_pll_clock(dsidev, 0); omap_dss_stop_device(dssdev); err0: - mutex_unlock(&dsi.lock); + mutex_unlock(&dsi->lock); DSSDBG("dsi_display_enable FAILED\n"); return r; } EXPORT_SYMBOL(omapdss_dsi_display_enable); -void omapdss_dsi_display_disable(struct omap_dss_device *dssdev) +void omapdss_dsi_display_disable(struct omap_dss_device *dssdev, + bool disconnect_lanes, bool enter_ulps) { + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + DSSDBG("dsi_display_disable\n"); - WARN_ON(!dsi_bus_is_locked()); + WARN_ON(!dsi_bus_is_locked(dsidev)); - mutex_lock(&dsi.lock); + mutex_lock(&dsi->lock); dsi_display_uninit_dispc(dssdev); - dsi_display_uninit_dsi(dssdev); + dsi_display_uninit_dsi(dssdev, disconnect_lanes, enter_ulps); 
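The REG_FLD_MOD(dsidev, DSI_SYSCONFIG, ...) calls in dsi_core_init() above are read-modify-write accesses built on the DSS bitfield macros: AUTOIDLE in bit 0, ENWAKEUP in bit 2, and SIDLEMODE (smart-idle, value 2) in bits 4:3. The sketch below reimplements helpers in that style and replays the three SYSCONFIG writes on a plain variable; the macro bodies are an assumption based on common usage, not copied from the driver.

#include <stdio.h>
#include <stdint.h>

/* Bitfield helpers in the style used by the OMAP DSS drivers;
 * "start" is the high bit, "end" the low bit of the field. */
#define FLD_MASK(start, end)	(((1u << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
#define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end))
#define FLD_MOD(orig, val, start, end) \
	(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))

int main(void)
{
	uint32_t sysconfig = 0;

	/* Mirrors dsi_core_init(): AUTOIDLE, ENWAKEUP, SIDLEMODE = smart-idle. */
	sysconfig = FLD_MOD(sysconfig, 1, 0, 0);	/* bit 0 */
	sysconfig = FLD_MOD(sysconfig, 1, 2, 2);	/* bit 2 */
	sysconfig = FLD_MOD(sysconfig, 2, 4, 3);	/* bits 4:3 */

	printf("SYSCONFIG = 0x%08x\n", sysconfig);
	printf("SIDLEMODE = %u\n", FLD_GET(sysconfig, 4, 3));
	return 0;
}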
enable_clocks(0); - dsi_enable_pll_clock(0); + dsi_enable_pll_clock(dsidev, 0); omap_dss_stop_device(dssdev); - mutex_unlock(&dsi.lock); + mutex_unlock(&dsi->lock); } EXPORT_SYMBOL(omapdss_dsi_display_disable); int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable) { - dsi.te_enabled = enable; + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + dsi->te_enabled = enable; return 0; } EXPORT_SYMBOL(omapdss_dsi_enable_te); @@ -3550,23 +4316,33 @@ void dsi_get_overlay_fifo_thresholds(enum omap_plane plane, int dsi_init_display(struct omap_dss_device *dssdev) { + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + int dsi_module = dsi_get_dsidev_id(dsidev); + DSSDBG("DSI init\n"); /* XXX these should be figured out dynamically */ dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE | OMAP_DSS_DISPLAY_CAP_TEAR_ELIM; - if (dsi.vdds_dsi_reg == NULL) { + if (dsi->vdds_dsi_reg == NULL) { struct regulator *vdds_dsi; - vdds_dsi = regulator_get(&dsi.pdev->dev, "vdds_dsi"); + vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi"); if (IS_ERR(vdds_dsi)) { DSSERR("can't get VDDS_DSI regulator\n"); return PTR_ERR(vdds_dsi); } - dsi.vdds_dsi_reg = vdds_dsi; + dsi->vdds_dsi_reg = vdds_dsi; + } + + if (dsi_get_num_data_lanes_dssdev(dssdev) > dsi->num_data_lanes) { + DSSERR("DSI%d can't support more than %d data lanes\n", + dsi_module + 1, dsi->num_data_lanes); + return -EINVAL; } return 0; @@ -3574,11 +4350,13 @@ int dsi_init_display(struct omap_dss_device *dssdev) int omap_dsi_request_vc(struct omap_dss_device *dssdev, int *channel) { + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); int i; - for (i = 0; i < ARRAY_SIZE(dsi.vc); i++) { - if (!dsi.vc[i].dssdev) { - dsi.vc[i].dssdev = dssdev; + for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) { + if (!dsi->vc[i].dssdev) { + dsi->vc[i].dssdev = dssdev; *channel = i; return 0; } @@ -3591,6 +4369,9 @@ EXPORT_SYMBOL(omap_dsi_request_vc); int omap_dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id) { + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + if (vc_id < 0 || vc_id > 3) { DSSERR("VC ID out of range\n"); return -EINVAL; @@ -3601,13 +4382,13 @@ int omap_dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id) return -EINVAL; } - if (dsi.vc[channel].dssdev != dssdev) { + if (dsi->vc[channel].dssdev != dssdev) { DSSERR("Virtual Channel not allocated to display %s\n", dssdev->name); return -EINVAL; } - dsi.vc[channel].vc_id = vc_id; + dsi->vc[channel].vc_id = vc_id; return 0; } @@ -3615,143 +4396,172 @@ EXPORT_SYMBOL(omap_dsi_set_vc_id); void omap_dsi_release_vc(struct omap_dss_device *dssdev, int channel) { + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + if ((channel >= 0 && channel <= 3) && - dsi.vc[channel].dssdev == dssdev) { - dsi.vc[channel].dssdev = NULL; - dsi.vc[channel].vc_id = 0; + dsi->vc[channel].dssdev == dssdev) { + dsi->vc[channel].dssdev = NULL; + dsi->vc[channel].vc_id = 0; } } EXPORT_SYMBOL(omap_dsi_release_vc); -void dsi_wait_pll_hsdiv_dispc_active(void) +void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev) { - if (wait_for_bit_change(DSI_PLL_STATUS, 7, 1) != 1) + if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 7, 1) != 
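omap_dsi_request_vc() above hands out the first virtual channel whose dssdev pointer is still NULL, and omap_dsi_release_vc() clears the slot again only if the caller actually owns it. The same first-free-slot pattern as a standalone sketch; the owner string and the -EBUSY return are placeholders, since the hunk does not show the driver's exact error path.

#include <stdio.h>
#include <errno.h>

#define NUM_VC 4

struct vc {
	const char *owner;	/* NULL means the channel is free */
	int vc_id;
};

static struct vc vcs[NUM_VC];

static int request_vc(const char *owner, int *channel)
{
	int i;

	for (i = 0; i < NUM_VC; i++) {
		if (!vcs[i].owner) {
			vcs[i].owner = owner;
			*channel = i;
			return 0;
		}
	}
	return -EBUSY;	/* all four channels taken */
}

static void release_vc(const char *owner, int channel)
{
	/* Only the owner may free its channel, as in omap_dsi_release_vc(). */
	if (channel >= 0 && channel < NUM_VC && vcs[channel].owner == owner) {
		vcs[channel].owner = NULL;
		vcs[channel].vc_id = 0;
	}
}

int main(void)
{
	int ch;

	if (request_vc("panel0", &ch) == 0)
		printf("panel0 got VC %d\n", ch);
	release_vc("panel0", ch);
	return 0;
}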
1) DSSERR("%s (%s) not active\n", - dss_get_generic_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC), - dss_feat_get_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC)); + dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC), + dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC)); } -void dsi_wait_pll_hsdiv_dsi_active(void) +void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev) { - if (wait_for_bit_change(DSI_PLL_STATUS, 8, 1) != 1) + if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 8, 1) != 1) DSSERR("%s (%s) not active\n", - dss_get_generic_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI), - dss_feat_get_clk_source_name(DSS_CLK_SRC_DSI_PLL_HSDIV_DSI)); + dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI), + dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI)); } -static void dsi_calc_clock_param_ranges(void) +static void dsi_calc_clock_param_ranges(struct platform_device *dsidev) { - dsi.regn_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGN); - dsi.regm_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM); - dsi.regm_dispc_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DISPC); - dsi.regm_dsi_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DSI); - dsi.fint_min = dss_feat_get_param_min(FEAT_PARAM_DSIPLL_FINT); - dsi.fint_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_FINT); - dsi.lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + dsi->regn_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGN); + dsi->regm_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM); + dsi->regm_dispc_max = + dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DISPC); + dsi->regm_dsi_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_REGM_DSI); + dsi->fint_min = dss_feat_get_param_min(FEAT_PARAM_DSIPLL_FINT); + dsi->fint_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_FINT); + dsi->lpdiv_max = dss_feat_get_param_max(FEAT_PARAM_DSIPLL_LPDIV); } -static int dsi_init(struct platform_device *pdev) +static int dsi_init(struct platform_device *dsidev) { + struct omap_display_platform_data *dss_plat_data; + struct omap_dss_board_info *board_info; u32 rev; - int r, i; + int r, i, dsi_module = dsi_get_dsidev_id(dsidev); struct resource *dsi_mem; + struct dsi_data *dsi; + + dsi = kzalloc(sizeof(*dsi), GFP_KERNEL); + if (!dsi) { + r = -ENOMEM; + goto err0; + } + + dsi->pdev = dsidev; + dsi_pdev_map[dsi_module] = dsidev; + dev_set_drvdata(&dsidev->dev, dsi); + + dss_plat_data = dsidev->dev.platform_data; + board_info = dss_plat_data->board_data; + dsi->dsi_mux_pads = board_info->dsi_mux_pads; - spin_lock_init(&dsi.irq_lock); - spin_lock_init(&dsi.errors_lock); - dsi.errors = 0; + spin_lock_init(&dsi->irq_lock); + spin_lock_init(&dsi->errors_lock); + dsi->errors = 0; #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS - spin_lock_init(&dsi.irq_stats_lock); - dsi.irq_stats.last_reset = jiffies; + spin_lock_init(&dsi->irq_stats_lock); + dsi->irq_stats.last_reset = jiffies; #endif - mutex_init(&dsi.lock); - sema_init(&dsi.bus_lock, 1); + mutex_init(&dsi->lock); + sema_init(&dsi->bus_lock, 1); - dsi.workqueue = create_singlethread_workqueue("dsi"); - if (dsi.workqueue == NULL) - return -ENOMEM; - - INIT_DELAYED_WORK_DEFERRABLE(&dsi.framedone_timeout_work, + INIT_DELAYED_WORK_DEFERRABLE(&dsi->framedone_timeout_work, dsi_framedone_timeout_work_callback); #ifdef DSI_CATCH_MISSING_TE - init_timer(&dsi.te_timer); - dsi.te_timer.function = dsi_te_timeout; - dsi.te_timer.data = 0; + init_timer(&dsi->te_timer); + 
dsi->te_timer.function = dsi_te_timeout; + dsi->te_timer.data = 0; #endif - dsi_mem = platform_get_resource(dsi.pdev, IORESOURCE_MEM, 0); + dsi_mem = platform_get_resource(dsi->pdev, IORESOURCE_MEM, 0); if (!dsi_mem) { DSSERR("can't get IORESOURCE_MEM DSI\n"); r = -EINVAL; goto err1; } - dsi.base = ioremap(dsi_mem->start, resource_size(dsi_mem)); - if (!dsi.base) { + dsi->base = ioremap(dsi_mem->start, resource_size(dsi_mem)); + if (!dsi->base) { DSSERR("can't ioremap DSI\n"); r = -ENOMEM; goto err1; } - dsi.irq = platform_get_irq(dsi.pdev, 0); - if (dsi.irq < 0) { + dsi->irq = platform_get_irq(dsi->pdev, 0); + if (dsi->irq < 0) { DSSERR("platform_get_irq failed\n"); r = -ENODEV; goto err2; } - r = request_irq(dsi.irq, omap_dsi_irq_handler, IRQF_SHARED, - "OMAP DSI1", dsi.pdev); + r = request_irq(dsi->irq, omap_dsi_irq_handler, IRQF_SHARED, + dev_name(&dsidev->dev), dsi->pdev); if (r < 0) { DSSERR("request_irq failed\n"); goto err2; } /* DSI VCs initialization */ - for (i = 0; i < ARRAY_SIZE(dsi.vc); i++) { - dsi.vc[i].mode = DSI_VC_MODE_L4; - dsi.vc[i].dssdev = NULL; - dsi.vc[i].vc_id = 0; + for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) { + dsi->vc[i].mode = DSI_VC_MODE_L4; + dsi->vc[i].dssdev = NULL; + dsi->vc[i].vc_id = 0; } - dsi_calc_clock_param_ranges(); + dsi_calc_clock_param_ranges(dsidev); enable_clocks(1); - rev = dsi_read_reg(DSI_REVISION); - dev_dbg(&pdev->dev, "OMAP DSI rev %d.%d\n", + rev = dsi_read_reg(dsidev, DSI_REVISION); + dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n", FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); + dsi->num_data_lanes = dsi_get_num_data_lanes(dsidev); + enable_clocks(0); return 0; err2: - iounmap(dsi.base); + iounmap(dsi->base); err1: - destroy_workqueue(dsi.workqueue); + kfree(dsi); +err0: return r; } -static void dsi_exit(void) +static void dsi_exit(struct platform_device *dsidev) { - if (dsi.vdds_dsi_reg != NULL) { - regulator_put(dsi.vdds_dsi_reg); - dsi.vdds_dsi_reg = NULL; + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + if (dsi->vdds_dsi_reg != NULL) { + if (dsi->vdds_dsi_enabled) { + regulator_disable(dsi->vdds_dsi_reg); + dsi->vdds_dsi_enabled = false; + } + + regulator_put(dsi->vdds_dsi_reg); + dsi->vdds_dsi_reg = NULL; } - free_irq(dsi.irq, dsi.pdev); - iounmap(dsi.base); + free_irq(dsi->irq, dsi->pdev); + iounmap(dsi->base); - destroy_workqueue(dsi.workqueue); + kfree(dsi); DSSDBG("omap_dsi_exit\n"); } /* DSI1 HW IP initialisation */ -static int omap_dsi1hw_probe(struct platform_device *pdev) +static int omap_dsi1hw_probe(struct platform_device *dsidev) { int r; - dsi.pdev = pdev; - r = dsi_init(pdev); + + r = dsi_init(dsidev); if (r) { DSSERR("Failed to initialize DSI\n"); goto err_dsi; @@ -3760,9 +4570,12 @@ err_dsi: return r; } -static int omap_dsi1hw_remove(struct platform_device *pdev) +static int omap_dsi1hw_remove(struct platform_device *dsidev) { - dsi_exit(); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + + dsi_exit(dsidev); + WARN_ON(dsi->scp_clk_refcount > 0); return 0; } diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c index 3f1fee63c67..d9489d5c4f0 100644 --- a/drivers/video/omap2/dss/dss.c +++ b/drivers/video/omap2/dss/dss.c @@ -29,7 +29,7 @@ #include <linux/seq_file.h> #include <linux/clk.h> -#include <plat/display.h> +#include <video/omapdss.h> #include <plat/clock.h> #include "dss.h" #include "dss_features.h" @@ -45,7 +45,6 @@ struct dss_reg { #define DSS_REVISION DSS_REG(0x0000) #define DSS_SYSCONFIG DSS_REG(0x0010) #define DSS_SYSSTATUS DSS_REG(0x0014) -#define DSS_IRQSTATUS 
DSS_REG(0x0018) #define DSS_CONTROL DSS_REG(0x0040) #define DSS_SDI_CONTROL DSS_REG(0x0044) #define DSS_PLL_CONTROL DSS_REG(0x0048) @@ -75,17 +74,17 @@ static struct { struct dss_clock_info cache_dss_cinfo; struct dispc_clock_info cache_dispc_cinfo; - enum dss_clk_source dsi_clk_source; - enum dss_clk_source dispc_clk_source; - enum dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS]; + enum omap_dss_clk_source dsi_clk_source[MAX_NUM_DSI]; + enum omap_dss_clk_source dispc_clk_source; + enum omap_dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS]; u32 ctx[DSS_SZ_REGS / sizeof(u32)]; } dss; static const char * const dss_generic_clk_source_names[] = { - [DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI_PLL_HSDIV_DISPC", - [DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI_PLL_HSDIV_DSI", - [DSS_CLK_SRC_FCK] = "DSS_FCK", + [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI_PLL_HSDIV_DISPC", + [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI_PLL_HSDIV_DSI", + [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCK", }; static void dss_clk_enable_all_no_ctx(void); @@ -230,7 +229,7 @@ void dss_sdi_disable(void) REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */ } -const char *dss_get_generic_clk_source_name(enum dss_clk_source clk_src) +const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src) { return dss_generic_clk_source_names[clk_src]; } @@ -246,8 +245,8 @@ void dss_dump_clocks(struct seq_file *s) seq_printf(s, "- DSS -\n"); - fclk_name = dss_get_generic_clk_source_name(DSS_CLK_SRC_FCK); - fclk_real_name = dss_feat_get_clk_source_name(DSS_CLK_SRC_FCK); + fclk_name = dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_FCK); + fclk_real_name = dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_FCK); fclk_rate = dss_clk_get_rate(DSS_CLK_FCK); if (dss.dpll4_m4_ck) { @@ -286,7 +285,6 @@ void dss_dump_regs(struct seq_file *s) DUMPREG(DSS_REVISION); DUMPREG(DSS_SYSCONFIG); DUMPREG(DSS_SYSSTATUS); - DUMPREG(DSS_IRQSTATUS); DUMPREG(DSS_CONTROL); if (dss_feat_get_supported_displays(OMAP_DSS_CHANNEL_LCD) & @@ -300,18 +298,25 @@ void dss_dump_regs(struct seq_file *s) #undef DUMPREG } -void dss_select_dispc_clk_source(enum dss_clk_source clk_src) +void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src) { + struct platform_device *dsidev; int b; u8 start, end; switch (clk_src) { - case DSS_CLK_SRC_FCK: + case OMAP_DSS_CLK_SRC_FCK: b = 0; break; - case DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: + case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: b = 1; - dsi_wait_pll_hsdiv_dispc_active(); + dsidev = dsi_get_dsidev_from_id(0); + dsi_wait_pll_hsdiv_dispc_active(dsidev); + break; + case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: + b = 2; + dsidev = dsi_get_dsidev_from_id(1); + dsi_wait_pll_hsdiv_dispc_active(dsidev); break; default: BUG(); @@ -324,17 +329,27 @@ void dss_select_dispc_clk_source(enum dss_clk_source clk_src) dss.dispc_clk_source = clk_src; } -void dss_select_dsi_clk_source(enum dss_clk_source clk_src) +void dss_select_dsi_clk_source(int dsi_module, + enum omap_dss_clk_source clk_src) { + struct platform_device *dsidev; int b; switch (clk_src) { - case DSS_CLK_SRC_FCK: + case OMAP_DSS_CLK_SRC_FCK: b = 0; break; - case DSS_CLK_SRC_DSI_PLL_HSDIV_DSI: + case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI: + BUG_ON(dsi_module != 0); + b = 1; + dsidev = dsi_get_dsidev_from_id(0); + dsi_wait_pll_hsdiv_dsi_active(dsidev); + break; + case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI: + BUG_ON(dsi_module != 1); b = 1; - dsi_wait_pll_hsdiv_dsi_active(); + dsidev = dsi_get_dsidev_from_id(1); + dsi_wait_pll_hsdiv_dsi_active(dsidev); break; default: BUG(); @@ 
-342,25 +357,33 @@ void dss_select_dsi_clk_source(enum dss_clk_source clk_src) REG_FLD_MOD(DSS_CONTROL, b, 1, 1); /* DSI_CLK_SWITCH */ - dss.dsi_clk_source = clk_src; + dss.dsi_clk_source[dsi_module] = clk_src; } void dss_select_lcd_clk_source(enum omap_channel channel, - enum dss_clk_source clk_src) + enum omap_dss_clk_source clk_src) { + struct platform_device *dsidev; int b, ix, pos; if (!dss_has_feature(FEAT_LCD_CLK_SRC)) return; switch (clk_src) { - case DSS_CLK_SRC_FCK: + case OMAP_DSS_CLK_SRC_FCK: b = 0; break; - case DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: + case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: BUG_ON(channel != OMAP_DSS_CHANNEL_LCD); b = 1; - dsi_wait_pll_hsdiv_dispc_active(); + dsidev = dsi_get_dsidev_from_id(0); + dsi_wait_pll_hsdiv_dispc_active(dsidev); + break; + case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: + BUG_ON(channel != OMAP_DSS_CHANNEL_LCD2); + b = 1; + dsidev = dsi_get_dsidev_from_id(1); + dsi_wait_pll_hsdiv_dispc_active(dsidev); break; default: BUG(); @@ -373,20 +396,26 @@ void dss_select_lcd_clk_source(enum omap_channel channel, dss.lcd_clk_source[ix] = clk_src; } -enum dss_clk_source dss_get_dispc_clk_source(void) +enum omap_dss_clk_source dss_get_dispc_clk_source(void) { return dss.dispc_clk_source; } -enum dss_clk_source dss_get_dsi_clk_source(void) +enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module) { - return dss.dsi_clk_source; + return dss.dsi_clk_source[dsi_module]; } -enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel) +enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel) { - int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 1; - return dss.lcd_clk_source[ix]; + if (dss_has_feature(FEAT_LCD_CLK_SRC)) { + int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 1; + return dss.lcd_clk_source[ix]; + } else { + /* LCD_CLK source is the same as DISPC_FCLK source for + * OMAP2 and OMAP3 */ + return dss.dispc_clk_source; + } } /* calculate clock rates using dividers in cinfo */ @@ -659,13 +688,18 @@ static int dss_init(void) * the kernel resets it */ omap_writel(omap_readl(0x48050440) & ~0x3, 0x48050440); +#ifdef CONFIG_OMAP2_DSS_SLEEP_BEFORE_RESET /* We need to wait here a bit, otherwise we sometimes start to * get synclost errors, and after that only power cycle will * restore DSS functionality. I have no idea why this happens. * And we have to wait _before_ resetting the DSS, but after * enabling clocks. + * + * This bug was at least present on OMAP3430. It's unknown + * if it happens on OMAP2 or OMAP3630. 
*/ msleep(50); +#endif _omap_dss_reset(); @@ -700,10 +734,11 @@ static int dss_init(void) dss.dpll4_m4_ck = dpll4_m4_ck; - dss.dsi_clk_source = DSS_CLK_SRC_FCK; - dss.dispc_clk_source = DSS_CLK_SRC_FCK; - dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK; - dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK; + dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; + dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; + dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK; + dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; + dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; dss_save_context(); @@ -1015,6 +1050,14 @@ static void core_dump_clocks(struct seq_file *s) dss.dss_video_fck }; + const char *names[5] = { + "ick", + "fck", + "sys_clk", + "tv_fck", + "video_fck" + }; + seq_printf(s, "- CORE -\n"); seq_printf(s, "internal clk count\t\t%u\n", dss.num_clks_enabled); @@ -1022,8 +1065,11 @@ static void core_dump_clocks(struct seq_file *s) for (i = 0; i < 5; i++) { if (!clocks[i]) continue; - seq_printf(s, "%-15s\t%lu\t%d\n", + seq_printf(s, "%s (%s)%*s\t%lu\t%d\n", + names[i], clocks[i]->name, + 24 - strlen(names[i]) - strlen(clocks[i]->name), + "", clk_get_rate(clocks[i]), clocks[i]->usecount); } diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h index c2f582bb19c..8ab6d43329b 100644 --- a/drivers/video/omap2/dss/dss.h +++ b/drivers/video/omap2/dss/dss.h @@ -117,15 +117,6 @@ enum dss_clock { DSS_CLK_VIDFCK = 1 << 4, /* DSS_96M_FCLK*/ }; -enum dss_clk_source { - DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC, /* OMAP3: DSI1_PLL_FCLK - * OMAP4: PLL1_CLK1 */ - DSS_CLK_SRC_DSI_PLL_HSDIV_DSI, /* OMAP3: DSI2_PLL_FCLK - * OMAP4: PLL1_CLK2 */ - DSS_CLK_SRC_FCK, /* OMAP2/3: DSS1_ALWON_FCLK - * OMAP4: DSS_FCLK */ -}; - enum dss_hdmi_venc_clk_source_select { DSS_VENC_TV_CLK = 0, DSS_HDMI_M_PCLK = 1, @@ -236,7 +227,7 @@ void dss_clk_enable(enum dss_clock clks); void dss_clk_disable(enum dss_clock clks); unsigned long dss_clk_get_rate(enum dss_clock clk); int dss_need_ctx_restore(void); -const char *dss_get_generic_clk_source_name(enum dss_clk_source clk_src); +const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src); void dss_dump_clocks(struct seq_file *s); void dss_dump_regs(struct seq_file *s); @@ -248,13 +239,14 @@ void dss_sdi_init(u8 datapairs); int dss_sdi_enable(void); void dss_sdi_disable(void); -void dss_select_dispc_clk_source(enum dss_clk_source clk_src); -void dss_select_dsi_clk_source(enum dss_clk_source clk_src); +void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src); +void dss_select_dsi_clk_source(int dsi_module, + enum omap_dss_clk_source clk_src); void dss_select_lcd_clk_source(enum omap_channel channel, - enum dss_clk_source clk_src); -enum dss_clk_source dss_get_dispc_clk_source(void); -enum dss_clk_source dss_get_dsi_clk_source(void); -enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel); + enum omap_dss_clk_source clk_src); +enum omap_dss_clk_source dss_get_dispc_clk_source(void); +enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module); +enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel); void dss_set_venc_output(enum omap_dss_venc_type type); void dss_set_dac_pwrdn_bgz(bool enable); @@ -284,31 +276,39 @@ static inline void sdi_exit(void) /* DSI */ #ifdef CONFIG_OMAP2_DSS_DSI + +struct dentry; +struct file_operations; + int dsi_init_platform_driver(void); void dsi_uninit_platform_driver(void); void dsi_dump_clocks(struct seq_file *s); -void dsi_dump_irqs(struct seq_file *s); -void dsi_dump_regs(struct seq_file *s); +void 
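The core_dump_clocks() change above keeps the rate and use-count columns aligned by printing an empty string with a computed %*s field width of 24 minus the two name lengths. A quick userspace demonstration of the same trick; the clock names and rates below are invented examples, not values from the driver.

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *names[] = { "ick", "fck", "sys_clk" };
	const char *clks[]  = { "dss_ick", "dss1_alwon_fclk", "dss2_alwon_fclk" };
	unsigned long rates[] = { 83000000, 172800000, 13000000 };
	int i;

	for (i = 0; i < 3; i++) {
		/* "%*s" with a computed width and an empty string pads the
		 * "name (clk)" column out to 24 characters, so the rate
		 * column lines up regardless of the name lengths. */
		printf("%s (%s)%*s\t%lu\n",
		       names[i], clks[i],
		       (int)(24 - strlen(names[i]) - strlen(clks[i])), "",
		       rates[i]);
	}
	return 0;
}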
dsi_create_debugfs_files_irq(struct dentry *debugfs_dir, + const struct file_operations *debug_fops); +void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir, + const struct file_operations *debug_fops); void dsi_save_context(void); void dsi_restore_context(void); int dsi_init_display(struct omap_dss_device *display); void dsi_irq_handler(void); -unsigned long dsi_get_pll_hsdiv_dispc_rate(void); -int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo); -int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck, - struct dsi_clock_info *cinfo, +unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev); +int dsi_pll_set_clock_div(struct platform_device *dsidev, + struct dsi_clock_info *cinfo); +int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft, + unsigned long req_pck, struct dsi_clock_info *cinfo, struct dispc_clock_info *dispc_cinfo); -int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk, +int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk, bool enable_hsdiv); -void dsi_pll_uninit(void); +void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes); void dsi_get_overlay_fifo_thresholds(enum omap_plane plane, u32 fifo_size, enum omap_burst_size *burst_size, u32 *fifo_low, u32 *fifo_high); -void dsi_wait_pll_hsdiv_dispc_active(void); -void dsi_wait_pll_hsdiv_dsi_active(void); +void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev); +void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev); +struct platform_device *dsi_get_dsidev_from_id(int module); #else static inline int dsi_init_platform_driver(void) { @@ -317,17 +317,47 @@ static inline int dsi_init_platform_driver(void) static inline void dsi_uninit_platform_driver(void) { } -static inline unsigned long dsi_get_pll_hsdiv_dispc_rate(void) +static inline unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev) { WARN("%s: DSI not compiled in, returning rate as 0\n", __func__); return 0; } -static inline void dsi_wait_pll_hsdiv_dispc_active(void) +static inline int dsi_pll_set_clock_div(struct platform_device *dsidev, + struct dsi_clock_info *cinfo) +{ + WARN("%s: DSI not compiled in\n", __func__); + return -ENODEV; +} +static inline int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, + bool is_tft, unsigned long req_pck, + struct dsi_clock_info *dsi_cinfo, + struct dispc_clock_info *dispc_cinfo) +{ + WARN("%s: DSI not compiled in\n", __func__); + return -ENODEV; +} +static inline int dsi_pll_init(struct platform_device *dsidev, + bool enable_hsclk, bool enable_hsdiv) { + WARN("%s: DSI not compiled in\n", __func__); + return -ENODEV; } -static inline void dsi_wait_pll_hsdiv_dsi_active(void) +static inline void dsi_pll_uninit(struct platform_device *dsidev, + bool disconnect_lanes) { } +static inline void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev) +{ +} +static inline void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev) +{ +} +static inline struct platform_device *dsi_get_dsidev_from_id(int module) +{ + WARN("%s: DSI not compiled in, returning platform device as NULL\n", + __func__); + return NULL; +} #endif /* DPI */ @@ -391,7 +421,8 @@ int dispc_setup_plane(enum omap_plane plane, enum omap_dss_rotation_type rotation_type, u8 rotation, bool mirror, u8 global_alpha, u8 pre_mult_alpha, - enum omap_channel channel); + enum omap_channel channel, + u32 puv_addr); bool dispc_go_busy(enum omap_channel channel); void dispc_go(enum omap_channel channel); @@ 
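The dss.h hunk above follows the usual compile-out pattern: when CONFIG_OMAP2_DSS_DSI is not set, the DSI entry points become static inline stubs that WARN and return an error, so callers need no #ifdefs of their own. A toy standalone version of the pattern, with a simplified signature and a made-up HAVE_DSI switch standing in for the Kconfig option:

#include <stdio.h>
#include <errno.h>

/* Build with -DHAVE_DSI for the real implementation,
 * without it for the no-op stub (mirrors the dss.h pattern). */
#ifdef HAVE_DSI
int dsi_pll_set_clock_div(int module, int divider)
{
	printf("DSI%d: divider set to %d\n", module + 1, divider);
	return 0;
}
#else
static inline int dsi_pll_set_clock_div(int module, int divider)
{
	/* Stand-in for WARN("%s: DSI not compiled in\n", __func__) */
	fprintf(stderr, "%s: DSI not compiled in\n", __func__);
	return -ENODEV;
}
#endif

int main(void)
{
	int r = dsi_pll_set_clock_div(0, 4);

	printf("result: %d\n", r);
	return 0;
}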
-485,13 +516,6 @@ void hdmi_panel_exit(void); int rfbi_init_platform_driver(void); void rfbi_uninit_platform_driver(void); void rfbi_dump_regs(struct seq_file *s); - -int rfbi_configure(int rfbi_module, int bpp, int lines); -void rfbi_enable_rfbi(bool enable); -void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width, - u16 height, void (callback)(void *data), void *data); -void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t); -unsigned long rfbi_get_max_tx_rate(void); int rfbi_init_display(struct omap_dss_device *display); #else static inline int rfbi_init_platform_driver(void) diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c index aa1622241d0..1c18888e5df 100644 --- a/drivers/video/omap2/dss/dss_features.c +++ b/drivers/video/omap2/dss/dss_features.c @@ -22,7 +22,7 @@ #include <linux/err.h> #include <linux/slab.h> -#include <plat/display.h> +#include <video/omapdss.h> #include <plat/cpu.h> #include "dss.h" @@ -52,7 +52,7 @@ struct omap_dss_features { }; /* This struct is assigned to one of the below during initialization */ -static struct omap_dss_features *omap_current_dss_features; +static const struct omap_dss_features *omap_current_dss_features; static const struct dss_reg_field omap2_dss_reg_fields[] = { [FEAT_REG_FIRHINC] = { 11, 0 }, @@ -177,22 +177,55 @@ static const enum omap_color_mode omap3_dss_supported_color_modes[] = { OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32, }; +static const enum omap_color_mode omap4_dss_supported_color_modes[] = { + /* OMAP_DSS_GFX */ + OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 | + OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 | + OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 | + OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U | + OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_ARGB32 | + OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32 | + OMAP_DSS_COLOR_ARGB16_1555, + + /* OMAP_DSS_VIDEO1 */ + OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB12U | + OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_ARGB16_1555 | + OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_NV12 | + OMAP_DSS_COLOR_RGBA16 | OMAP_DSS_COLOR_RGB24U | + OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_UYVY | + OMAP_DSS_COLOR_ARGB16 | OMAP_DSS_COLOR_XRGB16_1555 | + OMAP_DSS_COLOR_ARGB32 | OMAP_DSS_COLOR_RGBX16 | + OMAP_DSS_COLOR_RGBX32, + + /* OMAP_DSS_VIDEO2 */ + OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB12U | + OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_ARGB16_1555 | + OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_NV12 | + OMAP_DSS_COLOR_RGBA16 | OMAP_DSS_COLOR_RGB24U | + OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_UYVY | + OMAP_DSS_COLOR_ARGB16 | OMAP_DSS_COLOR_XRGB16_1555 | + OMAP_DSS_COLOR_ARGB32 | OMAP_DSS_COLOR_RGBX16 | + OMAP_DSS_COLOR_RGBX32, +}; + static const char * const omap2_dss_clk_source_names[] = { - [DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "N/A", - [DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "N/A", - [DSS_CLK_SRC_FCK] = "DSS_FCLK1", + [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "N/A", + [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "N/A", + [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCLK1", }; static const char * const omap3_dss_clk_source_names[] = { - [DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI1_PLL_FCLK", - [DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI2_PLL_FCLK", - [DSS_CLK_SRC_FCK] = "DSS1_ALWON_FCLK", + [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI1_PLL_FCLK", + [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI2_PLL_FCLK", + [OMAP_DSS_CLK_SRC_FCK] = "DSS1_ALWON_FCLK", }; static const char * const omap4_dss_clk_source_names[] = { - [DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "PLL1_CLK1", - [DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = 
"PLL1_CLK2", - [DSS_CLK_SRC_FCK] = "DSS_FCLK", + [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "PLL1_CLK1", + [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "PLL1_CLK2", + [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCLK", + [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "PLL2_CLK1", + [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "PLL2_CLK2", }; static const struct dss_param_range omap2_dss_param_range[] = { @@ -226,7 +259,7 @@ static const struct dss_param_range omap4_dss_param_range[] = { }; /* OMAP2 DSS Features */ -static struct omap_dss_features omap2_dss_features = { +static const struct omap_dss_features omap2_dss_features = { .reg_fields = omap2_dss_reg_fields, .num_reg_fields = ARRAY_SIZE(omap2_dss_reg_fields), @@ -244,7 +277,7 @@ static struct omap_dss_features omap2_dss_features = { }; /* OMAP3 DSS Features */ -static struct omap_dss_features omap3430_dss_features = { +static const struct omap_dss_features omap3430_dss_features = { .reg_fields = omap3_dss_reg_fields, .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields), @@ -252,7 +285,8 @@ static struct omap_dss_features omap3430_dss_features = { FEAT_GLOBAL_ALPHA | FEAT_LCDENABLEPOL | FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE | FEAT_FUNCGATED | FEAT_ROWREPEATENABLE | - FEAT_LINEBUFFERSPLIT | FEAT_RESIZECONF, + FEAT_LINEBUFFERSPLIT | FEAT_RESIZECONF | + FEAT_DSI_PLL_FREQSEL | FEAT_DSI_REVERSE_TXCLKESC, .num_mgrs = 2, .num_ovls = 3, @@ -262,7 +296,7 @@ static struct omap_dss_features omap3430_dss_features = { .dss_params = omap3_dss_param_range, }; -static struct omap_dss_features omap3630_dss_features = { +static const struct omap_dss_features omap3630_dss_features = { .reg_fields = omap3_dss_reg_fields, .num_reg_fields = ARRAY_SIZE(omap3_dss_reg_fields), @@ -271,7 +305,8 @@ static struct omap_dss_features omap3630_dss_features = { FEAT_LCDENABLESIGNAL | FEAT_PCKFREEENABLE | FEAT_PRE_MULT_ALPHA | FEAT_FUNCGATED | FEAT_ROWREPEATENABLE | FEAT_LINEBUFFERSPLIT | - FEAT_RESIZECONF, + FEAT_RESIZECONF | FEAT_DSI_PLL_PWR_BUG | + FEAT_DSI_PLL_FREQSEL, .num_mgrs = 2, .num_ovls = 3, @@ -282,19 +317,43 @@ static struct omap_dss_features omap3630_dss_features = { }; /* OMAP4 DSS Features */ -static struct omap_dss_features omap4_dss_features = { +/* For OMAP4430 ES 1.0 revision */ +static const struct omap_dss_features omap4430_es1_0_dss_features = { .reg_fields = omap4_dss_reg_fields, .num_reg_fields = ARRAY_SIZE(omap4_dss_reg_fields), .has_feature = FEAT_GLOBAL_ALPHA | FEAT_PRE_MULT_ALPHA | FEAT_MGR_LCD2 | FEAT_GLOBAL_ALPHA_VID1 | - FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC, + FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC | + FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH | + FEAT_DSI_GNQ | FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2, .num_mgrs = 3, .num_ovls = 3, .supported_displays = omap4_dss_supported_displays, - .supported_color_modes = omap3_dss_supported_color_modes, + .supported_color_modes = omap4_dss_supported_color_modes, + .clksrc_names = omap4_dss_clk_source_names, + .dss_params = omap4_dss_param_range, +}; + +/* For all the other OMAP4 versions */ +static const struct omap_dss_features omap4_dss_features = { + .reg_fields = omap4_dss_reg_fields, + .num_reg_fields = ARRAY_SIZE(omap4_dss_reg_fields), + + .has_feature = + FEAT_GLOBAL_ALPHA | FEAT_PRE_MULT_ALPHA | + FEAT_MGR_LCD2 | FEAT_GLOBAL_ALPHA_VID1 | + FEAT_CORE_CLK_DIV | FEAT_LCD_CLK_SRC | + FEAT_DSI_DCS_CMD_CONFIG_VC | FEAT_DSI_VC_OCP_WIDTH | + FEAT_DSI_GNQ | FEAT_HDMI_CTS_SWMODE | + FEAT_HANDLE_UV_SEPARATE | FEAT_ATTR2, + + .num_mgrs = 3, + .num_ovls = 3, + .supported_displays = omap4_dss_supported_displays, + 
.supported_color_modes = omap4_dss_supported_color_modes, .clksrc_names = omap4_dss_clk_source_names, .dss_params = omap4_dss_param_range, }; @@ -337,7 +396,7 @@ bool dss_feat_color_mode_supported(enum omap_plane plane, color_mode; } -const char *dss_feat_get_clk_source_name(enum dss_clk_source id) +const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id) { return omap_current_dss_features->clksrc_names[id]; } @@ -365,6 +424,10 @@ void dss_features_init(void) omap_current_dss_features = &omap3630_dss_features; else if (cpu_is_omap34xx()) omap_current_dss_features = &omap3430_dss_features; - else + else if (omap_rev() == OMAP4430_REV_ES1_0) + omap_current_dss_features = &omap4430_es1_0_dss_features; + else if (cpu_is_omap44xx()) omap_current_dss_features = &omap4_dss_features; + else + DSSWARN("Unsupported OMAP version"); } diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h index 12e9c4ef0de..07b346f7d91 100644 --- a/drivers/video/omap2/dss/dss_features.h +++ b/drivers/video/omap2/dss/dss_features.h @@ -23,23 +23,34 @@ #define MAX_DSS_MANAGERS 3 #define MAX_DSS_OVERLAYS 3 #define MAX_DSS_LCD_MANAGERS 2 +#define MAX_NUM_DSI 2 /* DSS has feature id */ enum dss_feat_id { - FEAT_GLOBAL_ALPHA = 1 << 0, - FEAT_GLOBAL_ALPHA_VID1 = 1 << 1, - FEAT_PRE_MULT_ALPHA = 1 << 2, - FEAT_LCDENABLEPOL = 1 << 3, - FEAT_LCDENABLESIGNAL = 1 << 4, - FEAT_PCKFREEENABLE = 1 << 5, - FEAT_FUNCGATED = 1 << 6, - FEAT_MGR_LCD2 = 1 << 7, - FEAT_LINEBUFFERSPLIT = 1 << 8, - FEAT_ROWREPEATENABLE = 1 << 9, - FEAT_RESIZECONF = 1 << 10, + FEAT_GLOBAL_ALPHA = 1 << 0, + FEAT_GLOBAL_ALPHA_VID1 = 1 << 1, + FEAT_PRE_MULT_ALPHA = 1 << 2, + FEAT_LCDENABLEPOL = 1 << 3, + FEAT_LCDENABLESIGNAL = 1 << 4, + FEAT_PCKFREEENABLE = 1 << 5, + FEAT_FUNCGATED = 1 << 6, + FEAT_MGR_LCD2 = 1 << 7, + FEAT_LINEBUFFERSPLIT = 1 << 8, + FEAT_ROWREPEATENABLE = 1 << 9, + FEAT_RESIZECONF = 1 << 10, /* Independent core clk divider */ - FEAT_CORE_CLK_DIV = 1 << 11, - FEAT_LCD_CLK_SRC = 1 << 12, + FEAT_CORE_CLK_DIV = 1 << 11, + FEAT_LCD_CLK_SRC = 1 << 12, + /* DSI-PLL power command 0x3 is not working */ + FEAT_DSI_PLL_PWR_BUG = 1 << 13, + FEAT_DSI_PLL_FREQSEL = 1 << 14, + FEAT_DSI_DCS_CMD_CONFIG_VC = 1 << 15, + FEAT_DSI_VC_OCP_WIDTH = 1 << 16, + FEAT_DSI_REVERSE_TXCLKESC = 1 << 17, + FEAT_DSI_GNQ = 1 << 18, + FEAT_HDMI_CTS_SWMODE = 1 << 19, + FEAT_HANDLE_UV_SEPARATE = 1 << 20, + FEAT_ATTR2 = 1 << 21, }; /* DSS register field id */ @@ -77,7 +88,7 @@ enum omap_display_type dss_feat_get_supported_displays(enum omap_channel channel enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane); bool dss_feat_color_mode_supported(enum omap_plane plane, enum omap_color_mode color_mode); -const char *dss_feat_get_clk_source_name(enum dss_clk_source id); +const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id); bool dss_has_feature(enum dss_feat_id id); void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end); diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c index a981def8099..b0555f4f0a7 100644 --- a/drivers/video/omap2/dss/hdmi.c +++ b/drivers/video/omap2/dss/hdmi.c @@ -29,10 +29,16 @@ #include <linux/mutex.h> #include <linux/delay.h> #include <linux/string.h> -#include <plat/display.h> +#include <video/omapdss.h> +#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ + defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) +#include <sound/soc.h> +#include <sound/pcm_params.h> +#endif #include "dss.h" #include "hdmi.h" +#include "dss_features.h" 
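dss_has_feature() boils down to a bitwise test against the per-SoC mask, which is why the dss_features.h hunk above can grow the enum with the new FEAT_DSI_*, FEAT_HDMI_CTS_SWMODE, FEAT_HANDLE_UV_SEPARATE and FEAT_ATTR2 bits without touching callers. A standalone sketch using a few of the bit values from this patch; the example mask is arbitrary, roughly OMAP3630-like.

#include <stdio.h>
#include <stdbool.h>

/* A subset of the dss_feat_id bits added or used in this patch. */
enum feat_id {
	FEAT_LCD_CLK_SRC	= 1 << 12,
	FEAT_DSI_PLL_PWR_BUG	= 1 << 13,
	FEAT_DSI_PLL_FREQSEL	= 1 << 14,
	FEAT_HDMI_CTS_SWMODE	= 1 << 19,
};

/* Pretend this was picked in dss_features_init() based on the SoC. */
static unsigned int current_features =
	FEAT_DSI_PLL_PWR_BUG | FEAT_DSI_PLL_FREQSEL;

static bool has_feature(enum feat_id id)
{
	return (current_features & id) != 0;
}

int main(void)
{
	printf("PLL power bug workaround needed: %s\n",
	       has_feature(FEAT_DSI_PLL_PWR_BUG) ? "yes" : "no");
	printf("HDMI CTS software mode:          %s\n",
	       has_feature(FEAT_HDMI_CTS_SWMODE) ? "yes" : "no");
	return 0;
}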
static struct { struct mutex lock; @@ -1052,25 +1058,26 @@ static void update_hdmi_timings(struct hdmi_config *cfg, cfg->timings.hsync_pol = cea_vesa_timings[code].hsync_pol; } -static void hdmi_compute_pll(unsigned long clkin, int phy, - int n, struct hdmi_pll_info *pi) +static void hdmi_compute_pll(struct omap_dss_device *dssdev, int phy, + struct hdmi_pll_info *pi) { - unsigned long refclk; + unsigned long clkin, refclk; u32 mf; + clkin = dss_clk_get_rate(DSS_CLK_SYSCK) / 10000; /* * Input clock is predivided by N + 1 * out put of which is reference clk */ - refclk = clkin / (n + 1); - pi->regn = n; + pi->regn = dssdev->clocks.hdmi.regn; + refclk = clkin / (pi->regn + 1); /* * multiplier is pixel_clk/ref_clk * Multiplying by 100 to avoid fractional part removal */ - pi->regm = (phy * 100/(refclk))/100; - pi->regm2 = 1; + pi->regm = (phy * 100 / (refclk)) / 100; + pi->regm2 = dssdev->clocks.hdmi.regm2; /* * fractional multiplier is remainder of the difference between @@ -1078,14 +1085,14 @@ static void hdmi_compute_pll(unsigned long clkin, int phy, * multiplied by 2^18(262144) divided by the reference clock */ mf = (phy - pi->regm * refclk) * 262144; - pi->regmf = mf/(refclk); + pi->regmf = mf / (refclk); /* * Dcofreq should be set to 1 if required pixel clock * is greater than 1000MHz */ pi->dcofreq = phy > 1000 * 100; - pi->regsd = ((pi->regm * clkin / 10) / ((n + 1) * 250) + 5) / 10; + pi->regsd = ((pi->regm * clkin / 10) / ((pi->regn + 1) * 250) + 5) / 10; DSSDBG("M = %d Mf = %d\n", pi->regm, pi->regmf); DSSDBG("range = %d sd = %d\n", pi->dcofreq, pi->regsd); @@ -1106,7 +1113,7 @@ static int hdmi_power_on(struct omap_dss_device *dssdev) int r, code = 0; struct hdmi_pll_info pll_data; struct omap_video_timings *p; - int clkin, n, phy; + unsigned long phy; hdmi_enable_clocks(1); @@ -1126,11 +1133,9 @@ static int hdmi_power_on(struct omap_dss_device *dssdev) dssdev->panel.timings = cea_vesa_timings[code].timings; update_hdmi_timings(&hdmi.cfg, p, code); - clkin = 3840; /* 38.4 MHz */ - n = 15; /* this is a constant for our math */ phy = p->pixel_clock; - hdmi_compute_pll(clkin, phy, n, &pll_data); + hdmi_compute_pll(dssdev, phy, &pll_data); hdmi_wp_video_start(0); @@ -1160,7 +1165,7 @@ static int hdmi_power_on(struct omap_dss_device *dssdev) * dynamically by user. This can be moved to single location , say * Boardfile. 
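hdmi_compute_pll() above now takes regn and regm2 from dssdev->clocks.hdmi and derives regm, regmf and regsd from the SYS clock and the requested rate. The standalone replica below repeats the same integer math, assuming (as the /10000 and the old hard-coded constants suggest) that the SYS clock is expressed in units of 10 kHz and the pixel clock in kHz; the inputs are the pre-patch defaults of 38.4 MHz, regn = 15, regm2 = 1, with a 74.25 MHz pixel clock as the worked case.

#include <stdio.h>

struct hdmi_pll_info {
	unsigned regn, regm, regm2, regmf, regsd;
	int dcofreq;
};

/*
 * Same integer math as hdmi_compute_pll() in the hunk above:
 * clkin is the SYS clock in units of 10 kHz, phy the target rate in kHz,
 * regn and regm2 come from the board's clock configuration.
 */
static void compute_pll(unsigned long clkin, unsigned long phy,
			unsigned regn, unsigned regm2,
			struct hdmi_pll_info *pi)
{
	unsigned long refclk, mf;

	pi->regn = regn;
	refclk = clkin / (pi->regn + 1);	/* input predivided by N + 1 */

	/* integer multiplier; *100 keeps two decimals through the division */
	pi->regm = (phy * 100 / refclk) / 100;
	pi->regm2 = regm2;

	/* fractional part, scaled by 2^18 (262144) as the hardware expects */
	mf = (phy - pi->regm * refclk) * 262144;
	pi->regmf = mf / refclk;

	pi->dcofreq = phy > 1000 * 100;		/* set for > 1000 MHz */
	pi->regsd = ((pi->regm * clkin / 10) / ((pi->regn + 1) * 250) + 5) / 10;
}

int main(void)
{
	struct hdmi_pll_info pi;

	/* 38.4 MHz SYS clock (3840 * 10 kHz), 74.25 MHz pixel clock, regn = 15 */
	compute_pll(3840, 74250, 15, 1, &pi);
	printf("regm=%u regmf=%u regsd=%u dcofreq=%d\n",
	       pi.regm, pi.regmf, pi.regsd, pi.dcofreq);
	/* prints regm=309 regmf=98304 regsd=3 dcofreq=0 */
	return 0;
}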
*/ - dss_select_dispc_clk_source(DSS_CLK_SRC_FCK); + dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src); /* bypass TV gamma table */ dispc_enable_gamma_table(0); @@ -1275,10 +1280,420 @@ void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev) mutex_unlock(&hdmi.lock); } +#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ + defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) +static void hdmi_wp_audio_config_format( + struct hdmi_audio_format *aud_fmt) +{ + u32 r; + + DSSDBG("Enter hdmi_wp_audio_config_format\n"); + + r = hdmi_read_reg(HDMI_WP_AUDIO_CFG); + r = FLD_MOD(r, aud_fmt->stereo_channels, 26, 24); + r = FLD_MOD(r, aud_fmt->active_chnnls_msk, 23, 16); + r = FLD_MOD(r, aud_fmt->en_sig_blk_strt_end, 5, 5); + r = FLD_MOD(r, aud_fmt->type, 4, 4); + r = FLD_MOD(r, aud_fmt->justification, 3, 3); + r = FLD_MOD(r, aud_fmt->sample_order, 2, 2); + r = FLD_MOD(r, aud_fmt->samples_per_word, 1, 1); + r = FLD_MOD(r, aud_fmt->sample_size, 0, 0); + hdmi_write_reg(HDMI_WP_AUDIO_CFG, r); +} + +static void hdmi_wp_audio_config_dma(struct hdmi_audio_dma *aud_dma) +{ + u32 r; + + DSSDBG("Enter hdmi_wp_audio_config_dma\n"); + + r = hdmi_read_reg(HDMI_WP_AUDIO_CFG2); + r = FLD_MOD(r, aud_dma->transfer_size, 15, 8); + r = FLD_MOD(r, aud_dma->block_size, 7, 0); + hdmi_write_reg(HDMI_WP_AUDIO_CFG2, r); + + r = hdmi_read_reg(HDMI_WP_AUDIO_CTRL); + r = FLD_MOD(r, aud_dma->mode, 9, 9); + r = FLD_MOD(r, aud_dma->fifo_threshold, 8, 0); + hdmi_write_reg(HDMI_WP_AUDIO_CTRL, r); +} + +static void hdmi_core_audio_config(struct hdmi_core_audio_config *cfg) +{ + u32 r; + + /* audio clock recovery parameters */ + r = hdmi_read_reg(HDMI_CORE_AV_ACR_CTRL); + r = FLD_MOD(r, cfg->use_mclk, 2, 2); + r = FLD_MOD(r, cfg->en_acr_pkt, 1, 1); + r = FLD_MOD(r, cfg->cts_mode, 0, 0); + hdmi_write_reg(HDMI_CORE_AV_ACR_CTRL, r); + + REG_FLD_MOD(HDMI_CORE_AV_N_SVAL1, cfg->n, 7, 0); + REG_FLD_MOD(HDMI_CORE_AV_N_SVAL2, cfg->n >> 8, 7, 0); + REG_FLD_MOD(HDMI_CORE_AV_N_SVAL3, cfg->n >> 16, 7, 0); + + if (cfg->cts_mode == HDMI_AUDIO_CTS_MODE_SW) { + REG_FLD_MOD(HDMI_CORE_AV_CTS_SVAL1, cfg->cts, 7, 0); + REG_FLD_MOD(HDMI_CORE_AV_CTS_SVAL2, cfg->cts >> 8, 7, 0); + REG_FLD_MOD(HDMI_CORE_AV_CTS_SVAL3, cfg->cts >> 16, 7, 0); + } else { + /* + * HDMI IP uses this configuration to divide the MCLK to + * update CTS value. 
+ */ + REG_FLD_MOD(HDMI_CORE_AV_FREQ_SVAL, cfg->mclk_mode, 2, 0); + + /* Configure clock for audio packets */ + REG_FLD_MOD(HDMI_CORE_AV_AUD_PAR_BUSCLK_1, + cfg->aud_par_busclk, 7, 0); + REG_FLD_MOD(HDMI_CORE_AV_AUD_PAR_BUSCLK_2, + (cfg->aud_par_busclk >> 8), 7, 0); + REG_FLD_MOD(HDMI_CORE_AV_AUD_PAR_BUSCLK_3, + (cfg->aud_par_busclk >> 16), 7, 0); + } + + /* Override of SPDIF sample frequency with value in I2S_CHST4 */ + REG_FLD_MOD(HDMI_CORE_AV_SPDIF_CTRL, cfg->fs_override, 1, 1); + + /* I2S parameters */ + REG_FLD_MOD(HDMI_CORE_AV_I2S_CHST4, cfg->freq_sample, 3, 0); + + r = hdmi_read_reg(HDMI_CORE_AV_I2S_IN_CTRL); + r = FLD_MOD(r, cfg->i2s_cfg.en_high_bitrate_aud, 7, 7); + r = FLD_MOD(r, cfg->i2s_cfg.sck_edge_mode, 6, 6); + r = FLD_MOD(r, cfg->i2s_cfg.cbit_order, 5, 5); + r = FLD_MOD(r, cfg->i2s_cfg.vbit, 4, 4); + r = FLD_MOD(r, cfg->i2s_cfg.ws_polarity, 3, 3); + r = FLD_MOD(r, cfg->i2s_cfg.justification, 2, 2); + r = FLD_MOD(r, cfg->i2s_cfg.direction, 1, 1); + r = FLD_MOD(r, cfg->i2s_cfg.shift, 0, 0); + hdmi_write_reg(HDMI_CORE_AV_I2S_IN_CTRL, r); + + r = hdmi_read_reg(HDMI_CORE_AV_I2S_CHST5); + r = FLD_MOD(r, cfg->freq_sample, 7, 4); + r = FLD_MOD(r, cfg->i2s_cfg.word_length, 3, 1); + r = FLD_MOD(r, cfg->i2s_cfg.word_max_length, 0, 0); + hdmi_write_reg(HDMI_CORE_AV_I2S_CHST5, r); + + REG_FLD_MOD(HDMI_CORE_AV_I2S_IN_LEN, cfg->i2s_cfg.in_length_bits, 3, 0); + + /* Audio channels and mode parameters */ + REG_FLD_MOD(HDMI_CORE_AV_HDMI_CTRL, cfg->layout, 2, 1); + r = hdmi_read_reg(HDMI_CORE_AV_AUD_MODE); + r = FLD_MOD(r, cfg->i2s_cfg.active_sds, 7, 4); + r = FLD_MOD(r, cfg->en_dsd_audio, 3, 3); + r = FLD_MOD(r, cfg->en_parallel_aud_input, 2, 2); + r = FLD_MOD(r, cfg->en_spdif, 1, 1); + hdmi_write_reg(HDMI_CORE_AV_AUD_MODE, r); +} + +static void hdmi_core_audio_infoframe_config( + struct hdmi_core_infoframe_audio *info_aud) +{ + u8 val; + u8 sum = 0, checksum = 0; + + /* + * Set audio info frame type, version and length as + * described in HDMI 1.4a Section 8.2.2 specification. + * Checksum calculation is defined in Section 5.3.5. + */ + hdmi_write_reg(HDMI_CORE_AV_AUDIO_TYPE, 0x84); + hdmi_write_reg(HDMI_CORE_AV_AUDIO_VERS, 0x01); + hdmi_write_reg(HDMI_CORE_AV_AUDIO_LEN, 0x0a); + sum += 0x84 + 0x001 + 0x00a; + + val = (info_aud->db1_coding_type << 4) + | (info_aud->db1_channel_count - 1); + hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(0), val); + sum += val; + + val = (info_aud->db2_sample_freq << 2) | info_aud->db2_sample_size; + hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(1), val); + sum += val; + + hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(2), 0x00); + + val = info_aud->db4_channel_alloc; + hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(3), val); + sum += val; + + val = (info_aud->db5_downmix_inh << 7) | (info_aud->db5_lsv << 3); + hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(4), val); + sum += val; + + hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(5), 0x00); + hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(6), 0x00); + hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(7), 0x00); + hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(8), 0x00); + hdmi_write_reg(HDMI_CORE_AV_AUD_DBYTE(9), 0x00); + + checksum = 0x100 - sum; + hdmi_write_reg(HDMI_CORE_AV_AUDIO_CHSUM, checksum); + + /* + * TODO: Add MPEG and SPD enable and repeat cfg when EDID parsing + * is available. + */ +} + +static int hdmi_config_audio_acr(u32 sample_freq, u32 *n, u32 *cts) +{ + u32 r; + u32 deep_color = 0; + u32 pclk = hdmi.cfg.timings.timings.pixel_clock; + + if (n == NULL || cts == NULL) + return -EINVAL; + /* + * Obtain current deep color configuration. 
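hdmi_core_audio_infoframe_config() above writes the audio infoframe header (type 0x84, version 0x01, length 0x0a), accumulates the byte sum in a u8, and stores checksum = 0x100 - sum, so that the whole infoframe sums to zero modulo 256. A standalone worked example of that checksum; the payload bytes below describe a hypothetical 2-channel LPCM stream, not values taken from the driver.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Audio infoframe header (type 0x84, version 0x01, length 0x0a)
	 * followed by the data bytes: here db1 = 0x01 for 2-channel LPCM
	 * with coding type "refer to stream", everything else zero. */
	uint8_t bytes[] = { 0x84, 0x01, 0x0a,
			    0x01, 0x00, 0x00, 0x00, 0x00,
			    0x00, 0x00, 0x00, 0x00, 0x00 };
	uint8_t sum = 0, checksum;
	unsigned i, total;

	for (i = 0; i < sizeof(bytes); i++)
		sum += bytes[i];

	/* Same as the driver: checksum = 0x100 - sum, modulo 256. */
	checksum = (uint8_t)(0x100 - sum);
	printf("sum=0x%02x checksum=0x%02x\n", sum, checksum);

	/* Sanity check: header + payload + checksum sums to 0 mod 256. */
	total = sum + checksum;
	printf("(sum + checksum) %% 256 = %u\n", total % 256);
	return 0;
}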
This needed + * to calculate the TMDS clock based on the pixel clock. + */ + r = REG_GET(HDMI_WP_VIDEO_CFG, 1, 0); + switch (r) { + case 1: /* No deep color selected */ + deep_color = 100; + break; + case 2: /* 10-bit deep color selected */ + deep_color = 125; + break; + case 3: /* 12-bit deep color selected */ + deep_color = 150; + break; + default: + return -EINVAL; + } + + switch (sample_freq) { + case 32000: + if ((deep_color == 125) && ((pclk == 54054) + || (pclk == 74250))) + *n = 8192; + else + *n = 4096; + break; + case 44100: + *n = 6272; + break; + case 48000: + if ((deep_color == 125) && ((pclk == 54054) + || (pclk == 74250))) + *n = 8192; + else + *n = 6144; + break; + default: + *n = 0; + return -EINVAL; + } + + /* Calculate CTS. See HDMI 1.3a or 1.4a specifications */ + *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10); + + return 0; +} + +static int hdmi_audio_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params, + struct snd_soc_dai *dai) +{ + struct hdmi_audio_format audio_format; + struct hdmi_audio_dma audio_dma; + struct hdmi_core_audio_config core_cfg; + struct hdmi_core_infoframe_audio aud_if_cfg; + int err, n, cts; + enum hdmi_core_audio_sample_freq sample_freq; + + switch (params_format(params)) { + case SNDRV_PCM_FORMAT_S16_LE: + core_cfg.i2s_cfg.word_max_length = + HDMI_AUDIO_I2S_MAX_WORD_20BITS; + core_cfg.i2s_cfg.word_length = HDMI_AUDIO_I2S_CHST_WORD_16_BITS; + core_cfg.i2s_cfg.in_length_bits = + HDMI_AUDIO_I2S_INPUT_LENGTH_16; + core_cfg.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT; + audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES; + audio_format.sample_size = HDMI_AUDIO_SAMPLE_16BITS; + audio_format.justification = HDMI_AUDIO_JUSTIFY_LEFT; + audio_dma.transfer_size = 0x10; + break; + case SNDRV_PCM_FORMAT_S24_LE: + core_cfg.i2s_cfg.word_max_length = + HDMI_AUDIO_I2S_MAX_WORD_24BITS; + core_cfg.i2s_cfg.word_length = HDMI_AUDIO_I2S_CHST_WORD_24_BITS; + core_cfg.i2s_cfg.in_length_bits = + HDMI_AUDIO_I2S_INPUT_LENGTH_24; + audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_ONESAMPLE; + audio_format.sample_size = HDMI_AUDIO_SAMPLE_24BITS; + audio_format.justification = HDMI_AUDIO_JUSTIFY_RIGHT; + core_cfg.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT; + audio_dma.transfer_size = 0x20; + break; + default: + return -EINVAL; + } + + switch (params_rate(params)) { + case 32000: + sample_freq = HDMI_AUDIO_FS_32000; + break; + case 44100: + sample_freq = HDMI_AUDIO_FS_44100; + break; + case 48000: + sample_freq = HDMI_AUDIO_FS_48000; + break; + default: + return -EINVAL; + } + + err = hdmi_config_audio_acr(params_rate(params), &n, &cts); + if (err < 0) + return err; + + /* Audio wrapper config */ + audio_format.stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL; + audio_format.active_chnnls_msk = 0x03; + audio_format.type = HDMI_AUDIO_TYPE_LPCM; + audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST; + /* Disable start/stop signals of IEC 60958 blocks */ + audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_OFF; + + audio_dma.block_size = 0xC0; + audio_dma.mode = HDMI_AUDIO_TRANSF_DMA; + audio_dma.fifo_threshold = 0x20; /* in number of samples */ + + hdmi_wp_audio_config_dma(&audio_dma); + hdmi_wp_audio_config_format(&audio_format); + + /* + * I2S config + */ + core_cfg.i2s_cfg.en_high_bitrate_aud = false; + /* Only used with high bitrate audio */ + core_cfg.i2s_cfg.cbit_order = false; + /* Serial data and word select should change on sck rising edge */ + core_cfg.i2s_cfg.sck_edge_mode = 
HDMI_AUDIO_I2S_SCK_EDGE_RISING; + core_cfg.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM; + /* Set I2S word select polarity */ + core_cfg.i2s_cfg.ws_polarity = HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT; + core_cfg.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST; + /* Set serial data to word select shift. See Phillips spec. */ + core_cfg.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT; + /* Enable one of the four available serial data channels */ + core_cfg.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN; + + /* Core audio config */ + core_cfg.freq_sample = sample_freq; + core_cfg.n = n; + core_cfg.cts = cts; + if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) { + core_cfg.aud_par_busclk = 0; + core_cfg.cts_mode = HDMI_AUDIO_CTS_MODE_SW; + core_cfg.use_mclk = false; + } else { + core_cfg.aud_par_busclk = (((128 * 31) - 1) << 8); + core_cfg.cts_mode = HDMI_AUDIO_CTS_MODE_HW; + core_cfg.use_mclk = true; + core_cfg.mclk_mode = HDMI_AUDIO_MCLK_128FS; + } + core_cfg.layout = HDMI_AUDIO_LAYOUT_2CH; + core_cfg.en_spdif = false; + /* Use sample frequency from channel status word */ + core_cfg.fs_override = true; + /* Enable ACR packets */ + core_cfg.en_acr_pkt = true; + /* Disable direct streaming digital audio */ + core_cfg.en_dsd_audio = false; + /* Use parallel audio interface */ + core_cfg.en_parallel_aud_input = true; + + hdmi_core_audio_config(&core_cfg); + + /* + * Configure packet + * info frame audio see doc CEA861-D page 74 + */ + aud_if_cfg.db1_coding_type = HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM; + aud_if_cfg.db1_channel_count = 2; + aud_if_cfg.db2_sample_freq = HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM; + aud_if_cfg.db2_sample_size = HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM; + aud_if_cfg.db4_channel_alloc = 0x00; + aud_if_cfg.db5_downmix_inh = false; + aud_if_cfg.db5_lsv = 0; + + hdmi_core_audio_infoframe_config(&aud_if_cfg); + return 0; +} + +static int hdmi_audio_trigger(struct snd_pcm_substream *substream, int cmd, + struct snd_soc_dai *dai) +{ + int err = 0; + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + case SNDRV_PCM_TRIGGER_RESUME: + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: + REG_FLD_MOD(HDMI_CORE_AV_AUD_MODE, 1, 0, 0); + REG_FLD_MOD(HDMI_WP_AUDIO_CTRL, 1, 31, 31); + REG_FLD_MOD(HDMI_WP_AUDIO_CTRL, 1, 30, 30); + break; + + case SNDRV_PCM_TRIGGER_STOP: + case SNDRV_PCM_TRIGGER_SUSPEND: + case SNDRV_PCM_TRIGGER_PAUSE_PUSH: + REG_FLD_MOD(HDMI_CORE_AV_AUD_MODE, 0, 0, 0); + REG_FLD_MOD(HDMI_WP_AUDIO_CTRL, 0, 30, 30); + REG_FLD_MOD(HDMI_WP_AUDIO_CTRL, 0, 31, 31); + break; + default: + err = -EINVAL; + } + return err; +} + +static int hdmi_audio_startup(struct snd_pcm_substream *substream, + struct snd_soc_dai *dai) +{ + if (!hdmi.mode) { + pr_err("Current video settings do not support audio.\n"); + return -EIO; + } + return 0; +} + +static struct snd_soc_codec_driver hdmi_audio_codec_drv = { +}; + +static struct snd_soc_dai_ops hdmi_audio_codec_ops = { + .hw_params = hdmi_audio_hw_params, + .trigger = hdmi_audio_trigger, + .startup = hdmi_audio_startup, +}; + +static struct snd_soc_dai_driver hdmi_codec_dai_drv = { + .name = "hdmi-audio-codec", + .playback = { + .channels_min = 2, + .channels_max = 2, + .rates = SNDRV_PCM_RATE_32000 | + SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000, + .formats = SNDRV_PCM_FMTBIT_S16_LE | + SNDRV_PCM_FMTBIT_S24_LE, + }, + .ops = &hdmi_audio_codec_ops, +}; +#endif + /* HDMI HW IP initialisation */ static int omapdss_hdmihw_probe(struct platform_device *pdev) { struct resource *hdmi_mem; +#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ + 
defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) + int ret; +#endif hdmi.pdata = pdev->dev.platform_data; hdmi.pdev = pdev; @@ -1300,6 +1715,17 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev) hdmi_panel_init(); +#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ + defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) + + /* Register ASoC codec DAI */ + ret = snd_soc_register_codec(&pdev->dev, &hdmi_audio_codec_drv, + &hdmi_codec_dai_drv, 1); + if (ret) { + DSSERR("can't register ASoC HDMI audio codec\n"); + return ret; + } +#endif return 0; } @@ -1307,6 +1733,11 @@ static int omapdss_hdmihw_remove(struct platform_device *pdev) { hdmi_panel_exit(); +#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ + defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) + snd_soc_unregister_codec(&pdev->dev); +#endif + iounmap(hdmi.base_wp); return 0; diff --git a/drivers/video/omap2/dss/hdmi.h b/drivers/video/omap2/dss/hdmi.h index 9887ab96da3..c885f9cb065 100644 --- a/drivers/video/omap2/dss/hdmi.h +++ b/drivers/video/omap2/dss/hdmi.h @@ -22,7 +22,7 @@ #define _OMAP4_DSS_HDMI_H_ #include <linux/string.h> -#include <plat/display.h> +#include <video/omapdss.h> #define HDMI_WP 0x0 #define HDMI_CORE_SYS 0x400 @@ -48,6 +48,10 @@ struct hdmi_reg { u16 idx; }; #define HDMI_WP_VIDEO_TIMING_H HDMI_WP_REG(0x68) #define HDMI_WP_VIDEO_TIMING_V HDMI_WP_REG(0x6C) #define HDMI_WP_WP_CLK HDMI_WP_REG(0x70) +#define HDMI_WP_AUDIO_CFG HDMI_WP_REG(0x80) +#define HDMI_WP_AUDIO_CFG2 HDMI_WP_REG(0x84) +#define HDMI_WP_AUDIO_CTRL HDMI_WP_REG(0x88) +#define HDMI_WP_AUDIO_DATA HDMI_WP_REG(0x8C) /* HDMI IP Core System */ #define HDMI_CORE_SYS_REG(idx) HDMI_REG(HDMI_CORE_SYS + idx) @@ -105,6 +109,8 @@ struct hdmi_reg { u16 idx; }; #define HDMI_CORE_AV_AVI_DBYTE_NELEMS HDMI_CORE_AV_REG(15) #define HDMI_CORE_AV_SPD_DBYTE HDMI_CORE_AV_REG(0x190) #define HDMI_CORE_AV_SPD_DBYTE_NELEMS HDMI_CORE_AV_REG(27) +#define HDMI_CORE_AV_AUD_DBYTE(n) HDMI_CORE_AV_REG(n * 4 + 0x210) +#define HDMI_CORE_AV_AUD_DBYTE_NELEMS HDMI_CORE_AV_REG(10) #define HDMI_CORE_AV_MPEG_DBYTE HDMI_CORE_AV_REG(0x290) #define HDMI_CORE_AV_MPEG_DBYTE_NELEMS HDMI_CORE_AV_REG(27) #define HDMI_CORE_AV_GEN_DBYTE HDMI_CORE_AV_REG(0x300) @@ -153,6 +159,10 @@ struct hdmi_reg { u16 idx; }; #define HDMI_CORE_AV_SPD_VERS HDMI_CORE_AV_REG(0x184) #define HDMI_CORE_AV_SPD_LEN HDMI_CORE_AV_REG(0x188) #define HDMI_CORE_AV_SPD_CHSUM HDMI_CORE_AV_REG(0x18C) +#define HDMI_CORE_AV_AUDIO_TYPE HDMI_CORE_AV_REG(0x200) +#define HDMI_CORE_AV_AUDIO_VERS HDMI_CORE_AV_REG(0x204) +#define HDMI_CORE_AV_AUDIO_LEN HDMI_CORE_AV_REG(0x208) +#define HDMI_CORE_AV_AUDIO_CHSUM HDMI_CORE_AV_REG(0x20C) #define HDMI_CORE_AV_MPEG_TYPE HDMI_CORE_AV_REG(0x280) #define HDMI_CORE_AV_MPEG_VERS HDMI_CORE_AV_REG(0x284) #define HDMI_CORE_AV_MPEG_LEN HDMI_CORE_AV_REG(0x288) @@ -272,7 +282,7 @@ enum hdmi_core_packet_ctrl { HDMI_PACKETREPEATOFF = 0 }; -/* INFOFRAME_AVI_ definitions */ +/* INFOFRAME_AVI_ and INFOFRAME_AUDIO_ definitions */ enum hdmi_core_infoframe { HDMI_INFOFRAME_AVI_DB1Y_RGB = 0, HDMI_INFOFRAME_AVI_DB1Y_YUV422 = 1, @@ -317,7 +327,36 @@ enum hdmi_core_infoframe { HDMI_INFOFRAME_AVI_DB5PR_7 = 6, HDMI_INFOFRAME_AVI_DB5PR_8 = 7, HDMI_INFOFRAME_AVI_DB5PR_9 = 8, - HDMI_INFOFRAME_AVI_DB5PR_10 = 9 + HDMI_INFOFRAME_AVI_DB5PR_10 = 9, + HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM = 0, + HDMI_INFOFRAME_AUDIO_DB1CT_IEC60958 = 1, + HDMI_INFOFRAME_AUDIO_DB1CT_AC3 = 2, + HDMI_INFOFRAME_AUDIO_DB1CT_MPEG1 = 3, + HDMI_INFOFRAME_AUDIO_DB1CT_MP3 = 4, + HDMI_INFOFRAME_AUDIO_DB1CT_MPEG2_MULTICH = 5, + HDMI_INFOFRAME_AUDIO_DB1CT_AAC = 
6, + HDMI_INFOFRAME_AUDIO_DB1CT_DTS = 7, + HDMI_INFOFRAME_AUDIO_DB1CT_ATRAC = 8, + HDMI_INFOFRAME_AUDIO_DB1CT_ONEBIT = 9, + HDMI_INFOFRAME_AUDIO_DB1CT_DOLBY_DIGITAL_PLUS = 10, + HDMI_INFOFRAME_AUDIO_DB1CT_DTS_HD = 11, + HDMI_INFOFRAME_AUDIO_DB1CT_MAT = 12, + HDMI_INFOFRAME_AUDIO_DB1CT_DST = 13, + HDMI_INFOFRAME_AUDIO_DB1CT_WMA_PRO = 14, + HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM = 0, + HDMI_INFOFRAME_AUDIO_DB2SF_32000 = 1, + HDMI_INFOFRAME_AUDIO_DB2SF_44100 = 2, + HDMI_INFOFRAME_AUDIO_DB2SF_48000 = 3, + HDMI_INFOFRAME_AUDIO_DB2SF_88200 = 4, + HDMI_INFOFRAME_AUDIO_DB2SF_96000 = 5, + HDMI_INFOFRAME_AUDIO_DB2SF_176400 = 6, + HDMI_INFOFRAME_AUDIO_DB2SF_192000 = 7, + HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM = 0, + HDMI_INFOFRAME_AUDIO_DB2SS_16BIT = 1, + HDMI_INFOFRAME_AUDIO_DB2SS_20BIT = 2, + HDMI_INFOFRAME_AUDIO_DB2SS_24BIT = 3, + HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PERMITTED = 0, + HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PROHIBITED = 1 }; enum hdmi_packing_mode { @@ -327,6 +366,121 @@ enum hdmi_packing_mode { HDMI_PACK_ALREADYPACKED = 7 }; +enum hdmi_core_audio_sample_freq { + HDMI_AUDIO_FS_32000 = 0x3, + HDMI_AUDIO_FS_44100 = 0x0, + HDMI_AUDIO_FS_48000 = 0x2, + HDMI_AUDIO_FS_88200 = 0x8, + HDMI_AUDIO_FS_96000 = 0xA, + HDMI_AUDIO_FS_176400 = 0xC, + HDMI_AUDIO_FS_192000 = 0xE, + HDMI_AUDIO_FS_NOT_INDICATED = 0x1 +}; + +enum hdmi_core_audio_layout { + HDMI_AUDIO_LAYOUT_2CH = 0, + HDMI_AUDIO_LAYOUT_8CH = 1 +}; + +enum hdmi_core_cts_mode { + HDMI_AUDIO_CTS_MODE_HW = 0, + HDMI_AUDIO_CTS_MODE_SW = 1 +}; + +enum hdmi_stereo_channels { + HDMI_AUDIO_STEREO_NOCHANNELS = 0, + HDMI_AUDIO_STEREO_ONECHANNEL = 1, + HDMI_AUDIO_STEREO_TWOCHANNELS = 2, + HDMI_AUDIO_STEREO_THREECHANNELS = 3, + HDMI_AUDIO_STEREO_FOURCHANNELS = 4 +}; + +enum hdmi_audio_type { + HDMI_AUDIO_TYPE_LPCM = 0, + HDMI_AUDIO_TYPE_IEC = 1 +}; + +enum hdmi_audio_justify { + HDMI_AUDIO_JUSTIFY_LEFT = 0, + HDMI_AUDIO_JUSTIFY_RIGHT = 1 +}; + +enum hdmi_audio_sample_order { + HDMI_AUDIO_SAMPLE_RIGHT_FIRST = 0, + HDMI_AUDIO_SAMPLE_LEFT_FIRST = 1 +}; + +enum hdmi_audio_samples_perword { + HDMI_AUDIO_ONEWORD_ONESAMPLE = 0, + HDMI_AUDIO_ONEWORD_TWOSAMPLES = 1 +}; + +enum hdmi_audio_sample_size { + HDMI_AUDIO_SAMPLE_16BITS = 0, + HDMI_AUDIO_SAMPLE_24BITS = 1 +}; + +enum hdmi_audio_transf_mode { + HDMI_AUDIO_TRANSF_DMA = 0, + HDMI_AUDIO_TRANSF_IRQ = 1 +}; + +enum hdmi_audio_blk_strt_end_sig { + HDMI_AUDIO_BLOCK_SIG_STARTEND_ON = 0, + HDMI_AUDIO_BLOCK_SIG_STARTEND_OFF = 1 +}; + +enum hdmi_audio_i2s_config { + HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT = 0, + HDMI_AUDIO_I2S_WS_POLARIT_YLOW_IS_RIGHT = 1, + HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST = 0, + HDMI_AUDIO_I2S_LSB_SHIFTED_FIRST = 1, + HDMI_AUDIO_I2S_MAX_WORD_20BITS = 0, + HDMI_AUDIO_I2S_MAX_WORD_24BITS = 1, + HDMI_AUDIO_I2S_CHST_WORD_NOT_SPECIFIED = 0, + HDMI_AUDIO_I2S_CHST_WORD_16_BITS = 1, + HDMI_AUDIO_I2S_CHST_WORD_17_BITS = 6, + HDMI_AUDIO_I2S_CHST_WORD_18_BITS = 2, + HDMI_AUDIO_I2S_CHST_WORD_19_BITS = 4, + HDMI_AUDIO_I2S_CHST_WORD_20_BITS_20MAX = 5, + HDMI_AUDIO_I2S_CHST_WORD_20_BITS_24MAX = 1, + HDMI_AUDIO_I2S_CHST_WORD_21_BITS = 6, + HDMI_AUDIO_I2S_CHST_WORD_22_BITS = 2, + HDMI_AUDIO_I2S_CHST_WORD_23_BITS = 4, + HDMI_AUDIO_I2S_CHST_WORD_24_BITS = 5, + HDMI_AUDIO_I2S_SCK_EDGE_FALLING = 0, + HDMI_AUDIO_I2S_SCK_EDGE_RISING = 1, + HDMI_AUDIO_I2S_VBIT_FOR_PCM = 0, + HDMI_AUDIO_I2S_VBIT_FOR_COMPRESSED = 1, + HDMI_AUDIO_I2S_INPUT_LENGTH_NA = 0, + HDMI_AUDIO_I2S_INPUT_LENGTH_16 = 2, + HDMI_AUDIO_I2S_INPUT_LENGTH_17 = 12, + HDMI_AUDIO_I2S_INPUT_LENGTH_18 = 4, + HDMI_AUDIO_I2S_INPUT_LENGTH_19 = 8, + 
HDMI_AUDIO_I2S_INPUT_LENGTH_20 = 10, + HDMI_AUDIO_I2S_INPUT_LENGTH_21 = 13, + HDMI_AUDIO_I2S_INPUT_LENGTH_22 = 5, + HDMI_AUDIO_I2S_INPUT_LENGTH_23 = 9, + HDMI_AUDIO_I2S_INPUT_LENGTH_24 = 11, + HDMI_AUDIO_I2S_FIRST_BIT_SHIFT = 0, + HDMI_AUDIO_I2S_FIRST_BIT_NO_SHIFT = 1, + HDMI_AUDIO_I2S_SD0_EN = 1, + HDMI_AUDIO_I2S_SD1_EN = 1 << 1, + HDMI_AUDIO_I2S_SD2_EN = 1 << 2, + HDMI_AUDIO_I2S_SD3_EN = 1 << 3, +}; + +enum hdmi_audio_mclk_mode { + HDMI_AUDIO_MCLK_128FS = 0, + HDMI_AUDIO_MCLK_256FS = 1, + HDMI_AUDIO_MCLK_384FS = 2, + HDMI_AUDIO_MCLK_512FS = 3, + HDMI_AUDIO_MCLK_768FS = 4, + HDMI_AUDIO_MCLK_1024FS = 5, + HDMI_AUDIO_MCLK_1152FS = 6, + HDMI_AUDIO_MCLK_192FS = 7 +}; + struct hdmi_core_video_config { enum hdmi_core_inputbus_width ip_bus_width; enum hdmi_core_dither_trunc op_dither_truc; @@ -376,6 +530,19 @@ struct hdmi_core_infoframe_avi { u16 db12_13_pixel_sofright; /* Pixel number start of right bar */ }; +/* + * Refer to section 8.2 in HDMI 1.3 specification for + * details about infoframe databytes + */ +struct hdmi_core_infoframe_audio { + u8 db1_coding_type; + u8 db1_channel_count; + u8 db2_sample_freq; + u8 db2_sample_size; + u8 db4_channel_alloc; + bool db5_downmix_inh; + u8 db5_lsv; /* Level shift values for downmix */ +}; struct hdmi_core_packet_enable_repeat { u32 audio_pkt; @@ -412,4 +579,53 @@ struct hdmi_config { struct hdmi_cm cm; }; +struct hdmi_audio_format { + enum hdmi_stereo_channels stereo_channels; + u8 active_chnnls_msk; + enum hdmi_audio_type type; + enum hdmi_audio_justify justification; + enum hdmi_audio_sample_order sample_order; + enum hdmi_audio_samples_perword samples_per_word; + enum hdmi_audio_sample_size sample_size; + enum hdmi_audio_blk_strt_end_sig en_sig_blk_strt_end; +}; + +struct hdmi_audio_dma { + u8 transfer_size; + u8 block_size; + enum hdmi_audio_transf_mode mode; + u16 fifo_threshold; +}; + +struct hdmi_core_audio_i2s_config { + u8 word_max_length; + u8 word_length; + u8 in_length_bits; + u8 justification; + u8 en_high_bitrate_aud; + u8 sck_edge_mode; + u8 cbit_order; + u8 vbit; + u8 ws_polarity; + u8 direction; + u8 shift; + u8 active_sds; +}; + +struct hdmi_core_audio_config { + struct hdmi_core_audio_i2s_config i2s_cfg; + enum hdmi_core_audio_sample_freq freq_sample; + bool fs_override; + u32 n; + u32 cts; + u32 aud_par_busclk; + enum hdmi_core_audio_layout layout; + enum hdmi_core_cts_mode cts_mode; + bool use_mclk; + enum hdmi_audio_mclk_mode mclk_mode; + bool en_acr_pkt; + bool en_dsd_audio; + bool en_parallel_aud_input; + bool en_spdif; +}; #endif diff --git a/drivers/video/omap2/dss/hdmi_omap4_panel.c b/drivers/video/omap2/dss/hdmi_omap4_panel.c index ffb5de94131..7d4f2bd7c50 100644 --- a/drivers/video/omap2/dss/hdmi_omap4_panel.c +++ b/drivers/video/omap2/dss/hdmi_omap4_panel.c @@ -24,7 +24,7 @@ #include <linux/io.h> #include <linux/mutex.h> #include <linux/module.h> -#include <plat/display.h> +#include <video/omapdss.h> #include "dss.h" diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c index bcd37ec8695..9aeea50e33f 100644 --- a/drivers/video/omap2/dss/manager.c +++ b/drivers/video/omap2/dss/manager.c @@ -29,7 +29,7 @@ #include <linux/spinlock.h> #include <linux/jiffies.h> -#include <plat/display.h> +#include <video/omapdss.h> #include <plat/cpu.h> #include "dss.h" @@ -393,6 +393,7 @@ struct overlay_cache_data { u32 paddr; void __iomem *vaddr; + u32 p_uv_addr; /* relevant for NV12 format only */ u16 screen_width; u16 width; u16 height; @@ -775,10 +776,17 @@ static int configure_overlay(enum omap_plane 
plane) } switch (c->color_mode) { + case OMAP_DSS_COLOR_NV12: + bpp = 8; + break; case OMAP_DSS_COLOR_RGB16: case OMAP_DSS_COLOR_ARGB16: case OMAP_DSS_COLOR_YUV2: case OMAP_DSS_COLOR_UYVY: + case OMAP_DSS_COLOR_RGBA16: + case OMAP_DSS_COLOR_RGBX16: + case OMAP_DSS_COLOR_ARGB16_1555: + case OMAP_DSS_COLOR_XRGB16_1555: bpp = 16; break; @@ -854,7 +862,8 @@ static int configure_overlay(enum omap_plane plane) c->mirror, c->global_alpha, c->pre_mult_alpha, - c->channel); + c->channel, + c->p_uv_addr); if (r) { /* this shouldn't happen */ @@ -1269,6 +1278,7 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr) oc->paddr = ovl->info.paddr; oc->vaddr = ovl->info.vaddr; + oc->p_uv_addr = ovl->info.p_uv_addr; oc->screen_width = ovl->info.screen_width; oc->width = ovl->info.width; oc->height = ovl->info.height; diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c index f1aca6d0401..0f08025b1f0 100644 --- a/drivers/video/omap2/dss/overlay.c +++ b/drivers/video/omap2/dss/overlay.c @@ -31,7 +31,7 @@ #include <linux/delay.h> #include <linux/slab.h> -#include <plat/display.h> +#include <video/omapdss.h> #include <plat/cpu.h> #include "dss.h" @@ -201,12 +201,16 @@ static ssize_t overlay_enabled_show(struct omap_overlay *ovl, char *buf) static ssize_t overlay_enabled_store(struct omap_overlay *ovl, const char *buf, size_t size) { - int r; + int r, enable; struct omap_overlay_info info; ovl->get_overlay_info(ovl, &info); - info.enabled = simple_strtoul(buf, NULL, 10); + r = kstrtoint(buf, 0, &enable); + if (r) + return r; + + info.enabled = !!enable; r = ovl->set_overlay_info(ovl, &info); if (r) @@ -231,8 +235,13 @@ static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl, const char *buf, size_t size) { int r; + u8 alpha; struct omap_overlay_info info; + r = kstrtou8(buf, 0, &alpha); + if (r) + return r; + ovl->get_overlay_info(ovl, &info); /* Video1 plane does not support global alpha @@ -242,7 +251,7 @@ static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl, ovl->id == OMAP_DSS_VIDEO1) info.global_alpha = 255; else - info.global_alpha = simple_strtoul(buf, NULL, 10); + info.global_alpha = alpha; r = ovl->set_overlay_info(ovl, &info); if (r) @@ -268,8 +277,13 @@ static ssize_t overlay_pre_mult_alpha_store(struct omap_overlay *ovl, const char *buf, size_t size) { int r; + u8 alpha; struct omap_overlay_info info; + r = kstrtou8(buf, 0, &alpha); + if (r) + return r; + ovl->get_overlay_info(ovl, &info); /* only GFX and Video2 plane support pre alpha multiplied @@ -279,7 +293,7 @@ static ssize_t overlay_pre_mult_alpha_store(struct omap_overlay *ovl, ovl->id == OMAP_DSS_VIDEO1) info.pre_mult_alpha = 0; else - info.pre_mult_alpha = simple_strtoul(buf, NULL, 10); + info.pre_mult_alpha = alpha; r = ovl->set_overlay_info(ovl, &info); if (r) @@ -491,13 +505,18 @@ static int omap_dss_set_manager(struct omap_overlay *ovl, ovl->manager = mgr; dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK); - /* XXX: on manual update display, in auto update mode, a bug happens - * here. When an overlay is first enabled on LCD, then it's disabled, - * and the manager is changed to TV, we sometimes get SYNC_LOST_DIGIT - * errors. Waiting before changing the channel_out fixes it. I'm - * guessing that the overlay is still somehow being used for the LCD, - * but I don't understand how or why. 
*/ - msleep(40); + /* XXX: When there is an overlay on a DSI manual update display, and + * the overlay is first disabled, then moved to tv, and enabled, we + * seem to get SYNC_LOST_DIGIT error. + * + * Waiting doesn't seem to help, but updating the manual update display + * after disabling the overlay seems to fix this. This hints that the + * overlay is perhaps somehow tied to the LCD output until the output + * is updated. + * + * Userspace workaround for this is to update the LCD after disabling + * the overlay, but before moving the overlay to TV. + */ dispc_set_channel_out(ovl->id, mgr->id); dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c index 5ea17f49c61..c06fbe0bc67 100644 --- a/drivers/video/omap2/dss/rfbi.c +++ b/drivers/video/omap2/dss/rfbi.c @@ -32,8 +32,9 @@ #include <linux/ktime.h> #include <linux/hrtimer.h> #include <linux/seq_file.h> +#include <linux/semaphore.h> -#include <plat/display.h> +#include <video/omapdss.h> #include "dss.h" struct rfbi_reg { u16 idx; }; @@ -65,9 +66,6 @@ struct rfbi_reg { u16 idx; }; #define REG_FLD_MOD(idx, val, start, end) \ rfbi_write_reg(idx, FLD_MOD(rfbi_read_reg(idx), val, start, end)) -/* To work around an RFBI transfer rate limitation */ -#define OMAP_RFBI_RATE_LIMIT 1 - enum omap_rfbi_cycleformat { OMAP_DSS_RFBI_CYCLEFORMAT_1_1 = 0, OMAP_DSS_RFBI_CYCLEFORMAT_2_1 = 1, @@ -89,11 +87,6 @@ enum omap_rfbi_parallelmode { OMAP_DSS_RFBI_PARALLELMODE_16 = 3, }; -enum update_cmd { - RFBI_CMD_UPDATE = 0, - RFBI_CMD_SYNC = 1, -}; - static int rfbi_convert_timings(struct rfbi_timings *t); static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div); @@ -114,20 +107,9 @@ static struct { struct omap_dss_device *dssdev[2]; - struct kfifo cmd_fifo; - spinlock_t cmd_lock; - struct completion cmd_done; - atomic_t cmd_fifo_full; - atomic_t cmd_pending; + struct semaphore bus_lock; } rfbi; -struct update_region { - u16 x; - u16 y; - u16 w; - u16 h; -}; - static inline void rfbi_write_reg(const struct rfbi_reg idx, u32 val) { __raw_writel(val, rfbi.base + idx.idx); @@ -146,9 +128,20 @@ static void rfbi_enable_clocks(bool enable) dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK); } +void rfbi_bus_lock(void) +{ + down(&rfbi.bus_lock); +} +EXPORT_SYMBOL(rfbi_bus_lock); + +void rfbi_bus_unlock(void) +{ + up(&rfbi.bus_lock); +} +EXPORT_SYMBOL(rfbi_bus_unlock); + void omap_rfbi_write_command(const void *buf, u32 len) { - rfbi_enable_clocks(1); switch (rfbi.parallelmode) { case OMAP_DSS_RFBI_PARALLELMODE_8: { @@ -172,13 +165,11 @@ void omap_rfbi_write_command(const void *buf, u32 len) default: BUG(); } - rfbi_enable_clocks(0); } EXPORT_SYMBOL(omap_rfbi_write_command); void omap_rfbi_read_data(void *buf, u32 len) { - rfbi_enable_clocks(1); switch (rfbi.parallelmode) { case OMAP_DSS_RFBI_PARALLELMODE_8: { @@ -206,13 +197,11 @@ void omap_rfbi_read_data(void *buf, u32 len) default: BUG(); } - rfbi_enable_clocks(0); } EXPORT_SYMBOL(omap_rfbi_read_data); void omap_rfbi_write_data(const void *buf, u32 len) { - rfbi_enable_clocks(1); switch (rfbi.parallelmode) { case OMAP_DSS_RFBI_PARALLELMODE_8: { @@ -237,7 +226,6 @@ void omap_rfbi_write_data(const void *buf, u32 len) BUG(); } - rfbi_enable_clocks(0); } EXPORT_SYMBOL(omap_rfbi_write_data); @@ -249,8 +237,6 @@ void omap_rfbi_write_pixels(const void __iomem *buf, int scr_width, int horiz_offset = scr_width - w; int i; - rfbi_enable_clocks(1); - if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_16 && rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_8) { const u16 
__iomem *pd = buf; @@ -295,12 +281,10 @@ void omap_rfbi_write_pixels(const void __iomem *buf, int scr_width, } else { BUG(); } - - rfbi_enable_clocks(0); } EXPORT_SYMBOL(omap_rfbi_write_pixels); -void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width, +static void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width, u16 height, void (*callback)(void *data), void *data) { u32 l; @@ -317,8 +301,6 @@ void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width, rfbi.framedone_callback = callback; rfbi.framedone_callback_data = data; - rfbi_enable_clocks(1); - rfbi_write_reg(RFBI_PIXEL_CNT, width * height); l = rfbi_read_reg(RFBI_CONTROL); @@ -337,15 +319,11 @@ static void framedone_callback(void *data, u32 mask) REG_FLD_MOD(RFBI_CONTROL, 0, 0, 0); - rfbi_enable_clocks(0); - callback = rfbi.framedone_callback; rfbi.framedone_callback = NULL; if (callback != NULL) callback(rfbi.framedone_callback_data); - - atomic_set(&rfbi.cmd_pending, 0); } #if 1 /* VERBOSE */ @@ -435,7 +413,7 @@ static int calc_extif_timings(struct rfbi_timings *t) } -void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t) +static void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t) { int r; @@ -447,7 +425,6 @@ void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t) BUG_ON(!t->converted); - rfbi_enable_clocks(1); rfbi_write_reg(RFBI_ONOFF_TIME(rfbi_module), t->tim[0]); rfbi_write_reg(RFBI_CYCLE_TIME(rfbi_module), t->tim[1]); @@ -456,7 +433,6 @@ void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t) (t->tim[2] ? 1 : 0), 4, 4); rfbi_print_timings(); - rfbi_enable_clocks(0); } static int ps_to_rfbi_ticks(int time, int div) @@ -472,59 +448,6 @@ static int ps_to_rfbi_ticks(int time, int div) return ret; } -#ifdef OMAP_RFBI_RATE_LIMIT -unsigned long rfbi_get_max_tx_rate(void) -{ - unsigned long l4_rate, dss1_rate; - int min_l4_ticks = 0; - int i; - - /* According to TI this can't be calculated so make the - * adjustments for a couple of known frequencies and warn for - * others. - */ - static const struct { - unsigned long l4_clk; /* HZ */ - unsigned long dss1_clk; /* HZ */ - unsigned long min_l4_ticks; - } ftab[] = { - { 55, 132, 7, }, /* 7.86 MPix/s */ - { 110, 110, 12, }, /* 9.16 MPix/s */ - { 110, 132, 10, }, /* 11 Mpix/s */ - { 120, 120, 10, }, /* 12 Mpix/s */ - { 133, 133, 10, }, /* 13.3 Mpix/s */ - }; - - l4_rate = rfbi.l4_khz / 1000; - dss1_rate = dss_clk_get_rate(DSS_CLK_FCK) / 1000000; - - for (i = 0; i < ARRAY_SIZE(ftab); i++) { - /* Use a window instead of an exact match, to account - * for different DPLL multiplier / divider pairs. - */ - if (abs(ftab[i].l4_clk - l4_rate) < 3 && - abs(ftab[i].dss1_clk - dss1_rate) < 3) { - min_l4_ticks = ftab[i].min_l4_ticks; - break; - } - } - if (i == ARRAY_SIZE(ftab)) { - /* Can't be sure, return anyway the maximum not - * rate-limited. This might cause a problem only for the - * tearing synchronisation. 
- */ - DSSERR("can't determine maximum RFBI transfer rate\n"); - return rfbi.l4_khz * 1000; - } - return rfbi.l4_khz * 1000 / min_l4_ticks; -} -#else -int rfbi_get_max_tx_rate(void) -{ - return rfbi.l4_khz * 1000; -} -#endif - static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div) { *clk_period = 1000000000 / rfbi.l4_khz; @@ -644,7 +567,6 @@ int omap_rfbi_setup_te(enum omap_rfbi_te_mode mode, DSSDBG("setup_te: mode %d hs %d vs %d hs_inv %d vs_inv %d\n", mode, hs, vs, hs_pol_inv, vs_pol_inv); - rfbi_enable_clocks(1); rfbi_write_reg(RFBI_HSYNC_WIDTH, hs); rfbi_write_reg(RFBI_VSYNC_WIDTH, vs); @@ -657,7 +579,6 @@ int omap_rfbi_setup_te(enum omap_rfbi_te_mode mode, l &= ~(1 << 20); else l |= 1 << 20; - rfbi_enable_clocks(0); return 0; } @@ -672,7 +593,6 @@ int omap_rfbi_enable_te(bool enable, unsigned line) if (line > (1 << 11) - 1) return -EINVAL; - rfbi_enable_clocks(1); l = rfbi_read_reg(RFBI_CONFIG(0)); l &= ~(0x3 << 2); if (enable) { @@ -682,50 +602,12 @@ int omap_rfbi_enable_te(bool enable, unsigned line) rfbi.te_enabled = 0; rfbi_write_reg(RFBI_CONFIG(0), l); rfbi_write_reg(RFBI_LINE_NUMBER, line); - rfbi_enable_clocks(0); return 0; } EXPORT_SYMBOL(omap_rfbi_enable_te); -#if 0 -static void rfbi_enable_config(int enable1, int enable2) -{ - u32 l; - int cs = 0; - - if (enable1) - cs |= 1<<0; - if (enable2) - cs |= 1<<1; - - rfbi_enable_clocks(1); - - l = rfbi_read_reg(RFBI_CONTROL); - - l = FLD_MOD(l, cs, 3, 2); - l = FLD_MOD(l, 0, 1, 1); - - rfbi_write_reg(RFBI_CONTROL, l); - - - l = rfbi_read_reg(RFBI_CONFIG(0)); - l = FLD_MOD(l, 0, 3, 2); /* TRIGGERMODE: ITE */ - /*l |= FLD_VAL(2, 8, 7); */ /* L4FORMAT, 2pix/L4 */ - /*l |= FLD_VAL(0, 8, 7); */ /* L4FORMAT, 1pix/L4 */ - - l = FLD_MOD(l, 0, 16, 16); /* A0POLARITY */ - l = FLD_MOD(l, 1, 20, 20); /* TE_VSYNC_POLARITY */ - l = FLD_MOD(l, 1, 21, 21); /* HSYNCPOLARITY */ - - l = FLD_MOD(l, OMAP_DSS_RFBI_PARALLELMODE_8, 1, 0); - rfbi_write_reg(RFBI_CONFIG(0), l); - - rfbi_enable_clocks(0); -} -#endif - -int rfbi_configure(int rfbi_module, int bpp, int lines) +static int rfbi_configure(int rfbi_module, int bpp, int lines) { u32 l; int cycle1 = 0, cycle2 = 0, cycle3 = 0; @@ -821,8 +703,6 @@ int rfbi_configure(int rfbi_module, int bpp, int lines) break; } - rfbi_enable_clocks(1); - REG_FLD_MOD(RFBI_CONTROL, 0, 3, 2); /* clear CS */ l = 0; @@ -856,11 +736,15 @@ int rfbi_configure(int rfbi_module, int bpp, int lines) DSSDBG("RFBI config: bpp %d, lines %d, cycles: 0x%x 0x%x 0x%x\n", bpp, lines, cycle1, cycle2, cycle3); - rfbi_enable_clocks(0); - return 0; } -EXPORT_SYMBOL(rfbi_configure); + +int omap_rfbi_configure(struct omap_dss_device *dssdev, int pixel_size, + int data_lines) +{ + return rfbi_configure(dssdev->phy.rfbi.channel, pixel_size, data_lines); +} +EXPORT_SYMBOL(omap_rfbi_configure); int omap_rfbi_prepare_update(struct omap_dss_device *dssdev, u16 *x, u16 *y, u16 *w, u16 *h) @@ -960,6 +844,8 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev) { int r; + rfbi_enable_clocks(1); + r = omap_dss_start_device(dssdev); if (r) { DSSERR("failed to start device\n"); @@ -1002,6 +888,8 @@ void omapdss_rfbi_display_disable(struct omap_dss_device *dssdev) omap_dispc_unregister_isr(framedone_callback, NULL, DISPC_IRQ_FRAMEDONE); omap_dss_stop_device(dssdev); + + rfbi_enable_clocks(0); } EXPORT_SYMBOL(omapdss_rfbi_display_disable); @@ -1021,11 +909,7 @@ static int omap_rfbihw_probe(struct platform_device *pdev) rfbi.pdev = pdev; - spin_lock_init(&rfbi.cmd_lock); - - init_completion(&rfbi.cmd_done); - 
atomic_set(&rfbi.cmd_fifo_full, 0); - atomic_set(&rfbi.cmd_pending, 0); + sema_init(&rfbi.bus_lock, 1); rfbi_mem = platform_get_resource(rfbi.pdev, IORESOURCE_MEM, 0); if (!rfbi_mem) { diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c index 54a53e64818..0bd4b0350f8 100644 --- a/drivers/video/omap2/dss/sdi.c +++ b/drivers/video/omap2/dss/sdi.c @@ -25,7 +25,7 @@ #include <linux/err.h> #include <linux/regulator/consumer.h> -#include <plat/display.h> +#include <video/omapdss.h> #include <plat/cpu.h> #include "dss.h" diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c index 8e35a5bae42..980f919ed98 100644 --- a/drivers/video/omap2/dss/venc.c +++ b/drivers/video/omap2/dss/venc.c @@ -34,7 +34,7 @@ #include <linux/platform_device.h> #include <linux/regulator/consumer.h> -#include <plat/display.h> +#include <video/omapdss.h> #include <plat/cpu.h> #include "dss.h" @@ -373,8 +373,11 @@ static void venc_reset(void) } } +#ifdef CONFIG_OMAP2_DSS_SLEEP_AFTER_VENC_RESET /* the magical sleep that makes things work */ + /* XXX more info? What bug this circumvents? */ msleep(20); +#endif } static void venc_enable_clocks(int enable) @@ -473,6 +476,12 @@ static int venc_panel_enable(struct omap_dss_device *dssdev) mutex_lock(&venc.venc_lock); + r = omap_dss_start_device(dssdev); + if (r) { + DSSERR("failed to start device\n"); + goto err0; + } + if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) { r = -EINVAL; goto err1; @@ -484,10 +493,11 @@ static int venc_panel_enable(struct omap_dss_device *dssdev) dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; - /* wait couple of vsyncs until enabling the LCD */ - msleep(50); - + mutex_unlock(&venc.venc_lock); + return 0; err1: + omap_dss_stop_device(dssdev); +err0: mutex_unlock(&venc.venc_lock); return r; @@ -510,10 +520,9 @@ static void venc_panel_disable(struct omap_dss_device *dssdev) venc_power_off(dssdev); - /* wait at least 5 vsyncs after disabling the LCD */ - msleep(100); - dssdev->state = OMAP_DSS_DISPLAY_DISABLED; + + omap_dss_stop_device(dssdev); end: mutex_unlock(&venc.venc_lock); } diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c index 6f435450987..cff450392b7 100644 --- a/drivers/video/omap2/omapfb/omapfb-ioctl.c +++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c @@ -28,7 +28,7 @@ #include <linux/omapfb.h> #include <linux/vmalloc.h> -#include <plat/display.h> +#include <video/omapdss.h> #include <plat/vrfb.h> #include <plat/vram.h> @@ -895,8 +895,16 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg) p.display_info.xres = xres; p.display_info.yres = yres; - p.display_info.width = 0; - p.display_info.height = 0; + + if (display->driver->get_dimensions) { + u32 w, h; + display->driver->get_dimensions(display, &w, &h); + p.display_info.width = w; + p.display_info.height = h; + } else { + p.display_info.width = 0; + p.display_info.height = 0; + } if (copy_to_user((void __user *)arg, &p.display_info, sizeof(p.display_info))) diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c index 505ec667204..505bc12a303 100644 --- a/drivers/video/omap2/omapfb/omapfb-main.c +++ b/drivers/video/omap2/omapfb/omapfb-main.c @@ -30,7 +30,7 @@ #include <linux/platform_device.h> #include <linux/omapfb.h> -#include <plat/display.h> +#include <video/omapdss.h> #include <plat/vram.h> #include <plat/vrfb.h> @@ -702,8 +702,16 @@ int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var) var->xres, var->yres, 
var->xres_virtual, var->yres_virtual); - var->height = -1; - var->width = -1; + if (display && display->driver->get_dimensions) { + u32 w, h; + display->driver->get_dimensions(display, &w, &h); + var->width = DIV_ROUND_CLOSEST(w, 1000); + var->height = DIV_ROUND_CLOSEST(h, 1000); + } else { + var->height = -1; + var->width = -1; + } + var->grayscale = 0; if (display && display->driver->get_timings) { @@ -749,35 +757,6 @@ static int omapfb_open(struct fb_info *fbi, int user) static int omapfb_release(struct fb_info *fbi, int user) { -#if 0 - struct omapfb_info *ofbi = FB2OFB(fbi); - struct omapfb2_device *fbdev = ofbi->fbdev; - struct omap_dss_device *display = fb2display(fbi); - - DBG("Closing fb with plane index %d\n", ofbi->id); - - omapfb_lock(fbdev); - - if (display && display->get_update_mode && display->update) { - /* XXX this update should be removed, I think. But it's - * good for debugging */ - if (display->get_update_mode(display) == - OMAP_DSS_UPDATE_MANUAL) { - u16 w, h; - - if (display->sync) - display->sync(display); - - display->get_resolution(display, &w, &h); - display->update(display, 0, 0, w, h); - } - } - - if (display && display->sync) - display->sync(display); - - omapfb_unlock(fbdev); -#endif return 0; } @@ -1263,7 +1242,6 @@ static int omapfb_blank(int blank, struct fb_info *fbi) struct omapfb_info *ofbi = FB2OFB(fbi); struct omapfb2_device *fbdev = ofbi->fbdev; struct omap_dss_device *display = fb2display(fbi); - int do_update = 0; int r = 0; if (!display) @@ -1279,11 +1257,6 @@ static int omapfb_blank(int blank, struct fb_info *fbi) if (display->driver->resume) r = display->driver->resume(display); - if (r == 0 && display->driver->get_update_mode && - display->driver->get_update_mode(display) == - OMAP_DSS_UPDATE_MANUAL) - do_update = 1; - break; case FB_BLANK_NORMAL: @@ -1307,13 +1280,6 @@ static int omapfb_blank(int blank, struct fb_info *fbi) exit: omapfb_unlock(fbdev); - if (r == 0 && do_update && display->driver->update) { - u16 w, h; - display->driver->get_resolution(display, &w, &h); - - r = display->driver->update(display, 0, 0, w, h); - } - return r; } @@ -2030,9 +1996,9 @@ static int omapfb_create_framebuffers(struct omapfb2_device *fbdev) static int omapfb_mode_to_timings(const char *mode_str, struct omap_video_timings *timings, u8 *bpp) { - struct fb_info fbi; - struct fb_var_screeninfo var; - struct fb_ops fbops; + struct fb_info *fbi; + struct fb_var_screeninfo *var; + struct fb_ops *fbops; int r; #ifdef CONFIG_OMAP2_DSS_VENC @@ -2050,39 +2016,66 @@ static int omapfb_mode_to_timings(const char *mode_str, /* this is quite a hack, but I wanted to use the modedb and for * that we need fb_info and var, so we create dummy ones */ - memset(&fbi, 0, sizeof(fbi)); - memset(&var, 0, sizeof(var)); - memset(&fbops, 0, sizeof(fbops)); - fbi.fbops = &fbops; - - r = fb_find_mode(&var, &fbi, mode_str, NULL, 0, NULL, 24); - - if (r != 0) { - timings->pixel_clock = PICOS2KHZ(var.pixclock); - timings->hbp = var.left_margin; - timings->hfp = var.right_margin; - timings->vbp = var.upper_margin; - timings->vfp = var.lower_margin; - timings->hsw = var.hsync_len; - timings->vsw = var.vsync_len; - timings->x_res = var.xres; - timings->y_res = var.yres; - - switch (var.bits_per_pixel) { - case 16: - *bpp = 16; - break; - case 24: - case 32: - default: - *bpp = 24; - break; - } + *bpp = 0; + fbi = NULL; + var = NULL; + fbops = NULL; - return 0; - } else { - return -EINVAL; + fbi = kzalloc(sizeof(*fbi), GFP_KERNEL); + if (fbi == NULL) { + r = -ENOMEM; + goto err; + } + + var = 
kzalloc(sizeof(*var), GFP_KERNEL); + if (var == NULL) { + r = -ENOMEM; + goto err; + } + + fbops = kzalloc(sizeof(*fbops), GFP_KERNEL); + if (fbops == NULL) { + r = -ENOMEM; + goto err; + } + + fbi->fbops = fbops; + + r = fb_find_mode(var, fbi, mode_str, NULL, 0, NULL, 24); + if (r == 0) { + r = -EINVAL; + goto err; + } + + timings->pixel_clock = PICOS2KHZ(var->pixclock); + timings->hbp = var->left_margin; + timings->hfp = var->right_margin; + timings->vbp = var->upper_margin; + timings->vfp = var->lower_margin; + timings->hsw = var->hsync_len; + timings->vsw = var->vsync_len; + timings->x_res = var->xres; + timings->y_res = var->yres; + + switch (var->bits_per_pixel) { + case 16: + *bpp = 16; + break; + case 24: + case 32: + default: + *bpp = 24; + break; } + + r = 0; + +err: + kfree(fbi); + kfree(var); + kfree(fbops); + + return r; } static int omapfb_set_def_mode(struct omapfb2_device *fbdev, @@ -2185,6 +2178,61 @@ static int omapfb_parse_def_modes(struct omapfb2_device *fbdev) return r; } +static int omapfb_init_display(struct omapfb2_device *fbdev, + struct omap_dss_device *dssdev) +{ + struct omap_dss_driver *dssdrv = dssdev->driver; + int r; + + r = dssdrv->enable(dssdev); + if (r) { + dev_warn(fbdev->dev, "Failed to enable display '%s'\n", + dssdev->name); + return r; + } + + if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { + u16 w, h; + if (dssdrv->enable_te) { + r = dssdrv->enable_te(dssdev, 1); + if (r) { + dev_err(fbdev->dev, "Failed to set TE\n"); + return r; + } + } + + if (dssdrv->set_update_mode) { + r = dssdrv->set_update_mode(dssdev, + OMAP_DSS_UPDATE_MANUAL); + if (r) { + dev_err(fbdev->dev, + "Failed to set update mode\n"); + return r; + } + } + + dssdrv->get_resolution(dssdev, &w, &h); + r = dssdrv->update(dssdev, 0, 0, w, h); + if (r) { + dev_err(fbdev->dev, + "Failed to update display\n"); + return r; + } + } else { + if (dssdrv->set_update_mode) { + r = dssdrv->set_update_mode(dssdev, + OMAP_DSS_UPDATE_AUTO); + if (r) { + dev_err(fbdev->dev, + "Failed to set update mode\n"); + return r; + } + } + } + + return 0; +} + static int omapfb_probe(struct platform_device *pdev) { struct omapfb2_device *fbdev = NULL; @@ -2284,30 +2332,13 @@ static int omapfb_probe(struct platform_device *pdev) } if (def_display) { - struct omap_dss_driver *dssdrv = def_display->driver; - - r = def_display->driver->enable(def_display); + r = omapfb_init_display(fbdev, def_display); if (r) { - dev_warn(fbdev->dev, "Failed to enable display '%s'\n", - def_display->name); + dev_err(fbdev->dev, + "failed to initialize default " + "display\n"); goto cleanup; } - - if (def_display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { - u16 w, h; - if (dssdrv->enable_te) - dssdrv->enable_te(def_display, 1); - if (dssdrv->set_update_mode) - dssdrv->set_update_mode(def_display, - OMAP_DSS_UPDATE_MANUAL); - - dssdrv->get_resolution(def_display, &w, &h); - def_display->driver->update(def_display, 0, 0, w, h); - } else { - if (dssdrv->set_update_mode) - dssdrv->set_update_mode(def_display, - OMAP_DSS_UPDATE_AUTO); - } } DBG("create sysfs for fbs\n"); diff --git a/drivers/video/omap2/omapfb/omapfb-sysfs.c b/drivers/video/omap2/omapfb/omapfb-sysfs.c index 6f9c72cd6bb..2f5e817b2a9 100644 --- a/drivers/video/omap2/omapfb/omapfb-sysfs.c +++ b/drivers/video/omap2/omapfb/omapfb-sysfs.c @@ -29,7 +29,7 @@ #include <linux/mm.h> #include <linux/omapfb.h> -#include <plat/display.h> +#include <video/omapdss.h> #include <plat/vrfb.h> #include "omapfb.h" @@ -50,10 +50,12 @@ static ssize_t store_rotate_type(struct 
device *dev, struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); struct omapfb2_mem_region *rg; - enum omap_dss_rotation_type rot_type; + int rot_type; int r; - rot_type = simple_strtoul(buf, NULL, 0); + r = kstrtoint(buf, 0, &rot_type); + if (r) + return r; if (rot_type != OMAP_DSS_ROT_DMA && rot_type != OMAP_DSS_ROT_VRFB) return -EINVAL; @@ -102,14 +104,15 @@ static ssize_t store_mirror(struct device *dev, { struct fb_info *fbi = dev_get_drvdata(dev); struct omapfb_info *ofbi = FB2OFB(fbi); - unsigned long mirror; + int mirror; int r; struct fb_var_screeninfo new_var; - mirror = simple_strtoul(buf, NULL, 0); + r = kstrtoint(buf, 0, &mirror); + if (r) + return r; - if (mirror != 0 && mirror != 1) - return -EINVAL; + mirror = !!mirror; if (!lock_fb_info(fbi)) return -ENODEV; @@ -445,7 +448,11 @@ static ssize_t store_size(struct device *dev, struct device_attribute *attr, int r; int i; - size = PAGE_ALIGN(simple_strtoul(buf, NULL, 0)); + r = kstrtoul(buf, 0, &size); + if (r) + return r; + + size = PAGE_ALIGN(size); if (!lock_fb_info(fbi)) return -ENODEV; diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h index 1305fc9880b..aa1b1d97427 100644 --- a/drivers/video/omap2/omapfb/omapfb.h +++ b/drivers/video/omap2/omapfb/omapfb.h @@ -29,13 +29,15 @@ #include <linux/rwsem.h> -#include <plat/display.h> +#include <video/omapdss.h> #ifdef DEBUG extern unsigned int omapfb_debug; #define DBG(format, ...) \ - if (omapfb_debug) \ - printk(KERN_DEBUG "OMAPFB: " format, ## __VA_ARGS__) + do { \ + if (omapfb_debug) \ + printk(KERN_DEBUG "OMAPFB: " format, ## __VA_ARGS__); \ + } while (0) #else #define DBG(format, ...) #endif diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c index 3b6cdcac8f1..0352afa49a3 100644 --- a/drivers/video/s3c-fb.c +++ b/drivers/video/s3c-fb.c @@ -182,6 +182,7 @@ struct s3c_fb_vsync { /** * struct s3c_fb - overall hardware state of the hardware + * @slock: The spinlock protection for this data sturcture. * @dev: The device that we bound to, for printing, etc. * @regs_res: The resource we claimed for the IO registers. * @bus_clk: The clk (hclk) feeding our interface and possibly pixclk. @@ -195,6 +196,7 @@ struct s3c_fb_vsync { * @vsync_info: VSYNC-related information (count, queues...) 
*/ struct s3c_fb { + spinlock_t slock; struct device *dev; struct resource *regs_res; struct clk *bus_clk; @@ -300,6 +302,7 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var, var->blue.length = 5; break; + case 32: case 28: case 25: var->transp.length = var->bits_per_pixel - 24; @@ -308,7 +311,6 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var, case 24: /* our 24bpp is unpacked, so 32bpp */ var->bits_per_pixel = 32; - case 32: var->red.offset = 16; var->red.length = 8; var->green.offset = 8; @@ -947,6 +949,8 @@ static irqreturn_t s3c_fb_irq(int irq, void *dev_id) void __iomem *regs = sfb->regs; u32 irq_sts_reg; + spin_lock(&sfb->slock); + irq_sts_reg = readl(regs + VIDINTCON1); if (irq_sts_reg & VIDINTCON1_INT_FRAME) { @@ -963,6 +967,7 @@ static irqreturn_t s3c_fb_irq(int irq, void *dev_id) */ s3c_fb_disable_irq(sfb); + spin_unlock(&sfb->slock); return IRQ_HANDLED; } @@ -1339,6 +1344,8 @@ static int __devinit s3c_fb_probe(struct platform_device *pdev) sfb->pdata = pd; sfb->variant = fbdrv->variant; + spin_lock_init(&sfb->slock); + sfb->bus_clk = clk_get(dev, "lcd"); if (IS_ERR(sfb->bus_clk)) { dev_err(dev, "failed to get bus clock\n"); @@ -1442,8 +1449,7 @@ err_ioremap: iounmap(sfb->regs); err_req_region: - release_resource(sfb->regs_res); - kfree(sfb->regs_res); + release_mem_region(sfb->regs_res->start, resource_size(sfb->regs_res)); err_clk: clk_disable(sfb->bus_clk); @@ -1479,8 +1485,7 @@ static int __devexit s3c_fb_remove(struct platform_device *pdev) clk_disable(sfb->bus_clk); clk_put(sfb->bus_clk); - release_resource(sfb->regs_res); - kfree(sfb->regs_res); + release_mem_region(sfb->regs_res->start, resource_size(sfb->regs_res)); kfree(sfb); @@ -1521,7 +1526,8 @@ static int s3c_fb_resume(struct device *dev) clk_enable(sfb->bus_clk); - /* setup registers */ + /* setup gpio and output polarity controls */ + pd->setup_gpio(); writel(pd->vidcon1, sfb->regs + VIDCON1); /* zero all windows before we do anything */ @@ -1549,7 +1555,7 @@ static int s3c_fb_resume(struct device *dev) return 0; } -int s3c_fb_runtime_suspend(struct device *dev) +static int s3c_fb_runtime_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct s3c_fb *sfb = platform_get_drvdata(pdev); @@ -1569,7 +1575,7 @@ int s3c_fb_runtime_suspend(struct device *dev) return 0; } -int s3c_fb_runtime_resume(struct device *dev) +static int s3c_fb_runtime_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct s3c_fb *sfb = platform_get_drvdata(pdev); @@ -1579,7 +1585,8 @@ int s3c_fb_runtime_resume(struct device *dev) clk_enable(sfb->bus_clk); - /* setup registers */ + /* setup gpio and output polarity controls */ + pd->setup_gpio(); writel(pd->vidcon1, sfb->regs + VIDCON1); /* zero all windows before we do anything */ @@ -1623,28 +1630,31 @@ static struct s3c_fb_win_variant s3c_fb_data_64xx_wins[] = { .has_osd_c = 1, .osd_size_off = 0x8, .palette_sz = 256, - .valid_bpp = VALID_BPP1248 | VALID_BPP(16) | VALID_BPP(24), + .valid_bpp = (VALID_BPP1248 | VALID_BPP(16) | + VALID_BPP(18) | VALID_BPP(24)), }, [1] = { .has_osd_c = 1, .has_osd_d = 1, - .osd_size_off = 0x12, + .osd_size_off = 0xc, .has_osd_alpha = 1, .palette_sz = 256, .valid_bpp = (VALID_BPP1248 | VALID_BPP(16) | VALID_BPP(18) | VALID_BPP(19) | - VALID_BPP(24) | VALID_BPP(25)), + VALID_BPP(24) | VALID_BPP(25) | + VALID_BPP(28)), }, [2] = { .has_osd_c = 1, .has_osd_d = 1, - .osd_size_off = 0x12, + .osd_size_off = 0xc, .has_osd_alpha = 1, .palette_sz = 16, .palette_16bpp = 1, 
.valid_bpp = (VALID_BPP1248 | VALID_BPP(16) | VALID_BPP(18) | VALID_BPP(19) | - VALID_BPP(24) | VALID_BPP(25)), + VALID_BPP(24) | VALID_BPP(25) | + VALID_BPP(28)), }, [3] = { .has_osd_c = 1, @@ -1653,7 +1663,8 @@ static struct s3c_fb_win_variant s3c_fb_data_64xx_wins[] = { .palette_16bpp = 1, .valid_bpp = (VALID_BPP124 | VALID_BPP(16) | VALID_BPP(18) | VALID_BPP(19) | - VALID_BPP(24) | VALID_BPP(25)), + VALID_BPP(24) | VALID_BPP(25) | + VALID_BPP(28)), }, [4] = { .has_osd_c = 1, @@ -1662,7 +1673,65 @@ static struct s3c_fb_win_variant s3c_fb_data_64xx_wins[] = { .palette_16bpp = 1, .valid_bpp = (VALID_BPP(1) | VALID_BPP(2) | VALID_BPP(16) | VALID_BPP(18) | - VALID_BPP(24) | VALID_BPP(25)), + VALID_BPP(19) | VALID_BPP(24) | + VALID_BPP(25) | VALID_BPP(28)), + }, +}; + +static struct s3c_fb_win_variant s3c_fb_data_s5p_wins[] = { + [0] = { + .has_osd_c = 1, + .osd_size_off = 0x8, + .palette_sz = 256, + .valid_bpp = (VALID_BPP1248 | VALID_BPP(13) | + VALID_BPP(15) | VALID_BPP(16) | + VALID_BPP(18) | VALID_BPP(19) | + VALID_BPP(24) | VALID_BPP(25) | + VALID_BPP(32)), + }, + [1] = { + .has_osd_c = 1, + .has_osd_d = 1, + .osd_size_off = 0xc, + .has_osd_alpha = 1, + .palette_sz = 256, + .valid_bpp = (VALID_BPP1248 | VALID_BPP(13) | + VALID_BPP(15) | VALID_BPP(16) | + VALID_BPP(18) | VALID_BPP(19) | + VALID_BPP(24) | VALID_BPP(25) | + VALID_BPP(32)), + }, + [2] = { + .has_osd_c = 1, + .has_osd_d = 1, + .osd_size_off = 0xc, + .has_osd_alpha = 1, + .palette_sz = 256, + .valid_bpp = (VALID_BPP1248 | VALID_BPP(13) | + VALID_BPP(15) | VALID_BPP(16) | + VALID_BPP(18) | VALID_BPP(19) | + VALID_BPP(24) | VALID_BPP(25) | + VALID_BPP(32)), + }, + [3] = { + .has_osd_c = 1, + .has_osd_alpha = 1, + .palette_sz = 256, + .valid_bpp = (VALID_BPP1248 | VALID_BPP(13) | + VALID_BPP(15) | VALID_BPP(16) | + VALID_BPP(18) | VALID_BPP(19) | + VALID_BPP(24) | VALID_BPP(25) | + VALID_BPP(32)), + }, + [4] = { + .has_osd_c = 1, + .has_osd_alpha = 1, + .palette_sz = 256, + .valid_bpp = (VALID_BPP1248 | VALID_BPP(13) | + VALID_BPP(15) | VALID_BPP(16) | + VALID_BPP(18) | VALID_BPP(19) | + VALID_BPP(24) | VALID_BPP(25) | + VALID_BPP(32)), }, }; @@ -1719,11 +1788,11 @@ static struct s3c_fb_driverdata s3c_fb_data_s5pc100 = { .has_prtcon = 1, }, - .win[0] = &s3c_fb_data_64xx_wins[0], - .win[1] = &s3c_fb_data_64xx_wins[1], - .win[2] = &s3c_fb_data_64xx_wins[2], - .win[3] = &s3c_fb_data_64xx_wins[3], - .win[4] = &s3c_fb_data_64xx_wins[4], + .win[0] = &s3c_fb_data_s5p_wins[0], + .win[1] = &s3c_fb_data_s5p_wins[1], + .win[2] = &s3c_fb_data_s5p_wins[2], + .win[3] = &s3c_fb_data_s5p_wins[3], + .win[4] = &s3c_fb_data_s5p_wins[4], }; static struct s3c_fb_driverdata s3c_fb_data_s5pv210 = { @@ -1749,11 +1818,11 @@ static struct s3c_fb_driverdata s3c_fb_data_s5pv210 = { .has_shadowcon = 1, }, - .win[0] = &s3c_fb_data_64xx_wins[0], - .win[1] = &s3c_fb_data_64xx_wins[1], - .win[2] = &s3c_fb_data_64xx_wins[2], - .win[3] = &s3c_fb_data_64xx_wins[3], - .win[4] = &s3c_fb_data_64xx_wins[4], + .win[0] = &s3c_fb_data_s5p_wins[0], + .win[1] = &s3c_fb_data_s5p_wins[1], + .win[2] = &s3c_fb_data_s5p_wins[2], + .win[3] = &s3c_fb_data_s5p_wins[3], + .win[4] = &s3c_fb_data_s5p_wins[4], }; /* S3C2443/S3C2416 style hardware */ diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c index 61c819e35f7..0aa13761de6 100644 --- a/drivers/video/s3c2410fb.c +++ b/drivers/video/s3c2410fb.c @@ -867,7 +867,7 @@ static int __devinit s3c24xxfb_probe(struct platform_device *pdev, goto dealloc_fb; } - size = (res->end - res->start) + 1; + size = 
resource_size(res); info->mem = request_mem_region(res->start, size, pdev->name); if (info->mem == NULL) { dev_err(&pdev->dev, "failed to get memory region\n"); @@ -997,8 +997,7 @@ release_irq: release_regs: iounmap(info->io); release_mem: - release_resource(info->mem); - kfree(info->mem); + release_mem_region(res->start, size); dealloc_fb: platform_set_drvdata(pdev, NULL); framebuffer_release(fbinfo); @@ -1044,8 +1043,7 @@ static int __devexit s3c2410fb_remove(struct platform_device *pdev) iounmap(info->io); - release_resource(info->mem); - kfree(info->mem); + release_mem_region(info->mem->start, resource_size(info->mem)); platform_set_drvdata(pdev, NULL); framebuffer_release(fbinfo); diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c index c4482f2e579..4ca5d0c8fe8 100644 --- a/drivers/video/s3fb.c +++ b/drivers/video/s3fb.c @@ -25,6 +25,9 @@ #include <linux/console.h> /* Why should fb driver call console functions? because console_lock() */ #include <video/vga.h> +#include <linux/i2c.h> +#include <linux/i2c-algo-bit.h> + #ifdef CONFIG_MTRR #include <asm/mtrr.h> #endif @@ -36,6 +39,12 @@ struct s3fb_info { struct mutex open_lock; unsigned int ref_count; u32 pseudo_palette[16]; +#ifdef CONFIG_FB_S3_DDC + u8 __iomem *mmio; + bool ddc_registered; + struct i2c_adapter ddc_adapter; + struct i2c_algo_bit_data ddc_algo; +#endif }; @@ -105,6 +114,9 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64", #define CHIP_UNDECIDED_FLAG 0x80 #define CHIP_MASK 0xFF +#define MMIO_OFFSET 0x1000000 +#define MMIO_SIZE 0x10000 + /* CRT timing register sets */ static const struct vga_regset s3_h_total_regs[] = {{0x00, 0, 7}, {0x5D, 0, 0}, VGA_REGSET_END}; @@ -140,7 +152,7 @@ static const struct svga_timing_regs s3_timing_regs = { /* Module parameters */ -static char *mode_option __devinitdata = "640x480-8@60"; +static char *mode_option __devinitdata; #ifdef CONFIG_MTRR static int mtrr __devinitdata = 1; @@ -169,6 +181,119 @@ MODULE_PARM_DESC(fasttext, "Enable S3 fast text mode (1=enable, 0=disable, defau /* ------------------------------------------------------------------------- */ +#ifdef CONFIG_FB_S3_DDC + +#define DDC_REG 0xaa /* Trio 3D/1X/2X */ +#define DDC_MMIO_REG 0xff20 /* all other chips */ +#define DDC_SCL_OUT (1 << 0) +#define DDC_SDA_OUT (1 << 1) +#define DDC_SCL_IN (1 << 2) +#define DDC_SDA_IN (1 << 3) +#define DDC_DRIVE_EN (1 << 4) + +static bool s3fb_ddc_needs_mmio(int chip) +{ + return !(chip == CHIP_360_TRIO3D_1X || + chip == CHIP_362_TRIO3D_2X || + chip == CHIP_368_TRIO3D_2X); +} + +static u8 s3fb_ddc_read(struct s3fb_info *par) +{ + if (s3fb_ddc_needs_mmio(par->chip)) + return readb(par->mmio + DDC_MMIO_REG); + else + return vga_rcrt(par->state.vgabase, DDC_REG); +} + +static void s3fb_ddc_write(struct s3fb_info *par, u8 val) +{ + if (s3fb_ddc_needs_mmio(par->chip)) + writeb(val, par->mmio + DDC_MMIO_REG); + else + vga_wcrt(par->state.vgabase, DDC_REG, val); +} + +static void s3fb_ddc_setscl(void *data, int val) +{ + struct s3fb_info *par = data; + unsigned char reg; + + reg = s3fb_ddc_read(par) | DDC_DRIVE_EN; + if (val) + reg |= DDC_SCL_OUT; + else + reg &= ~DDC_SCL_OUT; + s3fb_ddc_write(par, reg); +} + +static void s3fb_ddc_setsda(void *data, int val) +{ + struct s3fb_info *par = data; + unsigned char reg; + + reg = s3fb_ddc_read(par) | DDC_DRIVE_EN; + if (val) + reg |= DDC_SDA_OUT; + else + reg &= ~DDC_SDA_OUT; + s3fb_ddc_write(par, reg); +} + +static int s3fb_ddc_getscl(void *data) +{ + struct s3fb_info *par = data; + + return !!(s3fb_ddc_read(par) & 
DDC_SCL_IN); +} + +static int s3fb_ddc_getsda(void *data) +{ + struct s3fb_info *par = data; + + return !!(s3fb_ddc_read(par) & DDC_SDA_IN); +} + +static int __devinit s3fb_setup_ddc_bus(struct fb_info *info) +{ + struct s3fb_info *par = info->par; + + strlcpy(par->ddc_adapter.name, info->fix.id, + sizeof(par->ddc_adapter.name)); + par->ddc_adapter.owner = THIS_MODULE; + par->ddc_adapter.class = I2C_CLASS_DDC; + par->ddc_adapter.algo_data = &par->ddc_algo; + par->ddc_adapter.dev.parent = info->device; + par->ddc_algo.setsda = s3fb_ddc_setsda; + par->ddc_algo.setscl = s3fb_ddc_setscl; + par->ddc_algo.getsda = s3fb_ddc_getsda; + par->ddc_algo.getscl = s3fb_ddc_getscl; + par->ddc_algo.udelay = 10; + par->ddc_algo.timeout = 20; + par->ddc_algo.data = par; + + i2c_set_adapdata(&par->ddc_adapter, par); + + /* + * some Virge cards have external MUX to switch chip I2C bus between + * DDC and extension pins - switch it do DDC + */ +/* vga_wseq(par->state.vgabase, 0x08, 0x06); - not needed, already unlocked */ + if (par->chip == CHIP_357_VIRGE_GX2 || + par->chip == CHIP_359_VIRGE_GX2P) + svga_wseq_mask(par->state.vgabase, 0x0d, 0x01, 0x03); + else + svga_wseq_mask(par->state.vgabase, 0x0d, 0x00, 0x03); + /* some Virge need this or the DDC is ignored */ + svga_wcrt_mask(par->state.vgabase, 0x5c, 0x03, 0x03); + + return i2c_bit_add_bus(&par->ddc_adapter); +} +#endif /* CONFIG_FB_S3_DDC */ + + +/* ------------------------------------------------------------------------- */ + /* Set font in S3 fast text mode */ static void s3fb_settile_fast(struct fb_info *info, struct fb_tilemap *map) @@ -994,6 +1119,7 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i struct s3fb_info *par; int rc; u8 regval, cr38, cr39; + bool found = false; /* Ignore secondary VGA device because there is no VGA arbitration */ if (! svga_primary_device(dev)) { @@ -1110,12 +1236,69 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i info->fix.ypanstep = 0; info->fix.accel = FB_ACCEL_NONE; info->pseudo_palette = (void*) (par->pseudo_palette); + info->var.bits_per_pixel = 8; + +#ifdef CONFIG_FB_S3_DDC + /* Enable MMIO if needed */ + if (s3fb_ddc_needs_mmio(par->chip)) { + par->mmio = ioremap(info->fix.smem_start + MMIO_OFFSET, MMIO_SIZE); + if (par->mmio) + svga_wcrt_mask(par->state.vgabase, 0x53, 0x08, 0x08); /* enable MMIO */ + else + dev_err(info->device, "unable to map MMIO at 0x%lx, disabling DDC", + info->fix.smem_start + MMIO_OFFSET); + } + if (!s3fb_ddc_needs_mmio(par->chip) || par->mmio) + if (s3fb_setup_ddc_bus(info) == 0) { + u8 *edid = fb_ddc_read(&par->ddc_adapter); + par->ddc_registered = true; + if (edid) { + fb_edid_to_monspecs(edid, &info->monspecs); + kfree(edid); + if (!info->monspecs.modedb) + dev_err(info->device, "error getting mode database\n"); + else { + const struct fb_videomode *m; + + fb_videomode_to_modelist(info->monspecs.modedb, + info->monspecs.modedb_len, + &info->modelist); + m = fb_find_best_display(&info->monspecs, &info->modelist); + if (m) { + fb_videomode_to_var(&info->var, m); + /* fill all other info->var's fields */ + if (s3fb_check_var(&info->var, info) == 0) + found = true; + } + } + } + } +#endif + if (!mode_option && !found) + mode_option = "640x480-8@60"; /* Prepare startup mode */ - rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8); - if (! 
((rc == 1) || (rc == 2))) { - rc = -EINVAL; - dev_err(info->device, "mode %s not found\n", mode_option); + if (mode_option) { + rc = fb_find_mode(&info->var, info, mode_option, + info->monspecs.modedb, info->monspecs.modedb_len, + NULL, info->var.bits_per_pixel); + if (!rc || rc == 4) { + rc = -EINVAL; + dev_err(info->device, "mode %s not found\n", mode_option); + fb_destroy_modedb(info->monspecs.modedb); + info->monspecs.modedb = NULL; + goto err_find_mode; + } + } + + fb_destroy_modedb(info->monspecs.modedb); + info->monspecs.modedb = NULL; + + /* maximize virtual vertical size for fast scrolling */ + info->var.yres_virtual = info->fix.smem_len * 8 / + (info->var.bits_per_pixel * info->var.xres_virtual); + if (info->var.yres_virtual < info->var.yres) { + dev_err(info->device, "virtual vertical size smaller than real\n"); goto err_find_mode; } @@ -1164,6 +1347,12 @@ err_reg_fb: fb_dealloc_cmap(&info->cmap); err_alloc_cmap: err_find_mode: +#ifdef CONFIG_FB_S3_DDC + if (par->ddc_registered) + i2c_del_adapter(&par->ddc_adapter); + if (par->mmio) + iounmap(par->mmio); +#endif pci_iounmap(dev, info->screen_base); err_iomap: pci_release_regions(dev); @@ -1180,12 +1369,11 @@ err_enable_device: static void __devexit s3_pci_remove(struct pci_dev *dev) { struct fb_info *info = pci_get_drvdata(dev); + struct s3fb_info __maybe_unused *par = info->par; if (info) { #ifdef CONFIG_MTRR - struct s3fb_info *par = info->par; - if (par->mtrr_reg >= 0) { mtrr_del(par->mtrr_reg, 0, 0); par->mtrr_reg = -1; @@ -1195,6 +1383,13 @@ static void __devexit s3_pci_remove(struct pci_dev *dev) unregister_framebuffer(info); fb_dealloc_cmap(&info->cmap); +#ifdef CONFIG_FB_S3_DDC + if (par->ddc_registered) + i2c_del_adapter(&par->ddc_adapter); + if (par->mmio) + iounmap(par->mmio); +#endif + pci_iounmap(dev, info->screen_base); pci_release_regions(dev); /* pci_disable_device(dev); */ diff --git a/drivers/video/savage/savagefb-i2c.c b/drivers/video/savage/savagefb-i2c.c index bb71fea0728..80fa87e2ae2 100644 --- a/drivers/video/savage/savagefb-i2c.c +++ b/drivers/video/savage/savagefb-i2c.c @@ -171,6 +171,8 @@ void savagefb_create_i2c_busses(struct fb_info *info) switch (par->chip) { case S3_PROSAVAGE: + case S3_PROSAVAGEDDR: + case S3_TWISTER: par->chan.reg = CR_SERIAL2; par->chan.ioaddr = par->mmio.vbase; par->chan.algo.setsda = prosavage_gpio_setsda; diff --git a/drivers/video/savage/savagefb.h b/drivers/video/savage/savagefb.h index 4e9490c19d7..32549d177b1 100644 --- a/drivers/video/savage/savagefb.h +++ b/drivers/video/savage/savagefb.h @@ -36,7 +36,6 @@ #define PCI_CHIP_SAVAGE_IX 0x8c13 #define PCI_CHIP_PROSAVAGE_PM 0x8a25 #define PCI_CHIP_PROSAVAGE_KM 0x8a26 - /* Twister is a code name; hope I get the real name soon. */ #define PCI_CHIP_S3TWISTER_P 0x8d01 #define PCI_CHIP_S3TWISTER_K 0x8d02 #define PCI_CHIP_PROSAVAGE_DDR 0x8d03 @@ -52,14 +51,15 @@ #define PCI_CHIP_SUPSAV_IXCDDR 0x8c2f +#define S3_SAVAGE_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE2000)) #define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX)) -#define S3_SAVAGE4_SERIES(chip) ((chip==S3_SAVAGE4) || (chip==S3_PROSAVAGE)) +#define S3_SAVAGE4_SERIES(chip) ((chip>=S3_SAVAGE4) || (chip<=S3_PROSAVAGEDDR)) #define S3_SAVAGE_MOBILE_SERIES(chip) ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE)) -#define S3_SAVAGE_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE2000)) +#define S3_MOBILE_TWISTER_SERIES(chip) ((chip==S3_TWISTER) || (chip==S3_PROSAVAGEDDR)) /* Chip tags. 
These are used to group the adapters into * related families. @@ -71,6 +71,8 @@ typedef enum { S3_SAVAGE_MX, S3_SAVAGE4, S3_PROSAVAGE, + S3_TWISTER, + S3_PROSAVAGEDDR, S3_SUPERSAVAGE, S3_SAVAGE2000, S3_LAST diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c index a2dc1a7ec75..3b7f2f5bae7 100644 --- a/drivers/video/savage/savagefb_driver.c +++ b/drivers/video/savage/savagefb_driver.c @@ -328,7 +328,9 @@ SavageSetup2DEngine(struct savagefb_par *par) savage_out32(0x48C18, savage_in32(0x48C18, par) | 0x0C, par); break; case S3_SAVAGE4: + case S3_TWISTER: case S3_PROSAVAGE: + case S3_PROSAVAGEDDR: case S3_SUPERSAVAGE: /* Disable BCI */ savage_out32(0x48C18, savage_in32(0x48C18, par) & 0x3FF0, par); @@ -1886,6 +1888,8 @@ static int savage_init_hw(struct savagefb_par *par) break; case S3_PROSAVAGE: + case S3_PROSAVAGEDDR: + case S3_TWISTER: videoRam = RamSavageNB[(config1 & 0xE0) >> 5] * 1024; break; @@ -1963,7 +1967,8 @@ static int savage_init_hw(struct savagefb_par *par) } } - if (S3_SAVAGE_MOBILE_SERIES(par->chip) && !par->crtonly) + if ((S3_SAVAGE_MOBILE_SERIES(par->chip) || + S3_MOBILE_TWISTER_SERIES(par->chip)) && !par->crtonly) par->display_type = DISP_LCD; else if (dvi || (par->chip == S3_SAVAGE4 && par->dvi)) par->display_type = DISP_DFP; @@ -2111,19 +2116,19 @@ static int __devinit savage_init_fb_info(struct fb_info *info, snprintf(info->fix.id, 16, "ProSavageKM"); break; case FB_ACCEL_S3TWISTER_P: - par->chip = S3_PROSAVAGE; + par->chip = S3_TWISTER; snprintf(info->fix.id, 16, "TwisterP"); break; case FB_ACCEL_S3TWISTER_K: - par->chip = S3_PROSAVAGE; + par->chip = S3_TWISTER; snprintf(info->fix.id, 16, "TwisterK"); break; case FB_ACCEL_PROSAVAGE_DDR: - par->chip = S3_PROSAVAGE; + par->chip = S3_PROSAVAGEDDR; snprintf(info->fix.id, 16, "ProSavageDDR"); break; case FB_ACCEL_PROSAVAGE_DDRK: - par->chip = S3_PROSAVAGE; + par->chip = S3_PROSAVAGEDDR; snprintf(info->fix.id, 16, "ProSavage8"); break; } diff --git a/drivers/video/sh7760fb.c b/drivers/video/sh7760fb.c index 8fe19582c46..45e47d84716 100644 --- a/drivers/video/sh7760fb.c +++ b/drivers/video/sh7760fb.c @@ -551,8 +551,7 @@ out_unmap: free_irq(par->irq, &par->vsync); iounmap(par->base); out_res: - release_resource(par->ioarea); - kfree(par->ioarea); + release_mem_region(res->start, resource_size(res)); out_fb: framebuffer_release(info); return ret; @@ -570,8 +569,7 @@ static int __devexit sh7760fb_remove(struct platform_device *dev) if (par->irq >= 0) free_irq(par->irq, par); iounmap(par->base); - release_resource(par->ioarea); - kfree(par->ioarea); + release_mem_region(par->ioarea->start, resource_size(par->ioarea)); framebuffer_release(info); platform_set_drvdata(dev, NULL); diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c index 2b9e56a6bde..6ae40b630dc 100644 --- a/drivers/video/sh_mobile_hdmi.c +++ b/drivers/video/sh_mobile_hdmi.c @@ -1131,15 +1131,19 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work) pm_runtime_get_sync(hdmi->dev); ret = sh_hdmi_read_edid(hdmi, &hdmi_rate, &parent_rate); - if (ret < 0) + if (ret < 0) { + pm_runtime_put(hdmi->dev); goto out; + } hdmi->hp_state = HDMI_HOTPLUG_EDID_DONE; /* Reconfigure the clock */ ret = sh_hdmi_clk_configure(hdmi, hdmi_rate, parent_rate); - if (ret < 0) + if (ret < 0) { + pm_runtime_put(hdmi->dev); goto out; + } msleep(10); sh_hdmi_configure(hdmi); @@ -1336,6 +1340,7 @@ static int __init sh_hdmi_probe(struct platform_device *pdev) ecodec: free_irq(irq, hdmi); ereqirq: + 
pm_runtime_suspend(&pdev->dev); pm_runtime_disable(&pdev->dev); iounmap(hdmi->base); emap: @@ -1372,6 +1377,7 @@ static int __exit sh_hdmi_remove(struct platform_device *pdev) free_irq(irq, hdmi); /* Wait for already scheduled work */ cancel_delayed_work_sync(&hdmi->edid_work); + pm_runtime_suspend(&pdev->dev); pm_runtime_disable(&pdev->dev); clk_disable(hdmi->hdmi_clk); clk_put(hdmi->hdmi_clk); diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index 9bcc61b4ef1..404c03b4b7c 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c @@ -27,6 +27,7 @@ #include <asm/atomic.h> #include "sh_mobile_lcdcfb.h" +#include "sh_mobile_meram.h" #define SIDE_B_OFFSET 0x1000 #define MIRROR_OFFSET 0x2000 @@ -143,6 +144,7 @@ struct sh_mobile_lcdc_priv { unsigned long saved_shared_regs[NR_SHARED_REGS]; int started; int forced_bpp; /* 2 channel LCDC must share bpp setting */ + struct sh_mobile_meram_info *meram_dev; }; static bool banked(int reg_nr) @@ -469,7 +471,6 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) int bpp = 0; unsigned long ldddsr; int k, m; - int ret = 0; /* enable clocks before accessing the hardware */ for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { @@ -538,11 +539,12 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) lcdc_write_chan(ch, LDPMR, 0); board_cfg = &ch->cfg.board_cfg; - if (board_cfg->setup_sys) - ret = board_cfg->setup_sys(board_cfg->board_data, ch, - &sh_mobile_lcdc_sys_bus_ops); - if (ret) - return ret; + if (board_cfg->setup_sys) { + int ret = board_cfg->setup_sys(board_cfg->board_data, + ch, &sh_mobile_lcdc_sys_bus_ops); + if (ret) + return ret; + } } /* word and long word swap */ @@ -564,6 +566,9 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) } for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { + unsigned long base_addr_y; + unsigned long base_addr_c = 0; + int pitch; ch = &priv->ch[k]; if (!priv->ch[k].enabled) @@ -598,16 +603,68 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) } lcdc_write_chan(ch, LDDFR, tmp); + base_addr_y = ch->info->fix.smem_start; + base_addr_c = base_addr_y + + ch->info->var.xres * + ch->info->var.yres_virtual; + pitch = ch->info->fix.line_length; + + /* test if we can enable meram */ + if (ch->cfg.meram_cfg && priv->meram_dev && + priv->meram_dev->ops) { + struct sh_mobile_meram_cfg *cfg; + struct sh_mobile_meram_info *mdev; + unsigned long icb_addr_y, icb_addr_c; + int icb_pitch; + int pf; + + cfg = ch->cfg.meram_cfg; + mdev = priv->meram_dev; + /* we need to de-init configured ICBs before we + * we can re-initialize them. 
+ */ + if (ch->meram_enabled) + mdev->ops->meram_unregister(mdev, cfg); + + ch->meram_enabled = 0; + + if (ch->info->var.nonstd) { + if (ch->info->var.bits_per_pixel == 24) + pf = SH_MOBILE_MERAM_PF_NV24; + else + pf = SH_MOBILE_MERAM_PF_NV; + } else { + pf = SH_MOBILE_MERAM_PF_RGB; + } + + ret = mdev->ops->meram_register(mdev, cfg, pitch, + ch->info->var.yres, + pf, + base_addr_y, + base_addr_c, + &icb_addr_y, + &icb_addr_c, + &icb_pitch); + if (!ret) { + /* set LDSA1R value */ + base_addr_y = icb_addr_y; + pitch = icb_pitch; + + /* set LDSA2R value if required */ + if (base_addr_c) + base_addr_c = icb_addr_c; + + ch->meram_enabled = 1; + } + } + /* point out our frame buffer */ - lcdc_write_chan(ch, LDSA1R, ch->info->fix.smem_start); + lcdc_write_chan(ch, LDSA1R, base_addr_y); if (ch->info->var.nonstd) - lcdc_write_chan(ch, LDSA2R, - ch->info->fix.smem_start + - ch->info->var.xres * - ch->info->var.yres_virtual); + lcdc_write_chan(ch, LDSA2R, base_addr_c); /* set line size */ - lcdc_write_chan(ch, LDMLSR, ch->info->fix.line_length); + lcdc_write_chan(ch, LDMLSR, pitch); /* setup deferred io if SYS bus */ tmp = ch->cfg.sys_bus_cfg.deferred_io_msec; @@ -692,6 +749,17 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv) board_cfg->display_off(board_cfg->board_data); module_put(board_cfg->owner); } + + /* disable the meram */ + if (ch->meram_enabled) { + struct sh_mobile_meram_cfg *cfg; + struct sh_mobile_meram_info *mdev; + cfg = ch->cfg.meram_cfg; + mdev = priv->meram_dev; + mdev->ops->meram_unregister(mdev, cfg); + ch->meram_enabled = 0; + } + } /* stop the lcdc */ @@ -875,9 +943,29 @@ static int sh_mobile_fb_pan_display(struct fb_var_screeninfo *var, } else base_addr_c = 0; - lcdc_write_chan_mirror(ch, LDSA1R, base_addr_y); - if (base_addr_c) - lcdc_write_chan_mirror(ch, LDSA2R, base_addr_c); + if (!ch->meram_enabled) { + lcdc_write_chan_mirror(ch, LDSA1R, base_addr_y); + if (base_addr_c) + lcdc_write_chan_mirror(ch, LDSA2R, base_addr_c); + } else { + struct sh_mobile_meram_cfg *cfg; + struct sh_mobile_meram_info *mdev; + unsigned long icb_addr_y, icb_addr_c; + int ret; + + cfg = ch->cfg.meram_cfg; + mdev = priv->meram_dev; + ret = mdev->ops->meram_update(mdev, cfg, + base_addr_y, base_addr_c, + &icb_addr_y, &icb_addr_c); + if (ret) + return ret; + + lcdc_write_chan_mirror(ch, LDSA1R, icb_addr_y); + if (icb_addr_c) + lcdc_write_chan_mirror(ch, LDSA2R, icb_addr_c); + + } if (lcdc_chan_is_sublcd(ch)) lcdc_write(ch->lcdc, _LDRCNTR, ldrcntr ^ LDRCNTR_SRS); @@ -1288,7 +1376,6 @@ static int sh_mobile_lcdc_notify(struct notifier_block *nb, struct fb_info *info = event->info; struct sh_mobile_lcdc_chan *ch = info->par; struct sh_mobile_lcdc_board_cfg *board_cfg = &ch->cfg.board_cfg; - int ret; if (&ch->lcdc->notifier != nb) return NOTIFY_DONE; @@ -1302,7 +1389,6 @@ static int sh_mobile_lcdc_notify(struct notifier_block *nb, board_cfg->display_off(board_cfg->board_data); module_put(board_cfg->owner); } - pm_runtime_put(info->device); sh_mobile_lcdc_stop(ch->lcdc); break; case FB_EVENT_RESUME: @@ -1316,9 +1402,7 @@ static int sh_mobile_lcdc_notify(struct notifier_block *nb, module_put(board_cfg->owner); } - ret = sh_mobile_lcdc_start(ch->lcdc); - if (!ret) - pm_runtime_get_sync(info->device); + sh_mobile_lcdc_start(ch->lcdc); } return NOTIFY_OK; @@ -1420,6 +1504,8 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) goto err1; } + priv->meram_dev = pdata->meram_dev; + for (i = 0; i < j; i++) { struct fb_var_screeninfo *var; const struct fb_videomode *lcd_cfg, 
*max_cfg = NULL; diff --git a/drivers/video/sh_mobile_lcdcfb.h b/drivers/video/sh_mobile_lcdcfb.h index f16cb5645a1..aeed6687e6a 100644 --- a/drivers/video/sh_mobile_lcdcfb.h +++ b/drivers/video/sh_mobile_lcdcfb.h @@ -39,6 +39,7 @@ struct sh_mobile_lcdc_chan { int use_count; int blank_status; struct mutex open_lock; /* protects the use counter */ + int meram_enabled; }; #endif diff --git a/drivers/video/sh_mobile_meram.c b/drivers/video/sh_mobile_meram.c new file mode 100644 index 00000000000..9170c82b495 --- /dev/null +++ b/drivers/video/sh_mobile_meram.c @@ -0,0 +1,567 @@ +/* + * SuperH Mobile MERAM Driver for SuperH Mobile LCDC Driver + * + * Copyright (c) 2011 Damian Hobson-Garcia <dhobsong@igel.co.jp> + * Takanari Hayama <taki@igel.co.jp> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/platform_device.h> + +#include "sh_mobile_meram.h" + +/* meram registers */ +#define MExxCTL 0x0 +#define MExxBSIZE 0x4 +#define MExxMNCF 0x8 +#define MExxSARA 0x10 +#define MExxSARB 0x14 +#define MExxSBSIZE 0x18 + +#define MERAM_MExxCTL_VAL(ctl, next_icb, addr) \ + ((ctl) | (((next_icb) & 0x1f) << 11) | (((addr) & 0x7ff) << 16)) +#define MERAM_MExxBSIZE_VAL(a, b, c) \ + (((a) << 28) | ((b) << 16) | (c)) + +#define MEVCR1 0x4 +#define MEACTS 0x10 +#define MEQSEL1 0x40 +#define MEQSEL2 0x44 + +/* settings */ +#define MERAM_SEC_LINE 15 +#define MERAM_LINE_WIDTH 2048 + +/* + * MERAM/ICB access functions + */ + +#define MERAM_ICB_OFFSET(base, idx, off) \ + ((base) + (0x400 + ((idx) * 0x20) + (off))) + +static inline void meram_write_icb(void __iomem *base, int idx, int off, + unsigned long val) +{ + iowrite32(val, MERAM_ICB_OFFSET(base, idx, off)); +} + +static inline unsigned long meram_read_icb(void __iomem *base, int idx, int off) +{ + return ioread32(MERAM_ICB_OFFSET(base, idx, off)); +} + +static inline void meram_write_reg(void __iomem *base, int off, + unsigned long val) +{ + iowrite32(val, base + off); +} + +static inline unsigned long meram_read_reg(void __iomem *base, int off) +{ + return ioread32(base + off); +} + +/* + * register ICB + */ + +#define MERAM_CACHE_START(p) ((p) >> 16) +#define MERAM_CACHE_END(p) ((p) & 0xffff) +#define MERAM_CACHE_SET(o, s) ((((o) & 0xffff) << 16) | \ + (((o) + (s) - 1) & 0xffff)) + +/* + * check if there's no overlaps in MERAM allocation. + */ + +static inline int meram_check_overlap(struct sh_mobile_meram_priv *priv, + struct sh_mobile_meram_icb *new) +{ + int i; + int used_start, used_end, meram_start, meram_end; + + /* valid ICB? 
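+ * (both the marker and the cache ICB index must fit in the 5-bit
+ * ICB number range 0..31 and must not already be claimed in the
+ * used_icb bitmap; the loop below then compares the new window
+ * against the regions recorded in used_meram_cache[], each packed
+ * as start/end offsets by MERAM_CACHE_SET())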
*/ + if (new->marker_icb & ~0x1f || new->cache_icb & ~0x1f) + return 1; + + if (test_bit(new->marker_icb, &priv->used_icb) || + test_bit(new->cache_icb, &priv->used_icb)) + return 1; + + for (i = 0; i < priv->used_meram_cache_regions; i++) { + used_start = MERAM_CACHE_START(priv->used_meram_cache[i]); + used_end = MERAM_CACHE_END(priv->used_meram_cache[i]); + meram_start = new->meram_offset; + meram_end = new->meram_offset + new->meram_size; + + if ((meram_start >= used_start && meram_start < used_end) || + (meram_end > used_start && meram_end < used_end)) + return 1; + } + + return 0; +} + +/* + * mark the specified ICB as used + */ + +static inline void meram_mark(struct sh_mobile_meram_priv *priv, + struct sh_mobile_meram_icb *new) +{ + int n; + + if (new->marker_icb < 0 || new->cache_icb < 0) + return; + + __set_bit(new->marker_icb, &priv->used_icb); + __set_bit(new->cache_icb, &priv->used_icb); + + n = priv->used_meram_cache_regions; + + priv->used_meram_cache[n] = MERAM_CACHE_SET(new->meram_offset, + new->meram_size); + + priv->used_meram_cache_regions++; +} + +/* + * unmark the specified ICB as used + */ + +static inline void meram_unmark(struct sh_mobile_meram_priv *priv, + struct sh_mobile_meram_icb *icb) +{ + int i; + unsigned long pattern; + + if (icb->marker_icb < 0 || icb->cache_icb < 0) + return; + + __clear_bit(icb->marker_icb, &priv->used_icb); + __clear_bit(icb->cache_icb, &priv->used_icb); + + pattern = MERAM_CACHE_SET(icb->meram_offset, icb->meram_size); + for (i = 0; i < priv->used_meram_cache_regions; i++) { + if (priv->used_meram_cache[i] == pattern) { + while (i < priv->used_meram_cache_regions - 1) { + priv->used_meram_cache[i] = + priv->used_meram_cache[i + 1] ; + i++; + } + priv->used_meram_cache[i] = 0; + priv->used_meram_cache_regions--; + break; + } + } +} + +/* + * is this a YCbCr(NV12, NV16 or NV24) colorspace + */ +static inline int is_nvcolor(int cspace) +{ + if (cspace == SH_MOBILE_MERAM_PF_NV || + cspace == SH_MOBILE_MERAM_PF_NV24) + return 1; + return 0; +} + +/* + * set the next address to fetch + */ +static inline void meram_set_next_addr(struct sh_mobile_meram_priv *priv, + struct sh_mobile_meram_cfg *cfg, + unsigned long base_addr_y, + unsigned long base_addr_c) +{ + unsigned long target; + + target = (cfg->current_reg) ? 
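+ /* current_reg selects which of the two address registers
+ (MExxSARA or MExxSARB) gets the new base address */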
MExxSARA : MExxSARB; + cfg->current_reg ^= 1; + + /* set the next address to fetch */ + meram_write_icb(priv->base, cfg->icb[0].cache_icb, target, + base_addr_y); + meram_write_icb(priv->base, cfg->icb[0].marker_icb, target, + base_addr_y + cfg->icb[0].cache_unit); + + if (is_nvcolor(cfg->pixelformat)) { + meram_write_icb(priv->base, cfg->icb[1].cache_icb, target, + base_addr_c); + meram_write_icb(priv->base, cfg->icb[1].marker_icb, target, + base_addr_c + cfg->icb[1].cache_unit); + } +} + +/* + * get the next ICB address + */ +static inline void meram_get_next_icb_addr(struct sh_mobile_meram_info *pdata, + struct sh_mobile_meram_cfg *cfg, + unsigned long *icb_addr_y, + unsigned long *icb_addr_c) +{ + unsigned long icb_offset; + + if (pdata->addr_mode == SH_MOBILE_MERAM_MODE0) + icb_offset = 0x80000000 | (cfg->current_reg << 29); + else + icb_offset = 0xc0000000 | (cfg->current_reg << 23); + + *icb_addr_y = icb_offset | (cfg->icb[0].marker_icb << 24); + if ((*icb_addr_c) && is_nvcolor(cfg->pixelformat)) + *icb_addr_c = icb_offset | (cfg->icb[1].marker_icb << 24); +} + +#define MERAM_CALC_BYTECOUNT(x, y) \ + (((x) * (y) + (MERAM_LINE_WIDTH - 1)) & ~(MERAM_LINE_WIDTH - 1)) + +/* + * initialize MERAM + */ + +static int meram_init(struct sh_mobile_meram_priv *priv, + struct sh_mobile_meram_icb *icb, + int xres, int yres, int *out_pitch) +{ + unsigned long total_byte_count = MERAM_CALC_BYTECOUNT(xres, yres); + unsigned long bnm; + int lcdc_pitch, xpitch, line_cnt; + int save_lines; + + /* adjust pitch to 1024, 2048, 4096 or 8192 */ + lcdc_pitch = (xres - 1) | 1023; + lcdc_pitch = lcdc_pitch | (lcdc_pitch >> 1); + lcdc_pitch = lcdc_pitch | (lcdc_pitch >> 2); + lcdc_pitch += 1; + + /* derive settings */ + if (lcdc_pitch == 8192 && yres >= 1024) { + lcdc_pitch = xpitch = MERAM_LINE_WIDTH; + line_cnt = total_byte_count >> 11; + *out_pitch = xres; + save_lines = (icb->meram_size / 16 / MERAM_SEC_LINE); + save_lines *= MERAM_SEC_LINE; + } else { + xpitch = xres; + line_cnt = yres; + *out_pitch = lcdc_pitch; + save_lines = icb->meram_size / (lcdc_pitch >> 10) / 2; + save_lines &= 0xff; + } + bnm = (save_lines - 1) << 16; + + /* TODO: we better to check if we have enough MERAM buffer size */ + + /* set up ICB */ + meram_write_icb(priv->base, icb->cache_icb, MExxBSIZE, + MERAM_MExxBSIZE_VAL(0x0, line_cnt - 1, xpitch - 1)); + meram_write_icb(priv->base, icb->marker_icb, MExxBSIZE, + MERAM_MExxBSIZE_VAL(0xf, line_cnt - 1, xpitch - 1)); + + meram_write_icb(priv->base, icb->cache_icb, MExxMNCF, bnm); + meram_write_icb(priv->base, icb->marker_icb, MExxMNCF, bnm); + + meram_write_icb(priv->base, icb->cache_icb, MExxSBSIZE, xpitch); + meram_write_icb(priv->base, icb->marker_icb, MExxSBSIZE, xpitch); + + /* save a cache unit size */ + icb->cache_unit = xres * save_lines; + + /* + * Set MERAM for framebuffer + * + * 0x70f: WD = 0x3, WS=0x1, CM=0x1, MD=FB mode + * we also chain the cache_icb and the marker_icb. + * we also split the allocated MERAM buffer between two ICBs. 
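+ * The chaining is done through the next_icb field that
+ * MERAM_MExxCTL_VAL() packs into MExxCTL, and the split gives each
+ * ICB half of the window: the cache ICB starts at meram_offset,
+ * the marker ICB at meram_offset + meram_size / 2.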
+ */ + meram_write_icb(priv->base, icb->cache_icb, MExxCTL, + MERAM_MExxCTL_VAL(0x70f, icb->marker_icb, + icb->meram_offset)); + meram_write_icb(priv->base, icb->marker_icb, MExxCTL, + MERAM_MExxCTL_VAL(0x70f, icb->cache_icb, + icb->meram_offset + + icb->meram_size / 2)); + + return 0; +} + +static void meram_deinit(struct sh_mobile_meram_priv *priv, + struct sh_mobile_meram_icb *icb) +{ + /* disable ICB */ + meram_write_icb(priv->base, icb->cache_icb, MExxCTL, 0); + meram_write_icb(priv->base, icb->marker_icb, MExxCTL, 0); + icb->cache_unit = 0; +} + +/* + * register the ICB + */ + +static int sh_mobile_meram_register(struct sh_mobile_meram_info *pdata, + struct sh_mobile_meram_cfg *cfg, + int xres, int yres, int pixelformat, + unsigned long base_addr_y, + unsigned long base_addr_c, + unsigned long *icb_addr_y, + unsigned long *icb_addr_c, + int *pitch) +{ + struct platform_device *pdev; + struct sh_mobile_meram_priv *priv; + int n, out_pitch; + int error = 0; + + if (!pdata || !pdata->priv || !pdata->pdev || !cfg) + return -EINVAL; + + if (pixelformat != SH_MOBILE_MERAM_PF_NV && + pixelformat != SH_MOBILE_MERAM_PF_NV24 && + pixelformat != SH_MOBILE_MERAM_PF_RGB) + return -EINVAL; + + priv = pdata->priv; + pdev = pdata->pdev; + + dev_dbg(&pdev->dev, "registering %dx%d (%s) (y=%08lx, c=%08lx)", + xres, yres, (!pixelformat) ? "yuv" : "rgb", + base_addr_y, base_addr_c); + + mutex_lock(&priv->lock); + + /* we can't handle wider than 8192px */ + if (xres > 8192) { + dev_err(&pdev->dev, "width exceeding the limit (> 8192)."); + error = -EINVAL; + goto err; + } + + if (priv->used_meram_cache_regions + 2 > SH_MOBILE_MERAM_ICB_NUM) { + dev_err(&pdev->dev, "no more ICB available."); + error = -EINVAL; + goto err; + } + + /* do we have at least one ICB config? 
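+ * (a negative marker_icb or cache_icb index marks an ICB pair as
+ * not configured, so icb[0] must always carry a valid pair; icb[1]
+ * is optional and only needed for the planar NV formats checked
+ * further down)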
*/ + if (cfg->icb[0].marker_icb < 0 || cfg->icb[0].cache_icb < 0) { + dev_err(&pdev->dev, "at least one ICB is required."); + error = -EINVAL; + goto err; + } + + /* make sure that there's no overlaps */ + if (meram_check_overlap(priv, &cfg->icb[0])) { + dev_err(&pdev->dev, "conflicting config detected."); + error = -EINVAL; + goto err; + } + n = 1; + + /* do the same if we have the second ICB set */ + if (cfg->icb[1].marker_icb >= 0 && cfg->icb[1].cache_icb >= 0) { + if (meram_check_overlap(priv, &cfg->icb[1])) { + dev_err(&pdev->dev, "conflicting config detected."); + error = -EINVAL; + goto err; + } + n = 2; + } + + if (is_nvcolor(pixelformat) && n != 2) { + dev_err(&pdev->dev, "requires two ICB sets for planar Y/C."); + error = -EINVAL; + goto err; + } + + /* we now register the ICB */ + cfg->pixelformat = pixelformat; + meram_mark(priv, &cfg->icb[0]); + if (is_nvcolor(pixelformat)) + meram_mark(priv, &cfg->icb[1]); + + /* initialize MERAM */ + meram_init(priv, &cfg->icb[0], xres, yres, &out_pitch); + *pitch = out_pitch; + if (pixelformat == SH_MOBILE_MERAM_PF_NV) + meram_init(priv, &cfg->icb[1], xres, (yres + 1) / 2, + &out_pitch); + else if (pixelformat == SH_MOBILE_MERAM_PF_NV24) + meram_init(priv, &cfg->icb[1], 2 * xres, (yres + 1) / 2, + &out_pitch); + + cfg->current_reg = 1; + meram_set_next_addr(priv, cfg, base_addr_y, base_addr_c); + meram_get_next_icb_addr(pdata, cfg, icb_addr_y, icb_addr_c); + + dev_dbg(&pdev->dev, "registered - can access via y=%08lx, c=%08lx", + *icb_addr_y, *icb_addr_c); + +err: + mutex_unlock(&priv->lock); + return error; +} + +static int sh_mobile_meram_unregister(struct sh_mobile_meram_info *pdata, + struct sh_mobile_meram_cfg *cfg) +{ + struct sh_mobile_meram_priv *priv; + + if (!pdata || !pdata->priv || !cfg) + return -EINVAL; + + priv = pdata->priv; + + mutex_lock(&priv->lock); + + /* deinit & unmark */ + if (is_nvcolor(cfg->pixelformat)) { + meram_deinit(priv, &cfg->icb[1]); + meram_unmark(priv, &cfg->icb[1]); + } + meram_deinit(priv, &cfg->icb[0]); + meram_unmark(priv, &cfg->icb[0]); + + mutex_unlock(&priv->lock); + + return 0; +} + +static int sh_mobile_meram_update(struct sh_mobile_meram_info *pdata, + struct sh_mobile_meram_cfg *cfg, + unsigned long base_addr_y, + unsigned long base_addr_c, + unsigned long *icb_addr_y, + unsigned long *icb_addr_c) +{ + struct sh_mobile_meram_priv *priv; + + if (!pdata || !pdata->priv || !cfg) + return -EINVAL; + + priv = pdata->priv; + + mutex_lock(&priv->lock); + + meram_set_next_addr(priv, cfg, base_addr_y, base_addr_c); + meram_get_next_icb_addr(pdata, cfg, icb_addr_y, icb_addr_c); + + mutex_unlock(&priv->lock); + + return 0; +} + +static struct sh_mobile_meram_ops sh_mobile_meram_ops = { + .module = THIS_MODULE, + .meram_register = sh_mobile_meram_register, + .meram_unregister = sh_mobile_meram_unregister, + .meram_update = sh_mobile_meram_update, +}; + +/* + * initialize MERAM + */ + +static int sh_mobile_meram_remove(struct platform_device *pdev); + +static int __devinit sh_mobile_meram_probe(struct platform_device *pdev) +{ + struct sh_mobile_meram_priv *priv; + struct sh_mobile_meram_info *pdata = pdev->dev.platform_data; + struct resource *res; + int error; + + if (!pdata) { + dev_err(&pdev->dev, "no platform data defined\n"); + return -EINVAL; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + dev_err(&pdev->dev, "cannot get platform resources\n"); + return -ENOENT; + } + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + dev_err(&pdev->dev, "cannot allocate device 
data\n"); + return -ENOMEM; + } + + platform_set_drvdata(pdev, priv); + + /* initialize private data */ + mutex_init(&priv->lock); + priv->base = ioremap_nocache(res->start, resource_size(res)); + if (!priv->base) { + dev_err(&pdev->dev, "ioremap failed\n"); + error = -EFAULT; + goto err; + } + pdata->ops = &sh_mobile_meram_ops; + pdata->priv = priv; + pdata->pdev = pdev; + + /* initialize ICB addressing mode */ + if (pdata->addr_mode == SH_MOBILE_MERAM_MODE1) + meram_write_reg(priv->base, MEVCR1, 1 << 29); + + dev_info(&pdev->dev, "sh_mobile_meram initialized."); + + return 0; + +err: + sh_mobile_meram_remove(pdev); + + return error; +} + + +static int sh_mobile_meram_remove(struct platform_device *pdev) +{ + struct sh_mobile_meram_priv *priv = platform_get_drvdata(pdev); + + if (priv->base) + iounmap(priv->base); + + mutex_destroy(&priv->lock); + + kfree(priv); + + return 0; +} + +static struct platform_driver sh_mobile_meram_driver = { + .driver = { + .name = "sh_mobile_meram", + .owner = THIS_MODULE, + }, + .probe = sh_mobile_meram_probe, + .remove = sh_mobile_meram_remove, +}; + +static int __init sh_mobile_meram_init(void) +{ + return platform_driver_register(&sh_mobile_meram_driver); +} + +static void __exit sh_mobile_meram_exit(void) +{ + platform_driver_unregister(&sh_mobile_meram_driver); +} + +module_init(sh_mobile_meram_init); +module_exit(sh_mobile_meram_exit); + +MODULE_DESCRIPTION("SuperH Mobile MERAM driver"); +MODULE_AUTHOR("Damian Hobson-Garcia / Takanari Hayama"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/video/sh_mobile_meram.h b/drivers/video/sh_mobile_meram.h new file mode 100644 index 00000000000..82c54fbce8b --- /dev/null +++ b/drivers/video/sh_mobile_meram.h @@ -0,0 +1,41 @@ +#ifndef __sh_mobile_meram_h__ +#define __sh_mobile_meram_h__ + +#include <linux/mutex.h> +#include <video/sh_mobile_meram.h> + +/* + * MERAM private + */ + +#define MERAM_ICB_Y 0x1 +#define MERAM_ICB_C 0x2 + +/* MERAM cache size */ +#define SH_MOBILE_MERAM_ICB_NUM 32 + +#define SH_MOBILE_MERAM_CACHE_OFFSET(p) ((p) >> 16) +#define SH_MOBILE_MERAM_CACHE_SIZE(p) ((p) & 0xffff) + +struct sh_mobile_meram_priv { + void __iomem *base; + struct mutex lock; + unsigned long used_icb; + int used_meram_cache_regions; + unsigned long used_meram_cache[SH_MOBILE_MERAM_ICB_NUM]; +}; + +int sh_mobile_meram_alloc_icb(const struct sh_mobile_meram_cfg *cfg, + int xres, + int yres, + unsigned int base_addr, + int yuv_mode, + int *marker_icb, + int *out_pitch); + +void sh_mobile_meram_free_icb(int marker_icb); + +#define SH_MOBILE_MERAM_START(ind, ab) \ + (0xC0000000 | ((ab & 0x1) << 23) | ((ind & 0x1F) << 24)) + +#endif /* !__sh_mobile_meram_h__ */ diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c index 56ef6b3a985..87f0be1e78b 100644 --- a/drivers/video/sm501fb.c +++ b/drivers/video/sm501fb.c @@ -1625,22 +1625,22 @@ static int sm501fb_start(struct sm501fb_info *info, return 0; /* everything is setup */ err_mem_res: - release_resource(info->fbmem_res); - kfree(info->fbmem_res); + release_mem_region(info->fbmem_res->start, + resource_size(info->fbmem_res)); err_regs2d_map: iounmap(info->regs2d); err_regs2d_res: - release_resource(info->regs2d_res); - kfree(info->regs2d_res); + release_mem_region(info->regs2d_res->start, + resource_size(info->regs2d_res)); err_regs_map: iounmap(info->regs); err_regs_res: - release_resource(info->regs_res); - kfree(info->regs_res); + release_mem_region(info->regs_res->start, + resource_size(info->regs_res)); err_release: return ret; @@ -1652,16 +1652,16 @@ static 
void sm501fb_stop(struct sm501fb_info *info) sm501_unit_power(info->dev->parent, SM501_GATE_DISPLAY, 0); iounmap(info->fbmem); - release_resource(info->fbmem_res); - kfree(info->fbmem_res); + release_mem_region(info->fbmem_res->start, + resource_size(info->fbmem_res)); iounmap(info->regs2d); - release_resource(info->regs2d_res); - kfree(info->regs2d_res); + release_mem_region(info->regs2d_res->start, + resource_size(info->regs2d_res)); iounmap(info->regs); - release_resource(info->regs_res); - kfree(info->regs_res); + release_mem_region(info->regs_res->start, + resource_size(info->regs_res)); } static int sm501fb_init_fb(struct fb_info *fb, diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c index 695066b5b2e..52b0f3e8cca 100644 --- a/drivers/video/udlfb.c +++ b/drivers/video/udlfb.c @@ -29,6 +29,7 @@ #include <linux/slab.h> #include <linux/prefetch.h> #include <linux/delay.h> +#include <linux/prefetch.h> #include <video/udlfb.h> #include "edid.h" @@ -1587,10 +1588,19 @@ static int dlfb_usb_probe(struct usb_interface *interface, goto error; } - for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++) - device_create_file(info->dev, &fb_device_attrs[i]); + for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++) { + retval = device_create_file(info->dev, &fb_device_attrs[i]); + if (retval) { + pr_err("device_create_file failed %d\n", retval); + goto err_del_attrs; + } + } - device_create_bin_file(info->dev, &edid_attr); + retval = device_create_bin_file(info->dev, &edid_attr); + if (retval) { + pr_err("device_create_bin_file failed %d\n", retval); + goto err_del_attrs; + } pr_info("DisplayLink USB device /dev/fb%d attached. %dx%d resolution." " Using %dK framebuffer memory\n", info->node, @@ -1599,6 +1609,10 @@ static int dlfb_usb_probe(struct usb_interface *interface, info->fix.smem_len * 2 : info->fix.smem_len) >> 10); return 0; +err_del_attrs: + for (i -= 1; i >= 0; i--) + device_remove_file(info->dev, &fb_device_attrs[i]); + error: if (dev) { |
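The udlfb hunk above turns the previously unchecked device_create_file() calls into checked ones: on the first failure the probe routine now walks the attribute array backwards, removing whatever was already created, before falling through to the existing error path. A minimal sketch of that create-with-rollback pattern follows; the helper name and the attrs/n parameters are illustrative, not symbols taken from udlfb.

#include <linux/device.h>

/*
 * Illustrative sketch, not part of the patch above: create a set of sysfs
 * attribute files and undo the ones already created if any call fails.
 */
static int create_attr_files(struct device *dev,
			     const struct device_attribute *attrs, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = device_create_file(dev, &attrs[i]);
		if (ret)
			goto undo;	/* stop at the first failure */
	}
	return 0;

undo:
	/* remove only the files that were actually created: i-1 .. 0 */
	while (i--)
		device_remove_file(dev, &attrs[i]);
	return ret;
}

This mirrors what dlfb_usb_probe() now does inline with its err_del_attrs label, counting i back down before reaching the shared error: cleanup.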