author    | Russell King <rmk+kernel@arm.linux.org.uk> | 2010-08-09 14:09:29 +0100
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2010-08-09 14:09:29 +0100
commit    | 054d5c9238f3c577ad51195c3ee7803613f322cc (patch)
tree      | ff7d9f5c0e0ddf14230ba28f28ef69a2c0a0debf /drivers
parent    | 11e4afb49b7fa1fc8e1ffd850c1806dd86a08204 (diff)
parent    | 2192482ee5ce5d5d4a6cec0c351b2d3a744606eb (diff)
Merge branch 'devel-stable' into devel
Diffstat (limited to 'drivers')
349 files changed, 16920 insertions, 5951 deletions
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index aa85a98d3a4..25e030f9a3e 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -642,6 +642,17 @@ config PATA_VIA If unsure, say N. +config PATA_PXA + tristate "PXA DMA-capable PATA support" + depends on ARCH_PXA + help + This option enables support for harddrive attached to PXA CPU's bus. + + NOTE: This driver utilizes PXA DMA controller, in case your hardware + is not capable of doing MWDMA, use pata_platform instead. + + If unsure, say N. + config PATA_WINBOND tristate "Winbond SL82C105 PATA support" depends on PCI diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile index 7ef89d73df6..e87d644b8ed 100644 --- a/drivers/ata/Makefile +++ b/drivers/ata/Makefile @@ -89,6 +89,8 @@ obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o obj-$(CONFIG_PATA_WINBOND_VLB) += pata_winbond.o +obj-$(CONFIG_PATA_PXA) += pata_pxa.o + # Should be last but two libata driver obj-$(CONFIG_PATA_ACPI) += pata_acpi.o # Should be last but one libata driver diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c new file mode 100644 index 00000000000..1898c6ed4b4 --- /dev/null +++ b/drivers/ata/pata_pxa.c @@ -0,0 +1,411 @@ +/* + * Generic PXA PATA driver + * + * Copyright (C) 2010 Marek Vasut <marek.vasut@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/blkdev.h> +#include <linux/ata.h> +#include <linux/libata.h> +#include <linux/platform_device.h> +#include <linux/gpio.h> +#include <linux/slab.h> +#include <linux/completion.h> + +#include <scsi/scsi_host.h> + +#include <mach/pxa2xx-regs.h> +#include <mach/pata_pxa.h> +#include <mach/dma.h> + +#define DRV_NAME "pata_pxa" +#define DRV_VERSION "0.1" + +struct pata_pxa_data { + uint32_t dma_channel; + struct pxa_dma_desc *dma_desc; + dma_addr_t dma_desc_addr; + uint32_t dma_desc_id; + + /* DMA IO physical address */ + uint32_t dma_io_addr; + /* PXA DREQ<0:2> pin selector */ + uint32_t dma_dreq; + /* DMA DCSR register value */ + uint32_t dma_dcsr; + + struct completion dma_done; +}; + +/* + * Setup the DMA descriptors. The size is transfer capped at 4k per descriptor, + * if the transfer is longer, it is split into multiple chained descriptors. + */ +static void pxa_load_dmac(struct scatterlist *sg, struct ata_queued_cmd *qc) +{ + struct pata_pxa_data *pd = qc->ap->private_data; + + uint32_t cpu_len, seg_len; + dma_addr_t cpu_addr; + + cpu_addr = sg_dma_address(sg); + cpu_len = sg_dma_len(sg); + + do { + seg_len = (cpu_len > 0x1000) ? 
0x1000 : cpu_len; + + pd->dma_desc[pd->dma_desc_id].ddadr = pd->dma_desc_addr + + ((pd->dma_desc_id + 1) * sizeof(struct pxa_dma_desc)); + + pd->dma_desc[pd->dma_desc_id].dcmd = DCMD_BURST32 | + DCMD_WIDTH2 | (DCMD_LENGTH & seg_len); + + if (qc->tf.flags & ATA_TFLAG_WRITE) { + pd->dma_desc[pd->dma_desc_id].dsadr = cpu_addr; + pd->dma_desc[pd->dma_desc_id].dtadr = pd->dma_io_addr; + pd->dma_desc[pd->dma_desc_id].dcmd |= DCMD_INCSRCADDR | + DCMD_FLOWTRG; + } else { + pd->dma_desc[pd->dma_desc_id].dsadr = pd->dma_io_addr; + pd->dma_desc[pd->dma_desc_id].dtadr = cpu_addr; + pd->dma_desc[pd->dma_desc_id].dcmd |= DCMD_INCTRGADDR | + DCMD_FLOWSRC; + } + + cpu_len -= seg_len; + cpu_addr += seg_len; + pd->dma_desc_id++; + + } while (cpu_len); + + /* Should not happen */ + if (seg_len & 0x1f) + DALGN |= (1 << pd->dma_dreq); +} + +/* + * Prepare taskfile for submission. + */ +static void pxa_qc_prep(struct ata_queued_cmd *qc) +{ + struct pata_pxa_data *pd = qc->ap->private_data; + int si = 0; + struct scatterlist *sg; + + if (!(qc->flags & ATA_QCFLAG_DMAMAP)) + return; + + pd->dma_desc_id = 0; + + DCSR(pd->dma_channel) = 0; + DALGN &= ~(1 << pd->dma_dreq); + + for_each_sg(qc->sg, sg, qc->n_elem, si) + pxa_load_dmac(sg, qc); + + pd->dma_desc[pd->dma_desc_id - 1].ddadr = DDADR_STOP; + + /* Fire IRQ only at the end of last block */ + pd->dma_desc[pd->dma_desc_id - 1].dcmd |= DCMD_ENDIRQEN; + + DDADR(pd->dma_channel) = pd->dma_desc_addr; + DRCMR(pd->dma_dreq) = DRCMR_MAPVLD | pd->dma_channel; + +} + +/* + * Configure the DMA controller, load the DMA descriptors, but don't start the + * DMA controller yet. Only issue the ATA command. + */ +static void pxa_bmdma_setup(struct ata_queued_cmd *qc) +{ + qc->ap->ops->sff_exec_command(qc->ap, &qc->tf); +} + +/* + * Execute the DMA transfer. + */ +static void pxa_bmdma_start(struct ata_queued_cmd *qc) +{ + struct pata_pxa_data *pd = qc->ap->private_data; + init_completion(&pd->dma_done); + DCSR(pd->dma_channel) = DCSR_RUN; +} + +/* + * Wait until the DMA transfer completes, then stop the DMA controller. + */ +static void pxa_bmdma_stop(struct ata_queued_cmd *qc) +{ + struct pata_pxa_data *pd = qc->ap->private_data; + + if ((DCSR(pd->dma_channel) & DCSR_RUN) && + wait_for_completion_timeout(&pd->dma_done, HZ)) + dev_err(qc->ap->dev, "Timeout waiting for DMA completion!"); + + DCSR(pd->dma_channel) = 0; +} + +/* + * Read DMA status. The bmdma_stop() will take care of properly finishing the + * DMA transfer so we always have DMA-complete interrupt here. + */ +static unsigned char pxa_bmdma_status(struct ata_port *ap) +{ + struct pata_pxa_data *pd = ap->private_data; + unsigned char ret = ATA_DMA_INTR; + + if (pd->dma_dcsr & DCSR_BUSERR) + ret |= ATA_DMA_ERR; + + return ret; +} + +/* + * No IRQ register present so we do nothing. + */ +static void pxa_irq_clear(struct ata_port *ap) +{ +} + +/* + * Check for ATAPI DMA. ATAPI DMA is unsupported by this driver. It's still + * unclear why ATAPI has DMA issues. 
+ */ +static int pxa_check_atapi_dma(struct ata_queued_cmd *qc) +{ + return -EOPNOTSUPP; +} + +static struct scsi_host_template pxa_ata_sht = { + ATA_BMDMA_SHT(DRV_NAME), +}; + +static struct ata_port_operations pxa_ata_port_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = ata_cable_40wire, + + .bmdma_setup = pxa_bmdma_setup, + .bmdma_start = pxa_bmdma_start, + .bmdma_stop = pxa_bmdma_stop, + .bmdma_status = pxa_bmdma_status, + + .check_atapi_dma = pxa_check_atapi_dma, + + .sff_irq_clear = pxa_irq_clear, + + .qc_prep = pxa_qc_prep, +}; + +/* + * DMA interrupt handler. + */ +static void pxa_ata_dma_irq(int dma, void *port) +{ + struct ata_port *ap = port; + struct pata_pxa_data *pd = ap->private_data; + + pd->dma_dcsr = DCSR(dma); + DCSR(dma) = pd->dma_dcsr; + + if (pd->dma_dcsr & DCSR_STOPSTATE) + complete(&pd->dma_done); +} + +static int __devinit pxa_ata_probe(struct platform_device *pdev) +{ + struct ata_host *host; + struct ata_port *ap; + struct pata_pxa_data *data; + struct resource *cmd_res; + struct resource *ctl_res; + struct resource *dma_res; + struct resource *irq_res; + struct pata_pxa_pdata *pdata = pdev->dev.platform_data; + int ret = 0; + + /* + * Resource validation, three resources are needed: + * - CMD port base address + * - CTL port base address + * - DMA port base address + * - IRQ pin + */ + if (pdev->num_resources != 4) { + dev_err(&pdev->dev, "invalid number of resources\n"); + return -EINVAL; + } + + /* + * CMD port base address + */ + cmd_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (unlikely(cmd_res == NULL)) + return -EINVAL; + + /* + * CTL port base address + */ + ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (unlikely(ctl_res == NULL)) + return -EINVAL; + + /* + * DMA port base address + */ + dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0); + if (unlikely(dma_res == NULL)) + return -EINVAL; + + /* + * IRQ pin + */ + irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (unlikely(irq_res == NULL)) + return -EINVAL; + + /* + * Allocate the host + */ + host = ata_host_alloc(&pdev->dev, 1); + if (!host) + return -ENOMEM; + + ap = host->ports[0]; + ap->ops = &pxa_ata_port_ops; + ap->pio_mask = ATA_PIO4; + ap->mwdma_mask = ATA_MWDMA2; + ap->flags = ATA_FLAG_MMIO; + + ap->ioaddr.cmd_addr = devm_ioremap(&pdev->dev, cmd_res->start, + resource_size(cmd_res)); + ap->ioaddr.ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start, + resource_size(ctl_res)); + ap->ioaddr.bmdma_addr = devm_ioremap(&pdev->dev, dma_res->start, + resource_size(dma_res)); + + /* + * Adjust register offsets + */ + ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr; + ap->ioaddr.data_addr = ap->ioaddr.cmd_addr + + (ATA_REG_DATA << pdata->reg_shift); + ap->ioaddr.error_addr = ap->ioaddr.cmd_addr + + (ATA_REG_ERR << pdata->reg_shift); + ap->ioaddr.feature_addr = ap->ioaddr.cmd_addr + + (ATA_REG_FEATURE << pdata->reg_shift); + ap->ioaddr.nsect_addr = ap->ioaddr.cmd_addr + + (ATA_REG_NSECT << pdata->reg_shift); + ap->ioaddr.lbal_addr = ap->ioaddr.cmd_addr + + (ATA_REG_LBAL << pdata->reg_shift); + ap->ioaddr.lbam_addr = ap->ioaddr.cmd_addr + + (ATA_REG_LBAM << pdata->reg_shift); + ap->ioaddr.lbah_addr = ap->ioaddr.cmd_addr + + (ATA_REG_LBAH << pdata->reg_shift); + ap->ioaddr.device_addr = ap->ioaddr.cmd_addr + + (ATA_REG_DEVICE << pdata->reg_shift); + ap->ioaddr.status_addr = ap->ioaddr.cmd_addr + + (ATA_REG_STATUS << pdata->reg_shift); + ap->ioaddr.command_addr = ap->ioaddr.cmd_addr + + (ATA_REG_CMD << pdata->reg_shift); + + /* + * Allocate and 
load driver's internal data structure + */ + data = devm_kzalloc(&pdev->dev, sizeof(struct pata_pxa_data), + GFP_KERNEL); + if (!data) + return -ENOMEM; + + ap->private_data = data; + data->dma_dreq = pdata->dma_dreq; + data->dma_io_addr = dma_res->start; + + /* + * Allocate space for the DMA descriptors + */ + data->dma_desc = dmam_alloc_coherent(&pdev->dev, PAGE_SIZE, + &data->dma_desc_addr, GFP_KERNEL); + if (!data->dma_desc) + return -EINVAL; + + /* + * Request the DMA channel + */ + data->dma_channel = pxa_request_dma(DRV_NAME, DMA_PRIO_LOW, + pxa_ata_dma_irq, ap); + if (data->dma_channel < 0) + return -EBUSY; + + /* + * Stop and clear the DMA channel + */ + DCSR(data->dma_channel) = 0; + + /* + * Activate the ATA host + */ + ret = ata_host_activate(host, irq_res->start, ata_sff_interrupt, + pdata->irq_flags, &pxa_ata_sht); + if (ret) + pxa_free_dma(data->dma_channel); + + return ret; +} + +static int __devexit pxa_ata_remove(struct platform_device *pdev) +{ + struct ata_host *host = dev_get_drvdata(&pdev->dev); + struct pata_pxa_data *data = host->ports[0]->private_data; + + pxa_free_dma(data->dma_channel); + + ata_host_detach(host); + + return 0; +} + +static struct platform_driver pxa_ata_driver = { + .probe = pxa_ata_probe, + .remove = __devexit_p(pxa_ata_remove), + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, +}; + +static int __init pxa_ata_init(void) +{ + return platform_driver_register(&pxa_ata_driver); +} + +static void __exit pxa_ata_exit(void) +{ + platform_driver_unregister(&pxa_ata_driver); +} + +module_init(pxa_ata_init); +module_exit(pxa_ata_exit); + +MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>"); +MODULE_DESCRIPTION("DMA-capable driver for PATA on PXA CPU"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index da8f176c051..b7385e07771 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c @@ -2657,7 +2657,7 @@ static int __devinit fore200e_sba_probe(struct of_device *op, fore200e->bus = bus; fore200e->bus_dev = op; - fore200e->irq = op->irqs[0]; + fore200e->irq = op->archdata.irqs[0]; fore200e->phys_base = op->resource[0].start; sprintf(fore200e->name, "%s-%d", bus->model_name, index); @@ -2795,7 +2795,7 @@ static int __init fore200e_module_init(void) printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n"); #ifdef CONFIG_SBUS - err = of_register_driver(&fore200e_sba_driver, &of_bus_type); + err = of_register_platform_driver(&fore200e_sba_driver); if (err) return err; #endif @@ -2806,7 +2806,7 @@ static int __init fore200e_module_init(void) #ifdef CONFIG_SBUS if (err) - of_unregister_driver(&fore200e_sba_driver); + of_unregister_platform_driver(&fore200e_sba_driver); #endif return err; @@ -2818,7 +2818,7 @@ static void __exit fore200e_module_cleanup(void) pci_unregister_driver(&fore200e_pca_driver); #endif #ifdef CONFIG_SBUS - of_unregister_driver(&fore200e_sba_driver); + of_unregister_platform_driver(&fore200e_sba_driver); #endif } diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 4d99c8bdfed..f699fabf403 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -12,6 +12,7 @@ #include <linux/string.h> #include <linux/platform_device.h> +#include <linux/of_device.h> #include <linux/module.h> #include <linux/init.h> #include <linux/dma-mapping.h> @@ -635,6 +636,12 @@ static struct device_attribute platform_dev_attrs[] = { static int platform_uevent(struct device *dev, 
struct kobj_uevent_env *env) { struct platform_device *pdev = to_platform_device(dev); + int rc; + + /* Some devices have extra OF data and an OF-style MODALIAS */ + rc = of_device_uevent(dev,env); + if (rc != -ENODEV) + return rc; add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX, (pdev->id_entry) ? pdev->id_entry->name : pdev->name); @@ -673,7 +680,11 @@ static int platform_match(struct device *dev, struct device_driver *drv) struct platform_device *pdev = to_platform_device(dev); struct platform_driver *pdrv = to_platform_driver(drv); - /* match against the id table first */ + /* Attempt an OF style match first */ + if (of_driver_match_device(dev, drv)) + return 1; + + /* Then try to match against the id table */ if (pdrv->id_table) return platform_match_id(pdrv->id_table, pdev) != NULL; diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 258bc2ae288..23b7c48df84 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -225,16 +225,6 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, struct gendisk *disk = bdev->bd_disk; struct virtio_blk *vblk = disk->private_data; - if (cmd == 0x56424944) { /* 'VBID' */ - void __user *usr_data = (void __user *)data; - char id_str[VIRTIO_BLK_ID_BYTES]; - int err; - - err = virtblk_get_id(disk, id_str); - if (!err && copy_to_user(usr_data, id_str, VIRTIO_BLK_ID_BYTES)) - err = -EFAULT; - return err; - } /* * Only allow the generic SCSI ioctls if the host can support it. */ @@ -281,6 +271,27 @@ static int index_to_minor(int index) return index << PART_BITS; } +static ssize_t virtblk_serial_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + int err; + + /* sysfs gives us a PAGE_SIZE buffer */ + BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES); + + buf[VIRTIO_BLK_ID_BYTES] = '\0'; + err = virtblk_get_id(disk, buf); + if (!err) + return strlen(buf); + + if (err == -EIO) /* Unsupported? Make it empty. */ + return 0; + + return err; +} +DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL); + static int __devinit virtblk_probe(struct virtio_device *vdev) { struct virtio_blk *vblk; @@ -366,12 +377,32 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) vblk->disk->driverfs_dev = &vdev->dev; index++; - /* If barriers are supported, tell block layer that queue is ordered */ - if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) + if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) { + /* + * If the FLUSH feature is supported we do have support for + * flushing a volatile write cache on the host. Use that + * to implement write barrier support. + */ blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, virtblk_prepare_flush); - else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) + } else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) { + /* + * If the BARRIER feature is supported the host expects us + * to order request by tags. This implies there is not + * volatile write cache on the host, and that the host + * never re-orders outstanding I/O. This feature is not + * useful for real life scenarious and deprecated. + */ blk_queue_ordered(q, QUEUE_ORDERED_TAG, NULL); + } else { + /* + * If the FLUSH feature is not supported we must assume that + * the host does not perform any kind of volatile write + * caching. We still need to drain the queue to provider + * proper barrier semantics. 
+ */ + blk_queue_ordered(q, QUEUE_ORDERED_DRAIN, NULL); + } /* If disk is read-only in the host, the guest should obey */ if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) @@ -445,8 +476,15 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) add_disk(vblk->disk); + err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial); + if (err) + goto out_del_disk; + return 0; +out_del_disk: + del_gendisk(vblk->disk); + blk_cleanup_queue(vblk->disk->queue); out_put_disk: put_disk(vblk->disk); out_mempool: diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 82ed403147c..f63ac3d1f8a 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -48,6 +48,7 @@ #include <xen/grant_table.h> #include <xen/events.h> #include <xen/page.h> +#include <xen/platform_pci.h> #include <xen/interface/grant_table.h> #include <xen/interface/io/blkif.h> @@ -737,6 +738,35 @@ static int blkfront_probe(struct xenbus_device *dev, } } + if (xen_hvm_domain()) { + char *type; + int len; + /* no unplug has been done: do not hook devices != xen vbds */ + if (xen_platform_pci_unplug & XEN_UNPLUG_IGNORE) { + int major; + + if (!VDEV_IS_EXTENDED(vdevice)) + major = BLKIF_MAJOR(vdevice); + else + major = XENVBD_MAJOR; + + if (major != XENVBD_MAJOR) { + printk(KERN_INFO + "%s: HVM does not support vbd %d as xen block device\n", + __FUNCTION__, vdevice); + return -ENODEV; + } + } + /* do not create a PV cdrom device if we are an HVM guest */ + type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len); + if (IS_ERR(type)) + return -ENODEV; + if (strncmp(type, "cdrom", 5) == 0) { + kfree(type); + return -ENODEV; + } + kfree(type); + } info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure"); diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c index aa109cbe0e6..d607f53d8af 100644 --- a/drivers/char/agp/efficeon-agp.c +++ b/drivers/char/agp/efficeon-agp.c @@ -372,6 +372,17 @@ static int __devinit agp_efficeon_probe(struct pci_dev *pdev, bridge->capndx = cap_ptr; /* + * If the device has not been properly setup, the following will catch + * the problem and should stop the system from crashing. + * 20030610 - hamish@zot.org + */ + if (pci_enable_device(pdev)) { + printk(KERN_ERR PFX "Unable to Enable PCI device\n"); + agp_put_bridge(bridge); + return -ENODEV; + } + + /* * The following fixes the case where the BIOS has "forgotten" to * provide an address range for the GART. * 20030610 - hamish@zot.org @@ -385,17 +396,6 @@ static int __devinit agp_efficeon_probe(struct pci_dev *pdev, } } - /* - * If the device has not been properly setup, the following will catch - * the problem and should stop the system from crashing. 
- * 20030610 - hamish@zot.org - */ - if (pci_enable_device(pdev)) { - printk(KERN_ERR PFX "Unable to Enable PCI device\n"); - agp_put_bridge(bridge); - return -ENODEV; - } - /* Fill in the mode register */ if (cap_ptr) { pci_read_config_dword(pdev, diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index d836a71bf06..ddf5def1b0d 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -816,9 +816,9 @@ static const struct intel_driver_description { { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, "HD Graphics", NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, - "Sandybridge", NULL, &intel_i965_driver }, + "Sandybridge", NULL, &intel_gen6_driver }, { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, - "Sandybridge", NULL, &intel_i965_driver }, + "Sandybridge", NULL, &intel_gen6_driver }, { 0, 0, NULL, NULL, NULL } }; @@ -908,6 +908,17 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name); /* + * If the device has not been properly setup, the following will catch + * the problem and should stop the system from crashing. + * 20030610 - hamish@zot.org + */ + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, "can't enable PCI device\n"); + agp_put_bridge(bridge); + return -ENODEV; + } + + /* * The following fixes the case where the BIOS has "forgotten" to * provide an address range for the GART. * 20030610 - hamish@zot.org @@ -921,17 +932,6 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, } } - /* - * If the device has not been properly setup, the following will catch - * the problem and should stop the system from crashing. 
- * 20030610 - hamish@zot.org - */ - if (pci_enable_device(pdev)) { - dev_err(&pdev->dev, "can't enable PCI device\n"); - agp_put_bridge(bridge); - return -ENODEV; - } - /* Fill in the mode register */ if (cap_ptr) { pci_read_config_dword(pdev, diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h index 2547465d465..c05e3e51826 100644 --- a/drivers/char/agp/intel-agp.h +++ b/drivers/char/agp/intel-agp.h @@ -60,6 +60,12 @@ #define I810_PTE_LOCAL 0x00000002 #define I810_PTE_VALID 0x00000001 #define I830_PTE_SYSTEM_CACHED 0x00000006 +/* GT PTE cache control fields */ +#define GEN6_PTE_UNCACHED 0x00000002 +#define GEN6_PTE_LLC 0x00000004 +#define GEN6_PTE_LLC_MLC 0x00000006 +#define GEN6_PTE_GFDT 0x00000008 + #define I810_SMRAM_MISCC 0x70 #define I810_GFX_MEM_WIN_SIZE 0x00010000 #define I810_GFX_MEM_WIN_32M 0x00010000 diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index a7547150a70..d22ffb811bf 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -25,6 +25,10 @@ #define USE_PCI_DMA_API 1 #endif +/* Max amount of stolen space, anything above will be returned to Linux */ +int intel_max_stolen = 32 * 1024 * 1024; +EXPORT_SYMBOL(intel_max_stolen); + static const struct aper_size_info_fixed intel_i810_sizes[] = { {64, 16384, 4}, @@ -104,7 +108,7 @@ static int intel_agp_map_memory(struct agp_memory *mem) DBG("try mapping %lu pages\n", (unsigned long)mem->page_count); if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL)) - return -ENOMEM; + goto err; mem->sg_list = sg = st.sgl; @@ -113,11 +117,14 @@ static int intel_agp_map_memory(struct agp_memory *mem) mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list, mem->page_count, PCI_DMA_BIDIRECTIONAL); - if (unlikely(!mem->num_sg)) { - intel_agp_free_sglist(mem); - return -ENOMEM; - } + if (unlikely(!mem->num_sg)) + goto err; + return 0; + +err: + sg_free_table(&st); + return -ENOMEM; } static void intel_agp_unmap_memory(struct agp_memory *mem) @@ -176,7 +183,7 @@ static void intel_agp_insert_sg_entries(struct agp_memory *mem, if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) { - cache_bits = I830_PTE_SYSTEM_CACHED; + cache_bits = GEN6_PTE_LLC_MLC; } for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { @@ -710,7 +717,12 @@ static void intel_i830_init_gtt_entries(void) break; } } - if (gtt_entries > 0) { + if (!local && gtt_entries > intel_max_stolen) { + dev_info(&agp_bridge->dev->dev, + "detected %dK stolen memory, trimming to %dK\n", + gtt_entries / KB(1), intel_max_stolen / KB(1)); + gtt_entries = intel_max_stolen / KB(4); + } else if (gtt_entries > 0) { dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n", gtt_entries / KB(1), local ? 
"local" : "stolen"); gtt_entries /= KB(4); @@ -797,6 +809,10 @@ static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge) /* we have to call this as early as possible after the MMIO base address is known */ intel_i830_init_gtt_entries(); + if (intel_private.gtt_entries == 0) { + iounmap(intel_private.registers); + return -ENOMEM; + } agp_bridge->gatt_table = NULL; @@ -1282,6 +1298,11 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge) /* we have to call this as early as possible after the MMIO base address is known */ intel_i830_init_gtt_entries(); + if (intel_private.gtt_entries == 0) { + iounmap(intel_private.gtt); + iounmap(intel_private.registers); + return -ENOMEM; + } agp_bridge->gatt_table = NULL; @@ -1309,6 +1330,16 @@ static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge, return addr | bridge->driver->masks[type].mask; } +static unsigned long intel_gen6_mask_memory(struct agp_bridge_data *bridge, + dma_addr_t addr, int type) +{ + /* Shift high bits down */ + addr |= (addr >> 28) & 0xff; + + /* Type checking must be done elsewhere */ + return addr | bridge->driver->masks[type].mask; +} + static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) { u16 snb_gmch_ctl; @@ -1390,6 +1421,11 @@ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge) /* we have to call this as early as possible after the MMIO base address is known */ intel_i830_init_gtt_entries(); + if (intel_private.gtt_entries == 0) { + iounmap(intel_private.gtt); + iounmap(intel_private.registers); + return -ENOMEM; + } agp_bridge->gatt_table = NULL; @@ -1517,6 +1553,39 @@ static const struct agp_bridge_driver intel_i965_driver = { #endif }; +static const struct agp_bridge_driver intel_gen6_driver = { + .owner = THIS_MODULE, + .aperture_sizes = intel_i830_sizes, + .size_type = FIXED_APER_SIZE, + .num_aperture_sizes = 4, + .needs_scratch_page = true, + .configure = intel_i9xx_configure, + .fetch_size = intel_i9xx_fetch_size, + .cleanup = intel_i915_cleanup, + .mask_memory = intel_gen6_mask_memory, + .masks = intel_i810_masks, + .agp_enable = intel_i810_agp_enable, + .cache_flush = global_cache_flush, + .create_gatt_table = intel_i965_create_gatt_table, + .free_gatt_table = intel_i830_free_gatt_table, + .insert_memory = intel_i915_insert_entries, + .remove_memory = intel_i915_remove_entries, + .alloc_by_type = intel_i830_alloc_by_type, + .free_by_type = intel_i810_free_by_type, + .agp_alloc_page = agp_generic_alloc_page, + .agp_alloc_pages = agp_generic_alloc_pages, + .agp_destroy_page = agp_generic_destroy_page, + .agp_destroy_pages = agp_generic_destroy_pages, + .agp_type_to_mask_type = intel_i830_type_to_mask_type, + .chipset_flush = intel_i915_chipset_flush, +#ifdef USE_PCI_DMA_API + .agp_map_page = intel_agp_map_page, + .agp_unmap_page = intel_agp_unmap_page, + .agp_map_memory = intel_agp_map_memory, + .agp_unmap_memory = intel_agp_unmap_memory, +#endif +}; + static const struct agp_bridge_driver intel_g33_driver = { .owner = THIS_MODULE, .aperture_sizes = intel_i830_sizes, diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c index 89d871ef8c2..91917133ae0 100644 --- a/drivers/char/bsr.c +++ b/drivers/char/bsr.c @@ -23,6 +23,7 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_platform.h> +#include <linux/fs.h> #include <linux/module.h> #include <linux/cdev.h> #include <linux/list.h> diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c index 35cca4c7fb1..fa27d1676ee 100644 --- 
a/drivers/char/hvc_console.c +++ b/drivers/char/hvc_console.c @@ -194,7 +194,7 @@ static int __init hvc_console_setup(struct console *co, char *options) return 0; } -static struct console hvc_con_driver = { +static struct console hvc_console = { .name = "hvc", .write = hvc_console_print, .device = hvc_console_device, @@ -220,7 +220,7 @@ static struct console hvc_con_driver = { */ static int __init hvc_console_init(void) { - register_console(&hvc_con_driver); + register_console(&hvc_console); return 0; } console_initcall(hvc_console_init); @@ -276,8 +276,8 @@ int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops) * now (setup won't fail at this point). It's ok to just * call register again if previously .setup failed. */ - if (index == hvc_con_driver.index) - register_console(&hvc_con_driver); + if (index == hvc_console.index) + register_console(&hvc_console); return 0; } @@ -641,7 +641,7 @@ int hvc_poll(struct hvc_struct *hp) } for (i = 0; i < n; ++i) { #ifdef CONFIG_MAGIC_SYSRQ - if (hp->index == hvc_con_driver.index) { + if (hp->index == hvc_console.index) { /* Handle the SysRq Hack */ /* XXX should support a sequence */ if (buf[i] == '\x0f') { /* ^O */ @@ -909,7 +909,7 @@ static void __exit hvc_exit(void) tty_unregister_driver(hvc_driver); /* return tty_struct instances allocated in hvc_init(). */ put_tty_driver(hvc_driver); - unregister_console(&hvc_con_driver); + unregister_console(&hvc_console); } } module_exit(hvc_exit); diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c index d4b14ff1c4c..1f4b6de65a2 100644 --- a/drivers/char/hvsi.c +++ b/drivers/char/hvsi.c @@ -1255,7 +1255,7 @@ static int __init hvsi_console_setup(struct console *console, char *options) return 0; } -static struct console hvsi_con_driver = { +static struct console hvsi_console = { .name = "hvsi", .write = hvsi_console_print, .device = hvsi_console_device, @@ -1308,7 +1308,7 @@ static int __init hvsi_console_init(void) } if (hvsi_count) - register_console(&hvsi_con_driver); + register_console(&hvsi_console); return 0; } console_initcall(hvsi_console_init); diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c index 101d5f23554..7a4f080f835 100644 --- a/drivers/char/hw_random/n2-drv.c +++ b/drivers/char/hw_random/n2-drv.c @@ -762,12 +762,12 @@ static struct of_platform_driver n2rng_driver = { static int __init n2rng_init(void) { - return of_register_driver(&n2rng_driver, &of_bus_type); + return of_register_platform_driver(&n2rng_driver); } static void __exit n2rng_exit(void) { - of_unregister_driver(&n2rng_driver); + of_unregister_platform_driver(&n2rng_driver); } module_init(n2rng_init); diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 7cdb6ee569c..4a9eb3044e5 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c @@ -104,6 +104,7 @@ #include <linux/io.h> #include <asm/system.h> #include <linux/uaccess.h> +#include <linux/kdb.h> #define MAX_NR_CON_DRIVER 16 @@ -187,10 +188,15 @@ static DECLARE_WORK(console_work, console_callback); * fg_console is the current virtual console, * last_console is the last used one, * want_console is the console we want to switch to, + * saved_* variants are for save/restore around kernel debugger enter/leave */ int fg_console; int last_console; int want_console = -1; +int saved_fg_console; +int saved_last_console; +int saved_want_console; +int saved_vc_mode; /* * For each existing display, we have a pointer to console currently visible @@ -3414,6 +3420,78 @@ int con_is_bound(const struct consw *csw) EXPORT_SYMBOL(con_is_bound); 
/** + * con_debug_enter - prepare the console for the kernel debugger + * @sw: console driver + * + * Called when the console is taken over by the kernel debugger, this + * function needs to save the current console state, then put the console + * into a state suitable for the kernel debugger. + * + * RETURNS: + * Zero on success, nonzero if a failure occurred when trying to prepare + * the console for the debugger. + */ +int con_debug_enter(struct vc_data *vc) +{ + int ret = 0; + + saved_fg_console = fg_console; + saved_last_console = last_console; + saved_want_console = want_console; + saved_vc_mode = vc->vc_mode; + vc->vc_mode = KD_TEXT; + console_blanked = 0; + if (vc->vc_sw->con_debug_enter) + ret = vc->vc_sw->con_debug_enter(vc); +#ifdef CONFIG_KGDB_KDB + /* Set the initial LINES variable if it is not already set */ + if (vc->vc_rows < 999) { + int linecount; + char lns[4]; + const char *setargs[3] = { + "set", + "LINES", + lns, + }; + if (kdbgetintenv(setargs[0], &linecount)) { + snprintf(lns, 4, "%i", vc->vc_rows); + kdb_set(2, setargs); + } + } +#endif /* CONFIG_KGDB_KDB */ + return ret; +} +EXPORT_SYMBOL_GPL(con_debug_enter); + +/** + * con_debug_leave - restore console state + * @sw: console driver + * + * Restore the console state to what it was before the kernel debugger + * was invoked. + * + * RETURNS: + * Zero on success, nonzero if a failure occurred when trying to restore + * the console. + */ +int con_debug_leave(void) +{ + struct vc_data *vc; + int ret = 0; + + fg_console = saved_fg_console; + last_console = saved_last_console; + want_console = saved_want_console; + vc_cons[fg_console].d->vc_mode = saved_vc_mode; + + vc = vc_cons[fg_console].d; + if (vc->vc_sw->con_debug_leave) + ret = vc->vc_sw->con_debug_leave(vc); + return ret; +} +EXPORT_SYMBOL_GPL(con_debug_leave); + +/** * register_con_driver - register console driver to console layer * @csw: console driver * @first: the first console to take over, minimum value is 0 diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index b99c38f23d6..26af2dd5d83 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -2247,20 +2247,20 @@ static struct of_platform_driver n2_mau_driver = { static int __init n2_init(void) { - int err = of_register_driver(&n2_crypto_driver, &of_bus_type); + int err = of_register_platform_driver(&n2_crypto_driver); if (!err) { - err = of_register_driver(&n2_mau_driver, &of_bus_type); + err = of_register_platform_driver(&n2_mau_driver); if (err) - of_unregister_driver(&n2_crypto_driver); + of_unregister_platform_driver(&n2_crypto_driver); } return err; } static void __exit n2_exit(void) { - of_unregister_driver(&n2_mau_driver); - of_unregister_driver(&n2_crypto_driver); + of_unregister_platform_driver(&n2_mau_driver); + of_unregister_platform_driver(&n2_crypto_driver); } module_init(n2_init); diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 9e01e96fee9..8344375dc01 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -128,7 +128,7 @@ config TXX9_DMAC config SH_DMAE tristate "Renesas SuperH DMAC support" - depends on SUPERH && SH_DMA + depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE) depends on !SH_DMA_API select DMA_ENGINE help diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c index a2a519fd2a2..fb64cf36ba6 100644 --- a/drivers/dma/shdma.c +++ b/drivers/dma/shdma.c @@ -816,7 +816,7 @@ static irqreturn_t sh_dmae_interrupt(int irq, void *data) return ret; } -#if defined(CONFIG_CPU_SH4) +#if defined(CONFIG_CPU_SH4) || 
defined(CONFIG_ARCH_SHMOBILE) static irqreturn_t sh_dmae_err(int irq, void *data) { struct sh_dmae_device *shdev = (struct sh_dmae_device *)data; @@ -1057,7 +1057,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev) /* Default transfer size of 32 bytes requires 32-byte alignment */ shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE; -#if defined(CONFIG_CPU_SH4) +#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); if (!chanirq_res) @@ -1082,7 +1082,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev) #else chanirq_res = errirq_res; -#endif /* CONFIG_CPU_SH4 */ +#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */ if (chanirq_res->start == chanirq_res->end && !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) { @@ -1129,7 +1129,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev) chan_probe_err: sh_dmae_chan_remove(shdev); eirqres: -#if defined(CONFIG_CPU_SH4) +#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) free_irq(errirq, shdev); eirq_err: #endif diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 4e51fe3c1fc..6a6bd569e1f 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -8,6 +8,7 @@ #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/gpio.h> +#include <linux/of_gpio.h> #include <linux/idr.h> #include <linux/slab.h> @@ -1100,16 +1101,24 @@ int gpiochip_add(struct gpio_chip *chip) } } + of_gpiochip_add(chip); + unlock: spin_unlock_irqrestore(&gpio_lock, flags); - if (status == 0) - status = gpiochip_export(chip); + + if (status) + goto fail; + + status = gpiochip_export(chip); + if (status) + goto fail; + + return 0; fail: /* failures here can mean systems won't boot... */ - if (status) - pr_err("gpiochip_add: gpios %d..%d (%s) failed to register\n", - chip->base, chip->base + chip->ngpio - 1, - chip->label ? : "generic"); + pr_err("gpiochip_add: gpios %d..%d (%s) failed to register\n", + chip->base, chip->base + chip->ngpio - 1, + chip->label ? : "generic"); return status; } EXPORT_SYMBOL_GPL(gpiochip_add); @@ -1128,6 +1137,8 @@ int gpiochip_remove(struct gpio_chip *chip) spin_lock_irqsave(&gpio_lock, flags); + of_gpiochip_remove(chip); + for (id = chip->base; id < chip->base + chip->ngpio; id++) { if (test_bit(FLAG_REQUESTED, &gpio_desc[id].flags)) { status = -EBUSY; @@ -1148,6 +1159,38 @@ int gpiochip_remove(struct gpio_chip *chip) } EXPORT_SYMBOL_GPL(gpiochip_remove); +/** + * gpiochip_find() - iterator for locating a specific gpio_chip + * @data: data to pass to match function + * @callback: Callback function to check gpio_chip + * + * Similar to bus_find_device. It returns a reference to a gpio_chip as + * determined by a user supplied @match callback. The callback should return + * 0 if the device doesn't match and non-zero if it does. If the callback is + * non-zero, this function will return to the caller and not iterate over any + * more gpio_chips. 
+ */ +struct gpio_chip *gpiochip_find(void *data, + int (*match)(struct gpio_chip *chip, void *data)) +{ + struct gpio_chip *chip = NULL; + unsigned long flags; + int i; + + spin_lock_irqsave(&gpio_lock, flags); + for (i = 0; i < ARCH_NR_GPIOS; i++) { + if (!gpio_desc[i].chip) + continue; + + if (match(gpio_desc[i].chip, data)) { + chip = gpio_desc[i].chip; + break; + } + } + spin_unlock_irqrestore(&gpio_lock, flags); + + return chip; +} /* These "optional" allocation calls help prevent drivers from stomping * on each other, and help provide better diagnostics in debugfs. diff --git a/drivers/gpio/xilinx_gpio.c b/drivers/gpio/xilinx_gpio.c index b8fa65b5bfc..709690995d0 100644 --- a/drivers/gpio/xilinx_gpio.c +++ b/drivers/gpio/xilinx_gpio.c @@ -161,14 +161,12 @@ static void xgpio_save_regs(struct of_mm_gpio_chip *mm_gc) static int __devinit xgpio_of_probe(struct device_node *np) { struct xgpio_instance *chip; - struct of_gpio_chip *ofchip; int status = 0; const u32 *tree_info; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; - ofchip = &chip->mmchip.of_gc; /* Update GPIO state shadow register with default value */ tree_info = of_get_property(np, "xlnx,dout-default", NULL); @@ -182,21 +180,20 @@ static int __devinit xgpio_of_probe(struct device_node *np) chip->gpio_dir = *tree_info; /* Check device node and parent device node for device width */ - ofchip->gc.ngpio = 32; /* By default assume full GPIO controller */ + chip->mmchip.gc.ngpio = 32; /* By default assume full GPIO controller */ tree_info = of_get_property(np, "xlnx,gpio-width", NULL); if (!tree_info) tree_info = of_get_property(np->parent, "xlnx,gpio-width", NULL); if (tree_info) - ofchip->gc.ngpio = *tree_info; + chip->mmchip.gc.ngpio = *tree_info; spin_lock_init(&chip->gpio_lock); - ofchip->gpio_cells = 2; - ofchip->gc.direction_input = xgpio_dir_in; - ofchip->gc.direction_output = xgpio_dir_out; - ofchip->gc.get = xgpio_get; - ofchip->gc.set = xgpio_set; + chip->mmchip.gc.direction_input = xgpio_dir_in; + chip->mmchip.gc.direction_output = xgpio_dir_out; + chip->mmchip.gc.get = xgpio_get; + chip->mmchip.gc.set = xgpio_set; chip->mmchip.save_regs = xgpio_save_regs; diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 88910e5a2c7..4cab0c6397e 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -6,7 +6,7 @@ # menuconfig DRM tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)" - depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU + depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU select I2C select I2C_ALGOBIT select SLOW_WORK @@ -17,7 +17,7 @@ menuconfig DRM These modules provide support for synchronization, security, and DMA transfers. Please see <http://dri.sourceforge.net/> for more details. You should also select and configure AGP - (/dev/agpgart) support. + (/dev/agpgart) support if it is available for your platform. config DRM_KMS_HELPER tristate @@ -61,6 +61,7 @@ config DRM_RADEON select DRM_KMS_HELPER select DRM_TTM select POWER_SUPPLY + select HWMON help Choose this option if you have an ATI Radeon graphics card. There are both PCI and AGP versions. 
You don't need to choose this to @@ -130,7 +131,7 @@ endchoice config DRM_MGA tristate "Matrox g200/g400" - depends on DRM + depends on DRM && PCI select FW_LOADER help Choose this option if you have a Matrox G200, G400 or G450 graphics @@ -148,14 +149,14 @@ config DRM_SIS config DRM_VIA tristate "Via unichrome video cards" - depends on DRM + depends on DRM && PCI help Choose this option if you have a Via unichrome or compatible video chipset. If M is selected the module will be called via. config DRM_SAVAGE tristate "Savage video cards" - depends on DRM + depends on DRM && PCI help Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister chipset. If M is selected the module will be called savage. diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index abe3f446ca4..f3a23a329f4 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -9,9 +9,10 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \ drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \ drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ - drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \ + drm_platform.o drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \ drm_crtc.o drm_modes.o drm_edid.o \ - drm_info.o drm_debugfs.o drm_encoder_slave.o + drm_info.o drm_debugfs.o drm_encoder_slave.o \ + drm_trace_points.o drm_global.o drm-$(CONFIG_COMPAT) += drm_ioc32.o @@ -19,6 +20,8 @@ drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o +CFLAGS_drm_trace_points.o := -I$(src) + obj-$(CONFIG_DRM) += drm.o obj-$(CONFIG_DRM_TTM) += ttm/ obj-$(CONFIG_DRM_TDFX) += tdfx/ diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index 2092e7bb788..a5c9ce93bbc 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -39,19 +39,6 @@ #include <asm/shmparam.h> #include "drmP.h" -resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource) -{ - return pci_resource_start(dev->pdev, resource); -} -EXPORT_SYMBOL(drm_get_resource_start); - -resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource) -{ - return pci_resource_len(dev->pdev, resource); -} - -EXPORT_SYMBOL(drm_get_resource_len); - static struct drm_map_list *drm_find_matching_map(struct drm_device *dev, struct drm_local_map *map) { @@ -189,7 +176,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, switch (map->type) { case _DRM_REGISTERS: case _DRM_FRAME_BUFFER: -#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) +#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__) if (map->offset + (map->size-1) < map->offset || map->offset < virt_to_phys(high_memory)) { kfree(map); diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 57cea01c4ff..4c68f76993d 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -80,6 +80,7 @@ static struct drm_prop_enum_list drm_dithering_mode_enum_list[] = { { DRM_MODE_DITHERING_OFF, "Off" }, { DRM_MODE_DITHERING_ON, "On" }, + { DRM_MODE_DITHERING_AUTO, "Automatic" }, }; /* @@ -1126,7 +1127,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data, if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - DRM_DEBUG_KMS("CRTC 
ID is %d\n", crtc->base.id); + DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); if (put_user(crtc->base.id, crtc_id + copied)) { ret = -EFAULT; goto out; @@ -1154,8 +1155,8 @@ int drm_mode_getresources(struct drm_device *dev, void *data, list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - DRM_DEBUG_KMS("ENCODER ID is %d\n", - encoder->base.id); + DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id, + drm_get_encoder_name(encoder)); if (put_user(encoder->base.id, encoder_id + copied)) { ret = -EFAULT; @@ -1185,8 +1186,9 @@ int drm_mode_getresources(struct drm_device *dev, void *data, list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - DRM_DEBUG_KMS("CONNECTOR ID is %d\n", - connector->base.id); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", + connector->base.id, + drm_get_connector_name(connector)); if (put_user(connector->base.id, connector_id + copied)) { ret = -EFAULT; @@ -1209,7 +1211,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data, } card_res->count_connectors = connector_count; - DRM_DEBUG_KMS("Counted %d %d %d\n", card_res->count_crtcs, + DRM_DEBUG_KMS("CRTC[%d] CONNECTORS[%d] ENCODERS[%d]\n", card_res->count_crtcs, card_res->count_connectors, card_res->count_encoders); out: @@ -1312,7 +1314,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo)); - DRM_DEBUG_KMS("connector id %d:\n", out_resp->connector_id); + DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id); mutex_lock(&dev->mode_config.mutex); @@ -1493,6 +1495,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, goto out; } crtc = obj_to_crtc(obj); + DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); if (crtc_req->mode_valid) { /* If we have a mode we need a framebuffer. */ @@ -1569,6 +1572,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, goto out; } connector = obj_to_connector(obj); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", + connector->base.id, + drm_get_connector_name(connector)); connector_set[i] = connector; } @@ -1684,6 +1690,7 @@ int drm_mode_addfb(struct drm_device *dev, r->fb_id = fb->base.id; list_add(&fb->filp_head, &file_priv->fbs); + DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); out: mutex_unlock(&dev->mode_config.mutex); @@ -2610,6 +2617,15 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, goto out; crtc = obj_to_crtc(obj); + if (crtc->fb == NULL) { + /* The framebuffer is currently unbound, presumably + * due to a hotplug event, that userspace has not + * yet discovered. 
+ */ + ret = -EBUSY; + goto out; + } + if (crtc->funcs->page_flip == NULL) goto out; diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 9b2a54117c9..11fe9c870d1 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -86,7 +86,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, int count = 0; int mode_flags = 0; - DRM_DEBUG_KMS("%s\n", drm_get_connector_name(connector)); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, + drm_get_connector_name(connector)); /* set all modes to the unverified state */ list_for_each_entry_safe(mode, t, &connector->modes, head) mode->status = MODE_UNVERIFIED; @@ -102,8 +103,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, connector->status = connector->funcs->detect(connector); if (connector->status == connector_status_disconnected) { - DRM_DEBUG_KMS("%s is disconnected\n", - drm_get_connector_name(connector)); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", + connector->base.id, drm_get_connector_name(connector)); drm_mode_connector_update_edid_property(connector, NULL); goto prune; } @@ -141,8 +142,8 @@ prune: drm_mode_sort(&connector->modes); - DRM_DEBUG_KMS("Probed modes for %s\n", - drm_get_connector_name(connector)); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id, + drm_get_connector_name(connector)); list_for_each_entry_safe(mode, t, &connector->modes, head) { mode->vrefresh = drm_mode_vrefresh(mode); @@ -201,6 +202,17 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc) } EXPORT_SYMBOL(drm_helper_crtc_in_use); +static void +drm_encoder_disable(struct drm_encoder *encoder) +{ + struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; + + if (encoder_funcs->disable) + (*encoder_funcs->disable)(encoder); + else + (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); +} + /** * drm_helper_disable_unused_functions - disable unused objects * @dev: DRM device @@ -215,7 +227,6 @@ void drm_helper_disable_unused_functions(struct drm_device *dev) { struct drm_encoder *encoder; struct drm_connector *connector; - struct drm_encoder_helper_funcs *encoder_funcs; struct drm_crtc *crtc; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { @@ -226,12 +237,8 @@ void drm_helper_disable_unused_functions(struct drm_device *dev) } list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - encoder_funcs = encoder->helper_private; if (!drm_helper_encoder_in_use(encoder)) { - if (encoder_funcs->disable) - (*encoder_funcs->disable)(encoder); - else - (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); + drm_encoder_disable(encoder); /* disconnector encoder from any connector */ encoder->crtc = NULL; } @@ -241,7 +248,10 @@ void drm_helper_disable_unused_functions(struct drm_device *dev) struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; crtc->enabled = drm_helper_crtc_in_use(crtc); if (!crtc->enabled) { - crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); + if (crtc_funcs->disable) + (*crtc_funcs->disable)(crtc); + else + (*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF); crtc->fb = NULL; } } @@ -292,11 +302,11 @@ drm_crtc_prepare_encoders(struct drm_device *dev) encoder_funcs = encoder->helper_private; /* Disable unused encoders */ if (encoder->crtc == NULL) - (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); + drm_encoder_disable(encoder); /* Disable encoders whose CRTC is about to change */ if (encoder_funcs->get_crtc && encoder->crtc != 
(*encoder_funcs->get_crtc)(encoder)) - (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); + drm_encoder_disable(encoder); } } @@ -365,6 +375,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) { goto done; } + DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); /* Prepare the encoders and CRTCs before setting the mode. */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { @@ -392,8 +403,9 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, if (encoder->crtc != crtc) continue; - DRM_DEBUG("%s: set mode %s %x\n", drm_get_encoder_name(encoder), - mode->name, mode->base.id); + DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n", + encoder->base.id, drm_get_encoder_name(encoder), + mode->base.id, mode->name); encoder_funcs = encoder->helper_private; encoder_funcs->mode_set(encoder, mode, adjusted_mode); } @@ -469,10 +481,15 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) crtc_funcs = set->crtc->helper_private; - DRM_DEBUG_KMS("crtc: %p %d fb: %p connectors: %p num_connectors:" - " %d (x, y) (%i, %i)\n", - set->crtc, set->crtc->base.id, set->fb, set->connectors, - (int)set->num_connectors, set->x, set->y); + if (set->fb) { + DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n", + set->crtc->base.id, set->fb->base.id, + (int)set->num_connectors, set->x, set->y); + } else { + DRM_DEBUG_KMS("[CRTC:%d] [NOFB] #connectors=%d (x y) (%i %i)\n", + set->crtc->base.id, (int)set->num_connectors, + set->x, set->y); + } dev = set->crtc->dev; @@ -601,8 +618,14 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) mode_changed = true; connector->encoder->crtc = new_crtc; } - DRM_DEBUG_KMS("setting connector %d crtc to %p\n", - connector->base.id, new_crtc); + if (new_crtc) { + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n", + connector->base.id, drm_get_connector_name(connector), + new_crtc->base.id); + } else { + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n", + connector->base.id, drm_get_connector_name(connector)); + } } /* mode_set_base is not a required function */ @@ -620,8 +643,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) if (!drm_crtc_helper_set_mode(set->crtc, set->mode, set->x, set->y, old_fb)) { - DRM_ERROR("failed to set mode on crtc %p\n", - set->crtc); + DRM_ERROR("failed to set mode on [CRTC:%d]\n", + set->crtc->base.id); ret = -EINVAL; goto fail; } diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 4a66201edae..90288ec7c28 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -243,47 +243,20 @@ int drm_lastclose(struct drm_device * dev) * * Initializes an array of drm_device structures, and attempts to * initialize all available devices, using consecutive minors, registering the - * stubs and initializing the AGP device. + * stubs and initializing the device. * * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and * after the initialization for driver customization. */ int drm_init(struct drm_driver *driver) { - struct pci_dev *pdev = NULL; - const struct pci_device_id *pid; - int i; - DRM_DEBUG("\n"); - INIT_LIST_HEAD(&driver->device_list); - if (driver->driver_features & DRIVER_MODESET) - return pci_register_driver(&driver->pci_driver); - - /* If not using KMS, fall back to stealth mode manual scanning. 
*/ - for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) { - pid = &driver->pci_driver.id_table[i]; - - /* Loop around setting up a DRM device for each PCI device - * matching our ID and device class. If we had the internal - * function that pci_get_subsys and pci_get_class used, we'd - * be able to just pass pid in instead of doing a two-stage - * thing. - */ - pdev = NULL; - while ((pdev = - pci_get_subsys(pid->vendor, pid->device, pid->subvendor, - pid->subdevice, pdev)) != NULL) { - if ((pdev->class & pid->class_mask) != pid->class) - continue; - - /* stealth mode requires a manual probe */ - pci_dev_get(pdev); - drm_get_dev(pdev, pid, driver); - } - } - return 0; + if (driver->driver_features & DRIVER_USE_PLATFORM_DEVICE) + return drm_platform_init(driver); + else + return drm_pci_init(driver); } EXPORT_SYMBOL(drm_init); @@ -315,6 +288,7 @@ static int __init drm_core_init(void) { int ret = -ENOMEM; + drm_global_init(); idr_init(&drm_minors_idr); if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops)) @@ -362,6 +336,7 @@ static void __exit drm_core_exit(void) unregister_chrdev(DRM_MAJOR, "drm"); + idr_remove_all(&drm_minors_idr); idr_destroy(&drm_minors_idr); } @@ -506,9 +481,9 @@ long drm_ioctl(struct file *filp, if (ioctl->flags & DRM_UNLOCKED) retcode = func(dev, kdata, file_priv); else { - lock_kernel(); + mutex_lock(&drm_global_mutex); retcode = func(dev, kdata, file_priv); - unlock_kernel(); + mutex_unlock(&drm_global_mutex); } if (cmd & IOC_OUT) { diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 9585e531ac6..dce5c4a97f8 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -282,7 +282,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) return block; carp: - dev_warn(&connector->dev->pdev->dev, "%s: EDID block %d invalid.\n", + dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n", drm_get_connector_name(connector), j); out: @@ -1623,7 +1623,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) return 0; } if (!drm_edid_is_valid(edid)) { - dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n", + dev_warn(connector->dev->dev, "%s: EDID invalid.\n", drm_get_connector_name(connector)); return 0; } diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c index f0184696edf..d62c064fbaa 100644 --- a/drivers/gpu/drm/drm_encoder_slave.c +++ b/drivers/gpu/drm/drm_encoder_slave.c @@ -41,6 +41,9 @@ * &drm_encoder_slave. The @slave_funcs field will be initialized with * the hooks provided by the slave driver. * + * If @info->platform_data is non-NULL it will be used as the initial + * slave config. + * * Returns 0 on success or a negative errno on failure, in particular, * -ENODEV is returned when no matching driver is found. 
*/ @@ -85,6 +88,10 @@ int drm_i2c_encoder_init(struct drm_device *dev, if (err) goto fail_unregister; + if (info->platform_data) + encoder->slave_funcs->set_config(&encoder->base, + info->platform_data); + return 0; fail_unregister: diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 719662034bb..de82e201d68 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -241,6 +241,80 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper) return 0; } +int drm_fb_helper_debug_enter(struct fb_info *info) +{ + struct drm_fb_helper *helper = info->par; + struct drm_crtc_helper_funcs *funcs; + int i; + + if (list_empty(&kernel_fb_helper_list)) + return false; + + list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { + for (i = 0; i < helper->crtc_count; i++) { + struct drm_mode_set *mode_set = + &helper->crtc_info[i].mode_set; + + if (!mode_set->crtc->enabled) + continue; + + funcs = mode_set->crtc->helper_private; + funcs->mode_set_base_atomic(mode_set->crtc, + mode_set->fb, + mode_set->x, + mode_set->y); + + } + } + + return 0; +} +EXPORT_SYMBOL(drm_fb_helper_debug_enter); + +/* Find the real fb for a given fb helper CRTC */ +static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_crtc *c; + + list_for_each_entry(c, &dev->mode_config.crtc_list, head) { + if (crtc->base.id == c->base.id) + return c->fb; + } + + return NULL; +} + +int drm_fb_helper_debug_leave(struct fb_info *info) +{ + struct drm_fb_helper *helper = info->par; + struct drm_crtc *crtc; + struct drm_crtc_helper_funcs *funcs; + struct drm_framebuffer *fb; + int i; + + for (i = 0; i < helper->crtc_count; i++) { + struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set; + crtc = mode_set->crtc; + funcs = crtc->helper_private; + fb = drm_mode_config_fb(crtc); + + if (!crtc->enabled) + continue; + + if (!fb) { + DRM_ERROR("no fb to restore??\n"); + continue; + } + + funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x, + crtc->y); + } + + return 0; +} +EXPORT_SYMBOL(drm_fb_helper_debug_leave); + bool drm_fb_helper_force_kernel_mode(void) { int i = 0; @@ -611,7 +685,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, struct drm_framebuffer *fb = fb_helper->fb; int depth; - if (var->pixclock != 0) + if (var->pixclock != 0 || in_dbg_master()) return -EINVAL; /* Need to resize the fb object !!! 
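/*
 * Illustrative sketch (not part of this patch): the debug_enter/debug_leave
 * helpers above are meant to be plugged into a driver's fbdev fb_ops so the
 * kernel debugger can switch to the console framebuffer and restore the
 * previous scanout on exit.  This assumes the driver's CRTC helpers provide
 * mode_set_base_atomic(); the "example_" name is invented and the other
 * callbacks are the usual drm_fb_helper ones.
 */
#include <linux/fb.h>
#include "drmP.h"
#include "drm_fb_helper.h"

static struct fb_ops example_fb_ops = {
	.owner		= THIS_MODULE,
	.fb_check_var	= drm_fb_helper_check_var,
	.fb_set_par	= drm_fb_helper_set_par,
	.fb_setcmap	= drm_fb_helper_setcmap,
	.fb_blank	= drm_fb_helper_blank,
	.fb_pan_display	= drm_fb_helper_pan_display,
	.fb_debug_enter	= drm_fb_helper_debug_enter,	/* debugger entry */
	.fb_debug_leave	= drm_fb_helper_debug_leave,	/* debugger exit */
};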
*/ diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index e7aace20981..2ca8df8b610 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -39,6 +39,9 @@ #include <linux/slab.h> #include <linux/smp_lock.h> +/* from BKL pushdown: note that nothing else serializes idr_find() */ +DEFINE_MUTEX(drm_global_mutex); + static int drm_open_helper(struct inode *inode, struct file *filp, struct drm_device * dev); @@ -175,8 +178,7 @@ int drm_stub_open(struct inode *inode, struct file *filp) DRM_DEBUG("\n"); - /* BKL pushdown: note that nothing else serializes idr_find() */ - lock_kernel(); + mutex_lock(&drm_global_mutex); minor = idr_find(&drm_minors_idr, minor_id); if (!minor) goto out; @@ -197,7 +199,7 @@ int drm_stub_open(struct inode *inode, struct file *filp) fops_put(old_fops); out: - unlock_kernel(); + mutex_unlock(&drm_global_mutex); return err; } @@ -472,7 +474,7 @@ int drm_release(struct inode *inode, struct file *filp) struct drm_device *dev = file_priv->minor->dev; int retcode = 0; - lock_kernel(); + mutex_lock(&drm_global_mutex); DRM_DEBUG("open_count = %d\n", dev->open_count); @@ -573,17 +575,14 @@ int drm_release(struct inode *inode, struct file *filp) if (atomic_read(&dev->ioctl_count)) { DRM_ERROR("Device busy: %d\n", atomic_read(&dev->ioctl_count)); - spin_unlock(&dev->count_lock); - unlock_kernel(); - return -EBUSY; + retcode = -EBUSY; + goto out; } - spin_unlock(&dev->count_lock); - unlock_kernel(); - return drm_lastclose(dev); + retcode = drm_lastclose(dev); } +out: spin_unlock(&dev->count_lock); - - unlock_kernel(); + mutex_unlock(&drm_global_mutex); return retcode; } diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 33dad3fa604..4f1b8671448 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -68,8 +68,18 @@ * We make up offsets for buffer objects so we can recognize them at * mmap time. 
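/*
 * Worked example (not part of this patch): the fake mmap-offset window carved
 * out by the #defines that follow, assuming 4 KiB pages (PAGE_SHIFT == 12).
 * The offsets are counted in pages, so both variants fit in a 32-bit
 * unsigned long pgoff even though the byte ranges are large.
 */
#include <stdio.h>

int main(void)
{
	const unsigned shift = 12;	/* PAGE_SHIFT assumption */
	unsigned long long s64 = (0xFFFFFFFFULL >> shift) + 1;	/* 0x100000 pages =   4 GiB */
	unsigned long long l64 = (0xFFFFFFFFULL >> shift) * 16;	/* ~16M pages     ~  64 GiB */
	unsigned long long s32 = (0xFFFFFFFULL  >> shift) + 1;	/* 0x10000 pages  = 256 MiB */
	unsigned long long l32 = (0xFFFFFFFULL  >> shift) * 16;	/* ~1M pages      ~   4 GiB */

	printf("64-bit: offsets start at page %#llx, window %#llx pages\n", s64, l64);
	printf("32-bit: offsets start at page %#llx, window %#llx pages\n", s32, l32);
	return 0;
}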
*/ + +/* pgoff in mmap is an unsigned long, so we need to make sure that + * the faked up offset will fit + */ + +#if BITS_PER_LONG == 64 #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1) #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16) +#else +#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1) +#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16) +#endif /** * Initialize the GEM device fields @@ -419,6 +429,7 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private) idr_for_each(&file_private->object_idr, &drm_gem_object_release_handle, NULL); + idr_remove_all(&file_private->object_idr); idr_destroy(&file_private->object_idr); } diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/drm_global.c index b17007178a3..c87dc96444d 100644 --- a/drivers/gpu/drm/ttm/ttm_global.c +++ b/drivers/gpu/drm/drm_global.c @@ -28,45 +28,45 @@ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> */ -#include "ttm/ttm_module.h" #include <linux/mutex.h> #include <linux/slab.h> #include <linux/module.h> +#include "drm_global.h" -struct ttm_global_item { +struct drm_global_item { struct mutex mutex; void *object; int refcount; }; -static struct ttm_global_item glob[TTM_GLOBAL_NUM]; +static struct drm_global_item glob[DRM_GLOBAL_NUM]; -void ttm_global_init(void) +void drm_global_init(void) { int i; - for (i = 0; i < TTM_GLOBAL_NUM; ++i) { - struct ttm_global_item *item = &glob[i]; + for (i = 0; i < DRM_GLOBAL_NUM; ++i) { + struct drm_global_item *item = &glob[i]; mutex_init(&item->mutex); item->object = NULL; item->refcount = 0; } } -void ttm_global_release(void) +void drm_global_release(void) { int i; - for (i = 0; i < TTM_GLOBAL_NUM; ++i) { - struct ttm_global_item *item = &glob[i]; + for (i = 0; i < DRM_GLOBAL_NUM; ++i) { + struct drm_global_item *item = &glob[i]; BUG_ON(item->object != NULL); BUG_ON(item->refcount != 0); } } -int ttm_global_item_ref(struct ttm_global_reference *ref) +int drm_global_item_ref(struct drm_global_reference *ref) { int ret; - struct ttm_global_item *item = &glob[ref->global_type]; + struct drm_global_item *item = &glob[ref->global_type]; void *object; mutex_lock(&item->mutex); @@ -93,11 +93,11 @@ out_err: item->object = NULL; return ret; } -EXPORT_SYMBOL(ttm_global_item_ref); +EXPORT_SYMBOL(drm_global_item_ref); -void ttm_global_item_unref(struct ttm_global_reference *ref) +void drm_global_item_unref(struct drm_global_reference *ref) { - struct ttm_global_item *item = &glob[ref->global_type]; + struct drm_global_item *item = &glob[ref->global_type]; mutex_lock(&item->mutex); BUG_ON(item->refcount == 0); @@ -108,5 +108,5 @@ void ttm_global_item_unref(struct ttm_global_reference *ref) } mutex_unlock(&item->mutex); } -EXPORT_SYMBOL(ttm_global_item_unref); +EXPORT_SYMBOL(drm_global_item_unref); diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c index f0f6c6b93f3..2ef2c782724 100644 --- a/drivers/gpu/drm/drm_info.c +++ b/drivers/gpu/drm/drm_info.c @@ -51,13 +51,24 @@ int drm_name_info(struct seq_file *m, void *data) if (!master) return 0; - if (master->unique) { - seq_printf(m, "%s %s %s\n", - dev->driver->pci_driver.name, - pci_name(dev->pdev), master->unique); + if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) { + if (master->unique) { + seq_printf(m, "%s %s %s\n", + dev->driver->platform_device->name, + dev_name(dev->dev), master->unique); + } else { + seq_printf(m, "%s\n", + dev->driver->platform_device->name); + } } else { - seq_printf(m, "%s 
%s\n", dev->driver->pci_driver.name, - pci_name(dev->pdev)); + if (master->unique) { + seq_printf(m, "%s %s %s\n", + dev->driver->pci_driver.name, + dev_name(dev->dev), master->unique); + } else { + seq_printf(m, "%s %s\n", dev->driver->pci_driver.name, + dev_name(dev->dev)); + } } return 0; diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 9b9ff46c237..7b03b197fc0 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -64,6 +64,19 @@ int drm_getunique(struct drm_device *dev, void *data, return 0; } +static void +drm_unset_busid(struct drm_device *dev, + struct drm_master *master) +{ + kfree(dev->devname); + dev->devname = NULL; + + kfree(master->unique); + master->unique = NULL; + master->unique_len = 0; + master->unique_size = 0; +} + /** * Set the bus id. * @@ -94,17 +107,24 @@ int drm_setunique(struct drm_device *dev, void *data, master->unique_len = u->unique_len; master->unique_size = u->unique_len + 1; master->unique = kmalloc(master->unique_size, GFP_KERNEL); - if (!master->unique) - return -ENOMEM; - if (copy_from_user(master->unique, u->unique, master->unique_len)) - return -EFAULT; + if (!master->unique) { + ret = -ENOMEM; + goto err; + } + + if (copy_from_user(master->unique, u->unique, master->unique_len)) { + ret = -EFAULT; + goto err; + } master->unique[master->unique_len] = '\0'; dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) + strlen(master->unique) + 2, GFP_KERNEL); - if (!dev->devname) - return -ENOMEM; + if (!dev->devname) { + ret = -ENOMEM; + goto err; + } sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, master->unique); @@ -113,53 +133,103 @@ int drm_setunique(struct drm_device *dev, void *data, * busid. */ ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func); - if (ret != 3) - return -EINVAL; + if (ret != 3) { + ret = -EINVAL; + goto err; + } + domain = bus >> 8; bus &= 0xff; if ((domain != drm_get_pci_domain(dev)) || (bus != dev->pdev->bus->number) || (slot != PCI_SLOT(dev->pdev->devfn)) || - (func != PCI_FUNC(dev->pdev->devfn))) - return -EINVAL; + (func != PCI_FUNC(dev->pdev->devfn))) { + ret = -EINVAL; + goto err; + } return 0; + +err: + drm_unset_busid(dev, master); + return ret; } static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv) { struct drm_master *master = file_priv->master; - int len; + int len, ret; if (master->unique != NULL) - return -EBUSY; + drm_unset_busid(dev, master); - master->unique_len = 40; - master->unique_size = master->unique_len; - master->unique = kmalloc(master->unique_size, GFP_KERNEL); - if (master->unique == NULL) - return -ENOMEM; - - len = snprintf(master->unique, master->unique_len, "pci:%04x:%02x:%02x.%d", - drm_get_pci_domain(dev), - dev->pdev->bus->number, - PCI_SLOT(dev->pdev->devfn), - PCI_FUNC(dev->pdev->devfn)); - if (len >= master->unique_len) - DRM_ERROR("buffer overflow"); - else - master->unique_len = len; + if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) { + master->unique_len = 10 + strlen(dev->platformdev->name); + master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL); - dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) + - master->unique_len + 2, GFP_KERNEL); - if (dev->devname == NULL) - return -ENOMEM; + if (master->unique == NULL) + return -ENOMEM; - sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, - master->unique); + len = snprintf(master->unique, master->unique_len, + "platform:%s", dev->platformdev->name); + + if (len > master->unique_len) { + 
DRM_ERROR("Unique buffer overflowed\n"); + ret = -EINVAL; + goto err; + } + + dev->devname = + kmalloc(strlen(dev->platformdev->name) + + master->unique_len + 2, GFP_KERNEL); + + if (dev->devname == NULL) { + ret = -ENOMEM; + goto err; + } + + sprintf(dev->devname, "%s@%s", dev->platformdev->name, + master->unique); + + } else { + master->unique_len = 40; + master->unique_size = master->unique_len; + master->unique = kmalloc(master->unique_size, GFP_KERNEL); + if (master->unique == NULL) + return -ENOMEM; + + len = snprintf(master->unique, master->unique_len, + "pci:%04x:%02x:%02x.%d", + drm_get_pci_domain(dev), + dev->pdev->bus->number, + PCI_SLOT(dev->pdev->devfn), + PCI_FUNC(dev->pdev->devfn)); + if (len >= master->unique_len) { + DRM_ERROR("buffer overflow"); + ret = -EINVAL; + goto err; + } else + master->unique_len = len; + + dev->devname = + kmalloc(strlen(dev->driver->pci_driver.name) + + master->unique_len + 2, GFP_KERNEL); + + if (dev->devname == NULL) { + ret = -ENOMEM; + goto err; + } + + sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, + master->unique); + } return 0; + +err: + drm_unset_busid(dev, master); + return ret; } /** @@ -323,7 +393,9 @@ int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_pri /* * Version 1.1 includes tying of DRM to specific device */ - drm_set_busid(dev, file_priv); + retcode = drm_set_busid(dev, file_priv); + if (retcode) + goto done; } } diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index a263b7070fc..9d3a5030b6e 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -34,6 +34,7 @@ */ #include "drmP.h" +#include "drm_trace.h" #include <linux/interrupt.h> /* For task queue support */ #include <linux/slab.h> @@ -57,6 +58,9 @@ int drm_irq_by_busid(struct drm_device *dev, void *data, { struct drm_irq_busid *p = data; + if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) + return -EINVAL; + if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) return -EINVAL; @@ -211,7 +215,7 @@ int drm_irq_install(struct drm_device *dev) if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) return -EINVAL; - if (dev->pdev->irq == 0) + if (drm_dev_to_irq(dev) == 0) return -EINVAL; mutex_lock(&dev->struct_mutex); @@ -229,7 +233,7 @@ int drm_irq_install(struct drm_device *dev) dev->irq_enabled = 1; mutex_unlock(&dev->struct_mutex); - DRM_DEBUG("irq=%d\n", dev->pdev->irq); + DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev)); /* Before installing handler */ dev->driver->irq_preinstall(dev); @@ -302,14 +306,14 @@ int drm_irq_uninstall(struct drm_device * dev) if (!irq_enabled) return -EINVAL; - DRM_DEBUG("irq=%d\n", dev->pdev->irq); + DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev)); if (!drm_core_check_feature(dev, DRIVER_MODESET)) vga_client_register(dev->pdev, NULL, NULL, NULL); dev->driver->irq_uninstall(dev); - free_irq(dev->pdev->irq, dev); + free_irq(drm_dev_to_irq(dev), dev); return 0; } @@ -341,7 +345,7 @@ int drm_control(struct drm_device *dev, void *data, if (drm_core_check_feature(dev, DRIVER_MODESET)) return 0; if (dev->if_version < DRM_IF_VERSION(1, 2) && - ctl->irq != dev->pdev->irq) + ctl->irq != drm_dev_to_irq(dev)) return -EINVAL; return drm_irq_install(dev); case DRM_UNINST_HANDLER: @@ -587,6 +591,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe, return -ENOMEM; e->pipe = pipe; + e->base.pid = current->pid; e->event.base.type = DRM_EVENT_VBLANK; e->event.base.length = sizeof e->event; e->event.user_data = vblwait->request.signal; @@ -614,6 +619,9 @@ static int 
drm_queue_vblank_event(struct drm_device *dev, int pipe, DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n", vblwait->request.sequence, seq, pipe); + trace_drm_vblank_event_queued(current->pid, pipe, + vblwait->request.sequence); + e->event.sequence = vblwait->request.sequence; if ((seq - vblwait->request.sequence) <= (1 << 23)) { e->event.tv_sec = now.tv_sec; @@ -621,6 +629,8 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe, drm_vblank_put(dev, e->pipe); list_add_tail(&e->base.link, &e->base.file_priv->event_list); wake_up_interruptible(&e->base.file_priv->event_wait); + trace_drm_vblank_event_delivered(current->pid, pipe, + vblwait->request.sequence); } else { list_add_tail(&e->base.link, &dev->vblank_event_list); } @@ -651,7 +661,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, int ret = 0; unsigned int flags, seq, crtc; - if ((!dev->pdev->irq) || (!dev->irq_enabled)) + if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled)) return -EINVAL; if (vblwait->request.type & _DRM_VBLANK_SIGNAL) @@ -751,9 +761,13 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc) drm_vblank_put(dev, e->pipe); list_move_tail(&e->base.link, &e->base.file_priv->event_list); wake_up_interruptible(&e->base.file_priv->event_wait); + trace_drm_vblank_event_delivered(e->base.pid, e->pipe, + e->event.sequence); } spin_unlock_irqrestore(&dev->event_lock, flags); + + trace_drm_vblank_event(crtc, seq); } /** diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 2ac074c8f5d..da99edc5088 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -48,44 +48,14 @@ #define MM_UNUSED_TARGET 4 -unsigned long drm_mm_tail_space(struct drm_mm *mm) -{ - struct list_head *tail_node; - struct drm_mm_node *entry; - - tail_node = mm->ml_entry.prev; - entry = list_entry(tail_node, struct drm_mm_node, ml_entry); - if (!entry->free) - return 0; - - return entry->size; -} - -int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size) -{ - struct list_head *tail_node; - struct drm_mm_node *entry; - - tail_node = mm->ml_entry.prev; - entry = list_entry(tail_node, struct drm_mm_node, ml_entry); - if (!entry->free) - return -ENOMEM; - - if (entry->size <= size) - return -ENOMEM; - - entry->size -= size; - return 0; -} - static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic) { struct drm_mm_node *child; if (atomic) - child = kmalloc(sizeof(*child), GFP_ATOMIC); + child = kzalloc(sizeof(*child), GFP_ATOMIC); else - child = kmalloc(sizeof(*child), GFP_KERNEL); + child = kzalloc(sizeof(*child), GFP_KERNEL); if (unlikely(child == NULL)) { spin_lock(&mm->unused_lock); @@ -94,8 +64,8 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic) else { child = list_entry(mm->unused_nodes.next, - struct drm_mm_node, fl_entry); - list_del(&child->fl_entry); + struct drm_mm_node, free_stack); + list_del(&child->free_stack); --mm->num_unused; } spin_unlock(&mm->unused_lock); @@ -115,7 +85,7 @@ int drm_mm_pre_get(struct drm_mm *mm) spin_lock(&mm->unused_lock); while (mm->num_unused < MM_UNUSED_TARGET) { spin_unlock(&mm->unused_lock); - node = kmalloc(sizeof(*node), GFP_KERNEL); + node = kzalloc(sizeof(*node), GFP_KERNEL); spin_lock(&mm->unused_lock); if (unlikely(node == NULL)) { @@ -124,7 +94,7 @@ int drm_mm_pre_get(struct drm_mm *mm) return ret; } ++mm->num_unused; - list_add_tail(&node->fl_entry, &mm->unused_nodes); + list_add_tail(&node->free_stack, &mm->unused_nodes); } spin_unlock(&mm->unused_lock); return 0; @@ -146,27 
+116,12 @@ static int drm_mm_create_tail_node(struct drm_mm *mm, child->start = start; child->mm = mm; - list_add_tail(&child->ml_entry, &mm->ml_entry); - list_add_tail(&child->fl_entry, &mm->fl_entry); + list_add_tail(&child->node_list, &mm->node_list); + list_add_tail(&child->free_stack, &mm->free_stack); return 0; } -int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic) -{ - struct list_head *tail_node; - struct drm_mm_node *entry; - - tail_node = mm->ml_entry.prev; - entry = list_entry(tail_node, struct drm_mm_node, ml_entry); - if (!entry->free) { - return drm_mm_create_tail_node(mm, entry->start + entry->size, - size, atomic); - } - entry->size += size; - return 0; -} - static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent, unsigned long size, int atomic) @@ -177,15 +132,14 @@ static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent, if (unlikely(child == NULL)) return NULL; - INIT_LIST_HEAD(&child->fl_entry); + INIT_LIST_HEAD(&child->free_stack); - child->free = 0; child->size = size; child->start = parent->start; child->mm = parent->mm; - list_add_tail(&child->ml_entry, &parent->ml_entry); - INIT_LIST_HEAD(&child->fl_entry); + list_add_tail(&child->node_list, &parent->node_list); + INIT_LIST_HEAD(&child->free_stack); parent->size -= size; parent->start += size; @@ -213,7 +167,7 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node, } if (node->size == size) { - list_del_init(&node->fl_entry); + list_del_init(&node->free_stack); node->free = 0; } else { node = drm_mm_split_at_start(node, size, atomic); @@ -251,7 +205,7 @@ struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node, } if (node->size == size) { - list_del_init(&node->fl_entry); + list_del_init(&node->free_stack); node->free = 0; } else { node = drm_mm_split_at_start(node, size, atomic); @@ -273,16 +227,19 @@ void drm_mm_put_block(struct drm_mm_node *cur) { struct drm_mm *mm = cur->mm; - struct list_head *cur_head = &cur->ml_entry; - struct list_head *root_head = &mm->ml_entry; + struct list_head *cur_head = &cur->node_list; + struct list_head *root_head = &mm->node_list; struct drm_mm_node *prev_node = NULL; struct drm_mm_node *next_node; int merged = 0; + BUG_ON(cur->scanned_block || cur->scanned_prev_free + || cur->scanned_next_free); + if (cur_head->prev != root_head) { prev_node = - list_entry(cur_head->prev, struct drm_mm_node, ml_entry); + list_entry(cur_head->prev, struct drm_mm_node, node_list); if (prev_node->free) { prev_node->size += cur->size; merged = 1; @@ -290,15 +247,15 @@ void drm_mm_put_block(struct drm_mm_node *cur) } if (cur_head->next != root_head) { next_node = - list_entry(cur_head->next, struct drm_mm_node, ml_entry); + list_entry(cur_head->next, struct drm_mm_node, node_list); if (next_node->free) { if (merged) { prev_node->size += next_node->size; - list_del(&next_node->ml_entry); - list_del(&next_node->fl_entry); + list_del(&next_node->node_list); + list_del(&next_node->free_stack); spin_lock(&mm->unused_lock); if (mm->num_unused < MM_UNUSED_TARGET) { - list_add(&next_node->fl_entry, + list_add(&next_node->free_stack, &mm->unused_nodes); ++mm->num_unused; } else @@ -313,12 +270,12 @@ void drm_mm_put_block(struct drm_mm_node *cur) } if (!merged) { cur->free = 1; - list_add(&cur->fl_entry, &mm->fl_entry); + list_add(&cur->free_stack, &mm->free_stack); } else { - list_del(&cur->ml_entry); + list_del(&cur->node_list); spin_lock(&mm->unused_lock); if (mm->num_unused < MM_UNUSED_TARGET) { - 
list_add(&cur->fl_entry, &mm->unused_nodes); + list_add(&cur->free_stack, &mm->unused_nodes); ++mm->num_unused; } else kfree(cur); @@ -328,40 +285,50 @@ void drm_mm_put_block(struct drm_mm_node *cur) EXPORT_SYMBOL(drm_mm_put_block); +static int check_free_mm_node(struct drm_mm_node *entry, unsigned long size, + unsigned alignment) +{ + unsigned wasted = 0; + + if (entry->size < size) + return 0; + + if (alignment) { + register unsigned tmp = entry->start % alignment; + if (tmp) + wasted = alignment - tmp; + } + + if (entry->size >= size + wasted) { + return 1; + } + + return 0; +} + struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size, unsigned alignment, int best_match) { - struct list_head *list; - const struct list_head *free_stack = &mm->fl_entry; struct drm_mm_node *entry; struct drm_mm_node *best; unsigned long best_size; - unsigned wasted; + + BUG_ON(mm->scanned_blocks); best = NULL; best_size = ~0UL; - list_for_each(list, free_stack) { - entry = list_entry(list, struct drm_mm_node, fl_entry); - wasted = 0; - - if (entry->size < size) + list_for_each_entry(entry, &mm->free_stack, free_stack) { + if (!check_free_mm_node(entry, size, alignment)) continue; - if (alignment) { - register unsigned tmp = entry->start % alignment; - if (tmp) - wasted += alignment - tmp; - } + if (!best_match) + return entry; - if (entry->size >= size + wasted) { - if (!best_match) - return entry; - if (entry->size < best_size) { - best = entry; - best_size = entry->size; - } + if (entry->size < best_size) { + best = entry; + best_size = entry->size; } } @@ -376,43 +343,28 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, unsigned long end, int best_match) { - struct list_head *list; - const struct list_head *free_stack = &mm->fl_entry; struct drm_mm_node *entry; struct drm_mm_node *best; unsigned long best_size; - unsigned wasted; + + BUG_ON(mm->scanned_blocks); best = NULL; best_size = ~0UL; - list_for_each(list, free_stack) { - entry = list_entry(list, struct drm_mm_node, fl_entry); - wasted = 0; - - if (entry->size < size) - continue; - + list_for_each_entry(entry, &mm->free_stack, free_stack) { if (entry->start > end || (entry->start+entry->size) < start) continue; - if (entry->start < start) - wasted += start - entry->start; + if (!check_free_mm_node(entry, size, alignment)) + continue; - if (alignment) { - register unsigned tmp = (entry->start + wasted) % alignment; - if (tmp) - wasted += alignment - tmp; - } + if (!best_match) + return entry; - if (entry->size >= size + wasted && - (entry->start + wasted + size) <= end) { - if (!best_match) - return entry; - if (entry->size < best_size) { - best = entry; - best_size = entry->size; - } + if (entry->size < best_size) { + best = entry; + best_size = entry->size; } } @@ -420,9 +372,161 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, } EXPORT_SYMBOL(drm_mm_search_free_in_range); +/** + * Initializa lru scanning. + * + * This simply sets up the scanning routines with the parameters for the desired + * hole. + * + * Warning: As long as the scan list is non-empty, no other operations than + * adding/removing nodes to/from the scan list are allowed. 
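/*
 * Illustrative sketch (not part of this patch) of the basic allocation
 * pattern on top of the renamed free_stack/node_list helpers above: look up
 * a free hole, then split the block out of it.  The "example_" names, the
 * 4096-unit alignment and the error handling are invented; sizes are in
 * whatever units the range passed to drm_mm_init() uses.
 */
#include "drmP.h"

static struct drm_mm_node *example_alloc(struct drm_mm *mm, unsigned long size)
{
	struct drm_mm_node *hole;

	hole = drm_mm_search_free(mm, size, 4096, 0 /* first fit */);
	if (!hole)
		return NULL;				/* no hole large enough */

	/* splits 'size' units off the hole, may return NULL on OOM */
	return drm_mm_get_block_generic(hole, size, 4096, 0 /* !atomic */);
}

static void example_free(struct drm_mm_node *node)
{
	drm_mm_put_block(node);		/* re-merges with free neighbours */
}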
+ */ +void drm_mm_init_scan(struct drm_mm *mm, unsigned long size, + unsigned alignment) +{ + mm->scan_alignment = alignment; + mm->scan_size = size; + mm->scanned_blocks = 0; + mm->scan_hit_start = 0; + mm->scan_hit_size = 0; +} +EXPORT_SYMBOL(drm_mm_init_scan); + +/** + * Add a node to the scan list that might be freed to make space for the desired + * hole. + * + * Returns non-zero, if a hole has been found, zero otherwise. + */ +int drm_mm_scan_add_block(struct drm_mm_node *node) +{ + struct drm_mm *mm = node->mm; + struct list_head *prev_free, *next_free; + struct drm_mm_node *prev_node, *next_node; + + mm->scanned_blocks++; + + prev_free = next_free = NULL; + + BUG_ON(node->free); + node->scanned_block = 1; + node->free = 1; + + if (node->node_list.prev != &mm->node_list) { + prev_node = list_entry(node->node_list.prev, struct drm_mm_node, + node_list); + + if (prev_node->free) { + list_del(&prev_node->node_list); + + node->start = prev_node->start; + node->size += prev_node->size; + + prev_node->scanned_prev_free = 1; + + prev_free = &prev_node->free_stack; + } + } + + if (node->node_list.next != &mm->node_list) { + next_node = list_entry(node->node_list.next, struct drm_mm_node, + node_list); + + if (next_node->free) { + list_del(&next_node->node_list); + + node->size += next_node->size; + + next_node->scanned_next_free = 1; + + next_free = &next_node->free_stack; + } + } + + /* The free_stack list is not used for allocated objects, so these two + * pointers can be abused (as long as no allocations in this memory + * manager happens). */ + node->free_stack.prev = prev_free; + node->free_stack.next = next_free; + + if (check_free_mm_node(node, mm->scan_size, mm->scan_alignment)) { + mm->scan_hit_start = node->start; + mm->scan_hit_size = node->size; + + return 1; + } + + return 0; +} +EXPORT_SYMBOL(drm_mm_scan_add_block); + +/** + * Remove a node from the scan list. + * + * Nodes _must_ be removed in the exact same order from the scan list as they + * have been added, otherwise the internal state of the memory manager will be + * corrupted. + * + * When the scan list is empty, the selected memory nodes can be freed. An + * immediatly following drm_mm_search_free with best_match = 0 will then return + * the just freed block (because its at the top of the free_stack list). + * + * Returns one if this block should be evicted, zero otherwise. Will always + * return zero when no hole has been found. + */ +int drm_mm_scan_remove_block(struct drm_mm_node *node) +{ + struct drm_mm *mm = node->mm; + struct drm_mm_node *prev_node, *next_node; + + mm->scanned_blocks--; + + BUG_ON(!node->scanned_block); + node->scanned_block = 0; + node->free = 0; + + prev_node = list_entry(node->free_stack.prev, struct drm_mm_node, + free_stack); + next_node = list_entry(node->free_stack.next, struct drm_mm_node, + free_stack); + + if (prev_node) { + BUG_ON(!prev_node->scanned_prev_free); + prev_node->scanned_prev_free = 0; + + list_add_tail(&prev_node->node_list, &node->node_list); + + node->start = prev_node->start + prev_node->size; + node->size -= prev_node->size; + } + + if (next_node) { + BUG_ON(!next_node->scanned_next_free); + next_node->scanned_next_free = 0; + + list_add(&next_node->node_list, &node->node_list); + + node->size -= next_node->size; + } + + INIT_LIST_HEAD(&node->free_stack); + + /* Only need to check for containement because start&size for the + * complete resulting free block (not just the desired part) is + * stored. 
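/*
 * Illustrative sketch (not part of this patch) of how an eviction path can
 * drive the scan API above.  The "example_obj" structure, its LRU list and
 * example_evict_one() are invented.  Every block added to the scan is
 * removed again before anything else touches the manager; the roster is
 * unwound newest-first so each node is still linked in when
 * drm_mm_scan_remove_block() restores it, and the nodes for which that call
 * returns 1 are the ones to evict.
 */
#include "drmP.h"

struct example_obj {
	struct drm_mm_node *node;
	struct list_head lru_link;	/* driver LRU, oldest first */
	struct list_head scan_link;	/* temporary scan roster */
};

static void example_evict_one(struct example_obj *obj)
{
	/* driver-specific: unbind the buffer and drm_mm_put_block(obj->node) */
}

static int example_evict_for(struct drm_mm *mm, struct list_head *lru,
			     unsigned long size, unsigned alignment)
{
	struct example_obj *obj, *tmp;
	LIST_HEAD(roster);
	int found = 0;

	drm_mm_init_scan(mm, size, alignment);

	list_for_each_entry(obj, lru, lru_link) {
		list_add(&obj->scan_link, &roster);	/* roster is newest-first */
		if (drm_mm_scan_add_block(obj->node)) {
			found = 1;			/* a suitable hole exists */
			break;
		}
	}

	list_for_each_entry_safe(obj, tmp, &roster, scan_link) {
		int evict = drm_mm_scan_remove_block(obj->node);

		list_del(&obj->scan_link);
		if (found && evict)
			example_evict_one(obj);
	}

	/* on success, a following drm_mm_search_free(..., 0) finds the freed hole */
	return found ? 0 : -ENOSPC;
}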
*/ + if (node->start >= mm->scan_hit_start && + node->start + node->size + <= mm->scan_hit_start + mm->scan_hit_size) { + return 1; + } + + return 0; +} +EXPORT_SYMBOL(drm_mm_scan_remove_block); + int drm_mm_clean(struct drm_mm * mm) { - struct list_head *head = &mm->ml_entry; + struct list_head *head = &mm->node_list; return (head->next->next == head); } @@ -430,10 +534,11 @@ EXPORT_SYMBOL(drm_mm_clean); int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) { - INIT_LIST_HEAD(&mm->ml_entry); - INIT_LIST_HEAD(&mm->fl_entry); + INIT_LIST_HEAD(&mm->node_list); + INIT_LIST_HEAD(&mm->free_stack); INIT_LIST_HEAD(&mm->unused_nodes); mm->num_unused = 0; + mm->scanned_blocks = 0; spin_lock_init(&mm->unused_lock); return drm_mm_create_tail_node(mm, start, size, 0); @@ -442,25 +547,25 @@ EXPORT_SYMBOL(drm_mm_init); void drm_mm_takedown(struct drm_mm * mm) { - struct list_head *bnode = mm->fl_entry.next; + struct list_head *bnode = mm->free_stack.next; struct drm_mm_node *entry; struct drm_mm_node *next; - entry = list_entry(bnode, struct drm_mm_node, fl_entry); + entry = list_entry(bnode, struct drm_mm_node, free_stack); - if (entry->ml_entry.next != &mm->ml_entry || - entry->fl_entry.next != &mm->fl_entry) { + if (entry->node_list.next != &mm->node_list || + entry->free_stack.next != &mm->free_stack) { DRM_ERROR("Memory manager not clean. Delaying takedown\n"); return; } - list_del(&entry->fl_entry); - list_del(&entry->ml_entry); + list_del(&entry->free_stack); + list_del(&entry->node_list); kfree(entry); spin_lock(&mm->unused_lock); - list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) { - list_del(&entry->fl_entry); + list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) { + list_del(&entry->free_stack); kfree(entry); --mm->num_unused; } @@ -475,7 +580,7 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) struct drm_mm_node *entry; int total_used = 0, total_free = 0, total = 0; - list_for_each_entry(entry, &mm->ml_entry, ml_entry) { + list_for_each_entry(entry, &mm->node_list, node_list) { printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n", prefix, entry->start, entry->start + entry->size, entry->size, entry->free ? "free" : "used"); @@ -496,7 +601,7 @@ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) struct drm_mm_node *entry; int total_used = 0, total_free = 0, total = 0; - list_for_each_entry(entry, &mm->ml_entry, ml_entry) { + list_for_each_entry(entry, &mm->node_list, node_list) { seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n", entry->start, entry->start + entry->size, entry->size, entry->free ? "free" : "used"); total += entry->size; if (entry->free) diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index 2ea9ad4a8d6..e20f78b542a 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -124,4 +124,147 @@ void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah) EXPORT_SYMBOL(drm_pci_free); +#ifdef CONFIG_PCI +/** + * Register. + * + * \param pdev - PCI device structure + * \param ent entry from the PCI ID table with device type flags + * \return zero on success or a negative number on failure. + * + * Attempt to gets inter module "drm" information. If we are first + * then register the character device and inter module information. + * Try and register, if we fail to register, backout previous work. 
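/*
 * Illustrative sketch (not part of this patch): with the PCI probe code
 * moved here, a KMS driver's pci_driver hooks reduce to the calls below.
 * The "example_" names and the use of pci_get_drvdata() to recover the
 * drm_device are the driver's own choices; non-KMS drivers keep relying on
 * the stealth scan in drm_pci_init() further down.
 */
#include "drmP.h"

static struct drm_driver example_driver;	/* .driver_features includes DRIVER_MODESET */

static int __devinit example_pci_probe(struct pci_dev *pdev,
				       const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &example_driver);
}

static void example_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);	/* unregisters the minors and frees the device */
}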
+ */ +int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent, + struct drm_driver *driver) +{ + struct drm_device *dev; + int ret; + + DRM_DEBUG("\n"); + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + ret = pci_enable_device(pdev); + if (ret) + goto err_g1; + + pci_set_master(pdev); + + dev->pdev = pdev; + dev->dev = &pdev->dev; + + dev->pci_device = pdev->device; + dev->pci_vendor = pdev->vendor; + +#ifdef __alpha__ + dev->hose = pdev->sysdata; +#endif + + if ((ret = drm_fill_in_dev(dev, ent, driver))) { + printk(KERN_ERR "DRM: Fill_in_dev failed.\n"); + goto err_g2; + } + + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + pci_set_drvdata(pdev, dev); + ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL); + if (ret) + goto err_g2; + } + + if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY))) + goto err_g3; + + if (dev->driver->load) { + ret = dev->driver->load(dev, ent->driver_data); + if (ret) + goto err_g4; + } + + /* setup the grouping for the legacy output */ + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + ret = drm_mode_group_init_legacy_group(dev, + &dev->primary->mode_group); + if (ret) + goto err_g4; + } + + list_add_tail(&dev->driver_item, &driver->device_list); + + DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", + driver->name, driver->major, driver->minor, driver->patchlevel, + driver->date, pci_name(pdev), dev->primary->index); + + return 0; + +err_g4: + drm_put_minor(&dev->primary); +err_g3: + if (drm_core_check_feature(dev, DRIVER_MODESET)) + drm_put_minor(&dev->control); +err_g2: + pci_disable_device(pdev); +err_g1: + kfree(dev); + return ret; +} +EXPORT_SYMBOL(drm_get_pci_dev); + +/** + * PCI device initialization. Called via drm_init at module load time, + * + * \return zero on success or a negative number on failure. + * + * Initializes a drm_device structures,registering the + * stubs and initializing the AGP device. + * + * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and + * after the initialization for driver customization. + */ +int drm_pci_init(struct drm_driver *driver) +{ + struct pci_dev *pdev = NULL; + const struct pci_device_id *pid; + int i; + + if (driver->driver_features & DRIVER_MODESET) + return pci_register_driver(&driver->pci_driver); + + /* If not using KMS, fall back to stealth mode manual scanning. */ + for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) { + pid = &driver->pci_driver.id_table[i]; + + /* Loop around setting up a DRM device for each PCI device + * matching our ID and device class. If we had the internal + * function that pci_get_subsys and pci_get_class used, we'd + * be able to just pass pid in instead of doing a two-stage + * thing. + */ + pdev = NULL; + while ((pdev = + pci_get_subsys(pid->vendor, pid->device, pid->subvendor, + pid->subdevice, pdev)) != NULL) { + if ((pdev->class & pid->class_mask) != pid->class) + continue; + + /* stealth mode requires a manual probe */ + pci_dev_get(pdev); + drm_get_pci_dev(pdev, pid, driver); + } + } + return 0; +} + +#else + +int drm_pci_init(struct drm_driver *driver) +{ + return -1; +} + +#endif /*@}*/ diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c new file mode 100644 index 00000000000..460e9a3afa8 --- /dev/null +++ b/drivers/gpu/drm/drm_platform.c @@ -0,0 +1,122 @@ +/* + * Derived from drm_pci.c + * + * Copyright 2003 José Fonseca. + * Copyright 2003 Leif Delgass. + * Copyright (c) 2009, Code Aurora Forum. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "drmP.h" + +/** + * Register. + * + * \param platdev - Platform device struture + * \return zero on success or a negative number on failure. + * + * Attempt to gets inter module "drm" information. If we are first + * then register the character device and inter module information. + * Try and register, if we fail to register, backout previous work. + */ + +int drm_get_platform_dev(struct platform_device *platdev, + struct drm_driver *driver) +{ + struct drm_device *dev; + int ret; + + DRM_DEBUG("\n"); + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) + return -ENOMEM; + + dev->platformdev = platdev; + dev->dev = &platdev->dev; + + ret = drm_fill_in_dev(dev, NULL, driver); + + if (ret) { + printk(KERN_ERR "DRM: Fill_in_dev failed.\n"); + goto err_g1; + } + + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + dev_set_drvdata(&platdev->dev, dev); + ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL); + if (ret) + goto err_g1; + } + + ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY); + if (ret) + goto err_g2; + + if (dev->driver->load) { + ret = dev->driver->load(dev, 0); + if (ret) + goto err_g3; + } + + /* setup the grouping for the legacy output */ + if (drm_core_check_feature(dev, DRIVER_MODESET)) { + ret = drm_mode_group_init_legacy_group(dev, + &dev->primary->mode_group); + if (ret) + goto err_g3; + } + + list_add_tail(&dev->driver_item, &driver->device_list); + + DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", + driver->name, driver->major, driver->minor, driver->patchlevel, + driver->date, dev->primary->index); + + return 0; + +err_g3: + drm_put_minor(&dev->primary); +err_g2: + if (drm_core_check_feature(dev, DRIVER_MODESET)) + drm_put_minor(&dev->control); +err_g1: + kfree(dev); + return ret; +} +EXPORT_SYMBOL(drm_get_platform_dev); + +/** + * Platform device initialization. Called via drm_init at module load time, + * + * \return zero on success or a negative number on failure. + * + * Initializes a drm_device structures,registering the + * stubs + * + * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and + * after the initialization for driver customization. 
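/*
 * Illustrative sketch (not part of this patch): the platform glue this path
 * expects.  The driver stores its platform_device in the drm_driver, sets
 * DRIVER_USE_PLATFORM_DEVICE, and drm_init() then routes registration
 * through drm_platform_init() below.  All "example_" names are invented and
 * the feature flags beyond DRIVER_USE_PLATFORM_DEVICE are merely typical.
 */
#include <linux/platform_device.h>
#include "drmP.h"

static struct drm_driver example_driver = {
	.driver_features = DRIVER_USE_PLATFORM_DEVICE | DRIVER_MODESET,
	/* .load, .fops, name/desc/date and version fields as usual */
};

static int __devinit example_platform_probe(struct platform_device *pdev)
{
	example_driver.platform_device = pdev;
	return drm_init(&example_driver);
}

static int example_platform_remove(struct platform_device *pdev)
{
	drm_put_dev(platform_get_drvdata(pdev));	/* set by drm_get_platform_dev() */
	return 0;
}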
+ */ + +int drm_platform_init(struct drm_driver *driver) +{ + return drm_get_platform_dev(driver->platform_device, driver); +} diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c index a0c365f2e52..d1ad57450df 100644 --- a/drivers/gpu/drm/drm_stub.c +++ b/drivers/gpu/drm/drm_stub.c @@ -156,6 +156,9 @@ static void drm_master_destroy(struct kref *kref) master->unique_len = 0; } + kfree(dev->devname); + dev->devname = NULL; + list_for_each_entry_safe(pt, next, &master->magicfree, head) { list_del(&pt->head); drm_ht_remove_item(&master->magiclist, &pt->hash_item); @@ -224,7 +227,7 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data, return 0; } -static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, +int drm_fill_in_dev(struct drm_device *dev, const struct pci_device_id *ent, struct drm_driver *driver) { @@ -245,14 +248,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, idr_init(&dev->drw_idr); - dev->pdev = pdev; - dev->pci_device = pdev->device; - dev->pci_vendor = pdev->vendor; - -#ifdef __alpha__ - dev->hose = pdev->sysdata; -#endif - if (drm_ht_create(&dev->map_hash, 12)) { return -ENOMEM; } @@ -321,7 +316,7 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, * create the proc init entry via proc_init(). This routines assigns * minor numbers to secondary heads of multi-headed cards */ -static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type) +int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type) { struct drm_minor *new_minor; int ret; @@ -388,83 +383,6 @@ err_idr: } /** - * Register. - * - * \param pdev - PCI device structure - * \param ent entry from the PCI ID table with device type flags - * \return zero on success or a negative number on failure. - * - * Attempt to gets inter module "drm" information. If we are first - * then register the character device and inter module information. - * Try and register, if we fail to register, backout previous work. 
- */ -int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, - struct drm_driver *driver) -{ - struct drm_device *dev; - int ret; - - DRM_DEBUG("\n"); - - dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) - return -ENOMEM; - - ret = pci_enable_device(pdev); - if (ret) - goto err_g1; - - pci_set_master(pdev); - if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) { - printk(KERN_ERR "DRM: Fill_in_dev failed.\n"); - goto err_g2; - } - - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - pci_set_drvdata(pdev, dev); - ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL); - if (ret) - goto err_g2; - } - - if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY))) - goto err_g3; - - if (dev->driver->load) { - ret = dev->driver->load(dev, ent->driver_data); - if (ret) - goto err_g4; - } - - /* setup the grouping for the legacy output */ - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - ret = drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group); - if (ret) - goto err_g4; - } - - list_add_tail(&dev->driver_item, &driver->device_list); - - DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", - driver->name, driver->major, driver->minor, driver->patchlevel, - driver->date, pci_name(pdev), dev->primary->index); - - return 0; - -err_g4: - drm_put_minor(&dev->primary); -err_g3: - if (drm_core_check_feature(dev, DRIVER_MODESET)) - drm_put_minor(&dev->control); -err_g2: - pci_disable_device(pdev); -err_g1: - kfree(dev); - return ret; -} -EXPORT_SYMBOL(drm_get_dev); - -/** * Put a secondary minor number. * * \param sec_minor - structure to be released diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 101d381e9d8..86118a74223 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -489,7 +489,8 @@ int drm_sysfs_device_add(struct drm_minor *minor) int err; char *minor_str; - minor->kdev.parent = &minor->dev->pdev->dev; + minor->kdev.parent = minor->dev->dev; + minor->kdev.class = drm_class; minor->kdev.release = drm_sysfs_device_release; minor->kdev.devt = minor->device; diff --git a/drivers/gpu/drm/drm_trace.h b/drivers/gpu/drm/drm_trace.h new file mode 100644 index 00000000000..03ea964aa60 --- /dev/null +++ b/drivers/gpu/drm/drm_trace.h @@ -0,0 +1,66 @@ +#if !defined(_DRM_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _DRM_TRACE_H_ + +#include <linux/stringify.h> +#include <linux/types.h> +#include <linux/tracepoint.h> + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM drm +#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM) +#define TRACE_INCLUDE_FILE drm_trace + +TRACE_EVENT(drm_vblank_event, + TP_PROTO(int crtc, unsigned int seq), + TP_ARGS(crtc, seq), + TP_STRUCT__entry( + __field(int, crtc) + __field(unsigned int, seq) + ), + TP_fast_assign( + __entry->crtc = crtc; + __entry->seq = seq; + ), + TP_printk("crtc=%d, seq=%d", __entry->crtc, __entry->seq) +); + +TRACE_EVENT(drm_vblank_event_queued, + TP_PROTO(pid_t pid, int crtc, unsigned int seq), + TP_ARGS(pid, crtc, seq), + TP_STRUCT__entry( + __field(pid_t, pid) + __field(int, crtc) + __field(unsigned int, seq) + ), + TP_fast_assign( + __entry->pid = pid; + __entry->crtc = crtc; + __entry->seq = seq; + ), + TP_printk("pid=%d, crtc=%d, seq=%d", __entry->pid, __entry->crtc, \ + __entry->seq) +); + +TRACE_EVENT(drm_vblank_event_delivered, + TP_PROTO(pid_t pid, int crtc, unsigned int seq), + TP_ARGS(pid, crtc, seq), + TP_STRUCT__entry( + __field(pid_t, pid) + __field(int, crtc) + __field(unsigned int, seq) + ), + TP_fast_assign( + __entry->pid = 
pid; + __entry->crtc = crtc; + __entry->seq = seq; + ), + TP_printk("pid=%d, crtc=%d, seq=%d", __entry->pid, __entry->crtc, \ + __entry->seq) +); + +#endif /* _DRM_TRACE_H_ */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#include <trace/define_trace.h> diff --git a/drivers/gpu/drm/drm_trace_points.c b/drivers/gpu/drm/drm_trace_points.c new file mode 100644 index 00000000000..0d0eb90864a --- /dev/null +++ b/drivers/gpu/drm/drm_trace_points.c @@ -0,0 +1,4 @@ +#include "drmP.h" + +#define CREATE_TRACE_POINTS +#include "drm_trace.h" diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index c3b13fb41d0..3778360ecee 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -61,7 +61,7 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma) tmp = pgprot_writecombine(tmp); else tmp = pgprot_noncached(tmp); -#elif defined(__sparc__) +#elif defined(__sparc__) || defined(__arm__) tmp = pgprot_noncached(tmp); #endif return tmp; @@ -601,6 +601,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) } switch (map->type) { +#if !defined(__arm__) case _DRM_AGP: if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) { /* @@ -615,20 +616,31 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) break; } /* fall through to _DRM_FRAME_BUFFER... */ +#endif case _DRM_FRAME_BUFFER: case _DRM_REGISTERS: offset = dev->driver->get_reg_ofs(dev); vma->vm_flags |= VM_IO; /* not in core dump */ vma->vm_page_prot = drm_io_prot(map->type, vma); +#if !defined(__arm__) if (io_remap_pfn_range(vma, vma->vm_start, (map->offset + offset) >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) return -EAGAIN; +#else + if (remap_pfn_range(vma, vma->vm_start, + (map->offset + offset) >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) + return -EAGAIN; +#endif + DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx," " offset = 0x%llx\n", map->type, vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset)); + vma->vm_ops = &drm_vm_ops; break; case _DRM_CONSISTENT: diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile index 6d2abaf35ba..92862563e7e 100644 --- a/drivers/gpu/drm/i2c/Makefile +++ b/drivers/gpu/drm/i2c/Makefile @@ -2,3 +2,6 @@ ccflags-y := -Iinclude/drm ch7006-y := ch7006_drv.o ch7006_mode.o obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o + +sil164-y := sil164_drv.o +obj-$(CONFIG_DRM_I2C_SIL164) += sil164.o diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c index 81681a07a80..833b35f44a7 100644 --- a/drivers/gpu/drm/i2c/ch7006_drv.c +++ b/drivers/gpu/drm/i2c/ch7006_drv.c @@ -33,7 +33,7 @@ static void ch7006_encoder_set_config(struct drm_encoder *encoder, { struct ch7006_priv *priv = to_ch7006_priv(encoder); - priv->params = params; + priv->params = *(struct ch7006_encoder_params *)params; } static void ch7006_encoder_destroy(struct drm_encoder *encoder) @@ -114,7 +114,7 @@ static void ch7006_encoder_mode_set(struct drm_encoder *encoder, { struct i2c_client *client = drm_i2c_encoder_get_client(encoder); struct ch7006_priv *priv = to_ch7006_priv(encoder); - struct ch7006_encoder_params *params = priv->params; + struct ch7006_encoder_params *params = &priv->params; struct ch7006_state *state = &priv->state; uint8_t *regs = state->regs; struct ch7006_mode *mode = priv->mode; @@ -428,6 +428,22 @@ static int ch7006_remove(struct i2c_client *client) return 0; } +static int ch7006_suspend(struct i2c_client 
*client, pm_message_t mesg) +{ + ch7006_dbg(client, "\n"); + + return 0; +} + +static int ch7006_resume(struct i2c_client *client) +{ + ch7006_dbg(client, "\n"); + + ch7006_write(client, 0x3d, 0x0); + + return 0; +} + static int ch7006_encoder_init(struct i2c_client *client, struct drm_device *dev, struct drm_encoder_slave *encoder) @@ -487,6 +503,8 @@ static struct drm_i2c_encoder_driver ch7006_driver = { .i2c_driver = { .probe = ch7006_probe, .remove = ch7006_remove, + .suspend = ch7006_suspend, + .resume = ch7006_resume, .driver = { .name = "ch7006", diff --git a/drivers/gpu/drm/i2c/ch7006_priv.h b/drivers/gpu/drm/i2c/ch7006_priv.h index b06d3d93d8a..1c6d2e3bd96 100644 --- a/drivers/gpu/drm/i2c/ch7006_priv.h +++ b/drivers/gpu/drm/i2c/ch7006_priv.h @@ -77,7 +77,7 @@ struct ch7006_state { }; struct ch7006_priv { - struct ch7006_encoder_params *params; + struct ch7006_encoder_params params; struct ch7006_mode *mode; struct ch7006_state state; diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c new file mode 100644 index 00000000000..0b6773290c0 --- /dev/null +++ b/drivers/gpu/drm/i2c/sil164_drv.c @@ -0,0 +1,462 @@ +/* + * Copyright (C) 2010 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "drm_crtc_helper.h" +#include "drm_encoder_slave.h" +#include "i2c/sil164.h" + +struct sil164_priv { + struct sil164_encoder_params config; + struct i2c_client *duallink_slave; + + uint8_t saved_state[0x10]; + uint8_t saved_slave_state[0x10]; +}; + +#define to_sil164_priv(x) \ + ((struct sil164_priv *)to_encoder_slave(x)->slave_priv) + +#define sil164_dbg(client, format, ...) do { \ + if (drm_debug & DRM_UT_KMS) \ + dev_printk(KERN_DEBUG, &client->dev, \ + "%s: " format, __func__, ## __VA_ARGS__); \ + } while (0) +#define sil164_info(client, format, ...) \ + dev_info(&client->dev, format, __VA_ARGS__) +#define sil164_err(client, format, ...) 
\ + dev_err(&client->dev, format, __VA_ARGS__) + +#define SIL164_I2C_ADDR_MASTER 0x38 +#define SIL164_I2C_ADDR_SLAVE 0x39 + +/* HW register definitions */ + +#define SIL164_VENDOR_LO 0x0 +#define SIL164_VENDOR_HI 0x1 +#define SIL164_DEVICE_LO 0x2 +#define SIL164_DEVICE_HI 0x3 +#define SIL164_REVISION 0x4 +#define SIL164_FREQ_MIN 0x6 +#define SIL164_FREQ_MAX 0x7 +#define SIL164_CONTROL0 0x8 +# define SIL164_CONTROL0_POWER_ON 0x01 +# define SIL164_CONTROL0_EDGE_RISING 0x02 +# define SIL164_CONTROL0_INPUT_24BIT 0x04 +# define SIL164_CONTROL0_DUAL_EDGE 0x08 +# define SIL164_CONTROL0_HSYNC_ON 0x10 +# define SIL164_CONTROL0_VSYNC_ON 0x20 +#define SIL164_DETECT 0x9 +# define SIL164_DETECT_INTR_STAT 0x01 +# define SIL164_DETECT_HOTPLUG_STAT 0x02 +# define SIL164_DETECT_RECEIVER_STAT 0x04 +# define SIL164_DETECT_INTR_MODE_RECEIVER 0x00 +# define SIL164_DETECT_INTR_MODE_HOTPLUG 0x08 +# define SIL164_DETECT_OUT_MODE_HIGH 0x00 +# define SIL164_DETECT_OUT_MODE_INTR 0x10 +# define SIL164_DETECT_OUT_MODE_RECEIVER 0x20 +# define SIL164_DETECT_OUT_MODE_HOTPLUG 0x30 +# define SIL164_DETECT_VSWING_STAT 0x80 +#define SIL164_CONTROL1 0xa +# define SIL164_CONTROL1_DESKEW_ENABLE 0x10 +# define SIL164_CONTROL1_DESKEW_INCR_SHIFT 5 +#define SIL164_GPIO 0xb +#define SIL164_CONTROL2 0xc +# define SIL164_CONTROL2_FILTER_ENABLE 0x01 +# define SIL164_CONTROL2_FILTER_SETTING_SHIFT 1 +# define SIL164_CONTROL2_DUALLINK_MASTER 0x40 +# define SIL164_CONTROL2_SYNC_CONT 0x80 +#define SIL164_DUALLINK 0xd +# define SIL164_DUALLINK_ENABLE 0x10 +# define SIL164_DUALLINK_SKEW_SHIFT 5 +#define SIL164_PLLZONE 0xe +# define SIL164_PLLZONE_STAT 0x08 +# define SIL164_PLLZONE_FORCE_ON 0x10 +# define SIL164_PLLZONE_FORCE_HIGH 0x20 + +/* HW access functions */ + +static void +sil164_write(struct i2c_client *client, uint8_t addr, uint8_t val) +{ + uint8_t buf[] = {addr, val}; + int ret; + + ret = i2c_master_send(client, buf, ARRAY_SIZE(buf)); + if (ret < 0) + sil164_err(client, "Error %d writing to subaddress 0x%x\n", + ret, addr); +} + +static uint8_t +sil164_read(struct i2c_client *client, uint8_t addr) +{ + uint8_t val; + int ret; + + ret = i2c_master_send(client, &addr, sizeof(addr)); + if (ret < 0) + goto fail; + + ret = i2c_master_recv(client, &val, sizeof(val)); + if (ret < 0) + goto fail; + + return val; + +fail: + sil164_err(client, "Error %d reading from subaddress 0x%x\n", + ret, addr); + return 0; +} + +static void +sil164_save_state(struct i2c_client *client, uint8_t *state) +{ + int i; + + for (i = 0x8; i <= 0xe; i++) + state[i] = sil164_read(client, i); +} + +static void +sil164_restore_state(struct i2c_client *client, uint8_t *state) +{ + int i; + + for (i = 0x8; i <= 0xe; i++) + sil164_write(client, i, state[i]); +} + +static void +sil164_set_power_state(struct i2c_client *client, bool on) +{ + uint8_t control0 = sil164_read(client, SIL164_CONTROL0); + + if (on) + control0 |= SIL164_CONTROL0_POWER_ON; + else + control0 &= ~SIL164_CONTROL0_POWER_ON; + + sil164_write(client, SIL164_CONTROL0, control0); +} + +static void +sil164_init_state(struct i2c_client *client, + struct sil164_encoder_params *config, + bool duallink) +{ + sil164_write(client, SIL164_CONTROL0, + SIL164_CONTROL0_HSYNC_ON | + SIL164_CONTROL0_VSYNC_ON | + (config->input_edge ? SIL164_CONTROL0_EDGE_RISING : 0) | + (config->input_width ? SIL164_CONTROL0_INPUT_24BIT : 0) | + (config->input_dual ? 
SIL164_CONTROL0_DUAL_EDGE : 0)); + + sil164_write(client, SIL164_DETECT, + SIL164_DETECT_INTR_STAT | + SIL164_DETECT_OUT_MODE_RECEIVER); + + sil164_write(client, SIL164_CONTROL1, + (config->input_skew ? SIL164_CONTROL1_DESKEW_ENABLE : 0) | + (((config->input_skew + 4) & 0x7) + << SIL164_CONTROL1_DESKEW_INCR_SHIFT)); + + sil164_write(client, SIL164_CONTROL2, + SIL164_CONTROL2_SYNC_CONT | + (config->pll_filter ? 0 : SIL164_CONTROL2_FILTER_ENABLE) | + (4 << SIL164_CONTROL2_FILTER_SETTING_SHIFT)); + + sil164_write(client, SIL164_PLLZONE, 0); + + if (duallink) + sil164_write(client, SIL164_DUALLINK, + SIL164_DUALLINK_ENABLE | + (((config->duallink_skew + 4) & 0x7) + << SIL164_DUALLINK_SKEW_SHIFT)); + else + sil164_write(client, SIL164_DUALLINK, 0); +} + +/* DRM encoder functions */ + +static void +sil164_encoder_set_config(struct drm_encoder *encoder, void *params) +{ + struct sil164_priv *priv = to_sil164_priv(encoder); + + priv->config = *(struct sil164_encoder_params *)params; +} + +static void +sil164_encoder_dpms(struct drm_encoder *encoder, int mode) +{ + struct sil164_priv *priv = to_sil164_priv(encoder); + bool on = (mode == DRM_MODE_DPMS_ON); + bool duallink = (on && encoder->crtc->mode.clock > 165000); + + sil164_set_power_state(drm_i2c_encoder_get_client(encoder), on); + + if (priv->duallink_slave) + sil164_set_power_state(priv->duallink_slave, duallink); +} + +static void +sil164_encoder_save(struct drm_encoder *encoder) +{ + struct sil164_priv *priv = to_sil164_priv(encoder); + + sil164_save_state(drm_i2c_encoder_get_client(encoder), + priv->saved_state); + + if (priv->duallink_slave) + sil164_save_state(priv->duallink_slave, + priv->saved_slave_state); +} + +static void +sil164_encoder_restore(struct drm_encoder *encoder) +{ + struct sil164_priv *priv = to_sil164_priv(encoder); + + sil164_restore_state(drm_i2c_encoder_get_client(encoder), + priv->saved_state); + + if (priv->duallink_slave) + sil164_restore_state(priv->duallink_slave, + priv->saved_slave_state); +} + +static bool +sil164_encoder_mode_fixup(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static int +sil164_encoder_mode_valid(struct drm_encoder *encoder, + struct drm_display_mode *mode) +{ + struct sil164_priv *priv = to_sil164_priv(encoder); + + if (mode->clock < 32000) + return MODE_CLOCK_LOW; + + if (mode->clock > 330000 || + (mode->clock > 165000 && !priv->duallink_slave)) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +static void +sil164_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + struct sil164_priv *priv = to_sil164_priv(encoder); + bool duallink = adjusted_mode->clock > 165000; + + sil164_init_state(drm_i2c_encoder_get_client(encoder), + &priv->config, duallink); + + if (priv->duallink_slave) + sil164_init_state(priv->duallink_slave, + &priv->config, duallink); + + sil164_encoder_dpms(encoder, DRM_MODE_DPMS_ON); +} + +static enum drm_connector_status +sil164_encoder_detect(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + struct i2c_client *client = drm_i2c_encoder_get_client(encoder); + + if (sil164_read(client, SIL164_DETECT) & SIL164_DETECT_HOTPLUG_STAT) + return connector_status_connected; + else + return connector_status_disconnected; +} + +static int +sil164_encoder_get_modes(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + return 0; +} + +static int +sil164_encoder_create_resources(struct drm_encoder 
*encoder, + struct drm_connector *connector) +{ + return 0; +} + +static int +sil164_encoder_set_property(struct drm_encoder *encoder, + struct drm_connector *connector, + struct drm_property *property, + uint64_t val) +{ + return 0; +} + +static void +sil164_encoder_destroy(struct drm_encoder *encoder) +{ + struct sil164_priv *priv = to_sil164_priv(encoder); + + if (priv->duallink_slave) + i2c_unregister_device(priv->duallink_slave); + + kfree(priv); + drm_i2c_encoder_destroy(encoder); +} + +static struct drm_encoder_slave_funcs sil164_encoder_funcs = { + .set_config = sil164_encoder_set_config, + .destroy = sil164_encoder_destroy, + .dpms = sil164_encoder_dpms, + .save = sil164_encoder_save, + .restore = sil164_encoder_restore, + .mode_fixup = sil164_encoder_mode_fixup, + .mode_valid = sil164_encoder_mode_valid, + .mode_set = sil164_encoder_mode_set, + .detect = sil164_encoder_detect, + .get_modes = sil164_encoder_get_modes, + .create_resources = sil164_encoder_create_resources, + .set_property = sil164_encoder_set_property, +}; + +/* I2C driver functions */ + +static int +sil164_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + int vendor = sil164_read(client, SIL164_VENDOR_HI) << 8 | + sil164_read(client, SIL164_VENDOR_LO); + int device = sil164_read(client, SIL164_DEVICE_HI) << 8 | + sil164_read(client, SIL164_DEVICE_LO); + int rev = sil164_read(client, SIL164_REVISION); + + if (vendor != 0x1 || device != 0x6) { + sil164_dbg(client, "Unknown device %x:%x.%x\n", + vendor, device, rev); + return -ENODEV; + } + + sil164_info(client, "Detected device %x:%x.%x\n", + vendor, device, rev); + + return 0; +} + +static int +sil164_remove(struct i2c_client *client) +{ + return 0; +} + +static struct i2c_client * +sil164_detect_slave(struct i2c_client *client) +{ + struct i2c_adapter *adap = client->adapter; + struct i2c_msg msg = { + .addr = SIL164_I2C_ADDR_SLAVE, + .len = 0, + }; + const struct i2c_board_info info = { + I2C_BOARD_INFO("sil164", SIL164_I2C_ADDR_SLAVE) + }; + + if (i2c_transfer(adap, &msg, 1) != 1) { + sil164_dbg(adap, "No dual-link slave found."); + return NULL; + } + + return i2c_new_device(adap, &info); +} + +static int +sil164_encoder_init(struct i2c_client *client, + struct drm_device *dev, + struct drm_encoder_slave *encoder) +{ + struct sil164_priv *priv; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + encoder->slave_priv = priv; + encoder->slave_funcs = &sil164_encoder_funcs; + + priv->duallink_slave = sil164_detect_slave(client); + + return 0; +} + +static struct i2c_device_id sil164_ids[] = { + { "sil164", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, sil164_ids); + +static struct drm_i2c_encoder_driver sil164_driver = { + .i2c_driver = { + .probe = sil164_probe, + .remove = sil164_remove, + .driver = { + .name = "sil164", + }, + .id_table = sil164_ids, + }, + .encoder_init = sil164_encoder_init, +}; + +/* Module initialization */ + +static int __init +sil164_init(void) +{ + return drm_i2c_encoder_register(THIS_MODULE, &sil164_driver); +} + +static void __exit +sil164_exit(void) +{ + drm_i2c_encoder_unregister(&sil164_driver); +} + +MODULE_AUTHOR("Francisco Jerez <currojerez@riseup.net>"); +MODULE_DESCRIPTION("Silicon Image sil164 TMDS transmitter driver"); +MODULE_LICENSE("GPL and additional rights"); + +module_init(sil164_init); +module_exit(sil164_exit); diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index 997d91707ad..0e6c131313d 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ 
b/drivers/gpu/drm/i810/i810_dma.c @@ -37,6 +37,7 @@ #include <linux/interrupt.h> /* For task queue support */ #include <linux/delay.h> #include <linux/slab.h> +#include <linux/smp_lock.h> #include <linux/pagemap.h> #define I810_BUF_FREE 2 @@ -60,9 +61,8 @@ static struct drm_buf *i810_freelist_get(struct drm_device * dev) /* In use is already a pointer */ used = cmpxchg(buf_priv->in_use, I810_BUF_FREE, I810_BUF_CLIENT); - if (used == I810_BUF_FREE) { + if (used == I810_BUF_FREE) return buf; - } } return NULL; } @@ -71,7 +71,7 @@ static struct drm_buf *i810_freelist_get(struct drm_device * dev) * yet, the hardware updates in use for us once its on the ring buffer. */ -static int i810_freelist_put(struct drm_device * dev, struct drm_buf * buf) +static int i810_freelist_put(struct drm_device *dev, struct drm_buf *buf) { drm_i810_buf_priv_t *buf_priv = buf->dev_private; int used; @@ -121,7 +121,7 @@ static const struct file_operations i810_buffer_fops = { .fasync = drm_fasync, }; -static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv) +static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv) { struct drm_device *dev = file_priv->minor->dev; drm_i810_buf_priv_t *buf_priv = buf->dev_private; @@ -152,7 +152,7 @@ static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv) return retcode; } -static int i810_unmap_buffer(struct drm_buf * buf) +static int i810_unmap_buffer(struct drm_buf *buf) { drm_i810_buf_priv_t *buf_priv = buf->dev_private; int retcode = 0; @@ -172,7 +172,7 @@ static int i810_unmap_buffer(struct drm_buf * buf) return retcode; } -static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d, +static int i810_dma_get_buffer(struct drm_device *dev, drm_i810_dma_t *d, struct drm_file *file_priv) { struct drm_buf *buf; @@ -202,7 +202,7 @@ static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d, return retcode; } -static int i810_dma_cleanup(struct drm_device * dev) +static int i810_dma_cleanup(struct drm_device *dev) { struct drm_device_dma *dma = dev->dma; @@ -218,9 +218,8 @@ static int i810_dma_cleanup(struct drm_device * dev) drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; - if (dev_priv->ring.virtual_start) { + if (dev_priv->ring.virtual_start) drm_core_ioremapfree(&dev_priv->ring.map, dev); - } if (dev_priv->hw_status_page) { pci_free_consistent(dev->pdev, PAGE_SIZE, dev_priv->hw_status_page, @@ -242,7 +241,7 @@ static int i810_dma_cleanup(struct drm_device * dev) return 0; } -static int i810_wait_ring(struct drm_device * dev, int n) +static int i810_wait_ring(struct drm_device *dev, int n) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_ring_buffer_t *ring = &(dev_priv->ring); @@ -271,11 +270,11 @@ static int i810_wait_ring(struct drm_device * dev, int n) udelay(1); } - out_wait_ring: +out_wait_ring: return iters; } -static void i810_kernel_lost_context(struct drm_device * dev) +static void i810_kernel_lost_context(struct drm_device *dev) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_ring_buffer_t *ring = &(dev_priv->ring); @@ -287,7 +286,7 @@ static void i810_kernel_lost_context(struct drm_device * dev) ring->space += ring->Size; } -static int i810_freelist_init(struct drm_device * dev, drm_i810_private_t * dev_priv) +static int i810_freelist_init(struct drm_device *dev, drm_i810_private_t *dev_priv) { struct drm_device_dma *dma = dev->dma; int my_idx = 24; @@ -322,9 +321,9 @@ static int i810_freelist_init(struct drm_device * dev, 
drm_i810_private_t * dev_ return 0; } -static int i810_dma_initialize(struct drm_device * dev, - drm_i810_private_t * dev_priv, - drm_i810_init_t * init) +static int i810_dma_initialize(struct drm_device *dev, + drm_i810_private_t *dev_priv, + drm_i810_init_t *init) { struct drm_map_list *r_list; memset(dev_priv, 0, sizeof(drm_i810_private_t)); @@ -462,7 +461,7 @@ static int i810_dma_init(struct drm_device *dev, void *data, * Use 'volatile' & local var tmp to force the emitted values to be * identical to the verified ones. */ -static void i810EmitContextVerified(struct drm_device * dev, +static void i810EmitContextVerified(struct drm_device *dev, volatile unsigned int *code) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -495,7 +494,7 @@ static void i810EmitContextVerified(struct drm_device * dev, ADVANCE_LP_RING(); } -static void i810EmitTexVerified(struct drm_device * dev, volatile unsigned int *code) +static void i810EmitTexVerified(struct drm_device *dev, volatile unsigned int *code) { drm_i810_private_t *dev_priv = dev->dev_private; int i, j = 0; @@ -528,7 +527,7 @@ static void i810EmitTexVerified(struct drm_device * dev, volatile unsigned int * /* Need to do some additional checking when setting the dest buffer. */ -static void i810EmitDestVerified(struct drm_device * dev, +static void i810EmitDestVerified(struct drm_device *dev, volatile unsigned int *code) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -563,7 +562,7 @@ static void i810EmitDestVerified(struct drm_device * dev, ADVANCE_LP_RING(); } -static void i810EmitState(struct drm_device * dev) +static void i810EmitState(struct drm_device *dev) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -594,7 +593,7 @@ static void i810EmitState(struct drm_device * dev) /* need to verify */ -static void i810_dma_dispatch_clear(struct drm_device * dev, int flags, +static void i810_dma_dispatch_clear(struct drm_device *dev, int flags, unsigned int clear_color, unsigned int clear_zval) { @@ -669,7 +668,7 @@ static void i810_dma_dispatch_clear(struct drm_device * dev, int flags, } } -static void i810_dma_dispatch_swap(struct drm_device * dev) +static void i810_dma_dispatch_swap(struct drm_device *dev) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -715,8 +714,8 @@ static void i810_dma_dispatch_swap(struct drm_device * dev) } } -static void i810_dma_dispatch_vertex(struct drm_device * dev, - struct drm_buf * buf, int discard, int used) +static void i810_dma_dispatch_vertex(struct drm_device *dev, + struct drm_buf *buf, int discard, int used) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_buf_priv_t *buf_priv = buf->dev_private; @@ -795,7 +794,7 @@ static void i810_dma_dispatch_vertex(struct drm_device * dev, } } -static void i810_dma_dispatch_flip(struct drm_device * dev) +static void i810_dma_dispatch_flip(struct drm_device *dev) { drm_i810_private_t *dev_priv = dev->dev_private; int pitch = dev_priv->pitch; @@ -841,7 +840,7 @@ static void i810_dma_dispatch_flip(struct drm_device * dev) } -static void i810_dma_quiescent(struct drm_device * dev) +static void i810_dma_quiescent(struct drm_device *dev) { drm_i810_private_t *dev_priv = dev->dev_private; RING_LOCALS; @@ -858,7 +857,7 @@ static void i810_dma_quiescent(struct drm_device * dev) i810_wait_ring(dev, dev_priv->ring.Size - 8); } -static int i810_flush_queue(struct drm_device * dev) +static int i810_flush_queue(struct drm_device *dev) { 
drm_i810_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; @@ -891,7 +890,7 @@ static int i810_flush_queue(struct drm_device * dev) } /* Must be called with the lock held */ -static void i810_reclaim_buffers(struct drm_device * dev, +static void i810_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; @@ -969,9 +968,8 @@ static int i810_clear_bufs(struct drm_device *dev, void *data, LOCK_TEST_WITH_RETURN(dev, file_priv); /* GH: Someone's doing nasty things... */ - if (!dev->dev_private) { + if (!dev->dev_private) return -EINVAL; - } i810_dma_dispatch_clear(dev, clear->flags, clear->clear_color, clear->clear_depth); @@ -1039,7 +1037,7 @@ static int i810_docopy(struct drm_device *dev, void *data, return 0; } -static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf, int used, +static void i810_dma_dispatch_mc(struct drm_device *dev, struct drm_buf *buf, int used, unsigned int last_render) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -1053,9 +1051,8 @@ static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf, i810_kernel_lost_context(dev); u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_HARDWARE); - if (u != I810_BUF_CLIENT) { + if (u != I810_BUF_CLIENT) DRM_DEBUG("MC found buffer that isn't mine!\n"); - } if (used > 4 * 1024) used = 0; @@ -1160,7 +1157,7 @@ static int i810_ov0_flip(struct drm_device *dev, void *data, LOCK_TEST_WITH_RETURN(dev, file_priv); - //Tell the overlay to update + /* Tell the overlay to update */ I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000); return 0; @@ -1168,7 +1165,7 @@ static int i810_ov0_flip(struct drm_device *dev, void *data, /* Not sure why this isn't set all the time: */ -static void i810_do_init_pageflip(struct drm_device * dev) +static void i810_do_init_pageflip(struct drm_device *dev) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -1178,7 +1175,7 @@ static void i810_do_init_pageflip(struct drm_device * dev) dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; } -static int i810_do_cleanup_pageflip(struct drm_device * dev) +static int i810_do_cleanup_pageflip(struct drm_device *dev) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -1218,49 +1215,61 @@ int i810_driver_load(struct drm_device *dev, unsigned long flags) return 0; } -void i810_driver_lastclose(struct drm_device * dev) +void i810_driver_lastclose(struct drm_device *dev) { i810_dma_cleanup(dev); } -void i810_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) +void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv) { if (dev->dev_private) { drm_i810_private_t *dev_priv = dev->dev_private; - if (dev_priv->page_flipping) { + if (dev_priv->page_flipping) i810_do_cleanup_pageflip(dev); - } } } -void i810_driver_reclaim_buffers_locked(struct drm_device * dev, +void i810_driver_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv) { i810_reclaim_buffers(dev, file_priv); } -int i810_driver_dma_quiescent(struct drm_device * dev) +int i810_driver_dma_quiescent(struct drm_device *dev) { i810_dma_quiescent(dev); return 0; } +/* + * call the drm_ioctl under the big kernel lock because + * to lock against the i810_mmap_buffers function. 
+ */ +long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int ret; + lock_kernel(); + ret = drm_ioctl(file, cmd, arg); + unlock_kernel(); + return ret; +} + struct drm_ioctl_desc i810_ioctls[] = { - DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH) + DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED), }; int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); @@ -1276,7 +1285,7 @@ int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); * \returns * A value of 1 is always retured to indictate every i810 is AGP. 
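The locking scheme introduced just above is worth spelling out: the DRM core of this era wraps an ioctl handler in the Big Kernel Lock only when the table entry lacks DRM_UNLOCKED, so marking every i810 entry DRM_UNLOCKED and taking the BKL once in the driver's own unlocked_ioctl hook keeps the ioctl path serialized against i810_mmap_buffers(), as the comment in the hunk says, without taking the lock twice. A minimal sketch of the resulting call path, condensed from the hunks above (assuming the contemporary drm_ioctl() behaviour; this is not new driver code):

	#include <linux/smp_lock.h>	/* lock_kernel(), unlock_kernel() */
	#include "drmP.h"		/* drm_ioctl(), DRM_IOCTL_DEF, DRM_UNLOCKED */

	/*
	 * 1. i810_drv.c points .unlocked_ioctl at this wrapper instead of
	 *    drm_ioctl(), so the BKL is taken here, once per ioctl.
	 * 2. Every i810_ioctls[] entry carries DRM_UNLOCKED, so drm_ioctl()
	 *    dispatches the handler without taking the BKL again.
	 */
	long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	{
		long ret;

		lock_kernel();			/* serialize against i810_mmap_buffers() */
		ret = drm_ioctl(file, cmd, arg);
		unlock_kernel();
		return ret;
	}

The i830 half of the patch, further below, applies exactly the same wrapper-plus-DRM_UNLOCKED pattern.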
*/ -int i810_driver_device_is_agp(struct drm_device * dev) +int i810_driver_device_is_agp(struct drm_device *dev) { return 1; } diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c index c1e02752e02..b4250b2cac1 100644 --- a/drivers/gpu/drm/i810/i810_drv.c +++ b/drivers/gpu/drm/i810/i810_drv.c @@ -59,7 +59,7 @@ static struct drm_driver driver = { .owner = THIS_MODULE, .open = drm_open, .release = drm_release, - .unlocked_ioctl = drm_ioctl, + .unlocked_ioctl = i810_ioctl, .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h index 21e2691f28f..c9339f48179 100644 --- a/drivers/gpu/drm/i810/i810_drv.h +++ b/drivers/gpu/drm/i810/i810_drv.h @@ -115,56 +115,59 @@ typedef struct drm_i810_private { } drm_i810_private_t; /* i810_dma.c */ -extern int i810_driver_dma_quiescent(struct drm_device * dev); -extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev, +extern int i810_driver_dma_quiescent(struct drm_device *dev); +extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv); extern int i810_driver_load(struct drm_device *, unsigned long flags); -extern void i810_driver_lastclose(struct drm_device * dev); -extern void i810_driver_preclose(struct drm_device * dev, +extern void i810_driver_lastclose(struct drm_device *dev); +extern void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv); -extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev, +extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv); -extern int i810_driver_device_is_agp(struct drm_device * dev); +extern int i810_driver_device_is_agp(struct drm_device *dev); +extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg); extern struct drm_ioctl_desc i810_ioctls[]; extern int i810_max_ioctl; #define I810_BASE(reg) ((unsigned long) \ dev_priv->mmio_map->handle) #define I810_ADDR(reg) (I810_BASE(reg) + reg) -#define I810_DEREF(reg) *(__volatile__ int *)I810_ADDR(reg) +#define I810_DEREF(reg) (*(__volatile__ int *)I810_ADDR(reg)) #define I810_READ(reg) I810_DEREF(reg) -#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0) -#define I810_DEREF16(reg) *(__volatile__ u16 *)I810_ADDR(reg) +#define I810_WRITE(reg, val) do { I810_DEREF(reg) = val; } while (0) +#define I810_DEREF16(reg) (*(__volatile__ u16 *)I810_ADDR(reg)) #define I810_READ16(reg) I810_DEREF16(reg) -#define I810_WRITE16(reg,val) do { I810_DEREF16(reg) = val; } while (0) +#define I810_WRITE16(reg, val) do { I810_DEREF16(reg) = val; } while (0) #define I810_VERBOSE 0 #define RING_LOCALS unsigned int outring, ringmask; \ - volatile char *virt; - -#define BEGIN_LP_RING(n) do { \ - if (I810_VERBOSE) \ - DRM_DEBUG("BEGIN_LP_RING(%d)\n", n); \ - if (dev_priv->ring.space < n*4) \ - i810_wait_ring(dev, n*4); \ - dev_priv->ring.space -= n*4; \ - outring = dev_priv->ring.tail; \ - ringmask = dev_priv->ring.tail_mask; \ - virt = dev_priv->ring.virtual_start; \ + volatile char *virt; + +#define BEGIN_LP_RING(n) do { \ + if (I810_VERBOSE) \ + DRM_DEBUG("BEGIN_LP_RING(%d)\n", n); \ + if (dev_priv->ring.space < n*4) \ + i810_wait_ring(dev, n*4); \ + dev_priv->ring.space -= n*4; \ + outring = dev_priv->ring.tail; \ + ringmask = dev_priv->ring.tail_mask; \ + virt = dev_priv->ring.virtual_start; \ } while (0) -#define ADVANCE_LP_RING() do { \ - if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \ +#define ADVANCE_LP_RING() 
do { \ + if (I810_VERBOSE) \ + DRM_DEBUG("ADVANCE_LP_RING\n"); \ dev_priv->ring.tail = outring; \ - I810_WRITE(LP_RING + RING_TAIL, outring); \ -} while(0) - -#define OUT_RING(n) do { \ - if (I810_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ - *(volatile unsigned int *)(virt + outring) = n; \ - outring += 4; \ - outring &= ringmask; \ + I810_WRITE(LP_RING + RING_TAIL, outring); \ +} while (0) + +#define OUT_RING(n) do { \ + if (I810_VERBOSE) \ + DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \ + *(volatile unsigned int *)(virt + outring) = n; \ + outring += 4; \ + outring &= ringmask; \ } while (0) #define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23)) diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c index 65759a9a85c..5168862c922 100644 --- a/drivers/gpu/drm/i830/i830_dma.c +++ b/drivers/gpu/drm/i830/i830_dma.c @@ -36,6 +36,7 @@ #include "i830_drm.h" #include "i830_drv.h" #include <linux/interrupt.h> /* For task queue support */ +#include <linux/smp_lock.h> #include <linux/pagemap.h> #include <linux/delay.h> #include <linux/slab.h> @@ -62,9 +63,8 @@ static struct drm_buf *i830_freelist_get(struct drm_device * dev) /* In use is already a pointer */ used = cmpxchg(buf_priv->in_use, I830_BUF_FREE, I830_BUF_CLIENT); - if (used == I830_BUF_FREE) { + if (used == I830_BUF_FREE) return buf; - } } return NULL; } @@ -73,7 +73,7 @@ static struct drm_buf *i830_freelist_get(struct drm_device * dev) * yet, the hardware updates in use for us once its on the ring buffer. */ -static int i830_freelist_put(struct drm_device * dev, struct drm_buf * buf) +static int i830_freelist_put(struct drm_device *dev, struct drm_buf *buf) { drm_i830_buf_priv_t *buf_priv = buf->dev_private; int used; @@ -123,7 +123,7 @@ static const struct file_operations i830_buffer_fops = { .fasync = drm_fasync, }; -static int i830_map_buffer(struct drm_buf * buf, struct drm_file *file_priv) +static int i830_map_buffer(struct drm_buf *buf, struct drm_file *file_priv) { struct drm_device *dev = file_priv->minor->dev; drm_i830_buf_priv_t *buf_priv = buf->dev_private; @@ -156,7 +156,7 @@ static int i830_map_buffer(struct drm_buf * buf, struct drm_file *file_priv) return retcode; } -static int i830_unmap_buffer(struct drm_buf * buf) +static int i830_unmap_buffer(struct drm_buf *buf) { drm_i830_buf_priv_t *buf_priv = buf->dev_private; int retcode = 0; @@ -176,7 +176,7 @@ static int i830_unmap_buffer(struct drm_buf * buf) return retcode; } -static int i830_dma_get_buffer(struct drm_device * dev, drm_i830_dma_t * d, +static int i830_dma_get_buffer(struct drm_device *dev, drm_i830_dma_t *d, struct drm_file *file_priv) { struct drm_buf *buf; @@ -206,7 +206,7 @@ static int i830_dma_get_buffer(struct drm_device * dev, drm_i830_dma_t * d, return retcode; } -static int i830_dma_cleanup(struct drm_device * dev) +static int i830_dma_cleanup(struct drm_device *dev) { struct drm_device_dma *dma = dev->dma; @@ -222,9 +222,8 @@ static int i830_dma_cleanup(struct drm_device * dev) drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; - if (dev_priv->ring.virtual_start) { + if (dev_priv->ring.virtual_start) drm_core_ioremapfree(&dev_priv->ring.map, dev); - } if (dev_priv->hw_status_page) { pci_free_consistent(dev->pdev, PAGE_SIZE, dev_priv->hw_status_page, @@ -246,7 +245,7 @@ static int i830_dma_cleanup(struct drm_device * dev) return 0; } -int i830_wait_ring(struct drm_device * dev, int n, const char *caller) +int i830_wait_ring(struct drm_device *dev, int n, const char *caller) { drm_i830_private_t *dev_priv = 
dev->dev_private; drm_i830_ring_buffer_t *ring = &(dev_priv->ring); @@ -276,11 +275,11 @@ int i830_wait_ring(struct drm_device * dev, int n, const char *caller) dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT; } - out_wait_ring: +out_wait_ring: return iters; } -static void i830_kernel_lost_context(struct drm_device * dev) +static void i830_kernel_lost_context(struct drm_device *dev) { drm_i830_private_t *dev_priv = dev->dev_private; drm_i830_ring_buffer_t *ring = &(dev_priv->ring); @@ -295,7 +294,7 @@ static void i830_kernel_lost_context(struct drm_device * dev) dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY; } -static int i830_freelist_init(struct drm_device * dev, drm_i830_private_t * dev_priv) +static int i830_freelist_init(struct drm_device *dev, drm_i830_private_t *dev_priv) { struct drm_device_dma *dma = dev->dma; int my_idx = 36; @@ -329,9 +328,9 @@ static int i830_freelist_init(struct drm_device * dev, drm_i830_private_t * dev_ return 0; } -static int i830_dma_initialize(struct drm_device * dev, - drm_i830_private_t * dev_priv, - drm_i830_init_t * init) +static int i830_dma_initialize(struct drm_device *dev, + drm_i830_private_t *dev_priv, + drm_i830_init_t *init) { struct drm_map_list *r_list; @@ -482,7 +481,7 @@ static int i830_dma_init(struct drm_device *dev, void *data, /* Most efficient way to verify state for the i830 is as it is * emitted. Non-conformant state is silently dropped. */ -static void i830EmitContextVerified(struct drm_device * dev, unsigned int *code) +static void i830EmitContextVerified(struct drm_device *dev, unsigned int *code) { drm_i830_private_t *dev_priv = dev->dev_private; int i, j = 0; @@ -527,7 +526,7 @@ static void i830EmitContextVerified(struct drm_device * dev, unsigned int *code) ADVANCE_LP_RING(); } -static void i830EmitTexVerified(struct drm_device * dev, unsigned int *code) +static void i830EmitTexVerified(struct drm_device *dev, unsigned int *code) { drm_i830_private_t *dev_priv = dev->dev_private; int i, j = 0; @@ -561,7 +560,7 @@ static void i830EmitTexVerified(struct drm_device * dev, unsigned int *code) printk("rejected packet %x\n", code[0]); } -static void i830EmitTexBlendVerified(struct drm_device * dev, +static void i830EmitTexBlendVerified(struct drm_device *dev, unsigned int *code, unsigned int num) { drm_i830_private_t *dev_priv = dev->dev_private; @@ -586,7 +585,7 @@ static void i830EmitTexBlendVerified(struct drm_device * dev, ADVANCE_LP_RING(); } -static void i830EmitTexPalette(struct drm_device * dev, +static void i830EmitTexPalette(struct drm_device *dev, unsigned int *palette, int number, int is_shared) { drm_i830_private_t *dev_priv = dev->dev_private; @@ -603,9 +602,8 @@ static void i830EmitTexPalette(struct drm_device * dev, } else { OUT_RING(CMD_OP_MAP_PALETTE_LOAD | MAP_PALETTE_NUM(number)); } - for (i = 0; i < 256; i++) { + for (i = 0; i < 256; i++) OUT_RING(palette[i]); - } OUT_RING(0); /* KW: WHERE IS THE ADVANCE_LP_RING? This is effectively a noop! */ @@ -613,7 +611,7 @@ static void i830EmitTexPalette(struct drm_device * dev, /* Need to do some additional checking when setting the dest buffer. 
*/ -static void i830EmitDestVerified(struct drm_device * dev, unsigned int *code) +static void i830EmitDestVerified(struct drm_device *dev, unsigned int *code) { drm_i830_private_t *dev_priv = dev->dev_private; unsigned int tmp; @@ -674,7 +672,7 @@ static void i830EmitDestVerified(struct drm_device * dev, unsigned int *code) ADVANCE_LP_RING(); } -static void i830EmitStippleVerified(struct drm_device * dev, unsigned int *code) +static void i830EmitStippleVerified(struct drm_device *dev, unsigned int *code) { drm_i830_private_t *dev_priv = dev->dev_private; RING_LOCALS; @@ -685,7 +683,7 @@ static void i830EmitStippleVerified(struct drm_device * dev, unsigned int *code) ADVANCE_LP_RING(); } -static void i830EmitState(struct drm_device * dev) +static void i830EmitState(struct drm_device *dev) { drm_i830_private_t *dev_priv = dev->dev_private; drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -788,7 +786,7 @@ static void i830EmitState(struct drm_device * dev) * Performance monitoring functions */ -static void i830_fill_box(struct drm_device * dev, +static void i830_fill_box(struct drm_device *dev, int x, int y, int w, int h, int r, int g, int b) { drm_i830_private_t *dev_priv = dev->dev_private; @@ -816,17 +814,16 @@ static void i830_fill_box(struct drm_device * dev, OUT_RING((y << 16) | x); OUT_RING(((y + h) << 16) | (x + w)); - if (dev_priv->current_page == 1) { + if (dev_priv->current_page == 1) OUT_RING(dev_priv->front_offset); - } else { + else OUT_RING(dev_priv->back_offset); - } OUT_RING(color); ADVANCE_LP_RING(); } -static void i830_cp_performance_boxes(struct drm_device * dev) +static void i830_cp_performance_boxes(struct drm_device *dev) { drm_i830_private_t *dev_priv = dev->dev_private; @@ -871,7 +868,7 @@ static void i830_cp_performance_boxes(struct drm_device * dev) dev_priv->sarea_priv->perf_boxes = 0; } -static void i830_dma_dispatch_clear(struct drm_device * dev, int flags, +static void i830_dma_dispatch_clear(struct drm_device *dev, int flags, unsigned int clear_color, unsigned int clear_zval, unsigned int clear_depthmask) @@ -966,7 +963,7 @@ static void i830_dma_dispatch_clear(struct drm_device * dev, int flags, } } -static void i830_dma_dispatch_swap(struct drm_device * dev) +static void i830_dma_dispatch_swap(struct drm_device *dev) { drm_i830_private_t *dev_priv = dev->dev_private; drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -1036,7 +1033,7 @@ static void i830_dma_dispatch_swap(struct drm_device * dev) } } -static void i830_dma_dispatch_flip(struct drm_device * dev) +static void i830_dma_dispatch_flip(struct drm_device *dev) { drm_i830_private_t *dev_priv = dev->dev_private; RING_LOCALS; @@ -1079,8 +1076,8 @@ static void i830_dma_dispatch_flip(struct drm_device * dev) dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; } -static void i830_dma_dispatch_vertex(struct drm_device * dev, - struct drm_buf * buf, int discard, int used) +static void i830_dma_dispatch_vertex(struct drm_device *dev, + struct drm_buf *buf, int discard, int used) { drm_i830_private_t *dev_priv = dev->dev_private; drm_i830_buf_priv_t *buf_priv = buf->dev_private; @@ -1100,9 +1097,8 @@ static void i830_dma_dispatch_vertex(struct drm_device * dev, if (discard) { u = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, I830_BUF_HARDWARE); - if (u != I830_BUF_CLIENT) { + if (u != I830_BUF_CLIENT) DRM_DEBUG("xxxx 2\n"); - } } if (used > 4 * 1023) @@ -1191,7 +1187,7 @@ static void i830_dma_dispatch_vertex(struct drm_device * dev, } } -static void i830_dma_quiescent(struct drm_device * dev) 
+static void i830_dma_quiescent(struct drm_device *dev) { drm_i830_private_t *dev_priv = dev->dev_private; RING_LOCALS; @@ -1208,7 +1204,7 @@ static void i830_dma_quiescent(struct drm_device * dev) i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__); } -static int i830_flush_queue(struct drm_device * dev) +static int i830_flush_queue(struct drm_device *dev) { drm_i830_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; @@ -1241,7 +1237,7 @@ static int i830_flush_queue(struct drm_device * dev) } /* Must be called with the lock held */ -static void i830_reclaim_buffers(struct drm_device * dev, struct drm_file *file_priv) +static void i830_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; int i; @@ -1316,9 +1312,8 @@ static int i830_clear_bufs(struct drm_device *dev, void *data, LOCK_TEST_WITH_RETURN(dev, file_priv); /* GH: Someone's doing nasty things... */ - if (!dev->dev_private) { + if (!dev->dev_private) return -EINVAL; - } i830_dma_dispatch_clear(dev, clear->flags, clear->clear_color, @@ -1339,7 +1334,7 @@ static int i830_swap_bufs(struct drm_device *dev, void *data, /* Not sure why this isn't set all the time: */ -static void i830_do_init_pageflip(struct drm_device * dev) +static void i830_do_init_pageflip(struct drm_device *dev) { drm_i830_private_t *dev_priv = dev->dev_private; @@ -1349,7 +1344,7 @@ static void i830_do_init_pageflip(struct drm_device * dev) dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; } -static int i830_do_cleanup_pageflip(struct drm_device * dev) +static int i830_do_cleanup_pageflip(struct drm_device *dev) { drm_i830_private_t *dev_priv = dev->dev_private; @@ -1490,47 +1485,59 @@ int i830_driver_load(struct drm_device *dev, unsigned long flags) return 0; } -void i830_driver_lastclose(struct drm_device * dev) +void i830_driver_lastclose(struct drm_device *dev) { i830_dma_cleanup(dev); } -void i830_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) +void i830_driver_preclose(struct drm_device *dev, struct drm_file *file_priv) { if (dev->dev_private) { drm_i830_private_t *dev_priv = dev->dev_private; - if (dev_priv->page_flipping) { + if (dev_priv->page_flipping) i830_do_cleanup_pageflip(dev); - } } } -void i830_driver_reclaim_buffers_locked(struct drm_device * dev, struct drm_file *file_priv) +void i830_driver_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv) { i830_reclaim_buffers(dev, file_priv); } -int i830_driver_dma_quiescent(struct drm_device * dev) +int i830_driver_dma_quiescent(struct drm_device *dev) { i830_dma_quiescent(dev); return 0; } +/* + * call the drm_ioctl under the big kernel lock because + * to lock against the i830_mmap_buffers function. 
+ */ +long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + int ret; + lock_kernel(); + ret = drm_ioctl(file, cmd, arg); + unlock_kernel(); + return ret; +} + struct drm_ioctl_desc i830_ioctls[] = { - DRM_IOCTL_DEF(DRM_I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_I830_VERTEX, i830_dma_vertex, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I830_CLEAR, i830_clear_bufs, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I830_FLUSH, i830_flush_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I830_GETAGE, i830_getage, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I830_GETBUF, i830_getbuf, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I830_SWAP, i830_swap_bufs, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I830_COPY, i830_copybuf, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I830_DOCOPY, i830_docopy, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I830_FLIP, i830_flip_bufs, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I830_GETPARAM, i830_getparam, DRM_AUTH), - DRM_IOCTL_DEF(DRM_I830_SETPARAM, i830_setparam, DRM_AUTH) + DRM_IOCTL_DEF(DRM_I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED), }; int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls); @@ -1546,7 +1553,7 @@ int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls); * \returns * A value of 1 is always retured to indictate every i8xx is AGP. 
*/ -int i830_driver_device_is_agp(struct drm_device * dev) +int i830_driver_device_is_agp(struct drm_device *dev) { return 1; } diff --git a/drivers/gpu/drm/i830/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c index 44f990bed8f..a5c66aa82f0 100644 --- a/drivers/gpu/drm/i830/i830_drv.c +++ b/drivers/gpu/drm/i830/i830_drv.c @@ -70,7 +70,7 @@ static struct drm_driver driver = { .owner = THIS_MODULE, .open = drm_open, .release = drm_release, - .unlocked_ioctl = drm_ioctl, + .unlocked_ioctl = i830_ioctl, .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h index da82afe4ded..0df1c720560 100644 --- a/drivers/gpu/drm/i830/i830_drv.h +++ b/drivers/gpu/drm/i830/i830_drv.h @@ -122,6 +122,7 @@ typedef struct drm_i830_private { } drm_i830_private_t; +long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg); extern struct drm_ioctl_desc i830_ioctls[]; extern int i830_max_ioctl; @@ -132,33 +133,33 @@ extern int i830_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv); extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS); -extern void i830_driver_irq_preinstall(struct drm_device * dev); -extern void i830_driver_irq_postinstall(struct drm_device * dev); -extern void i830_driver_irq_uninstall(struct drm_device * dev); +extern void i830_driver_irq_preinstall(struct drm_device *dev); +extern void i830_driver_irq_postinstall(struct drm_device *dev); +extern void i830_driver_irq_uninstall(struct drm_device *dev); extern int i830_driver_load(struct drm_device *, unsigned long flags); -extern void i830_driver_preclose(struct drm_device * dev, +extern void i830_driver_preclose(struct drm_device *dev, struct drm_file *file_priv); -extern void i830_driver_lastclose(struct drm_device * dev); -extern void i830_driver_reclaim_buffers_locked(struct drm_device * dev, +extern void i830_driver_lastclose(struct drm_device *dev); +extern void i830_driver_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv); -extern int i830_driver_dma_quiescent(struct drm_device * dev); -extern int i830_driver_device_is_agp(struct drm_device * dev); +extern int i830_driver_dma_quiescent(struct drm_device *dev); +extern int i830_driver_device_is_agp(struct drm_device *dev); -#define I830_READ(reg) DRM_READ32(dev_priv->mmio_map, reg) -#define I830_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, reg, val) -#define I830_READ16(reg) DRM_READ16(dev_priv->mmio_map, reg) -#define I830_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, reg, val) +#define I830_READ(reg) DRM_READ32(dev_priv->mmio_map, reg) +#define I830_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio_map, reg, val) +#define I830_READ16(reg) DRM_READ16(dev_priv->mmio_map, reg) +#define I830_WRITE16(reg, val) DRM_WRITE16(dev_priv->mmio_map, reg, val) #define I830_VERBOSE 0 #define RING_LOCALS unsigned int outring, ringmask, outcount; \ - volatile char *virt; + volatile char *virt; #define BEGIN_LP_RING(n) do { \ if (I830_VERBOSE) \ printk("BEGIN_LP_RING(%d)\n", (n)); \ if (dev_priv->ring.space < n*4) \ - i830_wait_ring(dev, n*4, __func__); \ + i830_wait_ring(dev, n*4, __func__); \ outcount = 0; \ outring = dev_priv->ring.tail; \ ringmask = dev_priv->ring.tail_mask; \ @@ -166,21 +167,23 @@ extern int i830_driver_device_is_agp(struct drm_device * dev); } while (0) #define OUT_RING(n) do { \ - if (I830_VERBOSE) printk(" OUT_RING %x\n", (int)(n)); \ + if (I830_VERBOSE) \ + printk(" OUT_RING %x\n", (int)(n)); \ *(volatile unsigned int *)(virt + outring) = n; 
\ - outcount++; \ + outcount++; \ outring += 4; \ outring &= ringmask; \ } while (0) -#define ADVANCE_LP_RING() do { \ - if (I830_VERBOSE) printk("ADVANCE_LP_RING %x\n", outring); \ - dev_priv->ring.tail = outring; \ - dev_priv->ring.space -= outcount * 4; \ - I830_WRITE(LP_RING + RING_TAIL, outring); \ -} while(0) +#define ADVANCE_LP_RING() do { \ + if (I830_VERBOSE) \ + printk("ADVANCE_LP_RING %x\n", outring); \ + dev_priv->ring.tail = outring; \ + dev_priv->ring.space -= outcount * 4; \ + I830_WRITE(LP_RING + RING_TAIL, outring); \ +} while (0) -extern int i830_wait_ring(struct drm_device * dev, int n, const char *caller); +extern int i830_wait_ring(struct drm_device *dev, int n, const char *caller); #define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23)) #define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23)) diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c index 91ec2bb497e..d1a6b95d631 100644 --- a/drivers/gpu/drm/i830/i830_irq.c +++ b/drivers/gpu/drm/i830/i830_irq.c @@ -53,7 +53,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS) return IRQ_HANDLED; } -static int i830_emit_irq(struct drm_device * dev) +static int i830_emit_irq(struct drm_device *dev) { drm_i830_private_t *dev_priv = dev->dev_private; RING_LOCALS; @@ -70,7 +70,7 @@ static int i830_emit_irq(struct drm_device * dev) return atomic_read(&dev_priv->irq_emitted); } -static int i830_wait_irq(struct drm_device * dev, int irq_nr) +static int i830_wait_irq(struct drm_device *dev, int irq_nr) { drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; DECLARE_WAITQUEUE(entry, current); @@ -156,7 +156,7 @@ int i830_irq_wait(struct drm_device *dev, void *data, /* drm_dma.h hooks */ -void i830_driver_irq_preinstall(struct drm_device * dev) +void i830_driver_irq_preinstall(struct drm_device *dev) { drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; @@ -168,14 +168,14 @@ void i830_driver_irq_preinstall(struct drm_device * dev) init_waitqueue_head(&dev_priv->irq_queue); } -void i830_driver_irq_postinstall(struct drm_device * dev) +void i830_driver_irq_postinstall(struct drm_device *dev) { drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; I830_WRITE16(I830REG_INT_ENABLE_R, 0x2); } -void i830_driver_irq_uninstall(struct drm_device * dev) +void i830_driver_irq_uninstall(struct drm_device *dev) { drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private; if (!dev_priv) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 2305a1234f1..f19ffe87af3 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -34,12 +34,15 @@ #include "i915_drm.h" #include "i915_drv.h" #include "i915_trace.h" +#include <linux/pci.h> #include <linux/vgaarb.h> #include <linux/acpi.h> #include <linux/pnp.h> #include <linux/vga_switcheroo.h> #include <linux/slab.h> +extern int intel_max_stolen; /* from AGP driver */ + /** * Sets up the hardware status page for devices that need a physical address * in the register. 
@@ -1256,7 +1259,7 @@ static void i915_setup_compression(struct drm_device *dev, int size) drm_mm_put_block(compressed_fb); } - if (!IS_GM45(dev)) { + if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) { compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096, 4096, 0); if (!compressed_llb) { @@ -1282,8 +1285,9 @@ static void i915_setup_compression(struct drm_device *dev, int size) intel_disable_fbc(dev); dev_priv->compressed_fb = compressed_fb; - - if (IS_GM45(dev)) { + if (IS_IRONLAKE_M(dev)) + I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); + else if (IS_GM45(dev)) { I915_WRITE(DPFC_CB_BASE, compressed_fb->start); } else { I915_WRITE(FBC_CFB_BASE, cfb_base); @@ -1291,7 +1295,7 @@ static void i915_setup_compression(struct drm_device *dev, int size) dev_priv->compressed_llb = compressed_llb; } - DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base, + DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base, ll_base, size >> 20); } @@ -1354,7 +1358,7 @@ static int i915_load_modeset_init(struct drm_device *dev, int fb_bar = IS_I9XX(dev) ? 2 : 0; int ret = 0; - dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) & + dev->mode_config.fb_base = pci_resource_start(dev->pdev, fb_bar) & 0xff000000; /* Basic memrange allocator for stolen space (aka vram) */ @@ -2063,8 +2067,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) /* Add register map (needed for suspend/resume) */ mmio_bar = IS_I9XX(dev) ? 0 : 1; - base = drm_get_resource_start(dev, mmio_bar); - size = drm_get_resource_len(dev, mmio_bar); + base = pci_resource_start(dev->pdev, mmio_bar); + size = pci_resource_len(dev->pdev, mmio_bar); if (i915_get_bridge_dev(dev)) { ret = -EIO; @@ -2104,6 +2108,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (ret) goto out_iomapfree; + if (prealloc_size > intel_max_stolen) { + DRM_INFO("detected %dM stolen memory, trimming to %dM\n", + prealloc_size >> 20, intel_max_stolen >> 20); + prealloc_size = intel_max_stolen; + } + dev_priv->wq = create_singlethread_workqueue("i915"); if (dev_priv->wq == NULL) { DRM_ERROR("Failed to create our workqueue.\n"); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 423dc90c1e2..5044f653e8e 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -93,11 +93,11 @@ static const struct intel_device_info intel_i945gm_info = { }; static const struct intel_device_info intel_i965g_info = { - .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1, + .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1, }; static const struct intel_device_info intel_i965gm_info = { - .is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1, + .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1, }; @@ -114,7 +114,7 @@ static const struct intel_device_info intel_g45_info = { }; static const struct intel_device_info intel_gm45_info = { - .is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1, + .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, @@ -134,7 +134,7 @@ static const struct intel_device_info intel_ironlake_d_info = { static const struct intel_device_info intel_ironlake_m_info = { .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1, - .need_gfx_hws = 1, .has_rc6 = 1, + .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1, }; @@ -148,33 +148,33 @@ static 
const struct intel_device_info intel_sandybridge_m_info = { .has_hotplug = 1, .is_gen6 = 1, }; -static const struct pci_device_id pciidlist[] = { - INTEL_VGA_DEVICE(0x3577, &intel_i830_info), - INTEL_VGA_DEVICE(0x2562, &intel_845g_info), - INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), +static const struct pci_device_id pciidlist[] = { /* aka */ + INTEL_VGA_DEVICE(0x3577, &intel_i830_info), /* I830_M */ + INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */ + INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), /* I855_GM */ INTEL_VGA_DEVICE(0x358e, &intel_i85x_info), - INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), - INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), - INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), - INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), - INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), - INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), - INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), - INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), - INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), - INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), - INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), - INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), - INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), - INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), - INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), - INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), - INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), - INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), - INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), - INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), - INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), - INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), + INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), /* I865_G */ + INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), /* I915_G */ + INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), /* E7221_G */ + INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), /* I915_GM */ + INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), /* I945_G */ + INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), /* I945_GM */ + INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), /* I945_GME */ + INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), /* I946_GZ */ + INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), /* G35_G */ + INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), /* I965_Q */ + INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), /* I965_G */ + INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), /* Q35_G */ + INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), /* G33_G */ + INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), /* Q33_G */ + INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), /* I965_GM */ + INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), /* I965_GME */ + INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), /* GM45_G */ + INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), /* IGD_E_G */ + INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), /* Q45_G */ + INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */ + INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */ + INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */ INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), @@ -340,7 +340,7 @@ int i965_reset(struct drm_device *dev, u8 flags) /* * Clear request list */ - i915_gem_retire_requests(dev, &dev_priv->render_ring); + i915_gem_retire_requests(dev); if (need_display) i915_save_display(dev); @@ -413,7 +413,7 @@ int i965_reset(struct drm_device *dev, u8 flags) static int __devinit i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - return drm_get_dev(pdev, ent, &driver); + return drm_get_pci_dev(pdev, ent, &driver); } static void @@ -482,7 +482,7 
@@ static int i915_pm_poweroff(struct device *dev) return i915_drm_freeze(drm_dev); } -const struct dev_pm_ops i915_pm_ops = { +static const struct dev_pm_ops i915_pm_ops = { .suspend = i915_pm_suspend, .resume = i915_pm_resume, .freeze = i915_pm_freeze, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 2e1744d37ad..906663b9929 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -176,7 +176,8 @@ struct drm_i915_display_funcs { int (*get_display_clock_speed)(struct drm_device *dev); int (*get_fifo_size)(struct drm_device *dev, int plane); void (*update_wm)(struct drm_device *dev, int planea_clock, - int planeb_clock, int sr_hdisplay, int pixel_size); + int planeb_clock, int sr_hdisplay, int sr_htotal, + int pixel_size); /* clock updates for mode set */ /* cursor updates */ /* render clock increase/decrease */ @@ -200,6 +201,8 @@ struct intel_device_info { u8 need_gfx_hws : 1; u8 is_g4x : 1; u8 is_pineview : 1; + u8 is_broadwater : 1; + u8 is_crestline : 1; u8 is_ironlake : 1; u8 is_gen6 : 1; u8 has_fbc : 1; @@ -288,6 +291,8 @@ typedef struct drm_i915_private { struct timer_list hangcheck_timer; int hangcheck_count; uint32_t last_acthd; + uint32_t last_instdone; + uint32_t last_instdone1; struct drm_mm vram; @@ -547,6 +552,14 @@ typedef struct drm_i915_private { struct list_head fence_list; /** + * List of objects currently pending being freed. + * + * These objects are no longer in use, but due to a signal + * we were prevented from freeing them at the appointed time. + */ + struct list_head deferred_free_list; + + /** * We leave the user IRQ off as much as possible, * but this means that requests will finish and never * be retired once the system goes idle. Set a timer to @@ -677,7 +690,7 @@ struct drm_i915_gem_object { * * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE) */ - int fence_reg : 5; + signed int fence_reg : 5; /** * Used for checking the object doesn't appear more than once @@ -713,7 +726,7 @@ struct drm_i915_gem_object { * * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 * bits with absolutely no headroom. So use 4 bits. */ - int pin_count : 4; + unsigned int pin_count : 4; #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf /** AGP memory structure for our GTT binding. */ @@ -743,7 +756,7 @@ struct drm_i915_gem_object { uint32_t stride; /** Record of address bit 17 of each page at last unbind. 
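The two bit-field tweaks above (fence_reg becoming "signed int ... : 5", pin_count becoming "unsigned int ... : 4") rest on a C subtlety: whether a plain "int" bit-field is signed or unsigned is implementation-defined, so a field that must hold the -1 sentinel (FENCE_REG_NONE, per the comment in the hunk) has to say "signed" explicitly, and a pure counter is clearer as "unsigned". A small stand-alone illustration; the struct and field names here are made up for the example and are not driver code:

	#include <stdio.h>

	struct demo {
		int plain : 5;		/* signedness is implementation-defined */
		signed int fence : 5;	/* always -16..15, can hold a -1 sentinel */
		unsigned int pins : 4;	/* always 0..15, never negative */
	};

	int main(void)
	{
		struct demo d = { .plain = -1, .fence = -1, .pins = 15 };

		/* On a compiler that treats plain int bit-fields as unsigned,
		 * d.plain reads back as 31 rather than -1; d.fence is -1 either way. */
		printf("%d %d %u\n", d.plain, d.fence, d.pins);
		return 0;
	}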
*/ - long *bit_17; + unsigned long *bit_17; /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */ uint32_t agp_type; @@ -955,8 +968,7 @@ uint32_t i915_get_gem_seqno(struct drm_device *dev, bool i915_seqno_passed(uint32_t seq1, uint32_t seq2); int i915_gem_object_get_fence_reg(struct drm_gem_object *obj); int i915_gem_object_put_fence_reg(struct drm_gem_object *obj); -void i915_gem_retire_requests(struct drm_device *dev, - struct intel_ring_buffer *ring); +void i915_gem_retire_requests(struct drm_device *dev); void i915_gem_retire_work_handler(struct work_struct *work); void i915_gem_clflush_object(struct drm_gem_object *obj); int i915_gem_object_set_domain(struct drm_gem_object *obj, @@ -986,7 +998,7 @@ void i915_gem_free_all_phys_object(struct drm_device *dev); int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask); void i915_gem_object_put_pages(struct drm_gem_object *obj); void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); -void i915_gem_object_flush_write_domain(struct drm_gem_object *obj); +int i915_gem_object_flush_write_domain(struct drm_gem_object *obj); void i915_gem_shrinker_init(void); void i915_gem_shrinker_exit(void); @@ -1046,6 +1058,7 @@ extern void intel_modeset_cleanup(struct drm_device *dev); extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); extern void i8xx_disable_fbc(struct drm_device *dev); extern void g4x_disable_fbc(struct drm_device *dev); +extern void ironlake_disable_fbc(struct drm_device *dev); extern void intel_disable_fbc(struct drm_device *dev); extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); extern bool intel_fbc_enabled(struct drm_device *dev); @@ -1135,6 +1148,8 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm) #define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g) #define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm) +#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater) +#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline) #define IS_GM45(dev) ((dev)->pci_device == 0x2A42) #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x) #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 5aa747fc25a..2a4ed7ca8b4 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -35,7 +35,7 @@ #include <linux/swap.h> #include <linux/pci.h> -static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); +static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, @@ -53,6 +53,7 @@ static int i915_gem_evict_from_inactive_list(struct drm_device *dev); static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, struct drm_i915_gem_pwrite *args, struct drm_file *file_priv); +static void i915_gem_free_object_tail(struct drm_gem_object *obj); static LIST_HEAD(shrink_list); static DEFINE_SPINLOCK(shrink_list_lock); @@ -127,8 +128,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, return -ENOMEM; ret = drm_gem_handle_create(file_priv, obj, &handle); - drm_gem_object_handle_unreference_unlocked(obj); - + drm_gem_object_unreference_unlocked(obj); if (ret) return ret; @@ -496,10 +496,10 @@ 
fast_user_write(struct io_mapping *mapping, char *vaddr_atomic; unsigned long unwritten; - vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base); + vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0); unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset, user_data, length); - io_mapping_unmap_atomic(vaddr_atomic); + io_mapping_unmap_atomic(vaddr_atomic, KM_USER0); if (unwritten) return -EFAULT; return 0; @@ -1709,9 +1709,9 @@ i915_get_gem_seqno(struct drm_device *dev, /** * This function clears the request list as sequence numbers are passed. */ -void -i915_gem_retire_requests(struct drm_device *dev, - struct intel_ring_buffer *ring) +static void +i915_gem_retire_requests_ring(struct drm_device *dev, + struct intel_ring_buffer *ring) { drm_i915_private_t *dev_priv = dev->dev_private; uint32_t seqno; @@ -1751,6 +1751,30 @@ i915_gem_retire_requests(struct drm_device *dev, } void +i915_gem_retire_requests(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + + if (!list_empty(&dev_priv->mm.deferred_free_list)) { + struct drm_i915_gem_object *obj_priv, *tmp; + + /* We must be careful that during unbind() we do not + * accidentally infinitely recurse into retire requests. + * Currently: + * retire -> free -> unbind -> wait -> retire_ring + */ + list_for_each_entry_safe(obj_priv, tmp, + &dev_priv->mm.deferred_free_list, + list) + i915_gem_free_object_tail(&obj_priv->base); + } + + i915_gem_retire_requests_ring(dev, &dev_priv->render_ring); + if (HAS_BSD(dev)) + i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring); +} + +void i915_gem_retire_work_handler(struct work_struct *work) { drm_i915_private_t *dev_priv; @@ -1761,10 +1785,7 @@ i915_gem_retire_work_handler(struct work_struct *work) dev = dev_priv->dev; mutex_lock(&dev->struct_mutex); - i915_gem_retire_requests(dev, &dev_priv->render_ring); - - if (HAS_BSD(dev)) - i915_gem_retire_requests(dev, &dev_priv->bsd_ring); + i915_gem_retire_requests(dev); if (!dev_priv->mm.suspended && (!list_empty(&dev_priv->render_ring.request_list) || @@ -1832,7 +1853,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, * a separate wait queue to handle that. */ if (ret == 0) - i915_gem_retire_requests(dev, ring); + i915_gem_retire_requests_ring(dev, ring); return ret; } @@ -1945,11 +1966,12 @@ i915_gem_object_unbind(struct drm_gem_object *obj) * before we unbind. */ ret = i915_gem_object_set_to_cpu_domain(obj, 1); - if (ret) { - if (ret != -ERESTARTSYS) - DRM_ERROR("set_domain failed: %d\n", ret); + if (ret == -ERESTARTSYS) return ret; - } + /* Continue on if we fail due to EIO, the GPU is hung so we + * should be safe and we need to cleanup or else we might + * cause memory corruption through use-after-free. + */ BUG_ON(obj_priv->active); @@ -1985,7 +2007,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) trace_i915_gem_object_unbind(obj); - return 0; + return ret; } static struct drm_gem_object * @@ -2107,10 +2129,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size) struct intel_ring_buffer *render_ring = &dev_priv->render_ring; struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring; for (;;) { - i915_gem_retire_requests(dev, render_ring); - - if (HAS_BSD(dev)) - i915_gem_retire_requests(dev, bsd_ring); + i915_gem_retire_requests(dev); /* If there's an inactive buffer available now, grab it * and be done. 
@@ -2583,7 +2602,10 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj) if (!IS_I965G(dev)) { int ret; - i915_gem_object_flush_gpu_write_domain(obj); + ret = i915_gem_object_flush_gpu_write_domain(obj); + if (ret != 0) + return ret; + ret = i915_gem_object_wait_rendering(obj); if (ret != 0) return ret; @@ -2634,10 +2656,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) if (free_space != NULL) { obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size, alignment); - if (obj_priv->gtt_space != NULL) { - obj_priv->gtt_space->private = obj; + if (obj_priv->gtt_space != NULL) obj_priv->gtt_offset = obj_priv->gtt_space->start; - } } if (obj_priv->gtt_space == NULL) { /* If the gtt is empty and we're still having trouble @@ -2733,7 +2753,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj) } /** Flushes any GPU write domain for the object if it's dirty. */ -static void +static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; @@ -2741,17 +2761,18 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) - return; + return 0; /* Queue the GPU write cache flushing we need. */ old_write_domain = obj->write_domain; i915_gem_flush(dev, 0, obj->write_domain); - (void) i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring); - BUG_ON(obj->write_domain); + if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0) + return -ENOMEM; trace_i915_gem_object_change_domain(obj, obj->read_domains, old_write_domain); + return 0; } /** Flushes the GTT write domain for the object if it's dirty. */ @@ -2795,9 +2816,11 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) old_write_domain); } -void +int i915_gem_object_flush_write_domain(struct drm_gem_object *obj) { + int ret = 0; + switch (obj->write_domain) { case I915_GEM_DOMAIN_GTT: i915_gem_object_flush_gtt_write_domain(obj); @@ -2806,9 +2829,11 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj) i915_gem_object_flush_cpu_write_domain(obj); break; default: - i915_gem_object_flush_gpu_write_domain(obj); + ret = i915_gem_object_flush_gpu_write_domain(obj); break; } + + return ret; } /** @@ -2828,7 +2853,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) if (obj_priv->gtt_space == NULL) return -EINVAL; - i915_gem_object_flush_gpu_write_domain(obj); + ret = i915_gem_object_flush_gpu_write_domain(obj); + if (ret != 0) + return ret; + /* Wait on any GPU rendering and flushing to occur. */ ret = i915_gem_object_wait_rendering(obj); if (ret != 0) @@ -2878,7 +2906,9 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj) if (obj_priv->gtt_space == NULL) return -EINVAL; - i915_gem_object_flush_gpu_write_domain(obj); + ret = i915_gem_object_flush_gpu_write_domain(obj); + if (ret) + return ret; /* Wait on any GPU rendering and flushing to occur. */ if (obj_priv->active) { @@ -2926,7 +2956,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) uint32_t old_write_domain, old_read_domains; int ret; - i915_gem_object_flush_gpu_write_domain(obj); + ret = i915_gem_object_flush_gpu_write_domain(obj); + if (ret) + return ret; + /* Wait on any GPU rendering and flushing to occur. 
*/ ret = i915_gem_object_wait_rendering(obj); if (ret != 0) @@ -3216,7 +3249,10 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, if (offset == 0 && size == obj->size) return i915_gem_object_set_to_cpu_domain(obj, 0); - i915_gem_object_flush_gpu_write_domain(obj); + ret = i915_gem_object_flush_gpu_write_domain(obj); + if (ret) + return ret; + /* Wait on any GPU rendering and flushing to occur. */ ret = i915_gem_object_wait_rendering(obj); if (ret != 0) @@ -3451,7 +3487,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, reloc_offset = obj_priv->gtt_offset + reloc->offset; reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, (reloc_offset & - ~(PAGE_SIZE - 1))); + ~(PAGE_SIZE - 1)), + KM_USER0); reloc_entry = (uint32_t __iomem *)(reloc_page + (reloc_offset & (PAGE_SIZE - 1))); reloc_val = target_obj_priv->gtt_offset + reloc->delta; @@ -3462,7 +3499,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, readl(reloc_entry), reloc_val); #endif writel(reloc_val, reloc_entry); - io_mapping_unmap_atomic(reloc_page); + io_mapping_unmap_atomic(reloc_page, KM_USER0); /* The updated presumed offset for this entry will be * copied back out to the user. @@ -4313,7 +4350,6 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_busy *args = data; struct drm_gem_object *obj; struct drm_i915_gem_object *obj_priv; - drm_i915_private_t *dev_priv = dev->dev_private; obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) { @@ -4328,10 +4364,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, * actually unmasked, and our working set ends up being larger than * required. */ - i915_gem_retire_requests(dev, &dev_priv->render_ring); - - if (HAS_BSD(dev)) - i915_gem_retire_requests(dev, &dev_priv->bsd_ring); + i915_gem_retire_requests(dev); obj_priv = to_intel_bo(obj); /* Don't count being on the flushing list against the object being @@ -4441,20 +4474,19 @@ int i915_gem_init_object(struct drm_gem_object *obj) return 0; } -void i915_gem_free_object(struct drm_gem_object *obj) +static void i915_gem_free_object_tail(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + int ret; - trace_i915_gem_object_destroy(obj); - - while (obj_priv->pin_count > 0) - i915_gem_object_unpin(obj); - - if (obj_priv->phys_obj) - i915_gem_detach_phys_object(dev, obj); - - i915_gem_object_unbind(obj); + ret = i915_gem_object_unbind(obj); + if (ret == -ERESTARTSYS) { + list_move(&obj_priv->list, + &dev_priv->mm.deferred_free_list); + return; + } if (obj_priv->mmap_offset) i915_gem_free_mmap_offset(obj); @@ -4466,6 +4498,22 @@ void i915_gem_free_object(struct drm_gem_object *obj) kfree(obj_priv); } +void i915_gem_free_object(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); + + trace_i915_gem_object_destroy(obj); + + while (obj_priv->pin_count > 0) + i915_gem_object_unpin(obj); + + if (obj_priv->phys_obj) + i915_gem_detach_phys_object(dev, obj); + + i915_gem_free_object_tail(obj); +} + /** Unbinds all inactive objects. 
*/ static int i915_gem_evict_from_inactive_list(struct drm_device *dev) @@ -4689,9 +4737,19 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list)); mutex_unlock(&dev->struct_mutex); - drm_irq_install(dev); + ret = drm_irq_install(dev); + if (ret) + goto cleanup_ringbuffer; return 0; + +cleanup_ringbuffer: + mutex_lock(&dev->struct_mutex); + i915_gem_cleanup_ringbuffer(dev); + dev_priv->mm.suspended = 1; + mutex_unlock(&dev->struct_mutex); + + return ret; } int @@ -4729,6 +4787,7 @@ i915_gem_load(struct drm_device *dev) INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list); INIT_LIST_HEAD(&dev_priv->mm.inactive_list); INIT_LIST_HEAD(&dev_priv->mm.fence_list); + INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); INIT_LIST_HEAD(&dev_priv->render_ring.active_list); INIT_LIST_HEAD(&dev_priv->render_ring.request_list); if (HAS_BSD(dev)) { @@ -5027,10 +5086,7 @@ rescan: continue; spin_unlock(&shrink_list_lock); - i915_gem_retire_requests(dev, &dev_priv->render_ring); - - if (HAS_BSD(dev)) - i915_gem_retire_requests(dev, &dev_priv->bsd_ring); + i915_gem_retire_requests(dev); list_for_each_entry_safe(obj_priv, next_obj, &dev_priv->mm.inactive_list, diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 4b7c49d4257..155719e4d16 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -333,8 +333,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, i915_gem_release_mmap(obj); if (ret != 0) { - WARN(ret != -ERESTARTSYS, - "failed to reset object for tiling switch"); args->tiling_mode = obj_priv->tiling_mode; args->stride = obj_priv->stride; goto err; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index dba53d4b9fb..85785a8844e 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -171,10 +171,10 @@ void intel_enable_asle (struct drm_device *dev) ironlake_enable_display_irq(dev_priv, DE_GSE); else { i915_enable_pipestat(dev_priv, 1, - I915_LEGACY_BLC_EVENT_ENABLE); + PIPE_LEGACY_BLC_EVENT_ENABLE); if (IS_I965G(dev)) i915_enable_pipestat(dev_priv, 0, - I915_LEGACY_BLC_EVENT_ENABLE); + PIPE_LEGACY_BLC_EVENT_ENABLE); } } @@ -842,7 +842,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) u32 iir, new_iir; u32 pipea_stats, pipeb_stats; u32 vblank_status; - u32 vblank_enable; int vblank = 0; unsigned long irqflags; int irq_received; @@ -856,13 +855,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) iir = I915_READ(IIR); - if (IS_I965G(dev)) { - vblank_status = I915_START_VBLANK_INTERRUPT_STATUS; - vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE; - } else { - vblank_status = I915_VBLANK_INTERRUPT_STATUS; - vblank_enable = I915_VBLANK_INTERRUPT_ENABLE; - } + if (IS_I965G(dev)) + vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS; + else + vblank_status = PIPE_VBLANK_INTERRUPT_STATUS; for (;;) { irq_received = iir != 0; @@ -966,8 +962,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) intel_finish_page_flip(dev, 1); } - if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) || - (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) || + if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || + (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) || (iir & I915_ASLE_INTERRUPT)) opregion_asle_intr(dev); @@ -1233,16 +1229,21 @@ void i915_hangcheck_elapsed(unsigned long data) { struct drm_device *dev = (struct drm_device *)data; drm_i915_private_t *dev_priv = dev->dev_private; - uint32_t acthd; + uint32_t 
acthd, instdone, instdone1; /* No reset support on this chip yet. */ if (IS_GEN6(dev)) return; - if (!IS_I965G(dev)) + if (!IS_I965G(dev)) { acthd = I915_READ(ACTHD); - else + instdone = I915_READ(INSTDONE); + instdone1 = 0; + } else { acthd = I915_READ(ACTHD_I965); + instdone = I915_READ(INSTDONE_I965); + instdone1 = I915_READ(INSTDONE1); + } /* If all work is done then ACTHD clearly hasn't advanced. */ if (list_empty(&dev_priv->render_ring.request_list) || @@ -1253,21 +1254,24 @@ void i915_hangcheck_elapsed(unsigned long data) return; } - if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) { - DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); - i915_handle_error(dev, true); - return; - } + if (dev_priv->last_acthd == acthd && + dev_priv->last_instdone == instdone && + dev_priv->last_instdone1 == instdone1) { + if (dev_priv->hangcheck_count++ > 1) { + DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); + i915_handle_error(dev, true); + return; + } + } else { + dev_priv->hangcheck_count = 0; + + dev_priv->last_acthd = acthd; + dev_priv->last_instdone = instdone; + dev_priv->last_instdone1 = instdone1; + } /* Reset timer case chip hangs without another request being added */ mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); - - if (acthd != dev_priv->last_acthd) - dev_priv->hangcheck_count = 0; - else - dev_priv->hangcheck_count++; - - dev_priv->last_acthd = acthd; } /* drm_dma.h hooks diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index cf41c672def..281db6e5403 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -442,7 +442,7 @@ #define GEN6_RENDER_IMR 0x20a8 #define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8) #define GEN6_RENDER_PPGTT_PAGE_FAULT (1 << 7) -#define GEN6_RENDER TIMEOUT_COUNTER_EXPIRED (1 << 6) +#define GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED (1 << 6) #define GEN6_RENDER_L3_PARITY_ERROR (1 << 5) #define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 4) #define GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR (1 << 3) @@ -530,6 +530,21 @@ #define DPFC_CHICKEN 0x3224 #define DPFC_HT_MODIFY (1<<31) +/* Framebuffer compression for Ironlake */ +#define ILK_DPFC_CB_BASE 0x43200 +#define ILK_DPFC_CONTROL 0x43208 +/* The bit 28-8 is reserved */ +#define DPFC_RESERVED (0x1FFFFF00) +#define ILK_DPFC_RECOMP_CTL 0x4320c +#define ILK_DPFC_STATUS 0x43210 +#define ILK_DPFC_FENCE_YOFF 0x43218 +#define ILK_DPFC_CHICKEN 0x43224 +#define ILK_FBC_RT_BASE 0x2128 +#define ILK_FBC_RT_VALID (1<<0) + +#define ILK_DISPLAY_CHICKEN1 0x42000 +#define ILK_FBCQ_DIS (1<<22) + /* * GPIO regs */ @@ -595,32 +610,6 @@ #define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ #define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */ -#define I915_FIFO_UNDERRUN_STATUS (1UL<<31) -#define I915_CRC_ERROR_ENABLE (1UL<<29) -#define I915_CRC_DONE_ENABLE (1UL<<28) -#define I915_GMBUS_EVENT_ENABLE (1UL<<27) -#define I915_VSYNC_INTERRUPT_ENABLE (1UL<<25) -#define I915_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) -#define I915_DPST_EVENT_ENABLE (1UL<<23) -#define I915_LEGACY_BLC_EVENT_ENABLE (1UL<<22) -#define I915_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) -#define I915_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) -#define I915_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ -#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17) -#define I915_OVERLAY_UPDATED_ENABLE (1UL<<16) -#define I915_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) -#define I915_CRC_DONE_INTERRUPT_STATUS (1UL<<12) -#define I915_GMBUS_INTERRUPT_STATUS (1UL<<11) 
-#define I915_VSYNC_INTERRUPT_STATUS (1UL<<9) -#define I915_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) -#define I915_DPST_EVENT_STATUS (1UL<<7) -#define I915_LEGACY_BLC_EVENT_STATUS (1UL<<6) -#define I915_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) -#define I915_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4) -#define I915_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ -#define I915_VBLANK_INTERRUPT_STATUS (1UL<<1) -#define I915_OVERLAY_UPDATED_STATUS (1UL<<0) - #define SRX_INDEX 0x3c4 #define SRX_DATA 0x3c5 #define SR01 1 @@ -2166,7 +2155,8 @@ #define I830_FIFO_LINE_SIZE 32 #define G4X_FIFO_SIZE 127 -#define I945_FIFO_SIZE 127 /* 945 & 965 */ +#define I965_FIFO_SIZE 512 +#define I945_FIFO_SIZE 127 #define I915_FIFO_SIZE 95 #define I855GM_FIFO_SIZE 127 /* In cachelines */ #define I830_FIFO_SIZE 95 @@ -2185,6 +2175,9 @@ #define PINEVIEW_CURSOR_DFT_WM 0 #define PINEVIEW_CURSOR_GUARD_WM 5 +#define I965_CURSOR_FIFO 64 +#define I965_CURSOR_MAX_WM 32 +#define I965_CURSOR_DFT_WM 8 /* define the Watermark register on Ironlake */ #define WM0_PIPEA_ILK 0x45100 @@ -2212,6 +2205,9 @@ #define ILK_DISPLAY_FIFO 128 #define ILK_DISPLAY_MAXWM 64 #define ILK_DISPLAY_DFTWM 8 +#define ILK_CURSOR_FIFO 32 +#define ILK_CURSOR_MAXWM 16 +#define ILK_CURSOR_DFTWM 8 #define ILK_DISPLAY_SR_FIFO 512 #define ILK_DISPLAY_MAX_SRWM 0x1ff @@ -2510,6 +2506,10 @@ #define ILK_VSDPFD_FULL (1<<21) #define ILK_DSPCLK_GATE 0x42020 #define ILK_DPARB_CLK_GATE (1<<5) +/* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */ +#define ILK_CLK_FBC (1<<7) +#define ILK_DPFC_DIS1 (1<<8) +#define ILK_DPFC_DIS2 (1<<9) #define DISP_ARB_CTL 0x45000 #define DISP_TILE_SURFACE_SWIZZLING (1<<13) diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 60a5800fba6..6e2025274db 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -602,7 +602,9 @@ void i915_save_display(struct drm_device *dev) /* Only save FBC state on the platform that supports FBC */ if (I915_HAS_FBC(dev)) { - if (IS_GM45(dev)) { + if (IS_IRONLAKE_M(dev)) { + dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE); + } else if (IS_GM45(dev)) { dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE); } else { dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); @@ -706,7 +708,10 @@ void i915_restore_display(struct drm_device *dev) /* only restore FBC info on the platform that supports FBC*/ if (I915_HAS_FBC(dev)) { - if (IS_GM45(dev)) { + if (IS_IRONLAKE_M(dev)) { + ironlake_disable_fbc(dev); + I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); + } else if (IS_GM45(dev)) { g4x_disable_fbc(dev); I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE); } else { diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index fab21760dd5..fea97a21cc1 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -262,6 +262,42 @@ DEFINE_EVENT(i915_ring, i915_ring_wait_end, TP_ARGS(dev) ); +TRACE_EVENT(i915_flip_request, + TP_PROTO(int plane, struct drm_gem_object *obj), + + TP_ARGS(plane, obj), + + TP_STRUCT__entry( + __field(int, plane) + __field(struct drm_gem_object *, obj) + ), + + TP_fast_assign( + __entry->plane = plane; + __entry->obj = obj; + ), + + TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj) +); + +TRACE_EVENT(i915_flip_complete, + TP_PROTO(int plane, struct drm_gem_object *obj), + + TP_ARGS(plane, obj), + + TP_STRUCT__entry( + __field(int, plane) + __field(struct drm_gem_object *, obj) + ), + + TP_fast_assign( + 
__entry->plane = plane; + __entry->obj = obj; + ), + + TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj) +); + #endif /* _I915_TRACE_H_ */ /* This part must be outside protection */ diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 5e21b311982..1e5e0d379fa 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -33,6 +33,7 @@ #include "intel_drv.h" #include "i915_drm.h" #include "i915_drv.h" +#include "i915_trace.h" #include "drm_dp_helper.h" #include "drm_crtc_helper.h" @@ -42,6 +43,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type); static void intel_update_watermarks(struct drm_device *dev); static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule); +static void intel_crtc_update_cursor(struct drm_crtc *crtc); typedef struct { /* given values */ @@ -322,6 +324,9 @@ struct intel_limit { #define IRONLAKE_DP_P1_MIN 1 #define IRONLAKE_DP_P1_MAX 2 +/* FDI */ +#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ + static bool intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *best_clock); @@ -975,7 +980,10 @@ void intel_wait_for_vblank(struct drm_device *dev) { /* Wait for 20ms, i.e. one cycle at 50hz. */ - msleep(20); + if (in_dbg_master()) + mdelay(20); /* The kernel debugger cannot call msleep() */ + else + msleep(20); } /* Parameters have changed, update FBC info */ @@ -1122,6 +1130,67 @@ static bool g4x_fbc_enabled(struct drm_device *dev) return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; } +static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_framebuffer *fb = crtc->fb; + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int plane = (intel_crtc->plane == 0) ? DPFC_CTL_PLANEA : + DPFC_CTL_PLANEB; + unsigned long stall_watermark = 200; + u32 dpfc_ctl; + + dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; + dev_priv->cfb_fence = obj_priv->fence_reg; + dev_priv->cfb_plane = intel_crtc->plane; + + dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); + dpfc_ctl &= DPFC_RESERVED; + dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); + if (obj_priv->tiling_mode != I915_TILING_NONE) { + dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence); + I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); + } else { + I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY); + } + + I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); + I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | + (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | + (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); + I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); + I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID); + /* enable it... 
*/ + I915_WRITE(ILK_DPFC_CONTROL, I915_READ(ILK_DPFC_CONTROL) | + DPFC_CTL_EN); + + DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); +} + +void ironlake_disable_fbc(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 dpfc_ctl; + + /* Disable compression */ + dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); + dpfc_ctl &= ~DPFC_CTL_EN; + I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); + intel_wait_for_vblank(dev); + + DRM_DEBUG_KMS("disabled FBC\n"); +} + +static bool ironlake_fbc_enabled(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; +} + bool intel_fbc_enabled(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -1248,6 +1317,10 @@ static void intel_update_fbc(struct drm_crtc *crtc, goto out_disable; } + /* If the kernel debugger is active, always disable compression */ + if (in_dbg_master()) + goto out_disable; + if (intel_fbc_enabled(dev)) { /* We can re-enable it in this case, but need to update pitch */ if ((fb->pitch > dev_priv->cfb_pitch) || @@ -1279,7 +1352,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) switch (obj_priv->tiling_mode) { case I915_TILING_NONE: - alignment = 64 * 1024; + if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) + alignment = 128 * 1024; + else if (IS_I965G(dev)) + alignment = 4 * 1024; + else + alignment = 64 * 1024; break; case I915_TILING_X: /* pin() will align the object as required by fence */ @@ -1314,6 +1392,98 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj) return 0; } +/* Assume fb object is pinned & idle & fenced and just update base pointers */ +static int +intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, + int x, int y) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_framebuffer *intel_fb; + struct drm_i915_gem_object *obj_priv; + struct drm_gem_object *obj; + int plane = intel_crtc->plane; + unsigned long Start, Offset; + int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR); + int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF); + int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE; + int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF); + int dspcntr_reg = (plane == 0) ? 
DSPACNTR : DSPBCNTR; + u32 dspcntr; + + switch (plane) { + case 0: + case 1: + break; + default: + DRM_ERROR("Can't update plane %d in SAREA\n", plane); + return -EINVAL; + } + + intel_fb = to_intel_framebuffer(fb); + obj = intel_fb->obj; + obj_priv = to_intel_bo(obj); + + dspcntr = I915_READ(dspcntr_reg); + /* Mask out pixel format bits in case we change it */ + dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; + switch (fb->bits_per_pixel) { + case 8: + dspcntr |= DISPPLANE_8BPP; + break; + case 16: + if (fb->depth == 15) + dspcntr |= DISPPLANE_15_16BPP; + else + dspcntr |= DISPPLANE_16BPP; + break; + case 24: + case 32: + dspcntr |= DISPPLANE_32BPP_NO_ALPHA; + break; + default: + DRM_ERROR("Unknown color depth\n"); + return -EINVAL; + } + if (IS_I965G(dev)) { + if (obj_priv->tiling_mode != I915_TILING_NONE) + dspcntr |= DISPPLANE_TILED; + else + dspcntr &= ~DISPPLANE_TILED; + } + + if (IS_IRONLAKE(dev)) + /* must disable */ + dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; + + I915_WRITE(dspcntr_reg, dspcntr); + + Start = obj_priv->gtt_offset; + Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); + + DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); + I915_WRITE(dspstride, fb->pitch); + if (IS_I965G(dev)) { + I915_WRITE(dspbase, Offset); + I915_READ(dspbase); + I915_WRITE(dspsurf, Start); + I915_READ(dspsurf); + I915_WRITE(dsptileoff, (y << 16) | x); + } else { + I915_WRITE(dspbase, Start + Offset); + I915_READ(dspbase); + } + + if ((IS_I965G(dev) || plane == 0)) + intel_update_fbc(crtc, &crtc->mode); + + intel_wait_for_vblank(dev); + intel_increase_pllclock(crtc, true); + + return 0; +} + static int intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) @@ -1554,6 +1724,15 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) int fdi_rx_imr_reg = (pipe == 0) ? 
FDI_RXA_IMR : FDI_RXB_IMR; u32 temp, tries = 0; + /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit + for train result */ + temp = I915_READ(fdi_rx_imr_reg); + temp &= ~FDI_RX_SYMBOL_LOCK; + temp &= ~FDI_RX_BIT_LOCK; + I915_WRITE(fdi_rx_imr_reg, temp); + I915_READ(fdi_rx_imr_reg); + udelay(150); + /* enable CPU FDI TX and PCH FDI RX */ temp = I915_READ(fdi_tx_reg); temp |= FDI_TX_ENABLE; @@ -1571,16 +1750,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) I915_READ(fdi_rx_reg); udelay(150); - /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit - for train result */ - temp = I915_READ(fdi_rx_imr_reg); - temp &= ~FDI_RX_SYMBOL_LOCK; - temp &= ~FDI_RX_BIT_LOCK; - I915_WRITE(fdi_rx_imr_reg, temp); - I915_READ(fdi_rx_imr_reg); - udelay(150); - - for (;;) { + for (tries = 0; tries < 5; tries++) { temp = I915_READ(fdi_rx_iir_reg); DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); @@ -1590,14 +1760,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) temp | FDI_RX_BIT_LOCK); break; } - - tries++; - - if (tries > 5) { - DRM_DEBUG_KMS("FDI train 1 fail!\n"); - break; - } } + if (tries == 5) + DRM_DEBUG_KMS("FDI train 1 fail!\n"); /* Train 2 */ temp = I915_READ(fdi_tx_reg); @@ -1613,7 +1778,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) tries = 0; - for (;;) { + for (tries = 0; tries < 5; tries++) { temp = I915_READ(fdi_rx_iir_reg); DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); @@ -1623,14 +1788,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) DRM_DEBUG_KMS("FDI train 2 done.\n"); break; } - - tries++; - - if (tries > 5) { - DRM_DEBUG_KMS("FDI train 2 fail!\n"); - break; - } } + if (tries == 5) + DRM_DEBUG_KMS("FDI train 2 fail!\n"); DRM_DEBUG_KMS("FDI train done\n"); } @@ -1655,6 +1815,15 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) int fdi_rx_imr_reg = (pipe == 0) ? 
FDI_RXA_IMR : FDI_RXB_IMR; u32 temp, i; + /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit + for train result */ + temp = I915_READ(fdi_rx_imr_reg); + temp &= ~FDI_RX_SYMBOL_LOCK; + temp &= ~FDI_RX_BIT_LOCK; + I915_WRITE(fdi_rx_imr_reg, temp); + I915_READ(fdi_rx_imr_reg); + udelay(150); + /* enable CPU FDI TX and PCH FDI RX */ temp = I915_READ(fdi_tx_reg); temp |= FDI_TX_ENABLE; @@ -1680,15 +1849,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc) I915_READ(fdi_rx_reg); udelay(150); - /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit - for train result */ - temp = I915_READ(fdi_rx_imr_reg); - temp &= ~FDI_RX_SYMBOL_LOCK; - temp &= ~FDI_RX_BIT_LOCK; - I915_WRITE(fdi_rx_imr_reg, temp); - I915_READ(fdi_rx_imr_reg); - udelay(150); - for (i = 0; i < 4; i++ ) { temp = I915_READ(fdi_tx_reg); temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; @@ -1843,7 +2003,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) } /* Enable panel fitting for LVDS */ - if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) + || HAS_eDP || intel_pch_has_edp(crtc)) { temp = I915_READ(pf_ctl_reg); I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3); @@ -1938,9 +2099,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) reg = I915_READ(trans_dp_ctl); reg &= ~TRANS_DP_PORT_SEL_MASK; reg = TRANS_DP_OUTPUT_ENABLE | - TRANS_DP_ENH_FRAMING | - TRANS_DP_VSYNC_ACTIVE_HIGH | - TRANS_DP_HSYNC_ACTIVE_HIGH; + TRANS_DP_ENH_FRAMING; + + if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) + reg |= TRANS_DP_HSYNC_ACTIVE_HIGH; + if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) + reg |= TRANS_DP_VSYNC_ACTIVE_HIGH; switch (intel_trans_dp_port_sel(crtc)) { case PCH_DP_B: @@ -1980,6 +2144,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) intel_crtc_load_lut(crtc); + intel_update_fbc(crtc, &crtc->mode); + break; case DRM_MODE_DPMS_OFF: DRM_DEBUG_KMS("crtc %d dpms off\n", pipe); @@ -1994,6 +2160,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) I915_READ(dspbase_reg); } + if (dev_priv->cfb_plane == plane && + dev_priv->display.disable_fbc) + dev_priv->display.disable_fbc(dev); + i915_disable_vga(dev); /* disable cpu pipe, disable after all planes disabled */ @@ -2373,8 +2543,8 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; if (HAS_PCH_SPLIT(dev)) { /* FDI link clock is fixed at 2.7G */ - if (mode->clock * 3 > 27000 * 4) - return MODE_CLOCK_HIGH; + if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4) + return false; } return true; } @@ -2556,6 +2726,20 @@ static struct intel_watermark_params g4x_wm_info = { 2, G4X_FIFO_LINE_SIZE, }; +static struct intel_watermark_params g4x_cursor_wm_info = { + I965_CURSOR_FIFO, + I965_CURSOR_MAX_WM, + I965_CURSOR_DFT_WM, + 2, + G4X_FIFO_LINE_SIZE, +}; +static struct intel_watermark_params i965_cursor_wm_info = { + I965_CURSOR_FIFO, + I965_CURSOR_MAX_WM, + I965_CURSOR_DFT_WM, + 2, + I915_FIFO_LINE_SIZE, +}; static struct intel_watermark_params i945_wm_info = { I945_FIFO_SIZE, I915_MAX_WM, @@ -2593,6 +2777,14 @@ static struct intel_watermark_params ironlake_display_wm_info = { ILK_FIFO_LINE_SIZE }; +static struct intel_watermark_params ironlake_cursor_wm_info = { + ILK_CURSOR_FIFO, + ILK_CURSOR_MAXWM, + ILK_CURSOR_DFTWM, + 2, + ILK_FIFO_LINE_SIZE +}; + static struct intel_watermark_params ironlake_display_srwm_info = { ILK_DISPLAY_SR_FIFO, ILK_DISPLAY_MAX_SRWM, @@ -2642,7 +2834,7 @@ static unsigned long intel_calculate_wm(unsigned 
long clock_in_khz, */ entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) / 1000; - entries_required /= wm->cacheline_size; + entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size); DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required); @@ -2653,8 +2845,14 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz, /* Don't promote wm_size to unsigned... */ if (wm_size > (long)wm->max_wm) wm_size = wm->max_wm; - if (wm_size <= 0) + if (wm_size <= 0) { wm_size = wm->default_wm; + DRM_ERROR("Insufficient FIFO for plane, expect flickering:" + " entries required = %ld, available = %lu.\n", + entries_required + wm->guard_size, + wm->fifo_size); + } + return wm_size; } @@ -2763,11 +2961,9 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane) uint32_t dsparb = I915_READ(DSPARB); int size; - if (plane == 0) - size = dsparb & 0x7f; - else - size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - - (dsparb & 0x7f); + size = dsparb & 0x7f; + if (plane) + size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", size); @@ -2781,11 +2977,9 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane) uint32_t dsparb = I915_READ(DSPARB); int size; - if (plane == 0) - size = dsparb & 0x1ff; - else - size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - - (dsparb & 0x1ff); + size = dsparb & 0x1ff; + if (plane) + size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; size >>= 1; /* Convert to cachelines */ DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, @@ -2826,7 +3020,8 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane) } static void pineview_update_wm(struct drm_device *dev, int planea_clock, - int planeb_clock, int sr_hdisplay, int pixel_size) + int planeb_clock, int sr_hdisplay, int unused, + int pixel_size) { struct drm_i915_private *dev_priv = dev->dev_private; u32 reg; @@ -2891,7 +3086,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock, } static void g4x_update_wm(struct drm_device *dev, int planea_clock, - int planeb_clock, int sr_hdisplay, int pixel_size) + int planeb_clock, int sr_hdisplay, int sr_htotal, + int pixel_size) { struct drm_i915_private *dev_priv = dev->dev_private; int total_size, cacheline_size; @@ -2915,12 +3111,12 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, */ entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) / 1000; - entries_required /= G4X_FIFO_LINE_SIZE; + entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE); planea_wm = entries_required + planea_params.guard_size; entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) / 1000; - entries_required /= G4X_FIFO_LINE_SIZE; + entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE); planeb_wm = entries_required + planeb_params.guard_size; cursora_wm = cursorb_wm = 16; @@ -2934,13 +3130,24 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, static const int sr_latency_ns = 12000; sr_clock = planea_clock ? 
planea_clock : planeb_clock; - line_time_us = ((sr_hdisplay * 1000) / sr_clock); + line_time_us = ((sr_htotal * 1000) / sr_clock); /* Use ns/us then divide to preserve precision */ - sr_entries = (((sr_latency_ns / line_time_us) + 1) * - pixel_size * sr_hdisplay) / 1000; - sr_entries = roundup(sr_entries / cacheline_size, 1); - DRM_DEBUG("self-refresh entries: %d\n", sr_entries); + sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * + pixel_size * sr_hdisplay; + sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size); + + entries_required = (((sr_latency_ns / line_time_us) + + 1000) / 1000) * pixel_size * 64; + entries_required = DIV_ROUND_UP(entries_required, + g4x_cursor_wm_info.cacheline_size); + cursor_sr = entries_required + g4x_cursor_wm_info.guard_size; + + if (cursor_sr > g4x_cursor_wm_info.max_wm) + cursor_sr = g4x_cursor_wm_info.max_wm; + DRM_DEBUG_KMS("self-refresh watermark: display plane %d " + "cursor %d\n", sr_entries, cursor_sr); + I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); } else { /* Turn off self refresh if both pipes are enabled */ @@ -2965,11 +3172,13 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock, } static void i965_update_wm(struct drm_device *dev, int planea_clock, - int planeb_clock, int sr_hdisplay, int pixel_size) + int planeb_clock, int sr_hdisplay, int sr_htotal, + int pixel_size) { struct drm_i915_private *dev_priv = dev->dev_private; unsigned long line_time_us; int sr_clock, sr_entries, srwm = 1; + int cursor_sr = 16; /* Calc sr entries for one plane configs */ if (sr_hdisplay && (!planea_clock || !planeb_clock)) { @@ -2977,17 +3186,31 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, static const int sr_latency_ns = 12000; sr_clock = planea_clock ? planea_clock : planeb_clock; - line_time_us = ((sr_hdisplay * 1000) / sr_clock); + line_time_us = ((sr_htotal * 1000) / sr_clock); /* Use ns/us then divide to preserve precision */ - sr_entries = (((sr_latency_ns / line_time_us) + 1) * - pixel_size * sr_hdisplay) / 1000; - sr_entries = roundup(sr_entries / I915_FIFO_LINE_SIZE, 1); + sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * + pixel_size * sr_hdisplay; + sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE); DRM_DEBUG("self-refresh entries: %d\n", sr_entries); - srwm = I945_FIFO_SIZE - sr_entries; + srwm = I965_FIFO_SIZE - sr_entries; if (srwm < 0) srwm = 1; - srwm &= 0x3f; + srwm &= 0x1ff; + + sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * + pixel_size * 64; + sr_entries = DIV_ROUND_UP(sr_entries, + i965_cursor_wm_info.cacheline_size); + cursor_sr = i965_cursor_wm_info.fifo_size - + (sr_entries + i965_cursor_wm_info.guard_size); + + if (cursor_sr > i965_cursor_wm_info.max_wm) + cursor_sr = i965_cursor_wm_info.max_wm; + + DRM_DEBUG_KMS("self-refresh watermark: display plane %d " + "cursor %d\n", srwm, cursor_sr); + if (IS_I965GM(dev)) I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); } else { @@ -3004,10 +3227,13 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock, I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) | (8 << 0)); I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); + /* update cursor SR watermark */ + I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); } static void i9xx_update_wm(struct drm_device *dev, int planea_clock, - int planeb_clock, int sr_hdisplay, int pixel_size) + int planeb_clock, int sr_hdisplay, int sr_htotal, + int pixel_size) { struct drm_i915_private *dev_priv = dev->dev_private; uint32_t fwater_lo; @@ -3052,12 
+3278,12 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, static const int sr_latency_ns = 6000; sr_clock = planea_clock ? planea_clock : planeb_clock; - line_time_us = ((sr_hdisplay * 1000) / sr_clock); + line_time_us = ((sr_htotal * 1000) / sr_clock); /* Use ns/us then divide to preserve precision */ - sr_entries = (((sr_latency_ns / line_time_us) + 1) * - pixel_size * sr_hdisplay) / 1000; - sr_entries = roundup(sr_entries / cacheline_size, 1); + sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * + pixel_size * sr_hdisplay; + sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size); DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries); srwm = total_size - sr_entries; if (srwm < 0) @@ -3095,7 +3321,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, } static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, - int unused2, int pixel_size) + int unused2, int unused3, int pixel_size) { struct drm_i915_private *dev_priv = dev->dev_private; uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff; @@ -3113,9 +3339,11 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused, } #define ILK_LP0_PLANE_LATENCY 700 +#define ILK_LP0_CURSOR_LATENCY 1300 static void ironlake_update_wm(struct drm_device *dev, int planea_clock, - int planeb_clock, int sr_hdisplay, int pixel_size) + int planeb_clock, int sr_hdisplay, int sr_htotal, + int pixel_size) { struct drm_i915_private *dev_priv = dev->dev_private; int planea_wm, planeb_wm, cursora_wm, cursorb_wm; @@ -3123,20 +3351,48 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, unsigned long line_time_us; int sr_clock, entries_required; u32 reg_value; + int line_count; + int planea_htotal = 0, planeb_htotal = 0; + struct drm_crtc *crtc; + struct intel_crtc *intel_crtc; + + /* Need htotal for all active display plane */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + intel_crtc = to_intel_crtc(crtc); + if (crtc->enabled) { + if (intel_crtc->plane == 0) + planea_htotal = crtc->mode.htotal; + else + planeb_htotal = crtc->mode.htotal; + } + } /* Calculate and update the watermark for plane A */ if (planea_clock) { entries_required = ((planea_clock / 1000) * pixel_size * ILK_LP0_PLANE_LATENCY) / 1000; entries_required = DIV_ROUND_UP(entries_required, - ironlake_display_wm_info.cacheline_size); + ironlake_display_wm_info.cacheline_size); planea_wm = entries_required + ironlake_display_wm_info.guard_size; if (planea_wm > (int)ironlake_display_wm_info.max_wm) planea_wm = ironlake_display_wm_info.max_wm; - cursora_wm = 16; + /* Use the large buffer method to calculate cursor watermark */ + line_time_us = (planea_htotal * 1000) / planea_clock; + + /* Use ns/us then divide to preserve precision */ + line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000; + + /* calculate the cursor watermark for cursor A */ + entries_required = line_count * 64 * pixel_size; + entries_required = DIV_ROUND_UP(entries_required, + ironlake_cursor_wm_info.cacheline_size); + cursora_wm = entries_required + ironlake_cursor_wm_info.guard_size; + if (cursora_wm > ironlake_cursor_wm_info.max_wm) + cursora_wm = ironlake_cursor_wm_info.max_wm; + reg_value = I915_READ(WM0_PIPEA_ILK); reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) | @@ -3150,14 +3406,27 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, entries_required = ((planeb_clock / 1000) * pixel_size * 
ILK_LP0_PLANE_LATENCY) / 1000; entries_required = DIV_ROUND_UP(entries_required, - ironlake_display_wm_info.cacheline_size); + ironlake_display_wm_info.cacheline_size); planeb_wm = entries_required + ironlake_display_wm_info.guard_size; if (planeb_wm > (int)ironlake_display_wm_info.max_wm) planeb_wm = ironlake_display_wm_info.max_wm; - cursorb_wm = 16; + /* Use the large buffer method to calculate cursor watermark */ + line_time_us = (planeb_htotal * 1000) / planeb_clock; + + /* Use ns/us then divide to preserve precision */ + line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000; + + /* calculate the cursor watermark for cursor B */ + entries_required = line_count * 64 * pixel_size; + entries_required = DIV_ROUND_UP(entries_required, + ironlake_cursor_wm_info.cacheline_size); + cursorb_wm = entries_required + ironlake_cursor_wm_info.guard_size; + if (cursorb_wm > ironlake_cursor_wm_info.max_wm) + cursorb_wm = ironlake_cursor_wm_info.max_wm; + reg_value = I915_READ(WM0_PIPEB_ILK); reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK); reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) | @@ -3172,12 +3441,12 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, * display plane is used. */ if (!planea_clock || !planeb_clock) { - int line_count; + /* Read the self-refresh latency. The unit is 0.5us */ int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK; sr_clock = planea_clock ? planea_clock : planeb_clock; - line_time_us = ((sr_hdisplay * 1000) / sr_clock); + line_time_us = ((sr_htotal * 1000) / sr_clock); /* Use ns/us then divide to preserve precision */ line_count = ((ilk_sr_latency * 500) / line_time_us + 1000) @@ -3186,14 +3455,14 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, /* calculate the self-refresh watermark for display plane */ entries_required = line_count * sr_hdisplay * pixel_size; entries_required = DIV_ROUND_UP(entries_required, - ironlake_display_srwm_info.cacheline_size); + ironlake_display_srwm_info.cacheline_size); sr_wm = entries_required + ironlake_display_srwm_info.guard_size; /* calculate the self-refresh watermark for display cursor */ entries_required = line_count * pixel_size * 64; entries_required = DIV_ROUND_UP(entries_required, - ironlake_cursor_srwm_info.cacheline_size); + ironlake_cursor_srwm_info.cacheline_size); cursor_wm = entries_required + ironlake_cursor_srwm_info.guard_size; @@ -3237,6 +3506,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock, * bytes per pixel * where * line time = htotal / dotclock + * surface width = hdisplay for normal plane and 64 for cursor * and latency is assumed to be high, as above. 
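Several of the watermark hunks above swap a truncating divide by the FIFO cacheline size for DIV_ROUND_UP, and derive line time from htotal instead of hdisplay. A standalone illustration of the rounding half of that change, using made-up clock and latency numbers rather than values from the patch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned long clock_khz = 148500;  /* hypothetical dot clock, kHz */
    unsigned long pixel_size = 4;      /* bytes per pixel */
    unsigned long latency_ns = 400;    /* hypothetical memory latency */
    unsigned long cacheline = 64;      /* FIFO line size, bytes */

    /* Bytes fetched during the latency window, as in intel_calculate_wm(). */
    unsigned long bytes = (clock_khz / 1000) * pixel_size * latency_ns / 1000;

    printf("bytes=%lu truncated=%lu rounded up=%lu\n",
           bytes, bytes / cacheline, DIV_ROUND_UP(bytes, cacheline));
    return 0;
}

With these numbers the truncating divide reserves 3 cachelines where 4 are needed; rounding up errs toward one spare cacheline rather than one too few.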
* * The final value programmed to the register should always be rounded up, @@ -3253,6 +3523,7 @@ static void intel_update_watermarks(struct drm_device *dev) int sr_hdisplay = 0; unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0; int enabled = 0, pixel_size = 0; + int sr_htotal = 0; if (!dev_priv->display.update_wm) return; @@ -3273,6 +3544,7 @@ static void intel_update_watermarks(struct drm_device *dev) } sr_hdisplay = crtc->mode.hdisplay; sr_clock = crtc->mode.clock; + sr_htotal = crtc->mode.htotal; if (crtc->fb) pixel_size = crtc->fb->bits_per_pixel / 8; else @@ -3284,7 +3556,7 @@ static void intel_update_watermarks(struct drm_device *dev) return; dev_priv->display.update_wm(dev, planea_clock, planeb_clock, - sr_hdisplay, pixel_size); + sr_hdisplay, sr_htotal, pixel_size); } static int intel_crtc_mode_set(struct drm_crtc *crtc, @@ -3403,6 +3675,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, return -EINVAL; } + /* Ensure that the cursor is valid for the new mode before changing... */ + intel_crtc_update_cursor(crtc); + if (is_lvds && dev_priv->lvds_downclock_avail) { has_reduced_clock = limit->find_pll(limit, crtc, dev_priv->lvds_downclock, @@ -3469,7 +3744,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, temp |= PIPE_8BPC; else temp |= PIPE_6BPC; - } else if (is_edp) { + } else if (is_edp || (is_dp && intel_pch_has_edp(crtc))) { switch (dev_priv->edp_bpp/3) { case 8: temp |= PIPE_8BPC; @@ -3712,6 +3987,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, udelay(150); } + if (HAS_PCH_SPLIT(dev)) { + pipeconf &= ~PIPE_ENABLE_DITHER; + pipeconf &= ~PIPE_DITHER_TYPE_MASK; + } + /* The LVDS pin pair needs to be on before the DPLLs are enabled. * This is an exception to the general rule that mode_set doesn't turn * things on. @@ -3754,16 +4034,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, if (dev_priv->lvds_dither) { if (HAS_PCH_SPLIT(dev)) { pipeconf |= PIPE_ENABLE_DITHER; - pipeconf &= ~PIPE_DITHER_TYPE_MASK; pipeconf |= PIPE_DITHER_TYPE_ST01; } else lvds |= LVDS_ENABLE_DITHER; } else { - if (HAS_PCH_SPLIT(dev)) { - pipeconf &= ~PIPE_ENABLE_DITHER; - pipeconf &= ~PIPE_DITHER_TYPE_MASK; - } else + if (!HAS_PCH_SPLIT(dev)) { lvds &= ~LVDS_ENABLE_DITHER; + } } } I915_WRITE(lvds_reg, lvds); @@ -3939,6 +4216,85 @@ void intel_crtc_load_lut(struct drm_crtc *crtc) } } +/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */ +static void intel_crtc_update_cursor(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + int x = intel_crtc->cursor_x; + int y = intel_crtc->cursor_y; + uint32_t base, pos; + bool visible; + + pos = 0; + + if (crtc->fb) { + base = intel_crtc->cursor_addr; + if (x > (int) crtc->fb->width) + base = 0; + + if (y > (int) crtc->fb->height) + base = 0; + } else + base = 0; + + if (x < 0) { + if (x + intel_crtc->cursor_width < 0) + base = 0; + + pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; + x = -x; + } + pos |= x << CURSOR_X_SHIFT; + + if (y < 0) { + if (y + intel_crtc->cursor_height < 0) + base = 0; + + pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; + y = -y; + } + pos |= y << CURSOR_Y_SHIFT; + + visible = base != 0; + if (!visible && !intel_crtc->cursor_visble) + return; + + I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos); + if (intel_crtc->cursor_visble != visible) { + uint32_t cntl = I915_READ(pipe == 0 ? 
CURACNTR : CURBCNTR); + if (base) { + /* Hooray for CUR*CNTR differences */ + if (IS_MOBILE(dev) || IS_I9XX(dev)) { + cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); + cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; + cntl |= pipe << 28; /* Connect to correct pipe */ + } else { + cntl &= ~(CURSOR_FORMAT_MASK); + cntl |= CURSOR_ENABLE; + cntl |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE; + } + } else { + if (IS_MOBILE(dev) || IS_I9XX(dev)) { + cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); + cntl |= CURSOR_MODE_DISABLE; + } else { + cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); + } + } + I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl); + + intel_crtc->cursor_visble = visible; + } + /* and commit changes on next vblank */ + I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base); + + if (visible) + intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj); +} + static int intel_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t handle, @@ -3949,11 +4305,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_gem_object *bo; struct drm_i915_gem_object *obj_priv; - int pipe = intel_crtc->pipe; - uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; - uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; - uint32_t temp = I915_READ(control); - size_t addr; + uint32_t addr; int ret; DRM_DEBUG_KMS("\n"); @@ -3961,12 +4313,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, /* if we want to turn off the cursor ignore width and height */ if (!handle) { DRM_DEBUG_KMS("cursor off\n"); - if (IS_MOBILE(dev) || IS_I9XX(dev)) { - temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); - temp |= CURSOR_MODE_DISABLE; - } else { - temp &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); - } addr = 0; bo = NULL; mutex_lock(&dev->struct_mutex); @@ -4008,7 +4354,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, addr = obj_priv->gtt_offset; } else { - ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); + ret = i915_gem_attach_phys_object(dev, bo, + (intel_crtc->pipe == 0) ? 
I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); if (ret) { DRM_ERROR("failed to attach phys object\n"); goto fail_locked; @@ -4019,21 +4366,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, if (!IS_I9XX(dev)) I915_WRITE(CURSIZE, (height << 12) | width); - /* Hooray for CUR*CNTR differences */ - if (IS_MOBILE(dev) || IS_I9XX(dev)) { - temp &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); - temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; - temp |= (pipe << 28); /* Connect to correct pipe */ - } else { - temp &= ~(CURSOR_FORMAT_MASK); - temp |= CURSOR_ENABLE; - temp |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE; - } - finish: - I915_WRITE(control, temp); - I915_WRITE(base, addr); - if (intel_crtc->cursor_bo) { if (dev_priv->info->cursor_needs_physical) { if (intel_crtc->cursor_bo != bo) @@ -4047,6 +4380,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, intel_crtc->cursor_addr = addr; intel_crtc->cursor_bo = bo; + intel_crtc->cursor_width = width; + intel_crtc->cursor_height = height; + + intel_crtc_update_cursor(crtc); return 0; fail_unpin: @@ -4060,34 +4397,12 @@ fail: static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) { - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_framebuffer *intel_fb; - int pipe = intel_crtc->pipe; - uint32_t temp = 0; - uint32_t adder; - - if (crtc->fb) { - intel_fb = to_intel_framebuffer(crtc->fb); - intel_mark_busy(dev, intel_fb->obj); - } - - if (x < 0) { - temp |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; - x = -x; - } - if (y < 0) { - temp |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; - y = -y; - } - temp |= x << CURSOR_X_SHIFT; - temp |= y << CURSOR_Y_SHIFT; + intel_crtc->cursor_x = x; + intel_crtc->cursor_y = y; - adder = intel_crtc->cursor_addr; - I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp); - I915_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder); + intel_crtc_update_cursor(crtc); return 0; } @@ -4671,6 +4986,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev, atomic_dec_and_test(&obj_priv->pending_flip)) DRM_WAKEUP(&dev_priv->pending_flip_queue); schedule_work(&work->work); + + trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); } void intel_finish_page_flip(struct drm_device *dev, int pipe) @@ -4748,27 +5065,22 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, mutex_lock(&dev->struct_mutex); ret = intel_pin_and_fence_fb_obj(dev, obj); - if (ret != 0) { - mutex_unlock(&dev->struct_mutex); - - spin_lock_irqsave(&dev->event_lock, flags); - intel_crtc->unpin_work = NULL; - spin_unlock_irqrestore(&dev->event_lock, flags); - - kfree(work); - - DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n", - to_intel_bo(obj)); - return ret; - } + if (ret) + goto cleanup_work; /* Reference the objects for the scheduled work. 
*/ drm_gem_object_reference(work->old_fb_obj); drm_gem_object_reference(obj); crtc->fb = fb; - i915_gem_object_flush_write_domain(obj); - drm_vblank_get(dev, intel_crtc->pipe); + ret = i915_gem_object_flush_write_domain(obj); + if (ret) + goto cleanup_objs; + + ret = drm_vblank_get(dev, intel_crtc->pipe); + if (ret) + goto cleanup_objs; + obj_priv = to_intel_bo(obj); atomic_inc(&obj_priv->pending_flip); work->pending_flip_obj = obj; @@ -4806,7 +5118,23 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, mutex_unlock(&dev->struct_mutex); + trace_i915_flip_request(intel_crtc->plane, obj); + return 0; + +cleanup_objs: + drm_gem_object_unreference(work->old_fb_obj); + drm_gem_object_unreference(obj); +cleanup_work: + mutex_unlock(&dev->struct_mutex); + + spin_lock_irqsave(&dev->event_lock, flags); + intel_crtc->unpin_work = NULL; + spin_unlock_irqrestore(&dev->event_lock, flags); + + kfree(work); + + return ret; } static const struct drm_crtc_helper_funcs intel_helper_funcs = { @@ -4814,6 +5142,7 @@ static const struct drm_crtc_helper_funcs intel_helper_funcs = { .mode_fixup = intel_crtc_mode_fixup, .mode_set = intel_crtc_mode_set, .mode_set_base = intel_pipe_set_base, + .mode_set_base_atomic = intel_pipe_set_base_atomic, .prepare = intel_crtc_prepare, .commit = intel_crtc_commit, .load_lut = intel_crtc_load_lut, @@ -4932,19 +5261,26 @@ static void intel_setup_outputs(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_encoder *encoder; + bool dpd_is_edp = false; - intel_crt_init(dev); - - /* Set up integrated LVDS */ if (IS_MOBILE(dev) && !IS_I830(dev)) intel_lvds_init(dev); if (HAS_PCH_SPLIT(dev)) { - int found; + dpd_is_edp = intel_dpd_is_edp(dev); if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) intel_dp_init(dev, DP_A); + if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) + intel_dp_init(dev, PCH_DP_D); + } + + intel_crt_init(dev); + + if (HAS_PCH_SPLIT(dev)) { + int found; + if (I915_READ(HDMIB) & PORT_DETECTED) { /* PCH SDVOB multiplex with HDMIB */ found = intel_sdvo_init(dev, PCH_SDVOB); @@ -4963,7 +5299,7 @@ static void intel_setup_outputs(struct drm_device *dev) if (I915_READ(PCH_DP_C) & DP_DETECTED) intel_dp_init(dev, PCH_DP_C); - if (I915_READ(PCH_DP_D) & DP_DETECTED) + if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) intel_dp_init(dev, PCH_DP_D); } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { @@ -5372,6 +5708,26 @@ void intel_init_clock_gating(struct drm_device *dev) (I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS)); } + /* + * Based on the document from hardware guys the following bits + * should be set unconditionally in order to enable FBC. + * The bit 22 of 0x42000 + * The bit 22 of 0x42004 + * The bit 7,8,9 of 0x42020. 
+ */ + if (IS_IRONLAKE_M(dev)) { + I915_WRITE(ILK_DISPLAY_CHICKEN1, + I915_READ(ILK_DISPLAY_CHICKEN1) | + ILK_FBCQ_DIS); + I915_WRITE(ILK_DISPLAY_CHICKEN2, + I915_READ(ILK_DISPLAY_CHICKEN2) | + ILK_DPARB_GATE); + I915_WRITE(ILK_DSPCLK_GATE, + I915_READ(ILK_DSPCLK_GATE) | + ILK_DPFC_DIS1 | + ILK_DPFC_DIS2 | + ILK_CLK_FBC); + } return; } else if (IS_G4X(dev)) { uint32_t dspclk_gate; @@ -5450,7 +5806,11 @@ static void intel_init_display(struct drm_device *dev) dev_priv->display.dpms = i9xx_crtc_dpms; if (I915_HAS_FBC(dev)) { - if (IS_GM45(dev)) { + if (IS_IRONLAKE_M(dev)) { + dev_priv->display.fbc_enabled = ironlake_fbc_enabled; + dev_priv->display.enable_fbc = ironlake_enable_fbc; + dev_priv->display.disable_fbc = ironlake_disable_fbc; + } else if (IS_GM45(dev)) { dev_priv->display.fbc_enabled = g4x_fbc_enabled; dev_priv->display.enable_fbc = g4x_enable_fbc; dev_priv->display.disable_fbc = g4x_disable_fbc; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 5dde80f9e65..40be1fa65be 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -43,6 +43,7 @@ #define DP_LINK_CONFIGURATION_SIZE 9 #define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP) +#define IS_PCH_eDP(dp_priv) ((dp_priv)->is_pch_edp) struct intel_dp_priv { uint32_t output_reg; @@ -56,6 +57,7 @@ struct intel_dp_priv { struct intel_encoder *intel_encoder; struct i2c_adapter adapter; struct i2c_algo_dp_aux_data algo; + bool is_pch_edp; }; static void @@ -128,8 +130,9 @@ intel_dp_link_required(struct drm_device *dev, struct intel_encoder *intel_encoder, int pixel_clock) { struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; - if (IS_eDP(intel_encoder)) + if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) return (pixel_clock * dev_priv->edp_bpp) / 8; else return pixel_clock * 3; @@ -147,9 +150,21 @@ intel_dp_mode_valid(struct drm_connector *connector, { struct drm_encoder *encoder = intel_attached_encoder(connector); struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + struct drm_device *dev = connector->dev; + struct drm_i915_private *dev_priv = dev->dev_private; int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder)); int max_lanes = intel_dp_max_lane_count(intel_encoder); + if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) && + dev_priv->panel_fixed_mode) { + if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay) + return MODE_PANEL; + + if (mode->vdisplay > dev_priv->panel_fixed_mode->vdisplay) + return MODE_PANEL; + } + /* only refuse the mode on non eDP since we have seen some wierd eDP panels which are outside spec tolerances but somehow work by magic */ if (!IS_eDP(intel_encoder) && @@ -508,11 +523,37 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, { struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; + struct drm_device *dev = encoder->dev; + struct drm_i915_private *dev_priv = dev->dev_private; int lane_count, clock; int max_lane_count = intel_dp_max_lane_count(intel_encoder); int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 
1 : 0; static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; + if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) && + dev_priv->panel_fixed_mode) { + struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode; + + adjusted_mode->hdisplay = fixed_mode->hdisplay; + adjusted_mode->hsync_start = fixed_mode->hsync_start; + adjusted_mode->hsync_end = fixed_mode->hsync_end; + adjusted_mode->htotal = fixed_mode->htotal; + + adjusted_mode->vdisplay = fixed_mode->vdisplay; + adjusted_mode->vsync_start = fixed_mode->vsync_start; + adjusted_mode->vsync_end = fixed_mode->vsync_end; + adjusted_mode->vtotal = fixed_mode->vtotal; + + adjusted_mode->clock = fixed_mode->clock; + drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); + + /* + * the mode->clock is used to calculate the Data&Link M/N + * of the pipe. For the eDP the fixed clock should be used. + */ + mode->clock = dev_priv->panel_fixed_mode->clock; + } + for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { for (clock = 0; clock <= max_clock; clock++) { int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); @@ -531,7 +572,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, } } - if (IS_eDP(intel_encoder)) { + if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { /* okay we failed just pick the highest */ dp_priv->lane_count = max_lane_count; dp_priv->link_bw = bws[max_clock]; @@ -563,14 +604,14 @@ intel_reduce_ratio(uint32_t *num, uint32_t *den) } static void -intel_dp_compute_m_n(int bytes_per_pixel, +intel_dp_compute_m_n(int bpp, int nlanes, int pixel_clock, int link_clock, struct intel_dp_m_n *m_n) { m_n->tu = 64; - m_n->gmch_m = pixel_clock * bytes_per_pixel; + m_n->gmch_m = (pixel_clock * bpp) >> 3; m_n->gmch_n = link_clock * nlanes; intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); m_n->link_m = pixel_clock; @@ -578,6 +619,28 @@ intel_dp_compute_m_n(int bytes_per_pixel, intel_reduce_ratio(&m_n->link_m, &m_n->link_n); } +bool intel_pch_has_edp(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_mode_config *mode_config = &dev->mode_config; + struct drm_encoder *encoder; + + list_for_each_entry(encoder, &mode_config->encoder_list, head) { + struct intel_encoder *intel_encoder; + struct intel_dp_priv *dp_priv; + + if (!encoder || encoder->crtc != crtc) + continue; + + intel_encoder = enc_to_intel_encoder(encoder); + dp_priv = intel_encoder->dev_priv; + + if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) + return dp_priv->is_pch_edp; + } + return false; +} + void intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -587,7 +650,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_encoder *encoder; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int lane_count = 4; + int lane_count = 4, bpp = 24; struct intel_dp_m_n m_n; /* @@ -605,6 +668,8 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) { lane_count = dp_priv->lane_count; + if (IS_PCH_eDP(dp_priv)) + bpp = dev_priv->edp_bpp; break; } } @@ -614,7 +679,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, * the number of bytes_per_pixel post-LUT, which we always * set up for 8-bits of R/G/B, or 3 bytes total. 
*/ - intel_dp_compute_m_n(3, lane_count, + intel_dp_compute_m_n(bpp, lane_count, mode->clock, adjusted_mode->clock, &m_n); if (HAS_PCH_SPLIT(dev)) { @@ -796,7 +861,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) if (mode != DRM_MODE_DPMS_ON) { if (dp_reg & DP_PORT_EN) { intel_dp_link_down(intel_encoder, dp_priv->DP); - if (IS_eDP(intel_encoder)) { + if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { ironlake_edp_backlight_off(dev); ironlake_edp_panel_off(dev); } @@ -804,7 +869,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) } else { if (!(dp_reg & DP_PORT_EN)) { intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration); - if (IS_eDP(intel_encoder)) { + if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { ironlake_edp_panel_on(dev); ironlake_edp_backlight_on(dev); } @@ -1340,17 +1405,32 @@ static int intel_dp_get_modes(struct drm_connector *connector) struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); struct drm_device *dev = intel_encoder->enc.dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_dp_priv *dp_priv = intel_encoder->dev_priv; int ret; /* We should parse the EDID data and find out if it has an audio sink */ ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus); - if (ret) + if (ret) { + if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) && + !dev_priv->panel_fixed_mode) { + struct drm_display_mode *newmode; + list_for_each_entry(newmode, &connector->probed_modes, + head) { + if (newmode->type & DRM_MODE_TYPE_PREFERRED) { + dev_priv->panel_fixed_mode = + drm_mode_duplicate(dev, newmode); + break; + } + } + } + return ret; + } /* if eDP has no EDID, try to use fixed panel mode from VBT */ - if (IS_eDP(intel_encoder)) { + if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) { if (dev_priv->panel_fixed_mode != NULL) { struct drm_display_mode *mode; mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); @@ -1435,6 +1515,26 @@ intel_trans_dp_port_sel (struct drm_crtc *crtc) return -1; } +/* check the VBT to see whether the eDP is on DP-D port */ +bool intel_dpd_is_edp(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct child_device_config *p_child; + int i; + + if (!dev_priv->child_dev_num) + return false; + + for (i = 0; i < dev_priv->child_dev_num; i++) { + p_child = dev_priv->child_dev + i; + + if (p_child->dvo_port == PORT_IDPD && + p_child->device_type == DEVICE_TYPE_eDP) + return true; + } + return false; +} + void intel_dp_init(struct drm_device *dev, int output_reg) { @@ -1444,6 +1544,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) struct intel_connector *intel_connector; struct intel_dp_priv *dp_priv; const char *name = NULL; + int type; intel_encoder = kcalloc(sizeof(struct intel_encoder) + sizeof(struct intel_dp_priv), 1, GFP_KERNEL); @@ -1458,18 +1559,24 @@ intel_dp_init(struct drm_device *dev, int output_reg) dp_priv = (struct intel_dp_priv *)(intel_encoder + 1); + if (HAS_PCH_SPLIT(dev) && (output_reg == PCH_DP_D)) + if (intel_dpd_is_edp(dev)) + dp_priv->is_pch_edp = true; + + if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) { + type = DRM_MODE_CONNECTOR_eDP; + intel_encoder->type = INTEL_OUTPUT_EDP; + } else { + type = DRM_MODE_CONNECTOR_DisplayPort; + intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; + } + connector = &intel_connector->base; - drm_connector_init(dev, connector, &intel_dp_connector_funcs, - DRM_MODE_CONNECTOR_DisplayPort); + drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 
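/*
 * Illustrative sketch, not part of this patch: how the DP data and link
 * M/N ratios work out once intel_dp_compute_m_n() takes bits per pixel
 * ("bpp") instead of a byte count, as the hunk above changes so that an
 * 18 bpp PCH eDP panel divides cleanly.  All names below are local to
 * this example and it builds as plain userspace C; a straightforward
 * GCD stands in for the driver's ratio reduction.
 */
#include <stdint.h>
#include <stdio.h>

struct dp_m_n_example { uint32_t gmch_m, gmch_n, link_m, link_n; };

static uint32_t gcd32(uint32_t a, uint32_t b)
{
	while (b) {
		uint32_t t = a % b;
		a = b;
		b = t;
	}
	return a;
}

static void example_compute_m_n(int bpp, int nlanes, uint32_t pixel_khz,
				uint32_t link_khz, struct dp_m_n_example *mn)
{
	uint32_t g;

	/* data M/N: pixel payload vs. link payload, both in kbytes/s */
	mn->gmch_m = pixel_khz * bpp / 8;
	mn->gmch_n = link_khz * nlanes;
	g = gcd32(mn->gmch_m, mn->gmch_n);
	mn->gmch_m /= g;
	mn->gmch_n /= g;

	/* link M/N: pixel clock vs. link symbol clock */
	mn->link_m = pixel_khz;
	mn->link_n = link_khz;
	g = gcd32(mn->link_m, mn->link_n);
	mn->link_m /= g;
	mn->link_n /= g;
}

int main(void)
{
	struct dp_m_n_example mn;

	/* 1080p60 (148.5 MHz pixel clock) at 24 bpp, 2 lanes at 2.7 GT/s
	 * (270 MHz symbol clock, 1 payload byte per symbol per lane) */
	example_compute_m_n(24, 2, 148500, 270000, &mn);
	printf("data M/N = %u/%u, link M/N = %u/%u\n",
	       mn.gmch_m, mn.gmch_n, mn.link_m, mn.link_n);
	/* prints: data M/N = 33/40, link M/N = 11/20 */
	return 0;
}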
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); connector->polled = DRM_CONNECTOR_POLL_HPD; - if (output_reg == DP_A) - intel_encoder->type = INTEL_OUTPUT_EDP; - else - intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; - if (output_reg == DP_B || output_reg == PCH_DP_B) intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); else if (output_reg == DP_C || output_reg == PCH_DP_C) @@ -1528,7 +1635,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) intel_encoder->ddc_bus = &dp_priv->adapter; intel_encoder->hot_plug = intel_dp_hot_plug; - if (output_reg == DP_A) { + if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) { /* initialize panel mode from VBT if available for eDP */ if (dev_priv->lfp_lvds_vbt_mode) { dev_priv->panel_fixed_mode = diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 2f7970be905..b2190148703 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -143,8 +143,6 @@ struct intel_crtc { struct drm_crtc base; enum pipe pipe; enum plane plane; - struct drm_gem_object *cursor_bo; - uint32_t cursor_addr; u8 lut_r[256], lut_g[256], lut_b[256]; int dpms_mode; bool busy; /* is scanout buffer being updated frequently? */ @@ -153,6 +151,12 @@ struct intel_crtc { struct intel_overlay *overlay; struct intel_unpin_work *unpin_work; int fdi_lanes; + + struct drm_gem_object *cursor_bo; + uint32_t cursor_addr; + int16_t cursor_x, cursor_y; + int16_t cursor_width, cursor_height; + bool cursor_visble; }; #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) @@ -179,6 +183,8 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg); void intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); +extern bool intel_pch_has_edp(struct drm_crtc *crtc); +extern bool intel_dpd_is_edp(struct drm_device *dev); extern void intel_edp_link_config (struct intel_encoder *, int *, int *); diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 3e18c9e7729..54acd8b534d 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -61,6 +61,8 @@ static struct fb_ops intelfb_ops = { .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, + .fb_debug_enter = drm_fb_helper_debug_enter, + .fb_debug_leave = drm_fb_helper_debug_leave, }; static int intelfb_create(struct intel_fbdev *ifbdev, diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 83bd764b000..197887ed182 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -54,10 +54,11 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv; u32 sdvox; - sdvox = SDVO_ENCODING_HDMI | - SDVO_BORDER_ENABLE | - SDVO_VSYNC_ACTIVE_HIGH | - SDVO_HSYNC_ACTIVE_HIGH; + sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE; + if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) + sdvox |= SDVO_VSYNC_ACTIVE_HIGH; + if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) + sdvox |= SDVO_HSYNC_ACTIVE_HIGH; if (hdmi_priv->has_hdmi_sink) { sdvox |= SDVO_AUDIO_ENABLE; diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 0eab8df5bf7..0a2e60059fb 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -156,31 +156,73 @@ static int intel_lvds_mode_valid(struct drm_connector *connector, return MODE_OK; } +static void 
+centre_horizontally(struct drm_display_mode *mode, + int width) +{ + u32 border, sync_pos, blank_width, sync_width; + + /* keep the hsync and hblank widths constant */ + sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start; + blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start; + sync_pos = (blank_width - sync_width + 1) / 2; + + border = (mode->hdisplay - width + 1) / 2; + border += border & 1; /* make the border even */ + + mode->crtc_hdisplay = width; + mode->crtc_hblank_start = width + border; + mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width; + + mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos; + mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width; +} + +static void +centre_vertically(struct drm_display_mode *mode, + int height) +{ + u32 border, sync_pos, blank_width, sync_width; + + /* keep the vsync and vblank widths constant */ + sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start; + blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start; + sync_pos = (blank_width - sync_width + 1) / 2; + + border = (mode->vdisplay - height + 1) / 2; + + mode->crtc_vdisplay = height; + mode->crtc_vblank_start = height + border; + mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width; + + mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos; + mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width; +} + +static inline u32 panel_fitter_scaling(u32 source, u32 target) +{ + /* + * Floating point operation is not supported. So the FACTOR + * is defined, which can avoid the floating point computation + * when calculating the panel ratio. + */ +#define ACCURACY 12 +#define FACTOR (1 << ACCURACY) + u32 ratio = source * FACTOR / target; + return (FACTOR * ratio + FACTOR/2) / FACTOR; +} + static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - /* - * float point operation is not supported . So the PANEL_RATIO_FACTOR - * is defined, which can avoid the float point computation when - * calculating the panel ratio. - */ -#define PANEL_RATIO_FACTOR 8192 struct drm_device *dev = encoder->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); struct drm_encoder *tmp_encoder; struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder); struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv; - u32 pfit_control = 0, pfit_pgm_ratios = 0; - int left_border = 0, right_border = 0, top_border = 0; - int bottom_border = 0; - bool border = 0; - int panel_ratio, desired_ratio, vert_scale, horiz_scale; - int horiz_ratio, vert_ratio; - u32 hsync_width, vsync_width; - u32 hblank_width, vblank_width; - u32 hsync_pos, vsync_pos; + u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; /* Should never happen!! */ if (!IS_I965G(dev) && intel_crtc->pipe == 0) { @@ -200,27 +242,25 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, if (dev_priv->panel_fixed_mode == NULL) return true; /* - * If we have timings from the BIOS for the panel, put them in + * We have timings from the BIOS for the panel, put them in * to the adjusted mode. The CRTC will be set up for this mode, * with the panel scaling set up to source from the H/VDisplay * of the original mode. 
*/ - if (dev_priv->panel_fixed_mode != NULL) { - adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay; - adjusted_mode->hsync_start = - dev_priv->panel_fixed_mode->hsync_start; - adjusted_mode->hsync_end = - dev_priv->panel_fixed_mode->hsync_end; - adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal; - adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay; - adjusted_mode->vsync_start = - dev_priv->panel_fixed_mode->vsync_start; - adjusted_mode->vsync_end = - dev_priv->panel_fixed_mode->vsync_end; - adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal; - adjusted_mode->clock = dev_priv->panel_fixed_mode->clock; - drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); - } + adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay; + adjusted_mode->hsync_start = + dev_priv->panel_fixed_mode->hsync_start; + adjusted_mode->hsync_end = + dev_priv->panel_fixed_mode->hsync_end; + adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal; + adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay; + adjusted_mode->vsync_start = + dev_priv->panel_fixed_mode->vsync_start; + adjusted_mode->vsync_end = + dev_priv->panel_fixed_mode->vsync_end; + adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal; + adjusted_mode->clock = dev_priv->panel_fixed_mode->clock; + drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V); /* Make sure pre-965s set dither correctly */ if (!IS_I965G(dev)) { @@ -230,11 +270,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, /* Native modes don't need fitting */ if (adjusted_mode->hdisplay == mode->hdisplay && - adjusted_mode->vdisplay == mode->vdisplay) { - pfit_pgm_ratios = 0; - border = 0; + adjusted_mode->vdisplay == mode->vdisplay) goto out; - } /* full screen scale for now */ if (HAS_PCH_SPLIT(dev)) @@ -242,25 +279,9 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, /* 965+ wants fuzzy fitting */ if (IS_I965G(dev)) - pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) | - PFIT_FILTER_FUZZY; - - hsync_width = adjusted_mode->crtc_hsync_end - - adjusted_mode->crtc_hsync_start; - vsync_width = adjusted_mode->crtc_vsync_end - - adjusted_mode->crtc_vsync_start; - hblank_width = adjusted_mode->crtc_hblank_end - - adjusted_mode->crtc_hblank_start; - vblank_width = adjusted_mode->crtc_vblank_end - - adjusted_mode->crtc_vblank_start; - /* - * Deal with panel fitting options. Figure out how to stretch the - * image based on its aspect ratio & the current panel fitting mode. - */ - panel_ratio = adjusted_mode->hdisplay * PANEL_RATIO_FACTOR / - adjusted_mode->vdisplay; - desired_ratio = mode->hdisplay * PANEL_RATIO_FACTOR / - mode->vdisplay; + pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) | + PFIT_FILTER_FUZZY); + /* * Enable automatic panel scaling for non-native modes so that they fill * the screen. Should be enabled before the pipe is enabled, according @@ -278,170 +299,63 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, * For centered modes, we have to calculate border widths & * heights and modify the values programmed into the CRTC. 
*/ - left_border = (adjusted_mode->hdisplay - mode->hdisplay) / 2; - right_border = left_border; - if (mode->hdisplay & 1) - right_border++; - top_border = (adjusted_mode->vdisplay - mode->vdisplay) / 2; - bottom_border = top_border; - if (mode->vdisplay & 1) - bottom_border++; - /* Set active & border values */ - adjusted_mode->crtc_hdisplay = mode->hdisplay; - /* Keep the boder be even */ - if (right_border & 1) - right_border++; - /* use the border directly instead of border minuse one */ - adjusted_mode->crtc_hblank_start = mode->hdisplay + - right_border; - /* keep the blank width constant */ - adjusted_mode->crtc_hblank_end = - adjusted_mode->crtc_hblank_start + hblank_width; - /* get the hsync pos relative to hblank start */ - hsync_pos = (hblank_width - hsync_width) / 2; - /* keep the hsync pos be even */ - if (hsync_pos & 1) - hsync_pos++; - adjusted_mode->crtc_hsync_start = - adjusted_mode->crtc_hblank_start + hsync_pos; - /* keep the hsync width constant */ - adjusted_mode->crtc_hsync_end = - adjusted_mode->crtc_hsync_start + hsync_width; - adjusted_mode->crtc_vdisplay = mode->vdisplay; - /* use the border instead of border minus one */ - adjusted_mode->crtc_vblank_start = mode->vdisplay + - bottom_border; - /* keep the vblank width constant */ - adjusted_mode->crtc_vblank_end = - adjusted_mode->crtc_vblank_start + vblank_width; - /* get the vsync start postion relative to vblank start */ - vsync_pos = (vblank_width - vsync_width) / 2; - adjusted_mode->crtc_vsync_start = - adjusted_mode->crtc_vblank_start + vsync_pos; - /* keep the vsync width constant */ - adjusted_mode->crtc_vsync_end = - adjusted_mode->crtc_vsync_start + vsync_width; - border = 1; + centre_horizontally(adjusted_mode, mode->hdisplay); + centre_vertically(adjusted_mode, mode->vdisplay); + border = LVDS_BORDER_ENABLE; break; + case DRM_MODE_SCALE_ASPECT: - /* Scale but preserve the spect ratio */ - pfit_control |= PFIT_ENABLE; + /* Scale but preserve the aspect ratio */ if (IS_I965G(dev)) { + u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; + u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; + + pfit_control |= PFIT_ENABLE; /* 965+ is easy, it does everything in hw */ - if (panel_ratio > desired_ratio) + if (scaled_width > scaled_height) pfit_control |= PFIT_SCALING_PILLAR; - else if (panel_ratio < desired_ratio) + else if (scaled_width < scaled_height) pfit_control |= PFIT_SCALING_LETTER; else pfit_control |= PFIT_SCALING_AUTO; } else { + u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay; + u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay; /* * For earlier chips we have to calculate the scaling * ratio by hand and program it into the * PFIT_PGM_RATIO register */ - u32 horiz_bits, vert_bits, bits = 12; - horiz_ratio = mode->hdisplay * PANEL_RATIO_FACTOR/ - adjusted_mode->hdisplay; - vert_ratio = mode->vdisplay * PANEL_RATIO_FACTOR/ - adjusted_mode->vdisplay; - horiz_scale = adjusted_mode->hdisplay * - PANEL_RATIO_FACTOR / mode->hdisplay; - vert_scale = adjusted_mode->vdisplay * - PANEL_RATIO_FACTOR / mode->vdisplay; - - /* retain aspect ratio */ - if (panel_ratio > desired_ratio) { /* Pillar */ - u32 scaled_width; - scaled_width = mode->hdisplay * vert_scale / - PANEL_RATIO_FACTOR; - horiz_ratio = vert_ratio; - pfit_control |= (VERT_AUTO_SCALE | - VERT_INTERP_BILINEAR | - HORIZ_INTERP_BILINEAR); - /* Pillar will have left/right borders */ - left_border = (adjusted_mode->hdisplay - - scaled_width) / 2; - right_border = left_border; - if (mode->hdisplay & 1) /* odd 
resolutions */ - right_border++; - /* keep the border be even */ - if (right_border & 1) - right_border++; - adjusted_mode->crtc_hdisplay = scaled_width; - /* use border instead of border minus one */ - adjusted_mode->crtc_hblank_start = - scaled_width + right_border; - /* keep the hblank width constant */ - adjusted_mode->crtc_hblank_end = - adjusted_mode->crtc_hblank_start + - hblank_width; - /* - * get the hsync start pos relative to - * hblank start - */ - hsync_pos = (hblank_width - hsync_width) / 2; - /* keep the hsync_pos be even */ - if (hsync_pos & 1) - hsync_pos++; - adjusted_mode->crtc_hsync_start = - adjusted_mode->crtc_hblank_start + - hsync_pos; - /* keept hsync width constant */ - adjusted_mode->crtc_hsync_end = - adjusted_mode->crtc_hsync_start + - hsync_width; - border = 1; - } else if (panel_ratio < desired_ratio) { /* letter */ - u32 scaled_height = mode->vdisplay * - horiz_scale / PANEL_RATIO_FACTOR; - vert_ratio = horiz_ratio; - pfit_control |= (HORIZ_AUTO_SCALE | - VERT_INTERP_BILINEAR | - HORIZ_INTERP_BILINEAR); - /* Letterbox will have top/bottom border */ - top_border = (adjusted_mode->vdisplay - - scaled_height) / 2; - bottom_border = top_border; - if (mode->vdisplay & 1) - bottom_border++; - adjusted_mode->crtc_vdisplay = scaled_height; - /* use border instead of border minus one */ - adjusted_mode->crtc_vblank_start = - scaled_height + bottom_border; - /* keep the vblank width constant */ - adjusted_mode->crtc_vblank_end = - adjusted_mode->crtc_vblank_start + - vblank_width; - /* - * get the vsync start pos relative to - * vblank start - */ - vsync_pos = (vblank_width - vsync_width) / 2; - adjusted_mode->crtc_vsync_start = - adjusted_mode->crtc_vblank_start + - vsync_pos; - /* keep the vsync width constant */ - adjusted_mode->crtc_vsync_end = - adjusted_mode->crtc_vsync_start + - vsync_width; - border = 1; - } else { - /* Aspects match, Let hw scale both directions */ - pfit_control |= (VERT_AUTO_SCALE | - HORIZ_AUTO_SCALE | + if (scaled_width > scaled_height) { /* pillar */ + centre_horizontally(adjusted_mode, scaled_height / mode->vdisplay); + + border = LVDS_BORDER_ENABLE; + if (mode->vdisplay != adjusted_mode->vdisplay) { + u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay); + pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT | + bits << PFIT_VERT_SCALE_SHIFT); + pfit_control |= (PFIT_ENABLE | + VERT_INTERP_BILINEAR | + HORIZ_INTERP_BILINEAR); + } + } else if (scaled_width < scaled_height) { /* letter */ + centre_vertically(adjusted_mode, scaled_width / mode->hdisplay); + + border = LVDS_BORDER_ENABLE; + if (mode->hdisplay != adjusted_mode->hdisplay) { + u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay); + pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT | + bits << PFIT_VERT_SCALE_SHIFT); + pfit_control |= (PFIT_ENABLE | + VERT_INTERP_BILINEAR | + HORIZ_INTERP_BILINEAR); + } + } else + /* Aspects match, Let hw scale both directions */ + pfit_control |= (PFIT_ENABLE | + VERT_AUTO_SCALE | HORIZ_AUTO_SCALE | VERT_INTERP_BILINEAR | HORIZ_INTERP_BILINEAR); - } - horiz_bits = (1 << bits) * horiz_ratio / - PANEL_RATIO_FACTOR; - vert_bits = (1 << bits) * vert_ratio / - PANEL_RATIO_FACTOR; - pfit_pgm_ratios = - ((vert_bits << PFIT_VERT_SCALE_SHIFT) & - PFIT_VERT_SCALE_MASK) | - ((horiz_bits << PFIT_HORIZ_SCALE_SHIFT) & - PFIT_HORIZ_SCALE_MASK); } break; @@ -458,6 +372,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, VERT_INTERP_BILINEAR | HORIZ_INTERP_BILINEAR); break; + default: break; } @@ -465,14 
+380,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, out: lvds_priv->pfit_control = pfit_control; lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios; - /* - * When there exists the border, it means that the LVDS_BORDR - * should be enabled. - */ - if (border) - dev_priv->lvds_border_bits |= LVDS_BORDER_ENABLE; - else - dev_priv->lvds_border_bits &= ~(LVDS_BORDER_ENABLE); + dev_priv->lvds_border_bits = border; + /* * XXX: It would be nice to support lower refresh rates on the * panels to reduce power consumption, and perhaps match the diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index d7ad5139d17..d39aea24eab 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -65,7 +65,7 @@ #define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */ #define OCMD_TVSYNCFLIP_PARITY (0x1<<9) #define OCMD_TVSYNCFLIP_ENABLE (0x1<<7) -#define OCMD_BUF_TYPE_MASK (Ox1<<5) +#define OCMD_BUF_TYPE_MASK (0x1<<5) #define OCMD_BUF_TYPE_FRAME (0x0<<5) #define OCMD_BUF_TYPE_FIELD (0x1<<5) #define OCMD_TEST_MODE (0x1<<4) @@ -185,7 +185,8 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over if (OVERLAY_NONPHYSICAL(overlay->dev)) { regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, - overlay->reg_bo->gtt_offset); + overlay->reg_bo->gtt_offset, + KM_USER0); if (!regs) { DRM_ERROR("failed to map overlay regs in GTT\n"); @@ -200,7 +201,7 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay) { if (OVERLAY_NONPHYSICAL(overlay->dev)) - io_mapping_unmap_atomic(overlay->virt_addr); + io_mapping_unmap_atomic(overlay->virt_addr, KM_USER0); overlay->virt_addr = NULL; @@ -958,7 +959,7 @@ static int check_overlay_src(struct drm_device *dev, || rec->src_width < N_HORIZ_Y_TAPS*4) return -EINVAL; - /* check alingment constrains */ + /* check alignment constraints */ switch (rec->flags & I915_OVERLAY_TYPE_MASK) { case I915_OVERLAY_RGB: /* not implemented */ @@ -990,7 +991,10 @@ static int check_overlay_src(struct drm_device *dev, return -EINVAL; /* stride checking */ - stride_mask = 63; + if (IS_I830(dev) || IS_845G(dev)) + stride_mask = 255; + else + stride_mask = 63; if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) return -EINVAL; diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 03c231be227..d9d4d51aa89 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1237,9 +1237,11 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, /* Set the SDVO control regs. 
*/ if (IS_I965G(dev)) { - sdvox |= SDVO_BORDER_ENABLE | - SDVO_VSYNC_ACTIVE_HIGH | - SDVO_HSYNC_ACTIVE_HIGH; + sdvox |= SDVO_BORDER_ENABLE; + if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) + sdvox |= SDVO_VSYNC_ACTIVE_HIGH; + if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) + sdvox |= SDVO_HSYNC_ACTIVE_HIGH; } else { sdvox |= I915_READ(sdvo_priv->sdvo_reg); switch (sdvo_priv->sdvo_reg) { diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index d2d4e4045ca..cc3726a4a1c 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -476,7 +476,7 @@ static const struct tv_mode tv_modes[] = { .vi_end_f1 = 20, .vi_end_f2 = 21, .nbr_end = 240, - .burst_ena = 8, + .burst_ena = true, .hburst_start = 72, .hburst_len = 34, .vburst_start_f1 = 9, .vburst_end_f1 = 240, .vburst_start_f2 = 10, .vburst_end_f2 = 240, @@ -896,8 +896,6 @@ static const struct tv_mode tv_modes[] = { }, }; -#define NUM_TV_MODES sizeof(tv_modes) / sizeof (tv_modes[0]) - static void intel_tv_dpms(struct drm_encoder *encoder, int mode) { @@ -1512,7 +1510,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop tv_priv->margin[TV_MARGIN_BOTTOM] = val; changed = true; } else if (property == dev->mode_config.tv_mode_property) { - if (val >= NUM_TV_MODES) { + if (val >= ARRAY_SIZE(tv_modes)) { ret = -EINVAL; goto out; } @@ -1693,13 +1691,13 @@ intel_tv_init(struct drm_device *dev) connector->doublescan_allowed = false; /* Create TV properties then attach current values */ - tv_format_names = kmalloc(sizeof(char *) * NUM_TV_MODES, + tv_format_names = kmalloc(sizeof(char *) * ARRAY_SIZE(tv_modes), GFP_KERNEL); if (!tv_format_names) goto out; - for (i = 0; i < NUM_TV_MODES; i++) + for (i = 0; i < ARRAY_SIZE(tv_modes); i++) tv_format_names[i] = tv_modes[i].name; - drm_mode_create_tv_properties(dev, NUM_TV_MODES, tv_format_names); + drm_mode_create_tv_properties(dev, ARRAY_SIZE(tv_modes), tv_format_names); drm_connector_attach_property(connector, dev->mode_config.tv_mode_property, initial_mode); diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c index 3c917fb3a60..08868ac3048 100644 --- a/drivers/gpu/drm/mga/mga_dma.c +++ b/drivers/gpu/drm/mga/mga_dma.c @@ -52,7 +52,7 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup); * Engine control */ -int mga_do_wait_for_idle(drm_mga_private_t * dev_priv) +int mga_do_wait_for_idle(drm_mga_private_t *dev_priv) { u32 status = 0; int i; @@ -74,7 +74,7 @@ int mga_do_wait_for_idle(drm_mga_private_t * dev_priv) return -EBUSY; } -static int mga_do_dma_reset(drm_mga_private_t * dev_priv) +static int mga_do_dma_reset(drm_mga_private_t *dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_primary_buffer_t *primary = &dev_priv->prim; @@ -102,7 +102,7 @@ static int mga_do_dma_reset(drm_mga_private_t * dev_priv) * Primary DMA stream */ -void mga_do_dma_flush(drm_mga_private_t * dev_priv) +void mga_do_dma_flush(drm_mga_private_t *dev_priv) { drm_mga_primary_buffer_t *primary = &dev_priv->prim; u32 head, tail; @@ -142,11 +142,10 @@ void mga_do_dma_flush(drm_mga_private_t * dev_priv) head = MGA_READ(MGA_PRIMADDRESS); - if (head <= tail) { + if (head <= tail) primary->space = primary->size - primary->tail; - } else { + else primary->space = head - tail; - } DRM_DEBUG(" head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset)); DRM_DEBUG(" tail = 0x%06lx\n", (unsigned long)(tail - dev_priv->primary->offset)); @@ -158,7 +157,7 @@ void 
mga_do_dma_flush(drm_mga_private_t * dev_priv) DRM_DEBUG("done.\n"); } -void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv) +void mga_do_dma_wrap_start(drm_mga_private_t *dev_priv) { drm_mga_primary_buffer_t *primary = &dev_priv->prim; u32 head, tail; @@ -181,11 +180,10 @@ void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv) head = MGA_READ(MGA_PRIMADDRESS); - if (head == dev_priv->primary->offset) { + if (head == dev_priv->primary->offset) primary->space = primary->size; - } else { + else primary->space = head - dev_priv->primary->offset; - } DRM_DEBUG(" head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset)); DRM_DEBUG(" tail = 0x%06x\n", primary->tail); @@ -199,7 +197,7 @@ void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv) DRM_DEBUG("done.\n"); } -void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv) +void mga_do_dma_wrap_end(drm_mga_private_t *dev_priv) { drm_mga_primary_buffer_t *primary = &dev_priv->prim; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -220,11 +218,11 @@ void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv) * Freelist management */ -#define MGA_BUFFER_USED ~0 +#define MGA_BUFFER_USED (~0) #define MGA_BUFFER_FREE 0 #if MGA_FREELIST_DEBUG -static void mga_freelist_print(struct drm_device * dev) +static void mga_freelist_print(struct drm_device *dev) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_freelist_t *entry; @@ -245,7 +243,7 @@ static void mga_freelist_print(struct drm_device * dev) } #endif -static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_priv) +static int mga_freelist_init(struct drm_device *dev, drm_mga_private_t *dev_priv) { struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; @@ -288,7 +286,7 @@ static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_pr return 0; } -static void mga_freelist_cleanup(struct drm_device * dev) +static void mga_freelist_cleanup(struct drm_device *dev) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_freelist_t *entry; @@ -308,7 +306,7 @@ static void mga_freelist_cleanup(struct drm_device * dev) #if 0 /* FIXME: Still needed? 
*/ -static void mga_freelist_reset(struct drm_device * dev) +static void mga_freelist_reset(struct drm_device *dev) { struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; @@ -356,7 +354,7 @@ static struct drm_buf *mga_freelist_get(struct drm_device * dev) return NULL; } -int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf) +int mga_freelist_put(struct drm_device *dev, struct drm_buf *buf) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_buf_priv_t *buf_priv = buf->dev_private; @@ -391,7 +389,7 @@ int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf) * DMA initialization, cleanup */ -int mga_driver_load(struct drm_device * dev, unsigned long flags) +int mga_driver_load(struct drm_device *dev, unsigned long flags) { drm_mga_private_t *dev_priv; int ret; @@ -405,8 +403,8 @@ int mga_driver_load(struct drm_device * dev, unsigned long flags) dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT; dev_priv->chipset = flags; - dev_priv->mmio_base = drm_get_resource_start(dev, 1); - dev_priv->mmio_size = drm_get_resource_len(dev, 1); + dev_priv->mmio_base = pci_resource_start(dev->pdev, 1); + dev_priv->mmio_size = pci_resource_len(dev->pdev, 1); dev->counters += 3; dev->types[6] = _DRM_STAT_IRQ; @@ -439,8 +437,8 @@ int mga_driver_load(struct drm_device * dev, unsigned long flags) * * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap */ -static int mga_do_agp_dma_bootstrap(struct drm_device * dev, - drm_mga_dma_bootstrap_t * dma_bs) +static int mga_do_agp_dma_bootstrap(struct drm_device *dev, + drm_mga_dma_bootstrap_t *dma_bs) { drm_mga_private_t *const dev_priv = (drm_mga_private_t *) dev->dev_private; @@ -481,11 +479,10 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev, */ if (dev_priv->chipset == MGA_CARD_TYPE_G200) { - if (mode.mode & 0x02) { + if (mode.mode & 0x02) MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE); - } else { + else MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE); - } } /* Allocate and bind AGP memory. */ @@ -593,8 +590,8 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev, return 0; } #else -static int mga_do_agp_dma_bootstrap(struct drm_device * dev, - drm_mga_dma_bootstrap_t * dma_bs) +static int mga_do_agp_dma_bootstrap(struct drm_device *dev, + drm_mga_dma_bootstrap_t *dma_bs) { return -EINVAL; } @@ -614,8 +611,8 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev, * * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap */ -static int mga_do_pci_dma_bootstrap(struct drm_device * dev, - drm_mga_dma_bootstrap_t * dma_bs) +static int mga_do_pci_dma_bootstrap(struct drm_device *dev, + drm_mga_dma_bootstrap_t *dma_bs) { drm_mga_private_t *const dev_priv = (drm_mga_private_t *) dev->dev_private; @@ -678,9 +675,8 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev, req.size = dma_bs->secondary_bin_size; err = drm_addbufs_pci(dev, &req); - if (!err) { + if (!err) break; - } } if (bin_count == 0) { @@ -704,8 +700,8 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev, return 0; } -static int mga_do_dma_bootstrap(struct drm_device * dev, - drm_mga_dma_bootstrap_t * dma_bs) +static int mga_do_dma_bootstrap(struct drm_device *dev, + drm_mga_dma_bootstrap_t *dma_bs) { const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev); int err; @@ -737,17 +733,15 @@ static int mga_do_dma_bootstrap(struct drm_device * dev, * carve off portions of it for internal uses. The remaining memory * is returned to user-mode to be used for AGP textures. 
*/ - if (is_agp) { + if (is_agp) err = mga_do_agp_dma_bootstrap(dev, dma_bs); - } /* If we attempted to initialize the card for AGP DMA but failed, * clean-up any mess that may have been created. */ - if (err) { + if (err) mga_do_cleanup_dma(dev, MINIMAL_CLEANUP); - } /* Not only do we want to try and initialized PCI cards for PCI DMA, * but we also try to initialized AGP cards that could not be @@ -757,9 +751,8 @@ static int mga_do_dma_bootstrap(struct drm_device * dev, * AGP memory, etc. */ - if (!is_agp || err) { + if (!is_agp || err) err = mga_do_pci_dma_bootstrap(dev, dma_bs); - } return err; } @@ -792,7 +785,7 @@ int mga_dma_bootstrap(struct drm_device *dev, void *data, return err; } -static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init) +static int mga_do_init_dma(struct drm_device *dev, drm_mga_init_t *init) { drm_mga_private_t *dev_priv; int ret; @@ -800,11 +793,10 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init) dev_priv = dev->dev_private; - if (init->sgram) { + if (init->sgram) dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK; - } else { + else dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR; - } dev_priv->maccess = init->maccess; dev_priv->fb_cpp = init->fb_cpp; @@ -975,9 +967,8 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup) dev_priv->agp_handle = 0; } - if ((dev->agp != NULL) && dev->agp->acquired) { + if ((dev->agp != NULL) && dev->agp->acquired) err = drm_agp_release(dev); - } #endif } @@ -998,9 +989,8 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup) memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys)); - if (dev_priv->head != NULL) { + if (dev_priv->head != NULL) mga_freelist_cleanup(dev); - } } return err; @@ -1017,9 +1007,8 @@ int mga_dma_init(struct drm_device *dev, void *data, switch (init->func) { case MGA_INIT_DMA: err = mga_do_init_dma(dev, init); - if (err) { + if (err) (void)mga_do_cleanup_dma(dev, FULL_CLEANUP); - } return err; case MGA_CLEANUP_DMA: return mga_do_cleanup_dma(dev, FULL_CLEANUP); @@ -1047,9 +1036,8 @@ int mga_dma_flush(struct drm_device *dev, void *data, WRAP_WAIT_WITH_RETURN(dev_priv); - if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) { + if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) mga_do_dma_flush(dev_priv); - } if (lock->flags & _DRM_LOCK_QUIESCENT) { #if MGA_DMA_DEBUG @@ -1079,8 +1067,8 @@ int mga_dma_reset(struct drm_device *dev, void *data, * DMA buffer management */ -static int mga_dma_get_buffers(struct drm_device * dev, - struct drm_file *file_priv, struct drm_dma * d) +static int mga_dma_get_buffers(struct drm_device *dev, + struct drm_file *file_priv, struct drm_dma *d) { struct drm_buf *buf; int i; @@ -1134,9 +1122,8 @@ int mga_dma_buffers(struct drm_device *dev, void *data, d->granted_count = 0; - if (d->request_count) { + if (d->request_count) ret = mga_dma_get_buffers(dev, file_priv, d); - } return ret; } @@ -1144,7 +1131,7 @@ int mga_dma_buffers(struct drm_device *dev, void *data, /** * Called just before the module is unloaded. */ -int mga_driver_unload(struct drm_device * dev) +int mga_driver_unload(struct drm_device *dev) { kfree(dev->dev_private); dev->dev_private = NULL; @@ -1155,12 +1142,12 @@ int mga_driver_unload(struct drm_device * dev) /** * Called when the last opener of the device is closed. 
*/ -void mga_driver_lastclose(struct drm_device * dev) +void mga_driver_lastclose(struct drm_device *dev) { mga_do_cleanup_dma(dev, FULL_CLEANUP); } -int mga_driver_dma_quiescent(struct drm_device * dev) +int mga_driver_dma_quiescent(struct drm_device *dev) { drm_mga_private_t *dev_priv = dev->dev_private; return mga_do_wait_for_idle(dev_priv); diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c index ddfe16197b5..26d0d8ced80 100644 --- a/drivers/gpu/drm/mga/mga_drv.c +++ b/drivers/gpu/drm/mga/mga_drv.c @@ -36,7 +36,7 @@ #include "drm_pciids.h" -static int mga_driver_device_is_agp(struct drm_device * dev); +static int mga_driver_device_is_agp(struct drm_device *dev); static struct pci_device_id pciidlist[] = { mga_PCI_IDS @@ -119,7 +119,7 @@ MODULE_LICENSE("GPL and additional rights"); * \returns * If the device is a PCI G450, zero is returned. Otherwise 2 is returned. */ -static int mga_driver_device_is_agp(struct drm_device * dev) +static int mga_driver_device_is_agp(struct drm_device *dev) { const struct pci_dev *const pdev = dev->pdev; diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h index be6c6b9b0e8..1084fa4d261 100644 --- a/drivers/gpu/drm/mga/mga_drv.h +++ b/drivers/gpu/drm/mga/mga_drv.h @@ -164,59 +164,59 @@ extern int mga_dma_reset(struct drm_device *dev, void *data, extern int mga_dma_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int mga_driver_load(struct drm_device *dev, unsigned long flags); -extern int mga_driver_unload(struct drm_device * dev); -extern void mga_driver_lastclose(struct drm_device * dev); -extern int mga_driver_dma_quiescent(struct drm_device * dev); +extern int mga_driver_unload(struct drm_device *dev); +extern void mga_driver_lastclose(struct drm_device *dev); +extern int mga_driver_dma_quiescent(struct drm_device *dev); -extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv); +extern int mga_do_wait_for_idle(drm_mga_private_t *dev_priv); -extern void mga_do_dma_flush(drm_mga_private_t * dev_priv); -extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv); -extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv); +extern void mga_do_dma_flush(drm_mga_private_t *dev_priv); +extern void mga_do_dma_wrap_start(drm_mga_private_t *dev_priv); +extern void mga_do_dma_wrap_end(drm_mga_private_t *dev_priv); -extern int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf); +extern int mga_freelist_put(struct drm_device *dev, struct drm_buf *buf); /* mga_warp.c */ -extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv); -extern int mga_warp_init(drm_mga_private_t * dev_priv); +extern int mga_warp_install_microcode(drm_mga_private_t *dev_priv); +extern int mga_warp_init(drm_mga_private_t *dev_priv); /* mga_irq.c */ extern int mga_enable_vblank(struct drm_device *dev, int crtc); extern void mga_disable_vblank(struct drm_device *dev, int crtc); extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc); -extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence); -extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); +extern int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence); +extern int mga_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence); extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS); -extern void mga_driver_irq_preinstall(struct drm_device * dev); +extern void mga_driver_irq_preinstall(struct drm_device *dev); extern int 
mga_driver_irq_postinstall(struct drm_device *dev); -extern void mga_driver_irq_uninstall(struct drm_device * dev); +extern void mga_driver_irq_uninstall(struct drm_device *dev); extern long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); #define mga_flush_write_combine() DRM_WRITEMEMORYBARRIER() #if defined(__linux__) && defined(__alpha__) -#define MGA_BASE( reg ) ((unsigned long)(dev_priv->mmio->handle)) -#define MGA_ADDR( reg ) (MGA_BASE(reg) + reg) +#define MGA_BASE(reg) ((unsigned long)(dev_priv->mmio->handle)) +#define MGA_ADDR(reg) (MGA_BASE(reg) + reg) -#define MGA_DEREF( reg ) *(volatile u32 *)MGA_ADDR( reg ) -#define MGA_DEREF8( reg ) *(volatile u8 *)MGA_ADDR( reg ) +#define MGA_DEREF(reg) (*(volatile u32 *)MGA_ADDR(reg)) +#define MGA_DEREF8(reg) (*(volatile u8 *)MGA_ADDR(reg)) -#define MGA_READ( reg ) (_MGA_READ((u32 *)MGA_ADDR(reg))) -#define MGA_READ8( reg ) (_MGA_READ((u8 *)MGA_ADDR(reg))) -#define MGA_WRITE( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF( reg ) = val; } while (0) -#define MGA_WRITE8( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8( reg ) = val; } while (0) +#define MGA_READ(reg) (_MGA_READ((u32 *)MGA_ADDR(reg))) +#define MGA_READ8(reg) (_MGA_READ((u8 *)MGA_ADDR(reg))) +#define MGA_WRITE(reg, val) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF(reg) = val; } while (0) +#define MGA_WRITE8(reg, val) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8(reg) = val; } while (0) -static inline u32 _MGA_READ(u32 * addr) +static inline u32 _MGA_READ(u32 *addr) { DRM_MEMORYBARRIER(); return *(volatile u32 *)addr; } #else -#define MGA_READ8( reg ) DRM_READ8(dev_priv->mmio, (reg)) -#define MGA_READ( reg ) DRM_READ32(dev_priv->mmio, (reg)) -#define MGA_WRITE8( reg, val ) DRM_WRITE8(dev_priv->mmio, (reg), (val)) -#define MGA_WRITE( reg, val ) DRM_WRITE32(dev_priv->mmio, (reg), (val)) +#define MGA_READ8(reg) DRM_READ8(dev_priv->mmio, (reg)) +#define MGA_READ(reg) DRM_READ32(dev_priv->mmio, (reg)) +#define MGA_WRITE8(reg, val) DRM_WRITE8(dev_priv->mmio, (reg), (val)) +#define MGA_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio, (reg), (val)) #endif #define DWGREG0 0x1c00 @@ -233,40 +233,39 @@ static inline u32 _MGA_READ(u32 * addr) * Helper macross... 
*/ -#define MGA_EMIT_STATE( dev_priv, dirty ) \ +#define MGA_EMIT_STATE(dev_priv, dirty) \ do { \ - if ( (dirty) & ~MGA_UPLOAD_CLIPRECTS ) { \ - if ( dev_priv->chipset >= MGA_CARD_TYPE_G400 ) { \ - mga_g400_emit_state( dev_priv ); \ - } else { \ - mga_g200_emit_state( dev_priv ); \ - } \ + if ((dirty) & ~MGA_UPLOAD_CLIPRECTS) { \ + if (dev_priv->chipset >= MGA_CARD_TYPE_G400) \ + mga_g400_emit_state(dev_priv); \ + else \ + mga_g200_emit_state(dev_priv); \ } \ } while (0) -#define WRAP_TEST_WITH_RETURN( dev_priv ) \ +#define WRAP_TEST_WITH_RETURN(dev_priv) \ do { \ - if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \ - if ( mga_is_idle( dev_priv ) ) { \ - mga_do_dma_wrap_end( dev_priv ); \ - } else if ( dev_priv->prim.space < \ - dev_priv->prim.high_mark ) { \ - if ( MGA_DMA_DEBUG ) \ - DRM_INFO( "wrap...\n"); \ - return -EBUSY; \ + if (test_bit(0, &dev_priv->prim.wrapped)) { \ + if (mga_is_idle(dev_priv)) { \ + mga_do_dma_wrap_end(dev_priv); \ + } else if (dev_priv->prim.space < \ + dev_priv->prim.high_mark) { \ + if (MGA_DMA_DEBUG) \ + DRM_INFO("wrap...\n"); \ + return -EBUSY; \ } \ } \ } while (0) -#define WRAP_WAIT_WITH_RETURN( dev_priv ) \ +#define WRAP_WAIT_WITH_RETURN(dev_priv) \ do { \ - if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \ - if ( mga_do_wait_for_idle( dev_priv ) < 0 ) { \ - if ( MGA_DMA_DEBUG ) \ - DRM_INFO( "wrap...\n"); \ - return -EBUSY; \ + if (test_bit(0, &dev_priv->prim.wrapped)) { \ + if (mga_do_wait_for_idle(dev_priv) < 0) { \ + if (MGA_DMA_DEBUG) \ + DRM_INFO("wrap...\n"); \ + return -EBUSY; \ } \ - mga_do_dma_wrap_end( dev_priv ); \ + mga_do_dma_wrap_end(dev_priv); \ } \ } while (0) @@ -280,12 +279,12 @@ do { \ #define DMA_BLOCK_SIZE (5 * sizeof(u32)) -#define BEGIN_DMA( n ) \ +#define BEGIN_DMA(n) \ do { \ - if ( MGA_VERBOSE ) { \ - DRM_INFO( "BEGIN_DMA( %d )\n", (n) ); \ - DRM_INFO( " space=0x%x req=0x%Zx\n", \ - dev_priv->prim.space, (n) * DMA_BLOCK_SIZE ); \ + if (MGA_VERBOSE) { \ + DRM_INFO("BEGIN_DMA(%d)\n", (n)); \ + DRM_INFO(" space=0x%x req=0x%Zx\n", \ + dev_priv->prim.space, (n) * DMA_BLOCK_SIZE); \ } \ prim = dev_priv->prim.start; \ write = dev_priv->prim.tail; \ @@ -293,9 +292,9 @@ do { \ #define BEGIN_DMA_WRAP() \ do { \ - if ( MGA_VERBOSE ) { \ - DRM_INFO( "BEGIN_DMA()\n" ); \ - DRM_INFO( " space=0x%x\n", dev_priv->prim.space ); \ + if (MGA_VERBOSE) { \ + DRM_INFO("BEGIN_DMA()\n"); \ + DRM_INFO(" space=0x%x\n", dev_priv->prim.space); \ } \ prim = dev_priv->prim.start; \ write = dev_priv->prim.tail; \ @@ -304,72 +303,68 @@ do { \ #define ADVANCE_DMA() \ do { \ dev_priv->prim.tail = write; \ - if ( MGA_VERBOSE ) { \ - DRM_INFO( "ADVANCE_DMA() tail=0x%05x sp=0x%x\n", \ - write, dev_priv->prim.space ); \ - } \ + if (MGA_VERBOSE) \ + DRM_INFO("ADVANCE_DMA() tail=0x%05x sp=0x%x\n", \ + write, dev_priv->prim.space); \ } while (0) #define FLUSH_DMA() \ do { \ - if ( 0 ) { \ - DRM_INFO( "\n" ); \ - DRM_INFO( " tail=0x%06x head=0x%06lx\n", \ - dev_priv->prim.tail, \ - (unsigned long)(MGA_READ(MGA_PRIMADDRESS) - \ - dev_priv->primary->offset)); \ + if (0) { \ + DRM_INFO("\n"); \ + DRM_INFO(" tail=0x%06x head=0x%06lx\n", \ + dev_priv->prim.tail, \ + (unsigned long)(MGA_READ(MGA_PRIMADDRESS) - \ + dev_priv->primary->offset)); \ } \ - if ( !test_bit( 0, &dev_priv->prim.wrapped ) ) { \ - if ( dev_priv->prim.space < \ - dev_priv->prim.high_mark ) { \ - mga_do_dma_wrap_start( dev_priv ); \ - } else { \ - mga_do_dma_flush( dev_priv ); \ - } \ + if (!test_bit(0, &dev_priv->prim.wrapped)) { \ + if (dev_priv->prim.space < dev_priv->prim.high_mark) \ + 
mga_do_dma_wrap_start(dev_priv); \ + else \ + mga_do_dma_flush(dev_priv); \ } \ } while (0) /* Never use this, always use DMA_BLOCK(...) for primary DMA output. */ -#define DMA_WRITE( offset, val ) \ +#define DMA_WRITE(offset, val) \ do { \ - if ( MGA_VERBOSE ) { \ - DRM_INFO( " DMA_WRITE( 0x%08x ) at 0x%04Zx\n", \ - (u32)(val), write + (offset) * sizeof(u32) ); \ - } \ + if (MGA_VERBOSE) \ + DRM_INFO(" DMA_WRITE( 0x%08x ) at 0x%04Zx\n", \ + (u32)(val), write + (offset) * sizeof(u32)); \ *(volatile u32 *)(prim + write + (offset) * sizeof(u32)) = val; \ } while (0) -#define DMA_BLOCK( reg0, val0, reg1, val1, reg2, val2, reg3, val3 ) \ +#define DMA_BLOCK(reg0, val0, reg1, val1, reg2, val2, reg3, val3) \ do { \ - DMA_WRITE( 0, ((DMAREG( reg0 ) << 0) | \ - (DMAREG( reg1 ) << 8) | \ - (DMAREG( reg2 ) << 16) | \ - (DMAREG( reg3 ) << 24)) ); \ - DMA_WRITE( 1, val0 ); \ - DMA_WRITE( 2, val1 ); \ - DMA_WRITE( 3, val2 ); \ - DMA_WRITE( 4, val3 ); \ + DMA_WRITE(0, ((DMAREG(reg0) << 0) | \ + (DMAREG(reg1) << 8) | \ + (DMAREG(reg2) << 16) | \ + (DMAREG(reg3) << 24))); \ + DMA_WRITE(1, val0); \ + DMA_WRITE(2, val1); \ + DMA_WRITE(3, val2); \ + DMA_WRITE(4, val3); \ write += DMA_BLOCK_SIZE; \ } while (0) /* Buffer aging via primary DMA stream head pointer. */ -#define SET_AGE( age, h, w ) \ +#define SET_AGE(age, h, w) \ do { \ (age)->head = h; \ (age)->wrap = w; \ } while (0) -#define TEST_AGE( age, h, w ) ( (age)->wrap < w || \ - ( (age)->wrap == w && \ - (age)->head < h ) ) +#define TEST_AGE(age, h, w) ((age)->wrap < w || \ + ((age)->wrap == w && \ + (age)->head < h)) -#define AGE_BUFFER( buf_priv ) \ +#define AGE_BUFFER(buf_priv) \ do { \ drm_mga_freelist_t *entry = (buf_priv)->list_entry; \ - if ( (buf_priv)->dispatched ) { \ + if ((buf_priv)->dispatched) { \ entry->age.head = (dev_priv->prim.tail + \ dev_priv->primary->offset); \ entry->age.wrap = dev_priv->sarea_priv->last_wrap; \ @@ -681,7 +676,7 @@ do { \ /* Simple idle test. */ -static __inline__ int mga_is_idle(drm_mga_private_t * dev_priv) +static __inline__ int mga_is_idle(drm_mga_private_t *dev_priv) { u32 status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK; return (status == MGA_ENDPRDMASTS); diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c index daa6041a483..2581202297e 100644 --- a/drivers/gpu/drm/mga/mga_irq.c +++ b/drivers/gpu/drm/mga/mga_irq.c @@ -76,9 +76,8 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS) /* In addition to clearing the interrupt-pending bit, we * have to write to MGA_PRIMEND to re-start the DMA operation. 
*/ - if ((prim_start & ~0x03) != (prim_end & ~0x03)) { + if ((prim_start & ~0x03) != (prim_end & ~0x03)) MGA_WRITE(MGA_PRIMEND, prim_end); - } atomic_inc(&dev_priv->last_fence_retired); DRM_WAKEUP(&dev_priv->fence_queue); @@ -120,7 +119,7 @@ void mga_disable_vblank(struct drm_device *dev, int crtc) /* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */ } -int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence) +int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence) { drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; unsigned int cur_fence; @@ -139,7 +138,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence) return ret; } -void mga_driver_irq_preinstall(struct drm_device * dev) +void mga_driver_irq_preinstall(struct drm_device *dev) { drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; @@ -162,7 +161,7 @@ int mga_driver_irq_postinstall(struct drm_device *dev) return 0; } -void mga_driver_irq_uninstall(struct drm_device * dev) +void mga_driver_irq_uninstall(struct drm_device *dev) { drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; if (!dev_priv) diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c index a53b848e0f1..fff82045c42 100644 --- a/drivers/gpu/drm/mga/mga_state.c +++ b/drivers/gpu/drm/mga/mga_state.c @@ -41,8 +41,8 @@ * DMA hardware state programming functions */ -static void mga_emit_clip_rect(drm_mga_private_t * dev_priv, - struct drm_clip_rect * box) +static void mga_emit_clip_rect(drm_mga_private_t *dev_priv, + struct drm_clip_rect *box) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_context_regs_t *ctx = &sarea_priv->context_state; @@ -66,7 +66,7 @@ static void mga_emit_clip_rect(drm_mga_private_t * dev_priv, ADVANCE_DMA(); } -static __inline__ void mga_g200_emit_context(drm_mga_private_t * dev_priv) +static __inline__ void mga_g200_emit_context(drm_mga_private_t *dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_context_regs_t *ctx = &sarea_priv->context_state; @@ -89,7 +89,7 @@ static __inline__ void mga_g200_emit_context(drm_mga_private_t * dev_priv) ADVANCE_DMA(); } -static __inline__ void mga_g400_emit_context(drm_mga_private_t * dev_priv) +static __inline__ void mga_g400_emit_context(drm_mga_private_t *dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_context_regs_t *ctx = &sarea_priv->context_state; @@ -116,7 +116,7 @@ static __inline__ void mga_g400_emit_context(drm_mga_private_t * dev_priv) ADVANCE_DMA(); } -static __inline__ void mga_g200_emit_tex0(drm_mga_private_t * dev_priv) +static __inline__ void mga_g200_emit_tex0(drm_mga_private_t *dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0]; @@ -144,7 +144,7 @@ static __inline__ void mga_g200_emit_tex0(drm_mga_private_t * dev_priv) ADVANCE_DMA(); } -static __inline__ void mga_g400_emit_tex0(drm_mga_private_t * dev_priv) +static __inline__ void mga_g400_emit_tex0(drm_mga_private_t *dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0]; @@ -184,7 +184,7 @@ static __inline__ void mga_g400_emit_tex0(drm_mga_private_t * dev_priv) ADVANCE_DMA(); } -static __inline__ void mga_g400_emit_tex1(drm_mga_private_t * dev_priv) +static __inline__ void mga_g400_emit_tex1(drm_mga_private_t *dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_texture_regs_t *tex = 
&sarea_priv->tex_state[1]; @@ -223,7 +223,7 @@ static __inline__ void mga_g400_emit_tex1(drm_mga_private_t * dev_priv) ADVANCE_DMA(); } -static __inline__ void mga_g200_emit_pipe(drm_mga_private_t * dev_priv) +static __inline__ void mga_g200_emit_pipe(drm_mga_private_t *dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; unsigned int pipe = sarea_priv->warp_pipe; @@ -250,7 +250,7 @@ static __inline__ void mga_g200_emit_pipe(drm_mga_private_t * dev_priv) ADVANCE_DMA(); } -static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv) +static __inline__ void mga_g400_emit_pipe(drm_mga_private_t *dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; unsigned int pipe = sarea_priv->warp_pipe; @@ -327,7 +327,7 @@ static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv) ADVANCE_DMA(); } -static void mga_g200_emit_state(drm_mga_private_t * dev_priv) +static void mga_g200_emit_state(drm_mga_private_t *dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; unsigned int dirty = sarea_priv->dirty; @@ -348,7 +348,7 @@ static void mga_g200_emit_state(drm_mga_private_t * dev_priv) } } -static void mga_g400_emit_state(drm_mga_private_t * dev_priv) +static void mga_g400_emit_state(drm_mga_private_t *dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; unsigned int dirty = sarea_priv->dirty; @@ -381,7 +381,7 @@ static void mga_g400_emit_state(drm_mga_private_t * dev_priv) /* Disallow all write destinations except the front and backbuffer. */ -static int mga_verify_context(drm_mga_private_t * dev_priv) +static int mga_verify_context(drm_mga_private_t *dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_context_regs_t *ctx = &sarea_priv->context_state; @@ -400,7 +400,7 @@ static int mga_verify_context(drm_mga_private_t * dev_priv) /* Disallow texture reads from PCI space. 
*/ -static int mga_verify_tex(drm_mga_private_t * dev_priv, int unit) +static int mga_verify_tex(drm_mga_private_t *dev_priv, int unit) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[unit]; @@ -417,7 +417,7 @@ static int mga_verify_tex(drm_mga_private_t * dev_priv, int unit) return 0; } -static int mga_verify_state(drm_mga_private_t * dev_priv) +static int mga_verify_state(drm_mga_private_t *dev_priv) { drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; unsigned int dirty = sarea_priv->dirty; @@ -446,7 +446,7 @@ static int mga_verify_state(drm_mga_private_t * dev_priv) return (ret == 0); } -static int mga_verify_iload(drm_mga_private_t * dev_priv, +static int mga_verify_iload(drm_mga_private_t *dev_priv, unsigned int dstorg, unsigned int length) { if (dstorg < dev_priv->texture_offset || @@ -465,7 +465,7 @@ static int mga_verify_iload(drm_mga_private_t * dev_priv, return 0; } -static int mga_verify_blit(drm_mga_private_t * dev_priv, +static int mga_verify_blit(drm_mga_private_t *dev_priv, unsigned int srcorg, unsigned int dstorg) { if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) || @@ -480,7 +480,7 @@ static int mga_verify_blit(drm_mga_private_t * dev_priv, * */ -static void mga_dma_dispatch_clear(struct drm_device * dev, drm_mga_clear_t * clear) +static void mga_dma_dispatch_clear(struct drm_device *dev, drm_mga_clear_t *clear) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -568,7 +568,7 @@ static void mga_dma_dispatch_clear(struct drm_device * dev, drm_mga_clear_t * cl FLUSH_DMA(); } -static void mga_dma_dispatch_swap(struct drm_device * dev) +static void mga_dma_dispatch_swap(struct drm_device *dev) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -622,7 +622,7 @@ static void mga_dma_dispatch_swap(struct drm_device * dev) DRM_DEBUG("... done.\n"); } -static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf) +static void mga_dma_dispatch_vertex(struct drm_device *dev, struct drm_buf *buf) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_buf_priv_t *buf_priv = buf->dev_private; @@ -669,7 +669,7 @@ static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * bu FLUSH_DMA(); } -static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * buf, +static void mga_dma_dispatch_indices(struct drm_device *dev, struct drm_buf *buf, unsigned int start, unsigned int end) { drm_mga_private_t *dev_priv = dev->dev_private; @@ -718,7 +718,7 @@ static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * b /* This copies a 64 byte aligned agp region to the frambuffer with a * standard blit, the ioctl needs to do checking. 
*/ -static void mga_dma_dispatch_iload(struct drm_device * dev, struct drm_buf * buf, +static void mga_dma_dispatch_iload(struct drm_device *dev, struct drm_buf *buf, unsigned int dstorg, unsigned int length) { drm_mga_private_t *dev_priv = dev->dev_private; @@ -766,7 +766,7 @@ static void mga_dma_dispatch_iload(struct drm_device * dev, struct drm_buf * buf FLUSH_DMA(); } -static void mga_dma_dispatch_blit(struct drm_device * dev, drm_mga_blit_t * blit) +static void mga_dma_dispatch_blit(struct drm_device *dev, drm_mga_blit_t *blit) { drm_mga_private_t *dev_priv = dev->dev_private; drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -801,9 +801,8 @@ static void mga_dma_dispatch_blit(struct drm_device * dev, drm_mga_blit_t * blit int w = pbox[i].x2 - pbox[i].x1 - 1; int start; - if (blit->ydir == -1) { + if (blit->ydir == -1) srcy = blit->height - srcy - 1; - } start = srcy * blit->src_pitch + srcx; diff --git a/drivers/gpu/drm/mga/mga_warp.c b/drivers/gpu/drm/mga/mga_warp.c index 9aad4847afd..f172bd5c257 100644 --- a/drivers/gpu/drm/mga/mga_warp.c +++ b/drivers/gpu/drm/mga/mga_warp.c @@ -46,7 +46,7 @@ MODULE_FIRMWARE(FIRMWARE_G400); #define WARP_UCODE_SIZE(size) ALIGN(size, MGA_WARP_CODE_ALIGN) -int mga_warp_install_microcode(drm_mga_private_t * dev_priv) +int mga_warp_install_microcode(drm_mga_private_t *dev_priv) { unsigned char *vcbase = dev_priv->warp->handle; unsigned long pcbase = dev_priv->warp->offset; @@ -133,7 +133,7 @@ out: #define WMISC_EXPECTED (MGA_WUCODECACHE_ENABLE | MGA_WMASTER_ENABLE) -int mga_warp_init(drm_mga_private_t * dev_priv) +int mga_warp_init(drm_mga_private_t *dev_priv) { u32 wmisc; diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig index 1175429da10..d2d28048efb 100644 --- a/drivers/gpu/drm/nouveau/Kconfig +++ b/drivers/gpu/drm/nouveau/Kconfig @@ -1,6 +1,6 @@ config DRM_NOUVEAU tristate "Nouveau (nVidia) cards" - depends on DRM + depends on DRM && PCI select FW_LOADER select DRM_KMS_HELPER select DRM_TTM @@ -41,4 +41,13 @@ config DRM_I2C_CH7006 This driver is currently only useful if you're also using the nouveau driver. + +config DRM_I2C_SIL164 + tristate "Silicon Image sil164 TMDS transmitter" + default m if DRM_NOUVEAU + help + Support for sil164 and similar single-link (or dual-link + when used in pairs) TMDS transmitters, used in some nVidia + video cards. 
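The blit hunk above flips the source Y coordinate whenever ydir is -1 before computing the linear start offset. A minimal standalone sketch of that calculation, assuming the same field meanings as drm_mga_blit_t (blit_src_start() itself is a hypothetical helper, not part of the patch):

/* Hypothetical helper mirroring the ydir handling in mga_dma_dispatch_blit():
 * a negative Y direction means the copy walks the source bottom-up, so the
 * start line is mirrored before the linear offset is formed. */
static unsigned int blit_src_start(int ydir, unsigned int height,
                                   unsigned int src_pitch,
                                   unsigned int srcx, unsigned int srcy)
{
        if (ydir == -1)
                srcy = height - srcy - 1;

        return srcy * src_pitch + srcx;
}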
+ endmenu diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index acd31ed861e..2405d5ef0ca 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile @@ -9,10 +9,10 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \ nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \ nouveau_display.o nouveau_connector.o nouveau_fbcon.o \ - nouveau_dp.o nouveau_grctx.o \ + nouveau_dp.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o nv50_mc.o \ - nv04_fb.o nv10_fb.o nv40_fb.o nv50_fb.o \ + nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o \ nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ nv04_graph.o nv10_graph.o nv20_graph.o \ nv40_graph.o nv50_graph.o \ @@ -22,7 +22,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nv50_cursor.o nv50_display.o nv50_fbcon.o \ nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \ nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \ - nv17_gpio.o nv50_gpio.o \ + nv10_gpio.o nv50_gpio.o \ nv50_calc.o nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index d4bcca8a513..c17a055ee3e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -3,6 +3,7 @@ #include <linux/slab.h> #include <acpi/acpi_drivers.h> #include <acpi/acpi_bus.h> +#include <acpi/video.h> #include "drmP.h" #include "drm.h" @@ -11,6 +12,7 @@ #include "nouveau_drv.h" #include "nouveau_drm.h" #include "nv50_display.h" +#include "nouveau_connector.h" #include <linux/vga_switcheroo.h> @@ -42,7 +44,7 @@ static const char nouveau_dsm_muid[] = { 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4, }; -static int nouveau_dsm(acpi_handle handle, int func, int arg, int *result) +static int nouveau_dsm(acpi_handle handle, int func, int arg, uint32_t *result) { struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_object_list input; @@ -259,3 +261,37 @@ int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len); } + +int +nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) +{ + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct acpi_device *acpidev; + acpi_handle handle; + int type, ret; + void *edid; + + switch (connector->connector_type) { + case DRM_MODE_CONNECTOR_LVDS: + case DRM_MODE_CONNECTOR_eDP: + type = ACPI_VIDEO_DISPLAY_LCD; + break; + default: + return -EINVAL; + } + + handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); + if (!handle) + return -ENODEV; + + ret = acpi_bus_get_device(handle, &acpidev); + if (ret) + return -ENODEV; + + ret = acpi_video_get_edid(acpidev, type, -1, &edid); + if (ret < 0) + return ret; + + nv_connector->edid = edid; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index e492919faf4..7369b5e7364 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -28,6 +28,8 @@ #include "nouveau_hw.h" #include "nouveau_encoder.h" +#include <linux/io-mapping.h> + /* these defines are made up */ #define NV_CIO_CRE_44_HEADA 0x0 #define NV_CIO_CRE_44_HEADB 0x3 @@ -209,20 +211,20 @@ static struct methods shadow_methods[] = { { "PCIROM", load_vbios_pci, true }, { "ACPI", load_vbios_acpi, true }, }; +#define NUM_SHADOW_METHODS 
ARRAY_SIZE(shadow_methods) static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) { - const int nr_methods = ARRAY_SIZE(shadow_methods); struct methods *methods = shadow_methods; int testscore = 3; - int scores[nr_methods], i; + int scores[NUM_SHADOW_METHODS], i; if (nouveau_vbios) { - for (i = 0; i < nr_methods; i++) + for (i = 0; i < NUM_SHADOW_METHODS; i++) if (!strcasecmp(nouveau_vbios, methods[i].desc)) break; - if (i < nr_methods) { + if (i < NUM_SHADOW_METHODS) { NV_INFO(dev, "Attempting to use BIOS image from %s\n", methods[i].desc); @@ -234,7 +236,7 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios); } - for (i = 0; i < nr_methods; i++) { + for (i = 0; i < NUM_SHADOW_METHODS; i++) { NV_TRACE(dev, "Attempting to load BIOS image from %s\n", methods[i].desc); data[0] = data[1] = 0; /* avoid reuse of previous image */ @@ -245,7 +247,7 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) } while (--testscore > 0) { - for (i = 0; i < nr_methods; i++) { + for (i = 0; i < NUM_SHADOW_METHODS; i++) { if (scores[i] == testscore) { NV_TRACE(dev, "Using BIOS image from %s\n", methods[i].desc); @@ -920,7 +922,7 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset, NV_ERROR(bios->dev, "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", offset, config, count); - return -EINVAL; + return len; } configval = ROM32(bios->data[offset + 11 + config * 4]); @@ -1022,7 +1024,7 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset, NV_ERROR(bios->dev, "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", offset, config, count); - return -EINVAL; + return len; } freq = ROM16(bios->data[offset + 12 + config * 2]); @@ -1194,7 +1196,7 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) dpe = nouveau_bios_dp_table(dev, dcb, &dummy); if (!dpe) { NV_ERROR(dev, "0x%04X: INIT_3A: no encoder table!!\n", offset); - return -EINVAL; + return 3; } switch (cond) { @@ -1218,12 +1220,16 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) int ret; auxch = nouveau_i2c_find(dev, bios->display.output->i2c_index); - if (!auxch) - return -ENODEV; + if (!auxch) { + NV_ERROR(dev, "0x%04X: couldn't get auxch\n", offset); + return 3; + } ret = nouveau_dp_auxch(auxch, 9, 0xd, &cond, 1); - if (ret) - return ret; + if (ret) { + NV_ERROR(dev, "0x%04X: auxch rd fail: %d\n", offset, ret); + return 3; + } if (cond & 1) iexec->execute = false; @@ -1392,7 +1398,7 @@ init_io_restrict_pll2(struct nvbios *bios, uint16_t offset, NV_ERROR(bios->dev, "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", offset, config, count); - return -EINVAL; + return len; } freq = ROM32(bios->data[offset + 11 + config * 4]); @@ -1452,6 +1458,7 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) * "mask n" and OR it with "data n" before writing it back to the device */ + struct drm_device *dev = bios->dev; uint8_t i2c_index = bios->data[offset + 1]; uint8_t i2c_address = bios->data[offset + 2] >> 1; uint8_t count = bios->data[offset + 3]; @@ -1466,9 +1473,11 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) "Count: 0x%02X\n", offset, i2c_index, i2c_address, count); - chan = init_i2c_device_find(bios->dev, i2c_index); - if (!chan) - return -ENODEV; + chan = init_i2c_device_find(dev, i2c_index); + if (!chan) { + NV_ERROR(dev, "0x%04X: i2c bus not found\n", offset); + return len; + } for (i = 0; i < count; i++) { 
uint8_t reg = bios->data[offset + 4 + i * 3]; @@ -1479,8 +1488,10 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0, I2C_SMBUS_READ, reg, I2C_SMBUS_BYTE_DATA, &val); - if (ret < 0) - return ret; + if (ret < 0) { + NV_ERROR(dev, "0x%04X: i2c rd fail: %d\n", offset, ret); + return len; + } BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, " "Mask: 0x%02X, Data: 0x%02X\n", @@ -1494,8 +1505,10 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0, I2C_SMBUS_WRITE, reg, I2C_SMBUS_BYTE_DATA, &val); - if (ret < 0) - return ret; + if (ret < 0) { + NV_ERROR(dev, "0x%04X: i2c wr fail: %d\n", offset, ret); + return len; + } } return len; @@ -1520,6 +1533,7 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) * "DCB I2C table entry index", set the register to "data n" */ + struct drm_device *dev = bios->dev; uint8_t i2c_index = bios->data[offset + 1]; uint8_t i2c_address = bios->data[offset + 2] >> 1; uint8_t count = bios->data[offset + 3]; @@ -1534,9 +1548,11 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) "Count: 0x%02X\n", offset, i2c_index, i2c_address, count); - chan = init_i2c_device_find(bios->dev, i2c_index); - if (!chan) - return -ENODEV; + chan = init_i2c_device_find(dev, i2c_index); + if (!chan) { + NV_ERROR(dev, "0x%04X: i2c bus not found\n", offset); + return len; + } for (i = 0; i < count; i++) { uint8_t reg = bios->data[offset + 4 + i * 2]; @@ -1553,8 +1569,10 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0, I2C_SMBUS_WRITE, reg, I2C_SMBUS_BYTE_DATA, &val); - if (ret < 0) - return ret; + if (ret < 0) { + NV_ERROR(dev, "0x%04X: i2c wr fail: %d\n", offset, ret); + return len; + } } return len; @@ -1577,6 +1595,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) * address" on the I2C bus given by "DCB I2C table entry index" */ + struct drm_device *dev = bios->dev; uint8_t i2c_index = bios->data[offset + 1]; uint8_t i2c_address = bios->data[offset + 2] >> 1; uint8_t count = bios->data[offset + 3]; @@ -1584,7 +1603,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) struct nouveau_i2c_chan *chan; struct i2c_msg msg; uint8_t data[256]; - int i; + int ret, i; if (!iexec->execute) return len; @@ -1593,9 +1612,11 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) "Count: 0x%02X\n", offset, i2c_index, i2c_address, count); - chan = init_i2c_device_find(bios->dev, i2c_index); - if (!chan) - return -ENODEV; + chan = init_i2c_device_find(dev, i2c_index); + if (!chan) { + NV_ERROR(dev, "0x%04X: i2c bus not found\n", offset); + return len; + } for (i = 0; i < count; i++) { data[i] = bios->data[offset + 4 + i]; @@ -1608,8 +1629,11 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) msg.flags = 0; msg.len = count; msg.buf = data; - if (i2c_transfer(&chan->adapter, &msg, 1) != 1) - return -EIO; + ret = i2c_transfer(&chan->adapter, &msg, 1); + if (ret != 1) { + NV_ERROR(dev, "0x%04X: i2c wr fail: %d\n", offset, ret); + return len; + } } return len; @@ -1633,6 +1657,7 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) * used -- see get_tmds_index_reg() */ + struct drm_device *dev = bios->dev; uint8_t mlv = bios->data[offset + 1]; uint32_t tmdsaddr = 
bios->data[offset + 2]; uint8_t mask = bios->data[offset + 3]; @@ -1647,8 +1672,10 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) offset, mlv, tmdsaddr, mask, data); reg = get_tmds_index_reg(bios->dev, mlv); - if (!reg) - return -EINVAL; + if (!reg) { + NV_ERROR(dev, "0x%04X: no tmds_index_reg\n", offset); + return 5; + } bios_wr32(bios, reg, tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE); @@ -1678,6 +1705,7 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset, * register is used -- see get_tmds_index_reg() */ + struct drm_device *dev = bios->dev; uint8_t mlv = bios->data[offset + 1]; uint8_t count = bios->data[offset + 2]; int len = 3 + count * 2; @@ -1691,8 +1719,10 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset, offset, mlv, count); reg = get_tmds_index_reg(bios->dev, mlv); - if (!reg) - return -EINVAL; + if (!reg) { + NV_ERROR(dev, "0x%04X: no tmds_index_reg\n", offset); + return len; + } for (i = 0; i < count; i++) { uint8_t tmdsaddr = bios->data[offset + 3 + i * 2]; @@ -2039,6 +2069,323 @@ init_zm_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) return 5; } +static inline void +bios_md32(struct nvbios *bios, uint32_t reg, + uint32_t mask, uint32_t val) +{ + bios_wr32(bios, reg, (bios_rd32(bios, reg) & ~mask) | val); +} + +static uint32_t +peek_fb(struct drm_device *dev, struct io_mapping *fb, + uint32_t off) +{ + uint32_t val = 0; + + if (off < pci_resource_len(dev->pdev, 1)) { + uint32_t __iomem *p = io_mapping_map_atomic_wc(fb, off, KM_USER0); + + val = ioread32(p); + + io_mapping_unmap_atomic(p, KM_USER0); + } + + return val; +} + +static void +poke_fb(struct drm_device *dev, struct io_mapping *fb, + uint32_t off, uint32_t val) +{ + if (off < pci_resource_len(dev->pdev, 1)) { + uint32_t __iomem *p = io_mapping_map_atomic_wc(fb, off, KM_USER0); + + iowrite32(val, p); + wmb(); + + io_mapping_unmap_atomic(p, KM_USER0); + } +} + +static inline bool +read_back_fb(struct drm_device *dev, struct io_mapping *fb, + uint32_t off, uint32_t val) +{ + poke_fb(dev, fb, off, val); + return val == peek_fb(dev, fb, off); +} + +static int +nv04_init_compute_mem(struct nvbios *bios) +{ + struct drm_device *dev = bios->dev; + uint32_t patt = 0xdeadbeef; + struct io_mapping *fb; + int i; + + /* Map the framebuffer aperture */ + fb = io_mapping_create_wc(pci_resource_start(dev->pdev, 1), + pci_resource_len(dev->pdev, 1)); + if (!fb) + return -ENOMEM; + + /* Sequencer and refresh off */ + NVWriteVgaSeq(dev, 0, 1, NVReadVgaSeq(dev, 0, 1) | 0x20); + bios_md32(bios, NV04_PFB_DEBUG_0, 0, NV04_PFB_DEBUG_0_REFRESH_OFF); + + bios_md32(bios, NV04_PFB_BOOT_0, ~0, + NV04_PFB_BOOT_0_RAM_AMOUNT_16MB | + NV04_PFB_BOOT_0_RAM_WIDTH_128 | + NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT); + + for (i = 0; i < 4; i++) + poke_fb(dev, fb, 4 * i, patt); + + poke_fb(dev, fb, 0x400000, patt + 1); + + if (peek_fb(dev, fb, 0) == patt + 1) { + bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE, + NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT); + bios_md32(bios, NV04_PFB_DEBUG_0, + NV04_PFB_DEBUG_0_REFRESH_OFF, 0); + + for (i = 0; i < 4; i++) + poke_fb(dev, fb, 4 * i, patt); + + if ((peek_fb(dev, fb, 0xc) & 0xffff) != (patt & 0xffff)) + bios_md32(bios, NV04_PFB_BOOT_0, + NV04_PFB_BOOT_0_RAM_WIDTH_128 | + NV04_PFB_BOOT_0_RAM_AMOUNT, + NV04_PFB_BOOT_0_RAM_AMOUNT_8MB); + + } else if ((peek_fb(dev, fb, 0xc) & 0xffff0000) != + (patt & 0xffff0000)) { + bios_md32(bios, NV04_PFB_BOOT_0, + NV04_PFB_BOOT_0_RAM_WIDTH_128 | + NV04_PFB_BOOT_0_RAM_AMOUNT, + 
NV04_PFB_BOOT_0_RAM_AMOUNT_4MB); + + } else if (peek_fb(dev, fb, 0) == patt) { + if (read_back_fb(dev, fb, 0x800000, patt)) + bios_md32(bios, NV04_PFB_BOOT_0, + NV04_PFB_BOOT_0_RAM_AMOUNT, + NV04_PFB_BOOT_0_RAM_AMOUNT_8MB); + else + bios_md32(bios, NV04_PFB_BOOT_0, + NV04_PFB_BOOT_0_RAM_AMOUNT, + NV04_PFB_BOOT_0_RAM_AMOUNT_4MB); + + bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE, + NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT); + + } else if (!read_back_fb(dev, fb, 0x800000, patt)) { + bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT, + NV04_PFB_BOOT_0_RAM_AMOUNT_8MB); + + } + + /* Refresh on, sequencer on */ + bios_md32(bios, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0); + NVWriteVgaSeq(dev, 0, 1, NVReadVgaSeq(dev, 0, 1) & ~0x20); + + io_mapping_free(fb); + return 0; +} + +static const uint8_t * +nv05_memory_config(struct nvbios *bios) +{ + /* Defaults for BIOSes lacking a memory config table */ + static const uint8_t default_config_tab[][2] = { + { 0x24, 0x00 }, + { 0x28, 0x00 }, + { 0x24, 0x01 }, + { 0x1f, 0x00 }, + { 0x0f, 0x00 }, + { 0x17, 0x00 }, + { 0x06, 0x00 }, + { 0x00, 0x00 } + }; + int i = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) & + NV_PEXTDEV_BOOT_0_RAMCFG) >> 2; + + if (bios->legacy.mem_init_tbl_ptr) + return &bios->data[bios->legacy.mem_init_tbl_ptr + 2 * i]; + else + return default_config_tab[i]; +} + +static int +nv05_init_compute_mem(struct nvbios *bios) +{ + struct drm_device *dev = bios->dev; + const uint8_t *ramcfg = nv05_memory_config(bios); + uint32_t patt = 0xdeadbeef; + struct io_mapping *fb; + int i, v; + + /* Map the framebuffer aperture */ + fb = io_mapping_create_wc(pci_resource_start(dev->pdev, 1), + pci_resource_len(dev->pdev, 1)); + if (!fb) + return -ENOMEM; + + /* Sequencer off */ + NVWriteVgaSeq(dev, 0, 1, NVReadVgaSeq(dev, 0, 1) | 0x20); + + if (bios_rd32(bios, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_UMA_ENABLE) + goto out; + + bios_md32(bios, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0); + + /* If present load the hardcoded scrambling table */ + if (bios->legacy.mem_init_tbl_ptr) { + uint32_t *scramble_tab = (uint32_t *)&bios->data[ + bios->legacy.mem_init_tbl_ptr + 0x10]; + + for (i = 0; i < 8; i++) + bios_wr32(bios, NV04_PFB_SCRAMBLE(i), + ROM32(scramble_tab[i])); + } + + /* Set memory type/width/length defaults depending on the straps */ + bios_md32(bios, NV04_PFB_BOOT_0, 0x3f, ramcfg[0]); + + if (ramcfg[1] & 0x80) + bios_md32(bios, NV04_PFB_CFG0, 0, NV04_PFB_CFG0_SCRAMBLE); + + bios_md32(bios, NV04_PFB_CFG1, 0x700001, (ramcfg[1] & 1) << 20); + bios_md32(bios, NV04_PFB_CFG1, 0, 1); + + /* Probe memory bus width */ + for (i = 0; i < 4; i++) + poke_fb(dev, fb, 4 * i, patt); + + if (peek_fb(dev, fb, 0xc) != patt) + bios_md32(bios, NV04_PFB_BOOT_0, + NV04_PFB_BOOT_0_RAM_WIDTH_128, 0); + + /* Probe memory length */ + v = bios_rd32(bios, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_RAM_AMOUNT; + + if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_32MB && + (!read_back_fb(dev, fb, 0x1000000, ++patt) || + !read_back_fb(dev, fb, 0, ++patt))) + bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT, + NV04_PFB_BOOT_0_RAM_AMOUNT_16MB); + + if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_16MB && + !read_back_fb(dev, fb, 0x800000, ++patt)) + bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT, + NV04_PFB_BOOT_0_RAM_AMOUNT_8MB); + + if (!read_back_fb(dev, fb, 0x400000, ++patt)) + bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT, + NV04_PFB_BOOT_0_RAM_AMOUNT_4MB); + +out: + /* Sequencer on */ + NVWriteVgaSeq(dev, 0, 1, NVReadVgaSeq(dev, 0, 1) & ~0x20); + + 
io_mapping_free(fb); + return 0; +} + +static int +nv10_init_compute_mem(struct nvbios *bios) +{ + struct drm_device *dev = bios->dev; + struct drm_nouveau_private *dev_priv = bios->dev->dev_private; + const int mem_width[] = { 0x10, 0x00, 0x20 }; + const int mem_width_count = (dev_priv->chipset >= 0x17 ? 3 : 2); + uint32_t patt = 0xdeadbeef; + struct io_mapping *fb; + int i, j, k; + + /* Map the framebuffer aperture */ + fb = io_mapping_create_wc(pci_resource_start(dev->pdev, 1), + pci_resource_len(dev->pdev, 1)); + if (!fb) + return -ENOMEM; + + bios_wr32(bios, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1); + + /* Probe memory bus width */ + for (i = 0; i < mem_width_count; i++) { + bios_md32(bios, NV04_PFB_CFG0, 0x30, mem_width[i]); + + for (j = 0; j < 4; j++) { + for (k = 0; k < 4; k++) + poke_fb(dev, fb, 0x1c, 0); + + poke_fb(dev, fb, 0x1c, patt); + poke_fb(dev, fb, 0x3c, 0); + + if (peek_fb(dev, fb, 0x1c) == patt) + goto mem_width_found; + } + } + +mem_width_found: + patt <<= 1; + + /* Probe amount of installed memory */ + for (i = 0; i < 4; i++) { + int off = bios_rd32(bios, NV04_PFB_FIFO_DATA) - 0x100000; + + poke_fb(dev, fb, off, patt); + poke_fb(dev, fb, 0, 0); + + peek_fb(dev, fb, 0); + peek_fb(dev, fb, 0); + peek_fb(dev, fb, 0); + peek_fb(dev, fb, 0); + + if (peek_fb(dev, fb, off) == patt) + goto amount_found; + } + + /* IC missing - disable the upper half memory space. */ + bios_md32(bios, NV04_PFB_CFG0, 0x1000, 0); + +amount_found: + io_mapping_free(fb); + return 0; +} + +static int +nv20_init_compute_mem(struct nvbios *bios) +{ + struct drm_device *dev = bios->dev; + struct drm_nouveau_private *dev_priv = bios->dev->dev_private; + uint32_t mask = (dev_priv->chipset >= 0x25 ? 0x300 : 0x900); + uint32_t amount, off; + struct io_mapping *fb; + + /* Map the framebuffer aperture */ + fb = io_mapping_create_wc(pci_resource_start(dev->pdev, 1), + pci_resource_len(dev->pdev, 1)); + if (!fb) + return -ENOMEM; + + bios_wr32(bios, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1); + + /* Allow full addressing */ + bios_md32(bios, NV04_PFB_CFG0, 0, mask); + + amount = bios_rd32(bios, NV04_PFB_FIFO_DATA); + for (off = amount; off > 0x2000000; off -= 0x2000000) + poke_fb(dev, fb, off - 4, off); + + amount = bios_rd32(bios, NV04_PFB_FIFO_DATA); + if (amount != peek_fb(dev, fb, amount - 4)) + /* IC missing - disable the upper half memory space. */ + bios_md32(bios, NV04_PFB_CFG0, mask, 0); + + io_mapping_free(fb); + return 0; +} + static int init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) { @@ -2047,64 +2394,57 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) * * offset (8 bit): opcode * - * This opcode is meant to set NV_PFB_CFG0 (0x100200) appropriately so - * that the hardware can correctly calculate how much VRAM it has - * (and subsequently report that value in NV_PFB_CSTATUS (0x10020C)) + * This opcode is meant to set the PFB memory config registers + * appropriately so that we can correctly calculate how much VRAM it + * has (on nv10 and better chipsets the amount of installed VRAM is + * subsequently reported in NV_PFB_CSTATUS (0x10020C)). 
* - * The implementation of this opcode in general consists of two parts: - * 1) determination of the memory bus width - * 2) determination of how many of the card's RAM pads have ICs attached + * The implementation of this opcode in general consists of several + * parts: * - * 1) is done by a cunning combination of writes to offsets 0x1c and - * 0x3c in the framebuffer, and seeing whether the written values are - * read back correctly. This then affects bits 4-7 of NV_PFB_CFG0 + * 1) Determination of memory type and density. Only necessary for + * really old chipsets, the memory type reported by the strap bits + * (0x101000) is assumed to be accurate on nv05 and newer. * - * 2) is done by a cunning combination of writes to an offset slightly - * less than the maximum memory reported by NV_PFB_CSTATUS, then seeing - * if the test pattern can be read back. This then affects bits 12-15 of - * NV_PFB_CFG0 + * 2) Determination of the memory bus width. Usually done by a cunning + * combination of writes to offsets 0x1c and 0x3c in the fb, and + * seeing whether the written values are read back correctly. * - * In this context a "cunning combination" may include multiple reads - * and writes to varying locations, often alternating the test pattern - * and 0, doubtless to make sure buffers are filled, residual charges - * on tracks are removed etc. + * Only necessary on nv0x-nv1x and nv34, on the other cards we can + * trust the straps. * - * Unfortunately, the "cunning combination"s mentioned above, and the - * changes to the bits in NV_PFB_CFG0 differ with nearly every bios - * trace I have. + * 3) Determination of how many of the card's RAM pads have ICs + * attached, usually done by a cunning combination of writes to an + * offset slightly less than the maximum memory reported by + * NV_PFB_CSTATUS, then seeing if the test pattern can be read back. * - * Therefore, we cheat and assume the value of NV_PFB_CFG0 with which - * we started was correct, and use that instead + * This appears to be a NOP on IGPs and NV4x or newer chipsets, both io + * logs of the VBIOS and kmmio traces of the binary driver POSTing the + * card show nothing being done for this opcode. Why is it still listed + * in the table?! */ /* no iexec->execute check by design */ - /* - * This appears to be a NOP on G8x chipsets, both io logs of the VBIOS - * and kmmio traces of the binary driver POSTing the card show nothing - * being done for this opcode. why is it still listed in the table?! 
- */ - struct drm_nouveau_private *dev_priv = bios->dev->dev_private; + int ret; - if (dev_priv->card_type >= NV_40) - return 1; - - /* - * On every card I've seen, this step gets done for us earlier in - * the init scripts - uint8_t crdata = bios_idxprt_rd(dev, NV_VIO_SRX, 0x01); - bios_idxprt_wr(dev, NV_VIO_SRX, 0x01, crdata | 0x20); - */ - - /* - * This also has probably been done in the scripts, but an mmio trace of - * s3 resume shows nvidia doing it anyway (unlike the NV_VIO_SRX write) - */ - bios_wr32(bios, NV_PFB_REFCTRL, NV_PFB_REFCTRL_VALID_1); + if (dev_priv->chipset >= 0x40 || + dev_priv->chipset == 0x1a || + dev_priv->chipset == 0x1f) + ret = 0; + else if (dev_priv->chipset >= 0x20 && + dev_priv->chipset != 0x34) + ret = nv20_init_compute_mem(bios); + else if (dev_priv->chipset >= 0x10) + ret = nv10_init_compute_mem(bios); + else if (dev_priv->chipset >= 0x5) + ret = nv05_init_compute_mem(bios); + else + ret = nv04_init_compute_mem(bios); - /* write back the saved configuration value */ - bios_wr32(bios, NV_PFB_CFG0, bios->state.saved_nv_pfb_cfg0); + if (ret) + return ret; return 1; } @@ -2131,7 +2471,8 @@ init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) /* no iexec->execute check by design */ pci_nv_19 = bios_rd32(bios, NV_PBUS_PCI_NV_19); - bios_wr32(bios, NV_PBUS_PCI_NV_19, 0); + bios_wr32(bios, NV_PBUS_PCI_NV_19, pci_nv_19 & ~0xf00); + bios_wr32(bios, reg, value1); udelay(10); @@ -2167,7 +2508,7 @@ init_configure_mem(struct nvbios *bios, uint16_t offset, uint32_t reg, data; if (bios->major_version > 2) - return -ENODEV; + return 0; bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd( bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20); @@ -2180,14 +2521,14 @@ init_configure_mem(struct nvbios *bios, uint16_t offset, reg = ROM32(bios->data[seqtbloffs += 4])) { switch (reg) { - case NV_PFB_PRE: - data = NV_PFB_PRE_CMD_PRECHARGE; + case NV04_PFB_PRE: + data = NV04_PFB_PRE_CMD_PRECHARGE; break; - case NV_PFB_PAD: - data = NV_PFB_PAD_CKE_NORMAL; + case NV04_PFB_PAD: + data = NV04_PFB_PAD_CKE_NORMAL; break; - case NV_PFB_REF: - data = NV_PFB_REF_CMD_REFRESH; + case NV04_PFB_REF: + data = NV04_PFB_REF_CMD_REFRESH; break; default: data = ROM32(bios->data[meminitdata]); @@ -2222,7 +2563,7 @@ init_configure_clk(struct nvbios *bios, uint16_t offset, int clock; if (bios->major_version > 2) - return -ENODEV; + return 0; clock = ROM16(bios->data[meminitoffs + 4]) * 10; setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock); @@ -2255,7 +2596,7 @@ init_configure_preinit(struct nvbios *bios, uint16_t offset, uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6)); if (bios->major_version > 2) - return -ENODEV; + return 0; bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_SCRATCH4__INDEX, cr3c); @@ -2389,7 +2730,7 @@ init_ram_condition(struct nvbios *bios, uint16_t offset, * offset + 1 (8 bit): mask * offset + 2 (8 bit): cmpval * - * Test if (NV_PFB_BOOT_0 & "mask") equals "cmpval". + * Test if (NV04_PFB_BOOT_0 & "mask") equals "cmpval". 
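The rewritten INIT_COMPUTE_MEM comment above describes a write-then-read-back probe: a test pattern is written just below a candidate RAM size and the size is accepted only if the pattern survives a read-back. A minimal sketch of that idiom, with peek()/poke() standing in for peek_fb()/poke_fb() and probe_ram_amount() as a hypothetical wrapper rather than code from the patch:

#include <stdbool.h>
#include <stdint.h>

/* Stand-ins for the MMIO framebuffer accessors used by the patch. */
extern uint32_t peek(uint32_t off);
extern void poke(uint32_t off, uint32_t val);

static bool read_back(uint32_t off, uint32_t val)
{
        poke(off, val);
        return peek(off) == val;
}

static uint32_t probe_ram_amount(void)
{
        static const uint32_t candidate[] = {
                32u << 20, 16u << 20, 8u << 20, 4u << 20
        };
        uint32_t patt = 0xdeadbeef;
        unsigned int i;

        for (i = 0; i < sizeof(candidate) / sizeof(candidate[0]); i++) {
                /* On a smaller board the write just below the candidate top
                 * aliases or vanishes, so the pattern is lost on read-back. */
                if (read_back(candidate[i] - 4, patt++))
                        return candidate[i];
        }

        return 0;
}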
* If condition not met skip subsequent opcodes until condition is * inverted (INIT_NOT), or we hit INIT_RESUME */ @@ -2401,7 +2742,7 @@ init_ram_condition(struct nvbios *bios, uint16_t offset, if (!iexec->execute) return 3; - data = bios_rd32(bios, NV_PFB_BOOT_0) & mask; + data = bios_rd32(bios, NV04_PFB_BOOT_0) & mask; BIOSLOG(bios, "0x%04X: Checking if 0x%08X equals 0x%08X\n", offset, data, cmpval); @@ -2795,12 +3136,13 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) */ struct drm_nouveau_private *dev_priv = bios->dev->dev_private; + struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c }; int i; if (dev_priv->card_type != NV_50) { NV_ERROR(bios->dev, "INIT_GPIO on unsupported chipset\n"); - return -ENODEV; + return 1; } if (!iexec->execute) @@ -2815,7 +3157,7 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n", offset, gpio->tag, gpio->state_default); if (bios->execute) - nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default); + pgpio->set(bios->dev, gpio->tag, gpio->state_default); /* The NVIDIA binary driver doesn't appear to actually do * any of this, my VBIOS does however. @@ -2872,10 +3214,7 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset, uint8_t index; int i; - - if (!iexec->execute) - return len; - + /* critical! to know the length of the opcode */; if (!blocklen) { NV_ERROR(bios->dev, "0x%04X: Zero block length - has the M table " @@ -2883,6 +3222,9 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset, return -EINVAL; } + if (!iexec->execute) + return len; + strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf; index = bios->data[bios->ram_restrict_tbl_ptr + strap_ramcfg]; @@ -3064,14 +3406,14 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) if (!bios->display.output) { NV_ERROR(dev, "INIT_AUXCH: no active output\n"); - return -EINVAL; + return len; } auxch = init_i2c_device_find(dev, bios->display.output->i2c_index); if (!auxch) { NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n", bios->display.output->i2c_index); - return -ENODEV; + return len; } if (!iexec->execute) @@ -3084,7 +3426,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1); if (ret) { NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret); - return ret; + return len; } data &= bios->data[offset + 0]; @@ -3093,7 +3435,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1); if (ret) { NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret); - return ret; + return len; } } @@ -3123,14 +3465,14 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) if (!bios->display.output) { NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n"); - return -EINVAL; + return len; } auxch = init_i2c_device_find(dev, bios->display.output->i2c_index); if (!auxch) { NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n", bios->display.output->i2c_index); - return -ENODEV; + return len; } if (!iexec->execute) @@ -3141,7 +3483,7 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1); if (ret) { NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret); - return ret; + return len; } } @@ -5151,10 +5493,14 @@ static int parse_bmp_structure(struct drm_device 
*dev, struct nvbios *bios, unsi bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset]; bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1]; bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2]; - bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4]; - bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5]; - bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6]; - bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7]; + if (bios->data[legacy_i2c_offset + 4]) + bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4]; + if (bios->data[legacy_i2c_offset + 5]) + bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5]; + if (bios->data[legacy_i2c_offset + 6]) + bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6]; + if (bios->data[legacy_i2c_offset + 7]) + bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7]; if (bmplength > 74) { bios->fmaxvco = ROM32(bmp[67]); @@ -5589,9 +5935,12 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, if (conf & 0x4 || conf & 0x8) entry->lvdsconf.use_power_scripts = true; } else { - mask = ~0x5; + mask = ~0x7; + if (conf & 0x2) + entry->lvdsconf.use_acpi_for_edid = true; if (conf & 0x4) entry->lvdsconf.use_power_scripts = true; + entry->lvdsconf.sor.link = (conf & 0x00000030) >> 4; } if (conf & mask) { /* @@ -5706,13 +6055,6 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb, case OUTPUT_TV: entry->tvconf.has_component_output = false; break; - case OUTPUT_TMDS: - /* - * Invent a DVI-A output, by copying the fields of the DVI-D - * output; reported to work by math_b on an NV20(!). - */ - fabricate_vga_output(dcb, entry->i2c_index, entry->heads); - break; case OUTPUT_LVDS: if ((conn & 0x00003f00) != 0x10) entry->lvdsconf.use_straps_for_mode = true; @@ -5793,6 +6135,31 @@ void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb) dcb->entries = newentries; } +static bool +apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf) +{ + /* Dell Precision M6300 + * DCB entry 2: 02025312 00000010 + * DCB entry 3: 02026312 00000020 + * + * Identical, except apparently a different connector on a + * different SOR link. Not a clue how we're supposed to know + * which one is in use if it even shares an i2c line... + * + * Ignore the connector on the second SOR link to prevent + * nasty problems until this is sorted (assuming it's not a + * VBIOS bug). + */ + if ((dev->pdev->device == 0x040d) && + (dev->pdev->subsystem_vendor == 0x1028) && + (dev->pdev->subsystem_device == 0x019b)) { + if (*conn == 0x02026312 && *conf == 0x00000020) + return false; + } + + return true; +} + static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) { @@ -5926,6 +6293,9 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) if ((connection & 0x0000000f) == 0x0000000f) continue; + if (!apply_dcb_encoder_quirks(dev, i, &connection, &config)) + continue; + NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n", dcb->entries, connection, config); @@ -6181,9 +6551,8 @@ nouveau_run_vbios_init(struct drm_device *dev) struct nvbios *bios = &dev_priv->vbios; int i, ret = 0; - NVLockVgaCrtcs(dev, false); - if (nv_two_heads(dev)) - NVSetOwner(dev, bios->state.crtchead); + /* Reset the BIOS head to 0. 
*/ + bios->state.crtchead = 0; if (bios->major_version < 5) /* BMP only */ load_nv17_hw_sequencer_ucode(dev, bios); @@ -6216,8 +6585,6 @@ nouveau_run_vbios_init(struct drm_device *dev) } } - NVLockVgaCrtcs(dev, true); - return ret; } @@ -6238,7 +6605,6 @@ static bool nouveau_bios_posted(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - bool was_locked; unsigned htotal; if (dev_priv->chipset >= NV_50) { @@ -6248,13 +6614,12 @@ nouveau_bios_posted(struct drm_device *dev) return true; } - was_locked = NVLockVgaCrtcs(dev, false); htotal = NVReadVgaCrtc(dev, 0, 0x06); htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x01) << 8; htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x20) << 4; htotal |= (NVReadVgaCrtc(dev, 0, 0x25) & 0x01) << 10; htotal |= (NVReadVgaCrtc(dev, 0, 0x41) & 0x01) << 11; - NVLockVgaCrtcs(dev, was_locked); + return (htotal != 0); } @@ -6263,8 +6628,6 @@ nouveau_bios_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nvbios *bios = &dev_priv->vbios; - uint32_t saved_nv_pextdev_boot_0; - bool was_locked; int ret; if (!NVInitVBIOS(dev)) @@ -6284,40 +6647,27 @@ nouveau_bios_init(struct drm_device *dev) if (!bios->major_version) /* we don't run version 0 bios */ return 0; - /* these will need remembering across a suspend */ - saved_nv_pextdev_boot_0 = bios_rd32(bios, NV_PEXTDEV_BOOT_0); - bios->state.saved_nv_pfb_cfg0 = bios_rd32(bios, NV_PFB_CFG0); - /* init script execution disabled */ bios->execute = false; /* ... unless card isn't POSTed already */ if (!nouveau_bios_posted(dev)) { - NV_INFO(dev, "Adaptor not initialised\n"); - if (dev_priv->card_type < NV_40) { - NV_ERROR(dev, "Unable to POST this chipset\n"); - return -ENODEV; - } - - NV_INFO(dev, "Running VBIOS init tables\n"); + NV_INFO(dev, "Adaptor not initialised, " + "running VBIOS init tables.\n"); bios->execute = true; } - bios_wr32(bios, NV_PEXTDEV_BOOT_0, saved_nv_pextdev_boot_0); - ret = nouveau_run_vbios_init(dev); if (ret) return ret; /* feature_byte on BMP is poor, but init always sets CR4B */ - was_locked = NVLockVgaCrtcs(dev, false); if (bios->major_version < 5) bios->is_mobile = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_4B) & 0x40; /* all BIT systems need p_f_m_t for digital_min_front_porch */ if (bios->is_mobile || bios->major_version >= 5) ret = parse_fp_mode_table(dev, bios); - NVLockVgaCrtcs(dev, was_locked); /* allow subsequent scripts to execute */ bios->execute = true; diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h index adf4ec2d06c..024458a8d06 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.h +++ b/drivers/gpu/drm/nouveau/nouveau_bios.h @@ -81,6 +81,7 @@ struct dcb_connector_table_entry { enum dcb_connector_type type; uint8_t index2; uint8_t gpio_tag; + void *drm; }; struct dcb_connector_table { @@ -117,6 +118,7 @@ struct dcb_entry { struct { struct sor_conf sor; bool use_straps_for_mode; + bool use_acpi_for_edid; bool use_power_scripts; } lvdsconf; struct { @@ -249,8 +251,6 @@ struct nvbios { struct { int crtchead; - /* these need remembering across suspend */ - uint32_t saved_nv_pfb_cfg0; } state; struct { diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 6f3c1952237..3ca8343c15d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -461,9 +461,9 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, return ret; ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, - evict, no_wait_reserve, 
no_wait_gpu, new_mem); - if (nvbo->channel && nvbo->channel != chan) - ret = nouveau_fence_wait(fence, NULL, false, false); + evict || (nvbo->channel && + nvbo->channel != chan), + no_wait_reserve, no_wait_gpu, new_mem); nouveau_fence_unref((void *)&fence); return ret; } @@ -711,8 +711,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, return ret; /* Software copy if the card isn't up and running yet. */ - if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE || - !dev_priv->channel) { + if (!dev_priv->channel) { ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); goto out; } @@ -783,7 +782,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) break; case TTM_PL_VRAM: mem->bus.offset = mem->mm_node->start << PAGE_SHIFT; - mem->bus.base = drm_get_resource_start(dev, 1); + mem->bus.base = pci_resource_start(dev->pdev, 1); mem->bus.is_iomem = true; break; default: diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c index 88f9bc0941e..ca85da78484 100644 --- a/drivers/gpu/drm/nouveau/nouveau_calc.c +++ b/drivers/gpu/drm/nouveau/nouveau_calc.c @@ -200,7 +200,7 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp, struct nv_sim_state sim_data; int MClk = nouveau_hw_get_clock(dev, MPLL); int NVClk = nouveau_hw_get_clock(dev, NVPLL); - uint32_t cfg1 = nvReadFB(dev, NV_PFB_CFG1); + uint32_t cfg1 = nvReadFB(dev, NV04_PFB_CFG1); sim_data.pclk_khz = VClk; sim_data.mclk_khz = MClk; @@ -218,7 +218,7 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp, sim_data.mem_latency = 3; sim_data.mem_page_miss = 10; } else { - sim_data.memory_type = nvReadFB(dev, NV_PFB_CFG0) & 0x1; + sim_data.memory_type = nvReadFB(dev, NV04_PFB_CFG0) & 0x1; sim_data.memory_width = (nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64; sim_data.mem_latency = cfg1 & 0xf; sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1); diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 1fc57ef5829..90fdcda332b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -62,7 +62,8 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) * VRAM. 
*/ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, - drm_get_resource_start(dev, 1), + pci_resource_start(dev->pdev, + 1), dev_priv->fb_available_size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_PCI, &pushbuf); @@ -257,9 +258,7 @@ nouveau_channel_free(struct nouveau_channel *chan) nouveau_debugfs_channel_fini(chan); /* Give outstanding push buffers a chance to complete */ - spin_lock_irqsave(&chan->fence.lock, flags); nouveau_fence_update(chan); - spin_unlock_irqrestore(&chan->fence.lock, flags); if (chan->fence.sequence != chan->fence.sequence_ack) { struct nouveau_fence *fence = NULL; @@ -368,8 +367,6 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, struct nouveau_channel *chan; int ret; - NOUVEAU_CHECK_INITIALISED_WITH_RETURN; - if (dev_priv->engine.graph.accel_blocked) return -ENODEV; @@ -418,7 +415,6 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, struct drm_nouveau_channel_free *cfree = data; struct nouveau_channel *chan; - NOUVEAU_CHECK_INITIALISED_WITH_RETURN; NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan); nouveau_channel_free(chan); diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 149ed224c3c..734e92635e8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -102,63 +102,15 @@ nouveau_connector_destroy(struct drm_connector *drm_connector) kfree(drm_connector); } -static void -nouveau_connector_ddc_prepare(struct drm_connector *connector, int *flags) -{ - struct drm_nouveau_private *dev_priv = connector->dev->dev_private; - - if (dev_priv->card_type >= NV_50) - return; - - *flags = 0; - if (NVLockVgaCrtcs(dev_priv->dev, false)) - *flags |= 1; - if (nv_heads_tied(dev_priv->dev)) - *flags |= 2; - - if (*flags & 2) - NVSetOwner(dev_priv->dev, 0); /* necessary? 
*/ -} - -static void -nouveau_connector_ddc_finish(struct drm_connector *connector, int flags) -{ - struct drm_nouveau_private *dev_priv = connector->dev->dev_private; - - if (dev_priv->card_type >= NV_50) - return; - - if (flags & 2) - NVSetOwner(dev_priv->dev, 4); - if (flags & 1) - NVLockVgaCrtcs(dev_priv->dev, true); -} - static struct nouveau_i2c_chan * nouveau_connector_ddc_detect(struct drm_connector *connector, struct nouveau_encoder **pnv_encoder) { struct drm_device *dev = connector->dev; - uint8_t out_buf[] = { 0x0, 0x0}, buf[2]; - int ret, flags, i; - - struct i2c_msg msgs[] = { - { - .addr = 0x50, - .flags = 0, - .len = 1, - .buf = out_buf, - }, - { - .addr = 0x50, - .flags = I2C_M_RD, - .len = 1, - .buf = buf, - } - }; + int i; for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - struct nouveau_i2c_chan *i2c = NULL; + struct nouveau_i2c_chan *i2c; struct nouveau_encoder *nv_encoder; struct drm_mode_object *obj; int id; @@ -171,17 +123,9 @@ nouveau_connector_ddc_detect(struct drm_connector *connector, if (!obj) continue; nv_encoder = nouveau_encoder(obj_to_encoder(obj)); + i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); - if (nv_encoder->dcb->i2c_index < 0xf) - i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); - if (!i2c) - continue; - - nouveau_connector_ddc_prepare(connector, &flags); - ret = i2c_transfer(&i2c->adapter, msgs, 2); - nouveau_connector_ddc_finish(connector, flags); - - if (ret == 2) { + if (i2c && nouveau_probe_i2c_addr(i2c, 0x50)) { *pnv_encoder = nv_encoder; return i2c; } @@ -234,21 +178,7 @@ nouveau_connector_detect(struct drm_connector *connector) struct nouveau_connector *nv_connector = nouveau_connector(connector); struct nouveau_encoder *nv_encoder = NULL; struct nouveau_i2c_chan *i2c; - int type, flags; - - if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS) - nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS); - if (nv_encoder && nv_connector->native_mode) { - unsigned status = connector_status_connected; - -#if defined(CONFIG_ACPI_BUTTON) || \ - (defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE)) - if (!nouveau_ignorelid && !acpi_lid_open()) - status = connector_status_unknown; -#endif - nouveau_connector_set_encoder(connector, nv_encoder); - return status; - } + int type; /* Cleanup the previous EDID block. */ if (nv_connector->edid) { @@ -259,9 +189,7 @@ nouveau_connector_detect(struct drm_connector *connector) i2c = nouveau_connector_ddc_detect(connector, &nv_encoder); if (i2c) { - nouveau_connector_ddc_prepare(connector, &flags); nv_connector->edid = drm_get_edid(connector, &i2c->adapter); - nouveau_connector_ddc_finish(connector, flags); drm_mode_connector_update_edid_property(connector, nv_connector->edid); if (!nv_connector->edid) { @@ -321,6 +249,85 @@ detect_analog: return connector_status_disconnected; } +static enum drm_connector_status +nouveau_connector_detect_lvds(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_connector *nv_connector = nouveau_connector(connector); + struct nouveau_encoder *nv_encoder = NULL; + enum drm_connector_status status = connector_status_disconnected; + + /* Cleanup the previous EDID block. 
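The DDC detect path above now asks nouveau_probe_i2c_addr() whether anything answers at the EDID address 0x50, replacing the hand-rolled transfer it deletes. For reference, a standalone sketch equivalent to that removed two-message probe (probe_ddc_edid() is a hypothetical name, not driver API):

#include <linux/kernel.h>
#include <linux/i2c.h>

/* Write register offset 0, then read one byte back from the EDID address
 * 0x50; a monitor EEPROM is assumed present iff both messages complete. */
static bool probe_ddc_edid(struct i2c_adapter *adapter)
{
        u8 out = 0, in = 0;
        struct i2c_msg msgs[] = {
                { .addr = 0x50, .flags = 0,        .len = 1, .buf = &out },
                { .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &in  },
        };

        return i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs)) == 2;
}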
*/ + if (nv_connector->edid) { + drm_mode_connector_update_edid_property(connector, NULL); + kfree(nv_connector->edid); + nv_connector->edid = NULL; + } + + nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS); + if (!nv_encoder) + return connector_status_disconnected; + + /* Try retrieving EDID via DDC */ + if (!dev_priv->vbios.fp_no_ddc) { + status = nouveau_connector_detect(connector); + if (status == connector_status_connected) + goto out; + } + + /* On some laptops (Sony, i'm looking at you) there appears to + * be no direct way of accessing the panel's EDID. The only + * option available to us appears to be to ask ACPI for help.. + * + * It's important this check's before trying straps, one of the + * said manufacturer's laptops are configured in such a way + * the nouveau decides an entry in the VBIOS FP mode table is + * valid - it's not (rh#613284) + */ + if (nv_encoder->dcb->lvdsconf.use_acpi_for_edid) { + if (!nouveau_acpi_edid(dev, connector)) { + status = connector_status_connected; + goto out; + } + } + + /* If no EDID found above, and the VBIOS indicates a hardcoded + * modeline is avalilable for the panel, set it as the panel's + * native mode and exit. + */ + if (nouveau_bios_fp_mode(dev, NULL) && (dev_priv->vbios.fp_no_ddc || + nv_encoder->dcb->lvdsconf.use_straps_for_mode)) { + status = connector_status_connected; + goto out; + } + + /* Still nothing, some VBIOS images have a hardcoded EDID block + * stored for the panel stored in them. + */ + if (!dev_priv->vbios.fp_no_ddc) { + struct edid *edid = + (struct edid *)nouveau_bios_embedded_edid(dev); + if (edid) { + nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL); + *(nv_connector->edid) = *edid; + status = connector_status_connected; + } + } + +out: +#if defined(CONFIG_ACPI_BUTTON) || \ + (defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE)) + if (status == connector_status_connected && + !nouveau_ignorelid && !acpi_lid_open()) + status = connector_status_unknown; +#endif + + drm_mode_connector_update_edid_property(connector, nv_connector->edid); + nouveau_connector_set_encoder(connector, nv_encoder); + return status; +} + static void nouveau_connector_force(struct drm_connector *connector) { @@ -441,7 +448,8 @@ nouveau_connector_native_mode(struct drm_connector *connector) int high_w = 0, high_h = 0, high_v = 0; list_for_each_entry(mode, &nv_connector->base.probed_modes, head) { - if (helper->mode_valid(connector, mode) != MODE_OK) + if (helper->mode_valid(connector, mode) != MODE_OK || + (mode->flags & DRM_MODE_FLAG_INTERLACE)) continue; /* Use preferred mode if there is one.. */ @@ -534,21 +542,27 @@ static int nouveau_connector_get_modes(struct drm_connector *connector) { struct drm_device *dev = connector->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_connector *nv_connector = nouveau_connector(connector); struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder; int ret = 0; - /* If we're not LVDS, destroy the previous native mode, the attached - * monitor could have changed. + /* destroy the native mode, the attached monitor could have changed. 
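nouveau_connector_detect_lvds() above walks a fixed fallback chain for the panel's EDID or mode data. A hedged summary of that ordering as a standalone sketch (the enum and lvds_probe_order() are illustrative stand-ins, not driver code; the final ACPI lid check that can downgrade the result to "unknown" is left out):

#include <stdbool.h>

enum lvds_edid_source {
        LVDS_DDC,        /* 1. EDID read over DDC */
        LVDS_ACPI,       /* 2. EDID via ACPI, deliberately before straps (rh#613284) */
        LVDS_VBIOS_MODE, /* 3. hardcoded modeline from the VBIOS FP table */
        LVDS_VBIOS_EDID, /* 4. EDID block embedded in the VBIOS */
        LVDS_NONE
};

static enum lvds_edid_source
lvds_probe_order(bool fp_no_ddc, bool ddc_ok,
                 bool acpi_conf, bool acpi_ok,
                 bool straps_conf, bool vbios_mode, bool embedded_edid)
{
        if (!fp_no_ddc && ddc_ok)
                return LVDS_DDC;
        if (acpi_conf && acpi_ok)
                return LVDS_ACPI;
        if (vbios_mode && (fp_no_ddc || straps_conf))
                return LVDS_VBIOS_MODE;
        if (!fp_no_ddc && embedded_edid)
                return LVDS_VBIOS_EDID;
        return LVDS_NONE;
}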
*/ - if (nv_connector->dcb->type != DCB_CONNECTOR_LVDS && - nv_connector->native_mode) { + if (nv_connector->native_mode) { drm_mode_destroy(dev, nv_connector->native_mode); nv_connector->native_mode = NULL; } if (nv_connector->edid) ret = drm_add_edid_modes(connector, nv_connector->edid); + else + if (nv_encoder->dcb->type == OUTPUT_LVDS && + (nv_encoder->dcb->lvdsconf.use_straps_for_mode || + dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) { + nv_connector->native_mode = drm_mode_create(dev); + nouveau_bios_fp_mode(dev, nv_connector->native_mode); + } /* Find the native mode if this is a digital panel, if we didn't * find any modes through DDC previously add the native mode to @@ -569,7 +583,8 @@ nouveau_connector_get_modes(struct drm_connector *connector) ret = get_slave_funcs(nv_encoder)-> get_modes(to_drm_encoder(nv_encoder), connector); - if (nv_encoder->dcb->type == OUTPUT_LVDS) + if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS || + nv_connector->dcb->type == DCB_CONNECTOR_eDP) ret += nouveau_connector_scaler_modes_add(connector); return ret; @@ -643,6 +658,44 @@ nouveau_connector_best_encoder(struct drm_connector *connector) return NULL; } +void +nouveau_connector_set_polling(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_crtc *crtc; + bool spare_crtc = false; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) + spare_crtc |= !crtc->enabled; + + connector->polled = 0; + + switch (connector->connector_type) { + case DRM_MODE_CONNECTOR_VGA: + case DRM_MODE_CONNECTOR_TV: + if (dev_priv->card_type >= NV_50 || + (nv_gf4_disp_arch(dev) && spare_crtc)) + connector->polled = DRM_CONNECTOR_POLL_CONNECT; + break; + + case DRM_MODE_CONNECTOR_DVII: + case DRM_MODE_CONNECTOR_DVID: + case DRM_MODE_CONNECTOR_HDMIA: + case DRM_MODE_CONNECTOR_DisplayPort: + case DRM_MODE_CONNECTOR_eDP: + if (dev_priv->card_type >= NV_50) + connector->polled = DRM_CONNECTOR_POLL_HPD; + else if (connector->connector_type == DRM_MODE_CONNECTOR_DVID || + spare_crtc) + connector->polled = DRM_CONNECTOR_POLL_CONNECT; + break; + + default: + break; + } +} + static const struct drm_connector_helper_funcs nouveau_connector_helper_funcs = { .get_modes = nouveau_connector_get_modes, @@ -662,148 +715,74 @@ nouveau_connector_funcs = { .force = nouveau_connector_force }; -static int -nouveau_connector_create_lvds(struct drm_device *dev, - struct drm_connector *connector) -{ - struct nouveau_connector *nv_connector = nouveau_connector(connector); - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_i2c_chan *i2c = NULL; - struct nouveau_encoder *nv_encoder; - struct drm_display_mode native, *mode, *temp; - bool dummy, if_is_24bit = false; - int ret, flags; - - nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS); - if (!nv_encoder) - return -ENODEV; - - ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &if_is_24bit); - if (ret) { - NV_ERROR(dev, "Error parsing LVDS table, disabling LVDS\n"); - return ret; - } - nv_connector->use_dithering = !if_is_24bit; - - /* Firstly try getting EDID over DDC, if allowed and I2C channel - * is available. 
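nouveau_connector_set_polling() above chooses between hotplug interrupts and periodic polling per connector type and spare-CRTC availability. A hypothetical call-site sketch, assuming the policy is re-evaluated for every connector whenever a CRTC is enabled or disabled (this call site is not taken from the patch):

#include "drmP.h"
#include "nouveau_connector.h"

/* Hypothetical helper: re-run the polling policy over all connectors,
 * e.g. after a modeset frees up (or consumes) a CRTC usable for load
 * detection. */
static void refresh_connector_polling(struct drm_device *dev)
{
        struct drm_connector *connector;

        list_for_each_entry(connector, &dev->mode_config.connector_list, head)
                nouveau_connector_set_polling(connector);
}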
- */ - if (!dev_priv->vbios.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf) - i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); - - if (i2c) { - nouveau_connector_ddc_prepare(connector, &flags); - nv_connector->edid = drm_get_edid(connector, &i2c->adapter); - nouveau_connector_ddc_finish(connector, flags); - } - - /* If no EDID found above, and the VBIOS indicates a hardcoded - * modeline is avalilable for the panel, set it as the panel's - * native mode and exit. - */ - if (!nv_connector->edid && nouveau_bios_fp_mode(dev, &native) && - (nv_encoder->dcb->lvdsconf.use_straps_for_mode || - dev_priv->vbios.fp_no_ddc)) { - nv_connector->native_mode = drm_mode_duplicate(dev, &native); - goto out; - } - - /* Still nothing, some VBIOS images have a hardcoded EDID block - * stored for the panel stored in them. - */ - if (!nv_connector->edid && !nv_connector->native_mode && - !dev_priv->vbios.fp_no_ddc) { - struct edid *edid = - (struct edid *)nouveau_bios_embedded_edid(dev); - if (edid) { - nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL); - *(nv_connector->edid) = *edid; - } - } - - if (!nv_connector->edid) - goto out; - - /* We didn't find/use a panel mode from the VBIOS, so parse the EDID - * block and look for the preferred mode there. - */ - ret = drm_add_edid_modes(connector, nv_connector->edid); - if (ret == 0) - goto out; - nv_connector->detected_encoder = nv_encoder; - nv_connector->native_mode = nouveau_connector_native_mode(connector); - list_for_each_entry_safe(mode, temp, &connector->probed_modes, head) - drm_mode_remove(connector, mode); - -out: - if (!nv_connector->native_mode) { - NV_ERROR(dev, "LVDS present in DCB table, but couldn't " - "determine its native mode. Disabling.\n"); - return -ENODEV; - } - - drm_mode_connector_update_edid_property(connector, nv_connector->edid); - return 0; -} +static const struct drm_connector_funcs +nouveau_connector_funcs_lvds = { + .dpms = drm_helper_connector_dpms, + .save = NULL, + .restore = NULL, + .detect = nouveau_connector_detect_lvds, + .destroy = nouveau_connector_destroy, + .fill_modes = drm_helper_probe_single_connector_modes, + .set_property = nouveau_connector_set_property, + .force = nouveau_connector_force +}; -int -nouveau_connector_create(struct drm_device *dev, - struct dcb_connector_table_entry *dcb) +struct drm_connector * +nouveau_connector_create(struct drm_device *dev, int index) { + const struct drm_connector_funcs *funcs = &nouveau_connector_funcs; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_connector *nv_connector = NULL; + struct dcb_connector_table_entry *dcb = NULL; struct drm_connector *connector; - struct drm_encoder *encoder; - int ret, type; + int type, ret = 0; NV_DEBUG_KMS(dev, "\n"); + if (index >= dev_priv->vbios.dcb.connector.entries) + return ERR_PTR(-EINVAL); + + dcb = &dev_priv->vbios.dcb.connector.entry[index]; + if (dcb->drm) + return dcb->drm; + switch (dcb->type) { - case DCB_CONNECTOR_NONE: - return 0; case DCB_CONNECTOR_VGA: - NV_INFO(dev, "Detected a VGA connector\n"); type = DRM_MODE_CONNECTOR_VGA; break; case DCB_CONNECTOR_TV_0: case DCB_CONNECTOR_TV_1: case DCB_CONNECTOR_TV_3: - NV_INFO(dev, "Detected a TV connector\n"); type = DRM_MODE_CONNECTOR_TV; break; case DCB_CONNECTOR_DVI_I: - NV_INFO(dev, "Detected a DVI-I connector\n"); type = DRM_MODE_CONNECTOR_DVII; break; case DCB_CONNECTOR_DVI_D: - NV_INFO(dev, "Detected a DVI-D connector\n"); type = DRM_MODE_CONNECTOR_DVID; break; case DCB_CONNECTOR_HDMI_0: case DCB_CONNECTOR_HDMI_1: - NV_INFO(dev, "Detected 
a HDMI connector\n"); type = DRM_MODE_CONNECTOR_HDMIA; break; case DCB_CONNECTOR_LVDS: - NV_INFO(dev, "Detected a LVDS connector\n"); type = DRM_MODE_CONNECTOR_LVDS; + funcs = &nouveau_connector_funcs_lvds; break; case DCB_CONNECTOR_DP: - NV_INFO(dev, "Detected a DisplayPort connector\n"); type = DRM_MODE_CONNECTOR_DisplayPort; break; case DCB_CONNECTOR_eDP: - NV_INFO(dev, "Detected an eDP connector\n"); type = DRM_MODE_CONNECTOR_eDP; break; default: NV_ERROR(dev, "unknown connector type: 0x%02x!!\n", dcb->type); - return -EINVAL; + return ERR_PTR(-EINVAL); } nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL); if (!nv_connector) - return -ENOMEM; + return ERR_PTR(-ENOMEM); nv_connector->dcb = dcb; connector = &nv_connector->base; @@ -811,27 +790,21 @@ nouveau_connector_create(struct drm_device *dev, connector->interlace_allowed = false; connector->doublescan_allowed = false; - drm_connector_init(dev, connector, &nouveau_connector_funcs, type); + drm_connector_init(dev, connector, funcs, type); drm_connector_helper_add(connector, &nouveau_connector_helper_funcs); - /* attach encoders */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - - if (nv_encoder->dcb->connector != dcb->index) - continue; - - if (get_slave_funcs(nv_encoder)) - get_slave_funcs(nv_encoder)->create_resources(encoder, connector); + /* Check if we need dithering enabled */ + if (dcb->type == DCB_CONNECTOR_LVDS) { + bool dummy, is_24bit = false; - drm_mode_connector_attach_encoder(connector, encoder); - } + ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &is_24bit); + if (ret) { + NV_ERROR(dev, "Error parsing LVDS table, disabling " + "LVDS\n"); + goto fail; + } - if (!connector->encoder_ids[0]) { - NV_WARN(dev, " no encoders, ignoring\n"); - drm_connector_cleanup(connector); - kfree(connector); - return 0; + nv_connector->use_dithering = !is_24bit; } /* Init DVI-I specific properties */ @@ -841,12 +814,8 @@ nouveau_connector_create(struct drm_device *dev, drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0); } - if (dcb->type != DCB_CONNECTOR_LVDS) - nv_connector->use_dithering = false; - switch (dcb->type) { case DCB_CONNECTOR_VGA: - connector->polled = DRM_CONNECTOR_POLL_CONNECT; if (dev_priv->card_type >= NV_50) { drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, @@ -858,17 +827,6 @@ nouveau_connector_create(struct drm_device *dev, case DCB_CONNECTOR_TV_3: nv_connector->scaling_mode = DRM_MODE_SCALE_NONE; break; - case DCB_CONNECTOR_DP: - case DCB_CONNECTOR_eDP: - case DCB_CONNECTOR_HDMI_0: - case DCB_CONNECTOR_HDMI_1: - case DCB_CONNECTOR_DVI_I: - case DCB_CONNECTOR_DVI_D: - if (dev_priv->card_type >= NV_50) - connector->polled = DRM_CONNECTOR_POLL_HPD; - else - connector->polled = DRM_CONNECTOR_POLL_CONNECT; - /* fall-through */ default: nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN; @@ -882,15 +840,15 @@ nouveau_connector_create(struct drm_device *dev, break; } + nouveau_connector_set_polling(connector); + drm_sysfs_connector_add(connector); + dcb->drm = connector; + return dcb->drm; - if (dcb->type == DCB_CONNECTOR_LVDS) { - ret = nouveau_connector_create_lvds(dev, connector); - if (ret) { - connector->funcs->destroy(connector); - return ret; - } - } +fail: + drm_connector_cleanup(connector); + kfree(connector); + return ERR_PTR(ret); - return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h 
b/drivers/gpu/drm/nouveau/nouveau_connector.h index 4ef38abc2d9..0d2e668ccfe 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.h +++ b/drivers/gpu/drm/nouveau/nouveau_connector.h @@ -49,7 +49,10 @@ static inline struct nouveau_connector *nouveau_connector( return container_of(con, struct nouveau_connector, base); } -int nouveau_connector_create(struct drm_device *, - struct dcb_connector_table_entry *); +struct drm_connector * +nouveau_connector_create(struct drm_device *, int index); + +void +nouveau_connector_set_polling(struct drm_connector *); #endif /* __NOUVEAU_CONNECTOR_H__ */ diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index 65c441a1999..2e3c6caa97e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c @@ -92,11 +92,9 @@ nouveau_dma_init(struct nouveau_channel *chan) return ret; /* Map M2MF notifier object - fbcon. */ - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - ret = nouveau_bo_map(chan->notifier_bo); - if (ret) - return ret; - } + ret = nouveau_bo_map(chan->notifier_bo); + if (ret) + return ret; /* Insert NOPS for NOUVEAU_DMA_SKIPS */ ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS); diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c index deeb21c6865..33742b11188 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c @@ -23,8 +23,10 @@ */ #include "drmP.h" + #include "nouveau_drv.h" #include "nouveau_i2c.h" +#include "nouveau_connector.h" #include "nouveau_encoder.h" static int @@ -270,13 +272,39 @@ bool nouveau_dp_link_train(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - uint8_t config[4]; - uint8_t status[3]; + struct nouveau_connector *nv_connector; + struct bit_displayport_encoder_table *dpe; + int dpe_headerlen; + uint8_t config[4], status[3]; bool cr_done, cr_max_vs, eq_done; int ret = 0, i, tries, voltage; NV_DEBUG_KMS(dev, "link training!!\n"); + + nv_connector = nouveau_encoder_connector_get(nv_encoder); + if (!nv_connector) + return false; + + dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen); + if (!dpe) { + NV_ERROR(dev, "SOR-%d: no DP encoder table!\n", nv_encoder->or); + return false; + } + + /* disable hotplug detect, this flips around on some panels during + * link training. 
+ */ + pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false); + + if (dpe->script0) { + NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or); + nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0), + nv_encoder->dcb); + } + train: cr_done = eq_done = false; @@ -403,6 +431,15 @@ stop: } } + if (dpe->script1) { + NV_DEBUG_KMS(dev, "SOR-%d: running DP script 1\n", nv_encoder->or); + nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1), + nv_encoder->dcb); + } + + /* re-enable hotplug detect */ + pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true); + return eq_done; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 27377043229..1de5eb53e01 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -35,10 +35,6 @@ #include "drm_pciids.h" -MODULE_PARM_DESC(ctxfw, "Use external firmware blob for grctx init (NV40)"); -int nouveau_ctxfw = 0; -module_param_named(ctxfw, nouveau_ctxfw, int, 0400); - MODULE_PARM_DESC(noagp, "Disable AGP"); int nouveau_noagp; module_param_named(noagp, nouveau_noagp, int, 0400); @@ -56,7 +52,7 @@ int nouveau_vram_pushbuf; module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400); MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM"); -int nouveau_vram_notify = 1; +int nouveau_vram_notify = 0; module_param_named(vram_notify, nouveau_vram_notify, int, 0400); MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)"); @@ -132,7 +128,7 @@ static struct drm_driver driver; static int __devinit nouveau_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - return drm_get_dev(pdev, ent, &driver); + return drm_get_pci_dev(pdev, ent, &driver); } static void @@ -155,9 +151,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) struct drm_crtc *crtc; int ret, i; - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return -ENODEV; - if (pm_state.event == PM_EVENT_PRETHAW) return 0; @@ -257,9 +250,6 @@ nouveau_pci_resume(struct pci_dev *pdev) struct drm_crtc *crtc; int ret, i; - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return -ENODEV; - nouveau_fbcon_save_disable_accel(dev); NV_INFO(dev, "We're back, enabling device...\n"); @@ -269,6 +259,13 @@ nouveau_pci_resume(struct pci_dev *pdev) return -1; pci_set_master(dev->pdev); + /* Make sure the AGP controller is in a consistent state */ + if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) + nouveau_mem_reset_agp(dev); + + /* Make the CRTCs accessible */ + engine->display.early_init(dev); + NV_INFO(dev, "POSTing device...\n"); ret = nouveau_run_vbios_init(dev); if (ret) @@ -323,7 +320,6 @@ nouveau_pci_resume(struct pci_dev *pdev) list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); - int ret; ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); if (!ret) @@ -332,11 +328,7 @@ nouveau_pci_resume(struct pci_dev *pdev) NV_ERROR(dev, "Could not pin/map cursor.\n"); } - if (dev_priv->card_type < NV_50) { - nv04_display_restore(dev); - NVLockVgaCrtcs(dev, false); - } else - nv50_display_init(dev); + engine->display.init(dev); list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); @@ -371,7 +363,8 @@ nouveau_pci_resume(struct pci_dev *pdev) static struct drm_driver driver = { .driver_features = DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG | - DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM, + DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | 
DRIVER_GEM | + DRIVER_MODESET, .load = nouveau_load, .firstopen = nouveau_firstopen, .lastclose = nouveau_lastclose, @@ -438,16 +431,18 @@ static int __init nouveau_init(void) nouveau_modeset = 1; } - if (nouveau_modeset == 1) { - driver.driver_features |= DRIVER_MODESET; - nouveau_register_dsm_handler(); - } + if (!nouveau_modeset) + return 0; + nouveau_register_dsm_handler(); return drm_init(&driver); } static void __exit nouveau_exit(void) { + if (!nouveau_modeset) + return; + drm_exit(&driver); nouveau_unregister_dsm_handler(); } diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index c6971910648..e15db15dca7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -123,14 +123,6 @@ nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo) return ioptr; } -struct mem_block { - struct mem_block *next; - struct mem_block *prev; - uint64_t start; - uint64_t size; - struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */ -}; - enum nouveau_flags { NV_NFORCE = 0x10000000, NV_NFORCE2 = 0x20000000 @@ -149,7 +141,7 @@ struct nouveau_gpuobj { struct list_head list; struct nouveau_channel *im_channel; - struct mem_block *im_pramin; + struct drm_mm_node *im_pramin; struct nouveau_bo *im_backing; uint32_t im_backing_start; uint32_t *im_backing_suspend; @@ -196,7 +188,7 @@ struct nouveau_channel { struct list_head pending; uint32_t sequence; uint32_t sequence_ack; - uint32_t last_sequence_irq; + atomic_t last_sequence_irq; } fence; /* DMA push buffer */ @@ -206,7 +198,7 @@ struct nouveau_channel { /* Notifier memory */ struct nouveau_bo *notifier_bo; - struct mem_block *notifier_heap; + struct drm_mm notifier_heap; /* PFIFO context */ struct nouveau_gpuobj_ref *ramfc; @@ -224,7 +216,7 @@ struct nouveau_channel { /* Objects */ struct nouveau_gpuobj_ref *ramin; /* Private instmem */ - struct mem_block *ramin_heap; /* Private PRAMIN heap */ + struct drm_mm ramin_heap; /* Private PRAMIN heap */ struct nouveau_gpuobj_ref *ramht; /* Hash table */ struct list_head ramht_refs; /* Objects referenced by RAMHT */ @@ -277,8 +269,7 @@ struct nouveau_instmem_engine { void (*clear)(struct drm_device *, struct nouveau_gpuobj *); int (*bind)(struct drm_device *, struct nouveau_gpuobj *); int (*unbind)(struct drm_device *, struct nouveau_gpuobj *); - void (*prepare_access)(struct drm_device *, bool write); - void (*finish_access)(struct drm_device *); + void (*flush)(struct drm_device *); }; struct nouveau_mc_engine { @@ -303,10 +294,11 @@ struct nouveau_fb_engine { }; struct nouveau_fifo_engine { - void *priv; - int channels; + struct nouveau_gpuobj_ref *playlist[2]; + int cur_playlist; + int (*init)(struct drm_device *); void (*takedown)(struct drm_device *); @@ -339,10 +331,11 @@ struct nouveau_pgraph_object_class { struct nouveau_pgraph_engine { struct nouveau_pgraph_object_class *grclass; bool accel_blocked; - void *ctxprog; - void *ctxvals; int grctx_size; + /* NV2x/NV3x context table (0x400780) */ + struct nouveau_gpuobj_ref *ctx_table; + int (*init)(struct drm_device *); void (*takedown)(struct drm_device *); @@ -358,6 +351,24 @@ struct nouveau_pgraph_engine { uint32_t size, uint32_t pitch); }; +struct nouveau_display_engine { + int (*early_init)(struct drm_device *); + void (*late_takedown)(struct drm_device *); + int (*create)(struct drm_device *); + int (*init)(struct drm_device *); + void (*destroy)(struct drm_device *); +}; + +struct nouveau_gpio_engine { + int (*init)(struct drm_device *); + void 
(*takedown)(struct drm_device *); + + int (*get)(struct drm_device *, enum dcb_gpio_tag); + int (*set)(struct drm_device *, enum dcb_gpio_tag, int state); + + void (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on); +}; + struct nouveau_engine { struct nouveau_instmem_engine instmem; struct nouveau_mc_engine mc; @@ -365,6 +376,8 @@ struct nouveau_engine { struct nouveau_fb_engine fb; struct nouveau_pgraph_engine graph; struct nouveau_fifo_engine fifo; + struct nouveau_display_engine display; + struct nouveau_gpio_engine gpio; }; struct nouveau_pll_vals { @@ -500,11 +513,6 @@ enum nouveau_card_type { struct drm_nouveau_private { struct drm_device *dev; - enum { - NOUVEAU_CARD_INIT_DOWN, - NOUVEAU_CARD_INIT_DONE, - NOUVEAU_CARD_INIT_FAILED - } init_state; /* the card type, takes NV_* as values */ enum nouveau_card_type card_type; @@ -525,7 +533,7 @@ struct drm_nouveau_private { struct list_head vbl_waiting; struct { - struct ttm_global_reference mem_global_ref; + struct drm_global_reference mem_global_ref; struct ttm_bo_global_ref bo_global_ref; struct ttm_bo_device bdev; spinlock_t bo_list_lock; @@ -533,8 +541,6 @@ struct drm_nouveau_private { atomic_t validate_sequence; } ttm; - struct fb_info *fbdev_info; - int fifo_alloc_count; struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR]; @@ -595,11 +601,7 @@ struct drm_nouveau_private { struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR]; int vm_vram_pt_nr; - struct mem_block *ramin_heap; - - /* context table pointed to be NV_PGRAPH_CHANNEL_CTX_TABLE (0x400780) */ - uint32_t ctx_table_size; - struct nouveau_gpuobj_ref *ctx_table; + struct drm_mm ramin_heap; struct list_head gpuobj_list; @@ -618,6 +620,11 @@ struct drm_nouveau_private { struct backlight_device *backlight; struct nouveau_channel *evo; + struct { + struct dcb_entry *dcb; + u16 script; + u32 pclk; + } evo_irq; struct { struct dentry *channel_root; @@ -652,14 +659,6 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo) return 0; } -#define NOUVEAU_CHECK_INITIALISED_WITH_RETURN do { \ - struct drm_nouveau_private *nv = dev->dev_private; \ - if (nv->init_state != NOUVEAU_CARD_INIT_DONE) { \ - NV_ERROR(dev, "called without init\n"); \ - return -EINVAL; \ - } \ -} while (0) - #define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do { \ struct drm_nouveau_private *nv = dev->dev_private; \ if (!nouveau_channel_owner(dev, (cl), (id))) { \ @@ -682,7 +681,6 @@ extern int nouveau_tv_disable; extern char *nouveau_tv_norm; extern int nouveau_reg_debug; extern char *nouveau_vbios; -extern int nouveau_ctxfw; extern int nouveau_ignorelid; extern int nouveau_nofbaccel; extern int nouveau_noaccel; @@ -707,17 +705,10 @@ extern bool nouveau_wait_for_idle(struct drm_device *); extern int nouveau_card_init(struct drm_device *); /* nouveau_mem.c */ -extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start, - uint64_t size); -extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *, - uint64_t size, int align2, - struct drm_file *, int tail); -extern void nouveau_mem_takedown(struct mem_block **heap); -extern void nouveau_mem_free_block(struct mem_block *); extern int nouveau_mem_detect(struct drm_device *dev); -extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap); extern int nouveau_mem_init(struct drm_device *); extern int nouveau_mem_init_agp(struct drm_device *); +extern int nouveau_mem_reset_agp(struct drm_device *); extern void nouveau_mem_close(struct drm_device *); extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct 
drm_device *dev, uint32_t addr, @@ -857,11 +848,13 @@ void nouveau_register_dsm_handler(void); void nouveau_unregister_dsm_handler(void); int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len); bool nouveau_acpi_rom_supported(struct pci_dev *pdev); +int nouveau_acpi_edid(struct drm_device *, struct drm_connector *); #else static inline void nouveau_register_dsm_handler(void) {} static inline void nouveau_unregister_dsm_handler(void) {} static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; } static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; } +static inline int nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return -EINVAL; } #endif /* nouveau_backlight.c */ @@ -924,6 +917,10 @@ extern void nv10_fb_takedown(struct drm_device *); extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t, uint32_t, uint32_t); +/* nv30_fb.c */ +extern int nv30_fb_init(struct drm_device *); +extern void nv30_fb_takedown(struct drm_device *); + /* nv40_fb.c */ extern int nv40_fb_init(struct drm_device *); extern void nv40_fb_takedown(struct drm_device *); @@ -1035,12 +1032,6 @@ extern int nv50_graph_unload_context(struct drm_device *); extern void nv50_graph_context_switch(struct drm_device *); extern int nv50_grctx_init(struct nouveau_grctx *); -/* nouveau_grctx.c */ -extern int nouveau_grctx_prog_load(struct drm_device *); -extern void nouveau_grctx_vals_load(struct drm_device *, - struct nouveau_gpuobj *); -extern void nouveau_grctx_fini(struct drm_device *); - /* nv04_instmem.c */ extern int nv04_instmem_init(struct drm_device *); extern void nv04_instmem_takedown(struct drm_device *); @@ -1051,8 +1042,7 @@ extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); -extern void nv04_instmem_prepare_access(struct drm_device *, bool write); -extern void nv04_instmem_finish_access(struct drm_device *); +extern void nv04_instmem_flush(struct drm_device *); /* nv50_instmem.c */ extern int nv50_instmem_init(struct drm_device *); @@ -1064,8 +1054,9 @@ extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *, extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *); extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *); extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *); -extern void nv50_instmem_prepare_access(struct drm_device *, bool write); -extern void nv50_instmem_finish_access(struct drm_device *); +extern void nv50_instmem_flush(struct drm_device *); +extern void nv84_instmem_flush(struct drm_device *); +extern void nv50_vm_flush(struct drm_device *, int engine); /* nv04_mc.c */ extern int nv04_mc_init(struct drm_device *); @@ -1088,13 +1079,14 @@ extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); /* nv04_dac.c */ -extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry); +extern int nv04_dac_create(struct drm_connector *, struct dcb_entry *); extern uint32_t nv17_dac_sample_load(struct drm_encoder *encoder); extern int nv04_dac_output_offset(struct drm_encoder *encoder); extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable); +extern bool nv04_dac_in_use(struct 
drm_encoder *encoder); /* nv04_dfp.c */ -extern int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry); +extern int nv04_dfp_create(struct drm_connector *, struct dcb_entry *); extern int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent); extern void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent, int head, bool dl); @@ -1103,15 +1095,17 @@ extern void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode); /* nv04_tv.c */ extern int nv04_tv_identify(struct drm_device *dev, int i2c_index); -extern int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry); +extern int nv04_tv_create(struct drm_connector *, struct dcb_entry *); /* nv17_tv.c */ -extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry); +extern int nv17_tv_create(struct drm_connector *, struct dcb_entry *); /* nv04_display.c */ +extern int nv04_display_early_init(struct drm_device *); +extern void nv04_display_late_takedown(struct drm_device *); extern int nv04_display_create(struct drm_device *); +extern int nv04_display_init(struct drm_device *); extern void nv04_display_destroy(struct drm_device *); -extern void nv04_display_restore(struct drm_device *); /* nv04_crtc.c */ extern int nv04_crtc_create(struct drm_device *, int index); @@ -1147,7 +1141,6 @@ extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr); extern int nouveau_fence_flush(void *obj, void *arg); extern void nouveau_fence_unref(void **obj); extern void *nouveau_fence_ref(void *obj); -extern void nouveau_fence_handler(struct drm_device *dev, int channel); /* nouveau_gem.c */ extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *, @@ -1167,13 +1160,15 @@ extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *, extern int nouveau_gem_ioctl_info(struct drm_device *, void *, struct drm_file *); -/* nv17_gpio.c */ -int nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); -int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); +/* nv10_gpio.c */ +int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); +int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); /* nv50_gpio.c */ +int nv50_gpio_init(struct drm_device *dev); int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag); int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state); +void nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on); /* nv50_calc. 
*/ int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk, @@ -1220,6 +1215,14 @@ static inline void nv_wr32(struct drm_device *dev, unsigned reg, u32 val) iowrite32_native(val, dev_priv->mmio + reg); } +static inline void nv_mask(struct drm_device *dev, u32 reg, u32 mask, u32 val) +{ + u32 tmp = nv_rd32(dev, reg); + tmp &= ~mask; + tmp |= val; + nv_wr32(dev, reg, tmp); +} + static inline u8 nv_rd08(struct drm_device *dev, unsigned reg) { struct drm_nouveau_private *dev_priv = dev->dev_private; diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h index e1df8209cd0..a1a0d48ae70 100644 --- a/drivers/gpu/drm/nouveau/nouveau_encoder.h +++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h @@ -38,13 +38,15 @@ struct nouveau_encoder { struct dcb_entry *dcb; int or; + /* different to drm_encoder.crtc, this reflects what's + * actually programmed on the hw, not the proposed crtc */ + struct drm_crtc *crtc; + struct drm_display_mode mode; int last_dpms; struct nv04_output_reg restore; - void (*disconnect)(struct nouveau_encoder *encoder); - union { struct { int mc_unknown; @@ -71,8 +73,8 @@ static inline struct drm_encoder *to_drm_encoder(struct nouveau_encoder *enc) struct nouveau_connector * nouveau_encoder_connector_get(struct nouveau_encoder *encoder); -int nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry); -int nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry); +int nv50_sor_create(struct drm_connector *, struct dcb_entry *); +int nv50_dac_create(struct drm_connector *, struct dcb_entry *); struct bit_displayport_encoder_table { uint32_t match; diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 257ea130ae1..2fb2444d232 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -333,7 +333,7 @@ nouveau_fbcon_output_poll_changed(struct drm_device *dev) drm_fb_helper_hotplug_event(&dev_priv->nfbdev->helper); } -int +static int nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev) { struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb; diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index faddf53ff9e..6b208ffafa8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -67,12 +67,13 @@ nouveau_fence_update(struct nouveau_channel *chan) if (USE_REFCNT) sequence = nvchan_rd32(chan, 0x48); else - sequence = chan->fence.last_sequence_irq; + sequence = atomic_read(&chan->fence.last_sequence_irq); if (chan->fence.sequence_ack == sequence) return; chan->fence.sequence_ack = sequence; + spin_lock(&chan->fence.lock); list_for_each_safe(entry, tmp, &chan->fence.pending) { fence = list_entry(entry, struct nouveau_fence, entry); @@ -84,6 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan) if (sequence == chan->fence.sequence_ack) break; } + spin_unlock(&chan->fence.lock); } int @@ -119,7 +121,6 @@ nouveau_fence_emit(struct nouveau_fence *fence) { struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private; struct nouveau_channel *chan = fence->channel; - unsigned long flags; int ret; ret = RING_SPACE(chan, 2); @@ -127,9 +128,7 @@ nouveau_fence_emit(struct nouveau_fence *fence) return ret; if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) { - spin_lock_irqsave(&chan->fence.lock, flags); nouveau_fence_update(chan); - spin_unlock_irqrestore(&chan->fence.lock, flags); 
BUG_ON(chan->fence.sequence == chan->fence.sequence_ack - 1); @@ -138,9 +137,9 @@ nouveau_fence_emit(struct nouveau_fence *fence) fence->sequence = ++chan->fence.sequence; kref_get(&fence->refcount); - spin_lock_irqsave(&chan->fence.lock, flags); + spin_lock(&chan->fence.lock); list_add_tail(&fence->entry, &chan->fence.pending); - spin_unlock_irqrestore(&chan->fence.lock, flags); + spin_unlock(&chan->fence.lock); BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1); OUT_RING(chan, fence->sequence); @@ -173,14 +172,11 @@ nouveau_fence_signalled(void *sync_obj, void *sync_arg) { struct nouveau_fence *fence = nouveau_fence(sync_obj); struct nouveau_channel *chan = fence->channel; - unsigned long flags; if (fence->signalled) return true; - spin_lock_irqsave(&chan->fence.lock, flags); nouveau_fence_update(chan); - spin_unlock_irqrestore(&chan->fence.lock, flags); return fence->signalled; } @@ -190,8 +186,6 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr) unsigned long timeout = jiffies + (3 * DRM_HZ); int ret = 0; - __set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); - while (1) { if (nouveau_fence_signalled(sync_obj, sync_arg)) break; @@ -201,6 +195,8 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr) break; } + __set_current_state(intr ? TASK_INTERRUPTIBLE + : TASK_UNINTERRUPTIBLE); if (lazy) schedule_timeout(1); @@ -221,27 +217,12 @@ nouveau_fence_flush(void *sync_obj, void *sync_arg) return 0; } -void -nouveau_fence_handler(struct drm_device *dev, int channel) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_channel *chan = NULL; - - if (channel >= 0 && channel < dev_priv->engine.fifo.channels) - chan = dev_priv->fifos[channel]; - - if (chan) { - spin_lock_irq(&chan->fence.lock); - nouveau_fence_update(chan); - spin_unlock_irq(&chan->fence.lock); - } -} - int nouveau_fence_init(struct nouveau_channel *chan) { INIT_LIST_HEAD(&chan->fence.pending); spin_lock_init(&chan->fence.lock); + atomic_set(&chan->fence.last_sequence_irq, 0); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 69c76cf9340..547f2c24c1e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -137,8 +137,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, uint32_t flags = 0; int ret = 0; - NOUVEAU_CHECK_INITIALISED_WITH_RETURN; - if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL)) dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping; @@ -577,10 +575,9 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, struct drm_nouveau_gem_pushbuf_bo *bo; struct nouveau_channel *chan; struct validate_op op; - struct nouveau_fence *fence = 0; + struct nouveau_fence *fence = NULL; int i, j, ret = 0, do_reloc = 0; - NOUVEAU_CHECK_INITIALISED_WITH_RETURN; NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan); req->vram_available = dev_priv->fb_aper_free; @@ -760,8 +757,6 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT); int ret = -EINVAL; - NOUVEAU_CHECK_INITIALISED_WITH_RETURN; - gem = drm_gem_object_lookup(dev, file_priv, req->handle); if (!gem) return ret; @@ -800,8 +795,6 @@ nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data, struct nouveau_bo *nvbo; int ret = -EINVAL; - NOUVEAU_CHECK_INITIALISED_WITH_RETURN; - gem = drm_gem_object_lookup(dev, file_priv, req->handle); if (!gem) return ret; @@ -827,8 
+820,6 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data, struct drm_gem_object *gem; int ret; - NOUVEAU_CHECK_INITIALISED_WITH_RETURN; - gem = drm_gem_object_lookup(dev, file_priv, req->handle); if (!gem) return -EINVAL; diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c deleted file mode 100644 index f731c5f6053..00000000000 --- a/drivers/gpu/drm/nouveau/nouveau_grctx.c +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Copyright 2009 Red Hat Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Ben Skeggs - */ - -#include <linux/firmware.h> -#include <linux/slab.h> - -#include "drmP.h" -#include "nouveau_drv.h" - -struct nouveau_ctxprog { - uint32_t signature; - uint8_t version; - uint16_t length; - uint32_t data[]; -} __attribute__ ((packed)); - -struct nouveau_ctxvals { - uint32_t signature; - uint8_t version; - uint32_t length; - struct { - uint32_t offset; - uint32_t value; - } data[]; -} __attribute__ ((packed)); - -int -nouveau_grctx_prog_load(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; - const int chipset = dev_priv->chipset; - const struct firmware *fw; - const struct nouveau_ctxprog *cp; - const struct nouveau_ctxvals *cv; - char name[32]; - int ret, i; - - if (pgraph->accel_blocked) - return -ENODEV; - - if (!pgraph->ctxprog) { - sprintf(name, "nouveau/nv%02x.ctxprog", chipset); - ret = request_firmware(&fw, name, &dev->pdev->dev); - if (ret) { - NV_ERROR(dev, "No ctxprog for NV%02x\n", chipset); - return ret; - } - - pgraph->ctxprog = kmemdup(fw->data, fw->size, GFP_KERNEL); - if (!pgraph->ctxprog) { - NV_ERROR(dev, "OOM copying ctxprog\n"); - release_firmware(fw); - return -ENOMEM; - } - - cp = pgraph->ctxprog; - if (le32_to_cpu(cp->signature) != 0x5043564e || - cp->version != 0 || - le16_to_cpu(cp->length) != ((fw->size - 7) / 4)) { - NV_ERROR(dev, "ctxprog invalid\n"); - release_firmware(fw); - nouveau_grctx_fini(dev); - return -EINVAL; - } - release_firmware(fw); - } - - if (!pgraph->ctxvals) { - sprintf(name, "nouveau/nv%02x.ctxvals", chipset); - ret = request_firmware(&fw, name, &dev->pdev->dev); - if (ret) { - NV_ERROR(dev, "No ctxvals for NV%02x\n", chipset); - nouveau_grctx_fini(dev); - return ret; - } - - pgraph->ctxvals = kmemdup(fw->data, fw->size, GFP_KERNEL); - if (!pgraph->ctxvals) { - NV_ERROR(dev, "OOM copying ctxvals\n"); - release_firmware(fw); - 
nouveau_grctx_fini(dev); - return -ENOMEM; - } - - cv = (void *)pgraph->ctxvals; - if (le32_to_cpu(cv->signature) != 0x5643564e || - cv->version != 0 || - le32_to_cpu(cv->length) != ((fw->size - 9) / 8)) { - NV_ERROR(dev, "ctxvals invalid\n"); - release_firmware(fw); - nouveau_grctx_fini(dev); - return -EINVAL; - } - release_firmware(fw); - } - - cp = pgraph->ctxprog; - - nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); - for (i = 0; i < le16_to_cpu(cp->length); i++) - nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, - le32_to_cpu(cp->data[i])); - - return 0; -} - -void -nouveau_grctx_fini(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; - - if (pgraph->ctxprog) { - kfree(pgraph->ctxprog); - pgraph->ctxprog = NULL; - } - - if (pgraph->ctxvals) { - kfree(pgraph->ctxprog); - pgraph->ctxvals = NULL; - } -} - -void -nouveau_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; - struct nouveau_ctxvals *cv = pgraph->ctxvals; - int i; - - if (!cv) - return; - - for (i = 0; i < le32_to_cpu(cv->length); i++) - nv_wo32(dev, ctx, le32_to_cpu(cv->data[i].offset), - le32_to_cpu(cv->data[i].value)); -} diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c index 316a3c7e6eb..cb0cb34440c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_i2c.c +++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c @@ -278,3 +278,45 @@ nouveau_i2c_find(struct drm_device *dev, int index) return i2c->chan; } +bool +nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr) +{ + uint8_t buf[] = { 0 }; + struct i2c_msg msgs[] = { + { + .addr = addr, + .flags = 0, + .len = 1, + .buf = buf, + }, + { + .addr = addr, + .flags = I2C_M_RD, + .len = 1, + .buf = buf, + } + }; + + return i2c_transfer(&i2c->adapter, msgs, 2) == 2; +} + +int +nouveau_i2c_identify(struct drm_device *dev, const char *what, + struct i2c_board_info *info, int index) +{ + struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index); + int i; + + NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index); + + for (i = 0; info[i].addr; i++) { + if (nouveau_probe_i2c_addr(i2c, info[i].addr)) { + NV_INFO(dev, "Detected %s: %s\n", what, info[i].type); + return i; + } + } + + NV_DEBUG(dev, "No devices found.\n"); + + return -ENODEV; +} diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h index c8eaf7a9fcb..6dd2f8713cd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_i2c.h +++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h @@ -45,6 +45,9 @@ struct nouveau_i2c_chan { int nouveau_i2c_init(struct drm_device *, struct dcb_i2c_entry *, int index); void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *); struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index); +bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr); +int nouveau_i2c_identify(struct drm_device *dev, const char *what, + struct i2c_board_info *info, int index); int nouveau_dp_i2c_aux_ch(struct i2c_adapter *, int mode, uint8_t write_byte, uint8_t *read_byte); diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index c1fd42b0dad..a9f36ab256b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -35,162 +35,6 @@ #include "drm_sarea.h" #include "nouveau_drv.h" -static struct mem_block * 
-split_block(struct mem_block *p, uint64_t start, uint64_t size, - struct drm_file *file_priv) -{ - /* Maybe cut off the start of an existing block */ - if (start > p->start) { - struct mem_block *newblock = - kmalloc(sizeof(*newblock), GFP_KERNEL); - if (!newblock) - goto out; - newblock->start = start; - newblock->size = p->size - (start - p->start); - newblock->file_priv = NULL; - newblock->next = p->next; - newblock->prev = p; - p->next->prev = newblock; - p->next = newblock; - p->size -= newblock->size; - p = newblock; - } - - /* Maybe cut off the end of an existing block */ - if (size < p->size) { - struct mem_block *newblock = - kmalloc(sizeof(*newblock), GFP_KERNEL); - if (!newblock) - goto out; - newblock->start = start + size; - newblock->size = p->size - size; - newblock->file_priv = NULL; - newblock->next = p->next; - newblock->prev = p; - p->next->prev = newblock; - p->next = newblock; - p->size = size; - } - -out: - /* Our block is in the middle */ - p->file_priv = file_priv; - return p; -} - -struct mem_block * -nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size, - int align2, struct drm_file *file_priv, int tail) -{ - struct mem_block *p; - uint64_t mask = (1 << align2) - 1; - - if (!heap) - return NULL; - - if (tail) { - list_for_each_prev(p, heap) { - uint64_t start = ((p->start + p->size) - size) & ~mask; - - if (p->file_priv == NULL && start >= p->start && - start + size <= p->start + p->size) - return split_block(p, start, size, file_priv); - } - } else { - list_for_each(p, heap) { - uint64_t start = (p->start + mask) & ~mask; - - if (p->file_priv == NULL && - start + size <= p->start + p->size) - return split_block(p, start, size, file_priv); - } - } - - return NULL; -} - -void nouveau_mem_free_block(struct mem_block *p) -{ - p->file_priv = NULL; - - /* Assumes a single contiguous range. Needs a special file_priv in - * 'heap' to stop it being subsumed. - */ - if (p->next->file_priv == NULL) { - struct mem_block *q = p->next; - p->size += q->size; - p->next = q->next; - p->next->prev = p; - kfree(q); - } - - if (p->prev->file_priv == NULL) { - struct mem_block *q = p->prev; - q->size += p->size; - q->next = p->next; - q->next->prev = q; - kfree(p); - } -} - -/* Initialize. How to check for an uninitialized heap? - */ -int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start, - uint64_t size) -{ - struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL); - - if (!blocks) - return -ENOMEM; - - *heap = kmalloc(sizeof(**heap), GFP_KERNEL); - if (!*heap) { - kfree(blocks); - return -ENOMEM; - } - - blocks->start = start; - blocks->size = size; - blocks->file_priv = NULL; - blocks->next = blocks->prev = *heap; - - memset(*heap, 0, sizeof(**heap)); - (*heap)->file_priv = (struct drm_file *) -1; - (*heap)->next = (*heap)->prev = blocks; - return 0; -} - -/* - * Free all blocks associated with the releasing file_priv - */ -void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap) -{ - struct mem_block *p; - - if (!heap || !heap->next) - return; - - list_for_each(p, heap) { - if (p->file_priv == file_priv) - p->file_priv = NULL; - } - - /* Assumes a single contiguous range. Needs a special file_priv in - * 'heap' to stop it being subsumed. 
- */ - list_for_each(p, heap) { - while ((p->file_priv == NULL) && - (p->next->file_priv == NULL) && - (p->next != heap)) { - struct mem_block *q = p->next; - p->size += q->size; - p->next = q->next; - p->next->prev = p; - kfree(q); - } - } -} - /* * NV10-NV40 tiling helpers */ @@ -299,7 +143,6 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, phys |= 0x30; } - dev_priv->engine.instmem.prepare_access(dev, true); while (size) { unsigned offset_h = upper_32_bits(phys); unsigned offset_l = lower_32_bits(phys); @@ -331,36 +174,12 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size, } } } - dev_priv->engine.instmem.finish_access(dev); - - nv_wr32(dev, 0x100c80, 0x00050001); - if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { - NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); - NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); - return -EBUSY; - } - - nv_wr32(dev, 0x100c80, 0x00000001); - if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { - NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); - NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); - return -EBUSY; - } - - nv_wr32(dev, 0x100c80, 0x00040001); - if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { - NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); - NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); - return -EBUSY; - } - - nv_wr32(dev, 0x100c80, 0x00060001); - if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { - NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); - NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); - return -EBUSY; - } + dev_priv->engine.instmem.flush(dev); + nv50_vm_flush(dev, 5); + nv50_vm_flush(dev, 0); + nv50_vm_flush(dev, 4); + nv50_vm_flush(dev, 6); return 0; } @@ -374,7 +193,6 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) virt -= dev_priv->vm_vram_base; pages = (size >> 16) << 1; - dev_priv->engine.instmem.prepare_access(dev, true); while (pages) { pgt = dev_priv->vm_vram_pt[virt >> 29]; pte = (virt & 0x1ffe0000ULL) >> 15; @@ -388,57 +206,19 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size) while (pte < end) nv_wo32(dev, pgt, pte++, 0); } - dev_priv->engine.instmem.finish_access(dev); - - nv_wr32(dev, 0x100c80, 0x00050001); - if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { - NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); - NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); - return; - } - - nv_wr32(dev, 0x100c80, 0x00000001); - if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { - NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); - NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); - return; - } - - nv_wr32(dev, 0x100c80, 0x00040001); - if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { - NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); - NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); - return; - } + dev_priv->engine.instmem.flush(dev); - nv_wr32(dev, 0x100c80, 0x00060001); - if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { - NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); - NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); - } + nv50_vm_flush(dev, 5); + nv50_vm_flush(dev, 0); + nv50_vm_flush(dev, 4); + nv50_vm_flush(dev, 6); } /* * Cleanup everything */ -void nouveau_mem_takedown(struct mem_block **heap) -{ - struct mem_block *p; - - if (!*heap) - return; - - for (p = (*heap)->next; p != *heap;) { - struct mem_block *q = p; - p = p->next; - kfree(q); - } - - kfree(*heap); - *heap = 
NULL; -} - -void nouveau_mem_close(struct drm_device *dev) +void +nouveau_mem_close(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -449,8 +229,7 @@ void nouveau_mem_close(struct drm_device *dev) nouveau_ttm_global_release(dev_priv); - if (drm_core_has_AGP(dev) && dev->agp && - drm_core_check_feature(dev, DRIVER_MODESET)) { + if (drm_core_has_AGP(dev) && dev->agp) { struct drm_agp_mem *entry, *tempe; /* Remove AGP resources, but leave dev->agp @@ -471,28 +250,29 @@ void nouveau_mem_close(struct drm_device *dev) } if (dev_priv->fb_mtrr) { - drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1), - drm_get_resource_len(dev, 1), DRM_MTRR_WC); - dev_priv->fb_mtrr = 0; + drm_mtrr_del(dev_priv->fb_mtrr, + pci_resource_start(dev->pdev, 1), + pci_resource_len(dev->pdev, 1), DRM_MTRR_WC); + dev_priv->fb_mtrr = -1; } } static uint32_t nouveau_mem_detect_nv04(struct drm_device *dev) { - uint32_t boot0 = nv_rd32(dev, NV03_BOOT_0); + uint32_t boot0 = nv_rd32(dev, NV04_PFB_BOOT_0); if (boot0 & 0x00000100) return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024; - switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) { - case NV04_BOOT_0_RAM_AMOUNT_32MB: + switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) { + case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB: return 32 * 1024 * 1024; - case NV04_BOOT_0_RAM_AMOUNT_16MB: + case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB: return 16 * 1024 * 1024; - case NV04_BOOT_0_RAM_AMOUNT_8MB: + case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB: return 8 * 1024 * 1024; - case NV04_BOOT_0_RAM_AMOUNT_4MB: + case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB: return 4 * 1024 * 1024; } @@ -536,12 +316,18 @@ nouveau_mem_detect(struct drm_device *dev) } else if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) { dev_priv->vram_size = nouveau_mem_detect_nforce(dev); + } else + if (dev_priv->card_type < NV_50) { + dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA); + dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK; } else { - dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA); - dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK; - if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) + dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA); + dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32; + dev_priv->vram_size &= 0xffffffff00ll; + if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) { dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10); dev_priv->vram_sys_base <<= 12; + } } NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20)); @@ -555,18 +341,36 @@ nouveau_mem_detect(struct drm_device *dev) return -ENOMEM; } -#if __OS_HAS_AGP -static void nouveau_mem_reset_agp(struct drm_device *dev) +int +nouveau_mem_reset_agp(struct drm_device *dev) { - uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable; +#if __OS_HAS_AGP + uint32_t saved_pci_nv_1, pmc_enable; + int ret; + + /* First of all, disable fast writes, otherwise if it's + * already enabled in the AGP bridge and we disable the card's + * AGP controller we might be locking ourselves out of it. 
*/ + if (dev->agp->acquired) { + struct drm_agp_info info; + struct drm_agp_mode mode; + + ret = drm_agp_info(dev, &info); + if (ret) + return ret; + + mode.mode = info.mode & ~0x10; + ret = drm_agp_enable(dev, mode); + if (ret) + return ret; + } saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1); - saved_pci_nv_19 = nv_rd32(dev, NV04_PBUS_PCI_NV_19); /* clear busmaster bit */ nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4); - /* clear SBA and AGP bits */ - nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff); + /* disable AGP */ + nv_wr32(dev, NV04_PBUS_PCI_NV_19, 0); /* power cycle pgraph, if enabled */ pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE); @@ -578,11 +382,12 @@ static void nouveau_mem_reset_agp(struct drm_device *dev) } /* and restore (gives effect of resetting AGP) */ - nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19); nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1); -} #endif + return 0; +} + int nouveau_mem_init_agp(struct drm_device *dev) { @@ -592,11 +397,6 @@ nouveau_mem_init_agp(struct drm_device *dev) struct drm_agp_mode mode; int ret; - if (nouveau_noagp) - return 0; - - nouveau_mem_reset_agp(dev); - if (!dev->agp->acquired) { ret = drm_agp_acquire(dev); if (ret) { @@ -633,7 +433,7 @@ nouveau_mem_init(struct drm_device *dev) struct ttm_bo_device *bdev = &dev_priv->ttm.bdev; int ret, dma_bits = 32; - dev_priv->fb_phys = drm_get_resource_start(dev, 1); + dev_priv->fb_phys = pci_resource_start(dev->pdev, 1); dev_priv->gart_info.type = NOUVEAU_GART_NONE; if (dev_priv->card_type >= NV_50 && @@ -665,8 +465,9 @@ nouveau_mem_init(struct drm_device *dev) dev_priv->fb_available_size = dev_priv->vram_size; dev_priv->fb_mappable_pages = dev_priv->fb_available_size; - if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1)) - dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1); + if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1)) + dev_priv->fb_mappable_pages = + pci_resource_len(dev->pdev, 1); dev_priv->fb_mappable_pages >>= PAGE_SHIFT; /* remove reserved space at end of vram from available amount */ @@ -692,7 +493,8 @@ nouveau_mem_init(struct drm_device *dev) /* GART */ #if !defined(__powerpc__) && !defined(__ia64__) - if (drm_device_is_agp(dev) && dev->agp) { + if (drm_device_is_agp(dev) && dev->agp && !nouveau_noagp) { + nouveau_mem_reset_agp(dev); ret = nouveau_mem_init_agp(dev); if (ret) NV_ERROR(dev, "Error initialising AGP: %d\n", ret); @@ -718,8 +520,8 @@ nouveau_mem_init(struct drm_device *dev) return ret; } - dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1), - drm_get_resource_len(dev, 1), + dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1), + pci_resource_len(dev->pdev, 1), DRM_MTRR_WC); return 0; diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index 9537f3e3011..3ec181ff50c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c @@ -55,7 +55,7 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan) if (ret) goto out_err; - ret = nouveau_mem_init_heap(&chan->notifier_heap, 0, ntfy->bo.mem.size); + ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size); if (ret) goto out_err; @@ -80,7 +80,7 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan) nouveau_bo_unpin(chan->notifier_bo); mutex_unlock(&dev->struct_mutex); drm_gem_object_unreference_unlocked(chan->notifier_bo->gem); - nouveau_mem_takedown(&chan->notifier_heap); + drm_mm_takedown(&chan->notifier_heap); } static 
void @@ -90,7 +90,7 @@ nouveau_notifier_gpuobj_dtor(struct drm_device *dev, NV_DEBUG(dev, "\n"); if (gpuobj->priv) - nouveau_mem_free_block(gpuobj->priv); + drm_mm_put_block(gpuobj->priv); } int @@ -100,18 +100,13 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_gpuobj *nobj = NULL; - struct mem_block *mem; + struct drm_mm_node *mem; uint32_t offset; int target, ret; - if (!chan->notifier_heap) { - NV_ERROR(dev, "Channel %d doesn't have a notifier heap!\n", - chan->id); - return -EINVAL; - } - - mem = nouveau_mem_alloc_block(chan->notifier_heap, size, 0, - (struct drm_file *)-2, 0); + mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0); + if (mem) + mem = drm_mm_get_block(mem, size, 0); if (!mem) { NV_ERROR(dev, "Channel %d notifier block full\n", chan->id); return -ENOMEM; @@ -144,17 +139,17 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, mem->size, NV_DMA_ACCESS_RW, target, &nobj); if (ret) { - nouveau_mem_free_block(mem); + drm_mm_put_block(mem); NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret); return ret; } - nobj->dtor = nouveau_notifier_gpuobj_dtor; - nobj->priv = mem; + nobj->dtor = nouveau_notifier_gpuobj_dtor; + nobj->priv = mem; ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL); if (ret) { nouveau_gpuobj_del(dev, &nobj); - nouveau_mem_free_block(mem); + drm_mm_put_block(mem); NV_ERROR(dev, "Error referencing notifier ctxdma: %d\n", ret); return ret; } @@ -170,7 +165,7 @@ nouveau_notifier_offset(struct nouveau_gpuobj *nobj, uint32_t *poffset) return -EINVAL; if (poffset) { - struct mem_block *mem = nobj->priv; + struct drm_mm_node *mem = nobj->priv; if (*poffset >= mem->size) return false; @@ -189,7 +184,6 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, struct nouveau_channel *chan; int ret; - NOUVEAU_CHECK_INITIALISED_WITH_RETURN; NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan); ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset); diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c index e7c100ba63a..b6bcb254f4a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_object.c +++ b/drivers/gpu/drm/nouveau/nouveau_object.c @@ -132,7 +132,6 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) } } - instmem->prepare_access(dev, true); co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle); do { if (!nouveau_ramht_entry_valid(dev, ramht, co)) { @@ -143,7 +142,7 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) nv_wo32(dev, ramht, (co + 4)/4, ctx); list_add_tail(&ref->list, &chan->ramht_refs); - instmem->finish_access(dev); + instmem->flush(dev); return 0; } NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n", @@ -153,7 +152,6 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) if (co >= dev_priv->ramht_size) co = 0; } while (co != ho); - instmem->finish_access(dev); NV_ERROR(dev, "RAMHT space exhausted. 
ch=%d\n", chan->id); return -ENOMEM; @@ -173,7 +171,6 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) return; } - instmem->prepare_access(dev, true); co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle); do { if (nouveau_ramht_entry_valid(dev, ramht, co) && @@ -186,7 +183,7 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) nv_wo32(dev, ramht, (co + 4)/4, 0x00000000); list_del(&ref->list); - instmem->finish_access(dev); + instmem->flush(dev); return; } @@ -195,7 +192,6 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref) co = 0; } while (co != ho); list_del(&ref->list); - instmem->finish_access(dev); NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n", chan->id, ref->handle); @@ -209,7 +205,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_engine *engine = &dev_priv->engine; struct nouveau_gpuobj *gpuobj; - struct mem_block *pramin = NULL; + struct drm_mm *pramin = NULL; int ret; NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n", @@ -233,25 +229,12 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, * available. */ if (chan) { - if (chan->ramin_heap) { - NV_DEBUG(dev, "private heap\n"); - pramin = chan->ramin_heap; - } else - if (dev_priv->card_type < NV_50) { - NV_DEBUG(dev, "global heap fallback\n"); - pramin = dev_priv->ramin_heap; - } + NV_DEBUG(dev, "channel heap\n"); + pramin = &chan->ramin_heap; } else { NV_DEBUG(dev, "global heap\n"); - pramin = dev_priv->ramin_heap; - } - - if (!pramin) { - NV_ERROR(dev, "No PRAMIN heap!\n"); - return -EINVAL; - } + pramin = &dev_priv->ramin_heap; - if (!chan) { ret = engine->instmem.populate(dev, gpuobj, &size); if (ret) { nouveau_gpuobj_del(dev, &gpuobj); @@ -260,9 +243,10 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, } /* Allocate a chunk of the PRAMIN aperture */ - gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size, - drm_order(align), - (struct drm_file *)-2, 0); + gpuobj->im_pramin = drm_mm_search_free(pramin, size, align, 0); + if (gpuobj->im_pramin) + gpuobj->im_pramin = drm_mm_get_block(gpuobj->im_pramin, size, align); + if (!gpuobj->im_pramin) { nouveau_gpuobj_del(dev, &gpuobj); return -ENOMEM; @@ -279,10 +263,9 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan, if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { int i; - engine->instmem.prepare_access(dev, true); for (i = 0; i < gpuobj->im_pramin->size; i += 4) nv_wo32(dev, gpuobj, i/4, 0); - engine->instmem.finish_access(dev); + engine->instmem.flush(dev); } *gpuobj_ret = gpuobj; @@ -370,10 +353,9 @@ nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) } if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) { - engine->instmem.prepare_access(dev, true); for (i = 0; i < gpuobj->im_pramin->size; i += 4) nv_wo32(dev, gpuobj, i/4, 0); - engine->instmem.finish_access(dev); + engine->instmem.flush(dev); } if (gpuobj->dtor) @@ -386,7 +368,7 @@ nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj) if (gpuobj->flags & NVOBJ_FLAG_FAKE) kfree(gpuobj->im_pramin); else - nouveau_mem_free_block(gpuobj->im_pramin); + drm_mm_put_block(gpuobj->im_pramin); } list_del(&gpuobj->list); @@ -589,7 +571,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list); if (p_offset != ~0) { - gpuobj->im_pramin = 
kzalloc(sizeof(struct mem_block), + gpuobj->im_pramin = kzalloc(sizeof(struct drm_mm_node), GFP_KERNEL); if (!gpuobj->im_pramin) { nouveau_gpuobj_del(dev, &gpuobj); @@ -605,10 +587,9 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset, } if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) { - dev_priv->engine.instmem.prepare_access(dev, true); for (i = 0; i < gpuobj->im_pramin->size; i += 4) nv_wo32(dev, gpuobj, i/4, 0); - dev_priv->engine.instmem.finish_access(dev); + dev_priv->engine.instmem.flush(dev); } if (pref) { @@ -696,8 +677,6 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, return ret; } - instmem->prepare_access(dev, true); - if (dev_priv->card_type < NV_50) { uint32_t frame, adjust, pte_flags = 0; @@ -734,7 +713,7 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, nv_wo32(dev, *gpuobj, 5, flags5); } - instmem->finish_access(dev); + instmem->flush(dev); (*gpuobj)->engine = NVOBJ_ENGINE_SW; (*gpuobj)->class = class; @@ -849,7 +828,6 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class, return ret; } - dev_priv->engine.instmem.prepare_access(dev, true); if (dev_priv->card_type >= NV_50) { nv_wo32(dev, *gpuobj, 0, class); nv_wo32(dev, *gpuobj, 5, 0x00010000); @@ -874,7 +852,7 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class, } } } - dev_priv->engine.instmem.finish_access(dev); + dev_priv->engine.instmem.flush(dev); (*gpuobj)->engine = NVOBJ_ENGINE_GR; (*gpuobj)->class = class; @@ -920,6 +898,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) base = 0; /* PGRAPH context */ + size += dev_priv->engine.graph.grctx_size; if (dev_priv->card_type == NV_50) { /* Various fixed table thingos */ @@ -930,12 +909,8 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) size += 0x8000; /* RAMFC */ size += 0x1000; - /* PGRAPH context */ - size += 0x70000; } - NV_DEBUG(dev, "ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n", - chan->id, size, base); ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0, &chan->ramin); if (ret) { @@ -944,8 +919,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan) } pramin = chan->ramin->gpuobj; - ret = nouveau_mem_init_heap(&chan->ramin_heap, - pramin->im_pramin->start + base, size); + ret = drm_mm_init(&chan->ramin_heap, pramin->im_pramin->start + base, size); if (ret) { NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret); nouveau_gpuobj_ref_del(dev, &chan->ramin); @@ -969,15 +943,11 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h); - /* Reserve a block of PRAMIN for the channel - *XXX: maybe on <NV50 too at some point - */ - if (0 || dev_priv->card_type == NV_50) { - ret = nouveau_gpuobj_channel_init_pramin(chan); - if (ret) { - NV_ERROR(dev, "init pramin\n"); - return ret; - } + /* Allocate a chunk of memory for per-channel object storage */ + ret = nouveau_gpuobj_channel_init_pramin(chan); + if (ret) { + NV_ERROR(dev, "init pramin\n"); + return ret; } /* NV50 VM @@ -988,17 +958,13 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, if (dev_priv->card_type >= NV_50) { uint32_t vm_offset, pde; - instmem->prepare_access(dev, true); - vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 
0x1400 : 0x200; vm_offset += chan->ramin->gpuobj->im_pramin->start; ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000, 0, &chan->vm_pd, NULL); - if (ret) { - instmem->finish_access(dev); + if (ret) return ret; - } for (i = 0; i < 0x4000; i += 8) { nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000); nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe); @@ -1008,10 +974,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->gart_info.sg_ctxdma, &chan->vm_gart_pt); - if (ret) { - instmem->finish_access(dev); + if (ret) return ret; - } nv_wo32(dev, chan->vm_pd, pde++, chan->vm_gart_pt->instance | 0x03); nv_wo32(dev, chan->vm_pd, pde++, 0x00000000); @@ -1021,17 +985,15 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan, ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->vm_vram_pt[i], &chan->vm_vram_pt[i]); - if (ret) { - instmem->finish_access(dev); + if (ret) return ret; - } nv_wo32(dev, chan->vm_pd, pde++, chan->vm_vram_pt[i]->instance | 0x61); nv_wo32(dev, chan->vm_pd, pde++, 0x00000000); } - instmem->finish_access(dev); + instmem->flush(dev); } /* RAMHT */ @@ -1130,8 +1092,8 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan) for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]); - if (chan->ramin_heap) - nouveau_mem_takedown(&chan->ramin_heap); + if (chan->ramin_heap.free_stack.next) + drm_mm_takedown(&chan->ramin_heap); if (chan->ramin) nouveau_gpuobj_ref_del(dev, &chan->ramin); @@ -1164,10 +1126,8 @@ nouveau_gpuobj_suspend(struct drm_device *dev) return -ENOMEM; } - dev_priv->engine.instmem.prepare_access(dev, false); for (i = 0; i < gpuobj->im_pramin->size / 4; i++) gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i); - dev_priv->engine.instmem.finish_access(dev); } return 0; @@ -1212,10 +1172,9 @@ nouveau_gpuobj_resume(struct drm_device *dev) if (!gpuobj->im_backing_suspend) continue; - dev_priv->engine.instmem.prepare_access(dev, true); for (i = 0; i < gpuobj->im_pramin->size / 4; i++) nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]); - dev_priv->engine.instmem.finish_access(dev); + dev_priv->engine.instmem.flush(dev); } nouveau_gpuobj_suspend_cleanup(dev); @@ -1232,7 +1191,6 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data, struct nouveau_channel *chan; int ret; - NOUVEAU_CHECK_INITIALISED_WITH_RETURN; NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan); if (init->handle == ~0) @@ -1283,7 +1241,6 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data, struct nouveau_channel *chan; int ret; - NOUVEAU_CHECK_INITIALISED_WITH_RETURN; NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan); ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref); diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h index 6ca80a3fe70..9c1056cb8a9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_reg.h +++ b/drivers/gpu/drm/nouveau/nouveau_reg.h @@ -1,19 +1,64 @@ +#define NV04_PFB_BOOT_0 0x00100000 +# define NV04_PFB_BOOT_0_RAM_AMOUNT 0x00000003 +# define NV04_PFB_BOOT_0_RAM_AMOUNT_32MB 0x00000000 +# define NV04_PFB_BOOT_0_RAM_AMOUNT_4MB 0x00000001 +# define NV04_PFB_BOOT_0_RAM_AMOUNT_8MB 0x00000002 +# define NV04_PFB_BOOT_0_RAM_AMOUNT_16MB 0x00000003 +# define NV04_PFB_BOOT_0_RAM_WIDTH_128 0x00000004 +# define NV04_PFB_BOOT_0_RAM_TYPE 0x00000028 +# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT 0x00000000 +# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT 0x00000008 +# define 
NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT_4BANK 0x00000010 +# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT 0x00000018 +# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBIT 0x00000020 +# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBITX16 0x00000028 +# define NV04_PFB_BOOT_0_UMA_ENABLE 0x00000100 +# define NV04_PFB_BOOT_0_UMA_SIZE 0x0000f000 +#define NV04_PFB_DEBUG_0 0x00100080 +# define NV04_PFB_DEBUG_0_PAGE_MODE 0x00000001 +# define NV04_PFB_DEBUG_0_REFRESH_OFF 0x00000010 +# define NV04_PFB_DEBUG_0_REFRESH_COUNTX64 0x00003f00 +# define NV04_PFB_DEBUG_0_REFRESH_SLOW_CLK 0x00004000 +# define NV04_PFB_DEBUG_0_SAFE_MODE 0x00008000 +# define NV04_PFB_DEBUG_0_ALOM_ENABLE 0x00010000 +# define NV04_PFB_DEBUG_0_CASOE 0x00100000 +# define NV04_PFB_DEBUG_0_CKE_INVERT 0x10000000 +# define NV04_PFB_DEBUG_0_REFINC 0x20000000 +# define NV04_PFB_DEBUG_0_SAVE_POWER_OFF 0x40000000 +#define NV04_PFB_CFG0 0x00100200 +# define NV04_PFB_CFG0_SCRAMBLE 0x20000000 +#define NV04_PFB_CFG1 0x00100204 +#define NV04_PFB_FIFO_DATA 0x0010020c +# define NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK 0xfff00000 +# define NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_SHIFT 20 +#define NV10_PFB_REFCTRL 0x00100210 +# define NV10_PFB_REFCTRL_VALID_1 (1 << 31) +#define NV04_PFB_PAD 0x0010021c +# define NV04_PFB_PAD_CKE_NORMAL (1 << 0) +#define NV10_PFB_TILE(i) (0x00100240 + (i*16)) +#define NV10_PFB_TILE__SIZE 8 +#define NV10_PFB_TLIMIT(i) (0x00100244 + (i*16)) +#define NV10_PFB_TSIZE(i) (0x00100248 + (i*16)) +#define NV10_PFB_TSTATUS(i) (0x0010024c + (i*16)) +#define NV04_PFB_REF 0x001002d0 +# define NV04_PFB_REF_CMD_REFRESH (1 << 0) +#define NV04_PFB_PRE 0x001002d4 +# define NV04_PFB_PRE_CMD_PRECHARGE (1 << 0) +#define NV10_PFB_CLOSE_PAGE2 0x0010033c +#define NV04_PFB_SCRAMBLE(i) (0x00100400 + 4 * (i)) +#define NV40_PFB_TILE(i) (0x00100600 + (i*16)) +#define NV40_PFB_TILE__SIZE_0 12 +#define NV40_PFB_TILE__SIZE_1 15 +#define NV40_PFB_TLIMIT(i) (0x00100604 + (i*16)) +#define NV40_PFB_TSIZE(i) (0x00100608 + (i*16)) +#define NV40_PFB_TSTATUS(i) (0x0010060c + (i*16)) +#define NV40_PFB_UNK_800 0x00100800 -#define NV03_BOOT_0 0x00100000 -# define NV03_BOOT_0_RAM_AMOUNT 0x00000003 -# define NV03_BOOT_0_RAM_AMOUNT_8MB 0x00000000 -# define NV03_BOOT_0_RAM_AMOUNT_2MB 0x00000001 -# define NV03_BOOT_0_RAM_AMOUNT_4MB 0x00000002 -# define NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM 0x00000003 -# define NV04_BOOT_0_RAM_AMOUNT_32MB 0x00000000 -# define NV04_BOOT_0_RAM_AMOUNT_4MB 0x00000001 -# define NV04_BOOT_0_RAM_AMOUNT_8MB 0x00000002 -# define NV04_BOOT_0_RAM_AMOUNT_16MB 0x00000003 - -#define NV04_FIFO_DATA 0x0010020c -# define NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK 0xfff00000 -# define NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT 20 +#define NV_PEXTDEV_BOOT_0 0x00101000 +#define NV_PEXTDEV_BOOT_0_RAMCFG 0x0000003c +# define NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT (8 << 12) +#define NV_PEXTDEV_BOOT_3 0x0010100c #define NV_RAMIN 0x00700000 @@ -131,23 +176,6 @@ #define NV04_PTIMER_TIME_1 0x00009410 #define NV04_PTIMER_ALARM_0 0x00009420 -#define NV04_PFB_CFG0 0x00100200 -#define NV04_PFB_CFG1 0x00100204 -#define NV40_PFB_020C 0x0010020C -#define NV10_PFB_TILE(i) (0x00100240 + (i*16)) -#define NV10_PFB_TILE__SIZE 8 -#define NV10_PFB_TLIMIT(i) (0x00100244 + (i*16)) -#define NV10_PFB_TSIZE(i) (0x00100248 + (i*16)) -#define NV10_PFB_TSTATUS(i) (0x0010024C + (i*16)) -#define NV10_PFB_CLOSE_PAGE2 0x0010033C -#define NV40_PFB_TILE(i) (0x00100600 + (i*16)) -#define NV40_PFB_TILE__SIZE_0 12 -#define NV40_PFB_TILE__SIZE_1 15 -#define NV40_PFB_TLIMIT(i) (0x00100604 + (i*16)) -#define NV40_PFB_TSIZE(i) 
(0x00100608 + (i*16)) -#define NV40_PFB_TSTATUS(i) (0x0010060C + (i*16)) -#define NV40_PFB_UNK_800 0x00100800 - #define NV04_PGRAPH_DEBUG_0 0x00400080 #define NV04_PGRAPH_DEBUG_1 0x00400084 #define NV04_PGRAPH_DEBUG_2 0x00400088 @@ -814,6 +842,7 @@ #define NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE 0x80000000 #define NV50_PDISPLAY_SOR_BACKLIGHT_LEVEL 0x00000fff #define NV50_SOR_DP_CTRL(i,l) (0x0061c10c + (i) * 0x800 + (l) * 0x80) +#define NV50_SOR_DP_CTRL_ENABLED 0x00000001 #define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED 0x00004000 #define NV50_SOR_DP_CTRL_LANE_MASK 0x001f0000 #define NV50_SOR_DP_CTRL_LANE_0_ENABLED 0x00010000 diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 1d6ee8b5515..491767fe4fc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -97,7 +97,6 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start); - dev_priv->engine.instmem.prepare_access(nvbe->dev, true); pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT); nvbe->pte_start = pte; for (i = 0; i < nvbe->nr_pages; i++) { @@ -116,24 +115,11 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem) dma_offset += NV_CTXDMA_PAGE_SIZE; } } - dev_priv->engine.instmem.finish_access(nvbe->dev); + dev_priv->engine.instmem.flush(nvbe->dev); if (dev_priv->card_type == NV_50) { - nv_wr32(dev, 0x100c80, 0x00050001); - if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { - NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); - NV_ERROR(dev, "0x100c80 = 0x%08x\n", - nv_rd32(dev, 0x100c80)); - return -EBUSY; - } - - nv_wr32(dev, 0x100c80, 0x00000001); - if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { - NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); - NV_ERROR(dev, "0x100c80 = 0x%08x\n", - nv_rd32(dev, 0x100c80)); - return -EBUSY; - } + nv50_vm_flush(dev, 5); /* PGRAPH */ + nv50_vm_flush(dev, 0); /* PFIFO */ } nvbe->bound = true; @@ -154,7 +140,6 @@ nouveau_sgdma_unbind(struct ttm_backend *be) if (!nvbe->bound) return 0; - dev_priv->engine.instmem.prepare_access(nvbe->dev, true); pte = nvbe->pte_start; for (i = 0; i < nvbe->nr_pages; i++) { dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus; @@ -170,24 +155,11 @@ nouveau_sgdma_unbind(struct ttm_backend *be) dma_offset += NV_CTXDMA_PAGE_SIZE; } } - dev_priv->engine.instmem.finish_access(nvbe->dev); + dev_priv->engine.instmem.flush(nvbe->dev); if (dev_priv->card_type == NV_50) { - nv_wr32(dev, 0x100c80, 0x00050001); - if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { - NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); - NV_ERROR(dev, "0x100c80 = 0x%08x\n", - nv_rd32(dev, 0x100c80)); - return -EBUSY; - } - - nv_wr32(dev, 0x100c80, 0x00000001); - if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { - NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); - NV_ERROR(dev, "0x100c80 = 0x%08x\n", - nv_rd32(dev, 0x100c80)); - return -EBUSY; - } + nv50_vm_flush(dev, 5); + nv50_vm_flush(dev, 0); } nvbe->bound = false; @@ -272,7 +244,6 @@ nouveau_sgdma_init(struct drm_device *dev) pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - dev_priv->engine.instmem.prepare_access(dev, true); if (dev_priv->card_type < NV_50) { /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and * confirmed to work on c51. 
Perhaps means NV_DMA_TARGET_PCIE @@ -294,7 +265,7 @@ nouveau_sgdma_init(struct drm_device *dev) nv_wo32(dev, gpuobj, (i+4)/4, 0); } } - dev_priv->engine.instmem.finish_access(dev); + dev_priv->engine.instmem.flush(dev); dev_priv->gart_info.type = NOUVEAU_GART_SGDMA; dev_priv->gart_info.aper_base = 0; @@ -325,14 +296,11 @@ nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; - struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem; int pte; pte = (offset >> NV_CTXDMA_PAGE_SHIFT); if (dev_priv->card_type < NV_50) { - instmem->prepare_access(dev, false); *page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK; - instmem->finish_access(dev); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index b02a231d693..ee3729e7823 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -38,6 +38,7 @@ #include "nv50_display.h" static void nouveau_stub_takedown(struct drm_device *dev) {} +static int nouveau_stub_init(struct drm_device *dev) { return 0; } static int nouveau_init_engine_ptrs(struct drm_device *dev) { @@ -54,8 +55,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->instmem.clear = nv04_instmem_clear; engine->instmem.bind = nv04_instmem_bind; engine->instmem.unbind = nv04_instmem_unbind; - engine->instmem.prepare_access = nv04_instmem_prepare_access; - engine->instmem.finish_access = nv04_instmem_finish_access; + engine->instmem.flush = nv04_instmem_flush; engine->mc.init = nv04_mc_init; engine->mc.takedown = nv04_mc_takedown; engine->timer.init = nv04_timer_init; @@ -85,6 +85,16 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fifo.destroy_context = nv04_fifo_destroy_context; engine->fifo.load_context = nv04_fifo_load_context; engine->fifo.unload_context = nv04_fifo_unload_context; + engine->display.early_init = nv04_display_early_init; + engine->display.late_takedown = nv04_display_late_takedown; + engine->display.create = nv04_display_create; + engine->display.init = nv04_display_init; + engine->display.destroy = nv04_display_destroy; + engine->gpio.init = nouveau_stub_init; + engine->gpio.takedown = nouveau_stub_takedown; + engine->gpio.get = NULL; + engine->gpio.set = NULL; + engine->gpio.irq_enable = NULL; break; case 0x10: engine->instmem.init = nv04_instmem_init; @@ -95,8 +105,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->instmem.clear = nv04_instmem_clear; engine->instmem.bind = nv04_instmem_bind; engine->instmem.unbind = nv04_instmem_unbind; - engine->instmem.prepare_access = nv04_instmem_prepare_access; - engine->instmem.finish_access = nv04_instmem_finish_access; + engine->instmem.flush = nv04_instmem_flush; engine->mc.init = nv04_mc_init; engine->mc.takedown = nv04_mc_takedown; engine->timer.init = nv04_timer_init; @@ -128,6 +137,16 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fifo.destroy_context = nv10_fifo_destroy_context; engine->fifo.load_context = nv10_fifo_load_context; engine->fifo.unload_context = nv10_fifo_unload_context; + engine->display.early_init = nv04_display_early_init; + engine->display.late_takedown = nv04_display_late_takedown; + engine->display.create = nv04_display_create; + engine->display.init = nv04_display_init; + engine->display.destroy = nv04_display_destroy; + engine->gpio.init = 
nouveau_stub_init; + engine->gpio.takedown = nouveau_stub_takedown; + engine->gpio.get = nv10_gpio_get; + engine->gpio.set = nv10_gpio_set; + engine->gpio.irq_enable = NULL; break; case 0x20: engine->instmem.init = nv04_instmem_init; @@ -138,8 +157,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->instmem.clear = nv04_instmem_clear; engine->instmem.bind = nv04_instmem_bind; engine->instmem.unbind = nv04_instmem_unbind; - engine->instmem.prepare_access = nv04_instmem_prepare_access; - engine->instmem.finish_access = nv04_instmem_finish_access; + engine->instmem.flush = nv04_instmem_flush; engine->mc.init = nv04_mc_init; engine->mc.takedown = nv04_mc_takedown; engine->timer.init = nv04_timer_init; @@ -171,6 +189,16 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fifo.destroy_context = nv10_fifo_destroy_context; engine->fifo.load_context = nv10_fifo_load_context; engine->fifo.unload_context = nv10_fifo_unload_context; + engine->display.early_init = nv04_display_early_init; + engine->display.late_takedown = nv04_display_late_takedown; + engine->display.create = nv04_display_create; + engine->display.init = nv04_display_init; + engine->display.destroy = nv04_display_destroy; + engine->gpio.init = nouveau_stub_init; + engine->gpio.takedown = nouveau_stub_takedown; + engine->gpio.get = nv10_gpio_get; + engine->gpio.set = nv10_gpio_set; + engine->gpio.irq_enable = NULL; break; case 0x30: engine->instmem.init = nv04_instmem_init; @@ -181,15 +209,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->instmem.clear = nv04_instmem_clear; engine->instmem.bind = nv04_instmem_bind; engine->instmem.unbind = nv04_instmem_unbind; - engine->instmem.prepare_access = nv04_instmem_prepare_access; - engine->instmem.finish_access = nv04_instmem_finish_access; + engine->instmem.flush = nv04_instmem_flush; engine->mc.init = nv04_mc_init; engine->mc.takedown = nv04_mc_takedown; engine->timer.init = nv04_timer_init; engine->timer.read = nv04_timer_read; engine->timer.takedown = nv04_timer_takedown; - engine->fb.init = nv10_fb_init; - engine->fb.takedown = nv10_fb_takedown; + engine->fb.init = nv30_fb_init; + engine->fb.takedown = nv30_fb_takedown; engine->fb.set_region_tiling = nv10_fb_set_region_tiling; engine->graph.grclass = nv30_graph_grclass; engine->graph.init = nv30_graph_init; @@ -214,6 +241,16 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fifo.destroy_context = nv10_fifo_destroy_context; engine->fifo.load_context = nv10_fifo_load_context; engine->fifo.unload_context = nv10_fifo_unload_context; + engine->display.early_init = nv04_display_early_init; + engine->display.late_takedown = nv04_display_late_takedown; + engine->display.create = nv04_display_create; + engine->display.init = nv04_display_init; + engine->display.destroy = nv04_display_destroy; + engine->gpio.init = nouveau_stub_init; + engine->gpio.takedown = nouveau_stub_takedown; + engine->gpio.get = nv10_gpio_get; + engine->gpio.set = nv10_gpio_set; + engine->gpio.irq_enable = NULL; break; case 0x40: case 0x60: @@ -225,8 +262,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->instmem.clear = nv04_instmem_clear; engine->instmem.bind = nv04_instmem_bind; engine->instmem.unbind = nv04_instmem_unbind; - engine->instmem.prepare_access = nv04_instmem_prepare_access; - engine->instmem.finish_access = nv04_instmem_finish_access; + engine->instmem.flush = nv04_instmem_flush; engine->mc.init = nv40_mc_init; engine->mc.takedown = nv40_mc_takedown; 
engine->timer.init = nv04_timer_init; @@ -258,6 +294,16 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fifo.destroy_context = nv40_fifo_destroy_context; engine->fifo.load_context = nv40_fifo_load_context; engine->fifo.unload_context = nv40_fifo_unload_context; + engine->display.early_init = nv04_display_early_init; + engine->display.late_takedown = nv04_display_late_takedown; + engine->display.create = nv04_display_create; + engine->display.init = nv04_display_init; + engine->display.destroy = nv04_display_destroy; + engine->gpio.init = nouveau_stub_init; + engine->gpio.takedown = nouveau_stub_takedown; + engine->gpio.get = nv10_gpio_get; + engine->gpio.set = nv10_gpio_set; + engine->gpio.irq_enable = NULL; break; case 0x50: case 0x80: /* gotta love NVIDIA's consistency.. */ @@ -271,8 +317,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->instmem.clear = nv50_instmem_clear; engine->instmem.bind = nv50_instmem_bind; engine->instmem.unbind = nv50_instmem_unbind; - engine->instmem.prepare_access = nv50_instmem_prepare_access; - engine->instmem.finish_access = nv50_instmem_finish_access; + if (dev_priv->chipset == 0x50) + engine->instmem.flush = nv50_instmem_flush; + else + engine->instmem.flush = nv84_instmem_flush; engine->mc.init = nv50_mc_init; engine->mc.takedown = nv50_mc_takedown; engine->timer.init = nv04_timer_init; @@ -300,6 +348,16 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev) engine->fifo.destroy_context = nv50_fifo_destroy_context; engine->fifo.load_context = nv50_fifo_load_context; engine->fifo.unload_context = nv50_fifo_unload_context; + engine->display.early_init = nv50_display_early_init; + engine->display.late_takedown = nv50_display_late_takedown; + engine->display.create = nv50_display_create; + engine->display.init = nv50_display_init; + engine->display.destroy = nv50_display_destroy; + engine->gpio.init = nv50_gpio_init; + engine->gpio.takedown = nouveau_stub_takedown; + engine->gpio.get = nv50_gpio_get; + engine->gpio.set = nv50_gpio_set; + engine->gpio.irq_enable = nv50_gpio_irq_enable; break; default: NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset); @@ -407,11 +465,6 @@ nouveau_card_init(struct drm_device *dev) struct nouveau_engine *engine; int ret; - NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state); - - if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE) - return 0; - vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode); vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state, nouveau_switcheroo_can_switch); @@ -421,15 +474,17 @@ nouveau_card_init(struct drm_device *dev) if (ret) goto out; engine = &dev_priv->engine; - dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED; spin_lock_init(&dev_priv->context_switch_lock); + /* Make the CRTCs and I2C buses accessible */ + ret = engine->display.early_init(dev); + if (ret) + goto out; + /* Parse BIOS tables / Run init tables if card not POSTed */ - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - ret = nouveau_bios_init(dev); - if (ret) - goto out; - } + ret = nouveau_bios_init(dev); + if (ret) + goto out_display_early; ret = nouveau_mem_detect(dev); if (ret) @@ -461,10 +516,15 @@ nouveau_card_init(struct drm_device *dev) if (ret) goto out_gpuobj; + /* PGPIO */ + ret = engine->gpio.init(dev); + if (ret) + goto out_mc; + /* PTIMER */ ret = engine->timer.init(dev); if (ret) - goto out_mc; + goto out_gpio; /* PFB */ ret = engine->fb.init(dev); @@ -485,12 +545,16 @@ nouveau_card_init(struct drm_device *dev) goto 
out_graph; } + ret = engine->display.create(dev); + if (ret) + goto out_fifo; + /* this call irq_preinstall, register irq handler and * call irq_postinstall */ ret = drm_irq_install(dev); if (ret) - goto out_fifo; + goto out_display; ret = drm_vblank_init(dev, 0); if (ret) @@ -504,35 +568,18 @@ nouveau_card_init(struct drm_device *dev) goto out_irq; } - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - if (dev_priv->card_type >= NV_50) - ret = nv50_display_create(dev); - else - ret = nv04_display_create(dev); - if (ret) - goto out_channel; - } - ret = nouveau_backlight_init(dev); if (ret) NV_ERROR(dev, "Error %d registering backlight\n", ret); - dev_priv->init_state = NOUVEAU_CARD_INIT_DONE; - - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - nouveau_fbcon_init(dev); - drm_kms_helper_poll_init(dev); - } - + nouveau_fbcon_init(dev); + drm_kms_helper_poll_init(dev); return 0; -out_channel: - if (dev_priv->channel) { - nouveau_channel_free(dev_priv->channel); - dev_priv->channel = NULL; - } out_irq: drm_irq_uninstall(dev); +out_display: + engine->display.destroy(dev); out_fifo: if (!nouveau_noaccel) engine->fifo.takedown(dev); @@ -543,6 +590,8 @@ out_fb: engine->fb.takedown(dev); out_timer: engine->timer.takedown(dev); +out_gpio: + engine->gpio.takedown(dev); out_mc: engine->mc.takedown(dev); out_gpuobj: @@ -556,6 +605,8 @@ out_gpuobj_early: nouveau_gpuobj_late_takedown(dev); out_bios: nouveau_bios_takedown(dev); +out_display_early: + engine->display.late_takedown(dev); out: vga_client_register(dev->pdev, NULL, NULL, NULL); return ret; @@ -566,45 +617,39 @@ static void nouveau_card_takedown(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_engine *engine = &dev_priv->engine; - NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state); - - if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) { - - nouveau_backlight_exit(dev); - - if (dev_priv->channel) { - nouveau_channel_free(dev_priv->channel); - dev_priv->channel = NULL; - } + nouveau_backlight_exit(dev); - if (!nouveau_noaccel) { - engine->fifo.takedown(dev); - engine->graph.takedown(dev); - } - engine->fb.takedown(dev); - engine->timer.takedown(dev); - engine->mc.takedown(dev); + if (dev_priv->channel) { + nouveau_channel_free(dev_priv->channel); + dev_priv->channel = NULL; + } - mutex_lock(&dev->struct_mutex); - ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); - ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); - mutex_unlock(&dev->struct_mutex); - nouveau_sgdma_takedown(dev); + if (!nouveau_noaccel) { + engine->fifo.takedown(dev); + engine->graph.takedown(dev); + } + engine->fb.takedown(dev); + engine->timer.takedown(dev); + engine->gpio.takedown(dev); + engine->mc.takedown(dev); + engine->display.late_takedown(dev); - nouveau_gpuobj_takedown(dev); - nouveau_mem_close(dev); - engine->instmem.takedown(dev); + mutex_lock(&dev->struct_mutex); + ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM); + ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT); + mutex_unlock(&dev->struct_mutex); + nouveau_sgdma_takedown(dev); - if (drm_core_check_feature(dev, DRIVER_MODESET)) - drm_irq_uninstall(dev); + nouveau_gpuobj_takedown(dev); + nouveau_mem_close(dev); + engine->instmem.takedown(dev); - nouveau_gpuobj_late_takedown(dev); - nouveau_bios_takedown(dev); + drm_irq_uninstall(dev); - vga_client_register(dev->pdev, NULL, NULL, NULL); + nouveau_gpuobj_late_takedown(dev); + nouveau_bios_takedown(dev); - dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN; - } + vga_client_register(dev->pdev, NULL, NULL, NULL); } 
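A pattern that repeats throughout the hunks above is the replacement of the instmem prepare_access()/finish_access() bracket around PRAMIN writes with a single flush() hook called once the writes are done. The sketch below only restates that pattern using calls already visible in this diff; dev, dev_priv and gpuobj stand in for whatever object the caller happens to be initialising:

	int i;

	/* Old scheme: every burst of object writes had to be bracketed. */
	dev_priv->engine.instmem.prepare_access(dev, true);
	for (i = 0; i < gpuobj->im_pramin->size; i += 4)
		nv_wo32(dev, gpuobj, i / 4, 0);
	dev_priv->engine.instmem.finish_access(dev);

	/* New scheme: write freely, then issue one explicit flush. */
	for (i = 0; i < gpuobj->im_pramin->size; i += 4)
		nv_wo32(dev, gpuobj, i / 4, 0);
	dev_priv->engine.instmem.flush(dev);

On the pre-NV50 chipsets the new hook is simply empty (nv04_instmem_flush() further below does nothing), while the NV50 family selects nv50_instmem_flush() or nv84_instmem_flush() in the engine table, as the nouveau_state.c hunk above shows.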
/* here a client dies, release the stuff that was allocated for its @@ -691,6 +736,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) struct drm_nouveau_private *dev_priv; uint32_t reg0; resource_size_t mmio_start_offs; + int ret; dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); if (!dev_priv) @@ -699,7 +745,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) dev_priv->dev = dev; dev_priv->flags = flags & NOUVEAU_FLAGS; - dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN; NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n", dev->pci_vendor, dev->pci_device, dev->pdev->class); @@ -773,11 +818,9 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n", dev_priv->card_type, reg0); - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - int ret = nouveau_remove_conflicting_drivers(dev); - if (ret) - return ret; - } + ret = nouveau_remove_conflicting_drivers(dev); + if (ret) + return ret; /* Map PRAMIN BAR, or on older cards, the aperture withing BAR0 */ if (dev_priv->card_type >= NV_40) { @@ -812,46 +855,26 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) dev_priv->flags |= NV_NFORCE2; /* For kernel modesetting, init card now and bring up fbcon */ - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - int ret = nouveau_card_init(dev); - if (ret) - return ret; - } + ret = nouveau_card_init(dev); + if (ret) + return ret; return 0; } -static void nouveau_close(struct drm_device *dev) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - - /* In the case of an error dev_priv may not be allocated yet */ - if (dev_priv) - nouveau_card_takedown(dev); -} - -/* KMS: we need mmio at load time, not when the first drm client opens. */ void nouveau_lastclose(struct drm_device *dev) { - if (drm_core_check_feature(dev, DRIVER_MODESET)) - return; - - nouveau_close(dev); } int nouveau_unload(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_engine *engine = &dev_priv->engine; - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - drm_kms_helper_poll_fini(dev); - nouveau_fbcon_fini(dev); - if (dev_priv->card_type >= NV_50) - nv50_display_destroy(dev); - else - nv04_display_destroy(dev); - nouveau_close(dev); - } + drm_kms_helper_poll_fini(dev); + nouveau_fbcon_fini(dev); + engine->display.destroy(dev); + nouveau_card_takedown(dev); iounmap(dev_priv->mmio); iounmap(dev_priv->ramin); @@ -867,8 +890,6 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_getparam *getparam = data; - NOUVEAU_CHECK_INITIALISED_WITH_RETURN; - switch (getparam->param) { case NOUVEAU_GETPARAM_CHIPSET_ID: getparam->value = dev_priv->chipset; @@ -937,8 +958,6 @@ nouveau_ioctl_setparam(struct drm_device *dev, void *data, { struct drm_nouveau_setparam *setparam = data; - NOUVEAU_CHECK_INITIALISED_WITH_RETURN; - switch (setparam->param) { default: NV_ERROR(dev, "unknown parameter %lld\n", setparam->param); diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index c385d50f041..bd35f930568 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -42,13 +42,13 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma) } static int -nouveau_ttm_mem_global_init(struct ttm_global_reference *ref) +nouveau_ttm_mem_global_init(struct drm_global_reference *ref) { return ttm_mem_global_init(ref->object); } 
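Further below, the display code stops calling nv17_gpio_get()/nv17_gpio_set() directly (the file itself is renamed to nv10_gpio.c) and instead goes through the per-chipset GPIO engine set up in nouveau_state.c above. A caller-side sketch of that pattern, using only names that appear in this diff; the surrounding load-detect logic is elided:

	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
	int saved;

	/* gpio->get/set dispatch to nv10_gpio_* or nv50_gpio_* depending on
	 * the chipset; the NV04 table leaves them NULL and installs only the
	 * stub init/takedown entries. */
	saved = gpio->get(dev, DCB_GPIO_TVDAC0);
	gpio->set(dev, DCB_GPIO_TVDAC0, true);	/* drive the TV DAC for sampling */
	/* ... perform load detection ... */
	gpio->set(dev, DCB_GPIO_TVDAC0, saved);	/* restore the previous state */

This is the shape nv17_dac_sample_load() and nv42_tv_sample_load() take in the hunks that follow.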
static void -nouveau_ttm_mem_global_release(struct ttm_global_reference *ref) +nouveau_ttm_mem_global_release(struct drm_global_reference *ref) { ttm_mem_global_release(ref->object); } @@ -56,16 +56,16 @@ nouveau_ttm_mem_global_release(struct ttm_global_reference *ref) int nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv) { - struct ttm_global_reference *global_ref; + struct drm_global_reference *global_ref; int ret; global_ref = &dev_priv->ttm.mem_global_ref; - global_ref->global_type = TTM_GLOBAL_TTM_MEM; + global_ref->global_type = DRM_GLOBAL_TTM_MEM; global_ref->size = sizeof(struct ttm_mem_global); global_ref->init = &nouveau_ttm_mem_global_init; global_ref->release = &nouveau_ttm_mem_global_release; - ret = ttm_global_item_ref(global_ref); + ret = drm_global_item_ref(global_ref); if (unlikely(ret != 0)) { DRM_ERROR("Failed setting up TTM memory accounting\n"); dev_priv->ttm.mem_global_ref.release = NULL; @@ -74,15 +74,15 @@ nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv) dev_priv->ttm.bo_global_ref.mem_glob = global_ref->object; global_ref = &dev_priv->ttm.bo_global_ref.ref; - global_ref->global_type = TTM_GLOBAL_TTM_BO; + global_ref->global_type = DRM_GLOBAL_TTM_BO; global_ref->size = sizeof(struct ttm_bo_global); global_ref->init = &ttm_bo_global_init; global_ref->release = &ttm_bo_global_release; - ret = ttm_global_item_ref(global_ref); + ret = drm_global_item_ref(global_ref); if (unlikely(ret != 0)) { DRM_ERROR("Failed setting up TTM BO subsystem\n"); - ttm_global_item_unref(&dev_priv->ttm.mem_global_ref); + drm_global_item_unref(&dev_priv->ttm.mem_global_ref); dev_priv->ttm.mem_global_ref.release = NULL; return ret; } @@ -96,8 +96,8 @@ nouveau_ttm_global_release(struct drm_nouveau_private *dev_priv) if (dev_priv->ttm.mem_global_ref.release == NULL) return; - ttm_global_item_unref(&dev_priv->ttm.bo_global_ref.ref); - ttm_global_item_unref(&dev_priv->ttm.mem_global_ref); + drm_global_item_unref(&dev_priv->ttm.bo_global_ref.ref); + drm_global_item_unref(&dev_priv->ttm.mem_global_ref); dev_priv->ttm.mem_global_ref.release = NULL; } diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index eba687f1099..1c20c08ce67 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c @@ -157,6 +157,7 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); struct drm_device *dev = crtc->dev; + struct drm_connector *connector; unsigned char seq1 = 0, crtc17 = 0; unsigned char crtc1A; @@ -211,6 +212,10 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode) NVVgaSeqReset(dev, nv_crtc->index, false); NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A); + + /* Update connector polling modes */ + list_for_each_entry(connector, &dev->mode_config.connector_list, head) + nouveau_connector_set_polling(connector); } static bool diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c index 1cb19e3acb5..ea3627041ec 100644 --- a/drivers/gpu/drm/nouveau/nv04_dac.c +++ b/drivers/gpu/drm/nouveau/nv04_dac.c @@ -220,6 +220,7 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio; struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder); uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput, @@ 
-251,22 +252,21 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder) nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf); } - saved_gpio1 = nv17_gpio_get(dev, DCB_GPIO_TVDAC1); - saved_gpio0 = nv17_gpio_get(dev, DCB_GPIO_TVDAC0); + saved_gpio1 = gpio->get(dev, DCB_GPIO_TVDAC1); + saved_gpio0 = gpio->get(dev, DCB_GPIO_TVDAC0); - nv17_gpio_set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV); - nv17_gpio_set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV); + gpio->set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV); + gpio->set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV); msleep(4); saved_routput = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset); head = (saved_routput & 0x100) >> 8; -#if 0 - /* if there's a spare crtc, using it will minimise flicker for the case - * where the in-use crtc is in use by an off-chip tmds encoder */ - if (xf86_config->crtc[head]->enabled && !xf86_config->crtc[head ^ 1]->enabled) + + /* if there's a spare crtc, using it will minimise flicker */ + if (!(NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX) & 0xC0)) head ^= 1; -#endif + /* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */ routput = (saved_routput & 0xfffffece) | head << 8; @@ -304,8 +304,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder) nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4); nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2); - nv17_gpio_set(dev, DCB_GPIO_TVDAC1, saved_gpio1); - nv17_gpio_set(dev, DCB_GPIO_TVDAC0, saved_gpio0); + gpio->set(dev, DCB_GPIO_TVDAC1, saved_gpio1); + gpio->set(dev, DCB_GPIO_TVDAC0, saved_gpio0); return sample; } @@ -315,9 +315,12 @@ nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; - uint32_t sample = nv17_dac_sample_load(encoder); - if (sample & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) { + if (nv04_dac_in_use(encoder)) + return connector_status_disconnected; + + if (nv17_dac_sample_load(encoder) & + NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) { NV_INFO(dev, "Load detected on output %c\n", '@' + ffs(dcb->or)); return connector_status_connected; @@ -330,6 +333,9 @@ static bool nv04_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { + if (nv04_dac_in_use(encoder)) + return false; + return true; } @@ -428,6 +434,17 @@ void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable) } } +/* Check if the DAC corresponding to 'encoder' is being used by + * someone else. 
*/ +bool nv04_dac_in_use(struct drm_encoder *encoder) +{ + struct drm_nouveau_private *dev_priv = encoder->dev->dev_private; + struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; + + return nv_gf4_disp_arch(encoder->dev) && + (dev_priv->dac_users[ffs(dcb->or) - 1] & ~(1 << dcb->index)); +} + static void nv04_dac_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; @@ -501,11 +518,13 @@ static const struct drm_encoder_funcs nv04_dac_funcs = { .destroy = nv04_dac_destroy, }; -int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry) +int +nv04_dac_create(struct drm_connector *connector, struct dcb_entry *entry) { const struct drm_encoder_helper_funcs *helper; - struct drm_encoder *encoder; struct nouveau_encoder *nv_encoder = NULL; + struct drm_device *dev = connector->dev; + struct drm_encoder *encoder; nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); if (!nv_encoder) @@ -527,5 +546,6 @@ int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry) encoder->possible_crtcs = entry->heads; encoder->possible_clones = 0; + drm_mode_connector_attach_encoder(connector, encoder); return 0; } diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c index 41634d4752f..3311f3a8c81 100644 --- a/drivers/gpu/drm/nouveau/nv04_dfp.c +++ b/drivers/gpu/drm/nouveau/nv04_dfp.c @@ -413,10 +413,6 @@ static void nv04_dfp_commit(struct drm_encoder *encoder) struct dcb_entry *dcbe = nv_encoder->dcb; int head = nouveau_crtc(encoder->crtc)->index; - NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n", - drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), - nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); - if (dcbe->type == OUTPUT_TMDS) run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock); else if (dcbe->type == OUTPUT_LVDS) @@ -584,11 +580,12 @@ static const struct drm_encoder_funcs nv04_dfp_funcs = { .destroy = nv04_dfp_destroy, }; -int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry) +int +nv04_dfp_create(struct drm_connector *connector, struct dcb_entry *entry) { const struct drm_encoder_helper_funcs *helper; - struct drm_encoder *encoder; struct nouveau_encoder *nv_encoder = NULL; + struct drm_encoder *encoder; int type; switch (entry->type) { @@ -613,11 +610,12 @@ int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry) nv_encoder->dcb = entry; nv_encoder->or = ffs(entry->or) - 1; - drm_encoder_init(dev, encoder, &nv04_dfp_funcs, type); + drm_encoder_init(connector->dev, encoder, &nv04_dfp_funcs, type); drm_encoder_helper_add(encoder, helper); encoder->possible_crtcs = entry->heads; encoder->possible_clones = 0; + drm_mode_connector_attach_encoder(connector, encoder); return 0; } diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c index c7898b4f6df..9e28cf772e3 100644 --- a/drivers/gpu/drm/nouveau/nv04_display.c +++ b/drivers/gpu/drm/nouveau/nv04_display.c @@ -32,8 +32,6 @@ #include "nouveau_encoder.h" #include "nouveau_connector.h" -#define MULTIPLE_ENCODERS(e) (e & (e - 1)) - static void nv04_display_store_initial_head_owner(struct drm_device *dev) { @@ -41,7 +39,7 @@ nv04_display_store_initial_head_owner(struct drm_device *dev) if (dev_priv->chipset != 0x11) { dev_priv->crtc_owner = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44); - goto ownerknown; + return; } /* reading CR44 is broken on nv11, so we attempt to infer it */ @@ -52,8 +50,6 @@ nv04_display_store_initial_head_owner(struct drm_device *dev) bool tvA = false; bool 
tvB = false; - NVLockVgaCrtcs(dev, false); - slaved_on_B = NVReadVgaCrtc(dev, 1, NV_CIO_CRE_PIXEL_INDEX) & 0x80; if (slaved_on_B) @@ -66,8 +62,6 @@ nv04_display_store_initial_head_owner(struct drm_device *dev) tvA = !(NVReadVgaCrtc(dev, 0, NV_CIO_CRE_LCD__INDEX) & MASK(NV_CIO_CRE_LCD_LCD_SELECT)); - NVLockVgaCrtcs(dev, true); - if (slaved_on_A && !tvA) dev_priv->crtc_owner = 0x0; else if (slaved_on_B && !tvB) @@ -79,14 +73,40 @@ nv04_display_store_initial_head_owner(struct drm_device *dev) else dev_priv->crtc_owner = 0x0; } +} + +int +nv04_display_early_init(struct drm_device *dev) +{ + /* Make the I2C buses accessible. */ + if (!nv_gf4_disp_arch(dev)) { + uint32_t pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE); + + if (!(pmc_enable & 1)) + nv_wr32(dev, NV03_PMC_ENABLE, pmc_enable | 1); + } -ownerknown: - NV_INFO(dev, "Initial CRTC_OWNER is %d\n", dev_priv->crtc_owner); + /* Unlock the VGA CRTCs. */ + NVLockVgaCrtcs(dev, false); + + /* Make sure the CRTCs aren't in slaved mode. */ + if (nv_two_heads(dev)) { + nv04_display_store_initial_head_owner(dev); + NVSetOwner(dev, 0); + } + + return 0; +} + +void +nv04_display_late_takedown(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (nv_two_heads(dev)) + NVSetOwner(dev, dev_priv->crtc_owner); - /* we need to ensure the heads are not tied henceforth, or reading any - * 8 bit reg on head B will fail - * setting a single arbitrary head solves that */ - NVSetOwner(dev, 0); + NVLockVgaCrtcs(dev, true); } int @@ -94,14 +114,13 @@ nv04_display_create(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct dcb_table *dcb = &dev_priv->vbios.dcb; + struct drm_connector *connector, *ct; struct drm_encoder *encoder; struct drm_crtc *crtc; int i, ret; NV_DEBUG_KMS(dev, "\n"); - if (nv_two_heads(dev)) - nv04_display_store_initial_head_owner(dev); nouveau_hw_save_vga_fonts(dev, 1); drm_mode_config_init(dev); @@ -132,19 +151,23 @@ nv04_display_create(struct drm_device *dev) for (i = 0; i < dcb->entries; i++) { struct dcb_entry *dcbent = &dcb->entry[i]; + connector = nouveau_connector_create(dev, dcbent->connector); + if (IS_ERR(connector)) + continue; + switch (dcbent->type) { case OUTPUT_ANALOG: - ret = nv04_dac_create(dev, dcbent); + ret = nv04_dac_create(connector, dcbent); break; case OUTPUT_LVDS: case OUTPUT_TMDS: - ret = nv04_dfp_create(dev, dcbent); + ret = nv04_dfp_create(connector, dcbent); break; case OUTPUT_TV: if (dcbent->location == DCB_LOC_ON_CHIP) - ret = nv17_tv_create(dev, dcbent); + ret = nv17_tv_create(connector, dcbent); else - ret = nv04_tv_create(dev, dcbent); + ret = nv04_tv_create(connector, dcbent); break; default: NV_WARN(dev, "DCB type %d not known\n", dcbent->type); @@ -155,12 +178,16 @@ nv04_display_create(struct drm_device *dev) continue; } - for (i = 0; i < dcb->connector.entries; i++) - nouveau_connector_create(dev, &dcb->connector.entry[i]); + list_for_each_entry_safe(connector, ct, + &dev->mode_config.connector_list, head) { + if (!connector->encoder_ids[0]) { + NV_WARN(dev, "%s has no encoders, removing\n", + drm_get_connector_name(connector)); + connector->funcs->destroy(connector); + } + } /* Save previous state */ - NVLockVgaCrtcs(dev, false); - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) crtc->funcs->save(crtc); @@ -191,8 +218,6 @@ nv04_display_destroy(struct drm_device *dev) } /* Restore state */ - NVLockVgaCrtcs(dev, false); - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { struct drm_encoder_helper_funcs 
*func = encoder->helper_private; @@ -207,15 +232,12 @@ nv04_display_destroy(struct drm_device *dev) nouveau_hw_save_vga_fonts(dev, 0); } -void -nv04_display_restore(struct drm_device *dev) +int +nv04_display_init(struct drm_device *dev) { - struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_encoder *encoder; struct drm_crtc *crtc; - NVLockVgaCrtcs(dev, false); - /* meh.. modeset apparently doesn't setup all the regs and depends * on pre-existing state, for now load the state of the card *before* * nouveau was loaded, and then do a modeset. @@ -233,12 +255,6 @@ nv04_display_restore(struct drm_device *dev) list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) crtc->funcs->restore(crtc); - if (nv_two_heads(dev)) { - NV_INFO(dev, "Restoring CRTC_OWNER to %d.\n", - dev_priv->crtc_owner); - NVSetOwner(dev, dev_priv->crtc_owner); - } - - NVLockVgaCrtcs(dev, true); + return 0; } diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c index 66fe55983b6..06cedd99c26 100644 --- a/drivers/gpu/drm/nouveau/nv04_fifo.c +++ b/drivers/gpu/drm/nouveau/nv04_fifo.c @@ -112,6 +112,12 @@ nv04_fifo_channel_id(struct drm_device *dev) NV03_PFIFO_CACHE1_PUSH1_CHID_MASK; } +#ifdef __BIG_ENDIAN +#define DMA_FETCH_ENDIANNESS NV_PFIFO_CACHE1_BIG_ENDIAN +#else +#define DMA_FETCH_ENDIANNESS 0 +#endif + int nv04_fifo_create_context(struct nouveau_channel *chan) { @@ -131,18 +137,13 @@ nv04_fifo_create_context(struct nouveau_channel *chan) spin_lock_irqsave(&dev_priv->context_switch_lock, flags); /* Setup initial state */ - dev_priv->engine.instmem.prepare_access(dev, true); RAMFC_WR(DMA_PUT, chan->pushbuf_base); RAMFC_WR(DMA_GET, chan->pushbuf_base); RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4); RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 | -#ifdef __BIG_ENDIAN - NV_PFIFO_CACHE1_BIG_ENDIAN | -#endif - 0)); - dev_priv->engine.instmem.finish_access(dev); + DMA_FETCH_ENDIANNESS)); /* enable the fifo dma operation */ nv_wr32(dev, NV04_PFIFO_MODE, @@ -169,8 +170,6 @@ nv04_fifo_do_load_context(struct drm_device *dev, int chid) struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t fc = NV04_RAMFC(chid), tmp; - dev_priv->engine.instmem.prepare_access(dev, false); - nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0)); nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4)); tmp = nv_ri32(dev, fc + 8); @@ -181,8 +180,6 @@ nv04_fifo_do_load_context(struct drm_device *dev, int chid) nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 20)); nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 24)); - dev_priv->engine.instmem.finish_access(dev); - nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0); nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0); } @@ -223,7 +220,6 @@ nv04_fifo_unload_context(struct drm_device *dev) return -EINVAL; } - dev_priv->engine.instmem.prepare_access(dev, true); RAMFC_WR(DMA_PUT, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT)); RAMFC_WR(DMA_GET, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET)); tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16; @@ -233,7 +229,6 @@ nv04_fifo_unload_context(struct drm_device *dev) RAMFC_WR(DMA_FETCH, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH)); RAMFC_WR(ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE)); RAMFC_WR(PULL1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1)); - dev_priv->engine.instmem.finish_access(dev); nv04_fifo_do_load_context(dev, pfifo->channels - 1); nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 
1); @@ -297,6 +292,7 @@ nv04_fifo_init(struct drm_device *dev) nv04_fifo_init_intr(dev); pfifo->enable(dev); + pfifo->reassign(dev, true); for (i = 0; i < dev_priv->engine.fifo.channels; i++) { if (dev_priv->fifos[i]) { diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c index 618355e9cdd..c8973421b63 100644 --- a/drivers/gpu/drm/nouveau/nv04_graph.c +++ b/drivers/gpu/drm/nouveau/nv04_graph.c @@ -342,7 +342,7 @@ static uint32_t nv04_graph_ctx_regs[] = { }; struct graph_state { - int nv04[ARRAY_SIZE(nv04_graph_ctx_regs)]; + uint32_t nv04[ARRAY_SIZE(nv04_graph_ctx_regs)]; }; struct nouveau_channel * @@ -527,8 +527,7 @@ static int nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass, int mthd, uint32_t data) { - chan->fence.last_sequence_irq = data; - nouveau_fence_handler(chan->dev, chan->id); + atomic_set(&chan->fence.last_sequence_irq, data); return 0; } diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c index a3b9563a6f6..4408232d33f 100644 --- a/drivers/gpu/drm/nouveau/nv04_instmem.c +++ b/drivers/gpu/drm/nouveau/nv04_instmem.c @@ -49,10 +49,8 @@ nv04_instmem_determine_amount(struct drm_device *dev) NV_DEBUG(dev, "RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram >> 10); /* Clear all of it, except the BIOS image that's in the first 64KiB */ - dev_priv->engine.instmem.prepare_access(dev, true); for (i = 64 * 1024; i < dev_priv->ramin_rsvd_vram; i += 4) nv_wi32(dev, i, 0x00000000); - dev_priv->engine.instmem.finish_access(dev); } static void @@ -106,7 +104,7 @@ int nv04_instmem_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t offset; - int ret = 0; + int ret; nv04_instmem_determine_amount(dev); nv04_instmem_configure_fixed_tables(dev); @@ -129,14 +127,14 @@ int nv04_instmem_init(struct drm_device *dev) offset = 0x40000; } - ret = nouveau_mem_init_heap(&dev_priv->ramin_heap, - offset, dev_priv->ramin_rsvd_vram - offset); + ret = drm_mm_init(&dev_priv->ramin_heap, offset, + dev_priv->ramin_rsvd_vram - offset); if (ret) { - dev_priv->ramin_heap = NULL; - NV_ERROR(dev, "Failed to init RAMIN heap\n"); + NV_ERROR(dev, "Failed to init RAMIN heap: %d\n", ret); + return ret; } - return ret; + return 0; } void @@ -186,12 +184,7 @@ nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) } void -nv04_instmem_prepare_access(struct drm_device *dev, bool write) -{ -} - -void -nv04_instmem_finish_access(struct drm_device *dev) +nv04_instmem_flush(struct drm_device *dev) { } diff --git a/drivers/gpu/drm/nouveau/nv04_mc.c b/drivers/gpu/drm/nouveau/nv04_mc.c index 617ed1e0526..2af43a1cb2e 100644 --- a/drivers/gpu/drm/nouveau/nv04_mc.c +++ b/drivers/gpu/drm/nouveau/nv04_mc.c @@ -11,6 +11,10 @@ nv04_mc_init(struct drm_device *dev) */ nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF); + + /* Disable PROM access. 
*/ + nv_wr32(dev, NV_PBUS_PCI_NV_20, NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED); + return 0; } diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c index c4e3404337d..94e299cef0b 100644 --- a/drivers/gpu/drm/nouveau/nv04_tv.c +++ b/drivers/gpu/drm/nouveau/nv04_tv.c @@ -34,69 +34,26 @@ #include "i2c/ch7006.h" -static struct { - struct i2c_board_info board_info; - struct drm_encoder_funcs funcs; - struct drm_encoder_helper_funcs hfuncs; - void *params; - -} nv04_tv_encoder_info[] = { +static struct i2c_board_info nv04_tv_encoder_info[] = { { - .board_info = { I2C_BOARD_INFO("ch7006", 0x75) }, - .params = &(struct ch7006_encoder_params) { + I2C_BOARD_INFO("ch7006", 0x75), + .platform_data = &(struct ch7006_encoder_params) { CH7006_FORMAT_RGB24m12I, CH7006_CLOCK_MASTER, 0, 0, 0, CH7006_SYNC_SLAVE, CH7006_SYNC_SEPARATED, CH7006_POUT_3_3V, CH7006_ACTIVE_HSYNC - }, + } }, + { } }; -static bool probe_i2c_addr(struct i2c_adapter *adapter, int addr) -{ - struct i2c_msg msg = { - .addr = addr, - .len = 0, - }; - - return i2c_transfer(adapter, &msg, 1) == 1; -} - int nv04_tv_identify(struct drm_device *dev, int i2c_index) { - struct nouveau_i2c_chan *i2c; - bool was_locked; - int i, ret; - - NV_TRACE(dev, "Probing TV encoders on I2C bus: %d\n", i2c_index); - - i2c = nouveau_i2c_find(dev, i2c_index); - if (!i2c) - return -ENODEV; - - was_locked = NVLockVgaCrtcs(dev, false); - - for (i = 0; i < ARRAY_SIZE(nv04_tv_encoder_info); i++) { - if (probe_i2c_addr(&i2c->adapter, - nv04_tv_encoder_info[i].board_info.addr)) { - ret = i; - break; - } - } - - if (i < ARRAY_SIZE(nv04_tv_encoder_info)) { - NV_TRACE(dev, "Detected TV encoder: %s\n", - nv04_tv_encoder_info[i].board_info.type); - - } else { - NV_TRACE(dev, "No TV encoders found.\n"); - i = -ENODEV; - } - - NVLockVgaCrtcs(dev, was_locked); - return i; + return nouveau_i2c_identify(dev, "TV encoder", + nv04_tv_encoder_info, i2c_index); } + #define PLLSEL_TV_CRTC1_MASK \ (NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1 \ | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1) @@ -214,30 +171,32 @@ static void nv04_tv_commit(struct drm_encoder *encoder) static void nv04_tv_destroy(struct drm_encoder *encoder) { - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - to_encoder_slave(encoder)->slave_funcs->destroy(encoder); drm_encoder_cleanup(encoder); - kfree(nv_encoder); + kfree(encoder->helper_private); + kfree(nouveau_encoder(encoder)); } -int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry) +static const struct drm_encoder_funcs nv04_tv_funcs = { + .destroy = nv04_tv_destroy, +}; + +int +nv04_tv_create(struct drm_connector *connector, struct dcb_entry *entry) { struct nouveau_encoder *nv_encoder; struct drm_encoder *encoder; - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct i2c_adapter *adap; - struct drm_encoder_funcs *funcs = NULL; - struct drm_encoder_helper_funcs *hfuncs = NULL; - struct drm_encoder_slave_funcs *sfuncs = NULL; - int i2c_index = entry->i2c_index; + struct drm_device *dev = connector->dev; + struct drm_encoder_helper_funcs *hfuncs; + struct drm_encoder_slave_funcs *sfuncs; + struct nouveau_i2c_chan *i2c = + nouveau_i2c_find(dev, entry->i2c_index); int type, ret; - bool was_locked; /* Ensure that we can talk to this encoder */ - type = nv04_tv_identify(dev, i2c_index); + type = nv04_tv_identify(dev, entry->i2c_index); if (type < 0) return type; @@ -246,41 +205,32 @@ int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry) if (!nv_encoder) return -ENOMEM; + hfuncs = 
kzalloc(sizeof(*hfuncs), GFP_KERNEL); + if (!hfuncs) { + ret = -ENOMEM; + goto fail_free; + } + /* Initialize the common members */ encoder = to_drm_encoder(nv_encoder); - funcs = &nv04_tv_encoder_info[type].funcs; - hfuncs = &nv04_tv_encoder_info[type].hfuncs; - - drm_encoder_init(dev, encoder, funcs, DRM_MODE_ENCODER_TVDAC); + drm_encoder_init(dev, encoder, &nv04_tv_funcs, DRM_MODE_ENCODER_TVDAC); drm_encoder_helper_add(encoder, hfuncs); encoder->possible_crtcs = entry->heads; encoder->possible_clones = 0; - nv_encoder->dcb = entry; nv_encoder->or = ffs(entry->or) - 1; /* Run the slave-specific initialization */ - adap = &dev_priv->vbios.dcb.i2c[i2c_index].chan->adapter; - - was_locked = NVLockVgaCrtcs(dev, false); - - ret = drm_i2c_encoder_init(encoder->dev, to_encoder_slave(encoder), adap, - &nv04_tv_encoder_info[type].board_info); - - NVLockVgaCrtcs(dev, was_locked); - + ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder), + &i2c->adapter, &nv04_tv_encoder_info[type]); if (ret < 0) - goto fail; + goto fail_cleanup; /* Fill the function pointers */ sfuncs = to_encoder_slave(encoder)->slave_funcs; - *funcs = (struct drm_encoder_funcs) { - .destroy = nv04_tv_destroy, - }; - *hfuncs = (struct drm_encoder_helper_funcs) { .dpms = nv04_tv_dpms, .save = sfuncs->save, @@ -292,14 +242,17 @@ int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry) .detect = sfuncs->detect, }; - /* Set the slave encoder configuration */ - sfuncs->set_config(encoder, nv04_tv_encoder_info[type].params); + /* Attach it to the specified connector. */ + sfuncs->set_config(encoder, nv04_tv_encoder_info[type].platform_data); + sfuncs->create_resources(encoder, connector); + drm_mode_connector_attach_encoder(connector, encoder); return 0; -fail: +fail_cleanup: drm_encoder_cleanup(encoder); - + kfree(hfuncs); +fail_free: kfree(nv_encoder); return ret; } diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c index 7aeabf262bc..7a4069cf5d0 100644 --- a/drivers/gpu/drm/nouveau/nv10_fifo.c +++ b/drivers/gpu/drm/nouveau/nv10_fifo.c @@ -55,7 +55,6 @@ nv10_fifo_create_context(struct nouveau_channel *chan) /* Fill entries that are seen filled in dumps of nvidia driver just * after channel's is put into DMA mode */ - dev_priv->engine.instmem.prepare_access(dev, true); nv_wi32(dev, fc + 0, chan->pushbuf_base); nv_wi32(dev, fc + 4, chan->pushbuf_base); nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4); @@ -66,7 +65,6 @@ nv10_fifo_create_context(struct nouveau_channel *chan) NV_PFIFO_CACHE1_BIG_ENDIAN | #endif 0); - dev_priv->engine.instmem.finish_access(dev); /* enable the fifo dma operation */ nv_wr32(dev, NV04_PFIFO_MODE, @@ -91,8 +89,6 @@ nv10_fifo_do_load_context(struct drm_device *dev, int chid) struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t fc = NV10_RAMFC(chid), tmp; - dev_priv->engine.instmem.prepare_access(dev, false); - nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0)); nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4)); nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8)); @@ -117,8 +113,6 @@ nv10_fifo_do_load_context(struct drm_device *dev, int chid) nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 48)); out: - dev_priv->engine.instmem.finish_access(dev); - nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0); nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0); } @@ -155,8 +149,6 @@ nv10_fifo_unload_context(struct drm_device *dev) return 0; fc = NV10_RAMFC(chid); - dev_priv->engine.instmem.prepare_access(dev, true); - nv_wi32(dev, 
fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT)); nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET)); nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT)); @@ -179,8 +171,6 @@ nv10_fifo_unload_context(struct drm_device *dev) nv_wi32(dev, fc + 48, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET)); out: - dev_priv->engine.instmem.finish_access(dev); - nv10_fifo_do_load_context(dev, pfifo->channels - 1); nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1); return 0; diff --git a/drivers/gpu/drm/nouveau/nv17_gpio.c b/drivers/gpu/drm/nouveau/nv10_gpio.c index 2e58c331e9b..007fc29e2f8 100644 --- a/drivers/gpu/drm/nouveau/nv17_gpio.c +++ b/drivers/gpu/drm/nouveau/nv10_gpio.c @@ -55,7 +55,7 @@ get_gpio_location(struct dcb_gpio_entry *ent, uint32_t *reg, uint32_t *shift, } int -nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag) +nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag) { struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag); uint32_t reg, shift, mask, value; @@ -72,7 +72,7 @@ nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag) } int -nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state) +nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state) { struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag); uint32_t reg, shift, mask, value; diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c index 74c880374fb..44fefb0c708 100644 --- a/drivers/gpu/drm/nouveau/nv17_tv.c +++ b/drivers/gpu/drm/nouveau/nv17_tv.c @@ -37,6 +37,7 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio; uint32_t testval, regoffset = nv04_dac_output_offset(encoder); uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end, fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c; @@ -52,8 +53,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder) head = (dacclk & 0x100) >> 8; /* Save the previous state. */ - gpio1 = nv17_gpio_get(dev, DCB_GPIO_TVDAC1); - gpio0 = nv17_gpio_get(dev, DCB_GPIO_TVDAC0); + gpio1 = gpio->get(dev, DCB_GPIO_TVDAC1); + gpio0 = gpio->get(dev, DCB_GPIO_TVDAC0); fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL); fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START); fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END); @@ -64,8 +65,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder) ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c); /* Prepare the DAC for load detection. 
*/ - nv17_gpio_set(dev, DCB_GPIO_TVDAC1, true); - nv17_gpio_set(dev, DCB_GPIO_TVDAC0, true); + gpio->set(dev, DCB_GPIO_TVDAC1, true); + gpio->set(dev, DCB_GPIO_TVDAC0, true); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047); @@ -110,12 +111,27 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder) NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal); - nv17_gpio_set(dev, DCB_GPIO_TVDAC1, gpio1); - nv17_gpio_set(dev, DCB_GPIO_TVDAC0, gpio0); + gpio->set(dev, DCB_GPIO_TVDAC1, gpio1); + gpio->set(dev, DCB_GPIO_TVDAC0, gpio0); return sample; } +static bool +get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask) +{ + /* Zotac FX5200 */ + if (dev->pdev->device == 0x0322 && + dev->pdev->subsystem_vendor == 0x19da && + (dev->pdev->subsystem_device == 0x1035 || + dev->pdev->subsystem_device == 0x2035)) { + *pin_mask = 0xc; + return false; + } + + return true; +} + static enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) { @@ -124,12 +140,20 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) struct drm_mode_config *conf = &dev->mode_config; struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); struct dcb_entry *dcb = tv_enc->base.dcb; + bool reliable = get_tv_detect_quirks(dev, &tv_enc->pin_mask); - if (dev_priv->chipset == 0x42 || - dev_priv->chipset == 0x43) - tv_enc->pin_mask = nv42_tv_sample_load(encoder) >> 28 & 0xe; - else - tv_enc->pin_mask = nv17_dac_sample_load(encoder) >> 28 & 0xe; + if (nv04_dac_in_use(encoder)) + return connector_status_disconnected; + + if (reliable) { + if (dev_priv->chipset == 0x42 || + dev_priv->chipset == 0x43) + tv_enc->pin_mask = + nv42_tv_sample_load(encoder) >> 28 & 0xe; + else + tv_enc->pin_mask = + nv17_dac_sample_load(encoder) >> 28 & 0xe; + } switch (tv_enc->pin_mask) { case 0x2: @@ -154,7 +178,9 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) conf->tv_subconnector_property, tv_enc->subconnector); - if (tv_enc->subconnector) { + if (!reliable) { + return connector_status_unknown; + } else if (tv_enc->subconnector) { NV_INFO(dev, "Load detected on output %c\n", '@' + ffs(dcb->or)); return connector_status_connected; @@ -296,6 +322,9 @@ static bool nv17_tv_mode_fixup(struct drm_encoder *encoder, { struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); + if (nv04_dac_in_use(encoder)) + return false; + if (tv_norm->kind == CTV_ENC_MODE) adjusted_mode->clock = tv_norm->ctv_enc_mode.mode.clock; else @@ -307,6 +336,8 @@ static bool nv17_tv_mode_fixup(struct drm_encoder *encoder, static void nv17_tv_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio; struct nv17_tv_state *regs = &to_tv_enc(encoder)->state; struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); @@ -331,8 +362,8 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode) nv_load_ptv(dev, regs, 200); - nv17_gpio_set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON); - nv17_gpio_set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON); + gpio->set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON); + gpio->set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON); nv04_dac_update_dacclk(encoder, mode == 
DRM_MODE_DPMS_ON); } @@ -744,8 +775,10 @@ static struct drm_encoder_funcs nv17_tv_funcs = { .destroy = nv17_tv_destroy, }; -int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry) +int +nv17_tv_create(struct drm_connector *connector, struct dcb_entry *entry) { + struct drm_device *dev = connector->dev; struct drm_encoder *encoder; struct nv17_tv_encoder *tv_enc = NULL; @@ -774,5 +807,7 @@ int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry) encoder->possible_crtcs = entry->heads; encoder->possible_clones = 0; + nv17_tv_create_resources(encoder, connector); + drm_mode_connector_attach_encoder(connector, encoder); return 0; } diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c index d6fc0a82f03..17f309b36c9 100644 --- a/drivers/gpu/drm/nouveau/nv20_graph.c +++ b/drivers/gpu/drm/nouveau/nv20_graph.c @@ -370,68 +370,54 @@ nv20_graph_create_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *); - unsigned int ctx_size; unsigned int idoffs = 0x28/4; int ret; switch (dev_priv->chipset) { case 0x20: - ctx_size = NV20_GRCTX_SIZE; ctx_init = nv20_graph_context_init; idoffs = 0; break; case 0x25: case 0x28: - ctx_size = NV25_GRCTX_SIZE; ctx_init = nv25_graph_context_init; break; case 0x2a: - ctx_size = NV2A_GRCTX_SIZE; ctx_init = nv2a_graph_context_init; idoffs = 0; break; case 0x30: case 0x31: - ctx_size = NV30_31_GRCTX_SIZE; ctx_init = nv30_31_graph_context_init; break; case 0x34: - ctx_size = NV34_GRCTX_SIZE; ctx_init = nv34_graph_context_init; break; case 0x35: case 0x36: - ctx_size = NV35_36_GRCTX_SIZE; ctx_init = nv35_36_graph_context_init; break; default: - ctx_size = 0; - ctx_init = nv35_36_graph_context_init; - NV_ERROR(dev, "Please contact the devs if you want your NV%x" - " card to work\n", dev_priv->chipset); - return -ENOSYS; - break; + BUG_ON(1); } - ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16, - NVOBJ_FLAG_ZERO_ALLOC, - &chan->ramin_grctx); + ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size, + 16, NVOBJ_FLAG_ZERO_ALLOC, + &chan->ramin_grctx); if (ret) return ret; /* Initialise default context values */ - dev_priv->engine.instmem.prepare_access(dev, true); ctx_init(dev, chan->ramin_grctx->gpuobj); /* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */ nv_wo32(dev, chan->ramin_grctx->gpuobj, idoffs, (chan->id << 24) | 0x1); /* CTX_USER */ - nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id, - chan->ramin_grctx->instance >> 4); - - dev_priv->engine.instmem.finish_access(dev); + nv_wo32(dev, pgraph->ctx_table->gpuobj, chan->id, + chan->ramin_grctx->instance >> 4); return 0; } @@ -440,13 +426,12 @@ nv20_graph_destroy_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; if (chan->ramin_grctx) nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); - dev_priv->engine.instmem.prepare_access(dev, true); - nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id, 0); - dev_priv->engine.instmem.finish_access(dev); + nv_wo32(dev, pgraph->ctx_table->gpuobj, chan->id, 0); } int @@ -538,29 +523,44 @@ nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr, int nv20_graph_init(struct drm_device *dev) { - struct drm_nouveau_private 
*dev_priv = - (struct drm_nouveau_private *)dev->dev_private; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; uint32_t tmp, vramsz; int ret, i; + switch (dev_priv->chipset) { + case 0x20: + pgraph->grctx_size = NV20_GRCTX_SIZE; + break; + case 0x25: + case 0x28: + pgraph->grctx_size = NV25_GRCTX_SIZE; + break; + case 0x2a: + pgraph->grctx_size = NV2A_GRCTX_SIZE; + break; + default: + NV_ERROR(dev, "unknown chipset, disabling acceleration\n"); + pgraph->accel_blocked = true; + return 0; + } + nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH); nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH); - if (!dev_priv->ctx_table) { + if (!pgraph->ctx_table) { /* Create Context Pointer Table */ - dev_priv->ctx_table_size = 32 * 4; - ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, - dev_priv->ctx_table_size, 16, + ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC, - &dev_priv->ctx_table); + &pgraph->ctx_table); if (ret) return ret; } nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, - dev_priv->ctx_table->instance >> 4); + pgraph->ctx_table->instance >> 4); nv20_graph_rdi(dev); @@ -616,7 +616,7 @@ nv20_graph_init(struct drm_device *dev) nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp); /* begin RAM config */ - vramsz = drm_get_resource_len(dev, 0) - 1; + vramsz = pci_resource_len(dev->pdev, 0) - 1; nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0)); nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1)); nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000); @@ -644,34 +644,52 @@ void nv20_graph_takedown(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; - nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table); + nouveau_gpuobj_ref_del(dev, &pgraph->ctx_table); } int nv30_graph_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; int ret, i; + switch (dev_priv->chipset) { + case 0x30: + case 0x31: + pgraph->grctx_size = NV30_31_GRCTX_SIZE; + break; + case 0x34: + pgraph->grctx_size = NV34_GRCTX_SIZE; + break; + case 0x35: + case 0x36: + pgraph->grctx_size = NV35_36_GRCTX_SIZE; + break; + default: + NV_ERROR(dev, "unknown chipset, disabling acceleration\n"); + pgraph->accel_blocked = true; + return 0; + } + nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH); nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH); - if (!dev_priv->ctx_table) { + if (!pgraph->ctx_table) { /* Create Context Pointer Table */ - dev_priv->ctx_table_size = 32 * 4; - ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, - dev_priv->ctx_table_size, 16, + ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC, - &dev_priv->ctx_table); + &pgraph->ctx_table); if (ret) return ret; } nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, - dev_priv->ctx_table->instance >> 4); + pgraph->ctx_table->instance >> 4); nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF); nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF); @@ -717,7 +735,7 @@ nv30_graph_init(struct drm_device *dev) nv_wr32(dev, 0x0040075c , 0x00000001); /* begin RAM config */ - /* vramsz = drm_get_resource_len(dev, 0) - 1; */ + /* vramsz = pci_resource_len(dev->pdev, 0) - 1; */ nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0)); nv_wr32(dev, 0x4009A8, nv_rd32(dev, 
NV04_PFB_CFG1)); if (dev_priv->chipset != 0x34) { diff --git a/drivers/gpu/drm/nouveau/nv30_fb.c b/drivers/gpu/drm/nouveau/nv30_fb.c new file mode 100644 index 00000000000..9d35c8b3b83 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv30_fb.c @@ -0,0 +1,87 @@ +/* + * Copyright (C) 2010 Francisco Jerez. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" +#include "nouveau_drm.h" + +static int +calc_ref(int b, int l, int i) +{ + int j, x = 0; + + for (j = 0; j < 4; j++) { + int n = (b >> (8 * j) & 0xf); + int m = (l >> (8 * i) & 0xff) + 2 * (n & 0x8 ? n - 0x10 : n); + + x |= (0x80 | (m & 0x1f)) << (8 * j); + } + + return x; +} + +int +nv30_fb_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; + int i, j; + + pfb->num_tiles = NV10_PFB_TILE__SIZE; + + /* Turn all the tiling regions off. */ + for (i = 0; i < pfb->num_tiles; i++) + pfb->set_region_tiling(dev, i, 0, 0, 0); + + /* Init the memory timing regs at 0x10037c/0x1003ac */ + if (dev_priv->chipset == 0x30 || + dev_priv->chipset == 0x31 || + dev_priv->chipset == 0x35) { + /* Related to ROP count */ + int n = (dev_priv->chipset == 0x31 ? 2 : 4); + int b = (dev_priv->chipset > 0x30 ? + nv_rd32(dev, 0x122c) & 0xf : 0); + int l = nv_rd32(dev, 0x1003d0); + + for (i = 0; i < n; i++) { + for (j = 0; j < 3; j++) + nv_wr32(dev, 0x10037c + 0xc * i + 0x4 * j, + calc_ref(b, l, j)); + + for (j = 0; j < 2; j++) + nv_wr32(dev, 0x1003ac + 0x8 * i + 0x4 * j, + calc_ref(b, l, j)); + } + } + + return 0; +} + +void +nv30_fb_takedown(struct drm_device *dev) +{ +} diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c index 500ccfd3a0b..2b67f1835c3 100644 --- a/drivers/gpu/drm/nouveau/nv40_fifo.c +++ b/drivers/gpu/drm/nouveau/nv40_fifo.c @@ -48,7 +48,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan) spin_lock_irqsave(&dev_priv->context_switch_lock, flags); - dev_priv->engine.instmem.prepare_access(dev, true); nv_wi32(dev, fc + 0, chan->pushbuf_base); nv_wi32(dev, fc + 4, chan->pushbuf_base); nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4); @@ -61,7 +60,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan) 0x30000000 /* no idea.. 
*/); nv_wi32(dev, fc + 56, chan->ramin_grctx->instance >> 4); nv_wi32(dev, fc + 60, 0x0001FFFF); - dev_priv->engine.instmem.finish_access(dev); /* enable the fifo dma operation */ nv_wr32(dev, NV04_PFIFO_MODE, @@ -89,8 +87,6 @@ nv40_fifo_do_load_context(struct drm_device *dev, int chid) struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t fc = NV40_RAMFC(chid), tmp, tmp2; - dev_priv->engine.instmem.prepare_access(dev, false); - nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0)); nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4)); nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8)); @@ -127,8 +123,6 @@ nv40_fifo_do_load_context(struct drm_device *dev, int chid) nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76)); nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80)); - dev_priv->engine.instmem.finish_access(dev); - nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0); nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0); } @@ -166,7 +160,6 @@ nv40_fifo_unload_context(struct drm_device *dev) return 0; fc = NV40_RAMFC(chid); - dev_priv->engine.instmem.prepare_access(dev, true); nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT)); nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET)); nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT)); @@ -200,7 +193,6 @@ nv40_fifo_unload_context(struct drm_device *dev) tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16); nv_wi32(dev, fc + 72, tmp); #endif - dev_priv->engine.instmem.finish_access(dev); nv40_fifo_do_load_context(dev, pfifo->channels - 1); nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index 704a25d04ac..fd7d2b50131 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c @@ -58,6 +58,7 @@ nv40_graph_create_context(struct nouveau_channel *chan) struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + struct nouveau_grctx ctx = {}; int ret; ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size, @@ -67,20 +68,13 @@ nv40_graph_create_context(struct nouveau_channel *chan) return ret; /* Initialise default context values */ - dev_priv->engine.instmem.prepare_access(dev, true); - if (!pgraph->ctxprog) { - struct nouveau_grctx ctx = {}; - - ctx.dev = chan->dev; - ctx.mode = NOUVEAU_GRCTX_VALS; - ctx.data = chan->ramin_grctx->gpuobj; - nv40_grctx_init(&ctx); - } else { - nouveau_grctx_vals_load(dev, chan->ramin_grctx->gpuobj); - } + ctx.dev = chan->dev; + ctx.mode = NOUVEAU_GRCTX_VALS; + ctx.data = chan->ramin_grctx->gpuobj; + nv40_grctx_init(&ctx); + nv_wo32(dev, chan->ramin_grctx->gpuobj, 0, chan->ramin_grctx->gpuobj->im_pramin->start); - dev_priv->engine.instmem.finish_access(dev); return 0; } @@ -238,7 +232,8 @@ nv40_graph_init(struct drm_device *dev) struct drm_nouveau_private *dev_priv = (struct drm_nouveau_private *)dev->dev_private; struct nouveau_fb_engine *pfb = &dev_priv->engine.fb; - uint32_t vramsz; + struct nouveau_grctx ctx = {}; + uint32_t vramsz, *cp; int i, j; nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & @@ -246,32 +241,22 @@ nv40_graph_init(struct drm_device *dev) nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH); - if (nouveau_ctxfw) { - nouveau_grctx_prog_load(dev); - dev_priv->engine.graph.grctx_size = 175 * 1024; - } + cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL); + if (!cp) + return -ENOMEM; - if 
(!dev_priv->engine.graph.ctxprog) { - struct nouveau_grctx ctx = {}; - uint32_t *cp; + ctx.dev = dev; + ctx.mode = NOUVEAU_GRCTX_PROG; + ctx.data = cp; + ctx.ctxprog_max = 256; + nv40_grctx_init(&ctx); + dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4; - cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL); - if (!cp) - return -ENOMEM; + nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); + for (i = 0; i < ctx.ctxprog_len; i++) + nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]); - ctx.dev = dev; - ctx.mode = NOUVEAU_GRCTX_PROG; - ctx.data = cp; - ctx.ctxprog_max = 256; - nv40_grctx_init(&ctx); - dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4; - - nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); - for (i = 0; i < ctx.ctxprog_len; i++) - nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]); - - kfree(cp); - } + kfree(cp); /* No context present currently */ nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); @@ -367,7 +352,7 @@ nv40_graph_init(struct drm_device *dev) nv40_graph_set_region_tiling(dev, i, 0, 0, 0); /* begin RAM config */ - vramsz = drm_get_resource_len(dev, 0) - 1; + vramsz = pci_resource_len(dev->pdev, 0) - 1; switch (dev_priv->chipset) { case 0x40: nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0)); @@ -407,7 +392,6 @@ nv40_graph_init(struct drm_device *dev) void nv40_graph_takedown(struct drm_device *dev) { - nouveau_grctx_fini(dev); } struct nouveau_pgraph_object_class nv40_graph_grclass[] = { diff --git a/drivers/gpu/drm/nouveau/nv40_mc.c b/drivers/gpu/drm/nouveau/nv40_mc.c index 2a3495e848e..e4e72c12ab6 100644 --- a/drivers/gpu/drm/nouveau/nv40_mc.c +++ b/drivers/gpu/drm/nouveau/nv40_mc.c @@ -19,7 +19,7 @@ nv40_mc_init(struct drm_device *dev) case 0x46: /* G72 */ case 0x4e: case 0x4c: /* C51_G7X */ - tmp = nv_rd32(dev, NV40_PFB_020C); + tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA); nv_wr32(dev, NV40_PMC_1700, tmp); nv_wr32(dev, NV40_PMC_1704, 0); nv_wr32(dev, NV40_PMC_1708, 0); diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index b4e4a3b05ea..5d11ea10166 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -440,47 +440,15 @@ nv50_crtc_prepare(struct drm_crtc *crtc) { struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); struct drm_device *dev = crtc->dev; - struct drm_encoder *encoder; - uint32_t dac = 0, sor = 0; NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); - /* Disconnect all unused encoders. 
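[Editor's note — condensed sketch, not part of the patch. The nv40_graph_init() hunk above drops the external-firmware path (nouveau_ctxfw / nouveau_grctx_prog_load) and always generates the context program in the driver: build the microcode into a scratch buffer with nv40_grctx_init() in NOUVEAU_GRCTX_PROG mode, record the resulting context size, then upload it word by word through the UCODE_INDEX/UCODE_DATA pair. All identifiers below are as in the diff; error paths and the surrounding init code are omitted.]

        /* sketch: generate and upload the NV40 ctxprog at init time */
        struct nouveau_grctx ctx = {};
        uint32_t *cp;
        int i;

        cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL);
        if (!cp)
                return -ENOMEM;

        ctx.dev = dev;
        ctx.mode = NOUVEAU_GRCTX_PROG;         /* emit microcode, not values */
        ctx.data = cp;
        ctx.ctxprog_max = 256;
        nv40_grctx_init(&ctx);                 /* fills cp[], sets lengths */

        dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;

        nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
        for (i = 0; i < ctx.ctxprog_len; i++)
                nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
        kfree(cp);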
*/ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - - if (!drm_helper_encoder_in_use(encoder)) - continue; - - if (nv_encoder->dcb->type == OUTPUT_ANALOG || - nv_encoder->dcb->type == OUTPUT_TV) - dac |= (1 << nv_encoder->or); - else - sor |= (1 << nv_encoder->or); - } - - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - - if (nv_encoder->dcb->type == OUTPUT_ANALOG || - nv_encoder->dcb->type == OUTPUT_TV) { - if (dac & (1 << nv_encoder->or)) - continue; - } else { - if (sor & (1 << nv_encoder->or)) - continue; - } - - nv_encoder->disconnect(nv_encoder); - } - nv50_crtc_blank(nv_crtc, true); } static void nv50_crtc_commit(struct drm_crtc *crtc) { - struct drm_crtc *crtc2; struct drm_device *dev = crtc->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *evo = dev_priv->evo; @@ -491,20 +459,14 @@ nv50_crtc_commit(struct drm_crtc *crtc) nv50_crtc_blank(nv_crtc, false); - /* Explicitly blank all unused crtc's. */ - list_for_each_entry(crtc2, &dev->mode_config.crtc_list, head) { - if (!drm_helper_crtc_in_use(crtc2)) - nv50_crtc_blank(nouveau_crtc(crtc2), true); - } - ret = RING_SPACE(evo, 2); if (ret) { NV_ERROR(dev, "no space while committing crtc\n"); return; } BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1); - OUT_RING(evo, 0); - FIRE_RING(evo); + OUT_RING (evo, 0); + FIRE_RING (evo); } static bool diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c index 1fd9537beff..1bc08596294 100644 --- a/drivers/gpu/drm/nouveau/nv50_dac.c +++ b/drivers/gpu/drm/nouveau/nv50_dac.c @@ -37,22 +37,31 @@ #include "nv50_display.h" static void -nv50_dac_disconnect(struct nouveau_encoder *nv_encoder) +nv50_dac_disconnect(struct drm_encoder *encoder) { - struct drm_device *dev = to_drm_encoder(nv_encoder)->dev; + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *evo = dev_priv->evo; int ret; + if (!nv_encoder->crtc) + return; + nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true); + NV_DEBUG_KMS(dev, "Disconnecting DAC %d\n", nv_encoder->or); - ret = RING_SPACE(evo, 2); + ret = RING_SPACE(evo, 4); if (ret) { NV_ERROR(dev, "no space while disconnecting DAC\n"); return; } BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1); - OUT_RING(evo, 0); + OUT_RING (evo, 0); + BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1); + OUT_RING (evo, 0); + + nv_encoder->crtc = NULL; } static enum drm_connector_status @@ -213,7 +222,8 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, uint32_t mode_ctl = 0, mode_ctl2 = 0; int ret; - NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or); + NV_DEBUG_KMS(dev, "or %d type %d crtc %d\n", + nv_encoder->or, nv_encoder->dcb->type, crtc->index); nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON); @@ -243,6 +253,14 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2); OUT_RING(evo, mode_ctl); OUT_RING(evo, mode_ctl2); + + nv_encoder->crtc = encoder->crtc; +} + +static struct drm_crtc * +nv50_dac_crtc_get(struct drm_encoder *encoder) +{ + return nouveau_encoder(encoder)->crtc; } static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = { @@ -253,7 +271,9 @@ static const struct drm_encoder_helper_funcs 
nv50_dac_helper_funcs = { .prepare = nv50_dac_prepare, .commit = nv50_dac_commit, .mode_set = nv50_dac_mode_set, - .detect = nv50_dac_detect + .get_crtc = nv50_dac_crtc_get, + .detect = nv50_dac_detect, + .disable = nv50_dac_disconnect }; static void @@ -275,14 +295,11 @@ static const struct drm_encoder_funcs nv50_dac_encoder_funcs = { }; int -nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry) +nv50_dac_create(struct drm_connector *connector, struct dcb_entry *entry) { struct nouveau_encoder *nv_encoder; struct drm_encoder *encoder; - NV_DEBUG_KMS(dev, "\n"); - NV_INFO(dev, "Detected a DAC output\n"); - nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); if (!nv_encoder) return -ENOMEM; @@ -291,14 +308,14 @@ nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry) nv_encoder->dcb = entry; nv_encoder->or = ffs(entry->or) - 1; - nv_encoder->disconnect = nv50_dac_disconnect; - - drm_encoder_init(dev, encoder, &nv50_dac_encoder_funcs, + drm_encoder_init(connector->dev, encoder, &nv50_dac_encoder_funcs, DRM_MODE_ENCODER_DAC); drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs); encoder->possible_crtcs = entry->heads; encoder->possible_clones = 0; + + drm_mode_connector_attach_encoder(connector, encoder); return 0; } diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 580a5d10be9..f13ad0de9c8 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -71,14 +71,13 @@ nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name, return ret; } - dev_priv->engine.instmem.prepare_access(dev, true); nv_wo32(dev, obj, 0, (tile_flags << 22) | (magic_flags << 16) | class); nv_wo32(dev, obj, 1, limit); nv_wo32(dev, obj, 2, offset); nv_wo32(dev, obj, 3, 0x00000000); nv_wo32(dev, obj, 4, 0x00000000); nv_wo32(dev, obj, 5, 0x00010000); - dev_priv->engine.instmem.finish_access(dev); + dev_priv->engine.instmem.flush(dev); return 0; } @@ -110,8 +109,8 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan) return ret; } - ret = nouveau_mem_init_heap(&chan->ramin_heap, chan->ramin->gpuobj-> - im_pramin->start, 32768); + ret = drm_mm_init(&chan->ramin_heap, + chan->ramin->gpuobj->im_pramin->start, 32768); if (ret) { NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret); nv50_evo_channel_del(pchan); @@ -179,13 +178,25 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan) } int +nv50_display_early_init(struct drm_device *dev) +{ + return 0; +} + +void +nv50_display_late_takedown(struct drm_device *dev) +{ +} + +int nv50_display_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer; + struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio; struct nouveau_channel *evo = dev_priv->evo; struct drm_connector *connector; - uint32_t val, ram_amount, hpd_en[2]; + uint32_t val, ram_amount; uint64_t start; int ret, i; @@ -366,26 +377,13 @@ nv50_display_init(struct drm_device *dev) NV50_PDISPLAY_INTR_EN_CLK_UNK40)); /* enable hotplug interrupts */ - hpd_en[0] = hpd_en[1] = 0; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct nouveau_connector *conn = nouveau_connector(connector); - struct dcb_gpio_entry *gpio; if (conn->dcb->gpio_tag == 0xff) continue; - gpio = nouveau_bios_gpio_entry(dev, conn->dcb->gpio_tag); - if (!gpio) - continue; - - hpd_en[gpio->line >> 4] |= (0x00010001 << (gpio->line & 0xf)); - } - 
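[Editor's note — condensed sketch, not part of the patch. nv50_dac_create() above (like nv50_sor_create() and nv17_tv_create() elsewhere in this diff) now takes the already-created drm_connector instead of the bare drm_device and finishes by attaching the new encoder with drm_mode_connector_attach_encoder(). The caller-side pattern, condensed from the nv50_display_create() hunk further down; identifiers are as in the diff.]

        /* sketch: connector-first output creation (see nv50_display_create) */
        connector = nouveau_connector_create(dev, entry->connector);
        if (IS_ERR(connector))
                continue;                          /* skip broken DCB entries */

        switch (entry->type) {
        case OUTPUT_TMDS:
        case OUTPUT_LVDS:
        case OUTPUT_DP:
                nv50_sor_create(connector, entry); /* attaches its encoder */
                break;
        case OUTPUT_ANALOG:
                nv50_dac_create(connector, entry); /* attaches its encoder */
                break;
        }
        /* connectors that end up with no encoders are destroyed afterwards */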
- nv_wr32(dev, 0xe054, 0xffffffff); - nv_wr32(dev, 0xe050, hpd_en[0]); - if (dev_priv->chipset >= 0x90) { - nv_wr32(dev, 0xe074, 0xffffffff); - nv_wr32(dev, 0xe070, hpd_en[1]); + pgpio->irq_enable(dev, conn->dcb->gpio_tag, true); } return 0; @@ -465,6 +463,7 @@ int nv50_display_create(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct dcb_table *dcb = &dev_priv->vbios.dcb; + struct drm_connector *connector, *ct; int ret, i; NV_DEBUG_KMS(dev, "\n"); @@ -507,14 +506,18 @@ int nv50_display_create(struct drm_device *dev) continue; } + connector = nouveau_connector_create(dev, entry->connector); + if (IS_ERR(connector)) + continue; + switch (entry->type) { case OUTPUT_TMDS: case OUTPUT_LVDS: case OUTPUT_DP: - nv50_sor_create(dev, entry); + nv50_sor_create(connector, entry); break; case OUTPUT_ANALOG: - nv50_dac_create(dev, entry); + nv50_dac_create(connector, entry); break; default: NV_WARN(dev, "DCB encoder %d unknown\n", entry->type); @@ -522,11 +525,13 @@ int nv50_display_create(struct drm_device *dev) } } - for (i = 0 ; i < dcb->connector.entries; i++) { - if (i != 0 && dcb->connector.entry[i].index2 == - dcb->connector.entry[i - 1].index2) - continue; - nouveau_connector_create(dev, &dcb->connector.entry[i]); + list_for_each_entry_safe(connector, ct, + &dev->mode_config.connector_list, head) { + if (!connector->encoder_ids[0]) { + NV_WARN(dev, "%s has no encoders, removing\n", + drm_get_connector_name(connector)); + connector->funcs->destroy(connector); + } } ret = nv50_display_init(dev); @@ -538,7 +543,8 @@ int nv50_display_create(struct drm_device *dev) return 0; } -int nv50_display_destroy(struct drm_device *dev) +void +nv50_display_destroy(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -548,135 +554,30 @@ int nv50_display_destroy(struct drm_device *dev) nv50_display_disable(dev); nv50_evo_channel_del(&dev_priv->evo); - - return 0; -} - -static inline uint32_t -nv50_display_mode_ctrl(struct drm_device *dev, bool sor, int or) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t mc; - - if (sor) { - if (dev_priv->chipset < 0x90 || - dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0) - mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_P(or)); - else - mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_P(or)); - } else { - mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_P(or)); - } - - return mc; -} - -static int -nv50_display_irq_head(struct drm_device *dev, int *phead, - struct dcb_entry **pdcbent) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - uint32_t unk30 = nv_rd32(dev, NV50_PDISPLAY_UNK30_CTRL); - uint32_t dac = 0, sor = 0; - int head, i, or = 0, type = OUTPUT_ANY; - - /* We're assuming that head 0 *or* head 1 will be active here, - * and not both. I'm not sure if the hw will even signal both - * ever, but it definitely shouldn't for us as we commit each - * CRTC separately, and submission will be blocked by the GPU - * until we handle each in turn. - */ - NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); - head = ffs((unk30 >> 9) & 3) - 1; - if (head < 0) - return -EINVAL; - - /* This assumes CRTCs are never bound to multiple encoders, which - * should be the case. 
- */ - for (i = 0; i < 3 && type == OUTPUT_ANY; i++) { - uint32_t mc = nv50_display_mode_ctrl(dev, false, i); - if (!(mc & (1 << head))) - continue; - - switch ((mc >> 8) & 0xf) { - case 0: type = OUTPUT_ANALOG; break; - case 1: type = OUTPUT_TV; break; - default: - NV_ERROR(dev, "unknown dac mode_ctrl: 0x%08x\n", dac); - return -1; - } - - or = i; - } - - for (i = 0; i < 4 && type == OUTPUT_ANY; i++) { - uint32_t mc = nv50_display_mode_ctrl(dev, true, i); - if (!(mc & (1 << head))) - continue; - - switch ((mc >> 8) & 0xf) { - case 0: type = OUTPUT_LVDS; break; - case 1: type = OUTPUT_TMDS; break; - case 2: type = OUTPUT_TMDS; break; - case 5: type = OUTPUT_TMDS; break; - case 8: type = OUTPUT_DP; break; - case 9: type = OUTPUT_DP; break; - default: - NV_ERROR(dev, "unknown sor mode_ctrl: 0x%08x\n", sor); - return -1; - } - - or = i; - } - - NV_DEBUG_KMS(dev, "type %d, or %d\n", type, or); - if (type == OUTPUT_ANY) { - NV_ERROR(dev, "unknown encoder!!\n"); - return -1; - } - - for (i = 0; i < dev_priv->vbios.dcb.entries; i++) { - struct dcb_entry *dcbent = &dev_priv->vbios.dcb.entry[i]; - - if (dcbent->type != type) - continue; - - if (!(dcbent->or & (1 << or))) - continue; - - *phead = head; - *pdcbent = dcbent; - return 0; - } - - NV_ERROR(dev, "no DCB entry for %d %d\n", dac != 0, or); - return 0; } -static uint32_t -nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent, - int pxclk) +static u16 +nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb, + u32 mc, int pxclk) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_connector *nv_connector = NULL; struct drm_encoder *encoder; struct nvbios *bios = &dev_priv->vbios; - uint32_t mc, script = 0, or; + u32 script = 0, or; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - if (nv_encoder->dcb != dcbent) + if (nv_encoder->dcb != dcb) continue; nv_connector = nouveau_encoder_connector_get(nv_encoder); break; } - or = ffs(dcbent->or) - 1; - mc = nv50_display_mode_ctrl(dev, dcbent->type != OUTPUT_ANALOG, or); - switch (dcbent->type) { + or = ffs(dcb->or) - 1; + switch (dcb->type) { case OUTPUT_LVDS: script = (mc >> 8) & 0xf; if (bios->fp_no_ddc) { @@ -767,17 +668,88 @@ nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr) static void nv50_display_unk10_handler(struct drm_device *dev) { - struct dcb_entry *dcbent; - int head, ret; + struct drm_nouveau_private *dev_priv = dev->dev_private; + u32 unk30 = nv_rd32(dev, 0x610030), mc; + int i, crtc, or, type = OUTPUT_ANY; - ret = nv50_display_irq_head(dev, &head, &dcbent); - if (ret) - goto ack; + NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); + dev_priv->evo_irq.dcb = NULL; nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) & ~8); - nouveau_bios_run_display_table(dev, dcbent, 0, -1); + /* Determine which CRTC we're dealing with, only 1 ever will be + * signalled at the same time with the current nouveau code. 
+ */ + crtc = ffs((unk30 & 0x00000060) >> 5) - 1; + if (crtc < 0) + goto ack; + + /* Nothing needs to be done for the encoder */ + crtc = ffs((unk30 & 0x00000180) >> 7) - 1; + if (crtc < 0) + goto ack; + /* Find which encoder was connected to the CRTC */ + for (i = 0; type == OUTPUT_ANY && i < 3; i++) { + mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_C(i)); + NV_DEBUG_KMS(dev, "DAC-%d mc: 0x%08x\n", i, mc); + if (!(mc & (1 << crtc))) + continue; + + switch ((mc & 0x00000f00) >> 8) { + case 0: type = OUTPUT_ANALOG; break; + case 1: type = OUTPUT_TV; break; + default: + NV_ERROR(dev, "invalid mc, DAC-%d: 0x%08x\n", i, mc); + goto ack; + } + + or = i; + } + + for (i = 0; type == OUTPUT_ANY && i < 4; i++) { + if (dev_priv->chipset < 0x90 || + dev_priv->chipset == 0x92 || + dev_priv->chipset == 0xa0) + mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(i)); + else + mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(i)); + + NV_DEBUG_KMS(dev, "SOR-%d mc: 0x%08x\n", i, mc); + if (!(mc & (1 << crtc))) + continue; + + switch ((mc & 0x00000f00) >> 8) { + case 0: type = OUTPUT_LVDS; break; + case 1: type = OUTPUT_TMDS; break; + case 2: type = OUTPUT_TMDS; break; + case 5: type = OUTPUT_TMDS; break; + case 8: type = OUTPUT_DP; break; + case 9: type = OUTPUT_DP; break; + default: + NV_ERROR(dev, "invalid mc, SOR-%d: 0x%08x\n", i, mc); + goto ack; + } + + or = i; + } + + /* There was no encoder to disable */ + if (type == OUTPUT_ANY) + goto ack; + + /* Disable the encoder */ + for (i = 0; i < dev_priv->vbios.dcb.entries; i++) { + struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i]; + + if (dcb->type == type && (dcb->or & (1 << or))) { + nouveau_bios_run_display_table(dev, dcb, 0, -1); + dev_priv->evo_irq.dcb = dcb; + goto ack; + } + } + + NV_ERROR(dev, "no dcb for %d %d 0x%08x\n", or, type, mc); ack: nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10); nv_wr32(dev, 0x610030, 0x80000000); @@ -817,33 +789,103 @@ nv50_display_unk20_dp_hack(struct drm_device *dev, struct dcb_entry *dcb) static void nv50_display_unk20_handler(struct drm_device *dev) { - struct dcb_entry *dcbent; - uint32_t tmp, pclk, script; - int head, or, ret; + struct drm_nouveau_private *dev_priv = dev->dev_private; + u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc; + struct dcb_entry *dcb; + int i, crtc, or, type = OUTPUT_ANY; - ret = nv50_display_irq_head(dev, &head, &dcbent); - if (ret) + NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); + dcb = dev_priv->evo_irq.dcb; + if (dcb) { + nouveau_bios_run_display_table(dev, dcb, 0, -2); + dev_priv->evo_irq.dcb = NULL; + } + + /* CRTC clock change requested? 
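[Editor's note — illustrative sketch, not part of the patch. The rewritten unk10/unk20 handlers identify the active encoder directly from the DAC/SOR MODE_CTRL registers: bit (1 << crtc) says which head the output is bound to, and bits 11:8 encode the output type. The enum and helper names below are invented for illustration; the case values match the switch statements in the diff.]

#include <stdint.h>

enum output_type { OUT_ANALOG, OUT_TV, OUT_LVDS, OUT_TMDS, OUT_DP, OUT_ANY };

/* Decode bits 11:8 of a DAC MODE_CTRL value (per the unk10/unk20 hunks). */
static enum output_type decode_dac_mc(uint32_t mc)
{
        switch ((mc & 0x00000f00) >> 8) {
        case 0:  return OUT_ANALOG;
        case 1:  return OUT_TV;
        default: return OUT_ANY;        /* treated as an error in the driver */
        }
}

/* Decode bits 11:8 of a SOR MODE_CTRL value. */
static enum output_type decode_sor_mc(uint32_t mc)
{
        switch ((mc & 0x00000f00) >> 8) {
        case 0:          return OUT_LVDS;
        case 1: case 2:
        case 5:          return OUT_TMDS;
        case 8: case 9:  return OUT_DP;
        default:         return OUT_ANY;
        }
}

/* Is this output driving the given head? */
static int mc_drives_crtc(uint32_t mc, int crtc)
{
        return (mc & (1u << crtc)) != 0;
}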
*/ + crtc = ffs((unk30 & 0x00000600) >> 9) - 1; + if (crtc >= 0) { + pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)); + pclk &= 0x003fffff; + + nv50_crtc_set_clock(dev, crtc, pclk); + + tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc)); + tmp &= ~0x000000f; + nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc), tmp); + } + + /* Nothing needs to be done for the encoder */ + crtc = ffs((unk30 & 0x00000180) >> 7) - 1; + if (crtc < 0) goto ack; - or = ffs(dcbent->or) - 1; - pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff; - script = nv50_display_script_select(dev, dcbent, pclk); + pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)) & 0x003fffff; - NV_DEBUG_KMS(dev, "head %d pxclk: %dKHz\n", head, pclk); + /* Find which encoder is connected to the CRTC */ + for (i = 0; type == OUTPUT_ANY && i < 3; i++) { + mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_P(i)); + NV_DEBUG_KMS(dev, "DAC-%d mc: 0x%08x\n", i, mc); + if (!(mc & (1 << crtc))) + continue; - if (dcbent->type != OUTPUT_DP) - nouveau_bios_run_display_table(dev, dcbent, 0, -2); + switch ((mc & 0x00000f00) >> 8) { + case 0: type = OUTPUT_ANALOG; break; + case 1: type = OUTPUT_TV; break; + default: + NV_ERROR(dev, "invalid mc, DAC-%d: 0x%08x\n", i, mc); + goto ack; + } - nv50_crtc_set_clock(dev, head, pclk); + or = i; + } - nouveau_bios_run_display_table(dev, dcbent, script, pclk); + for (i = 0; type == OUTPUT_ANY && i < 4; i++) { + if (dev_priv->chipset < 0x90 || + dev_priv->chipset == 0x92 || + dev_priv->chipset == 0xa0) + mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_P(i)); + else + mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_P(i)); - nv50_display_unk20_dp_hack(dev, dcbent); + NV_DEBUG_KMS(dev, "SOR-%d mc: 0x%08x\n", i, mc); + if (!(mc & (1 << crtc))) + continue; + + switch ((mc & 0x00000f00) >> 8) { + case 0: type = OUTPUT_LVDS; break; + case 1: type = OUTPUT_TMDS; break; + case 2: type = OUTPUT_TMDS; break; + case 5: type = OUTPUT_TMDS; break; + case 8: type = OUTPUT_DP; break; + case 9: type = OUTPUT_DP; break; + default: + NV_ERROR(dev, "invalid mc, SOR-%d: 0x%08x\n", i, mc); + goto ack; + } + + or = i; + } + + if (type == OUTPUT_ANY) + goto ack; + + /* Enable the encoder */ + for (i = 0; i < dev_priv->vbios.dcb.entries; i++) { + dcb = &dev_priv->vbios.dcb.entry[i]; + if (dcb->type == type && (dcb->or & (1 << or))) + break; + } + + if (i == dev_priv->vbios.dcb.entries) { + NV_ERROR(dev, "no dcb for %d %d 0x%08x\n", or, type, mc); + goto ack; + } - tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head)); - tmp &= ~0x000000f; - nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp); + script = nv50_display_script_select(dev, dcb, mc, pclk); + nouveau_bios_run_display_table(dev, dcb, script, pclk); - if (dcbent->type != OUTPUT_ANALOG) { + nv50_display_unk20_dp_hack(dev, dcb); + + if (dcb->type != OUTPUT_ANALOG) { tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or)); tmp &= ~0x00000f0f; if (script & 0x0100) @@ -853,24 +895,61 @@ nv50_display_unk20_handler(struct drm_device *dev) nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0); } + dev_priv->evo_irq.dcb = dcb; + dev_priv->evo_irq.pclk = pclk; + dev_priv->evo_irq.script = script; + ack: nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20); nv_wr32(dev, 0x610030, 0x80000000); } +/* If programming a TMDS output on a SOR that can also be configured for + * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off. 
+ * + * It looks like the VBIOS TMDS scripts make an attempt at this, however, + * the VBIOS scripts on at least one board I have only switch it off on + * link 0, causing a blank display if the output has previously been + * programmed for DisplayPort. + */ +static void +nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_entry *dcb) +{ + int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1); + struct drm_encoder *encoder; + u32 tmp; + + if (dcb->type != OUTPUT_TMDS) + return; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + + if (nv_encoder->dcb->type == OUTPUT_DP && + nv_encoder->dcb->or & (1 << or)) { + tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link)); + tmp &= ~NV50_SOR_DP_CTRL_ENABLED; + nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp); + break; + } + } +} + static void nv50_display_unk40_handler(struct drm_device *dev) { - struct dcb_entry *dcbent; - int head, pclk, script, ret; + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct dcb_entry *dcb = dev_priv->evo_irq.dcb; + u16 script = dev_priv->evo_irq.script; + u32 unk30 = nv_rd32(dev, 0x610030), pclk = dev_priv->evo_irq.pclk; - ret = nv50_display_irq_head(dev, &head, &dcbent); - if (ret) + NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); + dev_priv->evo_irq.dcb = NULL; + if (!dcb) goto ack; - pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff; - script = nv50_display_script_select(dev, dcbent, pclk); - nouveau_bios_run_display_table(dev, dcbent, script, -pclk); + nouveau_bios_run_display_table(dev, dcb, script, -pclk); + nv50_display_unk40_dp_set_tmds(dev, dcb); ack: nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40); diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h index 581d405ac01..c551f0b85ee 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.h +++ b/drivers/gpu/drm/nouveau/nv50_display.h @@ -38,9 +38,11 @@ void nv50_display_irq_handler(struct drm_device *dev); void nv50_display_irq_handler_bh(struct work_struct *work); void nv50_display_irq_hotplug_bh(struct work_struct *work); -int nv50_display_init(struct drm_device *dev); +int nv50_display_early_init(struct drm_device *dev); +void nv50_display_late_takedown(struct drm_device *dev); int nv50_display_create(struct drm_device *dev); -int nv50_display_destroy(struct drm_device *dev); +int nv50_display_init(struct drm_device *dev); +void nv50_display_destroy(struct drm_device *dev); int nv50_crtc_blank(struct nouveau_crtc *, bool blank); int nv50_crtc_set_clock(struct drm_device *, int head, int pclk); diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index e20c0e2474f..fb0281ae8f9 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c @@ -28,41 +28,33 @@ #include "drm.h" #include "nouveau_drv.h" -struct nv50_fifo_priv { - struct nouveau_gpuobj_ref *thingo[2]; - int cur_thingo; -}; - -#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50) - static void -nv50_fifo_init_thingo(struct drm_device *dev) +nv50_fifo_playlist_update(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv; + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; struct nouveau_gpuobj_ref *cur; int i, nr; NV_DEBUG(dev, "\n"); - cur = priv->thingo[priv->cur_thingo]; - priv->cur_thingo = !priv->cur_thingo; + cur = pfifo->playlist[pfifo->cur_playlist]; + 
pfifo->cur_playlist = !pfifo->cur_playlist; /* We never schedule channel 0 or 127 */ - dev_priv->engine.instmem.prepare_access(dev, true); for (i = 1, nr = 0; i < 127; i++) { if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc) nv_wo32(dev, cur->gpuobj, nr++, i); } - dev_priv->engine.instmem.finish_access(dev); + dev_priv->engine.instmem.flush(dev); nv_wr32(dev, 0x32f4, cur->instance >> 12); nv_wr32(dev, 0x32ec, nr); nv_wr32(dev, 0x2500, 0x101); } -static int -nv50_fifo_channel_enable(struct drm_device *dev, int channel, bool nt) +static void +nv50_fifo_channel_enable(struct drm_device *dev, int channel) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan = dev_priv->fifos[channel]; @@ -70,37 +62,28 @@ nv50_fifo_channel_enable(struct drm_device *dev, int channel, bool nt) NV_DEBUG(dev, "ch%d\n", channel); - if (!chan->ramfc) - return -EINVAL; - - if (IS_G80) + if (dev_priv->chipset == 0x50) inst = chan->ramfc->instance >> 12; else inst = chan->ramfc->instance >> 8; - nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), - inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); - if (!nt) - nv50_fifo_init_thingo(dev); - return 0; + nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst | + NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED); } static void -nv50_fifo_channel_disable(struct drm_device *dev, int channel, bool nt) +nv50_fifo_channel_disable(struct drm_device *dev, int channel) { struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t inst; - NV_DEBUG(dev, "ch%d, nt=%d\n", channel, nt); + NV_DEBUG(dev, "ch%d\n", channel); - if (IS_G80) + if (dev_priv->chipset == 0x50) inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80; else inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84; nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst); - - if (!nt) - nv50_fifo_init_thingo(dev); } static void @@ -133,12 +116,12 @@ nv50_fifo_init_context_table(struct drm_device *dev) for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) { if (dev_priv->fifos[i]) - nv50_fifo_channel_enable(dev, i, true); + nv50_fifo_channel_enable(dev, i); else - nv50_fifo_channel_disable(dev, i, true); + nv50_fifo_channel_disable(dev, i); } - nv50_fifo_init_thingo(dev); + nv50_fifo_playlist_update(dev); } static void @@ -162,41 +145,38 @@ nv50_fifo_init_regs(struct drm_device *dev) nv_wr32(dev, 0x3270, 0); /* Enable dummy channels setup by nv50_instmem.c */ - nv50_fifo_channel_enable(dev, 0, true); - nv50_fifo_channel_enable(dev, 127, true); + nv50_fifo_channel_enable(dev, 0); + nv50_fifo_channel_enable(dev, 127); } int nv50_fifo_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nv50_fifo_priv *priv; + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; int ret; NV_DEBUG(dev, "\n"); - priv = dev_priv->engine.fifo.priv; - if (priv) { - priv->cur_thingo = !priv->cur_thingo; + if (pfifo->playlist[0]) { + pfifo->cur_playlist = !pfifo->cur_playlist; goto just_reset; } - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - dev_priv->engine.fifo.priv = priv; - ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000, - NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[0]); + NVOBJ_FLAG_ZERO_ALLOC, + &pfifo->playlist[0]); if (ret) { - NV_ERROR(dev, "error creating thingo0: %d\n", ret); + NV_ERROR(dev, "error creating playlist 0: %d\n", ret); return ret; } ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000, - NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[1]); + NVOBJ_FLAG_ZERO_ALLOC, + &pfifo->playlist[1]); if (ret) { - NV_ERROR(dev, "error creating thingo1: %d\n", 
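[Editor's note — condensed sketch, not part of the patch. The nv50_fifo rework renames the old "thingo" objects to a pair of playlists, moves them from the private struct into nouveau_fifo_engine, and keeps using them as a double buffer: each update writes the active channel IDs into the currently idle playlist, flips cur_playlist, and then points PFIFO at the fresh copy, presumably so the hardware never reads a half-written list. Identifiers and register offsets below are as in the diff.]

        /* sketch of the double-buffered playlist update */
        cur = pfifo->playlist[pfifo->cur_playlist];  /* buffer HW isn't using */
        pfifo->cur_playlist = !pfifo->cur_playlist;  /* flip for next update  */

        for (i = 1, nr = 0; i < 127; i++)            /* ch 0/127 are reserved */
                if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc)
                        nv_wo32(dev, cur->gpuobj, nr++, i);
        dev_priv->engine.instmem.flush(dev);

        nv_wr32(dev, 0x32f4, cur->instance >> 12);   /* playlist address */
        nv_wr32(dev, 0x32ec, nr);                    /* number of entries */
        nv_wr32(dev, 0x2500, 0x101);                 /* apply */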
ret); + nouveau_gpuobj_ref_del(dev, &pfifo->playlist[0]); + NV_ERROR(dev, "error creating playlist 1: %d\n", ret); return ret; } @@ -216,18 +196,15 @@ void nv50_fifo_takedown(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv; + struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; NV_DEBUG(dev, "\n"); - if (!priv) + if (!pfifo->playlist[0]) return; - nouveau_gpuobj_ref_del(dev, &priv->thingo[0]); - nouveau_gpuobj_ref_del(dev, &priv->thingo[1]); - - dev_priv->engine.fifo.priv = NULL; - kfree(priv); + nouveau_gpuobj_ref_del(dev, &pfifo->playlist[0]); + nouveau_gpuobj_ref_del(dev, &pfifo->playlist[1]); } int @@ -248,7 +225,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan) NV_DEBUG(dev, "ch%d\n", chan->id); - if (IS_G80) { + if (dev_priv->chipset == 0x50) { uint32_t ramin_poffset = chan->ramin->gpuobj->im_pramin->start; uint32_t ramin_voffset = chan->ramin->gpuobj->im_backing_start; @@ -281,10 +258,10 @@ nv50_fifo_create_context(struct nouveau_channel *chan) spin_lock_irqsave(&dev_priv->context_switch_lock, flags); - dev_priv->engine.instmem.prepare_access(dev, true); - nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4); - nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4)); + nv_wo32(dev, ramfc, 0x80/4, (0 << 27) /* 4KiB */ | + (4 << 24) /* SEARCH_FULL */ | + (chan->ramht->instance >> 4)); nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff); nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff); nv_wo32(dev, ramfc, 0x40/4, 0x00000000); @@ -295,7 +272,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan) chan->dma.ib_base * 4); nv_wo32(dev, ramfc, 0x54/4, drm_order(chan->dma.ib_max + 1) << 16); - if (!IS_G80) { + if (dev_priv->chipset != 0x50) { nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id); nv_wo32(dev, chan->ramin->gpuobj, 1, chan->ramfc->instance >> 8); @@ -304,16 +281,10 @@ nv50_fifo_create_context(struct nouveau_channel *chan) nv_wo32(dev, ramfc, 0x98/4, chan->ramin->instance >> 12); } - dev_priv->engine.instmem.finish_access(dev); - - ret = nv50_fifo_channel_enable(dev, chan->id, false); - if (ret) { - NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret); - spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); - nouveau_gpuobj_ref_del(dev, &chan->ramfc); - return ret; - } + dev_priv->engine.instmem.flush(dev); + nv50_fifo_channel_enable(dev, chan->id); + nv50_fifo_playlist_update(dev); spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); return 0; } @@ -328,11 +299,12 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan) /* This will ensure the channel is seen as disabled. 
*/ chan->ramfc = NULL; - nv50_fifo_channel_disable(dev, chan->id, false); + nv50_fifo_channel_disable(dev, chan->id); /* Dummy channel, also used on ch 127 */ if (chan->id == 0) - nv50_fifo_channel_disable(dev, 127, false); + nv50_fifo_channel_disable(dev, 127); + nv50_fifo_playlist_update(dev); nouveau_gpuobj_ref_del(dev, &ramfc); nouveau_gpuobj_ref_del(dev, &chan->cache); @@ -349,8 +321,6 @@ nv50_fifo_load_context(struct nouveau_channel *chan) NV_DEBUG(dev, "ch%d\n", chan->id); - dev_priv->engine.instmem.prepare_access(dev, false); - nv_wr32(dev, 0x3330, nv_ro32(dev, ramfc, 0x00/4)); nv_wr32(dev, 0x3334, nv_ro32(dev, ramfc, 0x04/4)); nv_wr32(dev, 0x3240, nv_ro32(dev, ramfc, 0x08/4)); @@ -396,7 +366,7 @@ nv50_fifo_load_context(struct nouveau_channel *chan) nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0); /* guessing that all the 0x34xx regs aren't on NV50 */ - if (!IS_G80) { + if (dev_priv->chipset != 0x50) { nv_wr32(dev, 0x340c, nv_ro32(dev, ramfc, 0x88/4)); nv_wr32(dev, 0x3400, nv_ro32(dev, ramfc, 0x8c/4)); nv_wr32(dev, 0x3404, nv_ro32(dev, ramfc, 0x90/4)); @@ -404,8 +374,6 @@ nv50_fifo_load_context(struct nouveau_channel *chan) nv_wr32(dev, 0x3410, nv_ro32(dev, ramfc, 0x98/4)); } - dev_priv->engine.instmem.finish_access(dev); - nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16)); return 0; } @@ -434,8 +402,6 @@ nv50_fifo_unload_context(struct drm_device *dev) ramfc = chan->ramfc->gpuobj; cache = chan->cache->gpuobj; - dev_priv->engine.instmem.prepare_access(dev, true); - nv_wo32(dev, ramfc, 0x00/4, nv_rd32(dev, 0x3330)); nv_wo32(dev, ramfc, 0x04/4, nv_rd32(dev, 0x3334)); nv_wo32(dev, ramfc, 0x08/4, nv_rd32(dev, 0x3240)); @@ -482,7 +448,7 @@ nv50_fifo_unload_context(struct drm_device *dev) } /* guessing that all the 0x34xx regs aren't on NV50 */ - if (!IS_G80) { + if (dev_priv->chipset != 0x50) { nv_wo32(dev, ramfc, 0x84/4, ptr >> 1); nv_wo32(dev, ramfc, 0x88/4, nv_rd32(dev, 0x340c)); nv_wo32(dev, ramfc, 0x8c/4, nv_rd32(dev, 0x3400)); @@ -491,7 +457,7 @@ nv50_fifo_unload_context(struct drm_device *dev) nv_wo32(dev, ramfc, 0x98/4, nv_rd32(dev, 0x3410)); } - dev_priv->engine.instmem.finish_access(dev); + dev_priv->engine.instmem.flush(dev); /*XXX: probably reload ch127 (NULL) state back too */ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127); diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c index bb47ad73726..b2fab2bf3d6 100644 --- a/drivers/gpu/drm/nouveau/nv50_gpio.c +++ b/drivers/gpu/drm/nouveau/nv50_gpio.c @@ -74,3 +74,38 @@ nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state) nv_wr32(dev, r, v); return 0; } + +void +nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on) +{ + struct dcb_gpio_entry *gpio; + u32 reg, mask; + + gpio = nouveau_bios_gpio_entry(dev, tag); + if (!gpio) { + NV_ERROR(dev, "gpio tag 0x%02x not found\n", tag); + return; + } + + reg = gpio->line < 16 ? 0xe050 : 0xe070; + mask = 0x00010001 << (gpio->line & 0xf); + + nv_wr32(dev, reg + 4, mask); + nv_mask(dev, reg + 0, mask, on ? 
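[Editor's note — illustrative sketch, not part of the patch. nv50_gpio_irq_enable() above replaces the hotplug mask setup that nv50_display_init() used to open-code, and nv50_gpio_init() now disables and acks any pending GPIO interrupts up front. Lines 0-15 are handled through the 0xe050/0xe054 register pair and lines 16+ through 0xe070/0xe074, with two related bits per line packed as 0x00010001 << (line & 0xf). The struct and function names below are invented to show that mapping in isolation.]

#include <stdint.h>

struct gpio_irq_regs {
        uint32_t enable;  /* mask/enable register */
        uint32_t ack;     /* status/ack register (enable + 4) */
        uint32_t mask;    /* bits for this line in both halves */
};

/* Register pair and bit mask for a GPIO line, per the hunk above. */
static struct gpio_irq_regs nv50_gpio_irq_regs(unsigned int line)
{
        struct gpio_irq_regs r;

        r.enable = line < 16 ? 0xe050 : 0xe070;
        r.ack    = r.enable + 4;
        r.mask   = 0x00010001u << (line & 0xf);
        return r;
}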
mask : 0); +} + +int +nv50_gpio_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + /* disable, and ack any pending gpio interrupts */ + nv_wr32(dev, 0xe050, 0x00000000); + nv_wr32(dev, 0xe054, 0xffffffff); + if (dev_priv->chipset >= 0x90) { + nv_wr32(dev, 0xe070, 0x00000000); + nv_wr32(dev, 0xe074, 0xffffffff); + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index b203d06f601..1413028e158 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -30,8 +30,6 @@ #include "nouveau_grctx.h" -#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50) - static void nv50_graph_init_reset(struct drm_device *dev) { @@ -103,37 +101,33 @@ static int nv50_graph_init_ctxctl(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_grctx ctx = {}; + uint32_t *cp; + int i; NV_DEBUG(dev, "\n"); - if (nouveau_ctxfw) { - nouveau_grctx_prog_load(dev); - dev_priv->engine.graph.grctx_size = 0x70000; + cp = kmalloc(512 * 4, GFP_KERNEL); + if (!cp) { + NV_ERROR(dev, "failed to allocate ctxprog\n"); + dev_priv->engine.graph.accel_blocked = true; + return 0; } - if (!dev_priv->engine.graph.ctxprog) { - struct nouveau_grctx ctx = {}; - uint32_t *cp = kmalloc(512 * 4, GFP_KERNEL); - int i; - if (!cp) { - NV_ERROR(dev, "Couldn't alloc ctxprog! Disabling acceleration.\n"); - dev_priv->engine.graph.accel_blocked = true; - return 0; - } - ctx.dev = dev; - ctx.mode = NOUVEAU_GRCTX_PROG; - ctx.data = cp; - ctx.ctxprog_max = 512; - if (!nv50_grctx_init(&ctx)) { - dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4; - - nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); - for (i = 0; i < ctx.ctxprog_len; i++) - nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]); - } else { - dev_priv->engine.graph.accel_blocked = true; - } - kfree(cp); + + ctx.dev = dev; + ctx.mode = NOUVEAU_GRCTX_PROG; + ctx.data = cp; + ctx.ctxprog_max = 512; + if (!nv50_grctx_init(&ctx)) { + dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4; + + nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); + for (i = 0; i < ctx.ctxprog_len; i++) + nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]); + } else { + dev_priv->engine.graph.accel_blocked = true; } + kfree(cp); nv_wr32(dev, 0x400320, 4); nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0); @@ -164,7 +158,6 @@ void nv50_graph_takedown(struct drm_device *dev) { NV_DEBUG(dev, "\n"); - nouveau_grctx_fini(dev); } void @@ -212,8 +205,9 @@ nv50_graph_create_context(struct nouveau_channel *chan) struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; - struct nouveau_gpuobj *ctx; + struct nouveau_gpuobj *obj; struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; + struct nouveau_grctx ctx = {}; int hdr, ret; NV_DEBUG(dev, "ch%d\n", chan->id); @@ -223,10 +217,9 @@ nv50_graph_create_context(struct nouveau_channel *chan) NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); if (ret) return ret; - ctx = chan->ramin_grctx->gpuobj; + obj = chan->ramin_grctx->gpuobj; - hdr = IS_G80 ? 0x200 : 0x20; - dev_priv->engine.instmem.prepare_access(dev, true); + hdr = (dev_priv->chipset == 0x50) ? 
0x200 : 0x20; nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002); nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance + pgraph->grctx_size - 1); @@ -234,21 +227,15 @@ nv50_graph_create_context(struct nouveau_channel *chan) nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0); nv_wo32(dev, ramin, (hdr + 0x10)/4, 0); nv_wo32(dev, ramin, (hdr + 0x14)/4, 0x00010000); - dev_priv->engine.instmem.finish_access(dev); - - dev_priv->engine.instmem.prepare_access(dev, true); - if (!pgraph->ctxprog) { - struct nouveau_grctx ctx = {}; - ctx.dev = chan->dev; - ctx.mode = NOUVEAU_GRCTX_VALS; - ctx.data = chan->ramin_grctx->gpuobj; - nv50_grctx_init(&ctx); - } else { - nouveau_grctx_vals_load(dev, ctx); - } - nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12); - dev_priv->engine.instmem.finish_access(dev); + ctx.dev = chan->dev; + ctx.mode = NOUVEAU_GRCTX_VALS; + ctx.data = obj; + nv50_grctx_init(&ctx); + + nv_wo32(dev, obj, 0x00000/4, chan->ramin->instance >> 12); + + dev_priv->engine.instmem.flush(dev); return 0; } @@ -257,17 +244,16 @@ nv50_graph_destroy_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; - int i, hdr = IS_G80 ? 0x200 : 0x20; + int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20; NV_DEBUG(dev, "ch%d\n", chan->id); if (!chan->ramin || !chan->ramin->gpuobj) return; - dev_priv->engine.instmem.prepare_access(dev, true); for (i = hdr; i < hdr + 24; i += 4) nv_wo32(dev, chan->ramin->gpuobj, i/4, 0); - dev_priv->engine.instmem.finish_access(dev); + dev_priv->engine.instmem.flush(dev); nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx); } diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index 5f21df31f3a..37c7b48ab24 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -35,8 +35,6 @@ struct nv50_instmem_priv { struct nouveau_gpuobj_ref *pramin_pt; struct nouveau_gpuobj_ref *pramin_bar; struct nouveau_gpuobj_ref *fb_bar; - - bool last_access_wr; }; #define NV50_INSTMEM_PAGE_SHIFT 12 @@ -147,7 +145,7 @@ nv50_instmem_init(struct drm_device *dev) if (ret) return ret; - if (nouveau_mem_init_heap(&chan->ramin_heap, c_base, c_size - c_base)) + if (drm_mm_init(&chan->ramin_heap, c_base, c_size - c_base)) return -ENOMEM; /* RAMFC + zero channel's PRAMIN up to start of VM pagedir */ @@ -241,7 +239,7 @@ nv50_instmem_init(struct drm_device *dev) return ret; BAR0_WI32(priv->fb_bar->gpuobj, 0x00, 0x7fc00000); BAR0_WI32(priv->fb_bar->gpuobj, 0x04, 0x40000000 + - drm_get_resource_len(dev, 1) - 1); + pci_resource_len(dev->pdev, 1) - 1); BAR0_WI32(priv->fb_bar->gpuobj, 0x08, 0x40000000); BAR0_WI32(priv->fb_bar->gpuobj, 0x0c, 0x00000000); BAR0_WI32(priv->fb_bar->gpuobj, 0x10, 0x00000000); @@ -262,23 +260,18 @@ nv50_instmem_init(struct drm_device *dev) /* Assume that praying isn't enough, check that we can re-read the * entire fake channel back from the PRAMIN BAR */ - dev_priv->engine.instmem.prepare_access(dev, false); for (i = 0; i < c_size; i += 4) { if (nv_rd32(dev, NV_RAMIN + i) != nv_ri32(dev, i)) { NV_ERROR(dev, "Error reading back PRAMIN at 0x%08x\n", i); - dev_priv->engine.instmem.finish_access(dev); return -EINVAL; } } - dev_priv->engine.instmem.finish_access(dev); nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, save_nv001700); /* Global PRAMIN heap */ - if (nouveau_mem_init_heap(&dev_priv->ramin_heap, - c_size, dev_priv->ramin_size - c_size)) { - dev_priv->ramin_heap = NULL; + if (drm_mm_init(&dev_priv->ramin_heap, c_size, 
dev_priv->ramin_size - c_size)) { NV_ERROR(dev, "Failed to init RAMIN heap\n"); } @@ -321,7 +314,7 @@ nv50_instmem_takedown(struct drm_device *dev) nouveau_gpuobj_del(dev, &chan->vm_pd); nouveau_gpuobj_ref_del(dev, &chan->ramfc); nouveau_gpuobj_ref_del(dev, &chan->ramin); - nouveau_mem_takedown(&chan->ramin_heap); + drm_mm_takedown(&chan->ramin_heap); dev_priv->fifos[0] = dev_priv->fifos[127] = NULL; kfree(chan); @@ -436,14 +429,14 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound) return -EINVAL; - NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n", + NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n", gpuobj->im_pramin->start, gpuobj->im_pramin->size); pte = (gpuobj->im_pramin->start >> 12) << 1; pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; vram = gpuobj->im_backing_start; - NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n", + NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n", gpuobj->im_pramin->start, pte, pte_end); NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start); @@ -453,27 +446,15 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) vram |= 0x30; } - dev_priv->engine.instmem.prepare_access(dev, true); while (pte < pte_end) { nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram)); nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram)); vram += NV50_INSTMEM_PAGE_SIZE; } - dev_priv->engine.instmem.finish_access(dev); - - nv_wr32(dev, 0x100c80, 0x00040001); - if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { - NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (1)\n"); - NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); - return -EBUSY; - } + dev_priv->engine.instmem.flush(dev); - nv_wr32(dev, 0x100c80, 0x00060001); - if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) { - NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n"); - NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80)); - return -EBUSY; - } + nv50_vm_flush(dev, 4); + nv50_vm_flush(dev, 6); gpuobj->im_bound = 1; return 0; @@ -492,36 +473,37 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj) pte = (gpuobj->im_pramin->start >> 12) << 1; pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte; - dev_priv->engine.instmem.prepare_access(dev, true); while (pte < pte_end) { nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000); nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000); } - dev_priv->engine.instmem.finish_access(dev); + dev_priv->engine.instmem.flush(dev); gpuobj->im_bound = 0; return 0; } void -nv50_instmem_prepare_access(struct drm_device *dev, bool write) +nv50_instmem_flush(struct drm_device *dev) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; - - priv->last_access_wr = write; + nv_wr32(dev, 0x00330c, 0x00000001); + if (!nv_wait(0x00330c, 0x00000002, 0x00000000)) + NV_ERROR(dev, "PRAMIN flush timeout\n"); } void -nv50_instmem_finish_access(struct drm_device *dev) +nv84_instmem_flush(struct drm_device *dev) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv; + nv_wr32(dev, 0x070000, 0x00000001); + if (!nv_wait(0x070000, 0x00000002, 0x00000000)) + NV_ERROR(dev, "PRAMIN flush timeout\n"); +} - if (priv->last_access_wr) { - nv_wr32(dev, 0x070000, 0x00000001); - if (!nv_wait(0x070000, 0x00000001, 0x00000000)) - NV_ERROR(dev, "PRAMIN flush timeout\n"); - } +void +nv50_vm_flush(struct drm_device *dev, int engine) +{ + 
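[Editor's note — condensed sketch, not part of the patch. Throughout this diff the prepare_access()/finish_access() bracketing around PRAMIN writes is replaced by a single explicit flush after the writes: engine.instmem.flush() pokes 0x00330c on NV50 (nv50_instmem_flush) or 0x070000 on NV84+ (nv84_instmem_flush) and polls until bit 1 clears, while nv50_vm_flush() kicks 0x100c80 with the engine number to flush that engine's VM TLB. The fragment below is condensed from the nv50_instmem_bind() hunk; identifiers are as in the diff.]

        /* sketch: explicit flush replaces prepare_access()/finish_access() */
        nv_wo32(dev, obj, offset, value);        /* ... any PRAMIN writes ... */
        dev_priv->engine.instmem.flush(dev);     /* 0x330c (NV50) / 0x70000
                                                  * (NV84+), poll bit 1 clear */

        /* and, for VM-visible mappings, per-engine TLB flushes: */
        nv50_vm_flush(dev, 4);                   /* was 0x100c80 = 0x00040001 */
        nv50_vm_flush(dev, 6);                   /* was 0x100c80 = 0x00060001 */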
nv_wr32(dev, 0x100c80, (engine << 16) | 1); + if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) + NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); } diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c index 812778db76a..bcd4cf84a7e 100644 --- a/drivers/gpu/drm/nouveau/nv50_sor.c +++ b/drivers/gpu/drm/nouveau/nv50_sor.c @@ -37,52 +37,32 @@ #include "nv50_display.h" static void -nv50_sor_disconnect(struct nouveau_encoder *nv_encoder) +nv50_sor_disconnect(struct drm_encoder *encoder) { - struct drm_device *dev = to_drm_encoder(nv_encoder)->dev; + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + struct drm_device *dev = encoder->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *evo = dev_priv->evo; int ret; + if (!nv_encoder->crtc) + return; + nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true); + NV_DEBUG_KMS(dev, "Disconnecting SOR %d\n", nv_encoder->or); - ret = RING_SPACE(evo, 2); + ret = RING_SPACE(evo, 4); if (ret) { NV_ERROR(dev, "no space while disconnecting SOR\n"); return; } BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1); - OUT_RING(evo, 0); -} - -static void -nv50_sor_dp_link_train(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - struct bit_displayport_encoder_table *dpe; - int dpe_headerlen; - - dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen); - if (!dpe) { - NV_ERROR(dev, "SOR-%d: no DP encoder table!\n", nv_encoder->or); - return; - } + OUT_RING (evo, 0); + BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1); + OUT_RING (evo, 0); - if (dpe->script0) { - NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or); - nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0), - nv_encoder->dcb); - } - - if (!nouveau_dp_link_train(encoder)) - NV_ERROR(dev, "SOR-%d: link training failed\n", nv_encoder->or); - - if (dpe->script1) { - NV_DEBUG_KMS(dev, "SOR-%d: running DP script 1\n", nv_encoder->or); - nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1), - nv_encoder->dcb); - } + nv_encoder->crtc = NULL; + nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; } static void @@ -94,14 +74,16 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode) uint32_t val; int or = nv_encoder->or; - NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode); + NV_DEBUG_KMS(dev, "or %d type %d mode %d\n", or, nv_encoder->dcb->type, mode); nv_encoder->last_dpms = mode; list_for_each_entry(enc, &dev->mode_config.encoder_list, head) { struct nouveau_encoder *nvenc = nouveau_encoder(enc); if (nvenc == nv_encoder || - nvenc->disconnect != nv50_sor_disconnect || + (nvenc->dcb->type != OUTPUT_TMDS && + nvenc->dcb->type != OUTPUT_LVDS && + nvenc->dcb->type != OUTPUT_DP) || nvenc->dcb->or != nv_encoder->dcb->or) continue; @@ -133,8 +115,22 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode) nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or))); } - if (nv_encoder->dcb->type == OUTPUT_DP && mode == DRM_MODE_DPMS_ON) - nv50_sor_dp_link_train(encoder); + if (nv_encoder->dcb->type == OUTPUT_DP) { + struct nouveau_i2c_chan *auxch; + + auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); + if (!auxch) + return; + + if (mode == DRM_MODE_DPMS_ON) { + u8 status = DP_SET_POWER_D0; + nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1); + nouveau_dp_link_train(encoder); + } else { + u8 status = DP_SET_POWER_D3; + nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1); + } + } } static void @@ -196,7 +192,8 @@ 
nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, uint32_t mode_ctl = 0; int ret; - NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or); + NV_DEBUG_KMS(dev, "or %d type %d -> crtc %d\n", + nv_encoder->or, nv_encoder->dcb->type, crtc->index); nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON); @@ -239,6 +236,14 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, } BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1); OUT_RING(evo, mode_ctl); + + nv_encoder->crtc = encoder->crtc; +} + +static struct drm_crtc * +nv50_sor_crtc_get(struct drm_encoder *encoder) +{ + return nouveau_encoder(encoder)->crtc; } static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = { @@ -249,7 +254,9 @@ static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = { .prepare = nv50_sor_prepare, .commit = nv50_sor_commit, .mode_set = nv50_sor_mode_set, - .detect = NULL + .get_crtc = nv50_sor_crtc_get, + .detect = NULL, + .disable = nv50_sor_disconnect }; static void @@ -272,32 +279,22 @@ static const struct drm_encoder_funcs nv50_sor_encoder_funcs = { }; int -nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) +nv50_sor_create(struct drm_connector *connector, struct dcb_entry *entry) { struct nouveau_encoder *nv_encoder = NULL; + struct drm_device *dev = connector->dev; struct drm_encoder *encoder; - bool dum; int type; NV_DEBUG_KMS(dev, "\n"); switch (entry->type) { case OUTPUT_TMDS: - NV_INFO(dev, "Detected a TMDS output\n"); + case OUTPUT_DP: type = DRM_MODE_ENCODER_TMDS; break; case OUTPUT_LVDS: - NV_INFO(dev, "Detected a LVDS output\n"); type = DRM_MODE_ENCODER_LVDS; - - if (nouveau_bios_parse_lvds_table(dev, 0, &dum, &dum)) { - NV_ERROR(dev, "Failed parsing LVDS table\n"); - return -EINVAL; - } - break; - case OUTPUT_DP: - NV_INFO(dev, "Detected a DP output\n"); - type = DRM_MODE_ENCODER_TMDS; break; default: return -EINVAL; @@ -310,8 +307,7 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) nv_encoder->dcb = entry; nv_encoder->or = ffs(entry->or) - 1; - - nv_encoder->disconnect = nv50_sor_disconnect; + nv_encoder->last_dpms = DRM_MODE_DPMS_OFF; drm_encoder_init(dev, encoder, &nv50_sor_encoder_funcs, type); drm_encoder_helper_add(encoder, &nv50_sor_helper_funcs); @@ -342,5 +338,6 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) nv_encoder->dp.mc_unknown = 5; } + drm_mode_connector_attach_encoder(connector, encoder); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h index 5998c35237b..ad64673ace1 100644 --- a/drivers/gpu/drm/nouveau/nvreg.h +++ b/drivers/gpu/drm/nouveau/nvreg.h @@ -147,28 +147,6 @@ # define NV_VIO_GX_DONT_CARE_INDEX 0x07 # define NV_VIO_GX_BIT_MASK_INDEX 0x08 -#define NV_PFB_BOOT_0 0x00100000 -#define NV_PFB_CFG0 0x00100200 -#define NV_PFB_CFG1 0x00100204 -#define NV_PFB_CSTATUS 0x0010020C -#define NV_PFB_REFCTRL 0x00100210 -# define NV_PFB_REFCTRL_VALID_1 (1 << 31) -#define NV_PFB_PAD 0x0010021C -# define NV_PFB_PAD_CKE_NORMAL (1 << 0) -#define NV_PFB_TILE_NV10 0x00100240 -#define NV_PFB_TILE_SIZE_NV10 0x00100244 -#define NV_PFB_REF 0x001002D0 -# define NV_PFB_REF_CMD_REFRESH (1 << 0) -#define NV_PFB_PRE 0x001002D4 -# define NV_PFB_PRE_CMD_PRECHARGE (1 << 0) -#define NV_PFB_CLOSE_PAGE2 0x0010033C -#define NV_PFB_TILE_NV40 0x00100600 -#define NV_PFB_TILE_SIZE_NV40 0x00100604 - -#define NV_PEXTDEV_BOOT_0 0x00101000 -# define NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT (8 << 12) -#define NV_PEXTDEV_BOOT_3 0x0010100c - #define NV_PCRTC_INTR_0 
0x00600100 # define NV_PCRTC_INTR_0_VBLANK (1 << 0) #define NV_PCRTC_INTR_EN_0 0x00600140 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c index e671d0e74d4..570e190710b 100644 --- a/drivers/gpu/drm/r128/r128_cce.c +++ b/drivers/gpu/drm/r128/r128_cce.c @@ -44,7 +44,7 @@ MODULE_FIRMWARE(FIRMWARE_NAME); -static int R128_READ_PLL(struct drm_device * dev, int addr) +static int R128_READ_PLL(struct drm_device *dev, int addr) { drm_r128_private_t *dev_priv = dev->dev_private; @@ -53,7 +53,7 @@ static int R128_READ_PLL(struct drm_device * dev, int addr) } #if R128_FIFO_DEBUG -static void r128_status(drm_r128_private_t * dev_priv) +static void r128_status(drm_r128_private_t *dev_priv) { printk("GUI_STAT = 0x%08x\n", (unsigned int)R128_READ(R128_GUI_STAT)); @@ -74,7 +74,7 @@ static void r128_status(drm_r128_private_t * dev_priv) * Engine, FIFO control */ -static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv) +static int r128_do_pixcache_flush(drm_r128_private_t *dev_priv) { u32 tmp; int i; @@ -83,9 +83,8 @@ static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv) R128_WRITE(R128_PC_NGUI_CTLSTAT, tmp); for (i = 0; i < dev_priv->usec_timeout; i++) { - if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY)) { + if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY)) return 0; - } DRM_UDELAY(1); } @@ -95,7 +94,7 @@ static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv) return -EBUSY; } -static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries) +static int r128_do_wait_for_fifo(drm_r128_private_t *dev_priv, int entries) { int i; @@ -112,7 +111,7 @@ static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries) return -EBUSY; } -static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv) +static int r128_do_wait_for_idle(drm_r128_private_t *dev_priv) { int i, ret; @@ -189,7 +188,7 @@ out_release: * prior to a wait for idle, as it informs the engine that the command * stream is ending. */ -static void r128_do_cce_flush(drm_r128_private_t * dev_priv) +static void r128_do_cce_flush(drm_r128_private_t *dev_priv) { u32 tmp; @@ -199,7 +198,7 @@ static void r128_do_cce_flush(drm_r128_private_t * dev_priv) /* Wait for the CCE to go idle. */ -int r128_do_cce_idle(drm_r128_private_t * dev_priv) +int r128_do_cce_idle(drm_r128_private_t *dev_priv) { int i; @@ -225,7 +224,7 @@ int r128_do_cce_idle(drm_r128_private_t * dev_priv) /* Start the Concurrent Command Engine. */ -static void r128_do_cce_start(drm_r128_private_t * dev_priv) +static void r128_do_cce_start(drm_r128_private_t *dev_priv) { r128_do_wait_for_idle(dev_priv); @@ -242,7 +241,7 @@ static void r128_do_cce_start(drm_r128_private_t * dev_priv) * commands, so you must wait for the CCE command stream to complete * before calling this routine. */ -static void r128_do_cce_reset(drm_r128_private_t * dev_priv) +static void r128_do_cce_reset(drm_r128_private_t *dev_priv) { R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0); R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0); @@ -253,7 +252,7 @@ static void r128_do_cce_reset(drm_r128_private_t * dev_priv) * commands, so you must flush the command stream and wait for the CCE * to go idle before calling this routine. */ -static void r128_do_cce_stop(drm_r128_private_t * dev_priv) +static void r128_do_cce_stop(drm_r128_private_t *dev_priv) { R128_WRITE(R128_PM4_MICRO_CNTL, 0); R128_WRITE(R128_PM4_BUFFER_CNTL, @@ -264,7 +263,7 @@ static void r128_do_cce_stop(drm_r128_private_t * dev_priv) /* Reset the engine. This will stop the CCE if it is running. 
*/ -static int r128_do_engine_reset(struct drm_device * dev) +static int r128_do_engine_reset(struct drm_device *dev) { drm_r128_private_t *dev_priv = dev->dev_private; u32 clock_cntl_index, mclk_cntl, gen_reset_cntl; @@ -301,8 +300,8 @@ static int r128_do_engine_reset(struct drm_device * dev) return 0; } -static void r128_cce_init_ring_buffer(struct drm_device * dev, - drm_r128_private_t * dev_priv) +static void r128_cce_init_ring_buffer(struct drm_device *dev, + drm_r128_private_t *dev_priv) { u32 ring_start; u32 tmp; @@ -340,7 +339,7 @@ static void r128_cce_init_ring_buffer(struct drm_device * dev, R128_WRITE(R128_BUS_CNTL, tmp); } -static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) +static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init) { drm_r128_private_t *dev_priv; int rc; @@ -588,7 +587,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) return rc; } -int r128_do_cleanup_cce(struct drm_device * dev) +int r128_do_cleanup_cce(struct drm_device *dev) { /* Make sure interrupts are disabled here because the uninstall ioctl @@ -682,9 +681,8 @@ int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv /* Flush any pending CCE commands. This ensures any outstanding * commands are exectuted by the engine before we turn it off. */ - if (stop->flush) { + if (stop->flush) r128_do_cce_flush(dev_priv); - } /* If we fail to make the engine go idle, we return an error * code so that the DRM ioctl wrapper can try again. @@ -735,9 +733,8 @@ int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv DEV_INIT_TEST_WITH_RETURN(dev_priv); - if (dev_priv->cce_running) { + if (dev_priv->cce_running) r128_do_cce_flush(dev_priv); - } return r128_do_cce_idle(dev_priv); } @@ -765,7 +762,7 @@ int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_pr #define R128_BUFFER_FREE 0 #if 0 -static int r128_freelist_init(struct drm_device * dev) +static int r128_freelist_init(struct drm_device *dev) { struct drm_device_dma *dma = dev->dma; drm_r128_private_t *dev_priv = dev->dev_private; @@ -848,7 +845,7 @@ static struct drm_buf *r128_freelist_get(struct drm_device * dev) return NULL; } -void r128_freelist_reset(struct drm_device * dev) +void r128_freelist_reset(struct drm_device *dev) { struct drm_device_dma *dma = dev->dma; int i; @@ -864,7 +861,7 @@ void r128_freelist_reset(struct drm_device * dev) * CCE command submission */ -int r128_wait_ring(drm_r128_private_t * dev_priv, int n) +int r128_wait_ring(drm_r128_private_t *dev_priv, int n) { drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i; @@ -881,9 +878,9 @@ int r128_wait_ring(drm_r128_private_t * dev_priv, int n) return -EBUSY; } -static int r128_cce_get_buffers(struct drm_device * dev, +static int r128_cce_get_buffers(struct drm_device *dev, struct drm_file *file_priv, - struct drm_dma * d) + struct drm_dma *d) { int i; struct drm_buf *buf; @@ -933,9 +930,8 @@ int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_p d->granted_count = 0; - if (d->request_count) { + if (d->request_count) ret = r128_cce_get_buffers(dev, file_priv, d); - } return ret; } diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c index b806fdcc717..1e2971f13aa 100644 --- a/drivers/gpu/drm/r128/r128_drv.c +++ b/drivers/gpu/drm/r128/r128_drv.c @@ -85,7 +85,7 @@ static struct drm_driver driver = { .patchlevel = DRIVER_PATCHLEVEL, }; -int r128_driver_load(struct drm_device * dev, unsigned long 
flags) +int r128_driver_load(struct drm_device *dev, unsigned long flags) { return drm_vblank_init(dev, 1); } diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h index 3c60829d82e..930c71b2fb5 100644 --- a/drivers/gpu/drm/r128/r128_drv.h +++ b/drivers/gpu/drm/r128/r128_drv.h @@ -53,7 +53,7 @@ #define DRIVER_MINOR 5 #define DRIVER_PATCHLEVEL 0 -#define GET_RING_HEAD(dev_priv) R128_READ( R128_PM4_BUFFER_DL_RPTR ) +#define GET_RING_HEAD(dev_priv) R128_READ(R128_PM4_BUFFER_DL_RPTR) typedef struct drm_r128_freelist { unsigned int age; @@ -144,23 +144,23 @@ extern int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file extern int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern void r128_freelist_reset(struct drm_device * dev); +extern void r128_freelist_reset(struct drm_device *dev); -extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n); +extern int r128_wait_ring(drm_r128_private_t *dev_priv, int n); -extern int r128_do_cce_idle(drm_r128_private_t * dev_priv); -extern int r128_do_cleanup_cce(struct drm_device * dev); +extern int r128_do_cce_idle(drm_r128_private_t *dev_priv); +extern int r128_do_cleanup_cce(struct drm_device *dev); extern int r128_enable_vblank(struct drm_device *dev, int crtc); extern void r128_disable_vblank(struct drm_device *dev, int crtc); extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc); extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS); -extern void r128_driver_irq_preinstall(struct drm_device * dev); +extern void r128_driver_irq_preinstall(struct drm_device *dev); extern int r128_driver_irq_postinstall(struct drm_device *dev); -extern void r128_driver_irq_uninstall(struct drm_device * dev); -extern void r128_driver_lastclose(struct drm_device * dev); -extern int r128_driver_load(struct drm_device * dev, unsigned long flags); -extern void r128_driver_preclose(struct drm_device * dev, +extern void r128_driver_irq_uninstall(struct drm_device *dev); +extern void r128_driver_lastclose(struct drm_device *dev); +extern int r128_driver_load(struct drm_device *dev, unsigned long flags); +extern void r128_driver_preclose(struct drm_device *dev, struct drm_file *file_priv); extern long r128_compat_ioctl(struct file *filp, unsigned int cmd, @@ -390,27 +390,27 @@ extern long r128_compat_ioctl(struct file *filp, unsigned int cmd, #define R128_PCIGART_TABLE_SIZE 32768 -#define R128_READ(reg) DRM_READ32( dev_priv->mmio, (reg) ) -#define R128_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) ) -#define R128_READ8(reg) DRM_READ8( dev_priv->mmio, (reg) ) -#define R128_WRITE8(reg,val) DRM_WRITE8( dev_priv->mmio, (reg), (val) ) +#define R128_READ(reg) DRM_READ32(dev_priv->mmio, (reg)) +#define R128_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio, (reg), (val)) +#define R128_READ8(reg) DRM_READ8(dev_priv->mmio, (reg)) +#define R128_WRITE8(reg, val) DRM_WRITE8(dev_priv->mmio, (reg), (val)) -#define R128_WRITE_PLL(addr,val) \ +#define R128_WRITE_PLL(addr, val) \ do { \ R128_WRITE8(R128_CLOCK_CNTL_INDEX, \ ((addr) & 0x1f) | R128_PLL_WR_EN); \ R128_WRITE(R128_CLOCK_CNTL_DATA, (val)); \ } while (0) -#define CCE_PACKET0( reg, n ) (R128_CCE_PACKET0 | \ +#define CCE_PACKET0(reg, n) (R128_CCE_PACKET0 | \ ((n) << 16) | ((reg) >> 2)) -#define CCE_PACKET1( reg0, reg1 ) (R128_CCE_PACKET1 | \ +#define CCE_PACKET1(reg0, reg1) (R128_CCE_PACKET1 | \ (((reg1) >> 2) << 11) | ((reg0) >> 2)) 
#define CCE_PACKET2() (R128_CCE_PACKET2) -#define CCE_PACKET3( pkt, n ) (R128_CCE_PACKET3 | \ +#define CCE_PACKET3(pkt, n) (R128_CCE_PACKET3 | \ (pkt) | ((n) << 16)) -static __inline__ void r128_update_ring_snapshot(drm_r128_private_t * dev_priv) +static __inline__ void r128_update_ring_snapshot(drm_r128_private_t *dev_priv) { drm_r128_ring_buffer_t *ring = &dev_priv->ring; ring->space = (GET_RING_HEAD(dev_priv) - ring->tail) * sizeof(u32); @@ -430,37 +430,38 @@ do { \ } \ } while (0) -#define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \ +#define RING_SPACE_TEST_WITH_RETURN(dev_priv) \ do { \ drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i; \ - if ( ring->space < ring->high_mark ) { \ - for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { \ - r128_update_ring_snapshot( dev_priv ); \ - if ( ring->space >= ring->high_mark ) \ + if (ring->space < ring->high_mark) { \ + for (i = 0 ; i < dev_priv->usec_timeout ; i++) { \ + r128_update_ring_snapshot(dev_priv); \ + if (ring->space >= ring->high_mark) \ goto __ring_space_done; \ - DRM_UDELAY(1); \ + DRM_UDELAY(1); \ } \ - DRM_ERROR( "ring space check failed!\n" ); \ - return -EBUSY; \ + DRM_ERROR("ring space check failed!\n"); \ + return -EBUSY; \ } \ __ring_space_done: \ ; \ } while (0) -#define VB_AGE_TEST_WITH_RETURN( dev_priv ) \ +#define VB_AGE_TEST_WITH_RETURN(dev_priv) \ do { \ drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; \ - if ( sarea_priv->last_dispatch >= R128_MAX_VB_AGE ) { \ - int __ret = r128_do_cce_idle( dev_priv ); \ - if ( __ret ) return __ret; \ + if (sarea_priv->last_dispatch >= R128_MAX_VB_AGE) { \ + int __ret = r128_do_cce_idle(dev_priv); \ + if (__ret) \ + return __ret; \ sarea_priv->last_dispatch = 0; \ - r128_freelist_reset( dev ); \ + r128_freelist_reset(dev); \ } \ } while (0) #define R128_WAIT_UNTIL_PAGE_FLIPPED() do { \ - OUT_RING( CCE_PACKET0( R128_WAIT_UNTIL, 0 ) ); \ - OUT_RING( R128_EVENT_CRTC_OFFSET ); \ + OUT_RING(CCE_PACKET0(R128_WAIT_UNTIL, 0)); \ + OUT_RING(R128_EVENT_CRTC_OFFSET); \ } while (0) /* ================================================================ @@ -472,13 +473,12 @@ do { \ #define RING_LOCALS \ int write, _nr; unsigned int tail_mask; volatile u32 *ring; -#define BEGIN_RING( n ) do { \ - if ( R128_VERBOSE ) { \ - DRM_INFO( "BEGIN_RING( %d )\n", (n)); \ - } \ - if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \ +#define BEGIN_RING(n) do { \ + if (R128_VERBOSE) \ + DRM_INFO("BEGIN_RING(%d)\n", (n)); \ + if (dev_priv->ring.space <= (n) * sizeof(u32)) { \ COMMIT_RING(); \ - r128_wait_ring( dev_priv, (n) * sizeof(u32) ); \ + r128_wait_ring(dev_priv, (n) * sizeof(u32)); \ } \ _nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \ ring = dev_priv->ring.start; \ @@ -494,40 +494,36 @@ do { \ #define R128_BROKEN_CCE 1 #define ADVANCE_RING() do { \ - if ( R128_VERBOSE ) { \ - DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \ - write, dev_priv->ring.tail ); \ - } \ - if ( R128_BROKEN_CCE && write < 32 ) { \ - memcpy( dev_priv->ring.end, \ - dev_priv->ring.start, \ - write * sizeof(u32) ); \ - } \ - if (((dev_priv->ring.tail + _nr) & tail_mask) != write) { \ + if (R128_VERBOSE) \ + DRM_INFO("ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \ + write, dev_priv->ring.tail); \ + if (R128_BROKEN_CCE && write < 32) \ + memcpy(dev_priv->ring.end, \ + dev_priv->ring.start, \ + write * sizeof(u32)); \ + if (((dev_priv->ring.tail + _nr) & tail_mask) != write) \ DRM_ERROR( \ "ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \ ((dev_priv->ring.tail + _nr) & tail_mask), \ write, __LINE__); \ - } else \ + 
else \ dev_priv->ring.tail = write; \ } while (0) #define COMMIT_RING() do { \ - if ( R128_VERBOSE ) { \ - DRM_INFO( "COMMIT_RING() tail=0x%06x\n", \ - dev_priv->ring.tail ); \ - } \ + if (R128_VERBOSE) \ + DRM_INFO("COMMIT_RING() tail=0x%06x\n", \ + dev_priv->ring.tail); \ DRM_MEMORYBARRIER(); \ - R128_WRITE( R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail ); \ - R128_READ( R128_PM4_BUFFER_DL_WPTR ); \ + R128_WRITE(R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail); \ + R128_READ(R128_PM4_BUFFER_DL_WPTR); \ } while (0) -#define OUT_RING( x ) do { \ - if ( R128_VERBOSE ) { \ - DRM_INFO( " OUT_RING( 0x%08x ) at 0x%x\n", \ - (unsigned int)(x), write ); \ - } \ - ring[write++] = cpu_to_le32( x ); \ +#define OUT_RING(x) do { \ + if (R128_VERBOSE) \ + DRM_INFO(" OUT_RING( 0x%08x ) at 0x%x\n", \ + (unsigned int)(x), write); \ + ring[write++] = cpu_to_le32(x); \ write &= tail_mask; \ } while (0) diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c index 69810fb8ac4..429d5a02695 100644 --- a/drivers/gpu/drm/r128/r128_irq.c +++ b/drivers/gpu/drm/r128/r128_irq.c @@ -90,7 +90,7 @@ void r128_disable_vblank(struct drm_device *dev, int crtc) */ } -void r128_driver_irq_preinstall(struct drm_device * dev) +void r128_driver_irq_preinstall(struct drm_device *dev) { drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private; @@ -105,7 +105,7 @@ int r128_driver_irq_postinstall(struct drm_device *dev) return 0; } -void r128_driver_irq_uninstall(struct drm_device * dev) +void r128_driver_irq_uninstall(struct drm_device *dev) { drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private; if (!dev_priv) diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c index af2665cf471..077af1f2f9b 100644 --- a/drivers/gpu/drm/r128/r128_state.c +++ b/drivers/gpu/drm/r128/r128_state.c @@ -37,8 +37,8 @@ * CCE hardware state programming functions */ -static void r128_emit_clip_rects(drm_r128_private_t * dev_priv, - struct drm_clip_rect * boxes, int count) +static void r128_emit_clip_rects(drm_r128_private_t *dev_priv, + struct drm_clip_rect *boxes, int count) { u32 aux_sc_cntl = 0x00000000; RING_LOCALS; @@ -80,7 +80,7 @@ static void r128_emit_clip_rects(drm_r128_private_t * dev_priv, ADVANCE_RING(); } -static __inline__ void r128_emit_core(drm_r128_private_t * dev_priv) +static __inline__ void r128_emit_core(drm_r128_private_t *dev_priv) { drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_r128_context_regs_t *ctx = &sarea_priv->context_state; @@ -95,7 +95,7 @@ static __inline__ void r128_emit_core(drm_r128_private_t * dev_priv) ADVANCE_RING(); } -static __inline__ void r128_emit_context(drm_r128_private_t * dev_priv) +static __inline__ void r128_emit_context(drm_r128_private_t *dev_priv) { drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_r128_context_regs_t *ctx = &sarea_priv->context_state; @@ -121,7 +121,7 @@ static __inline__ void r128_emit_context(drm_r128_private_t * dev_priv) ADVANCE_RING(); } -static __inline__ void r128_emit_setup(drm_r128_private_t * dev_priv) +static __inline__ void r128_emit_setup(drm_r128_private_t *dev_priv) { drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_r128_context_regs_t *ctx = &sarea_priv->context_state; @@ -137,7 +137,7 @@ static __inline__ void r128_emit_setup(drm_r128_private_t * dev_priv) ADVANCE_RING(); } -static __inline__ void r128_emit_masks(drm_r128_private_t * dev_priv) +static __inline__ void r128_emit_masks(drm_r128_private_t *dev_priv) { drm_r128_sarea_t *sarea_priv = 
dev_priv->sarea_priv; drm_r128_context_regs_t *ctx = &sarea_priv->context_state; @@ -156,7 +156,7 @@ static __inline__ void r128_emit_masks(drm_r128_private_t * dev_priv) ADVANCE_RING(); } -static __inline__ void r128_emit_window(drm_r128_private_t * dev_priv) +static __inline__ void r128_emit_window(drm_r128_private_t *dev_priv) { drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_r128_context_regs_t *ctx = &sarea_priv->context_state; @@ -171,7 +171,7 @@ static __inline__ void r128_emit_window(drm_r128_private_t * dev_priv) ADVANCE_RING(); } -static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv) +static __inline__ void r128_emit_tex0(drm_r128_private_t *dev_priv) { drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_r128_context_regs_t *ctx = &sarea_priv->context_state; @@ -187,9 +187,8 @@ static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv) OUT_RING(tex->tex_cntl); OUT_RING(tex->tex_combine_cntl); OUT_RING(ctx->tex_size_pitch_c); - for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) { + for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) OUT_RING(tex->tex_offset[i]); - } OUT_RING(CCE_PACKET0(R128_CONSTANT_COLOR_C, 1)); OUT_RING(ctx->constant_color_c); @@ -198,7 +197,7 @@ static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv) ADVANCE_RING(); } -static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv) +static __inline__ void r128_emit_tex1(drm_r128_private_t *dev_priv) { drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1]; @@ -211,9 +210,8 @@ static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv) OUT_RING(CCE_PACKET0(R128_SEC_TEX_CNTL_C, 1 + R128_MAX_TEXTURE_LEVELS)); OUT_RING(tex->tex_cntl); OUT_RING(tex->tex_combine_cntl); - for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) { + for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) OUT_RING(tex->tex_offset[i]); - } OUT_RING(CCE_PACKET0(R128_SEC_TEXTURE_BORDER_COLOR_C, 0)); OUT_RING(tex->tex_border_color); @@ -221,7 +219,7 @@ static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv) ADVANCE_RING(); } -static void r128_emit_state(drm_r128_private_t * dev_priv) +static void r128_emit_state(drm_r128_private_t *dev_priv) { drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; unsigned int dirty = sarea_priv->dirty; @@ -274,7 +272,7 @@ static void r128_emit_state(drm_r128_private_t * dev_priv) * Performance monitoring functions */ -static void r128_clear_box(drm_r128_private_t * dev_priv, +static void r128_clear_box(drm_r128_private_t *dev_priv, int x, int y, int w, int h, int r, int g, int b) { u32 pitch, offset; @@ -321,13 +319,12 @@ static void r128_clear_box(drm_r128_private_t * dev_priv, ADVANCE_RING(); } -static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv) +static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv) { - if (atomic_read(&dev_priv->idle_count) == 0) { + if (atomic_read(&dev_priv->idle_count) == 0) r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0); - } else { + else atomic_set(&dev_priv->idle_count, 0); - } } #endif @@ -352,8 +349,8 @@ static void r128_print_dirty(const char *msg, unsigned int flags) (flags & R128_REQUIRE_QUIESCENCE) ? 
"quiescence, " : ""); } -static void r128_cce_dispatch_clear(struct drm_device * dev, - drm_r128_clear_t * clear) +static void r128_cce_dispatch_clear(struct drm_device *dev, + drm_r128_clear_t *clear) { drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -458,7 +455,7 @@ static void r128_cce_dispatch_clear(struct drm_device * dev, } } -static void r128_cce_dispatch_swap(struct drm_device * dev) +static void r128_cce_dispatch_swap(struct drm_device *dev) { drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -524,7 +521,7 @@ static void r128_cce_dispatch_swap(struct drm_device * dev) ADVANCE_RING(); } -static void r128_cce_dispatch_flip(struct drm_device * dev) +static void r128_cce_dispatch_flip(struct drm_device *dev) { drm_r128_private_t *dev_priv = dev->dev_private; RING_LOCALS; @@ -542,11 +539,10 @@ static void r128_cce_dispatch_flip(struct drm_device * dev) R128_WAIT_UNTIL_PAGE_FLIPPED(); OUT_RING(CCE_PACKET0(R128_CRTC_OFFSET, 0)); - if (dev_priv->current_page == 0) { + if (dev_priv->current_page == 0) OUT_RING(dev_priv->back_offset); - } else { + else OUT_RING(dev_priv->front_offset); - } ADVANCE_RING(); @@ -566,7 +562,7 @@ static void r128_cce_dispatch_flip(struct drm_device * dev) ADVANCE_RING(); } -static void r128_cce_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf) +static void r128_cce_dispatch_vertex(struct drm_device *dev, struct drm_buf *buf) { drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_buf_priv_t *buf_priv = buf->dev_private; @@ -585,9 +581,8 @@ static void r128_cce_dispatch_vertex(struct drm_device * dev, struct drm_buf * b if (buf->used) { buf_priv->dispatched = 1; - if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) { + if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) r128_emit_state(dev_priv); - } do { /* Emit the next set of up to three cliprects */ @@ -636,8 +631,8 @@ static void r128_cce_dispatch_vertex(struct drm_device * dev, struct drm_buf * b sarea_priv->nbox = 0; } -static void r128_cce_dispatch_indirect(struct drm_device * dev, - struct drm_buf * buf, int start, int end) +static void r128_cce_dispatch_indirect(struct drm_device *dev, + struct drm_buf *buf, int start, int end) { drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_buf_priv_t *buf_priv = buf->dev_private; @@ -691,8 +686,8 @@ static void r128_cce_dispatch_indirect(struct drm_device * dev, dev_priv->sarea_priv->last_dispatch++; } -static void r128_cce_dispatch_indices(struct drm_device * dev, - struct drm_buf * buf, +static void r128_cce_dispatch_indices(struct drm_device *dev, + struct drm_buf *buf, int start, int end, int count) { drm_r128_private_t *dev_priv = dev->dev_private; @@ -713,9 +708,8 @@ static void r128_cce_dispatch_indices(struct drm_device * dev, if (start != end) { buf_priv->dispatched = 1; - if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) { + if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) r128_emit_state(dev_priv); - } dwords = (end - start + 3) / sizeof(u32); @@ -775,9 +769,9 @@ static void r128_cce_dispatch_indices(struct drm_device * dev, sarea_priv->nbox = 0; } -static int r128_cce_dispatch_blit(struct drm_device * dev, +static int r128_cce_dispatch_blit(struct drm_device *dev, struct drm_file *file_priv, - drm_r128_blit_t * blit) + drm_r128_blit_t *blit) { drm_r128_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; @@ -887,8 +881,8 @@ static int r128_cce_dispatch_blit(struct drm_device * dev, * have hardware 
stencil support. */ -static int r128_cce_dispatch_write_span(struct drm_device * dev, - drm_r128_depth_t * depth) +static int r128_cce_dispatch_write_span(struct drm_device *dev, + drm_r128_depth_t *depth) { drm_r128_private_t *dev_priv = dev->dev_private; int count, x, y; @@ -902,12 +896,10 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev, if (count > 4096 || count <= 0) return -EMSGSIZE; - if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) { + if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) return -EFAULT; - } - if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) { + if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) return -EFAULT; - } buffer_size = depth->n * sizeof(u32); buffer = kmalloc(buffer_size, GFP_KERNEL); @@ -983,8 +975,8 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev, return 0; } -static int r128_cce_dispatch_write_pixels(struct drm_device * dev, - drm_r128_depth_t * depth) +static int r128_cce_dispatch_write_pixels(struct drm_device *dev, + drm_r128_depth_t *depth) { drm_r128_private_t *dev_priv = dev->dev_private; int count, *x, *y; @@ -1001,9 +993,8 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev, xbuf_size = count * sizeof(*x); ybuf_size = count * sizeof(*y); x = kmalloc(xbuf_size, GFP_KERNEL); - if (x == NULL) { + if (x == NULL) return -ENOMEM; - } y = kmalloc(ybuf_size, GFP_KERNEL); if (y == NULL) { kfree(x); @@ -1105,8 +1096,8 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev, return 0; } -static int r128_cce_dispatch_read_span(struct drm_device * dev, - drm_r128_depth_t * depth) +static int r128_cce_dispatch_read_span(struct drm_device *dev, + drm_r128_depth_t *depth) { drm_r128_private_t *dev_priv = dev->dev_private; int count, x, y; @@ -1117,12 +1108,10 @@ static int r128_cce_dispatch_read_span(struct drm_device * dev, if (count > 4096 || count <= 0) return -EMSGSIZE; - if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) { + if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) return -EFAULT; - } - if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) { + if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) return -EFAULT; - } BEGIN_RING(7); @@ -1148,8 +1137,8 @@ static int r128_cce_dispatch_read_span(struct drm_device * dev, return 0; } -static int r128_cce_dispatch_read_pixels(struct drm_device * dev, - drm_r128_depth_t * depth) +static int r128_cce_dispatch_read_pixels(struct drm_device *dev, + drm_r128_depth_t *depth) { drm_r128_private_t *dev_priv = dev->dev_private; int count, *x, *y; @@ -1161,16 +1150,14 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev, if (count > 4096 || count <= 0) return -EMSGSIZE; - if (count > dev_priv->depth_pitch) { + if (count > dev_priv->depth_pitch) count = dev_priv->depth_pitch; - } xbuf_size = count * sizeof(*x); ybuf_size = count * sizeof(*y); x = kmalloc(xbuf_size, GFP_KERNEL); - if (x == NULL) { + if (x == NULL) return -ENOMEM; - } y = kmalloc(ybuf_size, GFP_KERNEL); if (y == NULL) { kfree(x); @@ -1220,7 +1207,7 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev, * Polygon stipple */ -static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple) +static void r128_cce_dispatch_stipple(struct drm_device *dev, u32 *stipple) { drm_r128_private_t *dev_priv = dev->dev_private; int i; @@ -1230,9 +1217,8 @@ static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple) BEGIN_RING(33); OUT_RING(CCE_PACKET0(R128_BRUSH_DATA0, 31)); - for (i = 0; i < 32; i++) { + for (i = 0; i < 32; i++) OUT_RING(stipple[i]); - } 
ADVANCE_RING(); } @@ -1269,7 +1255,7 @@ static int r128_cce_clear(struct drm_device *dev, void *data, struct drm_file *f return 0; } -static int r128_do_init_pageflip(struct drm_device * dev) +static int r128_do_init_pageflip(struct drm_device *dev) { drm_r128_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); @@ -1288,7 +1274,7 @@ static int r128_do_init_pageflip(struct drm_device * dev) return 0; } -static int r128_do_cleanup_pageflip(struct drm_device * dev) +static int r128_do_cleanup_pageflip(struct drm_device *dev) { drm_r128_private_t *dev_priv = dev->dev_private; DRM_DEBUG("\n"); @@ -1645,17 +1631,16 @@ static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *fi return 0; } -void r128_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) +void r128_driver_preclose(struct drm_device *dev, struct drm_file *file_priv) { if (dev->dev_private) { drm_r128_private_t *dev_priv = dev->dev_private; - if (dev_priv->page_flipping) { + if (dev_priv->page_flipping) r128_do_cleanup_pageflip(dev); - } } } -void r128_driver_lastclose(struct drm_device * dev) +void r128_driver_lastclose(struct drm_device *dev) { r128_do_cleanup_cce(dev); } diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 84b1f2729d4..aebe0087504 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -69,5 +69,6 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \ radeon-$(CONFIG_COMPAT) += radeon_ioc32.o radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o +radeon-$(CONFIG_ACPI) += radeon_acpi.o obj-$(CONFIG_DRM_RADEON)+= radeon.o diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index 1d569830ed9..8e421f644a5 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -108,12 +108,11 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base, base++; break; case ATOM_IIO_READ: - temp = ctx->card->reg_read(ctx->card, CU16(base + 1)); + temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1)); base += 3; break; case ATOM_IIO_WRITE: - (void)ctx->card->reg_read(ctx->card, CU16(base + 1)); - ctx->card->reg_write(ctx->card, CU16(base + 1), temp); + ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp); base += 3; break; case ATOM_IIO_CLEAR: @@ -715,8 +714,8 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) cjiffies = jiffies; if (time_after(cjiffies, ctx->last_jump_jiffies)) { cjiffies -= ctx->last_jump_jiffies; - if ((jiffies_to_msecs(cjiffies) > 1000)) { - DRM_ERROR("atombios stuck in loop for more than 1sec aborting\n"); + if ((jiffies_to_msecs(cjiffies) > 5000)) { + DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n"); ctx->abort = true; } } else { diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h index cd1b64ab5ca..a589a55b223 100644 --- a/drivers/gpu/drm/radeon/atom.h +++ b/drivers/gpu/drm/radeon/atom.h @@ -113,6 +113,8 @@ struct card_info { struct drm_device *dev; void (* reg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */ uint32_t (* reg_read)(struct card_info *, uint32_t); /* filled by driver */ + void (* ioreg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */ + uint32_t (* ioreg_read)(struct card_info *, uint32_t); /* filled by driver */ void (* mc_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */ uint32_t (* mc_read)(struct card_info *, uint32_t); /* filled by driver */ void (* pll_write)(struct card_info *, uint32_t, 
uint32_t); /* filled by driver */ diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 8c2d6478a22..12ad512bd3d 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -44,10 +44,6 @@ static void atombios_overscan_setup(struct drm_crtc *crtc, memset(&args, 0, sizeof(args)); - args.usOverscanRight = 0; - args.usOverscanLeft = 0; - args.usOverscanBottom = 0; - args.usOverscanTop = 0; args.ucCRTC = radeon_crtc->crtc_id; switch (radeon_crtc->rmx_type) { @@ -56,7 +52,6 @@ static void atombios_overscan_setup(struct drm_crtc *crtc, args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); break; case RMX_ASPECT: a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; @@ -69,17 +64,16 @@ static void atombios_overscan_setup(struct drm_crtc *crtc, args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; } - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); break; case RMX_FULL: default: - args.usOverscanRight = 0; - args.usOverscanLeft = 0; - args.usOverscanBottom = 0; - args.usOverscanTop = 0; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + args.usOverscanRight = radeon_crtc->h_border; + args.usOverscanLeft = radeon_crtc->h_border; + args.usOverscanBottom = radeon_crtc->v_border; + args.usOverscanTop = radeon_crtc->v_border; break; } + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } static void atombios_scaler_setup(struct drm_crtc *crtc) @@ -282,22 +276,22 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc, u16 misc = 0; memset(&args, 0, sizeof(args)); - args.usH_Size = cpu_to_le16(mode->crtc_hdisplay); + args.usH_Size = cpu_to_le16(mode->crtc_hdisplay - (radeon_crtc->h_border * 2)); args.usH_Blanking_Time = - cpu_to_le16(mode->crtc_hblank_end - mode->crtc_hdisplay); - args.usV_Size = cpu_to_le16(mode->crtc_vdisplay); + cpu_to_le16(mode->crtc_hblank_end - mode->crtc_hdisplay + (radeon_crtc->h_border * 2)); + args.usV_Size = cpu_to_le16(mode->crtc_vdisplay - (radeon_crtc->v_border * 2)); args.usV_Blanking_Time = - cpu_to_le16(mode->crtc_vblank_end - mode->crtc_vdisplay); + cpu_to_le16(mode->crtc_vblank_end - mode->crtc_vdisplay + (radeon_crtc->v_border * 2)); args.usH_SyncOffset = - cpu_to_le16(mode->crtc_hsync_start - mode->crtc_hdisplay); + cpu_to_le16(mode->crtc_hsync_start - mode->crtc_hdisplay + radeon_crtc->h_border); args.usH_SyncWidth = cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start); args.usV_SyncOffset = - cpu_to_le16(mode->crtc_vsync_start - mode->crtc_vdisplay); + cpu_to_le16(mode->crtc_vsync_start - mode->crtc_vdisplay + radeon_crtc->v_border); args.usV_SyncWidth = cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start); - /*args.ucH_Border = mode->hborder;*/ - /*args.ucV_Border = mode->vborder;*/ + args.ucH_Border = radeon_crtc->h_border; + args.ucV_Border = radeon_crtc->v_border; if (mode->flags & DRM_MODE_FLAG_NVSYNC) misc |= ATOM_VSYNC_POLARITY; @@ -669,56 +663,25 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc) atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } -static void 
atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) +static void atombios_crtc_program_pll(struct drm_crtc *crtc, + int crtc_id, + int pll_id, + u32 encoder_mode, + u32 encoder_id, + u32 clock, + u32 ref_div, + u32 fb_div, + u32 frac_fb_div, + u32 post_div) { - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; - struct drm_encoder *encoder = NULL; - struct radeon_encoder *radeon_encoder = NULL; u8 frev, crev; - int index; + int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); union set_pixel_clock args; - u32 pll_clock = mode->clock; - u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0; - struct radeon_pll *pll; - u32 adjusted_clock; - int encoder_mode = 0; memset(&args, 0, sizeof(args)); - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - if (encoder->crtc == crtc) { - radeon_encoder = to_radeon_encoder(encoder); - encoder_mode = atombios_get_encoder_mode(encoder); - break; - } - } - - if (!radeon_encoder) - return; - - switch (radeon_crtc->pll_id) { - case ATOM_PPLL1: - pll = &rdev->clock.p1pll; - break; - case ATOM_PPLL2: - pll = &rdev->clock.p2pll; - break; - case ATOM_DCPLL: - case ATOM_PPLL_INVALID: - default: - pll = &rdev->clock.dcpll; - break; - } - - /* adjust pixel clock as needed */ - adjusted_clock = atombios_adjust_pll(crtc, mode, pll); - - radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, - &ref_div, &post_div); - - index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) return; @@ -727,47 +690,49 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode case 1: switch (crev) { case 1: - args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); + if (clock == ATOM_DISABLE) + return; + args.v1.usPixelClock = cpu_to_le16(clock / 10); args.v1.usRefDiv = cpu_to_le16(ref_div); args.v1.usFbDiv = cpu_to_le16(fb_div); args.v1.ucFracFbDiv = frac_fb_div; args.v1.ucPostDiv = post_div; - args.v1.ucPpll = radeon_crtc->pll_id; - args.v1.ucCRTC = radeon_crtc->crtc_id; + args.v1.ucPpll = pll_id; + args.v1.ucCRTC = crtc_id; args.v1.ucRefDivSrc = 1; break; case 2: - args.v2.usPixelClock = cpu_to_le16(mode->clock / 10); + args.v2.usPixelClock = cpu_to_le16(clock / 10); args.v2.usRefDiv = cpu_to_le16(ref_div); args.v2.usFbDiv = cpu_to_le16(fb_div); args.v2.ucFracFbDiv = frac_fb_div; args.v2.ucPostDiv = post_div; - args.v2.ucPpll = radeon_crtc->pll_id; - args.v2.ucCRTC = radeon_crtc->crtc_id; + args.v2.ucPpll = pll_id; + args.v2.ucCRTC = crtc_id; args.v2.ucRefDivSrc = 1; break; case 3: - args.v3.usPixelClock = cpu_to_le16(mode->clock / 10); + args.v3.usPixelClock = cpu_to_le16(clock / 10); args.v3.usRefDiv = cpu_to_le16(ref_div); args.v3.usFbDiv = cpu_to_le16(fb_div); args.v3.ucFracFbDiv = frac_fb_div; args.v3.ucPostDiv = post_div; - args.v3.ucPpll = radeon_crtc->pll_id; - args.v3.ucMiscInfo = (radeon_crtc->pll_id << 2); - args.v3.ucTransmitterId = radeon_encoder->encoder_id; + args.v3.ucPpll = pll_id; + args.v3.ucMiscInfo = (pll_id << 2); + args.v3.ucTransmitterId = encoder_id; args.v3.ucEncoderMode = encoder_mode; break; case 5: - args.v5.ucCRTC = radeon_crtc->crtc_id; - args.v5.usPixelClock = cpu_to_le16(mode->clock / 10); + args.v5.ucCRTC = crtc_id; + args.v5.usPixelClock = cpu_to_le16(clock / 10); args.v5.ucRefDiv = ref_div; args.v5.usFbDiv = cpu_to_le16(fb_div); args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 
100000); args.v5.ucPostDiv = post_div; args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */ - args.v5.ucTransmitterID = radeon_encoder->encoder_id; + args.v5.ucTransmitterID = encoder_id; args.v5.ucEncoderMode = encoder_mode; - args.v5.ucPpll = radeon_crtc->pll_id; + args.v5.ucPpll = pll_id; break; default: DRM_ERROR("Unknown table version %d %d\n", frev, crev); @@ -782,6 +747,56 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } +static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) +{ + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + struct drm_encoder *encoder = NULL; + struct radeon_encoder *radeon_encoder = NULL; + u32 pll_clock = mode->clock; + u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0; + struct radeon_pll *pll; + u32 adjusted_clock; + int encoder_mode = 0; + + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + if (encoder->crtc == crtc) { + radeon_encoder = to_radeon_encoder(encoder); + encoder_mode = atombios_get_encoder_mode(encoder); + break; + } + } + + if (!radeon_encoder) + return; + + switch (radeon_crtc->pll_id) { + case ATOM_PPLL1: + pll = &rdev->clock.p1pll; + break; + case ATOM_PPLL2: + pll = &rdev->clock.p2pll; + break; + case ATOM_DCPLL: + case ATOM_PPLL_INVALID: + default: + pll = &rdev->clock.dcpll; + break; + } + + /* adjust pixel clock as needed */ + adjusted_clock = atombios_adjust_pll(crtc, mode, pll); + + radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, + &ref_div, &post_div); + + atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, + encoder_mode, radeon_encoder->encoder_id, mode->clock, + ref_div, fb_div, frac_fb_div, post_div); + +} + static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { @@ -797,7 +812,7 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y, /* no fb bound */ if (!crtc->fb) { - DRM_DEBUG("No FB bound\n"); + DRM_DEBUG_KMS("No FB bound\n"); return 0; } @@ -841,6 +856,11 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y, return -EINVAL; } + if (tiling_flags & RADEON_TILING_MACRO) + fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1); + else if (tiling_flags & RADEON_TILING_MICRO) + fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); + switch (radeon_crtc->crtc_id) { case 0: WREG32(AVIVO_D1VGA_CONTROL, 0); @@ -931,7 +951,7 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, /* no fb bound */ if (!crtc->fb) { - DRM_DEBUG("No FB bound\n"); + DRM_DEBUG_KMS("No FB bound\n"); return 0; } @@ -979,11 +999,18 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, return -EINVAL; } - if (tiling_flags & RADEON_TILING_MACRO) - fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE; + if (rdev->family >= CHIP_R600) { + if (tiling_flags & RADEON_TILING_MACRO) + fb_format |= R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1; + else if (tiling_flags & RADEON_TILING_MICRO) + fb_format |= R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1; + } else { + if (tiling_flags & RADEON_TILING_MACRO) + fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE; - if (tiling_flags & RADEON_TILING_MICRO) - fb_format |= AVIVO_D1GRPH_TILED; + if (tiling_flags & RADEON_TILING_MICRO) + fb_format |= AVIVO_D1GRPH_TILED; + } if 
(radeon_crtc->crtc_id == 0) WREG32(AVIVO_D1VGA_CONTROL, 0); @@ -1143,10 +1170,8 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, atombios_crtc_set_pll(crtc, adjusted_mode); atombios_enable_ss(crtc); - if (ASIC_IS_DCE4(rdev)) + if (ASIC_IS_AVIVO(rdev)) atombios_set_crtc_dtd_timing(crtc, adjusted_mode); - else if (ASIC_IS_AVIVO(rdev)) - atombios_crtc_set_timing(crtc, adjusted_mode); else { atombios_crtc_set_timing(crtc, adjusted_mode); if (radeon_crtc->crtc_id == 0) @@ -1191,6 +1216,24 @@ static void atombios_crtc_commit(struct drm_crtc *crtc) atombios_lock_crtc(crtc, ATOM_DISABLE); } +static void atombios_crtc_disable(struct drm_crtc *crtc) +{ + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + + switch (radeon_crtc->pll_id) { + case ATOM_PPLL1: + case ATOM_PPLL2: + /* disable the ppll */ + atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, + 0, 0, ATOM_DISABLE, 0, 0, 0, 0); + break; + default: + break; + } + radeon_crtc->pll_id = -1; +} + static const struct drm_crtc_helper_funcs atombios_helper_funcs = { .dpms = atombios_crtc_dpms, .mode_fixup = atombios_crtc_mode_fixup, @@ -1199,6 +1242,7 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = { .prepare = atombios_crtc_prepare, .commit = atombios_crtc_commit, .load_lut = radeon_crtc_load_lut, + .disable = atombios_crtc_disable, }; void radeon_atombios_init_crtc(struct drm_device *dev, diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index abffb1499e2..36e0d4b545e 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -296,7 +296,7 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE], u8 this_v = dp_get_adjust_request_voltage(link_status, lane); u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane); - DRM_DEBUG("requested signal parameters: lane %d voltage %s pre_emph %s\n", + DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n", lane, voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT], pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]); @@ -313,7 +313,7 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE], if (p >= dp_pre_emphasis_max(v)) p = dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; - DRM_DEBUG("using signal parameters: voltage %s pre_emph %s\n", + DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n", voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT], pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]); @@ -358,7 +358,7 @@ retry: if (args.v1.ucReplyStatus && !args.v1.ucDataOutLen) { if (args.v1.ucReplyStatus == 0x20 && retry_count++ < 10) goto retry; - DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n", + DRM_DEBUG_KMS("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n", req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], chan->rec.i2c_id, args.v1.ucReplyStatus, retry_count); return false; @@ -461,10 +461,10 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector) memcpy(dig_connector->dpcd, msg, 8); { int i; - DRM_DEBUG("DPCD: "); + DRM_DEBUG_KMS("DPCD: "); for (i = 0; i < 8; i++) - DRM_DEBUG("%02x ", msg[i]); - DRM_DEBUG("\n"); + DRM_DEBUG_KMS("%02x ", msg[i]); + DRM_DEBUG_KMS("\n"); } return true; } @@ -512,7 +512,7 @@ static bool atom_dp_get_link_status(struct radeon_connector *radeon_connector, return false; } - 
DRM_DEBUG("link status %02x %02x %02x %02x %02x %02x\n", + DRM_DEBUG_KMS("link status %02x %02x %02x %02x %02x %02x\n", link_status[0], link_status[1], link_status[2], link_status[3], link_status[4], link_status[5]); return true; @@ -695,7 +695,7 @@ void dp_link_train(struct drm_encoder *encoder, if (!clock_recovery) DRM_ERROR("clock recovery failed\n"); else - DRM_DEBUG("clock recovery at voltage %d pre-emphasis %d\n", + DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n", train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK, (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT); @@ -739,7 +739,7 @@ void dp_link_train(struct drm_encoder *encoder, if (!channel_eq) DRM_ERROR("channel eq failed\n"); else - DRM_DEBUG("channel eq at voltage %d pre-emphasis %d\n", + DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n", train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK, (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT); diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 1caf625e472..957d5067ad9 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -39,6 +39,23 @@ static void evergreen_gpu_init(struct radeon_device *rdev); void evergreen_fini(struct radeon_device *rdev); +/* get temperature in millidegrees */ +u32 evergreen_get_temp(struct radeon_device *rdev) +{ + u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >> + ASIC_T_SHIFT; + u32 actual_temp = 0; + + if ((temp >> 10) & 1) + actual_temp = 0; + else if ((temp >> 9) & 1) + actual_temp = 255; + else + actual_temp = (temp >> 1) & 0xff; + + return actual_temp * 1000; +} + void evergreen_pm_misc(struct radeon_device *rdev) { int req_ps_idx = rdev->pm.requested_power_state_index; @@ -1115,6 +1132,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) rdev->config.evergreen.max_backends) & EVERGREEN_MAX_BACKENDS_MASK)); + rdev->config.evergreen.tile_config = gb_addr_config; WREG32(GB_BACKEND_MAP, gb_backend_map); WREG32(GB_ADDR_CONFIG, gb_addr_config); WREG32(DMIF_ADDR_CONFIG, gb_addr_config); @@ -1334,8 +1352,8 @@ int evergreen_mc_init(struct radeon_device *rdev) } rdev->mc.vram_width = numchan * chansize; /* Could aper size report 0 ? 
*/ - rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); - rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); + rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); + rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); /* Setup GPU memory space */ /* size in MB on evergreen */ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h index e028c1cd9d9..2330f3a36fd 100644 --- a/drivers/gpu/drm/radeon/evergreen_reg.h +++ b/drivers/gpu/drm/radeon/evergreen_reg.h @@ -61,6 +61,11 @@ # define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5 # define EVERGREEN_GRPH_FORMAT_RGB111110 6 # define EVERGREEN_GRPH_FORMAT_BGR101111 7 +# define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20) +# define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0 +# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1 +# define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1 2 +# define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1 4 #define EVERGREEN_GRPH_SWAP_CONTROL 0x680c # define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0) # define EVERGREEN_GRPH_ENDIAN_NONE 0 diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index a1cd621780e..9b7532dd30f 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -165,6 +165,11 @@ #define SE_DB_BUSY (1 << 30) #define SE_CB_BUSY (1 << 31) +#define CG_MULT_THERMAL_STATUS 0x740 +#define ASIC_T(x) ((x) << 16) +#define ASIC_T_MASK 0x7FF0000 +#define ASIC_T_SHIFT 16 + #define HDP_HOST_PATH_CNTL 0x2C00 #define HDP_NONSURFACE_BASE 0x2C04 #define HDP_NONSURFACE_INFO 0x2C08 diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index a89a15ab524..e817a0bb5eb 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -141,7 +141,7 @@ void r100_pm_get_dynpm_state(struct radeon_device *rdev) /* only one clock mode per power state */ rdev->pm.requested_clock_mode_index = 0; - DRM_DEBUG("Requested: e: %d m: %d p: %d\n", + DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n", rdev->pm.power_state[rdev->pm.requested_power_state_index]. clock_info[rdev->pm.requested_clock_mode_index].sclk, rdev->pm.power_state[rdev->pm.requested_power_state_index]. 
@@ -276,7 +276,7 @@ void r100_pm_misc(struct radeon_device *rdev) rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) { radeon_set_pcie_lanes(rdev, ps->pcie_lanes); - DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes); + DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes); } } @@ -849,7 +849,7 @@ static int r100_cp_init_microcode(struct radeon_device *rdev) const char *fw_name = NULL; int err; - DRM_DEBUG("\n"); + DRM_DEBUG_KMS("\n"); pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0); err = IS_ERR(pdev); @@ -1803,6 +1803,11 @@ static int r100_packet3_check(struct radeon_cs_parser *p, return r; break; /* triggers drawing using indices to vertex buffer */ + case PACKET3_3D_CLEAR_HIZ: + case PACKET3_3D_CLEAR_ZMASK: + if (p->rdev->hyperz_filp != p->filp) + return -EINVAL; + break; case PACKET3_NOP: break; default: @@ -2295,8 +2300,8 @@ void r100_vram_init_sizes(struct radeon_device *rdev) u64 config_aper_size; /* work out accessible VRAM */ - rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); - rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); + rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); + rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev); /* FIXME we don't use the second aperture yet when we could use it */ if (rdev->mc.visible_vram_size > rdev->mc.aper_size) @@ -2364,11 +2369,10 @@ void r100_mc_init(struct radeon_device *rdev) */ void r100_pll_errata_after_index(struct radeon_device *rdev) { - if (!(rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS)) { - return; + if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) { + (void)RREG32(RADEON_CLOCK_CNTL_DATA); + (void)RREG32(RADEON_CRTC_GEN_CNTL); } - (void)RREG32(RADEON_CLOCK_CNTL_DATA); - (void)RREG32(RADEON_CRTC_GEN_CNTL); } static void r100_pll_errata_after_data(struct radeon_device *rdev) @@ -2643,7 +2647,7 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg, flags |= pitch / 8; - DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); + DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); WREG32(RADEON_SURFACE0_INFO + surf_index, flags); WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1); @@ -3039,7 +3043,7 @@ void r100_bandwidth_update(struct radeon_device *rdev) } #endif - DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n", + DRM_DEBUG_KMS("GRPH_BUFFER_CNTL from to %x\n", /* (unsigned int)info->SavedReg->grph_buffer_cntl, */ (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL)); } @@ -3135,7 +3139,7 @@ void r100_bandwidth_update(struct radeon_device *rdev) WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC); } - DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n", + DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n", (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); } } @@ -3809,6 +3813,31 @@ void r100_fini(struct radeon_device *rdev) rdev->bios = NULL; } +/* + * Due to how kexec works, it can leave the hw fully initialised when it + * boots the new kernel. However doing our init sequence with the CP and + * WB stuff setup causes GPU hangs on the RN50 at least. So at startup + * do some quick sanity checks and restore sane values to avoid this + * problem. 
+ */ +void r100_restore_sanity(struct radeon_device *rdev) +{ + u32 tmp; + + tmp = RREG32(RADEON_CP_CSQ_CNTL); + if (tmp) { + WREG32(RADEON_CP_CSQ_CNTL, 0); + } + tmp = RREG32(RADEON_CP_RB_CNTL); + if (tmp) { + WREG32(RADEON_CP_RB_CNTL, 0); + } + tmp = RREG32(RADEON_SCRATCH_UMSK); + if (tmp) { + WREG32(RADEON_SCRATCH_UMSK, 0); + } +} + int r100_init(struct radeon_device *rdev) { int r; @@ -3821,6 +3850,8 @@ int r100_init(struct radeon_device *rdev) radeon_scratch_init(rdev); /* Initialize surface registers */ radeon_surface_init(rdev); + /* sanity check some register to avoid hangs like after kexec */ + r100_restore_sanity(rdev); /* TODO: disable VGA need to use VGA request */ /* BIOS*/ if (!radeon_get_bios(rdev)) { diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h index d016b16fa11..b121b6c678d 100644 --- a/drivers/gpu/drm/radeon/r100d.h +++ b/drivers/gpu/drm/radeon/r100d.h @@ -48,10 +48,12 @@ #define PACKET3_3D_DRAW_IMMD 0x29 #define PACKET3_3D_DRAW_INDX 0x2A #define PACKET3_3D_LOAD_VBPNTR 0x2F +#define PACKET3_3D_CLEAR_ZMASK 0x32 #define PACKET3_INDX_BUFFER 0x33 #define PACKET3_3D_DRAW_VBUF_2 0x34 #define PACKET3_3D_DRAW_IMMD_2 0x35 #define PACKET3_3D_DRAW_INDX_2 0x36 +#define PACKET3_3D_CLEAR_HIZ 0x37 #define PACKET3_BITBLT_MULTI 0x9B #define PACKET0(reg, n) (CP_PACKET0 | \ diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 19a7ef7ee34..c827738ad7d 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -1048,14 +1048,47 @@ static int r300_packet0_check(struct radeon_cs_parser *p, /* RB3D_COLOR_CHANNEL_MASK */ track->color_channel_mask = idx_value; break; - case 0x4d1c: + case 0x43a4: + /* SC_HYPERZ_EN */ + /* r300c emits this register - we need to disable hyperz for it + * without complaining */ + if (p->rdev->hyperz_filp != p->filp) { + if (idx_value & 0x1) + ib[idx] = idx_value & ~1; + } + break; + case 0x4f1c: /* ZB_BW_CNTL */ track->zb_cb_clear = !!(idx_value & (1 << 5)); + if (p->rdev->hyperz_filp != p->filp) { + if (idx_value & (R300_HIZ_ENABLE | + R300_RD_COMP_ENABLE | + R300_WR_COMP_ENABLE | + R300_FAST_FILL_ENABLE)) + goto fail; + } break; case 0x4e04: /* RB3D_BLENDCNTL */ track->blend_read_enable = !!(idx_value & (1 << 2)); break; + case 0x4f28: /* ZB_DEPTHCLEARVALUE */ + break; + case 0x4f30: /* ZB_MASK_OFFSET */ + case 0x4f34: /* ZB_ZMASK_PITCH */ + case 0x4f44: /* ZB_HIZ_OFFSET */ + case 0x4f54: /* ZB_HIZ_PITCH */ + if (idx_value && (p->rdev->hyperz_filp != p->filp)) + goto fail; + break; + case 0x4028: + if (idx_value && (p->rdev->hyperz_filp != p->filp)) + goto fail; + /* GB_Z_PEQ_CONFIG */ + if (p->rdev->family >= CHIP_RV350) + break; + goto fail; + break; case 0x4be8: /* valid register only on RV530 */ if (p->rdev->family == CHIP_RV530) @@ -1066,8 +1099,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, } return 0; fail: - printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", - reg, idx); + printk(KERN_ERR "Forbidden register 0x%04X in cs at %d (val=%08x)\n", + reg, idx, idx_value); return -EINVAL; } @@ -1161,6 +1194,11 @@ static int r300_packet3_check(struct radeon_cs_parser *p, return r; } break; + case PACKET3_3D_CLEAR_HIZ: + case PACKET3_3D_CLEAR_ZMASK: + if (p->rdev->hyperz_filp != p->filp) + return -EINVAL; + break; case PACKET3_NOP: break; default: @@ -1380,6 +1418,8 @@ int r300_init(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); /* TODO: disable VGA need to use VGA request */ + /* restore some register to sane defaults 
*/ + r100_restore_sanity(rdev); /* BIOS*/ if (!radeon_get_bios(rdev)) { if (ASIC_IS_AVIVO(rdev)) diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h index 968a33317fb..0c036c60d9d 100644 --- a/drivers/gpu/drm/radeon/r300d.h +++ b/drivers/gpu/drm/radeon/r300d.h @@ -48,10 +48,12 @@ #define PACKET3_3D_DRAW_IMMD 0x29 #define PACKET3_3D_DRAW_INDX 0x2A #define PACKET3_3D_LOAD_VBPNTR 0x2F +#define PACKET3_3D_CLEAR_ZMASK 0x32 #define PACKET3_INDX_BUFFER 0x33 #define PACKET3_3D_DRAW_VBUF_2 0x34 #define PACKET3_3D_DRAW_IMMD_2 0x35 #define PACKET3_3D_DRAW_INDX_2 0x36 +#define PACKET3_3D_CLEAR_HIZ 0x37 #define PACKET3_BITBLT_MULTI 0x9B #define PACKET0(reg, n) (CP_PACKET0 | \ diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index e6c89142bb4..59f7bccc5be 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -343,6 +343,8 @@ int r420_init(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); /* TODO: disable VGA need to use VGA request */ + /* restore some register to sane defaults */ + r100_restore_sanity(rdev); /* BIOS*/ if (!radeon_get_bios(rdev)) { if (ASIC_IS_AVIVO(rdev)) diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h index 93c9a2bbccf..6ac1f604e29 100644 --- a/drivers/gpu/drm/radeon/r500_reg.h +++ b/drivers/gpu/drm/radeon/r500_reg.h @@ -386,6 +386,11 @@ # define AVIVO_D1GRPH_TILED (1 << 20) # define AVIVO_D1GRPH_MACRO_ADDRESS_MODE (1 << 21) +# define R600_D1GRPH_ARRAY_MODE_LINEAR_GENERAL (0 << 20) +# define R600_D1GRPH_ARRAY_MODE_LINEAR_ALIGNED (1 << 20) +# define R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1 (2 << 20) +# define R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1 (4 << 20) + /* The R7xx *_HIGH surface regs are backwards; the D1 regs are in the D2 * block and vice versa. This applies to GRPH, CUR, etc. */ diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index 694af7cc23a..1458dee902d 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c @@ -231,6 +231,8 @@ int r520_init(struct radeon_device *rdev) radeon_scratch_init(rdev); /* Initialize surface registers */ radeon_surface_init(rdev); + /* restore some register to sane defaults */ + r100_restore_sanity(rdev); /* TODO: disable VGA need to use VGA request */ /* BIOS*/ if (!radeon_get_bios(rdev)) { diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index e100f69faee..d0ebae9dde2 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -92,6 +92,21 @@ void r600_gpu_init(struct radeon_device *rdev); void r600_fini(struct radeon_device *rdev); void r600_irq_disable(struct radeon_device *rdev); +/* get temperature in millidegrees */ +u32 rv6xx_get_temp(struct radeon_device *rdev) +{ + u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >> + ASIC_T_SHIFT; + u32 actual_temp = 0; + + if ((temp >> 7) & 1) + actual_temp = 0; + else + actual_temp = (temp >> 1) & 0xff; + + return actual_temp * 1000; +} + void r600_pm_get_dynpm_state(struct radeon_device *rdev) { int i; @@ -256,7 +271,7 @@ void r600_pm_get_dynpm_state(struct radeon_device *rdev) } } - DRM_DEBUG("Requested: e: %d m: %d p: %d\n", + DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n", rdev->pm.power_state[rdev->pm.requested_power_state_index]. clock_info[rdev->pm.requested_clock_mode_index].sclk, rdev->pm.power_state[rdev->pm.requested_power_state_index]. 
@@ -571,7 +586,7 @@ void r600_pm_misc(struct radeon_device *rdev) if (voltage->voltage != rdev->pm.current_vddc) { radeon_atom_set_voltage(rdev, voltage->voltage); rdev->pm.current_vddc = voltage->voltage; - DRM_DEBUG("Setting: v: %d\n", voltage->voltage); + DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage); } } } @@ -869,7 +884,17 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev) u32 tmp; /* flush hdp cache so updates hit vram */ - WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); + if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) { + void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; + u32 tmp; + + /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read + * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL + */ + WREG32(HDP_DEBUG1, 0); + tmp = readl((void __iomem *)ptr); + } else + WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12); WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12); @@ -1217,8 +1242,8 @@ int r600_mc_init(struct radeon_device *rdev) } rdev->mc.vram_width = numchan * chansize; /* Could aper size report 0 ? */ - rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); - rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); + rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); + rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); /* Setup GPU memory space */ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); @@ -1609,7 +1634,7 @@ void r600_gpu_init(struct radeon_device *rdev) r600_count_pipe_bits((cc_rb_backend_disable & R6XX_MAX_BACKENDS_MASK) >> 16)), (cc_rb_backend_disable >> 16)); - + rdev->config.r600.tile_config = tiling_config; tiling_config |= BACKEND_MAP(backend_map); WREG32(GB_TILING_CONFIG, tiling_config); WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); @@ -3512,5 +3537,15 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev) */ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo) { - WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); + /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read + * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL + */ + if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) { + void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; + u32 tmp; + + WREG32(HDP_DEBUG1, 0); + tmp = readl((void __iomem *)ptr); + } else + WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); } diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c index 2b26553c352..b5443fe1c1d 100644 --- a/drivers/gpu/drm/radeon/r600_audio.c +++ b/drivers/gpu/drm/radeon/r600_audio.c @@ -63,7 +63,8 @@ int r600_audio_bits_per_sample(struct radeon_device *rdev) case 0x4: return 32; } - DRM_ERROR("Unknown bits per sample 0x%x using 16 instead.\n", (int)value); + dev_err(rdev->dev, "Unknown bits per sample 0x%x using 16 instead\n", + (int)value); return 16; } @@ -150,7 +151,8 @@ static void r600_audio_update_hdmi(unsigned long param) r600_hdmi_update_audio_settings(encoder); } - if(still_going) r600_audio_schedule_polling(rdev); + if (still_going) + r600_audio_schedule_polling(rdev); } /* @@ -158,8 +160,9 @@ static void r600_audio_update_hdmi(unsigned long param) */ static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable) { - DRM_INFO("%s audio support", enable ? "Enabling" : "Disabling"); + DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling"); WREG32_P(R600_AUDIO_ENABLE, enable ? 
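The RV770 to RV740 flush workaround above is open-coded in both r600_pcie_gart_tlb_flush() and r600_ioctl_wait_idle(). Purely as an illustration of the pattern, it could be factored into a helper along these lines; the helper name is hypothetical and not part of the patch:

static void r7xx_hdp_flush_workaround(struct radeon_device *rdev)
{
        void __iomem *ptr = (void __iomem *)rdev->gart.table.vram.ptr;

        /* r7xx hw bug: write HDP_DEBUG1 and read back from the framebuffer
         * instead of poking HDP_MEM_COHERENCY_FLUSH_CNTL */
        WREG32(HDP_DEBUG1, 0);
        (void)readl(ptr);
}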
0x81000000 : 0x0, ~0x81000000); + rdev->audio_enabled = enable; } /* @@ -195,12 +198,14 @@ void r600_audio_enable_polling(struct drm_encoder *encoder) struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - DRM_DEBUG("r600_audio_enable_polling: %d", radeon_encoder->audio_polling_active); + DRM_DEBUG("r600_audio_enable_polling: %d\n", + radeon_encoder->audio_polling_active); if (radeon_encoder->audio_polling_active) return; radeon_encoder->audio_polling_active = 1; - mod_timer(&rdev->audio_timer, jiffies + 1); + if (rdev->audio_enabled) + mod_timer(&rdev->audio_timer, jiffies + 1); } /* @@ -209,7 +214,8 @@ void r600_audio_enable_polling(struct drm_encoder *encoder) void r600_audio_disable_polling(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - DRM_DEBUG("r600_audio_disable_polling: %d", radeon_encoder->audio_polling_active); + DRM_DEBUG("r600_audio_disable_polling: %d\n", + radeon_encoder->audio_polling_active); radeon_encoder->audio_polling_active = 0; } @@ -236,7 +242,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock) WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301); break; default: - DRM_ERROR("Unsupported encoder type 0x%02X\n", + dev_err(rdev->dev, "Unsupported encoder type 0x%02X\n", radeon_encoder->encoder_id); return; } @@ -266,7 +272,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock) */ void r600_audio_fini(struct radeon_device *rdev) { - if (!radeon_audio || !r600_audio_chipset_supported(rdev)) + if (!rdev->audio_enabled) return; del_timer(&rdev->audio_timer); diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c index 0271b53fa2d..e8151c1d55b 100644 --- a/drivers/gpu/drm/radeon/r600_blit_shaders.c +++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c @@ -39,37 +39,45 @@ const u32 r6xx_default_state[] = { - 0xc0002400, + 0xc0002400, /* START_3D_CMDBUF */ 0x00000000, - 0xc0012800, + + 0xc0012800, /* CONTEXT_CONTROL */ 0x80000000, 0x80000000, + 0xc0016800, 0x00000010, - 0x00008000, + 0x00008000, /* WAIT_UNTIL */ + 0xc0016800, 0x00000542, - 0x07000003, + 0x07000003, /* TA_CNTL_AUX */ + 0xc0016800, 0x000005c5, - 0x00000000, + 0x00000000, /* VC_ENHANCE */ + 0xc0016800, 0x00000363, - 0x00000000, + 0x00000000, /* SQ_DYN_GPR_CNTL_PS_FLUSH_REQ */ + 0xc0016800, 0x0000060c, - 0x82000000, + 0x82000000, /* DB_DEBUG */ + 0xc0016800, 0x0000060e, - 0x01020204, - 0xc0016f00, - 0x00000000, - 0x00000000, - 0xc0016f00, - 0x00000001, + 0x01020204, /* DB_WATERMARKS */ + + 0xc0026f00, 0x00000000, + 0x00000000, /* SQ_VTX_BASE_VTX_LOC */ + 0x00000000, /* SQ_VTX_START_INST_LOC */ + 0xc0096900, 0x0000022a, + 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */ 0x00000000, 0x00000000, 0x00000000, @@ -78,515 +86,317 @@ const u32 r6xx_default_state[] = 0x00000000, 0x00000000, 0x00000000, - 0x00000000, + 0xc0016900, 0x00000004, - 0x00000000, - 0xc0016900, + 0x00000000, /* DB_DEPTH_INFO */ + + 0xc0026900, 0x0000000a, - 0x00000000, - 0xc0016900, - 0x0000000b, - 0x00000000, - 0xc0016900, - 0x0000010c, - 0x00000000, - 0xc0016900, - 0x0000010d, - 0x00000000, + 0x00000000, /* DB_STENCIL_CLEAR */ + 0x00000000, /* DB_DEPTH_CLEAR */ + 0xc0016900, 0x00000200, - 0x00000000, - 0xc0016900, + 0x00000000, /* DB_DEPTH_CONTROL */ + + 0xc0026900, 0x00000343, - 0x00000060, - 0xc0016900, - 0x00000344, - 0x00000040, + 0x00000060, /* DB_RENDER_CONTROL */ + 0x00000040, /* DB_RENDER_OVERRIDE */ + 0xc0016900, 0x00000351, - 0x0000aa00, - 0xc0016900, - 0x00000104, 
- 0x00000000, - 0xc0016900, - 0x0000010e, - 0x00000000, - 0xc0046900, - 0x00000105, - 0x00000000, - 0x00000000, + 0x0000aa00, /* DB_ALPHA_TO_MASK */ + + 0xc00f6900, + 0x00000100, + 0x00000800, /* VGT_MAX_VTX_INDX */ + 0x00000000, /* VGT_MIN_VTX_INDX */ + 0x00000000, /* VGT_INDX_OFFSET */ + 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */ + 0x00000000, /* SX_ALPHA_TEST_CONTROL */ + 0x00000000, /* CB_BLEND_RED */ 0x00000000, 0x00000000, - 0xc0036900, - 0x00000109, 0x00000000, + 0x00000000, /* CB_FOG_RED */ 0x00000000, 0x00000000, + 0x00000000, /* DB_STENCILREFMASK */ + 0x00000000, /* DB_STENCILREFMASK_BF */ + 0x00000000, /* SX_ALPHA_REF */ + 0xc0046900, 0x0000030c, - 0x01000000, + 0x01000000, /* CB_CLRCMP_CNTL */ 0x00000000, 0x00000000, 0x00000000, + 0xc0046900, 0x00000048, - 0x3f800000, + 0x3f800000, /* CB_CLEAR_RED */ 0x00000000, 0x3f800000, 0x3f800000, - 0xc0016900, - 0x0000008e, - 0x0000000f, + 0xc0016900, 0x00000080, - 0x00000000, - 0xc0016900, + 0x00000000, /* PA_SC_WINDOW_OFFSET */ + + 0xc00a6900, 0x00000083, - 0x0000ffff, - 0xc0016900, - 0x00000084, - 0x00000000, - 0xc0016900, - 0x00000085, + 0x0000ffff, /* PA_SC_CLIP_RECT_RULE */ + 0x00000000, /* PA_SC_CLIPRECT_0_TL */ 0x20002000, - 0xc0016900, - 0x00000086, 0x00000000, - 0xc0016900, - 0x00000087, 0x20002000, - 0xc0016900, - 0x00000088, 0x00000000, - 0xc0016900, - 0x00000089, 0x20002000, - 0xc0016900, - 0x0000008a, 0x00000000, - 0xc0016900, - 0x0000008b, 0x20002000, - 0xc0016900, - 0x0000008c, - 0x00000000, - 0xc0016900, + 0x00000000, /* PA_SC_EDGERULE */ + + 0xc0406900, 0x00000094, - 0x80000000, - 0xc0016900, - 0x00000095, + 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */ + 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */ + 0x80000000, /* PA_SC_VPORT_SCISSOR_1_TL */ 0x20002000, - 0xc0026900, - 0x000000b4, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x00000096, 0x80000000, - 0xc0016900, - 0x00000097, 0x20002000, - 0xc0026900, - 0x000000b6, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x00000098, 0x80000000, - 0xc0016900, - 0x00000099, 0x20002000, - 0xc0026900, - 0x000000b8, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x0000009a, 0x80000000, - 0xc0016900, - 0x0000009b, 0x20002000, - 0xc0026900, - 0x000000ba, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x0000009c, 0x80000000, - 0xc0016900, - 0x0000009d, 0x20002000, - 0xc0026900, - 0x000000bc, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x0000009e, 0x80000000, - 0xc0016900, - 0x0000009f, 0x20002000, - 0xc0026900, - 0x000000be, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x000000a0, 0x80000000, - 0xc0016900, - 0x000000a1, 0x20002000, - 0xc0026900, - 0x000000c0, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x000000a2, 0x80000000, - 0xc0016900, - 0x000000a3, 0x20002000, - 0xc0026900, - 0x000000c2, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x000000a4, 0x80000000, - 0xc0016900, - 0x000000a5, 0x20002000, - 0xc0026900, - 0x000000c4, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x000000a6, 0x80000000, - 0xc0016900, - 0x000000a7, 0x20002000, - 0xc0026900, - 0x000000c6, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x000000a8, 0x80000000, - 0xc0016900, - 0x000000a9, 0x20002000, - 0xc0026900, - 0x000000c8, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x000000aa, 0x80000000, - 0xc0016900, - 0x000000ab, 0x20002000, - 0xc0026900, - 0x000000ca, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x000000ac, 0x80000000, - 0xc0016900, - 0x000000ad, 0x20002000, - 0xc0026900, - 0x000000cc, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x000000ae, 0x80000000, - 0xc0016900, - 0x000000af, 0x20002000, - 0xc0026900, - 0x000000ce, - 
0x00000000, - 0x3f800000, - 0xc0016900, - 0x000000b0, 0x80000000, - 0xc0016900, - 0x000000b1, 0x20002000, - 0xc0026900, - 0x000000d0, - 0x00000000, + 0x00000000, /* PA_SC_VPORT_ZMIN_0 */ 0x3f800000, - 0xc0016900, - 0x000000b2, - 0x80000000, - 0xc0016900, - 0x000000b3, - 0x20002000, - 0xc0026900, - 0x000000d2, 0x00000000, 0x3f800000, - 0xc0016900, - 0x00000293, - 0x00004010, - 0xc0016900, - 0x00000300, 0x00000000, - 0xc0016900, - 0x00000301, - 0x00000000, - 0xc0016900, - 0x00000312, - 0xffffffff, - 0xc0016900, - 0x00000307, + 0x3f800000, 0x00000000, - 0xc0016900, - 0x00000308, + 0x3f800000, 0x00000000, - 0xc0016900, - 0x00000283, + 0x3f800000, 0x00000000, - 0xc0016900, - 0x00000292, + 0x3f800000, 0x00000000, - 0xc0066900, - 0x0000010f, + 0x3f800000, 0x00000000, + 0x3f800000, 0x00000000, + 0x3f800000, 0x00000000, + 0x3f800000, 0x00000000, + 0x3f800000, 0x00000000, + 0x3f800000, 0x00000000, - 0xc0016900, - 0x00000206, + 0x3f800000, 0x00000000, - 0xc0016900, - 0x00000207, + 0x3f800000, 0x00000000, - 0xc0016900, - 0x00000208, + 0x3f800000, 0x00000000, - 0xc0046900, - 0x00000303, 0x3f800000, + + 0xc0026900, + 0x00000292, + 0x00000000, /* PA_SC_MPASS_PS_CNTL */ + 0x00004010, /* PA_SC_MODE_CNTL */ + + 0xc0096900, + 0x00000300, + 0x00000000, /* PA_SC_LINE_CNTL */ + 0x00000000, /* PA_SC_AA_CONFIG */ + 0x0000002d, /* PA_SU_VTX_CNTL */ + 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */ 0x3f800000, 0x3f800000, 0x3f800000, - 0xc0016900, - 0x00000205, - 0x00000004, - 0xc0016900, - 0x00000280, - 0x00000000, - 0xc0016900, - 0x00000281, + 0x00000000, /* PA_SC_SAMPLE_LOCS_MCTX */ 0x00000000, + 0xc0016900, + 0x00000312, + 0xffffffff, /* PA_SC_AA_MASK */ + + 0xc0066900, 0x0000037e, - 0x00000000, - 0xc0016900, - 0x00000382, - 0x00000000, - 0xc0016900, - 0x00000380, - 0x00000000, - 0xc0016900, - 0x00000383, - 0x00000000, - 0xc0016900, - 0x00000381, - 0x00000000, - 0xc0016900, - 0x00000282, - 0x00000008, - 0xc0016900, - 0x00000302, - 0x0000002d, - 0xc0016900, - 0x0000037f, - 0x00000000, - 0xc0016900, - 0x000001b2, - 0x00000000, - 0xc0016900, + 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */ + 0x00000000, /* PA_SU_POLY_OFFSET_CLAMP */ + 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_SCALE */ + 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_OFFSET */ + 0x00000000, /* PA_SU_POLY_OFFSET_BACK_SCALE */ + 0x00000000, /* PA_SU_POLY_OFFSET_BACK_OFFSET */ + + 0xc0046900, 0x000001b6, - 0x00000000, - 0xc0016900, - 0x000001b7, - 0x00000000, - 0xc0016900, - 0x000001b8, - 0x00000000, - 0xc0016900, - 0x000001b9, - 0x00000000, + 0x00000000, /* SPI_INPUT_Z */ + 0x00000000, /* SPI_FOG_CNTL */ + 0x00000000, /* SPI_FOG_FUNC_SCALE */ + 0x00000000, /* SPI_FOG_FUNC_BIAS */ + 0xc0016900, 0x00000225, - 0x00000000, + 0x00000000, /* SQ_PGM_START_FS */ + 0xc0016900, 0x00000229, - 0x00000000, + 0x00000000, /* SQ_PGM_RESOURCES_FS */ + 0xc0016900, 0x00000237, - 0x00000000, - 0xc0016900, - 0x00000100, - 0x00000800, - 0xc0016900, - 0x00000101, - 0x00000000, - 0xc0016900, - 0x00000102, - 0x00000000, - 0xc0016900, + 0x00000000, /* SQ_PGM_CF_OFFSET_FS */ + + 0xc0026900, 0x000002a8, - 0x00000000, - 0xc0016900, - 0x000002a9, - 0x00000000, - 0xc0016900, - 0x00000103, - 0x00000000, - 0xc0016900, - 0x00000284, - 0x00000000, - 0xc0016900, - 0x00000290, - 0x00000000, - 0xc0016900, - 0x00000285, - 0x00000000, - 0xc0016900, - 0x00000286, - 0x00000000, - 0xc0016900, - 0x00000287, - 0x00000000, - 0xc0016900, - 0x00000288, - 0x00000000, - 0xc0016900, - 0x00000289, - 0x00000000, - 0xc0016900, - 0x0000028a, - 0x00000000, - 0xc0016900, - 0x0000028b, - 0x00000000, - 0xc0016900, - 
0x0000028c, - 0x00000000, - 0xc0016900, - 0x0000028d, - 0x00000000, - 0xc0016900, - 0x0000028e, - 0x00000000, - 0xc0016900, - 0x0000028f, - 0x00000000, + 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */ + 0x00000000, /* VGT_INSTANCE_STEP_RATE_1 */ + + 0xc0116900, + 0x00000280, + 0x00000000, /* PA_SU_POINT_SIZE */ + 0x00000000, /* PA_SU_POINT_MINMAX */ + 0x00000008, /* PA_SU_LINE_CNTL */ + 0x00000000, /* PA_SC_LINE_STIPPLE */ + 0x00000000, /* VGT_OUTPUT_PATH_CNTL */ + 0x00000000, /* VGT_HOS_CNTL */ + 0x00000000, /* VGT_HOS_MAX_TESS_LEVEL */ + 0x00000000, /* VGT_HOS_MIN_TESS_LEVEL */ + 0x00000000, /* VGT_HOS_REUSE_DEPTH */ + 0x00000000, /* VGT_GROUP_PRIM_TYPE */ + 0x00000000, /* VGT_GROUP_FIRST_DECR */ + 0x00000000, /* VGT_GROUP_DECR */ + 0x00000000, /* VGT_GROUP_VECT_0_CNTL */ + 0x00000000, /* VGT_GROUP_VECT_1_CNTL */ + 0x00000000, /* VGT_GROUP_VECT_0_FMT_CNTL */ + 0x00000000, /* VGT_GROUP_VECT_1_FMT_CNTL */ + 0x00000000, /* VGT_GS_MODE */ + 0xc0016900, 0x000002a1, - 0x00000000, + 0x00000000, /* VGT_PRIMITIVEID_EN */ + 0xc0016900, 0x000002a5, - 0x00000000, - 0xc0016900, + 0x00000000, /* VGT_MULTI_PRIM_ID_RESET_EN */ + + 0xc0036900, 0x000002ac, - 0x00000000, - 0xc0016900, - 0x000002ad, - 0x00000000, - 0xc0016900, - 0x000002ae, - 0x00000000, + 0x00000000, /* VGT_STRMOUT_EN */ + 0x00000000, /* VGT_REUSE_OFF */ + 0x00000000, /* VGT_VTX_CNT_EN */ + 0xc0016900, 0x000002c8, - 0x00000000, - 0xc0016900, - 0x00000206, - 0x00000100, - 0xc0016900, - 0x00000204, - 0x00010000, - 0xc0036e00, - 0x00000000, - 0x00000012, - 0x00000000, - 0x00000000, - 0xc0016900, - 0x0000008f, - 0x0000000f, - 0xc0016900, - 0x000001e8, - 0x00000001, - 0xc0016900, + 0x00000000, /* VGT_STRMOUT_BUFFER_EN */ + + 0xc0076900, 0x00000202, - 0x00cc0000, + 0x00cc0000, /* CB_COLOR_CONTROL */ + 0x00000210, /* DB_SHADER_CNTL */ + 0x00010000, /* PA_CL_CLIP_CNTL */ + 0x00000244, /* PA_SU_SC_MODE_CNTL */ + 0x00000100, /* PA_CL_VTE_CNTL */ + 0x00000000, /* PA_CL_VS_OUT_CNTL */ + 0x00000000, /* PA_CL_NANINF_CNTL */ + + 0xc0026900, + 0x0000008e, + 0x0000000f, /* CB_TARGET_MASK */ + 0x0000000f, /* CB_SHADER_MASK */ + 0xc0016900, - 0x00000205, - 0x00000244, + 0x000001e8, + 0x00000001, /* CB_SHADER_CONTROL */ + 0xc0016900, - 0x00000203, - 0x00000210, + 0x00000185, + 0x00000000, /* SPI_VS_OUT_ID_0 */ + 0xc0016900, + 0x00000191, + 0x00000b00, /* SPI_PS_INPUT_CNTL_0 */ + + 0xc0056900, 0x000001b1, + 0x00000000, /* SPI_VS_OUT_CONFIG */ + 0x00000000, /* SPI_THREAD_GROUPING */ + 0x00000001, /* SPI_PS_IN_CONTROL_0 */ + 0x00000000, /* SPI_PS_IN_CONTROL_1 */ + 0x00000000, /* SPI_INTERP_CONTROL_0 */ + + 0xc0036e00, /* SET_SAMPLER */ 0x00000000, - 0xc0016900, - 0x00000185, - 0x00000000, - 0xc0016900, - 0x000001b3, - 0x00000001, - 0xc0016900, - 0x000001b4, + 0x00000012, 0x00000000, - 0xc0016900, - 0x00000191, - 0x00000b00, - 0xc0016900, - 0x000001b5, 0x00000000, }; const u32 r7xx_default_state[] = { - 0xc0012800, + 0xc0012800, /* CONTEXT_CONTROL */ 0x80000000, 0x80000000, + 0xc0016800, 0x00000010, - 0x00008000, + 0x00008000, /* WAIT_UNTIL */ + 0xc0016800, 0x00000542, - 0x07000002, + 0x07000002, /* TA_CNTL_AUX */ + 0xc0016800, 0x000005c5, - 0x00000000, + 0x00000000, /* VC_ENHANCE */ + 0xc0016800, 0x00000363, - 0x00004000, + 0x00004000, /* SQ_DYN_GPR_CNTL_PS_FLUSH_REQ */ + 0xc0016800, 0x0000060c, - 0x00000000, + 0x00000000, /* DB_DEBUG */ + 0xc0016800, 0x0000060e, - 0x00420204, - 0xc0016f00, - 0x00000000, - 0x00000000, - 0xc0016f00, - 0x00000001, + 0x00420204, /* DB_WATERMARKS */ + + 0xc0026f00, 0x00000000, + 0x00000000, /* SQ_VTX_BASE_VTX_LOC */ + 0x00000000, /* 
SQ_VTX_START_INST_LOC */ + 0xc0096900, 0x0000022a, + 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */ 0x00000000, 0x00000000, 0x00000000, @@ -595,470 +405,269 @@ const u32 r7xx_default_state[] = 0x00000000, 0x00000000, 0x00000000, - 0x00000000, + 0xc0016900, 0x00000004, - 0x00000000, - 0xc0016900, + 0x00000000, /* DB_DEPTH_INFO */ + + 0xc0026900, 0x0000000a, - 0x00000000, - 0xc0016900, - 0x0000000b, - 0x00000000, - 0xc0016900, - 0x0000010c, - 0x00000000, - 0xc0016900, - 0x0000010d, - 0x00000000, + 0x00000000, /* DB_STENCIL_CLEAR */ + 0x00000000, /* DB_DEPTH_CLEAR */ + 0xc0016900, 0x00000200, - 0x00000000, - 0xc0016900, + 0x00000000, /* DB_DEPTH_CONTROL */ + + 0xc0026900, 0x00000343, - 0x00000060, - 0xc0016900, - 0x00000344, - 0x00000000, + 0x00000060, /* DB_RENDER_CONTROL */ + 0x00000000, /* DB_RENDER_OVERRIDE */ + 0xc0016900, 0x00000351, - 0x0000aa00, - 0xc0016900, - 0x00000104, - 0x00000000, - 0xc0016900, - 0x0000010e, - 0x00000000, - 0xc0046900, - 0x00000105, - 0x00000000, + 0x0000aa00, /* DB_ALPHA_TO_MASK */ + + 0xc0096900, + 0x00000100, + 0x00000800, /* VGT_MAX_VTX_INDX */ + 0x00000000, /* VGT_MIN_VTX_INDX */ + 0x00000000, /* VGT_INDX_OFFSET */ + 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */ + 0x00000000, /* SX_ALPHA_TEST_CONTROL */ + 0x00000000, /* CB_BLEND_RED */ 0x00000000, 0x00000000, 0x00000000, + + 0xc0036900, + 0x0000010c, + 0x00000000, /* DB_STENCILREFMASK */ + 0x00000000, /* DB_STENCILREFMASK_BF */ + 0x00000000, /* SX_ALPHA_REF */ + 0xc0046900, - 0x0000030c, + 0x0000030c, /* CB_CLRCMP_CNTL */ 0x01000000, 0x00000000, 0x00000000, 0x00000000, - 0xc0016900, - 0x0000008e, - 0x0000000f, + 0xc0016900, 0x00000080, - 0x00000000, - 0xc0016900, + 0x00000000, /* PA_SC_WINDOW_OFFSET */ + + 0xc00a6900, 0x00000083, - 0x0000ffff, - 0xc0016900, - 0x00000084, - 0x00000000, - 0xc0016900, - 0x00000085, + 0x0000ffff, /* PA_SC_CLIP_RECT_RULE */ + 0x00000000, /* PA_SC_CLIPRECT_0_TL */ 0x20002000, - 0xc0016900, - 0x00000086, 0x00000000, - 0xc0016900, - 0x00000087, 0x20002000, - 0xc0016900, - 0x00000088, 0x00000000, - 0xc0016900, - 0x00000089, 0x20002000, - 0xc0016900, - 0x0000008a, 0x00000000, - 0xc0016900, - 0x0000008b, 0x20002000, - 0xc0016900, - 0x0000008c, - 0xaaaaaaaa, - 0xc0016900, + 0xaaaaaaaa, /* PA_SC_EDGERULE */ + + 0xc0406900, 0x00000094, - 0x80000000, - 0xc0016900, - 0x00000095, + 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */ + 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */ + 0x80000000, /* PA_SC_VPORT_SCISSOR_1_TL */ 0x20002000, - 0xc0026900, - 0x000000b4, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x00000096, 0x80000000, - 0xc0016900, - 0x00000097, 0x20002000, - 0xc0026900, - 0x000000b6, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x00000098, 0x80000000, - 0xc0016900, - 0x00000099, 0x20002000, - 0xc0026900, - 0x000000b8, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x0000009a, 0x80000000, - 0xc0016900, - 0x0000009b, 0x20002000, - 0xc0026900, - 0x000000ba, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x0000009c, 0x80000000, - 0xc0016900, - 0x0000009d, 0x20002000, - 0xc0026900, - 0x000000bc, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x0000009e, 0x80000000, - 0xc0016900, - 0x0000009f, 0x20002000, - 0xc0026900, - 0x000000be, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x000000a0, 0x80000000, - 0xc0016900, - 0x000000a1, 0x20002000, - 0xc0026900, - 0x000000c0, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x000000a2, 0x80000000, - 0xc0016900, - 0x000000a3, 0x20002000, - 0xc0026900, - 0x000000c2, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x000000a4, 0x80000000, - 0xc0016900, - 0x000000a5, 0x20002000, - 
0xc0026900, - 0x000000c4, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x000000a6, 0x80000000, - 0xc0016900, - 0x000000a7, 0x20002000, - 0xc0026900, - 0x000000c6, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x000000a8, 0x80000000, - 0xc0016900, - 0x000000a9, 0x20002000, - 0xc0026900, - 0x000000c8, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x000000aa, 0x80000000, - 0xc0016900, - 0x000000ab, 0x20002000, - 0xc0026900, - 0x000000ca, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x000000ac, 0x80000000, - 0xc0016900, - 0x000000ad, 0x20002000, - 0xc0026900, - 0x000000cc, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x000000ae, 0x80000000, - 0xc0016900, - 0x000000af, 0x20002000, - 0xc0026900, - 0x000000ce, - 0x00000000, - 0x3f800000, - 0xc0016900, - 0x000000b0, 0x80000000, - 0xc0016900, - 0x000000b1, 0x20002000, - 0xc0026900, - 0x000000d0, - 0x00000000, + 0x00000000, /* PA_SC_VPORT_ZMIN_0 */ 0x3f800000, - 0xc0016900, - 0x000000b2, - 0x80000000, - 0xc0016900, - 0x000000b3, - 0x20002000, - 0xc0026900, - 0x000000d2, 0x00000000, 0x3f800000, - 0xc0016900, - 0x00000293, - 0x00514000, - 0xc0016900, - 0x00000300, - 0x00000000, - 0xc0016900, - 0x00000301, 0x00000000, - 0xc0016900, - 0x00000312, - 0xffffffff, - 0xc0016900, - 0x00000307, + 0x3f800000, 0x00000000, - 0xc0016900, - 0x00000308, + 0x3f800000, 0x00000000, - 0xc0016900, - 0x00000283, + 0x3f800000, 0x00000000, - 0xc0016900, - 0x00000292, + 0x3f800000, 0x00000000, - 0xc0066900, - 0x0000010f, + 0x3f800000, 0x00000000, + 0x3f800000, 0x00000000, + 0x3f800000, 0x00000000, + 0x3f800000, 0x00000000, + 0x3f800000, 0x00000000, + 0x3f800000, 0x00000000, - 0xc0016900, - 0x00000206, + 0x3f800000, 0x00000000, - 0xc0016900, - 0x00000207, + 0x3f800000, 0x00000000, - 0xc0016900, - 0x00000208, + 0x3f800000, 0x00000000, - 0xc0046900, - 0x00000303, 0x3f800000, + + 0xc0026900, + 0x00000292, + 0x00000000, /* PA_SC_MPASS_PS_CNTL */ + 0x00514000, /* PA_SC_MODE_CNTL */ + + 0xc0096900, + 0x00000300, + 0x00000000, /* PA_SC_LINE_CNTL */ + 0x00000000, /* PA_SC_AA_CONFIG */ + 0x0000002d, /* PA_SU_VTX_CNTL */ + 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */ 0x3f800000, 0x3f800000, 0x3f800000, - 0xc0016900, - 0x00000205, - 0x00000004, - 0xc0016900, - 0x00000280, - 0x00000000, - 0xc0016900, - 0x00000281, + 0x00000000, /* PA_SC_SAMPLE_LOCS_MCTX */ 0x00000000, + 0xc0016900, + 0x00000312, + 0xffffffff, /* PA_SC_AA_MASK */ + + 0xc0066900, 0x0000037e, - 0x00000000, - 0xc0016900, - 0x00000382, - 0x00000000, - 0xc0016900, - 0x00000380, - 0x00000000, - 0xc0016900, - 0x00000383, - 0x00000000, - 0xc0016900, - 0x00000381, - 0x00000000, - 0xc0016900, - 0x00000282, - 0x00000008, - 0xc0016900, - 0x00000302, - 0x0000002d, - 0xc0016900, - 0x0000037f, - 0x00000000, - 0xc0016900, - 0x000001b2, - 0x00000001, - 0xc0016900, + 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */ + 0x00000000, /* PA_SU_POLY_OFFSET_CLAMP */ + 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_SCALE */ + 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_OFFSET */ + 0x00000000, /* PA_SU_POLY_OFFSET_BACK_SCALE */ + 0x00000000, /* PA_SU_POLY_OFFSET_BACK_OFFSET */ + + 0xc0046900, 0x000001b6, - 0x00000000, - 0xc0016900, - 0x000001b7, - 0x00000000, - 0xc0016900, - 0x000001b8, - 0x00000000, - 0xc0016900, - 0x000001b9, - 0x00000000, + 0x00000000, /* SPI_INPUT_Z */ + 0x00000000, /* SPI_FOG_CNTL */ + 0x00000000, /* SPI_FOG_FUNC_SCALE */ + 0x00000000, /* SPI_FOG_FUNC_BIAS */ + 0xc0016900, 0x00000225, - 0x00000000, + 0x00000000, /* SQ_PGM_START_FS */ + 0xc0016900, 0x00000229, - 0x00000000, + 0x00000000, /* SQ_PGM_RESOURCES_FS */ + 0xc0016900, 0x00000237, - 0x00000000, 
- 0xc0016900, - 0x00000100, - 0x00000800, - 0xc0016900, - 0x00000101, - 0x00000000, - 0xc0016900, - 0x00000102, - 0x00000000, - 0xc0016900, + 0x00000000, /* SQ_PGM_CF_OFFSET_FS */ + + 0xc0026900, 0x000002a8, - 0x00000000, - 0xc0016900, - 0x000002a9, - 0x00000000, - 0xc0016900, - 0x00000103, - 0x00000000, - 0xc0016900, - 0x00000284, - 0x00000000, - 0xc0016900, - 0x00000290, - 0x00000000, - 0xc0016900, - 0x00000285, - 0x00000000, - 0xc0016900, - 0x00000286, - 0x00000000, - 0xc0016900, - 0x00000287, - 0x00000000, - 0xc0016900, - 0x00000288, - 0x00000000, - 0xc0016900, - 0x00000289, - 0x00000000, - 0xc0016900, - 0x0000028a, - 0x00000000, - 0xc0016900, - 0x0000028b, - 0x00000000, - 0xc0016900, - 0x0000028c, - 0x00000000, - 0xc0016900, - 0x0000028d, - 0x00000000, - 0xc0016900, - 0x0000028e, - 0x00000000, - 0xc0016900, - 0x0000028f, - 0x00000000, + 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */ + 0x00000000, /* VGT_INSTANCE_STEP_RATE_1 */ + + 0xc0116900, + 0x00000280, + 0x00000000, /* PA_SU_POINT_SIZE */ + 0x00000000, /* PA_SU_POINT_MINMAX */ + 0x00000008, /* PA_SU_LINE_CNTL */ + 0x00000000, /* PA_SC_LINE_STIPPLE */ + 0x00000000, /* VGT_OUTPUT_PATH_CNTL */ + 0x00000000, /* VGT_HOS_CNTL */ + 0x00000000, /* VGT_HOS_MAX_TESS_LEVEL */ + 0x00000000, /* VGT_HOS_MIN_TESS_LEVEL */ + 0x00000000, /* VGT_HOS_REUSE_DEPTH */ + 0x00000000, /* VGT_GROUP_PRIM_TYPE */ + 0x00000000, /* VGT_GROUP_FIRST_DECR */ + 0x00000000, /* VGT_GROUP_DECR */ + 0x00000000, /* VGT_GROUP_VECT_0_CNTL */ + 0x00000000, /* VGT_GROUP_VECT_1_CNTL */ + 0x00000000, /* VGT_GROUP_VECT_0_FMT_CNTL */ + 0x00000000, /* VGT_GROUP_VECT_1_FMT_CNTL */ + 0x00000000, /* VGT_GS_MODE */ + 0xc0016900, 0x000002a1, - 0x00000000, + 0x00000000, /* VGT_PRIMITIVEID_EN */ + 0xc0016900, 0x000002a5, - 0x00000000, - 0xc0016900, + 0x00000000, /* VGT_MULTI_PRIM_ID_RESET_EN */ + + 0xc0036900, 0x000002ac, - 0x00000000, - 0xc0016900, - 0x000002ad, - 0x00000000, - 0xc0016900, - 0x000002ae, - 0x00000000, + 0x00000000, /* VGT_STRMOUT_EN */ + 0x00000000, /* VGT_REUSE_OFF */ + 0x00000000, /* VGT_VTX_CNT_EN */ + 0xc0016900, 0x000002c8, - 0x00000000, - 0xc0016900, - 0x00000206, - 0x00000100, - 0xc0016900, - 0x00000204, - 0x00010000, - 0xc0036e00, - 0x00000000, - 0x00000012, - 0x00000000, - 0x00000000, - 0xc0016900, - 0x0000008f, - 0x0000000f, - 0xc0016900, - 0x000001e8, - 0x00000001, - 0xc0016900, + 0x00000000, /* VGT_STRMOUT_BUFFER_EN */ + + 0xc0076900, 0x00000202, - 0x00cc0000, + 0x00cc0000, /* CB_COLOR_CONTROL */ + 0x00000210, /* DB_SHADER_CNTL */ + 0x00010000, /* PA_CL_CLIP_CNTL */ + 0x00000244, /* PA_SU_SC_MODE_CNTL */ + 0x00000100, /* PA_CL_VTE_CNTL */ + 0x00000000, /* PA_CL_VS_OUT_CNTL */ + 0x00000000, /* PA_CL_NANINF_CNTL */ + + 0xc0026900, + 0x0000008e, + 0x0000000f, /* CB_TARGET_MASK */ + 0x0000000f, /* CB_SHADER_MASK */ + 0xc0016900, - 0x00000205, - 0x00000244, + 0x000001e8, + 0x00000001, /* CB_SHADER_CONTROL */ + 0xc0016900, - 0x00000203, - 0x00000210, + 0x00000185, + 0x00000000, /* SPI_VS_OUT_ID_0 */ + 0xc0016900, + 0x00000191, + 0x00000b00, /* SPI_PS_INPUT_CNTL_0 */ + + 0xc0056900, 0x000001b1, + 0x00000000, /* SPI_VS_OUT_CONFIG */ + 0x00000001, /* SPI_THREAD_GROUPING */ + 0x00000001, /* SPI_PS_IN_CONTROL_0 */ + 0x00000000, /* SPI_PS_IN_CONTROL_1 */ + 0x00000000, /* SPI_INTERP_CONTROL_0 */ + + 0xc0036e00, /* SET_SAMPLER */ 0x00000000, - 0xc0016900, - 0x00000185, - 0x00000000, - 0xc0016900, - 0x000001b3, - 0x00000001, - 0xc0016900, - 0x000001b4, + 0x00000012, 0x00000000, - 0xc0016900, - 0x00000191, - 0x00000b00, - 0xc0016900, - 0x000001b5, 0x00000000, }; diff --git 
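The restructuring of the two default-state tables above folds long runs of single-register writes into one multi-register SET_CONTEXT_REG packet each. For reference, the header dwords follow the usual PM4 type-3 encoding; a simplified form of the PACKET3() helper from r600d.h, with example values taken from the tables:

/* type 3 in bits 31:30, (dwords following the header - 1) in bits 29:16,
 * opcode in bits 15:8 */
#define PACKET3(op, n)  ((3u << 30) | (((n) & 0x3FFF) << 16) | (((op) & 0xFF) << 8))

/* SET_CONTEXT_REG is opcode 0x69, so:
 *   0xc0016900 == PACKET3(0x69, 1):    one offset dword + 1 register value
 *   0xc0406900 == PACKET3(0x69, 0x40): one offset dword + 64 values, i.e.
 *                 the whole PA_SC_VPORT_SCISSOR/ZMIN/ZMAX block in one packet
 * The SET_CONFIG_REG (0xc0..68..) and SET_SAMPLER (0xc0..6e..) headers in
 * the same tables follow the identical scheme.
 */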
a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 144c32d3713..c3ea212e0c3 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -25,6 +25,7 @@ * Alex Deucher * Jerome Glisse */ +#include <linux/kernel.h> #include "drmP.h" #include "radeon.h" #include "r600d.h" @@ -166,7 +167,7 @@ static void r600_cs_track_init(struct r600_cs_track *track) static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) { struct r600_cs_track *track = p->track; - u32 bpe = 0, pitch, slice_tile_max, size, tmp, height; + u32 bpe = 0, pitch, slice_tile_max, size, tmp, height, pitch_align; volatile u32 *ib = p->ib->ptr; if (G_0280A0_TILE_MODE(track->cb_color_info[i])) { @@ -180,56 +181,57 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) i, track->cb_color_info[i]); return -EINVAL; } - pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) << 3; + /* pitch is the number of 8x8 tiles per row */ + pitch = G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1; slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1; - if (!pitch) { - dev_warn(p->dev, "%s:%d cb pitch (%d) for %d invalid (0x%08X)\n", - __func__, __LINE__, pitch, i, track->cb_color_size[i]); - return -EINVAL; - } - height = size / (pitch * bpe); + height = size / (pitch * 8 * bpe); if (height > 8192) height = 8192; + if (height > 7) + height &= ~0x7; switch (G_0280A0_ARRAY_MODE(track->cb_color_info[i])) { case V_0280A0_ARRAY_LINEAR_GENERAL: + /* technically height & 0x7 */ + break; case V_0280A0_ARRAY_LINEAR_ALIGNED: - if (pitch & 0x3f) { - dev_warn(p->dev, "%s:%d cb pitch (%d x %d = %d) invalid\n", - __func__, __LINE__, pitch, bpe, pitch * bpe); + pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8; + if (!IS_ALIGNED(pitch, pitch_align)) { + dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", + __func__, __LINE__, pitch); return -EINVAL; } - if ((pitch * bpe) & (track->group_size - 1)) { - dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", - __func__, __LINE__, pitch); + if (!IS_ALIGNED(height, 8)) { + dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", + __func__, __LINE__, height); return -EINVAL; } break; case V_0280A0_ARRAY_1D_TILED_THIN1: - if ((pitch * 8 * bpe * track->nsamples) & (track->group_size - 1)) { + pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe * track->nsamples))) / 8; + if (!IS_ALIGNED(pitch, pitch_align)) { dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", - __func__, __LINE__, pitch); + __func__, __LINE__, pitch); + return -EINVAL; + } + if (!IS_ALIGNED(height, 8)) { + dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", + __func__, __LINE__, height); return -EINVAL; } - height &= ~0x7; - if (!height) - height = 8; break; case V_0280A0_ARRAY_2D_TILED_THIN1: - if (pitch & ((8 * track->nbanks) - 1)) { + pitch_align = max((u32)track->nbanks, + (u32)(((track->group_size / 8) / (bpe * track->nsamples)) * track->nbanks)); + if (!IS_ALIGNED(pitch, pitch_align)) { dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", __func__, __LINE__, pitch); return -EINVAL; } - tmp = pitch * 8 * bpe * track->nsamples; - tmp = tmp / track->nbanks; - if (tmp & (track->group_size - 1)) { - dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", - __func__, __LINE__, pitch); + if (!IS_ALIGNED((height / 8), track->nbanks)) { + dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", + __func__, __LINE__, height); return -EINVAL; } - height &= ~((16 * track->npipes) - 1); - if (!height) - height = 16 * track->npipes; break; 
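To make the new alignment rules above concrete, a worked example with assumed typical values rather than anything taken from a specific ASIC:

/* Assume group_size = 256 bytes, bpe = 4 (32-bit pixels), nsamples = 1.
 * "pitch" counts 8-pixel tiles, per the new comment in the patch.
 *
 *   ARRAY_LINEAR_ALIGNED: pitch_align = max(64, 256 / 4) / 8 = 8
 *       -> pitch must be a multiple of 8 tiles = 64 pixels,
 *          i.e. 64 * 4 = 256 bytes = one group (the old byte-based rule,
 *          now expressed in tiles).
 *
 *   ARRAY_1D_TILED_THIN1: pitch_align = max(8, 256 / (8 * 4 * 1)) / 8 = 1
 *       -> a single 8x8 tile row already spans 8 * 8 * 4 = 256 bytes = one
 *          group, so any whole-tile pitch passes; the height test tightens
 *          to IS_ALIGNED(height, 8) instead of silently rounding down.
 *
 * The depth-buffer and texture paths below apply the same formulas.
 */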
default: dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, @@ -238,16 +240,20 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) return -EINVAL; } /* check offset */ - tmp = height * pitch; + tmp = height * pitch * 8 * bpe; if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { - dev_warn(p->dev, "%s offset[%d] %d to big\n", __func__, i, track->cb_color_bo_offset[i]); + dev_warn(p->dev, "%s offset[%d] %d too big\n", __func__, i, track->cb_color_bo_offset[i]); + return -EINVAL; + } + if (!IS_ALIGNED(track->cb_color_bo_offset[i], track->group_size)) { + dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->cb_color_bo_offset[i]); return -EINVAL; } /* limit max tile */ - tmp = (height * pitch) >> 6; + tmp = (height * pitch * 8) >> 6; if (tmp < slice_tile_max) slice_tile_max = tmp; - tmp = S_028060_PITCH_TILE_MAX((pitch >> 3) - 1) | + tmp = S_028060_PITCH_TILE_MAX(pitch - 1) | S_028060_SLICE_TILE_MAX(slice_tile_max - 1); ib[track->cb_color_size_idx[i]] = tmp; return 0; @@ -289,7 +295,7 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) /* Check depth buffer */ if (G_028800_STENCIL_ENABLE(track->db_depth_control) || G_028800_Z_ENABLE(track->db_depth_control)) { - u32 nviews, bpe, ntiles; + u32 nviews, bpe, ntiles, pitch, pitch_align, height, size; if (track->db_bo == NULL) { dev_warn(p->dev, "z/stencil with no depth buffer\n"); return -EINVAL; @@ -332,6 +338,51 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) } ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); } else { + size = radeon_bo_size(track->db_bo); + pitch = G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1; + height = size / (pitch * 8 * bpe); + height &= ~0x7; + if (!height) + height = 8; + + switch (G_028010_ARRAY_MODE(track->db_depth_info)) { + case V_028010_ARRAY_1D_TILED_THIN1: + pitch_align = (max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8); + if (!IS_ALIGNED(pitch, pitch_align)) { + dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n", + __func__, __LINE__, pitch); + return -EINVAL; + } + if (!IS_ALIGNED(height, 8)) { + dev_warn(p->dev, "%s:%d db height (%d) invalid\n", + __func__, __LINE__, height); + return -EINVAL; + } + break; + case V_028010_ARRAY_2D_TILED_THIN1: + pitch_align = max((u32)track->nbanks, + (u32)(((track->group_size / 8) / bpe) * track->nbanks)); + if (!IS_ALIGNED(pitch, pitch_align)) { + dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n", + __func__, __LINE__, pitch); + return -EINVAL; + } + if ((height / 8) & (track->nbanks - 1)) { + dev_warn(p->dev, "%s:%d db height (%d) invalid\n", + __func__, __LINE__, height); + return -EINVAL; + } + break; + default: + dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, + G_028010_ARRAY_MODE(track->db_depth_info), + track->db_depth_info); + return -EINVAL; + } + if (!IS_ALIGNED(track->db_offset, track->group_size)) { + dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->db_offset); + return -EINVAL; + } ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; tmp = ntiles * bpe * 64 * nviews; @@ -724,7 +775,25 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx track->db_depth_control = radeon_get_ib_value(p, idx); break; case R_028010_DB_DEPTH_INFO: - track->db_depth_info = radeon_get_ib_value(p, idx); + if (r600_cs_packet_next_is_pkt3_nop(p)) { + r = r600_cs_packet_next_reloc(p, 
&reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + track->db_depth_info = radeon_get_ib_value(p, idx); + ib[idx] &= C_028010_ARRAY_MODE; + track->db_depth_info &= C_028010_ARRAY_MODE; + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { + ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1); + track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1); + } else { + ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1); + track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1); + } + } else + track->db_depth_info = radeon_get_ib_value(p, idx); break; case R_028004_DB_DEPTH_VIEW: track->db_depth_view = radeon_get_ib_value(p, idx); @@ -757,8 +826,25 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx case R_0280B4_CB_COLOR5_INFO: case R_0280B8_CB_COLOR6_INFO: case R_0280BC_CB_COLOR7_INFO: - tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4; - track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); + if (r600_cs_packet_next_is_pkt3_nop(p)) { + r = r600_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); + return -EINVAL; + } + tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4; + track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { + ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1); + track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1); + } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { + ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1); + track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1); + } + } else { + tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4; + track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); + } break; case R_028060_CB_COLOR0_SIZE: case R_028064_CB_COLOR1_SIZE: @@ -946,8 +1032,9 @@ static inline unsigned minify(unsigned size, unsigned levels) } static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels, - unsigned w0, unsigned h0, unsigned d0, unsigned bpe, - unsigned *l0_size, unsigned *mipmap_size) + unsigned w0, unsigned h0, unsigned d0, unsigned bpe, + unsigned pitch_align, + unsigned *l0_size, unsigned *mipmap_size) { unsigned offset, i, level, face; unsigned width, height, depth, rowstride, size; @@ -960,13 +1047,13 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels height = minify(h0, i); depth = minify(d0, i); for(face = 0; face < nfaces; face++) { - rowstride = ((width * bpe) + 255) & ~255; + rowstride = ALIGN((width * bpe), pitch_align); size = height * rowstride * depth; offset += size; offset = (offset + 0x1f) & ~0x1f; } } - *l0_size = (((w0 * bpe) + 255) & ~255) * h0 * d0; + *l0_size = ALIGN((w0 * bpe), pitch_align) * h0 * d0; *mipmap_size = offset; if (!blevel) *mipmap_size -= *l0_size; @@ -985,16 +1072,23 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels * the texture and mipmap bo object are big enough to cover this resource. 
*/ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, - struct radeon_bo *texture, - struct radeon_bo *mipmap) + struct radeon_bo *texture, + struct radeon_bo *mipmap, + u32 tiling_flags) { + struct r600_cs_track *track = p->track; u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0; - u32 word0, word1, l0_size, mipmap_size; + u32 word0, word1, l0_size, mipmap_size, pitch, pitch_align; /* on legacy kernel we don't perform advanced check */ if (p->rdev == NULL) return 0; + word0 = radeon_get_ib_value(p, idx + 0); + if (tiling_flags & RADEON_TILING_MACRO) + word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); + else if (tiling_flags & RADEON_TILING_MICRO) + word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); word1 = radeon_get_ib_value(p, idx + 1); w0 = G_038000_TEX_WIDTH(word0) + 1; h0 = G_038004_TEX_HEIGHT(word1) + 1; @@ -1021,11 +1115,55 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i __func__, __LINE__, G_038004_DATA_FORMAT(word1)); return -EINVAL; } + + pitch = G_038000_PITCH(word0) + 1; + switch (G_038000_TILE_MODE(word0)) { + case V_038000_ARRAY_LINEAR_GENERAL: + pitch_align = 1; + /* XXX check height align */ + break; + case V_038000_ARRAY_LINEAR_ALIGNED: + pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8; + if (!IS_ALIGNED(pitch, pitch_align)) { + dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", + __func__, __LINE__, pitch); + return -EINVAL; + } + /* XXX check height align */ + break; + case V_038000_ARRAY_1D_TILED_THIN1: + pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8; + if (!IS_ALIGNED(pitch, pitch_align)) { + dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", + __func__, __LINE__, pitch); + return -EINVAL; + } + /* XXX check height align */ + break; + case V_038000_ARRAY_2D_TILED_THIN1: + pitch_align = max((u32)track->nbanks, + (u32)(((track->group_size / 8) / bpe) * track->nbanks)); + if (!IS_ALIGNED(pitch, pitch_align)) { + dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", + __func__, __LINE__, pitch); + return -EINVAL; + } + /* XXX check height align */ + break; + default: + dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__, + G_038000_TILE_MODE(word0), word0); + return -EINVAL; + } + /* XXX check offset align */ + word0 = radeon_get_ib_value(p, idx + 4); word1 = radeon_get_ib_value(p, idx + 5); blevel = G_038010_BASE_LEVEL(word0); nlevels = G_038014_LAST_LEVEL(word1); - r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe, &l0_size, &mipmap_size); + r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe, + (pitch_align * bpe), + &l0_size, &mipmap_size); /* using get ib will give us the offset into the texture bo */ word0 = radeon_get_ib_value(p, idx + 2); if ((l0_size + word0) > radeon_bo_size(texture)) { @@ -1239,6 +1377,10 @@ static int r600_packet3_check(struct radeon_cs_parser *p, return -EINVAL; } ib[idx+1+(i*7)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) + ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); + else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) + ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); texture = reloc->robj; /* tex mip base */ r = r600_cs_packet_next_reloc(p, &reloc); @@ -1249,7 +1391,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, ib[idx+1+(i*7)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); mipmap = reloc->robj; r = r600_check_texture_resource(p, idx+(i*7)+1, - 
texture, mipmap); + texture, mipmap, reloc->lobj.tiling_flags); if (r) return r; break; diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index 26b4bc9d89a..e6a58ed48dc 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c @@ -435,7 +435,8 @@ static int r600_hdmi_find_free_block(struct drm_device *dev) } } - if (rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690) { + if (rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690 || + rdev->family == CHIP_RS740) { return free_blocks[0] ? R600_HDMI_BLOCK1 : 0; } else if (rdev->family >= CHIP_R600) { if (free_blocks[0]) @@ -466,7 +467,8 @@ static void r600_hdmi_assign_block(struct drm_encoder *encoder) if (ASIC_IS_DCE32(rdev)) radeon_encoder->hdmi_config_offset = dig->dig_encoder ? R600_HDMI_CONFIG2 : R600_HDMI_CONFIG1; - } else if (rdev->family >= CHIP_R600) { + } else if (rdev->family >= CHIP_R600 || rdev->family == CHIP_RS600 || + rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) { radeon_encoder->hdmi_offset = r600_hdmi_find_free_block(dev); } } diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 59c1f8793e6..858a1920c0d 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -239,12 +239,18 @@ #define GRBM_SOFT_RESET 0x8020 #define SOFT_RESET_CP (1<<0) +#define CG_THERMAL_STATUS 0x7F4 +#define ASIC_T(x) ((x) << 0) +#define ASIC_T_MASK 0x1FF +#define ASIC_T_SHIFT 0 + #define HDP_HOST_PATH_CNTL 0x2C00 #define HDP_NONSURFACE_BASE 0x2C04 #define HDP_NONSURFACE_INFO 0x2C08 #define HDP_NONSURFACE_SIZE 0x2C0C #define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 #define HDP_TILING_CONFIG 0x2F3C +#define HDP_DEBUG1 0x2F34 #define MC_VM_AGP_TOP 0x2184 #define MC_VM_AGP_BOT 0x2188 @@ -1154,6 +1160,10 @@ #define S_038000_TILE_MODE(x) (((x) & 0xF) << 3) #define G_038000_TILE_MODE(x) (((x) >> 3) & 0xF) #define C_038000_TILE_MODE 0xFFFFFF87 +#define V_038000_ARRAY_LINEAR_GENERAL 0x00000000 +#define V_038000_ARRAY_LINEAR_ALIGNED 0x00000001 +#define V_038000_ARRAY_1D_TILED_THIN1 0x00000002 +#define V_038000_ARRAY_2D_TILED_THIN1 0x00000004 #define S_038000_TILE_TYPE(x) (((x) & 0x1) << 7) #define G_038000_TILE_TYPE(x) (((x) >> 7) & 0x1) #define C_038000_TILE_TYPE 0xFFFFFF7F @@ -1357,6 +1367,8 @@ #define S_028010_ARRAY_MODE(x) (((x) & 0xF) << 15) #define G_028010_ARRAY_MODE(x) (((x) >> 15) & 0xF) #define C_028010_ARRAY_MODE 0xFFF87FFF +#define V_028010_ARRAY_1D_TILED_THIN1 0x00000002 +#define V_028010_ARRAY_2D_TILED_THIN1 0x00000004 #define S_028010_TILE_SURFACE_ENABLE(x) (((x) & 0x1) << 25) #define G_028010_TILE_SURFACE_ENABLE(x) (((x) >> 25) & 0x1) #define C_028010_TILE_SURFACE_ENABLE 0xFDFFFFFF diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 2f94dc66c18..3cd1c470b77 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -178,6 +178,9 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev); void radeon_atombios_get_power_modes(struct radeon_device *rdev); void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level); void rs690_pm_info(struct radeon_device *rdev); +extern u32 rv6xx_get_temp(struct radeon_device *rdev); +extern u32 rv770_get_temp(struct radeon_device *rdev); +extern u32 evergreen_get_temp(struct radeon_device *rdev); /* * Fences. 
@@ -232,7 +235,7 @@ struct radeon_surface_reg { */ struct radeon_mman { struct ttm_bo_global_ref bo_global_ref; - struct ttm_global_reference mem_global_ref; + struct drm_global_reference mem_global_ref; struct ttm_bo_device bdev; bool mem_global_referenced; bool initialized; @@ -671,6 +674,13 @@ struct radeon_pm_profile { int dpms_on_cm_idx; }; +enum radeon_int_thermal_type { + THERMAL_TYPE_NONE, + THERMAL_TYPE_RV6XX, + THERMAL_TYPE_RV770, + THERMAL_TYPE_EVERGREEN, +}; + struct radeon_voltage { enum radeon_voltage_type type; /* gpio voltage */ @@ -766,6 +776,9 @@ struct radeon_pm { enum radeon_pm_profile_type profile; int profile_index; struct radeon_pm_profile profiles[PM_PROFILE_MAX]; + /* internal thermal controller on rv6xx+ */ + enum radeon_int_thermal_type int_thermal_type; + struct device *int_hwmon_dev; }; @@ -902,6 +915,7 @@ struct r600_asic { unsigned tiling_nbanks; unsigned tiling_npipes; unsigned tiling_group_size; + unsigned tile_config; struct r100_gpu_lockup lockup; }; @@ -926,6 +940,7 @@ struct rv770_asic { unsigned tiling_nbanks; unsigned tiling_npipes; unsigned tiling_group_size; + unsigned tile_config; struct r100_gpu_lockup lockup; }; @@ -951,6 +966,7 @@ struct evergreen_asic { unsigned tiling_nbanks; unsigned tiling_npipes; unsigned tiling_group_size; + unsigned tile_config; }; union radeon_asic_config { @@ -1033,6 +1049,9 @@ struct radeon_device { uint32_t pcie_reg_mask; radeon_rreg_t pciep_rreg; radeon_wreg_t pciep_wreg; + /* io port */ + void __iomem *rio_mem; + resource_size_t rio_mem_size; struct radeon_clock clock; struct radeon_mc mc; struct radeon_gart gart; @@ -1069,6 +1088,7 @@ struct radeon_device { struct mutex vram_mutex; /* audio stuff */ + bool audio_enabled; struct timer_list audio_timer; int audio_channels; int audio_rate; @@ -1078,6 +1098,8 @@ struct radeon_device { bool powered_down; struct notifier_block acpi_nb; + /* only one userspace can use Hyperz features at a time */ + struct drm_file *hyperz_filp; }; int radeon_device_init(struct radeon_device *rdev, @@ -1114,6 +1136,26 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32 } } +static inline u32 r100_io_rreg(struct radeon_device *rdev, u32 reg) +{ + if (reg < rdev->rio_mem_size) + return ioread32(rdev->rio_mem + reg); + else { + iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX); + return ioread32(rdev->rio_mem + RADEON_MM_DATA); + } +} + +static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v) +{ + if (reg < rdev->rio_mem_size) + iowrite32(v, rdev->rio_mem + reg); + else { + iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX); + iowrite32(v, rdev->rio_mem + RADEON_MM_DATA); + } +} + /* * Cast helper */ @@ -1152,6 +1194,8 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32 WREG32_PLL(reg, tmp_); \ } while (0) #define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg))) +#define RREG32_IO(reg) r100_io_rreg(rdev, (reg)) +#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v)) /* * Indirect registers accessor @@ -1415,6 +1459,13 @@ extern void r700_cp_fini(struct radeon_device *rdev); extern void evergreen_disable_interrupt_state(struct radeon_device *rdev); extern int evergreen_irq_set(struct radeon_device *rdev); +/* radeon_acpi.c */ +#if defined(CONFIG_ACPI) +extern int radeon_acpi_init(struct radeon_device *rdev); +#else +static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; } +#endif + /* evergreen */ struct evergreen_mc_save { u32 vga_control[6]; diff 
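The new int_thermal_type / int_hwmon_dev fields above, together with the rv6xx/rv770/evergreen_get_temp() helpers declared earlier in this patch, are what a hwmon temperature attribute would sit on. A minimal sketch of such a show callback, assuming the hwmon class device is registered with the drm_device as its driver data; that wiring is not part of the hunks shown here:

static ssize_t radeon_hwmon_show_temp(struct device *dev,
                                      struct device_attribute *attr,
                                      char *buf)
{
        struct drm_device *ddev = dev_get_drvdata(dev);   /* assumed wiring */
        struct radeon_device *rdev = ddev->dev_private;
        u32 temp;

        switch (rdev->pm.int_thermal_type) {
        case THERMAL_TYPE_RV6XX:
                temp = rv6xx_get_temp(rdev);
                break;
        case THERMAL_TYPE_RV770:
                temp = rv770_get_temp(rdev);
                break;
        case THERMAL_TYPE_EVERGREEN:
                temp = evergreen_get_temp(rdev);
                break;
        default:
                temp = 0;
                break;
        }

        /* the helpers already return millidegrees, as hwmon expects */
        return snprintf(buf, PAGE_SIZE, "%u\n", temp);
}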
--git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c new file mode 100644 index 00000000000..3f6636bb2d7 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_acpi.c @@ -0,0 +1,67 @@ +#include <linux/pci.h> +#include <linux/acpi.h> +#include <linux/slab.h> +#include <acpi/acpi_drivers.h> +#include <acpi/acpi_bus.h> + +#include "drmP.h" +#include "drm.h" +#include "drm_sarea.h" +#include "drm_crtc_helper.h" +#include "radeon.h" + +#include <linux/vga_switcheroo.h> + +/* Call the ATIF method + * + * Note: currently we discard the output + */ +static int radeon_atif_call(acpi_handle handle) +{ + acpi_status status; + union acpi_object atif_arg_elements[2]; + struct acpi_object_list atif_arg; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; + + atif_arg.count = 2; + atif_arg.pointer = &atif_arg_elements[0]; + + atif_arg_elements[0].type = ACPI_TYPE_INTEGER; + atif_arg_elements[0].integer.value = 0; + atif_arg_elements[1].type = ACPI_TYPE_INTEGER; + atif_arg_elements[1].integer.value = 0; + + status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer); + + /* Fail only if calling the method fails and ATIF is supported */ + if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { + printk(KERN_DEBUG "failed to evaluate ATIF got %s\n", acpi_format_exception(status)); + kfree(buffer.pointer); + return 1; + } + + kfree(buffer.pointer); + return 0; +} + +/* Call all ACPI methods here */ +int radeon_acpi_init(struct radeon_device *rdev) +{ + acpi_handle handle; + int ret; + + /* No need to proceed if we're sure that ATIF is not supported */ + if (!ASIC_IS_AVIVO(rdev) || !rdev->bios) + return 0; + + /* Get the device handle */ + handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); + + /* Call the ATIF method */ + ret = radeon_atif_call(handle); + if (ret) + return ret; + + return 0; +} + diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index c0bbaa64157..a5aff755f0d 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -113,6 +113,7 @@ void r100_wb_fini(struct radeon_device *rdev); int r100_wb_init(struct radeon_device *rdev); int r100_cp_reset(struct radeon_device *rdev); void r100_vga_render_disable(struct radeon_device *rdev); +void r100_restore_sanity(struct radeon_device *rdev); int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, struct radeon_bo *robj); diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 10673ae59cf..3bc2bcdf530 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -723,7 +723,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct } if (i == ATOM_DEVICE_CV_INDEX) { - DRM_DEBUG("Skipping Component Video\n"); + DRM_DEBUG_KMS("Skipping Component Video\n"); continue; } @@ -1032,21 +1032,18 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev) u8 frev, crev; u16 data_offset; + /* sideport is AMD only */ + if (rdev->family == CHIP_RS600) + return false; + if (atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) { igp_info = (union igp_info *)(mode_info->atom_context->bios + data_offset); switch (crev) { case 1: - /* AMD IGPS */ - if ((rdev->family == CHIP_RS690) || - (rdev->family == CHIP_RS740)) { - if (igp_info->info.ulBootUpMemoryClock) - return true; - } else { - if (igp_info->info.ucMemoryType & 0xf0) - return true; - } + if 
(igp_info->info.ulBootUpMemoryClock) + return true; break; case 2: if (igp_info->info_2.ucMemoryType & 0x0f) @@ -1095,7 +1092,7 @@ bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, (tmds_info->asMiscInfo[i]. ucPLL_VoltageSwing & 0xf) << 16; - DRM_DEBUG("TMDS PLL From ATOMBIOS %u %x\n", + DRM_DEBUG_KMS("TMDS PLL From ATOMBIOS %u %x\n", tmds->tmds_pll[i].freq, tmds->tmds_pll[i].value); @@ -1789,14 +1786,22 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) } /* add the i2c bus for thermal/fan chip */ - /* no support for internal controller yet */ if (controller->ucType > 0) { - if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) || - (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) || - (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN)) { + if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + rdev->pm.int_thermal_type = THERMAL_TYPE_RV770; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) { DRM_INFO("Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN; } else if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || (controller->ucType == @@ -2179,11 +2184,11 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector, if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) { if (connected) { - DRM_DEBUG("TV1 connected\n"); + DRM_DEBUG_KMS("TV1 connected\n"); bios_3_scratch |= ATOM_S3_TV1_ACTIVE; bios_6_scratch |= ATOM_S6_ACC_REQ_TV1; } else { - DRM_DEBUG("TV1 disconnected\n"); + DRM_DEBUG_KMS("TV1 disconnected\n"); bios_0_scratch &= ~ATOM_S0_TV1_MASK; bios_3_scratch &= ~ATOM_S3_TV1_ACTIVE; bios_6_scratch &= ~ATOM_S6_ACC_REQ_TV1; @@ -2192,11 +2197,11 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector, if ((radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT)) { if (connected) { - DRM_DEBUG("CV connected\n"); + DRM_DEBUG_KMS("CV connected\n"); bios_3_scratch |= ATOM_S3_CV_ACTIVE; bios_6_scratch |= ATOM_S6_ACC_REQ_CV; } else { - DRM_DEBUG("CV disconnected\n"); + DRM_DEBUG_KMS("CV disconnected\n"); bios_0_scratch &= ~ATOM_S0_CV_MASK; bios_3_scratch &= ~ATOM_S3_CV_ACTIVE; bios_6_scratch &= ~ATOM_S6_ACC_REQ_CV; @@ -2205,12 +2210,12 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector, if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) { if (connected) { - DRM_DEBUG("LCD1 connected\n"); + DRM_DEBUG_KMS("LCD1 connected\n"); bios_0_scratch |= ATOM_S0_LCD1; bios_3_scratch |= ATOM_S3_LCD1_ACTIVE; bios_6_scratch |= ATOM_S6_ACC_REQ_LCD1; } else { - DRM_DEBUG("LCD1 disconnected\n"); + DRM_DEBUG_KMS("LCD1 disconnected\n"); bios_0_scratch &= ~ATOM_S0_LCD1; bios_3_scratch &= ~ATOM_S3_LCD1_ACTIVE; bios_6_scratch &= ~ATOM_S6_ACC_REQ_LCD1; @@ -2219,12 +2224,12 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector, if 
((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) { if (connected) { - DRM_DEBUG("CRT1 connected\n"); + DRM_DEBUG_KMS("CRT1 connected\n"); bios_0_scratch |= ATOM_S0_CRT1_COLOR; bios_3_scratch |= ATOM_S3_CRT1_ACTIVE; bios_6_scratch |= ATOM_S6_ACC_REQ_CRT1; } else { - DRM_DEBUG("CRT1 disconnected\n"); + DRM_DEBUG_KMS("CRT1 disconnected\n"); bios_0_scratch &= ~ATOM_S0_CRT1_MASK; bios_3_scratch &= ~ATOM_S3_CRT1_ACTIVE; bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT1; @@ -2233,12 +2238,12 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector, if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) { if (connected) { - DRM_DEBUG("CRT2 connected\n"); + DRM_DEBUG_KMS("CRT2 connected\n"); bios_0_scratch |= ATOM_S0_CRT2_COLOR; bios_3_scratch |= ATOM_S3_CRT2_ACTIVE; bios_6_scratch |= ATOM_S6_ACC_REQ_CRT2; } else { - DRM_DEBUG("CRT2 disconnected\n"); + DRM_DEBUG_KMS("CRT2 disconnected\n"); bios_0_scratch &= ~ATOM_S0_CRT2_MASK; bios_3_scratch &= ~ATOM_S3_CRT2_ACTIVE; bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT2; @@ -2247,12 +2252,12 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector, if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) { if (connected) { - DRM_DEBUG("DFP1 connected\n"); + DRM_DEBUG_KMS("DFP1 connected\n"); bios_0_scratch |= ATOM_S0_DFP1; bios_3_scratch |= ATOM_S3_DFP1_ACTIVE; bios_6_scratch |= ATOM_S6_ACC_REQ_DFP1; } else { - DRM_DEBUG("DFP1 disconnected\n"); + DRM_DEBUG_KMS("DFP1 disconnected\n"); bios_0_scratch &= ~ATOM_S0_DFP1; bios_3_scratch &= ~ATOM_S3_DFP1_ACTIVE; bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP1; @@ -2261,12 +2266,12 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector, if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) { if (connected) { - DRM_DEBUG("DFP2 connected\n"); + DRM_DEBUG_KMS("DFP2 connected\n"); bios_0_scratch |= ATOM_S0_DFP2; bios_3_scratch |= ATOM_S3_DFP2_ACTIVE; bios_6_scratch |= ATOM_S6_ACC_REQ_DFP2; } else { - DRM_DEBUG("DFP2 disconnected\n"); + DRM_DEBUG_KMS("DFP2 disconnected\n"); bios_0_scratch &= ~ATOM_S0_DFP2; bios_3_scratch &= ~ATOM_S3_DFP2_ACTIVE; bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP2; @@ -2275,12 +2280,12 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector, if ((radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_DFP3_SUPPORT)) { if (connected) { - DRM_DEBUG("DFP3 connected\n"); + DRM_DEBUG_KMS("DFP3 connected\n"); bios_0_scratch |= ATOM_S0_DFP3; bios_3_scratch |= ATOM_S3_DFP3_ACTIVE; bios_6_scratch |= ATOM_S6_ACC_REQ_DFP3; } else { - DRM_DEBUG("DFP3 disconnected\n"); + DRM_DEBUG_KMS("DFP3 disconnected\n"); bios_0_scratch &= ~ATOM_S0_DFP3; bios_3_scratch &= ~ATOM_S3_DFP3_ACTIVE; bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP3; @@ -2289,12 +2294,12 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector, if ((radeon_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_DFP4_SUPPORT)) { if (connected) { - DRM_DEBUG("DFP4 connected\n"); + DRM_DEBUG_KMS("DFP4 connected\n"); bios_0_scratch |= ATOM_S0_DFP4; bios_3_scratch |= ATOM_S3_DFP4_ACTIVE; bios_6_scratch |= ATOM_S6_ACC_REQ_DFP4; } else { - DRM_DEBUG("DFP4 disconnected\n"); + DRM_DEBUG_KMS("DFP4 disconnected\n"); bios_0_scratch &= ~ATOM_S0_DFP4; bios_3_scratch &= ~ATOM_S3_DFP4_ACTIVE; 
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP4; @@ -2303,12 +2308,12 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector, if ((radeon_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_DFP5_SUPPORT)) { if (connected) { - DRM_DEBUG("DFP5 connected\n"); + DRM_DEBUG_KMS("DFP5 connected\n"); bios_0_scratch |= ATOM_S0_DFP5; bios_3_scratch |= ATOM_S3_DFP5_ACTIVE; bios_6_scratch |= ATOM_S6_ACC_REQ_DFP5; } else { - DRM_DEBUG("DFP5 disconnected\n"); + DRM_DEBUG_KMS("DFP5 disconnected\n"); bios_0_scratch &= ~ATOM_S0_DFP5; bios_3_scratch &= ~ATOM_S3_DFP5_ACTIVE; bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP5; diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 2c921373999..654787ec43f 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -53,7 +53,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev) return false; rdev->bios = NULL; - vram_base = drm_get_resource_start(rdev->ddev, 0); + vram_base = pci_resource_start(rdev->pdev, 0); bios = ioremap(vram_base, size); if (!bios) { return false; diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 2417d7b06fd..5e1474cde4b 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -693,6 +693,10 @@ bool radeon_combios_sideport_present(struct radeon_device *rdev) struct drm_device *dev = rdev->ddev; u16 igp_info; + /* sideport is AMD only */ + if (rdev->family == CHIP_RS400) + return false; + igp_info = combios_get_table_offset(dev, COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE); if (igp_info) { @@ -1205,7 +1209,7 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder, RBIOS32(tmds_info + i * 10 + 0x08); tmds->tmds_pll[i].freq = RBIOS16(tmds_info + i * 10 + 0x10); - DRM_DEBUG("TMDS PLL From COMBIOS %u %x\n", + DRM_DEBUG_KMS("TMDS PLL From COMBIOS %u %x\n", tmds->tmds_pll[i].freq, tmds->tmds_pll[i].value); } @@ -1223,7 +1227,7 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder, stride += 10; else stride += 6; - DRM_DEBUG("TMDS PLL From COMBIOS %u %x\n", + DRM_DEBUG_KMS("TMDS PLL From COMBIOS %u %x\n", tmds->tmds_pll[i].freq, tmds->tmds_pll[i].value); } @@ -2208,7 +2212,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) uint16_t tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE); if (tmds_info) { - DRM_DEBUG("Found DFP table, assuming DVI connector\n"); + DRM_DEBUG_KMS("Found DFP table, assuming DVI connector\n"); radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, @@ -2234,7 +2238,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) } else { uint16_t crt_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); - DRM_DEBUG("Found CRT table, assuming VGA connector\n"); + DRM_DEBUG_KMS("Found CRT table, assuming VGA connector\n"); if (crt_info) { radeon_add_legacy_encoder(dev, radeon_get_encoder_id(dev, @@ -2251,7 +2255,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) CONNECTOR_OBJECT_ID_VGA, &hpd); } else { - DRM_DEBUG("No connector info found\n"); + DRM_DEBUG_KMS("No connector info found\n"); return false; } } @@ -2340,7 +2344,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) ddc_i2c.valid = false; break; } - DRM_DEBUG("LCD DDC Info Table found!\n"); + DRM_DEBUG_KMS("LCD DDC Info Table found!\n"); } else ddc_i2c.valid = false; @@ -2941,9 +2945,8 @@ 
static void combios_write_ram_size(struct drm_device *dev) if (rev < 3) { mem_cntl = RBIOS32(offset + 1); mem_size = RBIOS16(offset + 5); - if (((rdev->flags & RADEON_FAMILY_MASK) < CHIP_R200) && - ((dev->pdev->device != 0x515e) - && (dev->pdev->device != 0x5969))) + if ((rdev->family < CHIP_R200) && + !ASIC_IS_RN50(rdev)) WREG32(RADEON_MEM_CNTL, mem_cntl); } } @@ -2954,10 +2957,8 @@ static void combios_write_ram_size(struct drm_device *dev) if (offset) { rev = RBIOS8(offset - 1); if (rev < 1) { - if (((rdev->flags & RADEON_FAMILY_MASK) < - CHIP_R200) - && ((dev->pdev->device != 0x515e) - && (dev->pdev->device != 0x5969))) { + if ((rdev->family < CHIP_R200) + && !ASIC_IS_RN50(rdev)) { int ram = 0; int mem_addr_mapping = 0; @@ -3121,14 +3122,14 @@ radeon_combios_connected_scratch_regs(struct drm_connector *connector, if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) { if (connected) { - DRM_DEBUG("TV1 connected\n"); + DRM_DEBUG_KMS("TV1 connected\n"); /* fix me */ bios_4_scratch |= RADEON_TV1_ATTACHED_SVIDEO; /*save->bios_4_scratch |= RADEON_TV1_ATTACHED_COMP; */ bios_5_scratch |= RADEON_TV1_ON; bios_5_scratch |= RADEON_ACC_REQ_TV1; } else { - DRM_DEBUG("TV1 disconnected\n"); + DRM_DEBUG_KMS("TV1 disconnected\n"); bios_4_scratch &= ~RADEON_TV1_ATTACHED_MASK; bios_5_scratch &= ~RADEON_TV1_ON; bios_5_scratch &= ~RADEON_ACC_REQ_TV1; @@ -3137,12 +3138,12 @@ radeon_combios_connected_scratch_regs(struct drm_connector *connector, if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) { if (connected) { - DRM_DEBUG("LCD1 connected\n"); + DRM_DEBUG_KMS("LCD1 connected\n"); bios_4_scratch |= RADEON_LCD1_ATTACHED; bios_5_scratch |= RADEON_LCD1_ON; bios_5_scratch |= RADEON_ACC_REQ_LCD1; } else { - DRM_DEBUG("LCD1 disconnected\n"); + DRM_DEBUG_KMS("LCD1 disconnected\n"); bios_4_scratch &= ~RADEON_LCD1_ATTACHED; bios_5_scratch &= ~RADEON_LCD1_ON; bios_5_scratch &= ~RADEON_ACC_REQ_LCD1; @@ -3151,12 +3152,12 @@ radeon_combios_connected_scratch_regs(struct drm_connector *connector, if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) { if (connected) { - DRM_DEBUG("CRT1 connected\n"); + DRM_DEBUG_KMS("CRT1 connected\n"); bios_4_scratch |= RADEON_CRT1_ATTACHED_COLOR; bios_5_scratch |= RADEON_CRT1_ON; bios_5_scratch |= RADEON_ACC_REQ_CRT1; } else { - DRM_DEBUG("CRT1 disconnected\n"); + DRM_DEBUG_KMS("CRT1 disconnected\n"); bios_4_scratch &= ~RADEON_CRT1_ATTACHED_MASK; bios_5_scratch &= ~RADEON_CRT1_ON; bios_5_scratch &= ~RADEON_ACC_REQ_CRT1; @@ -3165,12 +3166,12 @@ radeon_combios_connected_scratch_regs(struct drm_connector *connector, if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) { if (connected) { - DRM_DEBUG("CRT2 connected\n"); + DRM_DEBUG_KMS("CRT2 connected\n"); bios_4_scratch |= RADEON_CRT2_ATTACHED_COLOR; bios_5_scratch |= RADEON_CRT2_ON; bios_5_scratch |= RADEON_ACC_REQ_CRT2; } else { - DRM_DEBUG("CRT2 disconnected\n"); + DRM_DEBUG_KMS("CRT2 disconnected\n"); bios_4_scratch &= ~RADEON_CRT2_ATTACHED_MASK; bios_5_scratch &= ~RADEON_CRT2_ON; bios_5_scratch &= ~RADEON_ACC_REQ_CRT2; @@ -3179,12 +3180,12 @@ radeon_combios_connected_scratch_regs(struct drm_connector *connector, if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) { if (connected) { - DRM_DEBUG("DFP1 connected\n"); + 
DRM_DEBUG_KMS("DFP1 connected\n"); bios_4_scratch |= RADEON_DFP1_ATTACHED; bios_5_scratch |= RADEON_DFP1_ON; bios_5_scratch |= RADEON_ACC_REQ_DFP1; } else { - DRM_DEBUG("DFP1 disconnected\n"); + DRM_DEBUG_KMS("DFP1 disconnected\n"); bios_4_scratch &= ~RADEON_DFP1_ATTACHED; bios_5_scratch &= ~RADEON_DFP1_ON; bios_5_scratch &= ~RADEON_ACC_REQ_DFP1; @@ -3193,12 +3194,12 @@ radeon_combios_connected_scratch_regs(struct drm_connector *connector, if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) { if (connected) { - DRM_DEBUG("DFP2 connected\n"); + DRM_DEBUG_KMS("DFP2 connected\n"); bios_4_scratch |= RADEON_DFP2_ATTACHED; bios_5_scratch |= RADEON_DFP2_ON; bios_5_scratch |= RADEON_ACC_REQ_DFP2; } else { - DRM_DEBUG("DFP2 disconnected\n"); + DRM_DEBUG_KMS("DFP2 disconnected\n"); bios_4_scratch &= ~RADEON_DFP2_ATTACHED; bios_5_scratch &= ~RADEON_DFP2_ON; bios_5_scratch &= ~RADEON_ACC_REQ_DFP2; diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index adccbc2c202..2395c8600cf 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -214,7 +214,7 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; drm_mode_set_name(mode); - DRM_DEBUG("Adding native panel mode %s\n", mode->name); + DRM_DEBUG_KMS("Adding native panel mode %s\n", mode->name); } else if (native_mode->hdisplay != 0 && native_mode->vdisplay != 0) { /* mac laptops without an edid */ @@ -226,7 +226,7 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode */ mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false); mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; - DRM_DEBUG("Adding cvt approximation of native panel mode %s\n", mode->name); + DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name); } return mode; } @@ -312,6 +312,20 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr } } + if (property == rdev->mode_info.underscan_property) { + /* need to find digital encoder on connector */ + encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); + if (!encoder) + return 0; + + radeon_encoder = to_radeon_encoder(encoder); + + if (radeon_encoder->underscan_type != val) { + radeon_encoder->underscan_type = val; + radeon_property_change_mode(&radeon_encoder->base); + } + } + if (property == rdev->mode_info.tv_std_property) { encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TVDAC); if (!encoder) { @@ -522,7 +536,7 @@ static int radeon_lvds_set_property(struct drm_connector *connector, struct radeon_encoder *radeon_encoder; enum radeon_rmx_type rmx_type; - DRM_DEBUG("\n"); + DRM_DEBUG_KMS("\n"); if (property != dev->mode_config.scaling_mode_property) return 0; @@ -1082,6 +1096,8 @@ radeon_add_atom_connector(struct drm_device *dev, drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); + /* no HPD on analog connectors */ + radeon_connector->hpd.hpd = RADEON_HPD_NONE; connector->polled = DRM_CONNECTOR_POLL_CONNECT; break; case DRM_MODE_CONNECTOR_DVIA: @@ -1096,6 +1112,8 @@ radeon_add_atom_connector(struct drm_device *dev, drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); + /* no HPD on analog connectors */ + radeon_connector->hpd.hpd = RADEON_HPD_NONE; 
break; case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_DVID: @@ -1116,6 +1134,10 @@ radeon_add_atom_connector(struct drm_device *dev, drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.coherent_mode_property, 1); + if (ASIC_IS_AVIVO(rdev)) + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.underscan_property, + UNDERSCAN_AUTO); if (connector_type == DRM_MODE_CONNECTOR_DVII) { radeon_connector->dac_load_detect = true; drm_connector_attach_property(&radeon_connector->base, @@ -1141,6 +1163,10 @@ radeon_add_atom_connector(struct drm_device *dev, drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.coherent_mode_property, 1); + if (ASIC_IS_AVIVO(rdev)) + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.underscan_property, + UNDERSCAN_AUTO); subpixel_order = SubPixelHorizontalRGB; break; case DRM_MODE_CONNECTOR_DisplayPort: @@ -1172,6 +1198,10 @@ radeon_add_atom_connector(struct drm_device *dev, drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.coherent_mode_property, 1); + if (ASIC_IS_AVIVO(rdev)) + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.underscan_property, + UNDERSCAN_AUTO); break; case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_Composite: @@ -1186,6 +1216,8 @@ radeon_add_atom_connector(struct drm_device *dev, drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.tv_std_property, radeon_atombios_get_tv_info(rdev)); + /* no HPD on analog connectors */ + radeon_connector->hpd.hpd = RADEON_HPD_NONE; } break; case DRM_MODE_CONNECTOR_LVDS: @@ -1209,7 +1241,7 @@ radeon_add_atom_connector(struct drm_device *dev, break; } - if (hpd->hpd == RADEON_HPD_NONE) { + if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) { if (i2c_bus->valid) connector->polled = DRM_CONNECTOR_POLL_CONNECT; } else @@ -1276,6 +1308,8 @@ radeon_add_legacy_connector(struct drm_device *dev, drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); + /* no HPD on analog connectors */ + radeon_connector->hpd.hpd = RADEON_HPD_NONE; connector->polled = DRM_CONNECTOR_POLL_CONNECT; break; case DRM_MODE_CONNECTOR_DVIA: @@ -1290,6 +1324,8 @@ radeon_add_legacy_connector(struct drm_device *dev, drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.load_detect_property, 1); + /* no HPD on analog connectors */ + radeon_connector->hpd.hpd = RADEON_HPD_NONE; break; case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_DVID: @@ -1328,6 +1364,8 @@ radeon_add_legacy_connector(struct drm_device *dev, drm_connector_attach_property(&radeon_connector->base, rdev->mode_info.tv_std_property, radeon_combios_get_tv_info(rdev)); + /* no HPD on analog connectors */ + radeon_connector->hpd.hpd = RADEON_HPD_NONE; } break; case DRM_MODE_CONNECTOR_LVDS: @@ -1345,7 +1383,7 @@ radeon_add_legacy_connector(struct drm_device *dev, break; } - if (hpd->hpd == RADEON_HPD_NONE) { + if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) { if (i2c_bus->valid) connector->polled = DRM_CONNECTOR_POLL_CONNECT; } else diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index 2f042a3c0e6..eb6b9eed734 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c @@ -2120,8 +2120,8 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags) else dev_priv->flags |= RADEON_IS_PCI; - ret = drm_addmap(dev, drm_get_resource_start(dev, 2), - drm_get_resource_len(dev, 2), _DRM_REGISTERS, + ret = 
drm_addmap(dev, pci_resource_start(dev->pdev, 2), + pci_resource_len(dev->pdev, 2), _DRM_REGISTERS, _DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio); if (ret != 0) return ret; @@ -2194,9 +2194,9 @@ int radeon_driver_firstopen(struct drm_device *dev) dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE; - dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0); + dev_priv->fb_aper_offset = pci_resource_start(dev->pdev, 0); ret = drm_addmap(dev, dev_priv->fb_aper_offset, - drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER, + pci_resource_len(dev->pdev, 0), _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING, &map); if (ret != 0) return ret; diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index dd279da9054..a64811a9451 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -415,6 +415,22 @@ static uint32_t cail_reg_read(struct card_info *info, uint32_t reg) return r; } +static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val) +{ + struct radeon_device *rdev = info->dev->dev_private; + + WREG32_IO(reg*4, val); +} + +static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg) +{ + struct radeon_device *rdev = info->dev->dev_private; + uint32_t r; + + r = RREG32_IO(reg*4); + return r; +} + int radeon_atombios_init(struct radeon_device *rdev) { struct card_info *atom_card_info = @@ -427,6 +443,15 @@ int radeon_atombios_init(struct radeon_device *rdev) atom_card_info->dev = rdev->ddev; atom_card_info->reg_read = cail_reg_read; atom_card_info->reg_write = cail_reg_write; + /* needed for iio ops */ + if (rdev->rio_mem) { + atom_card_info->ioreg_read = cail_ioreg_read; + atom_card_info->ioreg_write = cail_ioreg_write; + } else { + DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n"); + atom_card_info->ioreg_read = cail_reg_read; + atom_card_info->ioreg_write = cail_reg_write; + } atom_card_info->mc_read = cail_mc_read; atom_card_info->mc_write = cail_mc_write; atom_card_info->pll_read = cail_pll_read; @@ -573,7 +598,7 @@ int radeon_device_init(struct radeon_device *rdev, struct pci_dev *pdev, uint32_t flags) { - int r; + int r, i; int dma_bits; rdev->shutdown = false; @@ -650,8 +675,8 @@ int radeon_device_init(struct radeon_device *rdev, /* Registers mapping */ /* TODO: block userspace mapping of io register */ - rdev->rmmio_base = drm_get_resource_start(rdev->ddev, 2); - rdev->rmmio_size = drm_get_resource_len(rdev->ddev, 2); + rdev->rmmio_base = pci_resource_start(rdev->pdev, 2); + rdev->rmmio_size = pci_resource_len(rdev->pdev, 2); rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size); if (rdev->rmmio == NULL) { return -ENOMEM; @@ -659,6 +684,17 @@ int radeon_device_init(struct radeon_device *rdev, DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base); DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size); + /* io port mapping */ + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) { + rdev->rio_mem_size = pci_resource_len(rdev->pdev, i); + rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size); + break; + } + } + if (rdev->rio_mem == NULL) + DRM_ERROR("Unable to find PCI I/O BAR\n"); + /* if we have > 1 VGA cards, then disable the radeon VGA resources */ /* this will fail for cards that aren't VGA class devices, just * ignore it */ @@ -701,6 +737,9 @@ void radeon_device_fini(struct radeon_device *rdev) destroy_workqueue(rdev->wq); vga_switcheroo_unregister_client(rdev->pdev); 
vga_client_register(rdev->pdev, NULL, NULL, NULL); + if (rdev->rio_mem) + pci_iounmap(rdev->pdev, rdev->rio_mem); + rdev->rio_mem = NULL; iounmap(rdev->rmmio); rdev->rmmio = NULL; } diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 8154cdf796e..74dac9635d7 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -42,7 +42,7 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc) struct radeon_device *rdev = dev->dev_private; int i; - DRM_DEBUG("%d\n", radeon_crtc->crtc_id); + DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id); WREG32(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0); WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0); @@ -75,7 +75,7 @@ static void evergreen_crtc_load_lut(struct drm_crtc *crtc) struct radeon_device *rdev = dev->dev_private; int i; - DRM_DEBUG("%d\n", radeon_crtc->crtc_id); + DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id); WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0); WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0); @@ -469,7 +469,7 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll, uint32_t post_div; u32 pll_out_min, pll_out_max; - DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div); + DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div); freq = freq * 1000; if (pll->flags & RADEON_PLL_IS_LCD) { @@ -558,15 +558,17 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll, current_freq = radeon_div(tmp, ref_div * post_div); if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) { - error = freq - current_freq; - error = error < 0 ? 0xffffffff : error; + if (freq < current_freq) + error = 0xffffffff; + else + error = freq - current_freq; } else error = abs(current_freq - freq); vco_diff = abs(vco - best_vco); if ((best_vco == 0 && error < best_error) || (best_vco != 0 && - (error < best_error - 100 || + ((best_error > 100 && error < best_error - 100) || (abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) { best_post_div = post_div; best_ref_div = ref_div; @@ -803,7 +805,7 @@ done: *ref_div_p = ref_div; *post_div_p = post_div; - DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p); + DRM_DEBUG_KMS("%u %d.%d, %d, %d\n", *dot_clock_p, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p); } void radeon_compute_pll(struct radeon_pll *pll, @@ -919,6 +921,12 @@ static struct drm_prop_enum_list radeon_tv_std_enum_list[] = { TV_STD_SECAM, "secam" }, }; +static struct drm_prop_enum_list radeon_underscan_enum_list[] = +{ { UNDERSCAN_OFF, "off" }, + { UNDERSCAN_ON, "on" }, + { UNDERSCAN_AUTO, "auto" }, +}; + static int radeon_modeset_create_props(struct radeon_device *rdev) { int i, sz; @@ -972,6 +980,18 @@ static int radeon_modeset_create_props(struct radeon_device *rdev) radeon_tv_std_enum_list[i].name); } + sz = ARRAY_SIZE(radeon_underscan_enum_list); + rdev->mode_info.underscan_property = + drm_property_create(rdev->ddev, + DRM_MODE_PROP_ENUM, + "underscan", sz); + for (i = 0; i < sz; i++) { + drm_property_add_enum(rdev->mode_info.underscan_property, + i, + radeon_underscan_enum_list[i].type, + radeon_underscan_enum_list[i].name); + } + return 0; } @@ -1067,15 +1087,26 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; struct drm_encoder *encoder; struct 
radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_encoder *radeon_encoder; + struct drm_connector *connector; + struct radeon_connector *radeon_connector; bool first = true; + u32 src_v = 1, dst_v = 1; + u32 src_h = 1, dst_h = 1; + + radeon_crtc->h_border = 0; + radeon_crtc->v_border = 0; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - radeon_encoder = to_radeon_encoder(encoder); if (encoder->crtc != crtc) continue; + radeon_encoder = to_radeon_encoder(encoder); + connector = radeon_get_connector_for_encoder(encoder); + radeon_connector = to_radeon_connector(connector); + if (first) { /* set scaling */ if (radeon_encoder->rmx_type == RMX_OFF) @@ -1085,31 +1116,49 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, radeon_crtc->rmx_type = radeon_encoder->rmx_type; else radeon_crtc->rmx_type = RMX_OFF; + src_v = crtc->mode.vdisplay; + dst_v = radeon_crtc->native_mode.vdisplay; + src_h = crtc->mode.hdisplay; + dst_h = radeon_crtc->native_mode.vdisplay; /* copy native mode */ memcpy(&radeon_crtc->native_mode, &radeon_encoder->native_mode, sizeof(struct drm_display_mode)); + + /* fix up for overscan on hdmi */ + if (ASIC_IS_AVIVO(rdev) && + ((radeon_encoder->underscan_type == UNDERSCAN_ON) || + ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) && + drm_detect_hdmi_monitor(radeon_connector->edid)))) { + radeon_crtc->h_border = (mode->hdisplay >> 5) + 16; + radeon_crtc->v_border = (mode->vdisplay >> 5) + 16; + radeon_crtc->rmx_type = RMX_FULL; + src_v = crtc->mode.vdisplay; + dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2); + src_h = crtc->mode.hdisplay; + dst_h = crtc->mode.hdisplay - (radeon_crtc->h_border * 2); + } first = false; } else { if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) { /* WARNING: Right now this can't happen but * in the future we need to check that scaling - * are consistent accross different encoder + * are consistent across different encoder * (ie all encoder can work with the same * scaling). 
*/ - DRM_ERROR("Scaling not consistent accross encoder.\n"); + DRM_ERROR("Scaling not consistent across encoder.\n"); return false; } } } if (radeon_crtc->rmx_type != RMX_OFF) { fixed20_12 a, b; - a.full = dfixed_const(crtc->mode.vdisplay); - b.full = dfixed_const(radeon_crtc->native_mode.hdisplay); + a.full = dfixed_const(src_v); + b.full = dfixed_const(dst_v); radeon_crtc->vsc.full = dfixed_div(a, b); - a.full = dfixed_const(crtc->mode.hdisplay); - b.full = dfixed_const(radeon_crtc->native_mode.vdisplay); + a.full = dfixed_const(src_h); + b.full = dfixed_const(dst_h); radeon_crtc->hsc.full = dfixed_div(a, b); } else { radeon_crtc->vsc.full = dfixed_const(1); diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index e166fe4d7c3..795403b0e2c 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -46,9 +46,10 @@ * - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs * - 2.4.0 - add crtc id query * - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen + * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500) */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 5 +#define KMS_DRIVER_MINOR 6 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); @@ -238,7 +239,7 @@ static struct drm_driver kms_driver; static int __devinit radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - return drm_get_dev(pdev, ent, &kms_driver); + return drm_get_pci_dev(pdev, ent, &kms_driver); } static void diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index e0b30b264c2..263c8098d7d 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -205,14 +205,14 @@ void radeon_encoder_set_active_device(struct drm_encoder *encoder) if (connector->encoder == encoder) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); radeon_encoder->active_device = radeon_encoder->devices & radeon_connector->devices; - DRM_DEBUG("setting active device to %08x from %08x %08x for encoder %d\n", + DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n", radeon_encoder->active_device, radeon_encoder->devices, radeon_connector->devices, encoder->encoder_type); } } } -static struct drm_connector * +struct drm_connector * radeon_get_connector_for_encoder(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; @@ -1021,7 +1021,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) memset(&args, 0, sizeof(args)); - DRM_DEBUG("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", + DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", radeon_encoder->encoder_id, mode, radeon_encoder->devices, radeon_encoder->active_device); switch (radeon_encoder->encoder_id) { @@ -1484,7 +1484,7 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec uint32_t bios_0_scratch; if (!atombios_dac_load_detect(encoder, connector)) { - DRM_DEBUG("detect returned false \n"); + DRM_DEBUG_KMS("detect returned false \n"); return connector_status_unknown; } @@ -1493,7 +1493,7 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec else bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH); - DRM_DEBUG("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices); + DRM_DEBUG_KMS("Bios 
0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices); if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) { if (bios_0_scratch & ATOM_S0_CRT1_MASK) return connector_status_connected; @@ -1694,6 +1694,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su radeon_encoder->encoder_id = encoder_id; radeon_encoder->devices = supported_device; radeon_encoder->rmx_type = RMX_OFF; + radeon_encoder->underscan_type = UNDERSCAN_OFF; switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_LVDS: @@ -1707,6 +1708,8 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su } else { drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); + if (ASIC_IS_AVIVO(rdev)) + radeon_encoder->underscan_type = UNDERSCAN_AUTO; } drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); break; @@ -1736,6 +1739,8 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su } else { drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); + if (ASIC_IS_AVIVO(rdev)) + radeon_encoder->underscan_type = UNDERSCAN_AUTO; } drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); break; diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index ab389f89fa8..ddcd3b13f15 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -49,7 +49,7 @@ int radeon_driver_unload_kms(struct drm_device *dev) int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) { struct radeon_device *rdev; - int r; + int r, acpi_status; rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL); if (rdev == NULL) { @@ -77,6 +77,12 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) dev_err(&dev->pdev->dev, "Fatal error during GPU init\n"); goto out; } + + /* Call ACPI methods */ + acpi_status = radeon_acpi_init(rdev); + if (acpi_status) + dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n"); + /* Again modeset_init should fail only on fatal error * otherwise it should provide enough functionalities * for shadowfb to run @@ -135,15 +141,36 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) } } if (!found) { - DRM_DEBUG("unknown crtc id %d\n", value); + DRM_DEBUG_KMS("unknown crtc id %d\n", value); return -EINVAL; } break; case RADEON_INFO_ACCEL_WORKING2: value = rdev->accel_working; break; + case RADEON_INFO_TILING_CONFIG: + if (rdev->family >= CHIP_CEDAR) + value = rdev->config.evergreen.tile_config; + else if (rdev->family >= CHIP_RV770) + value = rdev->config.rv770.tile_config; + else if (rdev->family >= CHIP_R600) + value = rdev->config.r600.tile_config; + else { + DRM_DEBUG_KMS("tiling config is r6xx+ only!\n"); + return -EINVAL; + } + case RADEON_INFO_WANT_HYPERZ: + mutex_lock(&dev->struct_mutex); + if (rdev->hyperz_filp) + value = 0; + else { + rdev->hyperz_filp = filp; + value = 1; + } + mutex_unlock(&dev->struct_mutex); + break; default: - DRM_DEBUG("Invalid request %d\n", info->request); + DRM_DEBUG_KMS("Invalid request %d\n", info->request); return -EINVAL; } if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) { @@ -181,9 +208,11 @@ void radeon_driver_postclose_kms(struct drm_device *dev, void radeon_driver_preclose_kms(struct drm_device *dev, struct drm_file *file_priv) { + struct 
radeon_device *rdev = dev->dev_private; + if (rdev->hyperz_filp == file_priv) + rdev->hyperz_filp = NULL; } - /* * VBlank related functions. */ diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index e1e5255396a..989df519a1e 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -362,10 +362,10 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, uint32_t gen_cntl_reg, gen_cntl_val; int r; - DRM_DEBUG("\n"); + DRM_DEBUG_KMS("\n"); /* no fb bound */ if (!crtc->fb) { - DRM_DEBUG("No FB bound\n"); + DRM_DEBUG_KMS("No FB bound\n"); return 0; } @@ -528,7 +528,7 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod uint32_t crtc_v_sync_strt_wid; bool is_tv = false; - DRM_DEBUG("\n"); + DRM_DEBUG_KMS("\n"); list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc == crtc) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); @@ -757,7 +757,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) } } - DRM_DEBUG("\n"); + DRM_DEBUG_KMS("\n"); if (!use_bios_divs) { radeon_compute_pll(pll, mode->clock, @@ -772,7 +772,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) if (!post_div->divider) post_div = &post_divs[0]; - DRM_DEBUG("dc=%u, fd=%d, rd=%d, pd=%d\n", + DRM_DEBUG_KMS("dc=%u, fd=%d, rd=%d, pd=%d\n", (unsigned)freq, feedback_div, reference_div, @@ -841,12 +841,12 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | RADEON_P2PLL_SLEEP | RADEON_P2PLL_ATOMIC_UPDATE_EN)); - DRM_DEBUG("Wrote2: 0x%08x 0x%08x 0x%08x (0x%08x)\n", + DRM_DEBUG_KMS("Wrote2: 0x%08x 0x%08x 0x%08x (0x%08x)\n", (unsigned)pll_ref_div, (unsigned)pll_fb_post_div, (unsigned)htotal_cntl, RREG32_PLL(RADEON_P2PLL_CNTL)); - DRM_DEBUG("Wrote2: rd=%u, fd=%u, pd=%u\n", + DRM_DEBUG_KMS("Wrote2: rd=%u, fd=%u, pd=%u\n", (unsigned)pll_ref_div & RADEON_P2PLL_REF_DIV_MASK, (unsigned)pll_fb_post_div & RADEON_P2PLL_FB0_DIV_MASK, (unsigned)((pll_fb_post_div & @@ -947,12 +947,12 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) | RADEON_PPLL_ATOMIC_UPDATE_EN | RADEON_PPLL_VGA_ATOMIC_UPDATE_EN)); - DRM_DEBUG("Wrote: 0x%08x 0x%08x 0x%08x (0x%08x)\n", + DRM_DEBUG_KMS("Wrote: 0x%08x 0x%08x 0x%08x (0x%08x)\n", pll_ref_div, pll_fb_post_div, (unsigned)htotal_cntl, RREG32_PLL(RADEON_PPLL_CNTL)); - DRM_DEBUG("Wrote: rd=%d, fd=%d, pd=%d\n", + DRM_DEBUG_KMS("Wrote: rd=%d, fd=%d, pd=%d\n", pll_ref_div & RADEON_PPLL_REF_DIV_MASK, pll_fb_post_div & RADEON_PPLL_FB3_DIV_MASK, (pll_fb_post_div & RADEON_PPLL_POST3_DIV_MASK) >> 16); diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 5688a0cf6bb..b8149cbc0c7 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -47,7 +47,7 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man; int panel_pwr_delay = 2000; bool is_mac = false; - DRM_DEBUG("\n"); + DRM_DEBUG_KMS("\n"); if (radeon_encoder->enc_priv) { if (rdev->is_atom_bios) { @@ -151,7 +151,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t lvds_pll_cntl, lvds_gen_cntl, lvds_ss_gen_cntl; - DRM_DEBUG("\n"); + DRM_DEBUG_KMS("\n"); 
lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL); lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN; @@ -167,7 +167,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder, } else { struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv; if (lvds) { - DRM_DEBUG("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl); + DRM_DEBUG_KMS("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl); lvds_gen_cntl = lvds->lvds_gen_cntl; lvds_ss_gen_cntl &= ~((0xf << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) | (0xf << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT)); @@ -250,7 +250,7 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode uint32_t dac_cntl = RREG32(RADEON_DAC_CNTL); uint32_t dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL); - DRM_DEBUG("\n"); + DRM_DEBUG_KMS("\n"); switch (mode) { case DRM_MODE_DPMS_ON: @@ -315,7 +315,7 @@ static void radeon_legacy_primary_dac_mode_set(struct drm_encoder *encoder, struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t disp_output_cntl, dac_cntl, dac2_cntl, dac_macro_cntl; - DRM_DEBUG("\n"); + DRM_DEBUG_KMS("\n"); if (radeon_crtc->crtc_id == 0) { if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) { @@ -446,7 +446,7 @@ static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode) struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t fp_gen_cntl = RREG32(RADEON_FP_GEN_CNTL); - DRM_DEBUG("\n"); + DRM_DEBUG_KMS("\n"); switch (mode) { case DRM_MODE_DPMS_ON: @@ -502,7 +502,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder, uint32_t tmp, tmds_pll_cntl, tmds_transmitter_cntl, fp_gen_cntl; int i; - DRM_DEBUG("\n"); + DRM_DEBUG_KMS("\n"); tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL); tmp &= 0xfffff; @@ -610,7 +610,7 @@ static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode) struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); - DRM_DEBUG("\n"); + DRM_DEBUG_KMS("\n"); switch (mode) { case DRM_MODE_DPMS_ON: @@ -666,7 +666,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t fp2_gen_cntl; - DRM_DEBUG("\n"); + DRM_DEBUG_KMS("\n"); if (rdev->is_atom_bios) { radeon_encoder->pixel_clock = adjusted_mode->clock; @@ -760,7 +760,7 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode) uint32_t fp2_gen_cntl = 0, crtc2_gen_cntl = 0, tv_dac_cntl = 0; uint32_t tv_master_cntl = 0; bool is_tv; - DRM_DEBUG("\n"); + DRM_DEBUG_KMS("\n"); is_tv = radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT ? true : false; @@ -878,7 +878,7 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, uint32_t disp_hw_debug = 0, fp2_gen_cntl = 0, disp_tv_out_cntl = 0; bool is_tv = false; - DRM_DEBUG("\n"); + DRM_DEBUG_KMS("\n"); is_tv = radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT ? 
true : false; @@ -1075,10 +1075,10 @@ static bool r300_legacy_tv_detect(struct drm_encoder *encoder, tmp = RREG32(RADEON_TV_DAC_CNTL); if ((tmp & RADEON_TV_DAC_GDACDET) != 0) { found = true; - DRM_DEBUG("S-video TV connection detected\n"); + DRM_DEBUG_KMS("S-video TV connection detected\n"); } else if ((tmp & RADEON_TV_DAC_BDACDET) != 0) { found = true; - DRM_DEBUG("Composite TV connection detected\n"); + DRM_DEBUG_KMS("Composite TV connection detected\n"); } WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); @@ -1141,10 +1141,10 @@ static bool radeon_legacy_tv_detect(struct drm_encoder *encoder, tmp = RREG32(RADEON_TV_DAC_CNTL); if (tmp & RADEON_TV_DAC_GDACDET) { found = true; - DRM_DEBUG("S-video TV connection detected\n"); + DRM_DEBUG_KMS("S-video TV connection detected\n"); } else if ((tmp & RADEON_TV_DAC_BDACDET) != 0) { found = true; - DRM_DEBUG("Composite TV connection detected\n"); + DRM_DEBUG_KMS("Composite TV connection detected\n"); } WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, tv_pre_dac_mux_cntl); diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c index 03204039774..c7b6cb428d0 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c @@ -496,7 +496,7 @@ static bool radeon_legacy_tv_init_restarts(struct drm_encoder *encoder) restart -= v_offset + h_offset; - DRM_DEBUG("compute_restarts: def = %u h = %d v = %d, p1 = %04x, p2 = %04x, restart = %d\n", + DRM_DEBUG_KMS("compute_restarts: def = %u h = %d v = %d, p1 = %04x, p2 = %04x, restart = %d\n", const_ptr->def_restart, tv_dac->h_pos, tv_dac->v_pos, p1, p2, restart); tv_dac->tv.hrestart = restart % h_total; @@ -505,7 +505,7 @@ static bool radeon_legacy_tv_init_restarts(struct drm_encoder *encoder) restart /= v_total; tv_dac->tv.frestart = restart % f_total; - DRM_DEBUG("compute_restart: F/H/V=%u,%u,%u\n", + DRM_DEBUG_KMS("compute_restart: F/H/V=%u,%u,%u\n", (unsigned)tv_dac->tv.frestart, (unsigned)tv_dac->tv.vrestart, (unsigned)tv_dac->tv.hrestart); @@ -523,7 +523,7 @@ static bool radeon_legacy_tv_init_restarts(struct drm_encoder *encoder) tv_dac->tv.timing_cntl = (tv_dac->tv.timing_cntl & ~RADEON_H_INC_MASK) | ((u32)h_inc << RADEON_H_INC_SHIFT); - DRM_DEBUG("compute_restart: h_size = %d h_inc = %d\n", tv_dac->h_size, h_inc); + DRM_DEBUG_KMS("compute_restart: h_size = %d h_inc = %d\n", tv_dac->h_size, h_inc); return h_changed; } diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 95696aa57ac..71aea4037e9 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -66,6 +66,12 @@ enum radeon_tv_std { TV_STD_PAL_N, }; +enum radeon_underscan_type { + UNDERSCAN_OFF, + UNDERSCAN_ON, + UNDERSCAN_AUTO, +}; + enum radeon_hpd_id { RADEON_HPD_1 = 0, RADEON_HPD_2, @@ -226,10 +232,12 @@ struct radeon_mode_info { struct drm_property *coherent_mode_property; /* DAC enable load detect */ struct drm_property *load_detect_property; - /* TV standard load detect */ + /* TV standard */ struct drm_property *tv_std_property; /* legacy TMDS PLL detect */ struct drm_property *tmds_pll_property; + /* underscan */ + struct drm_property *underscan_property; /* hardcoded DFP edid from BIOS */ struct edid *bios_hardcoded_edid; @@ -266,6 +274,8 @@ struct radeon_crtc { uint32_t legacy_display_base_addr; uint32_t legacy_cursor_offset; enum radeon_rmx_type rmx_type; + u8 h_border; + u8 v_border; fixed20_12 vsc; fixed20_12 hsc; struct drm_display_mode native_mode; @@ -354,6 +364,7 @@ struct radeon_encoder { 
uint32_t flags; uint32_t pixel_clock; enum radeon_rmx_type rmx_type; + enum radeon_underscan_type underscan_type; struct drm_display_mode native_mode; void *enc_priv; int audio_polling_active; @@ -392,7 +403,7 @@ struct radeon_connector { uint32_t connector_id; uint32_t devices; struct radeon_i2c_chan *ddc_bus; - /* some systems have a an hdmi and vga port with a shared ddc line */ + /* some systems have an hdmi and vga port with a shared ddc line */ bool shared_ddc; bool use_digital; /* we need to mind the EDID between detect @@ -414,6 +425,9 @@ radeon_combios_get_tv_info(struct radeon_device *rdev); extern enum radeon_tv_std radeon_atombios_get_tv_info(struct radeon_device *rdev); +extern struct drm_connector * +radeon_get_connector_for_encoder(struct drm_encoder *encoder); + extern void radeon_connector_hotplug(struct drm_connector *connector); extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector); extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector, diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index d5b9373ce06..0afd1e62347 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -110,6 +110,7 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, bo->surface_reg = -1; INIT_LIST_HEAD(&bo->list); +retry: radeon_ttm_placement_from_domain(bo, domain); /* Kernel allocation are uninterruptible */ mutex_lock(&rdev->vram_mutex); @@ -118,10 +119,15 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj, &radeon_ttm_bo_destroy); mutex_unlock(&rdev->vram_mutex); if (unlikely(r != 0)) { - if (r != -ERESTARTSYS) + if (r != -ERESTARTSYS) { + if (domain == RADEON_GEM_DOMAIN_VRAM) { + domain |= RADEON_GEM_DOMAIN_GTT; + goto retry; + } dev_err(rdev->dev, "object_init failed for (%lu, 0x%08X)\n", size, domain); + } return r; } *bo_ptr = bo; @@ -321,6 +327,7 @@ int radeon_bo_list_validate(struct list_head *head) { struct radeon_bo_list *lobj; struct radeon_bo *bo; + u32 domain; int r; list_for_each_entry(lobj, head, list) { @@ -333,17 +340,19 @@ int radeon_bo_list_validate(struct list_head *head) list_for_each_entry(lobj, head, list) { bo = lobj->bo; if (!bo->pin_count) { - if (lobj->wdomain) { - radeon_ttm_placement_from_domain(bo, - lobj->wdomain); - } else { - radeon_ttm_placement_from_domain(bo, - lobj->rdomain); - } + domain = lobj->wdomain ? 
lobj->wdomain : lobj->rdomain; + + retry: + radeon_ttm_placement_from_domain(bo, domain); r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false, false); - if (unlikely(r)) + if (unlikely(r)) { + if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) { + domain |= RADEON_GEM_DOMAIN_GTT; + goto retry; + } return r; + } } lobj->gpu_offset = radeon_bo_gpu_offset(bo); lobj->tiling_flags = bo->tiling_flags; diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 3fa6984d989..95f8b3a3c43 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -27,6 +27,8 @@ #include <linux/acpi.h> #endif #include <linux/power_supply.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> #define RADEON_IDLE_LOOP_MS 100 #define RADEON_RECLOCK_DELAY_MS 200 @@ -60,9 +62,9 @@ static int radeon_acpi_event(struct notifier_block *nb, if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) { if (power_supply_is_system_supplied() > 0) - DRM_DEBUG("pm: AC\n"); + DRM_DEBUG_DRIVER("pm: AC\n"); else - DRM_DEBUG("pm: DC\n"); + DRM_DEBUG_DRIVER("pm: DC\n"); if (rdev->pm.pm_method == PM_METHOD_PROFILE) { if (rdev->pm.profile == PM_PROFILE_AUTO) { @@ -196,7 +198,7 @@ static void radeon_set_power_state(struct radeon_device *rdev) radeon_set_engine_clock(rdev, sclk); radeon_pm_debug_check_in_vbl(rdev, true); rdev->pm.current_sclk = sclk; - DRM_DEBUG("Setting: e: %d\n", sclk); + DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk); } /* set memory clock */ @@ -205,7 +207,7 @@ static void radeon_set_power_state(struct radeon_device *rdev) radeon_set_memory_clock(rdev, mclk); radeon_pm_debug_check_in_vbl(rdev, true); rdev->pm.current_mclk = mclk; - DRM_DEBUG("Setting: m: %d\n", mclk); + DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk); } if (misc_after) @@ -217,7 +219,7 @@ static void radeon_set_power_state(struct radeon_device *rdev) rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index; rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index; } else - DRM_DEBUG("pm: GUI not idle!!!\n"); + DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n"); } static void radeon_pm_set_clocks(struct radeon_device *rdev) @@ -292,27 +294,27 @@ static void radeon_pm_print_states(struct radeon_device *rdev) struct radeon_power_state *power_state; struct radeon_pm_clock_info *clock_info; - DRM_DEBUG("%d Power State(s)\n", rdev->pm.num_power_states); + DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states); for (i = 0; i < rdev->pm.num_power_states; i++) { power_state = &rdev->pm.power_state[i]; - DRM_DEBUG("State %d: %s\n", i, + DRM_DEBUG_DRIVER("State %d: %s\n", i, radeon_pm_state_type_name[power_state->type]); if (i == rdev->pm.default_power_state_index) - DRM_DEBUG("\tDefault"); + DRM_DEBUG_DRIVER("\tDefault"); if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP)) - DRM_DEBUG("\t%d PCIE Lanes\n", power_state->pcie_lanes); + DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes); if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY) - DRM_DEBUG("\tSingle display only\n"); - DRM_DEBUG("\t%d Clock Mode(s)\n", power_state->num_clock_modes); + DRM_DEBUG_DRIVER("\tSingle display only\n"); + DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes); for (j = 0; j < power_state->num_clock_modes; j++) { clock_info = &(power_state->clock_info[j]); if (rdev->flags & RADEON_IS_IGP) - DRM_DEBUG("\t\t%d e: %d%s\n", + DRM_DEBUG_DRIVER("\t\t%d e: %d%s\n", j, clock_info->sclk * 10, clock_info->flags & 
RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : ""); else - DRM_DEBUG("\t\t%d e: %d\tm: %d\tv: %d%s\n", + DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d%s\n", j, clock_info->sclk * 10, clock_info->mclk * 10, @@ -424,6 +426,82 @@ fail: static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile); static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method); +static ssize_t radeon_hwmon_show_temp(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); + struct radeon_device *rdev = ddev->dev_private; + u32 temp; + + switch (rdev->pm.int_thermal_type) { + case THERMAL_TYPE_RV6XX: + temp = rv6xx_get_temp(rdev); + break; + case THERMAL_TYPE_RV770: + temp = rv770_get_temp(rdev); + break; + case THERMAL_TYPE_EVERGREEN: + temp = evergreen_get_temp(rdev); + break; + default: + temp = 0; + break; + } + + return snprintf(buf, PAGE_SIZE, "%d\n", temp); +} + +static ssize_t radeon_hwmon_show_name(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "radeon\n"); +} + +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0); +static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0); + +static struct attribute *hwmon_attributes[] = { + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_name.dev_attr.attr, + NULL +}; + +static const struct attribute_group hwmon_attrgroup = { + .attrs = hwmon_attributes, +}; + +static void radeon_hwmon_init(struct radeon_device *rdev) +{ + int err; + + rdev->pm.int_hwmon_dev = NULL; + + switch (rdev->pm.int_thermal_type) { + case THERMAL_TYPE_RV6XX: + case THERMAL_TYPE_RV770: + case THERMAL_TYPE_EVERGREEN: + rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); + dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev); + err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj, + &hwmon_attrgroup); + if (err) + DRM_ERROR("Unable to create hwmon sysfs file: %d\n", err); + break; + default: + break; + } +} + +static void radeon_hwmon_fini(struct radeon_device *rdev) +{ + if (rdev->pm.int_hwmon_dev) { + sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup); + hwmon_device_unregister(rdev->pm.int_hwmon_dev); + } +} + void radeon_pm_suspend(struct radeon_device *rdev) { bool flush_wq = false; @@ -471,6 +549,7 @@ int radeon_pm_init(struct radeon_device *rdev) rdev->pm.dynpm_can_downclock = true; rdev->pm.current_sclk = rdev->clock.default_sclk; rdev->pm.current_mclk = rdev->clock.default_mclk; + rdev->pm.int_thermal_type = THERMAL_TYPE_NONE; if (rdev->bios) { if (rdev->is_atom_bios) @@ -481,6 +560,8 @@ int radeon_pm_init(struct radeon_device *rdev) radeon_pm_init_profile(rdev); } + /* set up the internal thermal sensor if applicable */ + radeon_hwmon_init(rdev); if (rdev->pm.num_power_states > 1) { /* where's the best place to put these? 
*/ ret = device_create_file(rdev->dev, &dev_attr_power_profile); @@ -536,6 +617,7 @@ void radeon_pm_fini(struct radeon_device *rdev) #endif } + radeon_hwmon_fini(rdev); if (rdev->pm.i2c_bus) radeon_i2c_destroy(rdev->pm.i2c_bus); } @@ -576,7 +658,7 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev) radeon_pm_get_dynpm_state(rdev); radeon_pm_set_clocks(rdev); - DRM_DEBUG("radeon: dynamic power management deactivated\n"); + DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n"); } } else if (rdev->pm.active_crtc_count == 1) { /* TODO: Increase clocks if needed for current mode */ @@ -593,7 +675,7 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev) rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE; queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work, msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); - DRM_DEBUG("radeon: dynamic power management activated\n"); + DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n"); } } else { /* count == 0 */ if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) { @@ -689,7 +771,7 @@ static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish bool in_vbl = radeon_pm_in_vbl(rdev); if (in_vbl == false) - DRM_DEBUG("not in vbl for pm change %08x at %s\n", stat_crtc, + DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc, finish ? "exit" : "entry"); return in_vbl; } diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index e9918d88f5b..84c53e41a88 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -59,28 +59,28 @@ static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev) /* * Global memory. */ -static int radeon_ttm_mem_global_init(struct ttm_global_reference *ref) +static int radeon_ttm_mem_global_init(struct drm_global_reference *ref) { return ttm_mem_global_init(ref->object); } -static void radeon_ttm_mem_global_release(struct ttm_global_reference *ref) +static void radeon_ttm_mem_global_release(struct drm_global_reference *ref) { ttm_mem_global_release(ref->object); } static int radeon_ttm_global_init(struct radeon_device *rdev) { - struct ttm_global_reference *global_ref; + struct drm_global_reference *global_ref; int r; rdev->mman.mem_global_referenced = false; global_ref = &rdev->mman.mem_global_ref; - global_ref->global_type = TTM_GLOBAL_TTM_MEM; + global_ref->global_type = DRM_GLOBAL_TTM_MEM; global_ref->size = sizeof(struct ttm_mem_global); global_ref->init = &radeon_ttm_mem_global_init; global_ref->release = &radeon_ttm_mem_global_release; - r = ttm_global_item_ref(global_ref); + r = drm_global_item_ref(global_ref); if (r != 0) { DRM_ERROR("Failed setting up TTM memory accounting " "subsystem.\n"); @@ -90,14 +90,14 @@ static int radeon_ttm_global_init(struct radeon_device *rdev) rdev->mman.bo_global_ref.mem_glob = rdev->mman.mem_global_ref.object; global_ref = &rdev->mman.bo_global_ref.ref; - global_ref->global_type = TTM_GLOBAL_TTM_BO; + global_ref->global_type = DRM_GLOBAL_TTM_BO; global_ref->size = sizeof(struct ttm_bo_global); global_ref->init = &ttm_bo_global_init; global_ref->release = &ttm_bo_global_release; - r = ttm_global_item_ref(global_ref); + r = drm_global_item_ref(global_ref); if (r != 0) { DRM_ERROR("Failed setting up TTM BO subsystem.\n"); - ttm_global_item_unref(&rdev->mman.mem_global_ref); + drm_global_item_unref(&rdev->mman.mem_global_ref); return r; } @@ -108,8 +108,8 @@ static int radeon_ttm_global_init(struct radeon_device *rdev) static void radeon_ttm_global_fini(struct 
radeon_device *rdev) { if (rdev->mman.mem_global_referenced) { - ttm_global_item_unref(&rdev->mman.bo_global_ref.ref); - ttm_global_item_unref(&rdev->mman.mem_global_ref); + drm_global_item_unref(&rdev->mman.bo_global_ref.ref); + drm_global_item_unref(&rdev->mman.mem_global_ref); rdev->mman.mem_global_referenced = false; } } diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300 index 1e97b2d129f..b506ec1cab4 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r300 +++ b/drivers/gpu/drm/radeon/reg_srcs/r300 @@ -187,7 +187,6 @@ r300 0x4f60 0x4364 RS_INST_13 0x4368 RS_INST_14 0x436C RS_INST_15 -0x43A4 SC_HYPERZ_EN 0x43A8 SC_EDGERULE 0x43B0 SC_CLIP_0_A 0x43B4 SC_CLIP_0_B @@ -716,16 +715,4 @@ r300 0x4f60 0x4F08 ZB_STENCILREFMASK 0x4F14 ZB_ZTOP 0x4F18 ZB_ZCACHE_CTLSTAT -0x4F1C ZB_BW_CNTL -0x4F28 ZB_DEPTHCLEARVALUE -0x4F30 ZB_ZMASK_OFFSET -0x4F34 ZB_ZMASK_PITCH -0x4F38 ZB_ZMASK_WRINDEX -0x4F3C ZB_ZMASK_DWORD -0x4F40 ZB_ZMASK_RDINDEX -0x4F44 ZB_HIZ_OFFSET -0x4F48 ZB_HIZ_WRINDEX -0x4F4C ZB_HIZ_DWORD -0x4F50 ZB_HIZ_RDINDEX -0x4F54 ZB_HIZ_PITCH 0x4F58 ZB_ZPASS_DATA diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420 index e958980d00f..8c1214c2390 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/r420 +++ b/drivers/gpu/drm/radeon/reg_srcs/r420 @@ -130,6 +130,7 @@ r420 0x4f60 0x401C GB_SELECT 0x4020 GB_AA_CONFIG 0x4024 GB_FIFO_SIZE +0x4028 GB_Z_PEQ_CONFIG 0x4100 TX_INVALTAGS 0x4200 GA_POINT_S0 0x4204 GA_POINT_T0 @@ -187,7 +188,6 @@ r420 0x4f60 0x4364 RS_INST_13 0x4368 RS_INST_14 0x436C RS_INST_15 -0x43A4 SC_HYPERZ_EN 0x43A8 SC_EDGERULE 0x43B0 SC_CLIP_0_A 0x43B4 SC_CLIP_0_B @@ -782,16 +782,4 @@ r420 0x4f60 0x4F08 ZB_STENCILREFMASK 0x4F14 ZB_ZTOP 0x4F18 ZB_ZCACHE_CTLSTAT -0x4F1C ZB_BW_CNTL -0x4F28 ZB_DEPTHCLEARVALUE -0x4F30 ZB_ZMASK_OFFSET -0x4F34 ZB_ZMASK_PITCH -0x4F38 ZB_ZMASK_WRINDEX -0x4F3C ZB_ZMASK_DWORD -0x4F40 ZB_ZMASK_RDINDEX -0x4F44 ZB_HIZ_OFFSET -0x4F48 ZB_HIZ_WRINDEX -0x4F4C ZB_HIZ_DWORD -0x4F50 ZB_HIZ_RDINDEX -0x4F54 ZB_HIZ_PITCH 0x4F58 ZB_ZPASS_DATA diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600 index 83e8bc0c2bb..0828d80396f 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rs600 +++ b/drivers/gpu/drm/radeon/reg_srcs/rs600 @@ -187,7 +187,6 @@ rs600 0x6d40 0x4364 RS_INST_13 0x4368 RS_INST_14 0x436C RS_INST_15 -0x43A4 SC_HYPERZ_EN 0x43A8 SC_EDGERULE 0x43B0 SC_CLIP_0_A 0x43B4 SC_CLIP_0_B @@ -782,16 +781,4 @@ rs600 0x6d40 0x4F08 ZB_STENCILREFMASK 0x4F14 ZB_ZTOP 0x4F18 ZB_ZCACHE_CTLSTAT -0x4F1C ZB_BW_CNTL -0x4F28 ZB_DEPTHCLEARVALUE -0x4F30 ZB_ZMASK_OFFSET -0x4F34 ZB_ZMASK_PITCH -0x4F38 ZB_ZMASK_WRINDEX -0x4F3C ZB_ZMASK_DWORD -0x4F40 ZB_ZMASK_RDINDEX -0x4F44 ZB_HIZ_OFFSET -0x4F48 ZB_HIZ_WRINDEX -0x4F4C ZB_HIZ_DWORD -0x4F50 ZB_HIZ_RDINDEX -0x4F54 ZB_HIZ_PITCH 0x4F58 ZB_ZPASS_DATA diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515 index 1e46233985e..8293855f5f0 100644 --- a/drivers/gpu/drm/radeon/reg_srcs/rv515 +++ b/drivers/gpu/drm/radeon/reg_srcs/rv515 @@ -235,7 +235,6 @@ rv515 0x6d40 0x4354 RS_INST_13 0x4358 RS_INST_14 0x435C RS_INST_15 -0x43A4 SC_HYPERZ_EN 0x43A8 SC_EDGERULE 0x43B0 SC_CLIP_0_A 0x43B4 SC_CLIP_0_B @@ -479,17 +478,5 @@ rv515 0x6d40 0x4F08 ZB_STENCILREFMASK 0x4F14 ZB_ZTOP 0x4F18 ZB_ZCACHE_CTLSTAT -0x4F1C ZB_BW_CNTL -0x4F28 ZB_DEPTHCLEARVALUE -0x4F30 ZB_ZMASK_OFFSET -0x4F34 ZB_ZMASK_PITCH -0x4F38 ZB_ZMASK_WRINDEX -0x4F3C ZB_ZMASK_DWORD -0x4F40 ZB_ZMASK_RDINDEX -0x4F44 ZB_HIZ_OFFSET -0x4F48 ZB_HIZ_WRINDEX -0x4F4C ZB_HIZ_DWORD -0x4F50 
ZB_HIZ_RDINDEX -0x4F54 ZB_HIZ_PITCH 0x4F58 ZB_ZPASS_DATA 0x4FD4 ZB_STENCILREFMASK_BF diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index f454c9a5e7f..ae2b76b9a38 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -55,14 +55,6 @@ void rs400_gart_adjust_size(struct radeon_device *rdev) rdev->mc.gtt_size = 32 * 1024 * 1024; return; } - if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) { - /* FIXME: RS400 & RS480 seems to have issue with GART size - * if 4G of system memory (needs more testing) - */ - /* XXX is this still an issue with proper alignment? */ - rdev->mc.gtt_size = 32 * 1024 * 1024; - DRM_ERROR("Forcing to 32M GART size (because of ASIC bug ?)\n"); - } } void rs400_gart_tlb_flush(struct radeon_device *rdev) @@ -483,6 +475,8 @@ int rs400_init(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); /* TODO: disable VGA need to use VGA request */ + /* restore some register to sane defaults */ + r100_restore_sanity(rdev); /* BIOS*/ if (!radeon_get_bios(rdev)) { if (ASIC_IS_AVIVO(rdev)) diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 6dc15ea8ba3..cc05b230d7e 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -686,8 +686,8 @@ void rs600_mc_init(struct radeon_device *rdev) { u64 base; - rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); - rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); + rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); + rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); rdev->mc.vram_is_ddr = true; rdev->mc.vram_width = 128; rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); @@ -696,7 +696,6 @@ void rs600_mc_init(struct radeon_device *rdev) rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); base = RREG32_MC(R_000004_MC_FB_LOCATION); base = G_000004_MC_FB_START(base) << 16; - rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); radeon_vram_location(rdev, &rdev->mc, base); rdev->mc.gtt_base_align = 0; radeon_gtt_location(rdev, &rdev->mc); @@ -813,6 +812,13 @@ static int rs600_startup(struct radeon_device *rdev) dev_err(rdev->dev, "failled initializing IB (%d).\n", r); return r; } + + r = r600_audio_init(rdev); + if (r) { + dev_err(rdev->dev, "failed initializing audio\n"); + return r; + } + return 0; } @@ -839,6 +845,7 @@ int rs600_resume(struct radeon_device *rdev) int rs600_suspend(struct radeon_device *rdev) { + r600_audio_fini(rdev); r100_cp_disable(rdev); r100_wb_disable(rdev); rs600_irq_disable(rdev); @@ -848,6 +855,7 @@ int rs600_suspend(struct radeon_device *rdev) void rs600_fini(struct radeon_device *rdev) { + r600_audio_fini(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -871,6 +879,8 @@ int rs600_init(struct radeon_device *rdev) radeon_scratch_init(rdev); /* Initialize surface registers */ radeon_surface_init(rdev); + /* restore some register to sane defaults */ + r100_restore_sanity(rdev); /* BIOS */ if (!radeon_get_bios(rdev)) { if (ASIC_IS_AVIVO(rdev)) diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index ce4ecbe1081..3e3f75718be 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -154,13 +154,13 @@ void rs690_mc_init(struct radeon_device *rdev) rdev->mc.vram_width = 128; rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); rdev->mc.mc_vram_size = rdev->mc.real_vram_size; - rdev->mc.aper_base = 
drm_get_resource_start(rdev->ddev, 0); - rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); + rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); + rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); rdev->mc.visible_vram_size = rdev->mc.aper_size; base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); base = G_000100_MC_FB_START(base) << 16; - rs690_pm_info(rdev); rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); + rs690_pm_info(rdev); radeon_vram_location(rdev, &rdev->mc, base); rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1; radeon_gtt_location(rdev, &rdev->mc); @@ -398,7 +398,9 @@ void rs690_bandwidth_update(struct radeon_device *rdev) struct drm_display_mode *mode1 = NULL; struct rs690_watermark wm0; struct rs690_watermark wm1; - u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt; + u32 tmp; + u32 d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1); + u32 d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1); fixed20_12 priority_mark02, priority_mark12, fill_rate; fixed20_12 a, b; @@ -495,10 +497,6 @@ void rs690_bandwidth_update(struct radeon_device *rdev) d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); } - WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); - WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); - WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); - WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); } else if (mode0) { if (dfixed_trunc(wm0.dbpp) > 64) a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair); @@ -528,13 +526,7 @@ void rs690_bandwidth_update(struct radeon_device *rdev) d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); if (rdev->disp_priority == 2) d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1); - WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); - WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); - WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, - S_006D48_D2MODE_PRIORITY_A_OFF(1)); - WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, - S_006D4C_D2MODE_PRIORITY_B_OFF(1)); - } else { + } else if (mode1) { if (dfixed_trunc(wm1.dbpp) > 64) a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair); else @@ -563,13 +555,12 @@ void rs690_bandwidth_update(struct radeon_device *rdev) d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); if (rdev->disp_priority == 2) d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1); - WREG32(R_006548_D1MODE_PRIORITY_A_CNT, - S_006548_D1MODE_PRIORITY_A_OFF(1)); - WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, - S_00654C_D1MODE_PRIORITY_B_OFF(1)); - WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); - WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); } + + WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); + WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); + WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); + WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); } uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) @@ -641,6 +632,13 @@ static int rs690_startup(struct radeon_device *rdev) dev_err(rdev->dev, "failled initializing IB (%d).\n", r); return r; } + + r = r600_audio_init(rdev); + if (r) { + dev_err(rdev->dev, "failed initializing audio\n"); + return r; + } + return 0; } @@ -667,6 +665,7 @@ int rs690_resume(struct radeon_device *rdev) int rs690_suspend(struct radeon_device *rdev) { + r600_audio_fini(rdev); r100_cp_disable(rdev); r100_wb_disable(rdev); 
rs600_irq_disable(rdev); @@ -676,6 +675,7 @@ int rs690_suspend(struct radeon_device *rdev) void rs690_fini(struct radeon_device *rdev) { + r600_audio_fini(rdev); r100_cp_fini(rdev); r100_wb_fini(rdev); r100_ib_fini(rdev); @@ -699,6 +699,8 @@ int rs690_init(struct radeon_device *rdev) radeon_scratch_init(rdev); /* Initialize surface registers */ radeon_surface_init(rdev); + /* restore some register to sane defaults */ + r100_restore_sanity(rdev); /* TODO: disable VGA need to use VGA request */ /* BIOS*/ if (!radeon_get_bios(rdev)) { diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 0c9c169a685..4d6e86041a9 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -469,6 +469,8 @@ int rv515_init(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); /* TODO: disable VGA need to use VGA request */ + /* restore some register to sane defaults */ + r100_restore_sanity(rdev); /* BIOS*/ if (!radeon_get_bios(rdev)) { if (ASIC_IS_AVIVO(rdev)) @@ -925,7 +927,9 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) struct drm_display_mode *mode1 = NULL; struct rv515_watermark wm0; struct rv515_watermark wm1; - u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt; + u32 tmp; + u32 d1mode_priority_a_cnt = MODE_PRIORITY_OFF; + u32 d2mode_priority_a_cnt = MODE_PRIORITY_OFF; fixed20_12 priority_mark02, priority_mark12, fill_rate; fixed20_12 a, b; @@ -999,10 +1003,6 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; } - WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); - WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); - WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); - WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); } else if (mode0) { if (dfixed_trunc(wm0.dbpp) > 64) a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair); @@ -1032,11 +1032,7 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) d1mode_priority_a_cnt = dfixed_trunc(priority_mark02); if (rdev->disp_priority == 2) d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; - WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); - WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); - WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); - WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); - } else { + } else if (mode1) { if (dfixed_trunc(wm1.dbpp) > 64) a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair); else @@ -1065,11 +1061,12 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev) d2mode_priority_a_cnt = dfixed_trunc(priority_mark12); if (rdev->disp_priority == 2) d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON; - WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); - WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); - WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); - WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); } + + WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt); + WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt); + WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt); + WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt); } void rv515_bandwidth_update(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index b7fd8206492..f1c79681011 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -42,6 +42,21 @@ static void rv770_gpu_init(struct radeon_device *rdev); void rv770_fini(struct radeon_device 
*rdev); +/* get temperature in millidegrees */ +u32 rv770_get_temp(struct radeon_device *rdev) +{ + u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >> + ASIC_T_SHIFT; + u32 actual_temp = 0; + + if ((temp >> 9) & 1) + actual_temp = 0; + else + actual_temp = (temp >> 1) & 0xff; + + return actual_temp * 1000; +} + void rv770_pm_misc(struct radeon_device *rdev) { int req_ps_idx = rdev->pm.requested_power_state_index; @@ -189,7 +204,10 @@ static void rv770_mc_program(struct radeon_device *rdev) WREG32((0x2c20 + j), 0x00000000); WREG32((0x2c24 + j), 0x00000000); } - WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); + /* r7xx hw bug. Read from HDP_DEBUG1 rather + * than writing to HDP_REG_COHERENCY_FLUSH_CNTL + */ + tmp = RREG32(HDP_DEBUG1); rv515_mc_stop(rdev, &save); if (r600_mc_wait_for_idle(rdev)) { @@ -659,8 +677,9 @@ static void rv770_gpu_init(struct radeon_device *rdev) r600_count_pipe_bits((cc_rb_backend_disable & R7XX_MAX_BACKENDS_MASK) >> 16)), (cc_rb_backend_disable >> 16)); - gb_tiling_config |= BACKEND_MAP(backend_map); + rdev->config.rv770.tile_config = gb_tiling_config; + gb_tiling_config |= BACKEND_MAP(backend_map); WREG32(GB_TILING_CONFIG, gb_tiling_config); WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); @@ -919,8 +938,8 @@ int rv770_mc_init(struct radeon_device *rdev) } rdev->mc.vram_width = numchan * chansize; /* Could aper size report 0 ? */ - rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); - rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); + rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); + rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); /* Setup GPU memory space */ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index 9506f8cb99e..b7a5a20e81d 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h @@ -122,12 +122,18 @@ #define GUI_ACTIVE (1<<31) #define GRBM_STATUS2 0x8014 +#define CG_MULT_THERMAL_STATUS 0x740 +#define ASIC_T(x) ((x) << 16) +#define ASIC_T_MASK 0x3FF0000 +#define ASIC_T_SHIFT 16 + #define HDP_HOST_PATH_CNTL 0x2C00 #define HDP_NONSURFACE_BASE 0x2C04 #define HDP_NONSURFACE_INFO 0x2C08 #define HDP_NONSURFACE_SIZE 0x2C0C #define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0 #define HDP_TILING_CONFIG 0x2F3C +#define HDP_DEBUG1 0x2F34 #define MC_SHARED_CHMAP 0x2004 #define NOOFCHAN_SHIFT 12 diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c index fa05cda8c98..976dc8d2528 100644 --- a/drivers/gpu/drm/savage/savage_bci.c +++ b/drivers/gpu/drm/savage/savage_bci.c @@ -573,13 +573,13 @@ int savage_driver_firstopen(struct drm_device *dev) dev_priv->mtrr[2].handle = -1; if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { fb_rsrc = 0; - fb_base = drm_get_resource_start(dev, 0); + fb_base = pci_resource_start(dev->pdev, 0); fb_size = SAVAGE_FB_SIZE_S3; mmio_base = fb_base + SAVAGE_FB_SIZE_S3; aper_rsrc = 0; aperture_base = fb_base + SAVAGE_APERTURE_OFFSET; /* this should always be true */ - if (drm_get_resource_len(dev, 0) == 0x08000000) { + if (pci_resource_len(dev->pdev, 0) == 0x08000000) { /* Don't make MMIO write-cobining! We need 3 * MTRRs. 
*/ dev_priv->mtrr[0].base = fb_base; @@ -599,18 +599,19 @@ int savage_driver_firstopen(struct drm_device *dev) dev_priv->mtrr[2].size, DRM_MTRR_WC); } else { DRM_ERROR("strange pci_resource_len %08llx\n", - (unsigned long long)drm_get_resource_len(dev, 0)); + (unsigned long long) + pci_resource_len(dev->pdev, 0)); } } else if (dev_priv->chipset != S3_SUPERSAVAGE && dev_priv->chipset != S3_SAVAGE2000) { - mmio_base = drm_get_resource_start(dev, 0); + mmio_base = pci_resource_start(dev->pdev, 0); fb_rsrc = 1; - fb_base = drm_get_resource_start(dev, 1); + fb_base = pci_resource_start(dev->pdev, 1); fb_size = SAVAGE_FB_SIZE_S4; aper_rsrc = 1; aperture_base = fb_base + SAVAGE_APERTURE_OFFSET; /* this should always be true */ - if (drm_get_resource_len(dev, 1) == 0x08000000) { + if (pci_resource_len(dev->pdev, 1) == 0x08000000) { /* Can use one MTRR to cover both fb and * aperture. */ dev_priv->mtrr[0].base = fb_base; @@ -620,15 +621,16 @@ int savage_driver_firstopen(struct drm_device *dev) dev_priv->mtrr[0].size, DRM_MTRR_WC); } else { DRM_ERROR("strange pci_resource_len %08llx\n", - (unsigned long long)drm_get_resource_len(dev, 1)); + (unsigned long long) + pci_resource_len(dev->pdev, 1)); } } else { - mmio_base = drm_get_resource_start(dev, 0); + mmio_base = pci_resource_start(dev->pdev, 0); fb_rsrc = 1; - fb_base = drm_get_resource_start(dev, 1); - fb_size = drm_get_resource_len(dev, 1); + fb_base = pci_resource_start(dev->pdev, 1); + fb_size = pci_resource_len(dev->pdev, 1); aper_rsrc = 2; - aperture_base = drm_get_resource_start(dev, 2); + aperture_base = pci_resource_start(dev->pdev, 2); /* Automatic MTRR setup will do the right thing. */ } diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c index 4fd1f067d38..776bf9e9ea1 100644 --- a/drivers/gpu/drm/sis/sis_drv.c +++ b/drivers/gpu/drm/sis/sis_drv.c @@ -47,9 +47,8 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset) dev->dev_private = (void *)dev_priv; dev_priv->chipset = chipset; ret = drm_sman_init(&dev_priv->sman, 2, 12, 8); - if (ret) { + if (ret) kfree(dev_priv); - } return ret; } diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c index af22111397d..07d0f2979ca 100644 --- a/drivers/gpu/drm/sis/sis_mm.c +++ b/drivers/gpu/drm/sis/sis_mm.c @@ -78,7 +78,7 @@ static unsigned long sis_sman_mm_offset(void *private, void *ref) #else /* CONFIG_FB_SIS[_MODULE] */ #define SIS_MM_ALIGN_SHIFT 4 -#define SIS_MM_ALIGN_MASK ( (1 << SIS_MM_ALIGN_SHIFT) - 1) +#define SIS_MM_ALIGN_MASK ((1 << SIS_MM_ALIGN_SHIFT) - 1) #endif /* CONFIG_FB_SIS[_MODULE] */ @@ -225,9 +225,8 @@ static drm_local_map_t *sis_reg_init(struct drm_device *dev) map = entry->map; if (!map) continue; - if (map->type == _DRM_REGISTERS) { + if (map->type == _DRM_REGISTERS) return map; - } } return NULL; } @@ -264,10 +263,10 @@ int sis_idle(struct drm_device *dev) end = jiffies + (DRM_HZ * 3); - for (i=0; i<4; ++i) { + for (i = 0; i < 4; ++i) { do { idle_reg = SIS_READ(0x85cc); - } while ( !time_after_eq(jiffies, end) && + } while (!time_after_eq(jiffies, end) && ((idle_reg & 0x80000000) != 0x80000000)); } @@ -301,7 +300,7 @@ void sis_lastclose(struct drm_device *dev) mutex_unlock(&dev->struct_mutex); } -void sis_reclaim_buffers_locked(struct drm_device * dev, +void sis_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv) { drm_sis_private_t *dev_priv = dev->dev_private; @@ -312,9 +311,8 @@ void sis_reclaim_buffers_locked(struct drm_device * dev, return; } - if (dev->driver->dma_quiescent) { + if 
(dev->driver->dma_quiescent) dev->driver->dma_quiescent(dev); - } drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile index 4256e200647..b256d4adfaf 100644 --- a/drivers/gpu/drm/ttm/Makefile +++ b/drivers/gpu/drm/ttm/Makefile @@ -3,7 +3,7 @@ ccflags-y := -Iinclude/drm ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ - ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \ + ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o obj-$(CONFIG_DRM_TTM) += ttm.o diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 555ebb12ace..cb4cf7ef4d1 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -476,7 +476,6 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all) ++put_count; } if (bo->mem.mm_node) { - bo->mem.mm_node->private = NULL; drm_mm_put_block(bo->mem.mm_node); bo->mem.mm_node = NULL; } @@ -670,7 +669,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, printk(KERN_ERR TTM_PFX "Buffer eviction failed\n"); spin_lock(&glob->lru_lock); if (evict_mem.mm_node) { - evict_mem.mm_node->private = NULL; drm_mm_put_block(evict_mem.mm_node); evict_mem.mm_node = NULL; } @@ -929,8 +927,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, mem->mm_node = node; mem->mem_type = mem_type; mem->placement = cur_flags; - if (node) - node->private = bo; return 0; } @@ -973,7 +969,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, interruptible, no_wait_reserve, no_wait_gpu); if (ret == 0 && mem->mm_node) { mem->placement = cur_flags; - mem->mm_node->private = bo; return 0; } if (ret == -ERESTARTSYS) @@ -1029,7 +1024,6 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo, out_unlock: if (ret && mem.mm_node) { spin_lock(&glob->lru_lock); - mem.mm_node->private = NULL; drm_mm_put_block(mem.mm_node); spin_unlock(&glob->lru_lock); } @@ -1401,7 +1395,7 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj) kfree(glob); } -void ttm_bo_global_release(struct ttm_global_reference *ref) +void ttm_bo_global_release(struct drm_global_reference *ref) { struct ttm_bo_global *glob = ref->object; @@ -1410,7 +1404,7 @@ void ttm_bo_global_release(struct ttm_global_reference *ref) } EXPORT_SYMBOL(ttm_bo_global_release); -int ttm_bo_global_init(struct ttm_global_reference *ref) +int ttm_bo_global_init(struct drm_global_reference *ref) { struct ttm_bo_global_ref *bo_ref = container_of(ref, struct ttm_bo_global_ref, ref); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 13012a1f148..7cffb3e0423 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -353,8 +353,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, fbo->vm_node = NULL; fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj); - if (fbo->mem.mm_node) - fbo->mem.mm_node->private = (void *)fbo; kref_init(&fbo->list_kref); kref_init(&fbo->kref); fbo->destroy = &ttm_transfered_destroy; diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c index 9a6edbfeaa9..902d7cf9fb4 100644 --- a/drivers/gpu/drm/ttm/ttm_module.c +++ b/drivers/gpu/drm/ttm/ttm_module.c @@ -70,8 +70,6 @@ static int __init ttm_init(void) if (unlikely(ret != 0)) return ret; - ttm_global_init(); - atomic_set(&device_released, 0); ret = drm_class_device_register(&ttm_drm_class_device); if (unlikely(ret != 
0)) @@ -81,7 +79,6 @@ static int __init ttm_init(void) out_no_dev_reg: atomic_set(&device_released, 1); wake_up_all(&exit_q); - ttm_global_release(); return ret; } @@ -95,7 +92,6 @@ static void __exit ttm_exit(void) */ wait_event(exit_q, atomic_read(&device_released) == 1); - ttm_global_release(); } module_init(ttm_init); diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c index bfb92d28326..68dda74a50a 100644 --- a/drivers/gpu/drm/via/via_dma.c +++ b/drivers/gpu/drm/via/via_dma.c @@ -58,28 +58,29 @@ *((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1; \ *((uint32_t *)(vb) + 1) = (nData); \ vb = ((uint32_t *)vb) + 2; \ - dev_priv->dma_low +=8; \ + dev_priv->dma_low += 8; \ } #define via_flush_write_combine() DRM_MEMORYBARRIER() -#define VIA_OUT_RING_QW(w1,w2) \ +#define VIA_OUT_RING_QW(w1, w2) do { \ *vb++ = (w1); \ *vb++ = (w2); \ - dev_priv->dma_low += 8; + dev_priv->dma_low += 8; \ +} while (0) -static void via_cmdbuf_start(drm_via_private_t * dev_priv); -static void via_cmdbuf_pause(drm_via_private_t * dev_priv); -static void via_cmdbuf_reset(drm_via_private_t * dev_priv); -static void via_cmdbuf_rewind(drm_via_private_t * dev_priv); -static int via_wait_idle(drm_via_private_t * dev_priv); -static void via_pad_cache(drm_via_private_t * dev_priv, int qwords); +static void via_cmdbuf_start(drm_via_private_t *dev_priv); +static void via_cmdbuf_pause(drm_via_private_t *dev_priv); +static void via_cmdbuf_reset(drm_via_private_t *dev_priv); +static void via_cmdbuf_rewind(drm_via_private_t *dev_priv); +static int via_wait_idle(drm_via_private_t *dev_priv); +static void via_pad_cache(drm_via_private_t *dev_priv, int qwords); /* * Free space in command buffer. */ -static uint32_t via_cmdbuf_space(drm_via_private_t * dev_priv) +static uint32_t via_cmdbuf_space(drm_via_private_t *dev_priv) { uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr; uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base; @@ -93,7 +94,7 @@ static uint32_t via_cmdbuf_space(drm_via_private_t * dev_priv) * How much does the command regulator lag behind? 
*/ -static uint32_t via_cmdbuf_lag(drm_via_private_t * dev_priv) +static uint32_t via_cmdbuf_lag(drm_via_private_t *dev_priv) { uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr; uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base; @@ -108,7 +109,7 @@ static uint32_t via_cmdbuf_lag(drm_via_private_t * dev_priv) */ static inline int -via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size) +via_cmdbuf_wait(drm_via_private_t *dev_priv, unsigned int size) { uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr; uint32_t cur_addr, hw_addr, next_addr; @@ -146,14 +147,13 @@ static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv, dev_priv->dma_high) { via_cmdbuf_rewind(dev_priv); } - if (via_cmdbuf_wait(dev_priv, size) != 0) { + if (via_cmdbuf_wait(dev_priv, size) != 0) return NULL; - } return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low); } -int via_dma_cleanup(struct drm_device * dev) +int via_dma_cleanup(struct drm_device *dev) { if (dev->dev_private) { drm_via_private_t *dev_priv = @@ -171,9 +171,9 @@ int via_dma_cleanup(struct drm_device * dev) return 0; } -static int via_initialize(struct drm_device * dev, - drm_via_private_t * dev_priv, - drm_via_dma_init_t * init) +static int via_initialize(struct drm_device *dev, + drm_via_private_t *dev_priv, + drm_via_dma_init_t *init) { if (!dev_priv || !dev_priv->mmio) { DRM_ERROR("via_dma_init called before via_map_init\n"); @@ -258,7 +258,7 @@ static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *fil return retcode; } -static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t * cmd) +static int via_dispatch_cmdbuffer(struct drm_device *dev, drm_via_cmdbuffer_t *cmd) { drm_via_private_t *dev_priv; uint32_t *vb; @@ -271,9 +271,8 @@ static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t * return -EFAULT; } - if (cmd->size > VIA_PCI_BUF_SIZE) { + if (cmd->size > VIA_PCI_BUF_SIZE) return -ENOMEM; - } if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size)) return -EFAULT; @@ -291,9 +290,8 @@ static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t * } vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 
0x102 : cmd->size); - if (vb == NULL) { + if (vb == NULL) return -EAGAIN; - } memcpy(vb, dev_priv->pci_buf, cmd->size); @@ -311,13 +309,12 @@ static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t * return 0; } -int via_driver_dma_quiescent(struct drm_device * dev) +int via_driver_dma_quiescent(struct drm_device *dev) { drm_via_private_t *dev_priv = dev->dev_private; - if (!via_wait_idle(dev_priv)) { + if (!via_wait_idle(dev_priv)) return -EBUSY; - } return 0; } @@ -339,22 +336,17 @@ static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *fi DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size); ret = via_dispatch_cmdbuffer(dev, cmdbuf); - if (ret) { - return ret; - } - - return 0; + return ret; } -static int via_dispatch_pci_cmdbuffer(struct drm_device * dev, - drm_via_cmdbuffer_t * cmd) +static int via_dispatch_pci_cmdbuffer(struct drm_device *dev, + drm_via_cmdbuffer_t *cmd) { drm_via_private_t *dev_priv = dev->dev_private; int ret; - if (cmd->size > VIA_PCI_BUF_SIZE) { + if (cmd->size > VIA_PCI_BUF_SIZE) return -ENOMEM; - } if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size)) return -EFAULT; @@ -380,19 +372,14 @@ static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size); ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf); - if (ret) { - return ret; - } - - return 0; + return ret; } -static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv, +static inline uint32_t *via_align_buffer(drm_via_private_t *dev_priv, uint32_t * vb, int qw_count) { - for (; qw_count > 0; --qw_count) { + for (; qw_count > 0; --qw_count) VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY); - } return vb; } @@ -401,7 +388,7 @@ static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv, * * Returns virtual pointer to ring buffer. */ -static inline uint32_t *via_get_dma(drm_via_private_t * dev_priv) +static inline uint32_t *via_get_dma(drm_via_private_t *dev_priv) { return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low); } @@ -411,18 +398,18 @@ static inline uint32_t *via_get_dma(drm_via_private_t * dev_priv) * modifying the pause address stored in the buffer itself. If * the regulator has already paused, restart it. */ -static int via_hook_segment(drm_via_private_t * dev_priv, +static int via_hook_segment(drm_via_private_t *dev_priv, uint32_t pause_addr_hi, uint32_t pause_addr_lo, int no_pci_fire) { int paused, count; volatile uint32_t *paused_at = dev_priv->last_pause_ptr; - uint32_t reader,ptr; + uint32_t reader, ptr; uint32_t diff; paused = 0; via_flush_write_combine(); - (void) *(volatile uint32_t *)(via_get_dma(dev_priv) -1); + (void) *(volatile uint32_t *)(via_get_dma(dev_priv) - 1); *paused_at = pause_addr_lo; via_flush_write_combine(); @@ -435,7 +422,7 @@ static int via_hook_segment(drm_via_private_t * dev_priv, dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1; /* - * If there is a possibility that the command reader will + * If there is a possibility that the command reader will * miss the new pause address and pause on the old one, * In that case we need to program the new start address * using PCI. 
@@ -443,9 +430,9 @@ static int via_hook_segment(drm_via_private_t * dev_priv, diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff; count = 10000000; - while(diff == 0 && count--) { + while (diff == 0 && count--) { paused = (VIA_READ(0x41c) & 0x80000000); - if (paused) + if (paused) break; reader = *(dev_priv->hw_addr_ptr); diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff; @@ -477,7 +464,7 @@ static int via_hook_segment(drm_via_private_t * dev_priv, return paused; } -static int via_wait_idle(drm_via_private_t * dev_priv) +static int via_wait_idle(drm_via_private_t *dev_priv) { int count = 10000000; @@ -491,9 +478,9 @@ static int via_wait_idle(drm_via_private_t * dev_priv) return count; } -static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type, - uint32_t addr, uint32_t * cmd_addr_hi, - uint32_t * cmd_addr_lo, int skip_wait) +static uint32_t *via_align_cmd(drm_via_private_t *dev_priv, uint32_t cmd_type, + uint32_t addr, uint32_t *cmd_addr_hi, + uint32_t *cmd_addr_lo, int skip_wait) { uint32_t agp_base; uint32_t cmd_addr, addr_lo, addr_hi; @@ -521,7 +508,7 @@ static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type, return vb; } -static void via_cmdbuf_start(drm_via_private_t * dev_priv) +static void via_cmdbuf_start(drm_via_private_t *dev_priv) { uint32_t pause_addr_lo, pause_addr_hi; uint32_t start_addr, start_addr_lo; @@ -580,7 +567,7 @@ static void via_cmdbuf_start(drm_via_private_t * dev_priv) dev_priv->dma_diff = ptr - reader; } -static void via_pad_cache(drm_via_private_t * dev_priv, int qwords) +static void via_pad_cache(drm_via_private_t *dev_priv, int qwords) { uint32_t *vb; @@ -590,7 +577,7 @@ static void via_pad_cache(drm_via_private_t * dev_priv, int qwords) via_align_buffer(dev_priv, vb, qwords); } -static inline void via_dummy_bitblt(drm_via_private_t * dev_priv) +static inline void via_dummy_bitblt(drm_via_private_t *dev_priv) { uint32_t *vb = via_get_dma(dev_priv); SetReg2DAGP(0x0C, (0 | (0 << 16))); @@ -598,7 +585,7 @@ static inline void via_dummy_bitblt(drm_via_private_t * dev_priv) SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000); } -static void via_cmdbuf_jump(drm_via_private_t * dev_priv) +static void via_cmdbuf_jump(drm_via_private_t *dev_priv) { uint32_t agp_base; uint32_t pause_addr_lo, pause_addr_hi; @@ -617,9 +604,8 @@ static void via_cmdbuf_jump(drm_via_private_t * dev_priv) */ dev_priv->dma_low = 0; - if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0) { + if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0) DRM_ERROR("via_cmdbuf_jump failed\n"); - } via_dummy_bitblt(dev_priv); via_dummy_bitblt(dev_priv); @@ -657,12 +643,12 @@ static void via_cmdbuf_jump(drm_via_private_t * dev_priv) } -static void via_cmdbuf_rewind(drm_via_private_t * dev_priv) +static void via_cmdbuf_rewind(drm_via_private_t *dev_priv) { via_cmdbuf_jump(dev_priv); } -static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type) +static void via_cmdbuf_flush(drm_via_private_t *dev_priv, uint32_t cmd_type) { uint32_t pause_addr_lo, pause_addr_hi; @@ -670,12 +656,12 @@ static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type) via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0); } -static void via_cmdbuf_pause(drm_via_private_t * dev_priv) +static void via_cmdbuf_pause(drm_via_private_t *dev_priv) { via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE); } -static void via_cmdbuf_reset(drm_via_private_t * dev_priv) +static void via_cmdbuf_reset(drm_via_private_t *dev_priv) { via_cmdbuf_flush(dev_priv, 
HC_HAGPBpID_STOP); via_wait_idle(dev_priv); @@ -708,9 +694,8 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file * case VIA_CMDBUF_SPACE: while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size) && --count) { - if (!d_siz->wait) { + if (!d_siz->wait) break; - } } if (!count) { DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n"); @@ -720,9 +705,8 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file * case VIA_CMDBUF_LAG: while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size) && --count) { - if (!d_siz->wait) { + if (!d_siz->wait) break; - } } if (!count) { DRM_ERROR("VIA_CMDBUF_LAG timed out.\n"); diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c index 4c54f043068..9b5b4d9dd62 100644 --- a/drivers/gpu/drm/via/via_dmablit.c +++ b/drivers/gpu/drm/via/via_dmablit.c @@ -70,7 +70,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg) descriptor_this_page; dma_addr_t next = vsg->chain_start; - while(num_desc--) { + while (num_desc--) { if (descriptor_this_page-- == 0) { cur_descriptor_page--; descriptor_this_page = vsg->descriptors_per_page - 1; @@ -174,19 +174,19 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg) struct page *page; int i; - switch(vsg->state) { + switch (vsg->state) { case dr_via_device_mapped: via_unmap_blit_from_device(pdev, vsg); case dr_via_desc_pages_alloc: - for (i=0; i<vsg->num_desc_pages; ++i) { + for (i = 0; i < vsg->num_desc_pages; ++i) { if (vsg->desc_pages[i] != NULL) - free_page((unsigned long)vsg->desc_pages[i]); + free_page((unsigned long)vsg->desc_pages[i]); } kfree(vsg->desc_pages); case dr_via_pages_locked: - for (i=0; i<vsg->num_pages; ++i) { - if ( NULL != (page = vsg->pages[i])) { - if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction)) + for (i = 0; i < vsg->num_pages; ++i) { + if (NULL != (page = vsg->pages[i])) { + if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction)) SetPageDirty(page); page_cache_release(page); } @@ -232,7 +232,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) { int ret; unsigned long first_pfn = VIA_PFN(xfer->mem_addr); - vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) - + vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) - first_pfn + 1; if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages))) @@ -268,7 +268,7 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg) { int i; - vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t); + vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t); vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) / vsg->descriptors_per_page; @@ -276,7 +276,7 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg) return -ENOMEM; vsg->state = dr_via_desc_pages_alloc; - for (i=0; i<vsg->num_desc_pages; ++i) { + for (i = 0; i < vsg->num_desc_pages; ++i) { if (NULL == (vsg->desc_pages[i] = (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL))) return -ENOMEM; @@ -318,21 +318,20 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq) drm_via_blitq_t *blitq = dev_priv->blit_queues + engine; int cur; int done_transfer; - unsigned long irqsave=0; + unsigned long irqsave = 0; uint32_t status = 0; DRM_DEBUG("DMA blit handler called. 
engine = %d, from_irq = %d, blitq = 0x%lx\n", engine, from_irq, (unsigned long) blitq); - if (from_irq) { + if (from_irq) spin_lock(&blitq->blit_lock); - } else { + else spin_lock_irqsave(&blitq->blit_lock, irqsave); - } done_transfer = blitq->is_active && - (( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD); - done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE)); + ((status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD); + done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE)); cur = blitq->cur; if (done_transfer) { @@ -377,18 +376,16 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq) if (!timer_pending(&blitq->poll_timer)) mod_timer(&blitq->poll_timer, jiffies + 1); } else { - if (timer_pending(&blitq->poll_timer)) { + if (timer_pending(&blitq->poll_timer)) del_timer(&blitq->poll_timer); - } via_dmablit_engine_off(dev, engine); } } - if (from_irq) { + if (from_irq) spin_unlock(&blitq->blit_lock); - } else { + else spin_unlock_irqrestore(&blitq->blit_lock, irqsave); - } } @@ -414,10 +411,9 @@ via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_que ((blitq->cur_blit_handle - handle) <= (1 << 23)); if (queue && active) { - slot = handle - blitq->done_blit_handle + blitq->cur -1; - if (slot >= VIA_NUM_BLIT_SLOTS) { + slot = handle - blitq->done_blit_handle + blitq->cur - 1; + if (slot >= VIA_NUM_BLIT_SLOTS) slot -= VIA_NUM_BLIT_SLOTS; - } *queue = blitq->blit_queue + slot; } @@ -506,12 +502,12 @@ via_dmablit_workqueue(struct work_struct *work) int cur_released; - DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long) + DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long) (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues)); spin_lock_irqsave(&blitq->blit_lock, irqsave); - while(blitq->serviced != blitq->cur) { + while (blitq->serviced != blitq->cur) { cur_released = blitq->serviced++; @@ -545,13 +541,13 @@ via_dmablit_workqueue(struct work_struct *work) void via_init_dmablit(struct drm_device *dev) { - int i,j; + int i, j; drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; drm_via_blitq_t *blitq; pci_set_master(dev->pdev); - for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) { + for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) { blitq = dev_priv->blit_queues + i; blitq->dev = dev; blitq->cur_blit_handle = 0; @@ -564,9 +560,8 @@ via_init_dmablit(struct drm_device *dev) blitq->is_active = 0; blitq->aborting = 0; spin_lock_init(&blitq->blit_lock); - for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) { + for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j) DRM_INIT_WAITQUEUE(blitq->blit_queue + j); - } DRM_INIT_WAITQUEUE(&blitq->busy_queue); INIT_WORK(&blitq->wq, via_dmablit_workqueue); setup_timer(&blitq->poll_timer, via_dmablit_timer, @@ -685,18 +680,17 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli static int via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine) { - int ret=0; + int ret = 0; unsigned long irqsave; DRM_DEBUG("Num free is %d\n", blitq->num_free); spin_lock_irqsave(&blitq->blit_lock, irqsave); - while(blitq->num_free == 0) { + while (blitq->num_free == 0) { spin_unlock_irqrestore(&blitq->blit_lock, irqsave); DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0); - if (ret) { + if (ret) return (-EINTR == ret) ? 
-EAGAIN : ret; - } spin_lock_irqsave(&blitq->blit_lock, irqsave); } @@ -719,7 +713,7 @@ via_dmablit_release_slot(drm_via_blitq_t *blitq) spin_lock_irqsave(&blitq->blit_lock, irqsave); blitq->num_free++; spin_unlock_irqrestore(&blitq->blit_lock, irqsave); - DRM_WAKEUP( &blitq->busy_queue ); + DRM_WAKEUP(&blitq->busy_queue); } /* @@ -744,9 +738,8 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer) engine = (xfer->to_fb) ? 0 : 1; blitq = dev_priv->blit_queues + engine; - if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) { + if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) return ret; - } if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) { via_dmablit_release_slot(blitq); return -ENOMEM; @@ -780,7 +773,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer) */ int -via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv ) +via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_via_blitsync_t *sync = data; int err; @@ -804,7 +797,7 @@ via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_pri */ int -via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv ) +via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_via_dmablit_t *xfer = data; int err; diff --git a/drivers/gpu/drm/via/via_dmablit.h b/drivers/gpu/drm/via/via_dmablit.h index 7408a547a03..9b662a327ce 100644 --- a/drivers/gpu/drm/via/via_dmablit.h +++ b/drivers/gpu/drm/via/via_dmablit.h @@ -45,12 +45,12 @@ typedef struct _drm_via_sg_info { int num_desc; enum dma_data_direction direction; unsigned char *bounce_buffer; - dma_addr_t chain_start; + dma_addr_t chain_start; uint32_t free_on_sequence; - unsigned int descriptors_per_page; + unsigned int descriptors_per_page; int aborted; enum { - dr_via_device_mapped, + dr_via_device_mapped, dr_via_desc_pages_alloc, dr_via_pages_locked, dr_via_pages_alloc, @@ -68,7 +68,7 @@ typedef struct _drm_via_blitq { unsigned num_free; unsigned num_outstanding; unsigned long end; - int aborting; + int aborting; int is_active; drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS]; spinlock_t blit_lock; diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h index cafcb844a22..9cf87d91232 100644 --- a/drivers/gpu/drm/via/via_drv.h +++ b/drivers/gpu/drm/via/via_drv.h @@ -107,9 +107,9 @@ enum via_family { #define VIA_BASE ((dev_priv->mmio)) #define VIA_READ(reg) DRM_READ32(VIA_BASE, reg) -#define VIA_WRITE(reg,val) DRM_WRITE32(VIA_BASE, reg, val) +#define VIA_WRITE(reg, val) DRM_WRITE32(VIA_BASE, reg, val) #define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg) -#define VIA_WRITE8(reg,val) DRM_WRITE8(VIA_BASE, reg, val) +#define VIA_WRITE8(reg, val) DRM_WRITE8(VIA_BASE, reg, val) extern struct drm_ioctl_desc via_ioctls[]; extern int via_max_ioctl; @@ -121,28 +121,28 @@ extern int via_agp_init(struct drm_device *dev, void *data, struct drm_file *fil extern int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv); -extern int via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv ); -extern int via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv ); +extern int via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int via_dma_blit(struct drm_device *dev, 
void *data, struct drm_file *file_priv); extern int via_driver_load(struct drm_device *dev, unsigned long chipset); extern int via_driver_unload(struct drm_device *dev); -extern int via_init_context(struct drm_device * dev, int context); -extern int via_final_context(struct drm_device * dev, int context); +extern int via_init_context(struct drm_device *dev, int context); +extern int via_final_context(struct drm_device *dev, int context); -extern int via_do_cleanup_map(struct drm_device * dev); +extern int via_do_cleanup_map(struct drm_device *dev); extern u32 via_get_vblank_counter(struct drm_device *dev, int crtc); extern int via_enable_vblank(struct drm_device *dev, int crtc); extern void via_disable_vblank(struct drm_device *dev, int crtc); extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS); -extern void via_driver_irq_preinstall(struct drm_device * dev); +extern void via_driver_irq_preinstall(struct drm_device *dev); extern int via_driver_irq_postinstall(struct drm_device *dev); -extern void via_driver_irq_uninstall(struct drm_device * dev); +extern void via_driver_irq_uninstall(struct drm_device *dev); -extern int via_dma_cleanup(struct drm_device * dev); +extern int via_dma_cleanup(struct drm_device *dev); extern void via_init_command_verifier(void); -extern int via_driver_dma_quiescent(struct drm_device * dev); +extern int via_driver_dma_quiescent(struct drm_device *dev); extern void via_init_futex(drm_via_private_t *dev_priv); extern void via_cleanup_futex(drm_via_private_t *dev_priv); extern void via_release_futex(drm_via_private_t *dev_priv, int context); diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c index 34079f251cd..d391f48ef87 100644 --- a/drivers/gpu/drm/via/via_irq.c +++ b/drivers/gpu/drm/via/via_irq.c @@ -141,11 +141,10 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) atomic_inc(&cur_irq->irq_received); DRM_WAKEUP(&cur_irq->irq_queue); handled = 1; - if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) { + if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) via_dmablit_handler(dev, 0, 1); - } else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i) { + else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i) via_dmablit_handler(dev, 1, 1); - } } cur_irq++; } @@ -160,7 +159,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) return IRQ_NONE; } -static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv) +static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t *dev_priv) { u32 status; @@ -207,7 +206,7 @@ void via_disable_vblank(struct drm_device *dev, int crtc) } static int -via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequence, +via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence, unsigned int *sequence) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; @@ -260,7 +259,7 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc * drm_dma.h hooks */ -void via_driver_irq_preinstall(struct drm_device * dev) +void via_driver_irq_preinstall(struct drm_device *dev) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; u32 status; @@ -329,7 +328,7 @@ int via_driver_irq_postinstall(struct drm_device *dev) return 0; } -void via_driver_irq_uninstall(struct drm_device * dev) +void via_driver_irq_uninstall(struct drm_device *dev) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; u32 status; diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c index 
6e6f9159163..6cca9a709f7 100644 --- a/drivers/gpu/drm/via/via_map.c +++ b/drivers/gpu/drm/via/via_map.c @@ -25,7 +25,7 @@ #include "via_drm.h" #include "via_drv.h" -static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init) +static int via_do_init_map(struct drm_device *dev, drm_via_init_t *init) { drm_via_private_t *dev_priv = dev->dev_private; @@ -68,7 +68,7 @@ static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init) return 0; } -int via_do_cleanup_map(struct drm_device * dev) +int via_do_cleanup_map(struct drm_device *dev) { via_dma_cleanup(dev); diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c index f694cb5eded..6cc2dadae3e 100644 --- a/drivers/gpu/drm/via/via_mm.c +++ b/drivers/gpu/drm/via/via_mm.c @@ -31,7 +31,7 @@ #include "drm_sman.h" #define VIA_MM_ALIGN_SHIFT 4 -#define VIA_MM_ALIGN_MASK ( (1 << VIA_MM_ALIGN_SHIFT) - 1) +#define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1) int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -172,7 +172,7 @@ int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv) } -void via_reclaim_buffers_locked(struct drm_device * dev, +void via_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv) { drm_via_private_t *dev_priv = dev->dev_private; @@ -183,9 +183,8 @@ void via_reclaim_buffers_locked(struct drm_device * dev, return; } - if (dev->driver->dma_quiescent) { + if (dev->driver->dma_quiescent) dev->driver->dma_quiescent(dev); - } drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv); mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/via/via_verifier.c b/drivers/gpu/drm/via/via_verifier.c index 46a57919874..48957b856d4 100644 --- a/drivers/gpu/drm/via/via_verifier.c +++ b/drivers/gpu/drm/via/via_verifier.c @@ -235,7 +235,7 @@ static hazard_t table2[256]; static hazard_t table3[256]; static __inline__ int -eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words) +eat_words(const uint32_t **buf, const uint32_t *buf_end, unsigned num_words) { if ((buf_end - *buf) >= num_words) { *buf += num_words; @@ -252,7 +252,7 @@ eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words) static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq, unsigned long offset, unsigned long size, - struct drm_device * dev) + struct drm_device *dev) { struct drm_map_list *r_list; drm_local_map_t *map = seq->map_cache; @@ -344,7 +344,7 @@ static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq) } static __inline__ int -investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t * cur_seq) +investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t *cur_seq) { register uint32_t tmp, *tmp_addr; @@ -518,7 +518,7 @@ investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t * cur_seq) static __inline__ int via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end, - drm_via_state_t * cur_seq) + drm_via_state_t *cur_seq) { drm_via_private_t *dev_priv = (drm_via_private_t *) cur_seq->dev->dev_private; @@ -621,8 +621,8 @@ via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end, } static __inline__ verifier_state_t -via_check_header2(uint32_t const **buffer, const uint32_t * buf_end, - drm_via_state_t * hc_state) +via_check_header2(uint32_t const **buffer, const uint32_t *buf_end, + drm_via_state_t *hc_state) { uint32_t cmd; int hz_mode; @@ -706,16 +706,15 @@ via_check_header2(uint32_t const **buffer, const 
uint32_t * buf_end, return state_error; } } - if (hc_state->unfinished && finish_current_sequence(hc_state)) { + if (hc_state->unfinished && finish_current_sequence(hc_state)) return state_error; - } *buffer = buf; return state_command; } static __inline__ verifier_state_t -via_parse_header2(drm_via_private_t * dev_priv, uint32_t const **buffer, - const uint32_t * buf_end, int *fire_count) +via_parse_header2(drm_via_private_t *dev_priv, uint32_t const **buffer, + const uint32_t *buf_end, int *fire_count) { uint32_t cmd; const uint32_t *buf = *buffer; @@ -833,8 +832,8 @@ via_check_header1(uint32_t const **buffer, const uint32_t * buf_end) } static __inline__ verifier_state_t -via_parse_header1(drm_via_private_t * dev_priv, uint32_t const **buffer, - const uint32_t * buf_end) +via_parse_header1(drm_via_private_t *dev_priv, uint32_t const **buffer, + const uint32_t *buf_end) { register uint32_t cmd; const uint32_t *buf = *buffer; @@ -851,7 +850,7 @@ via_parse_header1(drm_via_private_t * dev_priv, uint32_t const **buffer, } static __inline__ verifier_state_t -via_check_vheader5(uint32_t const **buffer, const uint32_t * buf_end) +via_check_vheader5(uint32_t const **buffer, const uint32_t *buf_end) { uint32_t data; const uint32_t *buf = *buffer; @@ -884,8 +883,8 @@ via_check_vheader5(uint32_t const **buffer, const uint32_t * buf_end) } static __inline__ verifier_state_t -via_parse_vheader5(drm_via_private_t * dev_priv, uint32_t const **buffer, - const uint32_t * buf_end) +via_parse_vheader5(drm_via_private_t *dev_priv, uint32_t const **buffer, + const uint32_t *buf_end) { uint32_t addr, count, i; const uint32_t *buf = *buffer; @@ -893,9 +892,8 @@ via_parse_vheader5(drm_via_private_t * dev_priv, uint32_t const **buffer, addr = *buf++ & ~VIA_VIDEOMASK; i = count = *buf; buf += 3; - while (i--) { + while (i--) VIA_WRITE(addr, *buf++); - } if (count & 3) buf += 4 - (count & 3); *buffer = buf; @@ -940,8 +938,8 @@ via_check_vheader6(uint32_t const **buffer, const uint32_t * buf_end) } static __inline__ verifier_state_t -via_parse_vheader6(drm_via_private_t * dev_priv, uint32_t const **buffer, - const uint32_t * buf_end) +via_parse_vheader6(drm_via_private_t *dev_priv, uint32_t const **buffer, + const uint32_t *buf_end) { uint32_t addr, count, i; @@ -1037,7 +1035,7 @@ via_verify_command_stream(const uint32_t * buf, unsigned int size, } int -via_parse_command_stream(struct drm_device * dev, const uint32_t * buf, +via_parse_command_stream(struct drm_device *dev, const uint32_t *buf, unsigned int size) { @@ -1085,9 +1083,8 @@ via_parse_command_stream(struct drm_device * dev, const uint32_t * buf, return -EINVAL; } } - if (state == state_error) { + if (state == state_error) return -EINVAL; - } return 0; } @@ -1096,13 +1093,11 @@ setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size) { int i; - for (i = 0; i < 256; ++i) { + for (i = 0; i < 256; ++i) table[i] = forbidden_command; - } - for (i = 0; i < size; ++i) { + for (i = 0; i < size; ++i) table[init_table[i].code] = init_table[i].hz; - } } void via_init_command_verifier(void) diff --git a/drivers/gpu/drm/via/via_verifier.h b/drivers/gpu/drm/via/via_verifier.h index d6f8214b69f..26b6d361ab9 100644 --- a/drivers/gpu/drm/via/via_verifier.h +++ b/drivers/gpu/drm/via/via_verifier.h @@ -54,8 +54,8 @@ typedef struct { const uint32_t *buf_start; } drm_via_state_t; -extern int via_verify_command_stream(const uint32_t * buf, unsigned int size, - struct drm_device * dev, int agp); +extern int via_verify_command_stream(const uint32_t *buf, unsigned 
int size, + struct drm_device *dev, int agp); extern int via_parse_command_stream(struct drm_device *dev, const uint32_t *buf, unsigned int size); diff --git a/drivers/gpu/drm/via/via_video.c b/drivers/gpu/drm/via/via_video.c index 6efac8117c9..675d311f038 100644 --- a/drivers/gpu/drm/via/via_video.c +++ b/drivers/gpu/drm/via/via_video.c @@ -29,7 +29,7 @@ #include "via_drm.h" #include "via_drv.h" -void via_init_futex(drm_via_private_t * dev_priv) +void via_init_futex(drm_via_private_t *dev_priv) { unsigned int i; @@ -41,11 +41,11 @@ void via_init_futex(drm_via_private_t * dev_priv) } } -void via_cleanup_futex(drm_via_private_t * dev_priv) +void via_cleanup_futex(drm_via_private_t *dev_priv) { } -void via_release_futex(drm_via_private_t * dev_priv, int context) +void via_release_futex(drm_via_private_t *dev_priv, int context) { unsigned int i; volatile int *lock; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index b793c8c9acb..9dd395b9021 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -764,7 +764,7 @@ static struct drm_driver driver = { static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - return drm_get_dev(pdev, ent, &driver); + return drm_get_pci_dev(pdev, ent, &driver); } static int __init vmwgfx_init(void) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index eaad5209533..429f917b60b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -164,7 +164,7 @@ struct vmw_vga_topology_state { struct vmw_private { struct ttm_bo_device bdev; struct ttm_bo_global_ref bo_global_ref; - struct ttm_global_reference mem_global_ref; + struct drm_global_reference mem_global_ref; struct vmw_fifo_state fifo; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index b0866f04ec7..870967a97c1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -528,7 +528,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv) * Dirty & Deferred IO */ par->dirty.x1 = par->dirty.x2 = 0; - par->dirty.y1 = par->dirty.y1 = 0; + par->dirty.y1 = par->dirty.y2 = 0; par->dirty.active = true; spin_lock_init(&par->dirty.lock); info->fbdefio = &vmw_defio; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c index e3df4adfb4d..83123287c60 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c @@ -44,29 +44,29 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma) return ttm_bo_mmap(filp, vma, &dev_priv->bdev); } -static int vmw_ttm_mem_global_init(struct ttm_global_reference *ref) +static int vmw_ttm_mem_global_init(struct drm_global_reference *ref) { DRM_INFO("global init.\n"); return ttm_mem_global_init(ref->object); } -static void vmw_ttm_mem_global_release(struct ttm_global_reference *ref) +static void vmw_ttm_mem_global_release(struct drm_global_reference *ref) { ttm_mem_global_release(ref->object); } int vmw_ttm_global_init(struct vmw_private *dev_priv) { - struct ttm_global_reference *global_ref; + struct drm_global_reference *global_ref; int ret; global_ref = &dev_priv->mem_global_ref; - global_ref->global_type = TTM_GLOBAL_TTM_MEM; + global_ref->global_type = DRM_GLOBAL_TTM_MEM; global_ref->size = sizeof(struct ttm_mem_global); global_ref->init = &vmw_ttm_mem_global_init; global_ref->release = &vmw_ttm_mem_global_release; - ret = ttm_global_item_ref(global_ref); + ret = 
drm_global_item_ref(global_ref); if (unlikely(ret != 0)) { DRM_ERROR("Failed setting up TTM memory accounting.\n"); return ret; @@ -75,11 +75,11 @@ int vmw_ttm_global_init(struct vmw_private *dev_priv) dev_priv->bo_global_ref.mem_glob = dev_priv->mem_global_ref.object; global_ref = &dev_priv->bo_global_ref.ref; - global_ref->global_type = TTM_GLOBAL_TTM_BO; + global_ref->global_type = DRM_GLOBAL_TTM_BO; global_ref->size = sizeof(struct ttm_bo_global); global_ref->init = &ttm_bo_global_init; global_ref->release = &ttm_bo_global_release; - ret = ttm_global_item_ref(global_ref); + ret = drm_global_item_ref(global_ref); if (unlikely(ret != 0)) { DRM_ERROR("Failed setting up TTM buffer objects.\n"); @@ -88,12 +88,12 @@ int vmw_ttm_global_init(struct vmw_private *dev_priv) return 0; out_no_bo: - ttm_global_item_unref(&dev_priv->mem_global_ref); + drm_global_item_unref(&dev_priv->mem_global_ref); return ret; } void vmw_ttm_global_release(struct vmw_private *dev_priv) { - ttm_global_item_unref(&dev_priv->bo_global_ref.ref); - ttm_global_item_unref(&dev_priv->mem_global_ref); + drm_global_item_unref(&dev_priv->bo_global_ref.ref); + drm_global_item_unref(&dev_priv->mem_global_ref); } diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index e19cf8eb6cc..c57e530d07c 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -446,6 +446,16 @@ config SENSORS_IT87 This driver can also be built as a module. If so, the module will be called it87. +config SENSORS_JZ4740 + tristate "Ingenic JZ4740 SoC ADC driver" + depends on MACH_JZ4740 && MFD_JZ4740_ADC + help + If you say yes here you get support for reading adc values from the ADCIN + pin on Ingenic JZ4740 SoC based boards. + + This driver can also be build as a module. If so, the module will be + called jz4740-hwmon. + config SENSORS_LM63 tristate "National Semiconductor LM63 and LM64" depends on I2C diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 2138ceb1a71..c5057745b06 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -55,6 +55,7 @@ obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o obj-$(CONFIG_SENSORS_IT87) += it87.o +obj-$(CONFIG_SENSORS_JZ4740) += jz4740-hwmon.o obj-$(CONFIG_SENSORS_K8TEMP) += k8temp.o obj-$(CONFIG_SENSORS_K10TEMP) += k10temp.o obj-$(CONFIG_SENSORS_LIS3LV02D) += lis3lv02d.o hp_accel.o diff --git a/drivers/hwmon/jz4740-hwmon.c b/drivers/hwmon/jz4740-hwmon.c new file mode 100644 index 00000000000..1c8b3d9e205 --- /dev/null +++ b/drivers/hwmon/jz4740-hwmon.c @@ -0,0 +1,230 @@ +/* + * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> + * JZ4740 SoC HWMON driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + */ + +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#include <linux/completion.h> +#include <linux/mfd/core.h> + +#include <linux/hwmon.h> + +struct jz4740_hwmon { + struct resource *mem; + void __iomem *base; + + int irq; + + struct mfd_cell *cell; + struct device *hwmon; + + struct completion read_completion; + + struct mutex lock; +}; + +static ssize_t jz4740_hwmon_show_name(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + return sprintf(buf, "jz4740\n"); +} + +static irqreturn_t jz4740_hwmon_irq(int irq, void *data) +{ + struct jz4740_hwmon *hwmon = data; + + complete(&hwmon->read_completion); + return IRQ_HANDLED; +} + +static ssize_t jz4740_hwmon_read_adcin(struct device *dev, + struct device_attribute *dev_attr, char *buf) +{ + struct jz4740_hwmon *hwmon = dev_get_drvdata(dev); + struct completion *completion = &hwmon->read_completion; + unsigned long t; + unsigned long val; + int ret; + + mutex_lock(&hwmon->lock); + + INIT_COMPLETION(*completion); + + enable_irq(hwmon->irq); + hwmon->cell->enable(to_platform_device(dev)); + + t = wait_for_completion_interruptible_timeout(completion, HZ); + + if (t > 0) { + val = readw(hwmon->base) & 0xfff; + val = (val * 3300) >> 12; + ret = sprintf(buf, "%lu\n", val); + } else { + ret = t ? t : -ETIMEDOUT; + } + + hwmon->cell->disable(to_platform_device(dev)); + disable_irq(hwmon->irq); + + mutex_unlock(&hwmon->lock); + + return ret; +} + +static DEVICE_ATTR(name, S_IRUGO, jz4740_hwmon_show_name, NULL); +static DEVICE_ATTR(in0_input, S_IRUGO, jz4740_hwmon_read_adcin, NULL); + +static struct attribute *jz4740_hwmon_attributes[] = { + &dev_attr_name.attr, + &dev_attr_in0_input.attr, + NULL +}; + +static const struct attribute_group jz4740_hwmon_attr_group = { + .attrs = jz4740_hwmon_attributes, +}; + +static int __devinit jz4740_hwmon_probe(struct platform_device *pdev) +{ + int ret; + struct jz4740_hwmon *hwmon; + + hwmon = kmalloc(sizeof(*hwmon), GFP_KERNEL); + if (!hwmon) { + dev_err(&pdev->dev, "Failed to allocate driver structure\n"); + return -ENOMEM; + } + + hwmon->cell = pdev->dev.platform_data; + + hwmon->irq = platform_get_irq(pdev, 0); + if (hwmon->irq < 0) { + ret = hwmon->irq; + dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret); + goto err_free; + } + + hwmon->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!hwmon->mem) { + ret = -ENOENT; + dev_err(&pdev->dev, "Failed to get platform mmio resource\n"); + goto err_free; + } + + hwmon->mem = request_mem_region(hwmon->mem->start, + resource_size(hwmon->mem), pdev->name); + if (!hwmon->mem) { + ret = -EBUSY; + dev_err(&pdev->dev, "Failed to request mmio memory region\n"); + goto err_free; + } + + hwmon->base = ioremap_nocache(hwmon->mem->start, + resource_size(hwmon->mem)); + if (!hwmon->base) { + ret = -EBUSY; + dev_err(&pdev->dev, "Failed to ioremap mmio memory\n"); + goto err_release_mem_region; + } + + init_completion(&hwmon->read_completion); + mutex_init(&hwmon->lock); + + platform_set_drvdata(pdev, hwmon); + + ret = request_irq(hwmon->irq, jz4740_hwmon_irq, 0, pdev->name, hwmon); + if (ret) { + dev_err(&pdev->dev, "Failed to request irq: %d\n", ret); + goto err_iounmap; + } + disable_irq(hwmon->irq); + + ret = sysfs_create_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group); + if (ret) { + dev_err(&pdev->dev, "Failed to create sysfs group: %d\n", ret); + goto err_free_irq; + } + + 
hwmon->hwmon = hwmon_device_register(&pdev->dev); + if (IS_ERR(hwmon->hwmon)) { + ret = PTR_ERR(hwmon->hwmon); + goto err_remove_file; + } + + return 0; + +err_remove_file: + sysfs_remove_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group); +err_free_irq: + free_irq(hwmon->irq, hwmon); +err_iounmap: + platform_set_drvdata(pdev, NULL); + iounmap(hwmon->base); +err_release_mem_region: + release_mem_region(hwmon->mem->start, resource_size(hwmon->mem)); +err_free: + kfree(hwmon); + + return ret; +} + +static int __devexit jz4740_hwmon_remove(struct platform_device *pdev) +{ + struct jz4740_hwmon *hwmon = platform_get_drvdata(pdev); + + hwmon_device_unregister(hwmon->hwmon); + sysfs_remove_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group); + + free_irq(hwmon->irq, hwmon); + + iounmap(hwmon->base); + release_mem_region(hwmon->mem->start, resource_size(hwmon->mem)); + + platform_set_drvdata(pdev, NULL); + kfree(hwmon); + + return 0; +} + +struct platform_driver jz4740_hwmon_driver = { + .probe = jz4740_hwmon_probe, + .remove = __devexit_p(jz4740_hwmon_remove), + .driver = { + .name = "jz4740-hwmon", + .owner = THIS_MODULE, + }, +}; + +static int __init jz4740_hwmon_init(void) +{ + return platform_driver_register(&jz4740_hwmon_driver); +} +module_init(jz4740_hwmon_init); + +static void __exit jz4740_hwmon_exit(void) +{ + platform_driver_unregister(&jz4740_hwmon_driver); +} +module_exit(jz4740_hwmon_exit); + +MODULE_DESCRIPTION("JZ4740 SoC HWMON driver"); +MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:jz4740-hwmon"); diff --git a/drivers/hwmon/ultra45_env.c b/drivers/hwmon/ultra45_env.c index 5da5942cf97..89643261ccd 100644 --- a/drivers/hwmon/ultra45_env.c +++ b/drivers/hwmon/ultra45_env.c @@ -311,12 +311,12 @@ static struct of_platform_driver env_driver = { static int __init env_init(void) { - return of_register_driver(&env_driver, &of_bus_type); + return of_register_platform_driver(&env_driver); } static void __exit env_exit(void) { - of_unregister_driver(&env_driver); + of_unregister_platform_driver(&env_driver); } module_init(env_init); diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index bceafbfa726..15a9702e294 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -521,12 +521,19 @@ config I2C_PXA_SLAVE is necessary for systems where the PXA may be a target on the I2C bus. +config HAVE_S3C2410_I2C + bool + help + This will include I2C support for Samsung SoCs. If you want to + include I2C support for any machine, kindly select this in the + respective Kconfig file. + config I2C_S3C2410 tristate "S3C2410 I2C Driver" - depends on ARCH_S3C2410 || ARCH_S3C64XX + depends on HAVE_S3C2410_I2C help Say Y here to include support for I2C controller in the - Samsung S3C2410 based System-on-Chip devices. + Samsung SoCs. config I2C_S6000 tristate "S6000 I2C support" @@ -549,7 +556,7 @@ config I2C_SH7760 config I2C_SH_MOBILE tristate "SuperH Mobile I2C Controller" - depends on SUPERH + depends on SUPERH || ARCH_SHMOBILE help If you say yes to this option, support will be included for the built-in I2C interface on the Renesas SH-Mobile processor. 
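The jz4740-hwmon driver added above reports the ADCIN pin through a standard hwmon in0_input attribute, and the number it prints is the raw 12-bit sample scaled by (val * 3300) >> 12, i.e. against an assumed 3.3 V (3300 mV) reference. A self-contained sketch of just that conversion, using a hypothetical helper name and example inputs, is:

/*
 * Sketch only: the same 12-bit -> millivolt scaling that
 * jz4740_hwmon_read_adcin() performs, pulled out into a hypothetical
 * standalone helper. Assumes the 3.3 V (3300 mV) reference implied by
 * the driver's arithmetic.
 */
#include <stdio.h>
#include <stdint.h>

static unsigned long adcin_to_millivolts(uint16_t raw)
{
	raw &= 0xfff;					/* driver masks to 12 bits */
	return ((unsigned long)raw * 3300) >> 12;	/* * 3300 / 4096 */
}

int main(void)
{
	printf("%lu\n", adcin_to_millivolts(0x000));	/* 0 mV */
	printf("%lu\n", adcin_to_millivolts(0x800));	/* mid-scale: 1650 mV */
	printf("%lu\n", adcin_to_millivolts(0xfff));	/* full scale: 3299 mV */
	return 0;
}

The driver's in0_input attribute returns exactly this number as a decimal string; each read enables the ADC through the MFD cell, waits up to one second (HZ) for the completion signalled from the interrupt handler, then disables it again.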
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c index b02b4533651..e591de1bc70 100644 --- a/drivers/i2c/busses/i2c-cpm.c +++ b/drivers/i2c/busses/i2c-cpm.c @@ -652,6 +652,7 @@ static int __devinit cpm_i2c_probe(struct of_device *ofdev, cpm->adap = cpm_ops; i2c_set_adapdata(&cpm->adap, cpm); cpm->adap.dev.parent = &ofdev->dev; + cpm->adap.dev.of_node = of_node_get(ofdev->dev.of_node); result = cpm_i2c_setup(cpm); if (result) { @@ -676,11 +677,6 @@ static int __devinit cpm_i2c_probe(struct of_device *ofdev, dev_dbg(&ofdev->dev, "hw routines for %s registered.\n", cpm->adap.name); - /* - * register OF I2C devices - */ - of_register_i2c_devices(&cpm->adap, ofdev->dev.of_node); - return 0; out_shut: cpm_i2c_shutdown(cpm); diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c index bf344135647..1168d61418c 100644 --- a/drivers/i2c/busses/i2c-ibm_iic.c +++ b/drivers/i2c/busses/i2c-ibm_iic.c @@ -745,6 +745,7 @@ static int __devinit iic_probe(struct of_device *ofdev, /* Register it with i2c layer */ adap = &dev->adap; adap->dev.parent = &ofdev->dev; + adap->dev.of_node = of_node_get(np); strlcpy(adap->name, "IBM IIC", sizeof(adap->name)); i2c_set_adapdata(adap, dev); adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; @@ -760,9 +761,6 @@ static int __devinit iic_probe(struct of_device *ofdev, dev_info(&ofdev->dev, "using %s mode\n", dev->fast_mode ? "fast (400 kHz)" : "standard (100 kHz)"); - /* Now register all the child nodes */ - of_register_i2c_devices(adap, np); - return 0; error_cleanup: diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index df00eb1f11f..6545d1c99b6 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c @@ -63,6 +63,7 @@ struct mpc_i2c { wait_queue_head_t queue; struct i2c_adapter adap; int irq; + u32 real_clk; }; struct mpc_i2c_divider { @@ -96,20 +97,23 @@ static irqreturn_t mpc_i2c_isr(int irq, void *dev_id) /* Sometimes 9th clock pulse isn't generated, and slave doesn't release * the bus, because it wants to send ACK. * Following sequence of enabling/disabling and sending start/stop generates - * the pulse, so it's all OK. + * the 9 pulses, so it's all OK. 
*/ static void mpc_i2c_fixup(struct mpc_i2c *i2c) { - writeccr(i2c, 0); - udelay(30); - writeccr(i2c, CCR_MEN); - udelay(30); - writeccr(i2c, CCR_MSTA | CCR_MTX); - udelay(30); - writeccr(i2c, CCR_MSTA | CCR_MTX | CCR_MEN); - udelay(30); - writeccr(i2c, CCR_MEN); - udelay(30); + int k; + u32 delay_val = 1000000 / i2c->real_clk + 1; + + if (delay_val < 2) + delay_val = 2; + + for (k = 9; k; k--) { + writeccr(i2c, 0); + writeccr(i2c, CCR_MSTA | CCR_MTX | CCR_MEN); + udelay(delay_val); + writeccr(i2c, CCR_MEN); + udelay(delay_val << 1); + } } static int i2c_wait(struct mpc_i2c *i2c, unsigned timeout, int writing) @@ -190,15 +194,18 @@ static const struct mpc_i2c_divider mpc_i2c_dividers_52xx[] __devinitconst = { }; static int __devinit mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock, - int prescaler) + int prescaler, u32 *real_clk) { const struct mpc_i2c_divider *div = NULL; unsigned int pvr = mfspr(SPRN_PVR); u32 divider; int i; - if (clock == MPC_I2C_CLOCK_LEGACY) + if (clock == MPC_I2C_CLOCK_LEGACY) { + /* see below - default fdr = 0x3f -> div = 2048 */ + *real_clk = mpc5xxx_get_bus_frequency(node) / 2048; return -EINVAL; + } /* Determine divider value */ divider = mpc5xxx_get_bus_frequency(node) / clock; @@ -216,7 +223,8 @@ static int __devinit mpc_i2c_get_fdr_52xx(struct device_node *node, u32 clock, break; } - return div ? (int)div->fdr : -EINVAL; + *real_clk = mpc5xxx_get_bus_frequency(node) / div->divider; + return (int)div->fdr; } static void __devinit mpc_i2c_setup_52xx(struct device_node *node, @@ -231,13 +239,14 @@ static void __devinit mpc_i2c_setup_52xx(struct device_node *node, return; } - ret = mpc_i2c_get_fdr_52xx(node, clock, prescaler); + ret = mpc_i2c_get_fdr_52xx(node, clock, prescaler, &i2c->real_clk); fdr = (ret >= 0) ? ret : 0x3f; /* backward compatibility */ writeb(fdr & 0xff, i2c->base + MPC_I2C_FDR); if (ret >= 0) - dev_info(i2c->dev, "clock %d Hz (fdr=%d)\n", clock, fdr); + dev_info(i2c->dev, "clock %u Hz (fdr=%d)\n", i2c->real_clk, + fdr); } #else /* !(CONFIG_PPC_MPC52xx || CONFIG_PPC_MPC512x) */ static void __devinit mpc_i2c_setup_52xx(struct device_node *node, @@ -334,14 +343,17 @@ static u32 __devinit mpc_i2c_get_sec_cfg_8xxx(void) } static int __devinit mpc_i2c_get_fdr_8xxx(struct device_node *node, u32 clock, - u32 prescaler) + u32 prescaler, u32 *real_clk) { const struct mpc_i2c_divider *div = NULL; u32 divider; int i; - if (clock == MPC_I2C_CLOCK_LEGACY) + if (clock == MPC_I2C_CLOCK_LEGACY) { + /* see below - default fdr = 0x1031 -> div = 16 * 3072 */ + *real_clk = fsl_get_sys_freq() / prescaler / (16 * 3072); return -EINVAL; + } /* Determine proper divider value */ if (of_device_is_compatible(node, "fsl,mpc8544-i2c")) @@ -364,6 +376,7 @@ static int __devinit mpc_i2c_get_fdr_8xxx(struct device_node *node, u32 clock, break; } + *real_clk = fsl_get_sys_freq() / prescaler / div->divider; return div ? (int)div->fdr : -EINVAL; } @@ -380,7 +393,7 @@ static void __devinit mpc_i2c_setup_8xxx(struct device_node *node, return; } - ret = mpc_i2c_get_fdr_8xxx(node, clock, prescaler); + ret = mpc_i2c_get_fdr_8xxx(node, clock, prescaler, &i2c->real_clk); fdr = (ret >= 0) ? 
ret : 0x1031; /* backward compatibility */ writeb(fdr & 0xff, i2c->base + MPC_I2C_FDR); @@ -388,7 +401,7 @@ static void __devinit mpc_i2c_setup_8xxx(struct device_node *node, if (ret >= 0) dev_info(i2c->dev, "clock %d Hz (dfsrr=%d fdr=%d)\n", - clock, fdr >> 8, fdr & 0xff); + i2c->real_clk, fdr >> 8, fdr & 0xff); } #else /* !CONFIG_FSL_SOC */ @@ -500,10 +513,14 @@ static int mpc_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) return -EINTR; } if (time_after(jiffies, orig_jiffies + HZ)) { + u8 status = readb(i2c->base + MPC_I2C_SR); + dev_dbg(i2c->dev, "timeout\n"); - if (readb(i2c->base + MPC_I2C_SR) == - (CSR_MCF | CSR_MBB | CSR_RXAK)) + if ((status & (CSR_MCF | CSR_MBB | CSR_RXAK)) != 0) { + writeb(status & ~CSR_MAL, + i2c->base + MPC_I2C_SR); mpc_i2c_fixup(i2c); + } return -EIO; } schedule(); @@ -595,18 +612,26 @@ static int __devinit fsl_i2c_probe(struct of_device *op, mpc_i2c_setup_8xxx(op->dev.of_node, i2c, clock, 0); } + prop = of_get_property(op->dev.of_node, "fsl,timeout", &plen); + if (prop && plen == sizeof(u32)) { + mpc_ops.timeout = *prop * HZ / 1000000; + if (mpc_ops.timeout < 5) + mpc_ops.timeout = 5; + } + dev_info(i2c->dev, "timeout %u us\n", mpc_ops.timeout * 1000000 / HZ); + dev_set_drvdata(&op->dev, i2c); i2c->adap = mpc_ops; i2c_set_adapdata(&i2c->adap, i2c); i2c->adap.dev.parent = &op->dev; + i2c->adap.dev.of_node = of_node_get(op->dev.of_node); result = i2c_add_adapter(&i2c->adap); if (result < 0) { dev_err(i2c->dev, "failed to add adapter\n"); goto fail_add; } - of_register_i2c_devices(&i2c->adap, op->dev.of_node); return result; diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index ffb405d7c6f..598c49acaeb 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -119,8 +119,10 @@ struct sh_mobile_i2c_data { struct i2c_adapter adap; struct clk *clk; + u_int8_t icic; u_int8_t iccl; u_int8_t icch; + u_int8_t flags; spinlock_t lock; wait_queue_head_t wait; @@ -129,15 +131,17 @@ struct sh_mobile_i2c_data { int sr; }; +#define IIC_FLAG_HAS_ICIC67 (1 << 0) + #define NORMAL_SPEED 100000 /* FAST_SPEED 400000 */ /* Register offsets */ -#define ICDR(pd) (pd->reg + 0x00) -#define ICCR(pd) (pd->reg + 0x04) -#define ICSR(pd) (pd->reg + 0x08) -#define ICIC(pd) (pd->reg + 0x0c) -#define ICCL(pd) (pd->reg + 0x10) -#define ICCH(pd) (pd->reg + 0x14) +#define ICDR 0x00 +#define ICCR 0x04 +#define ICSR 0x08 +#define ICIC 0x0c +#define ICCL 0x10 +#define ICCH 0x14 /* Register bits */ #define ICCR_ICE 0x80 @@ -155,11 +159,32 @@ struct sh_mobile_i2c_data { #define ICSR_WAIT 0x02 #define ICSR_DTE 0x01 +#define ICIC_ICCLB8 0x80 +#define ICIC_ICCHB8 0x40 #define ICIC_ALE 0x08 #define ICIC_TACKE 0x04 #define ICIC_WAITE 0x02 #define ICIC_DTEE 0x01 +static void iic_wr(struct sh_mobile_i2c_data *pd, int offs, unsigned char data) +{ + if (offs == ICIC) + data |= pd->icic; + + iowrite8(data, pd->reg + offs); +} + +static unsigned char iic_rd(struct sh_mobile_i2c_data *pd, int offs) +{ + return ioread8(pd->reg + offs); +} + +static void iic_set_clr(struct sh_mobile_i2c_data *pd, int offs, + unsigned char set, unsigned char clr) +{ + iic_wr(pd, offs, (iic_rd(pd, offs) | set) & ~clr); +} + static void activate_ch(struct sh_mobile_i2c_data *pd) { unsigned long i2c_clk; @@ -187,6 +212,14 @@ static void activate_ch(struct sh_mobile_i2c_data *pd) else pd->iccl = (u_int8_t)(num/denom); + /* one more bit of ICCL in ICIC */ + if (pd->flags & IIC_FLAG_HAS_ICIC67) { + if ((num/denom) > 0xff) + pd->icic |= ICIC_ICCLB8; + else 
+ pd->icic &= ~ICIC_ICCLB8; + } + /* Calculate the value for icch. From the data sheet: icch = (p clock / transfer rate) * (H / (L + H)) */ num = i2c_clk * 4; @@ -196,25 +229,33 @@ static void activate_ch(struct sh_mobile_i2c_data *pd) else pd->icch = (u_int8_t)(num/denom); + /* one more bit of ICCH in ICIC */ + if (pd->flags & IIC_FLAG_HAS_ICIC67) { + if ((num/denom) > 0xff) + pd->icic |= ICIC_ICCHB8; + else + pd->icic &= ~ICIC_ICCHB8; + } + /* Enable channel and configure rx ack */ - iowrite8(ioread8(ICCR(pd)) | ICCR_ICE, ICCR(pd)); + iic_set_clr(pd, ICCR, ICCR_ICE, 0); /* Mask all interrupts */ - iowrite8(0, ICIC(pd)); + iic_wr(pd, ICIC, 0); /* Set the clock */ - iowrite8(pd->iccl, ICCL(pd)); - iowrite8(pd->icch, ICCH(pd)); + iic_wr(pd, ICCL, pd->iccl); + iic_wr(pd, ICCH, pd->icch); } static void deactivate_ch(struct sh_mobile_i2c_data *pd) { /* Clear/disable interrupts */ - iowrite8(0, ICSR(pd)); - iowrite8(0, ICIC(pd)); + iic_wr(pd, ICSR, 0); + iic_wr(pd, ICIC, 0); /* Disable channel */ - iowrite8(ioread8(ICCR(pd)) & ~ICCR_ICE, ICCR(pd)); + iic_set_clr(pd, ICCR, 0, ICCR_ICE); /* Disable clock and mark device as idle */ clk_disable(pd->clk); @@ -233,35 +274,35 @@ static unsigned char i2c_op(struct sh_mobile_i2c_data *pd, switch (op) { case OP_START: /* issue start and trigger DTE interrupt */ - iowrite8(0x94, ICCR(pd)); + iic_wr(pd, ICCR, 0x94); break; case OP_TX_FIRST: /* disable DTE interrupt and write data */ - iowrite8(ICIC_WAITE | ICIC_ALE | ICIC_TACKE, ICIC(pd)); - iowrite8(data, ICDR(pd)); + iic_wr(pd, ICIC, ICIC_WAITE | ICIC_ALE | ICIC_TACKE); + iic_wr(pd, ICDR, data); break; case OP_TX: /* write data */ - iowrite8(data, ICDR(pd)); + iic_wr(pd, ICDR, data); break; case OP_TX_STOP: /* write data and issue a stop afterwards */ - iowrite8(data, ICDR(pd)); - iowrite8(0x90, ICCR(pd)); + iic_wr(pd, ICDR, data); + iic_wr(pd, ICCR, 0x90); break; case OP_TX_TO_RX: /* select read mode */ - iowrite8(0x81, ICCR(pd)); + iic_wr(pd, ICCR, 0x81); break; case OP_RX: /* just read data */ - ret = ioread8(ICDR(pd)); + ret = iic_rd(pd, ICDR); break; case OP_RX_STOP: /* enable DTE interrupt, issue stop */ - iowrite8(ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE, - ICIC(pd)); - iowrite8(0xc0, ICCR(pd)); + iic_wr(pd, ICIC, + ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); + iic_wr(pd, ICCR, 0xc0); break; case OP_RX_STOP_DATA: /* enable DTE interrupt, read data, issue stop */ - iowrite8(ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE, - ICIC(pd)); - ret = ioread8(ICDR(pd)); - iowrite8(0xc0, ICCR(pd)); + iic_wr(pd, ICIC, + ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); + ret = iic_rd(pd, ICDR); + iic_wr(pd, ICCR, 0xc0); break; } @@ -367,7 +408,7 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id) unsigned char sr; int wakeup; - sr = ioread8(ICSR(pd)); + sr = iic_rd(pd, ICSR); pd->sr |= sr; /* remember state */ dev_dbg(pd->dev, "i2c_isr 0x%02x 0x%02x %s %d %d!\n", sr, pd->sr, @@ -376,7 +417,7 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id) if (sr & (ICSR_AL | ICSR_TACK)) { /* don't interrupt transaction - continue to issue stop */ - iowrite8(sr & ~(ICSR_AL | ICSR_TACK), ICSR(pd)); + iic_wr(pd, ICSR, sr & ~(ICSR_AL | ICSR_TACK)); wakeup = 0; } else if (pd->msg->flags & I2C_M_RD) wakeup = sh_mobile_i2c_isr_rx(pd); @@ -384,7 +425,7 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id) wakeup = sh_mobile_i2c_isr_tx(pd); if (sr & ICSR_WAIT) /* TODO: add delay here to support slow acks */ - iowrite8(sr & ~ICSR_WAIT, ICSR(pd)); + iic_wr(pd, ICSR, sr & ~ICSR_WAIT); if 
(wakeup) { pd->sr |= SW_DONE; @@ -402,21 +443,21 @@ static int start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg) } /* Initialize channel registers */ - iowrite8(ioread8(ICCR(pd)) & ~ICCR_ICE, ICCR(pd)); + iic_set_clr(pd, ICCR, 0, ICCR_ICE); /* Enable channel and configure rx ack */ - iowrite8(ioread8(ICCR(pd)) | ICCR_ICE, ICCR(pd)); + iic_set_clr(pd, ICCR, ICCR_ICE, 0); /* Set the clock */ - iowrite8(pd->iccl, ICCL(pd)); - iowrite8(pd->icch, ICCH(pd)); + iic_wr(pd, ICCL, pd->iccl); + iic_wr(pd, ICCH, pd->icch); pd->msg = usr_msg; pd->pos = -1; pd->sr = 0; /* Enable all interrupts to begin with */ - iowrite8(ICIC_WAITE | ICIC_ALE | ICIC_TACKE | ICIC_DTEE, ICIC(pd)); + iic_wr(pd, ICIC, ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); return 0; } @@ -451,7 +492,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, retry_count = 1000; again: - val = ioread8(ICSR(pd)); + val = iic_rd(pd, ICSR); dev_dbg(pd->dev, "val 0x%02x pd->sr 0x%02x\n", val, pd->sr); @@ -576,6 +617,12 @@ static int sh_mobile_i2c_probe(struct platform_device *dev) goto err_irq; } + /* The IIC blocks on SH-Mobile ARM processors + * come with two new bits in ICIC. + */ + if (size > 0x17) + pd->flags |= IIC_FLAG_HAS_ICIC67; + /* Enable Runtime PM for this device. * * Also tell the Runtime PM core to ignore children diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 0815e10da7c..df937df845e 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -30,6 +30,8 @@ #include <linux/init.h> #include <linux/idr.h> #include <linux/mutex.h> +#include <linux/of_i2c.h> +#include <linux/of_device.h> #include <linux/completion.h> #include <linux/hardirq.h> #include <linux/irqflags.h> @@ -70,6 +72,10 @@ static int i2c_device_match(struct device *dev, struct device_driver *drv) if (!client) return 0; + /* Attempt an OF style match */ + if (of_driver_match_device(dev, drv)) + return 1; + driver = to_i2c_driver(drv); /* match on an id table if there is one */ if (driver->id_table) @@ -790,6 +796,9 @@ static int i2c_register_adapter(struct i2c_adapter *adap) if (adap->nr < __i2c_first_dynamic_bus_num) i2c_scan_static_board_info(adap); + /* Register devices from the device tree */ + of_i2c_register_devices(adap); + /* Notify drivers */ mutex_lock(&core_lock); dummy = bus_for_each_drv(&i2c_bus_type, NULL, adap, diff --git a/drivers/input/misc/sparcspkr.c b/drivers/input/misc/sparcspkr.c index 1dacae4b43f..f3bb92e9755 100644 --- a/drivers/input/misc/sparcspkr.c +++ b/drivers/input/misc/sparcspkr.c @@ -353,14 +353,12 @@ static struct of_platform_driver grover_beep_driver = { static int __init sparcspkr_init(void) { - int err = of_register_driver(&bbc_beep_driver, - &of_platform_bus_type); + int err = of_register_platform_driver(&bbc_beep_driver); if (!err) { - err = of_register_driver(&grover_beep_driver, - &of_platform_bus_type); + err = of_register_platform_driver(&grover_beep_driver); if (err) - of_unregister_driver(&bbc_beep_driver); + of_unregister_platform_driver(&bbc_beep_driver); } return err; @@ -368,8 +366,8 @@ static int __init sparcspkr_init(void) static void __exit sparcspkr_exit(void) { - of_unregister_driver(&bbc_beep_driver); - of_unregister_driver(&grover_beep_driver); + of_unregister_platform_driver(&bbc_beep_driver); + of_unregister_platform_driver(&grover_beep_driver); } module_init(sparcspkr_init); diff --git a/drivers/input/serio/i8042-io.h b/drivers/input/serio/i8042-io.h index 847f4aad7ed..5d48bb66aa7 100644 --- a/drivers/input/serio/i8042-io.h +++ 
b/drivers/input/serio/i8042-io.h @@ -27,6 +27,11 @@ #include <asm/irq.h> #elif defined(CONFIG_SH_CAYMAN) #include <asm/irq.h> +#elif defined(CONFIG_PPC) +extern int of_i8042_kbd_irq; +extern int of_i8042_aux_irq; +# define I8042_KBD_IRQ of_i8042_kbd_irq +# define I8042_AUX_IRQ of_i8042_aux_irq #else # define I8042_KBD_IRQ 1 # define I8042_AUX_IRQ 12 diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h index 04e32f2d124..cb2a24b9474 100644 --- a/drivers/input/serio/i8042-sparcio.h +++ b/drivers/input/serio/i8042-sparcio.h @@ -58,9 +58,9 @@ static int __devinit sparc_i8042_probe(struct of_device *op, const struct of_dev if (!strcmp(dp->name, OBP_PS2KBD_NAME1) || !strcmp(dp->name, OBP_PS2KBD_NAME2)) { struct of_device *kbd = of_find_device_by_node(dp); - unsigned int irq = kbd->irqs[0]; + unsigned int irq = kbd->archdata.irqs[0]; if (irq == 0xffffffff) - irq = op->irqs[0]; + irq = op->archdata.irqs[0]; i8042_kbd_irq = irq; kbd_iobase = of_ioremap(&kbd->resource[0], 0, 8, "kbd"); @@ -68,9 +68,9 @@ static int __devinit sparc_i8042_probe(struct of_device *op, const struct of_dev } else if (!strcmp(dp->name, OBP_PS2MS_NAME1) || !strcmp(dp->name, OBP_PS2MS_NAME2)) { struct of_device *ms = of_find_device_by_node(dp); - unsigned int irq = ms->irqs[0]; + unsigned int irq = ms->archdata.irqs[0]; if (irq == 0xffffffff) - irq = op->irqs[0]; + irq = op->archdata.irqs[0]; i8042_aux_irq = irq; } @@ -116,8 +116,7 @@ static int __init i8042_platform_init(void) if (!kbd_iobase) return -ENODEV; } else { - int err = of_register_driver(&sparc_i8042_driver, - &of_bus_type); + int err = of_register_platform_driver(&sparc_i8042_driver); if (err) return err; @@ -141,7 +140,7 @@ static inline void i8042_platform_exit(void) struct device_node *root = of_find_node_by_path("/"); if (strcmp(root->name, "SUNW,JavaStation-1")) - of_unregister_driver(&sparc_i8042_driver); + of_unregister_platform_driver(&sparc_i8042_driver); } #else /* !CONFIG_PCI */ diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c index e14081675bb..ebb11907d40 100644 --- a/drivers/input/xen-kbdfront.c +++ b/drivers/input/xen-kbdfront.c @@ -339,7 +339,7 @@ static struct xenbus_driver xenkbd_driver = { static int __init xenkbd_init(void) { - if (!xen_domain()) + if (!xen_pv_domain()) return -ENODEV; /* Nothing to do if running in dom0. */ diff --git a/drivers/macintosh/macio_sysfs.c b/drivers/macintosh/macio_sysfs.c index 6999ce59fd1..6024038a5b9 100644 --- a/drivers/macintosh/macio_sysfs.c +++ b/drivers/macintosh/macio_sysfs.c @@ -41,10 +41,7 @@ compatible_show (struct device *dev, struct device_attribute *attr, char *buf) static ssize_t modalias_show (struct device *dev, struct device_attribute *attr, char *buf) { - struct of_device *ofdev = to_of_device(dev); - int len; - - len = of_device_get_modalias(ofdev, buf, PAGE_SIZE - 2); + int len = of_device_get_modalias(dev, buf, PAGE_SIZE - 2); buf[len] = '\n'; buf[len+1] = 0; diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index f06d06e7fdf..d25e22cee4c 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -432,3 +432,12 @@ config MMC_SH_MMCIF This selects the MMC Host Interface controler (MMCIF). This driver supports MMCIF in sh7724/sh7757/sh7372. + +config MMC_JZ4740 + tristate "JZ4740 SD/Multimedia Card Interface support" + depends on MACH_JZ4740 + help + This selects support for the SD/MMC controller on Ingenic JZ4740 + SoCs. + If you have a board based on such a SoC and with a SD/MMC slot, + say Y or M here. 
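The new MMC_JZ4740 option selects the jz4740_mmc host driver added below, which discovers its board wiring entirely from platform data: the probe path reads pdata->gpio_card_detect, gpio_read_only and gpio_power plus the matching *_active_low and data_1bit flags. As a rough illustration of how a board would feed it, here is a hypothetical fragment; the symbol names and GPIO number are placeholders, the gpio fields are assumed to be plain ints where a negative value means "not wired", and the field list is limited to what the driver below actually dereferences:

/*
 * Hypothetical board-support fragment, not part of the patch above.
 * Field names mirror the pdata accesses in jz4740_mmc.c; GPIO numbers
 * and symbol names are placeholders.
 */
#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/mach-jz4740/jz4740_mmc.h>

static struct jz4740_mmc_platform_data example_mmc_pdata = {
	.gpio_card_detect	= 56,	/* placeholder pin number */
	.card_detect_active_low	= 1,
	.gpio_read_only		= -1,	/* invalid: get_ro() will return -ENOSYS */
	.gpio_power		= -1,	/* invalid: power switching is skipped */
	.data_1bit		= 0,	/* keep the 4-bit pins and MMC_CAP_4_BIT_DATA */
};

/* Attach the pdata to the SoC's "jz4740-mmc" device before registering it. */
static int __init example_board_mmc_init(struct platform_device *mmc_dev)
{
	mmc_dev->dev.platform_data = &example_mmc_pdata;
	return platform_device_register(mmc_dev);
}

With a valid gpio_card_detect the driver converts the pin to an interrupt in jz4740_mmc_request_cd_irq() and calls mmc_detect_change() on both edges; leaving the other GPIOs invalid simply makes get_ro() return -ENOSYS and skips the power switching in set_ios().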
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index e30c2ee4889..f4e53c98d94 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -36,6 +36,7 @@ obj-$(CONFIG_MMC_CB710) += cb710-mmc.o obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o +obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o sdhci-of-y := sdhci-of-core.o diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c new file mode 100644 index 00000000000..ad4f9870e3c --- /dev/null +++ b/drivers/mmc/host/jz4740_mmc.c @@ -0,0 +1,1029 @@ +/* + * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> + * JZ4740 SD/MMC controller driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#include <linux/mmc/host.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/scatterlist.h> +#include <linux/clk.h> + +#include <linux/bitops.h> +#include <linux/gpio.h> +#include <asm/mach-jz4740/gpio.h> +#include <asm/cacheflush.h> +#include <linux/dma-mapping.h> + +#include <asm/mach-jz4740/jz4740_mmc.h> + +#define JZ_REG_MMC_STRPCL 0x00 +#define JZ_REG_MMC_STATUS 0x04 +#define JZ_REG_MMC_CLKRT 0x08 +#define JZ_REG_MMC_CMDAT 0x0C +#define JZ_REG_MMC_RESTO 0x10 +#define JZ_REG_MMC_RDTO 0x14 +#define JZ_REG_MMC_BLKLEN 0x18 +#define JZ_REG_MMC_NOB 0x1C +#define JZ_REG_MMC_SNOB 0x20 +#define JZ_REG_MMC_IMASK 0x24 +#define JZ_REG_MMC_IREG 0x28 +#define JZ_REG_MMC_CMD 0x2C +#define JZ_REG_MMC_ARG 0x30 +#define JZ_REG_MMC_RESP_FIFO 0x34 +#define JZ_REG_MMC_RXFIFO 0x38 +#define JZ_REG_MMC_TXFIFO 0x3C + +#define JZ_MMC_STRPCL_EXIT_MULTIPLE BIT(7) +#define JZ_MMC_STRPCL_EXIT_TRANSFER BIT(6) +#define JZ_MMC_STRPCL_START_READWAIT BIT(5) +#define JZ_MMC_STRPCL_STOP_READWAIT BIT(4) +#define JZ_MMC_STRPCL_RESET BIT(3) +#define JZ_MMC_STRPCL_START_OP BIT(2) +#define JZ_MMC_STRPCL_CLOCK_CONTROL (BIT(1) | BIT(0)) +#define JZ_MMC_STRPCL_CLOCK_STOP BIT(0) +#define JZ_MMC_STRPCL_CLOCK_START BIT(1) + + +#define JZ_MMC_STATUS_IS_RESETTING BIT(15) +#define JZ_MMC_STATUS_SDIO_INT_ACTIVE BIT(14) +#define JZ_MMC_STATUS_PRG_DONE BIT(13) +#define JZ_MMC_STATUS_DATA_TRAN_DONE BIT(12) +#define JZ_MMC_STATUS_END_CMD_RES BIT(11) +#define JZ_MMC_STATUS_DATA_FIFO_AFULL BIT(10) +#define JZ_MMC_STATUS_IS_READWAIT BIT(9) +#define JZ_MMC_STATUS_CLK_EN BIT(8) +#define JZ_MMC_STATUS_DATA_FIFO_FULL BIT(7) +#define JZ_MMC_STATUS_DATA_FIFO_EMPTY BIT(6) +#define JZ_MMC_STATUS_CRC_RES_ERR BIT(5) +#define JZ_MMC_STATUS_CRC_READ_ERROR BIT(4) +#define JZ_MMC_STATUS_TIMEOUT_WRITE BIT(3) +#define JZ_MMC_STATUS_CRC_WRITE_ERROR BIT(2) +#define JZ_MMC_STATUS_TIMEOUT_RES BIT(1) +#define JZ_MMC_STATUS_TIMEOUT_READ BIT(0) + +#define JZ_MMC_STATUS_READ_ERROR_MASK (BIT(4) | BIT(0)) +#define JZ_MMC_STATUS_WRITE_ERROR_MASK (BIT(3) | BIT(2)) + + +#define JZ_MMC_CMDAT_IO_ABORT BIT(11) +#define JZ_MMC_CMDAT_BUS_WIDTH_4BIT BIT(10) +#define JZ_MMC_CMDAT_DMA_EN BIT(8) +#define JZ_MMC_CMDAT_INIT BIT(7) +#define 
JZ_MMC_CMDAT_BUSY BIT(6) +#define JZ_MMC_CMDAT_STREAM BIT(5) +#define JZ_MMC_CMDAT_WRITE BIT(4) +#define JZ_MMC_CMDAT_DATA_EN BIT(3) +#define JZ_MMC_CMDAT_RESPONSE_FORMAT (BIT(2) | BIT(1) | BIT(0)) +#define JZ_MMC_CMDAT_RSP_R1 1 +#define JZ_MMC_CMDAT_RSP_R2 2 +#define JZ_MMC_CMDAT_RSP_R3 3 + +#define JZ_MMC_IRQ_SDIO BIT(7) +#define JZ_MMC_IRQ_TXFIFO_WR_REQ BIT(6) +#define JZ_MMC_IRQ_RXFIFO_RD_REQ BIT(5) +#define JZ_MMC_IRQ_END_CMD_RES BIT(2) +#define JZ_MMC_IRQ_PRG_DONE BIT(1) +#define JZ_MMC_IRQ_DATA_TRAN_DONE BIT(0) + + +#define JZ_MMC_CLK_RATE 24000000 + +enum jz4740_mmc_state { + JZ4740_MMC_STATE_READ_RESPONSE, + JZ4740_MMC_STATE_TRANSFER_DATA, + JZ4740_MMC_STATE_SEND_STOP, + JZ4740_MMC_STATE_DONE, +}; + +struct jz4740_mmc_host { + struct mmc_host *mmc; + struct platform_device *pdev; + struct jz4740_mmc_platform_data *pdata; + struct clk *clk; + + int irq; + int card_detect_irq; + + struct resource *mem; + void __iomem *base; + struct mmc_request *req; + struct mmc_command *cmd; + + unsigned long waiting; + + uint32_t cmdat; + + uint16_t irq_mask; + + spinlock_t lock; + + struct timer_list timeout_timer; + struct sg_mapping_iter miter; + enum jz4740_mmc_state state; +}; + +static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host, + unsigned int irq, bool enabled) +{ + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + if (enabled) + host->irq_mask &= ~irq; + else + host->irq_mask |= irq; + spin_unlock_irqrestore(&host->lock, flags); + + writew(host->irq_mask, host->base + JZ_REG_MMC_IMASK); +} + +static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host, + bool start_transfer) +{ + uint16_t val = JZ_MMC_STRPCL_CLOCK_START; + + if (start_transfer) + val |= JZ_MMC_STRPCL_START_OP; + + writew(val, host->base + JZ_REG_MMC_STRPCL); +} + +static void jz4740_mmc_clock_disable(struct jz4740_mmc_host *host) +{ + uint32_t status; + unsigned int timeout = 1000; + + writew(JZ_MMC_STRPCL_CLOCK_STOP, host->base + JZ_REG_MMC_STRPCL); + do { + status = readl(host->base + JZ_REG_MMC_STATUS); + } while (status & JZ_MMC_STATUS_CLK_EN && --timeout); +} + +static void jz4740_mmc_reset(struct jz4740_mmc_host *host) +{ + uint32_t status; + unsigned int timeout = 1000; + + writew(JZ_MMC_STRPCL_RESET, host->base + JZ_REG_MMC_STRPCL); + udelay(10); + do { + status = readl(host->base + JZ_REG_MMC_STATUS); + } while (status & JZ_MMC_STATUS_IS_RESETTING && --timeout); +} + +static void jz4740_mmc_request_done(struct jz4740_mmc_host *host) +{ + struct mmc_request *req; + + req = host->req; + host->req = NULL; + + mmc_request_done(host->mmc, req); +} + +static unsigned int jz4740_mmc_poll_irq(struct jz4740_mmc_host *host, + unsigned int irq) +{ + unsigned int timeout = 0x800; + uint16_t status; + + do { + status = readw(host->base + JZ_REG_MMC_IREG); + } while (!(status & irq) && --timeout); + + if (timeout == 0) { + set_bit(0, &host->waiting); + mod_timer(&host->timeout_timer, jiffies + 5*HZ); + jz4740_mmc_set_irq_enabled(host, irq, true); + return true; + } + + return false; +} + +static void jz4740_mmc_transfer_check_state(struct jz4740_mmc_host *host, + struct mmc_data *data) +{ + int status; + + status = readl(host->base + JZ_REG_MMC_STATUS); + if (status & JZ_MMC_STATUS_WRITE_ERROR_MASK) { + if (status & (JZ_MMC_STATUS_TIMEOUT_WRITE)) { + host->req->cmd->error = -ETIMEDOUT; + data->error = -ETIMEDOUT; + } else { + host->req->cmd->error = -EIO; + data->error = -EIO; + } + } +} + +static bool jz4740_mmc_write_data(struct jz4740_mmc_host *host, + struct mmc_data *data) +{ + 
struct sg_mapping_iter *miter = &host->miter; + void __iomem *fifo_addr = host->base + JZ_REG_MMC_TXFIFO; + uint32_t *buf; + bool timeout; + size_t i, j; + + while (sg_miter_next(miter)) { + buf = miter->addr; + i = miter->length / 4; + j = i / 8; + i = i & 0x7; + while (j) { + timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ); + if (unlikely(timeout)) + goto poll_timeout; + + writel(buf[0], fifo_addr); + writel(buf[1], fifo_addr); + writel(buf[2], fifo_addr); + writel(buf[3], fifo_addr); + writel(buf[4], fifo_addr); + writel(buf[5], fifo_addr); + writel(buf[6], fifo_addr); + writel(buf[7], fifo_addr); + buf += 8; + --j; + } + if (unlikely(i)) { + timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_TXFIFO_WR_REQ); + if (unlikely(timeout)) + goto poll_timeout; + + while (i) { + writel(*buf, fifo_addr); + ++buf; + --i; + } + } + data->bytes_xfered += miter->length; + } + sg_miter_stop(miter); + + return false; + +poll_timeout: + miter->consumed = (void *)buf - miter->addr; + data->bytes_xfered += miter->consumed; + sg_miter_stop(miter); + + return true; +} + +static bool jz4740_mmc_read_data(struct jz4740_mmc_host *host, + struct mmc_data *data) +{ + struct sg_mapping_iter *miter = &host->miter; + void __iomem *fifo_addr = host->base + JZ_REG_MMC_RXFIFO; + uint32_t *buf; + uint32_t d; + uint16_t status; + size_t i, j; + unsigned int timeout; + + while (sg_miter_next(miter)) { + buf = miter->addr; + i = miter->length; + j = i / 32; + i = i & 0x1f; + while (j) { + timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ); + if (unlikely(timeout)) + goto poll_timeout; + + buf[0] = readl(fifo_addr); + buf[1] = readl(fifo_addr); + buf[2] = readl(fifo_addr); + buf[3] = readl(fifo_addr); + buf[4] = readl(fifo_addr); + buf[5] = readl(fifo_addr); + buf[6] = readl(fifo_addr); + buf[7] = readl(fifo_addr); + + buf += 8; + --j; + } + + if (unlikely(i)) { + timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_RXFIFO_RD_REQ); + if (unlikely(timeout)) + goto poll_timeout; + + while (i >= 4) { + *buf++ = readl(fifo_addr); + i -= 4; + } + if (unlikely(i > 0)) { + d = readl(fifo_addr); + memcpy(buf, &d, i); + } + } + data->bytes_xfered += miter->length; + + /* This can go away once MIPS implements + * flush_kernel_dcache_page */ + flush_dcache_page(miter->page); + } + sg_miter_stop(miter); + + /* For whatever reason there is sometime one word more in the fifo then + * requested */ + timeout = 1000; + status = readl(host->base + JZ_REG_MMC_STATUS); + while (!(status & JZ_MMC_STATUS_DATA_FIFO_EMPTY) && --timeout) { + d = readl(fifo_addr); + status = readl(host->base + JZ_REG_MMC_STATUS); + } + + return false; + +poll_timeout: + miter->consumed = (void *)buf - miter->addr; + data->bytes_xfered += miter->consumed; + sg_miter_stop(miter); + + return true; +} + +static void jz4740_mmc_timeout(unsigned long data) +{ + struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)data; + + if (!test_and_clear_bit(0, &host->waiting)) + return; + + jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, false); + + host->req->cmd->error = -ETIMEDOUT; + jz4740_mmc_request_done(host); +} + +static void jz4740_mmc_read_response(struct jz4740_mmc_host *host, + struct mmc_command *cmd) +{ + int i; + uint16_t tmp; + void __iomem *fifo_addr = host->base + JZ_REG_MMC_RESP_FIFO; + + if (cmd->flags & MMC_RSP_136) { + tmp = readw(fifo_addr); + for (i = 0; i < 4; ++i) { + cmd->resp[i] = tmp << 24; + tmp = readw(fifo_addr); + cmd->resp[i] |= tmp << 8; + tmp = readw(fifo_addr); + cmd->resp[i] |= tmp >> 8; + } + } else { + 
cmd->resp[0] = readw(fifo_addr) << 24; + cmd->resp[0] |= readw(fifo_addr) << 8; + cmd->resp[0] |= readw(fifo_addr) & 0xff; + } +} + +static void jz4740_mmc_send_command(struct jz4740_mmc_host *host, + struct mmc_command *cmd) +{ + uint32_t cmdat = host->cmdat; + + host->cmdat &= ~JZ_MMC_CMDAT_INIT; + jz4740_mmc_clock_disable(host); + + host->cmd = cmd; + + if (cmd->flags & MMC_RSP_BUSY) + cmdat |= JZ_MMC_CMDAT_BUSY; + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_R1B: + case MMC_RSP_R1: + cmdat |= JZ_MMC_CMDAT_RSP_R1; + break; + case MMC_RSP_R2: + cmdat |= JZ_MMC_CMDAT_RSP_R2; + break; + case MMC_RSP_R3: + cmdat |= JZ_MMC_CMDAT_RSP_R3; + break; + default: + break; + } + + if (cmd->data) { + cmdat |= JZ_MMC_CMDAT_DATA_EN; + if (cmd->data->flags & MMC_DATA_WRITE) + cmdat |= JZ_MMC_CMDAT_WRITE; + if (cmd->data->flags & MMC_DATA_STREAM) + cmdat |= JZ_MMC_CMDAT_STREAM; + + writew(cmd->data->blksz, host->base + JZ_REG_MMC_BLKLEN); + writew(cmd->data->blocks, host->base + JZ_REG_MMC_NOB); + } + + writeb(cmd->opcode, host->base + JZ_REG_MMC_CMD); + writel(cmd->arg, host->base + JZ_REG_MMC_ARG); + writel(cmdat, host->base + JZ_REG_MMC_CMDAT); + + jz4740_mmc_clock_enable(host, 1); +} + +static void jz_mmc_prepare_data_transfer(struct jz4740_mmc_host *host) +{ + struct mmc_command *cmd = host->req->cmd; + struct mmc_data *data = cmd->data; + int direction; + + if (data->flags & MMC_DATA_READ) + direction = SG_MITER_TO_SG; + else + direction = SG_MITER_FROM_SG; + + sg_miter_start(&host->miter, data->sg, data->sg_len, direction); +} + + +static irqreturn_t jz_mmc_irq_worker(int irq, void *devid) +{ + struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)devid; + struct mmc_command *cmd = host->req->cmd; + struct mmc_request *req = host->req; + bool timeout = false; + + if (cmd->error) + host->state = JZ4740_MMC_STATE_DONE; + + switch (host->state) { + case JZ4740_MMC_STATE_READ_RESPONSE: + if (cmd->flags & MMC_RSP_PRESENT) + jz4740_mmc_read_response(host, cmd); + + if (!cmd->data) + break; + + jz_mmc_prepare_data_transfer(host); + + case JZ4740_MMC_STATE_TRANSFER_DATA: + if (cmd->data->flags & MMC_DATA_READ) + timeout = jz4740_mmc_read_data(host, cmd->data); + else + timeout = jz4740_mmc_write_data(host, cmd->data); + + if (unlikely(timeout)) { + host->state = JZ4740_MMC_STATE_TRANSFER_DATA; + break; + } + + jz4740_mmc_transfer_check_state(host, cmd->data); + + timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_DATA_TRAN_DONE); + if (unlikely(timeout)) { + host->state = JZ4740_MMC_STATE_SEND_STOP; + break; + } + writew(JZ_MMC_IRQ_DATA_TRAN_DONE, host->base + JZ_REG_MMC_IREG); + + case JZ4740_MMC_STATE_SEND_STOP: + if (!req->stop) + break; + + jz4740_mmc_send_command(host, req->stop); + + timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_PRG_DONE); + if (timeout) { + host->state = JZ4740_MMC_STATE_DONE; + break; + } + case JZ4740_MMC_STATE_DONE: + break; + } + + if (!timeout) + jz4740_mmc_request_done(host); + + return IRQ_HANDLED; +} + +static irqreturn_t jz_mmc_irq(int irq, void *devid) +{ + struct jz4740_mmc_host *host = devid; + struct mmc_command *cmd = host->cmd; + uint16_t irq_reg, status, tmp; + + irq_reg = readw(host->base + JZ_REG_MMC_IREG); + + tmp = irq_reg; + irq_reg &= ~host->irq_mask; + + tmp &= ~(JZ_MMC_IRQ_TXFIFO_WR_REQ | JZ_MMC_IRQ_RXFIFO_RD_REQ | + JZ_MMC_IRQ_PRG_DONE | JZ_MMC_IRQ_DATA_TRAN_DONE); + + if (tmp != irq_reg) + writew(tmp & ~irq_reg, host->base + JZ_REG_MMC_IREG); + + if (irq_reg & JZ_MMC_IRQ_SDIO) { + writew(JZ_MMC_IRQ_SDIO, host->base + JZ_REG_MMC_IREG); + 
mmc_signal_sdio_irq(host->mmc); + irq_reg &= ~JZ_MMC_IRQ_SDIO; + } + + if (host->req && cmd && irq_reg) { + if (test_and_clear_bit(0, &host->waiting)) { + del_timer(&host->timeout_timer); + + status = readl(host->base + JZ_REG_MMC_STATUS); + + if (status & JZ_MMC_STATUS_TIMEOUT_RES) { + cmd->error = -ETIMEDOUT; + } else if (status & JZ_MMC_STATUS_CRC_RES_ERR) { + cmd->error = -EIO; + } else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR | + JZ_MMC_STATUS_CRC_WRITE_ERROR)) { + if (cmd->data) + cmd->data->error = -EIO; + cmd->error = -EIO; + } else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR | + JZ_MMC_STATUS_CRC_WRITE_ERROR)) { + if (cmd->data) + cmd->data->error = -EIO; + cmd->error = -EIO; + } + + jz4740_mmc_set_irq_enabled(host, irq_reg, false); + writew(irq_reg, host->base + JZ_REG_MMC_IREG); + + return IRQ_WAKE_THREAD; + } + } + + return IRQ_HANDLED; +} + +static int jz4740_mmc_set_clock_rate(struct jz4740_mmc_host *host, int rate) +{ + int div = 0; + int real_rate; + + jz4740_mmc_clock_disable(host); + clk_set_rate(host->clk, JZ_MMC_CLK_RATE); + + real_rate = clk_get_rate(host->clk); + + while (real_rate > rate && div < 7) { + ++div; + real_rate >>= 1; + } + + writew(div, host->base + JZ_REG_MMC_CLKRT); + return real_rate; +} + +static void jz4740_mmc_request(struct mmc_host *mmc, struct mmc_request *req) +{ + struct jz4740_mmc_host *host = mmc_priv(mmc); + + host->req = req; + + writew(0xffff, host->base + JZ_REG_MMC_IREG); + + writew(JZ_MMC_IRQ_END_CMD_RES, host->base + JZ_REG_MMC_IREG); + jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_END_CMD_RES, true); + + host->state = JZ4740_MMC_STATE_READ_RESPONSE; + set_bit(0, &host->waiting); + mod_timer(&host->timeout_timer, jiffies + 5*HZ); + jz4740_mmc_send_command(host, req->cmd); +} + +static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct jz4740_mmc_host *host = mmc_priv(mmc); + if (ios->clock) + jz4740_mmc_set_clock_rate(host, ios->clock); + + switch (ios->power_mode) { + case MMC_POWER_UP: + jz4740_mmc_reset(host); + if (gpio_is_valid(host->pdata->gpio_power)) + gpio_set_value(host->pdata->gpio_power, + !host->pdata->power_active_low); + host->cmdat |= JZ_MMC_CMDAT_INIT; + clk_enable(host->clk); + break; + case MMC_POWER_ON: + break; + default: + if (gpio_is_valid(host->pdata->gpio_power)) + gpio_set_value(host->pdata->gpio_power, + host->pdata->power_active_low); + clk_disable(host->clk); + break; + } + + switch (ios->bus_width) { + case MMC_BUS_WIDTH_1: + host->cmdat &= ~JZ_MMC_CMDAT_BUS_WIDTH_4BIT; + break; + case MMC_BUS_WIDTH_4: + host->cmdat |= JZ_MMC_CMDAT_BUS_WIDTH_4BIT; + break; + default: + break; + } +} + +static int jz4740_mmc_get_ro(struct mmc_host *mmc) +{ + struct jz4740_mmc_host *host = mmc_priv(mmc); + if (!gpio_is_valid(host->pdata->gpio_read_only)) + return -ENOSYS; + + return gpio_get_value(host->pdata->gpio_read_only) ^ + host->pdata->read_only_active_low; +} + +static int jz4740_mmc_get_cd(struct mmc_host *mmc) +{ + struct jz4740_mmc_host *host = mmc_priv(mmc); + if (!gpio_is_valid(host->pdata->gpio_card_detect)) + return -ENOSYS; + + return gpio_get_value(host->pdata->gpio_card_detect) ^ + host->pdata->card_detect_active_low; +} + +static irqreturn_t jz4740_mmc_card_detect_irq(int irq, void *devid) +{ + struct jz4740_mmc_host *host = devid; + + mmc_detect_change(host->mmc, HZ / 2); + + return IRQ_HANDLED; +} + +static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct jz4740_mmc_host *host = mmc_priv(mmc); + jz4740_mmc_set_irq_enabled(host, JZ_MMC_IRQ_SDIO, 
enable); +} + +static const struct mmc_host_ops jz4740_mmc_ops = { + .request = jz4740_mmc_request, + .set_ios = jz4740_mmc_set_ios, + .get_ro = jz4740_mmc_get_ro, + .get_cd = jz4740_mmc_get_cd, + .enable_sdio_irq = jz4740_mmc_enable_sdio_irq, +}; + +static const struct jz_gpio_bulk_request jz4740_mmc_pins[] = { + JZ_GPIO_BULK_PIN(MSC_CMD), + JZ_GPIO_BULK_PIN(MSC_CLK), + JZ_GPIO_BULK_PIN(MSC_DATA0), + JZ_GPIO_BULK_PIN(MSC_DATA1), + JZ_GPIO_BULK_PIN(MSC_DATA2), + JZ_GPIO_BULK_PIN(MSC_DATA3), +}; + +static int __devinit jz4740_mmc_request_gpio(struct device *dev, int gpio, + const char *name, bool output, int value) +{ + int ret; + + if (!gpio_is_valid(gpio)) + return 0; + + ret = gpio_request(gpio, name); + if (ret) { + dev_err(dev, "Failed to request %s gpio: %d\n", name, ret); + return ret; + } + + if (output) + gpio_direction_output(gpio, value); + else + gpio_direction_input(gpio); + + return 0; +} + +static int __devinit jz4740_mmc_request_gpios(struct platform_device *pdev) +{ + int ret; + struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data; + + if (!pdata) + return 0; + + ret = jz4740_mmc_request_gpio(&pdev->dev, pdata->gpio_card_detect, + "MMC detect change", false, 0); + if (ret) + goto err; + + ret = jz4740_mmc_request_gpio(&pdev->dev, pdata->gpio_read_only, + "MMC read only", false, 0); + if (ret) + goto err_free_gpio_card_detect; + + ret = jz4740_mmc_request_gpio(&pdev->dev, pdata->gpio_power, + "MMC read only", true, pdata->power_active_low); + if (ret) + goto err_free_gpio_read_only; + + return 0; + +err_free_gpio_read_only: + if (gpio_is_valid(pdata->gpio_read_only)) + gpio_free(pdata->gpio_read_only); +err_free_gpio_card_detect: + if (gpio_is_valid(pdata->gpio_card_detect)) + gpio_free(pdata->gpio_card_detect); +err: + return ret; +} + +static int __devinit jz4740_mmc_request_cd_irq(struct platform_device *pdev, + struct jz4740_mmc_host *host) +{ + struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data; + + if (!gpio_is_valid(pdata->gpio_card_detect)) + return 0; + + host->card_detect_irq = gpio_to_irq(pdata->gpio_card_detect); + if (host->card_detect_irq < 0) { + dev_warn(&pdev->dev, "Failed to get card detect irq\n"); + return 0; + } + + return request_irq(host->card_detect_irq, jz4740_mmc_card_detect_irq, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, + "MMC card detect", host); +} + +static void jz4740_mmc_free_gpios(struct platform_device *pdev) +{ + struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data; + + if (!pdata) + return; + + if (gpio_is_valid(pdata->gpio_power)) + gpio_free(pdata->gpio_power); + if (gpio_is_valid(pdata->gpio_read_only)) + gpio_free(pdata->gpio_read_only); + if (gpio_is_valid(pdata->gpio_card_detect)) + gpio_free(pdata->gpio_card_detect); +} + +static inline size_t jz4740_mmc_num_pins(struct jz4740_mmc_host *host) +{ + size_t num_pins = ARRAY_SIZE(jz4740_mmc_pins); + if (host->pdata && host->pdata->data_1bit) + num_pins -= 3; + + return num_pins; +} + +static int __devinit jz4740_mmc_probe(struct platform_device* pdev) +{ + int ret; + struct mmc_host *mmc; + struct jz4740_mmc_host *host; + struct jz4740_mmc_platform_data *pdata; + + pdata = pdev->dev.platform_data; + + mmc = mmc_alloc_host(sizeof(struct jz4740_mmc_host), &pdev->dev); + if (!mmc) { + dev_err(&pdev->dev, "Failed to alloc mmc host structure\n"); + return -ENOMEM; + } + + host = mmc_priv(mmc); + host->pdata = pdata; + + host->irq = platform_get_irq(pdev, 0); + if (host->irq < 0) { + ret = host->irq; + dev_err(&pdev->dev, "Failed to get platform irq: 
%d\n", ret); + goto err_free_host; + } + + host->clk = clk_get(&pdev->dev, "mmc"); + if (!host->clk) { + ret = -ENOENT; + dev_err(&pdev->dev, "Failed to get mmc clock\n"); + goto err_free_host; + } + + host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!host->mem) { + ret = -ENOENT; + dev_err(&pdev->dev, "Failed to get base platform memory\n"); + goto err_clk_put; + } + + host->mem = request_mem_region(host->mem->start, + resource_size(host->mem), pdev->name); + if (!host->mem) { + ret = -EBUSY; + dev_err(&pdev->dev, "Failed to request base memory region\n"); + goto err_clk_put; + } + + host->base = ioremap_nocache(host->mem->start, resource_size(host->mem)); + if (!host->base) { + ret = -EBUSY; + dev_err(&pdev->dev, "Failed to ioremap base memory\n"); + goto err_release_mem_region; + } + + ret = jz_gpio_bulk_request(jz4740_mmc_pins, jz4740_mmc_num_pins(host)); + if (ret) { + dev_err(&pdev->dev, "Failed to request mmc pins: %d\n", ret); + goto err_iounmap; + } + + ret = jz4740_mmc_request_gpios(pdev); + if (ret) + goto err_gpio_bulk_free; + + mmc->ops = &jz4740_mmc_ops; + mmc->f_min = JZ_MMC_CLK_RATE / 128; + mmc->f_max = JZ_MMC_CLK_RATE; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->caps = (pdata && pdata->data_1bit) ? 0 : MMC_CAP_4_BIT_DATA; + mmc->caps |= MMC_CAP_SDIO_IRQ; + + mmc->max_blk_size = (1 << 10) - 1; + mmc->max_blk_count = (1 << 15) - 1; + mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; + + mmc->max_phys_segs = 128; + mmc->max_hw_segs = 128; + mmc->max_seg_size = mmc->max_req_size; + + host->mmc = mmc; + host->pdev = pdev; + spin_lock_init(&host->lock); + host->irq_mask = 0xffff; + + ret = jz4740_mmc_request_cd_irq(pdev, host); + if (ret) { + dev_err(&pdev->dev, "Failed to request card detect irq\n"); + goto err_free_gpios; + } + + ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0, + dev_name(&pdev->dev), host); + if (ret) { + dev_err(&pdev->dev, "Failed to request irq: %d\n", ret); + goto err_free_card_detect_irq; + } + + jz4740_mmc_reset(host); + jz4740_mmc_clock_disable(host); + setup_timer(&host->timeout_timer, jz4740_mmc_timeout, + (unsigned long)host); + /* It is not important when it times out, it just needs to timeout. 
*/ + set_timer_slack(&host->timeout_timer, HZ); + + platform_set_drvdata(pdev, host); + ret = mmc_add_host(mmc); + + if (ret) { + dev_err(&pdev->dev, "Failed to add mmc host: %d\n", ret); + goto err_free_irq; + } + dev_info(&pdev->dev, "JZ SD/MMC card driver registered\n"); + + return 0; + +err_free_irq: + free_irq(host->irq, host); +err_free_card_detect_irq: + if (host->card_detect_irq >= 0) + free_irq(host->card_detect_irq, host); +err_free_gpios: + jz4740_mmc_free_gpios(pdev); +err_gpio_bulk_free: + jz_gpio_bulk_free(jz4740_mmc_pins, jz4740_mmc_num_pins(host)); +err_iounmap: + iounmap(host->base); +err_release_mem_region: + release_mem_region(host->mem->start, resource_size(host->mem)); +err_clk_put: + clk_put(host->clk); +err_free_host: + platform_set_drvdata(pdev, NULL); + mmc_free_host(mmc); + + return ret; +} + +static int __devexit jz4740_mmc_remove(struct platform_device *pdev) +{ + struct jz4740_mmc_host *host = platform_get_drvdata(pdev); + + del_timer_sync(&host->timeout_timer); + jz4740_mmc_set_irq_enabled(host, 0xff, false); + jz4740_mmc_reset(host); + + mmc_remove_host(host->mmc); + + free_irq(host->irq, host); + if (host->card_detect_irq >= 0) + free_irq(host->card_detect_irq, host); + + jz4740_mmc_free_gpios(pdev); + jz_gpio_bulk_free(jz4740_mmc_pins, jz4740_mmc_num_pins(host)); + + iounmap(host->base); + release_mem_region(host->mem->start, resource_size(host->mem)); + + clk_put(host->clk); + + platform_set_drvdata(pdev, NULL); + mmc_free_host(host->mmc); + + return 0; +} + +#ifdef CONFIG_PM + +static int jz4740_mmc_suspend(struct device *dev) +{ + struct jz4740_mmc_host *host = dev_get_drvdata(dev); + + mmc_suspend_host(host->mmc); + + jz_gpio_bulk_suspend(jz4740_mmc_pins, jz4740_mmc_num_pins(host)); + + return 0; +} + +static int jz4740_mmc_resume(struct device *dev) +{ + struct jz4740_mmc_host *host = dev_get_drvdata(dev); + + jz_gpio_bulk_resume(jz4740_mmc_pins, jz4740_mmc_num_pins(host)); + + mmc_resume_host(host->mmc); + + return 0; +} + +const struct dev_pm_ops jz4740_mmc_pm_ops = { + .suspend = jz4740_mmc_suspend, + .resume = jz4740_mmc_resume, + .poweroff = jz4740_mmc_suspend, + .restore = jz4740_mmc_resume, +}; + +#define JZ4740_MMC_PM_OPS (&jz4740_mmc_pm_ops) +#else +#define JZ4740_MMC_PM_OPS NULL +#endif + +static struct platform_driver jz4740_mmc_driver = { + .probe = jz4740_mmc_probe, + .remove = __devexit_p(jz4740_mmc_remove), + .driver = { + .name = "jz4740-mmc", + .owner = THIS_MODULE, + .pm = JZ4740_MMC_PM_OPS, + }, +}; + +static int __init jz4740_mmc_init(void) +{ + return platform_driver_register(&jz4740_mmc_driver); +} +module_init(jz4740_mmc_init); + +static void __exit jz4740_mmc_exit(void) +{ + platform_driver_unregister(&jz4740_mmc_driver); +} +module_exit(jz4740_mmc_exit); + +MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index ad847a24a67..7b0f3ef50f9 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1533,12 +1533,20 @@ static int __devexit mmc_spi_remove(struct spi_device *spi) return 0; } +#if defined(CONFIG_OF) +static struct of_device_id mmc_spi_of_match_table[] __devinitdata = { + { .compatible = "mmc-spi-slot", }, +}; +#endif static struct spi_driver mmc_spi_driver = { .driver = { .name = "mmc_spi", .bus = &spi_bus_type, .owner = THIS_MODULE, +#if defined(CONFIG_OF) + .of_match_table = mmc_spi_of_match_table, +#endif }, .probe = mmc_spi_probe, .remove = 
__devexit_p(mmc_spi_remove), diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index f22bc9f05dd..6629d09f3b3 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig @@ -321,7 +321,7 @@ config MTD_CFI_FLAGADM config MTD_REDWOOD tristate "CFI Flash devices mapped on IBM Redwood" - depends on MTD_CFI && ( REDWOOD_4 || REDWOOD_5 || REDWOOD_6 ) + depends on MTD_CFI help This enables access routines for the flash chips on the IBM Redwood board. If you have one of these boards and would like to diff --git a/drivers/mtd/maps/redwood.c b/drivers/mtd/maps/redwood.c index 933c0b63b01..d2c9db00db0 100644 --- a/drivers/mtd/maps/redwood.c +++ b/drivers/mtd/maps/redwood.c @@ -22,8 +22,6 @@ #include <asm/io.h> -#if !defined (CONFIG_REDWOOD_6) - #define WINDOW_ADDR 0xffc00000 #define WINDOW_SIZE 0x00400000 @@ -69,47 +67,6 @@ static struct mtd_partition redwood_flash_partitions[] = { } }; -#else /* CONFIG_REDWOOD_6 */ -/* FIXME: the window is bigger - armin */ -#define WINDOW_ADDR 0xff800000 -#define WINDOW_SIZE 0x00800000 - -#define RW_PART0_OF 0 -#define RW_PART0_SZ 0x400000 /* 4 MiB data */ -#define RW_PART1_OF RW_PART0_OF + RW_PART0_SZ -#define RW_PART1_SZ 0x10000 /* 64K VPD */ -#define RW_PART2_OF RW_PART1_OF + RW_PART1_SZ -#define RW_PART2_SZ 0x400000 - (0x10000 + 0x20000) -#define RW_PART3_OF RW_PART2_OF + RW_PART2_SZ -#define RW_PART3_SZ 0x20000 - -static struct mtd_partition redwood_flash_partitions[] = { - { - .name = "Redwood filesystem", - .offset = RW_PART0_OF, - .size = RW_PART0_SZ - }, - { - .name = "Redwood OpenBIOS Vital Product Data", - .offset = RW_PART1_OF, - .size = RW_PART1_SZ, - .mask_flags = MTD_WRITEABLE /* force read-only */ - }, - { - .name = "Redwood kernel", - .offset = RW_PART2_OF, - .size = RW_PART2_SZ - }, - { - .name = "Redwood OpenBIOS", - .offset = RW_PART3_OF, - .size = RW_PART3_SZ, - .mask_flags = MTD_WRITEABLE /* force read-only */ - } -}; - -#endif /* CONFIG_REDWOOD_6 */ - struct map_info redwood_flash_map = { .name = "IBM Redwood", .size = WINDOW_SIZE, diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c index 0391c2527bd..8984236a8d0 100644 --- a/drivers/mtd/maps/sun_uflash.c +++ b/drivers/mtd/maps/sun_uflash.c @@ -160,12 +160,12 @@ static struct of_platform_driver uflash_driver = { static int __init uflash_init(void) { - return of_register_driver(&uflash_driver, &of_bus_type); + return of_register_platform_driver(&uflash_driver); } static void __exit uflash_exit(void) { - of_unregister_driver(&uflash_driver); + of_unregister_platform_driver(&uflash_driver); } module_init(uflash_init); diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index ffc3720929f..362d177efe1 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -526,4 +526,10 @@ config MTD_NAND_NUC900 This enables the driver for the NAND Flash on evaluation board based on w90p910 / NUC9xx. +config MTD_NAND_JZ4740 + tristate "Support for JZ4740 SoC NAND controller" + depends on MACH_JZ4740 + help + Enables support for NAND Flash on JZ4740 SoC based boards. 
+ endif # MTD_NAND diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index e8ab884ba47..ac83dcdac5d 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -46,5 +46,6 @@ obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o obj-$(CONFIG_MTD_NAND_MPC5121_NFC) += mpc5121_nfc.o obj-$(CONFIG_MTD_NAND_RICOH) += r852.o +obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740_nand.o nand-objs := nand_base.o nand_bbt.o diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c new file mode 100644 index 00000000000..67343fc31bd --- /dev/null +++ b/drivers/mtd/nand/jz4740_nand.c @@ -0,0 +1,516 @@ +/* + * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> + * JZ4740 SoC NAND controller driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#include <linux/ioport.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#include <linux/mtd/mtd.h> +#include <linux/mtd/nand.h> +#include <linux/mtd/partitions.h> + +#include <linux/gpio.h> + +#include <asm/mach-jz4740/jz4740_nand.h> + +#define JZ_REG_NAND_CTRL 0x50 +#define JZ_REG_NAND_ECC_CTRL 0x100 +#define JZ_REG_NAND_DATA 0x104 +#define JZ_REG_NAND_PAR0 0x108 +#define JZ_REG_NAND_PAR1 0x10C +#define JZ_REG_NAND_PAR2 0x110 +#define JZ_REG_NAND_IRQ_STAT 0x114 +#define JZ_REG_NAND_IRQ_CTRL 0x118 +#define JZ_REG_NAND_ERR(x) (0x11C + ((x) << 2)) + +#define JZ_NAND_ECC_CTRL_PAR_READY BIT(4) +#define JZ_NAND_ECC_CTRL_ENCODING BIT(3) +#define JZ_NAND_ECC_CTRL_RS BIT(2) +#define JZ_NAND_ECC_CTRL_RESET BIT(1) +#define JZ_NAND_ECC_CTRL_ENABLE BIT(0) + +#define JZ_NAND_STATUS_ERR_COUNT (BIT(31) | BIT(30) | BIT(29)) +#define JZ_NAND_STATUS_PAD_FINISH BIT(4) +#define JZ_NAND_STATUS_DEC_FINISH BIT(3) +#define JZ_NAND_STATUS_ENC_FINISH BIT(2) +#define JZ_NAND_STATUS_UNCOR_ERROR BIT(1) +#define JZ_NAND_STATUS_ERROR BIT(0) + +#define JZ_NAND_CTRL_ENABLE_CHIP(x) BIT((x) << 1) +#define JZ_NAND_CTRL_ASSERT_CHIP(x) BIT(((x) << 1) + 1) + +#define JZ_NAND_MEM_ADDR_OFFSET 0x10000 +#define JZ_NAND_MEM_CMD_OFFSET 0x08000 + +struct jz_nand { + struct mtd_info mtd; + struct nand_chip chip; + void __iomem *base; + struct resource *mem; + + void __iomem *bank_base; + struct resource *bank_mem; + + struct jz_nand_platform_data *pdata; + bool is_reading; +}; + +static inline struct jz_nand *mtd_to_jz_nand(struct mtd_info *mtd) +{ + return container_of(mtd, struct jz_nand, mtd); +} + +static void jz_nand_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl) +{ + struct jz_nand *nand = mtd_to_jz_nand(mtd); + struct nand_chip *chip = mtd->priv; + uint32_t reg; + + if (ctrl & NAND_CTRL_CHANGE) { + BUG_ON((ctrl & NAND_ALE) && (ctrl & NAND_CLE)); + if (ctrl & NAND_ALE) + chip->IO_ADDR_W = nand->bank_base + JZ_NAND_MEM_ADDR_OFFSET; + else if (ctrl & NAND_CLE) + chip->IO_ADDR_W = nand->bank_base + JZ_NAND_MEM_CMD_OFFSET; + else + chip->IO_ADDR_W = nand->bank_base; + + reg = readl(nand->base + JZ_REG_NAND_CTRL); + if (ctrl & NAND_NCE) + reg |= JZ_NAND_CTRL_ASSERT_CHIP(0); + else + reg &= 
~JZ_NAND_CTRL_ASSERT_CHIP(0); + writel(reg, nand->base + JZ_REG_NAND_CTRL); + } + if (dat != NAND_CMD_NONE) + writeb(dat, chip->IO_ADDR_W); +} + +static int jz_nand_dev_ready(struct mtd_info *mtd) +{ + struct jz_nand *nand = mtd_to_jz_nand(mtd); + return gpio_get_value_cansleep(nand->pdata->busy_gpio); +} + +static void jz_nand_hwctl(struct mtd_info *mtd, int mode) +{ + struct jz_nand *nand = mtd_to_jz_nand(mtd); + uint32_t reg; + + writel(0, nand->base + JZ_REG_NAND_IRQ_STAT); + reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL); + + reg |= JZ_NAND_ECC_CTRL_RESET; + reg |= JZ_NAND_ECC_CTRL_ENABLE; + reg |= JZ_NAND_ECC_CTRL_RS; + + switch (mode) { + case NAND_ECC_READ: + reg &= ~JZ_NAND_ECC_CTRL_ENCODING; + nand->is_reading = true; + break; + case NAND_ECC_WRITE: + reg |= JZ_NAND_ECC_CTRL_ENCODING; + nand->is_reading = false; + break; + default: + break; + } + + writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL); +} + +static int jz_nand_calculate_ecc_rs(struct mtd_info *mtd, const uint8_t *dat, + uint8_t *ecc_code) +{ + struct jz_nand *nand = mtd_to_jz_nand(mtd); + uint32_t reg, status; + int i; + unsigned int timeout = 1000; + static uint8_t empty_block_ecc[] = {0xcd, 0x9d, 0x90, 0x58, 0xf4, + 0x8b, 0xff, 0xb7, 0x6f}; + + if (nand->is_reading) + return 0; + + do { + status = readl(nand->base + JZ_REG_NAND_IRQ_STAT); + } while (!(status & JZ_NAND_STATUS_ENC_FINISH) && --timeout); + + if (timeout == 0) + return -1; + + reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL); + reg &= ~JZ_NAND_ECC_CTRL_ENABLE; + writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL); + + for (i = 0; i < 9; ++i) + ecc_code[i] = readb(nand->base + JZ_REG_NAND_PAR0 + i); + + /* If the written data is completly 0xff, we also want to write 0xff as + * ecc, otherwise we will get in trouble when doing subpage writes. 
*/ + if (memcmp(ecc_code, empty_block_ecc, 9) == 0) + memset(ecc_code, 0xff, 9); + + return 0; +} + +static void jz_nand_correct_data(uint8_t *dat, int index, int mask) +{ + int offset = index & 0x7; + uint16_t data; + + index += (index >> 3); + + data = dat[index]; + data |= dat[index+1] << 8; + + mask ^= (data >> offset) & 0x1ff; + data &= ~(0x1ff << offset); + data |= (mask << offset); + + dat[index] = data & 0xff; + dat[index+1] = (data >> 8) & 0xff; +} + +static int jz_nand_correct_ecc_rs(struct mtd_info *mtd, uint8_t *dat, + uint8_t *read_ecc, uint8_t *calc_ecc) +{ + struct jz_nand *nand = mtd_to_jz_nand(mtd); + int i, error_count, index; + uint32_t reg, status, error; + uint32_t t; + unsigned int timeout = 1000; + + t = read_ecc[0]; + + if (t == 0xff) { + for (i = 1; i < 9; ++i) + t &= read_ecc[i]; + + t &= dat[0]; + t &= dat[nand->chip.ecc.size / 2]; + t &= dat[nand->chip.ecc.size - 1]; + + if (t == 0xff) { + for (i = 1; i < nand->chip.ecc.size - 1; ++i) + t &= dat[i]; + if (t == 0xff) + return 0; + } + } + + for (i = 0; i < 9; ++i) + writeb(read_ecc[i], nand->base + JZ_REG_NAND_PAR0 + i); + + reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL); + reg |= JZ_NAND_ECC_CTRL_PAR_READY; + writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL); + + do { + status = readl(nand->base + JZ_REG_NAND_IRQ_STAT); + } while (!(status & JZ_NAND_STATUS_DEC_FINISH) && --timeout); + + if (timeout == 0) + return -1; + + reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL); + reg &= ~JZ_NAND_ECC_CTRL_ENABLE; + writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL); + + if (status & JZ_NAND_STATUS_ERROR) { + if (status & JZ_NAND_STATUS_UNCOR_ERROR) + return -1; + + error_count = (status & JZ_NAND_STATUS_ERR_COUNT) >> 29; + + for (i = 0; i < error_count; ++i) { + error = readl(nand->base + JZ_REG_NAND_ERR(i)); + index = ((error >> 16) & 0x1ff) - 1; + if (index >= 0 && index < 512) + jz_nand_correct_data(dat, index, error & 0x1ff); + } + + return error_count; + } + + return 0; +} + + +/* Copy paste of nand_read_page_hwecc_oob_first except for different eccpos + * handling. The ecc area is for 4k chips 72 bytes long and thus does not fit + * into the eccpos array. */ +static int jz_nand_read_page_hwecc_oob_first(struct mtd_info *mtd, + struct nand_chip *chip, uint8_t *buf, int page) +{ + int i, eccsize = chip->ecc.size; + int eccbytes = chip->ecc.bytes; + int eccsteps = chip->ecc.steps; + uint8_t *p = buf; + unsigned int ecc_offset = chip->page_shift; + + /* Read the OOB area first */ + chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); + chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); + chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); + + for (i = ecc_offset; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { + int stat; + + chip->ecc.hwctl(mtd, NAND_ECC_READ); + chip->read_buf(mtd, p, eccsize); + + stat = chip->ecc.correct(mtd, p, &chip->oob_poi[i], NULL); + if (stat < 0) + mtd->ecc_stats.failed++; + else + mtd->ecc_stats.corrected += stat; + } + return 0; +} + +/* Copy-and-paste of nand_write_page_hwecc with different eccpos handling. 
*/ +static void jz_nand_write_page_hwecc(struct mtd_info *mtd, + struct nand_chip *chip, const uint8_t *buf) +{ + int i, eccsize = chip->ecc.size; + int eccbytes = chip->ecc.bytes; + int eccsteps = chip->ecc.steps; + const uint8_t *p = buf; + unsigned int ecc_offset = chip->page_shift; + + for (i = ecc_offset; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { + chip->ecc.hwctl(mtd, NAND_ECC_WRITE); + chip->write_buf(mtd, p, eccsize); + chip->ecc.calculate(mtd, p, &chip->oob_poi[i]); + } + + chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); +} + +#ifdef CONFIG_MTD_CMDLINE_PARTS +static const char *part_probes[] = {"cmdline", NULL}; +#endif + +static int jz_nand_ioremap_resource(struct platform_device *pdev, + const char *name, struct resource **res, void __iomem **base) +{ + int ret; + + *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); + if (!*res) { + dev_err(&pdev->dev, "Failed to get platform %s memory\n", name); + ret = -ENXIO; + goto err; + } + + *res = request_mem_region((*res)->start, resource_size(*res), + pdev->name); + if (!*res) { + dev_err(&pdev->dev, "Failed to request %s memory region\n", name); + ret = -EBUSY; + goto err; + } + + *base = ioremap((*res)->start, resource_size(*res)); + if (!*base) { + dev_err(&pdev->dev, "Failed to ioremap %s memory region\n", name); + ret = -EBUSY; + goto err_release_mem; + } + + return 0; + +err_release_mem: + release_mem_region((*res)->start, resource_size(*res)); +err: + *res = NULL; + *base = NULL; + return ret; +} + +static int __devinit jz_nand_probe(struct platform_device *pdev) +{ + int ret; + struct jz_nand *nand; + struct nand_chip *chip; + struct mtd_info *mtd; + struct jz_nand_platform_data *pdata = pdev->dev.platform_data; +#ifdef CONFIG_MTD_PARTITIONS + struct mtd_partition *partition_info; + int num_partitions = 0; +#endif + + nand = kzalloc(sizeof(*nand), GFP_KERNEL); + if (!nand) { + dev_err(&pdev->dev, "Failed to allocate device structure.\n"); + return -ENOMEM; + } + + ret = jz_nand_ioremap_resource(pdev, "mmio", &nand->mem, &nand->base); + if (ret) + goto err_free; + ret = jz_nand_ioremap_resource(pdev, "bank", &nand->bank_mem, + &nand->bank_base); + if (ret) + goto err_iounmap_mmio; + + if (pdata && gpio_is_valid(pdata->busy_gpio)) { + ret = gpio_request(pdata->busy_gpio, "NAND busy pin"); + if (ret) { + dev_err(&pdev->dev, + "Failed to request busy gpio %d: %d\n", + pdata->busy_gpio, ret); + goto err_iounmap_mem; + } + } + + mtd = &nand->mtd; + chip = &nand->chip; + mtd->priv = chip; + mtd->owner = THIS_MODULE; + mtd->name = "jz4740-nand"; + + chip->ecc.hwctl = jz_nand_hwctl; + chip->ecc.calculate = jz_nand_calculate_ecc_rs; + chip->ecc.correct = jz_nand_correct_ecc_rs; + chip->ecc.mode = NAND_ECC_HW_OOB_FIRST; + chip->ecc.size = 512; + chip->ecc.bytes = 9; + + chip->ecc.read_page = jz_nand_read_page_hwecc_oob_first; + chip->ecc.write_page = jz_nand_write_page_hwecc; + + if (pdata) + chip->ecc.layout = pdata->ecc_layout; + + chip->chip_delay = 50; + chip->cmd_ctrl = jz_nand_cmd_ctrl; + + if (pdata && gpio_is_valid(pdata->busy_gpio)) + chip->dev_ready = jz_nand_dev_ready; + + chip->IO_ADDR_R = nand->bank_base; + chip->IO_ADDR_W = nand->bank_base; + + nand->pdata = pdata; + platform_set_drvdata(pdev, nand); + + writel(JZ_NAND_CTRL_ENABLE_CHIP(0), nand->base + JZ_REG_NAND_CTRL); + + ret = nand_scan_ident(mtd, 1, NULL); + if (ret) { + dev_err(&pdev->dev, "Failed to scan nand\n"); + goto err_gpio_free; + } + + if (pdata && pdata->ident_callback) { + pdata->ident_callback(pdev, chip, &pdata->partitions, + 
&pdata->num_partitions); + } + + ret = nand_scan_tail(mtd); + if (ret) { + dev_err(&pdev->dev, "Failed to scan nand\n"); + goto err_gpio_free; + } + +#ifdef CONFIG_MTD_PARTITIONS +#ifdef CONFIG_MTD_CMDLINE_PARTS + num_partitions = parse_mtd_partitions(mtd, part_probes, + &partition_info, 0); +#endif + if (num_partitions <= 0 && pdata) { + num_partitions = pdata->num_partitions; + partition_info = pdata->partitions; + } + + if (num_partitions > 0) + ret = add_mtd_partitions(mtd, partition_info, num_partitions); + else +#endif + ret = add_mtd_device(mtd); + + if (ret) { + dev_err(&pdev->dev, "Failed to add mtd device\n"); + goto err_nand_release; + } + + dev_info(&pdev->dev, "Successfully registered JZ4740 NAND driver\n"); + + return 0; + +err_nand_release: + nand_release(&nand->mtd); +err_gpio_free: + platform_set_drvdata(pdev, NULL); + gpio_free(pdata->busy_gpio); +err_iounmap_mem: + iounmap(nand->bank_base); +err_iounmap_mmio: + iounmap(nand->base); +err_free: + kfree(nand); + return ret; +} + +static int __devexit jz_nand_remove(struct platform_device *pdev) +{ + struct jz_nand *nand = platform_get_drvdata(pdev); + + nand_release(&nand->mtd); + + /* Deassert and disable all chips */ + writel(0, nand->base + JZ_REG_NAND_CTRL); + + iounmap(nand->bank_base); + release_mem_region(nand->bank_mem->start, resource_size(nand->bank_mem)); + iounmap(nand->base); + release_mem_region(nand->mem->start, resource_size(nand->mem)); + + platform_set_drvdata(pdev, NULL); + kfree(nand); + + return 0; +} + +struct platform_driver jz_nand_driver = { + .probe = jz_nand_probe, + .remove = __devexit_p(jz_nand_remove), + .driver = { + .name = "jz4740-nand", + .owner = THIS_MODULE, + }, +}; + +static int __init jz_nand_init(void) +{ + return platform_driver_register(&jz_nand_driver); +} +module_init(jz_nand_init); + +static void __exit jz_nand_exit(void) +{ + platform_driver_unregister(&jz_nand_driver); +} +module_exit(jz_nand_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); +MODULE_DESCRIPTION("NAND controller driver for JZ4740 SoC"); +MODULE_ALIAS("platform:jz4740-nand"); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index ebe68395ecf..5a6895320b4 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -484,7 +484,7 @@ config XTENSA_XT2000_SONIC config MIPS_AU1X00_ENET tristate "MIPS AU1000 Ethernet support" - depends on SOC_AU1X00 + depends on MIPS_ALCHEMY select PHYLIB select CRC32 help @@ -914,7 +914,7 @@ config SMC91X tristate "SMC 91C9x/91C1xxx support" select CRC32 select MII - depends on ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || \ + depends on ARM || M32R || SUPERH || \ MIPS || BLACKFIN || MN10300 || COLDFIRE help This is a driver for SMC's 91x series of Ethernet chipsets, diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index 386d4feec65..15ae6df2ff0 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c @@ -104,14 +104,6 @@ MODULE_VERSION(DRV_VERSION); * complete immediately. */ -/* These addresses are only used if yamon doesn't tell us what - * the mac address is, and the mac address is not passed on the - * command line. 
- */ -static unsigned char au1000_mac_addr[6] __devinitdata = { - 0x00, 0x50, 0xc2, 0x0c, 0x30, 0x00 -}; - struct au1000_private *au_macs[NUM_ETH_INTERFACES]; /* @@ -1002,7 +994,6 @@ static int __devinit au1000_probe(struct platform_device *pdev) db_dest_t *pDB, *pDBfree; int irq, i, err = 0; struct resource *base, *macen; - char ethaddr[6]; base = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!base) { @@ -1079,24 +1070,13 @@ static int __devinit au1000_probe(struct platform_device *pdev) } aup->mac_id = pdev->id; - if (pdev->id == 0) { - if (prom_get_ethernet_addr(ethaddr) == 0) - memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr)); - else { - netdev_info(dev, "No MAC address found\n"); - /* Use the hard coded MAC addresses */ - } - + if (pdev->id == 0) au1000_setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR); - } else if (pdev->id == 1) + else if (pdev->id == 1) au1000_setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR); - /* - * Assign to the Ethernet ports two consecutive MAC addresses - * to match those that are printed on their stickers - */ - memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr)); - dev->dev_addr[5] += pdev->id; + /* set a random MAC now in case platform_data doesn't provide one */ + random_ether_addr(dev->dev_addr); *aup->enable = 0; aup->mac_enabled = 0; @@ -1106,6 +1086,9 @@ static int __devinit au1000_probe(struct platform_device *pdev) dev_info(&pdev->dev, "no platform_data passed, PHY search on MAC0\n"); aup->phy1_search_mac0 = 1; } else { + if (is_valid_ether_addr(pd->mac)) + memcpy(dev->dev_addr, pd->mac, 6); + aup->phy_static_config = pd->phy_static_config; aup->phy_search_highest_addr = pd->phy_search_highest_addr; aup->phy1_search_mac0 = pd->phy1_search_mac0; diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c index b4c41d72c42..f53f850b641 100644 --- a/drivers/net/fsl_pq_mdio.c +++ b/drivers/net/fsl_pq_mdio.c @@ -35,6 +35,7 @@ #include <linux/mii.h> #include <linux/phy.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/of_mdio.h> #include <linux/of_platform.h> diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c index 0f1d4e96cf8..eeec7bc2ce7 100644 --- a/drivers/net/ibm_newemac/core.c +++ b/drivers/net/ibm_newemac/core.c @@ -2339,11 +2339,11 @@ static int __devinit emac_wait_deps(struct emac_instance *dev) deps[EMAC_DEP_MDIO_IDX].phandle = dev->mdio_ph; if (dev->blist && dev->blist > emac_boot_list) deps[EMAC_DEP_PREV_IDX].phandle = 0xffffffffu; - bus_register_notifier(&of_platform_bus_type, &emac_of_bus_notifier); + bus_register_notifier(&platform_bus_type, &emac_of_bus_notifier); wait_event_timeout(emac_probe_wait, emac_check_deps(dev, deps), EMAC_PROBE_DEP_TIMEOUT); - bus_unregister_notifier(&of_platform_bus_type, &emac_of_bus_notifier); + bus_unregister_notifier(&platform_bus_type, &emac_of_bus_notifier); err = emac_check_deps(dev, deps) ? 
0 : -ENODEV; for (i = 0; i < EMAC_DEP_COUNT; i++) { if (deps[i].node) diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c index edd5666f0ff..9e3f4f54281 100644 --- a/drivers/net/irda/sh_irda.c +++ b/drivers/net/irda/sh_irda.c @@ -748,7 +748,6 @@ static int __devinit sh_irda_probe(struct platform_device *pdev) struct net_device *ndev; struct sh_irda_self *self; struct resource *res; - char clk_name[8]; int irq; int err = -ENOMEM; @@ -775,10 +774,9 @@ static int __devinit sh_irda_probe(struct platform_device *pdev) if (err) goto err_mem_2; - snprintf(clk_name, sizeof(clk_name), "irda%d", pdev->id); - self->clk = clk_get(&pdev->dev, clk_name); + self->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(self->clk)) { - dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); + dev_err(&pdev->dev, "cannot get irda clock\n"); goto err_mem_3; } diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c index 1a57c3da1f4..04e552aa14e 100644 --- a/drivers/net/myri_sbus.c +++ b/drivers/net/myri_sbus.c @@ -1079,7 +1079,7 @@ static int __devinit myri_sbus_probe(struct of_device *op, const struct of_devic mp->dev = dev; dev->watchdog_timeo = 5*HZ; - dev->irq = op->irqs[0]; + dev->irq = op->archdata.irqs[0]; dev->netdev_ops = &myri_ops; /* Register interrupt handler now. */ @@ -1172,12 +1172,12 @@ static struct of_platform_driver myri_sbus_driver = { static int __init myri_sbus_init(void) { - return of_register_driver(&myri_sbus_driver, &of_bus_type); + return of_register_platform_driver(&myri_sbus_driver); } static void __exit myri_sbus_exit(void) { - of_unregister_driver(&myri_sbus_driver); + of_unregister_platform_driver(&myri_sbus_driver); } module_init(myri_sbus_init); diff --git a/drivers/net/niu.c b/drivers/net/niu.c index b9b950845b0..404f2d55288 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c @@ -28,10 +28,7 @@ #include <linux/slab.h> #include <linux/io.h> - -#ifdef CONFIG_SPARC64 #include <linux/of_device.h> -#endif #include "niu.h" @@ -9114,12 +9111,12 @@ static int __devinit niu_n2_irq_init(struct niu *np, u8 *ldg_num_map) if (!int_prop) return -ENODEV; - for (i = 0; i < op->num_irqs; i++) { + for (i = 0; i < op->archdata.num_irqs; i++) { ldg_num_map[i] = int_prop[i]; - np->ldg[i].irq = op->irqs[i]; + np->ldg[i].irq = op->archdata.irqs[i]; } - np->num_ldg = op->num_irqs; + np->num_ldg = op->archdata.num_irqs; return 0; #else @@ -10249,14 +10246,14 @@ static int __init niu_init(void) niu_debug = netif_msg_init(debug, NIU_MSG_DEFAULT); #ifdef CONFIG_SPARC64 - err = of_register_driver(&niu_of_driver, &of_bus_type); + err = of_register_platform_driver(&niu_of_driver); #endif if (!err) { err = pci_register_driver(&niu_pci_driver); #ifdef CONFIG_SPARC64 if (err) - of_unregister_driver(&niu_of_driver); + of_unregister_platform_driver(&niu_of_driver); #endif } @@ -10267,7 +10264,7 @@ static void __exit niu_exit(void) { pci_unregister_driver(&niu_pci_driver); #ifdef CONFIG_SPARC64 - of_unregister_driver(&niu_of_driver); + of_unregister_platform_driver(&niu_of_driver); #endif } diff --git a/drivers/net/niu.h b/drivers/net/niu.h index d6715465f35..a41fa8ebe05 100644 --- a/drivers/net/niu.h +++ b/drivers/net/niu.h @@ -3236,7 +3236,7 @@ struct niu_phy_ops { int (*link_status)(struct niu *np, int *); }; -struct of_device; +struct platform_device; struct niu { void __iomem *regs; struct net_device *dev; @@ -3297,7 +3297,7 @@ struct niu { struct niu_vpd vpd; u32 eeprom_len; - struct of_device *op; + struct platform_device *op; void __iomem *vir_regs_1; void __iomem *vir_regs_2; }; 
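
The sunbmac, sunhme, sunlance, sunqe, myri_sbus and niu hunks in this commit all apply the same two-part sparc conversion: OF drivers are now registered on the ordinary platform bus through of_register_platform_driver()/of_unregister_platform_driver(), and interrupt numbers are read from op->archdata.irqs[] instead of the removed op->irqs[] array. A minimal sketch of the resulting boilerplate, using a hypothetical example_sbus driver and match table that are not part of this commit:

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

static int __devinit example_sbus_probe(struct of_device *op,
					const struct of_device_id *match)
{
	/* IRQs now live in the platform_device archdata, not in struct of_device */
	int irq = op->archdata.irqs[0];

	dev_info(&op->dev, "probed, irq %d\n", irq);
	return 0;
}

static int __devexit example_sbus_remove(struct of_device *op)
{
	return 0;
}

static const struct of_device_id example_sbus_match[] = {
	{ .name = "example", },
	{},
};

static struct of_platform_driver example_sbus_driver = {
	.probe	= example_sbus_probe,
	.remove	= __devexit_p(example_sbus_remove),
	.driver	= {
		.name		= "example_sbus",
		.owner		= THIS_MODULE,
		.of_match_table	= example_sbus_match,
	},
};

static int __init example_sbus_init(void)
{
	/* no &of_bus_type argument any more; OF devices are platform devices */
	return of_register_platform_driver(&example_sbus_driver);
}

static void __exit example_sbus_exit(void)
{
	of_unregister_platform_driver(&example_sbus_driver);
}

module_init(example_sbus_init);
module_exit(example_sbus_exit);
MODULE_LICENSE("GPL");
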
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index 8d2772cc42f..ee747919a76 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h @@ -83,43 +83,6 @@ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) } } -#elif defined(CONFIG_REDWOOD_5) || defined(CONFIG_REDWOOD_6) - -/* We can only do 16-bit reads and writes in the static memory space. */ -#define SMC_CAN_USE_8BIT 0 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 0 -#define SMC_NOWAIT 1 - -#define SMC_IO_SHIFT 0 - -#define SMC_inw(a, r) in_be16((volatile u16 *)((a) + (r))) -#define SMC_outw(v, a, r) out_be16((volatile u16 *)((a) + (r)), v) -#define SMC_insw(a, r, p, l) \ - do { \ - unsigned long __port = (a) + (r); \ - u16 *__p = (u16 *)(p); \ - int __l = (l); \ - insw(__port, __p, __l); \ - while (__l > 0) { \ - *__p = swab16(*__p); \ - __p++; \ - __l--; \ - } \ - } while (0) -#define SMC_outsw(a, r, p, l) \ - do { \ - unsigned long __port = (a) + (r); \ - u16 *__p = (u16 *)(p); \ - int __l = (l); \ - while (__l > 0) { \ - /* Believe it or not, the swab isn't needed. */ \ - outw( /* swab16 */ (*__p++), __port); \ - __l--; \ - } \ - } while (0) -#define SMC_IRQ_FLAGS (0) - #elif defined(CONFIG_SA1100_PLEB) /* We can only do 16-bit reads and writes in the static memory space. */ #define SMC_CAN_USE_8BIT 1 diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c index 367e96f317d..09c071bd6ad 100644 --- a/drivers/net/sunbmac.c +++ b/drivers/net/sunbmac.c @@ -1201,7 +1201,7 @@ static int __devinit bigmac_ether_init(struct of_device *op, dev->watchdog_timeo = 5*HZ; /* Finish net device registration. */ - dev->irq = bp->bigmac_op->irqs[0]; + dev->irq = bp->bigmac_op->archdata.irqs[0]; dev->dma = 0; if (register_netdev(dev)) { @@ -1301,12 +1301,12 @@ static struct of_platform_driver bigmac_sbus_driver = { static int __init bigmac_init(void) { - return of_register_driver(&bigmac_sbus_driver, &of_bus_type); + return of_register_platform_driver(&bigmac_sbus_driver); } static void __exit bigmac_exit(void) { - of_unregister_driver(&bigmac_sbus_driver); + of_unregister_platform_driver(&bigmac_sbus_driver); } module_init(bigmac_init); diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c index 3d9650b8d38..eec443f6407 100644 --- a/drivers/net/sunhme.c +++ b/drivers/net/sunhme.c @@ -2561,7 +2561,7 @@ static int __init quattro_sbus_register_irqs(void) if (skip) continue; - err = request_irq(op->irqs[0], + err = request_irq(op->archdata.irqs[0], quattro_sbus_interrupt, IRQF_SHARED, "Quattro", qp); @@ -2590,7 +2590,7 @@ static void quattro_sbus_free_irqs(void) if (skip) continue; - free_irq(op->irqs[0], qp); + free_irq(op->archdata.irqs[0], qp); } } #endif /* CONFIG_SBUS */ @@ -2790,7 +2790,7 @@ static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe) /* Happy Meal can do it all... */ dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; - dev->irq = op->irqs[0]; + dev->irq = op->archdata.irqs[0]; #if defined(CONFIG_SBUS) && defined(CONFIG_PCI) /* Hook up SBUS register/descriptor accessors. 
*/ @@ -3304,7 +3304,7 @@ static int __init happy_meal_sbus_init(void) { int err; - err = of_register_driver(&hme_sbus_driver, &of_bus_type); + err = of_register_platform_driver(&hme_sbus_driver); if (!err) err = quattro_sbus_register_irqs(); @@ -3313,7 +3313,7 @@ static int __init happy_meal_sbus_init(void) static void happy_meal_sbus_exit(void) { - of_unregister_driver(&hme_sbus_driver); + of_unregister_platform_driver(&hme_sbus_driver); quattro_sbus_free_irqs(); while (qfe_sbus_list) { diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c index 7d9c33dd9d1..ee364fa7563 100644 --- a/drivers/net/sunlance.c +++ b/drivers/net/sunlance.c @@ -1474,7 +1474,7 @@ no_link_test: dev->ethtool_ops = &sparc_lance_ethtool_ops; dev->netdev_ops = &sparc_lance_ops; - dev->irq = op->irqs[0]; + dev->irq = op->archdata.irqs[0]; /* We cannot sleep if the chip is busy during a * multicast list update event, because such events @@ -1558,12 +1558,12 @@ static struct of_platform_driver sunlance_sbus_driver = { /* Find all the lance cards on the system and initialize them */ static int __init sparc_lance_init(void) { - return of_register_driver(&sunlance_sbus_driver, &of_bus_type); + return of_register_platform_driver(&sunlance_sbus_driver); } static void __exit sparc_lance_exit(void) { - of_unregister_driver(&sunlance_sbus_driver); + of_unregister_platform_driver(&sunlance_sbus_driver); } module_init(sparc_lance_init); diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c index 72b579c8d81..5f84a5daded 100644 --- a/drivers/net/sunqe.c +++ b/drivers/net/sunqe.c @@ -803,7 +803,7 @@ static struct sunqec * __devinit get_qec(struct of_device *child) qec_init_once(qecp, op); - if (request_irq(op->irqs[0], qec_interrupt, + if (request_irq(op->archdata.irqs[0], qec_interrupt, IRQF_SHARED, "qec", (void *) qecp)) { printk(KERN_ERR "qec: Can't register irq.\n"); goto fail; @@ -901,7 +901,7 @@ static int __devinit qec_ether_init(struct of_device *op) SET_NETDEV_DEV(dev, &op->dev); dev->watchdog_timeo = 5*HZ; - dev->irq = op->irqs[0]; + dev->irq = op->archdata.irqs[0]; dev->dma = 0; dev->ethtool_ops = &qe_ethtool_ops; dev->netdev_ops = &qec_ops; @@ -988,18 +988,18 @@ static struct of_platform_driver qec_sbus_driver = { static int __init qec_init(void) { - return of_register_driver(&qec_sbus_driver, &of_bus_type); + return of_register_platform_driver(&qec_sbus_driver); } static void __exit qec_exit(void) { - of_unregister_driver(&qec_sbus_driver); + of_unregister_platform_driver(&qec_sbus_driver); while (root_qec_dev) { struct sunqec *next = root_qec_dev->next_module; struct of_device *op = root_qec_dev->op; - free_irq(op->irqs[0], (void *) root_qec_dev); + free_irq(op->archdata.irqs[0], (void *) root_qec_dev); of_iounmap(&op->resource[0], root_qec_dev->gregs, GLOB_REG_SIZE); kfree(root_qec_dev); diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c index d04c5b26205..b2c2f391b29 100644 --- a/drivers/net/xilinx_emaclite.c +++ b/drivers/net/xilinx_emaclite.c @@ -20,7 +20,7 @@ #include <linux/skbuff.h> #include <linux/io.h> #include <linux/slab.h> - +#include <linux/of_address.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/of_mdio.h> diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index 7cecc8fea9b..6acbff389ab 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig @@ -1,35 +1,61 @@ -config OF_FLATTREE +config DTC + bool + +config OF bool + +menu "Flattened Device Tree and Open Firmware support" depends on OF +config PROC_DEVICETREE + bool "Support for device 
tree in /proc" + depends on PROC_FS && !SPARC + help + This option adds a device-tree directory under /proc which contains + an image of the device tree that the kernel copies from Open + Firmware or other boot firmware. If unsure, say Y here. + +config OF_FLATTREE + bool + select DTC + config OF_DYNAMIC def_bool y - depends on OF && PPC_OF + depends on PPC_OF + +config OF_ADDRESS + def_bool y + depends on !SPARC + +config OF_IRQ + def_bool y + depends on !SPARC config OF_DEVICE def_bool y - depends on OF && (SPARC || PPC_OF || MICROBLAZE) config OF_GPIO def_bool y - depends on OF && (PPC_OF || MICROBLAZE) && GPIOLIB + depends on GPIOLIB && !SPARC help OpenFirmware GPIO accessors config OF_I2C def_tristate I2C - depends on (PPC_OF || MICROBLAZE) && I2C + depends on I2C && !SPARC help OpenFirmware I2C accessors config OF_SPI def_tristate SPI - depends on OF && (PPC_OF || MICROBLAZE) && SPI + depends on SPI && !SPARC help OpenFirmware SPI accessors config OF_MDIO def_tristate PHYLIB - depends on OF && PHYLIB + depends on PHYLIB help OpenFirmware MDIO bus (Ethernet PHY) accessors + +endmenu # OF diff --git a/drivers/of/Makefile b/drivers/of/Makefile index f232cc98ce0..0052c405463 100644 --- a/drivers/of/Makefile +++ b/drivers/of/Makefile @@ -1,5 +1,7 @@ obj-y = base.o obj-$(CONFIG_OF_FLATTREE) += fdt.o +obj-$(CONFIG_OF_ADDRESS) += address.o +obj-$(CONFIG_OF_IRQ) += irq.o obj-$(CONFIG_OF_DEVICE) += device.o platform.o obj-$(CONFIG_OF_GPIO) += gpio.o obj-$(CONFIG_OF_I2C) += of_i2c.o diff --git a/drivers/of/address.c b/drivers/of/address.c new file mode 100644 index 00000000000..fcadb726d4f --- /dev/null +++ b/drivers/of/address.c @@ -0,0 +1,595 @@ + +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/pci_regs.h> +#include <linux/string.h> + +/* Max address size we deal with */ +#define OF_MAX_ADDR_CELLS 4 +#define OF_CHECK_COUNTS(na, ns) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS && \ + (ns) > 0) + +static struct of_bus *of_match_bus(struct device_node *np); +static int __of_address_to_resource(struct device_node *dev, const u32 *addrp, + u64 size, unsigned int flags, + struct resource *r); + +/* Debug utility */ +#ifdef DEBUG +static void of_dump_addr(const char *s, const u32 *addr, int na) +{ + printk(KERN_DEBUG "%s", s); + while (na--) + printk(" %08x", be32_to_cpu(*(addr++))); + printk("\n"); +} +#else +static void of_dump_addr(const char *s, const u32 *addr, int na) { } +#endif + +/* Callbacks for bus specific translators */ +struct of_bus { + const char *name; + const char *addresses; + int (*match)(struct device_node *parent); + void (*count_cells)(struct device_node *child, + int *addrc, int *sizec); + u64 (*map)(u32 *addr, const u32 *range, + int na, int ns, int pna); + int (*translate)(u32 *addr, u64 offset, int na); + unsigned int (*get_flags)(const u32 *addr); +}; + +/* + * Default translator (generic bus) + */ + +static void of_bus_default_count_cells(struct device_node *dev, + int *addrc, int *sizec) +{ + if (addrc) + *addrc = of_n_addr_cells(dev); + if (sizec) + *sizec = of_n_size_cells(dev); +} + +static u64 of_bus_default_map(u32 *addr, const u32 *range, + int na, int ns, int pna) +{ + u64 cp, s, da; + + cp = of_read_number(range, na); + s = of_read_number(range + na + pna, ns); + da = of_read_number(addr, na); + + pr_debug("OF: default map, cp=%llx, s=%llx, da=%llx\n", + (unsigned long long)cp, (unsigned long long)s, + (unsigned long long)da); + + if (da < cp || da >= (cp + s)) + return OF_BAD_ADDR; + return 
da - cp; +} + +static int of_bus_default_translate(u32 *addr, u64 offset, int na) +{ + u64 a = of_read_number(addr, na); + memset(addr, 0, na * 4); + a += offset; + if (na > 1) + addr[na - 2] = cpu_to_be32(a >> 32); + addr[na - 1] = cpu_to_be32(a & 0xffffffffu); + + return 0; +} + +static unsigned int of_bus_default_get_flags(const u32 *addr) +{ + return IORESOURCE_MEM; +} + +#ifdef CONFIG_PCI +/* + * PCI bus specific translator + */ + +static int of_bus_pci_match(struct device_node *np) +{ + /* "vci" is for the /chaos bridge on 1st-gen PCI powermacs */ + return !strcmp(np->type, "pci") || !strcmp(np->type, "vci"); +} + +static void of_bus_pci_count_cells(struct device_node *np, + int *addrc, int *sizec) +{ + if (addrc) + *addrc = 3; + if (sizec) + *sizec = 2; +} + +static unsigned int of_bus_pci_get_flags(const u32 *addr) +{ + unsigned int flags = 0; + u32 w = addr[0]; + + switch((w >> 24) & 0x03) { + case 0x01: + flags |= IORESOURCE_IO; + break; + case 0x02: /* 32 bits */ + case 0x03: /* 64 bits */ + flags |= IORESOURCE_MEM; + break; + } + if (w & 0x40000000) + flags |= IORESOURCE_PREFETCH; + return flags; +} + +static u64 of_bus_pci_map(u32 *addr, const u32 *range, int na, int ns, int pna) +{ + u64 cp, s, da; + unsigned int af, rf; + + af = of_bus_pci_get_flags(addr); + rf = of_bus_pci_get_flags(range); + + /* Check address type match */ + if ((af ^ rf) & (IORESOURCE_MEM | IORESOURCE_IO)) + return OF_BAD_ADDR; + + /* Read address values, skipping high cell */ + cp = of_read_number(range + 1, na - 1); + s = of_read_number(range + na + pna, ns); + da = of_read_number(addr + 1, na - 1); + + pr_debug("OF: PCI map, cp=%llx, s=%llx, da=%llx\n", + (unsigned long long)cp, (unsigned long long)s, + (unsigned long long)da); + + if (da < cp || da >= (cp + s)) + return OF_BAD_ADDR; + return da - cp; +} + +static int of_bus_pci_translate(u32 *addr, u64 offset, int na) +{ + return of_bus_default_translate(addr + 1, offset, na - 1); +} + +const u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size, + unsigned int *flags) +{ + const u32 *prop; + unsigned int psize; + struct device_node *parent; + struct of_bus *bus; + int onesize, i, na, ns; + + /* Get parent & match bus type */ + parent = of_get_parent(dev); + if (parent == NULL) + return NULL; + bus = of_match_bus(parent); + if (strcmp(bus->name, "pci")) { + of_node_put(parent); + return NULL; + } + bus->count_cells(dev, &na, &ns); + of_node_put(parent); + if (!OF_CHECK_COUNTS(na, ns)) + return NULL; + + /* Get "reg" or "assigned-addresses" property */ + prop = of_get_property(dev, bus->addresses, &psize); + if (prop == NULL) + return NULL; + psize /= 4; + + onesize = na + ns; + for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) { + u32 val = be32_to_cpu(prop[0]); + if ((val & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) { + if (size) + *size = of_read_number(prop + na, ns); + if (flags) + *flags = bus->get_flags(prop); + return prop; + } + } + return NULL; +} +EXPORT_SYMBOL(of_get_pci_address); + +int of_pci_address_to_resource(struct device_node *dev, int bar, + struct resource *r) +{ + const u32 *addrp; + u64 size; + unsigned int flags; + + addrp = of_get_pci_address(dev, bar, &size, &flags); + if (addrp == NULL) + return -EINVAL; + return __of_address_to_resource(dev, addrp, size, flags, r); +} +EXPORT_SYMBOL_GPL(of_pci_address_to_resource); +#endif /* CONFIG_PCI */ + +/* + * ISA bus specific translator + */ + +static int of_bus_isa_match(struct device_node *np) +{ + return !strcmp(np->name, "isa"); +} + 
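/*
 * Illustrative note, not part of the original commit: the ISA bus helpers
 * below assume the conventional two-cell ISA address format, where bit 0 of
 * the first address cell selects I/O versus memory space.  For a hypothetical
 * child node carrying
 *
 *	reg = <1 0x3f8 8>;	(I/O space, port 0x3f8, 8 bytes)
 *
 * of_bus_isa_get_flags() reports IORESOURCE_IO, and of_bus_isa_map()
 * translates the remaining single address cell (0x3f8) through the parent
 * node's "ranges" property, rejecting any range entry whose I/O-vs-memory
 * bit does not match.
 */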
+static void of_bus_isa_count_cells(struct device_node *child, + int *addrc, int *sizec) +{ + if (addrc) + *addrc = 2; + if (sizec) + *sizec = 1; +} + +static u64 of_bus_isa_map(u32 *addr, const u32 *range, int na, int ns, int pna) +{ + u64 cp, s, da; + + /* Check address type match */ + if ((addr[0] ^ range[0]) & 0x00000001) + return OF_BAD_ADDR; + + /* Read address values, skipping high cell */ + cp = of_read_number(range + 1, na - 1); + s = of_read_number(range + na + pna, ns); + da = of_read_number(addr + 1, na - 1); + + pr_debug("OF: ISA map, cp=%llx, s=%llx, da=%llx\n", + (unsigned long long)cp, (unsigned long long)s, + (unsigned long long)da); + + if (da < cp || da >= (cp + s)) + return OF_BAD_ADDR; + return da - cp; +} + +static int of_bus_isa_translate(u32 *addr, u64 offset, int na) +{ + return of_bus_default_translate(addr + 1, offset, na - 1); +} + +static unsigned int of_bus_isa_get_flags(const u32 *addr) +{ + unsigned int flags = 0; + u32 w = addr[0]; + + if (w & 1) + flags |= IORESOURCE_IO; + else + flags |= IORESOURCE_MEM; + return flags; +} + +/* + * Array of bus specific translators + */ + +static struct of_bus of_busses[] = { +#ifdef CONFIG_PCI + /* PCI */ + { + .name = "pci", + .addresses = "assigned-addresses", + .match = of_bus_pci_match, + .count_cells = of_bus_pci_count_cells, + .map = of_bus_pci_map, + .translate = of_bus_pci_translate, + .get_flags = of_bus_pci_get_flags, + }, +#endif /* CONFIG_PCI */ + /* ISA */ + { + .name = "isa", + .addresses = "reg", + .match = of_bus_isa_match, + .count_cells = of_bus_isa_count_cells, + .map = of_bus_isa_map, + .translate = of_bus_isa_translate, + .get_flags = of_bus_isa_get_flags, + }, + /* Default */ + { + .name = "default", + .addresses = "reg", + .match = NULL, + .count_cells = of_bus_default_count_cells, + .map = of_bus_default_map, + .translate = of_bus_default_translate, + .get_flags = of_bus_default_get_flags, + }, +}; + +static struct of_bus *of_match_bus(struct device_node *np) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(of_busses); i++) + if (!of_busses[i].match || of_busses[i].match(np)) + return &of_busses[i]; + BUG(); + return NULL; +} + +static int of_translate_one(struct device_node *parent, struct of_bus *bus, + struct of_bus *pbus, u32 *addr, + int na, int ns, int pna, const char *rprop) +{ + const u32 *ranges; + unsigned int rlen; + int rone; + u64 offset = OF_BAD_ADDR; + + /* Normally, an absence of a "ranges" property means we are + * crossing a non-translatable boundary, and thus the addresses + * below the current not cannot be converted to CPU physical ones. + * Unfortunately, while this is very clear in the spec, it's not + * what Apple understood, and they do have things like /uni-n or + * /ht nodes with no "ranges" property and a lot of perfectly + * useable mapped devices below them. Thus we treat the absence of + * "ranges" as equivalent to an empty "ranges" property which means + * a 1:1 translation at that level. It's up to the caller not to try + * to translate addresses that aren't supposed to be translated in + * the first place. --BenH. + * + * As far as we know, this damage only exists on Apple machines, so + * This code is only enabled on powerpc. 
--gcl + */ + ranges = of_get_property(parent, rprop, &rlen); +#if !defined(CONFIG_PPC) + if (ranges == NULL) { + pr_err("OF: no ranges; cannot translate\n"); + return 1; + } +#endif /* !defined(CONFIG_PPC) */ + if (ranges == NULL || rlen == 0) { + offset = of_read_number(addr, na); + memset(addr, 0, pna * 4); + pr_debug("OF: empty ranges; 1:1 translation\n"); + goto finish; + } + + pr_debug("OF: walking ranges...\n"); + + /* Now walk through the ranges */ + rlen /= 4; + rone = na + pna + ns; + for (; rlen >= rone; rlen -= rone, ranges += rone) { + offset = bus->map(addr, ranges, na, ns, pna); + if (offset != OF_BAD_ADDR) + break; + } + if (offset == OF_BAD_ADDR) { + pr_debug("OF: not found !\n"); + return 1; + } + memcpy(addr, ranges + na, 4 * pna); + + finish: + of_dump_addr("OF: parent translation for:", addr, pna); + pr_debug("OF: with offset: %llx\n", (unsigned long long)offset); + + /* Translate it into parent bus space */ + return pbus->translate(addr, offset, pna); +} + +/* + * Translate an address from the device-tree into a CPU physical address, + * this walks up the tree and applies the various bus mappings on the + * way. + * + * Note: We consider that crossing any level with #size-cells == 0 to mean + * that translation is impossible (that is we are not dealing with a value + * that can be mapped to a cpu physical address). This is not really specified + * that way, but this is traditionally the way IBM at least do things + */ +u64 __of_translate_address(struct device_node *dev, const u32 *in_addr, + const char *rprop) +{ + struct device_node *parent = NULL; + struct of_bus *bus, *pbus; + u32 addr[OF_MAX_ADDR_CELLS]; + int na, ns, pna, pns; + u64 result = OF_BAD_ADDR; + + pr_debug("OF: ** translation for device %s **\n", dev->full_name); + + /* Increase refcount at current level */ + of_node_get(dev); + + /* Get parent & match bus type */ + parent = of_get_parent(dev); + if (parent == NULL) + goto bail; + bus = of_match_bus(parent); + + /* Cound address cells & copy address locally */ + bus->count_cells(dev, &na, &ns); + if (!OF_CHECK_COUNTS(na, ns)) { + printk(KERN_ERR "prom_parse: Bad cell count for %s\n", + dev->full_name); + goto bail; + } + memcpy(addr, in_addr, na * 4); + + pr_debug("OF: bus is %s (na=%d, ns=%d) on %s\n", + bus->name, na, ns, parent->full_name); + of_dump_addr("OF: translating address:", addr, na); + + /* Translate */ + for (;;) { + /* Switch to parent bus */ + of_node_put(dev); + dev = parent; + parent = of_get_parent(dev); + + /* If root, we have finished */ + if (parent == NULL) { + pr_debug("OF: reached root node\n"); + result = of_read_number(addr, na); + break; + } + + /* Get new parent bus and counts */ + pbus = of_match_bus(parent); + pbus->count_cells(dev, &pna, &pns); + if (!OF_CHECK_COUNTS(pna, pns)) { + printk(KERN_ERR "prom_parse: Bad cell count for %s\n", + dev->full_name); + break; + } + + pr_debug("OF: parent bus is %s (na=%d, ns=%d) on %s\n", + pbus->name, pna, pns, parent->full_name); + + /* Apply bus translation */ + if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop)) + break; + + /* Complete the move up one level */ + na = pna; + ns = pns; + bus = pbus; + + of_dump_addr("OF: one level translation:", addr, na); + } + bail: + of_node_put(parent); + of_node_put(dev); + + return result; +} + +u64 of_translate_address(struct device_node *dev, const u32 *in_addr) +{ + return __of_translate_address(dev, in_addr, "ranges"); +} +EXPORT_SYMBOL(of_translate_address); + +u64 of_translate_dma_address(struct device_node *dev, const u32 
*in_addr) +{ + return __of_translate_address(dev, in_addr, "dma-ranges"); +} +EXPORT_SYMBOL(of_translate_dma_address); + +const u32 *of_get_address(struct device_node *dev, int index, u64 *size, + unsigned int *flags) +{ + const u32 *prop; + unsigned int psize; + struct device_node *parent; + struct of_bus *bus; + int onesize, i, na, ns; + + /* Get parent & match bus type */ + parent = of_get_parent(dev); + if (parent == NULL) + return NULL; + bus = of_match_bus(parent); + bus->count_cells(dev, &na, &ns); + of_node_put(parent); + if (!OF_CHECK_COUNTS(na, ns)) + return NULL; + + /* Get "reg" or "assigned-addresses" property */ + prop = of_get_property(dev, bus->addresses, &psize); + if (prop == NULL) + return NULL; + psize /= 4; + + onesize = na + ns; + for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) + if (i == index) { + if (size) + *size = of_read_number(prop + na, ns); + if (flags) + *flags = bus->get_flags(prop); + return prop; + } + return NULL; +} +EXPORT_SYMBOL(of_get_address); + +static int __of_address_to_resource(struct device_node *dev, const u32 *addrp, + u64 size, unsigned int flags, + struct resource *r) +{ + u64 taddr; + + if ((flags & (IORESOURCE_IO | IORESOURCE_MEM)) == 0) + return -EINVAL; + taddr = of_translate_address(dev, addrp); + if (taddr == OF_BAD_ADDR) + return -EINVAL; + memset(r, 0, sizeof(struct resource)); + if (flags & IORESOURCE_IO) { + unsigned long port; + port = pci_address_to_pio(taddr); + if (port == (unsigned long)-1) + return -EINVAL; + r->start = port; + r->end = port + size - 1; + } else { + r->start = taddr; + r->end = taddr + size - 1; + } + r->flags = flags; + r->name = dev->full_name; + return 0; +} + +/** + * of_address_to_resource - Translate device tree address and return as resource + * + * Note that if your address is a PIO address, the conversion will fail if + * the physical address can't be internally converted to an IO token with + * pci_address_to_pio(), that is because it's either called to early or it + * can't be matched to any host bridge IO space + */ +int of_address_to_resource(struct device_node *dev, int index, + struct resource *r) +{ + const u32 *addrp; + u64 size; + unsigned int flags; + + addrp = of_get_address(dev, index, &size, &flags); + if (addrp == NULL) + return -EINVAL; + return __of_address_to_resource(dev, addrp, size, flags, r); +} +EXPORT_SYMBOL_GPL(of_address_to_resource); + + +/** + * of_iomap - Maps the memory mapped IO for a given device_node + * @device: the device whose io range will be mapped + * @index: index of the io range + * + * Returns a pointer to the mapped memory + */ +void __iomem *of_iomap(struct device_node *np, int index) +{ + struct resource res; + + if (of_address_to_resource(np, index, &res)) + return NULL; + + return ioremap(res.start, 1 + res.end - res.start); +} +EXPORT_SYMBOL(of_iomap); diff --git a/drivers/of/base.c b/drivers/of/base.c index b5ad9740d8b..aa805250de7 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -545,74 +545,28 @@ struct device_node *of_find_matching_node(struct device_node *from, EXPORT_SYMBOL(of_find_matching_node); /** - * of_modalias_table: Table of explicit compatible ==> modalias mappings - * - * This table allows particulare compatible property values to be mapped - * to modalias strings. This is useful for busses which do not directly - * understand the OF device tree but are populated based on data contained - * within the device tree. SPI and I2C are the two current users of this - * table. 
- * - * In most cases, devices do not need to be listed in this table because - * the modalias value can be derived directly from the compatible table. - * However, if for any reason a value cannot be derived, then this table - * provides a method to override the implicit derivation. - * - * At the moment, a single table is used for all bus types because it is - * assumed that the data size is small and that the compatible values - * should already be distinct enough to differentiate between SPI, I2C - * and other devices. - */ -struct of_modalias_table { - char *of_device; - char *modalias; -}; -static struct of_modalias_table of_modalias_table[] = { - { "fsl,mcu-mpc8349emitx", "mcu-mpc8349emitx" }, - { "mmc-spi-slot", "mmc_spi" }, -}; - -/** * of_modalias_node - Lookup appropriate modalias for a device node * @node: pointer to a device tree node * @modalias: Pointer to buffer that modalias value will be copied into * @len: Length of modalias value * - * Based on the value of the compatible property, this routine will determine - * an appropriate modalias value for a particular device tree node. Two - * separate methods are attempted to derive a modalias value. + * Based on the value of the compatible property, this routine will attempt + * to choose an appropriate modalias value for a particular device tree node. + * It does this by stripping the manufacturer prefix (as delimited by a ',') + * from the first entry in the compatible list property. * - * First method is to lookup the compatible value in of_modalias_table. - * Second is to strip off the manufacturer prefix from the first - * compatible entry and use the remainder as modalias - * - * This routine returns 0 on success + * This routine returns 0 on success, <0 on failure. */ int of_modalias_node(struct device_node *node, char *modalias, int len) { - int i, cplen; - const char *compatible; - const char *p; - - /* 1. search for exception list entry */ - for (i = 0; i < ARRAY_SIZE(of_modalias_table); i++) { - compatible = of_modalias_table[i].of_device; - if (!of_device_is_compatible(node, compatible)) - continue; - strlcpy(modalias, of_modalias_table[i].modalias, len); - return 0; - } + const char *compatible, *p; + int cplen; compatible = of_get_property(node, "compatible", &cplen); - if (!compatible) + if (!compatible || strlen(compatible) > cplen) return -ENODEV; - - /* 2. take first compatible entry and strip manufacturer */ p = strchr(compatible, ','); - if (!p) - return -ENODEV; - p++; - strlcpy(modalias, p, len); + strlcpy(modalias, p ? 
p + 1 : compatible, len); return 0; } EXPORT_SYMBOL_GPL(of_modalias_node); @@ -651,14 +605,14 @@ EXPORT_SYMBOL(of_find_node_by_phandle); struct device_node * of_parse_phandle(struct device_node *np, const char *phandle_name, int index) { - const phandle *phandle; + const __be32 *phandle; int size; phandle = of_get_property(np, phandle_name, &size); if ((!phandle) || (size < sizeof(*phandle) * (index + 1))) return NULL; - return of_find_node_by_phandle(phandle[index]); + return of_find_node_by_phandle(be32_to_cpup(phandle + index)); } EXPORT_SYMBOL(of_parse_phandle); @@ -714,16 +668,16 @@ int of_parse_phandles_with_args(struct device_node *np, const char *list_name, while (list < list_end) { const __be32 *cells; - const phandle *phandle; + phandle phandle; - phandle = list++; + phandle = be32_to_cpup(list++); args = list; /* one cell hole in the list = <>; */ - if (!*phandle) + if (!phandle) goto next; - node = of_find_node_by_phandle(*phandle); + node = of_find_node_by_phandle(phandle); if (!node) { pr_debug("%s: could not find phandle\n", np->full_name); diff --git a/drivers/of/device.c b/drivers/of/device.c index 7d18f8e0b01..0d8a0644f54 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -20,13 +20,13 @@ const struct of_device_id *of_match_device(const struct of_device_id *matches, const struct device *dev) { - if (!dev->of_node) + if ((!matches) || (!dev->of_node)) return NULL; return of_match_node(matches, dev->of_node); } EXPORT_SYMBOL(of_match_device); -struct of_device *of_dev_get(struct of_device *dev) +struct platform_device *of_dev_get(struct platform_device *dev) { struct device *tmp; @@ -34,13 +34,13 @@ struct of_device *of_dev_get(struct of_device *dev) return NULL; tmp = get_device(&dev->dev); if (tmp) - return to_of_device(tmp); + return to_platform_device(tmp); else return NULL; } EXPORT_SYMBOL(of_dev_get); -void of_dev_put(struct of_device *dev) +void of_dev_put(struct platform_device *dev) { if (dev) put_device(&dev->dev); @@ -50,28 +50,25 @@ EXPORT_SYMBOL(of_dev_put); static ssize_t devspec_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct of_device *ofdev; + struct platform_device *ofdev; - ofdev = to_of_device(dev); + ofdev = to_platform_device(dev); return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name); } static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct of_device *ofdev; + struct platform_device *ofdev; - ofdev = to_of_device(dev); + ofdev = to_platform_device(dev); return sprintf(buf, "%s\n", ofdev->dev.of_node->name); } static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct of_device *ofdev = to_of_device(dev); - ssize_t len = 0; - - len = of_device_get_modalias(ofdev, buf, PAGE_SIZE - 2); + ssize_t len = of_device_get_modalias(dev, buf, PAGE_SIZE - 2); buf[len] = '\n'; buf[len+1] = 0; return len+1; @@ -93,20 +90,25 @@ struct device_attribute of_platform_device_attrs[] = { */ void of_release_dev(struct device *dev) { - struct of_device *ofdev; + struct platform_device *ofdev; - ofdev = to_of_device(dev); + ofdev = to_platform_device(dev); of_node_put(ofdev->dev.of_node); kfree(ofdev); } EXPORT_SYMBOL(of_release_dev); -int of_device_register(struct of_device *ofdev) +int of_device_register(struct platform_device *ofdev) { BUG_ON(ofdev->dev.of_node == NULL); device_initialize(&ofdev->dev); + /* name and id have to be set so that the platform bus doesn't get + * confused on matching */ + ofdev->name = dev_name(&ofdev->dev); + 
ofdev->id = -1; + /* device_add will assume that this device is on the same node as * the parent. If there is no parent defined, set the node * explicitly */ @@ -117,25 +119,24 @@ int of_device_register(struct of_device *ofdev) } EXPORT_SYMBOL(of_device_register); -void of_device_unregister(struct of_device *ofdev) +void of_device_unregister(struct platform_device *ofdev) { device_unregister(&ofdev->dev); } EXPORT_SYMBOL(of_device_unregister); -ssize_t of_device_get_modalias(struct of_device *ofdev, - char *str, ssize_t len) +ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len) { const char *compat; int cplen, i; ssize_t tsize, csize, repend; /* Name & Type */ - csize = snprintf(str, len, "of:N%sT%s", ofdev->dev.of_node->name, - ofdev->dev.of_node->type); + csize = snprintf(str, len, "of:N%sT%s", dev->of_node->name, + dev->of_node->type); /* Get compatible property if any */ - compat = of_get_property(ofdev->dev.of_node, "compatible", &cplen); + compat = of_get_property(dev->of_node, "compatible", &cplen); if (!compat) return csize; @@ -170,3 +171,51 @@ ssize_t of_device_get_modalias(struct of_device *ofdev, return tsize; } + +/** + * of_device_uevent - Display OF related uevent information + */ +int of_device_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + const char *compat; + int seen = 0, cplen, sl; + + if ((!dev) || (!dev->of_node)) + return -ENODEV; + + if (add_uevent_var(env, "OF_NAME=%s", dev->of_node->name)) + return -ENOMEM; + + if (add_uevent_var(env, "OF_TYPE=%s", dev->of_node->type)) + return -ENOMEM; + + /* Since the compatible field can contain pretty much anything + * it's not really legal to split it out with commas. We split it + * up using a number of environment variables instead. */ + + compat = of_get_property(dev->of_node, "compatible", &cplen); + while (compat && *compat && cplen > 0) { + if (add_uevent_var(env, "OF_COMPATIBLE_%d=%s", seen, compat)) + return -ENOMEM; + + sl = strlen(compat) + 1; + compat += sl; + cplen -= sl; + seen++; + } + + if (add_uevent_var(env, "OF_COMPATIBLE_N=%d", seen)) + return -ENOMEM; + + /* modalias is trickier, we add it in 2 steps */ + if (add_uevent_var(env, "MODALIAS=")) + return -ENOMEM; + + sl = of_device_get_modalias(dev, &env->buf[env->buflen-1], + sizeof(env->buf) - env->buflen); + if (sl >= (sizeof(env->buf) - env->buflen)) + return -ENOMEM; + env->buflen += sl; + + return 0; +} diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index b6987bba855..65da5aec755 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -69,9 +69,9 @@ int __init of_scan_flat_dt(int (*it)(unsigned long node, u32 sz = be32_to_cpup((__be32 *)p); p += 8; if (be32_to_cpu(initial_boot_params->version) < 0x10) - p = _ALIGN(p, sz >= 8 ? 8 : 4); + p = ALIGN(p, sz >= 8 ? 
8 : 4); p += sz; - p = _ALIGN(p, 4); + p = ALIGN(p, 4); continue; } if (tag != OF_DT_BEGIN_NODE) { @@ -80,7 +80,7 @@ int __init of_scan_flat_dt(int (*it)(unsigned long node, } depth++; pathp = (char *)p; - p = _ALIGN(p + strlen(pathp) + 1, 4); + p = ALIGN(p + strlen(pathp) + 1, 4); if ((*pathp) == '/') { char *lp, *np; for (lp = NULL, np = pathp; *np; np++) @@ -109,7 +109,7 @@ unsigned long __init of_get_flat_dt_root(void) p += 4; BUG_ON(be32_to_cpup((__be32 *)p) != OF_DT_BEGIN_NODE); p += 4; - return _ALIGN(p + strlen((char *)p) + 1, 4); + return ALIGN(p + strlen((char *)p) + 1, 4); } /** @@ -138,7 +138,7 @@ void *__init of_get_flat_dt_prop(unsigned long node, const char *name, noff = be32_to_cpup((__be32 *)(p + 4)); p += 8; if (be32_to_cpu(initial_boot_params->version) < 0x10) - p = _ALIGN(p, sz >= 8 ? 8 : 4); + p = ALIGN(p, sz >= 8 ? 8 : 4); nstr = find_flat_dt_string(noff); if (nstr == NULL) { @@ -151,7 +151,7 @@ void *__init of_get_flat_dt_prop(unsigned long node, const char *name, return (void *)p; } p += sz; - p = _ALIGN(p, 4); + p = ALIGN(p, 4); } while (1); } @@ -169,7 +169,7 @@ int __init of_flat_dt_is_compatible(unsigned long node, const char *compat) if (cp == NULL) return 0; while (cplen > 0) { - if (strncasecmp(cp, compat, strlen(compat)) == 0) + if (of_compat_cmp(cp, compat, strlen(compat)) == 0) return 1; l = strlen(cp) + 1; cp += l; @@ -184,7 +184,7 @@ static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size, { void *res; - *mem = _ALIGN(*mem, align); + *mem = ALIGN(*mem, align); res = (void *)*mem; *mem += size; @@ -220,7 +220,7 @@ unsigned long __init unflatten_dt_node(unsigned long mem, *p += 4; pathp = (char *)*p; l = allocl = strlen(pathp) + 1; - *p = _ALIGN(*p + l, 4); + *p = ALIGN(*p + l, 4); /* version 0x10 has a more compact unit name here instead of the full * path. we accumulate the full path size using "fpsize", we'll rebuild @@ -299,7 +299,7 @@ unsigned long __init unflatten_dt_node(unsigned long mem, noff = be32_to_cpup((__be32 *)((*p) + 4)); *p += 8; if (be32_to_cpu(initial_boot_params->version) < 0x10) - *p = _ALIGN(*p, sz >= 8 ? 8 : 4); + *p = ALIGN(*p, sz >= 8 ? 8 : 4); pname = find_flat_dt_string(noff); if (pname == NULL) { @@ -320,20 +320,20 @@ unsigned long __init unflatten_dt_node(unsigned long mem, if ((strcmp(pname, "phandle") == 0) || (strcmp(pname, "linux,phandle") == 0)) { if (np->phandle == 0) - np->phandle = *((u32 *)*p); + np->phandle = be32_to_cpup((__be32*)*p); } /* And we process the "ibm,phandle" property * used in pSeries dynamic device tree * stuff */ if (strcmp(pname, "ibm,phandle") == 0) - np->phandle = *((u32 *)*p); + np->phandle = be32_to_cpup((__be32 *)*p); pp->name = pname; pp->length = sz; pp->value = (void *)*p; *prev_pp = pp; prev_pp = &pp->next; } - *p = _ALIGN((*p) + sz, 4); + *p = ALIGN((*p) + sz, 4); } /* with version 0x10 we may not have the name property, recreate * it here from the unit name if absent diff --git a/drivers/of/gpio.c b/drivers/of/gpio.c index a1b31a4abae..905960338fb 100644 --- a/drivers/of/gpio.c +++ b/drivers/of/gpio.c @@ -11,13 +11,14 @@ * (at your option) any later version. 
*/ -#include <linux/kernel.h> +#include <linux/device.h> #include <linux/errno.h> +#include <linux/module.h> #include <linux/io.h> #include <linux/of.h> -#include <linux/slab.h> +#include <linux/of_address.h> #include <linux/of_gpio.h> -#include <asm/prom.h> +#include <linux/slab.h> /** * of_get_gpio_flags - Get a GPIO number and flags to use with GPIO API @@ -33,32 +34,32 @@ int of_get_gpio_flags(struct device_node *np, int index, enum of_gpio_flags *flags) { int ret; - struct device_node *gc; - struct of_gpio_chip *of_gc = NULL; + struct device_node *gpio_np; + struct gpio_chip *gc; int size; const void *gpio_spec; const __be32 *gpio_cells; ret = of_parse_phandles_with_args(np, "gpios", "#gpio-cells", index, - &gc, &gpio_spec); + &gpio_np, &gpio_spec); if (ret) { pr_debug("%s: can't parse gpios property\n", __func__); goto err0; } - of_gc = gc->data; - if (!of_gc) { + gc = of_node_to_gpiochip(gpio_np); + if (!gc) { pr_debug("%s: gpio controller %s isn't registered\n", - np->full_name, gc->full_name); + np->full_name, gpio_np->full_name); ret = -ENODEV; goto err1; } - gpio_cells = of_get_property(gc, "#gpio-cells", &size); + gpio_cells = of_get_property(gpio_np, "#gpio-cells", &size); if (!gpio_cells || size != sizeof(*gpio_cells) || - be32_to_cpup(gpio_cells) != of_gc->gpio_cells) { + be32_to_cpup(gpio_cells) != gc->of_gpio_n_cells) { pr_debug("%s: wrong #gpio-cells for %s\n", - np->full_name, gc->full_name); + np->full_name, gpio_np->full_name); ret = -EINVAL; goto err1; } @@ -67,13 +68,13 @@ int of_get_gpio_flags(struct device_node *np, int index, if (flags) *flags = 0; - ret = of_gc->xlate(of_gc, np, gpio_spec, flags); + ret = gc->of_xlate(gc, np, gpio_spec, flags); if (ret < 0) goto err1; - ret += of_gc->gc.base; + ret += gc->base; err1: - of_node_put(gc); + of_node_put(gpio_np); err0: pr_debug("%s exited with status %d\n", __func__, ret); return ret; @@ -116,7 +117,7 @@ EXPORT_SYMBOL(of_gpio_count); /** * of_gpio_simple_xlate - translate gpio_spec to the GPIO number and flags - * @of_gc: pointer to the of_gpio_chip structure + * @gc: pointer to the gpio_chip structure * @np: device node of the GPIO chip * @gpio_spec: gpio specifier as found in the device tree * @flags: a flags pointer to fill in @@ -125,8 +126,8 @@ EXPORT_SYMBOL(of_gpio_count); * gpio chips. This function performs only one sanity check: whether gpio * is less than ngpios (that is specified in the gpio_chip). */ -int of_gpio_simple_xlate(struct of_gpio_chip *of_gc, struct device_node *np, - const void *gpio_spec, enum of_gpio_flags *flags) +static int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np, + const void *gpio_spec, u32 *flags) { const __be32 *gpio = gpio_spec; const u32 n = be32_to_cpup(gpio); @@ -137,12 +138,12 @@ int of_gpio_simple_xlate(struct of_gpio_chip *of_gc, struct device_node *np, * number and the flags from a single gpio cell -- this is possible, * but not recommended). 
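 *
 * Editor's illustration (hedged, not part of this patch): with the default
 * of_gpio_n_cells = 2, a consumer property such as
 *
 *	gpios = <&gpioa 14 1>;	("gpioa" is a hypothetical controller label)
 *
 * resolves through this xlate to GPIO 14 of that chip, with bit 0 of the
 * flags cell reported back as OF_GPIO_ACTIVE_LOW.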
*/ - if (of_gc->gpio_cells < 2) { + if (gc->of_gpio_n_cells < 2) { WARN_ON(1); return -EINVAL; } - if (n > of_gc->gc.ngpio) + if (n > gc->ngpio) return -EINVAL; if (flags) @@ -150,7 +151,6 @@ int of_gpio_simple_xlate(struct of_gpio_chip *of_gc, struct device_node *np, return n; } -EXPORT_SYMBOL(of_gpio_simple_xlate); /** * of_mm_gpiochip_add - Add memory mapped GPIO chip (bank) @@ -161,10 +161,8 @@ EXPORT_SYMBOL(of_gpio_simple_xlate); * * 1) In the gpio_chip structure: * - all the callbacks - * - * 2) In the of_gpio_chip structure: - * - gpio_cells - * - xlate callback (optional) + * - of_gpio_n_cells + * - of_xlate callback (optional) * * 3) In the of_mm_gpio_chip structure: * - save_regs callback (optional) @@ -177,8 +175,7 @@ int of_mm_gpiochip_add(struct device_node *np, struct of_mm_gpio_chip *mm_gc) { int ret = -ENOMEM; - struct of_gpio_chip *of_gc = &mm_gc->of_gc; - struct gpio_chip *gc = &of_gc->gc; + struct gpio_chip *gc = &mm_gc->gc; gc->label = kstrdup(np->full_name, GFP_KERNEL); if (!gc->label) @@ -190,26 +187,19 @@ int of_mm_gpiochip_add(struct device_node *np, gc->base = -1; - if (!of_gc->xlate) - of_gc->xlate = of_gpio_simple_xlate; - if (mm_gc->save_regs) mm_gc->save_regs(mm_gc); - np->data = of_gc; + mm_gc->gc.of_node = np; ret = gpiochip_add(gc); if (ret) goto err2; - /* We don't want to lose the node and its ->data */ - of_node_get(np); - pr_debug("%s: registered as generic GPIO chip, base is %d\n", np->full_name, gc->base); return 0; err2: - np->data = NULL; iounmap(mm_gc->regs); err1: kfree(gc->label); @@ -219,3 +209,36 @@ err0: return ret; } EXPORT_SYMBOL(of_mm_gpiochip_add); + +void of_gpiochip_add(struct gpio_chip *chip) +{ + if ((!chip->of_node) && (chip->dev)) + chip->of_node = chip->dev->of_node; + + if (!chip->of_node) + return; + + if (!chip->of_xlate) { + chip->of_gpio_n_cells = 2; + chip->of_xlate = of_gpio_simple_xlate; + } + + of_node_get(chip->of_node); +} + +void of_gpiochip_remove(struct gpio_chip *chip) +{ + if (chip->of_node) + of_node_put(chip->of_node); +} + +/* Private function for resolving node pointer to gpio_chip */ +static int of_gpiochip_is_match(struct gpio_chip *chip, void *data) +{ + return chip->of_node == data; +} + +struct gpio_chip *of_node_to_gpiochip(struct device_node *np) +{ + return gpiochip_find(np, of_gpiochip_is_match); +} diff --git a/drivers/of/irq.c b/drivers/of/irq.c new file mode 100644 index 00000000000..6e595e5a397 --- /dev/null +++ b/drivers/of/irq.c @@ -0,0 +1,349 @@ +/* + * Derived from arch/i386/kernel/irq.c + * Copyright (C) 1992 Linus Torvalds + * Adapted from arch/i386 by Gary Thomas + * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) + * Updated and modified by Cort Dougan <cort@fsmlabs.com> + * Copyright (C) 1996-2001 Cort Dougan + * Adapted for Power Macintosh by Paul Mackerras + * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * This file contains the code used to make IRQ descriptions in the + * device tree to actual irq numbers on an interrupt controller + * driver. 
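 *
 * Editor's sketch (hedged, not from the original patch): a typical consumer
 * of these helpers does roughly
 *
 *	irq = irq_of_parse_and_map(np, 0);
 *	if (irq == NO_IRQ)
 *		return -EINVAL;
 *	ret = request_irq(irq, my_handler, 0, "mydev", priv);
 *
 * where np is the device's node and my_handler/priv are the caller's own
 * (hypothetical names used only for illustration).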
+ */ + +#include <linux/errno.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/string.h> + +/** + * irq_of_parse_and_map - Parse and map an interrupt into linux virq space + * @device: Device node of the device whose interrupt is to be mapped + * @index: Index of the interrupt to map + * + * This function is a wrapper that chains of_irq_map_one() and + * irq_create_of_mapping() to make things easier to callers + */ +unsigned int irq_of_parse_and_map(struct device_node *dev, int index) +{ + struct of_irq oirq; + + if (of_irq_map_one(dev, index, &oirq)) + return NO_IRQ; + + return irq_create_of_mapping(oirq.controller, oirq.specifier, + oirq.size); +} +EXPORT_SYMBOL_GPL(irq_of_parse_and_map); + +/** + * of_irq_find_parent - Given a device node, find its interrupt parent node + * @child: pointer to device node + * + * Returns a pointer to the interrupt parent node, or NULL if the interrupt + * parent could not be determined. + */ +static struct device_node *of_irq_find_parent(struct device_node *child) +{ + struct device_node *p; + const __be32 *parp; + + if (!of_node_get(child)) + return NULL; + + do { + parp = of_get_property(child, "interrupt-parent", NULL); + if (parp == NULL) + p = of_get_parent(child); + else { + if (of_irq_workarounds & OF_IMAP_NO_PHANDLE) + p = of_node_get(of_irq_dflt_pic); + else + p = of_find_node_by_phandle(be32_to_cpup(parp)); + } + of_node_put(child); + child = p; + } while (p && of_get_property(p, "#interrupt-cells", NULL) == NULL); + + return p; +} + +/** + * of_irq_map_raw - Low level interrupt tree parsing + * @parent: the device interrupt parent + * @intspec: interrupt specifier ("interrupts" property of the device) + * @ointsize: size of the passed in interrupt specifier + * @addr: address specifier (start of "reg" property of the device) + * @out_irq: structure of_irq filled by this function + * + * Returns 0 on success and a negative number on error + * + * This function is a low-level interrupt tree walking function. It + * can be used to do a partial walk with synthetized reg and interrupts + * properties, for example when resolving PCI interrupts when no device + * node exist for the parent. + */ +int of_irq_map_raw(struct device_node *parent, const __be32 *intspec, + u32 ointsize, const __be32 *addr, struct of_irq *out_irq) +{ + struct device_node *ipar, *tnode, *old = NULL, *newpar = NULL; + const __be32 *tmp, *imap, *imask; + u32 intsize = 1, addrsize, newintsize = 0, newaddrsize = 0; + int imaplen, match, i; + + pr_debug("of_irq_map_raw: par=%s,intspec=[0x%08x 0x%08x...],ointsize=%d\n", + parent->full_name, be32_to_cpup(intspec), + be32_to_cpup(intspec + 1), ointsize); + + ipar = of_node_get(parent); + + /* First get the #interrupt-cells property of the current cursor + * that tells us how to interpret the passed-in intspec. If there + * is none, we are nice and just walk up the tree + */ + do { + tmp = of_get_property(ipar, "#interrupt-cells", NULL); + if (tmp != NULL) { + intsize = be32_to_cpu(*tmp); + break; + } + tnode = ipar; + ipar = of_irq_find_parent(ipar); + of_node_put(tnode); + } while (ipar); + if (ipar == NULL) { + pr_debug(" -> no parent found !\n"); + goto fail; + } + + pr_debug("of_irq_map_raw: ipar=%s, size=%d\n", ipar->full_name, intsize); + + if (ointsize != intsize) + return -EINVAL; + + /* Look for this #address-cells. 
We have to implement the old linux + * trick of looking for the parent here as some device-trees rely on it + */ + old = of_node_get(ipar); + do { + tmp = of_get_property(old, "#address-cells", NULL); + tnode = of_get_parent(old); + of_node_put(old); + old = tnode; + } while (old && tmp == NULL); + of_node_put(old); + old = NULL; + addrsize = (tmp == NULL) ? 2 : be32_to_cpu(*tmp); + + pr_debug(" -> addrsize=%d\n", addrsize); + + /* Now start the actual "proper" walk of the interrupt tree */ + while (ipar != NULL) { + /* Now check if cursor is an interrupt-controller and if it is + * then we are done + */ + if (of_get_property(ipar, "interrupt-controller", NULL) != + NULL) { + pr_debug(" -> got it !\n"); + for (i = 0; i < intsize; i++) + out_irq->specifier[i] = + of_read_number(intspec +i, 1); + out_irq->size = intsize; + out_irq->controller = ipar; + of_node_put(old); + return 0; + } + + /* Now look for an interrupt-map */ + imap = of_get_property(ipar, "interrupt-map", &imaplen); + /* No interrupt map, check for an interrupt parent */ + if (imap == NULL) { + pr_debug(" -> no map, getting parent\n"); + newpar = of_irq_find_parent(ipar); + goto skiplevel; + } + imaplen /= sizeof(u32); + + /* Look for a mask */ + imask = of_get_property(ipar, "interrupt-map-mask", NULL); + + /* If we were passed no "reg" property and we attempt to parse + * an interrupt-map, then #address-cells must be 0. + * Fail if it's not. + */ + if (addr == NULL && addrsize != 0) { + pr_debug(" -> no reg passed in when needed !\n"); + goto fail; + } + + /* Parse interrupt-map */ + match = 0; + while (imaplen > (addrsize + intsize + 1) && !match) { + /* Compare specifiers */ + match = 1; + for (i = 0; i < addrsize && match; ++i) { + u32 mask = imask ? imask[i] : 0xffffffffu; + match = ((addr[i] ^ imap[i]) & mask) == 0; + } + for (; i < (addrsize + intsize) && match; ++i) { + u32 mask = imask ? imask[i] : 0xffffffffu; + match = + ((intspec[i-addrsize] ^ imap[i]) & mask) == 0; + } + imap += addrsize + intsize; + imaplen -= addrsize + intsize; + + pr_debug(" -> match=%d (imaplen=%d)\n", match, imaplen); + + /* Get the interrupt parent */ + if (of_irq_workarounds & OF_IMAP_NO_PHANDLE) + newpar = of_node_get(of_irq_dflt_pic); + else + newpar = of_find_node_by_phandle(be32_to_cpup(imap)); + imap++; + --imaplen; + + /* Check if not found */ + if (newpar == NULL) { + pr_debug(" -> imap parent not found !\n"); + goto fail; + } + + /* Get #interrupt-cells and #address-cells of new + * parent + */ + tmp = of_get_property(newpar, "#interrupt-cells", NULL); + if (tmp == NULL) { + pr_debug(" -> parent lacks #interrupt-cells!\n"); + goto fail; + } + newintsize = be32_to_cpu(*tmp); + tmp = of_get_property(newpar, "#address-cells", NULL); + newaddrsize = (tmp == NULL) ? 0 : be32_to_cpu(*tmp); + + pr_debug(" -> newintsize=%d, newaddrsize=%d\n", + newintsize, newaddrsize); + + /* Check for malformed properties */ + if (imaplen < (newaddrsize + newintsize)) + goto fail; + + imap += newaddrsize + newintsize; + imaplen -= newaddrsize + newintsize; + + pr_debug(" -> imaplen=%d\n", imaplen); + } + if (!match) + goto fail; + + of_node_put(old); + old = of_node_get(newpar); + addrsize = newaddrsize; + intsize = newintsize; + intspec = imap - intsize; + addr = intspec - addrsize; + + skiplevel: + /* Iterate again with new parent */ + pr_debug(" -> new parent: %s\n", newpar ? 
newpar->full_name : "<>"); + of_node_put(ipar); + ipar = newpar; + newpar = NULL; + } + fail: + of_node_put(ipar); + of_node_put(old); + of_node_put(newpar); + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(of_irq_map_raw); + +/** + * of_irq_map_one - Resolve an interrupt for a device + * @device: the device whose interrupt is to be resolved + * @index: index of the interrupt to resolve + * @out_irq: structure of_irq filled by this function + * + * This function resolves an interrupt, walking the tree, for a given + * device-tree node. It's the high level pendant to of_irq_map_raw(). + */ +int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq) +{ + struct device_node *p; + const __be32 *intspec, *tmp, *addr; + u32 intsize, intlen; + int res = -EINVAL; + + pr_debug("of_irq_map_one: dev=%s, index=%d\n", device->full_name, index); + + /* OldWorld mac stuff is "special", handle out of line */ + if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC) + return of_irq_map_oldworld(device, index, out_irq); + + /* Get the interrupts property */ + intspec = of_get_property(device, "interrupts", &intlen); + if (intspec == NULL) + return -EINVAL; + intlen /= sizeof(*intspec); + + pr_debug(" intspec=%d intlen=%d\n", be32_to_cpup(intspec), intlen); + + /* Get the reg property (if any) */ + addr = of_get_property(device, "reg", NULL); + + /* Look for the interrupt parent. */ + p = of_irq_find_parent(device); + if (p == NULL) + return -EINVAL; + + /* Get size of interrupt specifier */ + tmp = of_get_property(p, "#interrupt-cells", NULL); + if (tmp == NULL) + goto out; + intsize = be32_to_cpu(*tmp); + + pr_debug(" intsize=%d intlen=%d\n", intsize, intlen); + + /* Check index */ + if ((index + 1) * intsize > intlen) + goto out; + + /* Get new specifier and map it */ + res = of_irq_map_raw(p, intspec + index * intsize, intsize, + addr, out_irq); + out: + of_node_put(p); + return res; +} +EXPORT_SYMBOL_GPL(of_irq_map_one); + +/** + * of_irq_to_resource - Decode a node's IRQ and return it as a resource + * @dev: pointer to device tree node + * @index: zero-based index of the irq + * @r: pointer to resource structure to return result into. + */ +int of_irq_to_resource(struct device_node *dev, int index, struct resource *r) +{ + int irq = irq_of_parse_and_map(dev, index); + + /* Only dereference the resource if both the + * resource and the irq are valid. 
*/ + if (r && irq != NO_IRQ) { + r->start = r->end = irq; + r->flags = IORESOURCE_IRQ; + r->name = dev->full_name; + } + + return irq; +} +EXPORT_SYMBOL_GPL(of_irq_to_resource); diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c index ab6522c8e4f..0a694debd22 100644 --- a/drivers/of/of_i2c.c +++ b/drivers/of/of_i2c.c @@ -14,57 +14,65 @@ #include <linux/i2c.h> #include <linux/of.h> #include <linux/of_i2c.h> +#include <linux/of_irq.h> #include <linux/module.h> -void of_register_i2c_devices(struct i2c_adapter *adap, - struct device_node *adap_node) +void of_i2c_register_devices(struct i2c_adapter *adap) { void *result; struct device_node *node; - for_each_child_of_node(adap_node, node) { + /* Only register child devices if the adapter has a node pointer set */ + if (!adap->dev.of_node) + return; + + dev_dbg(&adap->dev, "of_i2c: walking child nodes\n"); + + for_each_child_of_node(adap->dev.of_node, node) { struct i2c_board_info info = {}; struct dev_archdata dev_ad = {}; const __be32 *addr; int len; - if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) + dev_dbg(&adap->dev, "of_i2c: register %s\n", node->full_name); + + if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) { + dev_err(&adap->dev, "of_i2c: modalias failure on %s\n", + node->full_name); continue; + } addr = of_get_property(node, "reg", &len); - if (!addr || len < sizeof(int) || *addr > (1 << 10) - 1) { - printk(KERN_ERR - "of-i2c: invalid i2c device entry\n"); + if (!addr || (len < sizeof(int))) { + dev_err(&adap->dev, "of_i2c: invalid reg on %s\n", + node->full_name); continue; } - info.irq = irq_of_parse_and_map(node, 0); - info.addr = be32_to_cpup(addr); + if (info.addr > (1 << 10) - 1) { + dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n", + info.addr, node->full_name); + continue; + } - info.of_node = node; + info.irq = irq_of_parse_and_map(node, 0); + info.of_node = of_node_get(node); info.archdata = &dev_ad; request_module("%s", info.type); result = i2c_new_device(adap, &info); if (result == NULL) { - printk(KERN_ERR - "of-i2c: Failed to load driver for %s\n", - info.type); + dev_err(&adap->dev, "of_i2c: Failure registering %s\n", + node->full_name); + of_node_put(node); irq_dispose_mapping(info.irq); continue; } - - /* - * Get the node to not lose the dev_archdata->of_node. - * Currently there is no way to put it back, as well as no - * of_unregister_i2c_devices() call. - */ - of_node_get(node); } } -EXPORT_SYMBOL(of_register_i2c_devices); +EXPORT_SYMBOL(of_i2c_register_devices); static int of_dev_node_match(struct device *dev, void *data) { diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 42a6715f8e8..1fce00eb421 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -15,6 +15,7 @@ #include <linux/err.h> #include <linux/phy.h> #include <linux/of.h> +#include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/module.h> diff --git a/drivers/of/of_spi.c b/drivers/of/of_spi.c index 5fed7e3c7da..1dbce58a58b 100644 --- a/drivers/of/of_spi.c +++ b/drivers/of/of_spi.c @@ -9,17 +9,17 @@ #include <linux/of.h> #include <linux/device.h> #include <linux/spi/spi.h> +#include <linux/of_irq.h> #include <linux/of_spi.h> /** * of_register_spi_devices - Register child devices onto the SPI bus * @master: Pointer to spi_master device - * @np: parent node of SPI device nodes * - * Registers an spi_device for each child node of 'np' which has a 'reg' + * Registers an spi_device for each child node of master node which has a 'reg' * property. 
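 *
 * Editor's sketch (hedged assumption, not part of this patch): with this
 * change a hypothetical SPI controller driver would do roughly
 *
 *	master->dev.of_node = pdev->dev.of_node;
 *	ret = spi_register_master(master);
 *	if (!ret)
 *		of_register_spi_devices(master);
 *
 * instead of passing a separate device_node argument.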
*/ -void of_register_spi_devices(struct spi_master *master, struct device_node *np) +void of_register_spi_devices(struct spi_master *master) { struct spi_device *spi; struct device_node *nc; @@ -27,7 +27,10 @@ void of_register_spi_devices(struct spi_master *master, struct device_node *np) int rc; int len; - for_each_child_of_node(np, nc) { + if (!master->dev.of_node) + return; + + for_each_child_of_node(master->dev.of_node, nc) { /* Alloc an spi_device */ spi = spi_alloc_device(master); if (!spi) { diff --git a/drivers/of/platform.c b/drivers/of/platform.c index 7dacc1ebe91..bb72223c22a 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -14,8 +14,105 @@ #include <linux/errno.h> #include <linux/module.h> #include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/of_address.h> #include <linux/of_device.h> +#include <linux/of_irq.h> #include <linux/of_platform.h> +#include <linux/platform_device.h> + +static int of_dev_node_match(struct device *dev, void *data) +{ + return dev->of_node == data; +} + +/** + * of_find_device_by_node - Find the platform_device associated with a node + * @np: Pointer to device tree node + * + * Returns platform_device pointer, or NULL if not found + */ +struct platform_device *of_find_device_by_node(struct device_node *np) +{ + struct device *dev; + + dev = bus_find_device(&platform_bus_type, NULL, np, of_dev_node_match); + return dev ? to_platform_device(dev) : NULL; +} +EXPORT_SYMBOL(of_find_device_by_node); + +static int platform_driver_probe_shim(struct platform_device *pdev) +{ + struct platform_driver *pdrv; + struct of_platform_driver *ofpdrv; + const struct of_device_id *match; + + pdrv = container_of(pdev->dev.driver, struct platform_driver, driver); + ofpdrv = container_of(pdrv, struct of_platform_driver, platform_driver); + + /* There is an unlikely chance that an of_platform driver might match + * on a non-OF platform device. If so, then of_match_device() will + * come up empty. Return -EINVAL in this case so other drivers get + * the chance to bind. */ + match = of_match_device(pdev->dev.driver->of_match_table, &pdev->dev); + return match ? ofpdrv->probe(pdev, match) : -EINVAL; +} + +static void platform_driver_shutdown_shim(struct platform_device *pdev) +{ + struct platform_driver *pdrv; + struct of_platform_driver *ofpdrv; + + pdrv = container_of(pdev->dev.driver, struct platform_driver, driver); + ofpdrv = container_of(pdrv, struct of_platform_driver, platform_driver); + ofpdrv->shutdown(pdev); +} + +/** + * of_register_platform_driver + */ +int of_register_platform_driver(struct of_platform_driver *drv) +{ + char *of_name; + + /* setup of_platform_driver to platform_driver adaptors */ + drv->platform_driver.driver = drv->driver; + + /* Prefix the driver name with 'of:' to avoid namespace collisions + * and bogus matches. There are some drivers in the tree that + * register both an of_platform_driver and a platform_driver with + * the same name. 
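 * (Editor's note: e.g. a hypothetical of_platform_driver named "foo" is
 * registered on the platform bus as "of:foo", so it cannot clash with a
 * plain platform_driver also named "foo".)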
This is a temporary measure until they are all + * cleaned up --gcl July 29, 2010 */ + of_name = kmalloc(strlen(drv->driver.name) + 5, GFP_KERNEL); + if (!of_name) + return -ENOMEM; + sprintf(of_name, "of:%s", drv->driver.name); + drv->platform_driver.driver.name = of_name; + + if (drv->probe) + drv->platform_driver.probe = platform_driver_probe_shim; + drv->platform_driver.remove = drv->remove; + if (drv->shutdown) + drv->platform_driver.shutdown = platform_driver_shutdown_shim; + drv->platform_driver.suspend = drv->suspend; + drv->platform_driver.resume = drv->resume; + + return platform_driver_register(&drv->platform_driver); +} +EXPORT_SYMBOL(of_register_platform_driver); + +void of_unregister_platform_driver(struct of_platform_driver *drv) +{ + platform_driver_unregister(&drv->platform_driver); + kfree(drv->platform_driver.driver.name); + drv->platform_driver.driver.name = NULL; +} +EXPORT_SYMBOL(of_unregister_platform_driver); + +#if defined(CONFIG_PPC_DCR) +#include <asm/dcr.h> +#endif extern struct device_attribute of_platform_device_attrs[]; @@ -33,11 +130,11 @@ static int of_platform_device_probe(struct device *dev) { int error = -ENODEV; struct of_platform_driver *drv; - struct of_device *of_dev; + struct platform_device *of_dev; const struct of_device_id *match; drv = to_of_platform_driver(dev->driver); - of_dev = to_of_device(dev); + of_dev = to_platform_device(dev); if (!drv->probe) return error; @@ -55,7 +152,7 @@ static int of_platform_device_probe(struct device *dev) static int of_platform_device_remove(struct device *dev) { - struct of_device *of_dev = to_of_device(dev); + struct platform_device *of_dev = to_platform_device(dev); struct of_platform_driver *drv = to_of_platform_driver(dev->driver); if (dev->driver && drv->remove) @@ -65,7 +162,7 @@ static int of_platform_device_remove(struct device *dev) static void of_platform_device_shutdown(struct device *dev) { - struct of_device *of_dev = to_of_device(dev); + struct platform_device *of_dev = to_platform_device(dev); struct of_platform_driver *drv = to_of_platform_driver(dev->driver); if (dev->driver && drv->shutdown) @@ -76,7 +173,7 @@ static void of_platform_device_shutdown(struct device *dev) static int of_platform_legacy_suspend(struct device *dev, pm_message_t mesg) { - struct of_device *of_dev = to_of_device(dev); + struct platform_device *of_dev = to_platform_device(dev); struct of_platform_driver *drv = to_of_platform_driver(dev->driver); int ret = 0; @@ -87,7 +184,7 @@ static int of_platform_legacy_suspend(struct device *dev, pm_message_t mesg) static int of_platform_legacy_resume(struct device *dev) { - struct of_device *of_dev = to_of_device(dev); + struct platform_device *of_dev = to_platform_device(dev); struct of_platform_driver *drv = to_of_platform_driver(dev->driver); int ret = 0; @@ -384,15 +481,286 @@ int of_bus_type_init(struct bus_type *bus, const char *name) int of_register_driver(struct of_platform_driver *drv, struct bus_type *bus) { - drv->driver.bus = bus; + /* + * Temporary: of_platform_bus used to be distinct from the platform + * bus. It isn't anymore, and so drivers on the platform bus need + * to be registered in a special way. + * + * After all of_platform_bus_type drivers are converted to + * platform_drivers, this exception can be removed. 
+ */ + if (bus == &platform_bus_type) + return of_register_platform_driver(drv); /* register with core */ + drv->driver.bus = bus; return driver_register(&drv->driver); } EXPORT_SYMBOL(of_register_driver); void of_unregister_driver(struct of_platform_driver *drv) { - driver_unregister(&drv->driver); + if (drv->driver.bus == &platform_bus_type) + of_unregister_platform_driver(drv); + else + driver_unregister(&drv->driver); } EXPORT_SYMBOL(of_unregister_driver); + +#if !defined(CONFIG_SPARC) +/* + * The following routines scan a subtree and registers a device for + * each applicable node. + * + * Note: sparc doesn't use these routines because it has a different + * mechanism for creating devices from device tree nodes. + */ + +/** + * of_device_make_bus_id - Use the device node data to assign a unique name + * @dev: pointer to device structure that is linked to a device tree node + * + * This routine will first try using either the dcr-reg or the reg property + * value to derive a unique name. As a last resort it will use the node + * name followed by a unique number. + */ +void of_device_make_bus_id(struct device *dev) +{ + static atomic_t bus_no_reg_magic; + struct device_node *node = dev->of_node; + const u32 *reg; + u64 addr; + int magic; + +#ifdef CONFIG_PPC_DCR + /* + * If it's a DCR based device, use 'd' for native DCRs + * and 'D' for MMIO DCRs. + */ + reg = of_get_property(node, "dcr-reg", NULL); + if (reg) { +#ifdef CONFIG_PPC_DCR_NATIVE + dev_set_name(dev, "d%x.%s", *reg, node->name); +#else /* CONFIG_PPC_DCR_NATIVE */ + u64 addr = of_translate_dcr_address(node, *reg, NULL); + if (addr != OF_BAD_ADDR) { + dev_set_name(dev, "D%llx.%s", + (unsigned long long)addr, node->name); + return; + } +#endif /* !CONFIG_PPC_DCR_NATIVE */ + } +#endif /* CONFIG_PPC_DCR */ + + /* + * For MMIO, get the physical address + */ + reg = of_get_property(node, "reg", NULL); + if (reg) { + addr = of_translate_address(node, reg); + if (addr != OF_BAD_ADDR) { + dev_set_name(dev, "%llx.%s", + (unsigned long long)addr, node->name); + return; + } + } + + /* + * No BusID, use the node name and add a globally incremented + * counter (and pray...) + */ + magic = atomic_add_return(1, &bus_no_reg_magic); + dev_set_name(dev, "%s.%d", node->name, magic - 1); +} + +/** + * of_device_alloc - Allocate and initialize an of_device + * @np: device node to assign to device + * @bus_id: Name to assign to the device. May be null to use default name. + * @parent: Parent device. 
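 *
 * Editor's sketch (hedged, not part of this patch): this counts the node's
 * "reg" and interrupt entries, then allocates the platform_device and its
 * resource table in one block; a hypothetical caller looks like
 *
 *	pdev = of_device_alloc(np, NULL, parent);
 *	if (!pdev)
 *		return NULL;
 *
 * registration itself is left to callers such as of_platform_device_create().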
+ */ +struct platform_device *of_device_alloc(struct device_node *np, + const char *bus_id, + struct device *parent) +{ + struct platform_device *dev; + int rc, i, num_reg = 0, num_irq = 0; + struct resource *res, temp_res; + + /* First count how many resources are needed */ + while (of_address_to_resource(np, num_reg, &temp_res) == 0) + num_reg++; + while (of_irq_to_resource(np, num_irq, &temp_res) != NO_IRQ) + num_irq++; + + /* Allocate memory for both the struct device and the resource table */ + dev = kzalloc(sizeof(*dev) + (sizeof(*res) * (num_reg + num_irq)), + GFP_KERNEL); + if (!dev) + return NULL; + res = (struct resource *) &dev[1]; + + /* Populate the resource table */ + if (num_irq || num_reg) { + dev->num_resources = num_reg + num_irq; + dev->resource = res; + for (i = 0; i < num_reg; i++, res++) { + rc = of_address_to_resource(np, i, res); + WARN_ON(rc); + } + for (i = 0; i < num_irq; i++, res++) { + rc = of_irq_to_resource(np, i, res); + WARN_ON(rc == NO_IRQ); + } + } + + dev->dev.of_node = of_node_get(np); +#if defined(CONFIG_PPC) || defined(CONFIG_MICROBLAZE) + dev->dev.dma_mask = &dev->archdata.dma_mask; +#endif + dev->dev.parent = parent; + dev->dev.release = of_release_dev; + + if (bus_id) + dev_set_name(&dev->dev, "%s", bus_id); + else + of_device_make_bus_id(&dev->dev); + + return dev; +} +EXPORT_SYMBOL(of_device_alloc); + +/** + * of_platform_device_create - Alloc, initialize and register an of_device + * @np: pointer to node to create device for + * @bus_id: name to assign device + * @parent: Linux device model parent device. + */ +struct platform_device *of_platform_device_create(struct device_node *np, + const char *bus_id, + struct device *parent) +{ + struct platform_device *dev; + + dev = of_device_alloc(np, bus_id, parent); + if (!dev) + return NULL; + +#if defined(CONFIG_PPC) || defined(CONFIG_MICROBLAZE) + dev->archdata.dma_mask = 0xffffffffUL; +#endif + dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + dev->dev.bus = &platform_bus_type; + + /* We do not fill the DMA ops for platform devices by default. + * This is currently the responsibility of the platform code + * to do such, possibly using a device notifier + */ + + if (of_device_register(dev) != 0) { + of_device_free(dev); + return NULL; + } + + return dev; +} +EXPORT_SYMBOL(of_platform_device_create); + +/** + * of_platform_bus_create - Create an OF device for a bus node and all its + * children. Optionally recursively instantiate matching busses. 
+ * @bus: device node of the bus to instantiate + * @matches: match table, NULL to use the default, OF_NO_DEEP_PROBE to + * disallow recursive creation of child busses + */ +static int of_platform_bus_create(const struct device_node *bus, + const struct of_device_id *matches, + struct device *parent) +{ + struct device_node *child; + struct platform_device *dev; + int rc = 0; + + for_each_child_of_node(bus, child) { + pr_debug(" create child: %s\n", child->full_name); + dev = of_platform_device_create(child, NULL, parent); + if (dev == NULL) + rc = -ENOMEM; + else if (!of_match_node(matches, child)) + continue; + if (rc == 0) { + pr_debug(" and sub busses\n"); + rc = of_platform_bus_create(child, matches, &dev->dev); + } + if (rc) { + of_node_put(child); + break; + } + } + return rc; +} + +/** + * of_platform_bus_probe - Probe the device-tree for platform busses + * @root: parent of the first level to probe or NULL for the root of the tree + * @matches: match table, NULL to use the default + * @parent: parent to hook devices from, NULL for toplevel + * + * Note that children of the provided root are not instantiated as devices + * unless the specified root itself matches the bus list and is not NULL. + */ +int of_platform_bus_probe(struct device_node *root, + const struct of_device_id *matches, + struct device *parent) +{ + struct device_node *child; + struct platform_device *dev; + int rc = 0; + + if (WARN_ON(!matches || matches == OF_NO_DEEP_PROBE)) + return -EINVAL; + if (root == NULL) + root = of_find_node_by_path("/"); + else + of_node_get(root); + if (root == NULL) + return -EINVAL; + + pr_debug("of_platform_bus_probe()\n"); + pr_debug(" starting at: %s\n", root->full_name); + + /* Do a self check of bus type, if there's a match, create + * children + */ + if (of_match_node(matches, root)) { + pr_debug(" root match, create all sub devices\n"); + dev = of_platform_device_create(root, NULL, parent); + if (dev == NULL) { + rc = -ENOMEM; + goto bail; + } + pr_debug(" create all sub busses\n"); + rc = of_platform_bus_create(root, matches, &dev->dev); + goto bail; + } + for_each_child_of_node(root, child) { + if (!of_match_node(matches, child)) + continue; + + pr_debug(" match: %s\n", child->full_name); + dev = of_platform_device_create(child, NULL, parent); + if (dev == NULL) + rc = -ENOMEM; + else + rc = of_platform_bus_create(child, matches, &dev->dev); + if (rc) { + of_node_put(child); + break; + } + } + bail: + of_node_put(root); + return rc; +} +EXPORT_SYMBOL(of_platform_bus_probe); +#endif /* !CONFIG_SPARC */ diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c index 9a5b4b89416..210a6441a06 100644 --- a/drivers/parport/parport_sunbpp.c +++ b/drivers/parport/parport_sunbpp.c @@ -295,7 +295,7 @@ static int __devinit bpp_probe(struct of_device *op, const struct of_device_id * void __iomem *base; struct parport *p; - irq = op->irqs[0]; + irq = op->archdata.irqs[0]; base = of_ioremap(&op->resource[0], 0, resource_size(&op->resource[0]), "sunbpp"); @@ -393,12 +393,12 @@ static struct of_platform_driver bpp_sbus_driver = { static int __init parport_sunbpp_init(void) { - return of_register_driver(&bpp_sbus_driver, &of_bus_type); + return of_register_platform_driver(&bpp_sbus_driver); } static void __exit parport_sunbpp_exit(void) { - of_unregister_driver(&bpp_sbus_driver); + of_unregister_platform_driver(&bpp_sbus_driver); } MODULE_AUTHOR("Derrick J Brashear"); diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig index d0f5ad30607..c80a7a6e769 100644 --- 
a/drivers/pcmcia/Kconfig +++ b/drivers/pcmcia/Kconfig @@ -157,11 +157,11 @@ config PCMCIA_M8XX config PCMCIA_AU1X00 tristate "Au1x00 pcmcia support" - depends on SOC_AU1X00 && PCMCIA + depends on MIPS_ALCHEMY && PCMCIA config PCMCIA_ALCHEMY_DEVBOARD tristate "Alchemy Db/Pb1xxx PCMCIA socket services" - depends on SOC_AU1X00 && PCMCIA + depends on MIPS_ALCHEMY && PCMCIA select 64BIT_PHYS_ADDR help Enable this driver of you want PCMCIA support on your Alchemy @@ -215,7 +215,7 @@ config PCMCIA_PXA2XX depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \ || MACH_ARMCORE || ARCH_PXA_PALM || TRIZEPS_PCMCIA \ || ARCOM_PCMCIA || ARCH_PXA_ESERIES || MACH_STARGATE2 \ - || MACH_VPAC270) + || MACH_VPAC270 || MACH_BALLOON3) select PCMCIA_SOC_COMMON help Say Y here to include support for the PXA2xx PCMCIA controller diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile index d006e8beab9..6a607732552 100644 --- a/drivers/pcmcia/Makefile +++ b/drivers/pcmcia/Makefile @@ -70,6 +70,7 @@ pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o pxa2xx-obj-$(CONFIG_MACH_VPAC270) += pxa2xx_vpac270.o +pxa2xx-obj-$(CONFIG_MACH_BALLOON3) += pxa2xx_balloon3.o obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_base.o $(pxa2xx-obj-y) diff --git a/drivers/pcmcia/pxa2xx_balloon3.c b/drivers/pcmcia/pxa2xx_balloon3.c new file mode 100644 index 00000000000..dbbdd006320 --- /dev/null +++ b/drivers/pcmcia/pxa2xx_balloon3.c @@ -0,0 +1,158 @@ +/* + * linux/drivers/pcmcia/pxa2xx_balloon3.c + * + * Balloon3 PCMCIA specific routines. + * + * Author: Nick Bane + * Created: June, 2006 + * Copyright: Toby Churchill Ltd + * Derived from pxa2xx_mainstone.c, by Nico Pitre + * + * Various modification by Marek Vasut <marek.vasut@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/gpio.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/irq.h> +#include <linux/io.h> + +#include <mach/balloon3.h> + +#include "soc_common.h" + +/* + * These are a list of interrupt sources that provokes a polled + * check of status + */ +static struct pcmcia_irqs irqs[] = { + { 0, BALLOON3_S0_CD_IRQ, "PCMCIA0 CD" }, + { 0, BALLOON3_BP_NSTSCHG_IRQ, "PCMCIA0 STSCHG" }, +}; + +static int balloon3_pcmcia_hw_init(struct soc_pcmcia_socket *skt) +{ + uint16_t ver; + int ret; + static void __iomem *fpga_ver; + + ver = __raw_readw(BALLOON3_FPGA_VER); + if (ver > 0x0201) + pr_warn("The FPGA code, version 0x%04x, is newer than rel-0.3. 
" + "PCMCIA/CF support might be broken in this version!", + ver); + + skt->socket.pci_irq = BALLOON3_BP_CF_NRDY_IRQ; + return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs)); +} + +static void balloon3_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) +{ + soc_pcmcia_free_irqs(skt, irqs, ARRAY_SIZE(irqs)); +} + +static unsigned long balloon3_pcmcia_status[2] = { + BALLOON3_CF_nSTSCHG_BVD1, + BALLOON3_CF_nSTSCHG_BVD1 +}; + +static void balloon3_pcmcia_socket_state(struct soc_pcmcia_socket *skt, + struct pcmcia_state *state) +{ + uint16_t status; + int flip; + + /* This actually reads the STATUS register */ + status = __raw_readw(BALLOON3_CF_STATUS_REG); + flip = (status ^ balloon3_pcmcia_status[skt->nr]) + & BALLOON3_CF_nSTSCHG_BVD1; + /* + * Workaround for STSCHG which can't be deasserted: + * We therefore disable/enable corresponding IRQs + * as needed to avoid IRQ locks. + */ + if (flip) { + balloon3_pcmcia_status[skt->nr] = status; + if (status & BALLOON3_CF_nSTSCHG_BVD1) + enable_irq(BALLOON3_BP_NSTSCHG_IRQ); + else + disable_irq(BALLOON3_BP_NSTSCHG_IRQ); + } + + state->detect = !gpio_get_value(BALLOON3_GPIO_S0_CD); + state->ready = !!(status & BALLOON3_CF_nIRQ); + state->bvd1 = !!(status & BALLOON3_CF_nSTSCHG_BVD1); + state->bvd2 = 0; /* not available */ + state->vs_3v = 1; /* Always true its a CF card */ + state->vs_Xv = 0; /* not available */ + state->wrprot = 0; /* not available */ +} + +static int balloon3_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, + const socket_state_t *state) +{ + __raw_writew((state->flags & SS_RESET) ? BALLOON3_CF_RESET : 0, + BALLOON3_CF_CONTROL_REG); + return 0; +} + +static void balloon3_pcmcia_socket_init(struct soc_pcmcia_socket *skt) +{ +} + +static void balloon3_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt) +{ +} + +static struct pcmcia_low_level balloon3_pcmcia_ops = { + .owner = THIS_MODULE, + .hw_init = balloon3_pcmcia_hw_init, + .hw_shutdown = balloon3_pcmcia_hw_shutdown, + .socket_state = balloon3_pcmcia_socket_state, + .configure_socket = balloon3_pcmcia_configure_socket, + .socket_init = balloon3_pcmcia_socket_init, + .socket_suspend = balloon3_pcmcia_socket_suspend, + .first = 0, + .nr = 1, +}; + +static struct platform_device *balloon3_pcmcia_device; + +static int __init balloon3_pcmcia_init(void) +{ + int ret; + + balloon3_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); + if (!balloon3_pcmcia_device) + return -ENOMEM; + + ret = platform_device_add_data(balloon3_pcmcia_device, + &balloon3_pcmcia_ops, sizeof(balloon3_pcmcia_ops)); + + if (!ret) + ret = platform_device_add(balloon3_pcmcia_device); + + if (ret) + platform_device_put(balloon3_pcmcia_device); + + return ret; +} + +static void __exit balloon3_pcmcia_exit(void) +{ + platform_device_unregister(balloon3_pcmcia_device); +} + +module_init(balloon3_pcmcia_init); +module_exit(balloon3_pcmcia_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Nick Bane <nick@cecomputing.co.uk>"); +MODULE_ALIAS("platform:pxa2xx-pcmcia"); +MODULE_DESCRIPTION("Balloon3 board CF/PCMCIA driver"); diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig index 8e9ba177d81..1e5506be39b 100644 --- a/drivers/power/Kconfig +++ b/drivers/power/Kconfig @@ -142,4 +142,15 @@ config CHARGER_PCF50633 help Say Y to include support for NXP PCF50633 Main Battery Charger. +config BATTERY_JZ4740 + tristate "Ingenic JZ4740 battery" + depends on MACH_JZ4740 + depends on MFD_JZ4740_ADC + help + Say Y to enable support for the battery on Ingenic JZ4740 based + boards. 
+ + This driver can be build as a module. If so, the module will be + called jz4740-battery. + endif # POWER_SUPPLY diff --git a/drivers/power/Makefile b/drivers/power/Makefile index 00050809a6c..cf95009d9bc 100644 --- a/drivers/power/Makefile +++ b/drivers/power/Makefile @@ -34,3 +34,4 @@ obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o obj-$(CONFIG_BATTERY_Z2) += z2_battery.o obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o +obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o diff --git a/drivers/power/jz4740-battery.c b/drivers/power/jz4740-battery.c new file mode 100644 index 00000000000..20c4b952e9b --- /dev/null +++ b/drivers/power/jz4740-battery.c @@ -0,0 +1,445 @@ +/* + * Battery measurement code for Ingenic JZ SOC. + * + * Copyright (C) 2009 Jiejing Zhang <kzjeef@gmail.com> + * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de> + * + * based on tosa_battery.c + * + * Copyright (C) 2008 Marek Vasut <marek.vasut@gmail.com> +* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#include <linux/delay.h> +#include <linux/gpio.h> +#include <linux/mfd/core.h> +#include <linux/power_supply.h> + +#include <linux/power/jz4740-battery.h> +#include <linux/jz4740-adc.h> + +struct jz_battery { + struct jz_battery_platform_data *pdata; + struct platform_device *pdev; + + struct resource *mem; + void __iomem *base; + + int irq; + int charge_irq; + + struct mfd_cell *cell; + + int status; + long voltage; + + struct completion read_completion; + + struct power_supply battery; + struct delayed_work work; +}; + +static inline struct jz_battery *psy_to_jz_battery(struct power_supply *psy) +{ + return container_of(psy, struct jz_battery, battery); +} + +static irqreturn_t jz_battery_irq_handler(int irq, void *devid) +{ + struct jz_battery *battery = devid; + + complete(&battery->read_completion); + return IRQ_HANDLED; +} + +static long jz_battery_read_voltage(struct jz_battery *battery) +{ + unsigned long t; + unsigned long val; + long voltage; + + INIT_COMPLETION(battery->read_completion); + + enable_irq(battery->irq); + battery->cell->enable(battery->pdev); + + t = wait_for_completion_interruptible_timeout(&battery->read_completion, + HZ); + + if (t > 0) { + val = readw(battery->base) & 0xfff; + + if (battery->pdata->info.voltage_max_design <= 2500000) + val = (val * 78125UL) >> 7UL; + else + val = ((val * 924375UL) >> 9UL) + 33000; + voltage = (long)val; + } else { + voltage = t ? 
t : -ETIMEDOUT; + } + + battery->cell->disable(battery->pdev); + disable_irq(battery->irq); + + return voltage; +} + +static int jz_battery_get_capacity(struct power_supply *psy) +{ + struct jz_battery *jz_battery = psy_to_jz_battery(psy); + struct power_supply_info *info = &jz_battery->pdata->info; + long voltage; + int ret; + int voltage_span; + + voltage = jz_battery_read_voltage(jz_battery); + + if (voltage < 0) + return voltage; + + voltage_span = info->voltage_max_design - info->voltage_min_design; + ret = ((voltage - info->voltage_min_design) * 100) / voltage_span; + + if (ret > 100) + ret = 100; + else if (ret < 0) + ret = 0; + + return ret; +} + +static int jz_battery_get_property(struct power_supply *psy, + enum power_supply_property psp, union power_supply_propval *val) +{ + struct jz_battery *jz_battery = psy_to_jz_battery(psy); + struct power_supply_info *info = &jz_battery->pdata->info; + long voltage; + + switch (psp) { + case POWER_SUPPLY_PROP_STATUS: + val->intval = jz_battery->status; + break; + case POWER_SUPPLY_PROP_TECHNOLOGY: + val->intval = jz_battery->pdata->info.technology; + break; + case POWER_SUPPLY_PROP_HEALTH: + voltage = jz_battery_read_voltage(jz_battery); + if (voltage < info->voltage_min_design) + val->intval = POWER_SUPPLY_HEALTH_DEAD; + else + val->intval = POWER_SUPPLY_HEALTH_GOOD; + break; + case POWER_SUPPLY_PROP_CAPACITY: + val->intval = jz_battery_get_capacity(psy); + break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + val->intval = jz_battery_read_voltage(jz_battery); + if (val->intval < 0) + return val->intval; + break; + case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: + val->intval = info->voltage_max_design; + break; + case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: + val->intval = info->voltage_min_design; + break; + case POWER_SUPPLY_PROP_PRESENT: + val->intval = 1; + break; + default: + return -EINVAL; + } + return 0; +} + +static void jz_battery_external_power_changed(struct power_supply *psy) +{ + struct jz_battery *jz_battery = psy_to_jz_battery(psy); + + cancel_delayed_work(&jz_battery->work); + schedule_delayed_work(&jz_battery->work, 0); +} + +static irqreturn_t jz_battery_charge_irq(int irq, void *data) +{ + struct jz_battery *jz_battery = data; + + cancel_delayed_work(&jz_battery->work); + schedule_delayed_work(&jz_battery->work, 0); + + return IRQ_HANDLED; +} + +static void jz_battery_update(struct jz_battery *jz_battery) +{ + int status; + long voltage; + bool has_changed = false; + int is_charging; + + if (gpio_is_valid(jz_battery->pdata->gpio_charge)) { + is_charging = gpio_get_value(jz_battery->pdata->gpio_charge); + is_charging ^= jz_battery->pdata->gpio_charge_active_low; + if (is_charging) + status = POWER_SUPPLY_STATUS_CHARGING; + else + status = POWER_SUPPLY_STATUS_NOT_CHARGING; + + if (status != jz_battery->status) { + jz_battery->status = status; + has_changed = true; + } + } + + voltage = jz_battery_read_voltage(jz_battery); + if (abs(voltage - jz_battery->voltage) < 50000) { + jz_battery->voltage = voltage; + has_changed = true; + } + + if (has_changed) + power_supply_changed(&jz_battery->battery); +} + +static enum power_supply_property jz_battery_properties[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_TECHNOLOGY, + POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, + POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, + POWER_SUPPLY_PROP_PRESENT, +}; + +static void jz_battery_work(struct work_struct *work) +{ + /* Too small interval will increase system workload 
*/ + const int interval = HZ * 30; + struct jz_battery *jz_battery = container_of(work, struct jz_battery, + work.work); + + jz_battery_update(jz_battery); + schedule_delayed_work(&jz_battery->work, interval); +} + +static int __devinit jz_battery_probe(struct platform_device *pdev) +{ + int ret = 0; + struct jz_battery_platform_data *pdata = pdev->dev.parent->platform_data; + struct jz_battery *jz_battery; + struct power_supply *battery; + + jz_battery = kzalloc(sizeof(*jz_battery), GFP_KERNEL); + if (!jz_battery) { + dev_err(&pdev->dev, "Failed to allocate driver structure\n"); + return -ENOMEM; + } + + jz_battery->cell = pdev->dev.platform_data; + + jz_battery->irq = platform_get_irq(pdev, 0); + if (jz_battery->irq < 0) { + ret = jz_battery->irq; + dev_err(&pdev->dev, "Failed to get platform irq: %d\n", ret); + goto err_free; + } + + jz_battery->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!jz_battery->mem) { + ret = -ENOENT; + dev_err(&pdev->dev, "Failed to get platform mmio resource\n"); + goto err_free; + } + + jz_battery->mem = request_mem_region(jz_battery->mem->start, + resource_size(jz_battery->mem), pdev->name); + if (!jz_battery->mem) { + ret = -EBUSY; + dev_err(&pdev->dev, "Failed to request mmio memory region\n"); + goto err_free; + } + + jz_battery->base = ioremap_nocache(jz_battery->mem->start, + resource_size(jz_battery->mem)); + if (!jz_battery->base) { + ret = -EBUSY; + dev_err(&pdev->dev, "Failed to ioremap mmio memory\n"); + goto err_release_mem_region; + } + + battery = &jz_battery->battery; + battery->name = pdata->info.name; + battery->type = POWER_SUPPLY_TYPE_BATTERY; + battery->properties = jz_battery_properties; + battery->num_properties = ARRAY_SIZE(jz_battery_properties); + battery->get_property = jz_battery_get_property; + battery->external_power_changed = jz_battery_external_power_changed; + battery->use_for_apm = 1; + + jz_battery->pdata = pdata; + jz_battery->pdev = pdev; + + init_completion(&jz_battery->read_completion); + + INIT_DELAYED_WORK(&jz_battery->work, jz_battery_work); + + ret = request_irq(jz_battery->irq, jz_battery_irq_handler, 0, pdev->name, + jz_battery); + if (ret) { + dev_err(&pdev->dev, "Failed to request irq %d\n", ret); + goto err_iounmap; + } + disable_irq(jz_battery->irq); + + if (gpio_is_valid(pdata->gpio_charge)) { + ret = gpio_request(pdata->gpio_charge, dev_name(&pdev->dev)); + if (ret) { + dev_err(&pdev->dev, "charger state gpio request failed.\n"); + goto err_free_irq; + } + ret = gpio_direction_input(pdata->gpio_charge); + if (ret) { + dev_err(&pdev->dev, "charger state gpio set direction failed.\n"); + goto err_free_gpio; + } + + jz_battery->charge_irq = gpio_to_irq(pdata->gpio_charge); + + if (jz_battery->charge_irq >= 0) { + ret = request_irq(jz_battery->charge_irq, + jz_battery_charge_irq, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, + dev_name(&pdev->dev), jz_battery); + if (ret) { + dev_err(&pdev->dev, "Failed to request charge irq: %d\n", ret); + goto err_free_gpio; + } + } + } else { + jz_battery->charge_irq = -1; + } + + if (jz_battery->pdata->info.voltage_max_design <= 2500000) + jz4740_adc_set_config(pdev->dev.parent, JZ_ADC_CONFIG_BAT_MB, + JZ_ADC_CONFIG_BAT_MB); + else + jz4740_adc_set_config(pdev->dev.parent, JZ_ADC_CONFIG_BAT_MB, 0); + + ret = power_supply_register(&pdev->dev, &jz_battery->battery); + if (ret) { + dev_err(&pdev->dev, "power supply battery register failed.\n"); + goto err_free_charge_irq; + } + + platform_set_drvdata(pdev, jz_battery); + schedule_delayed_work(&jz_battery->work, 0); 
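	/* Editor's note: this kicks an immediate status read; jz_battery_work()
	 * then reschedules itself every 30 seconds (interval = HZ * 30 above). */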
+ + return 0; + +err_free_charge_irq: + if (jz_battery->charge_irq >= 0) + free_irq(jz_battery->charge_irq, jz_battery); +err_free_gpio: + if (gpio_is_valid(pdata->gpio_charge)) + gpio_free(jz_battery->pdata->gpio_charge); +err_free_irq: + free_irq(jz_battery->irq, jz_battery); +err_iounmap: + platform_set_drvdata(pdev, NULL); + iounmap(jz_battery->base); +err_release_mem_region: + release_mem_region(jz_battery->mem->start, resource_size(jz_battery->mem)); +err_free: + kfree(jz_battery); + return ret; +} + +static int __devexit jz_battery_remove(struct platform_device *pdev) +{ + struct jz_battery *jz_battery = platform_get_drvdata(pdev); + + cancel_delayed_work_sync(&jz_battery->work); + + if (gpio_is_valid(jz_battery->pdata->gpio_charge)) { + if (jz_battery->charge_irq >= 0) + free_irq(jz_battery->charge_irq, jz_battery); + gpio_free(jz_battery->pdata->gpio_charge); + } + + power_supply_unregister(&jz_battery->battery); + + free_irq(jz_battery->irq, jz_battery); + + iounmap(jz_battery->base); + release_mem_region(jz_battery->mem->start, resource_size(jz_battery->mem)); + + return 0; +} + +#ifdef CONFIG_PM +static int jz_battery_suspend(struct device *dev) +{ + struct jz_battery *jz_battery = dev_get_drvdata(dev); + + cancel_delayed_work_sync(&jz_battery->work); + jz_battery->status = POWER_SUPPLY_STATUS_UNKNOWN; + + return 0; +} + +static int jz_battery_resume(struct device *dev) +{ + struct jz_battery *jz_battery = dev_get_drvdata(dev); + + schedule_delayed_work(&jz_battery->work, 0); + + return 0; +} + +static const struct dev_pm_ops jz_battery_pm_ops = { + .suspend = jz_battery_suspend, + .resume = jz_battery_resume, +}; + +#define JZ_BATTERY_PM_OPS (&jz_battery_pm_ops) +#else +#define JZ_BATTERY_PM_OPS NULL +#endif + +static struct platform_driver jz_battery_driver = { + .probe = jz_battery_probe, + .remove = __devexit_p(jz_battery_remove), + .driver = { + .name = "jz4740-battery", + .owner = THIS_MODULE, + .pm = JZ_BATTERY_PM_OPS, + }, +}; + +static int __init jz_battery_init(void) +{ + return platform_driver_register(&jz_battery_driver); +} +module_init(jz_battery_init); + +static void __exit jz_battery_exit(void) +{ + platform_driver_unregister(&jz_battery_driver); +} +module_exit(jz_battery_exit); + +MODULE_ALIAS("platform:jz4740-battery"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); +MODULE_DESCRIPTION("JZ4740 SoC battery driver"); diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c index 4e8afce0c81..5071d85ec12 100644 --- a/drivers/power/wm97xx_battery.c +++ b/drivers/power/wm97xx_battery.c @@ -29,7 +29,6 @@ static DEFINE_MUTEX(bat_lock); static struct work_struct bat_work; static struct mutex work_lock; static int bat_status = POWER_SUPPLY_STATUS_UNKNOWN; -static struct wm97xx_batt_info *gpdata; static enum power_supply_property *prop; static unsigned long wm97xx_read_bat(struct power_supply *bat_ps) @@ -172,12 +171,6 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev) struct wm97xx_pdata *wmdata = dev->dev.platform_data; struct wm97xx_batt_pdata *pdata; - if (gpdata) { - dev_err(&dev->dev, "Do not pass platform_data through " - "wm97xx_bat_set_pdata!\n"); - return -EINVAL; - } - if (!wmdata) { dev_err(&dev->dev, "No platform data supplied\n"); return -EINVAL; @@ -308,15 +301,6 @@ static void __exit wm97xx_bat_exit(void) platform_driver_unregister(&wm97xx_bat_driver); } -/* The interface is deprecated, as well as linux/wm97xx_batt.h */ -void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data); - 
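Note how the probe path above unwinds through a ladder of goto labels, releasing resources in the reverse order they were acquired. A stripped-down sketch of the idiom (example_priv and the acquire_*/release_* helpers are hypothetical stand-ins, not the jz4740 code itself):

#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_priv { void __iomem *base; int irq; };

/* hypothetical stand-ins for request_mem_region()/ioremap()/request_irq();
 * each returns 0 on success */
int acquire_a(struct example_priv *p);
int acquire_b(struct example_priv *p);
void release_a(struct example_priv *p);

static int example_probe(struct platform_device *pdev)
{
        struct example_priv *priv;
        int ret;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        ret = acquire_a(priv);
        if (ret)
                goto err_free;

        ret = acquire_b(priv);
        if (ret)
                goto err_release_a;     /* undo only what already succeeded */

        platform_set_drvdata(pdev, priv);
        return 0;

err_release_a:
        release_a(priv);
err_free:
        kfree(priv);
        return ret;
}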
-void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data) -{ - gpdata = data; -} -EXPORT_SYMBOL_GPL(wm97xx_bat_set_pdata); - module_init(wm97xx_bat_init); module_exit(wm97xx_bat_exit); diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 10ba12c8c5e..9238c8f40f0 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -645,9 +645,16 @@ config RTC_DRV_OMAP DA8xx/OMAP-L13x chips. This driver can also be built as a module called rtc-omap. +config HAVE_S3C_RTC + bool + help + This will include RTC support for Samsung SoCs. If + you want to include RTC support for any machine, kindly + select this in the respective mach-XXXX/Kconfig file. + config RTC_DRV_S3C tristate "Samsung S3C series SoC RTC" - depends on ARCH_S3C2410 || ARCH_S3C64XX + depends on ARCH_S3C2410 || ARCH_S3C64XX || HAVE_S3C_RTC help RTC (Realtime Clock) driver for the clock inbuilt into the Samsung S3C24XX series of SoCs. This can provide periodic @@ -774,7 +781,7 @@ config RTC_DRV_AT91SAM9_GPBR config RTC_DRV_AU1XXX tristate "Au1xxx Counter0 RTC support" - depends on SOC_AU1X00 + depends on MIPS_ALCHEMY help This is a driver for the Au1xxx on-chip Counter0 (Time-Of-Year counter) to be used as a RTC. @@ -905,4 +912,15 @@ config RTC_DRV_MPC5121 This driver can also be built as a module. If so, the module will be called rtc-mpc5121. +config RTC_DRV_JZ4740 + tristate "Ingenic JZ4740 SoC" + depends on RTC_CLASS + depends on MACH_JZ4740 + help + If you say yes here you get support for the Ingenic JZ4740 SoC RTC + controller. + + This driver can also be buillt as a module. If so, the module + will be called rtc-jz4740. + endif # RTC_CLASS diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 5adbba7cf89..fedf9bb3659 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -47,6 +47,7 @@ obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o obj-$(CONFIG_RTC_DRV_FM3130) += rtc-fm3130.o obj-$(CONFIG_RTC_DRV_GENERIC) += rtc-generic.o obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o +obj-$(CONFIG_RTC_DRV_JZ4740) += rtc-jz4740.o obj-$(CONFIG_RTC_DRV_M41T80) += rtc-m41t80.o obj-$(CONFIG_RTC_DRV_M41T94) += rtc-m41t94.o obj-$(CONFIG_RTC_DRV_M48T35) += rtc-m48t35.o diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c new file mode 100644 index 00000000000..2619d57b91d --- /dev/null +++ b/drivers/rtc/rtc-jz4740.c @@ -0,0 +1,345 @@ +/* + * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> + * JZ4740 SoC RTC driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/rtc.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#define JZ_REG_RTC_CTRL 0x00 +#define JZ_REG_RTC_SEC 0x04 +#define JZ_REG_RTC_SEC_ALARM 0x08 +#define JZ_REG_RTC_REGULATOR 0x0C +#define JZ_REG_RTC_HIBERNATE 0x20 +#define JZ_REG_RTC_SCRATCHPAD 0x34 + +#define JZ_RTC_CTRL_WRDY BIT(7) +#define JZ_RTC_CTRL_1HZ BIT(6) +#define JZ_RTC_CTRL_1HZ_IRQ BIT(5) +#define JZ_RTC_CTRL_AF BIT(4) +#define JZ_RTC_CTRL_AF_IRQ BIT(3) +#define JZ_RTC_CTRL_AE BIT(2) +#define JZ_RTC_CTRL_ENABLE BIT(0) + +struct jz4740_rtc { + struct resource *mem; + void __iomem *base; + + struct rtc_device *rtc; + + unsigned int irq; + + spinlock_t lock; +}; + +static inline uint32_t jz4740_rtc_reg_read(struct jz4740_rtc *rtc, size_t reg) +{ + return readl(rtc->base + reg); +} + +static int jz4740_rtc_wait_write_ready(struct jz4740_rtc *rtc) +{ + uint32_t ctrl; + int timeout = 1000; + + do { + ctrl = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_CTRL); + } while (!(ctrl & JZ_RTC_CTRL_WRDY) && --timeout); + + return timeout ? 0 : -EIO; +} + +static inline int jz4740_rtc_reg_write(struct jz4740_rtc *rtc, size_t reg, + uint32_t val) +{ + int ret; + ret = jz4740_rtc_wait_write_ready(rtc); + if (ret == 0) + writel(val, rtc->base + reg); + + return ret; +} + +static int jz4740_rtc_ctrl_set_bits(struct jz4740_rtc *rtc, uint32_t mask, + bool set) +{ + int ret; + unsigned long flags; + uint32_t ctrl; + + spin_lock_irqsave(&rtc->lock, flags); + + ctrl = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_CTRL); + + /* Don't clear interrupt flags by accident */ + ctrl |= JZ_RTC_CTRL_1HZ | JZ_RTC_CTRL_AF; + + if (set) + ctrl |= mask; + else + ctrl &= ~mask; + + ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_CTRL, ctrl); + + spin_unlock_irqrestore(&rtc->lock, flags); + + return ret; +} + +static int jz4740_rtc_read_time(struct device *dev, struct rtc_time *time) +{ + struct jz4740_rtc *rtc = dev_get_drvdata(dev); + uint32_t secs, secs2; + int timeout = 5; + + /* If the seconds register is read while it is updated, it can contain a + * bogus value. This can be avoided by making sure that two consecutive + * reads have the same value. 
+ */ + secs = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_SEC); + secs2 = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_SEC); + + while (secs != secs2 && --timeout) { + secs = secs2; + secs2 = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_SEC); + } + + if (timeout == 0) + return -EIO; + + rtc_time_to_tm(secs, time); + + return rtc_valid_tm(time); +} + +static int jz4740_rtc_set_mmss(struct device *dev, unsigned long secs) +{ + struct jz4740_rtc *rtc = dev_get_drvdata(dev); + + return jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC, secs); +} + +static int jz4740_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct jz4740_rtc *rtc = dev_get_drvdata(dev); + uint32_t secs; + uint32_t ctrl; + + secs = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_SEC_ALARM); + + ctrl = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_CTRL); + + alrm->enabled = !!(ctrl & JZ_RTC_CTRL_AE); + alrm->pending = !!(ctrl & JZ_RTC_CTRL_AF); + + rtc_time_to_tm(secs, &alrm->time); + + return rtc_valid_tm(&alrm->time); +} + +static int jz4740_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + int ret; + struct jz4740_rtc *rtc = dev_get_drvdata(dev); + unsigned long secs; + + rtc_tm_to_time(&alrm->time, &secs); + + ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC_ALARM, secs); + if (!ret) + ret = jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_AE, alrm->enabled); + + return ret; +} + +static int jz4740_rtc_update_irq_enable(struct device *dev, unsigned int enable) +{ + struct jz4740_rtc *rtc = dev_get_drvdata(dev); + return jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_1HZ_IRQ, enable); +} + +static int jz4740_rtc_alarm_irq_enable(struct device *dev, unsigned int enable) +{ + struct jz4740_rtc *rtc = dev_get_drvdata(dev); + return jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_AF_IRQ, enable); +} + +static struct rtc_class_ops jz4740_rtc_ops = { + .read_time = jz4740_rtc_read_time, + .set_mmss = jz4740_rtc_set_mmss, + .read_alarm = jz4740_rtc_read_alarm, + .set_alarm = jz4740_rtc_set_alarm, + .update_irq_enable = jz4740_rtc_update_irq_enable, + .alarm_irq_enable = jz4740_rtc_alarm_irq_enable, +}; + +static irqreturn_t jz4740_rtc_irq(int irq, void *data) +{ + struct jz4740_rtc *rtc = data; + uint32_t ctrl; + unsigned long events = 0; + + ctrl = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_CTRL); + + if (ctrl & JZ_RTC_CTRL_1HZ) + events |= (RTC_UF | RTC_IRQF); + + if (ctrl & JZ_RTC_CTRL_AF) + events |= (RTC_AF | RTC_IRQF); + + rtc_update_irq(rtc->rtc, 1, events); + + jz4740_rtc_ctrl_set_bits(rtc, JZ_RTC_CTRL_1HZ | JZ_RTC_CTRL_AF, false); + + return IRQ_HANDLED; +} + +void jz4740_rtc_poweroff(struct device *dev) +{ + struct jz4740_rtc *rtc = dev_get_drvdata(dev); + jz4740_rtc_reg_write(rtc, JZ_REG_RTC_HIBERNATE, 1); +} +EXPORT_SYMBOL_GPL(jz4740_rtc_poweroff); + +static int __devinit jz4740_rtc_probe(struct platform_device *pdev) +{ + int ret; + struct jz4740_rtc *rtc; + uint32_t scratchpad; + + rtc = kzalloc(sizeof(*rtc), GFP_KERNEL); + if (!rtc) + return -ENOMEM; + + rtc->irq = platform_get_irq(pdev, 0); + if (rtc->irq < 0) { + ret = -ENOENT; + dev_err(&pdev->dev, "Failed to get platform irq\n"); + goto err_free; + } + + rtc->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!rtc->mem) { + ret = -ENOENT; + dev_err(&pdev->dev, "Failed to get platform mmio memory\n"); + goto err_free; + } + + rtc->mem = request_mem_region(rtc->mem->start, resource_size(rtc->mem), + pdev->name); + if (!rtc->mem) { + ret = -EBUSY; + dev_err(&pdev->dev, "Failed to request mmio memory region\n"); + goto err_free; + } + + rtc->base = ioremap_nocache(rtc->mem->start, 
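As the comment above explains, the seconds register may be caught mid-update, so it is read repeatedly until two consecutive reads agree. The same technique in plain C, with read_counter() standing in for the hardware register access:

#include <stdint.h>
#include <errno.h>

/* placeholder for the volatile hardware register read */
extern uint32_t read_counter(void);

/* returns 0 and stores a stable value, or -EIO if it never settles */
static int read_counter_stable(uint32_t *out)
{
        uint32_t a = read_counter();
        uint32_t b = read_counter();
        int timeout = 5;

        while (a != b && --timeout) {
                a = b;
                b = read_counter();
        }
        if (!timeout)
                return -EIO;

        *out = b;
        return 0;
}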
resource_size(rtc->mem)); + if (!rtc->base) { + ret = -EBUSY; + dev_err(&pdev->dev, "Failed to ioremap mmio memory\n"); + goto err_release_mem_region; + } + + spin_lock_init(&rtc->lock); + + platform_set_drvdata(pdev, rtc); + + rtc->rtc = rtc_device_register(pdev->name, &pdev->dev, &jz4740_rtc_ops, + THIS_MODULE); + if (IS_ERR(rtc->rtc)) { + ret = PTR_ERR(rtc->rtc); + dev_err(&pdev->dev, "Failed to register rtc device: %d\n", ret); + goto err_iounmap; + } + + ret = request_irq(rtc->irq, jz4740_rtc_irq, 0, + pdev->name, rtc); + if (ret) { + dev_err(&pdev->dev, "Failed to request rtc irq: %d\n", ret); + goto err_unregister_rtc; + } + + scratchpad = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_SCRATCHPAD); + if (scratchpad != 0x12345678) { + ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SCRATCHPAD, 0x12345678); + ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC, 0); + if (ret) { + dev_err(&pdev->dev, "Could not write write to RTC registers\n"); + goto err_free_irq; + } + } + + return 0; + +err_free_irq: + free_irq(rtc->irq, rtc); +err_unregister_rtc: + rtc_device_unregister(rtc->rtc); +err_iounmap: + platform_set_drvdata(pdev, NULL); + iounmap(rtc->base); +err_release_mem_region: + release_mem_region(rtc->mem->start, resource_size(rtc->mem)); +err_free: + kfree(rtc); + + return ret; +} + +static int __devexit jz4740_rtc_remove(struct platform_device *pdev) +{ + struct jz4740_rtc *rtc = platform_get_drvdata(pdev); + + free_irq(rtc->irq, rtc); + + rtc_device_unregister(rtc->rtc); + + iounmap(rtc->base); + release_mem_region(rtc->mem->start, resource_size(rtc->mem)); + + kfree(rtc); + + platform_set_drvdata(pdev, NULL); + + return 0; +} + +struct platform_driver jz4740_rtc_driver = { + .probe = jz4740_rtc_probe, + .remove = __devexit_p(jz4740_rtc_remove), + .driver = { + .name = "jz4740-rtc", + .owner = THIS_MODULE, + }, +}; + +static int __init jz4740_rtc_init(void) +{ + return platform_driver_register(&jz4740_rtc_driver); +} +module_init(jz4740_rtc_init); + +static void __exit jz4740_rtc_exit(void) +{ + platform_driver_unregister(&jz4740_rtc_driver); +} +module_exit(jz4740_rtc_exit); + +MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("RTC driver for the JZ4740 SoC\n"); +MODULE_ALIAS("platform:jz4740-rtc"); diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 70b68d35f96..a0d3ec89d41 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -1,5 +1,8 @@ /* drivers/rtc/rtc-s3c.c * + * Copyright (c) 2010 Samsung Electronics Co., Ltd. 
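The scratchpad test at the end of the probe above is a power-loss detector: a battery-backed register holds a magic value, and when that value has vanished the time registers are treated as garbage and reset. A sketch of the idea, with reg_read()/reg_write() as hypothetical accessors for the 0x34/0x04 offsets the driver uses:

#include <stdint.h>

#define SCRATCH_MAGIC 0x12345678u

enum { REG_SCRATCH = 0x34, REG_SEC = 0x04 };

/* hypothetical register accessors */
extern uint32_t reg_read(unsigned int reg);
extern void reg_write(unsigned int reg, uint32_t val);

/* re-initialise the clock only when backup power was lost */
static void rtc_check_power_loss(void)
{
        if (reg_read(REG_SCRATCH) != SCRATCH_MAGIC) {
                reg_write(REG_SCRATCH, SCRATCH_MAGIC);
                reg_write(REG_SEC, 0);  /* time is invalid and must be set again */
        }
}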
+ * http://www.samsung.com/ + * * Copyright (c) 2004,2006 Simtec Electronics * Ben Dooks, <ben@simtec.co.uk> * http://armlinux.simtec.co.uk/ @@ -39,6 +42,7 @@ enum s3c_cpu_type { static struct resource *s3c_rtc_mem; +static struct clk *rtc_clk; static void __iomem *s3c_rtc_base; static int s3c_rtc_alarmno = NO_IRQ; static int s3c_rtc_tickno = NO_IRQ; @@ -53,6 +57,10 @@ static irqreturn_t s3c_rtc_alarmirq(int irq, void *id) struct rtc_device *rdev = id; rtc_update_irq(rdev, 1, RTC_AF | RTC_IRQF); + + if (s3c_rtc_cpu_type == TYPE_S3C64XX) + writeb(S3C2410_INTP_ALM, s3c_rtc_base + S3C2410_INTP); + return IRQ_HANDLED; } @@ -61,6 +69,10 @@ static irqreturn_t s3c_rtc_tickirq(int irq, void *id) struct rtc_device *rdev = id; rtc_update_irq(rdev, 1, RTC_PF | RTC_IRQF); + + if (s3c_rtc_cpu_type == TYPE_S3C64XX) + writeb(S3C2410_INTP_TIC, s3c_rtc_base + S3C2410_INTP); + return IRQ_HANDLED; } @@ -94,7 +106,7 @@ static int s3c_rtc_setpie(struct device *dev, int enabled) if (enabled) tmp |= S3C64XX_RTCCON_TICEN; - writeb(tmp, s3c_rtc_base + S3C2410_RTCCON); + writew(tmp, s3c_rtc_base + S3C2410_RTCCON); } else { tmp = readb(s3c_rtc_base + S3C2410_TICNT); tmp &= ~S3C2410_TICNT_ENABLE; @@ -128,7 +140,7 @@ static int s3c_rtc_setfreq(struct device *dev, int freq) tmp |= (rtc_dev->max_user_freq / freq)-1; - writeb(tmp, s3c_rtc_base + S3C2410_TICNT); + writel(tmp, s3c_rtc_base + S3C2410_TICNT); spin_unlock_irq(&s3c_rtc_pie_lock); return 0; @@ -431,6 +443,10 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev) s3c_rtc_setpie(&dev->dev, 0); s3c_rtc_setaie(0); + clk_disable(rtc_clk); + clk_put(rtc_clk); + rtc_clk = NULL; + iounmap(s3c_rtc_base); release_resource(s3c_rtc_mem); kfree(s3c_rtc_mem); @@ -442,6 +458,7 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev) { struct rtc_device *rtc; struct resource *res; + unsigned int tmp, i; int ret; pr_debug("%s: probe=%p\n", __func__, pdev); @@ -488,6 +505,16 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev) goto err_nomap; } + rtc_clk = clk_get(&pdev->dev, "rtc"); + if (IS_ERR(rtc_clk)) { + dev_err(&pdev->dev, "failed to find rtc clock source\n"); + ret = PTR_ERR(rtc_clk); + rtc_clk = NULL; + goto err_clk; + } + + clk_enable(rtc_clk); + /* check to see if everything is setup correctly */ s3c_rtc_enable(pdev, 1); @@ -510,6 +537,15 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev) s3c_rtc_cpu_type = platform_get_device_id(pdev)->driver_data; + /* Check RTC Time */ + + for (i = S3C2410_RTCSEC; i <= S3C2410_RTCYEAR; i += 0x4) { + tmp = readb(s3c_rtc_base + i); + + if ((tmp & 0xf) > 0x9 || ((tmp >> 4) & 0xf) > 0x9) + writeb(0, s3c_rtc_base + i); + } + if (s3c_rtc_cpu_type == TYPE_S3C64XX) rtc->max_user_freq = 32768; else @@ -523,6 +559,10 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev) err_nortc: s3c_rtc_enable(pdev, 0); + clk_disable(rtc_clk); + clk_put(rtc_clk); + + err_clk: iounmap(s3c_rtc_base); err_nomap: diff --git a/drivers/sbus/char/bbc_i2c.c b/drivers/sbus/char/bbc_i2c.c index 8bfdd63a1fc..3e89c313e98 100644 --- a/drivers/sbus/char/bbc_i2c.c +++ b/drivers/sbus/char/bbc_i2c.c @@ -317,7 +317,7 @@ static struct bbc_i2c_bus * __init attach_one_i2c(struct of_device *op, int inde bp->waiting = 0; init_waitqueue_head(&bp->wq); - if (request_irq(op->irqs[0], bbc_i2c_interrupt, + if (request_irq(op->archdata.irqs[0], bbc_i2c_interrupt, IRQF_SHARED, "bbc_i2c", bp)) goto fail; @@ -373,7 +373,7 @@ static int __devinit bbc_i2c_probe(struct of_device *op, err = bbc_envctrl_init(bp); if (err) { 
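The "Check RTC Time" loop added to the s3c-rtc probe above treats each time register as packed BCD and zeroes any register whose nibbles are not valid decimal digits. The validity test on its own:

#include <stdbool.h>
#include <stdint.h>

/* a packed-BCD byte encodes two decimal digits, one per nibble */
static bool bcd_byte_valid(uint8_t v)
{
        return (v & 0x0f) <= 0x09 && ((v >> 4) & 0x0f) <= 0x09;
}

/* e.g. 0x59 (59 seconds) is valid; 0x5a or 0xff is not */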
- free_irq(op->irqs[0], bp); + free_irq(op->archdata.irqs[0], bp); if (bp->i2c_bussel_reg) of_iounmap(&op->resource[0], bp->i2c_bussel_reg, 1); if (bp->i2c_control_regs) @@ -392,7 +392,7 @@ static int __devexit bbc_i2c_remove(struct of_device *op) bbc_envctrl_cleanup(bp); - free_irq(op->irqs[0], bp); + free_irq(op->archdata.irqs[0], bp); if (bp->i2c_bussel_reg) of_iounmap(&op->resource[0], bp->i2c_bussel_reg, 1); @@ -425,12 +425,12 @@ static struct of_platform_driver bbc_i2c_driver = { static int __init bbc_i2c_init(void) { - return of_register_driver(&bbc_i2c_driver, &of_bus_type); + return of_register_platform_driver(&bbc_i2c_driver); } static void __exit bbc_i2c_exit(void) { - of_unregister_driver(&bbc_i2c_driver); + of_unregister_platform_driver(&bbc_i2c_driver); } module_init(bbc_i2c_init); diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c index 7baf1b64403..47db97583ea 100644 --- a/drivers/sbus/char/display7seg.c +++ b/drivers/sbus/char/display7seg.c @@ -13,7 +13,7 @@ #include <linux/miscdevice.h> #include <linux/ioport.h> /* request_region */ #include <linux/slab.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/of.h> #include <linux/of_device.h> #include <asm/atomic.h> @@ -26,6 +26,7 @@ #define DRIVER_NAME "d7s" #define PFX DRIVER_NAME ": " +static DEFINE_MUTEX(d7s_mutex); static int sol_compat = 0; /* Solaris compatibility mode */ /* Solaris compatibility flag - @@ -74,7 +75,6 @@ static int d7s_open(struct inode *inode, struct file *f) { if (D7S_MINOR != iminor(inode)) return -ENODEV; - cycle_kernel_lock(); atomic_inc(&d7s_users); return 0; } @@ -110,7 +110,7 @@ static long d7s_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (D7S_MINOR != iminor(file->f_path.dentry->d_inode)) return -ENODEV; - lock_kernel(); + mutex_lock(&d7s_mutex); switch (cmd) { case D7SIOCWR: /* assign device register values we mask-out D7S_FLIP @@ -151,7 +151,7 @@ static long d7s_ioctl(struct file *file, unsigned int cmd, unsigned long arg) writeb(regs, p->regs); break; }; - unlock_kernel(); + mutex_unlock(&d7s_mutex); return error; } @@ -277,12 +277,12 @@ static struct of_platform_driver d7s_driver = { static int __init d7s_init(void) { - return of_register_driver(&d7s_driver, &of_bus_type); + return of_register_platform_driver(&d7s_driver); } static void __exit d7s_exit(void) { - of_unregister_driver(&d7s_driver); + of_unregister_platform_driver(&d7s_driver); } module_init(d7s_init); diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c index c8166ecf527..3c27f45e2b6 100644 --- a/drivers/sbus/char/envctrl.c +++ b/drivers/sbus/char/envctrl.c @@ -27,7 +27,6 @@ #include <linux/kmod.h> #include <linux/reboot.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/of.h> #include <linux/of_device.h> @@ -699,7 +698,6 @@ envctrl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) static int envctrl_open(struct inode *inode, struct file *file) { - cycle_kernel_lock(); file->private_data = NULL; return 0; } @@ -1142,12 +1140,12 @@ static struct of_platform_driver envctrl_driver = { static int __init envctrl_init(void) { - return of_register_driver(&envctrl_driver, &of_bus_type); + return of_register_platform_driver(&envctrl_driver); } static void __exit envctrl_exit(void) { - of_unregister_driver(&envctrl_driver); + of_unregister_platform_driver(&envctrl_driver); } module_init(envctrl_init); diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c index 368d66294d8..8bb31c584b6 100644 --- 
a/drivers/sbus/char/flash.c +++ b/drivers/sbus/char/flash.c @@ -10,7 +10,7 @@ #include <linux/fcntl.h> #include <linux/poll.h> #include <linux/init.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/of.h> @@ -22,6 +22,7 @@ #include <asm/io.h> #include <asm/upa.h> +static DEFINE_MUTEX(flash_mutex); static DEFINE_SPINLOCK(flash_lock); static struct { unsigned long read_base; /* Physical read address */ @@ -80,7 +81,7 @@ flash_mmap(struct file *file, struct vm_area_struct *vma) static long long flash_llseek(struct file *file, long long offset, int origin) { - lock_kernel(); + mutex_lock(&flash_mutex); switch (origin) { case 0: file->f_pos = offset; @@ -94,10 +95,10 @@ flash_llseek(struct file *file, long long offset, int origin) file->f_pos = flash.read_size; break; default: - unlock_kernel(); + mutex_unlock(&flash_mutex); return -EINVAL; } - unlock_kernel(); + mutex_unlock(&flash_mutex); return file->f_pos; } @@ -125,13 +126,13 @@ flash_read(struct file * file, char __user * buf, static int flash_open(struct inode *inode, struct file *file) { - lock_kernel(); + mutex_lock(&flash_mutex); if (test_and_set_bit(0, (void *)&flash.busy) != 0) { - unlock_kernel(); + mutex_unlock(&flash_mutex); return -EBUSY; } - unlock_kernel(); + mutex_unlock(&flash_mutex); return 0; } @@ -218,12 +219,12 @@ static struct of_platform_driver flash_driver = { static int __init flash_init(void) { - return of_register_driver(&flash_driver, &of_bus_type); + return of_register_platform_driver(&flash_driver); } static void __exit flash_cleanup(void) { - of_unregister_driver(&flash_driver); + of_unregister_platform_driver(&flash_driver); } module_init(flash_init); diff --git a/drivers/sbus/char/openprom.c b/drivers/sbus/char/openprom.c index aacbe14e2e7..8d6e508222b 100644 --- a/drivers/sbus/char/openprom.c +++ b/drivers/sbus/char/openprom.c @@ -33,7 +33,7 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/string.h> #include <linux/miscdevice.h> #include <linux/init.h> @@ -61,6 +61,7 @@ typedef struct openprom_private_data } DATA; /* ID of the PROM node containing all of the EEPROM options. 
*/ +static DEFINE_MUTEX(openprom_mutex); static struct device_node *options_node; /* @@ -316,7 +317,7 @@ static long openprom_sunos_ioctl(struct file * file, if (bufsize < 0) return bufsize; - lock_kernel(); + mutex_lock(&openprom_mutex); switch (cmd) { case OPROMGETOPT: @@ -367,7 +368,7 @@ static long openprom_sunos_ioctl(struct file * file, } kfree(opp); - unlock_kernel(); + mutex_unlock(&openprom_mutex); return error; } @@ -558,7 +559,7 @@ static int openprom_bsd_ioctl(struct file * file, void __user *argp = (void __user *)arg; int err; - lock_kernel(); + mutex_lock(&openprom_mutex); switch (cmd) { case OPIOCGET: err = opiocget(argp, data); @@ -589,7 +590,7 @@ static int openprom_bsd_ioctl(struct file * file, err = -EINVAL; break; }; - unlock_kernel(); + mutex_unlock(&openprom_mutex); return err; } @@ -697,11 +698,11 @@ static int openprom_open(struct inode * inode, struct file * file) if (!data) return -ENOMEM; - lock_kernel(); + mutex_lock(&openprom_mutex); data->current_node = of_find_node_by_path("/"); data->lastnode = data->current_node; file->private_data = (void *) data; - unlock_kernel(); + mutex_unlock(&openprom_mutex); return 0; } diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c index 5f253665a1d..41eb6725ff5 100644 --- a/drivers/sbus/char/uctrl.c +++ b/drivers/sbus/char/uctrl.c @@ -9,7 +9,7 @@ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/slab.h> -#include <linux/smp_lock.h> +#include <linux/mutex.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/miscdevice.h> @@ -72,6 +72,7 @@ struct ts102_regs { #define UCTRL_STAT_RXNE_STA 0x04 /* receive FIFO not empty status */ #define UCTRL_STAT_RXO_STA 0x08 /* receive FIFO overflow status */ +static DEFINE_MUTEX(uctrl_mutex); static const char *uctrl_extstatus[16] = { "main power available", "internal battery attached", @@ -210,10 +211,10 @@ uctrl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) static int uctrl_open(struct inode *inode, struct file *file) { - lock_kernel(); + mutex_lock(&uctrl_mutex); uctrl_get_event_status(global_driver); uctrl_get_external_status(global_driver); - unlock_kernel(); + mutex_unlock(&uctrl_mutex); return 0; } @@ -367,7 +368,7 @@ static int __devinit uctrl_probe(struct of_device *op, goto out_free; } - p->irq = op->irqs[0]; + p->irq = op->archdata.irqs[0]; err = request_irq(p->irq, uctrl_interrupt, 0, "uctrl", p); if (err) { printk(KERN_ERR "uctrl: Unable to register irq.\n"); @@ -437,12 +438,12 @@ static struct of_platform_driver uctrl_driver = { static int __init uctrl_init(void) { - return of_register_driver(&uctrl_driver, &of_bus_type); + return of_register_platform_driver(&uctrl_driver); } static void __exit uctrl_exit(void) { - of_unregister_driver(&uctrl_driver); + of_unregister_platform_driver(&uctrl_driver); } module_init(uctrl_init); diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index ca5c15c779c..53d7ed0dc16 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c @@ -729,7 +729,7 @@ static int __devinit qpti_register_irq(struct qlogicpti *qpti) { struct of_device *op = qpti->op; - qpti->qhost->irq = qpti->irq = op->irqs[0]; + qpti->qhost->irq = qpti->irq = op->archdata.irqs[0]; /* We used to try various overly-clever things to * reduce the interrupt processing overhead on @@ -1302,7 +1302,7 @@ static int __devinit qpti_sbus_probe(struct of_device *op, const struct of_devic /* Sometimes Antares cards come up not completely * setup, and we get a report of a zero IRQ. 
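The sbus character drivers in this series (display7seg, envctrl, flash, openprom, uctrl) all follow the same big-kernel-lock removal recipe: drop <linux/smp_lock.h>, add one static DEFINE_MUTEX() per driver, and replace each lock_kernel()/unlock_kernel() pair with it. The shape of the conversion, using a hypothetical example_ioctl():

#include <linux/fs.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);     /* one per driver, replaces the implicit BKL */

long do_example_cmd(unsigned int cmd, unsigned long arg);       /* hypothetical */

static long example_ioctl(struct file *file, unsigned int cmd,
                          unsigned long arg)
{
        long ret;

        mutex_lock(&example_mutex);     /* was: lock_kernel() */
        ret = do_example_cmd(cmd, arg);
        mutex_unlock(&example_mutex);   /* was: unlock_kernel() */

        return ret;
}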
*/ - if (op->irqs[0] == 0) + if (op->archdata.irqs[0] == 0) return -ENODEV; host = scsi_host_alloc(tpnt, sizeof(struct qlogicpti)); @@ -1467,12 +1467,12 @@ static struct of_platform_driver qpti_sbus_driver = { static int __init qpti_init(void) { - return of_register_driver(&qpti_sbus_driver, &of_bus_type); + return of_register_platform_driver(&qpti_sbus_driver); } static void __exit qpti_exit(void) { - of_unregister_driver(&qpti_sbus_driver); + of_unregister_platform_driver(&qpti_sbus_driver); } MODULE_DESCRIPTION("QlogicISP SBUS driver"); diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c index 386dd9d602b..89ba6fe02f8 100644 --- a/drivers/scsi/sun_esp.c +++ b/drivers/scsi/sun_esp.c @@ -116,7 +116,7 @@ static int __devinit esp_sbus_register_irq(struct esp *esp) struct Scsi_Host *host = esp->host; struct of_device *op = esp->dev; - host->irq = op->irqs[0]; + host->irq = op->archdata.irqs[0]; return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp); } @@ -644,12 +644,12 @@ static struct of_platform_driver esp_sbus_driver = { static int __init sunesp_init(void) { - return of_register_driver(&esp_sbus_driver, &of_bus_type); + return of_register_platform_driver(&esp_sbus_driver); } static void __exit sunesp_exit(void) { - of_unregister_driver(&esp_sbus_driver); + of_unregister_platform_driver(&esp_sbus_driver); } MODULE_DESCRIPTION("Sun ESP SCSI driver"); diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index 891e1dd65f2..09ef57034c9 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c @@ -302,7 +302,7 @@ static const struct serial8250_config uart_config[] = { }, }; -#if defined (CONFIG_SERIAL_8250_AU1X00) +#if defined(CONFIG_MIPS_ALCHEMY) /* Au1x00 UART hardware has a weird register layout */ static const u8 au_io_in_map[] = { @@ -422,7 +422,6 @@ static unsigned int mem32_serial_in(struct uart_port *p, int offset) return readl(p->membase + offset); } -#ifdef CONFIG_SERIAL_8250_AU1X00 static unsigned int au_serial_in(struct uart_port *p, int offset) { offset = map_8250_in_reg(p, offset) << p->regshift; @@ -434,7 +433,6 @@ static void au_serial_out(struct uart_port *p, int offset, int value) offset = map_8250_out_reg(p, offset) << p->regshift; __raw_writel(value, p->membase + offset); } -#endif static unsigned int tsi_serial_in(struct uart_port *p, int offset) { @@ -503,12 +501,11 @@ static void set_io_from_upio(struct uart_port *p) p->serial_out = mem32_serial_out; break; -#ifdef CONFIG_SERIAL_8250_AU1X00 case UPIO_AU: p->serial_in = au_serial_in; p->serial_out = au_serial_out; break; -#endif + case UPIO_TSI: p->serial_in = tsi_serial_in; p->serial_out = tsi_serial_out; @@ -535,9 +532,7 @@ serial_out_sync(struct uart_8250_port *up, int offset, int value) switch (p->iotype) { case UPIO_MEM: case UPIO_MEM32: -#ifdef CONFIG_SERIAL_8250_AU1X00 case UPIO_AU: -#endif case UPIO_DWAPB: p->serial_out(p, offset, value); p->serial_in(p, UART_LCR); /* safe, no side-effects */ @@ -573,7 +568,7 @@ static inline void _serial_dl_write(struct uart_8250_port *up, int value) serial_outp(up, UART_DLM, value >> 8 & 0xff); } -#if defined(CONFIG_SERIAL_8250_AU1X00) +#if defined(CONFIG_MIPS_ALCHEMY) /* Au1x00 haven't got a standard divisor latch */ static int serial_dl_read(struct uart_8250_port *up) { @@ -2596,11 +2591,9 @@ static void serial8250_config_port(struct uart_port *port, int flags) if (flags & UART_CONFIG_TYPE) autoconfig(up, probeflags); -#ifdef CONFIG_SERIAL_8250_AU1X00 /* if access method is AU, it is a 16550 with a quirk */ if (up->port.type == PORT_16550A && 
up->port.iotype == UPIO_AU) up->bugs |= UART_BUG_NOMSR; -#endif if (up->port.type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ) autoconfig_irq(up); diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index 8b23165bc5d..8f23eb54f49 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig @@ -258,14 +258,6 @@ config SERIAL_8250_ACORN system, say Y to this option. The driver can handle 1, 2, or 3 port cards. If unsure, say N. -config SERIAL_8250_AU1X00 - bool "Au1x00 serial port support" - depends on SERIAL_8250 != n && SOC_AU1X00 - help - If you have an Au1x00 SOC based board and want to use the serial port, - say Y to this option. The driver can handle up to 4 serial ports, - depending on the SOC. If unsure, say N. - config SERIAL_8250_RM9K bool "Support for MIPS RM9xxx integrated serial port" depends on SERIAL_8250 != n && SERIAL_RM9000 @@ -544,8 +536,8 @@ config SERIAL_S3C6400 config SERIAL_S5PV210 tristate "Samsung S5PV210 Serial port support" - depends on SERIAL_SAMSUNG && (CPU_S5PV210 || CPU_S5P6442) - select SERIAL_SAMSUNG_UARTS_4 if CPU_S5PV210 + depends on SERIAL_SAMSUNG && (CPU_S5PV210 || CPU_S5P6442 || CPU_S5PV310) + select SERIAL_SAMSUNG_UARTS_4 if (CPU_S5PV210 || CPU_S5PV310) default y help Serial port support for Samsung's S5P Family of SoC's diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c index a9a94ae7234..39f9a1adaa7 100644 --- a/drivers/serial/kgdboc.c +++ b/drivers/serial/kgdboc.c @@ -17,6 +17,7 @@ #include <linux/kdb.h> #include <linux/tty.h> #include <linux/console.h> +#include <linux/vt_kern.h> #define MAX_CONFIG_LEN 40 @@ -31,6 +32,7 @@ static struct kparam_string kps = { .maxlen = MAX_CONFIG_LEN, }; +static int kgdboc_use_kms; /* 1 if we use kernel mode switching */ static struct tty_driver *kgdb_tty_driver; static int kgdb_tty_line; @@ -104,6 +106,12 @@ static int configure_kgdboc(void) kgdboc_io_ops.is_console = 0; kgdb_tty_driver = NULL; + kgdboc_use_kms = 0; + if (strncmp(cptr, "kms,", 4) == 0) { + cptr += 4; + kgdboc_use_kms = 1; + } + if (kgdboc_register_kbd(&cptr)) goto do_register; @@ -201,8 +209,14 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp) return configure_kgdboc(); } +static int dbg_restore_graphics; + static void kgdboc_pre_exp_handler(void) { + if (!dbg_restore_graphics && kgdboc_use_kms) { + dbg_restore_graphics = 1; + con_debug_enter(vc_cons[fg_console].d); + } /* Increment the module count when the debugger is active */ if (!kgdb_connected) try_module_get(THIS_MODULE); @@ -213,6 +227,10 @@ static void kgdboc_post_exp_handler(void) /* decrement the module count when the debugger detaches */ if (!kgdb_connected) module_put(THIS_MODULE); + if (kgdboc_use_kms && dbg_restore_graphics) { + dbg_restore_graphics = 0; + con_debug_leave(); + } } static struct kgdb_io kgdboc_io_ops = { diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c index 84a35f69901..1a88b363005 100644 --- a/drivers/serial/mpc52xx_uart.c +++ b/drivers/serial/mpc52xx_uart.c @@ -113,7 +113,9 @@ struct psc_ops { unsigned char (*read_char)(struct uart_port *port); void (*cw_disable_ints)(struct uart_port *port); void (*cw_restore_ints)(struct uart_port *port); - unsigned long (*getuartclk)(void *p); + unsigned int (*set_baudrate)(struct uart_port *port, + struct ktermios *new, + struct ktermios *old); int (*clock)(struct uart_port *port, int enable); int (*fifoc_init)(void); void (*fifoc_uninit)(void); @@ -121,6 +123,16 @@ struct psc_ops { irqreturn_t (*handle_irq)(struct uart_port *port); }; +/* setting 
the prescaler and divisor reg is common for all chips */ +static inline void mpc52xx_set_divisor(struct mpc52xx_psc __iomem *psc, + u16 prescaler, unsigned int divisor) +{ + /* select prescaler */ + out_be16(&psc->mpc52xx_psc_clock_select, prescaler); + out_8(&psc->ctur, divisor >> 8); + out_8(&psc->ctlr, divisor & 0xff); +} + #ifdef CONFIG_PPC_MPC52xx #define FIFO_52xx(port) ((struct mpc52xx_psc_fifo __iomem *)(PSC(port)+1)) static void mpc52xx_psc_fifo_init(struct uart_port *port) @@ -128,9 +140,6 @@ static void mpc52xx_psc_fifo_init(struct uart_port *port) struct mpc52xx_psc __iomem *psc = PSC(port); struct mpc52xx_psc_fifo __iomem *fifo = FIFO_52xx(port); - /* /32 prescaler */ - out_be16(&psc->mpc52xx_psc_clock_select, 0xdd00); - out_8(&fifo->rfcntl, 0x00); out_be16(&fifo->rfalarm, 0x1ff); out_8(&fifo->tfcntl, 0x07); @@ -219,15 +228,47 @@ static void mpc52xx_psc_cw_restore_ints(struct uart_port *port) out_be16(&PSC(port)->mpc52xx_psc_imr, port->read_status_mask); } -/* Search for bus-frequency property in this node or a parent */ -static unsigned long mpc52xx_getuartclk(void *p) +static unsigned int mpc5200_psc_set_baudrate(struct uart_port *port, + struct ktermios *new, + struct ktermios *old) { - /* - * 5200 UARTs have a / 32 prescaler - * but the generic serial code assumes 16 - * so return ipb freq / 2 - */ - return mpc5xxx_get_bus_frequency(p) / 2; + unsigned int baud; + unsigned int divisor; + + /* The 5200 has a fixed /32 prescaler, uartclk contains the ipb freq */ + baud = uart_get_baud_rate(port, new, old, + port->uartclk / (32 * 0xffff) + 1, + port->uartclk / 32); + divisor = (port->uartclk + 16 * baud) / (32 * baud); + + /* enable the /32 prescaler and set the divisor */ + mpc52xx_set_divisor(PSC(port), 0xdd00, divisor); + return baud; +} + +static unsigned int mpc5200b_psc_set_baudrate(struct uart_port *port, + struct ktermios *new, + struct ktermios *old) +{ + unsigned int baud; + unsigned int divisor; + u16 prescaler; + + /* The 5200B has a selectable /4 or /32 prescaler, uartclk contains the + * ipb freq */ + baud = uart_get_baud_rate(port, new, old, + port->uartclk / (32 * 0xffff) + 1, + port->uartclk / 4); + divisor = (port->uartclk + 2 * baud) / (4 * baud); + + /* select the proper prescaler and set the divisor */ + if (divisor > 0xffff) { + divisor = (divisor + 4) / 8; + prescaler = 0xdd00; /* /32 */ + } else + prescaler = 0xff00; /* /4 */ + mpc52xx_set_divisor(PSC(port), prescaler, divisor); + return baud; } static void mpc52xx_psc_get_irq(struct uart_port *port, struct device_node *np) @@ -258,7 +299,28 @@ static struct psc_ops mpc52xx_psc_ops = { .read_char = mpc52xx_psc_read_char, .cw_disable_ints = mpc52xx_psc_cw_disable_ints, .cw_restore_ints = mpc52xx_psc_cw_restore_ints, - .getuartclk = mpc52xx_getuartclk, + .set_baudrate = mpc5200_psc_set_baudrate, + .get_irq = mpc52xx_psc_get_irq, + .handle_irq = mpc52xx_psc_handle_irq, +}; + +static struct psc_ops mpc5200b_psc_ops = { + .fifo_init = mpc52xx_psc_fifo_init, + .raw_rx_rdy = mpc52xx_psc_raw_rx_rdy, + .raw_tx_rdy = mpc52xx_psc_raw_tx_rdy, + .rx_rdy = mpc52xx_psc_rx_rdy, + .tx_rdy = mpc52xx_psc_tx_rdy, + .tx_empty = mpc52xx_psc_tx_empty, + .stop_rx = mpc52xx_psc_stop_rx, + .start_tx = mpc52xx_psc_start_tx, + .stop_tx = mpc52xx_psc_stop_tx, + .rx_clr_irq = mpc52xx_psc_rx_clr_irq, + .tx_clr_irq = mpc52xx_psc_tx_clr_irq, + .write_char = mpc52xx_psc_write_char, + .read_char = mpc52xx_psc_read_char, + .cw_disable_ints = mpc52xx_psc_cw_disable_ints, + .cw_restore_ints = mpc52xx_psc_cw_restore_ints, + .set_baudrate = 
mpc5200b_psc_set_baudrate, .get_irq = mpc52xx_psc_get_irq, .handle_irq = mpc52xx_psc_handle_irq, }; @@ -392,9 +454,35 @@ static void mpc512x_psc_cw_restore_ints(struct uart_port *port) out_be32(&FIFO_512x(port)->rximr, port->read_status_mask & 0x7f); } -static unsigned long mpc512x_getuartclk(void *p) +static unsigned int mpc512x_psc_set_baudrate(struct uart_port *port, + struct ktermios *new, + struct ktermios *old) { - return mpc5xxx_get_bus_frequency(p); + unsigned int baud; + unsigned int divisor; + + /* + * The "MPC5121e Microcontroller Reference Manual, Rev. 3" says on + * pg. 30-10 that the chip supports a /32 and a /10 prescaler. + * Furthermore, it states that "After reset, the prescaler by 10 + * for the UART mode is selected", but the reset register value is + * 0x0000 which means a /32 prescaler. This is wrong. + * + * In reality using /32 prescaler doesn't work, as it is not supported! + * Use /16 or /10 prescaler, see "MPC5121e Hardware Design Guide", + * Chapter 4.1 PSC in UART Mode. + * Calculate with a /16 prescaler here. + */ + + /* uartclk contains the ips freq */ + baud = uart_get_baud_rate(port, new, old, + port->uartclk / (16 * 0xffff) + 1, + port->uartclk / 16); + divisor = (port->uartclk + 8 * baud) / (16 * baud); + + /* enable the /16 prescaler and set the divisor */ + mpc52xx_set_divisor(PSC(port), 0xdd00, divisor); + return baud; } /* Init PSC FIFO Controller */ @@ -498,7 +586,7 @@ static struct psc_ops mpc512x_psc_ops = { .read_char = mpc512x_psc_read_char, .cw_disable_ints = mpc512x_psc_cw_disable_ints, .cw_restore_ints = mpc512x_psc_cw_restore_ints, - .getuartclk = mpc512x_getuartclk, + .set_baudrate = mpc512x_psc_set_baudrate, .clock = mpc512x_psc_clock, .fifoc_init = mpc512x_psc_fifoc_init, .fifoc_uninit = mpc512x_psc_fifoc_uninit, @@ -666,8 +754,8 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new, struct mpc52xx_psc __iomem *psc = PSC(port); unsigned long flags; unsigned char mr1, mr2; - unsigned short ctr; - unsigned int j, baud, quot; + unsigned int j; + unsigned int baud; /* Prepare what we're gonna write */ mr1 = 0; @@ -704,16 +792,9 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new, mr2 |= MPC52xx_PSC_MODE_TXCTS; } - baud = uart_get_baud_rate(port, new, old, 0, port->uartclk/16); - quot = uart_get_divisor(port, baud); - ctr = quot & 0xffff; - /* Get the lock */ spin_lock_irqsave(&port->lock, flags); - /* Update the per-port timeout */ - uart_update_timeout(port, new->c_cflag, baud); - /* Do our best to flush TX & RX, so we don't lose anything */ /* But we don't wait indefinitely ! 
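The set_baudrate() implementations above (the 5200 uses a fixed /32 prescaler, the 5200B selects /4 or /32, the 512x uses /16) all reduce to the same arithmetic: program the counter with uartclk / (prescaler * baud) rounded to the nearest integer. A plain-C sketch of that calculation; the 132 MHz clock in the example is illustrative only:

#include <stdio.h>

/* divisor = round(uartclk / (prescale * baud)), done in integer math
 * by adding half the denominator before dividing */
static unsigned int psc_divisor(unsigned int uartclk, unsigned int prescale,
                                unsigned int baud)
{
        return (uartclk + (prescale / 2) * baud) / (prescale * baud);
}

int main(void)
{
        /* 5200-style /32 prescaler, 132 MHz input clock, 115200 baud */
        printf("%u\n", psc_divisor(132000000, 32, 115200));    /* prints 36 */
        return 0;
}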
*/ j = 5000000; /* Maximum wait */ @@ -737,8 +818,10 @@ mpc52xx_uart_set_termios(struct uart_port *port, struct ktermios *new, out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1); out_8(&psc->mode, mr1); out_8(&psc->mode, mr2); - out_8(&psc->ctur, ctr >> 8); - out_8(&psc->ctlr, ctr & 0xff); + baud = psc_ops->set_baudrate(port, new, old); + + /* Update the per-port timeout */ + uart_update_timeout(port, new->c_cflag, baud); if (UART_ENABLE_MS(port, new->c_cflag)) mpc52xx_uart_enable_ms(port); @@ -1118,7 +1201,7 @@ mpc52xx_console_setup(struct console *co, char *options) return ret; } - uartclk = psc_ops->getuartclk(np); + uartclk = mpc5xxx_get_bus_frequency(np); if (uartclk == 0) { pr_debug("Could not find uart clock frequency!\n"); return -EINVAL; @@ -1201,6 +1284,7 @@ static struct uart_driver mpc52xx_uart_driver = { static struct of_device_id mpc52xx_uart_of_match[] = { #ifdef CONFIG_PPC_MPC52xx + { .compatible = "fsl,mpc5200b-psc-uart", .data = &mpc5200b_psc_ops, }, { .compatible = "fsl,mpc5200-psc-uart", .data = &mpc52xx_psc_ops, }, /* binding used by old lite5200 device trees: */ { .compatible = "mpc5200-psc-uart", .data = &mpc52xx_psc_ops, }, @@ -1233,7 +1317,10 @@ mpc52xx_uart_of_probe(struct of_device *op, const struct of_device_id *match) pr_debug("Found %s assigned to ttyPSC%x\n", mpc52xx_uart_nodes[idx]->full_name, idx); - uartclk = psc_ops->getuartclk(op->dev.of_node); + /* set the uart clock to the input clock of the psc, the different + * prescalers are taken into account in the set_baudrate() methods + * of the respective chip */ + uartclk = mpc5xxx_get_bus_frequency(op->dev.of_node); if (uartclk == 0) { dev_dbg(&op->dev, "Could not find uart clock frequency!\n"); return -EINVAL; diff --git a/drivers/serial/s5pv210.c b/drivers/serial/s5pv210.c index 4a789e5361a..6ebccd70a70 100644 --- a/drivers/serial/s5pv210.c +++ b/drivers/serial/s5pv210.c @@ -28,8 +28,12 @@ static int s5pv210_serial_setsource(struct uart_port *port, struct s3c24xx_uart_clksrc *clk) { + struct s3c2410_uartcfg *cfg = port->dev->platform_data; unsigned long ucon = rd_regl(port, S3C2410_UCON); + if ((cfg->clocks_size) == 1) + return 0; + if (strcmp(clk->name, "pclk") == 0) ucon &= ~S5PV210_UCON_CLKMASK; else if (strcmp(clk->name, "uclk1") == 0) @@ -47,10 +51,14 @@ static int s5pv210_serial_setsource(struct uart_port *port, static int s5pv210_serial_getsource(struct uart_port *port, struct s3c24xx_uart_clksrc *clk) { + struct s3c2410_uartcfg *cfg = port->dev->platform_data; u32 ucon = rd_regl(port, S3C2410_UCON); clk->divisor = 1; + if ((cfg->clocks_size) == 1) + return 0; + switch (ucon & S5PV210_UCON_CLKMASK) { case S5PV210_UCON_PCLK: clk->name = "pclk"; diff --git a/drivers/serial/samsung.c b/drivers/serial/samsung.c index a9d6c5626a0..b1156ba8ad1 100644 --- a/drivers/serial/samsung.c +++ b/drivers/serial/samsung.c @@ -705,8 +705,13 @@ static void s3c24xx_serial_set_termios(struct uart_port *port, if (ourport->info->has_divslot) { unsigned int div = ourport->baudclk_rate / baud; - udivslot = udivslot_table[div & 15]; - dbg("udivslot = %04x (div %d)\n", udivslot, div & 15); + if (cfg->has_fracval) { + udivslot = (div & 15); + dbg("fracval = %04x\n", udivslot); + } else { + udivslot = udivslot_table[div & 15]; + dbg("udivslot = %04x (div %d)\n", udivslot, div & 15); + } } switch (termios->c_cflag & CSIZE) { diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index 5f90fcd7d10..c291b3add1d 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c @@ -346,6 +346,27 @@ static int 
scif_rxfill(struct uart_port *port) return sci_in(port, SCFDR) & SCIF2_RFDC_MASK; } } +#elif defined(CONFIG_ARCH_SH7372) +static int scif_txfill(struct uart_port *port) +{ + if (port->type == PORT_SCIFA) + return sci_in(port, SCFDR) >> 8; + else + return sci_in(port, SCTFDR); +} + +static int scif_txroom(struct uart_port *port) +{ + return port->fifosize - scif_txfill(port); +} + +static int scif_rxfill(struct uart_port *port) +{ + if (port->type == PORT_SCIFA) + return sci_in(port, SCFDR) & SCIF_RFDC_MASK; + else + return sci_in(port, SCRFDR); +} #else static int scif_txfill(struct uart_port *port) { @@ -683,7 +704,7 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr) u16 ssr = sci_in(port, SCxSR); /* Disable future Rx interrupts */ - if (port->type == PORT_SCIFA) { + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { disable_irq_nosync(irq); scr |= 0x4000; } else { @@ -928,7 +949,7 @@ static void sci_dma_tx_complete(void *arg) if (!uart_circ_empty(xmit)) { schedule_work(&s->work_tx); - } else if (port->type == PORT_SCIFA) { + } else if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { u16 ctrl = sci_in(port, SCSCR); sci_out(port, SCSCR, ctrl & ~SCI_CTRL_FLAGS_TIE); } @@ -1184,7 +1205,7 @@ static void sci_start_tx(struct uart_port *port) unsigned short ctrl; #ifdef CONFIG_SERIAL_SH_SCI_DMA - if (port->type == PORT_SCIFA) { + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { u16 new, scr = sci_in(port, SCSCR); if (s->chan_tx) new = scr | 0x8000; @@ -1197,7 +1218,7 @@ static void sci_start_tx(struct uart_port *port) s->cookie_tx < 0) schedule_work(&s->work_tx); #endif - if (!s->chan_tx || port->type == PORT_SCIFA) { + if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) { /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */ ctrl = sci_in(port, SCSCR); sci_out(port, SCSCR, ctrl | SCI_CTRL_FLAGS_TIE); @@ -1210,7 +1231,7 @@ static void sci_stop_tx(struct uart_port *port) /* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */ ctrl = sci_in(port, SCSCR); - if (port->type == PORT_SCIFA) + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) ctrl &= ~0x8000; ctrl &= ~SCI_CTRL_FLAGS_TIE; sci_out(port, SCSCR, ctrl); @@ -1222,7 +1243,7 @@ static void sci_start_rx(struct uart_port *port) /* Set RIE (Receive Interrupt Enable) bit in SCSCR */ ctrl |= sci_in(port, SCSCR); - if (port->type == PORT_SCIFA) + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) ctrl &= ~0x4000; sci_out(port, SCSCR, ctrl); } @@ -1233,7 +1254,7 @@ static void sci_stop_rx(struct uart_port *port) /* Clear RIE (Receive Interrupt Enable) bit in SCSCR */ ctrl = sci_in(port, SCSCR); - if (port->type == PORT_SCIFA) + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) ctrl &= ~0x4000; ctrl &= ~(SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE); sci_out(port, SCSCR, ctrl); @@ -1271,7 +1292,7 @@ static void rx_timer_fn(unsigned long arg) struct uart_port *port = &s->port; u16 scr = sci_in(port, SCSCR); - if (port->type == PORT_SCIFA) { + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { scr &= ~0x4000; enable_irq(s->irqs[1]); } @@ -1524,6 +1545,8 @@ static const char *sci_type(struct uart_port *port) return "scif"; case PORT_SCIFA: return "scifa"; + case PORT_SCIFB: + return "scifb"; } return NULL; @@ -1612,6 +1635,9 @@ static int __devinit sci_init_single(struct platform_device *dev, port->line = index; switch (p->type) { + case PORT_SCIFB: + port->fifosize = 256; + break; case PORT_SCIFA: port->fifosize = 64; break; diff --git a/drivers/serial/sh-sci.h 
b/drivers/serial/sh-sci.h index f70c49f915f..9b52f77a930 100644 --- a/drivers/serial/sh-sci.h +++ b/drivers/serial/sh-sci.h @@ -322,7 +322,7 @@ #define CPU_SCIx_FNS(name, sci_offset, sci_size, scif_offset, scif_size)\ static inline unsigned int sci_##name##_in(struct uart_port *port) \ { \ - if (port->type == PORT_SCIF) { \ + if (port->type == PORT_SCIF || port->type == PORT_SCIFB) { \ SCI_IN(scif_size, scif_offset) \ } else { /* PORT_SCI or PORT_SCIFA */ \ SCI_IN(sci_size, sci_offset); \ @@ -330,7 +330,7 @@ } \ static inline void sci_##name##_out(struct uart_port *port, unsigned int value) \ { \ - if (port->type == PORT_SCIF) { \ + if (port->type == PORT_SCIF || port->type == PORT_SCIFB) { \ SCI_OUT(scif_size, scif_offset, value) \ } else { /* PORT_SCI or PORT_SCIFA */ \ SCI_OUT(sci_size, sci_offset, value); \ @@ -384,8 +384,12 @@ defined(CONFIG_CPU_SUBTYPE_SH7720) || \ defined(CONFIG_CPU_SUBTYPE_SH7721) || \ defined(CONFIG_ARCH_SH7367) || \ - defined(CONFIG_ARCH_SH7377) || \ - defined(CONFIG_ARCH_SH7372) + defined(CONFIG_ARCH_SH7377) +#define SCIF_FNS(name, scif_offset, scif_size) \ + CPU_SCIF_FNS(name, scif_offset, scif_size) +#elif defined(CONFIG_ARCH_SH7372) +#define SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scifb_offset, sh4_scifb_size) \ + CPU_SCIx_FNS(name, sh4_scifa_offset, sh4_scifa_size, sh4_scifb_offset, sh4_scifb_size) #define SCIF_FNS(name, scif_offset, scif_size) \ CPU_SCIF_FNS(name, scif_offset, scif_size) #else @@ -422,8 +426,7 @@ defined(CONFIG_CPU_SUBTYPE_SH7720) || \ defined(CONFIG_CPU_SUBTYPE_SH7721) || \ defined(CONFIG_ARCH_SH7367) || \ - defined(CONFIG_ARCH_SH7377) || \ - defined(CONFIG_ARCH_SH7372) + defined(CONFIG_ARCH_SH7377) SCIF_FNS(SCSMR, 0x00, 16) SCIF_FNS(SCBRR, 0x04, 8) @@ -436,6 +439,20 @@ SCIF_FNS(SCFDR, 0x1c, 16) SCIF_FNS(SCxTDR, 0x20, 8) SCIF_FNS(SCxRDR, 0x24, 8) SCIF_FNS(SCLSR, 0x00, 0) +#elif defined(CONFIG_ARCH_SH7372) +SCIF_FNS(SCSMR, 0x00, 16) +SCIF_FNS(SCBRR, 0x04, 8) +SCIF_FNS(SCSCR, 0x08, 16) +SCIF_FNS(SCTDSR, 0x0c, 16) +SCIF_FNS(SCFER, 0x10, 16) +SCIF_FNS(SCxSR, 0x14, 16) +SCIF_FNS(SCFCR, 0x18, 16) +SCIF_FNS(SCFDR, 0x1c, 16) +SCIF_FNS(SCTFDR, 0x38, 16) +SCIF_FNS(SCRFDR, 0x3c, 16) +SCIx_FNS(SCxTDR, 0x20, 8, 0x40, 8) +SCIx_FNS(SCxRDR, 0x24, 8, 0x60, 8) +SCIF_FNS(SCLSR, 0x00, 0) #elif defined(CONFIG_CPU_SUBTYPE_SH7723) ||\ defined(CONFIG_CPU_SUBTYPE_SH7724) SCIx_FNS(SCSMR, 0x00, 16, 0x00, 16) diff --git a/drivers/serial/sunhv.c b/drivers/serial/sunhv.c index 890f9174296..a779e22d213 100644 --- a/drivers/serial/sunhv.c +++ b/drivers/serial/sunhv.c @@ -525,7 +525,7 @@ static int __devinit hv_probe(struct of_device *op, const struct of_device_id *m unsigned long minor; int err; - if (op->irqs[0] == 0xffffffff) + if (op->archdata.irqs[0] == 0xffffffff) return -ENODEV; port = kzalloc(sizeof(struct uart_port), GFP_KERNEL); @@ -557,7 +557,7 @@ static int __devinit hv_probe(struct of_device *op, const struct of_device_id *m port->membase = (unsigned char __iomem *) __pa(port); - port->irq = op->irqs[0]; + port->irq = op->archdata.irqs[0]; port->dev = &op->dev; @@ -644,12 +644,12 @@ static int __init sunhv_init(void) if (tlb_type != hypervisor) return -ENODEV; - return of_register_driver(&hv_driver, &of_bus_type); + return of_register_platform_driver(&hv_driver); } static void __exit sunhv_exit(void) { - of_unregister_driver(&hv_driver); + of_unregister_platform_driver(&hv_driver); } module_init(sunhv_init); diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c index 5e81bc6b48b..9845fb1cfb1 100644 --- a/drivers/serial/sunsab.c +++ 
b/drivers/serial/sunsab.c @@ -969,7 +969,7 @@ static int __devinit sunsab_init_one(struct uart_sunsab_port *up, return -ENOMEM; up->regs = (union sab82532_async_regs __iomem *) up->port.membase; - up->port.irq = op->irqs[0]; + up->port.irq = op->archdata.irqs[0]; up->port.fifosize = SAB82532_XMIT_FIFO_SIZE; up->port.iotype = UPIO_MEM; @@ -1130,12 +1130,12 @@ static int __init sunsab_init(void) } } - return of_register_driver(&sab_driver, &of_bus_type); + return of_register_platform_driver(&sab_driver); } static void __exit sunsab_exit(void) { - of_unregister_driver(&sab_driver); + of_unregister_platform_driver(&sab_driver); if (sunsab_reg.nr) { sunserial_unregister_minors(&sunsab_reg, sunsab_reg.nr); } diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c index ffbf4553f66..3cdf74822db 100644 --- a/drivers/serial/sunsu.c +++ b/drivers/serial/sunsu.c @@ -1443,7 +1443,7 @@ static int __devinit su_probe(struct of_device *op, const struct of_device_id *m return -ENOMEM; } - up->port.irq = op->irqs[0]; + up->port.irq = op->archdata.irqs[0]; up->port.dev = &op->dev; @@ -1586,7 +1586,7 @@ static int __init sunsu_init(void) return err; } - err = of_register_driver(&su_driver, &of_bus_type); + err = of_register_platform_driver(&su_driver); if (err && num_uart) sunserial_unregister_minors(&sunsu_reg, num_uart); diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c index f9a24f4ebb3..d1e6bcb5954 100644 --- a/drivers/serial/sunzilog.c +++ b/drivers/serial/sunzilog.c @@ -1426,7 +1426,7 @@ static int __devinit zs_probe(struct of_device *op, const struct of_device_id *m rp = sunzilog_chip_regs[inst]; if (zilog_irq == -1) - zilog_irq = op->irqs[0]; + zilog_irq = op->archdata.irqs[0]; up = &sunzilog_port_table[inst * 2]; @@ -1434,7 +1434,7 @@ static int __devinit zs_probe(struct of_device *op, const struct of_device_id *m up[0].port.mapbase = op->resource[0].start + 0x00; up[0].port.membase = (void __iomem *) &rp->channelA; up[0].port.iotype = UPIO_MEM; - up[0].port.irq = op->irqs[0]; + up[0].port.irq = op->archdata.irqs[0]; up[0].port.uartclk = ZS_CLOCK; up[0].port.fifosize = 1; up[0].port.ops = &sunzilog_pops; @@ -1451,7 +1451,7 @@ static int __devinit zs_probe(struct of_device *op, const struct of_device_id *m up[1].port.mapbase = op->resource[0].start + 0x04; up[1].port.membase = (void __iomem *) &rp->channelB; up[1].port.iotype = UPIO_MEM; - up[1].port.irq = op->irqs[0]; + up[1].port.irq = op->archdata.irqs[0]; up[1].port.uartclk = ZS_CLOCK; up[1].port.fifosize = 1; up[1].port.ops = &sunzilog_pops; @@ -1492,12 +1492,12 @@ static int __devinit zs_probe(struct of_device *op, const struct of_device_id *m "is a %s\n", dev_name(&op->dev), (unsigned long long) up[0].port.mapbase, - op->irqs[0], sunzilog_type(&up[0].port)); + op->archdata.irqs[0], sunzilog_type(&up[0].port)); printk(KERN_INFO "%s: Mouse at MMIO 0x%llx (irq = %d) " "is a %s\n", dev_name(&op->dev), (unsigned long long) up[1].port.mapbase, - op->irqs[0], sunzilog_type(&up[1].port)); + op->archdata.irqs[0], sunzilog_type(&up[1].port)); kbm_inst++; } @@ -1576,7 +1576,7 @@ static int __init sunzilog_init(void) goto out_free_tables; } - err = of_register_driver(&zs_driver, &of_bus_type); + err = of_register_platform_driver(&zs_driver); if (err) goto out_unregister_uart; @@ -1604,7 +1604,7 @@ out: return err; out_unregister_driver: - of_unregister_driver(&zs_driver); + of_unregister_platform_driver(&zs_driver); out_unregister_uart: if (num_sunzilog) { @@ -1619,7 +1619,7 @@ out_free_tables: static void __exit sunzilog_exit(void) { 
- of_unregister_driver(&zs_driver); + of_unregister_platform_driver(&zs_driver); if (zilog_irq != -1) { struct uart_sunzilog_port *up = sunzilog_irq_chain; diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c index 8acccd56437..caf085d3a76 100644 --- a/drivers/serial/uartlite.c +++ b/drivers/serial/uartlite.c @@ -21,6 +21,7 @@ #include <asm/io.h> #if defined(CONFIG_OF) && (defined(CONFIG_PPC32) || defined(CONFIG_MICROBLAZE)) #include <linux/of.h> +#include <linux/of_address.h> #include <linux/of_device.h> #include <linux/of_platform.h> diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile index 78bb5127abd..08fc653a825 100644 --- a/drivers/sh/Makefile +++ b/drivers/sh/Makefile @@ -1,9 +1,10 @@ # # Makefile for the SuperH specific drivers. # +obj-y := clk.o intc.o + obj-$(CONFIG_SUPERHYWAY) += superhyway/ obj-$(CONFIG_MAPLE) += maple/ + obj-$(CONFIG_GENERIC_GPIO) += pfc.o -obj-$(CONFIG_SUPERH) += clk.o obj-$(CONFIG_SH_CLK_CPG) += clk-cpg.o -obj-y += intc.o diff --git a/drivers/sh/clk-cpg.c b/drivers/sh/clk-cpg.c index f5c80ba9ab1..8c024b984ed 100644 --- a/drivers/sh/clk-cpg.c +++ b/drivers/sh/clk-cpg.c @@ -68,6 +68,39 @@ static unsigned long sh_clk_div6_recalc(struct clk *clk) return clk->freq_table[idx].frequency; } +static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent) +{ + struct clk_div_mult_table *table = &sh_clk_div6_table; + u32 value; + int ret, i; + + if (!clk->parent_table || !clk->parent_num) + return -EINVAL; + + /* Search the parent */ + for (i = 0; i < clk->parent_num; i++) + if (clk->parent_table[i] == parent) + break; + + if (i == clk->parent_num) + return -ENODEV; + + ret = clk_reparent(clk, parent); + if (ret < 0) + return ret; + + value = __raw_readl(clk->enable_reg) & + ~(((1 << clk->src_width) - 1) << clk->src_shift); + + __raw_writel(value | (i << clk->src_shift), clk->enable_reg); + + /* Rebuild the frequency table */ + clk_rate_table_build(clk, clk->freq_table, table->nr_divisors, + table, &clk->arch_flags); + + return 0; +} + static int sh_clk_div6_set_rate(struct clk *clk, unsigned long rate, int algo_id) { @@ -117,7 +150,17 @@ static struct clk_ops sh_clk_div6_clk_ops = { .disable = sh_clk_div6_disable, }; -int __init sh_clk_div6_register(struct clk *clks, int nr) +static struct clk_ops sh_clk_div6_reparent_clk_ops = { + .recalc = sh_clk_div6_recalc, + .round_rate = sh_clk_div_round_rate, + .set_rate = sh_clk_div6_set_rate, + .enable = sh_clk_div6_enable, + .disable = sh_clk_div6_disable, + .set_parent = sh_clk_div6_set_parent, +}; + +static int __init sh_clk_div6_register_ops(struct clk *clks, int nr, + struct clk_ops *ops) { struct clk *clkp; void *freq_table; @@ -136,7 +179,7 @@ int __init sh_clk_div6_register(struct clk *clks, int nr) for (k = 0; !ret && (k < nr); k++) { clkp = clks + k; - clkp->ops = &sh_clk_div6_clk_ops; + clkp->ops = ops; clkp->id = -1; clkp->freq_table = freq_table + (k * freq_table_size); clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END; @@ -147,6 +190,17 @@ int __init sh_clk_div6_register(struct clk *clks, int nr) return ret; } +int __init sh_clk_div6_register(struct clk *clks, int nr) +{ + return sh_clk_div6_register_ops(clks, nr, &sh_clk_div6_clk_ops); +} + +int __init sh_clk_div6_reparent_register(struct clk *clks, int nr) +{ + return sh_clk_div6_register_ops(clks, nr, + &sh_clk_div6_reparent_clk_ops); +} + static unsigned long sh_clk_div4_recalc(struct clk *clk) { struct clk_div4_table *d4t = clk->priv; diff --git a/drivers/spi/mpc512x_psc_spi.c b/drivers/spi/mpc512x_psc_spi.c index 
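The new sh_clk_div6_set_parent() above is essentially a read-modify-write of a source-select bitfield: clear src_width bits at src_shift in the enable register, write the index of the chosen parent, then rebuild the frequency table. The field update in isolation:

#include <stdint.h>

/* replace the 'width'-bit field at 'shift' in reg with 'index' */
static uint32_t set_src_field(uint32_t reg, unsigned int shift,
                              unsigned int width, unsigned int index)
{
        uint32_t mask = ((1u << width) - 1) << shift;

        return (reg & ~mask) | ((uint32_t)index << shift);
}

/* e.g. set_src_field(0xc3, 6, 2, 1) == 0x43 */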
2534b1ec3ed..10baac3f8ea 100644 --- a/drivers/spi/mpc512x_psc_spi.c +++ b/drivers/spi/mpc512x_psc_spi.c @@ -19,6 +19,7 @@ #include <linux/init.h> #include <linux/errno.h> #include <linux/interrupt.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/workqueue.h> #include <linux/completion.h> @@ -440,6 +441,7 @@ static int __init mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr, master->setup = mpc512x_psc_spi_setup; master->transfer = mpc512x_psc_spi_transfer; master->cleanup = mpc512x_psc_spi_cleanup; + master->dev.of_node = dev->of_node; tempp = ioremap(regaddr, size); if (!tempp) { diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/mpc52xx_psc_spi.c index 7104cb739da..66d170147dc 100644 --- a/drivers/spi/mpc52xx_psc_spi.c +++ b/drivers/spi/mpc52xx_psc_spi.c @@ -16,8 +16,8 @@ #include <linux/types.h> #include <linux/errno.h> #include <linux/interrupt.h> +#include <linux/of_address.h> #include <linux/of_platform.h> -#include <linux/of_spi.h> #include <linux/workqueue.h> #include <linux/completion.h> #include <linux/io.h> @@ -398,6 +398,7 @@ static int __init mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr, master->setup = mpc52xx_psc_spi_setup; master->transfer = mpc52xx_psc_spi_transfer; master->cleanup = mpc52xx_psc_spi_cleanup; + master->dev.of_node = dev->of_node; mps->psc = ioremap(regaddr, size); if (!mps->psc) { @@ -470,7 +471,6 @@ static int __init mpc52xx_psc_spi_of_probe(struct of_device *op, const u32 *regaddr_p; u64 regaddr64, size64; s16 id = -1; - int rc; regaddr_p = of_get_address(op->dev.of_node, 0, &size64, NULL); if (!regaddr_p) { @@ -491,13 +491,8 @@ static int __init mpc52xx_psc_spi_of_probe(struct of_device *op, id = *psc_nump + 1; } - rc = mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64, + return mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64, irq_of_parse_and_map(op->dev.of_node, 0), id); - if (rc == 0) - of_register_spi_devices(dev_get_drvdata(&op->dev), - op->dev.of_node); - - return rc; } static int __exit mpc52xx_psc_spi_of_remove(struct of_device *op) diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/mpc52xx_spi.c index b1a76bff775..56136ff00e0 100644 --- a/drivers/spi/mpc52xx_spi.c +++ b/drivers/spi/mpc52xx_spi.c @@ -18,7 +18,6 @@ #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/spi/spi.h> -#include <linux/of_spi.h> #include <linux/io.h> #include <linux/of_gpio.h> #include <linux/slab.h> @@ -439,6 +438,7 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op, master->setup = mpc52xx_spi_setup; master->transfer = mpc52xx_spi_transfer; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST; + master->dev.of_node = op->dev.of_node; dev_set_drvdata(&op->dev, master); @@ -512,7 +512,6 @@ static int __devinit mpc52xx_spi_probe(struct of_device *op, if (rc) goto err_register; - of_register_spi_devices(master, op->dev.of_node); dev_info(&ms->master->dev, "registered MPC5200 SPI bus\n"); return rc; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index b3a1f9259b6..1bb1b88780c 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -26,6 +26,7 @@ #include <linux/slab.h> #include <linux/mod_devicetable.h> #include <linux/spi/spi.h> +#include <linux/of_spi.h> /* SPI bustype and spi_master class are registered after board init code @@ -540,6 +541,9 @@ int spi_register_master(struct spi_master *master) /* populate children from any spi device tables */ scan_boardinfo(master); status = 0; + + /* Register devices from the device tree */ + 
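/*
 * After this change the SPI core walks the device tree itself: an OF master
 * driver only has to point master->dev.of_node at its node before calling
 * spi_register_master(), and the per-driver of_register_spi_devices(master,
 * np) calls removed from the individual drivers in this diff are no longer
 * needed.  A minimal sketch of an OF master probe under the new scheme
 * (illustrative only, error handling omitted):
 *
 *	master = spi_alloc_master(&op->dev, sizeof(*hw));
 *	if (!master)
 *		return -ENOMEM;
 *	master->dev.of_node = op->dev.of_node;
 *	...
 *	return spi_register_master(master);	// child nodes registered here
 */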
of_register_spi_devices(master); done: return status; } diff --git a/drivers/spi/spi_mpc8xxx.c b/drivers/spi/spi_mpc8xxx.c index 97ab0a81338..aad9ae1b9c6 100644 --- a/drivers/spi/spi_mpc8xxx.c +++ b/drivers/spi/spi_mpc8xxx.c @@ -38,7 +38,6 @@ #include <linux/of_platform.h> #include <linux/gpio.h> #include <linux/of_gpio.h> -#include <linux/of_spi.h> #include <linux/slab.h> #include <sysdev/fsl_soc.h> @@ -1009,6 +1008,7 @@ mpc8xxx_spi_probe(struct device *dev, struct resource *mem, unsigned int irq) master->setup = mpc8xxx_spi_setup; master->transfer = mpc8xxx_spi_transfer; master->cleanup = mpc8xxx_spi_cleanup; + master->dev.of_node = dev->of_node; mpc8xxx_spi = spi_master_get_devdata(master); mpc8xxx_spi->dev = dev; @@ -1299,8 +1299,6 @@ static int __devinit of_mpc8xxx_spi_probe(struct of_device *ofdev, goto err; } - of_register_spi_devices(master, np); - return 0; err: diff --git a/drivers/spi/spi_ppc4xx.c b/drivers/spi/spi_ppc4xx.c index d53466a249d..0f5fa7e2a55 100644 --- a/drivers/spi/spi_ppc4xx.c +++ b/drivers/spi/spi_ppc4xx.c @@ -407,6 +407,7 @@ static int __init spi_ppc4xx_of_probe(struct of_device *op, master = spi_alloc_master(dev, sizeof *hw); if (master == NULL) return -ENOMEM; + master->dev.of_node = np; dev_set_drvdata(dev, master); hw = spi_master_get_devdata(master); hw->master = spi_master_get(master); @@ -545,7 +546,6 @@ static int __init spi_ppc4xx_of_probe(struct of_device *op, } dev_info(dev, "driver initialized\n"); - of_register_spi_devices(master, np); return 0; diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c index 1b47363cb73..80f2db5bcfd 100644 --- a/drivers/spi/xilinx_spi.c +++ b/drivers/spi/xilinx_spi.c @@ -390,6 +390,9 @@ struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem, master->bus_num = bus_num; master->num_chipselect = pdata->num_chipselect; +#ifdef CONFIG_OF + master->dev.of_node = dev->of_node; +#endif xspi->mem = *mem; xspi->irq = irq; diff --git a/drivers/spi/xilinx_spi_of.c b/drivers/spi/xilinx_spi_of.c index 4654805b08d..f53d3f6b9f6 100644 --- a/drivers/spi/xilinx_spi_of.c +++ b/drivers/spi/xilinx_spi_of.c @@ -29,6 +29,7 @@ #include <linux/io.h> #include <linux/slab.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/of_device.h> #include <linux/of_spi.h> @@ -80,9 +81,6 @@ static int __devinit xilinx_spi_of_probe(struct of_device *ofdev, dev_set_drvdata(&ofdev->dev, master); - /* Add any subnodes on the SPI bus */ - of_register_spi_devices(master, ofdev->dev.of_node); - return 0; } diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index 6a58cb1330c..4aa00e6e57a 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -45,7 +45,8 @@ config USB_ARCH_HAS_OHCI default y if STB03xxx default y if PPC_MPC52xx # MIPS: - default y if SOC_AU1X00 + default y if MIPS_ALCHEMY + default y if MACH_JZ4740 # SH: default y if CPU_SUBTYPE_SH7720 default y if CPU_SUBTYPE_SH7721 diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c index 82506ca297d..9648b75f028 100644 --- a/drivers/usb/gadget/fsl_qe_udc.c +++ b/drivers/usb/gadget/fsl_qe_udc.c @@ -32,6 +32,7 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/moduleparam.h> +#include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/dma-mapping.h> #include <linux/usb/ch9.h> diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index fc576557d8a..02864a237a2 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -1031,7 +1031,7 @@ 
MODULE_LICENSE ("GPL"); #define PLATFORM_DRIVER ohci_hcd_ep93xx_driver #endif -#ifdef CONFIG_SOC_AU1X00 +#ifdef CONFIG_MIPS_ALCHEMY #include "ohci-au1xxx.c" #define PLATFORM_DRIVER ohci_hcd_au1xxx_driver #endif @@ -1095,6 +1095,11 @@ MODULE_LICENSE ("GPL"); #define TMIO_OHCI_DRIVER ohci_hcd_tmio_driver #endif +#ifdef CONFIG_MACH_JZ4740 +#include "ohci-jz4740.c" +#define PLATFORM_DRIVER ohci_hcd_jz4740_driver +#endif + #if !defined(PCI_DRIVER) && \ !defined(PLATFORM_DRIVER) && \ !defined(OMAP1_PLATFORM_DRIVER) && \ diff --git a/drivers/usb/host/ohci-jz4740.c b/drivers/usb/host/ohci-jz4740.c new file mode 100644 index 00000000000..10e1872f3ab --- /dev/null +++ b/drivers/usb/host/ohci-jz4740.c @@ -0,0 +1,276 @@ +/* + * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. + * + */ + +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/regulator/consumer.h> + +struct jz4740_ohci_hcd { + struct ohci_hcd ohci_hcd; + + struct regulator *vbus; + bool vbus_enabled; + struct clk *clk; +}; + +static inline struct jz4740_ohci_hcd *hcd_to_jz4740_hcd(struct usb_hcd *hcd) +{ + return (struct jz4740_ohci_hcd *)(hcd->hcd_priv); +} + +static inline struct usb_hcd *jz4740_hcd_to_hcd(struct jz4740_ohci_hcd *jz4740_ohci) +{ + return container_of((void *)jz4740_ohci, struct usb_hcd, hcd_priv); +} + +static int ohci_jz4740_start(struct usb_hcd *hcd) +{ + struct ohci_hcd *ohci = hcd_to_ohci(hcd); + int ret; + + ret = ohci_init(ohci); + if (ret < 0) + return ret; + + ohci->num_ports = 1; + + ret = ohci_run(ohci); + if (ret < 0) { + dev_err(hcd->self.controller, "Can not start %s", + hcd->self.bus_name); + ohci_stop(hcd); + return ret; + } + return 0; +} + +static int ohci_jz4740_set_vbus_power(struct jz4740_ohci_hcd *jz4740_ohci, + bool enabled) +{ + int ret = 0; + + if (!jz4740_ohci->vbus) + return 0; + + if (enabled && !jz4740_ohci->vbus_enabled) { + ret = regulator_enable(jz4740_ohci->vbus); + if (ret) + dev_err(jz4740_hcd_to_hcd(jz4740_ohci)->self.controller, + "Could not power vbus\n"); + } else if (!enabled && jz4740_ohci->vbus_enabled) { + ret = regulator_disable(jz4740_ohci->vbus); + } + + if (ret == 0) + jz4740_ohci->vbus_enabled = enabled; + + return ret; +} + +static int ohci_jz4740_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, + u16 wIndex, char *buf, u16 wLength) +{ + struct jz4740_ohci_hcd *jz4740_ohci = hcd_to_jz4740_hcd(hcd); + int ret; + + switch (typeReq) { + case SetHubFeature: + if (wValue == USB_PORT_FEAT_POWER) + ret = ohci_jz4740_set_vbus_power(jz4740_ohci, true); + break; + case ClearHubFeature: + if (wValue == USB_PORT_FEAT_POWER) + ret = ohci_jz4740_set_vbus_power(jz4740_ohci, false); + break; + } + + if (ret) + return ret; + + return ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength); +} + + +static const struct hc_driver ohci_jz4740_hc_driver = { + .description = hcd_name, + .product_desc = "JZ4740 OHCI", + .hcd_priv_size = sizeof(struct jz4740_ohci_hcd), + + /* + * generic hardware linkage + */ + .irq = ohci_irq, + .flags = HCD_USB11 | HCD_MEMORY, + + /* + * basic lifecycle operations + 
*/ + .start = ohci_jz4740_start, + .stop = ohci_stop, + .shutdown = ohci_shutdown, + + /* + * managing i/o requests and associated device resources + */ + .urb_enqueue = ohci_urb_enqueue, + .urb_dequeue = ohci_urb_dequeue, + .endpoint_disable = ohci_endpoint_disable, + + /* + * scheduling support + */ + .get_frame_number = ohci_get_frame, + + /* + * root hub support + */ + .hub_status_data = ohci_hub_status_data, + .hub_control = ohci_jz4740_hub_control, +#ifdef CONFIG_PM + .bus_suspend = ohci_bus_suspend, + .bus_resume = ohci_bus_resume, +#endif + .start_port_reset = ohci_start_port_reset, +}; + + +static __devinit int jz4740_ohci_probe(struct platform_device *pdev) +{ + int ret; + struct usb_hcd *hcd; + struct jz4740_ohci_hcd *jz4740_ohci; + struct resource *res; + int irq; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + if (!res) { + dev_err(&pdev->dev, "Failed to get platform resource\n"); + return -ENOENT; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "Failed to get platform irq\n"); + return irq; + } + + hcd = usb_create_hcd(&ohci_jz4740_hc_driver, &pdev->dev, "jz4740"); + if (!hcd) { + dev_err(&pdev->dev, "Failed to create hcd.\n"); + return -ENOMEM; + } + + jz4740_ohci = hcd_to_jz4740_hcd(hcd); + + res = request_mem_region(res->start, resource_size(res), hcd_name); + if (!res) { + dev_err(&pdev->dev, "Failed to request mem region.\n"); + ret = -EBUSY; + goto err_free; + } + + hcd->rsrc_start = res->start; + hcd->rsrc_len = resource_size(res); + hcd->regs = ioremap(res->start, resource_size(res)); + + if (!hcd->regs) { + dev_err(&pdev->dev, "Failed to ioremap registers.\n"); + ret = -EBUSY; + goto err_release_mem; + } + + jz4740_ohci->clk = clk_get(&pdev->dev, "uhc"); + if (IS_ERR(jz4740_ohci->clk)) { + ret = PTR_ERR(jz4740_ohci->clk); + dev_err(&pdev->dev, "Failed to get clock: %d\n", ret); + goto err_iounmap; + } + + jz4740_ohci->vbus = regulator_get(&pdev->dev, "vbus"); + if (IS_ERR(jz4740_ohci->vbus)) + jz4740_ohci->vbus = NULL; + + + clk_set_rate(jz4740_ohci->clk, 48000000); + clk_enable(jz4740_ohci->clk); + if (jz4740_ohci->vbus) + ohci_jz4740_set_vbus_power(jz4740_ohci, true); + + platform_set_drvdata(pdev, hcd); + + ohci_hcd_init(hcd_to_ohci(hcd)); + + ret = usb_add_hcd(hcd, irq, 0); + if (ret) { + dev_err(&pdev->dev, "Failed to add hcd: %d\n", ret); + goto err_disable; + } + + return 0; + +err_disable: + platform_set_drvdata(pdev, NULL); + if (jz4740_ohci->vbus) { + regulator_disable(jz4740_ohci->vbus); + regulator_put(jz4740_ohci->vbus); + } + clk_disable(jz4740_ohci->clk); + + clk_put(jz4740_ohci->clk); +err_iounmap: + iounmap(hcd->regs); +err_release_mem: + release_mem_region(res->start, resource_size(res)); +err_free: + usb_put_hcd(hcd); + + return ret; +} + +static __devexit int jz4740_ohci_remove(struct platform_device *pdev) +{ + struct usb_hcd *hcd = platform_get_drvdata(pdev); + struct jz4740_ohci_hcd *jz4740_ohci = hcd_to_jz4740_hcd(hcd); + + usb_remove_hcd(hcd); + + platform_set_drvdata(pdev, NULL); + + if (jz4740_ohci->vbus) { + regulator_disable(jz4740_ohci->vbus); + regulator_put(jz4740_ohci->vbus); + } + + clk_disable(jz4740_ohci->clk); + clk_put(jz4740_ohci->clk); + + iounmap(hcd->regs); + release_mem_region(hcd->rsrc_start, hcd->rsrc_len); + + usb_put_hcd(hcd); + + return 0; +} + +static struct platform_driver ohci_hcd_jz4740_driver = { + .probe = jz4740_ohci_probe, + .remove = __devexit_p(jz4740_ohci_remove), + .driver = { + .name = "jz4740-ohci", + .owner = THIS_MODULE, + }, +}; + 
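/*
 * Like the other bus glues listed in ohci-hcd.c above, ohci-jz4740.c is not
 * built as a standalone module: ohci-hcd.c #includes it when
 * CONFIG_MACH_JZ4740 is set and registers ohci_hcd_jz4740_driver as its
 * PLATFORM_DRIVER.  The per-controller state lives in the hcd_priv area
 * that usb_create_hcd() allocates behind struct usb_hcd, which is why
 * hcd_to_jz4740_hcd() simply casts hcd->hcd_priv while jz4740_hcd_to_hcd()
 * goes back with container_of().  A board file would typically bind to the
 * driver with a platform device named after .name; a sketch with
 * hypothetical resources:
 *
 *	static struct platform_device jz4740_ohci_device = {
 *		.name		= "jz4740-ohci",
 *		.id		= -1,
 *		.num_resources	= ARRAY_SIZE(jz4740_ohci_resources),
 *		.resource	= jz4740_ohci_resources,
 *	};
 */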
+MODULE_ALIAS("platfrom:jz4740-ohci"); diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 3d94a147172..bbeee4ba248 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -1871,6 +1871,7 @@ config FB_MBX_DEBUG config FB_FSL_DIU tristate "Freescale DIU framebuffer support" depends on FB && FSL_SOC + select FB_MODE_HELPERS select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT @@ -1895,6 +1896,13 @@ config FB_W100 If unsure, say N. +config SH_MIPI_DSI + tristate + depends on (SUPERH || ARCH_SHMOBILE) && HAVE_CLK + +config SH_LCD_MIPI_DSI + bool + config FB_SH_MOBILE_LCDC tristate "SuperH Mobile LCDC framebuffer support" depends on FB && (SUPERH || ARCH_SHMOBILE) && HAVE_CLK @@ -1903,9 +1911,17 @@ config FB_SH_MOBILE_LCDC select FB_SYS_IMAGEBLIT select FB_SYS_FOPS select FB_DEFERRED_IO + select SH_MIPI_DSI if SH_LCD_MIPI_DSI ---help--- Frame buffer driver for the on-chip SH-Mobile LCD controller. +config FB_SH_MOBILE_HDMI + tristate "SuperH Mobile HDMI controller support" + depends on FB_SH_MOBILE_LCDC + select FB_MODE_HELPERS + ---help--- + Driver for the on-chip SH-Mobile HDMI controller. + config FB_TMIO tristate "Toshiba Mobile IO FrameBuffer support" depends on FB && MFD_CORE @@ -2229,6 +2245,15 @@ config FB_BROADSHEET and could also have been called by other names when coupled with a bridge adapter. +config FB_JZ4740 + tristate "JZ4740 LCD framebuffer support" + depends on FB && MACH_JZ4740 + select FB_SYS_FILLRECT + select FB_SYS_COPYAREA + select FB_SYS_IMAGEBLIT + help + Framebuffer support for the JZ4740 SoC. + source "drivers/video/omap/Kconfig" source "drivers/video/omap2/Kconfig" diff --git a/drivers/video/Makefile b/drivers/video/Makefile index ddc2af2ba45..485e8ed1318 100644 --- a/drivers/video/Makefile +++ b/drivers/video/Makefile @@ -123,6 +123,8 @@ obj-$(CONFIG_FB_IBM_GXT4500) += gxt4500.o obj-$(CONFIG_FB_PS3) += ps3fb.o obj-$(CONFIG_FB_SM501) += sm501fb.o obj-$(CONFIG_FB_XILINX) += xilinxfb.o +obj-$(CONFIG_SH_MIPI_DSI) += sh_mipi_dsi.o +obj-$(CONFIG_FB_SH_MOBILE_HDMI) += sh_mobile_hdmi.o obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o obj-$(CONFIG_FB_OMAP) += omap/ obj-y += omap2/ @@ -131,6 +133,7 @@ obj-$(CONFIG_FB_CARMINE) += carminefb.o obj-$(CONFIG_FB_MB862XX) += mb862xx/ obj-$(CONFIG_FB_MSM) += msm/ obj-$(CONFIG_FB_NUC900) += nuc900fb.o +obj-$(CONFIG_FB_JZ4740) += jz4740_fb.o # Platform or fallback drivers go here obj-$(CONFIG_FB_UVESA) += uvesafb.o diff --git a/drivers/video/bw2.c b/drivers/video/bw2.c index 09f1b9b462f..c7796637baf 100644 --- a/drivers/video/bw2.c +++ b/drivers/video/bw2.c @@ -390,12 +390,12 @@ static int __init bw2_init(void) if (fb_get_options("bw2fb", NULL)) return -ENODEV; - return of_register_driver(&bw2_driver, &of_bus_type); + return of_register_platform_driver(&bw2_driver); } static void __exit bw2_exit(void) { - of_unregister_driver(&bw2_driver); + of_unregister_platform_driver(&bw2_driver); } module_init(bw2_init); diff --git a/drivers/video/cg14.c b/drivers/video/cg14.c index e5dc2241194..d09fde8beb6 100644 --- a/drivers/video/cg14.c +++ b/drivers/video/cg14.c @@ -610,12 +610,12 @@ static int __init cg14_init(void) if (fb_get_options("cg14fb", NULL)) return -ENODEV; - return of_register_driver(&cg14_driver, &of_bus_type); + return of_register_platform_driver(&cg14_driver); } static void __exit cg14_exit(void) { - of_unregister_driver(&cg14_driver); + of_unregister_platform_driver(&cg14_driver); } module_init(cg14_init); diff --git a/drivers/video/cg3.c b/drivers/video/cg3.c index 
558d73a948a..64aa29809fb 100644 --- a/drivers/video/cg3.c +++ b/drivers/video/cg3.c @@ -477,12 +477,12 @@ static int __init cg3_init(void) if (fb_get_options("cg3fb", NULL)) return -ENODEV; - return of_register_driver(&cg3_driver, &of_bus_type); + return of_register_platform_driver(&cg3_driver); } static void __exit cg3_exit(void) { - of_unregister_driver(&cg3_driver); + of_unregister_platform_driver(&cg3_driver); } module_init(cg3_init); diff --git a/drivers/video/cg6.c b/drivers/video/cg6.c index 480d761a27a..2389a719dcc 100644 --- a/drivers/video/cg6.c +++ b/drivers/video/cg6.c @@ -870,12 +870,12 @@ static int __init cg6_init(void) if (fb_get_options("cg6fb", NULL)) return -ENODEV; - return of_register_driver(&cg6_driver, &of_bus_type); + return of_register_platform_driver(&cg6_driver); } static void __exit cg6_exit(void) { - of_unregister_driver(&cg6_driver); + of_unregister_platform_driver(&cg6_driver); } module_init(cg6_init); diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index b0a3fa00706..3b3f5749af9 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -2342,6 +2342,30 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch) return 0; } +static int fbcon_debug_enter(struct vc_data *vc) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_ops *ops = info->fbcon_par; + + ops->save_graphics = ops->graphics; + ops->graphics = 0; + if (info->fbops->fb_debug_enter) + info->fbops->fb_debug_enter(info); + fbcon_set_palette(vc, color_table); + return 0; +} + +static int fbcon_debug_leave(struct vc_data *vc) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_ops *ops = info->fbcon_par; + + ops->graphics = ops->save_graphics; + if (info->fbops->fb_debug_leave) + info->fbops->fb_debug_leave(info); + return 0; +} + static int fbcon_get_font(struct vc_data *vc, struct console_font *font) { u8 *fontdata = vc->vc_font.data; @@ -3276,6 +3300,8 @@ static const struct consw fb_con = { .con_screen_pos = fbcon_screen_pos, .con_getxy = fbcon_getxy, .con_resize = fbcon_resize, + .con_debug_enter = fbcon_debug_enter, + .con_debug_leave = fbcon_debug_leave, }; static struct notifier_block fbcon_event_notifier = { diff --git a/drivers/video/console/fbcon.h b/drivers/video/console/fbcon.h index 89a346880ec..6bd2e0c7f20 100644 --- a/drivers/video/console/fbcon.h +++ b/drivers/video/console/fbcon.h @@ -74,6 +74,7 @@ struct fbcon_ops { int cursor_reset; int blank_state; int graphics; + int save_graphics; /* for debug enter/leave */ int flags; int rotate; int cur_rotate; diff --git a/drivers/video/controlfb.c b/drivers/video/controlfb.c index 49fcbe8f18a..c225dcce89e 100644 --- a/drivers/video/controlfb.c +++ b/drivers/video/controlfb.c @@ -40,6 +40,8 @@ #include <linux/vmalloc.h> #include <linux/delay.h> #include <linux/interrupt.h> +#include <linux/of.h> +#include <linux/of_address.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/pci.h> diff --git a/drivers/video/ffb.c b/drivers/video/ffb.c index 95c0227f47f..f6ecfab296d 100644 --- a/drivers/video/ffb.c +++ b/drivers/video/ffb.c @@ -1067,12 +1067,12 @@ static int __init ffb_init(void) if (fb_get_options("ffb", NULL)) return -ENODEV; - return of_register_driver(&ffb_driver, &of_bus_type); + return of_register_platform_driver(&ffb_driver); } static void __exit ffb_exit(void) { - of_unregister_driver(&ffb_driver); + of_unregister_platform_driver(&ffb_driver); } module_init(ffb_init); diff --git 
a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c index 27455ce298b..e38ad222454 100644 --- a/drivers/video/fsl-diu-fb.c +++ b/drivers/video/fsl-diu-fb.c @@ -34,7 +34,8 @@ #include <linux/of_platform.h> #include <sysdev/fsl_soc.h> -#include "fsl-diu-fb.h" +#include <linux/fsl-diu-fb.h> +#include "edid.h" /* * These parameters give default parameters @@ -217,6 +218,7 @@ struct mfb_info { int x_aoi_d; /* aoi display x offset to physical screen */ int y_aoi_d; /* aoi display y offset to physical screen */ struct fsl_diu_data *parent; + u8 *edid_data; }; @@ -317,6 +319,17 @@ static void fsl_diu_free(void *virt, size_t size) free_pages_exact(virt, size); } +/* + * Workaround for failed writing desc register of planes. + * Needed with MPC5121 DIU rev 2.0 silicon. + */ +void wr_reg_wa(u32 *reg, u32 val) +{ + do { + out_be32(reg, val); + } while (in_be32(reg) != val); +} + static int fsl_diu_enable_panel(struct fb_info *info) { struct mfb_info *pmfbi, *cmfbi, *mfbi = info->par; @@ -330,7 +343,7 @@ static int fsl_diu_enable_panel(struct fb_info *info) switch (mfbi->index) { case 0: /* plane 0 */ if (hw->desc[0] != ad->paddr) - out_be32(&hw->desc[0], ad->paddr); + wr_reg_wa(&hw->desc[0], ad->paddr); break; case 1: /* plane 1 AOI 0 */ cmfbi = machine_data->fsl_diu_info[2]->par; @@ -340,7 +353,7 @@ static int fsl_diu_enable_panel(struct fb_info *info) cpu_to_le32(cmfbi->ad->paddr); else ad->next_ad = 0; - out_be32(&hw->desc[1], ad->paddr); + wr_reg_wa(&hw->desc[1], ad->paddr); } break; case 3: /* plane 2 AOI 0 */ @@ -351,14 +364,14 @@ static int fsl_diu_enable_panel(struct fb_info *info) cpu_to_le32(cmfbi->ad->paddr); else ad->next_ad = 0; - out_be32(&hw->desc[2], ad->paddr); + wr_reg_wa(&hw->desc[2], ad->paddr); } break; case 2: /* plane 1 AOI 1 */ pmfbi = machine_data->fsl_diu_info[1]->par; ad->next_ad = 0; if (hw->desc[1] == machine_data->dummy_ad->paddr) - out_be32(&hw->desc[1], ad->paddr); + wr_reg_wa(&hw->desc[1], ad->paddr); else /* AOI0 open */ pmfbi->ad->next_ad = cpu_to_le32(ad->paddr); break; @@ -366,7 +379,7 @@ static int fsl_diu_enable_panel(struct fb_info *info) pmfbi = machine_data->fsl_diu_info[3]->par; ad->next_ad = 0; if (hw->desc[2] == machine_data->dummy_ad->paddr) - out_be32(&hw->desc[2], ad->paddr); + wr_reg_wa(&hw->desc[2], ad->paddr); else /* AOI0 was open */ pmfbi->ad->next_ad = cpu_to_le32(ad->paddr); break; @@ -390,27 +403,24 @@ static int fsl_diu_disable_panel(struct fb_info *info) switch (mfbi->index) { case 0: /* plane 0 */ if (hw->desc[0] != machine_data->dummy_ad->paddr) - out_be32(&hw->desc[0], - machine_data->dummy_ad->paddr); + wr_reg_wa(&hw->desc[0], machine_data->dummy_ad->paddr); break; case 1: /* plane 1 AOI 0 */ cmfbi = machine_data->fsl_diu_info[2]->par; if (cmfbi->count > 0) /* AOI1 is open */ - out_be32(&hw->desc[1], cmfbi->ad->paddr); + wr_reg_wa(&hw->desc[1], cmfbi->ad->paddr); /* move AOI1 to the first */ else /* AOI1 was closed */ - out_be32(&hw->desc[1], - machine_data->dummy_ad->paddr); + wr_reg_wa(&hw->desc[1], machine_data->dummy_ad->paddr); /* close AOI 0 */ break; case 3: /* plane 2 AOI 0 */ cmfbi = machine_data->fsl_diu_info[4]->par; if (cmfbi->count > 0) /* AOI1 is open */ - out_be32(&hw->desc[2], cmfbi->ad->paddr); + wr_reg_wa(&hw->desc[2], cmfbi->ad->paddr); /* move AOI1 to the first */ else /* AOI1 was closed */ - out_be32(&hw->desc[2], - machine_data->dummy_ad->paddr); + wr_reg_wa(&hw->desc[2], machine_data->dummy_ad->paddr); /* close AOI 0 */ break; case 2: /* plane 1 AOI 1 */ @@ -421,7 +431,7 @@ static int 
fsl_diu_disable_panel(struct fb_info *info) /* AOI0 is open, must be the first */ pmfbi->ad->next_ad = 0; } else /* AOI1 is the first in the chain */ - out_be32(&hw->desc[1], machine_data->dummy_ad->paddr); + wr_reg_wa(&hw->desc[1], machine_data->dummy_ad->paddr); /* close AOI 1 */ break; case 4: /* plane 2 AOI 1 */ @@ -432,7 +442,7 @@ static int fsl_diu_disable_panel(struct fb_info *info) /* AOI0 is open, must be the first */ pmfbi->ad->next_ad = 0; } else /* AOI1 is the first in the chain */ - out_be32(&hw->desc[2], machine_data->dummy_ad->paddr); + wr_reg_wa(&hw->desc[2], machine_data->dummy_ad->paddr); /* close AOI 1 */ break; default: @@ -1100,6 +1110,10 @@ static int fsl_diu_open(struct fb_info *info, int user) struct mfb_info *mfbi = info->par; int res = 0; + /* free boot splash memory on first /dev/fb0 open */ + if (!mfbi->index && diu_ops.release_bootmem) + diu_ops.release_bootmem(); + spin_lock(&diu_lock); mfbi->count++; if (mfbi->count == 1) { @@ -1173,18 +1187,30 @@ static int __devinit install_fb(struct fb_info *info) int rc; struct mfb_info *mfbi = info->par; const char *aoi_mode, *init_aoi_mode = "320x240"; + struct fb_videomode *db = fsl_diu_mode_db; + unsigned int dbsize = ARRAY_SIZE(fsl_diu_mode_db); + int has_default_mode = 1; if (init_fbinfo(info)) return -EINVAL; - if (mfbi->index == 0) /* plane 0 */ + if (mfbi->index == 0) { /* plane 0 */ + if (mfbi->edid_data) { + /* Now build modedb from EDID */ + fb_edid_to_monspecs(mfbi->edid_data, &info->monspecs); + fb_videomode_to_modelist(info->monspecs.modedb, + info->monspecs.modedb_len, + &info->modelist); + db = info->monspecs.modedb; + dbsize = info->monspecs.modedb_len; + } aoi_mode = fb_mode; - else + } else { aoi_mode = init_aoi_mode; + } pr_debug("mode used = %s\n", aoi_mode); - rc = fb_find_mode(&info->var, info, aoi_mode, fsl_diu_mode_db, - ARRAY_SIZE(fsl_diu_mode_db), &fsl_diu_default_mode, default_bpp); - + rc = fb_find_mode(&info->var, info, aoi_mode, db, dbsize, + &fsl_diu_default_mode, default_bpp); switch (rc) { case 1: pr_debug("using mode specified in @mode\n"); @@ -1202,10 +1228,50 @@ static int __devinit install_fb(struct fb_info *info) default: pr_debug("rc = %d\n", rc); pr_debug("failed to find mode\n"); - return -EINVAL; + /* + * For plane 0 we continue and look into + * driver's internal modedb. + */ + if (mfbi->index == 0 && mfbi->edid_data) + has_default_mode = 0; + else + return -EINVAL; break; } + if (!has_default_mode) { + rc = fb_find_mode(&info->var, info, aoi_mode, fsl_diu_mode_db, + ARRAY_SIZE(fsl_diu_mode_db), + &fsl_diu_default_mode, + default_bpp); + if (rc > 0 && rc < 5) + has_default_mode = 1; + } + + /* Still not found, use preferred mode from database if any */ + if (!has_default_mode && info->monspecs.modedb) { + struct fb_monspecs *specs = &info->monspecs; + struct fb_videomode *modedb = &specs->modedb[0]; + + /* + * Get preferred timing. If not found, + * first mode in database will be used. 
+ */ + if (specs->misc & FB_MISC_1ST_DETAIL) { + int i; + + for (i = 0; i < specs->modedb_len; i++) { + if (specs->modedb[i].flag & FB_MODE_IS_FIRST) { + modedb = &specs->modedb[i]; + break; + } + } + } + + info->var.bits_per_pixel = default_bpp; + fb_videomode_to_var(&info->var, modedb); + } + pr_debug("xres_virtual %d\n", info->var.xres_virtual); pr_debug("bits_per_pixel %d\n", info->var.bits_per_pixel); @@ -1244,6 +1310,9 @@ static void uninstall_fb(struct fb_info *info) if (!mfbi->registered) return; + if (mfbi->index == 0) + kfree(mfbi->edid_data); + unregister_framebuffer(info); unmap_video_memory(info); if (&info->cmap) @@ -1427,6 +1496,7 @@ static int __devinit fsl_diu_probe(struct of_device *ofdev, int ret, i, error = 0; struct resource res; struct fsl_diu_data *machine_data; + int diu_mode; machine_data = kzalloc(sizeof(struct fsl_diu_data), GFP_KERNEL); if (!machine_data) @@ -1443,6 +1513,17 @@ static int __devinit fsl_diu_probe(struct of_device *ofdev, mfbi = machine_data->fsl_diu_info[i]->par; memcpy(mfbi, &mfb_template[i], sizeof(struct mfb_info)); mfbi->parent = machine_data; + + if (mfbi->index == 0) { + const u8 *prop; + int len; + + /* Get EDID */ + prop = of_get_property(np, "edid", &len); + if (prop && len == EDID_LENGTH) + mfbi->edid_data = kmemdup(prop, EDID_LENGTH, + GFP_KERNEL); + } } ret = of_address_to_resource(np, 0, &res); @@ -1463,7 +1544,9 @@ static int __devinit fsl_diu_probe(struct of_device *ofdev, goto error2; } - out_be32(&dr.diu_reg->diu_mode, 0); /* disable DIU anyway*/ + diu_mode = in_be32(&dr.diu_reg->diu_mode); + if (diu_mode != MFB_MODE1) + out_be32(&dr.diu_reg->diu_mode, 0); /* disable DIU */ /* Get the IRQ of the DIU */ machine_data->irq = irq_of_parse_and_map(np, 0); @@ -1511,7 +1594,13 @@ static int __devinit fsl_diu_probe(struct of_device *ofdev, machine_data->dummy_ad->offset_xyd = 0; machine_data->dummy_ad->next_ad = 0; - out_be32(&dr.diu_reg->desc[0], machine_data->dummy_ad->paddr); + /* + * Let DIU display splash screen if it was pre-initialized + * by the bootloader, set dummy area descriptor otherwise. + */ + if (diu_mode != MFB_MODE1) + out_be32(&dr.diu_reg->desc[0], machine_data->dummy_ad->paddr); + out_be32(&dr.diu_reg->desc[1], machine_data->dummy_ad->paddr); out_be32(&dr.diu_reg->desc[2], machine_data->dummy_ad->paddr); diff --git a/drivers/video/fsl-diu-fb.h b/drivers/video/fsl-diu-fb.h deleted file mode 100644 index fc295d7ea46..00000000000 --- a/drivers/video/fsl-diu-fb.h +++ /dev/null @@ -1,223 +0,0 @@ -/* - * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved. - * - * Freescale DIU Frame Buffer device driver - * - * Authors: Hongjun Chen <hong-jun.chen@freescale.com> - * Paul Widmer <paul.widmer@freescale.com> - * Srikanth Srinivasan <srikanth.srinivasan@freescale.com> - * York Sun <yorksun@freescale.com> - * - * Based on imxfb.c Copyright (C) 2004 S.Hauer, Pengutronix - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- * - */ - -#ifndef __FSL_DIU_FB_H__ -#define __FSL_DIU_FB_H__ - -/* Arbitrary threshold to determine the allocation method - * See mpc8610fb_set_par(), map_video_memory(), and unmap_video_memory() - */ -#define MEM_ALLOC_THRESHOLD (1024*768*4+32) -/* Minimum value that the pixel clock can be set to in pico seconds - * This is determined by platform clock/3 where the minimum platform - * clock is 533MHz. This gives 5629 pico seconds. - */ -#define MIN_PIX_CLK 5629 -#define MAX_PIX_CLK 96096 - -#include <linux/types.h> - -struct mfb_alpha { - int enable; - int alpha; -}; - -struct mfb_chroma_key { - int enable; - __u8 red_max; - __u8 green_max; - __u8 blue_max; - __u8 red_min; - __u8 green_min; - __u8 blue_min; -}; - -struct aoi_display_offset { - int x_aoi_d; - int y_aoi_d; -}; - -#define MFB_SET_CHROMA_KEY _IOW('M', 1, struct mfb_chroma_key) -#define MFB_WAIT_FOR_VSYNC _IOW('F', 0x20, u_int32_t) -#define MFB_SET_BRIGHTNESS _IOW('M', 3, __u8) - -#define MFB_SET_ALPHA 0x80014d00 -#define MFB_GET_ALPHA 0x40014d00 -#define MFB_SET_AOID 0x80084d04 -#define MFB_GET_AOID 0x40084d04 -#define MFB_SET_PIXFMT 0x80014d08 -#define MFB_GET_PIXFMT 0x40014d08 - -#define FBIOGET_GWINFO 0x46E0 -#define FBIOPUT_GWINFO 0x46E1 - -#ifdef __KERNEL__ -#include <linux/spinlock.h> - -/* - * These are the fields of area descriptor(in DDR memory) for every plane - */ -struct diu_ad { - /* Word 0(32-bit) in DDR memory */ -/* __u16 comp; */ -/* __u16 pixel_s:2; */ -/* __u16 pallete:1; */ -/* __u16 red_c:2; */ -/* __u16 green_c:2; */ -/* __u16 blue_c:2; */ -/* __u16 alpha_c:3; */ -/* __u16 byte_f:1; */ -/* __u16 res0:3; */ - - __be32 pix_fmt; /* hard coding pixel format */ - - /* Word 1(32-bit) in DDR memory */ - __le32 addr; - - /* Word 2(32-bit) in DDR memory */ -/* __u32 delta_xs:11; */ -/* __u32 res1:1; */ -/* __u32 delta_ys:11; */ -/* __u32 res2:1; */ -/* __u32 g_alpha:8; */ - __le32 src_size_g_alpha; - - /* Word 3(32-bit) in DDR memory */ -/* __u32 delta_xi:11; */ -/* __u32 res3:5; */ -/* __u32 delta_yi:11; */ -/* __u32 res4:3; */ -/* __u32 flip:2; */ - __le32 aoi_size; - - /* Word 4(32-bit) in DDR memory */ - /*__u32 offset_xi:11; - __u32 res5:5; - __u32 offset_yi:11; - __u32 res6:5; - */ - __le32 offset_xyi; - - /* Word 5(32-bit) in DDR memory */ - /*__u32 offset_xd:11; - __u32 res7:5; - __u32 offset_yd:11; - __u32 res8:5; */ - __le32 offset_xyd; - - - /* Word 6(32-bit) in DDR memory */ - __u8 ckmax_r; - __u8 ckmax_g; - __u8 ckmax_b; - __u8 res9; - - /* Word 7(32-bit) in DDR memory */ - __u8 ckmin_r; - __u8 ckmin_g; - __u8 ckmin_b; - __u8 res10; -/* __u32 res10:8; */ - - /* Word 8(32-bit) in DDR memory */ - __le32 next_ad; - - /* Word 9(32-bit) in DDR memory, just for 64-bit aligned */ - __u32 paddr; -} __attribute__ ((packed)); - -/* DIU register map */ -struct diu { - __be32 desc[3]; - __be32 gamma; - __be32 pallete; - __be32 cursor; - __be32 curs_pos; - __be32 diu_mode; - __be32 bgnd; - __be32 bgnd_wb; - __be32 disp_size; - __be32 wb_size; - __be32 wb_mem_addr; - __be32 hsyn_para; - __be32 vsyn_para; - __be32 syn_pol; - __be32 thresholds; - __be32 int_status; - __be32 int_mask; - __be32 colorbar[8]; - __be32 filling; - __be32 plut; -} __attribute__ ((packed)); - -struct diu_hw { - struct diu *diu_reg; - spinlock_t reg_lock; - - __u32 mode; /* DIU operation mode */ -}; - -struct diu_addr { - __u8 __iomem *vaddr; /* Virtual address */ - dma_addr_t paddr; /* Physical address */ - __u32 offset; -}; - -struct diu_pool { - struct diu_addr ad; - struct diu_addr gamma; - struct diu_addr pallete; - struct diu_addr 
cursor; -}; - -#define FSL_DIU_BASE_OFFSET 0x2C000 /* Offset of DIU */ -#define INT_LCDC 64 /* DIU interrupt number */ - -#define FSL_AOI_NUM 6 /* 5 AOIs and one dummy AOI */ - /* 1 for plane 0, 2 for plane 1&2 each */ - -/* Minimum X and Y resolutions */ -#define MIN_XRES 64 -#define MIN_YRES 64 - -/* HW cursor parameters */ -#define MAX_CURS 32 - -/* Modes of operation of DIU */ -#define MFB_MODE0 0 /* DIU off */ -#define MFB_MODE1 1 /* All three planes output to display */ -#define MFB_MODE2 2 /* Plane 1 to display, planes 2+3 written back*/ -#define MFB_MODE3 3 /* All three planes written back to memory */ -#define MFB_MODE4 4 /* Color bar generation */ - -/* INT_STATUS/INT_MASK field descriptions */ -#define INT_VSYNC 0x01 /* Vsync interrupt */ -#define INT_VSYNC_WB 0x02 /* Vsync interrupt for write back operation */ -#define INT_UNDRUN 0x04 /* Under run exception interrupt */ -#define INT_PARERR 0x08 /* Display parameters error interrupt */ -#define INT_LS_BF_VS 0x10 /* Lines before vsync. interrupt */ - -/* Panels'operation modes */ -#define MFB_TYPE_OUTPUT 0 /* Panel output to display */ -#define MFB_TYPE_OFF 1 /* Panel off */ -#define MFB_TYPE_WB 2 /* Panel written back to memory */ -#define MFB_TYPE_TEST 3 /* Panel generate color bar */ - -#endif /* __KERNEL__ */ -#endif /* __FSL_DIU_FB_H__ */ diff --git a/drivers/video/jz4740_fb.c b/drivers/video/jz4740_fb.c new file mode 100644 index 00000000000..670ecaa0385 --- /dev/null +++ b/drivers/video/jz4740_fb.c @@ -0,0 +1,847 @@ +/* + * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> + * JZ4740 SoC LCD framebuffer driver + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> + +#include <linux/clk.h> +#include <linux/delay.h> + +#include <linux/console.h> +#include <linux/fb.h> + +#include <linux/dma-mapping.h> + +#include <asm/mach-jz4740/jz4740_fb.h> +#include <asm/mach-jz4740/gpio.h> + +#define JZ_REG_LCD_CFG 0x00 +#define JZ_REG_LCD_VSYNC 0x04 +#define JZ_REG_LCD_HSYNC 0x08 +#define JZ_REG_LCD_VAT 0x0C +#define JZ_REG_LCD_DAH 0x10 +#define JZ_REG_LCD_DAV 0x14 +#define JZ_REG_LCD_PS 0x18 +#define JZ_REG_LCD_CLS 0x1C +#define JZ_REG_LCD_SPL 0x20 +#define JZ_REG_LCD_REV 0x24 +#define JZ_REG_LCD_CTRL 0x30 +#define JZ_REG_LCD_STATE 0x34 +#define JZ_REG_LCD_IID 0x38 +#define JZ_REG_LCD_DA0 0x40 +#define JZ_REG_LCD_SA0 0x44 +#define JZ_REG_LCD_FID0 0x48 +#define JZ_REG_LCD_CMD0 0x4C +#define JZ_REG_LCD_DA1 0x50 +#define JZ_REG_LCD_SA1 0x54 +#define JZ_REG_LCD_FID1 0x58 +#define JZ_REG_LCD_CMD1 0x5C + +#define JZ_LCD_CFG_SLCD BIT(31) +#define JZ_LCD_CFG_PS_DISABLE BIT(23) +#define JZ_LCD_CFG_CLS_DISABLE BIT(22) +#define JZ_LCD_CFG_SPL_DISABLE BIT(21) +#define JZ_LCD_CFG_REV_DISABLE BIT(20) +#define JZ_LCD_CFG_HSYNCM BIT(19) +#define JZ_LCD_CFG_PCLKM BIT(18) +#define JZ_LCD_CFG_INV BIT(17) +#define JZ_LCD_CFG_SYNC_DIR BIT(16) +#define JZ_LCD_CFG_PS_POLARITY BIT(15) +#define JZ_LCD_CFG_CLS_POLARITY BIT(14) +#define JZ_LCD_CFG_SPL_POLARITY BIT(13) +#define JZ_LCD_CFG_REV_POLARITY BIT(12) +#define JZ_LCD_CFG_HSYNC_ACTIVE_LOW BIT(11) +#define JZ_LCD_CFG_PCLK_FALLING_EDGE BIT(10) +#define JZ_LCD_CFG_DE_ACTIVE_LOW BIT(9) +#define JZ_LCD_CFG_VSYNC_ACTIVE_LOW BIT(8) +#define JZ_LCD_CFG_18_BIT BIT(7) +#define JZ_LCD_CFG_PDW (BIT(5) | BIT(4)) +#define JZ_LCD_CFG_MODE_MASK 0xf + +#define JZ_LCD_CTRL_BURST_4 (0x0 << 28) +#define JZ_LCD_CTRL_BURST_8 (0x1 << 28) +#define JZ_LCD_CTRL_BURST_16 (0x2 << 28) +#define JZ_LCD_CTRL_RGB555 BIT(27) +#define JZ_LCD_CTRL_OFUP BIT(26) +#define JZ_LCD_CTRL_FRC_GRAYSCALE_16 (0x0 << 24) +#define JZ_LCD_CTRL_FRC_GRAYSCALE_4 (0x1 << 24) +#define JZ_LCD_CTRL_FRC_GRAYSCALE_2 (0x2 << 24) +#define JZ_LCD_CTRL_PDD_MASK (0xff << 16) +#define JZ_LCD_CTRL_EOF_IRQ BIT(13) +#define JZ_LCD_CTRL_SOF_IRQ BIT(12) +#define JZ_LCD_CTRL_OFU_IRQ BIT(11) +#define JZ_LCD_CTRL_IFU0_IRQ BIT(10) +#define JZ_LCD_CTRL_IFU1_IRQ BIT(9) +#define JZ_LCD_CTRL_DD_IRQ BIT(8) +#define JZ_LCD_CTRL_QDD_IRQ BIT(7) +#define JZ_LCD_CTRL_REVERSE_ENDIAN BIT(6) +#define JZ_LCD_CTRL_LSB_FISRT BIT(5) +#define JZ_LCD_CTRL_DISABLE BIT(4) +#define JZ_LCD_CTRL_ENABLE BIT(3) +#define JZ_LCD_CTRL_BPP_1 0x0 +#define JZ_LCD_CTRL_BPP_2 0x1 +#define JZ_LCD_CTRL_BPP_4 0x2 +#define JZ_LCD_CTRL_BPP_8 0x3 +#define JZ_LCD_CTRL_BPP_15_16 0x4 +#define JZ_LCD_CTRL_BPP_18_24 0x5 + +#define JZ_LCD_CMD_SOF_IRQ BIT(15) +#define JZ_LCD_CMD_EOF_IRQ BIT(16) +#define JZ_LCD_CMD_ENABLE_PAL BIT(12) + +#define JZ_LCD_SYNC_MASK 0x3ff + +#define JZ_LCD_STATE_DISABLED BIT(0) + +struct jzfb_framedesc { + uint32_t next; + uint32_t addr; + uint32_t id; + uint32_t cmd; +} __packed; + +struct jzfb { + struct fb_info *fb; + struct platform_device *pdev; + void __iomem *base; + struct resource *mem; + struct jz4740_fb_platform_data *pdata; + + size_t vidmem_size; + void *vidmem; + dma_addr_t vidmem_phys; + struct jzfb_framedesc *framedesc; + dma_addr_t framedesc_phys; + + struct clk *ldclk; + struct clk *lpclk; + + unsigned is_enabled:1; + struct mutex lock; + + uint32_t pseudo_palette[16]; +}; + +static const struct fb_fix_screeninfo jzfb_fix __devinitdata = { + .id = "JZ4740 FB", + .type = 
FB_TYPE_PACKED_PIXELS, + .visual = FB_VISUAL_TRUECOLOR, + .xpanstep = 0, + .ypanstep = 0, + .ywrapstep = 0, + .accel = FB_ACCEL_NONE, +}; + +static const struct jz_gpio_bulk_request jz_lcd_ctrl_pins[] = { + JZ_GPIO_BULK_PIN(LCD_PCLK), + JZ_GPIO_BULK_PIN(LCD_HSYNC), + JZ_GPIO_BULK_PIN(LCD_VSYNC), + JZ_GPIO_BULK_PIN(LCD_DE), + JZ_GPIO_BULK_PIN(LCD_PS), + JZ_GPIO_BULK_PIN(LCD_REV), + JZ_GPIO_BULK_PIN(LCD_CLS), + JZ_GPIO_BULK_PIN(LCD_SPL), +}; + +static const struct jz_gpio_bulk_request jz_lcd_data_pins[] = { + JZ_GPIO_BULK_PIN(LCD_DATA0), + JZ_GPIO_BULK_PIN(LCD_DATA1), + JZ_GPIO_BULK_PIN(LCD_DATA2), + JZ_GPIO_BULK_PIN(LCD_DATA3), + JZ_GPIO_BULK_PIN(LCD_DATA4), + JZ_GPIO_BULK_PIN(LCD_DATA5), + JZ_GPIO_BULK_PIN(LCD_DATA6), + JZ_GPIO_BULK_PIN(LCD_DATA7), + JZ_GPIO_BULK_PIN(LCD_DATA8), + JZ_GPIO_BULK_PIN(LCD_DATA9), + JZ_GPIO_BULK_PIN(LCD_DATA10), + JZ_GPIO_BULK_PIN(LCD_DATA11), + JZ_GPIO_BULK_PIN(LCD_DATA12), + JZ_GPIO_BULK_PIN(LCD_DATA13), + JZ_GPIO_BULK_PIN(LCD_DATA14), + JZ_GPIO_BULK_PIN(LCD_DATA15), + JZ_GPIO_BULK_PIN(LCD_DATA16), + JZ_GPIO_BULK_PIN(LCD_DATA17), +}; + +static unsigned int jzfb_num_ctrl_pins(struct jzfb *jzfb) +{ + unsigned int num; + + switch (jzfb->pdata->lcd_type) { + case JZ_LCD_TYPE_GENERIC_16_BIT: + num = 4; + break; + case JZ_LCD_TYPE_GENERIC_18_BIT: + num = 4; + break; + case JZ_LCD_TYPE_8BIT_SERIAL: + num = 3; + break; + case JZ_LCD_TYPE_SPECIAL_TFT_1: + case JZ_LCD_TYPE_SPECIAL_TFT_2: + case JZ_LCD_TYPE_SPECIAL_TFT_3: + num = 8; + break; + default: + num = 0; + break; + } + return num; +} + +static unsigned int jzfb_num_data_pins(struct jzfb *jzfb) +{ + unsigned int num; + + switch (jzfb->pdata->lcd_type) { + case JZ_LCD_TYPE_GENERIC_16_BIT: + num = 16; + break; + case JZ_LCD_TYPE_GENERIC_18_BIT: + num = 18; + break; + case JZ_LCD_TYPE_8BIT_SERIAL: + num = 8; + break; + case JZ_LCD_TYPE_SPECIAL_TFT_1: + case JZ_LCD_TYPE_SPECIAL_TFT_2: + case JZ_LCD_TYPE_SPECIAL_TFT_3: + if (jzfb->pdata->bpp == 18) + num = 18; + else + num = 16; + break; + default: + num = 0; + break; + } + return num; +} + +/* Based on CNVT_TOHW macro from skeletonfb.c */ +static inline uint32_t jzfb_convert_color_to_hw(unsigned val, + struct fb_bitfield *bf) +{ + return (((val << bf->length) + 0x7FFF - val) >> 16) << bf->offset; +} + +static int jzfb_setcolreg(unsigned regno, unsigned red, unsigned green, + unsigned blue, unsigned transp, struct fb_info *fb) +{ + uint32_t color; + + if (regno >= 16) + return -EINVAL; + + color = jzfb_convert_color_to_hw(red, &fb->var.red); + color |= jzfb_convert_color_to_hw(green, &fb->var.green); + color |= jzfb_convert_color_to_hw(blue, &fb->var.blue); + color |= jzfb_convert_color_to_hw(transp, &fb->var.transp); + + ((uint32_t *)(fb->pseudo_palette))[regno] = color; + + return 0; +} + +static int jzfb_get_controller_bpp(struct jzfb *jzfb) +{ + switch (jzfb->pdata->bpp) { + case 18: + case 24: + return 32; + case 15: + return 16; + default: + return jzfb->pdata->bpp; + } +} + +static struct fb_videomode *jzfb_get_mode(struct jzfb *jzfb, + struct fb_var_screeninfo *var) +{ + size_t i; + struct fb_videomode *mode = jzfb->pdata->modes; + + for (i = 0; i < jzfb->pdata->num_modes; ++i, ++mode) { + if (mode->xres == var->xres && mode->yres == var->yres) + return mode; + } + + return NULL; +} + +static int jzfb_check_var(struct fb_var_screeninfo *var, struct fb_info *fb) +{ + struct jzfb *jzfb = fb->par; + struct fb_videomode *mode; + + if (var->bits_per_pixel != jzfb_get_controller_bpp(jzfb) && + var->bits_per_pixel != jzfb->pdata->bpp) + return -EINVAL; + + mode = 
jzfb_get_mode(jzfb, var); + if (mode == NULL) + return -EINVAL; + + fb_videomode_to_var(var, mode); + + switch (jzfb->pdata->bpp) { + case 8: + break; + case 15: + var->red.offset = 10; + var->red.length = 5; + var->green.offset = 6; + var->green.length = 5; + var->blue.offset = 0; + var->blue.length = 5; + break; + case 16: + var->red.offset = 11; + var->red.length = 5; + var->green.offset = 5; + var->green.length = 6; + var->blue.offset = 0; + var->blue.length = 5; + break; + case 18: + var->red.offset = 16; + var->red.length = 6; + var->green.offset = 8; + var->green.length = 6; + var->blue.offset = 0; + var->blue.length = 6; + var->bits_per_pixel = 32; + break; + case 32: + case 24: + var->transp.offset = 24; + var->transp.length = 8; + var->red.offset = 16; + var->red.length = 8; + var->green.offset = 8; + var->green.length = 8; + var->blue.offset = 0; + var->blue.length = 8; + var->bits_per_pixel = 32; + break; + default: + break; + } + + return 0; +} + +static int jzfb_set_par(struct fb_info *info) +{ + struct jzfb *jzfb = info->par; + struct jz4740_fb_platform_data *pdata = jzfb->pdata; + struct fb_var_screeninfo *var = &info->var; + struct fb_videomode *mode; + uint16_t hds, vds; + uint16_t hde, vde; + uint16_t ht, vt; + uint32_t ctrl; + uint32_t cfg; + unsigned long rate; + + mode = jzfb_get_mode(jzfb, var); + if (mode == NULL) + return -EINVAL; + + if (mode == info->mode) + return 0; + + info->mode = mode; + + hds = mode->hsync_len + mode->left_margin; + hde = hds + mode->xres; + ht = hde + mode->right_margin; + + vds = mode->vsync_len + mode->upper_margin; + vde = vds + mode->yres; + vt = vde + mode->lower_margin; + + ctrl = JZ_LCD_CTRL_OFUP | JZ_LCD_CTRL_BURST_16; + + switch (pdata->bpp) { + case 1: + ctrl |= JZ_LCD_CTRL_BPP_1; + break; + case 2: + ctrl |= JZ_LCD_CTRL_BPP_2; + break; + case 4: + ctrl |= JZ_LCD_CTRL_BPP_4; + break; + case 8: + ctrl |= JZ_LCD_CTRL_BPP_8; + break; + case 15: + ctrl |= JZ_LCD_CTRL_RGB555; /* Falltrough */ + case 16: + ctrl |= JZ_LCD_CTRL_BPP_15_16; + break; + case 18: + case 24: + case 32: + ctrl |= JZ_LCD_CTRL_BPP_18_24; + break; + default: + break; + } + + cfg = pdata->lcd_type & 0xf; + + if (!(mode->sync & FB_SYNC_HOR_HIGH_ACT)) + cfg |= JZ_LCD_CFG_HSYNC_ACTIVE_LOW; + + if (!(mode->sync & FB_SYNC_VERT_HIGH_ACT)) + cfg |= JZ_LCD_CFG_VSYNC_ACTIVE_LOW; + + if (pdata->pixclk_falling_edge) + cfg |= JZ_LCD_CFG_PCLK_FALLING_EDGE; + + if (pdata->date_enable_active_low) + cfg |= JZ_LCD_CFG_DE_ACTIVE_LOW; + + if (pdata->lcd_type == JZ_LCD_TYPE_GENERIC_18_BIT) + cfg |= JZ_LCD_CFG_18_BIT; + + if (mode->pixclock) { + rate = PICOS2KHZ(mode->pixclock) * 1000; + mode->refresh = rate / vt / ht; + } else { + if (pdata->lcd_type == JZ_LCD_TYPE_8BIT_SERIAL) + rate = mode->refresh * (vt + 2 * mode->xres) * ht; + else + rate = mode->refresh * vt * ht; + + mode->pixclock = KHZ2PICOS(rate / 1000); + } + + mutex_lock(&jzfb->lock); + if (!jzfb->is_enabled) + clk_enable(jzfb->ldclk); + else + ctrl |= JZ_LCD_CTRL_ENABLE; + + switch (pdata->lcd_type) { + case JZ_LCD_TYPE_SPECIAL_TFT_1: + case JZ_LCD_TYPE_SPECIAL_TFT_2: + case JZ_LCD_TYPE_SPECIAL_TFT_3: + writel(pdata->special_tft_config.spl, jzfb->base + JZ_REG_LCD_SPL); + writel(pdata->special_tft_config.cls, jzfb->base + JZ_REG_LCD_CLS); + writel(pdata->special_tft_config.ps, jzfb->base + JZ_REG_LCD_PS); + writel(pdata->special_tft_config.ps, jzfb->base + JZ_REG_LCD_REV); + break; + default: + cfg |= JZ_LCD_CFG_PS_DISABLE; + cfg |= JZ_LCD_CFG_CLS_DISABLE; + cfg |= JZ_LCD_CFG_SPL_DISABLE; + cfg |= JZ_LCD_CFG_REV_DISABLE; 
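/*
 * (For reference on the timing arithmetic earlier in jzfb_set_par(): ht and
 * vt are the full line and frame totals, i.e. sync length + margins +
 * active area, so for the parallel panel types the quantities relate as
 *
 *	rate [Hz] = refresh [Hz] * ht * vt
 *	pixclock  = KHZ2PICOS(rate / 1000)	// period in picoseconds
 *
 * which is what the pixclock/refresh fixup above computes before the pixel
 * clock is programmed to 'rate' further down.)
 */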
+ break; + } + + writel(mode->hsync_len, jzfb->base + JZ_REG_LCD_HSYNC); + writel(mode->vsync_len, jzfb->base + JZ_REG_LCD_VSYNC); + + writel((ht << 16) | vt, jzfb->base + JZ_REG_LCD_VAT); + + writel((hds << 16) | hde, jzfb->base + JZ_REG_LCD_DAH); + writel((vds << 16) | vde, jzfb->base + JZ_REG_LCD_DAV); + + writel(cfg, jzfb->base + JZ_REG_LCD_CFG); + + writel(ctrl, jzfb->base + JZ_REG_LCD_CTRL); + + if (!jzfb->is_enabled) + clk_disable(jzfb->ldclk); + + mutex_unlock(&jzfb->lock); + + clk_set_rate(jzfb->lpclk, rate); + clk_set_rate(jzfb->ldclk, rate * 3); + + return 0; +} + +static void jzfb_enable(struct jzfb *jzfb) +{ + uint32_t ctrl; + + clk_enable(jzfb->ldclk); + + jz_gpio_bulk_resume(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb)); + jz_gpio_bulk_resume(jz_lcd_data_pins, jzfb_num_data_pins(jzfb)); + + writel(0, jzfb->base + JZ_REG_LCD_STATE); + + writel(jzfb->framedesc->next, jzfb->base + JZ_REG_LCD_DA0); + + ctrl = readl(jzfb->base + JZ_REG_LCD_CTRL); + ctrl |= JZ_LCD_CTRL_ENABLE; + ctrl &= ~JZ_LCD_CTRL_DISABLE; + writel(ctrl, jzfb->base + JZ_REG_LCD_CTRL); +} + +static void jzfb_disable(struct jzfb *jzfb) +{ + uint32_t ctrl; + + ctrl = readl(jzfb->base + JZ_REG_LCD_CTRL); + ctrl |= JZ_LCD_CTRL_DISABLE; + writel(ctrl, jzfb->base + JZ_REG_LCD_CTRL); + do { + ctrl = readl(jzfb->base + JZ_REG_LCD_STATE); + } while (!(ctrl & JZ_LCD_STATE_DISABLED)); + + jz_gpio_bulk_suspend(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb)); + jz_gpio_bulk_suspend(jz_lcd_data_pins, jzfb_num_data_pins(jzfb)); + + clk_disable(jzfb->ldclk); +} + +static int jzfb_blank(int blank_mode, struct fb_info *info) +{ + struct jzfb *jzfb = info->par; + + switch (blank_mode) { + case FB_BLANK_UNBLANK: + mutex_lock(&jzfb->lock); + if (jzfb->is_enabled) { + mutex_unlock(&jzfb->lock); + return 0; + } + + jzfb_enable(jzfb); + jzfb->is_enabled = 1; + + mutex_unlock(&jzfb->lock); + break; + default: + mutex_lock(&jzfb->lock); + if (!jzfb->is_enabled) { + mutex_unlock(&jzfb->lock); + return 0; + } + + jzfb_disable(jzfb); + jzfb->is_enabled = 0; + + mutex_unlock(&jzfb->lock); + break; + } + + return 0; +} + +static int jzfb_alloc_devmem(struct jzfb *jzfb) +{ + int max_videosize = 0; + struct fb_videomode *mode = jzfb->pdata->modes; + void *page; + int i; + + for (i = 0; i < jzfb->pdata->num_modes; ++mode, ++i) { + if (max_videosize < mode->xres * mode->yres) + max_videosize = mode->xres * mode->yres; + } + + max_videosize *= jzfb_get_controller_bpp(jzfb) >> 3; + + jzfb->framedesc = dma_alloc_coherent(&jzfb->pdev->dev, + sizeof(*jzfb->framedesc), + &jzfb->framedesc_phys, GFP_KERNEL); + + if (!jzfb->framedesc) + return -ENOMEM; + + jzfb->vidmem_size = PAGE_ALIGN(max_videosize); + jzfb->vidmem = dma_alloc_coherent(&jzfb->pdev->dev, + jzfb->vidmem_size, + &jzfb->vidmem_phys, GFP_KERNEL); + + if (!jzfb->vidmem) + goto err_free_framedesc; + + for (page = jzfb->vidmem; + page < jzfb->vidmem + PAGE_ALIGN(jzfb->vidmem_size); + page += PAGE_SIZE) { + SetPageReserved(virt_to_page(page)); + } + + jzfb->framedesc->next = jzfb->framedesc_phys; + jzfb->framedesc->addr = jzfb->vidmem_phys; + jzfb->framedesc->id = 0xdeafbead; + jzfb->framedesc->cmd = 0; + jzfb->framedesc->cmd |= max_videosize / 4; + + return 0; + +err_free_framedesc: + dma_free_coherent(&jzfb->pdev->dev, sizeof(*jzfb->framedesc), + jzfb->framedesc, jzfb->framedesc_phys); + return -ENOMEM; +} + +static void jzfb_free_devmem(struct jzfb *jzfb) +{ + dma_free_coherent(&jzfb->pdev->dev, jzfb->vidmem_size, + jzfb->vidmem, jzfb->vidmem_phys); + dma_free_coherent(&jzfb->pdev->dev, 
sizeof(*jzfb->framedesc), + jzfb->framedesc, jzfb->framedesc_phys); +} + +static struct fb_ops jzfb_ops = { + .owner = THIS_MODULE, + .fb_check_var = jzfb_check_var, + .fb_set_par = jzfb_set_par, + .fb_blank = jzfb_blank, + .fb_fillrect = sys_fillrect, + .fb_copyarea = sys_copyarea, + .fb_imageblit = sys_imageblit, + .fb_setcolreg = jzfb_setcolreg, +}; + +static int __devinit jzfb_probe(struct platform_device *pdev) +{ + int ret; + struct jzfb *jzfb; + struct fb_info *fb; + struct jz4740_fb_platform_data *pdata = pdev->dev.platform_data; + struct resource *mem; + + if (!pdata) { + dev_err(&pdev->dev, "Missing platform data\n"); + return -ENXIO; + } + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(&pdev->dev, "Failed to get register memory resource\n"); + return -ENXIO; + } + + mem = request_mem_region(mem->start, resource_size(mem), pdev->name); + if (!mem) { + dev_err(&pdev->dev, "Failed to request register memory region\n"); + return -EBUSY; + } + + fb = framebuffer_alloc(sizeof(struct jzfb), &pdev->dev); + if (!fb) { + dev_err(&pdev->dev, "Failed to allocate framebuffer device\n"); + ret = -ENOMEM; + goto err_release_mem_region; + } + + fb->fbops = &jzfb_ops; + fb->flags = FBINFO_DEFAULT; + + jzfb = fb->par; + jzfb->pdev = pdev; + jzfb->pdata = pdata; + jzfb->mem = mem; + + jzfb->ldclk = clk_get(&pdev->dev, "lcd"); + if (IS_ERR(jzfb->ldclk)) { + ret = PTR_ERR(jzfb->ldclk); + dev_err(&pdev->dev, "Failed to get lcd clock: %d\n", ret); + goto err_framebuffer_release; + } + + jzfb->lpclk = clk_get(&pdev->dev, "lcd_pclk"); + if (IS_ERR(jzfb->lpclk)) { + ret = PTR_ERR(jzfb->lpclk); + dev_err(&pdev->dev, "Failed to get lcd pixel clock: %d\n", ret); + goto err_put_ldclk; + } + + jzfb->base = ioremap(mem->start, resource_size(mem)); + if (!jzfb->base) { + dev_err(&pdev->dev, "Failed to ioremap register memory region\n"); + ret = -EBUSY; + goto err_put_lpclk; + } + + platform_set_drvdata(pdev, jzfb); + + mutex_init(&jzfb->lock); + + fb_videomode_to_modelist(pdata->modes, pdata->num_modes, + &fb->modelist); + fb_videomode_to_var(&fb->var, pdata->modes); + fb->var.bits_per_pixel = pdata->bpp; + jzfb_check_var(&fb->var, fb); + + ret = jzfb_alloc_devmem(jzfb); + if (ret) { + dev_err(&pdev->dev, "Failed to allocate video memory\n"); + goto err_iounmap; + } + + fb->fix = jzfb_fix; + fb->fix.line_length = fb->var.bits_per_pixel * fb->var.xres / 8; + fb->fix.mmio_start = mem->start; + fb->fix.mmio_len = resource_size(mem); + fb->fix.smem_start = jzfb->vidmem_phys; + fb->fix.smem_len = fb->fix.line_length * fb->var.yres; + fb->screen_base = jzfb->vidmem; + fb->pseudo_palette = jzfb->pseudo_palette; + + fb_alloc_cmap(&fb->cmap, 256, 0); + + clk_enable(jzfb->ldclk); + jzfb->is_enabled = 1; + + writel(jzfb->framedesc->next, jzfb->base + JZ_REG_LCD_DA0); + + fb->mode = NULL; + jzfb_set_par(fb); + + jz_gpio_bulk_request(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb)); + jz_gpio_bulk_request(jz_lcd_data_pins, jzfb_num_data_pins(jzfb)); + + ret = register_framebuffer(fb); + if (ret) { + dev_err(&pdev->dev, "Failed to register framebuffer: %d\n", ret); + goto err_free_devmem; + } + + jzfb->fb = fb; + + return 0; + +err_free_devmem: + jz_gpio_bulk_free(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb)); + jz_gpio_bulk_free(jz_lcd_data_pins, jzfb_num_data_pins(jzfb)); + + fb_dealloc_cmap(&fb->cmap); + jzfb_free_devmem(jzfb); +err_iounmap: + iounmap(jzfb->base); +err_put_lpclk: + clk_put(jzfb->lpclk); +err_put_ldclk: + clk_put(jzfb->ldclk); +err_framebuffer_release: + framebuffer_release(fb); 
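/*
 * The error labels in jzfb_probe() above and below unwind in the reverse
 * order of acquisition: GPIO pins, cmap and video memory, the ioremap()ed
 * registers, the pixel clock then the LCD clock, the framebuffer
 * allocation, and finally the register memory region requested at the top
 * of the probe routine.
 */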
+err_release_mem_region: + release_mem_region(mem->start, resource_size(mem)); + return ret; +} + +static int __devexit jzfb_remove(struct platform_device *pdev) +{ + struct jzfb *jzfb = platform_get_drvdata(pdev); + + jzfb_blank(FB_BLANK_POWERDOWN, jzfb->fb); + + jz_gpio_bulk_free(jz_lcd_ctrl_pins, jzfb_num_ctrl_pins(jzfb)); + jz_gpio_bulk_free(jz_lcd_data_pins, jzfb_num_data_pins(jzfb)); + + iounmap(jzfb->base); + release_mem_region(jzfb->mem->start, resource_size(jzfb->mem)); + + fb_dealloc_cmap(&jzfb->fb->cmap); + jzfb_free_devmem(jzfb); + + platform_set_drvdata(pdev, NULL); + + clk_put(jzfb->lpclk); + clk_put(jzfb->ldclk); + + framebuffer_release(jzfb->fb); + + return 0; +} + +#ifdef CONFIG_PM + +static int jzfb_suspend(struct device *dev) +{ + struct jzfb *jzfb = dev_get_drvdata(dev); + + acquire_console_sem(); + fb_set_suspend(jzfb->fb, 1); + release_console_sem(); + + mutex_lock(&jzfb->lock); + if (jzfb->is_enabled) + jzfb_disable(jzfb); + mutex_unlock(&jzfb->lock); + + return 0; +} + +static int jzfb_resume(struct device *dev) +{ + struct jzfb *jzfb = dev_get_drvdata(dev); + clk_enable(jzfb->ldclk); + + mutex_lock(&jzfb->lock); + if (jzfb->is_enabled) + jzfb_enable(jzfb); + mutex_unlock(&jzfb->lock); + + acquire_console_sem(); + fb_set_suspend(jzfb->fb, 0); + release_console_sem(); + + return 0; +} + +static const struct dev_pm_ops jzfb_pm_ops = { + .suspend = jzfb_suspend, + .resume = jzfb_resume, + .poweroff = jzfb_suspend, + .restore = jzfb_resume, +}; + +#define JZFB_PM_OPS (&jzfb_pm_ops) + +#else +#define JZFB_PM_OPS NULL +#endif + +static struct platform_driver jzfb_driver = { + .probe = jzfb_probe, + .remove = __devexit_p(jzfb_remove), + .driver = { + .name = "jz4740-fb", + .pm = JZFB_PM_OPS, + }, +}; + +static int __init jzfb_init(void) +{ + return platform_driver_register(&jzfb_driver); +} +module_init(jzfb_init); + +static void __exit jzfb_exit(void) +{ + platform_driver_unregister(&jzfb_driver); +} +module_exit(jzfb_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); +MODULE_DESCRIPTION("JZ4740 SoC LCD framebuffer driver"); +MODULE_ALIAS("platform:jz4740-fb"); diff --git a/drivers/video/leo.c b/drivers/video/leo.c index 9e8bf7d5e24..ad677637ffb 100644 --- a/drivers/video/leo.c +++ b/drivers/video/leo.c @@ -677,12 +677,12 @@ static int __init leo_init(void) if (fb_get_options("leofb", NULL)) return -ENODEV; - return of_register_driver(&leo_driver, &of_bus_type); + return of_register_platform_driver(&leo_driver); } static void __exit leo_exit(void) { - of_unregister_driver(&leo_driver); + of_unregister_platform_driver(&leo_driver); } module_init(leo_init); diff --git a/drivers/video/offb.c b/drivers/video/offb.c index 46dda7d8aae..cb163a5397b 100644 --- a/drivers/video/offb.c +++ b/drivers/video/offb.c @@ -19,13 +19,14 @@ #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/delay.h> +#include <linux/of.h> +#include <linux/of_address.h> #include <linux/interrupt.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/pci.h> #include <asm/io.h> -#include <asm/prom.h> #ifdef CONFIG_PPC64 #include <asm/pci-bridge.h> diff --git a/drivers/video/p9100.c b/drivers/video/p9100.c index 6552751e81a..688b055abab 100644 --- a/drivers/video/p9100.c +++ b/drivers/video/p9100.c @@ -367,12 +367,12 @@ static int __init p9100_init(void) if (fb_get_options("p9100fb", NULL)) return -ENODEV; - return of_register_driver(&p9100_driver, &of_bus_type); + return of_register_platform_driver(&p9100_driver); } static 
void __exit p9100_exit(void) { - of_unregister_driver(&p9100_driver); + of_unregister_platform_driver(&p9100_driver); } module_init(p9100_init); diff --git a/drivers/video/sh_mipi_dsi.c b/drivers/video/sh_mipi_dsi.c new file mode 100644 index 00000000000..5699ce0c178 --- /dev/null +++ b/drivers/video/sh_mipi_dsi.c @@ -0,0 +1,505 @@ +/* + * Renesas SH-mobile MIPI DSI support + * + * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de> + * + * This is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/types.h> + +#include <video/mipi_display.h> +#include <video/sh_mipi_dsi.h> +#include <video/sh_mobile_lcdc.h> + +#define CMTSRTCTR 0x80d0 +#define CMTSRTREQ 0x8070 + +#define DSIINTE 0x0060 + +/* E.g., sh7372 has 2 MIPI-DSIs - one for each LCDC */ +#define MAX_SH_MIPI_DSI 2 + +struct sh_mipi { + void __iomem *base; + struct clk *dsit_clk; + struct clk *dsip_clk; +}; + +static struct sh_mipi *mipi_dsi[MAX_SH_MIPI_DSI]; + +/* Protect the above array */ +static DEFINE_MUTEX(array_lock); + +static struct sh_mipi *sh_mipi_by_handle(int handle) +{ + if (handle >= ARRAY_SIZE(mipi_dsi) || handle < 0) + return NULL; + + return mipi_dsi[handle]; +} + +static int sh_mipi_send_short(struct sh_mipi *mipi, u8 dsi_cmd, + u8 cmd, u8 param) +{ + u32 data = (dsi_cmd << 24) | (cmd << 16) | (param << 8); + int cnt = 100; + + /* transmit a short packet to LCD panel */ + iowrite32(1 | data, mipi->base + 0x80d0); /* CMTSRTCTR */ + iowrite32(1, mipi->base + 0x8070); /* CMTSRTREQ */ + + while ((ioread32(mipi->base + 0x8070) & 1) && --cnt) + udelay(1); + + return cnt ? 0 : -ETIMEDOUT; +} + +#define LCD_CHAN2MIPI(c) ((c) < LCDC_CHAN_MAINLCD || (c) > LCDC_CHAN_SUBLCD ? 
\ + -EINVAL : (c) - 1) + +static int sh_mipi_dcs(int handle, u8 cmd) +{ + struct sh_mipi *mipi = sh_mipi_by_handle(LCD_CHAN2MIPI(handle)); + if (!mipi) + return -ENODEV; + return sh_mipi_send_short(mipi, MIPI_DSI_DCS_SHORT_WRITE, cmd, 0); +} + +static int sh_mipi_dcs_param(int handle, u8 cmd, u8 param) +{ + struct sh_mipi *mipi = sh_mipi_by_handle(LCD_CHAN2MIPI(handle)); + if (!mipi) + return -ENODEV; + return sh_mipi_send_short(mipi, MIPI_DSI_DCS_SHORT_WRITE_PARAM, cmd, + param); +} + +static void sh_mipi_dsi_enable(struct sh_mipi *mipi, bool enable) +{ + /* + * enable LCDC data tx, transition to LPS after completion of each HS + * packet + */ + iowrite32(0x00000002 | enable, mipi->base + 0x8000); /* DTCTR */ +} + +static void sh_mipi_shutdown(struct platform_device *pdev) +{ + struct sh_mipi *mipi = platform_get_drvdata(pdev); + + sh_mipi_dsi_enable(mipi, false); +} + +static void mipi_display_on(void *arg, struct fb_info *info) +{ + struct sh_mipi *mipi = arg; + + sh_mipi_dsi_enable(mipi, true); +} + +static void mipi_display_off(void *arg) +{ + struct sh_mipi *mipi = arg; + + sh_mipi_dsi_enable(mipi, false); +} + +static int __init sh_mipi_setup(struct sh_mipi *mipi, + struct sh_mipi_dsi_info *pdata) +{ + void __iomem *base = mipi->base; + struct sh_mobile_lcdc_chan_cfg *ch = pdata->lcd_chan; + u32 pctype, datatype, pixfmt; + u32 linelength; + bool yuv; + + /* Select data format */ + switch (pdata->data_format) { + case MIPI_RGB888: + pctype = 0; + datatype = MIPI_DSI_PACKED_PIXEL_STREAM_24; + pixfmt = MIPI_DCS_PIXEL_FMT_24BIT; + linelength = ch->lcd_cfg.xres * 3; + yuv = false; + break; + case MIPI_RGB565: + pctype = 1; + datatype = MIPI_DSI_PACKED_PIXEL_STREAM_16; + pixfmt = MIPI_DCS_PIXEL_FMT_16BIT; + linelength = ch->lcd_cfg.xres * 2; + yuv = false; + break; + case MIPI_RGB666_LP: + pctype = 2; + datatype = MIPI_DSI_PIXEL_STREAM_3BYTE_18; + pixfmt = MIPI_DCS_PIXEL_FMT_24BIT; + linelength = ch->lcd_cfg.xres * 3; + yuv = false; + break; + case MIPI_RGB666: + pctype = 3; + datatype = MIPI_DSI_PACKED_PIXEL_STREAM_18; + pixfmt = MIPI_DCS_PIXEL_FMT_18BIT; + linelength = (ch->lcd_cfg.xres * 18 + 7) / 8; + yuv = false; + break; + case MIPI_BGR888: + pctype = 8; + datatype = MIPI_DSI_PACKED_PIXEL_STREAM_24; + pixfmt = MIPI_DCS_PIXEL_FMT_24BIT; + linelength = ch->lcd_cfg.xres * 3; + yuv = false; + break; + case MIPI_BGR565: + pctype = 9; + datatype = MIPI_DSI_PACKED_PIXEL_STREAM_16; + pixfmt = MIPI_DCS_PIXEL_FMT_16BIT; + linelength = ch->lcd_cfg.xres * 2; + yuv = false; + break; + case MIPI_BGR666_LP: + pctype = 0xa; + datatype = MIPI_DSI_PIXEL_STREAM_3BYTE_18; + pixfmt = MIPI_DCS_PIXEL_FMT_24BIT; + linelength = ch->lcd_cfg.xres * 3; + yuv = false; + break; + case MIPI_BGR666: + pctype = 0xb; + datatype = MIPI_DSI_PACKED_PIXEL_STREAM_18; + pixfmt = MIPI_DCS_PIXEL_FMT_18BIT; + linelength = (ch->lcd_cfg.xres * 18 + 7) / 8; + yuv = false; + break; + case MIPI_YUYV: + pctype = 4; + datatype = MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16; + pixfmt = MIPI_DCS_PIXEL_FMT_16BIT; + linelength = ch->lcd_cfg.xres * 2; + yuv = true; + break; + case MIPI_UYVY: + pctype = 5; + datatype = MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16; + pixfmt = MIPI_DCS_PIXEL_FMT_16BIT; + linelength = ch->lcd_cfg.xres * 2; + yuv = true; + break; + case MIPI_YUV420_L: + pctype = 6; + datatype = MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR12; + pixfmt = MIPI_DCS_PIXEL_FMT_12BIT; + linelength = (ch->lcd_cfg.xres * 12 + 7) / 8; + yuv = true; + break; + case MIPI_YUV420: + pctype = 7; + datatype = MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR12; + pixfmt = 
MIPI_DCS_PIXEL_FMT_12BIT; + /* Length of U/V line */ + linelength = (ch->lcd_cfg.xres + 1) / 2; + yuv = true; + break; + default: + return -EINVAL; + } + + if ((yuv && ch->interface_type != YUV422) || + (!yuv && ch->interface_type != RGB24)) + return -EINVAL; + + /* reset DSI link */ + iowrite32(0x00000001, base); /* SYSCTRL */ + /* Hold reset for 100 cycles of the slowest of bus, HS byte and LP clock */ + udelay(50); + iowrite32(0x00000000, base); /* SYSCTRL */ + + /* setup DSI link */ + + /* + * Default = ULPS enable | + * Contention detection enabled | + * EoT packet transmission enable | + * CRC check enable | + * ECC check enable + * additionally enable first two lanes + */ + iowrite32(0x00003703, base + 0x04); /* SYSCONF */ + /* + * T_wakeup = 0x7000 + * T_hs-trail = 3 + * T_hs-prepare = 3 + * T_clk-trail = 3 + * T_clk-prepare = 2 + */ + iowrite32(0x70003332, base + 0x08); /* TIMSET */ + /* no responses requested */ + iowrite32(0x00000000, base + 0x18); /* RESREQSET0 */ + /* request response to packets of type 0x28 */ + iowrite32(0x00000100, base + 0x1c); /* RESREQSET1 */ + /* High-speed transmission timeout, default 0xffffffff */ + iowrite32(0x0fffffff, base + 0x20); /* HSTTOVSET */ + /* LP reception timeout, default 0xffffffff */ + iowrite32(0x0fffffff, base + 0x24); /* LPRTOVSET */ + /* Turn-around timeout, default 0xffffffff */ + iowrite32(0x0fffffff, base + 0x28); /* TATOVSET */ + /* Peripheral reset timeout, default 0xffffffff */ + iowrite32(0x0fffffff, base + 0x2c); /* PRTOVSET */ + /* Enable timeout counters */ + iowrite32(0x00000f00, base + 0x30); /* DSICTRL */ + /* Interrupts not used, disable all */ + iowrite32(0, base + DSIINTE); + /* DSI-Tx bias on */ + iowrite32(0x00000001, base + 0x70); /* PHYCTRL */ + udelay(200); + /* Deassert resets, power on, set multiplier */ + iowrite32(0x03070b01, base + 0x70); /* PHYCTRL */ + + /* setup l-bridge */ + + /* + * Enable transmission of all packets, + * transmit LPS after each HS packet completion + */ + iowrite32(0x00000006, base + 0x8000); /* DTCTR */ + /* VSYNC width = 2 (<< 17) */ + iowrite32(0x00040000 | (pctype << 12) | datatype, base + 0x8020); /* VMCTR1 */ + /* + * Non-burst mode with sync pulses: VSE and HSE are output, + * HSA period allowed, no commands in LP + */ + iowrite32(0x00e00000, base + 0x8024); /* VMCTR2 */ + /* + * 0x660 = 1632 bytes per line (RGB24, 544 pixels: see + * sh_mobile_lcdc_info.ch[0].lcd_cfg.xres), HSALEN = 1 - default + * (unused, since VMCTR2[HSABM] = 0) + */ + iowrite32(1 | (linelength << 16), base + 0x8028); /* VMLEN1 */ + + msleep(5); + + /* setup LCD panel */ + + /* cf. drivers/video/omap/lcd_mipid.c */ + sh_mipi_dcs(ch->chan, MIPI_DCS_EXIT_SLEEP_MODE); + msleep(120); + /* + * [7] - Page Address Mode + * [6] - Column Address Mode + * [5] - Page / Column Address Mode + * [4] - Display Device Line Refresh Order + * [3] - RGB/BGR Order + * [2] - Display Data Latch Data Order + * [1] - Flip Horizontal + * [0] - Flip Vertical + */ + sh_mipi_dcs_param(ch->chan, MIPI_DCS_SET_ADDRESS_MODE, 0x00); + /* cf. 
set_data_lines() */ + sh_mipi_dcs_param(ch->chan, MIPI_DCS_SET_PIXEL_FORMAT, + pixfmt << 4); + sh_mipi_dcs(ch->chan, MIPI_DCS_SET_DISPLAY_ON); + + return 0; +} + +static int __init sh_mipi_probe(struct platform_device *pdev) +{ + struct sh_mipi *mipi; + struct sh_mipi_dsi_info *pdata = pdev->dev.platform_data; + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + unsigned long rate, f_current; + int idx = pdev->id, ret; + char dsip_clk[] = "dsi.p_clk"; + + if (!res || idx >= ARRAY_SIZE(mipi_dsi) || !pdata) + return -ENODEV; + + mutex_lock(&array_lock); + if (idx < 0) + for (idx = 0; idx < ARRAY_SIZE(mipi_dsi) && mipi_dsi[idx]; idx++) + ; + + if (idx == ARRAY_SIZE(mipi_dsi)) { + ret = -EBUSY; + goto efindslot; + } + + mipi = kzalloc(sizeof(*mipi), GFP_KERNEL); + if (!mipi) { + ret = -ENOMEM; + goto ealloc; + } + + if (!request_mem_region(res->start, resource_size(res), pdev->name)) { + dev_err(&pdev->dev, "MIPI register region already claimed\n"); + ret = -EBUSY; + goto ereqreg; + } + + mipi->base = ioremap(res->start, resource_size(res)); + if (!mipi->base) { + ret = -ENOMEM; + goto emap; + } + + mipi->dsit_clk = clk_get(&pdev->dev, "dsit_clk"); + if (IS_ERR(mipi->dsit_clk)) { + ret = PTR_ERR(mipi->dsit_clk); + goto eclktget; + } + + f_current = clk_get_rate(mipi->dsit_clk); + /* 80MHz required by the datasheet */ + rate = clk_round_rate(mipi->dsit_clk, 80000000); + if (rate > 0 && rate != f_current) + ret = clk_set_rate(mipi->dsit_clk, rate); + else + ret = rate; + if (ret < 0) + goto esettrate; + + dev_dbg(&pdev->dev, "DSI-T clk %lu -> %lu\n", f_current, rate); + + sprintf(dsip_clk, "dsi%1.1dp_clk", idx); + mipi->dsip_clk = clk_get(&pdev->dev, dsip_clk); + if (IS_ERR(mipi->dsip_clk)) { + ret = PTR_ERR(mipi->dsip_clk); + goto eclkpget; + } + + f_current = clk_get_rate(mipi->dsip_clk); + /* Between 10 and 50MHz */ + rate = clk_round_rate(mipi->dsip_clk, 24000000); + if (rate > 0 && rate != f_current) + ret = clk_set_rate(mipi->dsip_clk, rate); + else + ret = rate; + if (ret < 0) + goto esetprate; + + dev_dbg(&pdev->dev, "DSI-P clk %lu -> %lu\n", f_current, rate); + + msleep(10); + + ret = clk_enable(mipi->dsit_clk); + if (ret < 0) + goto eclkton; + + ret = clk_enable(mipi->dsip_clk); + if (ret < 0) + goto eclkpon; + + mipi_dsi[idx] = mipi; + + ret = sh_mipi_setup(mipi, pdata); + if (ret < 0) + goto emipisetup; + + mutex_unlock(&array_lock); + platform_set_drvdata(pdev, mipi); + + /* Set up LCDC callbacks */ + pdata->lcd_chan->board_cfg.board_data = mipi; + pdata->lcd_chan->board_cfg.display_on = mipi_display_on; + pdata->lcd_chan->board_cfg.display_off = mipi_display_off; + + return 0; + +emipisetup: + mipi_dsi[idx] = NULL; + clk_disable(mipi->dsip_clk); +eclkpon: + clk_disable(mipi->dsit_clk); +eclkton: +esetprate: + clk_put(mipi->dsip_clk); +eclkpget: +esettrate: + clk_put(mipi->dsit_clk); +eclktget: + iounmap(mipi->base); +emap: + release_mem_region(res->start, resource_size(res)); +ereqreg: + kfree(mipi); +ealloc: +efindslot: + mutex_unlock(&array_lock); + + return ret; +} + +static int __exit sh_mipi_remove(struct platform_device *pdev) +{ + struct sh_mipi_dsi_info *pdata = pdev->dev.platform_data; + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + struct sh_mipi *mipi = platform_get_drvdata(pdev); + int i, ret; + + mutex_lock(&array_lock); + + for (i = 0; i < ARRAY_SIZE(mipi_dsi) && mipi_dsi[i] != mipi; i++) + ; + + if (i == ARRAY_SIZE(mipi_dsi)) { + ret = -EINVAL; + } else { + ret = 0; + mipi_dsi[i] = NULL; + } + + 
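+	/* If the device was found, its slot was cleared while holding array_lock; the teardown below runs unlocked. */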
mutex_unlock(&array_lock); + + if (ret < 0) + return ret; + + pdata->lcd_chan->board_cfg.display_on = NULL; + pdata->lcd_chan->board_cfg.display_off = NULL; + pdata->lcd_chan->board_cfg.board_data = NULL; + + clk_disable(mipi->dsip_clk); + clk_disable(mipi->dsit_clk); + clk_put(mipi->dsit_clk); + clk_put(mipi->dsip_clk); + iounmap(mipi->base); + if (res) + release_mem_region(res->start, resource_size(res)); + platform_set_drvdata(pdev, NULL); + kfree(mipi); + + return 0; +} + +static struct platform_driver sh_mipi_driver = { + .remove = __exit_p(sh_mipi_remove), + .shutdown = sh_mipi_shutdown, + .driver = { + .name = "sh-mipi-dsi", + }, +}; + +static int __init sh_mipi_init(void) +{ + return platform_driver_probe(&sh_mipi_driver, sh_mipi_probe); +} +module_init(sh_mipi_init); + +static void __exit sh_mipi_exit(void) +{ + platform_driver_unregister(&sh_mipi_driver); +} +module_exit(sh_mipi_exit); + +MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>"); +MODULE_DESCRIPTION("SuperH / ARM-shmobile MIPI DSI driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c new file mode 100644 index 00000000000..2fde08cc66b --- /dev/null +++ b/drivers/video/sh_mobile_hdmi.c @@ -0,0 +1,1028 @@ +/* + * SH-Mobile High-Definition Multimedia Interface (HDMI) driver + * for SLISHDMI13T and SLIPHDMIT IP cores + * + * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/clk.h> +#include <linux/console.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +#include <video/sh_mobile_hdmi.h> +#include <video/sh_mobile_lcdc.h> + +#define HDMI_SYSTEM_CTRL 0x00 /* System control */ +#define HDMI_L_R_DATA_SWAP_CTRL_RPKT 0x01 /* L/R data swap control, + bits 19..16 of 20-bit N for Audio Clock Regeneration packet */ +#define HDMI_20_BIT_N_FOR_AUDIO_RPKT_15_8 0x02 /* bits 15..8 of 20-bit N for Audio Clock Regeneration packet */ +#define HDMI_20_BIT_N_FOR_AUDIO_RPKT_7_0 0x03 /* bits 7..0 of 20-bit N for Audio Clock Regeneration packet */ +#define HDMI_SPDIF_AUDIO_SAMP_FREQ_CTS 0x04 /* SPDIF audio sampling frequency, + bits 19..16 of Internal CTS */ +#define HDMI_INTERNAL_CTS_15_8 0x05 /* bits 15..8 of Internal CTS */ +#define HDMI_INTERNAL_CTS_7_0 0x06 /* bits 7..0 of Internal CTS */ +#define HDMI_EXTERNAL_CTS_19_16 0x07 /* External CTS */ +#define HDMI_EXTERNAL_CTS_15_8 0x08 /* External CTS */ +#define HDMI_EXTERNAL_CTS_7_0 0x09 /* External CTS */ +#define HDMI_AUDIO_SETTING_1 0x0A /* Audio setting.1 */ +#define HDMI_AUDIO_SETTING_2 0x0B /* Audio setting.2 */ +#define HDMI_I2S_AUDIO_SET 0x0C /* I2S audio setting */ +#define HDMI_DSD_AUDIO_SET 0x0D /* DSD audio setting */ +#define HDMI_DEBUG_MONITOR_1 0x0E /* Debug monitor.1 */ +#define HDMI_DEBUG_MONITOR_2 0x0F /* Debug monitor.2 */ +#define HDMI_I2S_INPUT_PIN_SWAP 0x10 /* I2S input pin swap */ +#define HDMI_AUDIO_STATUS_BITS_SETTING_1 0x11 /* Audio status bits setting.1 */ +#define HDMI_AUDIO_STATUS_BITS_SETTING_2 0x12 /* Audio status bits setting.2 */ +#define HDMI_CATEGORY_CODE 0x13 /* Category code */ +#define 
HDMI_SOURCE_NUM_AUDIO_WORD_LEN 0x14 /* Source number/Audio word length */ +#define HDMI_AUDIO_VIDEO_SETTING_1 0x15 /* Audio/Video setting.1 */ +#define HDMI_VIDEO_SETTING_1 0x16 /* Video setting.1 */ +#define HDMI_DEEP_COLOR_MODES 0x17 /* Deep Color Modes */ + +/* 12 16- and 10-bit Color space conversion parameters: 0x18..0x2f */ +#define HDMI_COLOR_SPACE_CONVERSION_PARAMETERS 0x18 + +#define HDMI_EXTERNAL_VIDEO_PARAM_SETTINGS 0x30 /* External video parameter settings */ +#define HDMI_EXTERNAL_H_TOTAL_7_0 0x31 /* External horizontal total (LSB) */ +#define HDMI_EXTERNAL_H_TOTAL_11_8 0x32 /* External horizontal total (MSB) */ +#define HDMI_EXTERNAL_H_BLANK_7_0 0x33 /* External horizontal blank (LSB) */ +#define HDMI_EXTERNAL_H_BLANK_9_8 0x34 /* External horizontal blank (MSB) */ +#define HDMI_EXTERNAL_H_DELAY_7_0 0x35 /* External horizontal delay (LSB) */ +#define HDMI_EXTERNAL_H_DELAY_9_8 0x36 /* External horizontal delay (MSB) */ +#define HDMI_EXTERNAL_H_DURATION_7_0 0x37 /* External horizontal duration (LSB) */ +#define HDMI_EXTERNAL_H_DURATION_9_8 0x38 /* External horizontal duration (MSB) */ +#define HDMI_EXTERNAL_V_TOTAL_7_0 0x39 /* External vertical total (LSB) */ +#define HDMI_EXTERNAL_V_TOTAL_9_8 0x3A /* External vertical total (MSB) */ +#define HDMI_AUDIO_VIDEO_SETTING_2 0x3B /* Audio/Video setting.2 */ +#define HDMI_EXTERNAL_V_BLANK 0x3D /* External vertical blank */ +#define HDMI_EXTERNAL_V_DELAY 0x3E /* External vertical delay */ +#define HDMI_EXTERNAL_V_DURATION 0x3F /* External vertical duration */ +#define HDMI_CTRL_PKT_MANUAL_SEND_CONTROL 0x40 /* Control packet manual send control */ +#define HDMI_CTRL_PKT_AUTO_SEND 0x41 /* Control packet auto send with VSYNC control */ +#define HDMI_AUTO_CHECKSUM_OPTION 0x42 /* Auto checksum option */ +#define HDMI_VIDEO_SETTING_2 0x45 /* Video setting.2 */ +#define HDMI_OUTPUT_OPTION 0x46 /* Output option */ +#define HDMI_SLIPHDMIT_PARAM_OPTION 0x51 /* SLIPHDMIT parameter option */ +#define HDMI_HSYNC_PMENT_AT_EMB_7_0 0x52 /* HSYNC placement at embedded sync (LSB) */ +#define HDMI_HSYNC_PMENT_AT_EMB_15_8 0x53 /* HSYNC placement at embedded sync (MSB) */ +#define HDMI_VSYNC_PMENT_AT_EMB_7_0 0x54 /* VSYNC placement at embedded sync (LSB) */ +#define HDMI_VSYNC_PMENT_AT_EMB_14_8 0x55 /* VSYNC placement at embedded sync (MSB) */ +#define HDMI_SLIPHDMIT_PARAM_SETTINGS_1 0x56 /* SLIPHDMIT parameter settings.1 */ +#define HDMI_SLIPHDMIT_PARAM_SETTINGS_2 0x57 /* SLIPHDMIT parameter settings.2 */ +#define HDMI_SLIPHDMIT_PARAM_SETTINGS_3 0x58 /* SLIPHDMIT parameter settings.3 */ +#define HDMI_SLIPHDMIT_PARAM_SETTINGS_5 0x59 /* SLIPHDMIT parameter settings.5 */ +#define HDMI_SLIPHDMIT_PARAM_SETTINGS_6 0x5A /* SLIPHDMIT parameter settings.6 */ +#define HDMI_SLIPHDMIT_PARAM_SETTINGS_7 0x5B /* SLIPHDMIT parameter settings.7 */ +#define HDMI_SLIPHDMIT_PARAM_SETTINGS_8 0x5C /* SLIPHDMIT parameter settings.8 */ +#define HDMI_SLIPHDMIT_PARAM_SETTINGS_9 0x5D /* SLIPHDMIT parameter settings.9 */ +#define HDMI_SLIPHDMIT_PARAM_SETTINGS_10 0x5E /* SLIPHDMIT parameter settings.10 */ +#define HDMI_CTRL_PKT_BUF_INDEX 0x5F /* Control packet buffer index */ +#define HDMI_CTRL_PKT_BUF_ACCESS_HB0 0x60 /* Control packet data buffer access window - HB0 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_HB1 0x61 /* Control packet data buffer access window - HB1 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_HB2 0x62 /* Control packet data buffer access window - HB2 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB0 0x63 /* Control packet data buffer access window - PB0 */ +#define 
HDMI_CTRL_PKT_BUF_ACCESS_PB1 0x64 /* Control packet data buffer access window - PB1 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB2 0x65 /* Control packet data buffer access window - PB2 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB3 0x66 /* Control packet data buffer access window - PB3 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB4 0x67 /* Control packet data buffer access window - PB4 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB5 0x68 /* Control packet data buffer access window - PB5 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB6 0x69 /* Control packet data buffer access window - PB6 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB7 0x6A /* Control packet data buffer access window - PB7 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB8 0x6B /* Control packet data buffer access window - PB8 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB9 0x6C /* Control packet data buffer access window - PB9 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB10 0x6D /* Control packet data buffer access window - PB10 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB11 0x6E /* Control packet data buffer access window - PB11 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB12 0x6F /* Control packet data buffer access window - PB12 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB13 0x70 /* Control packet data buffer access window - PB13 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB14 0x71 /* Control packet data buffer access window - PB14 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB15 0x72 /* Control packet data buffer access window - PB15 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB16 0x73 /* Control packet data buffer access window - PB16 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB17 0x74 /* Control packet data buffer access window - PB17 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB18 0x75 /* Control packet data buffer access window - PB18 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB19 0x76 /* Control packet data buffer access window - PB19 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB20 0x77 /* Control packet data buffer access window - PB20 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB21 0x78 /* Control packet data buffer access window - PB21 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB22 0x79 /* Control packet data buffer access window - PB22 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB23 0x7A /* Control packet data buffer access window - PB23 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB24 0x7B /* Control packet data buffer access window - PB24 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB25 0x7C /* Control packet data buffer access window - PB25 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB26 0x7D /* Control packet data buffer access window - PB26 */ +#define HDMI_CTRL_PKT_BUF_ACCESS_PB27 0x7E /* Control packet data buffer access window - PB27 */ +#define HDMI_EDID_KSV_FIFO_ACCESS_WINDOW 0x80 /* EDID/KSV FIFO access window */ +#define HDMI_DDC_BUS_ACCESS_FREQ_CTRL_7_0 0x81 /* DDC bus access frequency control (LSB) */ +#define HDMI_DDC_BUS_ACCESS_FREQ_CTRL_15_8 0x82 /* DDC bus access frequency control (MSB) */ +#define HDMI_INTERRUPT_MASK_1 0x92 /* Interrupt mask.1 */ +#define HDMI_INTERRUPT_MASK_2 0x93 /* Interrupt mask.2 */ +#define HDMI_INTERRUPT_STATUS_1 0x94 /* Interrupt status.1 */ +#define HDMI_INTERRUPT_STATUS_2 0x95 /* Interrupt status.2 */ +#define HDMI_INTERRUPT_MASK_3 0x96 /* Interrupt mask.3 */ +#define HDMI_INTERRUPT_MASK_4 0x97 /* Interrupt mask.4 */ +#define HDMI_INTERRUPT_STATUS_3 0x98 /* Interrupt status.3 */ +#define HDMI_INTERRUPT_STATUS_4 0x99 /* Interrupt status.4 */ +#define HDMI_SOFTWARE_HDCP_CONTROL_1 0x9A /* Software HDCP control.1 */ +#define HDMI_FRAME_COUNTER 0x9C /* Frame counter */ +#define HDMI_FRAME_COUNTER_FOR_RI_CHECK 0x9D /* 
Frame counter for Ri check */ +#define HDMI_HDCP_CONTROL 0xAF /* HDCP control */ +#define HDMI_RI_FRAME_COUNT_REGISTER 0xB2 /* Ri frame count register */ +#define HDMI_DDC_BUS_CONTROL 0xB7 /* DDC bus control */ +#define HDMI_HDCP_STATUS 0xB8 /* HDCP status */ +#define HDMI_SHA0 0xB9 /* sha0 */ +#define HDMI_SHA1 0xBA /* sha1 */ +#define HDMI_SHA2 0xBB /* sha2 */ +#define HDMI_SHA3 0xBC /* sha3 */ +#define HDMI_SHA4 0xBD /* sha4 */ +#define HDMI_BCAPS_READ 0xBE /* BCAPS read / debug */ +#define HDMI_AKSV_BKSV_7_0_MONITOR 0xBF /* AKSV/BKSV[7:0] monitor */ +#define HDMI_AKSV_BKSV_15_8_MONITOR 0xC0 /* AKSV/BKSV[15:8] monitor */ +#define HDMI_AKSV_BKSV_23_16_MONITOR 0xC1 /* AKSV/BKSV[23:16] monitor */ +#define HDMI_AKSV_BKSV_31_24_MONITOR 0xC2 /* AKSV/BKSV[31:24] monitor */ +#define HDMI_AKSV_BKSV_39_32_MONITOR 0xC3 /* AKSV/BKSV[39:32] monitor */ +#define HDMI_EDID_SEGMENT_POINTER 0xC4 /* EDID segment pointer */ +#define HDMI_EDID_WORD_ADDRESS 0xC5 /* EDID word address */ +#define HDMI_EDID_DATA_FIFO_ADDRESS 0xC6 /* EDID data FIFO address */ +#define HDMI_NUM_OF_HDMI_DEVICES 0xC7 /* Number of HDMI devices */ +#define HDMI_HDCP_ERROR_CODE 0xC8 /* HDCP error code */ +#define HDMI_100MS_TIMER_SET 0xC9 /* 100ms timer setting */ +#define HDMI_5SEC_TIMER_SET 0xCA /* 5sec timer setting */ +#define HDMI_RI_READ_COUNT 0xCB /* Ri read count */ +#define HDMI_AN_SEED 0xCC /* An seed */ +#define HDMI_MAX_NUM_OF_RCIVRS_ALLOWED 0xCD /* Maximum number of receivers allowed */ +#define HDMI_HDCP_MEMORY_ACCESS_CONTROL_1 0xCE /* HDCP memory access control.1 */ +#define HDMI_HDCP_MEMORY_ACCESS_CONTROL_2 0xCF /* HDCP memory access control.2 */ +#define HDMI_HDCP_CONTROL_2 0xD0 /* HDCP Control 2 */ +#define HDMI_HDCP_KEY_MEMORY_CONTROL 0xD2 /* HDCP Key Memory Control */ +#define HDMI_COLOR_SPACE_CONV_CONFIG_1 0xD3 /* Color space conversion configuration.1 */ +#define HDMI_VIDEO_SETTING_3 0xD4 /* Video setting.3 */ +#define HDMI_RI_7_0 0xD5 /* Ri[7:0] */ +#define HDMI_RI_15_8 0xD6 /* Ri[15:8] */ +#define HDMI_PJ 0xD7 /* Pj */ +#define HDMI_SHA_RD 0xD8 /* sha_rd */ +#define HDMI_RI_7_0_SAVED 0xD9 /* Ri[7:0] saved */ +#define HDMI_RI_15_8_SAVED 0xDA /* Ri[15:8] saved */ +#define HDMI_PJ_SAVED 0xDB /* Pj saved */ +#define HDMI_NUM_OF_DEVICES 0xDC /* Number of devices */ +#define HDMI_HOT_PLUG_MSENS_STATUS 0xDF /* Hot plug/MSENS status */ +#define HDMI_BCAPS_WRITE 0xE0 /* bcaps */ +#define HDMI_BSTAT_7_0 0xE1 /* bstat[7:0] */ +#define HDMI_BSTAT_15_8 0xE2 /* bstat[15:8] */ +#define HDMI_BKSV_7_0 0xE3 /* bksv[7:0] */ +#define HDMI_BKSV_15_8 0xE4 /* bksv[15:8] */ +#define HDMI_BKSV_23_16 0xE5 /* bksv[23:16] */ +#define HDMI_BKSV_31_24 0xE6 /* bksv[31:24] */ +#define HDMI_BKSV_39_32 0xE7 /* bksv[39:32] */ +#define HDMI_AN_7_0 0xE8 /* An[7:0] */ +#define HDMI_AN_15_8 0xE9 /* An [15:8] */ +#define HDMI_AN_23_16 0xEA /* An [23:16] */ +#define HDMI_AN_31_24 0xEB /* An [31:24] */ +#define HDMI_AN_39_32 0xEC /* An [39:32] */ +#define HDMI_AN_47_40 0xED /* An [47:40] */ +#define HDMI_AN_55_48 0xEE /* An [55:48] */ +#define HDMI_AN_63_56 0xEF /* An [63:56] */ +#define HDMI_PRODUCT_ID 0xF0 /* Product ID */ +#define HDMI_REVISION_ID 0xF1 /* Revision ID */ +#define HDMI_TEST_MODE 0xFE /* Test mode */ + +enum hotplug_state { + HDMI_HOTPLUG_DISCONNECTED, + HDMI_HOTPLUG_CONNECTED, + HDMI_HOTPLUG_EDID_DONE, +}; + +struct sh_hdmi { + void __iomem *base; + enum hotplug_state hp_state; + struct clk *hdmi_clk; + struct device *dev; + struct fb_info *info; + struct delayed_work edid_work; + struct fb_var_screeninfo var; +}; + +static void 
hdmi_write(struct sh_hdmi *hdmi, u8 data, u8 reg) +{ + iowrite8(data, hdmi->base + reg); +} + +static u8 hdmi_read(struct sh_hdmi *hdmi, u8 reg) +{ + return ioread8(hdmi->base + reg); +} + +/* External video parameter settings */ +static void hdmi_external_video_param(struct sh_hdmi *hdmi) +{ + struct fb_var_screeninfo *var = &hdmi->var; + u16 htotal, hblank, hdelay, vtotal, vblank, vdelay, voffset; + u8 sync = 0; + + htotal = var->xres + var->right_margin + var->left_margin + var->hsync_len; + + hdelay = var->hsync_len + var->left_margin; + hblank = var->right_margin + hdelay; + + /* + * Vertical timing looks a bit different in Figure 18, + * but let's try the same first by setting offset = 0 + */ + vtotal = var->yres + var->upper_margin + var->lower_margin + var->vsync_len; + + vdelay = var->vsync_len + var->upper_margin; + vblank = var->lower_margin + vdelay; + voffset = min(var->upper_margin / 2, 6U); + + /* + * [3]: VSYNC polarity: Positive + * [2]: HSYNC polarity: Positive + * [1]: Interlace/Progressive: Progressive + * [0]: External video settings enable: used. + */ + if (var->sync & FB_SYNC_HOR_HIGH_ACT) + sync |= 4; + if (var->sync & FB_SYNC_VERT_HIGH_ACT) + sync |= 8; + + pr_debug("H: %u, %u, %u, %u; V: %u, %u, %u, %u; sync 0x%x\n", + htotal, hblank, hdelay, var->hsync_len, + vtotal, vblank, vdelay, var->vsync_len, sync); + + hdmi_write(hdmi, sync | (voffset << 4), HDMI_EXTERNAL_VIDEO_PARAM_SETTINGS); + + hdmi_write(hdmi, htotal, HDMI_EXTERNAL_H_TOTAL_7_0); + hdmi_write(hdmi, htotal >> 8, HDMI_EXTERNAL_H_TOTAL_11_8); + + hdmi_write(hdmi, hblank, HDMI_EXTERNAL_H_BLANK_7_0); + hdmi_write(hdmi, hblank >> 8, HDMI_EXTERNAL_H_BLANK_9_8); + + hdmi_write(hdmi, hdelay, HDMI_EXTERNAL_H_DELAY_7_0); + hdmi_write(hdmi, hdelay >> 8, HDMI_EXTERNAL_H_DELAY_9_8); + + hdmi_write(hdmi, var->hsync_len, HDMI_EXTERNAL_H_DURATION_7_0); + hdmi_write(hdmi, var->hsync_len >> 8, HDMI_EXTERNAL_H_DURATION_9_8); + + hdmi_write(hdmi, vtotal, HDMI_EXTERNAL_V_TOTAL_7_0); + hdmi_write(hdmi, vtotal >> 8, HDMI_EXTERNAL_V_TOTAL_9_8); + + hdmi_write(hdmi, vblank, HDMI_EXTERNAL_V_BLANK); + + hdmi_write(hdmi, vdelay, HDMI_EXTERNAL_V_DELAY); + + hdmi_write(hdmi, var->vsync_len, HDMI_EXTERNAL_V_DURATION); + + /* Set bit 0 of HDMI_EXTERNAL_VIDEO_PARAM_SETTINGS here for manual mode */ +} + +/** + * sh_hdmi_video_config() + */ +static void sh_hdmi_video_config(struct sh_hdmi *hdmi) +{ + /* + * [7:4]: Audio sampling frequency: 48kHz + * [3:1]: Input video format: RGB and YCbCr 4:4:4 (Y on Green) + * [0]: Internal/External DE select: internal + */ + hdmi_write(hdmi, 0x20, HDMI_AUDIO_VIDEO_SETTING_1); + + /* + * [7:6]: Video output format: RGB 4:4:4 + * [5:4]: Input video data width: 8 bit + * [3:1]: EAV/SAV location: channel 1 + * [0]: Video input color space: RGB + */ + hdmi_write(hdmi, 0x34, HDMI_VIDEO_SETTING_1); + + /* + * [7:6]: Together with bit [6] of HDMI_AUDIO_VIDEO_SETTING_2, which is + * left at 0 by default, this configures 24bpp and sets the Color Depth + * (CD) field in the General Control Packet + */ + hdmi_write(hdmi, 0x20, HDMI_DEEP_COLOR_MODES); +} + +/** + * sh_hdmi_audio_config() + */ +static void sh_hdmi_audio_config(struct sh_hdmi *hdmi) +{ + /* + * [7:4] L/R data swap control + * [3:0] appropriate N[19:16] + */ + hdmi_write(hdmi, 0x00, HDMI_L_R_DATA_SWAP_CTRL_RPKT); + /* appropriate N[15:8] */ + hdmi_write(hdmi, 0x18, HDMI_20_BIT_N_FOR_AUDIO_RPKT_15_8); + /* appropriate N[7:0] */ + hdmi_write(hdmi, 0x00, HDMI_20_BIT_N_FOR_AUDIO_RPKT_7_0); + + /* [7:4] 48 kHz SPDIF not used */ + hdmi_write(hdmi, 0x20, 
HDMI_SPDIF_AUDIO_SAMP_FREQ_CTS); + + /* + * [6:5] set required down sampling rate if required + * [4:3] set required audio source + */ + hdmi_write(hdmi, 0x00, HDMI_AUDIO_SETTING_1); + + /* [3:0] set sending channel number for channel status */ + hdmi_write(hdmi, 0x40, HDMI_AUDIO_SETTING_2); + + /* + * [5:2] set valid I2S source input pin + * [1:0] set input I2S source mode + */ + hdmi_write(hdmi, 0x04, HDMI_I2S_AUDIO_SET); + + /* [7:4] set valid DSD source input pin */ + hdmi_write(hdmi, 0x00, HDMI_DSD_AUDIO_SET); + + /* [7:0] set appropriate I2S input pin swap settings if required */ + hdmi_write(hdmi, 0x00, HDMI_I2S_INPUT_PIN_SWAP); + + /* + * [7] set validity bit for channel status + * [3:0] set original sample frequency for channel status + */ + hdmi_write(hdmi, 0x00, HDMI_AUDIO_STATUS_BITS_SETTING_1); + + /* + * [7] set value for channel status + * [6] set value for channel status + * [5] set copyright bit for channel status + * [4:2] set additional information for channel status + * [1:0] set clock accuracy for channel status + */ + hdmi_write(hdmi, 0x00, HDMI_AUDIO_STATUS_BITS_SETTING_2); + + /* [7:0] set category code for channel status */ + hdmi_write(hdmi, 0x00, HDMI_CATEGORY_CODE); + + /* + * [7:4] set source number for channel status + * [3:0] set word length for channel status + */ + hdmi_write(hdmi, 0x00, HDMI_SOURCE_NUM_AUDIO_WORD_LEN); + + /* [7:4] set sample frequency for channel status */ + hdmi_write(hdmi, 0x20, HDMI_AUDIO_VIDEO_SETTING_1); +} + +/** + * sh_hdmi_phy_config() + */ +static void sh_hdmi_phy_config(struct sh_hdmi *hdmi) +{ + /* 720p, 8bit, 74.25MHz. Might need to be adjusted for other formats */ + hdmi_write(hdmi, 0x19, HDMI_SLIPHDMIT_PARAM_SETTINGS_1); + hdmi_write(hdmi, 0x00, HDMI_SLIPHDMIT_PARAM_SETTINGS_2); + hdmi_write(hdmi, 0x00, HDMI_SLIPHDMIT_PARAM_SETTINGS_3); + /* PLLA_CONFIG[7:0]: VCO gain, VCO offset, LPF resistance[0] */ + hdmi_write(hdmi, 0x44, HDMI_SLIPHDMIT_PARAM_SETTINGS_5); + hdmi_write(hdmi, 0x32, HDMI_SLIPHDMIT_PARAM_SETTINGS_6); + hdmi_write(hdmi, 0x4A, HDMI_SLIPHDMIT_PARAM_SETTINGS_7); + hdmi_write(hdmi, 0x0E, HDMI_SLIPHDMIT_PARAM_SETTINGS_8); + hdmi_write(hdmi, 0x25, HDMI_SLIPHDMIT_PARAM_SETTINGS_9); + hdmi_write(hdmi, 0x04, HDMI_SLIPHDMIT_PARAM_SETTINGS_10); +} + +/** + * sh_hdmi_avi_infoframe_setup() - Auxiliary Video Information InfoFrame CONTROL PACKET + */ +static void sh_hdmi_avi_infoframe_setup(struct sh_hdmi *hdmi) +{ + /* AVI InfoFrame */ + hdmi_write(hdmi, 0x06, HDMI_CTRL_PKT_BUF_INDEX); + + /* Packet Type = 0x82 */ + hdmi_write(hdmi, 0x82, HDMI_CTRL_PKT_BUF_ACCESS_HB0); + + /* Version = 0x02 */ + hdmi_write(hdmi, 0x02, HDMI_CTRL_PKT_BUF_ACCESS_HB1); + + /* Length = 13 (0x0D) */ + hdmi_write(hdmi, 0x0D, HDMI_CTRL_PKT_BUF_ACCESS_HB2); + + /* N. A. 
Checksum */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB0); + + /* + * Y = RGB + * A0 = No Data + * B = Bar Data not valid + * S = No Data + */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB1); + + /* + * C = No Data + * M = 16:9 Picture Aspect Ratio + * R = Same as picture aspect ratio + */ + hdmi_write(hdmi, 0x28, HDMI_CTRL_PKT_BUF_ACCESS_PB2); + + /* + * ITC = No Data + * EC = xvYCC601 + * Q = Default (depends on video format) + * SC = No Known non_uniform Scaling + */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB3); + + /* + * VIC = 1280 x 720p: ignored if external config is used + * Send 2 for 720 x 480p, 16 for 1080p + */ + hdmi_write(hdmi, 4, HDMI_CTRL_PKT_BUF_ACCESS_PB4); + + /* PR = No Repetition */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB5); + + /* Line Number of End of Top Bar (lower 8 bits) */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB6); + + /* Line Number of End of Top Bar (upper 8 bits) */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB7); + + /* Line Number of Start of Bottom Bar (lower 8 bits) */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB8); + + /* Line Number of Start of Bottom Bar (upper 8 bits) */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB9); + + /* Pixel Number of End of Left Bar (lower 8 bits) */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB10); + + /* Pixel Number of End of Left Bar (upper 8 bits) */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB11); + + /* Pixel Number of Start of Right Bar (lower 8 bits) */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB12); + + /* Pixel Number of Start of Right Bar (upper 8 bits) */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB13); +} + +/** + * sh_hdmi_audio_infoframe_setup() - Audio InfoFrame of CONTROL PACKET + */ +static void sh_hdmi_audio_infoframe_setup(struct sh_hdmi *hdmi) +{ + /* Audio InfoFrame */ + hdmi_write(hdmi, 0x08, HDMI_CTRL_PKT_BUF_INDEX); + + /* Packet Type = 0x84 */ + hdmi_write(hdmi, 0x84, HDMI_CTRL_PKT_BUF_ACCESS_HB0); + + /* Version Number = 0x01 */ + hdmi_write(hdmi, 0x01, HDMI_CTRL_PKT_BUF_ACCESS_HB1); + + /* 0 Length = 10 (0x0A) */ + hdmi_write(hdmi, 0x0A, HDMI_CTRL_PKT_BUF_ACCESS_HB2); + + /* n. a. Checksum */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB0); + + /* Audio Channel Count = Refer to Stream Header */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB1); + + /* Refer to Stream Header */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB2); + + /* Format depends on coding type (i.e. 
CT0...CT3) */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB3); + + /* Speaker Channel Allocation = Front Right + Front Left */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB4); + + /* Level Shift Value = 0 dB, Down - mix is permitted or no information */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB5); + + /* Reserved (0) */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB6); + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB7); + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB8); + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB9); + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB10); +} + +/** + * sh_hdmi_gamut_metadata_setup() - Gamut Metadata Packet of CONTROL PACKET + */ +static void sh_hdmi_gamut_metadata_setup(struct sh_hdmi *hdmi) +{ + int i; + + /* Gamut Metadata Packet */ + hdmi_write(hdmi, 0x04, HDMI_CTRL_PKT_BUF_INDEX); + + /* Packet Type = 0x0A */ + hdmi_write(hdmi, 0x0A, HDMI_CTRL_PKT_BUF_ACCESS_HB0); + /* Gamut Packet is not used, so default value */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_HB1); + /* Gamut Packet is not used, so default value */ + hdmi_write(hdmi, 0x10, HDMI_CTRL_PKT_BUF_ACCESS_HB2); + + /* GBD bytes 0 through 27 */ + for (i = 0; i <= 27; i++) + /* HDMI_CTRL_PKT_BUF_ACCESS_PB0_63H - PB27_7EH */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB0 + i); +} + +/** + * sh_hdmi_acp_setup() - Audio Content Protection Packet (ACP) + */ +static void sh_hdmi_acp_setup(struct sh_hdmi *hdmi) +{ + int i; + + /* Audio Content Protection Packet (ACP) */ + hdmi_write(hdmi, 0x01, HDMI_CTRL_PKT_BUF_INDEX); + + /* Packet Type = 0x04 */ + hdmi_write(hdmi, 0x04, HDMI_CTRL_PKT_BUF_ACCESS_HB0); + /* ACP_Type */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_HB1); + /* Reserved (0) */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_HB2); + + /* GBD bytes 0 through 27 */ + for (i = 0; i <= 27; i++) + /* HDMI_CTRL_PKT_BUF_ACCESS_PB0 - PB27 */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB0 + i); +} + +/** + * sh_hdmi_isrc1_setup() - ISRC1 Packet + */ +static void sh_hdmi_isrc1_setup(struct sh_hdmi *hdmi) +{ + int i; + + /* ISRC1 Packet */ + hdmi_write(hdmi, 0x02, HDMI_CTRL_PKT_BUF_INDEX); + + /* Packet Type = 0x05 */ + hdmi_write(hdmi, 0x05, HDMI_CTRL_PKT_BUF_ACCESS_HB0); + /* ISRC_Cont, ISRC_Valid, Reserved (0), ISRC_Status */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_HB1); + /* Reserved (0) */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_HB2); + + /* PB0 UPC_EAN_ISRC_0-15 */ + /* Bytes PB16-PB27 shall be set to a value of 0. */ + for (i = 0; i <= 27; i++) + /* HDMI_CTRL_PKT_BUF_ACCESS_PB0 - PB27 */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB0 + i); +} + +/** + * sh_hdmi_isrc2_setup() - ISRC2 Packet + */ +static void sh_hdmi_isrc2_setup(struct sh_hdmi *hdmi) +{ + int i; + + /* ISRC2 Packet */ + hdmi_write(hdmi, 0x03, HDMI_CTRL_PKT_BUF_INDEX); + + /* HB0 Packet Type = 0x06 */ + hdmi_write(hdmi, 0x06, HDMI_CTRL_PKT_BUF_ACCESS_HB0); + /* Reserved (0) */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_HB1); + /* Reserved (0) */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_HB2); + + /* PB0 UPC_EAN_ISRC_16-31 */ + /* Bytes PB16-PB27 shall be set to a value of 0. 
*/ + for (i = 0; i <= 27; i++) + /* HDMI_CTRL_PKT_BUF_ACCESS_PB0 - PB27 */ + hdmi_write(hdmi, 0x00, HDMI_CTRL_PKT_BUF_ACCESS_PB0 + i); +} + +/** + * sh_hdmi_configure() - Initialise HDMI for output + */ +static void sh_hdmi_configure(struct sh_hdmi *hdmi) +{ + /* Configure video format */ + sh_hdmi_video_config(hdmi); + + /* Configure audio format */ + sh_hdmi_audio_config(hdmi); + + /* Configure PHY */ + sh_hdmi_phy_config(hdmi); + + /* Auxiliary Video Information (AVI) InfoFrame */ + sh_hdmi_avi_infoframe_setup(hdmi); + + /* Audio InfoFrame */ + sh_hdmi_audio_infoframe_setup(hdmi); + + /* Gamut Metadata packet */ + sh_hdmi_gamut_metadata_setup(hdmi); + + /* Audio Content Protection (ACP) Packet */ + sh_hdmi_acp_setup(hdmi); + + /* ISRC1 Packet */ + sh_hdmi_isrc1_setup(hdmi); + + /* ISRC2 Packet */ + sh_hdmi_isrc2_setup(hdmi); + + /* + * Control packet auto send with VSYNC control: auto send + * General control, Gamut metadata, ISRC, and ACP packets + */ + hdmi_write(hdmi, 0x8E, HDMI_CTRL_PKT_AUTO_SEND); + + /* FIXME */ + msleep(10); + + /* PS mode b->d, reset PLLA and PLLB */ + hdmi_write(hdmi, 0x4C, HDMI_SYSTEM_CTRL); + + udelay(10); + + hdmi_write(hdmi, 0x40, HDMI_SYSTEM_CTRL); +} + +static void sh_hdmi_read_edid(struct sh_hdmi *hdmi) +{ + struct fb_var_screeninfo *var = &hdmi->var; + struct sh_mobile_hdmi_info *pdata = hdmi->dev->platform_data; + struct fb_videomode *lcd_cfg = &pdata->lcd_chan->lcd_cfg; + unsigned long height = var->height, width = var->width; + int i; + u8 edid[128]; + + /* Read EDID */ + pr_debug("Read back EDID code:"); + for (i = 0; i < 128; i++) { + edid[i] = hdmi_read(hdmi, HDMI_EDID_KSV_FIFO_ACCESS_WINDOW); +#ifdef DEBUG + if ((i % 16) == 0) { + printk(KERN_CONT "\n"); + printk(KERN_DEBUG "%02X | %02X", i, edid[i]); + } else { + printk(KERN_CONT " %02X", edid[i]); + } +#endif + } +#ifdef DEBUG + printk(KERN_CONT "\n"); +#endif + fb_parse_edid(edid, var); + pr_debug("%u-%u-%u-%u x %u-%u-%u-%u @ %lu kHz monitor detected\n", + var->left_margin, var->xres, var->right_margin, var->hsync_len, + var->upper_margin, var->yres, var->lower_margin, var->vsync_len, + PICOS2KHZ(var->pixclock)); + + /* FIXME: Use user-provided configuration instead of EDID */ + var->width = width; + var->xres = lcd_cfg->xres; + var->xres_virtual = lcd_cfg->xres; + var->left_margin = lcd_cfg->left_margin; + var->right_margin = lcd_cfg->right_margin; + var->hsync_len = lcd_cfg->hsync_len; + var->height = height; + var->yres = lcd_cfg->yres; + var->yres_virtual = lcd_cfg->yres * 2; + var->upper_margin = lcd_cfg->upper_margin; + var->lower_margin = lcd_cfg->lower_margin; + var->vsync_len = lcd_cfg->vsync_len; + var->sync = lcd_cfg->sync; + var->pixclock = lcd_cfg->pixclock; + + hdmi_external_video_param(hdmi); +} + +static irqreturn_t sh_hdmi_hotplug(int irq, void *dev_id) +{ + struct sh_hdmi *hdmi = dev_id; + u8 status1, status2, mask1, mask2; + + /* mode_b and PLLA and PLLB reset */ + hdmi_write(hdmi, 0x2C, HDMI_SYSTEM_CTRL); + + /* How long shall reset be held? 
*/ + udelay(10); + + /* mode_b and PLLA and PLLB reset release */ + hdmi_write(hdmi, 0x20, HDMI_SYSTEM_CTRL); + + status1 = hdmi_read(hdmi, HDMI_INTERRUPT_STATUS_1); + status2 = hdmi_read(hdmi, HDMI_INTERRUPT_STATUS_2); + + mask1 = hdmi_read(hdmi, HDMI_INTERRUPT_MASK_1); + mask2 = hdmi_read(hdmi, HDMI_INTERRUPT_MASK_2); + + /* Correct would be to ack only set bits, but the datasheet requires 0xff */ + hdmi_write(hdmi, 0xFF, HDMI_INTERRUPT_STATUS_1); + hdmi_write(hdmi, 0xFF, HDMI_INTERRUPT_STATUS_2); + + if (printk_ratelimit()) + pr_debug("IRQ #%d: Status #1: 0x%x & 0x%x, #2: 0x%x & 0x%x\n", + irq, status1, mask1, status2, mask2); + + if (!((status1 & mask1) | (status2 & mask2))) { + return IRQ_NONE; + } else if (status1 & 0xc0) { + u8 msens; + + /* Datasheet specifies 10ms... */ + udelay(500); + + msens = hdmi_read(hdmi, HDMI_HOT_PLUG_MSENS_STATUS); + pr_debug("MSENS 0x%x\n", msens); + /* Check, if hot plug & MSENS pin status are both high */ + if ((msens & 0xC0) == 0xC0) { + /* Display plug in */ + hdmi->hp_state = HDMI_HOTPLUG_CONNECTED; + + /* Set EDID word address */ + hdmi_write(hdmi, 0x00, HDMI_EDID_WORD_ADDRESS); + /* Set EDID segment pointer */ + hdmi_write(hdmi, 0x00, HDMI_EDID_SEGMENT_POINTER); + /* Enable EDID interrupt */ + hdmi_write(hdmi, 0xC6, HDMI_INTERRUPT_MASK_1); + } else if (!(status1 & 0x80)) { + /* Display unplug, beware multiple interrupts */ + if (hdmi->hp_state != HDMI_HOTPLUG_DISCONNECTED) + schedule_delayed_work(&hdmi->edid_work, 0); + + hdmi->hp_state = HDMI_HOTPLUG_DISCONNECTED; + /* display_off will switch back to mode_a */ + } + } else if (status1 & 2) { + /* EDID error interrupt: retry */ + /* Set EDID word address */ + hdmi_write(hdmi, 0x00, HDMI_EDID_WORD_ADDRESS); + /* Set EDID segment pointer */ + hdmi_write(hdmi, 0x00, HDMI_EDID_SEGMENT_POINTER); + } else if (status1 & 4) { + /* Disable EDID interrupt */ + hdmi_write(hdmi, 0xC0, HDMI_INTERRUPT_MASK_1); + hdmi->hp_state = HDMI_HOTPLUG_EDID_DONE; + schedule_delayed_work(&hdmi->edid_work, msecs_to_jiffies(10)); + } + + return IRQ_HANDLED; +} + +static void hdmi_display_on(void *arg, struct fb_info *info) +{ + struct sh_hdmi *hdmi = arg; + struct sh_mobile_hdmi_info *pdata = hdmi->dev->platform_data; + + if (info->var.xres != 1280 || info->var.yres != 720) { + dev_warn(info->device, "Unsupported framebuffer geometry %ux%u\n", + info->var.xres, info->var.yres); + return; + } + + pr_debug("%s(%p): state %x\n", __func__, pdata->lcd_dev, info->state); + /* + * FIXME: not a good place to store fb_info. And we cannot nullify it + * even on monitor disconnect. What should the lifecycle be? + */ + hdmi->info = info; + switch (hdmi->hp_state) { + case HDMI_HOTPLUG_EDID_DONE: + /* PS mode d->e. 
All functions are active */ + hdmi_write(hdmi, 0x80, HDMI_SYSTEM_CTRL); + pr_debug("HDMI running\n"); + break; + case HDMI_HOTPLUG_DISCONNECTED: + info->state = FBINFO_STATE_SUSPENDED; + default: + hdmi->var = info->var; + } +} + +static void hdmi_display_off(void *arg) +{ + struct sh_hdmi *hdmi = arg; + struct sh_mobile_hdmi_info *pdata = hdmi->dev->platform_data; + + pr_debug("%s(%p)\n", __func__, pdata->lcd_dev); + /* PS mode e->a */ + hdmi_write(hdmi, 0x10, HDMI_SYSTEM_CTRL); +} + +/* Hotplug interrupt occurred, read EDID */ +static void edid_work_fn(struct work_struct *work) +{ + struct sh_hdmi *hdmi = container_of(work, struct sh_hdmi, edid_work.work); + struct sh_mobile_hdmi_info *pdata = hdmi->dev->platform_data; + + pr_debug("%s(%p): begin, hotplug status %d\n", __func__, + pdata->lcd_dev, hdmi->hp_state); + + if (!pdata->lcd_dev) + return; + + if (hdmi->hp_state == HDMI_HOTPLUG_EDID_DONE) { + pm_runtime_get_sync(hdmi->dev); + /* A device has been plugged in */ + sh_hdmi_read_edid(hdmi); + msleep(10); + sh_hdmi_configure(hdmi); + /* Switched to another (d) power-save mode */ + msleep(10); + + if (!hdmi->info) + return; + + acquire_console_sem(); + + /* HDMI plug in */ + hdmi->info->var = hdmi->var; + if (hdmi->info->state != FBINFO_STATE_RUNNING) + fb_set_suspend(hdmi->info, 0); + else + hdmi_display_on(hdmi, hdmi->info); + + release_console_sem(); + } else { + if (!hdmi->info) + return; + + acquire_console_sem(); + + /* HDMI disconnect */ + fb_set_suspend(hdmi->info, 1); + + release_console_sem(); + pm_runtime_put(hdmi->dev); + } + + pr_debug("%s(%p): end\n", __func__, pdata->lcd_dev); +} + +static int __init sh_hdmi_probe(struct platform_device *pdev) +{ + struct sh_mobile_hdmi_info *pdata = pdev->dev.platform_data; + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + int irq = platform_get_irq(pdev, 0), ret; + struct sh_hdmi *hdmi; + long rate; + + if (!res || !pdata || irq < 0) + return -ENODEV; + + hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL); + if (!hdmi) { + dev_err(&pdev->dev, "Cannot allocate device data\n"); + return -ENOMEM; + } + + hdmi->dev = &pdev->dev; + + hdmi->hdmi_clk = clk_get(&pdev->dev, "ick"); + if (IS_ERR(hdmi->hdmi_clk)) { + ret = PTR_ERR(hdmi->hdmi_clk); + dev_err(&pdev->dev, "Unable to get clock: %d\n", ret); + goto egetclk; + } + + rate = PICOS2KHZ(pdata->lcd_chan->lcd_cfg.pixclock) * 1000; + + rate = clk_round_rate(hdmi->hdmi_clk, rate); + if (rate < 0) { + ret = rate; + dev_err(&pdev->dev, "Cannot get suitable rate: %ld\n", rate); + goto erate; + } + + ret = clk_set_rate(hdmi->hdmi_clk, rate); + if (ret < 0) { + dev_err(&pdev->dev, "Cannot set rate %ld: %d\n", rate, ret); + goto erate; + } + + pr_debug("HDMI set frequency %lu\n", rate); + + ret = clk_enable(hdmi->hdmi_clk); + if (ret < 0) { + dev_err(&pdev->dev, "Cannot enable clock: %d\n", ret); + goto eclkenable; + } + + dev_info(&pdev->dev, "Enabled HDMI clock at %luHz\n", rate); + + if (!request_mem_region(res->start, resource_size(res), dev_name(&pdev->dev))) { + dev_err(&pdev->dev, "HDMI register region already claimed\n"); + ret = -EBUSY; + goto ereqreg; + } + + hdmi->base = ioremap(res->start, resource_size(res)); + if (!hdmi->base) { + dev_err(&pdev->dev, "HDMI register region already claimed\n"); + ret = -ENOMEM; + goto emap; + } + + platform_set_drvdata(pdev, hdmi); + +#if 1 + /* Product and revision IDs are 0 in sh-mobile version */ + dev_info(&pdev->dev, "Detected HDMI controller 0x%x:0x%x\n", + hdmi_read(hdmi, HDMI_PRODUCT_ID), hdmi_read(hdmi, HDMI_REVISION_ID)); 
+#endif + + /* Set up LCDC callbacks */ + pdata->lcd_chan->board_cfg.board_data = hdmi; + pdata->lcd_chan->board_cfg.display_on = hdmi_display_on; + pdata->lcd_chan->board_cfg.display_off = hdmi_display_off; + + INIT_DELAYED_WORK(&hdmi->edid_work, edid_work_fn); + + pm_runtime_enable(&pdev->dev); + pm_runtime_resume(&pdev->dev); + + ret = request_irq(irq, sh_hdmi_hotplug, 0, + dev_name(&pdev->dev), hdmi); + if (ret < 0) { + dev_err(&pdev->dev, "Unable to request irq: %d\n", ret); + goto ereqirq; + } + + return 0; + +ereqirq: + pm_runtime_disable(&pdev->dev); + iounmap(hdmi->base); +emap: + release_mem_region(res->start, resource_size(res)); +ereqreg: + clk_disable(hdmi->hdmi_clk); +eclkenable: +erate: + clk_put(hdmi->hdmi_clk); +egetclk: + kfree(hdmi); + + return ret; +} + +static int __exit sh_hdmi_remove(struct platform_device *pdev) +{ + struct sh_mobile_hdmi_info *pdata = pdev->dev.platform_data; + struct sh_hdmi *hdmi = platform_get_drvdata(pdev); + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + int irq = platform_get_irq(pdev, 0); + + pdata->lcd_chan->board_cfg.display_on = NULL; + pdata->lcd_chan->board_cfg.display_off = NULL; + pdata->lcd_chan->board_cfg.board_data = NULL; + + free_irq(irq, hdmi); + pm_runtime_disable(&pdev->dev); + cancel_delayed_work_sync(&hdmi->edid_work); + clk_disable(hdmi->hdmi_clk); + clk_put(hdmi->hdmi_clk); + iounmap(hdmi->base); + release_mem_region(res->start, resource_size(res)); + kfree(hdmi); + + return 0; +} + +static struct platform_driver sh_hdmi_driver = { + .remove = __exit_p(sh_hdmi_remove), + .driver = { + .name = "sh-mobile-hdmi", + }, +}; + +static int __init sh_hdmi_init(void) +{ + return platform_driver_probe(&sh_hdmi_driver, sh_hdmi_probe); +} +module_init(sh_hdmi_init); + +static void __exit sh_hdmi_exit(void) +{ + platform_driver_unregister(&sh_hdmi_driver); +} +module_exit(sh_hdmi_exit); + +MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>"); +MODULE_DESCRIPTION("SuperH / ARM-shmobile HDMI driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index 12c451a711e..d72075a9f01 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c @@ -56,6 +56,7 @@ static int lcdc_shared_regs[] = { /* per-channel registers */ enum { LDDCKPAT1R, LDDCKPAT2R, LDMT1R, LDMT2R, LDMT3R, LDDFR, LDSM1R, LDSM2R, LDSA1R, LDMLSR, LDHCNR, LDHSYNR, LDVLNR, LDVSYNR, LDPMR, + LDHAJR, NR_CH_REGS }; static unsigned long lcdc_offs_mainlcd[NR_CH_REGS] = { @@ -74,6 +75,7 @@ static unsigned long lcdc_offs_mainlcd[NR_CH_REGS] = { [LDVLNR] = 0x450, [LDVSYNR] = 0x454, [LDPMR] = 0x460, + [LDHAJR] = 0x4a0, }; static unsigned long lcdc_offs_sublcd[NR_CH_REGS] = { @@ -137,6 +139,7 @@ struct sh_mobile_lcdc_priv { struct clk *dot_clk; unsigned long lddckr; struct sh_mobile_lcdc_chan ch[2]; + struct notifier_block notifier; unsigned long saved_shared_regs[NR_SHARED_REGS]; int started; }; @@ -404,6 +407,56 @@ static void sh_mobile_lcdc_start_stop(struct sh_mobile_lcdc_priv *priv, lcdc_write(priv, _LDDCKSTPR, 1); /* stop dotclock */ } +static void sh_mobile_lcdc_geometry(struct sh_mobile_lcdc_chan *ch) +{ + struct fb_var_screeninfo *var = &ch->info->var; + unsigned long h_total, hsync_pos; + u32 tmp; + + tmp = ch->ldmt1r_value; + tmp |= (var->sync & FB_SYNC_VERT_HIGH_ACT) ? 0 : 1 << 28; + tmp |= (var->sync & FB_SYNC_HOR_HIGH_ACT) ? 0 : 1 << 27; + tmp |= (ch->cfg.flags & LCDC_FLAGS_DWPOL) ? 1 << 26 : 0; + tmp |= (ch->cfg.flags & LCDC_FLAGS_DIPOL) ? 
1 << 25 : 0; + tmp |= (ch->cfg.flags & LCDC_FLAGS_DAPOL) ? 1 << 24 : 0; + tmp |= (ch->cfg.flags & LCDC_FLAGS_HSCNT) ? 1 << 17 : 0; + tmp |= (ch->cfg.flags & LCDC_FLAGS_DWCNT) ? 1 << 16 : 0; + lcdc_write_chan(ch, LDMT1R, tmp); + + /* setup SYS bus */ + lcdc_write_chan(ch, LDMT2R, ch->cfg.sys_bus_cfg.ldmt2r); + lcdc_write_chan(ch, LDMT3R, ch->cfg.sys_bus_cfg.ldmt3r); + + /* horizontal configuration */ + h_total = var->xres + var->hsync_len + + var->left_margin + var->right_margin; + tmp = h_total / 8; /* HTCN */ + tmp |= (var->xres / 8) << 16; /* HDCN */ + lcdc_write_chan(ch, LDHCNR, tmp); + + hsync_pos = var->xres + var->right_margin; + tmp = hsync_pos / 8; /* HSYNP */ + tmp |= (var->hsync_len / 8) << 16; /* HSYNW */ + lcdc_write_chan(ch, LDHSYNR, tmp); + + /* vertical configuration */ + tmp = var->yres + var->vsync_len + + var->upper_margin + var->lower_margin; /* VTLN */ + tmp |= var->yres << 16; /* VDLN */ + lcdc_write_chan(ch, LDVLNR, tmp); + + tmp = var->yres + var->lower_margin; /* VSYNP */ + tmp |= var->vsync_len << 16; /* VSYNW */ + lcdc_write_chan(ch, LDVSYNR, tmp); + + /* Adjust horizontal synchronisation for HDMI */ + tmp = ((var->xres & 7) << 24) | + ((h_total & 7) << 16) | + ((var->hsync_len & 7) << 8) | + hsync_pos; + lcdc_write_chan(ch, LDHAJR, tmp); +} + static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) { struct sh_mobile_lcdc_chan *ch; @@ -470,49 +523,11 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) if (!ch->enabled) continue; - tmp = ch->ldmt1r_value; - tmp |= (lcd_cfg->sync & FB_SYNC_VERT_HIGH_ACT) ? 0 : 1 << 28; - tmp |= (lcd_cfg->sync & FB_SYNC_HOR_HIGH_ACT) ? 0 : 1 << 27; - tmp |= (ch->cfg.flags & LCDC_FLAGS_DWPOL) ? 1 << 26 : 0; - tmp |= (ch->cfg.flags & LCDC_FLAGS_DIPOL) ? 1 << 25 : 0; - tmp |= (ch->cfg.flags & LCDC_FLAGS_DAPOL) ? 1 << 24 : 0; - tmp |= (ch->cfg.flags & LCDC_FLAGS_HSCNT) ? 1 << 17 : 0; - tmp |= (ch->cfg.flags & LCDC_FLAGS_DWCNT) ? 
1 << 16 : 0; - lcdc_write_chan(ch, LDMT1R, tmp); - - /* setup SYS bus */ - lcdc_write_chan(ch, LDMT2R, ch->cfg.sys_bus_cfg.ldmt2r); - lcdc_write_chan(ch, LDMT3R, ch->cfg.sys_bus_cfg.ldmt3r); - - /* horizontal configuration */ - tmp = lcd_cfg->xres + lcd_cfg->hsync_len; - tmp += lcd_cfg->left_margin; - tmp += lcd_cfg->right_margin; - tmp /= 8; /* HTCN */ - tmp |= (lcd_cfg->xres / 8) << 16; /* HDCN */ - lcdc_write_chan(ch, LDHCNR, tmp); - - tmp = lcd_cfg->xres; - tmp += lcd_cfg->right_margin; - tmp /= 8; /* HSYNP */ - tmp |= (lcd_cfg->hsync_len / 8) << 16; /* HSYNW */ - lcdc_write_chan(ch, LDHSYNR, tmp); + sh_mobile_lcdc_geometry(ch); /* power supply */ lcdc_write_chan(ch, LDPMR, 0); - /* vertical configuration */ - tmp = lcd_cfg->yres + lcd_cfg->vsync_len; - tmp += lcd_cfg->upper_margin; - tmp += lcd_cfg->lower_margin; /* VTLN */ - tmp |= lcd_cfg->yres << 16; /* VDLN */ - lcdc_write_chan(ch, LDVLNR, tmp); - - tmp = lcd_cfg->yres; - tmp += lcd_cfg->lower_margin; /* VSYNP */ - tmp |= lcd_cfg->vsync_len << 16; /* VSYNW */ - lcdc_write_chan(ch, LDVSYNR, tmp); - board_cfg = &ch->cfg.board_cfg; if (board_cfg->setup_sys) ret = board_cfg->setup_sys(board_cfg->board_data, ch, @@ -577,7 +592,7 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) board_cfg = &ch->cfg.board_cfg; if (board_cfg->display_on) - board_cfg->display_on(board_cfg->board_data); + board_cfg->display_on(board_cfg->board_data, ch->info); } return 0; @@ -943,6 +958,62 @@ static const struct dev_pm_ops sh_mobile_lcdc_dev_pm_ops = { .runtime_resume = sh_mobile_lcdc_runtime_resume, }; +static int sh_mobile_lcdc_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct fb_event *event = data; + struct fb_info *info = event->info; + struct sh_mobile_lcdc_chan *ch = info->par; + struct sh_mobile_lcdc_board_cfg *board_cfg = &ch->cfg.board_cfg; + struct fb_var_screeninfo *var; + + if (&ch->lcdc->notifier != nb) + return 0; + + dev_dbg(info->dev, "%s(): action = %lu, data = %p\n", + __func__, action, event->data); + + switch(action) { + case FB_EVENT_SUSPEND: + if (board_cfg->display_off) + board_cfg->display_off(board_cfg->board_data); + pm_runtime_put(info->device); + break; + case FB_EVENT_RESUME: + var = &info->var; + + /* HDMI must be enabled before LCDC configuration */ + if (board_cfg->display_on) + board_cfg->display_on(board_cfg->board_data, ch->info); + + /* Check if the new display is not in our modelist */ + if (ch->info->modelist.next && + !fb_match_mode(var, &ch->info->modelist)) { + struct fb_videomode mode; + int ret; + + /* Can we handle this display? 
*/ + if (var->xres > ch->cfg.lcd_cfg.xres || + var->yres > ch->cfg.lcd_cfg.yres) + return -ENOMEM; + + /* Add to the modelist */ + fb_var_to_videomode(&mode, var); + ret = fb_add_videomode(&mode, &ch->info->modelist); + if (ret < 0) + return ret; + } + + pm_runtime_get_sync(info->device); + + sh_mobile_lcdc_geometry(ch); + + break; + } + + return 0; +} + static int sh_mobile_lcdc_remove(struct platform_device *pdev); static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) @@ -1020,15 +1091,19 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) goto err1; } + priv->base = ioremap_nocache(res->start, resource_size(res)); + if (!priv->base) + goto err1; + error = sh_mobile_lcdc_setup_clocks(pdev, pdata->clock_source, priv); if (error) { dev_err(&pdev->dev, "unable to setup clocks\n"); goto err1; } - priv->base = ioremap_nocache(res->start, (res->end - res->start) + 1); - for (i = 0; i < j; i++) { + struct fb_var_screeninfo *var; + struct fb_videomode *lcd_cfg; cfg = &priv->ch[i].cfg; priv->ch[i].info = framebuffer_alloc(0, &pdev->dev); @@ -1039,22 +1114,33 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) } info = priv->ch[i].info; + var = &info->var; + lcd_cfg = &cfg->lcd_cfg; info->fbops = &sh_mobile_lcdc_ops; - info->var.xres = info->var.xres_virtual = cfg->lcd_cfg.xres; - info->var.yres = cfg->lcd_cfg.yres; + var->xres = var->xres_virtual = lcd_cfg->xres; + var->yres = lcd_cfg->yres; /* Default Y virtual resolution is 2x panel size */ - info->var.yres_virtual = info->var.yres * 2; - info->var.width = cfg->lcd_size_cfg.width; - info->var.height = cfg->lcd_size_cfg.height; - info->var.activate = FB_ACTIVATE_NOW; - error = sh_mobile_lcdc_set_bpp(&info->var, cfg->bpp); + var->yres_virtual = var->yres * 2; + var->width = cfg->lcd_size_cfg.width; + var->height = cfg->lcd_size_cfg.height; + var->activate = FB_ACTIVATE_NOW; + var->left_margin = lcd_cfg->left_margin; + var->right_margin = lcd_cfg->right_margin; + var->upper_margin = lcd_cfg->upper_margin; + var->lower_margin = lcd_cfg->lower_margin; + var->hsync_len = lcd_cfg->hsync_len; + var->vsync_len = lcd_cfg->vsync_len; + var->sync = lcd_cfg->sync; + var->pixclock = lcd_cfg->pixclock; + + error = sh_mobile_lcdc_set_bpp(var, cfg->bpp); if (error) break; info->fix = sh_mobile_lcdc_fix; - info->fix.line_length = cfg->lcd_cfg.xres * (cfg->bpp / 8); + info->fix.line_length = lcd_cfg->xres * (cfg->bpp / 8); info->fix.smem_len = info->fix.line_length * - info->var.yres_virtual; + var->yres_virtual; buf = dma_alloc_coherent(&pdev->dev, info->fix.smem_len, &priv->ch[i].dma_handle, GFP_KERNEL); @@ -1119,10 +1205,14 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) ch->cfg.bpp); /* deferred io mode: disable clock to save power */ - if (info->fbdefio) + if (info->fbdefio || info->state == FBINFO_STATE_SUSPENDED) sh_mobile_lcdc_clk_off(priv); } + /* Failure ignored */ + priv->notifier.notifier_call = sh_mobile_lcdc_notify; + fb_register_client(&priv->notifier); + return 0; err1: sh_mobile_lcdc_remove(pdev); @@ -1136,6 +1226,8 @@ static int sh_mobile_lcdc_remove(struct platform_device *pdev) struct fb_info *info; int i; + fb_unregister_client(&priv->notifier); + for (i = 0; i < ARRAY_SIZE(priv->ch); i++) if (priv->ch[i].info && priv->ch[i].info->dev) unregister_framebuffer(priv->ch[i].info); diff --git a/drivers/video/sunxvr1000.c b/drivers/video/sunxvr1000.c index 489b44e8db8..7288934c0d4 100644 --- a/drivers/video/sunxvr1000.c +++ b/drivers/video/sunxvr1000.c @@ 
-213,12 +213,12 @@ static int __init gfb_init(void) if (fb_get_options("gfb", NULL)) return -ENODEV; - return of_register_driver(&gfb_driver, &of_bus_type); + return of_register_platform_driver(&gfb_driver); } static void __exit gfb_exit(void) { - of_unregister_driver(&gfb_driver); + of_unregister_platform_driver(&gfb_driver); } module_init(gfb_init); diff --git a/drivers/video/tcx.c b/drivers/video/tcx.c index cc039b33d2d..f375e0db677 100644 --- a/drivers/video/tcx.c +++ b/drivers/video/tcx.c @@ -526,12 +526,12 @@ static int __init tcx_init(void) if (fb_get_options("tcxfb", NULL)) return -ENODEV; - return of_register_driver(&tcx_driver, &of_bus_type); + return of_register_platform_driver(&tcx_driver); } static void __exit tcx_exit(void) { - of_unregister_driver(&tcx_driver); + of_unregister_platform_driver(&tcx_driver); } module_init(tcx_init); diff --git a/drivers/video/tdfxfb.c b/drivers/video/tdfxfb.c index 98054839004..3ee5e63cfa4 100644 --- a/drivers/video/tdfxfb.c +++ b/drivers/video/tdfxfb.c @@ -1571,8 +1571,8 @@ out_err_iobase: if (default_par->mtrr_handle >= 0) mtrr_del(default_par->mtrr_handle, info->fix.smem_start, info->fix.smem_len); - release_mem_region(pci_resource_start(pdev, 2), - pci_resource_len(pdev, 2)); + release_region(pci_resource_start(pdev, 2), + pci_resource_len(pdev, 2)); out_err_screenbase: if (info->screen_base) iounmap(info->screen_base); diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c index fa97d3e7c21..7c7f42a1279 100644 --- a/drivers/video/xen-fbfront.c +++ b/drivers/video/xen-fbfront.c @@ -684,7 +684,7 @@ static struct xenbus_driver xenfb_driver = { static int __init xenfb_init(void) { - if (!xen_domain()) + if (!xen_pv_domain()) return -ENODEV; /* Nothing to do if running in dom0. */ diff --git a/drivers/video/xilinxfb.c b/drivers/video/xilinxfb.c index 574dc54e12d..29b5daacc21 100644 --- a/drivers/video/xilinxfb.c +++ b/drivers/video/xilinxfb.c @@ -485,6 +485,8 @@ static int __devexit xilinxfb_of_remove(struct of_device *op) /* Match table for of_platform binding */ static struct of_device_id xilinxfb_of_match[] __devinitdata = { { .compatible = "xlnx,xps-tft-1.00.a", }, + { .compatible = "xlnx,xps-tft-2.00.a", }, + { .compatible = "xlnx,xps-tft-2.01.a", }, { .compatible = "xlnx,plb-tft-cntlr-ref-1.00.a", }, { .compatible = "xlnx,plb-dvi-cntlr-ref-1.00.c", }, {}, diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index afcfacc9bbe..b04b1846893 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -875,6 +875,24 @@ config TXX9_WDT help Hardware driver for the built-in watchdog timer on TXx9 MIPS SoCs. +config OCTEON_WDT + tristate "Cavium OCTEON SOC family Watchdog Timer" + depends on CPU_CAVIUM_OCTEON + default y + select EXPORT_UASM if OCTEON_WDT = m + help + Hardware driver for OCTEON's on chip watchdog timer. + Enables the watchdog for all cores running Linux. It + installs a NMI handler and pokes the watchdog based on an + interrupt. On first expiration of the watchdog, the + interrupt handler pokes it. The second expiration causes an + NMI that prints a message. The third expiration causes a + global soft reset. + + When userspace has /dev/watchdog open, no poking is done + from the first interrupt, it is then only poked when the + device is written. 
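The help text above describes the usual Linux watchdog contract: once userspace holds /dev/watchdog open, only writes (or WDIOC_KEEPALIVE) keep the timer from expiring. Below is a minimal sketch of such a client; the device path and ioctls are the standard watchdog ABI, while the 60-second timeout and the magic-close 'V' write are illustrative and assume the driver's default heartbeat with nowayout disabled, as implemented later in this patch.

/* Illustrative userspace client sketch -- not part of this patch. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 60;			/* assumed heartbeat, seconds */
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/watchdog");
		return 1;
	}
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* optional: pick a heartbeat */

	for (int i = 0; i < 10; i++) {
		write(fd, "", 1);		/* any write pings the watchdog on every core */
		sleep(timeout / 2);
	}

	write(fd, "V", 1);			/* magic close: allow a clean stop */
	close(fd);				/* driver falls back to kernel self-poking mode */
	return 0;
}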
+ # PARISC Architecture # POWERPC Architecture diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index 72f3e2073f8..e30289a5e36 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -114,6 +114,8 @@ obj-$(CONFIG_PNX833X_WDT) += pnx833x_wdt.o obj-$(CONFIG_SIBYTE_WDOG) += sb_wdog.o obj-$(CONFIG_AR7_WDT) += ar7_wdt.o obj-$(CONFIG_TXX9_WDT) += txx9wdt.o +obj-$(CONFIG_OCTEON_WDT) += octeon-wdt.o +octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o # PARISC Architecture diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c index d62b9ce8f77..30a2512fd52 100644 --- a/drivers/watchdog/cpwd.c +++ b/drivers/watchdog/cpwd.c @@ -545,7 +545,7 @@ static int __devinit cpwd_probe(struct of_device *op, goto out; } - p->irq = op->irqs[0]; + p->irq = op->archdata.irqs[0]; spin_lock_init(&p->lock); @@ -688,12 +688,12 @@ static struct of_platform_driver cpwd_driver = { static int __init cpwd_init(void) { - return of_register_driver(&cpwd_driver, &of_bus_type); + return of_register_platform_driver(&cpwd_driver); } static void __exit cpwd_exit(void) { - of_unregister_driver(&cpwd_driver); + of_unregister_platform_driver(&cpwd_driver); } module_init(cpwd_init); diff --git a/drivers/watchdog/octeon-wdt-main.c b/drivers/watchdog/octeon-wdt-main.c new file mode 100644 index 00000000000..2a410170eca --- /dev/null +++ b/drivers/watchdog/octeon-wdt-main.c @@ -0,0 +1,745 @@ +/* + * Octeon Watchdog driver + * + * Copyright (C) 2007, 2008, 2009, 2010 Cavium Networks + * + * Some parts derived from wdt.c + * + * (c) Copyright 1996-1997 Alan Cox <alan@lxorguk.ukuu.org.uk>, + * All Rights Reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * Neither Alan Cox nor CymruNet Ltd. admit liability nor provide + * warranty for any of this software. This material is provided + * "AS-IS" and at no charge. + * + * (c) Copyright 1995 Alan Cox <alan@lxorguk.ukuu.org.uk> + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * + * The OCTEON watchdog has a maximum timeout of 2^32 * io_clock. + * For most systems this is less than 10 seconds, so to allow for + * software to request longer watchdog heartbeats, we maintain software + * counters to count multiples of the base rate. If the system locks + * up in such a manner that we can not run the software counters, the + * only result is a watchdog reset sooner than was requested. But + * that is OK, because in this case userspace would likely not be able + * to do anything anyhow. + * + * The hardware watchdog interval we call the period. The OCTEON + * watchdog goes through several stages, after the first period an + * irq is asserted, then if it is not reset, after the next period NMI + * is asserted, then after an additional period a chip wide soft reset. + * So for the software counters, we reset watchdog after each period + * and decrement the counter. But for the last two periods we need to + * let the watchdog progress to the NMI stage so we disable the irq + * and let it proceed. Once in the NMI, we print the register state + * to the serial port and then wait for the reset. 
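 * (Editor's illustrative note, not part of the original patch; a 1 GHz
 * io_clock is assumed purely for the arithmetic.)  LEN holds the top
 * 16 bits of a 24-bit counter that decrements every 256 cycles, so one
 * hardware period is at most 65535 * 256 * 256 cycles, about 4.29 s at
 * 1 GHz.  The init code below would therefore settle on
 * max_timeout_sec = 4 with timeout_cnt = ((1000000000 >> 8) * 4) >> 8
 * = 61035, and for the default 60 s heartbeat
 * octeon_wdt_calc_parameters() would pick timeout_sec = 4,
 * periods = 15 and countdown_reset = 13, the last two periods being
 * reserved for the irq-off and NMI stages.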
+ * + * A watchdog is maintained for each CPU in the system, that way if + * one CPU suffers a lockup, we also get a register dump and reset. + * The userspace ping resets the watchdog on all CPUs. + * + * Before userspace opens the watchdog device, we still run the + * watchdogs to catch any lockups that may be kernel related. + * + */ + +#include <linux/miscdevice.h> +#include <linux/interrupt.h> +#include <linux/watchdog.h> +#include <linux/cpumask.h> +#include <linux/bitops.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/delay.h> +#include <linux/cpu.h> +#include <linux/smp.h> +#include <linux/fs.h> + +#include <asm/mipsregs.h> +#include <asm/uasm.h> + +#include <asm/octeon/octeon.h> + +/* The count needed to achieve timeout_sec. */ +static unsigned int timeout_cnt; + +/* The maximum period supported. */ +static unsigned int max_timeout_sec; + +/* The current period. */ +static unsigned int timeout_sec; + +/* Set to non-zero when userspace countdown mode active */ +static int do_coundown; +static unsigned int countdown_reset; +static unsigned int per_cpu_countdown[NR_CPUS]; + +static cpumask_t irq_enabled_cpus; + +#define WD_TIMO 60 /* Default heartbeat = 60 seconds */ + +static int heartbeat = WD_TIMO; +module_param(heartbeat, int, S_IRUGO); +MODULE_PARM_DESC(heartbeat, + "Watchdog heartbeat in seconds. (0 < heartbeat, default=" + __MODULE_STRING(WD_TIMO) ")"); + +static int nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, int, S_IRUGO); +MODULE_PARM_DESC(nowayout, + "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +static unsigned long octeon_wdt_is_open; +static char expect_close; + +static u32 __initdata nmi_stage1_insns[64]; +/* We need one branch and therefore one relocation per target label. */ +static struct uasm_label __initdata labels[5]; +static struct uasm_reloc __initdata relocs[5]; + +enum lable_id { + label_enter_bootloader = 1 +}; + +/* Some CP0 registers */ +#define K0 26 +#define C0_CVMMEMCTL 11, 7 +#define C0_STATUS 12, 0 +#define C0_EBASE 15, 1 +#define C0_DESAVE 31, 0 + +void octeon_wdt_nmi_stage2(void); + +static void __init octeon_wdt_build_stage1(void) +{ + int i; + int len; + u32 *p = nmi_stage1_insns; +#ifdef CONFIG_HOTPLUG_CPU + struct uasm_label *l = labels; + struct uasm_reloc *r = relocs; +#endif + + /* + * For the next few instructions running the debugger may + * cause corruption of k0 in the saved registers. Since we're + * about to crash, nobody probably cares. + * + * Save K0 into the debug scratch register + */ + uasm_i_dmtc0(&p, K0, C0_DESAVE); + + uasm_i_mfc0(&p, K0, C0_STATUS); +#ifdef CONFIG_HOTPLUG_CPU + uasm_il_bbit0(&p, &r, K0, ilog2(ST0_NMI), label_enter_bootloader); +#endif + /* Force 64-bit addressing enabled */ + uasm_i_ori(&p, K0, K0, ST0_UX | ST0_SX | ST0_KX); + uasm_i_mtc0(&p, K0, C0_STATUS); + +#ifdef CONFIG_HOTPLUG_CPU + uasm_i_mfc0(&p, K0, C0_EBASE); + /* Coreid number in K0 */ + uasm_i_andi(&p, K0, K0, 0xf); + /* 8 * coreid in bits 16-31 */ + uasm_i_dsll_safe(&p, K0, K0, 3 + 16); + uasm_i_ori(&p, K0, K0, 0x8001); + uasm_i_dsll_safe(&p, K0, K0, 16); + uasm_i_ori(&p, K0, K0, 0x0700); + uasm_i_drotr_safe(&p, K0, K0, 32); + /* + * Should result in: 0x8001,0700,0000,8*coreid which is + * CVMX_CIU_WDOGX(coreid) - 0x0500 + * + * Now ld K0, CVMX_CIU_WDOGX(coreid) + */ + uasm_i_ld(&p, K0, 0x500, K0); + /* + * If bit one set handle the NMI as a watchdog event. + * otherwise transfer control to bootloader. 
+ */ + uasm_il_bbit0(&p, &r, K0, 1, label_enter_bootloader); + uasm_i_nop(&p); +#endif + + /* Clear Dcache so cvmseg works right. */ + uasm_i_cache(&p, 1, 0, 0); + + /* Use K0 to do a read/modify/write of CVMMEMCTL */ + uasm_i_dmfc0(&p, K0, C0_CVMMEMCTL); + /* Clear out the size of CVMSEG */ + uasm_i_dins(&p, K0, 0, 0, 6); + /* Set CVMSEG to its largest value */ + uasm_i_ori(&p, K0, K0, 0x1c0 | 54); + /* Store the CVMMEMCTL value */ + uasm_i_dmtc0(&p, K0, C0_CVMMEMCTL); + + /* Load the address of the second stage handler */ + UASM_i_LA(&p, K0, (long)octeon_wdt_nmi_stage2); + uasm_i_jr(&p, K0); + uasm_i_dmfc0(&p, K0, C0_DESAVE); + +#ifdef CONFIG_HOTPLUG_CPU + uasm_build_label(&l, p, label_enter_bootloader); + /* Jump to the bootloader and restore K0 */ + UASM_i_LA(&p, K0, (long)octeon_bootloader_entry_addr); + uasm_i_jr(&p, K0); + uasm_i_dmfc0(&p, K0, C0_DESAVE); +#endif + uasm_resolve_relocs(relocs, labels); + + len = (int)(p - nmi_stage1_insns); + pr_debug("Synthesized NMI stage 1 handler (%d instructions).\n", len); + + pr_debug("\t.set push\n"); + pr_debug("\t.set noreorder\n"); + for (i = 0; i < len; i++) + pr_debug("\t.word 0x%08x\n", nmi_stage1_insns[i]); + pr_debug("\t.set pop\n"); + + if (len > 32) + panic("NMI stage 1 handler exceeds 32 instructions, was %d\n", len); +} + +static int cpu2core(int cpu) +{ +#ifdef CONFIG_SMP + return cpu_logical_map(cpu); +#else + return cvmx_get_core_num(); +#endif +} + +static int core2cpu(int coreid) +{ +#ifdef CONFIG_SMP + return cpu_number_map(coreid); +#else + return 0; +#endif +} + +/** + * Poke the watchdog when an interrupt is received + * + * @cpl: + * @dev_id: + * + * Returns + */ +static irqreturn_t octeon_wdt_poke_irq(int cpl, void *dev_id) +{ + unsigned int core = cvmx_get_core_num(); + int cpu = core2cpu(core); + + if (do_coundown) { + if (per_cpu_countdown[cpu] > 0) { + /* We're alive, poke the watchdog */ + cvmx_write_csr(CVMX_CIU_PP_POKEX(core), 1); + per_cpu_countdown[cpu]--; + } else { + /* Bad news, you are about to reboot. */ + disable_irq_nosync(cpl); + cpumask_clear_cpu(cpu, &irq_enabled_cpus); + } + } else { + /* Not open, just ping away... */ + cvmx_write_csr(CVMX_CIU_PP_POKEX(core), 1); + } + return IRQ_HANDLED; +} + +/* From setup.c */ +extern int prom_putchar(char c); + +/** + * Write a string to the uart + * + * @str: String to write + */ +static void octeon_wdt_write_string(const char *str) +{ + /* Just loop writing one byte at a time */ + while (*str) + prom_putchar(*str++); +} + +/** + * Write a hex number out of the uart + * + * @value: Number to display + * @digits: Number of digits to print (1 to 16) + */ +static void octeon_wdt_write_hex(u64 value, int digits) +{ + int d; + int v; + for (d = 0; d < digits; d++) { + v = (value >> ((digits - d - 1) * 4)) & 0xf; + if (v >= 10) + prom_putchar('a' + v - 10); + else + prom_putchar('0' + v); + } +} + +const char *reg_name[] = { + "$0", "at", "v0", "v1", "a0", "a1", "a2", "a3", + "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3", + "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", + "t8", "t9", "k0", "k1", "gp", "sp", "s8", "ra" +}; + +/** + * NMI stage 3 handler. NMIs are handled in the following manner: + * 1) The first NMI handler enables CVMSEG and transfers from + * the bootbus region into normal memory. It is careful to not + * destroy any registers. + * 2) The second stage handler uses CVMSEG to save the registers + * and create a stack for C code. It then calls the third level + * handler with one argument, a pointer to the register values. 
+ * 3) The third, and final, level handler is the following C + * function that prints out some useful infomration. + * + * @reg: Pointer to register state before the NMI + */ +void octeon_wdt_nmi_stage3(u64 reg[32]) +{ + u64 i; + + unsigned int coreid = cvmx_get_core_num(); + /* + * Save status and cause early to get them before any changes + * might happen. + */ + u64 cp0_cause = read_c0_cause(); + u64 cp0_status = read_c0_status(); + u64 cp0_error_epc = read_c0_errorepc(); + u64 cp0_epc = read_c0_epc(); + + /* Delay so output from all cores output is not jumbled together. */ + __delay(100000000ull * coreid); + + octeon_wdt_write_string("\r\n*** NMI Watchdog interrupt on Core 0x"); + octeon_wdt_write_hex(coreid, 1); + octeon_wdt_write_string(" ***\r\n"); + for (i = 0; i < 32; i++) { + octeon_wdt_write_string("\t"); + octeon_wdt_write_string(reg_name[i]); + octeon_wdt_write_string("\t0x"); + octeon_wdt_write_hex(reg[i], 16); + if (i & 1) + octeon_wdt_write_string("\r\n"); + } + octeon_wdt_write_string("\terr_epc\t0x"); + octeon_wdt_write_hex(cp0_error_epc, 16); + + octeon_wdt_write_string("\tepc\t0x"); + octeon_wdt_write_hex(cp0_epc, 16); + octeon_wdt_write_string("\r\n"); + + octeon_wdt_write_string("\tstatus\t0x"); + octeon_wdt_write_hex(cp0_status, 16); + octeon_wdt_write_string("\tcause\t0x"); + octeon_wdt_write_hex(cp0_cause, 16); + octeon_wdt_write_string("\r\n"); + + octeon_wdt_write_string("\tsum0\t0x"); + octeon_wdt_write_hex(cvmx_read_csr(CVMX_CIU_INTX_SUM0(coreid * 2)), 16); + octeon_wdt_write_string("\ten0\t0x"); + octeon_wdt_write_hex(cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)), 16); + octeon_wdt_write_string("\r\n"); + + octeon_wdt_write_string("*** Chip soft reset soon ***\r\n"); +} + +static void octeon_wdt_disable_interrupt(int cpu) +{ + unsigned int core; + unsigned int irq; + union cvmx_ciu_wdogx ciu_wdog; + + core = cpu2core(cpu); + + irq = OCTEON_IRQ_WDOG0 + core; + + /* Poke the watchdog to clear out its state */ + cvmx_write_csr(CVMX_CIU_PP_POKEX(core), 1); + + /* Disable the hardware. */ + ciu_wdog.u64 = 0; + cvmx_write_csr(CVMX_CIU_WDOGX(core), ciu_wdog.u64); + + free_irq(irq, octeon_wdt_poke_irq); +} + +static void octeon_wdt_setup_interrupt(int cpu) +{ + unsigned int core; + unsigned int irq; + union cvmx_ciu_wdogx ciu_wdog; + + core = cpu2core(cpu); + + /* Disable it before doing anything with the interrupts. 
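	 * (Editor's note.)  Writing a zeroed CIU_WDOGX first takes the
	 * per-core watchdog out of action, so the countdown value, the
	 * poke IRQ and the new LEN/mode=3 programming below can all be
	 * installed without a half-configured watchdog firing.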
*/ + ciu_wdog.u64 = 0; + cvmx_write_csr(CVMX_CIU_WDOGX(core), ciu_wdog.u64); + + per_cpu_countdown[cpu] = countdown_reset; + + irq = OCTEON_IRQ_WDOG0 + core; + + if (request_irq(irq, octeon_wdt_poke_irq, + IRQF_DISABLED, "octeon_wdt", octeon_wdt_poke_irq)) + panic("octeon_wdt: Couldn't obtain irq %d", irq); + + cpumask_set_cpu(cpu, &irq_enabled_cpus); + + /* Poke the watchdog to clear out its state */ + cvmx_write_csr(CVMX_CIU_PP_POKEX(core), 1); + + /* Finally enable the watchdog now that all handlers are installed */ + ciu_wdog.u64 = 0; + ciu_wdog.s.len = timeout_cnt; + ciu_wdog.s.mode = 3; /* 3 = Interrupt + NMI + Soft-Reset */ + cvmx_write_csr(CVMX_CIU_WDOGX(core), ciu_wdog.u64); +} + +static int octeon_wdt_cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (unsigned long)hcpu; + + switch (action) { + case CPU_DOWN_PREPARE: + octeon_wdt_disable_interrupt(cpu); + break; + case CPU_ONLINE: + case CPU_DOWN_FAILED: + octeon_wdt_setup_interrupt(cpu); + break; + default: + break; + } + return NOTIFY_OK; +} + +static void octeon_wdt_ping(void) +{ + int cpu; + int coreid; + + for_each_online_cpu(cpu) { + coreid = cpu2core(cpu); + cvmx_write_csr(CVMX_CIU_PP_POKEX(coreid), 1); + per_cpu_countdown[cpu] = countdown_reset; + if ((countdown_reset || !do_coundown) && + !cpumask_test_cpu(cpu, &irq_enabled_cpus)) { + /* We have to enable the irq */ + int irq = OCTEON_IRQ_WDOG0 + coreid; + enable_irq(irq); + cpumask_set_cpu(cpu, &irq_enabled_cpus); + } + } +} + +static void octeon_wdt_calc_parameters(int t) +{ + unsigned int periods; + + timeout_sec = max_timeout_sec; + + + /* + * Find the largest interrupt period, that can evenly divide + * the requested heartbeat time. + */ + while ((t % timeout_sec) != 0) + timeout_sec--; + + periods = t / timeout_sec; + + /* + * The last two periods are after the irq is disabled, and + * then to the nmi, so we subtract them off. + */ + + countdown_reset = periods > 2 ? periods - 2 : 0; + heartbeat = t; + timeout_cnt = ((octeon_get_clock_rate() >> 8) * timeout_sec) >> 8; +} + +static int octeon_wdt_set_heartbeat(int t) +{ + int cpu; + int coreid; + union cvmx_ciu_wdogx ciu_wdog; + + if (t <= 0) + return -1; + + octeon_wdt_calc_parameters(t); + + for_each_online_cpu(cpu) { + coreid = cpu2core(cpu); + cvmx_write_csr(CVMX_CIU_PP_POKEX(coreid), 1); + ciu_wdog.u64 = 0; + ciu_wdog.s.len = timeout_cnt; + ciu_wdog.s.mode = 3; /* 3 = Interrupt + NMI + Soft-Reset */ + cvmx_write_csr(CVMX_CIU_WDOGX(coreid), ciu_wdog.u64); + cvmx_write_csr(CVMX_CIU_PP_POKEX(coreid), 1); + } + octeon_wdt_ping(); /* Get the irqs back on. */ + return 0; +} + +/** + * octeon_wdt_write: + * @file: file handle to the watchdog + * @buf: buffer to write (unused as data does not matter here + * @count: count of bytes + * @ppos: pointer to the position to write. No seeks allowed + * + * A write to a watchdog device is defined as a keepalive signal. Any + * write of data will do, as we we don't define content meaning. 
+ */ + +static ssize_t octeon_wdt_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + if (count) { + if (!nowayout) { + size_t i; + + /* In case it was set long ago */ + expect_close = 0; + + for (i = 0; i != count; i++) { + char c; + if (get_user(c, buf + i)) + return -EFAULT; + if (c == 'V') + expect_close = 1; + } + } + octeon_wdt_ping(); + } + return count; +} + +/** + * octeon_wdt_ioctl: + * @file: file handle to the device + * @cmd: watchdog command + * @arg: argument pointer + * + * The watchdog API defines a common set of functions for all + * watchdogs according to their available features. We only + * actually usefully support querying capabilities and setting + * the timeout. + */ + +static long octeon_wdt_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + void __user *argp = (void __user *)arg; + int __user *p = argp; + int new_heartbeat; + + static struct watchdog_info ident = { + .options = WDIOF_SETTIMEOUT| + WDIOF_MAGICCLOSE| + WDIOF_KEEPALIVEPING, + .firmware_version = 1, + .identity = "OCTEON", + }; + + switch (cmd) { + case WDIOC_GETSUPPORT: + return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0; + case WDIOC_GETSTATUS: + case WDIOC_GETBOOTSTATUS: + return put_user(0, p); + case WDIOC_KEEPALIVE: + octeon_wdt_ping(); + return 0; + case WDIOC_SETTIMEOUT: + if (get_user(new_heartbeat, p)) + return -EFAULT; + if (octeon_wdt_set_heartbeat(new_heartbeat)) + return -EINVAL; + /* Fall through. */ + case WDIOC_GETTIMEOUT: + return put_user(heartbeat, p); + default: + return -ENOTTY; + } +} + +/** + * octeon_wdt_open: + * @inode: inode of device + * @file: file handle to device + * + * The watchdog device has been opened. The watchdog device is single + * open and on opening we do a ping to reset the counters. + */ + +static int octeon_wdt_open(struct inode *inode, struct file *file) +{ + if (test_and_set_bit(0, &octeon_wdt_is_open)) + return -EBUSY; + /* + * Activate + */ + octeon_wdt_ping(); + do_coundown = 1; + return nonseekable_open(inode, file); +} + +/** + * octeon_wdt_release: + * @inode: inode to board + * @file: file handle to board + * + * The watchdog has a configurable API. There is a religious dispute + * between people who want their watchdog to be able to shut down and + * those who want to be sure if the watchdog manager dies the machine + * reboots. In the former case we disable the counters, in the latter + * case you have to open it again very soon. + */ + +static int octeon_wdt_release(struct inode *inode, struct file *file) +{ + if (expect_close) { + do_coundown = 0; + octeon_wdt_ping(); + } else { + pr_crit("octeon_wdt: WDT device closed unexpectedly. WDT will not stop!\n"); + } + clear_bit(0, &octeon_wdt_is_open); + expect_close = 0; + return 0; +} + +static const struct file_operations octeon_wdt_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .write = octeon_wdt_write, + .unlocked_ioctl = octeon_wdt_ioctl, + .open = octeon_wdt_open, + .release = octeon_wdt_release, +}; + +static struct miscdevice octeon_wdt_miscdev = { + .minor = WATCHDOG_MINOR, + .name = "watchdog", + .fops = &octeon_wdt_fops, +}; + +static struct notifier_block octeon_wdt_cpu_notifier = { + .notifier_call = octeon_wdt_cpu_callback, +}; + + +/** + * Module/ driver initialization. 
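 * (Editor's summary, not part of the original patch.)  Initialization
 * finds the largest whole-second period that fits the 16-bit LEN
 * field, derives the countdown parameters for the requested heartbeat,
 * registers the misc watchdog device, synthesizes the stage-1 NMI
 * handler with uasm and copies it into the boot-bus local memory
 * region, arms the watchdog on every online CPU, and finally registers
 * a CPU hotplug notifier so cores that go offline and come back keep
 * their watchdogs.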
+ * + * Returns Zero on success + */ +static int __init octeon_wdt_init(void) +{ + int i; + int ret; + int cpu; + u64 *ptr; + + /* + * Watchdog time expiration length = The 16 bits of LEN + * represent the most significant bits of a 24 bit decrementer + * that decrements every 256 cycles. + * + * Try for a timeout of 5 sec, if that fails a smaller number + * of even seconds, + */ + max_timeout_sec = 6; + do { + max_timeout_sec--; + timeout_cnt = ((octeon_get_clock_rate() >> 8) * max_timeout_sec) >> 8; + } while (timeout_cnt > 65535); + + BUG_ON(timeout_cnt == 0); + + octeon_wdt_calc_parameters(heartbeat); + + pr_info("octeon_wdt: Initial granularity %d Sec.\n", timeout_sec); + + ret = misc_register(&octeon_wdt_miscdev); + if (ret) { + pr_err("octeon_wdt: cannot register miscdev on minor=%d (err=%d)\n", + WATCHDOG_MINOR, ret); + goto out; + } + + /* Build the NMI handler ... */ + octeon_wdt_build_stage1(); + + /* ... and install it. */ + ptr = (u64 *) nmi_stage1_insns; + for (i = 0; i < 16; i++) { + cvmx_write_csr(CVMX_MIO_BOOT_LOC_ADR, i * 8); + cvmx_write_csr(CVMX_MIO_BOOT_LOC_DAT, ptr[i]); + } + cvmx_write_csr(CVMX_MIO_BOOT_LOC_CFGX(0), 0x81fc0000); + + cpumask_clear(&irq_enabled_cpus); + + for_each_online_cpu(cpu) + octeon_wdt_setup_interrupt(cpu); + + register_hotcpu_notifier(&octeon_wdt_cpu_notifier); +out: + return ret; +} + +/** + * Module / driver shutdown + */ +static void __exit octeon_wdt_cleanup(void) +{ + int cpu; + + misc_deregister(&octeon_wdt_miscdev); + + unregister_hotcpu_notifier(&octeon_wdt_cpu_notifier); + + for_each_online_cpu(cpu) { + int core = cpu2core(cpu); + /* Disable the watchdog */ + cvmx_write_csr(CVMX_CIU_WDOGX(core), 0); + /* Free the interrupt handler */ + free_irq(OCTEON_IRQ_WDOG0 + core, octeon_wdt_poke_irq); + } + /* + * Disable the boot-bus memory, the code it points to is soon + * to go missing. + */ + cvmx_write_csr(CVMX_MIO_BOOT_LOC_CFGX(0), 0); +} + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>"); +MODULE_DESCRIPTION("Cavium Networks Octeon Watchdog driver."); +module_init(octeon_wdt_init); +module_exit(octeon_wdt_cleanup); diff --git a/drivers/watchdog/octeon-wdt-nmi.S b/drivers/watchdog/octeon-wdt-nmi.S new file mode 100644 index 00000000000..8a900a5e323 --- /dev/null +++ b/drivers/watchdog/octeon-wdt-nmi.S @@ -0,0 +1,64 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2007 Cavium Networks + */ +#include <asm/asm.h> +#include <asm/regdef.h> + +#define SAVE_REG(r) sd $r, -32768+6912-(32-r)*8($0) + + NESTED(octeon_wdt_nmi_stage2, 0, sp) + .set push + .set noreorder + .set noat + /* Save all registers to the top CVMSEG. This shouldn't + * corrupt any state used by the kernel. Also all registers + * should have the value right before the NMI. 
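	 * (Editor's note, not part of the original patch.)  The magic
	 * numbers follow from the stage-1 handler: CVMMEMCTL was set up
	 * for 54 cache lines of CVMSEG (54 * 128 = 6912 bytes), and
	 * CVMSEG sits at the very top of the address space, reachable as
	 * the sign-extended offset -32768 from register $0.  Register r
	 * is therefore saved at -32768 + 6912 - (32 - r) * 8, and sp is
	 * pointed just below the 32 saved registers.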
*/ + SAVE_REG(0) + SAVE_REG(1) + SAVE_REG(2) + SAVE_REG(3) + SAVE_REG(4) + SAVE_REG(5) + SAVE_REG(6) + SAVE_REG(7) + SAVE_REG(8) + SAVE_REG(9) + SAVE_REG(10) + SAVE_REG(11) + SAVE_REG(12) + SAVE_REG(13) + SAVE_REG(14) + SAVE_REG(15) + SAVE_REG(16) + SAVE_REG(17) + SAVE_REG(18) + SAVE_REG(19) + SAVE_REG(20) + SAVE_REG(21) + SAVE_REG(22) + SAVE_REG(23) + SAVE_REG(24) + SAVE_REG(25) + SAVE_REG(26) + SAVE_REG(27) + SAVE_REG(28) + SAVE_REG(29) + SAVE_REG(30) + SAVE_REG(31) + /* Set the stack to begin right below the registers */ + li sp, -32768+6912-32*8 + /* Load the address of the third stage handler */ + dla a0, octeon_wdt_nmi_stage3 + /* Call the third stage handler */ + jal a0 + /* a0 is the address of the saved registers */ + move a0, sp + /* Loop forvever if we get here. */ +1: b 1b + nop + .set pop + END(octeon_wdt_nmi_stage2) diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c index 5dceeddc885..4082b4ace1f 100644 --- a/drivers/watchdog/riowd.c +++ b/drivers/watchdog/riowd.c @@ -250,12 +250,12 @@ static struct of_platform_driver riowd_driver = { static int __init riowd_init(void) { - return of_register_driver(&riowd_driver, &of_bus_type); + return of_register_platform_driver(&riowd_driver); } static void __exit riowd_exit(void) { - of_unregister_driver(&riowd_driver); + of_unregister_platform_driver(&riowd_driver); } module_init(riowd_init); diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index fad3df2c127..0a882693663 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -62,4 +62,13 @@ config XEN_SYS_HYPERVISOR virtual environment, /sys/hypervisor will still be present, but will have no xen contents. +config XEN_PLATFORM_PCI + tristate "xen platform pci device driver" + depends on XEN_PVHVM + default m + help + Driver for the Xen PCI Platform device: it is responsible for + initializing xenbus and grant_table when running in a Xen HVM + domain. As a consequence this driver is required to run any Xen PV + frontend on Xen HVM. endmenu diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index 7c284342f30..e392fb776af 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile @@ -9,4 +9,5 @@ obj-$(CONFIG_XEN_XENCOMM) += xencomm.o obj-$(CONFIG_XEN_BALLOON) += balloon.o obj-$(CONFIG_XEN_DEV_EVTCHN) += evtchn.o obj-$(CONFIG_XENFS) += xenfs/ -obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
\ No newline at end of file +obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o +obj-$(CONFIG_XEN_PLATFORM_PCI) += platform-pci.o diff --git a/drivers/xen/events.c b/drivers/xen/events.c index db8f506817f..5e1f34892dc 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -29,6 +29,7 @@ #include <linux/bootmem.h> #include <linux/slab.h> +#include <asm/desc.h> #include <asm/ptrace.h> #include <asm/irq.h> #include <asm/idle.h> @@ -36,10 +37,14 @@ #include <asm/xen/hypercall.h> #include <asm/xen/hypervisor.h> +#include <xen/xen.h> +#include <xen/hvm.h> #include <xen/xen-ops.h> #include <xen/events.h> #include <xen/interface/xen.h> #include <xen/interface/event_channel.h> +#include <xen/interface/hvm/hvm_op.h> +#include <xen/interface/hvm/params.h> /* * This lock protects updates to the following mapping and reference-count @@ -335,9 +340,18 @@ static int find_unbound_irq(void) int irq; struct irq_desc *desc; - for (irq = 0; irq < nr_irqs; irq++) + for (irq = 0; irq < nr_irqs; irq++) { + desc = irq_to_desc(irq); + /* only 0->15 have init'd desc; handle irq > 16 */ + if (desc == NULL) + break; + if (desc->chip == &no_irq_chip) + break; + if (desc->chip != &xen_dynamic_chip) + continue; if (irq_info[irq].type == IRQT_UNBOUND) break; + } if (irq == nr_irqs) panic("No available IRQ to bind to: increase nr_irqs!\n"); @@ -346,7 +360,7 @@ static int find_unbound_irq(void) if (WARN_ON(desc == NULL)) return -1; - dynamic_irq_init(irq); + dynamic_irq_init_keep_chip_data(irq); return irq; } @@ -617,17 +631,13 @@ static DEFINE_PER_CPU(unsigned, xed_nesting_count); * a bitset of words which contain pending event bits. The second * level is a bitset of pending events themselves. */ -void xen_evtchn_do_upcall(struct pt_regs *regs) +static void __xen_evtchn_do_upcall(void) { int cpu = get_cpu(); - struct pt_regs *old_regs = set_irq_regs(regs); struct shared_info *s = HYPERVISOR_shared_info; struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu); unsigned count; - exit_idle(); - irq_enter(); - do { unsigned long pending_words; @@ -664,14 +674,31 @@ void xen_evtchn_do_upcall(struct pt_regs *regs) count = __get_cpu_var(xed_nesting_count); __get_cpu_var(xed_nesting_count) = 0; - } while(count != 1); + } while (count != 1 || vcpu_info->evtchn_upcall_pending); out: + + put_cpu(); +} + +void xen_evtchn_do_upcall(struct pt_regs *regs) +{ + struct pt_regs *old_regs = set_irq_regs(regs); + + exit_idle(); + irq_enter(); + + __xen_evtchn_do_upcall(); + irq_exit(); set_irq_regs(old_regs); +} - put_cpu(); +void xen_hvm_evtchn_do_upcall(void) +{ + __xen_evtchn_do_upcall(); } +EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall); /* Rebind a new event channel to an existing irq. */ void rebind_evtchn_irq(int evtchn, int irq) @@ -708,7 +735,10 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) struct evtchn_bind_vcpu bind_vcpu; int evtchn = evtchn_from_irq(irq); - if (!VALID_EVTCHN(evtchn)) + /* events delivered via platform PCI interrupts are always + * routed to vcpu 0 */ + if (!VALID_EVTCHN(evtchn) || + (xen_hvm_domain() && !xen_have_vector_callback)) return -1; /* Send future instances of this interrupt to other vcpu. 
*/ @@ -933,6 +963,44 @@ static struct irq_chip xen_dynamic_chip __read_mostly = { .retrigger = retrigger_dynirq, }; +int xen_set_callback_via(uint64_t via) +{ + struct xen_hvm_param a; + a.domid = DOMID_SELF; + a.index = HVM_PARAM_CALLBACK_IRQ; + a.value = via; + return HYPERVISOR_hvm_op(HVMOP_set_param, &a); +} +EXPORT_SYMBOL_GPL(xen_set_callback_via); + +#ifdef CONFIG_XEN_PVHVM +/* Vector callbacks are better than PCI interrupts to receive event + * channel notifications because we can receive vector callbacks on any + * vcpu and we don't need PCI support or APIC interactions. */ +void xen_callback_vector(void) +{ + int rc; + uint64_t callback_via; + if (xen_have_vector_callback) { + callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK); + rc = xen_set_callback_via(callback_via); + if (rc) { + printk(KERN_ERR "Request for Xen HVM callback vector" + " failed.\n"); + xen_have_vector_callback = 0; + return; + } + printk(KERN_INFO "Xen HVM callback vector for event delivery is " + "enabled\n"); + /* in the restore case the vector has already been allocated */ + if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors)) + alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector); + } +} +#else +void xen_callback_vector(void) {} +#endif + void __init xen_init_IRQ(void) { int i; @@ -947,5 +1015,10 @@ void __init xen_init_IRQ(void) for (i = 0; i < NR_EVENT_CHANNELS; i++) mask_evtchn(i); - irq_ctx_init(smp_processor_id()); + if (xen_hvm_domain()) { + xen_callback_vector(); + native_init_IRQ(); + } else { + irq_ctx_init(smp_processor_id()); + } } diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index f66db3b91d6..6c453181649 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -37,11 +37,13 @@ #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/uaccess.h> +#include <linux/io.h> #include <xen/xen.h> #include <xen/interface/xen.h> #include <xen/page.h> #include <xen/grant_table.h> +#include <xen/interface/memory.h> #include <asm/xen/hypercall.h> #include <asm/pgtable.h> @@ -59,6 +61,8 @@ static unsigned int boot_max_nr_grant_frames; static int gnttab_free_count; static grant_ref_t gnttab_free_head; static DEFINE_SPINLOCK(gnttab_list_lock); +unsigned long xen_hvm_resume_frames; +EXPORT_SYMBOL_GPL(xen_hvm_resume_frames); static struct grant_entry *shared; @@ -433,7 +437,7 @@ static unsigned int __max_nr_grant_frames(void) return query.max_nr_frames; } -static inline unsigned int max_nr_grant_frames(void) +unsigned int gnttab_max_grant_frames(void) { unsigned int xen_max = __max_nr_grant_frames(); @@ -441,6 +445,7 @@ static inline unsigned int max_nr_grant_frames(void) return boot_max_nr_grant_frames; return xen_max; } +EXPORT_SYMBOL_GPL(gnttab_max_grant_frames); static int gnttab_map(unsigned int start_idx, unsigned int end_idx) { @@ -449,6 +454,30 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx) unsigned int nr_gframes = end_idx + 1; int rc; + if (xen_hvm_domain()) { + struct xen_add_to_physmap xatp; + unsigned int i = end_idx; + rc = 0; + /* + * Loop backwards, so that the first hypercall has the largest + * index, ensuring that the table will grow only once. 
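		 * (Editor's illustrative note.)  With start_idx 0 and
		 * end_idx 7, for instance, the first call passes idx 7,
		 * so the hypervisor grows the grant table to eight frames
		 * in a single step; the remaining iterations simply map
		 * the lower-numbered frames at gpfn
		 * (xen_hvm_resume_frames >> PAGE_SHIFT) + i.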
+ */ + do { + xatp.domid = DOMID_SELF; + xatp.idx = i; + xatp.space = XENMAPSPACE_grant_table; + xatp.gpfn = (xen_hvm_resume_frames >> PAGE_SHIFT) + i; + rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp); + if (rc != 0) { + printk(KERN_WARNING + "grant table add_to_physmap failed, err=%d\n", rc); + break; + } + } while (i-- > start_idx); + + return rc; + } + frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC); if (!frames) return -ENOMEM; @@ -465,7 +494,7 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx) BUG_ON(rc || setup.status); - rc = arch_gnttab_map_shared(frames, nr_gframes, max_nr_grant_frames(), + rc = arch_gnttab_map_shared(frames, nr_gframes, gnttab_max_grant_frames(), &shared); BUG_ON(rc); @@ -476,9 +505,27 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx) int gnttab_resume(void) { - if (max_nr_grant_frames() < nr_grant_frames) + unsigned int max_nr_gframes; + + max_nr_gframes = gnttab_max_grant_frames(); + if (max_nr_gframes < nr_grant_frames) return -ENOSYS; - return gnttab_map(0, nr_grant_frames - 1); + + if (xen_pv_domain()) + return gnttab_map(0, nr_grant_frames - 1); + + if (!shared) { + shared = ioremap(xen_hvm_resume_frames, PAGE_SIZE * max_nr_gframes); + if (shared == NULL) { + printk(KERN_WARNING + "Failed to ioremap gnttab share frames!"); + return -ENOMEM; + } + } + + gnttab_map(0, nr_grant_frames - 1); + + return 0; } int gnttab_suspend(void) @@ -495,7 +542,7 @@ static int gnttab_expand(unsigned int req_entries) cur = nr_grant_frames; extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) / GREFS_PER_GRANT_FRAME); - if (cur + extra > max_nr_grant_frames()) + if (cur + extra > gnttab_max_grant_frames()) return -ENOSPC; rc = gnttab_map(cur, cur + extra - 1); @@ -505,15 +552,12 @@ static int gnttab_expand(unsigned int req_entries) return rc; } -static int __devinit gnttab_init(void) +int gnttab_init(void) { int i; unsigned int max_nr_glist_frames, nr_glist_frames; unsigned int nr_init_grefs; - if (!xen_domain()) - return -ENODEV; - nr_grant_frames = 1; boot_max_nr_grant_frames = __max_nr_grant_frames(); @@ -556,5 +600,18 @@ static int __devinit gnttab_init(void) kfree(gnttab_list); return -ENOMEM; } +EXPORT_SYMBOL_GPL(gnttab_init); + +static int __devinit __gnttab_init(void) +{ + /* Delay grant-table initialization in the PV on HVM case */ + if (xen_hvm_domain()) + return 0; + + if (!xen_pv_domain()) + return -ENODEV; + + return gnttab_init(); +} -core_initcall(gnttab_init); +core_initcall(__gnttab_init); diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index 07e857b0de1..1799bd89031 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -9,6 +9,7 @@ #include <linux/stop_machine.h> #include <linux/freezer.h> +#include <xen/xen.h> #include <xen/xenbus.h> #include <xen/grant_table.h> #include <xen/events.h> @@ -17,6 +18,7 @@ #include <asm/xen/hypercall.h> #include <asm/xen/page.h> +#include <asm/xen/hypervisor.h> enum shutdown_state { SHUTDOWN_INVALID = -1, @@ -33,10 +35,30 @@ enum shutdown_state { static enum shutdown_state shutting_down = SHUTDOWN_INVALID; #ifdef CONFIG_PM_SLEEP -static int xen_suspend(void *data) +static int xen_hvm_suspend(void *data) { + struct sched_shutdown r = { .reason = SHUTDOWN_suspend }; int *cancelled = data; + + BUG_ON(!irqs_disabled()); + + *cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r); + + xen_hvm_post_suspend(*cancelled); + gnttab_resume(); + + if (!*cancelled) { + xen_irq_resume(); + xen_timer_resume(); + } + + return 0; +} + +static int xen_suspend(void 
*data) +{ int err; + int *cancelled = data; BUG_ON(!irqs_disabled()); @@ -106,7 +128,10 @@ static void do_suspend(void) goto out_resume; } - err = stop_machine(xen_suspend, &cancelled, cpumask_of(0)); + if (xen_hvm_domain()) + err = stop_machine(xen_hvm_suspend, &cancelled, cpumask_of(0)); + else + err = stop_machine(xen_suspend, &cancelled, cpumask_of(0)); dpm_resume_noirq(PMSG_RESUME); @@ -255,7 +280,19 @@ static int shutdown_event(struct notifier_block *notifier, return NOTIFY_DONE; } -static int __init setup_shutdown_event(void) +static int __init __setup_shutdown_event(void) +{ + /* Delay initialization in the PV on HVM case */ + if (xen_hvm_domain()) + return 0; + + if (!xen_pv_domain()) + return -ENODEV; + + return xen_setup_shutdown_event(); +} + +int xen_setup_shutdown_event(void) { static struct notifier_block xenstore_notifier = { .notifier_call = shutdown_event @@ -264,5 +301,6 @@ static int __init setup_shutdown_event(void) return 0; } +EXPORT_SYMBOL_GPL(xen_setup_shutdown_event); -subsys_initcall(setup_shutdown_event); +subsys_initcall(__setup_shutdown_event); diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c new file mode 100644 index 00000000000..c01b5ddce52 --- /dev/null +++ b/drivers/xen/platform-pci.c @@ -0,0 +1,207 @@ +/****************************************************************************** + * platform-pci.c + * + * Xen platform PCI device driver + * Copyright (c) 2005, Intel Corporation. + * Copyright (c) 2007, XenSource Inc. + * Copyright (c) 2010, Citrix + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple + * Place - Suite 330, Boston, MA 02111-1307 USA. + * + */ + + +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include <xen/platform_pci.h> +#include <xen/grant_table.h> +#include <xen/xenbus.h> +#include <xen/events.h> +#include <xen/hvm.h> +#include <xen/xen-ops.h> + +#define DRV_NAME "xen-platform-pci" + +MODULE_AUTHOR("ssmith@xensource.com and stefano.stabellini@eu.citrix.com"); +MODULE_DESCRIPTION("Xen platform PCI device"); +MODULE_LICENSE("GPL"); + +static unsigned long platform_mmio; +static unsigned long platform_mmio_alloc; +static unsigned long platform_mmiolen; +static uint64_t callback_via; + +unsigned long alloc_xen_mmio(unsigned long len) +{ + unsigned long addr; + + addr = platform_mmio + platform_mmio_alloc; + platform_mmio_alloc += len; + BUG_ON(platform_mmio_alloc > platform_mmiolen); + + return addr; +} + +static uint64_t get_callback_via(struct pci_dev *pdev) +{ + u8 pin; + int irq; + + irq = pdev->irq; + if (irq < 16) + return irq; /* ISA IRQ */ + + pin = pdev->pin; + + /* We don't know the GSI. Specify the PCI INTx line instead. 
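	 * (Editor's illustrative note, hypothetical device.)  For a
	 * platform device at 0000:00:03.0 routed to INTA, devfn is 0x18
	 * and pin is 1, so the expression below evaluates to
	 * (1ULL << 56) | (0x18ULL << 8) | 0 = 0x0100000000001800.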
*/ + return ((uint64_t)0x01 << 56) | /* PCI INTx identifier */ + ((uint64_t)pci_domain_nr(pdev->bus) << 32) | + ((uint64_t)pdev->bus->number << 16) | + ((uint64_t)(pdev->devfn & 0xff) << 8) | + ((uint64_t)(pin - 1) & 3); +} + +static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id) +{ + xen_hvm_evtchn_do_upcall(); + return IRQ_HANDLED; +} + +static int xen_allocate_irq(struct pci_dev *pdev) +{ + return request_irq(pdev->irq, do_hvm_evtchn_intr, + IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TRIGGER_RISING, + "xen-platform-pci", pdev); +} + +static int platform_pci_resume(struct pci_dev *pdev) +{ + int err; + if (xen_have_vector_callback) + return 0; + err = xen_set_callback_via(callback_via); + if (err) { + dev_err(&pdev->dev, "platform_pci_resume failure!\n"); + return err; + } + return 0; +} + +static int __devinit platform_pci_init(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int i, ret; + long ioaddr, iolen; + long mmio_addr, mmio_len; + unsigned int max_nr_gframes; + + i = pci_enable_device(pdev); + if (i) + return i; + + ioaddr = pci_resource_start(pdev, 0); + iolen = pci_resource_len(pdev, 0); + + mmio_addr = pci_resource_start(pdev, 1); + mmio_len = pci_resource_len(pdev, 1); + + if (mmio_addr == 0 || ioaddr == 0) { + dev_err(&pdev->dev, "no resources found\n"); + ret = -ENOENT; + goto pci_out; + } + + if (request_mem_region(mmio_addr, mmio_len, DRV_NAME) == NULL) { + dev_err(&pdev->dev, "MEM I/O resource 0x%lx @ 0x%lx busy\n", + mmio_addr, mmio_len); + ret = -EBUSY; + goto pci_out; + } + + if (request_region(ioaddr, iolen, DRV_NAME) == NULL) { + dev_err(&pdev->dev, "I/O resource 0x%lx @ 0x%lx busy\n", + iolen, ioaddr); + ret = -EBUSY; + goto mem_out; + } + + platform_mmio = mmio_addr; + platform_mmiolen = mmio_len; + + if (!xen_have_vector_callback) { + ret = xen_allocate_irq(pdev); + if (ret) { + dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret); + goto out; + } + callback_via = get_callback_via(pdev); + ret = xen_set_callback_via(callback_via); + if (ret) { + dev_warn(&pdev->dev, "Unable to set the evtchn callback " + "err=%d\n", ret); + goto out; + } + } + + max_nr_gframes = gnttab_max_grant_frames(); + xen_hvm_resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes); + ret = gnttab_init(); + if (ret) + goto out; + xenbus_probe(NULL); + ret = xen_setup_shutdown_event(); + if (ret) + goto out; + return 0; + +out: + release_region(ioaddr, iolen); +mem_out: + release_mem_region(mmio_addr, mmio_len); +pci_out: + pci_disable_device(pdev); + return ret; +} + +static struct pci_device_id platform_pci_tbl[] __devinitdata = { + {PCI_VENDOR_ID_XEN, PCI_DEVICE_ID_XEN_PLATFORM, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {0,} +}; + +MODULE_DEVICE_TABLE(pci, platform_pci_tbl); + +static struct pci_driver platform_driver = { + .name = DRV_NAME, + .probe = platform_pci_init, + .id_table = platform_pci_tbl, +#ifdef CONFIG_PM + .resume_early = platform_pci_resume, +#endif +}; + +static int __init platform_pci_module_init(void) +{ + /* no unplug has been done, IGNORE hasn't been specified: just + * return now */ + if (!xen_platform_pci_unplug) + return -ENODEV; + + return pci_register_driver(&platform_driver); +} + +module_init(platform_pci_module_init); diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index 3479332113e..29bac511887 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -56,6 +56,9 @@ #include <xen/events.h> #include <xen/page.h> +#include <xen/platform_pci.h> +#include <xen/hvm.h> + 
#include "xenbus_comms.h" #include "xenbus_probe.h" @@ -752,10 +755,7 @@ int register_xenstore_notifier(struct notifier_block *nb) { int ret = 0; - if (xenstored_ready > 0) - ret = nb->notifier_call(nb, 0, NULL); - else - blocking_notifier_chain_register(&xenstore_chain, nb); + blocking_notifier_chain_register(&xenstore_chain, nb); return ret; } @@ -779,8 +779,23 @@ void xenbus_probe(struct work_struct *unused) /* Notify others that xenstore is up */ blocking_notifier_call_chain(&xenstore_chain, 0, NULL); } +EXPORT_SYMBOL_GPL(xenbus_probe); + +static int __init xenbus_probe_initcall(void) +{ + if (!xen_domain()) + return -ENODEV; + + if (xen_initial_domain() || xen_hvm_domain()) + return 0; + + xenbus_probe(NULL); + return 0; +} + +device_initcall(xenbus_probe_initcall); -static int __init xenbus_probe_init(void) +static int __init xenbus_init(void) { int err = 0; @@ -805,11 +820,24 @@ static int __init xenbus_probe_init(void) if (xen_initial_domain()) { /* dom0 not yet supported */ } else { + if (xen_hvm_domain()) { + uint64_t v = 0; + err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); + if (err) + goto out_error; + xen_store_evtchn = (int)v; + err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v); + if (err) + goto out_error; + xen_store_mfn = (unsigned long)v; + xen_store_interface = ioremap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE); + } else { + xen_store_evtchn = xen_start_info->store_evtchn; + xen_store_mfn = xen_start_info->store_mfn; + xen_store_interface = mfn_to_virt(xen_store_mfn); + } xenstored_ready = 1; - xen_store_evtchn = xen_start_info->store_evtchn; - xen_store_mfn = xen_start_info->store_mfn; } - xen_store_interface = mfn_to_virt(xen_store_mfn); /* Initialize the interface to xenstore. */ err = xs_init(); @@ -819,9 +847,6 @@ static int __init xenbus_probe_init(void) goto out_unreg_back; } - if (!xen_initial_domain()) - xenbus_probe(NULL); - #ifdef CONFIG_XEN_COMPAT_XENFS /* * Create xenfs mountpoint in /proc for compatibility with @@ -842,7 +867,7 @@ static int __init xenbus_probe_init(void) return err; } -postcore_initcall(xenbus_probe_init); +postcore_initcall(xenbus_init); MODULE_LICENSE("GPL"); @@ -950,6 +975,9 @@ static void wait_for_devices(struct xenbus_driver *xendrv) #ifndef MODULE static int __init boot_wait_for_devices(void) { + if (xen_hvm_domain() && !xen_platform_pci_unplug) + return -ENODEV; + ready_to_wait_for_devices = 1; wait_for_devices(NULL); return 0; diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c index 7b547f53f65..5534690075a 100644 --- a/drivers/xen/xenbus/xenbus_xs.c +++ b/drivers/xen/xenbus/xenbus_xs.c @@ -76,6 +76,14 @@ struct xs_handle { /* * Mutex ordering: transaction_mutex -> watch_mutex -> request_mutex. * response_mutex is never taken simultaneously with the other three. + * + * transaction_mutex must be held before incrementing + * transaction_count. The mutex is held when a suspend is in + * progress to prevent new transactions starting. + * + * When decrementing transaction_count to zero the wait queue + * should be woken up, the suspend code waits for count to + * reach zero. */ /* One request at a time. */ @@ -85,7 +93,9 @@ struct xs_handle { struct mutex response_mutex; /* Protect transactions against save/restore. */ - struct rw_semaphore transaction_mutex; + struct mutex transaction_mutex; + atomic_t transaction_count; + wait_queue_head_t transaction_wq; /* Protect watch (de)register against save/restore. 
*/ struct rw_semaphore watch_mutex; @@ -157,6 +167,31 @@ static void *read_reply(enum xsd_sockmsg_type *type, unsigned int *len) return body; } +static void transaction_start(void) +{ + mutex_lock(&xs_state.transaction_mutex); + atomic_inc(&xs_state.transaction_count); + mutex_unlock(&xs_state.transaction_mutex); +} + +static void transaction_end(void) +{ + if (atomic_dec_and_test(&xs_state.transaction_count)) + wake_up(&xs_state.transaction_wq); +} + +static void transaction_suspend(void) +{ + mutex_lock(&xs_state.transaction_mutex); + wait_event(xs_state.transaction_wq, + atomic_read(&xs_state.transaction_count) == 0); +} + +static void transaction_resume(void) +{ + mutex_unlock(&xs_state.transaction_mutex); +} + void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg) { void *ret; @@ -164,7 +199,7 @@ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg) int err; if (req_msg.type == XS_TRANSACTION_START) - down_read(&xs_state.transaction_mutex); + transaction_start(); mutex_lock(&xs_state.request_mutex); @@ -180,7 +215,7 @@ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg) if ((msg->type == XS_TRANSACTION_END) || ((req_msg.type == XS_TRANSACTION_START) && (msg->type == XS_ERROR))) - up_read(&xs_state.transaction_mutex); + transaction_end(); return ret; } @@ -432,11 +467,11 @@ int xenbus_transaction_start(struct xenbus_transaction *t) { char *id_str; - down_read(&xs_state.transaction_mutex); + transaction_start(); id_str = xs_single(XBT_NIL, XS_TRANSACTION_START, "", NULL); if (IS_ERR(id_str)) { - up_read(&xs_state.transaction_mutex); + transaction_end(); return PTR_ERR(id_str); } @@ -461,7 +496,7 @@ int xenbus_transaction_end(struct xenbus_transaction t, int abort) err = xs_error(xs_single(t, XS_TRANSACTION_END, abortstr, NULL)); - up_read(&xs_state.transaction_mutex); + transaction_end(); return err; } @@ -662,7 +697,7 @@ EXPORT_SYMBOL_GPL(unregister_xenbus_watch); void xs_suspend(void) { - down_write(&xs_state.transaction_mutex); + transaction_suspend(); down_write(&xs_state.watch_mutex); mutex_lock(&xs_state.request_mutex); mutex_lock(&xs_state.response_mutex); @@ -677,7 +712,7 @@ void xs_resume(void) mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); - up_write(&xs_state.transaction_mutex); + transaction_resume(); /* No need for watches_lock: the watch_mutex is sufficient. 
*/ list_for_each_entry(watch, &watches, list) { @@ -693,7 +728,7 @@ void xs_suspend_cancel(void) mutex_unlock(&xs_state.response_mutex); mutex_unlock(&xs_state.request_mutex); up_write(&xs_state.watch_mutex); - up_write(&xs_state.transaction_mutex); + mutex_unlock(&xs_state.transaction_mutex); } static int xenwatch_thread(void *unused) @@ -843,8 +878,10 @@ int xs_init(void) mutex_init(&xs_state.request_mutex); mutex_init(&xs_state.response_mutex); - init_rwsem(&xs_state.transaction_mutex); + mutex_init(&xs_state.transaction_mutex); init_rwsem(&xs_state.watch_mutex); + atomic_set(&xs_state.transaction_count, 0); + init_waitqueue_head(&xs_state.transaction_wq); /* Initialize the shared memory rings to talk to xenstored */ err = xb_init_comms(); diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c index 8924d93136f..78bfab0700b 100644 --- a/drivers/xen/xenfs/super.c +++ b/drivers/xen/xenfs/super.c @@ -65,7 +65,7 @@ static struct file_system_type xenfs_type = { static int __init xenfs_init(void) { - if (xen_pv_domain()) + if (xen_domain()) return register_filesystem(&xenfs_type); printk(KERN_INFO "XENFS: not registering filesystem on non-xen platform\n"); @@ -74,7 +74,7 @@ static int __init xenfs_init(void) static void __exit xenfs_exit(void) { - if (xen_pv_domain()) + if (xen_domain()) unregister_filesystem(&xenfs_type); } diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenfs/xenbus.c index f28ece39736..3b39c3752e2 100644 --- a/drivers/xen/xenfs/xenbus.c +++ b/drivers/xen/xenfs/xenbus.c @@ -124,6 +124,9 @@ static ssize_t xenbus_file_read(struct file *filp, mutex_lock(&u->reply_mutex); while (list_empty(&u->read_buffers)) { mutex_unlock(&u->reply_mutex); + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; + ret = wait_event_interruptible(u->read_waitq, !list_empty(&u->read_buffers)); if (ret) |