Diffstat (limited to 'drivers')
556 files changed, 34070 insertions, 15709 deletions
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index 7ad48dfc12d..b8725461d88 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -279,9 +279,9 @@ int __init acpi_numa_init(void) /* SRAT: Static Resource Affinity Table */ if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) { acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY, - acpi_parse_x2apic_affinity, NR_CPUS); + acpi_parse_x2apic_affinity, nr_cpu_ids); acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY, - acpi_parse_processor_affinity, NR_CPUS); + acpi_parse_processor_affinity, nr_cpu_ids); ret = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY, acpi_parse_memory_affinity, NR_NODE_MEMBLKS); diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index a959f6a0750..d648a9860b8 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -561,7 +561,7 @@ end: } int acpi_processor_preregister_performance( - struct acpi_processor_performance *performance) + struct acpi_processor_performance __percpu *performance) { int count, count_target; int retval = 0; diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index 8a713f1e965..919a28558d3 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -11,6 +11,9 @@ #include <asm/smp.h> #include "agp.h" +int intel_agp_enabled; +EXPORT_SYMBOL(intel_agp_enabled); + /* * If we have Intel graphics, we're not going to have anything other than * an Intel IOMMU. So make the correct use of the PCI DMA API contingent @@ -65,6 +68,10 @@ #define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 #define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a #define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 +#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106 /* cover 915 and 945 variants */ #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \ @@ -99,7 +106,9 @@ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \ - agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB) + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \ + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \ + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) extern int agp_memory_reserved; @@ -148,6 +157,25 @@ extern int agp_memory_reserved; #define INTEL_I7505_AGPCTRL 0x70 #define INTEL_I7505_MCHCFG 0x50 +#define SNB_GMCH_CTRL 0x50 +#define SNB_GMCH_GMS_STOLEN_MASK 0xF8 +#define SNB_GMCH_GMS_STOLEN_32M (1 << 3) +#define SNB_GMCH_GMS_STOLEN_64M (2 << 3) +#define SNB_GMCH_GMS_STOLEN_96M (3 << 3) +#define SNB_GMCH_GMS_STOLEN_128M (4 << 3) +#define SNB_GMCH_GMS_STOLEN_160M (5 << 3) +#define SNB_GMCH_GMS_STOLEN_192M (6 << 3) +#define SNB_GMCH_GMS_STOLEN_224M (7 << 3) +#define SNB_GMCH_GMS_STOLEN_256M (8 << 3) +#define SNB_GMCH_GMS_STOLEN_288M (9 << 3) +#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3) +#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3) +#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3) +#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3) +#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3) +#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3) +#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3) + static const struct aper_size_info_fixed intel_i810_sizes[] = { {64, 16384, 4}, @@ -294,6 +322,13 @@ static 
void intel_agp_insert_sg_entries(struct agp_memory *mem, off_t pg_start, int mask_type) { int i, j; + u32 cache_bits = 0; + + if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) + { + cache_bits = I830_PTE_SYSTEM_CACHED; + } for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { writel(agp_bridge->driver->mask_memory(agp_bridge, @@ -614,7 +649,7 @@ static struct aper_size_info_fixed intel_i830_sizes[] = static void intel_i830_init_gtt_entries(void) { u16 gmch_ctrl; - int gtt_entries; + int gtt_entries = 0; u8 rdct; int local = 0; static const int ddt[4] = { 0, 16, 32, 64 }; @@ -706,6 +741,63 @@ static void intel_i830_init_gtt_entries(void) gtt_entries = 0; break; } + } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) { + /* + * SandyBridge has new memory control reg at 0x50.w + */ + u16 snb_gmch_ctl; + pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); + switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) { + case SNB_GMCH_GMS_STOLEN_32M: + gtt_entries = MB(32) - KB(size); + break; + case SNB_GMCH_GMS_STOLEN_64M: + gtt_entries = MB(64) - KB(size); + break; + case SNB_GMCH_GMS_STOLEN_96M: + gtt_entries = MB(96) - KB(size); + break; + case SNB_GMCH_GMS_STOLEN_128M: + gtt_entries = MB(128) - KB(size); + break; + case SNB_GMCH_GMS_STOLEN_160M: + gtt_entries = MB(160) - KB(size); + break; + case SNB_GMCH_GMS_STOLEN_192M: + gtt_entries = MB(192) - KB(size); + break; + case SNB_GMCH_GMS_STOLEN_224M: + gtt_entries = MB(224) - KB(size); + break; + case SNB_GMCH_GMS_STOLEN_256M: + gtt_entries = MB(256) - KB(size); + break; + case SNB_GMCH_GMS_STOLEN_288M: + gtt_entries = MB(288) - KB(size); + break; + case SNB_GMCH_GMS_STOLEN_320M: + gtt_entries = MB(320) - KB(size); + break; + case SNB_GMCH_GMS_STOLEN_352M: + gtt_entries = MB(352) - KB(size); + break; + case SNB_GMCH_GMS_STOLEN_384M: + gtt_entries = MB(384) - KB(size); + break; + case SNB_GMCH_GMS_STOLEN_416M: + gtt_entries = MB(416) - KB(size); + break; + case SNB_GMCH_GMS_STOLEN_448M: + gtt_entries = MB(448) - KB(size); + break; + case SNB_GMCH_GMS_STOLEN_480M: + gtt_entries = MB(480) - KB(size); + break; + case SNB_GMCH_GMS_STOLEN_512M: + gtt_entries = MB(512) - KB(size); + break; + } } else { switch (gmch_ctrl & I855_GMCH_GMS_MASK) { case I855_GMCH_GMS_STOLEN_1M: @@ -1357,6 +1449,8 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB: case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB: case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB: + case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB: + case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB: *gtt_offset = *gtt_size = MB(2); break; default: @@ -2338,9 +2432,9 @@ static const struct intel_driver_description { NULL, &intel_g33_driver }, { PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33", NULL, &intel_g33_driver }, - { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "Pineview", + { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "GMA3150", NULL, &intel_g33_driver }, - { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "Pineview", + { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "GMA3150", NULL, &intel_g33_driver }, { PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0, "GM45", NULL, &intel_i965_driver }, @@ -2355,13 +2449,17 @@ static const struct 
intel_driver_description { { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0, "G41", NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0, - "Ironlake/D", NULL, &intel_i965_driver }, + "HD Graphics", NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, - "Ironlake/M", NULL, &intel_i965_driver }, + "HD Graphics", NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, - "Ironlake/MA", NULL, &intel_i965_driver }, + "HD Graphics", NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0, - "Ironlake/MC2", NULL, &intel_i965_driver }, + "HD Graphics", NULL, &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, 0, + "Sandybridge", NULL, &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, 0, + "Sandybridge", NULL, &intel_i965_driver }, { 0, 0, 0, NULL, NULL, NULL } }; @@ -2371,7 +2469,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, struct agp_bridge_data *bridge; u8 cap_ptr = 0; struct resource *r; - int i; + int i, err; cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP); @@ -2463,7 +2561,10 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev, } pci_set_drvdata(pdev, bridge); - return agp_add_bridge(bridge); + err = agp_add_bridge(bridge); + if (!err) + intel_agp_enabled = 1; + return err; } static void __devexit agp_intel_remove(struct pci_dev *pdev) @@ -2568,6 +2669,8 @@ static struct pci_device_id agp_intel_pci_table[] = { ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB), ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB), + ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB), + ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB), { } }; diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c index 4254457d391..b861c08263a 100644 --- a/drivers/char/cyclades.c +++ b/drivers/char/cyclades.c @@ -158,13 +158,11 @@ static unsigned int cy_isa_addresses[] = { #define NR_ISA_ADDRS ARRAY_SIZE(cy_isa_addresses) -#ifdef MODULE static long maddr[NR_CARDS]; static int irq[NR_CARDS]; module_param_array(maddr, long, NULL, 0); module_param_array(irq, int, NULL, 0); -#endif #endif /* CONFIG_ISA */ @@ -598,12 +596,6 @@ static void cyy_chip_tx(struct cyclades_card *cinfo, unsigned int chip, save_car = readb(base_addr + (CyCAR << index)); cy_writeb(base_addr + (CyCAR << index), save_xir); - /* validate the port# (as configured and open) */ - if (channel + chip * 4 >= cinfo->nports) { - cy_writeb(base_addr + (CySRER << index), - readb(base_addr + (CySRER << index)) & ~CyTxRdy); - goto end; - } info = &cinfo->ports[channel + chip * 4]; tty = tty_port_tty_get(&info->port); if (tty == NULL) { @@ -3316,13 +3308,10 @@ static int __init cy_detect_isa(void) unsigned short cy_isa_irq, nboard; void __iomem *cy_isa_address; unsigned short i, j, cy_isa_nchan; -#ifdef MODULE int isparam = 0; -#endif nboard = 0; -#ifdef MODULE /* Check for module parameters */ for (i = 0; i < NR_CARDS; i++) { if (maddr[i] || i) { @@ -3332,7 +3321,6 @@ static int __init cy_detect_isa(void) if (!maddr[i]) break; } -#endif /* scan the address table probing for Cyclom-Y/ISA boards */ for (i = 0; i < NR_ISA_ADDRS; i++) { @@ -3353,11 +3341,10 @@ static int __init cy_detect_isa(void) iounmap(cy_isa_address); continue; } -#ifdef MODULE + if (isparam && i < NR_CARDS && irq[i]) cy_isa_irq = irq[i]; else -#endif /* find out the board's 
irq by probing */ cy_isa_irq = detect_isa_irq(cy_isa_address); if (cy_isa_irq == 0) { @@ -4208,3 +4195,4 @@ module_exit(cy_cleanup_module); MODULE_LICENSE("GPL"); MODULE_VERSION(CY_VERSION); MODULE_ALIAS_CHARDEV_MAJOR(CYCLADES_MAJOR); +MODULE_FIRMWARE("cyzfirm.bin"); diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c index 4c3b59be286..465185fc0f5 100644 --- a/drivers/char/hvc_console.c +++ b/drivers/char/hvc_console.c @@ -146,7 +146,7 @@ static void hvc_console_print(struct console *co, const char *b, return; /* This console adapter was removed so it is not usable. */ - if (vtermnos[index] < 0) + if (vtermnos[index] == -1) return; while (count > 0 || i > 0) { diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c index 517271c762e..911e1da6def 100644 --- a/drivers/char/ip2/ip2main.c +++ b/drivers/char/ip2/ip2main.c @@ -208,6 +208,7 @@ static int DumpFifoBuffer( char __user *, int); static void ip2_init_board(int, const struct firmware *); static unsigned short find_eisa_board(int); +static int ip2_setup(char *str); /***************/ /* Static Data */ @@ -263,7 +264,7 @@ static int tracewrap; /* Macros */ /**********/ -#if defined(MODULE) && defined(IP2DEBUG_OPEN) +#ifdef IP2DEBUG_OPEN #define DBG_CNT(s) printk(KERN_DEBUG "(%s): [%x] ttyc=%d, modc=%x -> %s\n", \ tty->name,(pCh->flags), \ tty->count,/*GET_USE_COUNT(module)*/0,s) @@ -285,7 +286,10 @@ MODULE_AUTHOR("Doug McNash"); MODULE_DESCRIPTION("Computone IntelliPort Plus Driver"); MODULE_LICENSE("GPL"); +#define MAX_CMD_STR 50 + static int poll_only; +static char cmd[MAX_CMD_STR]; static int Eisa_irq; static int Eisa_slot; @@ -309,6 +313,8 @@ module_param_array(io, int, NULL, 0); MODULE_PARM_DESC(io, "I/O ports for IntelliPort Cards"); module_param(poll_only, bool, 0); MODULE_PARM_DESC(poll_only, "Do not use card interrupts"); +module_param_string(ip2, cmd, MAX_CMD_STR, 0); +MODULE_PARM_DESC(ip2, "Contains module parameter passed with 'ip2='"); /* for sysfs class support */ static struct class *ip2_class; @@ -487,7 +493,6 @@ static const struct firmware *ip2_request_firmware(void) return fw; } -#ifndef MODULE /****************************************************************************** * ip2_setup: * str: kernel command line string @@ -531,7 +536,6 @@ static int __init ip2_setup(char *str) return 1; } __setup("ip2=", ip2_setup); -#endif /* !MODULE */ static int __init ip2_loadmain(void) { @@ -539,14 +543,20 @@ static int __init ip2_loadmain(void) int err = 0; i2eBordStrPtr pB = NULL; int rc = -1; - struct pci_dev *pdev = NULL; const struct firmware *fw = NULL; + char *str; + + str = cmd; if (poll_only) { /* Hard lock the interrupts to zero */ irq[0] = irq[1] = irq[2] = irq[3] = poll_only = 0; } + /* Check module parameter with 'ip2=' has been passed or not */ + if (!poll_only && (!strncmp(str, "ip2=", 4))) + ip2_setup(str); + ip2trace(ITRC_NO_PORT, ITRC_INIT, ITRC_ENTER, 0); /* process command line arguments to modprobe or @@ -612,6 +622,7 @@ static int __init ip2_loadmain(void) case PCI: #ifdef CONFIG_PCI { + struct pci_dev *pdev = NULL; u32 addr; int status; @@ -626,7 +637,7 @@ static int __init ip2_loadmain(void) if (pci_enable_device(pdev)) { dev_err(&pdev->dev, "can't enable device\n"); - break; + goto out; } ip2config.type[i] = PCI; ip2config.pci_dev[i] = pci_dev_get(pdev); @@ -638,6 +649,8 @@ static int __init ip2_loadmain(void) dev_err(&pdev->dev, "I/O address error\n"); ip2config.irq[i] = pdev->irq; +out: + pci_dev_put(pdev); } #else printk(KERN_ERR "IP2: PCI card specified but PCI " @@ -656,7 
+669,6 @@ static int __init ip2_loadmain(void) break; } /* switch */ } /* for */ - pci_dev_put(pdev); for (i = 0; i < IP2_MAX_BOARDS; ++i) { if (ip2config.addr[i]) { @@ -3197,3 +3209,5 @@ static struct pci_device_id ip2main_pci_tbl[] __devinitdata = { }; MODULE_DEVICE_TABLE(pci, ip2main_pci_tbl); + +MODULE_FIRMWARE("intelliport2.bin"); diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c index 300d5bd6cd0..be2e8f9a27c 100644 --- a/drivers/char/isicom.c +++ b/drivers/char/isicom.c @@ -113,6 +113,8 @@ * 64-bit verification */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/firmware.h> #include <linux/kernel.h> @@ -140,7 +142,6 @@ #define InterruptTheCard(base) outw(0, (base) + 0xc) #define ClearInterrupt(base) inw((base) + 0x0a) -#define pr_dbg(str...) pr_debug("ISICOM: " str) #ifdef DEBUG #define isicom_paranoia_check(a, b, c) __isicom_paranoia_check((a), (b), (c)) #else @@ -249,8 +250,7 @@ static int lock_card(struct isi_board *card) spin_unlock_irqrestore(&card->card_lock, card->flags); msleep(10); } - printk(KERN_WARNING "ISICOM: Failed to lock Card (0x%lx)\n", - card->base); + pr_warning("Failed to lock Card (0x%lx)\n", card->base); return 0; /* Failed to acquire the card! */ } @@ -379,13 +379,13 @@ static inline int __isicom_paranoia_check(struct isi_port const *port, char *name, const char *routine) { if (!port) { - printk(KERN_WARNING "ISICOM: Warning: bad isicom magic for " - "dev %s in %s.\n", name, routine); + pr_warning("Warning: bad isicom magic for dev %s in %s.\n", + name, routine); return 1; } if (port->magic != ISICOM_MAGIC) { - printk(KERN_WARNING "ISICOM: Warning: NULL isicom port for " - "dev %s in %s.\n", name, routine); + pr_warning("Warning: NULL isicom port for dev %s in %s.\n", + name, routine); return 1; } @@ -450,8 +450,8 @@ static void isicom_tx(unsigned long _data) if (!(inw(base + 0x02) & (1 << port->channel))) continue; - pr_dbg("txing %d bytes, port%d.\n", txcount, - port->channel + 1); + pr_debug("txing %d bytes, port%d.\n", + txcount, port->channel + 1); outw((port->channel << isi_card[card].shift_count) | txcount, base); residue = NO; @@ -547,8 +547,8 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id) byte_count = header & 0xff; if (channel + 1 > card->port_count) { - printk(KERN_WARNING "ISICOM: isicom_interrupt(0x%lx): " - "%d(channel) > port_count.\n", base, channel+1); + pr_warning("%s(0x%lx): %d(channel) > port_count.\n", + __func__, base, channel+1); outw(0x0000, base+0x04); /* enable interrupts */ spin_unlock(&card->card_lock); return IRQ_HANDLED; @@ -582,14 +582,15 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id) if (port->status & ISI_DCD) { if (!(header & ISI_DCD)) { /* Carrier has been lost */ - pr_dbg("interrupt: DCD->low.\n" - ); + pr_debug("%s: DCD->low.\n", + __func__); port->status &= ~ISI_DCD; tty_hangup(tty); } } else if (header & ISI_DCD) { /* Carrier has been detected */ - pr_dbg("interrupt: DCD->high.\n"); + pr_debug("%s: DCD->high.\n", + __func__); port->status |= ISI_DCD; wake_up_interruptible(&port->port.open_wait); } @@ -641,17 +642,19 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id) break; case 2: /* Statistics */ - pr_dbg("isicom_interrupt: stats!!!.\n"); + pr_debug("%s: stats!!!\n", __func__); break; default: - pr_dbg("Intr: Unknown code in status packet.\n"); + pr_debug("%s: Unknown code in status packet.\n", + __func__); break; } } else { /* Data Packet */ count = tty_prepare_flip_string(tty, &rp, byte_count & ~1); - pr_dbg("Intr: Can rx %d of 
%d bytes.\n", count, byte_count); + pr_debug("%s: Can rx %d of %d bytes.\n", + __func__, count, byte_count); word_count = count >> 1; insw(base, rp, word_count); byte_count -= (word_count << 1); @@ -661,8 +664,8 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id) byte_count -= 2; } if (byte_count > 0) { - pr_dbg("Intr(0x%lx:%d): Flip buffer overflow! dropping " - "bytes...\n", base, channel + 1); + pr_debug("%s(0x%lx:%d): Flip buffer overflow! dropping bytes...\n", + __func__, base, channel + 1); /* drain out unread xtra data */ while (byte_count > 0) { inw(base); @@ -888,8 +891,8 @@ static void isicom_shutdown_port(struct isi_port *port) struct isi_board *card = port->card; if (--card->count < 0) { - pr_dbg("isicom_shutdown_port: bad board(0x%lx) count %d.\n", - card->base, card->count); + pr_debug("%s: bad board(0x%lx) count %d.\n", + __func__, card->base, card->count); card->count = 0; } /* last port was closed, shutdown that board too */ @@ -1681,13 +1684,13 @@ static int __init isicom_init(void) retval = tty_register_driver(isicom_normal); if (retval) { - pr_dbg("Couldn't register the dialin driver\n"); + pr_debug("Couldn't register the dialin driver\n"); goto err_puttty; } retval = pci_register_driver(&isicom_driver); if (retval < 0) { - printk(KERN_ERR "ISICOM: Unable to register pci driver.\n"); + pr_err("Unable to register pci driver.\n"); goto err_unrtty; } @@ -1717,3 +1720,8 @@ module_exit(isicom_exit); MODULE_AUTHOR("MultiTech"); MODULE_DESCRIPTION("Driver for the ISI series of cards by MultiTech"); MODULE_LICENSE("GPL"); +MODULE_FIRMWARE("isi608.bin"); +MODULE_FIRMWARE("isi608em.bin"); +MODULE_FIRMWARE("isi616em.bin"); +MODULE_FIRMWARE("isi4608.bin"); +MODULE_FIRMWARE("isi4616.bin"); diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c index 63ee3bbc1ce..166495d6a1d 100644 --- a/drivers/char/moxa.c +++ b/drivers/char/moxa.c @@ -164,24 +164,25 @@ static unsigned int moxaFuncTout = HZ / 2; static unsigned int moxaLowWaterChk; static DEFINE_MUTEX(moxa_openlock); static DEFINE_SPINLOCK(moxa_lock); -/* Variables for insmod */ -#ifdef MODULE + static unsigned long baseaddr[MAX_BOARDS]; static unsigned int type[MAX_BOARDS]; static unsigned int numports[MAX_BOARDS]; -#endif MODULE_AUTHOR("William Chen"); MODULE_DESCRIPTION("MOXA Intellio Family Multiport Board Device Driver"); MODULE_LICENSE("GPL"); -#ifdef MODULE +MODULE_FIRMWARE("c218tunx.cod"); +MODULE_FIRMWARE("cp204unx.cod"); +MODULE_FIRMWARE("c320tunx.cod"); + module_param_array(type, uint, NULL, 0); MODULE_PARM_DESC(type, "card type: C218=2, C320=4"); module_param_array(baseaddr, ulong, NULL, 0); MODULE_PARM_DESC(baseaddr, "base address"); module_param_array(numports, uint, NULL, 0); MODULE_PARM_DESC(numports, "numports (ignored for C218)"); -#endif + module_param(ttymajor, int, 0); /* @@ -1024,6 +1025,8 @@ static int __init moxa_init(void) { unsigned int isabrds = 0; int retval = 0; + struct moxa_board_conf *brd = moxa_boards; + unsigned int i; printk(KERN_INFO "MOXA Intellio family driver version %s\n", MOXA_VERSION); @@ -1051,10 +1054,7 @@ static int __init moxa_init(void) } /* Find the boards defined from module args. 
*/ -#ifdef MODULE - { - struct moxa_board_conf *brd = moxa_boards; - unsigned int i; + for (i = 0; i < MAX_BOARDS; i++) { if (!baseaddr[i]) break; @@ -1087,8 +1087,6 @@ static int __init moxa_init(void) isabrds++; } } - } -#endif #ifdef CONFIG_PCI retval = pci_register_driver(&moxa_pci_driver); diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c index 3d923065d9a..e0c5d2a6904 100644 --- a/drivers/char/mxser.c +++ b/drivers/char/mxser.c @@ -895,8 +895,7 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty) if (inb(info->ioaddr + UART_LSR) == 0xff) { spin_unlock_irqrestore(&info->slock, flags); if (capable(CAP_SYS_ADMIN)) { - if (tty) - set_bit(TTY_IO_ERROR, &tty->flags); + set_bit(TTY_IO_ERROR, &tty->flags); return 0; } else return -ENODEV; diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c index 2ad7d37afbd..a3f32a15fde 100644 --- a/drivers/char/nozomi.c +++ b/drivers/char/nozomi.c @@ -136,10 +136,6 @@ static int debug; #define RECEIVE_BUF_MAX 4 -/* Define all types of vendors and devices to support */ -#define VENDOR1 0x1931 /* Vendor Option */ -#define DEVICE1 0x000c /* HSDPA card */ - #define R_IIR 0x0000 /* Interrupt Identity Register */ #define R_FCR 0x0000 /* Flow Control Register */ #define R_IER 0x0004 /* Interrupt Enable Register */ @@ -371,6 +367,8 @@ struct port { struct mutex tty_sem; wait_queue_head_t tty_wait; struct async_icount tty_icount; + + struct nozomi *dc; }; /* Private data one for each card in the system */ @@ -405,7 +403,7 @@ struct buffer { /* Global variables */ static const struct pci_device_id nozomi_pci_tbl[] __devinitconst = { - {PCI_DEVICE(VENDOR1, DEVICE1)}, + {PCI_DEVICE(0x1931, 0x000c)}, /* Nozomi HSDPA */ {}, }; @@ -414,6 +412,8 @@ MODULE_DEVICE_TABLE(pci, nozomi_pci_tbl); static struct nozomi *ndevs[NOZOMI_MAX_CARDS]; static struct tty_driver *ntty_driver; +static const struct tty_port_operations noz_tty_port_ops; + /* * find card by tty_index */ @@ -853,8 +853,6 @@ static int receive_data(enum port_type index, struct nozomi *dc) goto put; } - tty_buffer_request_room(tty, size); - while (size > 0) { read_mem32((u32 *) buf, addr + offset, RECEIVE_BUF_MAX); @@ -1473,9 +1471,11 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev, for (i = 0; i < MAX_PORT; i++) { struct device *tty_dev; - - mutex_init(&dc->port[i].tty_sem); - tty_port_init(&dc->port[i].port); + struct port *port = &dc->port[i]; + port->dc = dc; + mutex_init(&port->tty_sem); + tty_port_init(&port->port); + port->port.ops = &noz_tty_port_ops; tty_dev = tty_register_device(ntty_driver, dc->index_start + i, &pdev->dev); @@ -1600,67 +1600,74 @@ static void set_dtr(const struct tty_struct *tty, int dtr) * ---------------------------------------------------------------------------- */ -/* Called when the userspace process opens the tty, /dev/noz*. 
*/ -static int ntty_open(struct tty_struct *tty, struct file *file) +static int ntty_install(struct tty_driver *driver, struct tty_struct *tty) { struct port *port = get_port_by_tty(tty); struct nozomi *dc = get_dc_by_tty(tty); - unsigned long flags; - + int ret; if (!port || !dc || dc->state != NOZOMI_STATE_READY) return -ENODEV; - - if (mutex_lock_interruptible(&port->tty_sem)) - return -ERESTARTSYS; - - port->port.count++; - dc->open_ttys++; - - /* Enable interrupt downlink for channel */ - if (port->port.count == 1) { - tty->driver_data = port; - tty_port_tty_set(&port->port, tty); - DBG1("open: %d", port->token_dl); - spin_lock_irqsave(&dc->spin_mutex, flags); - dc->last_ier = dc->last_ier | port->token_dl; - writew(dc->last_ier, dc->reg_ier); - spin_unlock_irqrestore(&dc->spin_mutex, flags); + ret = tty_init_termios(tty); + if (ret == 0) { + tty_driver_kref_get(driver); + driver->ttys[tty->index] = tty; } - mutex_unlock(&port->tty_sem); - return 0; + return ret; } -/* Called when the userspace process close the tty, /dev/noz*. Also - called immediately if ntty_open fails in which case tty->driver_data - will be NULL an we exit by the first return */ +static void ntty_cleanup(struct tty_struct *tty) +{ + tty->driver_data = NULL; +} -static void ntty_close(struct tty_struct *tty, struct file *file) +static int ntty_activate(struct tty_port *tport, struct tty_struct *tty) { - struct nozomi *dc = get_dc_by_tty(tty); - struct port *nport = tty->driver_data; - struct tty_port *port = &nport->port; + struct port *port = container_of(tport, struct port, port); + struct nozomi *dc = port->dc; unsigned long flags; - if (!dc || !nport) - return; + DBG1("open: %d", port->token_dl); + spin_lock_irqsave(&dc->spin_mutex, flags); + dc->last_ier = dc->last_ier | port->token_dl; + writew(dc->last_ier, dc->reg_ier); + dc->open_ttys++; + spin_unlock_irqrestore(&dc->spin_mutex, flags); + printk("noz: activated %d: %p\n", tty->index, tport); + return 0; +} - /* Users cannot interrupt a close */ - mutex_lock(&nport->tty_sem); +static int ntty_open(struct tty_struct *tty, struct file *filp) +{ + struct port *port = get_port_by_tty(tty); + return tty_port_open(&port->port, tty, filp); +} - WARN_ON(!port->count); +static void ntty_shutdown(struct tty_port *tport) +{ + struct port *port = container_of(tport, struct port, port); + struct nozomi *dc = port->dc; + unsigned long flags; + DBG1("close: %d", port->token_dl); + spin_lock_irqsave(&dc->spin_mutex, flags); + dc->last_ier &= ~(port->token_dl); + writew(dc->last_ier, dc->reg_ier); dc->open_ttys--; - port->count--; + spin_unlock_irqrestore(&dc->spin_mutex, flags); + printk("noz: shutdown %p\n", tport); +} - if (port->count == 0) { - DBG1("close: %d", nport->token_dl); - tty_port_tty_set(port, NULL); - spin_lock_irqsave(&dc->spin_mutex, flags); - dc->last_ier &= ~(nport->token_dl); - writew(dc->last_ier, dc->reg_ier); - spin_unlock_irqrestore(&dc->spin_mutex, flags); - } - mutex_unlock(&nport->tty_sem); +static void ntty_close(struct tty_struct *tty, struct file *filp) +{ + struct port *port = tty->driver_data; + if (port) + tty_port_close(&port->port, tty, filp); +} + +static void ntty_hangup(struct tty_struct *tty) +{ + struct port *port = tty->driver_data; + tty_port_hangup(&port->port); } /* @@ -1680,15 +1687,7 @@ static int ntty_write(struct tty_struct *tty, const unsigned char *buffer, if (!dc || !port) return -ENODEV; - if (unlikely(!mutex_trylock(&port->tty_sem))) { - /* - * must test lock as tty layer wraps calls - * to this function with BKL - */ - 
dev_err(&dc->pdev->dev, "Would have deadlocked - " - "return EAGAIN\n"); - return -EAGAIN; - } + mutex_lock(&port->tty_sem); if (unlikely(!port->port.count)) { DBG1(" "); @@ -1728,25 +1727,23 @@ exit: * This method is called by the upper tty layer. * #according to sources N_TTY.c it expects a value >= 0 and * does not check for negative values. + * + * If the port is unplugged report lots of room and let the bits + * dribble away so we don't block anything. */ static int ntty_write_room(struct tty_struct *tty) { struct port *port = tty->driver_data; - int room = 0; + int room = 4096; const struct nozomi *dc = get_dc_by_tty(tty); - if (!dc || !port) - return 0; - if (!mutex_trylock(&port->tty_sem)) - return 0; - - if (!port->port.count) - goto exit; - - room = port->fifo_ul.size - kfifo_len(&port->fifo_ul); - -exit: - mutex_unlock(&port->tty_sem); + if (dc) { + mutex_lock(&port->tty_sem); + if (port->port.count) + room = port->fifo_ul.size - + kfifo_len(&port->fifo_ul); + mutex_unlock(&port->tty_sem); + } return room; } @@ -1906,10 +1903,16 @@ exit_in_buffer: return rval; } +static const struct tty_port_operations noz_tty_port_ops = { + .activate = ntty_activate, + .shutdown = ntty_shutdown, +}; + static const struct tty_operations tty_ops = { .ioctl = ntty_ioctl, .open = ntty_open, .close = ntty_close, + .hangup = ntty_hangup, .write = ntty_write, .write_room = ntty_write_room, .unthrottle = ntty_unthrottle, @@ -1917,6 +1920,8 @@ static const struct tty_operations tty_ops = { .chars_in_buffer = ntty_chars_in_buffer, .tiocmget = ntty_tiocmget, .tiocmset = ntty_tiocmset, + .install = ntty_install, + .cleanup = ntty_cleanup, }; /* Module initialization */ diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c index 452370af95d..986aa606a6b 100644 --- a/drivers/char/serial167.c +++ b/drivers/char/serial167.c @@ -658,8 +658,7 @@ static irqreturn_t cd2401_rx_interrupt(int irq, void *dev_id) info->mon.char_max = char_count; info->mon.char_last = char_count; #endif - len = tty_buffer_request_room(tty, char_count); - while (len--) { + while (char_count--) { data = base_addr[CyRDR]; tty_insert_flip_char(tty, data, TTY_NORMAL); #ifdef CYCLOM_16Y_HACK diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c index 268e17f9ec3..07ac14d949c 100644 --- a/drivers/char/specialix.c +++ b/drivers/char/specialix.c @@ -646,8 +646,6 @@ static void sx_receive(struct specialix_board *bp) dprintk(SX_DEBUG_RX, "port: %p: count: %d\n", port, count); port->hits[count > 8 ? 
9 : count]++; - tty_buffer_request_room(tty, count); - while (count--) tty_insert_flip_char(tty, sx_in(bp, CD186x_RDR), TTY_NORMAL); tty_flip_buffer_push(tty); diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c index 4846b73ef28..0658fc54822 100644 --- a/drivers/char/synclink.c +++ b/drivers/char/synclink.c @@ -2031,7 +2031,7 @@ static int mgsl_put_char(struct tty_struct *tty, unsigned char ch) if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char")) return 0; - if (!tty || !info->xmit_buf) + if (!info->xmit_buf) return 0; spin_lock_irqsave(&info->irq_spinlock, flags); @@ -2121,7 +2121,7 @@ static int mgsl_write(struct tty_struct * tty, if (mgsl_paranoia_check(info, tty->name, "mgsl_write")) goto cleanup; - if (!tty || !info->xmit_buf) + if (!info->xmit_buf) goto cleanup; if ( info->params.mode == MGSL_MODE_HDLC || diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c index 8678f0c8699..4561ce2fba6 100644 --- a/drivers/char/synclink_gt.c +++ b/drivers/char/synclink_gt.c @@ -468,7 +468,7 @@ static unsigned int free_tbuf_count(struct slgt_info *info); static unsigned int tbuf_bytes(struct slgt_info *info); static void reset_tbufs(struct slgt_info *info); static void tdma_reset(struct slgt_info *info); -static void tx_load(struct slgt_info *info, const char *buf, unsigned int count); +static bool tx_load(struct slgt_info *info, const char *buf, unsigned int count); static void get_signals(struct slgt_info *info); static void set_signals(struct slgt_info *info); @@ -813,59 +813,32 @@ static int write(struct tty_struct *tty, int ret = 0; struct slgt_info *info = tty->driver_data; unsigned long flags; - unsigned int bufs_needed; if (sanity_check(info, tty->name, "write")) - goto cleanup; + return -EIO; + DBGINFO(("%s write count=%d\n", info->device_name, count)); - if (!info->tx_buf) - goto cleanup; + if (!info->tx_buf || (count > info->max_frame_size)) + return -EIO; - if (count > info->max_frame_size) { - ret = -EIO; - goto cleanup; - } + if (!count || tty->stopped || tty->hw_stopped) + return 0; - if (!count) - goto cleanup; + spin_lock_irqsave(&info->lock, flags); - if (!info->tx_active && info->tx_count) { + if (info->tx_count) { /* send accumulated data from send_char() */ - tx_load(info, info->tx_buf, info->tx_count); - goto start; + if (!tx_load(info, info->tx_buf, info->tx_count)) + goto cleanup; + info->tx_count = 0; } - bufs_needed = (count/DMABUFSIZE); - if (count % DMABUFSIZE) - ++bufs_needed; - if (bufs_needed > free_tbuf_count(info)) - goto cleanup; - ret = info->tx_count = count; - tx_load(info, buf, count); - goto start; - -start: - if (info->tx_count && !tty->stopped && !tty->hw_stopped) { - spin_lock_irqsave(&info->lock,flags); - if (!info->tx_active) - tx_start(info); - else if (!(rd_reg32(info, TDCSR) & BIT0)) { - /* transmit still active but transmit DMA stopped */ - unsigned int i = info->tbuf_current; - if (!i) - i = info->tbuf_count; - i--; - /* if DMA buf unsent must try later after tx idle */ - if (desc_count(info->tbufs[i])) - ret = 0; - } - if (ret > 0) - update_tx_timer(info); - spin_unlock_irqrestore(&info->lock,flags); - } + if (tx_load(info, buf, count)) + ret = count; cleanup: + spin_unlock_irqrestore(&info->lock, flags); DBGINFO(("%s write rc=%d\n", info->device_name, ret)); return ret; } @@ -882,7 +855,7 @@ static int put_char(struct tty_struct *tty, unsigned char ch) if (!info->tx_buf) return 0; spin_lock_irqsave(&info->lock,flags); - if (!info->tx_active && (info->tx_count < info->max_frame_size)) { + if (info->tx_count < 
info->max_frame_size) { info->tx_buf[info->tx_count++] = ch; ret = 1; } @@ -981,10 +954,8 @@ static void flush_chars(struct tty_struct *tty) DBGINFO(("%s flush_chars start transmit\n", info->device_name)); spin_lock_irqsave(&info->lock,flags); - if (!info->tx_active && info->tx_count) { - tx_load(info, info->tx_buf,info->tx_count); - tx_start(info); - } + if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count)) + info->tx_count = 0; spin_unlock_irqrestore(&info->lock,flags); } @@ -997,10 +968,9 @@ static void flush_buffer(struct tty_struct *tty) return; DBGINFO(("%s flush_buffer\n", info->device_name)); - spin_lock_irqsave(&info->lock,flags); - if (!info->tx_active) - info->tx_count = 0; - spin_unlock_irqrestore(&info->lock,flags); + spin_lock_irqsave(&info->lock, flags); + info->tx_count = 0; + spin_unlock_irqrestore(&info->lock, flags); tty_wakeup(tty); } @@ -1033,12 +1003,10 @@ static void tx_release(struct tty_struct *tty) if (sanity_check(info, tty->name, "tx_release")) return; DBGINFO(("%s tx_release\n", info->device_name)); - spin_lock_irqsave(&info->lock,flags); - if (!info->tx_active && info->tx_count) { - tx_load(info, info->tx_buf, info->tx_count); - tx_start(info); - } - spin_unlock_irqrestore(&info->lock,flags); + spin_lock_irqsave(&info->lock, flags); + if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count)) + info->tx_count = 0; + spin_unlock_irqrestore(&info->lock, flags); } /* @@ -1506,27 +1474,25 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb, DBGINFO(("%s hdlc_xmit\n", dev->name)); + if (!skb->len) + return NETDEV_TX_OK; + /* stop sending until this frame completes */ netif_stop_queue(dev); - /* copy data to device buffers */ - info->tx_count = skb->len; - tx_load(info, skb->data, skb->len); - /* update network statistics */ dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; - /* done with socket buffer, so free it */ - dev_kfree_skb(skb); - /* save start time for transmit timeout detection */ dev->trans_start = jiffies; - spin_lock_irqsave(&info->lock,flags); - tx_start(info); - update_tx_timer(info); - spin_unlock_irqrestore(&info->lock,flags); + spin_lock_irqsave(&info->lock, flags); + tx_load(info, skb->data, skb->len); + spin_unlock_irqrestore(&info->lock, flags); + + /* done with socket buffer, so free it */ + dev_kfree_skb(skb); return NETDEV_TX_OK; } @@ -2180,7 +2146,7 @@ static void isr_serial(struct slgt_info *info) if (info->params.mode == MGSL_MODE_ASYNC) { if (status & IRQ_TXIDLE) { - if (info->tx_count) + if (info->tx_active) isr_txeom(info, status); } if (info->rx_pio && (status & IRQ_RXDATA)) @@ -2276,13 +2242,42 @@ static void isr_tdma(struct slgt_info *info) } } +/* + * return true if there are unsent tx DMA buffers, otherwise false + * + * if there are unsent buffers then info->tbuf_start + * is set to index of first unsent buffer + */ +static bool unsent_tbufs(struct slgt_info *info) +{ + unsigned int i = info->tbuf_current; + bool rc = false; + + /* + * search backwards from last loaded buffer (precedes tbuf_current) + * for first unsent buffer (desc_count > 0) + */ + + do { + if (i) + i--; + else + i = info->tbuf_count - 1; + if (!desc_count(info->tbufs[i])) + break; + info->tbuf_start = i; + rc = true; + } while (i != info->tbuf_current); + + return rc; +} + static void isr_txeom(struct slgt_info *info, unsigned short status) { DBGISR(("%s txeom status=%04x\n", info->device_name, status)); slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER); tdma_reset(info); - reset_tbufs(info); if (status & IRQ_TXUNDER) { 
unsigned short val = rd_reg16(info, TCR); wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */ @@ -2297,8 +2292,12 @@ static void isr_txeom(struct slgt_info *info, unsigned short status) info->icount.txok++; } + if (unsent_tbufs(info)) { + tx_start(info); + update_tx_timer(info); + return; + } info->tx_active = false; - info->tx_count = 0; del_timer(&info->tx_timer); @@ -3949,7 +3948,7 @@ static void tx_start(struct slgt_info *info) info->tx_enabled = true; } - if (info->tx_count) { + if (desc_count(info->tbufs[info->tbuf_start])) { info->drop_rts_on_tx_done = false; if (info->params.mode != MGSL_MODE_ASYNC) { @@ -4772,25 +4771,36 @@ static unsigned int tbuf_bytes(struct slgt_info *info) } /* - * load transmit DMA buffer(s) with data + * load data into transmit DMA buffer ring and start transmitter if needed + * return true if data accepted, otherwise false (buffers full) */ -static void tx_load(struct slgt_info *info, const char *buf, unsigned int size) +static bool tx_load(struct slgt_info *info, const char *buf, unsigned int size) { unsigned short count; unsigned int i; struct slgt_desc *d; - if (size == 0) - return; + /* check required buffer space */ + if (DIV_ROUND_UP(size, DMABUFSIZE) > free_tbuf_count(info)) + return false; DBGDATA(info, buf, size, "tx"); + /* + * copy data to one or more DMA buffers in circular ring + * tbuf_start = first buffer for this data + * tbuf_current = next free buffer + * + * Copy all data before making data visible to DMA controller by + * setting descriptor count of the first buffer. + * This prevents an active DMA controller from reading the first DMA + * buffers of a frame and stopping before the final buffers are filled. + */ + info->tbuf_start = i = info->tbuf_current; while (size) { d = &info->tbufs[i]; - if (++i == info->tbuf_count) - i = 0; count = (unsigned short)((size > DMABUFSIZE) ? DMABUFSIZE : size); memcpy(d->buf, buf, count); @@ -4808,11 +4818,27 @@ static void tx_load(struct slgt_info *info, const char *buf, unsigned int size) else set_desc_eof(*d, 0); - set_desc_count(*d, count); + /* set descriptor count for all but first buffer */ + if (i != info->tbuf_start) + set_desc_count(*d, count); d->buf_count = count; + + if (++i == info->tbuf_count) + i = 0; } info->tbuf_current = i; + + /* set first buffer count to make new data visible to DMA controller */ + d = &info->tbufs[info->tbuf_start]; + set_desc_count(*d, d->buf_count); + + /* start transmitter if needed and update transmit timeout */ + if (!info->tx_active) + tx_start(info); + update_tx_timer(info); + + return true; } static int register_test(struct slgt_info *info) @@ -4934,9 +4960,7 @@ static int loopback_test(struct slgt_info *info) spin_lock_irqsave(&info->lock,flags); async_mode(info); rx_start(info); - info->tx_count = count; tx_load(info, buf, count); - tx_start(info); spin_unlock_irqrestore(&info->lock, flags); /* wait for receive complete */ diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c index 66fa4e10d76..af8d9771572 100644 --- a/drivers/char/tty_buffer.c +++ b/drivers/char/tty_buffer.c @@ -231,9 +231,10 @@ int tty_buffer_request_room(struct tty_struct *tty, size_t size) EXPORT_SYMBOL_GPL(tty_buffer_request_room); /** - * tty_insert_flip_string - Add characters to the tty buffer + * tty_insert_flip_string_fixed_flag - Add characters to the tty buffer * @tty: tty structure * @chars: characters + * @flag: flag value for each character * @size: size * * Queue a series of bytes to the tty buffering. 
All the characters @@ -242,18 +243,19 @@ EXPORT_SYMBOL_GPL(tty_buffer_request_room); * Locking: Called functions may take tty->buf.lock */ -int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars, - size_t size) +int tty_insert_flip_string_fixed_flag(struct tty_struct *tty, + const unsigned char *chars, char flag, size_t size) { int copied = 0; do { - int space = tty_buffer_request_room(tty, size - copied); + int goal = min(size - copied, TTY_BUFFER_PAGE); + int space = tty_buffer_request_room(tty, goal); struct tty_buffer *tb = tty->buf.tail; /* If there is no space then tb may be NULL */ if (unlikely(space == 0)) break; memcpy(tb->char_buf_ptr + tb->used, chars, space); - memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space); + memset(tb->flag_buf_ptr + tb->used, flag, space); tb->used += space; copied += space; chars += space; @@ -262,7 +264,7 @@ int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars, } while (unlikely(size > copied)); return copied; } -EXPORT_SYMBOL(tty_insert_flip_string); +EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag); /** * tty_insert_flip_string_flags - Add characters to the tty buffer @@ -283,7 +285,8 @@ int tty_insert_flip_string_flags(struct tty_struct *tty, { int copied = 0; do { - int space = tty_buffer_request_room(tty, size - copied); + int goal = min(size - copied, TTY_BUFFER_PAGE); + int space = tty_buffer_request_room(tty, goal); struct tty_buffer *tb = tty->buf.tail; /* If there is no space then tb may be NULL */ if (unlikely(space == 0)) diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c index 3f653f7d849..500e740ec5e 100644 --- a/drivers/char/tty_ldisc.c +++ b/drivers/char/tty_ldisc.c @@ -706,12 +706,13 @@ static void tty_reset_termios(struct tty_struct *tty) /** * tty_ldisc_reinit - reinitialise the tty ldisc * @tty: tty to reinit + * @ldisc: line discipline to reinitialize * - * Switch the tty back to N_TTY line discipline and leave the - * ldisc state closed + * Switch the tty to a line discipline and leave the ldisc + * state closed */ -static void tty_ldisc_reinit(struct tty_struct *tty) +static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc) { struct tty_ldisc *ld; @@ -721,10 +722,10 @@ static void tty_ldisc_reinit(struct tty_struct *tty) /* * Switch the line discipline back */ - ld = tty_ldisc_get(N_TTY); + ld = tty_ldisc_get(ldisc); BUG_ON(IS_ERR(ld)); tty_ldisc_assign(tty, ld); - tty_set_termios_ldisc(tty, N_TTY); + tty_set_termios_ldisc(tty, ldisc); } /** @@ -745,6 +746,8 @@ static void tty_ldisc_reinit(struct tty_struct *tty) void tty_ldisc_hangup(struct tty_struct *tty) { struct tty_ldisc *ld; + int reset = tty->driver->flags & TTY_DRIVER_RESET_TERMIOS; + int err = 0; /* * FIXME! What are the locking issues here? This may me overdoing @@ -772,25 +775,32 @@ void tty_ldisc_hangup(struct tty_struct *tty) wake_up_interruptible_poll(&tty->read_wait, POLLIN); /* * Shutdown the current line discipline, and reset it to - * N_TTY. + * N_TTY if need be. + * + * Avoid racing set_ldisc or tty_ldisc_release */ - if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) { - /* Avoid racing set_ldisc or tty_ldisc_release */ - mutex_lock(&tty->ldisc_mutex); - tty_ldisc_halt(tty); - if (tty->ldisc) { /* Not yet closed */ - /* Switch back to N_TTY */ - tty_ldisc_reinit(tty); - /* At this point we have a closed ldisc and we want to - reopen it. 
We could defer this to the next open but - it means auditing a lot of other paths so this is - a FIXME */ + mutex_lock(&tty->ldisc_mutex); + tty_ldisc_halt(tty); + /* At this point we have a closed ldisc and we want to + reopen it. We could defer this to the next open but + it means auditing a lot of other paths so this is + a FIXME */ + if (tty->ldisc) { /* Not yet closed */ + if (reset == 0) { + tty_ldisc_reinit(tty, tty->termios->c_line); + err = tty_ldisc_open(tty, tty->ldisc); + } + /* If the re-open fails or we reset then go to N_TTY. The + N_TTY open cannot fail */ + if (reset || err) { + tty_ldisc_reinit(tty, N_TTY); WARN_ON(tty_ldisc_open(tty, tty->ldisc)); - tty_ldisc_enable(tty); } - mutex_unlock(&tty->ldisc_mutex); - tty_reset_termios(tty); + tty_ldisc_enable(tty); } + mutex_unlock(&tty->ldisc_mutex); + if (reset) + tty_reset_termios(tty); } /** diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c index 6aa10284104..87778dcf872 100644 --- a/drivers/char/vt_ioctl.c +++ b/drivers/char/vt_ioctl.c @@ -888,7 +888,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, ret = -EFAULT; goto out; } - if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS) { + if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS && tmp.mode != VT_PROCESS_AUTO) { ret = -EINVAL; goto out; } @@ -1622,7 +1622,7 @@ static void complete_change_console(struct vc_data *vc) * telling it that it has acquired. Also check if it has died and * clean up (similar to logic employed in change_console()) */ - if (vc->vt_mode.mode == VT_PROCESS) { + if (vc->vt_mode.mode == VT_PROCESS || vc->vt_mode.mode == VT_PROCESS_AUTO) { /* * Send the signal as privileged - kill_pid() will * tell us if the process has gone or something else @@ -1682,7 +1682,7 @@ void change_console(struct vc_data *new_vc) * vt to auto control. */ vc = vc_cons[fg_console].d; - if (vc->vt_mode.mode == VT_PROCESS) { + if (vc->vt_mode.mode == VT_PROCESS || vc->vt_mode.mode == VT_PROCESS_AUTO) { /* * Send the signal as privileged - kill_pid() will * tell us if the process has gone or something else @@ -1693,27 +1693,28 @@ void change_console(struct vc_data *new_vc) */ vc->vt_newvt = new_vc->vc_num; if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) { + if(vc->vt_mode.mode == VT_PROCESS) + /* + * It worked. Mark the vt to switch to and + * return. The process needs to send us a + * VT_RELDISP ioctl to complete the switch. + */ + return; + } else { /* - * It worked. Mark the vt to switch to and - * return. The process needs to send us a - * VT_RELDISP ioctl to complete the switch. + * The controlling process has died, so we revert back to + * normal operation. In this case, we'll also change back + * to KD_TEXT mode. I'm not sure if this is strictly correct + * but it saves the agony when the X server dies and the screen + * remains blanked due to KD_GRAPHICS! It would be nice to do + * this outside of VT_PROCESS but there is no single process + * to account for and tracking tty count may be undesirable. */ - return; + reset_vc(vc); } /* - * The controlling process has died, so we revert back to - * normal operation. In this case, we'll also change back - * to KD_TEXT mode. I'm not sure if this is strictly correct - * but it saves the agony when the X server dies and the screen - * remains blanked due to KD_GRAPHICS! It would be nice to do - * this outside of VT_PROCESS but there is no single process - * to account for and tracking tty count may be undesirable. 
- */ - reset_vc(vc); - - /* - * Fall through to normal (VT_AUTO) handling of the switch... + * Fall through to normal (VT_AUTO and VT_PROCESS_AUTO) handling of the switch... */ } diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index e02d74b1e89..c27f80e5d53 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -13,6 +13,22 @@ menuconfig DMADEVICES DMA Device drivers supported by the configured arch, it may be empty in some cases. +config DMADEVICES_DEBUG + bool "DMA Engine debugging" + depends on DMADEVICES != n + help + This is an option for use by developers; most people should + say N here. This enables DMA engine core and driver debugging. + +config DMADEVICES_VDEBUG + bool "DMA Engine verbose debugging" + depends on DMADEVICES_DEBUG != n + help + This is an option for use by developers; most people should + say N here. This enables deeper (more verbose) debugging of + the DMA engine core and drivers. + + if DMADEVICES comment "DMA Devices" @@ -69,6 +85,13 @@ config FSL_DMA The Elo is the DMA controller on some 82xx and 83xx parts, and the Elo Plus is the DMA controller on 85xx and 86xx parts. +config MPC512X_DMA + tristate "Freescale MPC512x built-in DMA engine support" + depends on PPC_MPC512x + select DMA_ENGINE + ---help--- + Enable support for the Freescale MPC512x built-in DMA engine. + config MV_XOR bool "Marvell XOR engine support" depends on PLAT_ORION diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 807053d4823..22bba3d5e2b 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -1,9 +1,17 @@ +ifeq ($(CONFIG_DMADEVICES_DEBUG),y) + EXTRA_CFLAGS += -DDEBUG +endif +ifeq ($(CONFIG_DMADEVICES_VDEBUG),y) + EXTRA_CFLAGS += -DVERBOSE_DEBUG +endif + obj-$(CONFIG_DMA_ENGINE) += dmaengine.o obj-$(CONFIG_NET_DMA) += iovlock.o obj-$(CONFIG_DMATEST) += dmatest.o obj-$(CONFIG_INTEL_IOATDMA) += ioat/ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o obj-$(CONFIG_FSL_DMA) += fsldma.o +obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o obj-$(CONFIG_MV_XOR) += mv_xor.o obj-$(CONFIG_DW_DMAC) += dw_dmac.o obj-$(CONFIG_AT_HDMAC) += at_hdmac.o diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index 64a937262a4..1656fdcdb6c 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -39,7 +39,6 @@ struct coh901318_desc { unsigned int sg_len; struct coh901318_lli *data; enum dma_data_direction dir; - int pending_irqs; unsigned long flags; }; @@ -72,7 +71,6 @@ struct coh901318_chan { unsigned long nbr_active_done; unsigned long busy; - int pending_irqs; struct coh901318_base *base; }; @@ -80,18 +78,16 @@ struct coh901318_chan { static void coh901318_list_print(struct coh901318_chan *cohc, struct coh901318_lli *lli) { - struct coh901318_lli *l; - dma_addr_t addr = virt_to_phys(lli); + struct coh901318_lli *l = lli; int i = 0; - while (addr) { - l = phys_to_virt(addr); + while (l) { dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x" - ", dst 0x%x, link 0x%x link_virt 0x%p\n", + ", dst 0x%x, link 0x%x virt_link_addr 0x%p\n", i, l, l->control, l->src_addr, l->dst_addr, - l->link_addr, phys_to_virt(l->link_addr)); + l->link_addr, l->virt_link_addr); i++; - addr = l->link_addr; + l = l->virt_link_addr; } } @@ -125,7 +121,7 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf, goto err_kmalloc; tmp = dev_buf; - tmp += sprintf(tmp, "DMA -- enable dma channels\n"); + tmp += sprintf(tmp, "DMA -- enabled dma channels\n"); for (i = 0; i < debugfs_dma_base->platform->max_channels; i++) if (started_channels & (1 << i)) @@ -337,16 +333,22 
@@ coh901318_desc_get(struct coh901318_chan *cohc) * TODO: alloc a pile of descs instead of just one, * avoid many small allocations. */ - desc = kmalloc(sizeof(struct coh901318_desc), GFP_NOWAIT); + desc = kzalloc(sizeof(struct coh901318_desc), GFP_NOWAIT); if (desc == NULL) goto out; INIT_LIST_HEAD(&desc->node); + dma_async_tx_descriptor_init(&desc->desc, &cohc->chan); } else { /* Reuse an old desc. */ desc = list_first_entry(&cohc->free, struct coh901318_desc, node); list_del(&desc->node); + /* Initialize it a bit so it's not insane */ + desc->sg = NULL; + desc->sg_len = 0; + desc->desc.callback = NULL; + desc->desc.callback_param = NULL; } out: @@ -364,10 +366,6 @@ static void coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc) { list_add_tail(&desc->node, &cohc->active); - - BUG_ON(cohc->pending_irqs != 0); - - cohc->pending_irqs = desc->pending_irqs; } static struct coh901318_desc * @@ -592,6 +590,10 @@ static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc) return cohd_que; } +/* + * This tasklet is called from the interrupt handler to + * handle each descriptor (DMA job) that is sent to a channel. + */ static void dma_tasklet(unsigned long data) { struct coh901318_chan *cohc = (struct coh901318_chan *) data; @@ -600,55 +602,58 @@ static void dma_tasklet(unsigned long data) dma_async_tx_callback callback; void *callback_param; + dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d" + " nbr_active_done %ld\n", __func__, + cohc->id, cohc->nbr_active_done); + spin_lock_irqsave(&cohc->lock, flags); - /* get first active entry from list */ + /* get first active descriptor entry from list */ cohd_fin = coh901318_first_active_get(cohc); - BUG_ON(cohd_fin->pending_irqs == 0); - if (cohd_fin == NULL) goto err; - cohd_fin->pending_irqs--; - cohc->completed = cohd_fin->desc.cookie; + /* locate callback to client */ + callback = cohd_fin->desc.callback; + callback_param = cohd_fin->desc.callback_param; - if (cohc->nbr_active_done == 0) - return; + /* sign this job as completed on the channel */ + cohc->completed = cohd_fin->desc.cookie; - if (!cohd_fin->pending_irqs) { - /* release the lli allocation*/ - coh901318_lli_free(&cohc->base->pool, &cohd_fin->data); - } + /* release the lli allocation and remove the descriptor */ + coh901318_lli_free(&cohc->base->pool, &cohd_fin->data); - dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d pending_irqs %d" - " nbr_active_done %ld\n", __func__, - cohc->id, cohc->pending_irqs, cohc->nbr_active_done); + /* return desc to free-list */ + coh901318_desc_remove(cohd_fin); + coh901318_desc_free(cohc, cohd_fin); - /* callback to client */ - callback = cohd_fin->desc.callback; - callback_param = cohd_fin->desc.callback_param; - - if (!cohd_fin->pending_irqs) { - coh901318_desc_remove(cohd_fin); + spin_unlock_irqrestore(&cohc->lock, flags); - /* return desc to free-list */ - coh901318_desc_free(cohc, cohd_fin); - } + /* Call the callback when we're done */ + if (callback) + callback(callback_param); - if (cohc->nbr_active_done) - cohc->nbr_active_done--; + spin_lock_irqsave(&cohc->lock, flags); + /* + * If another interrupt fired while the tasklet was scheduling, + * we don't get called twice, so we have this number of active + * counter that keep track of the number of IRQs expected to + * be handled for this channel. If there happen to be more than + * one IRQ to be ack:ed, we simply schedule this tasklet again. 
+ */ + cohc->nbr_active_done--; if (cohc->nbr_active_done) { + dev_dbg(COHC_2_DEV(cohc), "scheduling tasklet again, new IRQs " + "came in while we were scheduling this tasklet\n"); if (cohc_chan_conf(cohc)->priority_high) tasklet_hi_schedule(&cohc->tasklet); else tasklet_schedule(&cohc->tasklet); } - spin_unlock_irqrestore(&cohc->lock, flags); - if (callback) - callback(callback_param); + spin_unlock_irqrestore(&cohc->lock, flags); return; @@ -667,16 +672,17 @@ static void dma_tc_handle(struct coh901318_chan *cohc) if (!cohc->allocated) return; - BUG_ON(cohc->pending_irqs == 0); + spin_lock(&cohc->lock); - cohc->pending_irqs--; cohc->nbr_active_done++; - if (cohc->pending_irqs == 0 && coh901318_queue_start(cohc) == NULL) + if (coh901318_queue_start(cohc) == NULL) cohc->busy = 0; BUG_ON(list_empty(&cohc->active)); + spin_unlock(&cohc->lock); + if (cohc_chan_conf(cohc)->priority_high) tasklet_hi_schedule(&cohc->tasklet); else @@ -870,6 +876,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, struct coh901318_chan *cohc = to_coh901318_chan(chan); int lli_len; u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last; + int ret; spin_lock_irqsave(&cohc->lock, flg); @@ -890,22 +897,19 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, if (data == NULL) goto err; - cohd = coh901318_desc_get(cohc); - cohd->sg = NULL; - cohd->sg_len = 0; - cohd->data = data; - - cohd->pending_irqs = - coh901318_lli_fill_memcpy( - &cohc->base->pool, data, src, size, dest, - cohc_chan_param(cohc)->ctrl_lli_chained, - ctrl_last); - cohd->flags = flags; + ret = coh901318_lli_fill_memcpy( + &cohc->base->pool, data, src, size, dest, + cohc_chan_param(cohc)->ctrl_lli_chained, + ctrl_last); + if (ret) + goto err; COH_DBG(coh901318_list_print(cohc, data)); - dma_async_tx_descriptor_init(&cohd->desc, chan); - + /* Pick a descriptor to handle this transfer */ + cohd = coh901318_desc_get(cohc); + cohd->data = data; + cohd->flags = flags; cohd->desc.tx_submit = coh901318_tx_submit; spin_unlock_irqrestore(&cohc->lock, flg); @@ -924,6 +928,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, struct coh901318_chan *cohc = to_coh901318_chan(chan); struct coh901318_lli *data; struct coh901318_desc *cohd; + const struct coh901318_params *params; struct scatterlist *sg; int len = 0; int size; @@ -931,7 +936,9 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained; u32 ctrl = cohc_chan_param(cohc)->ctrl_lli; u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last; + u32 config; unsigned long flg; + int ret; if (!sgl) goto out; @@ -947,15 +954,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, /* Trigger interrupt after last lli */ ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE; - cohd = coh901318_desc_get(cohc); - cohd->sg = NULL; - cohd->sg_len = 0; - cohd->dir = direction; + params = cohc_chan_param(cohc); + config = params->config; if (direction == DMA_TO_DEVICE) { u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE | COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE; + config |= COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY; ctrl_chained |= tx_flags; ctrl_last |= tx_flags; ctrl |= tx_flags; @@ -963,16 +969,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE; + config |= COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY; ctrl_chained |= rx_flags; ctrl_last |= rx_flags; ctrl |= 
rx_flags; } else goto err_direction; - dma_async_tx_descriptor_init(&cohd->desc, chan); - - cohd->desc.tx_submit = coh901318_tx_submit; - + coh901318_set_conf(cohc, config); /* The dma only supports transmitting packages up to * MAX_DMA_PACKET_SIZE. Calculate to total number of @@ -994,32 +998,37 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, len += factor; } + pr_debug("Allocate %d lli:s for this transfer\n", len); data = coh901318_lli_alloc(&cohc->base->pool, len); if (data == NULL) goto err_dma_alloc; /* initiate allocated data list */ - cohd->pending_irqs = - coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len, - cohc_dev_addr(cohc), - ctrl_chained, - ctrl, - ctrl_last, - direction, COH901318_CX_CTRL_TC_IRQ_ENABLE); - cohd->data = data; - - cohd->flags = flags; + ret = coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len, + cohc_dev_addr(cohc), + ctrl_chained, + ctrl, + ctrl_last, + direction, COH901318_CX_CTRL_TC_IRQ_ENABLE); + if (ret) + goto err_lli_fill; COH_DBG(coh901318_list_print(cohc, data)); + /* Pick a descriptor to handle this transfer */ + cohd = coh901318_desc_get(cohc); + cohd->dir = direction; + cohd->flags = flags; + cohd->desc.tx_submit = coh901318_tx_submit; + cohd->data = data; + spin_unlock_irqrestore(&cohc->lock, flg); return &cohd->desc; + err_lli_fill: err_dma_alloc: err_direction: - coh901318_desc_remove(cohd); - coh901318_desc_free(cohc, cohd); spin_unlock_irqrestore(&cohc->lock, flg); out: return NULL; @@ -1092,9 +1101,8 @@ coh901318_terminate_all(struct dma_chan *chan) /* release the lli allocation*/ coh901318_lli_free(&cohc->base->pool, &cohd->data); - coh901318_desc_remove(cohd); - /* return desc to free-list */ + coh901318_desc_remove(cohd); coh901318_desc_free(cohc, cohd); } @@ -1102,16 +1110,14 @@ coh901318_terminate_all(struct dma_chan *chan) /* release the lli allocation*/ coh901318_lli_free(&cohc->base->pool, &cohd->data); - coh901318_desc_remove(cohd); - /* return desc to free-list */ + coh901318_desc_remove(cohd); coh901318_desc_free(cohc, cohd); } cohc->nbr_active_done = 0; cohc->busy = 0; - cohc->pending_irqs = 0; spin_unlock_irqrestore(&cohc->lock, flags); } @@ -1138,7 +1144,6 @@ void coh901318_base_init(struct dma_device *dma, const int *pick_chans, spin_lock_init(&cohc->lock); - cohc->pending_irqs = 0; cohc->nbr_active_done = 0; cohc->busy = 0; INIT_LIST_HEAD(&cohc->free); @@ -1254,12 +1259,17 @@ static int __init coh901318_probe(struct platform_device *pdev) base->dma_memcpy.device_issue_pending = coh901318_issue_pending; base->dma_memcpy.device_terminate_all = coh901318_terminate_all; base->dma_memcpy.dev = &pdev->dev; + /* + * This controller can only access address at even 32bit boundaries, + * i.e. 
2^2 + */ + base->dma_memcpy.copy_align = 2; err = dma_async_device_register(&base->dma_memcpy); if (err) goto err_register_memcpy; - dev_dbg(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n", + dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n", (u32) base->virtbase); return err; diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c index f5120f238a4..71d58c1a1e8 100644 --- a/drivers/dma/coh901318_lli.c +++ b/drivers/dma/coh901318_lli.c @@ -74,6 +74,8 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len) lli = head; lli->phy_this = phy; + lli->link_addr = 0x00000000; + lli->virt_link_addr = 0x00000000U; for (i = 1; i < len; i++) { lli_prev = lli; @@ -85,13 +87,13 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len) DEBUGFS_POOL_COUNTER_ADD(pool, 1); lli->phy_this = phy; + lli->link_addr = 0x00000000; + lli->virt_link_addr = 0x00000000U; lli_prev->link_addr = phy; lli_prev->virt_link_addr = lli; } - lli->link_addr = 0x00000000U; - spin_unlock(&pool->lock); return head; @@ -166,8 +168,7 @@ coh901318_lli_fill_memcpy(struct coh901318_pool *pool, lli->src_addr = src; lli->dst_addr = dst; - /* One irq per single transfer */ - return 1; + return 0; } int @@ -223,8 +224,7 @@ coh901318_lli_fill_single(struct coh901318_pool *pool, lli->src_addr = src; lli->dst_addr = dst; - /* One irq per single transfer */ - return 1; + return 0; } int @@ -240,7 +240,6 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool, u32 ctrl_sg; dma_addr_t src = 0; dma_addr_t dst = 0; - int nbr_of_irq = 0; u32 bytes_to_transfer; u32 elem_size; @@ -269,15 +268,12 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool, ctrl_sg = ctrl ? ctrl : ctrl_last; - if ((ctrl_sg & ctrl_irq_mask)) - nbr_of_irq++; - if (dir == DMA_TO_DEVICE) /* increment source address */ - src = sg_dma_address(sg); + src = sg_phys(sg); else /* increment destination address */ - dst = sg_dma_address(sg); + dst = sg_phys(sg); bytes_to_transfer = sg_dma_len(sg); @@ -310,8 +306,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool, } spin_unlock(&pool->lock); - /* There can be many IRQs per sg transfer */ - return nbr_of_irq; + return 0; err: spin_unlock(&pool->lock); return -EINVAL; diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index e7a3230fb7d..87399cafce3 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -284,7 +284,7 @@ struct dma_chan_tbl_ent { /** * channel_table - percpu lookup table for memory-to-memory offload providers */ -static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END]; +static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END]; static int __init dma_channel_table_init(void) { diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 948d563941c..6fa55fe3dd2 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -237,7 +237,7 @@ static int dmatest_func(void *data) dma_cookie_t cookie; enum dma_status status; enum dma_ctrl_flags flags; - u8 pq_coefs[pq_sources]; + u8 pq_coefs[pq_sources + 1]; int ret; int src_cnt; int dst_cnt; @@ -257,7 +257,7 @@ static int dmatest_func(void *data) } else if (thread->type == DMA_PQ) { src_cnt = pq_sources | 1; /* force odd to ensure dst = src */ dst_cnt = 2; - for (i = 0; i < pq_sources; i++) + for (i = 0; i < src_cnt; i++) pq_coefs[i] = 1; } else goto err_srcs; @@ -347,7 +347,7 @@ static int dmatest_func(void *data) else if (thread->type == DMA_XOR) tx = dev->device_prep_dma_xor(chan, dma_dsts[0] + dst_off, - dma_srcs, xor_sources, + dma_srcs, 
src_cnt, len, flags); else if (thread->type == DMA_PQ) { dma_addr_t dma_pq[dst_cnt]; @@ -355,7 +355,7 @@ static int dmatest_func(void *data) for (i = 0; i < dst_cnt; i++) dma_pq[i] = dma_dsts[i] + dst_off; tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs, - pq_sources, pq_coefs, + src_cnt, pq_coefs, len, flags); } diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 296f9e747fa..bbb4be5a3ff 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -37,19 +37,19 @@ #include <asm/fsldma.h> #include "fsldma.h" -static void dma_init(struct fsl_dma_chan *fsl_chan) +static void dma_init(struct fsldma_chan *chan) { /* Reset the channel */ - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32); + DMA_OUT(chan, &chan->regs->mr, 0, 32); - switch (fsl_chan->feature & FSL_DMA_IP_MASK) { + switch (chan->feature & FSL_DMA_IP_MASK) { case FSL_DMA_IP_85XX: /* Set the channel to below modes: * EIE - Error interrupt enable * EOSIE - End of segments interrupt enable (basic mode) * EOLNIE - End of links interrupt enable */ - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE + DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32); break; case FSL_DMA_IP_83XX: @@ -57,170 +57,146 @@ static void dma_init(struct fsl_dma_chan *fsl_chan) * EOTIE - End-of-transfer interrupt enable * PRC_RM - PCI read multiple */ - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE + DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE | FSL_DMA_MR_PRC_RM, 32); break; } - } -static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val) +static void set_sr(struct fsldma_chan *chan, u32 val) { - DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32); + DMA_OUT(chan, &chan->regs->sr, val, 32); } -static u32 get_sr(struct fsl_dma_chan *fsl_chan) +static u32 get_sr(struct fsldma_chan *chan) { - return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32); + return DMA_IN(chan, &chan->regs->sr, 32); } -static void set_desc_cnt(struct fsl_dma_chan *fsl_chan, +static void set_desc_cnt(struct fsldma_chan *chan, struct fsl_dma_ld_hw *hw, u32 count) { - hw->count = CPU_TO_DMA(fsl_chan, count, 32); + hw->count = CPU_TO_DMA(chan, count, 32); } -static void set_desc_src(struct fsl_dma_chan *fsl_chan, +static void set_desc_src(struct fsldma_chan *chan, struct fsl_dma_ld_hw *hw, dma_addr_t src) { u64 snoop_bits; - snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) + snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; - hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64); + hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); } -static void set_desc_dest(struct fsl_dma_chan *fsl_chan, - struct fsl_dma_ld_hw *hw, dma_addr_t dest) +static void set_desc_dst(struct fsldma_chan *chan, + struct fsl_dma_ld_hw *hw, dma_addr_t dst) { u64 snoop_bits; - snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) + snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; - hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64); + hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); } -static void set_desc_next(struct fsl_dma_chan *fsl_chan, +static void set_desc_next(struct fsldma_chan *chan, struct fsl_dma_ld_hw *hw, dma_addr_t next) { u64 snoop_bits; - snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) + snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) ? 
FSL_DMA_SNEN : 0; - hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64); -} - -static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr) -{ - DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64); + hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64); } -static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan) +static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) { - return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN; + DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64); } -static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr) +static dma_addr_t get_cdar(struct fsldma_chan *chan) { - DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64); + return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; } -static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan) +static dma_addr_t get_ndar(struct fsldma_chan *chan) { - return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64); + return DMA_IN(chan, &chan->regs->ndar, 64); } -static u32 get_bcr(struct fsl_dma_chan *fsl_chan) +static u32 get_bcr(struct fsldma_chan *chan) { - return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32); + return DMA_IN(chan, &chan->regs->bcr, 32); } -static int dma_is_idle(struct fsl_dma_chan *fsl_chan) +static int dma_is_idle(struct fsldma_chan *chan) { - u32 sr = get_sr(fsl_chan); + u32 sr = get_sr(chan); return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); } -static void dma_start(struct fsl_dma_chan *fsl_chan) +static void dma_start(struct fsldma_chan *chan) { - u32 mr_set = 0; - - if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { - DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32); - mr_set |= FSL_DMA_MR_EMP_EN; - } else if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, - DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) - & ~FSL_DMA_MR_EMP_EN, 32); + u32 mode; + + mode = DMA_IN(chan, &chan->regs->mr, 32); + + if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { + if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { + DMA_OUT(chan, &chan->regs->bcr, 0, 32); + mode |= FSL_DMA_MR_EMP_EN; + } else { + mode &= ~FSL_DMA_MR_EMP_EN; + } } - if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT) - mr_set |= FSL_DMA_MR_EMS_EN; + if (chan->feature & FSL_DMA_CHAN_START_EXT) + mode |= FSL_DMA_MR_EMS_EN; else - mr_set |= FSL_DMA_MR_CS; + mode |= FSL_DMA_MR_CS; - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, - DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) - | mr_set, 32); + DMA_OUT(chan, &chan->regs->mr, mode, 32); } -static void dma_halt(struct fsl_dma_chan *fsl_chan) +static void dma_halt(struct fsldma_chan *chan) { + u32 mode; int i; - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, - DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA, - 32); - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, - DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS - | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32); + mode = DMA_IN(chan, &chan->regs->mr, 32); + mode |= FSL_DMA_MR_CA; + DMA_OUT(chan, &chan->regs->mr, mode, 32); + + mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA); + DMA_OUT(chan, &chan->regs->mr, mode, 32); for (i = 0; i < 100; i++) { - if (dma_is_idle(fsl_chan)) - break; + if (dma_is_idle(chan)) + return; + udelay(10); } - if (i >= 100 && !dma_is_idle(fsl_chan)) - dev_err(fsl_chan->dev, "DMA halt timeout!\n"); + + if (!dma_is_idle(chan)) + dev_err(chan->dev, "DMA halt timeout!\n"); } -static void set_ld_eol(struct fsl_dma_chan *fsl_chan, +static void set_ld_eol(struct 
fsldma_chan *chan, struct fsl_desc_sw *desc) { u64 snoop_bits; - snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) + snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0; - desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, - DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL + desc->hw.next_ln_addr = CPU_TO_DMA(chan, + DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL | snoop_bits, 64); } -static void append_ld_queue(struct fsl_dma_chan *fsl_chan, - struct fsl_desc_sw *new_desc) -{ - struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev); - - if (list_empty(&fsl_chan->ld_queue)) - return; - - /* Link to the new descriptor physical address and - * Enable End-of-segment interrupt for - * the last link descriptor. - * (the previous node's next link descriptor) - * - * For FSL_DMA_IP_83xx, the snoop enable bit need be set. - */ - queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, - new_desc->async_tx.phys | FSL_DMA_EOSIE | - (((fsl_chan->feature & FSL_DMA_IP_MASK) - == FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64); -} - /** * fsl_chan_set_src_loop_size - Set source address hold transfer size - * @fsl_chan : Freescale DMA channel + * @chan : Freescale DMA channel * @size : Address loop size, 0 for disable loop * * The set source address hold transfer size. The source @@ -229,29 +205,30 @@ static void append_ld_queue(struct fsl_dma_chan *fsl_chan, * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA, * SA + 1 ... and so on. */ -static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size) +static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size) { + u32 mode; + + mode = DMA_IN(chan, &chan->regs->mr, 32); + switch (size) { case 0: - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, - DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & - (~FSL_DMA_MR_SAHE), 32); + mode &= ~FSL_DMA_MR_SAHE; break; case 1: case 2: case 4: case 8: - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, - DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | - FSL_DMA_MR_SAHE | (__ilog2(size) << 14), - 32); + mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14); break; } + + DMA_OUT(chan, &chan->regs->mr, mode, 32); } /** - * fsl_chan_set_dest_loop_size - Set destination address hold transfer size - * @fsl_chan : Freescale DMA channel + * fsl_chan_set_dst_loop_size - Set destination address hold transfer size + * @chan : Freescale DMA channel * @size : Address loop size, 0 for disable loop * * The set destination address hold transfer size. The destination @@ -260,29 +237,30 @@ static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size) * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA, * TA + 1 ... and so on. 
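The rework above replaces the nested DMA_OUT(..., DMA_IN(...) | bits, ...) sequences with a plain read-modify-write: read the mode register once into a local, adjust the address-hold enable bit and the log2-encoded size field, then write it back in one go. The stand-alone sketch below shows that pattern against a fake register; the bit position and value are placeholders, not the real 85xx/83xx register layout.

#include <stdio.h>
#include <stdint.h>

/* Placeholder bit definitions -- not the hardware's real mode register. */
#define MR_SAHE		(1u << 24)	/* "source address hold enable" stand-in */
#define MR_SAHTS_SHIFT	14		/* shift of the log2(size) field, as in the patch */

static uint32_t fake_mr;		/* stands in for chan->regs->mr */

/* Integer log2 for the power-of-two loop sizes (1, 2, 4, 8). */
static unsigned int ilog2_u32(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static void set_src_loop_size(int size)
{
	uint32_t mode = fake_mr;			/* "DMA_IN" */

	if (size == 0)
		mode &= ~MR_SAHE;			/* disable the address hold */
	else
		mode |= MR_SAHE | (ilog2_u32(size) << MR_SAHTS_SHIFT);

	fake_mr = mode;					/* "DMA_OUT": single write back */
}

int main(void)
{
	set_src_loop_size(4);
	printf("mr = 0x%08x\n", (unsigned int)fake_mr);	/* hold bit set, size field = 2 */
	return 0;
}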
*/ -static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size) +static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size) { + u32 mode; + + mode = DMA_IN(chan, &chan->regs->mr, 32); + switch (size) { case 0: - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, - DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & - (~FSL_DMA_MR_DAHE), 32); + mode &= ~FSL_DMA_MR_DAHE; break; case 1: case 2: case 4: case 8: - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, - DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | - FSL_DMA_MR_DAHE | (__ilog2(size) << 16), - 32); + mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16); break; } + + DMA_OUT(chan, &chan->regs->mr, mode, 32); } /** * fsl_chan_set_request_count - Set DMA Request Count for external control - * @fsl_chan : Freescale DMA channel + * @chan : Freescale DMA channel * @size : Number of bytes to transfer in a single request * * The Freescale DMA channel can be controlled by the external signal DREQ#. @@ -292,35 +270,38 @@ static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size) * * A size of 0 disables external pause control. The maximum size is 1024. */ -static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size) +static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size) { + u32 mode; + BUG_ON(size > 1024); - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, - DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) - | ((__ilog2(size) << 24) & 0x0f000000), - 32); + + mode = DMA_IN(chan, &chan->regs->mr, 32); + mode |= (__ilog2(size) << 24) & 0x0f000000; + + DMA_OUT(chan, &chan->regs->mr, mode, 32); } /** * fsl_chan_toggle_ext_pause - Toggle channel external pause status - * @fsl_chan : Freescale DMA channel + * @chan : Freescale DMA channel * @enable : 0 is disabled, 1 is enabled. * * The Freescale DMA channel can be controlled by the external signal DREQ#. * The DMA Request Count feature should be used in addition to this feature * to set the number of bytes to transfer before pausing the channel. */ -static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable) +static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable) { if (enable) - fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; + chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; else - fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; + chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; } /** * fsl_chan_toggle_ext_start - Toggle channel external start status - * @fsl_chan : Freescale DMA channel + * @chan : Freescale DMA channel * @enable : 0 is disabled, 1 is enabled. * * If enable the external start, the channel can be started by an @@ -328,141 +309,196 @@ static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable) * transfer immediately. The DMA channel will wait for the * control pin asserted. */ -static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable) +static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable) { if (enable) - fsl_chan->feature |= FSL_DMA_CHAN_START_EXT; + chan->feature |= FSL_DMA_CHAN_START_EXT; else - fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT; + chan->feature &= ~FSL_DMA_CHAN_START_EXT; +} + +static void append_ld_queue(struct fsldma_chan *chan, + struct fsl_desc_sw *desc) +{ + struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev); + + if (list_empty(&chan->ld_pending)) + goto out_splice; + + /* + * Add the hardware descriptor to the chain of hardware descriptors + * that already exists in memory. 
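The append_ld_queue() helper being added here (its body continues below) chains a new transaction onto the pending queue by pointing the previous tail's hardware next-link at the new descriptor and then splicing the new software descriptors onto the list. The following is only a rough model of that chaining with a made-up descriptor type (no EOL bit, no DMA addresses), not the driver code.

#include <stdio.h>

/* Minimal model of a link descriptor: a fake hardware "next" pointer plus
 * a software chain. Real descriptors carry DMA addresses and an EOL bit. */
struct desc {
	struct desc *hw_next;	/* what the controller would follow */
	struct desc *sw_next;	/* what the driver walks */
	int id;
};

/* Append: make the old tail's hardware link point at the new descriptor,
 * then put the new descriptor on the software list as the new tail. */
static void append(struct desc **head, struct desc **tail, struct desc *new)
{
	new->hw_next = NULL;
	new->sw_next = NULL;

	if (!*head) {
		*head = *tail = new;
		return;
	}
	(*tail)->hw_next = new;		/* extend the hardware chain */
	(*tail)->sw_next = new;		/* extend the software list */
	*tail = new;
}

int main(void)
{
	struct desc a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
	struct desc *head = NULL, *tail = NULL, *d;

	append(&head, &tail, &a);
	append(&head, &tail, &b);
	append(&head, &tail, &c);

	for (d = head; d; d = d->hw_next)
		printf("hw chain: desc %d\n", d->id);
	return 0;
}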
+ * + * This will un-set the EOL bit of the existing transaction, and the + * last link in this transaction will become the EOL descriptor. + */ + set_desc_next(chan, &tail->hw, desc->async_tx.phys); + + /* + * Add the software descriptor and all children to the list + * of pending transactions + */ +out_splice: + list_splice_tail_init(&desc->tx_list, &chan->ld_pending); } static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) { - struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan); + struct fsldma_chan *chan = to_fsl_chan(tx->chan); struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); struct fsl_desc_sw *child; unsigned long flags; dma_cookie_t cookie; - /* cookie increment and adding to ld_queue must be atomic */ - spin_lock_irqsave(&fsl_chan->desc_lock, flags); + spin_lock_irqsave(&chan->desc_lock, flags); - cookie = fsl_chan->common.cookie; + /* + * assign cookies to all of the software descriptors + * that make up this transaction + */ + cookie = chan->common.cookie; list_for_each_entry(child, &desc->tx_list, node) { cookie++; if (cookie < 0) cookie = 1; - desc->async_tx.cookie = cookie; + child->async_tx.cookie = cookie; } - fsl_chan->common.cookie = cookie; - append_ld_queue(fsl_chan, desc); - list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev); + chan->common.cookie = cookie; + + /* put this transaction onto the tail of the pending queue */ + append_ld_queue(chan, desc); - spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); + spin_unlock_irqrestore(&chan->desc_lock, flags); return cookie; } /** * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool. - * @fsl_chan : Freescale DMA channel + * @chan : Freescale DMA channel * * Return - The descriptor allocated. NULL for failed. */ static struct fsl_desc_sw *fsl_dma_alloc_descriptor( - struct fsl_dma_chan *fsl_chan) + struct fsldma_chan *chan) { + struct fsl_desc_sw *desc; dma_addr_t pdesc; - struct fsl_desc_sw *desc_sw; - - desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc); - if (desc_sw) { - memset(desc_sw, 0, sizeof(struct fsl_desc_sw)); - INIT_LIST_HEAD(&desc_sw->tx_list); - dma_async_tx_descriptor_init(&desc_sw->async_tx, - &fsl_chan->common); - desc_sw->async_tx.tx_submit = fsl_dma_tx_submit; - desc_sw->async_tx.phys = pdesc; + + desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); + if (!desc) { + dev_dbg(chan->dev, "out of memory for link desc\n"); + return NULL; } - return desc_sw; + memset(desc, 0, sizeof(*desc)); + INIT_LIST_HEAD(&desc->tx_list); + dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); + desc->async_tx.tx_submit = fsl_dma_tx_submit; + desc->async_tx.phys = pdesc; + + return desc; } /** * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. - * @fsl_chan : Freescale DMA channel + * @chan : Freescale DMA channel * * This function will create a dma pool for descriptor allocation. * * Return - The number of descriptors allocated. */ -static int fsl_dma_alloc_chan_resources(struct dma_chan *chan) +static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan) { - struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); + struct fsldma_chan *chan = to_fsl_chan(dchan); /* Has this channel already been allocated? */ - if (fsl_chan->desc_pool) + if (chan->desc_pool) return 1; - /* We need the descriptor to be aligned to 32bytes + /* + * We need the descriptor to be aligned to 32bytes * for meeting FSL DMA specification requirement. 
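The alignment note above matters because the controller fetches link descriptors directly from memory: the patch derives the pool alignment from __alignof__ on the descriptor type instead of a hard-coded 32. A small stand-alone check of the same idea, using a made-up descriptor struct and C11 aligned_alloc() rather than the kernel's dma_pool API:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Made-up descriptor using the same trick as the driver: force the type's
 * alignment so __alignof__ can be used when creating the allocation pool. */
struct fake_desc {
	uint64_t src;
	uint64_t dst;
	uint32_t count;
} __attribute__((aligned(32)));

int main(void)
{
	struct fake_desc *d;

	/* aligned_alloc() needs a size that is a multiple of the alignment;
	 * the aligned(32) attribute pads sizeof(struct fake_desc) to 32. */
	d = aligned_alloc(__alignof__(struct fake_desc),
			  sizeof(struct fake_desc));
	if (!d)
		return 1;

	printf("alignof = %zu, addr %% 32 = %zu\n",
	       (size_t)__alignof__(struct fake_desc),
	       (size_t)((uintptr_t)d % 32));

	free(d);
	return 0;
}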
*/ - fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", - fsl_chan->dev, sizeof(struct fsl_desc_sw), - 32, 0); - if (!fsl_chan->desc_pool) { - dev_err(fsl_chan->dev, "No memory for channel %d " - "descriptor dma pool.\n", fsl_chan->id); - return 0; + chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", + chan->dev, + sizeof(struct fsl_desc_sw), + __alignof__(struct fsl_desc_sw), 0); + if (!chan->desc_pool) { + dev_err(chan->dev, "unable to allocate channel %d " + "descriptor pool\n", chan->id); + return -ENOMEM; } + /* there is at least one descriptor free to be allocated */ return 1; } /** - * fsl_dma_free_chan_resources - Free all resources of the channel. - * @fsl_chan : Freescale DMA channel + * fsldma_free_desc_list - Free all descriptors in a queue + * @chan: Freescale DMA channel + * @list: the list to free + * + * LOCKING: must hold chan->desc_lock */ -static void fsl_dma_free_chan_resources(struct dma_chan *chan) +static void fsldma_free_desc_list(struct fsldma_chan *chan, + struct list_head *list) { - struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); struct fsl_desc_sw *desc, *_desc; - unsigned long flags; - dev_dbg(fsl_chan->dev, "Free all channel resources.\n"); - spin_lock_irqsave(&fsl_chan->desc_lock, flags); - list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { -#ifdef FSL_DMA_LD_DEBUG - dev_dbg(fsl_chan->dev, - "LD %p will be released.\n", desc); -#endif + list_for_each_entry_safe(desc, _desc, list, node) { + list_del(&desc->node); + dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); + } +} + +static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan, + struct list_head *list) +{ + struct fsl_desc_sw *desc, *_desc; + + list_for_each_entry_safe_reverse(desc, _desc, list, node) { list_del(&desc->node); - /* free link descriptor */ - dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); + dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); } - spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); - dma_pool_destroy(fsl_chan->desc_pool); +} + +/** + * fsl_dma_free_chan_resources - Free all resources of the channel.
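fsldma_free_desc_list() above is the usual safe-iteration pattern: each element is unlinked and freed inside the loop, so the next pointer has to be captured before the current node is released. A plain-C equivalent with malloc()/free() standing in for the DMA pool:

#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int id;
};

/* Free a whole singly-linked list. The "safe" part is saving n->next
 * before free(n), the same reason the driver uses the *_safe iterator. */
static void free_list(struct node *head)
{
	struct node *n, *next;

	for (n = head; n; n = next) {
		next = n->next;			/* grab the link first */
		printf("freeing node %d\n", n->id);
		free(n);			/* now the node may go away */
	}
}

int main(void)
{
	struct node *head = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->id = i;
		n->next = head;
		head = n;
	}

	free_list(head);
	return 0;
}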
+ * @chan : Freescale DMA channel + */ +static void fsl_dma_free_chan_resources(struct dma_chan *dchan) +{ + struct fsldma_chan *chan = to_fsl_chan(dchan); + unsigned long flags; + + dev_dbg(chan->dev, "Free all channel resources.\n"); + spin_lock_irqsave(&chan->desc_lock, flags); + fsldma_free_desc_list(chan, &chan->ld_pending); + fsldma_free_desc_list(chan, &chan->ld_running); + spin_unlock_irqrestore(&chan->desc_lock, flags); - fsl_chan->desc_pool = NULL; + dma_pool_destroy(chan->desc_pool); + chan->desc_pool = NULL; } static struct dma_async_tx_descriptor * -fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags) +fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags) { - struct fsl_dma_chan *fsl_chan; + struct fsldma_chan *chan; struct fsl_desc_sw *new; - if (!chan) + if (!dchan) return NULL; - fsl_chan = to_fsl_chan(chan); + chan = to_fsl_chan(dchan); - new = fsl_dma_alloc_descriptor(fsl_chan); + new = fsl_dma_alloc_descriptor(chan); if (!new) { - dev_err(fsl_chan->dev, "No free memory for link descriptor\n"); + dev_err(chan->dev, "No free memory for link descriptor\n"); return NULL; } @@ -473,51 +509,50 @@ fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags) list_add_tail(&new->node, &new->tx_list); /* Set End-of-link to the last link descriptor of new list*/ - set_ld_eol(fsl_chan, new); + set_ld_eol(chan, new); return &new->async_tx; } static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( - struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, + struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src, size_t len, unsigned long flags) { - struct fsl_dma_chan *fsl_chan; + struct fsldma_chan *chan; struct fsl_desc_sw *first = NULL, *prev = NULL, *new; - struct list_head *list; size_t copy; - if (!chan) + if (!dchan) return NULL; if (!len) return NULL; - fsl_chan = to_fsl_chan(chan); + chan = to_fsl_chan(dchan); do { /* Allocate the link descriptor from DMA pool */ - new = fsl_dma_alloc_descriptor(fsl_chan); + new = fsl_dma_alloc_descriptor(chan); if (!new) { - dev_err(fsl_chan->dev, + dev_err(chan->dev, "No free memory for link descriptor\n"); goto fail; } #ifdef FSL_DMA_LD_DEBUG - dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); + dev_dbg(chan->dev, "new link desc alloc %p\n", new); #endif copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); - set_desc_cnt(fsl_chan, &new->hw, copy); - set_desc_src(fsl_chan, &new->hw, dma_src); - set_desc_dest(fsl_chan, &new->hw, dma_dest); + set_desc_cnt(chan, &new->hw, copy); + set_desc_src(chan, &new->hw, dma_src); + set_desc_dst(chan, &new->hw, dma_dst); if (!first) first = new; else - set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys); + set_desc_next(chan, &prev->hw, new->async_tx.phys); new->async_tx.cookie = 0; async_tx_ack(&new->async_tx); @@ -525,7 +560,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( prev = new; len -= copy; dma_src += copy; - dma_dest += copy; + dma_dst += copy; /* Insert the link descriptor to the LD ring */ list_add_tail(&new->node, &first->tx_list); @@ -535,7 +570,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( new->async_tx.cookie = -EBUSY; /* Set End-of-link to the last link descriptor of new list*/ - set_ld_eol(fsl_chan, new); + set_ld_eol(chan, new); return &first->async_tx; @@ -543,12 +578,7 @@ fail: if (!first) return NULL; - list = &first->tx_list; - list_for_each_entry_safe_reverse(new, prev, list, node) { - list_del(&new->node); - dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys); - } - + 
fsldma_free_desc_list_reverse(chan, &first->tx_list); return NULL; } @@ -565,13 +595,12 @@ fail: * chan->private variable. */ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( - struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, + struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, enum dma_data_direction direction, unsigned long flags) { - struct fsl_dma_chan *fsl_chan; + struct fsldma_chan *chan; struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; struct fsl_dma_slave *slave; - struct list_head *tx_list; size_t copy; int i; @@ -581,14 +610,14 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( struct fsl_dma_hw_addr *hw; dma_addr_t dma_dst, dma_src; - if (!chan) + if (!dchan) return NULL; - if (!chan->private) + if (!dchan->private) return NULL; - fsl_chan = to_fsl_chan(chan); - slave = chan->private; + chan = to_fsl_chan(dchan); + slave = dchan->private; if (list_empty(&slave->addresses)) return NULL; @@ -637,14 +666,14 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( } /* Allocate the link descriptor from DMA pool */ - new = fsl_dma_alloc_descriptor(fsl_chan); + new = fsl_dma_alloc_descriptor(chan); if (!new) { - dev_err(fsl_chan->dev, "No free memory for " + dev_err(chan->dev, "No free memory for " "link descriptor\n"); goto fail; } #ifdef FSL_DMA_LD_DEBUG - dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); + dev_dbg(chan->dev, "new link desc alloc %p\n", new); #endif /* @@ -671,9 +700,9 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( } /* Fill in the descriptor */ - set_desc_cnt(fsl_chan, &new->hw, copy); - set_desc_src(fsl_chan, &new->hw, dma_src); - set_desc_dest(fsl_chan, &new->hw, dma_dst); + set_desc_cnt(chan, &new->hw, copy); + set_desc_src(chan, &new->hw, dma_src); + set_desc_dst(chan, &new->hw, dma_dst); /* * If this is not the first descriptor, chain the @@ -682,7 +711,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( if (!first) { first = new; } else { - set_desc_next(fsl_chan, &prev->hw, + set_desc_next(chan, &prev->hw, new->async_tx.phys); } @@ -708,23 +737,23 @@ finished: new->async_tx.cookie = -EBUSY; /* Set End-of-link to the last link descriptor of new list */ - set_ld_eol(fsl_chan, new); + set_ld_eol(chan, new); /* Enable extra controller features */ - if (fsl_chan->set_src_loop_size) - fsl_chan->set_src_loop_size(fsl_chan, slave->src_loop_size); + if (chan->set_src_loop_size) + chan->set_src_loop_size(chan, slave->src_loop_size); - if (fsl_chan->set_dest_loop_size) - fsl_chan->set_dest_loop_size(fsl_chan, slave->dst_loop_size); + if (chan->set_dst_loop_size) + chan->set_dst_loop_size(chan, slave->dst_loop_size); - if (fsl_chan->toggle_ext_start) - fsl_chan->toggle_ext_start(fsl_chan, slave->external_start); + if (chan->toggle_ext_start) + chan->toggle_ext_start(chan, slave->external_start); - if (fsl_chan->toggle_ext_pause) - fsl_chan->toggle_ext_pause(fsl_chan, slave->external_pause); + if (chan->toggle_ext_pause) + chan->toggle_ext_pause(chan, slave->external_pause); - if (fsl_chan->set_request_count) - fsl_chan->set_request_count(fsl_chan, slave->request_count); + if (chan->set_request_count) + chan->set_request_count(chan, slave->request_count); return &first->async_tx; @@ -741,215 +770,216 @@ fail: * * We're re-using variables for the loop, oh well */ - tx_list = &first->tx_list; - list_for_each_entry_safe_reverse(new, prev, tx_list, node) { - list_del_init(&new->node); - dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys); - } 
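The memcpy preparation in this hunk splits an arbitrary length into hardware-sized pieces: each link descriptor carries at most FSL_DMA_BCR_MAX_CNT bytes, and the source/destination addresses advance by the amount actually programmed. The loop below reproduces just that arithmetic with a made-up per-descriptor limit and only prints the resulting slices.

#include <stdio.h>
#include <stdint.h>

#define FAKE_BCR_MAX_CNT	(64 * 1024)	/* placeholder per-descriptor limit */

static size_t min_size(size_t a, size_t b)
{
	return a < b ? a : b;
}

/* Walk a transfer of 'len' bytes, emitting one "descriptor" per chunk. */
static void build_chunks(uint64_t src, uint64_t dst, size_t len)
{
	size_t copy;
	int i = 0;

	do {
		copy = min_size(len, (size_t)FAKE_BCR_MAX_CNT);
		printf("desc %d: src=0x%llx dst=0x%llx len=%zu\n",
		       i++, (unsigned long long)src,
		       (unsigned long long)dst, copy);
		len -= copy;
		src += copy;
		dst += copy;
	} while (len);
}

int main(void)
{
	build_chunks(0x1000, 0x8000, 150 * 1024);	/* yields 3 descriptors */
	return 0;
}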
- + fsldma_free_desc_list_reverse(chan, &first->tx_list); return NULL; } -static void fsl_dma_device_terminate_all(struct dma_chan *chan) +static void fsl_dma_device_terminate_all(struct dma_chan *dchan) { - struct fsl_dma_chan *fsl_chan; - struct fsl_desc_sw *desc, *tmp; + struct fsldma_chan *chan; unsigned long flags; - if (!chan) + if (!dchan) return; - fsl_chan = to_fsl_chan(chan); + chan = to_fsl_chan(dchan); /* Halt the DMA engine */ - dma_halt(fsl_chan); + dma_halt(chan); - spin_lock_irqsave(&fsl_chan->desc_lock, flags); + spin_lock_irqsave(&chan->desc_lock, flags); /* Remove and free all of the descriptors in the LD queue */ - list_for_each_entry_safe(desc, tmp, &fsl_chan->ld_queue, node) { - list_del(&desc->node); - dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); - } + fsldma_free_desc_list(chan, &chan->ld_pending); + fsldma_free_desc_list(chan, &chan->ld_running); - spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); + spin_unlock_irqrestore(&chan->desc_lock, flags); } /** * fsl_dma_update_completed_cookie - Update the completed cookie. - * @fsl_chan : Freescale DMA channel + * @chan : Freescale DMA channel + * + * CONTEXT: hardirq */ -static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan) +static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan) { - struct fsl_desc_sw *cur_desc, *desc; - dma_addr_t ld_phy; + struct fsl_desc_sw *desc; + unsigned long flags; + dma_cookie_t cookie; - ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK; + spin_lock_irqsave(&chan->desc_lock, flags); - if (ld_phy) { - cur_desc = NULL; - list_for_each_entry(desc, &fsl_chan->ld_queue, node) - if (desc->async_tx.phys == ld_phy) { - cur_desc = desc; - break; - } + if (list_empty(&chan->ld_running)) { + dev_dbg(chan->dev, "no running descriptors\n"); + goto out_unlock; + } - if (cur_desc && cur_desc->async_tx.cookie) { - if (dma_is_idle(fsl_chan)) - fsl_chan->completed_cookie = - cur_desc->async_tx.cookie; - else - fsl_chan->completed_cookie = - cur_desc->async_tx.cookie - 1; - } + /* Get the last descriptor, update the cookie to that */ + desc = to_fsl_desc(chan->ld_running.prev); + if (dma_is_idle(chan)) + cookie = desc->async_tx.cookie; + else { + cookie = desc->async_tx.cookie - 1; + if (unlikely(cookie < DMA_MIN_COOKIE)) + cookie = DMA_MAX_COOKIE; } + + chan->completed_cookie = cookie; + +out_unlock: + spin_unlock_irqrestore(&chan->desc_lock, flags); +} + +/** + * fsldma_desc_status - Check the status of a descriptor + * @chan: Freescale DMA channel + * @desc: DMA SW descriptor + * + * This function will return the status of the given descriptor + */ +static enum dma_status fsldma_desc_status(struct fsldma_chan *chan, + struct fsl_desc_sw *desc) +{ + return dma_async_is_complete(desc->async_tx.cookie, + chan->completed_cookie, + chan->common.cookie); } /** * fsl_chan_ld_cleanup - Clean up link descriptors - * @fsl_chan : Freescale DMA channel + * @chan : Freescale DMA channel * * This function clean up the ld_queue of DMA channel. - * If 'in_intr' is set, the function will move the link descriptor to - * the recycle list. Otherwise, free it directly. 
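fsl_dma_update_completed_cookie() above has two wrap rules worth spelling out: on submit the channel cookie advances and wraps back to the minimum instead of going negative, and on completion the "last completed" value may need to step back one cookie (while the engine is still busy on the current descriptor) without leaving the valid range. The sketch below assumes the bounds are 1 and INT_MAX, since the dmaengine constants themselves are not shown in this patch, and it wraps explicitly rather than relying on signed overflow.

#include <stdio.h>
#include <limits.h>

typedef int dma_cookie_t;

/* Bounds assumed for illustration only; the real dmaengine constants are
 * not part of this patch. */
#define MIN_COOKIE	1
#define MAX_COOKIE	INT_MAX

/* Submit side: advance the channel cookie, wrapping before it could
 * overflow (the driver relies on kernel signed-wrap semantics instead). */
static dma_cookie_t next_cookie(dma_cookie_t cur)
{
	if (cur >= MAX_COOKIE)
		return MIN_COOKIE;
	return cur + 1;
}

/* Completion side: if the engine is still busy on 'running', only the
 * previous cookie is complete, and stepping back must stay in range. */
static dma_cookie_t completed_cookie(dma_cookie_t running, int engine_idle)
{
	dma_cookie_t c;

	if (engine_idle)
		return running;

	c = running - 1;
	if (c < MIN_COOKIE)
		c = MAX_COOKIE;
	return c;
}

int main(void)
{
	printf("after INT_MAX comes %d\n", next_cookie(INT_MAX));
	printf("busy on cookie 1 -> last completed %d\n", completed_cookie(1, 0));
	printf("idle on cookie 7 -> last completed %d\n", completed_cookie(7, 1));
	return 0;
}

With cookie 1 still in flight this reports INT_MAX as last completed, which is the same clamp the patch performs against its minimum and maximum cookie values.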
*/ -static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan) +static void fsl_chan_ld_cleanup(struct fsldma_chan *chan) { struct fsl_desc_sw *desc, *_desc; unsigned long flags; - spin_lock_irqsave(&fsl_chan->desc_lock, flags); + spin_lock_irqsave(&chan->desc_lock, flags); - dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n", - fsl_chan->completed_cookie); - list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { + dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie); + list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) { dma_async_tx_callback callback; void *callback_param; - if (dma_async_is_complete(desc->async_tx.cookie, - fsl_chan->completed_cookie, fsl_chan->common.cookie) - == DMA_IN_PROGRESS) + if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS) break; - callback = desc->async_tx.callback; - callback_param = desc->async_tx.callback_param; - - /* Remove from ld_queue list */ + /* Remove from the list of running transactions */ list_del(&desc->node); - dev_dbg(fsl_chan->dev, "link descriptor %p will be recycle.\n", - desc); - dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); - /* Run the link descriptor callback function */ + callback = desc->async_tx.callback; + callback_param = desc->async_tx.callback_param; if (callback) { - spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); - dev_dbg(fsl_chan->dev, "link descriptor %p callback\n", - desc); + spin_unlock_irqrestore(&chan->desc_lock, flags); + dev_dbg(chan->dev, "LD %p callback\n", desc); callback(callback_param); - spin_lock_irqsave(&fsl_chan->desc_lock, flags); + spin_lock_irqsave(&chan->desc_lock, flags); } + + /* Run any dependencies, then free the descriptor */ + dma_run_dependencies(&desc->async_tx); + dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); } - spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); + + spin_unlock_irqrestore(&chan->desc_lock, flags); } /** - * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue. - * @fsl_chan : Freescale DMA channel + * fsl_chan_xfer_ld_queue - transfer any pending transactions + * @chan : Freescale DMA channel + * + * This will make sure that any pending transactions will be run. + * If the DMA controller is idle, it will be started. Otherwise, + * the DMA controller's interrupt handler will start any pending + * transactions when it becomes idle. */ -static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan) +static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) { - struct list_head *ld_node; - dma_addr_t next_dest_addr; + struct fsl_desc_sw *desc; unsigned long flags; - spin_lock_irqsave(&fsl_chan->desc_lock, flags); + spin_lock_irqsave(&chan->desc_lock, flags); - if (!dma_is_idle(fsl_chan)) + /* + * If the list of pending descriptors is empty, then we + * don't need to do any work at all + */ + if (list_empty(&chan->ld_pending)) { + dev_dbg(chan->dev, "no pending LDs\n"); goto out_unlock; + } - dma_halt(fsl_chan); + /* + * The DMA controller is not idle, which means the interrupt + * handler will start any queued transactions when it runs + * at the end of the current transaction + */ + if (!dma_is_idle(chan)) { + dev_dbg(chan->dev, "DMA controller still busy\n"); + goto out_unlock; + } - /* If there are some link descriptors - * not transfered in queue. We need to start it. 
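fsl_chan_ld_cleanup() above walks and unlinks descriptors under the channel lock but deliberately drops it around the client callback, since the callback may submit new work and try to take the same lock. A pthread-based sketch of that unlock/call/relock shape with a trivial callback (not the driver's locking) follows; build with cc -pthread.

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;

typedef void (*dma_callback)(void *param);

/* A client callback that wants the channel lock itself, e.g. to queue the
 * next transfer. Calling it with the lock held would self-deadlock. */
static void client_done(void *param)
{
	pthread_mutex_lock(&chan_lock);
	printf("callback for job %d, queueing follow-up work\n", *(int *)param);
	pthread_mutex_unlock(&chan_lock);
}

static void cleanup_one(dma_callback cb, void *param)
{
	pthread_mutex_lock(&chan_lock);
	/* ...unlink the finished descriptor from the running list here... */

	if (cb) {
		pthread_mutex_unlock(&chan_lock);	/* never call out with the lock held */
		cb(param);
		pthread_mutex_lock(&chan_lock);
	}

	/* ...free the descriptor back to the pool here... */
	pthread_mutex_unlock(&chan_lock);
}

int main(void)
{
	int job = 42;

	cleanup_one(client_done, &job);
	return 0;
}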
+ /* + * TODO: + * make sure the dma_halt() function really un-wedges the + * controller as much as possible */ + dma_halt(chan); - /* Find the first un-transfer desciptor */ - for (ld_node = fsl_chan->ld_queue.next; - (ld_node != &fsl_chan->ld_queue) - && (dma_async_is_complete( - to_fsl_desc(ld_node)->async_tx.cookie, - fsl_chan->completed_cookie, - fsl_chan->common.cookie) == DMA_SUCCESS); - ld_node = ld_node->next); - - if (ld_node != &fsl_chan->ld_queue) { - /* Get the ld start address from ld_queue */ - next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys; - dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%llx\n", - (unsigned long long)next_dest_addr); - set_cdar(fsl_chan, next_dest_addr); - dma_start(fsl_chan); - } else { - set_cdar(fsl_chan, 0); - set_ndar(fsl_chan, 0); - } + /* + * If there are some link descriptors which have not been + * transferred, we need to start the controller + */ + + /* + * Move all elements from the queue of pending transactions + * onto the list of running transactions + */ + desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node); + list_splice_tail_init(&chan->ld_pending, &chan->ld_running); + + /* + * Program the descriptor's address into the DMA controller, + * then start the DMA transaction + */ + set_cdar(chan, desc->async_tx.phys); + dma_start(chan); out_unlock: - spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); + spin_unlock_irqrestore(&chan->desc_lock, flags); } /** * fsl_dma_memcpy_issue_pending - Issue the DMA start command - * @fsl_chan : Freescale DMA channel + * @chan : Freescale DMA channel */ -static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan) +static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan) { - struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); - -#ifdef FSL_DMA_LD_DEBUG - struct fsl_desc_sw *ld; - unsigned long flags; - - spin_lock_irqsave(&fsl_chan->desc_lock, flags); - if (list_empty(&fsl_chan->ld_queue)) { - spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); - return; - } - - dev_dbg(fsl_chan->dev, "--memcpy issue--\n"); - list_for_each_entry(ld, &fsl_chan->ld_queue, node) { - int i; - dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n", - fsl_chan->id, ld->async_tx.phys); - for (i = 0; i < 8; i++) - dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n", - i, *(((u32 *)&ld->hw) + i)); - } - dev_dbg(fsl_chan->dev, "----------------\n"); - spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); -#endif - - fsl_chan_xfer_ld_queue(fsl_chan); + struct fsldma_chan *chan = to_fsl_chan(dchan); + fsl_chan_xfer_ld_queue(chan); } /** * fsl_dma_is_complete - Determine the DMA status - * @fsl_chan : Freescale DMA channel + * @chan : Freescale DMA channel */ -static enum dma_status fsl_dma_is_complete(struct dma_chan *chan, +static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan, dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used) { - struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); + struct fsldma_chan *chan = to_fsl_chan(dchan); dma_cookie_t last_used; dma_cookie_t last_complete; - fsl_chan_ld_cleanup(fsl_chan); + fsl_chan_ld_cleanup(chan); - last_used = chan->cookie; - last_complete = fsl_chan->completed_cookie; + last_used = dchan->cookie; + last_complete = chan->completed_cookie; if (done) *done = last_complete; @@ -960,32 +990,37 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *chan, return dma_async_is_complete(cookie, last_complete, last_used); } -static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data) 
+/*----------------------------------------------------------------------------*/ +/* Interrupt Handling */ +/*----------------------------------------------------------------------------*/ + +static irqreturn_t fsldma_chan_irq(int irq, void *data) { - struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; - u32 stat; + struct fsldma_chan *chan = data; int update_cookie = 0; int xfer_ld_q = 0; + u32 stat; - stat = get_sr(fsl_chan); - dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n", - fsl_chan->id, stat); - set_sr(fsl_chan, stat); /* Clear the event register */ + /* save and clear the status register */ + stat = get_sr(chan); + set_sr(chan, stat); + dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat); stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); if (!stat) return IRQ_NONE; if (stat & FSL_DMA_SR_TE) - dev_err(fsl_chan->dev, "Transfer Error!\n"); + dev_err(chan->dev, "Transfer Error!\n"); - /* Programming Error + /* + * Programming Error * The DMA_INTERRUPT async_tx is a NULL transfer, which will * triger a PE interrupt. */ if (stat & FSL_DMA_SR_PE) { - dev_dbg(fsl_chan->dev, "event: Programming Error INT\n"); - if (get_bcr(fsl_chan) == 0) { + dev_dbg(chan->dev, "irq: Programming Error INT\n"); + if (get_bcr(chan) == 0) { /* BCR register is 0, this is a DMA_INTERRUPT async_tx. * Now, update the completed cookie, and continue the * next uncompleted transfer. @@ -996,208 +1031,296 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data) stat &= ~FSL_DMA_SR_PE; } - /* If the link descriptor segment transfer finishes, + /* + * If the link descriptor segment transfer finishes, * we will recycle the used descriptor. */ if (stat & FSL_DMA_SR_EOSI) { - dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n"); - dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n", - (unsigned long long)get_cdar(fsl_chan), - (unsigned long long)get_ndar(fsl_chan)); + dev_dbg(chan->dev, "irq: End-of-segments INT\n"); + dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n", + (unsigned long long)get_cdar(chan), + (unsigned long long)get_ndar(chan)); stat &= ~FSL_DMA_SR_EOSI; update_cookie = 1; } - /* For MPC8349, EOCDI event need to update cookie + /* + * For MPC8349, EOCDI event need to update cookie * and start the next transfer if it exist. */ if (stat & FSL_DMA_SR_EOCDI) { - dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n"); + dev_dbg(chan->dev, "irq: End-of-Chain link INT\n"); stat &= ~FSL_DMA_SR_EOCDI; update_cookie = 1; xfer_ld_q = 1; } - /* If it current transfer is the end-of-transfer, + /* + * If it current transfer is the end-of-transfer, * we should clear the Channel Start bit for * prepare next transfer. 
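The controller-level handler added above decodes the general status register as one status byte per channel, starting from the most significant byte and shifting the mask right by eight bits for each channel. The decode loop in isolation, with an example register value:

#include <stdio.h>
#include <stdint.h>

#define MAX_CHANS	4	/* mirrors FSL_DMA_MAX_CHANS_PER_DEVICE */

/* Walk a "general status register" laid out as one byte per channel,
 * most significant byte first, and report channels with pending events. */
static unsigned int decode_gsr(uint32_t gsr)
{
	uint32_t mask = 0xff000000;
	unsigned int handled = 0;
	int i;

	for (i = 0; i < MAX_CHANS; i++) {
		if (gsr & mask) {
			printf("channel %d: status 0x%02x\n", i,
			       (unsigned int)((gsr & mask) >>
					      (8 * (MAX_CHANS - 1 - i))));
			handled++;
		}
		gsr &= ~mask;
		mask >>= 8;
	}
	return handled;
}

int main(void)
{
	decode_gsr(0x84000200);		/* events pending on channels 0 and 2 */
	return 0;
}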
*/ if (stat & FSL_DMA_SR_EOLNI) { - dev_dbg(fsl_chan->dev, "event: End-of-link INT\n"); + dev_dbg(chan->dev, "irq: End-of-link INT\n"); stat &= ~FSL_DMA_SR_EOLNI; xfer_ld_q = 1; } if (update_cookie) - fsl_dma_update_completed_cookie(fsl_chan); + fsl_dma_update_completed_cookie(chan); if (xfer_ld_q) - fsl_chan_xfer_ld_queue(fsl_chan); + fsl_chan_xfer_ld_queue(chan); if (stat) - dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n", - stat); + dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat); - dev_dbg(fsl_chan->dev, "event: Exit\n"); - tasklet_schedule(&fsl_chan->tasklet); + dev_dbg(chan->dev, "irq: Exit\n"); + tasklet_schedule(&chan->tasklet); return IRQ_HANDLED; } -static irqreturn_t fsl_dma_do_interrupt(int irq, void *data) +static void dma_do_tasklet(unsigned long data) { - struct fsl_dma_device *fdev = (struct fsl_dma_device *)data; - u32 gsr; - int ch_nr; + struct fsldma_chan *chan = (struct fsldma_chan *)data; + fsl_chan_ld_cleanup(chan); +} + +static irqreturn_t fsldma_ctrl_irq(int irq, void *data) +{ + struct fsldma_device *fdev = data; + struct fsldma_chan *chan; + unsigned int handled = 0; + u32 gsr, mask; + int i; - gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base) - : in_le32(fdev->reg_base); - ch_nr = (32 - ffs(gsr)) / 8; + gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs) + : in_le32(fdev->regs); + mask = 0xff000000; + dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr); + + for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { + chan = fdev->chan[i]; + if (!chan) + continue; + + if (gsr & mask) { + dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id); + fsldma_chan_irq(irq, chan); + handled++; + } - return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq, - fdev->chan[ch_nr]) : IRQ_NONE; + gsr &= ~mask; + mask >>= 8; + } + + return IRQ_RETVAL(handled); } -static void dma_do_tasklet(unsigned long data) +static void fsldma_free_irqs(struct fsldma_device *fdev) { - struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; - fsl_chan_ld_cleanup(fsl_chan); + struct fsldma_chan *chan; + int i; + + if (fdev->irq != NO_IRQ) { + dev_dbg(fdev->dev, "free per-controller IRQ\n"); + free_irq(fdev->irq, fdev); + return; + } + + for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { + chan = fdev->chan[i]; + if (chan && chan->irq != NO_IRQ) { + dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id); + free_irq(chan->irq, chan); + } + } } -static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev, +static int fsldma_request_irqs(struct fsldma_device *fdev) +{ + struct fsldma_chan *chan; + int ret; + int i; + + /* if we have a per-controller IRQ, use that */ + if (fdev->irq != NO_IRQ) { + dev_dbg(fdev->dev, "request per-controller IRQ\n"); + ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED, + "fsldma-controller", fdev); + return ret; + } + + /* no per-controller IRQ, use the per-channel IRQs */ + for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { + chan = fdev->chan[i]; + if (!chan) + continue; + + if (chan->irq == NO_IRQ) { + dev_err(fdev->dev, "no interrupts property defined for " + "DMA channel %d. 
Please fix your " + "device tree\n", chan->id); + ret = -ENODEV; + goto out_unwind; + } + + dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id); + ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED, + "fsldma-chan", chan); + if (ret) { + dev_err(fdev->dev, "unable to request IRQ for DMA " + "channel %d\n", chan->id); + goto out_unwind; + } + } + + return 0; + +out_unwind: + for (/* none */; i >= 0; i--) { + chan = fdev->chan[i]; + if (!chan) + continue; + + if (chan->irq == NO_IRQ) + continue; + + free_irq(chan->irq, chan); + } + + return ret; +} + +/*----------------------------------------------------------------------------*/ +/* OpenFirmware Subsystem */ +/*----------------------------------------------------------------------------*/ + +static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, struct device_node *node, u32 feature, const char *compatible) { - struct fsl_dma_chan *new_fsl_chan; + struct fsldma_chan *chan; + struct resource res; int err; /* alloc channel */ - new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL); - if (!new_fsl_chan) { - dev_err(fdev->dev, "No free memory for allocating " - "dma channels!\n"); - return -ENOMEM; + chan = kzalloc(sizeof(*chan), GFP_KERNEL); + if (!chan) { + dev_err(fdev->dev, "no free memory for DMA channels!\n"); + err = -ENOMEM; + goto out_return; } - /* get dma channel register base */ - err = of_address_to_resource(node, 0, &new_fsl_chan->reg); - if (err) { - dev_err(fdev->dev, "Can't get %s property 'reg'\n", - node->full_name); - goto err_no_reg; + /* ioremap registers for use */ + chan->regs = of_iomap(node, 0); + if (!chan->regs) { + dev_err(fdev->dev, "unable to ioremap registers\n"); + err = -ENOMEM; + goto out_free_chan; } - new_fsl_chan->feature = feature; + err = of_address_to_resource(node, 0, &res); + if (err) { + dev_err(fdev->dev, "unable to find 'reg' property\n"); + goto out_iounmap_regs; + } + chan->feature = feature; if (!fdev->feature) - fdev->feature = new_fsl_chan->feature; + fdev->feature = chan->feature; - /* If the DMA device's feature is different than its channels', - * report the bug. 
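fsldma_request_irqs() above follows the usual acquire-with-unwind shape: claim the per-channel IRQs one by one and, if any step fails, release everything claimed so far in reverse order before returning the error. A generic sketch of that shape with plain acquire/release stubs instead of request_irq():

#include <stdio.h>

#define NCHAN	4

/* Pretend resource acquisition: index 2 "fails" to show the unwind path. */
static int acquire(int i)
{
	if (i == 2)
		return -1;
	printf("acquired resource %d\n", i);
	return 0;
}

static void release(int i)
{
	printf("released resource %d\n", i);
}

static int acquire_all(void)
{
	int i, ret;

	for (i = 0; i < NCHAN; i++) {
		ret = acquire(i);
		if (ret)
			goto out_unwind;
	}
	return 0;

out_unwind:
	/* Free everything acquired before the failing index, last first. */
	for (i = i - 1; i >= 0; i--)
		release(i);
	return ret;
}

int main(void)
{
	if (acquire_all())
		printf("acquire_all failed, earlier resources released\n");
	return 0;
}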
+ /* + * If the DMA device's feature is different than the feature + * of its channels, report the bug */ - WARN_ON(fdev->feature != new_fsl_chan->feature); + WARN_ON(fdev->feature != chan->feature); - new_fsl_chan->dev = fdev->dev; - new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start, - new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1); - - new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7; - if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { - dev_err(fdev->dev, "There is no %d channel!\n", - new_fsl_chan->id); + chan->dev = fdev->dev; + chan->id = ((res.start - 0x100) & 0xfff) >> 7; + if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { + dev_err(fdev->dev, "too many channels for device\n"); err = -EINVAL; - goto err_no_chan; + goto out_iounmap_regs; } - fdev->chan[new_fsl_chan->id] = new_fsl_chan; - tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet, - (unsigned long)new_fsl_chan); - /* Init the channel */ - dma_init(new_fsl_chan); + fdev->chan[chan->id] = chan; + tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); + + /* Initialize the channel */ + dma_init(chan); /* Clear cdar registers */ - set_cdar(new_fsl_chan, 0); + set_cdar(chan, 0); - switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) { + switch (chan->feature & FSL_DMA_IP_MASK) { case FSL_DMA_IP_85XX: - new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; + chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; case FSL_DMA_IP_83XX: - new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start; - new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size; - new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size; - new_fsl_chan->set_request_count = fsl_chan_set_request_count; + chan->toggle_ext_start = fsl_chan_toggle_ext_start; + chan->set_src_loop_size = fsl_chan_set_src_loop_size; + chan->set_dst_loop_size = fsl_chan_set_dst_loop_size; + chan->set_request_count = fsl_chan_set_request_count; } - spin_lock_init(&new_fsl_chan->desc_lock); - INIT_LIST_HEAD(&new_fsl_chan->ld_queue); + spin_lock_init(&chan->desc_lock); + INIT_LIST_HEAD(&chan->ld_pending); + INIT_LIST_HEAD(&chan->ld_running); + + chan->common.device = &fdev->common; - new_fsl_chan->common.device = &fdev->common; + /* find the IRQ line, if it exists in the device tree */ + chan->irq = irq_of_parse_and_map(node, 0); /* Add the channel to DMA device channel list */ - list_add_tail(&new_fsl_chan->common.device_node, - &fdev->common.channels); + list_add_tail(&chan->common.device_node, &fdev->common.channels); fdev->common.chancnt++; - new_fsl_chan->irq = irq_of_parse_and_map(node, 0); - if (new_fsl_chan->irq != NO_IRQ) { - err = request_irq(new_fsl_chan->irq, - &fsl_dma_chan_do_interrupt, IRQF_SHARED, - "fsldma-channel", new_fsl_chan); - if (err) { - dev_err(fdev->dev, "DMA channel %s request_irq error " - "with return %d\n", node->full_name, err); - goto err_no_irq; - } - } - - dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id, - compatible, - new_fsl_chan->irq != NO_IRQ ? new_fsl_chan->irq : fdev->irq); + dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible, + chan->irq != NO_IRQ ? 
chan->irq : fdev->irq); return 0; -err_no_irq: - list_del(&new_fsl_chan->common.device_node); -err_no_chan: - iounmap(new_fsl_chan->reg_base); -err_no_reg: - kfree(new_fsl_chan); +out_iounmap_regs: + iounmap(chan->regs); +out_free_chan: + kfree(chan); +out_return: return err; } -static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan) +static void fsl_dma_chan_remove(struct fsldma_chan *chan) { - if (fchan->irq != NO_IRQ) - free_irq(fchan->irq, fchan); - list_del(&fchan->common.device_node); - iounmap(fchan->reg_base); - kfree(fchan); + irq_dispose_mapping(chan->irq); + list_del(&chan->common.device_node); + iounmap(chan->regs); + kfree(chan); } -static int __devinit of_fsl_dma_probe(struct of_device *dev, +static int __devinit fsldma_of_probe(struct of_device *op, const struct of_device_id *match) { - int err; - struct fsl_dma_device *fdev; + struct fsldma_device *fdev; struct device_node *child; + int err; - fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL); + fdev = kzalloc(sizeof(*fdev), GFP_KERNEL); if (!fdev) { - dev_err(&dev->dev, "No enough memory for 'priv'\n"); - return -ENOMEM; + dev_err(&op->dev, "No enough memory for 'priv'\n"); + err = -ENOMEM; + goto out_return; } - fdev->dev = &dev->dev; + + fdev->dev = &op->dev; INIT_LIST_HEAD(&fdev->common.channels); - /* get DMA controller register base */ - err = of_address_to_resource(dev->node, 0, &fdev->reg); - if (err) { - dev_err(&dev->dev, "Can't get %s property 'reg'\n", - dev->node->full_name); - goto err_no_reg; + /* ioremap the registers for use */ + fdev->regs = of_iomap(op->node, 0); + if (!fdev->regs) { + dev_err(&op->dev, "unable to ioremap registers\n"); + err = -ENOMEM; + goto out_free_fdev; } - dev_info(&dev->dev, "Probe the Freescale DMA driver for %s " - "controller at 0x%llx...\n", - match->compatible, (unsigned long long)fdev->reg.start); - fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end - - fdev->reg.start + 1); + /* map the channel IRQ if it exists, but don't hookup the handler yet */ + fdev->irq = irq_of_parse_and_map(op->node, 0); dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); @@ -1210,103 +1333,111 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev, fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; fdev->common.device_terminate_all = fsl_dma_device_terminate_all; - fdev->common.dev = &dev->dev; + fdev->common.dev = &op->dev; - fdev->irq = irq_of_parse_and_map(dev->node, 0); - if (fdev->irq != NO_IRQ) { - err = request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED, - "fsldma-device", fdev); - if (err) { - dev_err(&dev->dev, "DMA device request_irq error " - "with return %d\n", err); - goto err; - } - } - - dev_set_drvdata(&(dev->dev), fdev); + dev_set_drvdata(&op->dev, fdev); - /* We cannot use of_platform_bus_probe() because there is no - * of_platform_bus_remove. Instead, we manually instantiate every DMA + /* + * We cannot use of_platform_bus_probe() because there is no + * of_platform_bus_remove(). Instead, we manually instantiate every DMA * channel object. 
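The probe path commented above walks the controller's child nodes and instantiates a channel for every compatible string it recognizes, passing per-IP-block feature flags. Below is a user-space model of that dispatch over a fixed table of "child nodes"; the flag values are placeholders and the device-tree API is not reproduced.

#include <stdio.h>
#include <string.h>

/* Feature flags mirroring the shape of the patch; values are placeholders. */
#define IP_85XX		0x1
#define IP_83XX		0x2
#define BIG_ENDIAN_F	0x4
#define LITTLE_ENDIAN_F	0x0

struct child_node {
	const char *compatible;
};

static void chan_probe(const struct child_node *node, unsigned int feature,
		       const char *compatible)
{
	printf("probed %s with feature 0x%x\n", compatible, feature);
	(void)node;
}

int main(void)
{
	/* Stand-in for the controller's child nodes in the device tree. */
	static const struct child_node children[] = {
		{ "fsl,eloplus-dma-channel" },
		{ "fsl,elo-dma-channel" },
		{ "some,other-node" },
	};
	size_t i;

	for (i = 0; i < sizeof(children) / sizeof(children[0]); i++) {
		const struct child_node *c = &children[i];

		if (!strcmp(c->compatible, "fsl,eloplus-dma-channel"))
			chan_probe(c, IP_85XX | BIG_ENDIAN_F,
				   "fsl,eloplus-dma-channel");

		if (!strcmp(c->compatible, "fsl,elo-dma-channel"))
			chan_probe(c, IP_83XX | LITTLE_ENDIAN_F,
				   "fsl,elo-dma-channel");
	}
	return 0;
}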
*/ - for_each_child_of_node(dev->node, child) { - if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) + for_each_child_of_node(op->node, child) { + if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) { fsl_dma_chan_probe(fdev, child, FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN, "fsl,eloplus-dma-channel"); - if (of_device_is_compatible(child, "fsl,elo-dma-channel")) + } + + if (of_device_is_compatible(child, "fsl,elo-dma-channel")) { fsl_dma_chan_probe(fdev, child, FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN, "fsl,elo-dma-channel"); + } + } + + /* + * Hookup the IRQ handler(s) + * + * If we have a per-controller interrupt, we prefer that to the + * per-channel interrupts to reduce the number of shared interrupt + * handlers on the same IRQ line + */ + err = fsldma_request_irqs(fdev); + if (err) { + dev_err(fdev->dev, "unable to request IRQs\n"); + goto out_free_fdev; } dma_async_device_register(&fdev->common); return 0; -err: - iounmap(fdev->reg_base); -err_no_reg: +out_free_fdev: + irq_dispose_mapping(fdev->irq); kfree(fdev); +out_return: return err; } -static int of_fsl_dma_remove(struct of_device *of_dev) +static int fsldma_of_remove(struct of_device *op) { - struct fsl_dma_device *fdev; + struct fsldma_device *fdev; unsigned int i; - fdev = dev_get_drvdata(&of_dev->dev); - + fdev = dev_get_drvdata(&op->dev); dma_async_device_unregister(&fdev->common); - for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) + fsldma_free_irqs(fdev); + + for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { if (fdev->chan[i]) fsl_dma_chan_remove(fdev->chan[i]); + } - if (fdev->irq != NO_IRQ) - free_irq(fdev->irq, fdev); - - iounmap(fdev->reg_base); - + iounmap(fdev->regs); + dev_set_drvdata(&op->dev, NULL); kfree(fdev); - dev_set_drvdata(&of_dev->dev, NULL); return 0; } -static struct of_device_id of_fsl_dma_ids[] = { +static const struct of_device_id fsldma_of_ids[] = { { .compatible = "fsl,eloplus-dma", }, { .compatible = "fsl,elo-dma", }, {} }; -static struct of_platform_driver of_fsl_dma_driver = { - .name = "fsl-elo-dma", - .match_table = of_fsl_dma_ids, - .probe = of_fsl_dma_probe, - .remove = of_fsl_dma_remove, +static struct of_platform_driver fsldma_of_driver = { + .name = "fsl-elo-dma", + .match_table = fsldma_of_ids, + .probe = fsldma_of_probe, + .remove = fsldma_of_remove, }; -static __init int of_fsl_dma_init(void) +/*----------------------------------------------------------------------------*/ +/* Module Init / Exit */ +/*----------------------------------------------------------------------------*/ + +static __init int fsldma_init(void) { int ret; pr_info("Freescale Elo / Elo Plus DMA driver\n"); - ret = of_register_platform_driver(&of_fsl_dma_driver); + ret = of_register_platform_driver(&fsldma_of_driver); if (ret) pr_err("fsldma: failed to register platform driver\n"); return ret; } -static void __exit of_fsl_dma_exit(void) +static void __exit fsldma_exit(void) { - of_unregister_platform_driver(&of_fsl_dma_driver); + of_unregister_platform_driver(&fsldma_of_driver); } -subsys_initcall(of_fsl_dma_init); -module_exit(of_fsl_dma_exit); +subsys_initcall(fsldma_init); +module_exit(fsldma_exit); MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index 0df14cbb8ca..cb4d6ff5159 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -92,11 +92,9 @@ struct fsl_desc_sw { struct list_head node; struct list_head tx_list; struct dma_async_tx_descriptor async_tx; - struct list_head *ld; - void *priv; 
} __attribute__((aligned(32))); -struct fsl_dma_chan_regs { +struct fsldma_chan_regs { u32 mr; /* 0x00 - Mode Register */ u32 sr; /* 0x04 - Status Register */ u64 cdar; /* 0x08 - Current descriptor address register */ @@ -106,20 +104,19 @@ struct fsl_dma_chan_regs { u64 ndar; /* 0x24 - Next Descriptor Address Register */ }; -struct fsl_dma_chan; +struct fsldma_chan; #define FSL_DMA_MAX_CHANS_PER_DEVICE 4 -struct fsl_dma_device { - void __iomem *reg_base; /* DGSR register base */ - struct resource reg; /* Resource for register */ +struct fsldma_device { + void __iomem *regs; /* DGSR register base */ struct device *dev; struct dma_device common; - struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE]; + struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE]; u32 feature; /* The same as DMA channels */ int irq; /* Channel IRQ */ }; -/* Define macros for fsl_dma_chan->feature property */ +/* Define macros for fsldma_chan->feature property */ #define FSL_DMA_LITTLE_ENDIAN 0x00000000 #define FSL_DMA_BIG_ENDIAN 0x00000001 @@ -130,28 +127,28 @@ struct fsl_dma_device { #define FSL_DMA_CHAN_PAUSE_EXT 0x00001000 #define FSL_DMA_CHAN_START_EXT 0x00002000 -struct fsl_dma_chan { - struct fsl_dma_chan_regs __iomem *reg_base; +struct fsldma_chan { + struct fsldma_chan_regs __iomem *regs; dma_cookie_t completed_cookie; /* The maximum cookie completed */ spinlock_t desc_lock; /* Descriptor operation lock */ - struct list_head ld_queue; /* Link descriptors queue */ + struct list_head ld_pending; /* Link descriptors queue */ + struct list_head ld_running; /* Link descriptors queue */ struct dma_chan common; /* DMA common channel */ struct dma_pool *desc_pool; /* Descriptors pool */ struct device *dev; /* Channel device */ - struct resource reg; /* Resource for register */ int irq; /* Channel IRQ */ int id; /* Raw id of this channel */ struct tasklet_struct tasklet; u32 feature; - void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int enable); - void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable); - void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size); - void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size); - void (*set_request_count)(struct fsl_dma_chan *fsl_chan, int size); + void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable); + void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable); + void (*set_src_loop_size)(struct fsldma_chan *fsl_chan, int size); + void (*set_dst_loop_size)(struct fsldma_chan *fsl_chan, int size); + void (*set_request_count)(struct fsldma_chan *fsl_chan, int size); }; -#define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common) +#define to_fsl_chan(chan) container_of(chan, struct fsldma_chan, common) #define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node) #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index dcc4ab78b32..5d0e42b263d 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -94,16 +94,12 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) return IRQ_HANDLED; } -static void ioat1_cleanup_tasklet(unsigned long data); - /* common channel initialization */ -void ioat_init_channel(struct ioatdma_device *device, - struct ioat_chan_common *chan, int idx, - void (*timer_fn)(unsigned long), - void (*tasklet)(unsigned long), - unsigned long ioat) +void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx) { struct dma_device *dma 
= &device->common; + struct dma_chan *c = &chan->common; + unsigned long data = (unsigned long) c; chan->device = device; chan->reg_base = device->reg_base + (0x80 * (idx + 1)); @@ -112,14 +108,12 @@ void ioat_init_channel(struct ioatdma_device *device, list_add_tail(&chan->common.device_node, &dma->channels); device->idx[idx] = chan; init_timer(&chan->timer); - chan->timer.function = timer_fn; - chan->timer.data = ioat; - tasklet_init(&chan->cleanup_task, tasklet, ioat); + chan->timer.function = device->timer_fn; + chan->timer.data = data; + tasklet_init(&chan->cleanup_task, device->cleanup_fn, data); tasklet_disable(&chan->cleanup_task); } -static void ioat1_timer_event(unsigned long data); - /** * ioat1_dma_enumerate_channels - find and initialize the device's channels * @device: the device to be enumerated @@ -155,10 +149,7 @@ static int ioat1_enumerate_channels(struct ioatdma_device *device) if (!ioat) break; - ioat_init_channel(device, &ioat->base, i, - ioat1_timer_event, - ioat1_cleanup_tasklet, - (unsigned long) ioat); + ioat_init_channel(device, &ioat->base, i); ioat->xfercap = xfercap; spin_lock_init(&ioat->desc_lock); INIT_LIST_HEAD(&ioat->free_desc); @@ -532,12 +523,12 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, return &desc->txd; } -static void ioat1_cleanup_tasklet(unsigned long data) +static void ioat1_cleanup_event(unsigned long data) { - struct ioat_dma_chan *chan = (void *)data; + struct ioat_dma_chan *ioat = to_ioat_chan((void *) data); - ioat1_cleanup(chan); - writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET); + ioat1_cleanup(ioat); + writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, @@ -687,7 +678,7 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat) static void ioat1_timer_event(unsigned long data) { - struct ioat_dma_chan *ioat = (void *) data; + struct ioat_dma_chan *ioat = to_ioat_chan((void *) data); struct ioat_chan_common *chan = &ioat->base; dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state); @@ -734,16 +725,17 @@ static void ioat1_timer_event(unsigned long data) spin_unlock_bh(&chan->cleanup_lock); } -static enum dma_status -ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie, +enum dma_status +ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used) { - struct ioat_dma_chan *ioat = to_ioat_chan(c); + struct ioat_chan_common *chan = to_chan_common(c); + struct ioatdma_device *device = chan->device; if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) return DMA_SUCCESS; - ioat1_cleanup(ioat); + device->cleanup_fn((unsigned long) c); return ioat_is_complete(c, cookie, done, used); } @@ -1199,12 +1191,14 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca) device->intr_quirk = ioat1_intr_quirk; device->enumerate_channels = ioat1_enumerate_channels; device->self_test = ioat_dma_self_test; + device->timer_fn = ioat1_timer_event; + device->cleanup_fn = ioat1_cleanup_event; dma = &device->common; dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy; dma->device_issue_pending = ioat1_dma_memcpy_issue_pending; dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources; dma->device_free_chan_resources = ioat1_dma_free_chan_resources; - dma->device_is_tx_complete = ioat1_dma_is_complete; + dma->device_is_tx_complete = ioat_is_dma_complete; err = ioat_probe(device); if (err) diff --git a/drivers/dma/ioat/dma.h 
b/drivers/dma/ioat/dma.h index bbc3e78ef33..4f747a25407 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -61,7 +61,7 @@ * @intr_quirk: interrupt setup quirk (for ioat_v1 devices) * @enumerate_channels: hw version specific channel enumeration * @reset_hw: hw version specific channel (re)initialization - * @cleanup_tasklet: select between the v2 and v3 cleanup routines + * @cleanup_fn: select between the v2 and v3 cleanup routines * @timer_fn: select between the v2 and v3 timer watchdog routines * @self_test: hardware version specific self test for each supported op type * @@ -80,7 +80,7 @@ struct ioatdma_device { void (*intr_quirk)(struct ioatdma_device *device); int (*enumerate_channels)(struct ioatdma_device *device); int (*reset_hw)(struct ioat_chan_common *chan); - void (*cleanup_tasklet)(unsigned long data); + void (*cleanup_fn)(unsigned long data); void (*timer_fn)(unsigned long data); int (*self_test)(struct ioatdma_device *device); }; @@ -337,10 +337,9 @@ struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); unsigned long ioat_get_current_completion(struct ioat_chan_common *chan); void ioat_init_channel(struct ioatdma_device *device, - struct ioat_chan_common *chan, int idx, - void (*timer_fn)(unsigned long), - void (*tasklet)(unsigned long), - unsigned long ioat); + struct ioat_chan_common *chan, int idx); +enum dma_status ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie, + dma_cookie_t *done, dma_cookie_t *used); void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, size_t len, struct ioat_dma_descriptor *hw); bool ioat_cleanup_preamble(struct ioat_chan_common *chan, diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 5cc37afe2bc..1ed5d66d7dc 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -51,48 +51,40 @@ MODULE_PARM_DESC(ioat_ring_max_alloc_order, void __ioat2_issue_pending(struct ioat2_dma_chan *ioat) { - void * __iomem reg_base = ioat->base.reg_base; + struct ioat_chan_common *chan = &ioat->base; - ioat->pending = 0; ioat->dmacount += ioat2_ring_pending(ioat); ioat->issued = ioat->head; /* make descriptor updates globally visible before notifying channel */ wmb(); - writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET); - dev_dbg(to_dev(&ioat->base), + writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); + dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x count: %#x\n", __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); } -void ioat2_issue_pending(struct dma_chan *chan) +void ioat2_issue_pending(struct dma_chan *c) { - struct ioat2_dma_chan *ioat = to_ioat2_chan(chan); + struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - spin_lock_bh(&ioat->ring_lock); - if (ioat->pending == 1) + if (ioat2_ring_pending(ioat)) { + spin_lock_bh(&ioat->ring_lock); __ioat2_issue_pending(ioat); - spin_unlock_bh(&ioat->ring_lock); + spin_unlock_bh(&ioat->ring_lock); + } } /** * ioat2_update_pending - log pending descriptors * @ioat: ioat2+ channel * - * set pending to '1' unless pending is already set to '2', pending == 2 - * indicates that submission is temporarily blocked due to an in-flight - * reset. If we are already above the ioat_pending_level threshold then - * just issue pending. - * - * called with ring_lock held + * Check if the number of unsubmitted descriptors has exceeded the + * watermark. 
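The hunk above removes the lock-free 'pending' flag and instead derives the amount of unsubmitted work directly from the ring indices, notifying the hardware only once ioat_pending_level is exceeded or the client explicitly calls issue_pending. The following standalone sketch illustrates that batching scheme in userspace C; PENDING_LEVEL, hw_notify() and submit_one() are invented stand-ins, not the driver's code.

#include <stdint.h>
#include <stdio.h>

#define PENDING_LEVEL 4                 /* stand-in for ioat_pending_level */

struct ring {
	uint16_t head;                  /* next free slot (producer index) */
	uint16_t issued;                /* last index handed to hardware */
};

static unsigned int ring_pending(const struct ring *r)
{
	return (uint16_t)(r->head - r->issued);   /* wraps naturally at 2^16 */
}

static void hw_notify(struct ring *r)
{
	/* stand-in for the writew() of dmacount in __ioat2_issue_pending() */
	printf("flush: issuing %u descriptors\n", ring_pending(r));
	r->issued = r->head;
}

static void submit_one(struct ring *r)
{
	r->head++;
	if (ring_pending(r) > PENDING_LEVEL)      /* ioat2_update_pending() */
		hw_notify(r);
}

int main(void)
{
	struct ring r = { 0, 0 };

	for (int i = 0; i < 10; i++)
		submit_one(&r);
	if (ring_pending(&r))                     /* ioat2_issue_pending() */
		hw_notify(&r);
	return 0;
}

Keeping head and issued as 16-bit counters means the pending count survives index wrap without extra bookkeeping, which is what lets the separate flag go away.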
Called with ring_lock held */ static void ioat2_update_pending(struct ioat2_dma_chan *ioat) { - if (unlikely(ioat->pending == 2)) - return; - else if (ioat2_ring_pending(ioat) > ioat_pending_level) + if (ioat2_ring_pending(ioat) > ioat_pending_level) __ioat2_issue_pending(ioat); - else - ioat->pending = 1; } static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) @@ -166,7 +158,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) seen_current = true; } ioat->tail += i; - BUG_ON(!seen_current); /* no active descs have written a completion? */ + BUG_ON(active && !seen_current); /* no active descs have written a completion? */ chan->last_completion = phys_complete; if (ioat->head == ioat->tail) { @@ -207,9 +199,9 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat) spin_unlock_bh(&chan->cleanup_lock); } -void ioat2_cleanup_tasklet(unsigned long data) +void ioat2_cleanup_event(unsigned long data) { - struct ioat2_dma_chan *ioat = (void *) data; + struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); ioat2_cleanup(ioat); writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); @@ -291,7 +283,7 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) void ioat2_timer_event(unsigned long data) { - struct ioat2_dma_chan *ioat = (void *) data; + struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); struct ioat_chan_common *chan = &ioat->base; spin_lock_bh(&chan->cleanup_lock); @@ -397,10 +389,7 @@ int ioat2_enumerate_channels(struct ioatdma_device *device) if (!ioat) break; - ioat_init_channel(device, &ioat->base, i, - device->timer_fn, - device->cleanup_tasklet, - (unsigned long) ioat); + ioat_init_channel(device, &ioat->base, i); ioat->xfercap_log = xfercap_log; spin_lock_init(&ioat->ring_lock); if (device->reset_hw(&ioat->base)) { @@ -546,7 +535,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) ioat->head = 0; ioat->issued = 0; ioat->tail = 0; - ioat->pending = 0; ioat->alloc_order = order; spin_unlock_bh(&ioat->ring_lock); @@ -701,7 +689,7 @@ int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs) mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); spin_unlock_bh(&chan->cleanup_lock); - device->timer_fn((unsigned long) ioat); + device->timer_fn((unsigned long) &chan->common); } else spin_unlock_bh(&chan->cleanup_lock); return -ENOMEM; @@ -785,7 +773,7 @@ void ioat2_free_chan_resources(struct dma_chan *c) tasklet_disable(&chan->cleanup_task); del_timer_sync(&chan->timer); - device->cleanup_tasklet((unsigned long) ioat); + device->cleanup_fn((unsigned long) c); device->reset_hw(chan); spin_lock_bh(&ioat->ring_lock); @@ -815,25 +803,9 @@ void ioat2_free_chan_resources(struct dma_chan *c) chan->last_completion = 0; chan->completion_dma = 0; - ioat->pending = 0; ioat->dmacount = 0; } -enum dma_status -ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie, - dma_cookie_t *done, dma_cookie_t *used) -{ - struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - struct ioatdma_device *device = ioat->base.device; - - if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) - return DMA_SUCCESS; - - device->cleanup_tasklet((unsigned long) ioat); - - return ioat_is_complete(c, cookie, done, used); -} - static ssize_t ring_size_show(struct dma_chan *c, char *page) { struct ioat2_dma_chan *ioat = to_ioat2_chan(c); @@ -874,7 +846,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) device->enumerate_channels = ioat2_enumerate_channels; device->reset_hw = ioat2_reset_hw; - 
device->cleanup_tasklet = ioat2_cleanup_tasklet; + device->cleanup_fn = ioat2_cleanup_event; device->timer_fn = ioat2_timer_event; device->self_test = ioat_dma_self_test; dma = &device->common; @@ -882,7 +854,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) dma->device_issue_pending = ioat2_issue_pending; dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; dma->device_free_chan_resources = ioat2_free_chan_resources; - dma->device_is_tx_complete = ioat2_is_complete; + dma->device_is_tx_complete = ioat_is_dma_complete; err = ioat_probe(device); if (err) diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index 3afad8da43c..ef2871fd786 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h @@ -47,7 +47,6 @@ extern int ioat_ring_alloc_order; * @head: allocated index * @issued: hardware notification point * @tail: cleanup index - * @pending: lock free indicator for issued != head * @dmacount: identical to 'head' except for occasionally resetting to zero * @alloc_order: log2 of the number of allocated descriptors * @ring: software ring buffer implementation of hardware ring @@ -61,7 +60,6 @@ struct ioat2_dma_chan { u16 tail; u16 dmacount; u16 alloc_order; - int pending; struct ioat_ring_ent **ring; spinlock_t ring_lock; }; @@ -178,12 +176,10 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, void ioat2_issue_pending(struct dma_chan *chan); int ioat2_alloc_chan_resources(struct dma_chan *c); void ioat2_free_chan_resources(struct dma_chan *c); -enum dma_status ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie, - dma_cookie_t *done, dma_cookie_t *used); void __ioat2_restart_chan(struct ioat2_dma_chan *ioat); bool reshape_ring(struct ioat2_dma_chan *ioat, int order); void __ioat2_issue_pending(struct ioat2_dma_chan *ioat); -void ioat2_cleanup_tasklet(unsigned long data); +void ioat2_cleanup_event(unsigned long data); void ioat2_timer_event(unsigned long data); int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo); int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo); diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index 9908c9e94b2..26febc56dab 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -293,17 +293,25 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) } } ioat->tail += i; - BUG_ON(!seen_current); /* no active descs have written a completion? */ + BUG_ON(active && !seen_current); /* no active descs have written a completion? 
*/ chan->last_completion = phys_complete; - if (ioat->head == ioat->tail) { + + active = ioat2_ring_active(ioat); + if (active == 0) { dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", __func__); clear_bit(IOAT_COMPLETION_PENDING, &chan->state); mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); } + /* 5 microsecond delay per pending descriptor */ + writew(min((5 * active), IOAT_INTRDELAY_MASK), + chan->device->reg_base + IOAT_INTRDELAY_OFFSET); } -static void ioat3_cleanup(struct ioat2_dma_chan *ioat) +/* try to cleanup, but yield (via spin_trylock) to incoming submissions + * with the expectation that we will immediately poll again shortly + */ +static void ioat3_cleanup_poll(struct ioat2_dma_chan *ioat) { struct ioat_chan_common *chan = &ioat->base; unsigned long phys_complete; @@ -329,29 +337,41 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat) spin_unlock_bh(&chan->cleanup_lock); } -static void ioat3_cleanup_tasklet(unsigned long data) +/* run cleanup now because we already delayed the interrupt via INTRDELAY */ +static void ioat3_cleanup_sync(struct ioat2_dma_chan *ioat) +{ + struct ioat_chan_common *chan = &ioat->base; + unsigned long phys_complete; + + prefetch(chan->completion); + + spin_lock_bh(&chan->cleanup_lock); + if (!ioat_cleanup_preamble(chan, &phys_complete)) { + spin_unlock_bh(&chan->cleanup_lock); + return; + } + spin_lock_bh(&ioat->ring_lock); + + __cleanup(ioat, phys_complete); + + spin_unlock_bh(&ioat->ring_lock); + spin_unlock_bh(&chan->cleanup_lock); +} + +static void ioat3_cleanup_event(unsigned long data) { - struct ioat2_dma_chan *ioat = (void *) data; + struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); - ioat3_cleanup(ioat); - writew(IOAT_CHANCTRL_RUN | IOAT3_CHANCTRL_COMPL_DCA_EN, - ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); + ioat3_cleanup_sync(ioat); + writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) { struct ioat_chan_common *chan = &ioat->base; unsigned long phys_complete; - u32 status; - - status = ioat_chansts(chan); - if (is_ioat_active(status) || is_ioat_idle(status)) - ioat_suspend(chan); - while (is_ioat_active(status) || is_ioat_idle(status)) { - status = ioat_chansts(chan); - cpu_relax(); - } + ioat2_quiesce(chan, 0); if (ioat_cleanup_preamble(chan, &phys_complete)) __cleanup(ioat, phys_complete); @@ -360,7 +380,7 @@ static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) static void ioat3_timer_event(unsigned long data) { - struct ioat2_dma_chan *ioat = (void *) data; + struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); struct ioat_chan_common *chan = &ioat->base; spin_lock_bh(&chan->cleanup_lock); @@ -426,7 +446,7 @@ ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie, if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) return DMA_SUCCESS; - ioat3_cleanup(ioat); + ioat3_cleanup_poll(ioat); return ioat_is_complete(c, cookie, done, used); } @@ -1239,11 +1259,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) if (is_raid_device) { dma->device_is_tx_complete = ioat3_is_complete; - device->cleanup_tasklet = ioat3_cleanup_tasklet; + device->cleanup_fn = ioat3_cleanup_event; device->timer_fn = ioat3_timer_event; } else { - dma->device_is_tx_complete = ioat2_is_complete; - device->cleanup_tasklet = ioat2_cleanup_tasklet; + dma->device_is_tx_complete = ioat_is_dma_complete; + device->cleanup_fn = ioat2_cleanup_event; device->timer_fn = ioat2_timer_event; } diff --git 
a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h index e8ae63baf58..1391798542b 100644 --- a/drivers/dma/ioat/registers.h +++ b/drivers/dma/ioat/registers.h @@ -60,7 +60,7 @@ #define IOAT_PERPORTOFFSET_OFFSET 0x0A /* 16-bit */ #define IOAT_INTRDELAY_OFFSET 0x0C /* 16-bit */ -#define IOAT_INTRDELAY_INT_DELAY_MASK 0x3FFF /* Interrupt Delay Time */ +#define IOAT_INTRDELAY_MASK 0x3FFF /* Interrupt Delay Time */ #define IOAT_INTRDELAY_COALESE_SUPPORT 0x8000 /* Interrupt Coalescing Supported */ #define IOAT_DEVICE_STATUS_OFFSET 0x0E /* 16-bit */ diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index e80bae1673f..2a446397c88 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -348,6 +348,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params, break; case IPU_PIX_FMT_BGRA32: case IPU_PIX_FMT_BGR32: + case IPU_PIX_FMT_ABGR32: params->ip.bpp = 0; params->ip.pfs = 4; params->ip.npb = 7; @@ -376,20 +377,6 @@ static void ipu_ch_param_set_size(union chan_param_mem *params, params->ip.wid2 = 7; /* Blue bit width - 1 */ params->ip.wid3 = 7; /* Alpha bit width - 1 */ break; - case IPU_PIX_FMT_ABGR32: - params->ip.bpp = 0; - params->ip.pfs = 4; - params->ip.npb = 7; - params->ip.sat = 2; /* SAT = 32-bit access */ - params->ip.ofs0 = 8; /* Red bit offset */ - params->ip.ofs1 = 16; /* Green bit offset */ - params->ip.ofs2 = 24; /* Blue bit offset */ - params->ip.ofs3 = 0; /* Alpha bit offset */ - params->ip.wid0 = 7; /* Red bit width - 1 */ - params->ip.wid1 = 7; /* Green bit width - 1 */ - params->ip.wid2 = 7; /* Blue bit width - 1 */ - params->ip.wid3 = 7; /* Alpha bit width - 1 */ - break; case IPU_PIX_FMT_UYVY: params->ip.bpp = 2; params->ip.pfs = 6; diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c new file mode 100644 index 00000000000..3fdf1f46bd6 --- /dev/null +++ b/drivers/dma/mpc512x_dma.c @@ -0,0 +1,800 @@ +/* + * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008. + * Copyright (C) Semihalf 2009 + * + * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description + * (defines, structures and comments) was taken from MPC5121 DMA driver + * written by Hongjun Chen <hong-jun.chen@freescale.com>. + * + * Approved as OSADL project by a majority of OSADL members and funded + * by OSADL membership fees in 2009; for details see www.osadl.org. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. + */ + +/* + * This is initial version of MPC5121 DMA driver. Only memory to memory + * transfers are supported (tested using dmatest module). 
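A few hunks earlier, the ioat3 cleanup path starts programming the interrupt delay register in proportion to the number of descriptors still active, clamped to the 14-bit field that registers.h now simply calls IOAT_INTRDELAY_MASK. The sketch below only illustrates that clamped scaling; the figure of 5 microseconds per descriptor comes from the comment in that hunk, everything else is an invented userspace example.

#include <stdio.h>

#define INTRDELAY_MASK	0x3FFF          /* 14-bit delay field, as in registers.h */
#define USEC_PER_DESC	5               /* heuristic used by the ioat3 cleanup path */

static unsigned int intr_delay(unsigned int active)
{
	unsigned int d = USEC_PER_DESC * active;

	return d < INTRDELAY_MASK ? d : INTRDELAY_MASK;   /* min(5 * active, mask) */
}

int main(void)
{
	printf("3 active     -> delay %u\n", intr_delay(3));
	printf("10000 active -> delay %u\n", intr_delay(10000));
	return 0;
}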
+ */ + +#include <linux/module.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/of_device.h> +#include <linux/of_platform.h> + +#include <linux/random.h> + +/* Number of DMA Transfer descriptors allocated per channel */ +#define MPC_DMA_DESCRIPTORS 64 + +/* Macro definitions */ +#define MPC_DMA_CHANNELS 64 +#define MPC_DMA_TCD_OFFSET 0x1000 + +/* Arbitration mode of group and channel */ +#define MPC_DMA_DMACR_EDCG (1 << 31) +#define MPC_DMA_DMACR_ERGA (1 << 3) +#define MPC_DMA_DMACR_ERCA (1 << 2) + +/* Error codes */ +#define MPC_DMA_DMAES_VLD (1 << 31) +#define MPC_DMA_DMAES_GPE (1 << 15) +#define MPC_DMA_DMAES_CPE (1 << 14) +#define MPC_DMA_DMAES_ERRCHN(err) \ + (((err) >> 8) & 0x3f) +#define MPC_DMA_DMAES_SAE (1 << 7) +#define MPC_DMA_DMAES_SOE (1 << 6) +#define MPC_DMA_DMAES_DAE (1 << 5) +#define MPC_DMA_DMAES_DOE (1 << 4) +#define MPC_DMA_DMAES_NCE (1 << 3) +#define MPC_DMA_DMAES_SGE (1 << 2) +#define MPC_DMA_DMAES_SBE (1 << 1) +#define MPC_DMA_DMAES_DBE (1 << 0) + +#define MPC_DMA_TSIZE_1 0x00 +#define MPC_DMA_TSIZE_2 0x01 +#define MPC_DMA_TSIZE_4 0x02 +#define MPC_DMA_TSIZE_16 0x04 +#define MPC_DMA_TSIZE_32 0x05 + +/* MPC5121 DMA engine registers */ +struct __attribute__ ((__packed__)) mpc_dma_regs { + /* 0x00 */ + u32 dmacr; /* DMA control register */ + u32 dmaes; /* DMA error status */ + /* 0x08 */ + u32 dmaerqh; /* DMA enable request high(channels 63~32) */ + u32 dmaerql; /* DMA enable request low(channels 31~0) */ + u32 dmaeeih; /* DMA enable error interrupt high(ch63~32) */ + u32 dmaeeil; /* DMA enable error interrupt low(ch31~0) */ + /* 0x18 */ + u8 dmaserq; /* DMA set enable request */ + u8 dmacerq; /* DMA clear enable request */ + u8 dmaseei; /* DMA set enable error interrupt */ + u8 dmaceei; /* DMA clear enable error interrupt */ + /* 0x1c */ + u8 dmacint; /* DMA clear interrupt request */ + u8 dmacerr; /* DMA clear error */ + u8 dmassrt; /* DMA set start bit */ + u8 dmacdne; /* DMA clear DONE status bit */ + /* 0x20 */ + u32 dmainth; /* DMA interrupt request high(ch63~32) */ + u32 dmaintl; /* DMA interrupt request low(ch31~0) */ + u32 dmaerrh; /* DMA error high(ch63~32) */ + u32 dmaerrl; /* DMA error low(ch31~0) */ + /* 0x30 */ + u32 dmahrsh; /* DMA hw request status high(ch63~32) */ + u32 dmahrsl; /* DMA hardware request status low(ch31~0) */ + u32 dmaihsa; /* DMA interrupt high select AXE(ch63~32) */ + u32 dmailsa; /* DMA interrupt low select AXE(ch31~0) */ + /* 0x40 ~ 0xff */ + u32 reserve0[48]; /* Reserved */ + /* 0x100 */ + u8 dchpri[MPC_DMA_CHANNELS]; + /* DMA channels(0~63) priority */ +}; + +struct __attribute__ ((__packed__)) mpc_dma_tcd { + /* 0x00 */ + u32 saddr; /* Source address */ + + u32 smod:5; /* Source address modulo */ + u32 ssize:3; /* Source data transfer size */ + u32 dmod:5; /* Destination address modulo */ + u32 dsize:3; /* Destination data transfer size */ + u32 soff:16; /* Signed source address offset */ + + /* 0x08 */ + u32 nbytes; /* Inner "minor" byte count */ + u32 slast; /* Last source address adjustment */ + u32 daddr; /* Destination address */ + + /* 0x14 */ + u32 citer_elink:1; /* Enable channel-to-channel linking on + * minor loop complete + */ + u32 citer_linkch:6; /* Link channel for minor loop complete */ + u32 citer:9; /* Current "major" iteration count */ + u32 doff:16; /* Signed destination address offset */ + + /* 0x18 */ + u32 dlast_sga; /* Last Destination address adjustment/scatter + * gather address + */ + + /* 0x1c */ + u32 biter_elink:1; /* 
Enable channel-to-channel linking on major + * loop complete + */ + u32 biter_linkch:6; + u32 biter:9; /* Beginning "major" iteration count */ + u32 bwc:2; /* Bandwidth control */ + u32 major_linkch:6; /* Link channel number */ + u32 done:1; /* Channel done */ + u32 active:1; /* Channel active */ + u32 major_elink:1; /* Enable channel-to-channel linking on major + * loop complete + */ + u32 e_sg:1; /* Enable scatter/gather processing */ + u32 d_req:1; /* Disable request */ + u32 int_half:1; /* Enable an interrupt when major counter is + * half complete + */ + u32 int_maj:1; /* Enable an interrupt when major iteration + * count completes + */ + u32 start:1; /* Channel start */ +}; + +struct mpc_dma_desc { + struct dma_async_tx_descriptor desc; + struct mpc_dma_tcd *tcd; + dma_addr_t tcd_paddr; + int error; + struct list_head node; +}; + +struct mpc_dma_chan { + struct dma_chan chan; + struct list_head free; + struct list_head prepared; + struct list_head queued; + struct list_head active; + struct list_head completed; + struct mpc_dma_tcd *tcd; + dma_addr_t tcd_paddr; + dma_cookie_t completed_cookie; + + /* Lock for this structure */ + spinlock_t lock; +}; + +struct mpc_dma { + struct dma_device dma; + struct tasklet_struct tasklet; + struct mpc_dma_chan channels[MPC_DMA_CHANNELS]; + struct mpc_dma_regs __iomem *regs; + struct mpc_dma_tcd __iomem *tcd; + int irq; + uint error_status; + + /* Lock for error_status field in this structure */ + spinlock_t error_status_lock; +}; + +#define DRV_NAME "mpc512x_dma" + +/* Convert struct dma_chan to struct mpc_dma_chan */ +static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c) +{ + return container_of(c, struct mpc_dma_chan, chan); +} + +/* Convert struct dma_chan to struct mpc_dma */ +static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c) +{ + struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c); + return container_of(mchan, struct mpc_dma, channels[c->chan_id]); +} + +/* + * Execute all queued DMA descriptors. + * + * Following requirements must be met while calling mpc_dma_execute(): + * a) mchan->lock is acquired, + * b) mchan->active list is empty, + * c) mchan->queued list contains at least one entry. 
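mpc_dma_execute(), defined just after this comment, turns the whole queued list into one hardware transaction: each TCD's dlast_sga is pointed at the next descriptor's physical address with e_sg set, intermediate descriptors get their start bit, and only the last one raises the major-loop interrupt. A reduced standalone sketch of that chaining follows; only the chaining-related fields of the TCD are kept, their names follow the structure above, and the layout here is illustrative rather than hardware-accurate.

#include <stdint.h>
#include <stdio.h>

struct tcd {
	uint32_t dlast_sga;     /* physical address of the next TCD when e_sg is set */
	unsigned int e_sg:1;    /* enable scatter/gather reload of the next TCD */
	unsigned int int_maj:1; /* interrupt when the major loop completes */
	unsigned int start:1;   /* software start bit */
};

static void chain(struct tcd *tcds, const uint32_t *paddrs, int n)
{
	int i;

	for (i = 0; i < n - 1; i++) {
		tcds[i].dlast_sga = paddrs[i + 1];  /* link to the next descriptor */
		tcds[i].e_sg = 1;
		tcds[i + 1].start = 1;              /* run as soon as it is reloaded */
	}
	tcds[n - 1].start = 0;                      /* last one: stop here ... */
	tcds[n - 1].int_maj = 1;                    /* ... and signal completion */
}

int main(void)
{
	struct tcd t[3] = { 0 };
	uint32_t paddr[3] = { 0x1000, 0x1020, 0x1040 };
	int i;

	chain(t, paddr, 3);
	for (i = 0; i < 3; i++)
		printf("tcd[%d]: e_sg=%d start=%d int_maj=%d next=%#x\n",
		       i, t[i].e_sg, t[i].start, t[i].int_maj,
		       (unsigned int)t[i].dlast_sga);
	return 0;
}

Only the first TCD is copied into the channel's hardware TCD slot and started; the controller then walks the rest of the chain through the scatter/gather links.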
+ */ +static void mpc_dma_execute(struct mpc_dma_chan *mchan) +{ + struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan); + struct mpc_dma_desc *first = NULL; + struct mpc_dma_desc *prev = NULL; + struct mpc_dma_desc *mdesc; + int cid = mchan->chan.chan_id; + + /* Move all queued descriptors to active list */ + list_splice_tail_init(&mchan->queued, &mchan->active); + + /* Chain descriptors into one transaction */ + list_for_each_entry(mdesc, &mchan->active, node) { + if (!first) + first = mdesc; + + if (!prev) { + prev = mdesc; + continue; + } + + prev->tcd->dlast_sga = mdesc->tcd_paddr; + prev->tcd->e_sg = 1; + mdesc->tcd->start = 1; + + prev = mdesc; + } + + prev->tcd->start = 0; + prev->tcd->int_maj = 1; + + /* Send first descriptor in chain into hardware */ + memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd)); + out_8(&mdma->regs->dmassrt, cid); +} + +/* Handle interrupt on one half of DMA controller (32 channels) */ +static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off) +{ + struct mpc_dma_chan *mchan; + struct mpc_dma_desc *mdesc; + u32 status = is | es; + int ch; + + while ((ch = fls(status) - 1) >= 0) { + status &= ~(1 << ch); + mchan = &mdma->channels[ch + off]; + + spin_lock(&mchan->lock); + + /* Check error status */ + if (es & (1 << ch)) + list_for_each_entry(mdesc, &mchan->active, node) + mdesc->error = -EIO; + + /* Execute queued descriptors */ + list_splice_tail_init(&mchan->active, &mchan->completed); + if (!list_empty(&mchan->queued)) + mpc_dma_execute(mchan); + + spin_unlock(&mchan->lock); + } +} + +/* Interrupt handler */ +static irqreturn_t mpc_dma_irq(int irq, void *data) +{ + struct mpc_dma *mdma = data; + uint es; + + /* Save error status register */ + es = in_be32(&mdma->regs->dmaes); + spin_lock(&mdma->error_status_lock); + if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0) + mdma->error_status = es; + spin_unlock(&mdma->error_status_lock); + + /* Handle interrupt on each channel */ + mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth), + in_be32(&mdma->regs->dmaerrh), 32); + mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl), + in_be32(&mdma->regs->dmaerrl), 0); + + /* Ack interrupt on all channels */ + out_be32(&mdma->regs->dmainth, 0xFFFFFFFF); + out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF); + out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF); + out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF); + + /* Schedule tasklet */ + tasklet_schedule(&mdma->tasklet); + + return IRQ_HANDLED; +} + +/* DMA Tasklet */ +static void mpc_dma_tasklet(unsigned long data) +{ + struct mpc_dma *mdma = (void *)data; + dma_cookie_t last_cookie = 0; + struct mpc_dma_chan *mchan; + struct mpc_dma_desc *mdesc; + struct dma_async_tx_descriptor *desc; + unsigned long flags; + LIST_HEAD(list); + uint es; + int i; + + spin_lock_irqsave(&mdma->error_status_lock, flags); + es = mdma->error_status; + mdma->error_status = 0; + spin_unlock_irqrestore(&mdma->error_status_lock, flags); + + /* Print nice error report */ + if (es) { + dev_err(mdma->dma.dev, + "Hardware reported following error(s) on channel %u:\n", + MPC_DMA_DMAES_ERRCHN(es)); + + if (es & MPC_DMA_DMAES_GPE) + dev_err(mdma->dma.dev, "- Group Priority Error\n"); + if (es & MPC_DMA_DMAES_CPE) + dev_err(mdma->dma.dev, "- Channel Priority Error\n"); + if (es & MPC_DMA_DMAES_SAE) + dev_err(mdma->dma.dev, "- Source Address Error\n"); + if (es & MPC_DMA_DMAES_SOE) + dev_err(mdma->dma.dev, "- Source Offset" + " Configuration Error\n"); + if (es & MPC_DMA_DMAES_DAE) + dev_err(mdma->dma.dev, "- 
Destination Address" + " Error\n"); + if (es & MPC_DMA_DMAES_DOE) + dev_err(mdma->dma.dev, "- Destination Offset" + " Configuration Error\n"); + if (es & MPC_DMA_DMAES_NCE) + dev_err(mdma->dma.dev, "- NBytes/Citter" + " Configuration Error\n"); + if (es & MPC_DMA_DMAES_SGE) + dev_err(mdma->dma.dev, "- Scatter/Gather" + " Configuration Error\n"); + if (es & MPC_DMA_DMAES_SBE) + dev_err(mdma->dma.dev, "- Source Bus Error\n"); + if (es & MPC_DMA_DMAES_DBE) + dev_err(mdma->dma.dev, "- Destination Bus Error\n"); + } + + for (i = 0; i < mdma->dma.chancnt; i++) { + mchan = &mdma->channels[i]; + + /* Get all completed descriptors */ + spin_lock_irqsave(&mchan->lock, flags); + if (!list_empty(&mchan->completed)) + list_splice_tail_init(&mchan->completed, &list); + spin_unlock_irqrestore(&mchan->lock, flags); + + if (list_empty(&list)) + continue; + + /* Execute callbacks and run dependencies */ + list_for_each_entry(mdesc, &list, node) { + desc = &mdesc->desc; + + if (desc->callback) + desc->callback(desc->callback_param); + + last_cookie = desc->cookie; + dma_run_dependencies(desc); + } + + /* Free descriptors */ + spin_lock_irqsave(&mchan->lock, flags); + list_splice_tail_init(&list, &mchan->free); + mchan->completed_cookie = last_cookie; + spin_unlock_irqrestore(&mchan->lock, flags); + } +} + +/* Submit descriptor to hardware */ +static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd) +{ + struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan); + struct mpc_dma_desc *mdesc; + unsigned long flags; + dma_cookie_t cookie; + + mdesc = container_of(txd, struct mpc_dma_desc, desc); + + spin_lock_irqsave(&mchan->lock, flags); + + /* Move descriptor to queue */ + list_move_tail(&mdesc->node, &mchan->queued); + + /* If channel is idle, execute all queued descriptors */ + if (list_empty(&mchan->active)) + mpc_dma_execute(mchan); + + /* Update cookie */ + cookie = mchan->chan.cookie + 1; + if (cookie <= 0) + cookie = 1; + + mchan->chan.cookie = cookie; + mdesc->desc.cookie = cookie; + + spin_unlock_irqrestore(&mchan->lock, flags); + + return cookie; +} + +/* Alloc channel resources */ +static int mpc_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan); + struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); + struct mpc_dma_desc *mdesc; + struct mpc_dma_tcd *tcd; + dma_addr_t tcd_paddr; + unsigned long flags; + LIST_HEAD(descs); + int i; + + /* Alloc DMA memory for Transfer Control Descriptors */ + tcd = dma_alloc_coherent(mdma->dma.dev, + MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd), + &tcd_paddr, GFP_KERNEL); + if (!tcd) + return -ENOMEM; + + /* Alloc descriptors for this channel */ + for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) { + mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL); + if (!mdesc) { + dev_notice(mdma->dma.dev, "Memory allocation error. 
" + "Allocated only %u descriptors\n", i); + break; + } + + dma_async_tx_descriptor_init(&mdesc->desc, chan); + mdesc->desc.flags = DMA_CTRL_ACK; + mdesc->desc.tx_submit = mpc_dma_tx_submit; + + mdesc->tcd = &tcd[i]; + mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd)); + + list_add_tail(&mdesc->node, &descs); + } + + /* Return error only if no descriptors were allocated */ + if (i == 0) { + dma_free_coherent(mdma->dma.dev, + MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd), + tcd, tcd_paddr); + return -ENOMEM; + } + + spin_lock_irqsave(&mchan->lock, flags); + mchan->tcd = tcd; + mchan->tcd_paddr = tcd_paddr; + list_splice_tail_init(&descs, &mchan->free); + spin_unlock_irqrestore(&mchan->lock, flags); + + /* Enable Error Interrupt */ + out_8(&mdma->regs->dmaseei, chan->chan_id); + + return 0; +} + +/* Free channel resources */ +static void mpc_dma_free_chan_resources(struct dma_chan *chan) +{ + struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan); + struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); + struct mpc_dma_desc *mdesc, *tmp; + struct mpc_dma_tcd *tcd; + dma_addr_t tcd_paddr; + unsigned long flags; + LIST_HEAD(descs); + + spin_lock_irqsave(&mchan->lock, flags); + + /* Channel must be idle */ + BUG_ON(!list_empty(&mchan->prepared)); + BUG_ON(!list_empty(&mchan->queued)); + BUG_ON(!list_empty(&mchan->active)); + BUG_ON(!list_empty(&mchan->completed)); + + /* Move data */ + list_splice_tail_init(&mchan->free, &descs); + tcd = mchan->tcd; + tcd_paddr = mchan->tcd_paddr; + + spin_unlock_irqrestore(&mchan->lock, flags); + + /* Free DMA memory used by descriptors */ + dma_free_coherent(mdma->dma.dev, + MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd), + tcd, tcd_paddr); + + /* Free descriptors */ + list_for_each_entry_safe(mdesc, tmp, &descs, node) + kfree(mdesc); + + /* Disable Error Interrupt */ + out_8(&mdma->regs->dmaceei, chan->chan_id); +} + +/* Send all pending descriptor to hardware */ +static void mpc_dma_issue_pending(struct dma_chan *chan) +{ + /* + * We are posting descriptors to the hardware as soon as + * they are ready, so this function does nothing. 
+ */ +} + +/* Check request completion status */ +static enum dma_status +mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie, + dma_cookie_t *done, dma_cookie_t *used) +{ + struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); + unsigned long flags; + dma_cookie_t last_used; + dma_cookie_t last_complete; + + spin_lock_irqsave(&mchan->lock, flags); + last_used = mchan->chan.cookie; + last_complete = mchan->completed_cookie; + spin_unlock_irqrestore(&mchan->lock, flags); + + if (done) + *done = last_complete; + + if (used) + *used = last_used; + + return dma_async_is_complete(cookie, last_complete, last_used); +} + +/* Prepare descriptor for memory to memory copy */ +static struct dma_async_tx_descriptor * +mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); + struct mpc_dma_desc *mdesc = NULL; + struct mpc_dma_tcd *tcd; + unsigned long iflags; + + /* Get free descriptor */ + spin_lock_irqsave(&mchan->lock, iflags); + if (!list_empty(&mchan->free)) { + mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc, + node); + list_del(&mdesc->node); + } + spin_unlock_irqrestore(&mchan->lock, iflags); + + if (!mdesc) + return NULL; + + mdesc->error = 0; + tcd = mdesc->tcd; + + /* Prepare Transfer Control Descriptor for this transaction */ + memset(tcd, 0, sizeof(struct mpc_dma_tcd)); + + if (IS_ALIGNED(src | dst | len, 32)) { + tcd->ssize = MPC_DMA_TSIZE_32; + tcd->dsize = MPC_DMA_TSIZE_32; + tcd->soff = 32; + tcd->doff = 32; + } else if (IS_ALIGNED(src | dst | len, 16)) { + tcd->ssize = MPC_DMA_TSIZE_16; + tcd->dsize = MPC_DMA_TSIZE_16; + tcd->soff = 16; + tcd->doff = 16; + } else if (IS_ALIGNED(src | dst | len, 4)) { + tcd->ssize = MPC_DMA_TSIZE_4; + tcd->dsize = MPC_DMA_TSIZE_4; + tcd->soff = 4; + tcd->doff = 4; + } else if (IS_ALIGNED(src | dst | len, 2)) { + tcd->ssize = MPC_DMA_TSIZE_2; + tcd->dsize = MPC_DMA_TSIZE_2; + tcd->soff = 2; + tcd->doff = 2; + } else { + tcd->ssize = MPC_DMA_TSIZE_1; + tcd->dsize = MPC_DMA_TSIZE_1; + tcd->soff = 1; + tcd->doff = 1; + } + + tcd->saddr = src; + tcd->daddr = dst; + tcd->nbytes = len; + tcd->biter = 1; + tcd->citer = 1; + + /* Place descriptor in prepared list */ + spin_lock_irqsave(&mchan->lock, iflags); + list_add_tail(&mdesc->node, &mchan->prepared); + spin_unlock_irqrestore(&mchan->lock, iflags); + + return &mdesc->desc; +} + +static int __devinit mpc_dma_probe(struct of_device *op, + const struct of_device_id *match) +{ + struct device_node *dn = op->node; + struct device *dev = &op->dev; + struct dma_device *dma; + struct mpc_dma *mdma; + struct mpc_dma_chan *mchan; + struct resource res; + ulong regs_start, regs_size; + int retval, i; + + mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL); + if (!mdma) { + dev_err(dev, "Memory exhausted!\n"); + return -ENOMEM; + } + + mdma->irq = irq_of_parse_and_map(dn, 0); + if (mdma->irq == NO_IRQ) { + dev_err(dev, "Error mapping IRQ!\n"); + return -EINVAL; + } + + retval = of_address_to_resource(dn, 0, &res); + if (retval) { + dev_err(dev, "Error parsing memory region!\n"); + return retval; + } + + regs_start = res.start; + regs_size = res.end - res.start + 1; + + if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) { + dev_err(dev, "Error requesting memory region!\n"); + return -EBUSY; + } + + mdma->regs = devm_ioremap(dev, regs_start, regs_size); + if (!mdma->regs) { + dev_err(dev, "Error mapping memory region!\n"); + return -ENOMEM; + } 
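mpc_dma_prep_memcpy() above sizes each access by OR-ing the source address, destination address and length together and testing the low bits, so the widest alignment shared by all three wins (note the cascade skips 8-byte bursts). A standalone sketch of that selection follows; the TSIZE codes mirror the defines at the top of the new driver, while pick_tsize() itself is an invented helper.

#include <stdint.h>
#include <stdio.h>

enum { TSIZE_1 = 0x00, TSIZE_2 = 0x01, TSIZE_4 = 0x02, TSIZE_16 = 0x04, TSIZE_32 = 0x05 };

static int pick_tsize(uint64_t src, uint64_t dst, uint64_t len, unsigned int *stride)
{
	uint64_t all = src | dst | len; /* a low bit is clear only if clear in all three */

	if (!(all & 31)) { *stride = 32; return TSIZE_32; }
	if (!(all & 15)) { *stride = 16; return TSIZE_16; }
	if (!(all & 3))  { *stride = 4;  return TSIZE_4; }
	if (!(all & 1))  { *stride = 2;  return TSIZE_2; }
	*stride = 1;
	return TSIZE_1;
}

int main(void)
{
	unsigned int stride;
	int code = pick_tsize(0x1000, 0x2000, 256, &stride);

	printf("tsize code %#x, soff/doff %u\n", code, stride);
	return 0;
}

The single OR keeps the test cheap: any one of the three operands being misaligned sets a low bit and rules out the larger burst sizes, and the chosen stride is then reused for both soff and doff.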
+ + mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs) + + MPC_DMA_TCD_OFFSET); + + retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME, + mdma); + if (retval) { + dev_err(dev, "Error requesting IRQ!\n"); + return -EINVAL; + } + + spin_lock_init(&mdma->error_status_lock); + + dma = &mdma->dma; + dma->dev = dev; + dma->chancnt = MPC_DMA_CHANNELS; + dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources; + dma->device_free_chan_resources = mpc_dma_free_chan_resources; + dma->device_issue_pending = mpc_dma_issue_pending; + dma->device_is_tx_complete = mpc_dma_is_tx_complete; + dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy; + + INIT_LIST_HEAD(&dma->channels); + dma_cap_set(DMA_MEMCPY, dma->cap_mask); + + for (i = 0; i < dma->chancnt; i++) { + mchan = &mdma->channels[i]; + + mchan->chan.device = dma; + mchan->chan.chan_id = i; + mchan->chan.cookie = 1; + mchan->completed_cookie = mchan->chan.cookie; + + INIT_LIST_HEAD(&mchan->free); + INIT_LIST_HEAD(&mchan->prepared); + INIT_LIST_HEAD(&mchan->queued); + INIT_LIST_HEAD(&mchan->active); + INIT_LIST_HEAD(&mchan->completed); + + spin_lock_init(&mchan->lock); + list_add_tail(&mchan->chan.device_node, &dma->channels); + } + + tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma); + + /* + * Configure DMA Engine: + * - Dynamic clock, + * - Round-robin group arbitration, + * - Round-robin channel arbitration. + */ + out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG | + MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA); + + /* Disable hardware DMA requests */ + out_be32(&mdma->regs->dmaerqh, 0); + out_be32(&mdma->regs->dmaerql, 0); + + /* Disable error interrupts */ + out_be32(&mdma->regs->dmaeeih, 0); + out_be32(&mdma->regs->dmaeeil, 0); + + /* Clear interrupts status */ + out_be32(&mdma->regs->dmainth, 0xFFFFFFFF); + out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF); + out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF); + out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF); + + /* Route interrupts to IPIC */ + out_be32(&mdma->regs->dmaihsa, 0); + out_be32(&mdma->regs->dmailsa, 0); + + /* Register DMA engine */ + dev_set_drvdata(dev, mdma); + retval = dma_async_device_register(dma); + if (retval) { + devm_free_irq(dev, mdma->irq, mdma); + irq_dispose_mapping(mdma->irq); + } + + return retval; +} + +static int __devexit mpc_dma_remove(struct of_device *op) +{ + struct device *dev = &op->dev; + struct mpc_dma *mdma = dev_get_drvdata(dev); + + dma_async_device_unregister(&mdma->dma); + devm_free_irq(dev, mdma->irq, mdma); + irq_dispose_mapping(mdma->irq); + + return 0; +} + +static struct of_device_id mpc_dma_match[] = { + { .compatible = "fsl,mpc5121-dma", }, + {}, +}; + +static struct of_platform_driver mpc_dma_driver = { + .match_table = mpc_dma_match, + .probe = mpc_dma_probe, + .remove = __devexit_p(mpc_dma_remove), + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + }, +}; + +static int __init mpc_dma_init(void) +{ + return of_register_platform_driver(&mpc_dma_driver); +} +module_init(mpc_dma_init); + +static void __exit mpc_dma_exit(void) +{ + of_unregister_platform_driver(&mpc_dma_driver); +} +module_exit(mpc_dma_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>"); diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c index 0a3478e910f..e69d87f24a2 100644 --- a/drivers/dma/ppc4xx/adma.c +++ b/drivers/dma/ppc4xx/adma.c @@ -4940,7 +4940,7 @@ out_free: return ret; } -static struct of_device_id __devinitdata ppc440spe_adma_of_match[] = { +static const struct of_device_id 
ppc440spe_adma_of_match[] __devinitconst = { { .compatible = "ibm,dma-440spe", }, { .compatible = "amcc,xor-accelerator", }, {}, diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 3391e6739d0..cf17dbb8014 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -13,7 +13,7 @@ module_param(report_gart_errors, int, 0644); static int ecc_enable_override; module_param(ecc_enable_override, int, 0644); -static struct msr *msrs; +static struct msr __percpu *msrs; /* Lookup table for all possible MC control instances */ struct amd64_pvt; @@ -2553,14 +2553,14 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on) if (on) { if (reg->l & K8_MSR_MCGCTL_NBE) - pvt->flags.ecc_report = 1; + pvt->flags.nb_mce_enable = 1; reg->l |= K8_MSR_MCGCTL_NBE; } else { /* - * Turn off ECC reporting only when it was off before + * Turn off NB MCE reporting only when it was off before */ - if (!pvt->flags.ecc_report) + if (!pvt->flags.nb_mce_enable) reg->l &= ~K8_MSR_MCGCTL_NBE; } } @@ -2571,22 +2571,11 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on) return 0; } -/* - * Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we" - * enable it. - */ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci) { struct amd64_pvt *pvt = mci->pvt_info; u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn; - if (!ecc_enable_override) - return; - - amd64_printk(KERN_WARNING, - "'ecc_enable_override' parameter is active, " - "Enabling AMD ECC hardware now: CAUTION\n"); - amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value); /* turn on UECCn and CECCEn bits */ @@ -2611,6 +2600,8 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci) "This node reports that DRAM ECC is " "currently Disabled; ENABLING now\n"); + pvt->flags.nb_ecc_prev = 0; + /* Attempt to turn on DRAM ECC Enable */ value |= K8_NBCFG_ECC_ENABLE; pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value); @@ -2625,7 +2616,10 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci) amd64_printk(KERN_DEBUG, "Hardware accepted DRAM ECC Enable\n"); } + } else { + pvt->flags.nb_ecc_prev = 1; } + debugf0("NBCFG(2)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value, (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled", (value & K8_NBCFG_ECC_ENABLE) ? 
"Enabled" : "Disabled"); @@ -2644,12 +2638,18 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt) value &= ~mask; value |= pvt->old_nbctl; - /* restore the NB Enable MCGCTL bit */ pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value); + /* restore previous BIOS DRAM ECC "off" setting which we force-enabled */ + if (!pvt->flags.nb_ecc_prev) { + amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value); + value &= ~K8_NBCFG_ECC_ENABLE; + pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value); + } + + /* restore the NB Enable MCGCTL bit */ if (amd64_toggle_ecc_err_reporting(pvt, OFF)) - amd64_printk(KERN_WARNING, "Error restoring ECC reporting over " - "MCGCTL!\n"); + amd64_printk(KERN_WARNING, "Error restoring NB MCGCTL settings!\n"); } /* @@ -2690,8 +2690,9 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) if (!ecc_enable_override) { amd64_printk(KERN_NOTICE, "%s", ecc_msg); return -ENODEV; + } else { + amd64_printk(KERN_WARNING, "Forcing ECC checking on!\n"); } - ecc_enable_override = 0; } return 0; diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 41bc561e598..0d4bf563824 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h @@ -487,7 +487,8 @@ struct amd64_pvt { /* misc settings */ struct flags { unsigned long cf8_extcfg:1; - unsigned long ecc_report:1; + unsigned long nb_mce_enable:1; + unsigned long nb_ecc_prev:1; } flags; }; diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index 4eeaed57e21..8be720b278b 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -25,6 +25,7 @@ #include <linux/firewire.h> #include <linux/firewire-cdev.h> #include <linux/idr.h> +#include <linux/irqflags.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/kref.h> @@ -32,7 +33,6 @@ #include <linux/module.h> #include <linux/mutex.h> #include <linux/poll.h> -#include <linux/preempt.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/string.h> @@ -368,39 +368,56 @@ void fw_device_cdev_remove(struct fw_device *device) for_each_client(device, wake_up_client); } -static int ioctl_get_info(struct client *client, void *buffer) +union ioctl_arg { + struct fw_cdev_get_info get_info; + struct fw_cdev_send_request send_request; + struct fw_cdev_allocate allocate; + struct fw_cdev_deallocate deallocate; + struct fw_cdev_send_response send_response; + struct fw_cdev_initiate_bus_reset initiate_bus_reset; + struct fw_cdev_add_descriptor add_descriptor; + struct fw_cdev_remove_descriptor remove_descriptor; + struct fw_cdev_create_iso_context create_iso_context; + struct fw_cdev_queue_iso queue_iso; + struct fw_cdev_start_iso start_iso; + struct fw_cdev_stop_iso stop_iso; + struct fw_cdev_get_cycle_timer get_cycle_timer; + struct fw_cdev_allocate_iso_resource allocate_iso_resource; + struct fw_cdev_send_stream_packet send_stream_packet; + struct fw_cdev_get_cycle_timer2 get_cycle_timer2; +}; + +static int ioctl_get_info(struct client *client, union ioctl_arg *arg) { - struct fw_cdev_get_info *get_info = buffer; + struct fw_cdev_get_info *a = &arg->get_info; struct fw_cdev_event_bus_reset bus_reset; unsigned long ret = 0; - client->version = get_info->version; - get_info->version = FW_CDEV_VERSION; - get_info->card = client->device->card->index; + client->version = a->version; + a->version = FW_CDEV_VERSION; + a->card = client->device->card->index; down_read(&fw_device_rwsem); - if (get_info->rom != 0) { - void __user *uptr = u64_to_uptr(get_info->rom); - size_t want 
= get_info->rom_length; + if (a->rom != 0) { + size_t want = a->rom_length; size_t have = client->device->config_rom_length * 4; - ret = copy_to_user(uptr, client->device->config_rom, - min(want, have)); + ret = copy_to_user(u64_to_uptr(a->rom), + client->device->config_rom, min(want, have)); } - get_info->rom_length = client->device->config_rom_length * 4; + a->rom_length = client->device->config_rom_length * 4; up_read(&fw_device_rwsem); if (ret != 0) return -EFAULT; - client->bus_reset_closure = get_info->bus_reset_closure; - if (get_info->bus_reset != 0) { - void __user *uptr = u64_to_uptr(get_info->bus_reset); - + client->bus_reset_closure = a->bus_reset_closure; + if (a->bus_reset != 0) { fill_bus_reset_event(&bus_reset, client); - if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset))) + if (copy_to_user(u64_to_uptr(a->bus_reset), + &bus_reset, sizeof(bus_reset))) return -EFAULT; } @@ -571,11 +588,9 @@ static int init_request(struct client *client, return ret; } -static int ioctl_send_request(struct client *client, void *buffer) +static int ioctl_send_request(struct client *client, union ioctl_arg *arg) { - struct fw_cdev_send_request *request = buffer; - - switch (request->tcode) { + switch (arg->send_request.tcode) { case TCODE_WRITE_QUADLET_REQUEST: case TCODE_WRITE_BLOCK_REQUEST: case TCODE_READ_QUADLET_REQUEST: @@ -592,7 +607,7 @@ static int ioctl_send_request(struct client *client, void *buffer) return -EINVAL; } - return init_request(client, request, client->device->node_id, + return init_request(client, &arg->send_request, client->device->node_id, client->device->max_speed); } @@ -683,9 +698,9 @@ static void release_address_handler(struct client *client, kfree(r); } -static int ioctl_allocate(struct client *client, void *buffer) +static int ioctl_allocate(struct client *client, union ioctl_arg *arg) { - struct fw_cdev_allocate *request = buffer; + struct fw_cdev_allocate *a = &arg->allocate; struct address_handler_resource *r; struct fw_address_region region; int ret; @@ -694,13 +709,13 @@ static int ioctl_allocate(struct client *client, void *buffer) if (r == NULL) return -ENOMEM; - region.start = request->offset; - region.end = request->offset + request->length; - r->handler.length = request->length; + region.start = a->offset; + region.end = a->offset + a->length; + r->handler.length = a->length; r->handler.address_callback = handle_request; - r->handler.callback_data = r; - r->closure = request->closure; - r->client = client; + r->handler.callback_data = r; + r->closure = a->closure; + r->client = client; ret = fw_core_add_address_handler(&r->handler, ®ion); if (ret < 0) { @@ -714,27 +729,25 @@ static int ioctl_allocate(struct client *client, void *buffer) release_address_handler(client, &r->resource); return ret; } - request->handle = r->resource.handle; + a->handle = r->resource.handle; return 0; } -static int ioctl_deallocate(struct client *client, void *buffer) +static int ioctl_deallocate(struct client *client, union ioctl_arg *arg) { - struct fw_cdev_deallocate *request = buffer; - - return release_client_resource(client, request->handle, + return release_client_resource(client, arg->deallocate.handle, release_address_handler, NULL); } -static int ioctl_send_response(struct client *client, void *buffer) +static int ioctl_send_response(struct client *client, union ioctl_arg *arg) { - struct fw_cdev_send_response *request = buffer; + struct fw_cdev_send_response *a = &arg->send_response; struct client_resource *resource; struct inbound_transaction_resource *r; int ret 
= 0; - if (release_client_resource(client, request->handle, + if (release_client_resource(client, a->handle, release_request, &resource) < 0) return -EINVAL; @@ -743,28 +756,24 @@ static int ioctl_send_response(struct client *client, void *buffer) if (is_fcp_request(r->request)) goto out; - if (request->length < r->length) - r->length = request->length; - if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) { + if (a->length < r->length) + r->length = a->length; + if (copy_from_user(r->data, u64_to_uptr(a->data), r->length)) { ret = -EFAULT; kfree(r->request); goto out; } - fw_send_response(client->device->card, r->request, request->rcode); + fw_send_response(client->device->card, r->request, a->rcode); out: kfree(r); return ret; } -static int ioctl_initiate_bus_reset(struct client *client, void *buffer) +static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg) { - struct fw_cdev_initiate_bus_reset *request = buffer; - int short_reset; - - short_reset = (request->type == FW_CDEV_SHORT_RESET); - - return fw_core_initiate_bus_reset(client->device->card, short_reset); + return fw_core_initiate_bus_reset(client->device->card, + arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET); } static void release_descriptor(struct client *client, @@ -777,9 +786,9 @@ static void release_descriptor(struct client *client, kfree(r); } -static int ioctl_add_descriptor(struct client *client, void *buffer) +static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg) { - struct fw_cdev_add_descriptor *request = buffer; + struct fw_cdev_add_descriptor *a = &arg->add_descriptor; struct descriptor_resource *r; int ret; @@ -787,22 +796,21 @@ static int ioctl_add_descriptor(struct client *client, void *buffer) if (!client->device->is_local) return -ENOSYS; - if (request->length > 256) + if (a->length > 256) return -EINVAL; - r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL); + r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL); if (r == NULL) return -ENOMEM; - if (copy_from_user(r->data, - u64_to_uptr(request->data), request->length * 4)) { + if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) { ret = -EFAULT; goto failed; } - r->descriptor.length = request->length; - r->descriptor.immediate = request->immediate; - r->descriptor.key = request->key; + r->descriptor.length = a->length; + r->descriptor.immediate = a->immediate; + r->descriptor.key = a->key; r->descriptor.data = r->data; ret = fw_core_add_descriptor(&r->descriptor); @@ -815,7 +823,7 @@ static int ioctl_add_descriptor(struct client *client, void *buffer) fw_core_remove_descriptor(&r->descriptor); goto failed; } - request->handle = r->resource.handle; + a->handle = r->resource.handle; return 0; failed: @@ -824,11 +832,9 @@ static int ioctl_add_descriptor(struct client *client, void *buffer) return ret; } -static int ioctl_remove_descriptor(struct client *client, void *buffer) +static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg) { - struct fw_cdev_remove_descriptor *request = buffer; - - return release_client_resource(client, request->handle, + return release_client_resource(client, arg->remove_descriptor.handle, release_descriptor, NULL); } @@ -851,49 +857,44 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle, sizeof(e->interrupt) + header_length, NULL, 0); } -static int ioctl_create_iso_context(struct client *client, void *buffer) +static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg) { - struct 
fw_cdev_create_iso_context *request = buffer; + struct fw_cdev_create_iso_context *a = &arg->create_iso_context; struct fw_iso_context *context; /* We only support one context at this time. */ if (client->iso_context != NULL) return -EBUSY; - if (request->channel > 63) + if (a->channel > 63) return -EINVAL; - switch (request->type) { + switch (a->type) { case FW_ISO_CONTEXT_RECEIVE: - if (request->header_size < 4 || (request->header_size & 3)) + if (a->header_size < 4 || (a->header_size & 3)) return -EINVAL; - break; case FW_ISO_CONTEXT_TRANSMIT: - if (request->speed > SCODE_3200) + if (a->speed > SCODE_3200) return -EINVAL; - break; default: return -EINVAL; } - context = fw_iso_context_create(client->device->card, - request->type, - request->channel, - request->speed, - request->header_size, - iso_callback, client); + context = fw_iso_context_create(client->device->card, a->type, + a->channel, a->speed, a->header_size, + iso_callback, client); if (IS_ERR(context)) return PTR_ERR(context); - client->iso_closure = request->closure; + client->iso_closure = a->closure; client->iso_context = context; /* We only support one context at this time. */ - request->handle = 0; + a->handle = 0; return 0; } @@ -906,9 +907,9 @@ static int ioctl_create_iso_context(struct client *client, void *buffer) #define GET_SY(v) (((v) >> 20) & 0x0f) #define GET_HEADER_LENGTH(v) (((v) >> 24) & 0xff) -static int ioctl_queue_iso(struct client *client, void *buffer) +static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg) { - struct fw_cdev_queue_iso *request = buffer; + struct fw_cdev_queue_iso *a = &arg->queue_iso; struct fw_cdev_iso_packet __user *p, *end, *next; struct fw_iso_context *ctx = client->iso_context; unsigned long payload, buffer_end, header_length; @@ -919,7 +920,7 @@ static int ioctl_queue_iso(struct client *client, void *buffer) u8 header[256]; } u; - if (ctx == NULL || request->handle != 0) + if (ctx == NULL || a->handle != 0) return -EINVAL; /* @@ -929,23 +930,23 @@ static int ioctl_queue_iso(struct client *client, void *buffer) * set them both to 0, which will still let packets with * payload_length == 0 through. In other words, if no packets * use the indirect payload, the iso buffer need not be mapped - * and the request->data pointer is ignored. + * and the a->data pointer is ignored. 
*/ - payload = (unsigned long)request->data - client->vm_start; + payload = (unsigned long)a->data - client->vm_start; buffer_end = client->buffer.page_count << PAGE_SHIFT; - if (request->data == 0 || client->buffer.pages == NULL || + if (a->data == 0 || client->buffer.pages == NULL || payload >= buffer_end) { payload = 0; buffer_end = 0; } - p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets); + p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets); - if (!access_ok(VERIFY_READ, p, request->size)) + if (!access_ok(VERIFY_READ, p, a->size)) return -EFAULT; - end = (void __user *)p + request->size; + end = (void __user *)p + a->size; count = 0; while (p < end) { if (get_user(control, &p->control)) @@ -995,61 +996,78 @@ static int ioctl_queue_iso(struct client *client, void *buffer) count++; } - request->size -= uptr_to_u64(p) - request->packets; - request->packets = uptr_to_u64(p); - request->data = client->vm_start + payload; + a->size -= uptr_to_u64(p) - a->packets; + a->packets = uptr_to_u64(p); + a->data = client->vm_start + payload; return count; } -static int ioctl_start_iso(struct client *client, void *buffer) +static int ioctl_start_iso(struct client *client, union ioctl_arg *arg) { - struct fw_cdev_start_iso *request = buffer; + struct fw_cdev_start_iso *a = &arg->start_iso; - if (client->iso_context == NULL || request->handle != 0) + if (client->iso_context == NULL || a->handle != 0) return -EINVAL; - if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) { - if (request->tags == 0 || request->tags > 15) - return -EINVAL; - - if (request->sync > 15) - return -EINVAL; - } + if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE && + (a->tags == 0 || a->tags > 15 || a->sync > 15)) + return -EINVAL; - return fw_iso_context_start(client->iso_context, request->cycle, - request->sync, request->tags); + return fw_iso_context_start(client->iso_context, + a->cycle, a->sync, a->tags); } -static int ioctl_stop_iso(struct client *client, void *buffer) +static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg) { - struct fw_cdev_stop_iso *request = buffer; + struct fw_cdev_stop_iso *a = &arg->stop_iso; - if (client->iso_context == NULL || request->handle != 0) + if (client->iso_context == NULL || a->handle != 0) return -EINVAL; return fw_iso_context_stop(client->iso_context); } -static int ioctl_get_cycle_timer(struct client *client, void *buffer) +static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg) { - struct fw_cdev_get_cycle_timer *request = buffer; + struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2; struct fw_card *card = client->device->card; - unsigned long long bus_time; - struct timeval tv; - unsigned long flags; + struct timespec ts = {0, 0}; + u32 cycle_time; + int ret = 0; + + local_irq_disable(); + + cycle_time = card->driver->get_cycle_time(card); - preempt_disable(); - local_irq_save(flags); + switch (a->clk_id) { + case CLOCK_REALTIME: getnstimeofday(&ts); break; + case CLOCK_MONOTONIC: do_posix_clock_monotonic_gettime(&ts); break; + case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts); break; + default: + ret = -EINVAL; + } - bus_time = card->driver->get_bus_time(card); - do_gettimeofday(&tv); + local_irq_enable(); - local_irq_restore(flags); - preempt_enable(); + a->tv_sec = ts.tv_sec; + a->tv_nsec = ts.tv_nsec; + a->cycle_timer = cycle_time; + + return ret; +} + +static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg) +{ + struct fw_cdev_get_cycle_timer *a = 
&arg->get_cycle_timer; + struct fw_cdev_get_cycle_timer2 ct2; + + ct2.clk_id = CLOCK_REALTIME; + ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2); + + a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC; + a->cycle_timer = ct2.cycle_timer; - request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec; - request->cycle_timer = bus_time & 0xffffffff; return 0; } @@ -1220,33 +1238,32 @@ static int init_iso_resource(struct client *client, return ret; } -static int ioctl_allocate_iso_resource(struct client *client, void *buffer) +static int ioctl_allocate_iso_resource(struct client *client, + union ioctl_arg *arg) { - struct fw_cdev_allocate_iso_resource *request = buffer; - - return init_iso_resource(client, request, ISO_RES_ALLOC); + return init_iso_resource(client, + &arg->allocate_iso_resource, ISO_RES_ALLOC); } -static int ioctl_deallocate_iso_resource(struct client *client, void *buffer) +static int ioctl_deallocate_iso_resource(struct client *client, + union ioctl_arg *arg) { - struct fw_cdev_deallocate *request = buffer; - - return release_client_resource(client, request->handle, - release_iso_resource, NULL); + return release_client_resource(client, + arg->deallocate.handle, release_iso_resource, NULL); } -static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer) +static int ioctl_allocate_iso_resource_once(struct client *client, + union ioctl_arg *arg) { - struct fw_cdev_allocate_iso_resource *request = buffer; - - return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE); + return init_iso_resource(client, + &arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE); } -static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer) +static int ioctl_deallocate_iso_resource_once(struct client *client, + union ioctl_arg *arg) { - struct fw_cdev_allocate_iso_resource *request = buffer; - - return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE); + return init_iso_resource(client, + &arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE); } /* @@ -1254,16 +1271,17 @@ static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffe * limited by the device's link speed, the local node's link speed, * and all PHY port speeds between the two links. */ -static int ioctl_get_speed(struct client *client, void *buffer) +static int ioctl_get_speed(struct client *client, union ioctl_arg *arg) { return client->device->max_speed; } -static int ioctl_send_broadcast_request(struct client *client, void *buffer) +static int ioctl_send_broadcast_request(struct client *client, + union ioctl_arg *arg) { - struct fw_cdev_send_request *request = buffer; + struct fw_cdev_send_request *a = &arg->send_request; - switch (request->tcode) { + switch (a->tcode) { case TCODE_WRITE_QUADLET_REQUEST: case TCODE_WRITE_BLOCK_REQUEST: break; @@ -1272,36 +1290,36 @@ static int ioctl_send_broadcast_request(struct client *client, void *buffer) } /* Security policy: Only allow accesses to Units Space. 
*/ - if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END) + if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END) return -EACCES; - return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100); + return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100); } -static int ioctl_send_stream_packet(struct client *client, void *buffer) +static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg) { - struct fw_cdev_send_stream_packet *p = buffer; + struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet; struct fw_cdev_send_request request; int dest; - if (p->speed > client->device->card->link_speed || - p->length > 1024 << p->speed) + if (a->speed > client->device->card->link_speed || + a->length > 1024 << a->speed) return -EIO; - if (p->tag > 3 || p->channel > 63 || p->sy > 15) + if (a->tag > 3 || a->channel > 63 || a->sy > 15) return -EINVAL; - dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy); + dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy); request.tcode = TCODE_STREAM_DATA; - request.length = p->length; - request.closure = p->closure; - request.data = p->data; - request.generation = p->generation; + request.length = a->length; + request.closure = a->closure; + request.data = a->data; + request.generation = a->generation; - return init_request(client, &request, dest, p->speed); + return init_request(client, &request, dest, a->speed); } -static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { +static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = { ioctl_get_info, ioctl_send_request, ioctl_allocate, @@ -1322,47 +1340,35 @@ static int (* const ioctl_handlers[])(struct client *client, void *buffer) = { ioctl_get_speed, ioctl_send_broadcast_request, ioctl_send_stream_packet, + ioctl_get_cycle_timer2, }; static int dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg) { - char buffer[sizeof(union { - struct fw_cdev_get_info _00; - struct fw_cdev_send_request _01; - struct fw_cdev_allocate _02; - struct fw_cdev_deallocate _03; - struct fw_cdev_send_response _04; - struct fw_cdev_initiate_bus_reset _05; - struct fw_cdev_add_descriptor _06; - struct fw_cdev_remove_descriptor _07; - struct fw_cdev_create_iso_context _08; - struct fw_cdev_queue_iso _09; - struct fw_cdev_start_iso _0a; - struct fw_cdev_stop_iso _0b; - struct fw_cdev_get_cycle_timer _0c; - struct fw_cdev_allocate_iso_resource _0d; - struct fw_cdev_send_stream_packet _13; - })]; + union ioctl_arg buffer; int ret; + if (fw_device_is_shutdown(client->device)) + return -ENODEV; + if (_IOC_TYPE(cmd) != '#' || _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers)) return -EINVAL; if (_IOC_DIR(cmd) & _IOC_WRITE) { if (_IOC_SIZE(cmd) > sizeof(buffer) || - copy_from_user(buffer, arg, _IOC_SIZE(cmd))) + copy_from_user(&buffer, arg, _IOC_SIZE(cmd))) return -EFAULT; } - ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer); + ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer); if (ret < 0) return ret; if (_IOC_DIR(cmd) & _IOC_READ) { if (_IOC_SIZE(cmd) > sizeof(buffer) || - copy_to_user(arg, buffer, _IOC_SIZE(cmd))) + copy_to_user(arg, &buffer, _IOC_SIZE(cmd))) return -EFAULT; } @@ -1372,24 +1378,14 @@ static int dispatch_ioctl(struct client *client, static long fw_device_op_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - struct client *client = file->private_data; - - if (fw_device_is_shutdown(client->device)) - return -ENODEV; - - return dispatch_ioctl(client, cmd, (void __user *) arg); + return 
dispatch_ioctl(file->private_data, cmd, (void __user *)arg); } #ifdef CONFIG_COMPAT static long fw_device_op_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - struct client *client = file->private_data; - - if (fw_device_is_shutdown(client->device)) - return -ENODEV; - - return dispatch_ioctl(client, cmd, compat_ptr(arg)); + return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg)); } #endif diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index 9d0dfcbe2c1..014cabd3afd 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c @@ -18,6 +18,7 @@ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ +#include <linux/bug.h> #include <linux/ctype.h> #include <linux/delay.h> #include <linux/device.h> @@ -43,7 +44,7 @@ #include "core.h" -void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p) +void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p) { ci->p = p + 1; ci->end = ci->p + (p[0] >> 16); @@ -59,9 +60,76 @@ int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value) } EXPORT_SYMBOL(fw_csr_iterator_next); +static const u32 *search_leaf(const u32 *directory, int search_key) +{ + struct fw_csr_iterator ci; + int last_key = 0, key, value; + + fw_csr_iterator_init(&ci, directory); + while (fw_csr_iterator_next(&ci, &key, &value)) { + if (last_key == search_key && + key == (CSR_DESCRIPTOR | CSR_LEAF)) + return ci.p - 1 + value; + + last_key = key; + } + + return NULL; +} + +static int textual_leaf_to_string(const u32 *block, char *buf, size_t size) +{ + unsigned int quadlets, i; + char c; + + if (!size || !buf) + return -EINVAL; + + quadlets = min(block[0] >> 16, 256U); + if (quadlets < 2) + return -ENODATA; + + if (block[1] != 0 || block[2] != 0) + /* unknown language/character set */ + return -ENODATA; + + block += 3; + quadlets -= 2; + for (i = 0; i < quadlets * 4 && i < size - 1; i++) { + c = block[i / 4] >> (24 - 8 * (i % 4)); + if (c == '\0') + break; + buf[i] = c; + } + buf[i] = '\0'; + + return i; +} + +/** + * fw_csr_string - reads a string from the configuration ROM + * @directory: e.g. root directory or unit directory + * @key: the key of the preceding directory entry + * @buf: where to put the string + * @size: size of @buf, in bytes + * + * The string is taken from a minimal ASCII text descriptor leaf after + * the immediate entry with @key. The string is zero-terminated. + * Returns strlen(buf) or a negative error code. 
+ */ +int fw_csr_string(const u32 *directory, int key, char *buf, size_t size) +{ + const u32 *leaf = search_leaf(directory, key); + if (!leaf) + return -ENOENT; + + return textual_leaf_to_string(leaf, buf, size); +} +EXPORT_SYMBOL(fw_csr_string); + static bool is_fw_unit(struct device *dev); -static int match_unit_directory(u32 *directory, u32 match_flags, +static int match_unit_directory(const u32 *directory, u32 match_flags, const struct ieee1394_device_id *id) { struct fw_csr_iterator ci; @@ -195,7 +263,7 @@ static ssize_t show_immediate(struct device *dev, struct config_rom_attribute *attr = container_of(dattr, struct config_rom_attribute, attr); struct fw_csr_iterator ci; - u32 *dir; + const u32 *dir; int key, value, ret = -ENOENT; down_read(&fw_device_rwsem); @@ -226,10 +294,10 @@ static ssize_t show_text_leaf(struct device *dev, { struct config_rom_attribute *attr = container_of(dattr, struct config_rom_attribute, attr); - struct fw_csr_iterator ci; - u32 *dir, *block = NULL, *p, *end; - int length, key, value, last_key = 0, ret = -ENOENT; - char *b; + const u32 *dir; + size_t bufsize; + char dummy_buf[2]; + int ret; down_read(&fw_device_rwsem); @@ -238,40 +306,23 @@ static ssize_t show_text_leaf(struct device *dev, else dir = fw_device(dev)->config_rom + 5; - fw_csr_iterator_init(&ci, dir); - while (fw_csr_iterator_next(&ci, &key, &value)) { - if (attr->key == last_key && - key == (CSR_DESCRIPTOR | CSR_LEAF)) - block = ci.p - 1 + value; - last_key = key; + if (buf) { + bufsize = PAGE_SIZE - 1; + } else { + buf = dummy_buf; + bufsize = 1; } - if (block == NULL) - goto out; - - length = min(block[0] >> 16, 256U); - if (length < 3) - goto out; - - if (block[1] != 0 || block[2] != 0) - /* Unknown encoding. */ - goto out; + ret = fw_csr_string(dir, attr->key, buf, bufsize); - if (buf == NULL) { - ret = length * 4; - goto out; + if (ret >= 0) { + /* Strip trailing whitespace and add newline. */ + while (ret > 0 && isspace(buf[ret - 1])) + ret--; + strcpy(buf + ret, "\n"); + ret++; } - b = buf; - end = &block[length + 1]; - for (p = &block[3]; p < end; p++, b += 4) - * (u32 *) b = (__force u32) __cpu_to_be32(*p); - - /* Strip trailing whitespace and add newline. */ - while (b--, (isspace(*b) || *b == '\0') && b > buf); - strcpy(b + 1, "\n"); - ret = b + 2 - buf; - out: up_read(&fw_device_rwsem); return ret; @@ -371,7 +422,7 @@ static ssize_t guid_show(struct device *dev, return ret; } -static int units_sprintf(char *buf, u32 *directory) +static int units_sprintf(char *buf, const u32 *directory) { struct fw_csr_iterator ci; int key, value; @@ -441,28 +492,29 @@ static int read_rom(struct fw_device *device, return rcode; } -#define READ_BIB_ROM_SIZE 256 -#define READ_BIB_STACK_SIZE 16 +#define MAX_CONFIG_ROM_SIZE 256 /* * Read the bus info block, perform a speed probe, and read all of the rest of * the config ROM. We do all this with a cached bus generation. If the bus - * generation changes under us, read_bus_info_block will fail and get retried. + * generation changes under us, read_config_rom will fail and get retried. * It's better to start all over in this case because the node from which we * are reading the ROM may have changed the ROM during the reset. 
*/ -static int read_bus_info_block(struct fw_device *device, int generation) +static int read_config_rom(struct fw_device *device, int generation) { - u32 *rom, *stack, *old_rom, *new_rom; + const u32 *old_rom, *new_rom; + u32 *rom, *stack; u32 sp, key; int i, end, length, ret = -1; - rom = kmalloc(sizeof(*rom) * READ_BIB_ROM_SIZE + - sizeof(*stack) * READ_BIB_STACK_SIZE, GFP_KERNEL); + rom = kmalloc(sizeof(*rom) * MAX_CONFIG_ROM_SIZE + + sizeof(*stack) * MAX_CONFIG_ROM_SIZE, GFP_KERNEL); if (rom == NULL) return -ENOMEM; - stack = &rom[READ_BIB_ROM_SIZE]; + stack = &rom[MAX_CONFIG_ROM_SIZE]; + memset(rom, 0, sizeof(*rom) * MAX_CONFIG_ROM_SIZE); device->max_speed = SCODE_100; @@ -529,40 +581,54 @@ static int read_bus_info_block(struct fw_device *device, int generation) */ key = stack[--sp]; i = key & 0xffffff; - if (i >= READ_BIB_ROM_SIZE) - /* - * The reference points outside the standard - * config rom area, something's fishy. - */ + if (WARN_ON(i >= MAX_CONFIG_ROM_SIZE)) goto out; /* Read header quadlet for the block to get the length. */ if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE) goto out; end = i + (rom[i] >> 16) + 1; - i++; - if (end > READ_BIB_ROM_SIZE) + if (end > MAX_CONFIG_ROM_SIZE) { /* - * This block extends outside standard config - * area (and the array we're reading it - * into). That's broken, so ignore this - * device. + * This block extends outside the config ROM which is + * a firmware bug. Ignore this whole block, i.e. + * simply set a fake block length of 0. */ - goto out; + fw_error("skipped invalid ROM block %x at %llx\n", + rom[i], + i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM); + rom[i] = 0; + end = i; + } + i++; /* * Now read in the block. If this is a directory * block, check the entries as we read them to see if * it references another block, and push it in that case. */ - while (i < end) { + for (; i < end; i++) { if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE) goto out; - if ((key >> 30) == 3 && (rom[i] >> 30) > 1 && - sp < READ_BIB_STACK_SIZE) - stack[sp++] = i + rom[i]; - i++; + + if ((key >> 30) != 3 || (rom[i] >> 30) < 2) + continue; + /* + * Offset points outside the ROM. May be a firmware + * bug or an Extended ROM entry (IEEE 1212-2001 clause + * 7.7.18). Simply overwrite this pointer here by a + * fake immediate entry so that later iterators over + * the ROM don't have to check offsets all the time. + */ + if (i + (rom[i] & 0xffffff) >= MAX_CONFIG_ROM_SIZE) { + fw_error("skipped unsupported ROM entry %x at %llx\n", + rom[i], + i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM); + rom[i] = 0; + continue; + } + stack[sp++] = i + rom[i]; } if (length < i) length = i; @@ -905,7 +971,7 @@ static void fw_device_init(struct work_struct *work) * device. 
*/ - if (read_bus_info_block(device, device->generation) < 0) { + if (read_config_rom(device, device->generation) < 0) { if (device->config_rom_retries < MAX_RETRIES && atomic_read(&device->state) == FW_DEVICE_INITIALIZING) { device->config_rom_retries++; @@ -1022,7 +1088,7 @@ enum { }; /* Reread and compare bus info block and header of root directory */ -static int reread_bus_info_block(struct fw_device *device, int generation) +static int reread_config_rom(struct fw_device *device, int generation) { u32 q; int i; @@ -1048,7 +1114,7 @@ static void fw_device_refresh(struct work_struct *work) struct fw_card *card = device->card; int node_id = device->node_id; - switch (reread_bus_info_block(device, device->generation)) { + switch (reread_config_rom(device, device->generation)) { case REREAD_BIB_ERROR: if (device->config_rom_retries < MAX_RETRIES / 2 && atomic_read(&device->state) == FW_DEVICE_INITIALIZING) { @@ -1082,7 +1148,7 @@ static void fw_device_refresh(struct work_struct *work) */ device_for_each_child(&device->device, NULL, shutdown_unit); - if (read_bus_info_block(device, device->generation) < 0) { + if (read_config_rom(device, device->generation) < 0) { if (device->config_rom_retries < MAX_RETRIES && atomic_read(&device->state) == FW_DEVICE_INITIALIZING) { device->config_rom_retries++; diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index 495849eb13c..673b03f8b4e 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -921,23 +921,15 @@ static void handle_registers(struct fw_card *card, struct fw_request *request, void *payload, size_t length, void *callback_data) { int reg = offset & ~CSR_REGISTER_BASE; - unsigned long long bus_time; __be32 *data = payload; int rcode = RCODE_COMPLETE; switch (reg) { case CSR_CYCLE_TIME: - case CSR_BUS_TIME: - if (!TCODE_IS_READ_REQUEST(tcode) || length != 4) { - rcode = RCODE_TYPE_ERROR; - break; - } - - bus_time = card->driver->get_bus_time(card); - if (reg == CSR_CYCLE_TIME) - *data = cpu_to_be32(bus_time); + if (TCODE_IS_READ_REQUEST(tcode) && length == 4) + *data = cpu_to_be32(card->driver->get_cycle_time(card)); else - *data = cpu_to_be32(bus_time >> 25); + rcode = RCODE_TYPE_ERROR; break; case CSR_BROADCAST_CHANNEL: @@ -968,6 +960,9 @@ static void handle_registers(struct fw_card *card, struct fw_request *request, case CSR_BUSY_TIMEOUT: /* FIXME: Implement this. */ + case CSR_BUS_TIME: + /* Useless without initialization by the bus manager. 
*/ + default: rcode = RCODE_ADDRESS_ERROR; break; diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h index ed3b1a765c0..fb0321300cc 100644 --- a/drivers/firewire/core.h +++ b/drivers/firewire/core.h @@ -70,7 +70,7 @@ struct fw_card_driver { int (*enable_phys_dma)(struct fw_card *card, int node_id, int generation); - u64 (*get_bus_time)(struct fw_card *card); + u32 (*get_cycle_time)(struct fw_card *card); struct fw_iso_context * (*allocate_iso_context)(struct fw_card *card, diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 43ebf337b13..75dc6988cff 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -38,7 +38,6 @@ #include <linux/spinlock.h> #include <linux/string.h> -#include <asm/atomic.h> #include <asm/byteorder.h> #include <asm/page.h> #include <asm/system.h> @@ -73,20 +72,6 @@ struct descriptor { __le16 transfer_status; } __attribute__((aligned(16))); -struct db_descriptor { - __le16 first_size; - __le16 control; - __le16 second_req_count; - __le16 first_req_count; - __le32 branch_address; - __le16 second_res_count; - __le16 first_res_count; - __le32 reserved0; - __le32 first_buffer; - __le32 second_buffer; - __le32 reserved1; -} __attribute__((aligned(16))); - #define CONTROL_SET(regs) (regs) #define CONTROL_CLEAR(regs) ((regs) + 4) #define COMMAND_PTR(regs) ((regs) + 12) @@ -181,31 +166,16 @@ struct fw_ohci { struct fw_card card; __iomem char *registers; - dma_addr_t self_id_bus; - __le32 *self_id_cpu; - struct tasklet_struct bus_reset_tasklet; int node_id; int generation; int request_generation; /* for timestamping incoming requests */ - atomic_t bus_seconds; - - bool use_dualbuffer; - bool old_uninorth; - bool bus_reset_packet_quirk; + unsigned quirks; /* * Spinlock for accessing fw_ohci data. Never call out of * this driver with this lock held. */ spinlock_t lock; - u32 self_id_buffer[512]; - - /* Config rom buffers */ - __be32 *config_rom; - dma_addr_t config_rom_bus; - __be32 *next_config_rom; - dma_addr_t next_config_rom_bus; - __be32 next_header; struct ar_context ar_request_ctx; struct ar_context ar_response_ctx; @@ -217,6 +187,18 @@ struct fw_ohci { u64 ir_context_channels; u32 ir_context_mask; struct iso_context *ir_context_list; + + __be32 *config_rom; + dma_addr_t config_rom_bus; + __be32 *next_config_rom; + dma_addr_t next_config_rom_bus; + __be32 next_header; + + __le32 *self_id_cpu; + dma_addr_t self_id_bus; + struct tasklet_struct bus_reset_tasklet; + + u32 self_id_buffer[512]; }; static inline struct fw_ohci *fw_ohci(struct fw_card *card) @@ -249,6 +231,30 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card) static char ohci_driver_name[] = KBUILD_MODNAME; +#define QUIRK_CYCLE_TIMER 1 +#define QUIRK_RESET_PACKET 2 +#define QUIRK_BE_HEADERS 4 + +/* In case of multiple matches in ohci_quirks[], only the first one is used. */ +static const struct { + unsigned short vendor, device, flags; +} ohci_quirks[] = { + {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET}, + {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, + {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, + {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER}, + {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS}, +}; + +/* This overrides anything that was found in ohci_quirks[]. 
*/ +static int param_quirks; +module_param_named(quirks, param_quirks, int, 0644); +MODULE_PARM_DESC(quirks, "Chip quirks (default = 0" + ", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER) + ", reset packet generation = " __stringify(QUIRK_RESET_PACKET) + ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS) + ")"); + #ifdef CONFIG_FIREWIRE_OHCI_DEBUG #define OHCI_PARAM_DEBUG_AT_AR 1 @@ -275,7 +281,7 @@ static void log_irqs(u32 evt) !(evt & OHCI1394_busReset)) return; - fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, + fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, evt & OHCI1394_selfIDComplete ? " selfID" : "", evt & OHCI1394_RQPkt ? " AR_req" : "", evt & OHCI1394_RSPkt ? " AR_resp" : "", @@ -285,7 +291,6 @@ static void log_irqs(u32 evt) evt & OHCI1394_isochTx ? " IT" : "", evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "", evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "", - evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "", evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "", evt & OHCI1394_regAccessFail ? " regAccessFail" : "", evt & OHCI1394_busReset ? " busReset" : "", @@ -293,8 +298,7 @@ static void log_irqs(u32 evt) OHCI1394_RSPkt | OHCI1394_reqTxComplete | OHCI1394_respTxComplete | OHCI1394_isochRx | OHCI1394_isochTx | OHCI1394_postedWriteErr | - OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds | - OHCI1394_cycleInconsistent | + OHCI1394_cycleTooLong | OHCI1394_cycleInconsistent | OHCI1394_regAccessFail | OHCI1394_busReset) ? " ?" : ""); } @@ -524,7 +528,7 @@ static void ar_context_release(struct ar_context *ctx) #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32) #define cond_le32_to_cpu(v) \ - (ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v)) + (ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v)) #else #define cond_le32_to_cpu(v) le32_to_cpu(v) #endif @@ -605,7 +609,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) * at a slightly incorrect time (in bus_reset_tasklet). 
*/ if (evt == OHCI1394_evt_bus_reset) { - if (!ohci->bus_reset_packet_quirk) + if (!(ohci->quirks & QUIRK_RESET_PACKET)) ohci->request_generation = (p.header[2] >> 16) & 0xff; } else if (ctx == &ohci->ar_request_ctx) { fw_core_handle_request(&ohci->card, &p); @@ -1329,7 +1333,7 @@ static void bus_reset_tasklet(unsigned long data) context_stop(&ohci->at_response_ctx); reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); - if (ohci->bus_reset_packet_quirk) + if (ohci->quirks & QUIRK_RESET_PACKET) ohci->request_generation = generation; /* @@ -1384,7 +1388,7 @@ static void bus_reset_tasklet(unsigned long data) static irqreturn_t irq_handler(int irq, void *data) { struct fw_ohci *ohci = data; - u32 event, iso_event, cycle_time; + u32 event, iso_event; int i; event = reg_read(ohci, OHCI1394_IntEventClear); @@ -1454,12 +1458,6 @@ static irqreturn_t irq_handler(int irq, void *data) fw_notify("isochronous cycle inconsistent\n"); } - if (event & OHCI1394_cycle64Seconds) { - cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer); - if ((cycle_time & 0x80000000) == 0) - atomic_inc(&ohci->bus_seconds); - } - return IRQ_HANDLED; } @@ -1553,8 +1551,7 @@ static int ohci_enable(struct fw_card *card, OHCI1394_reqTxComplete | OHCI1394_respTxComplete | OHCI1394_isochRx | OHCI1394_isochTx | OHCI1394_postedWriteErr | OHCI1394_cycleTooLong | - OHCI1394_cycleInconsistent | - OHCI1394_cycle64Seconds | OHCI1394_regAccessFail | + OHCI1394_cycleInconsistent | OHCI1394_regAccessFail | OHCI1394_masterIntEnable); if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset); @@ -1794,16 +1791,61 @@ static int ohci_enable_phys_dma(struct fw_card *card, #endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */ } -static u64 ohci_get_bus_time(struct fw_card *card) +static u32 cycle_timer_ticks(u32 cycle_timer) { - struct fw_ohci *ohci = fw_ohci(card); - u32 cycle_time; - u64 bus_time; + u32 ticks; - cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer); - bus_time = ((u64)atomic_read(&ohci->bus_seconds) << 32) | cycle_time; + ticks = cycle_timer & 0xfff; + ticks += 3072 * ((cycle_timer >> 12) & 0x1fff); + ticks += (3072 * 8000) * (cycle_timer >> 25); + + return ticks; +} + +/* + * Some controllers exhibit one or more of the following bugs when updating the + * iso cycle timer register: + * - When the lowest six bits are wrapping around to zero, a read that happens + * at the same time will return garbage in the lowest ten bits. + * - When the cycleOffset field wraps around to zero, the cycleCount field is + * not incremented for about 60 ns. + * - Occasionally, the entire register reads zero. + * + * To catch these, we read the register three times and ensure that the + * difference between each two consecutive reads is approximately the same, i.e. + * less than twice the other. Furthermore, any negative difference indicates an + * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to + * execute, so we have enough precision to compute the ratio of the differences.) 
+ */ +static u32 ohci_get_cycle_time(struct fw_card *card) +{ + struct fw_ohci *ohci = fw_ohci(card); + u32 c0, c1, c2; + u32 t0, t1, t2; + s32 diff01, diff12; + int i; - return bus_time; + c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); + + if (ohci->quirks & QUIRK_CYCLE_TIMER) { + i = 0; + c1 = c2; + c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); + do { + c0 = c1; + c1 = c2; + c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer); + t0 = cycle_timer_ticks(c0); + t1 = cycle_timer_ticks(c1); + t2 = cycle_timer_ticks(c2); + diff01 = t1 - t0; + diff12 = t2 - t1; + } while ((diff01 <= 0 || diff12 <= 0 || + diff01 / diff12 >= 2 || diff12 / diff01 >= 2) + && i++ < 20); + } + + return c2; } static void copy_iso_headers(struct iso_context *ctx, void *p) @@ -1828,52 +1870,6 @@ static void copy_iso_headers(struct iso_context *ctx, void *p) ctx->header_length += ctx->base.header_size; } -static int handle_ir_dualbuffer_packet(struct context *context, - struct descriptor *d, - struct descriptor *last) -{ - struct iso_context *ctx = - container_of(context, struct iso_context, context); - struct db_descriptor *db = (struct db_descriptor *) d; - __le32 *ir_header; - size_t header_length; - void *p, *end; - - if (db->first_res_count != 0 && db->second_res_count != 0) { - if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) { - /* This descriptor isn't done yet, stop iteration. */ - return 0; - } - ctx->excess_bytes -= le16_to_cpu(db->second_req_count); - } - - header_length = le16_to_cpu(db->first_req_count) - - le16_to_cpu(db->first_res_count); - - p = db + 1; - end = p + header_length; - while (p < end) { - copy_iso_headers(ctx, p); - ctx->excess_bytes += - (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff; - p += max(ctx->base.header_size, (size_t)8); - } - - ctx->excess_bytes -= le16_to_cpu(db->second_req_count) - - le16_to_cpu(db->second_res_count); - - if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) { - ir_header = (__le32 *) (db + 1); - ctx->base.callback(&ctx->base, - le32_to_cpu(ir_header[0]) & 0xffff, - ctx->header_length, ctx->header, - ctx->base.callback_data); - ctx->header_length = 0; - } - - return 1; -} - static int handle_ir_packet_per_buffer(struct context *context, struct descriptor *d, struct descriptor *last) @@ -1960,10 +1956,7 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, channels = &ohci->ir_context_channels; mask = &ohci->ir_context_mask; list = ohci->ir_context_list; - if (ohci->use_dualbuffer) - callback = handle_ir_dualbuffer_packet; - else - callback = handle_ir_packet_per_buffer; + callback = handle_ir_packet_per_buffer; } spin_lock_irqsave(&ohci->lock, flags); @@ -2026,8 +2019,6 @@ static int ohci_start_iso(struct fw_iso_context *base, } else { index = ctx - ohci->ir_context_list; control = IR_CONTEXT_ISOCH_HEADER; - if (ohci->use_dualbuffer) - control |= IR_CONTEXT_DUAL_BUFFER_MODE; match = (tags << 28) | (sync << 8) | ctx->base.channel; if (cycle >= 0) { match |= (cycle & 0x07fff) << 12; @@ -2188,92 +2179,6 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base, return 0; } -static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base, - struct fw_iso_packet *packet, - struct fw_iso_buffer *buffer, - unsigned long payload) -{ - struct iso_context *ctx = container_of(base, struct iso_context, base); - struct db_descriptor *db = NULL; - struct descriptor *d; - struct fw_iso_packet *p; - dma_addr_t d_bus, page_bus; - u32 z, header_z, length, rest; - int page, offset, packet_count, header_size; - - 
/* - * FIXME: Cycle lost behavior should be configurable: lose - * packet, retransmit or terminate.. - */ - - p = packet; - z = 2; - - /* - * The OHCI controller puts the isochronous header and trailer in the - * buffer, so we need at least 8 bytes. - */ - packet_count = p->header_length / ctx->base.header_size; - header_size = packet_count * max(ctx->base.header_size, (size_t)8); - - /* Get header size in number of descriptors. */ - header_z = DIV_ROUND_UP(header_size, sizeof(*d)); - page = payload >> PAGE_SHIFT; - offset = payload & ~PAGE_MASK; - rest = p->payload_length; - /* - * The controllers I've tested have not worked correctly when - * second_req_count is zero. Rather than do something we know won't - * work, return an error - */ - if (rest == 0) - return -EINVAL; - - while (rest > 0) { - d = context_get_descriptors(&ctx->context, - z + header_z, &d_bus); - if (d == NULL) - return -ENOMEM; - - db = (struct db_descriptor *) d; - db->control = cpu_to_le16(DESCRIPTOR_STATUS | - DESCRIPTOR_BRANCH_ALWAYS); - db->first_size = - cpu_to_le16(max(ctx->base.header_size, (size_t)8)); - if (p->skip && rest == p->payload_length) { - db->control |= cpu_to_le16(DESCRIPTOR_WAIT); - db->first_req_count = db->first_size; - } else { - db->first_req_count = cpu_to_le16(header_size); - } - db->first_res_count = db->first_req_count; - db->first_buffer = cpu_to_le32(d_bus + sizeof(*db)); - - if (p->skip && rest == p->payload_length) - length = 4; - else if (offset + rest < PAGE_SIZE) - length = rest; - else - length = PAGE_SIZE - offset; - - db->second_req_count = cpu_to_le16(length); - db->second_res_count = db->second_req_count; - page_bus = page_private(buffer->pages[page]); - db->second_buffer = cpu_to_le32(page_bus + offset); - - if (p->interrupt && length == rest) - db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); - - context_append(&ctx->context, d, z, header_z); - offset = (offset + length) & ~PAGE_MASK; - rest -= length; - if (offset == 0) - page++; - } - - return 0; -} - static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base, struct fw_iso_packet *packet, struct fw_iso_buffer *buffer, @@ -2364,9 +2269,6 @@ static int ohci_queue_iso(struct fw_iso_context *base, spin_lock_irqsave(&ctx->context.ohci->lock, flags); if (base->type == FW_ISO_CONTEXT_TRANSMIT) ret = ohci_queue_iso_transmit(base, packet, buffer, payload); - else if (ctx->context.ohci->use_dualbuffer) - ret = ohci_queue_iso_receive_dualbuffer(base, packet, - buffer, payload); else ret = ohci_queue_iso_receive_packet_per_buffer(base, packet, buffer, payload); @@ -2383,7 +2285,7 @@ static const struct fw_card_driver ohci_driver = { .send_response = ohci_send_response, .cancel_packet = ohci_cancel_packet, .enable_phys_dma = ohci_enable_phys_dma, - .get_bus_time = ohci_get_bus_time, + .get_cycle_time = ohci_get_cycle_time, .allocate_iso_context = ohci_allocate_iso_context, .free_iso_context = ohci_free_iso_context, @@ -2421,17 +2323,13 @@ static void ohci_pmac_off(struct pci_dev *dev) #define ohci_pmac_off(dev) #endif /* CONFIG_PPC_PMAC */ -#define PCI_VENDOR_ID_AGERE PCI_VENDOR_ID_ATT -#define PCI_DEVICE_ID_AGERE_FW643 0x5901 -#define PCI_DEVICE_ID_TI_TSB43AB23 0x8024 - static int __devinit pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { struct fw_ohci *ohci; u32 bus_options, max_receive, link_speed, version; u64 guid; - int err; + int i, err, n_ir, n_it; size_t size; ohci = kzalloc(sizeof(*ohci), GFP_KERNEL); @@ -2472,36 +2370,15 @@ static int __devinit pci_probe(struct pci_dev *dev, goto 
fail_iomem; } - version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; -#if 0 - /* FIXME: make it a context option or remove dual-buffer mode */ - ohci->use_dualbuffer = version >= OHCI_VERSION_1_1; -#endif - - /* dual-buffer mode is broken if more than one IR context is active */ - if (dev->vendor == PCI_VENDOR_ID_AGERE && - dev->device == PCI_DEVICE_ID_AGERE_FW643) - ohci->use_dualbuffer = false; - - /* dual-buffer mode is broken */ - if (dev->vendor == PCI_VENDOR_ID_RICOH && - dev->device == PCI_DEVICE_ID_RICOH_R5C832) - ohci->use_dualbuffer = false; - -/* x86-32 currently doesn't use highmem for dma_alloc_coherent */ -#if !defined(CONFIG_X86_32) - /* dual-buffer mode is broken with descriptor addresses above 2G */ - if (dev->vendor == PCI_VENDOR_ID_TI && - (dev->device == PCI_DEVICE_ID_TI_TSB43AB22 || - dev->device == PCI_DEVICE_ID_TI_TSB43AB23)) - ohci->use_dualbuffer = false; -#endif - -#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32) - ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE && - dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW; -#endif - ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI; + for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++) + if (ohci_quirks[i].vendor == dev->vendor && + (ohci_quirks[i].device == dev->device || + ohci_quirks[i].device == (unsigned short)PCI_ANY_ID)) { + ohci->quirks = ohci_quirks[i].flags; + break; + } + if (param_quirks) + ohci->quirks = param_quirks; ar_context_init(&ohci->ar_request_ctx, ohci, OHCI1394_AsReqRcvContextControlSet); @@ -2516,17 +2393,19 @@ static int __devinit pci_probe(struct pci_dev *dev, OHCI1394_AsRspTrContextControlSet, handle_at_packet); reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0); - ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet); + ohci->ir_context_channels = ~0ULL; + ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet); reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0); - size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask); - ohci->it_context_list = kzalloc(size, GFP_KERNEL); + n_ir = hweight32(ohci->ir_context_mask); + size = sizeof(struct iso_context) * n_ir; + ohci->ir_context_list = kzalloc(size, GFP_KERNEL); reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0); - ohci->ir_context_channels = ~0ULL; - ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); + ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0); - size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask); - ohci->ir_context_list = kzalloc(size, GFP_KERNEL); + n_it = hweight32(ohci->it_context_mask); + size = sizeof(struct iso_context) * n_it; + ohci->it_context_list = kzalloc(size, GFP_KERNEL); if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) { err = -ENOMEM; @@ -2553,8 +2432,11 @@ static int __devinit pci_probe(struct pci_dev *dev, if (err) goto fail_self_id; - fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n", - dev_name(&dev->dev), version >> 16, version & 0xff); + version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; + fw_notify("Added fw-ohci device %s, OHCI v%x.%x, " + "%d IR + %d IT contexts, quirks 0x%x\n", + dev_name(&dev->dev), version >> 16, version & 0xff, + n_ir, n_it, ohci->quirks); return 0; @@ -2662,7 +2544,7 @@ static int pci_resume(struct pci_dev *dev) } #endif -static struct pci_device_id pci_table[] = { +static const struct pci_device_id pci_table[] = { { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) }, { } }; diff --git 
a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index 70fef40cd22..ca264f2fdf0 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c @@ -1014,7 +1014,8 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) return 0; } -static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, u32 *directory) +static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, + const u32 *directory) { struct fw_csr_iterator ci; int key, value; @@ -1027,7 +1028,7 @@ static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, u32 *directory) return 0; } -static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory, +static int sbp2_scan_unit_dir(struct sbp2_target *tgt, const u32 *directory, u32 *model, u32 *firmware_revision) { struct fw_csr_iterator ci; diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 39c5aa75b8f..abe3f446ca4 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -4,7 +4,7 @@ ccflags-y := -Iinclude/drm -drm-y := drm_auth.o drm_bufs.o drm_cache.o \ +drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \ drm_context.o drm_dma.o drm_drawable.o \ drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \ drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c new file mode 100644 index 00000000000..55d03ed0500 --- /dev/null +++ b/drivers/gpu/drm/drm_buffer.c @@ -0,0 +1,184 @@ +/************************************************************************** + * + * Copyright 2010 Pauli Nieminen. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * + **************************************************************************/ +/* + * Multipart buffer for coping data which is larger than the page size. + * + * Authors: + * Pauli Nieminen <suokkos-at-gmail-dot-com> + */ + +#include "drm_buffer.h" + +/** + * Allocate the drm buffer object. + * + * buf: Pointer to a pointer where the object is stored. + * size: The number of bytes to allocate. 
+ */
+int drm_buffer_alloc(struct drm_buffer **buf, int size)
+{
+        int nr_pages = size / PAGE_SIZE + 1;
+        int idx;
+
+        /* Allocating the pointer table at the end of the structure makes
+         * drm_buffer variable sized */
+        *buf = kzalloc(sizeof(struct drm_buffer) + nr_pages*sizeof(char *),
+                        GFP_KERNEL);
+
+        if (*buf == NULL) {
+                DRM_ERROR("Failed to allocate drm buffer object to hold"
+                                " %d bytes in %d pages.\n",
+                                size, nr_pages);
+                return -ENOMEM;
+        }
+
+        (*buf)->size = size;
+
+        for (idx = 0; idx < nr_pages; ++idx) {
+
+                (*buf)->data[idx] =
+                        kmalloc(min(PAGE_SIZE, size - idx * PAGE_SIZE),
+                                GFP_KERNEL);
+
+
+                if ((*buf)->data[idx] == NULL) {
+                        DRM_ERROR("Failed to allocate %dth page for drm"
+                                        " buffer with %d bytes and %d pages.\n",
+                                        idx + 1, size, nr_pages);
+                        goto error_out;
+                }
+
+        }
+
+        return 0;
+
+error_out:
+
+        /* Only the last element can be a null pointer so check for it first. */
+        if ((*buf)->data[idx])
+                kfree((*buf)->data[idx]);
+
+        for (--idx; idx >= 0; --idx)
+                kfree((*buf)->data[idx]);
+
+        kfree(*buf);
+        return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_buffer_alloc);
+
+/**
+ * Copy the user data to the beginning of the buffer and reset the processing
+ * iterator.
+ *
+ *   user_data: A pointer to the data that is copied to the buffer.
+ *   size: The number of bytes to copy.
+ */
+extern int drm_buffer_copy_from_user(struct drm_buffer *buf,
+                void __user *user_data, int size)
+{
+        int nr_pages = size / PAGE_SIZE + 1;
+        int idx;
+
+        if (size > buf->size) {
+                DRM_ERROR("Requesting to copy %d bytes to a drm buffer with"
+                                " %d bytes space\n",
+                                size, buf->size);
+                return -EFAULT;
+        }
+
+        for (idx = 0; idx < nr_pages; ++idx) {
+
+                if (DRM_COPY_FROM_USER(buf->data[idx],
+                        user_data + idx * PAGE_SIZE,
+                        min(PAGE_SIZE, size - idx * PAGE_SIZE))) {
+                        DRM_ERROR("Failed to copy user data (%p) to drm buffer"
+                                        " (%p) %dth page.\n",
+                                        user_data, buf, idx);
+                        return -EFAULT;
+
+                }
+        }
+        buf->iterator = 0;
+        return 0;
+}
+EXPORT_SYMBOL(drm_buffer_copy_from_user);
+
+/**
+ * Free the drm buffer object
+ */
+void drm_buffer_free(struct drm_buffer *buf)
+{
+
+        if (buf != NULL) {
+
+                int nr_pages = buf->size / PAGE_SIZE + 1;
+                int idx;
+                for (idx = 0; idx < nr_pages; ++idx)
+                        kfree(buf->data[idx]);
+
+                kfree(buf);
+        }
+}
+EXPORT_SYMBOL(drm_buffer_free);
+
+/**
+ * Read an object from a buffer that may be split into multiple parts. If the
+ * object is not split, the function just returns the pointer to the object in
+ * the buffer. But in case of a split object, the data is copied to the given
+ * stack object that is supplied by the caller.
+ *
+ * The processing location of the buffer is also advanced to the next byte
+ * after the object.
+ *
+ *   objsize: The size of the object in bytes.
+ *   stack_obj: A pointer to a memory location where the object can be copied.
+ */ +void *drm_buffer_read_object(struct drm_buffer *buf, + int objsize, void *stack_obj) +{ + int idx = drm_buffer_index(buf); + int page = drm_buffer_page(buf); + void *obj = 0; + + if (idx + objsize <= PAGE_SIZE) { + obj = &buf->data[page][idx]; + } else { + /* The object is split which forces copy to temporary object.*/ + int beginsz = PAGE_SIZE - idx; + memcpy(stack_obj, &buf->data[page][idx], beginsz); + + memcpy(stack_obj + beginsz, &buf->data[page + 1][0], + objsize - beginsz); + + obj = stack_obj; + } + + drm_buffer_advance(buf, objsize); + return obj; +} +EXPORT_SYMBOL(drm_buffer_read_object); diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 7d0f00a935f..f2aaf39be39 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -836,11 +836,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) mode_changed = true; } else if (set->fb == NULL) { mode_changed = true; - } else if ((set->fb->bits_per_pixel != - set->crtc->fb->bits_per_pixel) || - set->fb->depth != set->crtc->fb->depth) - fb_changed = true; - else + } else fb_changed = true; } diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 766c46875a2..f3c58e2bd75 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -125,28 +125,28 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), - DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0), - DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH), - - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW) + DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED), + 
DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED), + + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED) }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index ab6c9733041..f97e7c42ac8 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -60,8 +60,7 @@ #define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5) /* use +hsync +vsync for detailed mode */ #define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6) -/* define the number of Extension EDID block */ -#define MAX_EDID_EXT_NUM 4 + #define LEVEL_DMT 0 #define LEVEL_GTF 1 @@ -114,14 +113,14 @@ static const u8 edid_header[] = { }; /** - * edid_is_valid - sanity check EDID data + * drm_edid_is_valid - sanity check EDID data * @edid: EDID data * * Sanity check the EDID block by looking at the header, the version number * and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's * valid. */ -static bool edid_is_valid(struct edid *edid) +bool drm_edid_is_valid(struct edid *edid) { int i, score = 0; u8 csum = 0; @@ -163,6 +162,7 @@ bad: } return 0; } +EXPORT_SYMBOL(drm_edid_is_valid); /** * edid_vendor - match a string against EDID's obfuscated vendor field @@ -1112,8 +1112,8 @@ static int add_detailed_info_eedid(struct drm_connector *connector, } /* Chose real EDID extension number */ - edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ? - MAX_EDID_EXT_NUM : edid->extensions; + edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ? 
+ DRM_MAX_EDID_EXT_NUM : edid->extensions; /* Find CEA extension */ for (i = 0; i < edid_ext_num; i++) { @@ -1195,7 +1195,7 @@ static int drm_ddc_read_edid(struct drm_connector *connector, for (i = 0; i < 4; i++) { if (drm_do_probe_ddc_edid(adapter, buf, len)) return -1; - if (edid_is_valid((struct edid *)buf)) + if (drm_edid_is_valid((struct edid *)buf)) return 0; } @@ -1220,7 +1220,7 @@ struct edid *drm_get_edid(struct drm_connector *connector, int ret; struct edid *edid; - edid = kmalloc(EDID_LENGTH * (MAX_EDID_EXT_NUM + 1), + edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1), GFP_KERNEL); if (edid == NULL) { dev_warn(&connector->dev->pdev->dev, @@ -1238,14 +1238,14 @@ struct edid *drm_get_edid(struct drm_connector *connector, if (edid->extensions != 0) { int edid_ext_num = edid->extensions; - if (edid_ext_num > MAX_EDID_EXT_NUM) { + if (edid_ext_num > DRM_MAX_EDID_EXT_NUM) { dev_warn(&connector->dev->pdev->dev, "The number of extension(%d) is " "over max (%d), actually read number (%d)\n", - edid_ext_num, MAX_EDID_EXT_NUM, - MAX_EDID_EXT_NUM); + edid_ext_num, DRM_MAX_EDID_EXT_NUM, + DRM_MAX_EDID_EXT_NUM); /* Reset EDID extension number to be read */ - edid_ext_num = MAX_EDID_EXT_NUM; + edid_ext_num = DRM_MAX_EDID_EXT_NUM; } /* Read EDID including extensions too */ ret = drm_ddc_read_edid(connector, adapter, (char *)edid, @@ -1288,8 +1288,8 @@ bool drm_detect_hdmi_monitor(struct edid *edid) goto end; /* Chose real EDID extension number */ - edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ? - MAX_EDID_EXT_NUM : edid->extensions; + edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ? + DRM_MAX_EDID_EXT_NUM : edid->extensions; /* Find CEA extension */ for (i = 0; i < edid_ext_num; i++) { @@ -1346,7 +1346,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) if (edid == NULL) { return 0; } - if (!edid_is_valid(edid)) { + if (!drm_edid_is_valid(edid)) { dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n", drm_get_connector_name(connector)); return 0; diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 0f9e90552dc..50549703584 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -27,6 +27,7 @@ * Dave Airlie <airlied@linux.ie> * Jesse Barnes <jesse.barnes@intel.com> */ +#include <linux/kernel.h> #include <linux/sysrq.h> #include <linux/fb.h> #include "drmP.h" @@ -50,21 +51,6 @@ int drm_fb_helper_add_connector(struct drm_connector *connector) } EXPORT_SYMBOL(drm_fb_helper_add_connector); -static int my_atoi(const char *name) -{ - int val = 0; - - for (;; name++) { - switch (*name) { - case '0' ... 
'9': - val = 10*val+(*name-'0'); - break; - default: - return val; - } - } -} - /** * drm_fb_helper_connector_parse_command_line - parse command line for connector * @connector - connector to parse line for @@ -111,7 +97,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con namelen = i; if (!refresh_specified && !bpp_specified && !yres_specified) { - refresh = my_atoi(&name[i+1]); + refresh = simple_strtol(&name[i+1], NULL, 10); refresh_specified = 1; if (cvt || rb) cvt = 0; @@ -121,7 +107,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con case '-': namelen = i; if (!bpp_specified && !yres_specified) { - bpp = my_atoi(&name[i+1]); + bpp = simple_strtol(&name[i+1], NULL, 10); bpp_specified = 1; if (cvt || rb) cvt = 0; @@ -130,7 +116,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con break; case 'x': if (!yres_specified) { - yres = my_atoi(&name[i+1]); + yres = simple_strtol(&name[i+1], NULL, 10); yres_specified = 1; } else goto done; @@ -170,7 +156,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con } } if (i < 0 && yres_specified) { - xres = my_atoi(name); + xres = simple_strtol(name, NULL, 10); res_specified = 1; } done: @@ -694,7 +680,7 @@ int drm_fb_helper_set_par(struct fb_info *info) int i; if (var->pixclock != 0) { - DRM_ERROR("PIXEL CLCOK SET\n"); + DRM_ERROR("PIXEL CLOCK SET\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 8bf3770f294..aa89d4b0b4c 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -192,9 +192,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle) idr_remove(&filp->object_idr, handle); spin_unlock(&filp->table_lock); - mutex_lock(&dev->struct_mutex); - drm_gem_object_handle_unreference(obj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_handle_unreference_unlocked(obj); return 0; } @@ -325,9 +323,7 @@ again: } err: - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(obj); return ret; } @@ -358,9 +354,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data, return -ENOENT; ret = drm_gem_handle_create(file_priv, obj, &handle); - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(obj); if (ret) return ret; @@ -390,7 +384,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) { struct drm_gem_object *obj = ptr; - drm_gem_object_handle_unreference(obj); + drm_gem_object_handle_unreference_unlocked(obj); return 0; } @@ -403,16 +397,25 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) void drm_gem_release(struct drm_device *dev, struct drm_file *file_private) { - mutex_lock(&dev->struct_mutex); idr_for_each(&file_private->object_idr, &drm_gem_object_release_handle, NULL); idr_destroy(&file_private->object_idr); - mutex_unlock(&dev->struct_mutex); +} + +static void +drm_gem_object_free_common(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + fput(obj->filp); + atomic_dec(&dev->object_count); + atomic_sub(obj->size, &dev->object_memory); + kfree(obj); } /** * Called after the last reference to the object has been lost. 
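
The fb-helper hunk above drops the driver-local my_atoi() in favour of the kernel's simple_strtol() while picking apart mode strings of the form <xres>x<yres>-<bpp>@<refresh>. A standalone sketch of the same parsing, using libc strtol() in place of the kernel helper so it builds outside the tree; the mode string is just an example value:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *mode = "1024x768-24@60";      /* <xres>x<yres>-<bpp>@<refresh> */
	char *end;

	long xres    = strtol(mode, &end, 10);    /* stops at 'x' */
	long yres    = strtol(end + 1, &end, 10); /* stops at '-' */
	long bpp     = strtol(end + 1, &end, 10); /* stops at '@' */
	long refresh = strtol(end + 1, &end, 10);

	printf("%ldx%ld, %ld bpp, %ld Hz\n", xres, yres, bpp, refresh);
	return 0;
}
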
+ * Must be called holding struct_ mutex * * Frees the object */ @@ -427,14 +430,40 @@ drm_gem_object_free(struct kref *kref) if (dev->driver->gem_free_object != NULL) dev->driver->gem_free_object(obj); - fput(obj->filp); - atomic_dec(&dev->object_count); - atomic_sub(obj->size, &dev->object_memory); - kfree(obj); + drm_gem_object_free_common(obj); } EXPORT_SYMBOL(drm_gem_object_free); /** + * Called after the last reference to the object has been lost. + * Must be called without holding struct_mutex + * + * Frees the object + */ +void +drm_gem_object_free_unlocked(struct kref *kref) +{ + struct drm_gem_object *obj = (struct drm_gem_object *) kref; + struct drm_device *dev = obj->dev; + + if (dev->driver->gem_free_object_unlocked != NULL) + dev->driver->gem_free_object_unlocked(obj); + else if (dev->driver->gem_free_object != NULL) { + mutex_lock(&dev->struct_mutex); + dev->driver->gem_free_object(obj); + mutex_unlock(&dev->struct_mutex); + } + + drm_gem_object_free_common(obj); +} +EXPORT_SYMBOL(drm_gem_object_free_unlocked); + +static void drm_gem_object_ref_bug(struct kref *list_kref) +{ + BUG(); +} + +/** * Called after the last handle to the object has been closed * * Removes any name for the object. Note that this must be @@ -458,8 +487,10 @@ drm_gem_object_handle_free(struct kref *kref) /* * The object name held a reference to this object, drop * that now. + * + * This cannot be the last reference, since the handle holds one too. */ - drm_gem_object_unreference(obj); + kref_put(&obj->refcount, drm_gem_object_ref_bug); } else spin_unlock(&dev->object_name_lock); @@ -477,11 +508,8 @@ EXPORT_SYMBOL(drm_gem_vm_open); void drm_gem_vm_close(struct vm_area_struct *vma) { struct drm_gem_object *obj = vma->vm_private_data; - struct drm_device *dev = obj->dev; - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(obj); } EXPORT_SYMBOL(drm_gem_vm_close); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index a894ade0309..1376dfe44c9 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -162,7 +162,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; drm_i915_private_t *dev_priv = dev->dev_private; - if (!IS_IRONLAKE(dev)) { + if (!HAS_PCH_SPLIT(dev)) { seq_printf(m, "Interrupt enable: %08x\n", I915_READ(IER)); seq_printf(m, "Interrupt identity: %08x\n", @@ -350,6 +350,36 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data) return 0; } +static const char *pin_flag(int pinned) +{ + if (pinned > 0) + return " P"; + else if (pinned < 0) + return " p"; + else + return ""; +} + +static const char *tiling_flag(int tiling) +{ + switch (tiling) { + default: + case I915_TILING_NONE: return ""; + case I915_TILING_X: return " X"; + case I915_TILING_Y: return " Y"; + } +} + +static const char *dirty_flag(int dirty) +{ + return dirty ? " dirty" : ""; +} + +static const char *purgeable_flag(int purgeable) +{ + return purgeable ? 
" purgeable" : ""; +} + static int i915_error_state(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -357,6 +387,7 @@ static int i915_error_state(struct seq_file *m, void *unused) drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_error_state *error; unsigned long flags; + int i, page, offset, elt; spin_lock_irqsave(&dev_priv->error_lock, flags); if (!dev_priv->first_error) { @@ -368,6 +399,7 @@ static int i915_error_state(struct seq_file *m, void *unused) seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec, error->time.tv_usec); + seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); seq_printf(m, "EIR: 0x%08x\n", error->eir); seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er); seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); @@ -379,6 +411,59 @@ static int i915_error_state(struct seq_file *m, void *unused) seq_printf(m, " INSTPS: 0x%08x\n", error->instps); seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); } + seq_printf(m, "seqno: 0x%08x\n", error->seqno); + + if (error->active_bo_count) { + seq_printf(m, "Buffers [%d]:\n", error->active_bo_count); + + for (i = 0; i < error->active_bo_count; i++) { + seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s", + error->active_bo[i].gtt_offset, + error->active_bo[i].size, + error->active_bo[i].read_domains, + error->active_bo[i].write_domain, + error->active_bo[i].seqno, + pin_flag(error->active_bo[i].pinned), + tiling_flag(error->active_bo[i].tiling), + dirty_flag(error->active_bo[i].dirty), + purgeable_flag(error->active_bo[i].purgeable)); + + if (error->active_bo[i].name) + seq_printf(m, " (name: %d)", error->active_bo[i].name); + if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE) + seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg); + + seq_printf(m, "\n"); + } + } + + for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) { + if (error->batchbuffer[i]) { + struct drm_i915_error_object *obj = error->batchbuffer[i]; + + seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset); + offset = 0; + for (page = 0; page < obj->page_count; page++) { + for (elt = 0; elt < PAGE_SIZE/4; elt++) { + seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]); + offset += 4; + } + } + } + } + + if (error->ringbuffer) { + struct drm_i915_error_object *obj = error->ringbuffer; + + seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset); + offset = 0; + for (page = 0; page < obj->page_count; page++) { + for (elt = 0; elt < PAGE_SIZE/4; elt++) { + seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]); + offset += 4; + } + } + } out: spin_unlock_irqrestore(&dev_priv->error_lock, flags); @@ -386,6 +471,165 @@ out: return 0; } +static int i915_rstdby_delays(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + u16 crstanddelay = I915_READ16(CRSTANDVID); + + seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); + + return 0; +} + +static int i915_cur_delayinfo(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + u16 rgvswctl = I915_READ16(MEMSWCTL); + + seq_printf(m, "Last command: 0x%01x\n", (rgvswctl >> 13) & 0x3); + seq_printf(m, "Command status: %d\n", (rgvswctl >> 12) & 1); + seq_printf(m, "P%d DELAY 0x%02x\n", 
(rgvswctl >> 8) & 0xf, + rgvswctl & 0x3f); + + return 0; +} + +static int i915_delayfreq_table(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + u32 delayfreq; + int i; + + for (i = 0; i < 16; i++) { + delayfreq = I915_READ(PXVFREQ_BASE + i * 4); + seq_printf(m, "P%02dVIDFREQ: 0x%08x\n", i, delayfreq); + } + + return 0; +} + +static inline int MAP_TO_MV(int map) +{ + return 1250 - (map * 25); +} + +static int i915_inttoext_table(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + u32 inttoext; + int i; + + for (i = 1; i <= 32; i++) { + inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); + seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); + } + + return 0; +} + +static int i915_drpc_info(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + u32 rgvmodectl = I915_READ(MEMMODECTL); + + seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? + "yes" : "no"); + seq_printf(m, "Boost freq: %d\n", + (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >> + MEMMODE_BOOST_FREQ_SHIFT); + seq_printf(m, "HW control enabled: %s\n", + rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no"); + seq_printf(m, "SW control enabled: %s\n", + rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no"); + seq_printf(m, "Gated voltage change: %s\n", + rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no"); + seq_printf(m, "Starting frequency: P%d\n", + (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT); + seq_printf(m, "Max frequency: P%d\n", + (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT); + seq_printf(m, "Min frequency: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK)); + + return 0; +} + +static int i915_fbc_status(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + struct drm_crtc *crtc; + drm_i915_private_t *dev_priv = dev->dev_private; + bool fbc_enabled = false; + + if (!dev_priv->display.fbc_enabled) { + seq_printf(m, "FBC unsupported on this chipset\n"); + return 0; + } + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + if (!crtc->enabled) + continue; + if (dev_priv->display.fbc_enabled(crtc)) + fbc_enabled = true; + } + + if (fbc_enabled) { + seq_printf(m, "FBC enabled\n"); + } else { + seq_printf(m, "FBC disabled: "); + switch (dev_priv->no_fbc_reason) { + case FBC_STOLEN_TOO_SMALL: + seq_printf(m, "not enough stolen memory"); + break; + case FBC_UNSUPPORTED_MODE: + seq_printf(m, "mode not supported"); + break; + case FBC_MODE_TOO_LARGE: + seq_printf(m, "mode too large"); + break; + case FBC_BAD_PLANE: + seq_printf(m, "FBC unsupported on plane"); + break; + case FBC_NOT_TILED: + seq_printf(m, "scanout buffer not tiled"); + break; + default: + seq_printf(m, "unknown reason"); + } + seq_printf(m, "\n"); + } + return 0; +} + +static int i915_sr_status(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_device *dev = node->minor->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + bool sr_enabled = false; + + if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev)) + sr_enabled = 
I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; + else if (IS_I915GM(dev)) + sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; + else if (IS_PINEVIEW(dev)) + sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN; + + seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" : + "disabled"); + + return 0; +} + static int i915_wedged_open(struct inode *inode, struct file *filp) @@ -503,6 +747,13 @@ static struct drm_info_list i915_debugfs_list[] = { {"i915_ringbuffer_info", i915_ringbuffer_info, 0}, {"i915_batchbuffers", i915_batchbuffer_info, 0}, {"i915_error_state", i915_error_state, 0}, + {"i915_rstdby_delays", i915_rstdby_delays, 0}, + {"i915_cur_delayinfo", i915_cur_delayinfo, 0}, + {"i915_delayfreq_table", i915_delayfreq_table, 0}, + {"i915_inttoext_table", i915_inttoext_table, 0}, + {"i915_drpc_info", i915_drpc_info, 0}, + {"i915_fbc_status", i915_fbc_status, 0}, + {"i915_sr_status", i915_sr_status, 0}, }; #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 2307f98349f..8bfc0bbf13e 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -35,6 +35,9 @@ #include "i915_drv.h" #include "i915_trace.h" #include <linux/vgaarb.h> +#include <linux/acpi.h> +#include <linux/pnp.h> +#include <linux/vga_switcheroo.h> /* Really want an OS-independent resettable timer. Would like to have * this loop run for (eg) 3 sec, but have the timer reset every time @@ -933,6 +936,120 @@ static int i915_get_bridge_dev(struct drm_device *dev) return 0; } +#define MCHBAR_I915 0x44 +#define MCHBAR_I965 0x48 +#define MCHBAR_SIZE (4*4096) + +#define DEVEN_REG 0x54 +#define DEVEN_MCHBAR_EN (1 << 28) + +/* Allocate space for the MCH regs if needed, return nonzero on error */ +static int +intel_alloc_mchbar_resource(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; + u32 temp_lo, temp_hi = 0; + u64 mchbar_addr; + int ret = 0; + + if (IS_I965G(dev)) + pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); + pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); + mchbar_addr = ((u64)temp_hi << 32) | temp_lo; + + /* If ACPI doesn't have it, assume we need to allocate it ourselves */ +#ifdef CONFIG_PNP + if (mchbar_addr && + pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) { + ret = 0; + goto out; + } +#endif + + /* Get some space for it */ + ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res, + MCHBAR_SIZE, MCHBAR_SIZE, + PCIBIOS_MIN_MEM, + 0, pcibios_align_resource, + dev_priv->bridge_dev); + if (ret) { + DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); + dev_priv->mch_res.start = 0; + goto out; + } + + if (IS_I965G(dev)) + pci_write_config_dword(dev_priv->bridge_dev, reg + 4, + upper_32_bits(dev_priv->mch_res.start)); + + pci_write_config_dword(dev_priv->bridge_dev, reg, + lower_32_bits(dev_priv->mch_res.start)); +out: + return ret; +} + +/* Setup MCHBAR if possible, return true if we should disable it again */ +static void +intel_setup_mchbar(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int mchbar_reg = IS_I965G(dev) ? 
MCHBAR_I965 : MCHBAR_I915; + u32 temp; + bool enabled; + + dev_priv->mchbar_need_disable = false; + + if (IS_I915G(dev) || IS_I915GM(dev)) { + pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); + enabled = !!(temp & DEVEN_MCHBAR_EN); + } else { + pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); + enabled = temp & 1; + } + + /* If it's already enabled, don't have to do anything */ + if (enabled) + return; + + if (intel_alloc_mchbar_resource(dev)) + return; + + dev_priv->mchbar_need_disable = true; + + /* Space is allocated or reserved, so enable it. */ + if (IS_I915G(dev) || IS_I915GM(dev)) { + pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, + temp | DEVEN_MCHBAR_EN); + } else { + pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); + pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); + } +} + +static void +intel_teardown_mchbar(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; + u32 temp; + + if (dev_priv->mchbar_need_disable) { + if (IS_I915G(dev) || IS_I915GM(dev)) { + pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); + temp &= ~DEVEN_MCHBAR_EN; + pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp); + } else { + pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); + temp &= ~1; + pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp); + } + } + + if (dev_priv->mch_res.start) + release_resource(&dev_priv->mch_res); +} + /** * i915_probe_agp - get AGP bootup configuration * @pdev: PCI device @@ -978,59 +1095,123 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size, * Some of the preallocated space is taken by the GTT * and popup. GTT is 1K per MB of aperture size, and popup is 4K. 
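
The relocated MCHBAR helpers now record in dev_priv->mchbar_need_disable whether the driver itself turned the BAR on, so intel_teardown_mchbar() only undoes what intel_setup_mchbar() did. A minimal userspace model of that bookkeeping; the config-space array and accessors below are stand-ins for the PCI config API, not kernel calls:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEVEN_REG       0x54
#define DEVEN_MCHBAR_EN (1u << 28)

static uint32_t fake_cfg[64];                       /* pretend config space */

static uint32_t cfg_read(int reg)            { return fake_cfg[reg / 4]; }
static void cfg_write(int reg, uint32_t val) { fake_cfg[reg / 4] = val; }

/* Enable MCHBAR if the firmware left it off; report whether we did so. */
static bool mchbar_setup(void)
{
	uint32_t temp = cfg_read(DEVEN_REG);

	if (temp & DEVEN_MCHBAR_EN)
		return false;                       /* already enabled, leave it */

	cfg_write(DEVEN_REG, temp | DEVEN_MCHBAR_EN);
	return true;                                /* we enabled it ourselves */
}

/* Only disable the BAR if setup enabled it in the first place. */
static void mchbar_teardown(bool need_disable)
{
	if (need_disable)
		cfg_write(DEVEN_REG, cfg_read(DEVEN_REG) & ~DEVEN_MCHBAR_EN);
}

int main(void)
{
	bool need_disable = mchbar_setup();

	printf("enabled by us: %s\n", need_disable ? "yes" : "no");
	mchbar_teardown(need_disable);
	return 0;
}
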
*/ - if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev)) + if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) overhead = 4096; else overhead = (*aperture_size / 1024) + 4096; - switch (tmp & INTEL_GMCH_GMS_MASK) { - case INTEL_855_GMCH_GMS_DISABLED: - DRM_ERROR("video memory is disabled\n"); - return -1; - case INTEL_855_GMCH_GMS_STOLEN_1M: - stolen = 1 * 1024 * 1024; - break; - case INTEL_855_GMCH_GMS_STOLEN_4M: - stolen = 4 * 1024 * 1024; - break; - case INTEL_855_GMCH_GMS_STOLEN_8M: - stolen = 8 * 1024 * 1024; - break; - case INTEL_855_GMCH_GMS_STOLEN_16M: - stolen = 16 * 1024 * 1024; - break; - case INTEL_855_GMCH_GMS_STOLEN_32M: - stolen = 32 * 1024 * 1024; - break; - case INTEL_915G_GMCH_GMS_STOLEN_48M: - stolen = 48 * 1024 * 1024; - break; - case INTEL_915G_GMCH_GMS_STOLEN_64M: - stolen = 64 * 1024 * 1024; - break; - case INTEL_GMCH_GMS_STOLEN_128M: - stolen = 128 * 1024 * 1024; - break; - case INTEL_GMCH_GMS_STOLEN_256M: - stolen = 256 * 1024 * 1024; - break; - case INTEL_GMCH_GMS_STOLEN_96M: - stolen = 96 * 1024 * 1024; - break; - case INTEL_GMCH_GMS_STOLEN_160M: - stolen = 160 * 1024 * 1024; - break; - case INTEL_GMCH_GMS_STOLEN_224M: - stolen = 224 * 1024 * 1024; - break; - case INTEL_GMCH_GMS_STOLEN_352M: - stolen = 352 * 1024 * 1024; - break; - default: - DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n", - tmp & INTEL_GMCH_GMS_MASK); - return -1; + if (IS_GEN6(dev)) { + /* SNB has memory control reg at 0x50.w */ + pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp); + + switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) { + case INTEL_855_GMCH_GMS_DISABLED: + DRM_ERROR("video memory is disabled\n"); + return -1; + case SNB_GMCH_GMS_STOLEN_32M: + stolen = 32 * 1024 * 1024; + break; + case SNB_GMCH_GMS_STOLEN_64M: + stolen = 64 * 1024 * 1024; + break; + case SNB_GMCH_GMS_STOLEN_96M: + stolen = 96 * 1024 * 1024; + break; + case SNB_GMCH_GMS_STOLEN_128M: + stolen = 128 * 1024 * 1024; + break; + case SNB_GMCH_GMS_STOLEN_160M: + stolen = 160 * 1024 * 1024; + break; + case SNB_GMCH_GMS_STOLEN_192M: + stolen = 192 * 1024 * 1024; + break; + case SNB_GMCH_GMS_STOLEN_224M: + stolen = 224 * 1024 * 1024; + break; + case SNB_GMCH_GMS_STOLEN_256M: + stolen = 256 * 1024 * 1024; + break; + case SNB_GMCH_GMS_STOLEN_288M: + stolen = 288 * 1024 * 1024; + break; + case SNB_GMCH_GMS_STOLEN_320M: + stolen = 320 * 1024 * 1024; + break; + case SNB_GMCH_GMS_STOLEN_352M: + stolen = 352 * 1024 * 1024; + break; + case SNB_GMCH_GMS_STOLEN_384M: + stolen = 384 * 1024 * 1024; + break; + case SNB_GMCH_GMS_STOLEN_416M: + stolen = 416 * 1024 * 1024; + break; + case SNB_GMCH_GMS_STOLEN_448M: + stolen = 448 * 1024 * 1024; + break; + case SNB_GMCH_GMS_STOLEN_480M: + stolen = 480 * 1024 * 1024; + break; + case SNB_GMCH_GMS_STOLEN_512M: + stolen = 512 * 1024 * 1024; + break; + default: + DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n", + tmp & SNB_GMCH_GMS_STOLEN_MASK); + return -1; + } + } else { + switch (tmp & INTEL_GMCH_GMS_MASK) { + case INTEL_855_GMCH_GMS_DISABLED: + DRM_ERROR("video memory is disabled\n"); + return -1; + case INTEL_855_GMCH_GMS_STOLEN_1M: + stolen = 1 * 1024 * 1024; + break; + case INTEL_855_GMCH_GMS_STOLEN_4M: + stolen = 4 * 1024 * 1024; + break; + case INTEL_855_GMCH_GMS_STOLEN_8M: + stolen = 8 * 1024 * 1024; + break; + case INTEL_855_GMCH_GMS_STOLEN_16M: + stolen = 16 * 1024 * 1024; + break; + case INTEL_855_GMCH_GMS_STOLEN_32M: + stolen = 32 * 1024 * 1024; + break; + case INTEL_915G_GMCH_GMS_STOLEN_48M: + stolen = 48 * 1024 * 1024; + break; + case 
INTEL_915G_GMCH_GMS_STOLEN_64M: + stolen = 64 * 1024 * 1024; + break; + case INTEL_GMCH_GMS_STOLEN_128M: + stolen = 128 * 1024 * 1024; + break; + case INTEL_GMCH_GMS_STOLEN_256M: + stolen = 256 * 1024 * 1024; + break; + case INTEL_GMCH_GMS_STOLEN_96M: + stolen = 96 * 1024 * 1024; + break; + case INTEL_GMCH_GMS_STOLEN_160M: + stolen = 160 * 1024 * 1024; + break; + case INTEL_GMCH_GMS_STOLEN_224M: + stolen = 224 * 1024 * 1024; + break; + case INTEL_GMCH_GMS_STOLEN_352M: + stolen = 352 * 1024 * 1024; + break; + default: + DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n", + tmp & INTEL_GMCH_GMS_MASK); + return -1; + } } + *preallocated_size = stolen - overhead; *start = overhead; @@ -1064,7 +1245,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev, int gtt_offset, gtt_size; if (IS_I965G(dev)) { - if (IS_G4X(dev) || IS_IRONLAKE(dev)) { + if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) { gtt_offset = 2*1024*1024; gtt_size = 2*1024*1024; } else { @@ -1133,6 +1314,7 @@ static void i915_setup_compression(struct drm_device *dev, int size) /* Leave 1M for line length buffer & misc. */ compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0); if (!compressed_fb) { + dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; i915_warn_stolen(dev); return; } @@ -1140,6 +1322,7 @@ static void i915_setup_compression(struct drm_device *dev, int size) compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); if (!compressed_fb) { i915_warn_stolen(dev); + dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; return; } @@ -1199,6 +1382,32 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state) return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; } +static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; + if (state == VGA_SWITCHEROO_ON) { + printk(KERN_INFO "i915: switched off\n"); + /* i915 resume handler doesn't set to D0 */ + pci_set_power_state(dev->pdev, PCI_D0); + i915_resume(dev); + } else { + printk(KERN_ERR "i915: switched off\n"); + i915_suspend(dev, pmm); + } +} + +static bool i915_switcheroo_can_switch(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + bool can_switch; + + spin_lock(&dev->count_lock); + can_switch = (dev->open_count == 0); + spin_unlock(&dev->count_lock); + return can_switch; +} + static int i915_load_modeset_init(struct drm_device *dev, unsigned long prealloc_start, unsigned long prealloc_size, @@ -1260,6 +1469,12 @@ static int i915_load_modeset_init(struct drm_device *dev, if (ret) goto destroy_ringbuffer; + ret = vga_switcheroo_register_client(dev->pdev, + i915_switcheroo_set_state, + i915_switcheroo_can_switch); + if (ret) + goto destroy_ringbuffer; + intel_modeset_init(dev); ret = drm_irq_install(dev); @@ -1281,7 +1496,9 @@ static int i915_load_modeset_init(struct drm_device *dev, return 0; destroy_ringbuffer: + mutex_lock(&dev->struct_mutex); i915_gem_cleanup_ringbuffer(dev); + mutex_unlock(&dev->struct_mutex); out: return ret; } @@ -1445,11 +1662,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) dev->driver->get_vblank_counter = i915_get_vblank_counter; dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ - if (IS_G4X(dev) || IS_IRONLAKE(dev)) { + if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) { dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ dev->driver->get_vblank_counter = gm45_get_vblank_counter; } + /* Try to make sure MCHBAR 
is enabled before poking at it */ + intel_setup_mchbar(dev); + i915_gem_load(dev); /* Init HWS */ @@ -1523,6 +1743,8 @@ int i915_driver_unload(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + i915_destroy_error_state(dev); + destroy_workqueue(dev_priv->wq); del_timer_sync(&dev_priv->hangcheck_timer); @@ -1544,6 +1766,7 @@ int i915_driver_unload(struct drm_device *dev) dev_priv->child_dev_num = 0; } drm_irq_uninstall(dev); + vga_switcheroo_unregister_client(dev->pdev); vga_client_register(dev->pdev, NULL, NULL, NULL); } @@ -1569,6 +1792,8 @@ int i915_driver_unload(struct drm_device *dev) intel_cleanup_overlay(dev); } + intel_teardown_mchbar(dev); + pci_dev_put(dev_priv->bridge_dev); kfree(dev->dev_private); @@ -1611,6 +1836,7 @@ void i915_driver_lastclose(struct drm_device * dev) if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) { drm_fb_helper_restore(); + vga_switcheroo_process_delayed_switch(); return; } diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index cf4cb3e9a0c..1b2e95455c0 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -49,6 +49,7 @@ unsigned int i915_lvds_downclock = 0; module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); static struct drm_driver driver; +extern int intel_agp_enabled; #define INTEL_VGA_DEVICE(id, info) { \ .class = PCI_CLASS_DISPLAY_VGA << 8, \ @@ -136,6 +137,16 @@ const static struct intel_device_info intel_ironlake_m_info = { .has_hotplug = 1, }; +const static struct intel_device_info intel_sandybridge_d_info = { + .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1, + .has_hotplug = 1, +}; + +const static struct intel_device_info intel_sandybridge_m_info = { + .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1, + .has_hotplug = 1, +}; + const static struct pci_device_id pciidlist[] = { INTEL_VGA_DEVICE(0x3577, &intel_i830_info), INTEL_VGA_DEVICE(0x2562, &intel_845g_info), @@ -167,6 +178,8 @@ const static struct pci_device_id pciidlist[] = { INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), + INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info), + INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info), {0, 0, 0} }; @@ -201,7 +214,7 @@ static int i915_drm_freeze(struct drm_device *dev) return 0; } -static int i915_suspend(struct drm_device *dev, pm_message_t state) +int i915_suspend(struct drm_device *dev, pm_message_t state) { int error; @@ -255,7 +268,7 @@ static int i915_drm_thaw(struct drm_device *dev) return error; } -static int i915_resume(struct drm_device *dev) +int i915_resume(struct drm_device *dev) { if (pci_enable_device(dev->pdev)) return -EIO; @@ -546,6 +559,11 @@ static struct drm_driver driver = { static int __init i915_init(void) { + if (!intel_agp_enabled) { + DRM_ERROR("drm/i915 can't work without intel_agp module!\n"); + return -ENODEV; + } + driver.num_ioctls = i915_max_ioctl; i915_gem_shrinker_init(); @@ -571,6 +589,11 @@ static int __init i915_init(void) driver.driver_features &= ~DRIVER_MODESET; #endif + if (!(driver.driver_features & DRIVER_MODESET)) { + driver.suspend = i915_suspend; + driver.resume = i915_resume; + } + return drm_init(&driver); } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b99b6a841d9..979439cfb82 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -150,7 +150,27 @@ struct drm_i915_error_state { u32 instps; u32 
instdone1; u32 seqno; + u64 bbaddr; struct timeval time; + struct drm_i915_error_object { + int page_count; + u32 gtt_offset; + u32 *pages[0]; + } *ringbuffer, *batchbuffer[2]; + struct drm_i915_error_buffer { + size_t size; + u32 name; + u32 seqno; + u32 gtt_offset; + u32 read_domains; + u32 write_domain; + u32 fence_reg; + s32 pinned:2; + u32 tiling:2; + u32 dirty:1; + u32 purgeable:1; + } *active_bo; + u32 active_bo_count; }; struct drm_i915_display_funcs { @@ -192,6 +212,14 @@ struct intel_device_info { u8 cursor_needs_physical : 1; }; +enum no_fbc_reason { + FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */ + FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */ + FBC_MODE_TOO_LARGE, /* mode too large for compression */ + FBC_BAD_PLANE, /* fbc not supported on plane */ + FBC_NOT_TILED, /* buffer not tiled */ +}; + typedef struct drm_i915_private { struct drm_device *dev; @@ -452,6 +480,7 @@ typedef struct drm_i915_private { u32 savePIPEB_DATA_N1; u32 savePIPEB_LINK_M1; u32 savePIPEB_LINK_N1; + u32 saveMCHBAR_RENDER_STANDBY; struct { struct drm_mm gtt_space; @@ -590,6 +619,14 @@ typedef struct drm_i915_private { int child_dev_num; struct child_device_config *child_dev; struct drm_connector *int_lvds_connector; + + bool mchbar_need_disable; + + u8 cur_delay; + u8 min_delay; + u8 max_delay; + + enum no_fbc_reason no_fbc_reason; } drm_i915_private_t; /** driver private structure attached to each drm_gem_object */ @@ -736,6 +773,8 @@ extern unsigned int i915_fbpercrtc; extern unsigned int i915_powersave; extern unsigned int i915_lvds_downclock; +extern int i915_suspend(struct drm_device *dev, pm_message_t state); +extern int i915_resume(struct drm_device *dev); extern void i915_save_display(struct drm_device *dev); extern void i915_restore_display(struct drm_device *dev); extern int i915_master_create(struct drm_device *dev, struct drm_master *master); @@ -761,6 +800,7 @@ extern int i965_reset(struct drm_device *dev, u8 flags); /* i915_irq.c */ void i915_hangcheck_elapsed(unsigned long data); +void i915_destroy_error_state(struct drm_device *dev); extern int i915_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int i915_irq_wait(struct drm_device *dev, void *data, @@ -897,7 +937,8 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj); bool i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode); -bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj); +bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, + int tiling_mode); /* i915_gem_debug.c */ void i915_gem_dump_object(struct drm_gem_object *obj, int len, @@ -1026,7 +1067,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define IS_845G(dev) ((dev)->pci_device == 0x2562) #define IS_I85X(dev) ((dev)->pci_device == 0x3582) #define IS_I865G(dev) ((dev)->pci_device == 0x2572) -#define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx) +#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx) #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g) #define IS_I915GM(dev) ((dev)->pci_device == 0x2592) #define IS_I945G(dev) ((dev)->pci_device == 0x2772) @@ -1045,8 +1086,29 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx) #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) +#define IS_GEN3(dev) (IS_I915G(dev) || \ + IS_I915GM(dev) || \ + IS_I945G(dev) || \ + 
IS_I945GM(dev) || \ + IS_G33(dev) || \ + IS_PINEVIEW(dev)) +#define IS_GEN4(dev) ((dev)->pci_device == 0x2972 || \ + (dev)->pci_device == 0x2982 || \ + (dev)->pci_device == 0x2992 || \ + (dev)->pci_device == 0x29A2 || \ + (dev)->pci_device == 0x2A02 || \ + (dev)->pci_device == 0x2A12 || \ + (dev)->pci_device == 0x2E02 || \ + (dev)->pci_device == 0x2E12 || \ + (dev)->pci_device == 0x2E22 || \ + (dev)->pci_device == 0x2E32 || \ + (dev)->pci_device == 0x2A42 || \ + (dev)->pci_device == 0x2E42) + #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) +#define IS_GEN6(dev) ((dev)->pci_device == 0x0102) + /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte * rows, which changed the alignment requirements and fence programming. */ @@ -1067,6 +1129,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) #define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6) +#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \ + IS_GEN6(dev)) + #define PRIMARY_RINGBUFFER_SIZE (128*1024) #endif diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ec8a0d7ffa3..fba37e9f775 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -128,9 +128,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, return -ENOMEM; ret = drm_gem_handle_create(file_priv, obj, &handle); - mutex_lock(&dev->struct_mutex); - drm_gem_object_handle_unreference(obj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_handle_unreference_unlocked(obj); if (ret) return ret; @@ -488,7 +486,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, */ if (args->offset > obj->size || args->size > obj->size || args->offset + args->size > obj->size) { - drm_gem_object_unreference(obj); + drm_gem_object_unreference_unlocked(obj); return -EINVAL; } @@ -501,7 +499,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, file_priv); } - drm_gem_object_unreference(obj); + drm_gem_object_unreference_unlocked(obj); return ret; } @@ -961,7 +959,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, */ if (args->offset > obj->size || args->size > obj->size || args->offset + args->size > obj->size) { - drm_gem_object_unreference(obj); + drm_gem_object_unreference_unlocked(obj); return -EINVAL; } @@ -995,7 +993,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, DRM_INFO("pwrite failed %d\n", ret); #endif - drm_gem_object_unreference(obj); + drm_gem_object_unreference_unlocked(obj); return ret; } @@ -1138,9 +1136,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, PROT_READ | PROT_WRITE, MAP_SHARED, args->offset); up_write(¤t->mm->mmap_sem); - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(obj); if (IS_ERR((void *)addr)) return addr; @@ -1562,6 +1558,38 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj) i915_verify_inactive(dev, __FILE__, __LINE__); } +static void +i915_gem_process_flushing_list(struct drm_device *dev, + uint32_t flush_domains, uint32_t seqno) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv, *next; + + list_for_each_entry_safe(obj_priv, next, + &dev_priv->mm.gpu_write_list, + gpu_write_list) { + struct drm_gem_object *obj = obj_priv->obj; + + if ((obj->write_domain & flush_domains) == + obj->write_domain) { + uint32_t old_write_domain = obj->write_domain; + + obj->write_domain = 0; + 
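
i915_gem_process_flushing_list(), factored out of i915_add_request() in the hunk above, retires every buffer whose pending write domain is covered by the flush: it clears the domain, stamps the buffer with the request's seqno and bumps its fence onto the LRU. A simplified, self-contained model of that walk; plain arrays and the field names below stand in for the driver's intrusive lists and object state:

#include <stdint.h>
#include <stdio.h>

struct buf {
	uint32_t write_domain;   /* pending GPU write domain, 0 if clean */
	uint32_t seqno;          /* request that will retire this buffer */
	int      active;
};

static void process_flushing_list(struct buf *bufs, int n,
                                  uint32_t flush_domains, uint32_t seqno)
{
	for (int i = 0; i < n; i++) {
		struct buf *b = &bufs[i];

		if (b->write_domain &&
		    (b->write_domain & flush_domains) == b->write_domain) {
			b->write_domain = 0;   /* this flush covers the write */
			b->seqno = seqno;      /* retires when GPU passes it  */
			b->active = 1;
		}
	}
}

int main(void)
{
	struct buf bufs[2] = { { .write_domain = 0x2 }, { .write_domain = 0x8 } };

	process_flushing_list(bufs, 2, 0x2, 42);
	printf("buf0: active=%d seqno=%u, buf1: active=%d\n",
	       bufs[0].active, bufs[0].seqno, bufs[1].active);
	return 0;
}
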
list_del_init(&obj_priv->gpu_write_list); + i915_gem_object_move_to_active(obj, seqno); + + /* update the fence lru list */ + if (obj_priv->fence_reg != I915_FENCE_REG_NONE) + list_move_tail(&obj_priv->fence_list, + &dev_priv->mm.fence_list); + + trace_i915_gem_object_change_domain(obj, + obj->read_domains, + old_write_domain); + } + } +} + /** * Creates a new sequence number, emitting a write of it to the status page * plus an interrupt, which will trigger i915_user_interrupt_handler. @@ -1620,29 +1648,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv, /* Associate any objects on the flushing list matching the write * domain we're flushing with our flush. */ - if (flush_domains != 0) { - struct drm_i915_gem_object *obj_priv, *next; - - list_for_each_entry_safe(obj_priv, next, - &dev_priv->mm.gpu_write_list, - gpu_write_list) { - struct drm_gem_object *obj = obj_priv->obj; - - if ((obj->write_domain & flush_domains) == - obj->write_domain) { - uint32_t old_write_domain = obj->write_domain; - - obj->write_domain = 0; - list_del_init(&obj_priv->gpu_write_list); - i915_gem_object_move_to_active(obj, seqno); - - trace_i915_gem_object_change_domain(obj, - obj->read_domains, - old_write_domain); - } - } - - } + if (flush_domains != 0) + i915_gem_process_flushing_list(dev, flush_domains, seqno); if (!dev_priv->mm.suspended) { mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD); @@ -1822,7 +1829,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible) return -EIO; if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) ier = I915_READ(DEIER) | I915_READ(GTIER); else ier = I915_READ(IER); @@ -1991,6 +1998,7 @@ int i915_gem_object_unbind(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = obj->driver_private; int ret = 0; @@ -2046,8 +2054,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj) } /* Remove ourselves from the LRU list if present. */ + spin_lock(&dev_priv->mm.active_list_lock); if (!list_empty(&obj_priv->list)) list_del_init(&obj_priv->list); + spin_unlock(&dev_priv->mm.active_list_lock); if (i915_gem_object_is_purgeable(obj_priv)) i915_gem_object_truncate(obj); @@ -2085,11 +2095,34 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size) } static int +i915_gpu_idle(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + bool lists_empty; + uint32_t seqno; + + spin_lock(&dev_priv->mm.active_list_lock); + lists_empty = list_empty(&dev_priv->mm.flushing_list) && + list_empty(&dev_priv->mm.active_list); + spin_unlock(&dev_priv->mm.active_list_lock); + + if (lists_empty) + return 0; + + /* Flush everything onto the inactive list. 
*/ + i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); + seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS); + if (seqno == 0) + return -ENOMEM; + + return i915_wait_request(dev, seqno); +} + +static int i915_gem_evict_everything(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; int ret; - uint32_t seqno; bool lists_empty; spin_lock(&dev_priv->mm.active_list_lock); @@ -2102,12 +2135,7 @@ i915_gem_evict_everything(struct drm_device *dev) return -ENOSPC; /* Flush everything (on to the inactive lists) and evict */ - i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); - seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS); - if (seqno == 0) - return -ENOMEM; - - ret = i915_wait_request(dev, seqno); + ret = i915_gpu_idle(dev); if (ret) return ret; @@ -2265,6 +2293,28 @@ i915_gem_object_get_pages(struct drm_gem_object *obj, return 0; } +static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg) +{ + struct drm_gem_object *obj = reg->obj; + struct drm_device *dev = obj->dev; + drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + int regnum = obj_priv->fence_reg; + uint64_t val; + + val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) & + 0xfffff000) << 32; + val |= obj_priv->gtt_offset & 0xfffff000; + val |= (uint64_t)((obj_priv->stride / 128) - 1) << + SANDYBRIDGE_FENCE_PITCH_SHIFT; + + if (obj_priv->tiling_mode == I915_TILING_Y) + val |= 1 << I965_FENCE_TILING_Y_SHIFT; + val |= I965_FENCE_REG_VALID; + + I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val); +} + static void i965_write_fence_reg(struct drm_i915_fence_reg *reg) { struct drm_gem_object *obj = reg->obj; @@ -2361,6 +2411,58 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); } +static int i915_find_fence_reg(struct drm_device *dev) +{ + struct drm_i915_fence_reg *reg = NULL; + struct drm_i915_gem_object *obj_priv = NULL; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_gem_object *obj = NULL; + int i, avail, ret; + + /* First try to find a free reg */ + avail = 0; + for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { + reg = &dev_priv->fence_regs[i]; + if (!reg->obj) + return i; + + obj_priv = reg->obj->driver_private; + if (!obj_priv->pin_count) + avail++; + } + + if (avail == 0) + return -ENOSPC; + + /* None available, try to steal one or wait for a user to finish */ + i = I915_FENCE_REG_NONE; + list_for_each_entry(obj_priv, &dev_priv->mm.fence_list, + fence_list) { + obj = obj_priv->obj; + + if (obj_priv->pin_count) + continue; + + /* found one! */ + i = obj_priv->fence_reg; + break; + } + + BUG_ON(i == I915_FENCE_REG_NONE); + + /* We only have a reference on obj from the active list. put_fence_reg + * might drop that one, causing a use-after-free in it. So hold a + * private reference to obj like the other callers of put_fence_reg + * (set_tiling ioctl) do. 
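
The new i915_gpu_idle() helper introduced above collapses the old hand-rolled seqno polling into one pattern: if anything is still outstanding, emit a full flush, attach a request to it, and wait for that request. A hedged sketch of that shape; struct gpu and the stub helpers are invented for the example and are not driver API:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define GPU_DOMAINS 0xffffffffu

struct gpu {
	int      outstanding;    /* stands in for the active/flushing lists */
	uint32_t last_seqno;
	uint32_t completed;
};

static int lists_empty(const struct gpu *g) { return !g->outstanding; }

static void gpu_flush(struct gpu *g, uint32_t invalidate, uint32_t flush)
{
	(void)g; (void)invalidate; (void)flush;   /* no-op stand-in */
}

static uint32_t gpu_add_request(struct gpu *g) { return ++g->last_seqno; }

static int gpu_wait_request(struct gpu *g, uint32_t seqno)
{
	g->completed = seqno;    /* pretend the GPU caught up */
	g->outstanding = 0;
	return 0;
}

/* One flush, one request, one wait -- the shape of i915_gpu_idle(). */
static int gpu_idle(struct gpu *g)
{
	uint32_t seqno;

	if (lists_empty(g))
		return 0;

	gpu_flush(g, GPU_DOMAINS, GPU_DOMAINS);
	seqno = gpu_add_request(g);
	if (seqno == 0)          /* request allocation failed */
		return -ENOMEM;

	return gpu_wait_request(g, seqno);
}

int main(void)
{
	struct gpu g = { .outstanding = 1 };

	printf("idle: %d, completed seqno: %u\n", gpu_idle(&g), g.completed);
	return 0;
}
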
*/ + drm_gem_object_reference(obj); + ret = i915_gem_object_put_fence_reg(obj); + drm_gem_object_unreference(obj); + if (ret != 0) + return ret; + + return i; +} + /** * i915_gem_object_get_fence_reg - set up a fence reg for an object * @obj: object to map through a fence reg @@ -2381,8 +2483,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = obj->driver_private; struct drm_i915_fence_reg *reg = NULL; - struct drm_i915_gem_object *old_obj_priv = NULL; - int i, ret, avail; + int ret; /* Just update our place in the LRU if our fence is getting used. */ if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { @@ -2410,86 +2511,27 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj) break; } - /* First try to find a free reg */ - avail = 0; - for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { - reg = &dev_priv->fence_regs[i]; - if (!reg->obj) - break; - - old_obj_priv = reg->obj->driver_private; - if (!old_obj_priv->pin_count) - avail++; - } - - /* None available, try to steal one or wait for a user to finish */ - if (i == dev_priv->num_fence_regs) { - struct drm_gem_object *old_obj = NULL; - - if (avail == 0) - return -ENOSPC; - - list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list, - fence_list) { - old_obj = old_obj_priv->obj; - - if (old_obj_priv->pin_count) - continue; - - /* Take a reference, as otherwise the wait_rendering - * below may cause the object to get freed out from - * under us. - */ - drm_gem_object_reference(old_obj); - - /* i915 uses fences for GPU access to tiled buffers */ - if (IS_I965G(dev) || !old_obj_priv->active) - break; - - /* This brings the object to the head of the LRU if it - * had been written to. The only way this should - * result in us waiting longer than the expected - * optimal amount of time is if there was a - * fence-using buffer later that was read-only. - */ - i915_gem_object_flush_gpu_write_domain(old_obj); - ret = i915_gem_object_wait_rendering(old_obj); - if (ret != 0) { - drm_gem_object_unreference(old_obj); - return ret; - } - - break; - } - - /* - * Zap this virtual mapping so we can set up a fence again - * for this object next time we need it. 
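
i915_find_fence_reg(), completed in the hunk above, splits fence allocation into two passes: take any free register, otherwise steal the least-recently-used register whose owner is not pinned, and fail with -ENOSPC when everything is pinned. A toy allocator with the same shape; the fixed table and explicit LRU order array are simplifications of the driver's reg array and fence_list:

#include <stdio.h>

#define NUM_FENCES 16

struct fence { int in_use; int pinned; };

static int find_fence_reg(const struct fence *regs, const int *lru, int n)
{
	int i;

	for (i = 0; i < n; i++)              /* pass 1: any free register? */
		if (!regs[i].in_use)
			return i;

	for (i = 0; i < n; i++) {            /* pass 2: steal LRU, skip pinned */
		int slot = lru[i];

		if (!regs[slot].pinned)
			return slot;
	}

	return -1;                           /* all pinned: caller sees -ENOSPC */
}

int main(void)
{
	struct fence regs[NUM_FENCES];
	int lru[NUM_FENCES];

	for (int i = 0; i < NUM_FENCES; i++) {
		regs[i].in_use = 1;          /* every register is occupied    */
		regs[i].pinned = 0;
		lru[i] = i;                  /* slot 0 is least recently used */
	}
	regs[0].pinned = 1;                  /* LRU owner pinned, steal next  */

	printf("register to steal: %d\n", find_fence_reg(regs, lru, NUM_FENCES));
	return 0;
}
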
- */ - i915_gem_release_mmap(old_obj); - - i = old_obj_priv->fence_reg; - reg = &dev_priv->fence_regs[i]; - - old_obj_priv->fence_reg = I915_FENCE_REG_NONE; - list_del_init(&old_obj_priv->fence_list); - - drm_gem_object_unreference(old_obj); - } + ret = i915_find_fence_reg(dev); + if (ret < 0) + return ret; - obj_priv->fence_reg = i; + obj_priv->fence_reg = ret; + reg = &dev_priv->fence_regs[obj_priv->fence_reg]; list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list); reg->obj = obj; - if (IS_I965G(dev)) + if (IS_GEN6(dev)) + sandybridge_write_fence_reg(reg); + else if (IS_I965G(dev)) i965_write_fence_reg(reg); else if (IS_I9XX(dev)) i915_write_fence_reg(reg); else i830_write_fence_reg(reg); - trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode); + trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg, + obj_priv->tiling_mode); return 0; } @@ -2508,9 +2550,12 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) drm_i915_private_t *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = obj->driver_private; - if (IS_I965G(dev)) + if (IS_GEN6(dev)) { + I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + + (obj_priv->fence_reg * 8), 0); + } else if (IS_I965G(dev)) { I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); - else { + } else { uint32_t fence_reg; if (obj_priv->fence_reg < 8) @@ -2544,6 +2589,12 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj) if (obj_priv->fence_reg == I915_FENCE_REG_NONE) return 0; + /* If we've changed tiling, GTT-mappings of the object + * need to re-fault to ensure that the correct fence register + * setup is in place. + */ + i915_gem_release_mmap(obj); + /* On the i915, GPU access to tiled buffers is via a fence, * therefore we must wait for any outstanding access to complete * before clearing the fence. @@ -2552,12 +2603,12 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj) int ret; i915_gem_object_flush_gpu_write_domain(obj); - i915_gem_object_flush_gtt_write_domain(obj); ret = i915_gem_object_wait_rendering(obj); if (ret != 0) return ret; } + i915_gem_object_flush_gtt_write_domain(obj); i915_gem_clear_fence_reg (obj); return 0; @@ -2697,7 +2748,6 @@ static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; - uint32_t seqno; uint32_t old_write_domain; if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) @@ -2706,9 +2756,8 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) /* Queue the GPU write cache flushing we need. */ old_write_domain = obj->write_domain; i915_gem_flush(dev, 0, obj->write_domain); - seqno = i915_add_request(dev, NULL, obj->write_domain); + (void) i915_add_request(dev, NULL, obj->write_domain); BUG_ON(obj->write_domain); - i915_gem_object_move_to_active(obj, seqno); trace_i915_gem_object_change_domain(obj, obj->read_domains, @@ -3247,7 +3296,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, obj_priv->tiling_mode != I915_TILING_NONE; /* Check fence reg constraints and rebind if necessary */ - if (need_fence && !i915_obj_fenceable(dev, obj)) + if (need_fence && !i915_gem_object_fence_offset_ok(obj, + obj_priv->tiling_mode)) i915_gem_object_unbind(obj); /* Choose the GTT offset for our buffer and put it there. 
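
sandybridge_write_fence_reg(), dispatched to in the hunk above for IS_GEN6 parts, packs the fence into one 64-bit value: the page-aligned end and start addresses in the upper and lower halves, the pitch in 128-byte units, a Y-tiling flag and a valid bit. The sketch below reproduces that packing with locally defined shift constants chosen for the example; the driver's real shift and flag values come from i915_reg.h, which this hunk does not restate, so treat them as placeholders:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder bit positions for the sketch only. */
#define FENCE_PITCH_SHIFT    32
#define FENCE_TILING_Y_SHIFT 1
#define FENCE_REG_VALID      (1u << 0)

static uint64_t pack_fence(uint32_t gtt_offset, uint32_t size,
                           uint32_t stride, int tiling_y)
{
	uint64_t val;

	/* end of the object, page aligned, in the upper 32 bits */
	val = (uint64_t)((gtt_offset + size - 4096) & 0xfffff000) << 32;
	/* start of the object, page aligned, in the lower 32 bits */
	val |= gtt_offset & 0xfffff000;
	/* pitch is programmed in 128-byte units, minus one */
	val |= (uint64_t)((stride / 128) - 1) << FENCE_PITCH_SHIFT;

	if (tiling_y)
		val |= 1ull << FENCE_TILING_Y_SHIFT;
	val |= FENCE_REG_VALID;

	return val;
}

int main(void)
{
	/* 1 MiB object at GTT offset 16 MiB, 512-byte pitch, X-tiled */
	printf("fence value: 0x%016" PRIx64 "\n",
	       pack_fence(16u << 20, 1u << 20, 512, 0));
	return 0;
}
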
*/ @@ -3317,6 +3367,16 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, } /* Validate that the target is in a valid r/w GPU domain */ + if (reloc->write_domain & (reloc->write_domain - 1)) { + DRM_ERROR("reloc with multiple write domains: " + "obj %p target %d offset %d " + "read %08x write %08x", + obj, reloc->target_handle, + (int) reloc->offset, + reloc->read_domains, + reloc->write_domain); + return -EINVAL; + } if (reloc->write_domain & I915_GEM_DOMAIN_CPU || reloc->read_domains & I915_GEM_DOMAIN_CPU) { DRM_ERROR("reloc with read/write CPU domains: " @@ -4445,8 +4505,7 @@ int i915_gem_idle(struct drm_device *dev) { drm_i915_private_t *dev_priv = dev->dev_private; - uint32_t seqno, cur_seqno, last_seqno; - int stuck, ret; + int ret; mutex_lock(&dev->struct_mutex); @@ -4455,115 +4514,36 @@ i915_gem_idle(struct drm_device *dev) return 0; } - /* Hack! Don't let anybody do execbuf while we don't control the chip. - * We need to replace this with a semaphore, or something. - */ - dev_priv->mm.suspended = 1; - del_timer(&dev_priv->hangcheck_timer); - - /* Cancel the retire work handler, wait for it to finish if running - */ - mutex_unlock(&dev->struct_mutex); - cancel_delayed_work_sync(&dev_priv->mm.retire_work); - mutex_lock(&dev->struct_mutex); - - i915_kernel_lost_context(dev); - - /* Flush the GPU along with all non-CPU write domains - */ - i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); - seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS); - - if (seqno == 0) { + ret = i915_gpu_idle(dev); + if (ret) { mutex_unlock(&dev->struct_mutex); - return -ENOMEM; + return ret; } - dev_priv->mm.waiting_gem_seqno = seqno; - last_seqno = 0; - stuck = 0; - for (;;) { - cur_seqno = i915_get_gem_seqno(dev); - if (i915_seqno_passed(cur_seqno, seqno)) - break; - if (last_seqno == cur_seqno) { - if (stuck++ > 100) { - DRM_ERROR("hardware wedged\n"); - atomic_set(&dev_priv->mm.wedged, 1); - DRM_WAKEUP(&dev_priv->irq_queue); - break; - } + /* Under UMS, be paranoid and evict. */ + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + ret = i915_gem_evict_from_inactive_list(dev); + if (ret) { + mutex_unlock(&dev->struct_mutex); + return ret; } - msleep(10); - last_seqno = cur_seqno; - } - dev_priv->mm.waiting_gem_seqno = 0; - - i915_gem_retire_requests(dev); - - spin_lock(&dev_priv->mm.active_list_lock); - if (!atomic_read(&dev_priv->mm.wedged)) { - /* Active and flushing should now be empty as we've - * waited for a sequence higher than any pending execbuffer - */ - WARN_ON(!list_empty(&dev_priv->mm.active_list)); - WARN_ON(!list_empty(&dev_priv->mm.flushing_list)); - /* Request should now be empty as we've also waited - * for the last request in the list - */ - WARN_ON(!list_empty(&dev_priv->mm.request_list)); } - /* Empty the active and flushing lists to inactive. If there's - * anything left at this point, it means that we're wedged and - * nothing good's going to happen by leaving them there. So strip - * the GPU domains and just stuff them onto inactive. + /* Hack! Don't let anybody do execbuf while we don't control the chip. + * We need to replace this with a semaphore, or something. + * And not confound mm.suspended! 
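
The added relocation check rejects a write_domain with more than one bit set using the classic x & (x - 1) test: clearing the lowest set bit leaves zero only when at most one domain was requested. In isolation:

#include <stdint.h>
#include <stdio.h>

/* Nonzero exactly when two or more bits are set in x. */
static int has_multiple_bits(uint32_t x)
{
	return (x & (x - 1)) != 0;
}

int main(void)
{
	printf("0x02 -> %d\n", has_multiple_bits(0x02)); /* one domain:  ok     */
	printf("0x06 -> %d\n", has_multiple_bits(0x06)); /* two domains: reject */
	printf("0x00 -> %d\n", has_multiple_bits(0x00)); /* no domain:   ok     */
	return 0;
}
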
*/ - while (!list_empty(&dev_priv->mm.active_list)) { - struct drm_gem_object *obj; - uint32_t old_write_domain; - - obj = list_first_entry(&dev_priv->mm.active_list, - struct drm_i915_gem_object, - list)->obj; - old_write_domain = obj->write_domain; - obj->write_domain &= ~I915_GEM_GPU_DOMAINS; - i915_gem_object_move_to_inactive(obj); - - trace_i915_gem_object_change_domain(obj, - obj->read_domains, - old_write_domain); - } - spin_unlock(&dev_priv->mm.active_list_lock); - - while (!list_empty(&dev_priv->mm.flushing_list)) { - struct drm_gem_object *obj; - uint32_t old_write_domain; - - obj = list_first_entry(&dev_priv->mm.flushing_list, - struct drm_i915_gem_object, - list)->obj; - old_write_domain = obj->write_domain; - obj->write_domain &= ~I915_GEM_GPU_DOMAINS; - i915_gem_object_move_to_inactive(obj); - - trace_i915_gem_object_change_domain(obj, - obj->read_domains, - old_write_domain); - } - - - /* Move all inactive buffers out of the GTT. */ - ret = i915_gem_evict_from_inactive_list(dev); - WARN_ON(!list_empty(&dev_priv->mm.inactive_list)); - if (ret) { - mutex_unlock(&dev->struct_mutex); - return ret; - } + dev_priv->mm.suspended = 1; + del_timer(&dev_priv->hangcheck_timer); + i915_kernel_lost_context(dev); i915_gem_cleanup_ringbuffer(dev); + mutex_unlock(&dev->struct_mutex); + /* Cancel the retire work handler, which should be idle now. */ + cancel_delayed_work_sync(&dev_priv->mm.retire_work); + return 0; } @@ -4607,8 +4587,13 @@ i915_gem_init_hws(struct drm_device *dev) } dev_priv->hws_obj = obj; memset(dev_priv->hw_status_page, 0, PAGE_SIZE); - I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); - I915_READ(HWS_PGA); /* posting read */ + if (IS_GEN6(dev)) { + I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr); + I915_READ(HWS_PGA_GEN6); /* posting read */ + } else { + I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); + I915_READ(HWS_PGA); /* posting read */ + } DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); return 0; @@ -4850,7 +4835,8 @@ i915_gem_load(struct drm_device *dev) spin_unlock(&shrink_list_lock); /* Old X drivers will take 0-2 for front, back, depth buffers */ - dev_priv->fence_reg_start = 3; + if (!drm_core_check_feature(dev, DRIVER_MODESET)) + dev_priv->fence_reg_start = 3; if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) dev_priv->num_fence_regs = 16; diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index df278b2685b..b5c55d88ff7 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -25,8 +25,6 @@ * */ -#include <linux/acpi.h> -#include <linux/pnp.h> #include "linux/string.h" #include "linux/bitops.h" #include "drmP.h" @@ -83,120 +81,6 @@ * to match what the GPU expects. */ -#define MCHBAR_I915 0x44 -#define MCHBAR_I965 0x48 -#define MCHBAR_SIZE (4*4096) - -#define DEVEN_REG 0x54 -#define DEVEN_MCHBAR_EN (1 << 28) - -/* Allocate space for the MCH regs if needed, return nonzero on error */ -static int -intel_alloc_mchbar_resource(struct drm_device *dev) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - int reg = IS_I965G(dev) ? 
MCHBAR_I965 : MCHBAR_I915; - u32 temp_lo, temp_hi = 0; - u64 mchbar_addr; - int ret = 0; - - if (IS_I965G(dev)) - pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi); - pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo); - mchbar_addr = ((u64)temp_hi << 32) | temp_lo; - - /* If ACPI doesn't have it, assume we need to allocate it ourselves */ -#ifdef CONFIG_PNP - if (mchbar_addr && - pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) { - ret = 0; - goto out; - } -#endif - - /* Get some space for it */ - ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res, - MCHBAR_SIZE, MCHBAR_SIZE, - PCIBIOS_MIN_MEM, - 0, pcibios_align_resource, - dev_priv->bridge_dev); - if (ret) { - DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret); - dev_priv->mch_res.start = 0; - goto out; - } - - if (IS_I965G(dev)) - pci_write_config_dword(dev_priv->bridge_dev, reg + 4, - upper_32_bits(dev_priv->mch_res.start)); - - pci_write_config_dword(dev_priv->bridge_dev, reg, - lower_32_bits(dev_priv->mch_res.start)); -out: - return ret; -} - -/* Setup MCHBAR if possible, return true if we should disable it again */ -static bool -intel_setup_mchbar(struct drm_device *dev) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; - u32 temp; - bool need_disable = false, enabled; - - if (IS_I915G(dev) || IS_I915GM(dev)) { - pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); - enabled = !!(temp & DEVEN_MCHBAR_EN); - } else { - pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); - enabled = temp & 1; - } - - /* If it's already enabled, don't have to do anything */ - if (enabled) - goto out; - - if (intel_alloc_mchbar_resource(dev)) - goto out; - - need_disable = true; - - /* Space is allocated or reserved, so enable it. */ - if (IS_I915G(dev) || IS_I915GM(dev)) { - pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, - temp | DEVEN_MCHBAR_EN); - } else { - pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); - pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1); - } -out: - return need_disable; -} - -static void -intel_teardown_mchbar(struct drm_device *dev, bool disable) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915; - u32 temp; - - if (disable) { - if (IS_I915G(dev) || IS_I915GM(dev)) { - pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp); - temp &= ~DEVEN_MCHBAR_EN; - pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp); - } else { - pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp); - temp &= ~1; - pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp); - } - } - - if (dev_priv->mch_res.start) - release_resource(&dev_priv->mch_res); -} - /** * Detects bit 6 swizzling of address lookup between IGD access and CPU * access through main memory. @@ -207,9 +91,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) drm_i915_private_t *dev_priv = dev->dev_private; uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; - bool need_disable; - if (IS_IRONLAKE(dev)) { + if (IS_IRONLAKE(dev) || IS_GEN6(dev)) { /* On Ironlake whatever DRAM config, GPU always do * same swizzling setup. 
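
The detection code above only decides which swizzling mode is in effect; the mode itself describes how bit 6 of a linear offset gets XORed with higher address bits on the way to memory, which CPU-side copies of tiled buffers must replicate. A small illustration of the common 9/10 mode; the helper name is invented for the example:

#include <stdint.h>
#include <stdio.h>

/* Bit-6 swizzle, 9/10 mode: bit 6 is flipped when bits 9 and 10 differ. */
static uint32_t swizzle_bit6_9_10(uint32_t offset)
{
	uint32_t bit9  = (offset >> 9)  & 1;
	uint32_t bit10 = (offset >> 10) & 1;

	return offset ^ ((bit9 ^ bit10) << 6);
}

int main(void)
{
	/* bit 9 set, bit 10 clear: bit 6 flips */
	printf("0x200 -> 0x%03x\n", swizzle_bit6_9_10(0x200));
	/* bits 9 and 10 both set: XOR is zero, offset unchanged */
	printf("0x600 -> 0x%03x\n", swizzle_bit6_9_10(0x600));
	return 0;
}
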
*/ @@ -224,9 +107,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) } else if (IS_MOBILE(dev)) { uint32_t dcc; - /* Try to make sure MCHBAR is enabled before poking at it */ - need_disable = intel_setup_mchbar(dev); - /* On mobile 9xx chipsets, channel interleave by the CPU is * determined by DCC. For single-channel, neither the CPU * nor the GPU do swizzling. For dual channel interleaved, @@ -266,8 +146,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; } - - intel_teardown_mchbar(dev, need_disable); } else { /* The 965, G33, and newer, have a very flexible memory * configuration. It will enable dual-channel mode @@ -302,39 +180,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) dev_priv->mm.bit_6_swizzle_y = swizzle_y; } - -/** - * Returns whether an object is currently fenceable. If not, it may need - * to be unbound and have its pitch adjusted. - */ -bool -i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj) -{ - struct drm_i915_gem_object *obj_priv = obj->driver_private; - - if (IS_I965G(dev)) { - /* The 965 can have fences at any page boundary. */ - if (obj->size & 4095) - return false; - return true; - } else if (IS_I9XX(dev)) { - if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK) - return false; - } else { - if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK) - return false; - } - - /* Power of two sized... */ - if (obj->size & (obj->size - 1)) - return false; - - /* Objects must be size aligned as well */ - if (obj_priv->gtt_offset & (obj->size - 1)) - return false; - return true; -} - /* Check pitch constriants for all chips & tiling formats */ bool i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) @@ -391,7 +236,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) return true; } -static bool +bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) { struct drm_device *dev = obj->dev; @@ -438,9 +283,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, obj_priv = obj->driver_private; if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(obj); return -EINVAL; } @@ -493,12 +336,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, goto err; } - /* If we've changed tiling, GTT-mappings of the object - * need to re-fault to ensure that the correct fence register - * setup is in place. 
- */ - i915_gem_release_mmap(obj); - obj_priv->tiling_mode = args->tiling_mode; obj_priv->stride = args->stride; } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index a17d6bdfe63..5388354da0d 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -166,7 +166,7 @@ void intel_enable_asle (struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) ironlake_enable_display_irq(dev_priv, DE_GSE); else i915_enable_pipestat(dev_priv, 1, @@ -269,6 +269,57 @@ static void i915_hotplug_work_func(struct work_struct *work) drm_sysfs_hotplug_event(dev); } +static void i915_handle_rps_change(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + u32 busy_up, busy_down, max_avg, min_avg; + u16 rgvswctl; + u8 new_delay = dev_priv->cur_delay; + + I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG); + busy_up = I915_READ(RCPREVBSYTUPAVG); + busy_down = I915_READ(RCPREVBSYTDNAVG); + max_avg = I915_READ(RCBMAXAVG); + min_avg = I915_READ(RCBMINAVG); + + /* Handle RCS change request from hw */ + if (busy_up > max_avg) { + if (dev_priv->cur_delay != dev_priv->max_delay) + new_delay = dev_priv->cur_delay - 1; + if (new_delay < dev_priv->max_delay) + new_delay = dev_priv->max_delay; + } else if (busy_down < min_avg) { + if (dev_priv->cur_delay != dev_priv->min_delay) + new_delay = dev_priv->cur_delay + 1; + if (new_delay > dev_priv->min_delay) + new_delay = dev_priv->min_delay; + } + + DRM_DEBUG("rps change requested: %d -> %d\n", + dev_priv->cur_delay, new_delay); + + rgvswctl = I915_READ(MEMSWCTL); + if (rgvswctl & MEMCTL_CMD_STS) { + DRM_ERROR("gpu busy, RCS change rejected\n"); + return; /* still busy with another command */ + } + + /* Program the new state */ + rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | + (new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; + I915_WRITE(MEMSWCTL, rgvswctl); + POSTING_READ(MEMSWCTL); + + rgvswctl |= MEMCTL_CMD_STS; + I915_WRITE(MEMSWCTL, rgvswctl); + + dev_priv->cur_delay = new_delay; + + DRM_DEBUG("rps changed\n"); + + return; +} + irqreturn_t ironlake_irq_handler(struct drm_device *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; @@ -331,6 +382,11 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev) queue_work(dev_priv->wq, &dev_priv->hotplug_work); } + if (de_iir & DE_PCU_EVENT) { + I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS)); + i915_handle_rps_change(dev); + } + /* should clear PCH hotplug event before clear CPU irq */ I915_WRITE(SDEIIR, pch_iir); I915_WRITE(GTIIR, gt_iir); @@ -376,6 +432,121 @@ static void i915_error_work_func(struct work_struct *work) } } +static struct drm_i915_error_object * +i915_error_object_create(struct drm_device *dev, + struct drm_gem_object *src) +{ + struct drm_i915_error_object *dst; + struct drm_i915_gem_object *src_priv; + int page, page_count; + + if (src == NULL) + return NULL; + + src_priv = src->driver_private; + if (src_priv->pages == NULL) + return NULL; + + page_count = src->size / PAGE_SIZE; + + dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC); + if (dst == NULL) + return NULL; + + for (page = 0; page < page_count; page++) { + void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC); + if (d == NULL) + goto unwind; + s = kmap_atomic(src_priv->pages[page], KM_USER0); + memcpy(d, s, PAGE_SIZE); + kunmap_atomic(s, KM_USER0); + dst->pages[page] = d; + } + dst->page_count = page_count; + 
dst->gtt_offset = src_priv->gtt_offset; + + return dst; + +unwind: + while (page--) + kfree(dst->pages[page]); + kfree(dst); + return NULL; +} + +static void +i915_error_object_free(struct drm_i915_error_object *obj) +{ + int page; + + if (obj == NULL) + return; + + for (page = 0; page < obj->page_count; page++) + kfree(obj->pages[page]); + + kfree(obj); +} + +static void +i915_error_state_free(struct drm_device *dev, + struct drm_i915_error_state *error) +{ + i915_error_object_free(error->batchbuffer[0]); + i915_error_object_free(error->batchbuffer[1]); + i915_error_object_free(error->ringbuffer); + kfree(error->active_bo); + kfree(error); +} + +static u32 +i915_get_bbaddr(struct drm_device *dev, u32 *ring) +{ + u32 cmd; + + if (IS_I830(dev) || IS_845G(dev)) + cmd = MI_BATCH_BUFFER; + else if (IS_I965G(dev)) + cmd = (MI_BATCH_BUFFER_START | (2 << 6) | + MI_BATCH_NON_SECURE_I965); + else + cmd = (MI_BATCH_BUFFER_START | (2 << 6)); + + return ring[0] == cmd ? ring[1] : 0; +} + +static u32 +i915_ringbuffer_last_batch(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 head, bbaddr; + u32 *ring; + + /* Locate the current position in the ringbuffer and walk back + * to find the most recently dispatched batch buffer. + */ + bbaddr = 0; + head = I915_READ(PRB0_HEAD) & HEAD_ADDR; + ring = (u32 *)(dev_priv->ring.virtual_start + head); + + while (--ring >= (u32 *)dev_priv->ring.virtual_start) { + bbaddr = i915_get_bbaddr(dev, ring); + if (bbaddr) + break; + } + + if (bbaddr == 0) { + ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size); + while (--ring >= (u32 *)dev_priv->ring.virtual_start) { + bbaddr = i915_get_bbaddr(dev, ring); + if (bbaddr) + break; + } + } + + return bbaddr; +} + /** * i915_capture_error_state - capture an error record for later analysis * @dev: drm device @@ -388,19 +559,26 @@ static void i915_error_work_func(struct work_struct *work) static void i915_capture_error_state(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv; struct drm_i915_error_state *error; + struct drm_gem_object *batchbuffer[2]; unsigned long flags; + u32 bbaddr; + int count; spin_lock_irqsave(&dev_priv->error_lock, flags); - if (dev_priv->first_error) - goto out; + error = dev_priv->first_error; + spin_unlock_irqrestore(&dev_priv->error_lock, flags); + if (error) + return; error = kmalloc(sizeof(*error), GFP_ATOMIC); if (!error) { - DRM_DEBUG_DRIVER("out ot memory, not capturing error state\n"); - goto out; + DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); + return; } + error->seqno = i915_get_gem_seqno(dev); error->eir = I915_READ(EIR); error->pgtbl_er = I915_READ(PGTBL_ER); error->pipeastat = I915_READ(PIPEASTAT); @@ -411,6 +589,7 @@ static void i915_capture_error_state(struct drm_device *dev) error->ipehr = I915_READ(IPEHR); error->instdone = I915_READ(INSTDONE); error->acthd = I915_READ(ACTHD); + error->bbaddr = 0; } else { error->ipeir = I915_READ(IPEIR_I965); error->ipehr = I915_READ(IPEHR_I965); @@ -418,14 +597,101 @@ static void i915_capture_error_state(struct drm_device *dev) error->instps = I915_READ(INSTPS); error->instdone1 = I915_READ(INSTDONE1); error->acthd = I915_READ(ACTHD_I965); + error->bbaddr = I915_READ64(BB_ADDR); } - do_gettimeofday(&error->time); + bbaddr = i915_ringbuffer_last_batch(dev); + + /* Grab the current batchbuffer, most likely to have crashed. 
*/ + batchbuffer[0] = NULL; + batchbuffer[1] = NULL; + count = 0; + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { + struct drm_gem_object *obj = obj_priv->obj; + + if (batchbuffer[0] == NULL && + bbaddr >= obj_priv->gtt_offset && + bbaddr < obj_priv->gtt_offset + obj->size) + batchbuffer[0] = obj; + + if (batchbuffer[1] == NULL && + error->acthd >= obj_priv->gtt_offset && + error->acthd < obj_priv->gtt_offset + obj->size && + batchbuffer[0] != obj) + batchbuffer[1] = obj; + + count++; + } - dev_priv->first_error = error; + /* We need to copy these to an anonymous buffer as the simplest + * method to avoid being overwritten by userpace. + */ + error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]); + error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]); + + /* Record the ringbuffer */ + error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj); + + /* Record buffers on the active list. */ + error->active_bo = NULL; + error->active_bo_count = 0; + + if (count) + error->active_bo = kmalloc(sizeof(*error->active_bo)*count, + GFP_ATOMIC); + + if (error->active_bo) { + int i = 0; + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) { + struct drm_gem_object *obj = obj_priv->obj; + + error->active_bo[i].size = obj->size; + error->active_bo[i].name = obj->name; + error->active_bo[i].seqno = obj_priv->last_rendering_seqno; + error->active_bo[i].gtt_offset = obj_priv->gtt_offset; + error->active_bo[i].read_domains = obj->read_domains; + error->active_bo[i].write_domain = obj->write_domain; + error->active_bo[i].fence_reg = obj_priv->fence_reg; + error->active_bo[i].pinned = 0; + if (obj_priv->pin_count > 0) + error->active_bo[i].pinned = 1; + if (obj_priv->user_pin_count > 0) + error->active_bo[i].pinned = -1; + error->active_bo[i].tiling = obj_priv->tiling_mode; + error->active_bo[i].dirty = obj_priv->dirty; + error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED; + + if (++i == count) + break; + } + error->active_bo_count = i; + } + + do_gettimeofday(&error->time); -out: + spin_lock_irqsave(&dev_priv->error_lock, flags); + if (dev_priv->first_error == NULL) { + dev_priv->first_error = error; + error = NULL; + } spin_unlock_irqrestore(&dev_priv->error_lock, flags); + + if (error) + i915_error_state_free(dev, error); +} + +void i915_destroy_error_state(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_error_state *error; + + spin_lock(&dev_priv->error_lock); + error = dev_priv->first_error; + dev_priv->first_error = NULL; + spin_unlock(&dev_priv->error_lock); + + if (error) + i915_error_state_free(dev, error); } /** @@ -576,7 +842,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) atomic_inc(&dev_priv->irq_received); - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) return ironlake_irq_handler(dev); iir = I915_READ(IIR); @@ -737,7 +1003,7 @@ void i915_user_irq_get(struct drm_device *dev) spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) { - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT); else i915_enable_irq(dev_priv, I915_USER_INTERRUPT); @@ -753,7 +1019,7 @@ void i915_user_irq_put(struct drm_device *dev) spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) { - if (IS_IRONLAKE(dev)) + if 
(HAS_PCH_SPLIT(dev)) ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT); else i915_disable_irq(dev_priv, I915_USER_INTERRUPT); @@ -861,7 +1127,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe) return -EINVAL; spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) ironlake_enable_display_irq(dev_priv, (pipe == 0) ? DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); else if (IS_I965G(dev)) @@ -883,7 +1149,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe) unsigned long irqflags; spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) ironlake_disable_display_irq(dev_priv, (pipe == 0) ? DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); else @@ -897,7 +1163,7 @@ void i915_enable_interrupt (struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (!IS_IRONLAKE(dev)) + if (!HAS_PCH_SPLIT(dev)) opregion_enable_asle(dev); dev_priv->irq_enabled = 1; } @@ -973,7 +1239,11 @@ void i915_hangcheck_elapsed(unsigned long data) struct drm_device *dev = (struct drm_device *)data; drm_i915_private_t *dev_priv = dev->dev_private; uint32_t acthd; - + + /* No reset support on this chip yet. */ + if (IS_GEN6(dev)) + return; + if (!IS_I965G(dev)) acthd = I915_READ(ACTHD); else @@ -1064,6 +1334,13 @@ static int ironlake_irq_postinstall(struct drm_device *dev) I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg); (void) I915_READ(SDEIER); + if (IS_IRONLAKE_M(dev)) { + /* Clear & enable PCU event interrupts */ + I915_WRITE(DEIIR, DE_PCU_EVENT); + I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT); + ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); + } + return 0; } @@ -1076,7 +1353,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev) INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); INIT_WORK(&dev_priv->error_work, i915_error_work_func); - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { ironlake_irq_preinstall(dev); return; } @@ -1108,7 +1385,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev) dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) return ironlake_irq_postinstall(dev); /* Unmask the interrupts that we always want on. 
*/ @@ -1196,7 +1473,7 @@ void i915_driver_irq_uninstall(struct drm_device * dev) dev_priv->vblank_pipe = 0; - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { ironlake_irq_uninstall(dev); return; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ab1bd2d3d3b..3d59862c7cc 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -53,6 +53,25 @@ #define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) #define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) +#define SNB_GMCH_CTRL 0x50 +#define SNB_GMCH_GMS_STOLEN_MASK 0xF8 +#define SNB_GMCH_GMS_STOLEN_32M (1 << 3) +#define SNB_GMCH_GMS_STOLEN_64M (2 << 3) +#define SNB_GMCH_GMS_STOLEN_96M (3 << 3) +#define SNB_GMCH_GMS_STOLEN_128M (4 << 3) +#define SNB_GMCH_GMS_STOLEN_160M (5 << 3) +#define SNB_GMCH_GMS_STOLEN_192M (6 << 3) +#define SNB_GMCH_GMS_STOLEN_224M (7 << 3) +#define SNB_GMCH_GMS_STOLEN_256M (8 << 3) +#define SNB_GMCH_GMS_STOLEN_288M (9 << 3) +#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3) +#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3) +#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3) +#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3) +#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3) +#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3) +#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3) + /* PCI config space */ #define HPLLCC 0xc0 /* 855 only */ @@ -61,6 +80,7 @@ #define GC_CLOCK_100_200 (1 << 0) #define GC_CLOCK_100_133 (2 << 0) #define GC_CLOCK_166_250 (3 << 0) +#define GCFGC2 0xda #define GCFGC 0xf0 /* 915+ only */ #define GC_LOW_FREQUENCY_ENABLE (1 << 7) #define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) @@ -234,6 +254,9 @@ #define I965_FENCE_REG_VALID (1<<0) #define I965_FENCE_MAX_PITCH_VAL 0x0400 +#define FENCE_REG_SANDYBRIDGE_0 0x100000 +#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32 + /* * Instruction and interrupt control regs */ @@ -265,6 +288,7 @@ #define INSTDONE1 0x0207c /* 965+ only */ #define ACTHD_I965 0x02074 #define HWS_PGA 0x02080 +#define HWS_PGA_GEN6 0x04080 #define HWS_ADDRESS_MASK 0xfffff000 #define HWS_START_ADDRESS_SHIFT 4 #define PWRCTXA 0x2088 /* 965GM+ only */ @@ -282,7 +306,7 @@ #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) #define I915_DISPLAY_PORT_INTERRUPT (1<<17) #define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) -#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) +#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */ #define I915_HWB_OOM_INTERRUPT (1<<13) #define I915_SYNC_STATUS_INTERRUPT (1<<12) #define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11) @@ -306,11 +330,14 @@ #define I915_ERROR_MEMORY_REFRESH (1<<1) #define I915_ERROR_INSTRUCTION (1<<0) #define INSTPM 0x020c0 +#define INSTPM_SELF_EN (1<<12) /* 915GM only */ #define ACTHD 0x020c8 #define FW_BLC 0x020d8 #define FW_BLC2 0x020dc #define FW_BLC_SELF 0x020e0 /* 915+ only */ -#define FW_BLC_SELF_EN (1<<15) +#define FW_BLC_SELF_EN_MASK (1<<31) +#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */ +#define FW_BLC_SELF_EN (1<<15) /* 945 only */ #define MM_BURST_LENGTH 0x00700000 #define MM_FIFO_WATERMARK 0x0001F000 #define LM_BURST_LENGTH 0x00000700 @@ -324,6 +351,7 @@ #define CM0_COLOR_EVICT_DISABLE (1<<3) #define CM0_DEPTH_WRITE_DISABLE (1<<1) #define CM0_RC_OP_FLUSH_DISABLE (1<<0) +#define BB_ADDR 0x02140 /* 8 bytes */ #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ @@ -784,10 +812,144 @@ #define CLKCFG_MEM_800 (3 << 4) #define CLKCFG_MEM_MASK (7 << 4) -/** GM965 GM45 render standby register */ -#define MCHBAR_RENDER_STANDBY 0x111B8 +#define CRSTANDVID 0x11100 +#define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ 
(0x1114c) (Ironlake) */ +#define PXVFREQ_PX_MASK 0x7f000000 +#define PXVFREQ_PX_SHIFT 24 +#define VIDFREQ_BASE 0x11110 +#define VIDFREQ1 0x11110 /* VIDFREQ1-4 (0x1111c) (Cantiga) */ +#define VIDFREQ2 0x11114 +#define VIDFREQ3 0x11118 +#define VIDFREQ4 0x1111c +#define VIDFREQ_P0_MASK 0x1f000000 +#define VIDFREQ_P0_SHIFT 24 +#define VIDFREQ_P0_CSCLK_MASK 0x00f00000 +#define VIDFREQ_P0_CSCLK_SHIFT 20 +#define VIDFREQ_P0_CRCLK_MASK 0x000f0000 +#define VIDFREQ_P0_CRCLK_SHIFT 16 +#define VIDFREQ_P1_MASK 0x00001f00 +#define VIDFREQ_P1_SHIFT 8 +#define VIDFREQ_P1_CSCLK_MASK 0x000000f0 +#define VIDFREQ_P1_CSCLK_SHIFT 4 +#define VIDFREQ_P1_CRCLK_MASK 0x0000000f +#define INTTOEXT_BASE_ILK 0x11300 +#define INTTOEXT_BASE 0x11120 /* INTTOEXT1-8 (0x1113c) */ +#define INTTOEXT_MAP3_SHIFT 24 +#define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT) +#define INTTOEXT_MAP2_SHIFT 16 +#define INTTOEXT_MAP2_MASK (0x1f << INTTOEXT_MAP2_SHIFT) +#define INTTOEXT_MAP1_SHIFT 8 +#define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT) +#define INTTOEXT_MAP0_SHIFT 0 +#define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT) +#define MEMSWCTL 0x11170 /* Ironlake only */ +#define MEMCTL_CMD_MASK 0xe000 +#define MEMCTL_CMD_SHIFT 13 +#define MEMCTL_CMD_RCLK_OFF 0 +#define MEMCTL_CMD_RCLK_ON 1 +#define MEMCTL_CMD_CHFREQ 2 +#define MEMCTL_CMD_CHVID 3 +#define MEMCTL_CMD_VMMOFF 4 +#define MEMCTL_CMD_VMMON 5 +#define MEMCTL_CMD_STS (1<<12) /* write 1 triggers command, clears + when command complete */ +#define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */ +#define MEMCTL_FREQ_SHIFT 8 +#define MEMCTL_SFCAVM (1<<7) +#define MEMCTL_TGT_VID_MASK 0x007f +#define MEMIHYST 0x1117c +#define MEMINTREN 0x11180 /* 16 bits */ +#define MEMINT_RSEXIT_EN (1<<8) +#define MEMINT_CX_SUPR_EN (1<<7) +#define MEMINT_CONT_BUSY_EN (1<<6) +#define MEMINT_AVG_BUSY_EN (1<<5) +#define MEMINT_EVAL_CHG_EN (1<<4) +#define MEMINT_MON_IDLE_EN (1<<3) +#define MEMINT_UP_EVAL_EN (1<<2) +#define MEMINT_DOWN_EVAL_EN (1<<1) +#define MEMINT_SW_CMD_EN (1<<0) +#define MEMINTRSTR 0x11182 /* 16 bits */ +#define MEM_RSEXIT_MASK 0xc000 +#define MEM_RSEXIT_SHIFT 14 +#define MEM_CONT_BUSY_MASK 0x3000 +#define MEM_CONT_BUSY_SHIFT 12 +#define MEM_AVG_BUSY_MASK 0x0c00 +#define MEM_AVG_BUSY_SHIFT 10 +#define MEM_EVAL_CHG_MASK 0x0300 +#define MEM_EVAL_BUSY_SHIFT 8 +#define MEM_MON_IDLE_MASK 0x00c0 +#define MEM_MON_IDLE_SHIFT 6 +#define MEM_UP_EVAL_MASK 0x0030 +#define MEM_UP_EVAL_SHIFT 4 +#define MEM_DOWN_EVAL_MASK 0x000c +#define MEM_DOWN_EVAL_SHIFT 2 +#define MEM_SW_CMD_MASK 0x0003 +#define MEM_INT_STEER_GFX 0 +#define MEM_INT_STEER_CMR 1 +#define MEM_INT_STEER_SMI 2 +#define MEM_INT_STEER_SCI 3 +#define MEMINTRSTS 0x11184 +#define MEMINT_RSEXIT (1<<7) +#define MEMINT_CONT_BUSY (1<<6) +#define MEMINT_AVG_BUSY (1<<5) +#define MEMINT_EVAL_CHG (1<<4) +#define MEMINT_MON_IDLE (1<<3) +#define MEMINT_UP_EVAL (1<<2) +#define MEMINT_DOWN_EVAL (1<<1) +#define MEMINT_SW_CMD (1<<0) +#define MEMMODECTL 0x11190 +#define MEMMODE_BOOST_EN (1<<31) +#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */ +#define MEMMODE_BOOST_FREQ_SHIFT 24 +#define MEMMODE_IDLE_MODE_MASK 0x00030000 +#define MEMMODE_IDLE_MODE_SHIFT 16 +#define MEMMODE_IDLE_MODE_EVAL 0 +#define MEMMODE_IDLE_MODE_CONT 1 +#define MEMMODE_HWIDLE_EN (1<<15) +#define MEMMODE_SWMODE_EN (1<<14) +#define MEMMODE_RCLK_GATE (1<<13) +#define MEMMODE_HW_UPDATE (1<<12) +#define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */ +#define MEMMODE_FSTART_SHIFT 8 +#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 
0-15 */ +#define MEMMODE_FMAX_SHIFT 4 +#define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */ +#define RCBMAXAVG 0x1119c +#define MEMSWCTL2 0x1119e /* Cantiga only */ +#define SWMEMCMD_RENDER_OFF (0 << 13) +#define SWMEMCMD_RENDER_ON (1 << 13) +#define SWMEMCMD_SWFREQ (2 << 13) +#define SWMEMCMD_TARVID (3 << 13) +#define SWMEMCMD_VRM_OFF (4 << 13) +#define SWMEMCMD_VRM_ON (5 << 13) +#define CMDSTS (1<<12) +#define SFCAVM (1<<11) +#define SWFREQ_MASK 0x0380 /* P0-7 */ +#define SWFREQ_SHIFT 7 +#define TARVID_MASK 0x001f +#define MEMSTAT_CTG 0x111a0 +#define RCBMINAVG 0x111a0 +#define RCUPEI 0x111b0 +#define RCDNEI 0x111b4 +#define MCHBAR_RENDER_STANDBY 0x111b8 #define RCX_SW_EXIT (1<<23) #define RSX_STATUS_MASK 0x00700000 +#define VIDCTL 0x111c0 +#define VIDSTS 0x111c8 +#define VIDSTART 0x111cc /* 8 bits */ +#define MEMSTAT_ILK 0x111f8 +#define MEMSTAT_VID_MASK 0x7f00 +#define MEMSTAT_VID_SHIFT 8 +#define MEMSTAT_PSTATE_MASK 0x00f8 +#define MEMSTAT_PSTATE_SHIFT 3 +#define MEMSTAT_MON_ACTV (1<<2) +#define MEMSTAT_SRC_CTL_MASK 0x0003 +#define MEMSTAT_SRC_CTL_CORE 0 +#define MEMSTAT_SRC_CTL_TRB 1 +#define MEMSTAT_SRC_CTL_THM 2 +#define MEMSTAT_SRC_CTL_STDBY 3 +#define RCPREVBSYTUPAVG 0x113b8 +#define RCPREVBSYTDNAVG 0x113bc #define PEG_BAND_GAP_DATA 0x14d68 /* diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index a3b90c9561d..ac0d1a73ac2 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -682,6 +682,8 @@ void i915_restore_display(struct drm_device *dev) I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR); I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL); + I915_WRITE(MCHBAR_RENDER_STANDBY, + dev_priv->saveMCHBAR_RENDER_STANDBY); } else { I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); @@ -745,11 +747,16 @@ int i915_save_state(struct drm_device *dev) dev_priv->saveGTIMR = I915_READ(GTIMR); dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR); dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR); + dev_priv->saveMCHBAR_RENDER_STANDBY = + I915_READ(MCHBAR_RENDER_STANDBY); } else { dev_priv->saveIER = I915_READ(IER); dev_priv->saveIMR = I915_READ(IMR); } + if (IS_IRONLAKE_M(dev)) + ironlake_disable_drps(dev); + /* Cache mode state */ dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); @@ -820,6 +827,9 @@ int i915_restore_state(struct drm_device *dev) /* Clock gating state */ intel_init_clock_gating(dev); + if (IS_IRONLAKE_M(dev)) + ironlake_enable_drps(dev); + /* Cache mode state */ I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 15fbc1b5a83..70c9d4ba704 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -247,6 +247,7 @@ static void parse_general_features(struct drm_i915_private *dev_priv, struct bdb_header *bdb) { + struct drm_device *dev = dev_priv->dev; struct bdb_general_features *general; /* Set sensible defaults in case we can't find the general block */ @@ -263,7 +264,7 @@ parse_general_features(struct drm_i915_private *dev_priv, if (IS_I85X(dev_priv->dev)) dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48; - else if (IS_IRONLAKE(dev_priv->dev)) + else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev)) dev_priv->lvds_ssc_freq = general->ssc_freq ? 
100 : 120; else diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 79dd4026586..fccf07470c8 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -39,7 +39,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode) struct drm_i915_private *dev_priv = dev->dev_private; u32 temp, reg; - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) reg = PCH_ADPA; else reg = ADPA; @@ -113,7 +113,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, else dpll_md_reg = DPLL_B_MD; - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) adpa_reg = PCH_ADPA; else adpa_reg = ADPA; @@ -122,7 +122,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, * Disable separate mode multiplier used when cloning SDVO to CRT * XXX this needs to be adjusted when we really are cloning */ - if (IS_I965G(dev) && !IS_IRONLAKE(dev)) { + if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { dpll_md = I915_READ(dpll_md_reg); I915_WRITE(dpll_md_reg, dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); @@ -136,11 +136,11 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, if (intel_crtc->pipe == 0) { adpa |= ADPA_PIPE_A_SELECT; - if (!IS_IRONLAKE(dev)) + if (!HAS_PCH_SPLIT(dev)) I915_WRITE(BCLRPAT_A, 0); } else { adpa |= ADPA_PIPE_B_SELECT; - if (!IS_IRONLAKE(dev)) + if (!HAS_PCH_SPLIT(dev)) I915_WRITE(BCLRPAT_B, 0); } @@ -202,7 +202,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) u32 hotplug_en; int i, tries = 0; - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) return intel_ironlake_crt_detect_hotplug(connector); /* @@ -524,7 +524,7 @@ void intel_crt_init(struct drm_device *dev) &intel_output->enc); /* Set up the DDC bus. */ - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) i2c_reg = PCH_GPIOA; else { i2c_reg = GPIOA; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index b27202d23eb..9cd6de5f990 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -232,7 +232,7 @@ struct intel_limit { #define G4X_P2_DISPLAY_PORT_FAST 10 #define G4X_P2_DISPLAY_PORT_LIMIT 0 -/* Ironlake */ +/* Ironlake / Sandybridge */ /* as we calculate clock using (register_value + 2) for N/M1/M2, so here the range value for them is (actual_value-2). 
*/ @@ -690,7 +690,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; const intel_limit_t *limit; - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) limit = intel_ironlake_limit(crtc); else if (IS_G4X(dev)) { limit = intel_g4x_limit(crtc); @@ -886,7 +886,7 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { int lvds_reg; - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) lvds_reg = PCH_LVDS; else lvds_reg = LVDS; @@ -1188,25 +1188,30 @@ static void intel_update_fbc(struct drm_crtc *crtc, if (intel_fb->obj->size > dev_priv->cfb_size) { DRM_DEBUG_KMS("framebuffer too large, disabling " "compression\n"); + dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; goto out_disable; } if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || (mode->flags & DRM_MODE_FLAG_DBLSCAN)) { DRM_DEBUG_KMS("mode incompatible with compression, " "disabling\n"); + dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE; goto out_disable; } if ((mode->hdisplay > 2048) || (mode->vdisplay > 1536)) { DRM_DEBUG_KMS("mode too large for compression, disabling\n"); + dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE; goto out_disable; } if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) { DRM_DEBUG_KMS("plane not 0, disabling compression\n"); + dev_priv->no_fbc_reason = FBC_BAD_PLANE; goto out_disable; } if (obj_priv->tiling_mode != I915_TILING_X) { DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); + dev_priv->no_fbc_reason = FBC_NOT_TILED; goto out_disable; } @@ -1366,7 +1371,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, dspcntr &= ~DISPPLANE_TILED; } - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) /* must disable */ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; @@ -1427,7 +1432,7 @@ static void i915_disable_vga (struct drm_device *dev) u8 sr1; u32 vga_reg; - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) vga_reg = CPU_VGACNTRL; else vga_reg = VGACNTRL; @@ -2111,7 +2116,7 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = crtc->dev; - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { /* FDI link clock is fixed at 2.7G */ if (mode->clock * 3 > 27000 * 4) return MODE_CLOCK_HIGH; @@ -2757,11 +2762,22 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock, srwm = total_size - sr_entries; if (srwm < 0) srwm = 1; - I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); + + if (IS_I945G(dev) || IS_I945GM(dev)) + I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); + else if (IS_I915GM(dev)) { + /* 915M has a smaller SRWM field */ + I915_WRITE(FW_BLC_SELF, srwm & 0x3f); + I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN); + } } else { /* Turn off self refresh if both pipes are enabled */ - I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) - & ~FW_BLC_SELF_EN); + if (IS_I945G(dev) || IS_I945GM(dev)) { + I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) + & ~FW_BLC_SELF_EN); + } else if (IS_I915GM(dev)) { + I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN); + } } DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", @@ -2967,7 +2983,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, refclk / 1000); } else if (IS_I9XX(dev)) { refclk = 96000; - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) refclk = 120000; /* 120Mhz refclk */ } else { refclk = 48000; @@ -3025,7 +3041,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, } /* FDI link */ - if 
(IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { int lane, link_bw, bpp; /* eDP doesn't require FDI link, so just set DP M/N according to current link config */ @@ -3102,7 +3118,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, * PCH B stepping, previous chipset stepping should be * ignoring this setting. */ - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { temp = I915_READ(PCH_DREF_CONTROL); /* Always enable nonspread source */ temp &= ~DREF_NONSPREAD_SOURCE_MASK; @@ -3149,7 +3165,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, reduced_clock.m2; } - if (!IS_IRONLAKE(dev)) + if (!HAS_PCH_SPLIT(dev)) dpll = DPLL_VGA_MODE_DIS; if (IS_I9XX(dev)) { @@ -3162,7 +3178,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; - else if (IS_IRONLAKE(dev)) + else if (HAS_PCH_SPLIT(dev)) dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; } if (is_dp) @@ -3174,7 +3190,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, else { dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; /* also FPA1 */ - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; if (IS_G4X(dev) && has_reduced_clock) dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; @@ -3193,7 +3209,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; break; } - if (IS_I965G(dev) && !IS_IRONLAKE(dev)) + if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); } else { if (is_lvds) { @@ -3227,7 +3243,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* Ironlake's plane is forced to pipe, bit 24 is to enable color space conversion */ - if (!IS_IRONLAKE(dev)) { + if (!HAS_PCH_SPLIT(dev)) { if (pipe == 0) dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; else @@ -3254,14 +3270,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* Disable the panel fitter if it was on our pipe */ - if (!IS_IRONLAKE(dev) && intel_panel_fitter_pipe(dev) == pipe) + if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe) I915_WRITE(PFIT_CONTROL, 0); DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); drm_mode_debug_printmodeline(mode); /* assign to Ironlake registers */ - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { fp_reg = pch_fp_reg; dpll_reg = pch_dpll_reg; } @@ -3282,7 +3298,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, if (is_lvds) { u32 lvds; - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) lvds_reg = PCH_LVDS; lvds = I915_READ(lvds_reg); @@ -3304,12 +3320,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* set the dithering flag */ if (IS_I965G(dev)) { if (dev_priv->lvds_dither) { - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) pipeconf |= PIPE_ENABLE_DITHER; else lvds |= LVDS_ENABLE_DITHER; } else { - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) pipeconf &= ~PIPE_ENABLE_DITHER; else lvds &= ~LVDS_ENABLE_DITHER; @@ -3328,7 +3344,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* Wait for the clocks to stabilize. 
*/ udelay(150); - if (IS_I965G(dev) && !IS_IRONLAKE(dev)) { + if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) { if (is_sdvo) { sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | @@ -3375,14 +3391,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, /* pipesrc and dspsize control the size that is scaled from, which should * always be the user's requested size. */ - if (!IS_IRONLAKE(dev)) { + if (!HAS_PCH_SPLIT(dev)) { I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1)); I915_WRITE(dsppos_reg, 0); } I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m); I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n); I915_WRITE(link_m1_reg, m_n.link_m); @@ -3438,7 +3454,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc) return; /* use legacy palette for Ironlake */ - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A : LGC_PALETTE_B; @@ -3553,11 +3569,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, intel_crtc->cursor_bo = bo; return 0; -fail: - mutex_lock(&dev->struct_mutex); fail_locked: - drm_gem_object_unreference(bo); mutex_unlock(&dev->struct_mutex); +fail: + drm_gem_object_unreference_unlocked(bo); return ret; } @@ -3922,7 +3937,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule) int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; int dpll = I915_READ(dpll_reg); - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) return; if (!dev_priv->lvds_downclock_avail) @@ -3961,7 +3976,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) int dpll_reg = (pipe == 0) ? 
DPLL_A : DPLL_B; int dpll = I915_READ(dpll_reg); - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) return; if (!dev_priv->lvds_downclock_avail) @@ -4011,6 +4026,11 @@ static void intel_idle_update(struct work_struct *work) mutex_lock(&dev->struct_mutex); + if (IS_I945G(dev) || IS_I945GM(dev)) { + DRM_DEBUG_DRIVER("enable memory self refresh on 945\n"); + I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); + } + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { /* Skip inactive CRTCs */ if (!crtc->fb) @@ -4044,9 +4064,17 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) if (!drm_core_check_feature(dev, DRIVER_MODESET)) return; - if (!dev_priv->busy) + if (!dev_priv->busy) { + if (IS_I945G(dev) || IS_I945GM(dev)) { + u32 fw_blc_self; + + DRM_DEBUG_DRIVER("disable memory self refresh on 945\n"); + fw_blc_self = I915_READ(FW_BLC_SELF); + fw_blc_self &= ~FW_BLC_SELF_EN; + I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK); + } dev_priv->busy = true; - else + } else mod_timer(&dev_priv->idle_timer, jiffies + msecs_to_jiffies(GPU_IDLE_TIMEOUT)); @@ -4058,6 +4086,14 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) intel_fb = to_intel_framebuffer(crtc->fb); if (intel_fb->obj == obj) { if (!intel_crtc->busy) { + if (IS_I945G(dev) || IS_I945GM(dev)) { + u32 fw_blc_self; + + DRM_DEBUG_DRIVER("disable memory self refresh on 945\n"); + fw_blc_self = I915_READ(FW_BLC_SELF); + fw_blc_self &= ~FW_BLC_SELF_EN; + I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK); + } /* Non-busy -> busy, upclock */ intel_increase_pllclock(crtc, true); intel_crtc->busy = true; @@ -4382,7 +4418,7 @@ static void intel_setup_outputs(struct drm_device *dev) if (IS_MOBILE(dev) && !IS_I830(dev)) intel_lvds_init(dev); - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { int found; if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) @@ -4451,7 +4487,7 @@ static void intel_setup_outputs(struct drm_device *dev) DRM_DEBUG_KMS("probing DP_D\n"); intel_dp_init(dev, DP_D); } - } else if (IS_I8XX(dev)) + } else if (IS_GEN2(dev)) intel_dvo_init(dev); if (SUPPORTS_TV(dev)) @@ -4476,9 +4512,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) intelfb_remove(dev, fb); drm_framebuffer_cleanup(fb); - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(intel_fb->obj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(intel_fb->obj); kfree(intel_fb); } @@ -4541,9 +4575,7 @@ intel_user_framebuffer_create(struct drm_device *dev, ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj); if (ret) { - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(obj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(obj); return NULL; } @@ -4591,6 +4623,91 @@ err_unref: return NULL; } +void ironlake_enable_drps(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 rgvmodectl = I915_READ(MEMMODECTL), rgvswctl; + u8 fmax, fmin, fstart, vstart; + int i = 0; + + /* 100ms RC evaluation intervals */ + I915_WRITE(RCUPEI, 100000); + I915_WRITE(RCDNEI, 100000); + + /* Set max/min thresholds to 90ms and 80ms respectively */ + I915_WRITE(RCBMAXAVG, 90000); + I915_WRITE(RCBMINAVG, 80000); + + I915_WRITE(MEMIHYST, 1); + + /* Set up min, max, and cur for interrupt handling */ + fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; + fmin = (rgvmodectl & MEMMODE_FMIN_MASK); + fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> + MEMMODE_FSTART_SHIFT; + 
vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> + PXVFREQ_PX_SHIFT; + + dev_priv->max_delay = fstart; /* can't go to fmax w/o IPS */ + dev_priv->min_delay = fmin; + dev_priv->cur_delay = fstart; + + I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); + + /* + * Interrupts will be enabled in ironlake_irq_postinstall + */ + + I915_WRITE(VIDSTART, vstart); + POSTING_READ(VIDSTART); + + rgvmodectl |= MEMMODE_SWMODE_EN; + I915_WRITE(MEMMODECTL, rgvmodectl); + + while (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) { + if (i++ > 100) { + DRM_ERROR("stuck trying to change perf mode\n"); + break; + } + msleep(1); + } + msleep(1); + + rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | + (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; + I915_WRITE(MEMSWCTL, rgvswctl); + POSTING_READ(MEMSWCTL); + + rgvswctl |= MEMCTL_CMD_STS; + I915_WRITE(MEMSWCTL, rgvswctl); +} + +void ironlake_disable_drps(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u32 rgvswctl; + u8 fstart; + + /* Ack interrupts, disable EFC interrupt */ + I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); + I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG); + I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT); + I915_WRITE(DEIIR, DE_PCU_EVENT); + I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); + + /* Go back to the starting frequency */ + fstart = (I915_READ(MEMMODECTL) & MEMMODE_FSTART_MASK) >> + MEMMODE_FSTART_SHIFT; + rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | + (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; + I915_WRITE(MEMSWCTL, rgvswctl); + msleep(1); + rgvswctl |= MEMCTL_CMD_STS; + I915_WRITE(MEMSWCTL, rgvswctl); + msleep(1); + +} + void intel_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -4599,7 +4716,7 @@ void intel_init_clock_gating(struct drm_device *dev) * Disable clock gating reported to work incorrectly according to the * specs, but enable as much else as we can. */ - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { return; } else if (IS_G4X(dev)) { uint32_t dspclk_gate; @@ -4672,7 +4789,7 @@ static void intel_init_display(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; /* We always want a DPMS function */ - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) dev_priv->display.dpms = ironlake_crtc_dpms; else dev_priv->display.dpms = i9xx_crtc_dpms; @@ -4715,7 +4832,7 @@ static void intel_init_display(struct drm_device *dev) i830_get_display_clock_speed; /* For FIFO watermark updates */ - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) dev_priv->display.update_wm = NULL; else if (IS_G4X(dev)) dev_priv->display.update_wm = g4x_update_wm; @@ -4774,11 +4891,6 @@ void intel_modeset_init(struct drm_device *dev) DRM_DEBUG_KMS("%d display pipe%s available.\n", num_pipe, num_pipe > 1 ? 
"s" : ""); - if (IS_I85X(dev)) - pci_read_config_word(dev->pdev, HPLLCC, &dev_priv->orig_clock); - else if (IS_I9XX(dev) || IS_G4X(dev)) - pci_read_config_word(dev->pdev, GCFGC, &dev_priv->orig_clock); - for (i = 0; i < num_pipe; i++) { intel_crtc_init(dev, i); } @@ -4787,6 +4899,9 @@ void intel_modeset_init(struct drm_device *dev) intel_init_clock_gating(dev); + if (IS_IRONLAKE_M(dev)) + ironlake_enable_drps(dev); + INIT_WORK(&dev_priv->idle_work, intel_idle_update); setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, (unsigned long)dev); @@ -4834,6 +4949,9 @@ void intel_modeset_cleanup(struct drm_device *dev) drm_gem_object_unreference(dev_priv->pwrctx); } + if (IS_IRONLAKE_M(dev)) + ironlake_disable_drps(dev); + mutex_unlock(&dev->struct_mutex); drm_mode_config_cleanup(dev); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 439506cefc1..3ef3a0d0edd 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -231,7 +231,7 @@ intel_dp_aux_ch(struct intel_output *intel_output, */ if (IS_eDP(intel_output)) aux_clock_divider = 225; /* eDP input clock at 450Mhz */ - else if (IS_IRONLAKE(dev)) + else if (HAS_PCH_SPLIT(dev)) aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */ else aux_clock_divider = intel_hrawclk(dev) / 2; @@ -584,7 +584,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, intel_dp_compute_m_n(3, lane_count, mode->clock, adjusted_mode->clock, &m_n); - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { if (intel_crtc->pipe == 0) { I915_WRITE(TRANSA_DATA_M1, ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | @@ -1176,7 +1176,7 @@ intel_dp_detect(struct drm_connector *connector) dp_priv->has_audio = false; - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) return ironlake_dp_detect(connector); temp = I915_READ(PORT_HOTPLUG_EN); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index a51573da1ff..3a467ca5785 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -209,6 +209,8 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, int regno); extern void intel_init_clock_gating(struct drm_device *dev); +extern void ironlake_enable_drps(struct drm_device *dev); +extern void ironlake_disable_drps(struct drm_device *dev); extern int intel_framebuffer_create(struct drm_device *dev, struct drm_mode_fb_cmd *mode_cmd, diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index aaabbcbe590..8cd791dc5b2 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -35,6 +35,7 @@ #include <linux/delay.h> #include <linux/fb.h> #include <linux/init.h> +#include <linux/vga_switcheroo.h> #include "drmP.h" #include "drm.h" @@ -235,6 +236,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, obj_priv->gtt_offset, fbo); mutex_unlock(&dev->struct_mutex); + vga_switcheroo_client_fb_set(dev->pdev, info); return 0; out_unpin: diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 0e268deed76..a30f8bfc198 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -82,7 +82,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) /* HW workaround, need to toggle enable bit off and on for 12bpc, but * we do this anyway which shows more stable in testing. 
*/ - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE); POSTING_READ(hdmi_priv->sdvox_reg); } @@ -99,7 +99,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) /* HW workaround, need to write this twice for issue that may result * in first write getting masked. */ - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { I915_WRITE(hdmi_priv->sdvox_reg, temp); POSTING_READ(hdmi_priv->sdvox_reg); } diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 8673c735b8a..fcc753ca5d9 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -128,7 +128,7 @@ intel_i2c_reset_gmbus(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { I915_WRITE(PCH_GMBUS0, 0); } else { I915_WRITE(GMBUS0, 0); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index c2e8a45780d..14e516fdc2d 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -56,7 +56,7 @@ static void intel_lvds_set_backlight(struct drm_device *dev, int level) struct drm_i915_private *dev_priv = dev->dev_private; u32 blc_pwm_ctl, reg; - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) reg = BLC_PWM_CPU_CTL; else reg = BLC_PWM_CTL; @@ -74,7 +74,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; u32 reg; - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) reg = BLC_PWM_PCH_CTL2; else reg = BLC_PWM_CTL; @@ -89,17 +89,22 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev) static void intel_lvds_set_power(struct drm_device *dev, bool on) { struct drm_i915_private *dev_priv = dev->dev_private; - u32 pp_status, ctl_reg, status_reg; + u32 pp_status, ctl_reg, status_reg, lvds_reg; - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { ctl_reg = PCH_PP_CONTROL; status_reg = PCH_PP_STATUS; + lvds_reg = PCH_LVDS; } else { ctl_reg = PP_CONTROL; status_reg = PP_STATUS; + lvds_reg = LVDS; } if (on) { + I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); + POSTING_READ(lvds_reg); + I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); do { @@ -115,6 +120,9 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on) do { pp_status = I915_READ(status_reg); } while (pp_status & PP_ON); + + I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); + POSTING_READ(lvds_reg); } } @@ -137,7 +145,7 @@ static void intel_lvds_save(struct drm_connector *connector) u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; u32 pwm_ctl_reg; - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { pp_on_reg = PCH_PP_ON_DELAYS; pp_off_reg = PCH_PP_OFF_DELAYS; pp_ctl_reg = PCH_PP_CONTROL; @@ -174,7 +182,7 @@ static void intel_lvds_restore(struct drm_connector *connector) u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; u32 pwm_ctl_reg; - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { pp_on_reg = PCH_PP_ON_DELAYS; pp_off_reg = PCH_PP_OFF_DELAYS; pp_ctl_reg = PCH_PP_CONTROL; @@ -297,7 +305,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, } /* full screen scale for now */ - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) goto out; /* 965+ wants fuzzy fitting */ @@ -327,7 +335,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder, * to register description and PRM. 
* Change the value here to see the borders for debugging */ - if (!IS_IRONLAKE(dev)) { + if (!HAS_PCH_SPLIT(dev)) { I915_WRITE(BCLRPAT_A, 0); I915_WRITE(BCLRPAT_B, 0); } @@ -548,7 +556,7 @@ static void intel_lvds_prepare(struct drm_encoder *encoder) struct drm_i915_private *dev_priv = dev->dev_private; u32 reg; - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) reg = BLC_PWM_CPU_CTL; else reg = BLC_PWM_CTL; @@ -587,7 +595,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder, * settings. */ - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) return; /* @@ -655,8 +663,15 @@ static const struct dmi_system_id bad_lid_status[] = { */ static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector) { + struct drm_device *dev = connector->dev; enum drm_connector_status status = connector_status_connected; + /* ACPI lid methods were generally unreliable in this generation, so + * don't even bother. + */ + if (IS_GEN2(dev)) + return connector_status_connected; + if (!dmi_check_system(bad_lid_status) && !acpi_lid_open()) status = connector_status_disconnected; @@ -1020,7 +1035,7 @@ void intel_lvds_init(struct drm_device *dev) return; } - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) return; if (dev_priv->edp_support) { @@ -1123,7 +1138,7 @@ void intel_lvds_init(struct drm_device *dev) */ /* Ironlake: FIXME if still fail, not try pipe mode now */ - if (IS_IRONLAKE(dev)) + if (HAS_PCH_SPLIT(dev)) goto failed; lvds = I915_READ(LVDS); @@ -1144,7 +1159,7 @@ void intel_lvds_init(struct drm_device *dev) goto failed; out: - if (IS_IRONLAKE(dev)) { + if (HAS_PCH_SPLIT(dev)) { u32 pwm; /* make sure PWM is enabled */ pwm = I915_READ(BLC_PWM_CPU_CTL2); diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 2639591c72e..d355d1d527e 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -172,7 +172,7 @@ struct overlay_registers { #define OFC_UPDATE 0x1 #define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev)) -#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev)) +#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev)) static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay) @@ -199,16 +199,11 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay) { - struct drm_device *dev = overlay->dev; - drm_i915_private_t *dev_priv = dev->dev_private; - if (OVERLAY_NONPHYSICAL(overlay->dev)) io_mapping_unmap_atomic(overlay->virt_addr); overlay->virt_addr = NULL; - I915_READ(OVADD); /* flush wc cashes */ - return; } @@ -225,9 +220,7 @@ static int intel_overlay_on(struct intel_overlay *overlay) overlay->active = 1; overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP; - BEGIN_LP_RING(6); - OUT_RING(MI_FLUSH); - OUT_RING(MI_NOOP); + BEGIN_LP_RING(4); OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON); OUT_RING(overlay->flip_addr | OFC_UPDATE); OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); @@ -267,9 +260,7 @@ static void intel_overlay_continue(struct intel_overlay *overlay, if (tmp & (1 << 17)) DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); - BEGIN_LP_RING(4); - OUT_RING(MI_FLUSH); - OUT_RING(MI_NOOP); + BEGIN_LP_RING(2); OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); OUT_RING(flip_addr); ADVANCE_LP_RING(); @@ -338,9 +329,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) /* wait 
for overlay to go idle */ overlay->hw_wedged = SWITCH_OFF_STAGE_1; - BEGIN_LP_RING(6); - OUT_RING(MI_FLUSH); - OUT_RING(MI_NOOP); + BEGIN_LP_RING(4); OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); OUT_RING(flip_addr); OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); @@ -358,9 +347,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) /* turn overlay off */ overlay->hw_wedged = SWITCH_OFF_STAGE_2; - BEGIN_LP_RING(6); - OUT_RING(MI_FLUSH); - OUT_RING(MI_NOOP); + BEGIN_LP_RING(4); OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); OUT_RING(flip_addr); OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); @@ -435,9 +422,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay, overlay->hw_wedged = SWITCH_OFF_STAGE_2; - BEGIN_LP_RING(6); - OUT_RING(MI_FLUSH); - OUT_RING(MI_NOOP); + BEGIN_LP_RING(4); OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF); OUT_RING(flip_addr); OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); @@ -1179,7 +1164,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, out_unlock: mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->mode_config.mutex); - drm_gem_object_unreference(new_bo); + drm_gem_object_unreference_unlocked(new_bo); kfree(params); return ret; diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 82678d30ab0..48daee5c9c6 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -35,6 +35,7 @@ #include "i915_drm.h" #include "i915_drv.h" #include "intel_sdvo_regs.h" +#include <linux/dmi.h> static char *tv_format_names[] = { "NTSC_M" , "NTSC_J" , "NTSC_443", @@ -2283,6 +2284,25 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device) return 0x72; } +static int intel_sdvo_bad_tv_callback(const struct dmi_system_id *id) +{ + DRM_DEBUG_KMS("Ignoring bad SDVO TV connector for %s\n", id->ident); + return 1; +} + +static struct dmi_system_id intel_sdvo_bad_tv[] = { + { + .callback = intel_sdvo_bad_tv_callback, + .ident = "IntelG45/ICH10R/DME1737", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "IBM CORPORATION"), + DMI_MATCH(DMI_PRODUCT_NAME, "4800784"), + }, + }, + + { } /* terminating entry */ +}; + static bool intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) { @@ -2323,7 +2343,8 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags) (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | (1 << INTEL_ANALOG_CLONE_BIT); } - } else if (flags & SDVO_OUTPUT_SVID0) { + } else if ((flags & SDVO_OUTPUT_SVID0) && + !dmi_check_system(intel_sdvo_bad_tv)) { sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0; encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index 48c290b5da8..32db806f3b5 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile @@ -16,7 +16,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ nv04_graph.o nv10_graph.o nv20_graph.o \ nv40_graph.o nv50_graph.o \ - nv40_grctx.o \ + nv40_grctx.o nv50_grctx.o \ nv04_instmem.o nv50_instmem.o \ nv50_crtc.o nv50_dac.o nv50_sor.o \ nv50_cursor.o nv50_display.o nv50_fbcon.o \ diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 48227e74475..0e0730a5313 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -11,6 +11,8 @@ #include "nouveau_drm.h" #include "nv50_display.h" +#include 
<linux/vga_switcheroo.h> + #define NOUVEAU_DSM_SUPPORTED 0x00 #define NOUVEAU_DSM_SUPPORTED_FUNCTIONS 0x00 @@ -28,31 +30,30 @@ #define NOUVEAU_DSM_POWER_SPEED 0x01 #define NOUVEAU_DSM_POWER_STAMINA 0x02 -static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result) -{ - static char muid[] = { - 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D, - 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4, - }; +static struct nouveau_dsm_priv { + bool dsm_detected; + acpi_handle dhandle; + acpi_handle dsm_handle; +} nouveau_dsm_priv; + +static const char nouveau_dsm_muid[] = { + 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D, + 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4, +}; - struct pci_dev *pdev = dev->pdev; - struct acpi_handle *handle; +static int nouveau_dsm(acpi_handle handle, int func, int arg, int *result) +{ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_object_list input; union acpi_object params[4]; union acpi_object *obj; int err; - handle = DEVICE_ACPI_HANDLE(&pdev->dev); - - if (!handle) - return -ENODEV; - input.count = 4; input.pointer = params; params[0].type = ACPI_TYPE_BUFFER; - params[0].buffer.length = sizeof(muid); - params[0].buffer.pointer = (char *)muid; + params[0].buffer.length = sizeof(nouveau_dsm_muid); + params[0].buffer.pointer = (char *)nouveau_dsm_muid; params[1].type = ACPI_TYPE_INTEGER; params[1].integer.value = 0x00000102; params[2].type = ACPI_TYPE_INTEGER; @@ -62,7 +63,7 @@ static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result) err = acpi_evaluate_object(handle, "_DSM", &input, &output); if (err) { - NV_INFO(dev, "failed to evaluate _DSM: %d\n", err); + printk(KERN_INFO "failed to evaluate _DSM: %d\n", err); return err; } @@ -86,40 +87,119 @@ static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result) return 0; } -int nouveau_hybrid_setup(struct drm_device *dev) +static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id) { - int result; - - if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE, - &result)) - return -ENODEV; - - NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result); - - if (result) { /* Ensure that the external GPU is enabled */ - nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL); - nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED, - NULL); - } else { /* Stamina mode - disable the external GPU */ - nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA, - NULL); - nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA, - NULL); - } + return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id, NULL); +} + +static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switcheroo_state state) +{ + int arg; + if (state == VGA_SWITCHEROO_ON) + arg = NOUVEAU_DSM_POWER_SPEED; + else + arg = NOUVEAU_DSM_POWER_STAMINA; + nouveau_dsm(handle, NOUVEAU_DSM_POWER, arg, NULL); + return 0; +} + +static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id) +{ + if (id == VGA_SWITCHEROO_IGD) + return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_STAMINA); + else + return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_SPEED); +} +static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id, + enum vga_switcheroo_state state) +{ + if (id == VGA_SWITCHEROO_IGD) + return 0; + + return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dsm_handle, state); +} + +static int nouveau_dsm_init(void) +{ return 0; } -bool nouveau_dsm_probe(struct drm_device *dev) +static int 
nouveau_dsm_get_client_id(struct pci_dev *pdev) { - int support = 0; + if (nouveau_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) + return VGA_SWITCHEROO_IGD; + else + return VGA_SWITCHEROO_DIS; +} + +static struct vga_switcheroo_handler nouveau_dsm_handler = { + .switchto = nouveau_dsm_switchto, + .power_state = nouveau_dsm_power_state, + .init = nouveau_dsm_init, + .get_client_id = nouveau_dsm_get_client_id, +}; - if (nouveau_dsm(dev, NOUVEAU_DSM_SUPPORTED, - NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &support)) +static bool nouveau_dsm_pci_probe(struct pci_dev *pdev) +{ + acpi_handle dhandle, nvidia_handle; + acpi_status status; + int ret; + uint32_t result; + + dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + if (!dhandle) + return false; + status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle); + if (ACPI_FAILURE(status)) { return false; + } - if (!support) + ret= nouveau_dsm(nvidia_handle, NOUVEAU_DSM_SUPPORTED, + NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result); + if (ret < 0) return false; + nouveau_dsm_priv.dhandle = dhandle; + nouveau_dsm_priv.dsm_handle = nvidia_handle; return true; } + +static bool nouveau_dsm_detect(void) +{ + char acpi_method_name[255] = { 0 }; + struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; + struct pci_dev *pdev = NULL; + int has_dsm = 0; + int vga_count = 0; + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { + vga_count++; + + has_dsm |= (nouveau_dsm_pci_probe(pdev) == true); + } + + if (vga_count == 2 && has_dsm) { + acpi_get_name(nouveau_dsm_priv.dsm_handle, ACPI_FULL_PATHNAME, &buffer); + printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", + acpi_method_name); + nouveau_dsm_priv.dsm_detected = true; + return true; + } + return false; +} + +void nouveau_register_dsm_handler(void) +{ + bool r; + + r = nouveau_dsm_detect(); + if (!r) + return; + + vga_switcheroo_register_handler(&nouveau_dsm_handler); +} + +void nouveau_unregister_dsm_handler(void) +{ + vga_switcheroo_unregister_handler(); +} diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 0e9cd1d4913..71247da17da 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -311,11 +311,11 @@ valid_reg(struct nvbios *bios, uint32_t reg) /* C51 has misaligned regs on purpose. 
Marvellous */ if (reg & 0x2 || - (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51)) + (reg & 0x1 && dev_priv->vbios.chip_version != 0x51)) NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg); /* warn on C51 regs that haven't been verified accessible in tracing */ - if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 && + if (reg & 0x1 && dev_priv->vbios.chip_version == 0x51 && reg != 0x130d && reg != 0x1311 && reg != 0x60081d) NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n", reg); @@ -420,7 +420,7 @@ bios_wr32(struct nvbios *bios, uint32_t reg, uint32_t data) LOG_OLD_VALUE(bios_rd32(bios, reg)); BIOSLOG(bios, " Write: Reg: 0x%08X, Data: 0x%08X\n", reg, data); - if (dev_priv->VBIOS.execute) { + if (dev_priv->vbios.execute) { still_alive(); nv_wr32(bios->dev, reg, data); } @@ -647,7 +647,7 @@ nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk) reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16); reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1; - if (dev_priv->VBIOS.execute) { + if (dev_priv->vbios.execute) { still_alive(); nv_wr32(dev, reg + 4, reg1); nv_wr32(dev, reg + 0, reg0); @@ -689,7 +689,7 @@ setPLL(struct nvbios *bios, uint32_t reg, uint32_t clk) static int dcb_entry_idx_from_crtchead(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nvbios *bios = &dev_priv->VBIOS; + struct nvbios *bios = &dev_priv->vbios; /* * For the results of this function to be correct, CR44 must have been @@ -700,7 +700,7 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev) uint8_t dcb_entry = NVReadVgaCrtc5758(dev, bios->state.crtchead, 0); - if (dcb_entry > bios->bdcb.dcb.entries) { + if (dcb_entry > bios->dcb.entries) { NV_ERROR(dev, "CR58 doesn't have a valid DCB entry currently " "(%02X)\n", dcb_entry); dcb_entry = 0x7f; /* unused / invalid marker */ @@ -713,25 +713,26 @@ static struct nouveau_i2c_chan * init_i2c_device_find(struct drm_device *dev, int i2c_index) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct bios_parsed_dcb *bdcb = &dev_priv->VBIOS.bdcb; + struct dcb_table *dcb = &dev_priv->vbios.dcb; if (i2c_index == 0xff) { /* note: dcb_entry_idx_from_crtchead needs pre-script set-up */ int idx = dcb_entry_idx_from_crtchead(dev), shift = 0; - int default_indices = bdcb->i2c_default_indices; + int default_indices = dcb->i2c_default_indices; - if (idx != 0x7f && bdcb->dcb.entry[idx].i2c_upper_default) + if (idx != 0x7f && dcb->entry[idx].i2c_upper_default) shift = 4; i2c_index = (default_indices >> shift) & 0xf; } if (i2c_index == 0x80) /* g80+ */ - i2c_index = bdcb->i2c_default_indices & 0xf; + i2c_index = dcb->i2c_default_indices & 0xf; return nouveau_i2c_find(dev, i2c_index); } -static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv) +static uint32_t +get_tmds_index_reg(struct drm_device *dev, uint8_t mlv) { /* * For mlv < 0x80, it is an index into a table of TMDS base addresses. 
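A note on the nouveau_acpi.c rework a few hunks above: the driver now keeps the device and _DSM handles in a module-global nouveau_dsm_priv and registers mux/power callbacks with vga_switcheroo, but the underlying ACPI call is unchanged — a _DSM evaluation with four arguments (UUID buffer, revision id, function index, integer argument). The following standalone sketch shows that call pattern under the ACPICA API; example_dsm and its muid parameter are illustrative stand-ins, not the driver's symbols, and error handling is abbreviated.

#include <linux/acpi.h>
#include <linux/slab.h>

/* Illustrative _DSM call: package the UUID, revision id, function index and
 * an integer argument, evaluate "_DSM" on the device handle, and pull an
 * integer result back out.  The real code above also accepts a 4-byte
 * buffer result; that path is omitted here for brevity. */
static int example_dsm(acpi_handle handle, u8 *muid, size_t muid_len,
                       int func, int arg, int *result)
{
        struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object params[4];
        struct acpi_object_list input = { .count = 4, .pointer = params };
        union acpi_object *obj;
        acpi_status status;

        params[0].type = ACPI_TYPE_BUFFER;
        params[0].buffer.length = muid_len;
        params[0].buffer.pointer = muid;
        params[1].type = ACPI_TYPE_INTEGER;
        params[1].integer.value = 0x00000102;   /* revision id, as above */
        params[2].type = ACPI_TYPE_INTEGER;
        params[2].integer.value = func;
        params[3].type = ACPI_TYPE_INTEGER;
        params[3].integer.value = arg;

        status = acpi_evaluate_object(handle, "_DSM", &input, &output);
        if (ACPI_FAILURE(status))
                return -ENODEV;

        obj = output.pointer;
        if (result && obj->type == ACPI_TYPE_INTEGER)
                *result = obj->integer.value;

        kfree(output.pointer);
        return 0;
}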
@@ -744,6 +745,7 @@ static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv) */ struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nvbios *bios = &dev_priv->vbios; const int pramdac_offset[13] = { 0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 }; const uint32_t pramdac_table[4] = { @@ -756,13 +758,12 @@ static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv) dcb_entry = dcb_entry_idx_from_crtchead(dev); if (dcb_entry == 0x7f) return 0; - dacoffset = pramdac_offset[ - dev_priv->VBIOS.bdcb.dcb.entry[dcb_entry].or]; + dacoffset = pramdac_offset[bios->dcb.entry[dcb_entry].or]; if (mlv == 0x81) dacoffset ^= 8; return 0x6808b0 + dacoffset; } else { - if (mlv > ARRAY_SIZE(pramdac_table)) { + if (mlv >= ARRAY_SIZE(pramdac_table)) { NV_ERROR(dev, "Magic Lookup Value too big (%02X)\n", mlv); return 0; @@ -2574,19 +2575,19 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c }; - const uint8_t *gpio_table = &bios->data[bios->bdcb.gpio_table_ptr]; + const uint8_t *gpio_table = &bios->data[bios->dcb.gpio_table_ptr]; const uint8_t *gpio_entry; int i; if (!iexec->execute) return 1; - if (bios->bdcb.version != 0x40) { + if (bios->dcb.version != 0x40) { NV_ERROR(bios->dev, "DCB table not version 4.0\n"); return 0; } - if (!bios->bdcb.gpio_table_ptr) { + if (!bios->dcb.gpio_table_ptr) { NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n"); return 0; } @@ -3123,7 +3124,7 @@ run_digital_op_script(struct drm_device *dev, uint16_t scriptptr, struct dcb_entry *dcbent, int head, bool dl) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nvbios *bios = &dev_priv->VBIOS; + struct nvbios *bios = &dev_priv->vbios; struct init_exec iexec = {true, false}; NV_TRACE(dev, "0x%04X: Parsing digital output script table\n", @@ -3140,7 +3141,7 @@ run_digital_op_script(struct drm_device *dev, uint16_t scriptptr, static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nvbios *bios = &dev_priv->VBIOS; + struct nvbios *bios = &dev_priv->vbios; uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & OUTPUT_C ? 1 : 0); uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]); @@ -3194,7 +3195,7 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int * of a list of pxclks and script pointers. */ struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nvbios *bios = &dev_priv->VBIOS; + struct nvbios *bios = &dev_priv->vbios; unsigned int outputset = (dcbent->or == 4) ? 
1 : 0; uint16_t scriptptr = 0, clktable; uint8_t clktableptr = 0; @@ -3261,7 +3262,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, */ struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nvbios *bios = &dev_priv->VBIOS; + struct nvbios *bios = &dev_priv->vbios; uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer]; uint32_t sel_clk_binding, sel_clk; int ret; @@ -3395,7 +3396,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios) #ifndef __powerpc__ NV_ERROR(dev, "Pointer to flat panel table invalid\n"); #endif - bios->pub.digital_min_front_porch = 0x4b; + bios->digital_min_front_porch = 0x4b; return 0; } @@ -3428,7 +3429,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios) * fptable[4] is the minimum * RAMDAC_FP_HCRTC -> RAMDAC_FP_HSYNC_START gap */ - bios->pub.digital_min_front_porch = fptable[4]; + bios->digital_min_front_porch = fptable[4]; ofs = -7; break; default: @@ -3467,7 +3468,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios) /* nv4x cards need both a strap value and fpindex of 0xf to use DDC */ if (lth.lvds_ver > 0x10) - bios->pub.fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf; + bios->fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf; /* * If either the strap or xlated fpindex value are 0xf there is no @@ -3491,7 +3492,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios) bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nvbios *bios = &dev_priv->VBIOS; + struct nvbios *bios = &dev_priv->vbios; uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr]; if (!mode) /* just checking whether we can produce a mode */ @@ -3562,11 +3563,11 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b * until later, when this function should be called with non-zero pxclk */ struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nvbios *bios = &dev_priv->VBIOS; + struct nvbios *bios = &dev_priv->vbios; int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0; struct lvdstableheader lth; uint16_t lvdsofs; - int ret, chip_version = bios->pub.chip_version; + int ret, chip_version = bios->chip_version; ret = parse_lvds_manufacturer_table_header(dev, bios, <h); if (ret) @@ -3682,7 +3683,7 @@ bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent, uint16_t record, int record_len, int record_nr) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nvbios *bios = &dev_priv->VBIOS; + struct nvbios *bios = &dev_priv->vbios; uint32_t entry; uint16_t table; int i, v; @@ -3716,7 +3717,7 @@ nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent, int *length) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nvbios *bios = &dev_priv->VBIOS; + struct nvbios *bios = &dev_priv->vbios; uint8_t *table; if (!bios->display.dp_table_ptr) { @@ -3725,7 +3726,7 @@ nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent, } table = &bios->data[bios->display.dp_table_ptr]; - if (table[0] != 0x21) { + if (table[0] != 0x20 && table[0] != 0x21) { NV_ERROR(dev, "DisplayPort table version 0x%02x unknown\n", table[0]); return NULL; @@ -3765,7 +3766,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, */ struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nvbios *bios = &dev_priv->VBIOS; 
+ struct nvbios *bios = &dev_priv->vbios; uint8_t *table = &bios->data[bios->display.script_table_ptr]; uint8_t *otable = NULL; uint16_t script; @@ -3918,8 +3919,8 @@ int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, i */ struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nvbios *bios = &dev_priv->VBIOS; - int cv = bios->pub.chip_version; + struct nvbios *bios = &dev_priv->vbios; + int cv = bios->chip_version; uint16_t clktable = 0, scriptptr; uint32_t sel_clk_binding, sel_clk; @@ -3978,8 +3979,8 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims */ struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nvbios *bios = &dev_priv->VBIOS; - int cv = bios->pub.chip_version, pllindex = 0; + struct nvbios *bios = &dev_priv->vbios; + int cv = bios->chip_version, pllindex = 0; uint8_t pll_lim_ver = 0, headerlen = 0, recordlen = 0, entries = 0; uint32_t crystal_strap_mask, crystal_straps; @@ -4332,7 +4333,7 @@ static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint */ bios->major_version = bios->data[offset + 3]; - bios->pub.chip_version = bios->data[offset + 2]; + bios->chip_version = bios->data[offset + 2]; NV_TRACE(dev, "Bios version %02x.%02x.%02x.%02x\n", bios->data[offset + 3], bios->data[offset + 2], bios->data[offset + 1], bios->data[offset]); @@ -4402,7 +4403,7 @@ static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, st } /* First entry is normal dac, 2nd tv-out perhaps? */ - bios->pub.dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff; + bios->dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff; return 0; } @@ -4526,8 +4527,8 @@ static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, st return -ENOSYS; } - bios->pub.dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]); - bios->pub.tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]); + bios->dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]); + bios->tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]); return 0; } @@ -4796,11 +4797,11 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi uint16_t legacy_scripts_offset, legacy_i2c_offset; /* load needed defaults in case we can't parse this info */ - bios->bdcb.dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX; - bios->bdcb.dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX; - bios->bdcb.dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX; - bios->bdcb.dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX; - bios->pub.digital_min_front_porch = 0x4b; + bios->dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX; + bios->dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX; + bios->dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX; + bios->dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX; + bios->digital_min_front_porch = 0x4b; bios->fmaxvco = 256000; bios->fminvco = 128000; bios->fp.duallink_transition_clk = 90000; @@ -4907,10 +4908,10 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset]; bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1]; bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2]; - bios->bdcb.dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4]; - bios->bdcb.dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5]; - bios->bdcb.dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6]; - bios->bdcb.dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7]; + 
bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4]; + bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5]; + bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6]; + bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7]; if (bmplength > 74) { bios->fmaxvco = ROM32(bmp[67]); @@ -4984,7 +4985,8 @@ read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, i else NV_WARN(dev, "DCB I2C table has more entries than indexable " - "(%d entries, max index 15)\n", i2ctable[2]); + "(%d entries, max %d)\n", i2ctable[2], + DCB_MAX_NUM_I2C_ENTRIES); entry_len = i2ctable[3]; /* [4] is i2c_default_indices, read in parse_dcb_table() */ } @@ -5000,8 +5002,8 @@ read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, i if (index == 0xf) return 0; - if (index > i2c_entries) { - NV_ERROR(dev, "DCB I2C index too big (%d > %d)\n", + if (index >= i2c_entries) { + NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n", index, i2ctable[2]); return -ENOENT; } @@ -5036,7 +5038,7 @@ read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, i static struct dcb_gpio_entry * new_gpio_entry(struct nvbios *bios) { - struct parsed_dcb_gpio *gpio = &bios->bdcb.gpio; + struct dcb_gpio_table *gpio = &bios->dcb.gpio; return &gpio->entry[gpio->entries++]; } @@ -5045,14 +5047,14 @@ struct dcb_gpio_entry * nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nvbios *bios = &dev_priv->VBIOS; + struct nvbios *bios = &dev_priv->vbios; int i; - for (i = 0; i < bios->bdcb.gpio.entries; i++) { - if (bios->bdcb.gpio.entry[i].tag != tag) + for (i = 0; i < bios->dcb.gpio.entries; i++) { + if (bios->dcb.gpio.entry[i].tag != tag) continue; - return &bios->bdcb.gpio.entry[i]; + return &bios->dcb.gpio.entry[i]; } return NULL; @@ -5100,7 +5102,7 @@ static void parse_dcb_gpio_table(struct nvbios *bios) { struct drm_device *dev = bios->dev; - uint16_t gpio_table_ptr = bios->bdcb.gpio_table_ptr; + uint16_t gpio_table_ptr = bios->dcb.gpio_table_ptr; uint8_t *gpio_table = &bios->data[gpio_table_ptr]; int header_len = gpio_table[1], entries = gpio_table[2], @@ -5108,7 +5110,7 @@ parse_dcb_gpio_table(struct nvbios *bios) void (*parse_entry)(struct nvbios *, uint16_t) = NULL; int i; - if (bios->bdcb.version >= 0x40) { + if (bios->dcb.version >= 0x40) { if (gpio_table_ptr && entry_len != 4) { NV_WARN(dev, "Invalid DCB GPIO table entry length.\n"); return; @@ -5116,7 +5118,7 @@ parse_dcb_gpio_table(struct nvbios *bios) parse_entry = parse_dcb40_gpio_entry; - } else if (bios->bdcb.version >= 0x30) { + } else if (bios->dcb.version >= 0x30) { if (gpio_table_ptr && entry_len != 2) { NV_WARN(dev, "Invalid DCB GPIO table entry length.\n"); return; @@ -5124,7 +5126,7 @@ parse_dcb_gpio_table(struct nvbios *bios) parse_entry = parse_dcb30_gpio_entry; - } else if (bios->bdcb.version >= 0x22) { + } else if (bios->dcb.version >= 0x22) { /* * DCBs older than v3.0 don't really have a GPIO * table, instead they keep some GPIO info at fixed @@ -5158,30 +5160,67 @@ struct dcb_connector_table_entry * nouveau_bios_connector_entry(struct drm_device *dev, int index) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nvbios *bios = &dev_priv->VBIOS; + struct nvbios *bios = &dev_priv->vbios; struct dcb_connector_table_entry *cte; - if (index >= bios->bdcb.connector.entries) + if (index >= bios->dcb.connector.entries) return NULL; - cte = &bios->bdcb.connector.entry[index]; + cte = 
&bios->dcb.connector.entry[index]; if (cte->type == 0xff) return NULL; return cte; } +static enum dcb_connector_type +divine_connector_type(struct nvbios *bios, int index) +{ + struct dcb_table *dcb = &bios->dcb; + unsigned encoders = 0, type = DCB_CONNECTOR_NONE; + int i; + + for (i = 0; i < dcb->entries; i++) { + if (dcb->entry[i].connector == index) + encoders |= (1 << dcb->entry[i].type); + } + + if (encoders & (1 << OUTPUT_DP)) { + if (encoders & (1 << OUTPUT_TMDS)) + type = DCB_CONNECTOR_DP; + else + type = DCB_CONNECTOR_eDP; + } else + if (encoders & (1 << OUTPUT_TMDS)) { + if (encoders & (1 << OUTPUT_ANALOG)) + type = DCB_CONNECTOR_DVI_I; + else + type = DCB_CONNECTOR_DVI_D; + } else + if (encoders & (1 << OUTPUT_ANALOG)) { + type = DCB_CONNECTOR_VGA; + } else + if (encoders & (1 << OUTPUT_LVDS)) { + type = DCB_CONNECTOR_LVDS; + } else + if (encoders & (1 << OUTPUT_TV)) { + type = DCB_CONNECTOR_TV_0; + } + + return type; +} + static void parse_dcb_connector_table(struct nvbios *bios) { struct drm_device *dev = bios->dev; - struct dcb_connector_table *ct = &bios->bdcb.connector; + struct dcb_connector_table *ct = &bios->dcb.connector; struct dcb_connector_table_entry *cte; - uint8_t *conntab = &bios->data[bios->bdcb.connector_table_ptr]; + uint8_t *conntab = &bios->data[bios->dcb.connector_table_ptr]; uint8_t *entry; int i; - if (!bios->bdcb.connector_table_ptr) { + if (!bios->dcb.connector_table_ptr) { NV_DEBUG_KMS(dev, "No DCB connector table present\n"); return; } @@ -5203,6 +5242,7 @@ parse_dcb_connector_table(struct nvbios *bios) cte->entry = ROM16(entry[0]); else cte->entry = ROM32(entry[0]); + cte->type = (cte->entry & 0x000000ff) >> 0; cte->index = (cte->entry & 0x00000f00) >> 8; switch (cte->entry & 0x00033000) { @@ -5228,10 +5268,33 @@ parse_dcb_connector_table(struct nvbios *bios) NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n", i, cte->entry, cte->type, cte->index, cte->gpio_tag); + + /* check for known types, fallback to guessing the type + * from attached encoders if we hit an unknown. 
+ */ + switch (cte->type) { + case DCB_CONNECTOR_VGA: + case DCB_CONNECTOR_TV_0: + case DCB_CONNECTOR_TV_1: + case DCB_CONNECTOR_TV_3: + case DCB_CONNECTOR_DVI_I: + case DCB_CONNECTOR_DVI_D: + case DCB_CONNECTOR_LVDS: + case DCB_CONNECTOR_DP: + case DCB_CONNECTOR_eDP: + case DCB_CONNECTOR_HDMI_0: + case DCB_CONNECTOR_HDMI_1: + break; + default: + cte->type = divine_connector_type(bios, cte->index); + NV_WARN(dev, "unknown type, using 0x%02x", cte->type); + break; + } + } } -static struct dcb_entry *new_dcb_entry(struct parsed_dcb *dcb) +static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb) { struct dcb_entry *entry = &dcb->entry[dcb->entries]; @@ -5241,7 +5304,7 @@ static struct dcb_entry *new_dcb_entry(struct parsed_dcb *dcb) return entry; } -static void fabricate_vga_output(struct parsed_dcb *dcb, int i2c, int heads) +static void fabricate_vga_output(struct dcb_table *dcb, int i2c, int heads) { struct dcb_entry *entry = new_dcb_entry(dcb); @@ -5252,7 +5315,7 @@ static void fabricate_vga_output(struct parsed_dcb *dcb, int i2c, int heads) /* "or" mostly unused in early gen crt modesetting, 0 is fine */ } -static void fabricate_dvi_i_output(struct parsed_dcb *dcb, bool twoHeads) +static void fabricate_dvi_i_output(struct dcb_table *dcb, bool twoHeads) { struct dcb_entry *entry = new_dcb_entry(dcb); @@ -5279,7 +5342,7 @@ static void fabricate_dvi_i_output(struct parsed_dcb *dcb, bool twoHeads) #endif } -static void fabricate_tv_output(struct parsed_dcb *dcb, bool twoHeads) +static void fabricate_tv_output(struct dcb_table *dcb, bool twoHeads) { struct dcb_entry *entry = new_dcb_entry(dcb); @@ -5290,13 +5353,13 @@ static void fabricate_tv_output(struct parsed_dcb *dcb, bool twoHeads) } static bool -parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb, +parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb, uint32_t conn, uint32_t conf, struct dcb_entry *entry) { entry->type = conn & 0xf; entry->i2c_index = (conn >> 4) & 0xf; entry->heads = (conn >> 8) & 0xf; - if (bdcb->version >= 0x40) + if (dcb->version >= 0x40) entry->connector = (conn >> 12) & 0xf; entry->bus = (conn >> 16) & 0xf; entry->location = (conn >> 20) & 0x3; @@ -5314,7 +5377,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb, * Although the rest of a CRT conf dword is usually * zeros, mac biosen have stuff there so we must mask */ - entry->crtconf.maxfreq = (bdcb->version < 0x30) ? + entry->crtconf.maxfreq = (dcb->version < 0x30) ? (conf & 0xffff) * 10 : (conf & 0xff) * 10000; break; @@ -5323,7 +5386,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb, uint32_t mask; if (conf & 0x1) entry->lvdsconf.use_straps_for_mode = true; - if (bdcb->version < 0x22) { + if (dcb->version < 0x22) { mask = ~0xd; /* * The laptop in bug 14567 lies and claims to not use @@ -5347,7 +5410,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb, * Until we even try to use these on G8x, it's * useless reporting unknown bits. They all are. 
*/ - if (bdcb->version >= 0x40) + if (dcb->version >= 0x40) break; NV_ERROR(dev, "Unknown LVDS configuration bits, " @@ -5357,7 +5420,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb, } case OUTPUT_TV: { - if (bdcb->version >= 0x30) + if (dcb->version >= 0x30) entry->tvconf.has_component_output = conf & (0x8 << 4); else entry->tvconf.has_component_output = false; @@ -5384,8 +5447,10 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb, break; case 0xe: /* weird g80 mobile type that "nv" treats as a terminator */ - bdcb->dcb.entries--; + dcb->entries--; return false; + default: + break; } /* unsure what DCB version introduces this, 3.0? */ @@ -5396,7 +5461,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb, } static bool -parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb, +parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb, uint32_t conn, uint32_t conf, struct dcb_entry *entry) { switch (conn & 0x0000000f) { @@ -5462,27 +5527,27 @@ parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb, return true; } -static bool parse_dcb_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb, +static bool parse_dcb_entry(struct drm_device *dev, struct dcb_table *dcb, uint32_t conn, uint32_t conf) { - struct dcb_entry *entry = new_dcb_entry(&bdcb->dcb); + struct dcb_entry *entry = new_dcb_entry(dcb); bool ret; - if (bdcb->version >= 0x20) - ret = parse_dcb20_entry(dev, bdcb, conn, conf, entry); + if (dcb->version >= 0x20) + ret = parse_dcb20_entry(dev, dcb, conn, conf, entry); else - ret = parse_dcb15_entry(dev, &bdcb->dcb, conn, conf, entry); + ret = parse_dcb15_entry(dev, dcb, conn, conf, entry); if (!ret) return ret; - read_dcb_i2c_entry(dev, bdcb->version, bdcb->i2c_table, - entry->i2c_index, &bdcb->dcb.i2c[entry->i2c_index]); + read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table, + entry->i2c_index, &dcb->i2c[entry->i2c_index]); return true; } static -void merge_like_dcb_entries(struct drm_device *dev, struct parsed_dcb *dcb) +void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb) { /* * DCB v2.0 lists each output combination separately. 
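The parse_dcb20_entry() conversion above keeps the DCB 2.0 "connection" dword layout unchanged, only renaming bdcb to dcb. For reference, here is a standalone sketch of how those bit-fields decode; field names mirror struct dcb_entry, only the fields visible in the hunk are shown, and the sample value is arbitrary rather than taken from a real VBIOS.

#include <stdint.h>
#include <stdio.h>

/* Illustrative unpacking of a DCB 2.0+ connection dword, following the
 * field layout in parse_dcb20_entry() above.  Standalone sketch, not
 * driver code. */
struct dcb_conn_fields {
        unsigned type;       /* conn & 0xf         : encoder type (OUTPUT_*)    */
        unsigned i2c_index;  /* (conn >> 4) & 0xf  : DDC/I2C bus index          */
        unsigned heads;      /* (conn >> 8) & 0xf  : usable CRTC mask           */
        unsigned connector;  /* (conn >> 12) & 0xf : connector index, DCB 4.0+  */
        unsigned bus;        /* (conn >> 16) & 0xf                              */
        unsigned location;   /* (conn >> 20) & 0x3 : on-chip vs. off-chip       */
};

static struct dcb_conn_fields unpack_dcb20_conn(uint32_t conn, uint8_t dcb_version)
{
        struct dcb_conn_fields f = {
                .type      = conn & 0xf,
                .i2c_index = (conn >> 4) & 0xf,
                .heads     = (conn >> 8) & 0xf,
                .connector = dcb_version >= 0x40 ? (conn >> 12) & 0xf : 0,
                .bus       = (conn >> 16) & 0xf,
                .location  = (conn >> 20) & 0x3,
        };
        return f;
}

int main(void)
{
        /* arbitrary example value, not a real DCB entry */
        struct dcb_conn_fields f = unpack_dcb20_conn(0x01000302, 0x40);

        printf("type %u i2c %u heads %u connector %u\n",
               f.type, f.i2c_index, f.heads, f.connector);
        return 0;
}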
@@ -5534,8 +5599,7 @@ static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct bios_parsed_dcb *bdcb = &bios->bdcb; - struct parsed_dcb *dcb; + struct dcb_table *dcb = &bios->dcb; uint16_t dcbptr = 0, i2ctabptr = 0; uint8_t *dcbtable; uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES; @@ -5543,9 +5607,6 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) int recordlength = 8, confofs = 4; int i; - dcb = bios->pub.dcb = &bdcb->dcb; - dcb->entries = 0; - /* get the offset from 0x36 */ if (dev_priv->card_type > NV_04) { dcbptr = ROM16(bios->data[0x36]); @@ -5567,21 +5628,21 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) dcbtable = &bios->data[dcbptr]; /* get DCB version */ - bdcb->version = dcbtable[0]; + dcb->version = dcbtable[0]; NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n", - bdcb->version >> 4, bdcb->version & 0xf); + dcb->version >> 4, dcb->version & 0xf); - if (bdcb->version >= 0x20) { /* NV17+ */ + if (dcb->version >= 0x20) { /* NV17+ */ uint32_t sig; - if (bdcb->version >= 0x30) { /* NV40+ */ + if (dcb->version >= 0x30) { /* NV40+ */ headerlen = dcbtable[1]; entries = dcbtable[2]; recordlength = dcbtable[3]; i2ctabptr = ROM16(dcbtable[4]); sig = ROM32(dcbtable[6]); - bdcb->gpio_table_ptr = ROM16(dcbtable[10]); - bdcb->connector_table_ptr = ROM16(dcbtable[20]); + dcb->gpio_table_ptr = ROM16(dcbtable[10]); + dcb->connector_table_ptr = ROM16(dcbtable[20]); } else { i2ctabptr = ROM16(dcbtable[2]); sig = ROM32(dcbtable[4]); @@ -5593,7 +5654,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) "signature (%08X)\n", sig); return -EINVAL; } - } else if (bdcb->version >= 0x15) { /* some NV11 and NV20 */ + } else if (dcb->version >= 0x15) { /* some NV11 and NV20 */ char sig[8] = { 0 }; strncpy(sig, (char *)&dcbtable[-7], 7); @@ -5641,14 +5702,11 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) if (!i2ctabptr) NV_WARN(dev, "No pointer to DCB I2C port table\n"); else { - bdcb->i2c_table = &bios->data[i2ctabptr]; - if (bdcb->version >= 0x30) - bdcb->i2c_default_indices = bdcb->i2c_table[4]; + dcb->i2c_table = &bios->data[i2ctabptr]; + if (dcb->version >= 0x30) + dcb->i2c_default_indices = dcb->i2c_table[4]; } - parse_dcb_gpio_table(bios); - parse_dcb_connector_table(bios); - if (entries > DCB_MAX_NUM_ENTRIES) entries = DCB_MAX_NUM_ENTRIES; @@ -5673,7 +5731,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n", dcb->entries, connection, config); - if (!parse_dcb_entry(dev, bdcb, connection, config)) + if (!parse_dcb_entry(dev, dcb, connection, config)) break; } @@ -5681,18 +5739,22 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads) * apart for v2.1+ not being known for requiring merging, this * guarantees dcbent->index is the index of the entry in the rom image */ - if (bdcb->version < 0x21) + if (dcb->version < 0x21) merge_like_dcb_entries(dev, dcb); - return dcb->entries ? 
0 : -ENXIO; + if (!dcb->entries) + return -ENXIO; + + parse_dcb_gpio_table(bios); + parse_dcb_connector_table(bios); + return 0; } static void fixup_legacy_connector(struct nvbios *bios) { - struct bios_parsed_dcb *bdcb = &bios->bdcb; - struct parsed_dcb *dcb = &bdcb->dcb; - int high = 0, i; + struct dcb_table *dcb = &bios->dcb; + int i, i2c, i2c_conn[DCB_MAX_NUM_I2C_ENTRIES] = { }; /* * DCB 3.0 also has the table in most cases, but there are some cards @@ -5700,9 +5762,11 @@ fixup_legacy_connector(struct nvbios *bios) * indices are all 0. We don't need the connector indices on pre-G80 * chips (yet?) so limit the use to DCB 4.0 and above. */ - if (bdcb->version >= 0x40) + if (dcb->version >= 0x40) return; + dcb->connector.entries = 0; + /* * No known connector info before v3.0, so make it up. the rule here * is: anything on the same i2c bus is considered to be on the same @@ -5710,37 +5774,38 @@ fixup_legacy_connector(struct nvbios *bios) * its own unique connector index. */ for (i = 0; i < dcb->entries; i++) { - if (dcb->entry[i].i2c_index == 0xf) - continue; - /* * Ignore the I2C index for on-chip TV-out, as there * are cards with bogus values (nv31m in bug 23212), * and it's otherwise useless. */ if (dcb->entry[i].type == OUTPUT_TV && - dcb->entry[i].location == DCB_LOC_ON_CHIP) { + dcb->entry[i].location == DCB_LOC_ON_CHIP) dcb->entry[i].i2c_index = 0xf; + i2c = dcb->entry[i].i2c_index; + + if (i2c_conn[i2c]) { + dcb->entry[i].connector = i2c_conn[i2c] - 1; continue; } - dcb->entry[i].connector = dcb->entry[i].i2c_index; - if (dcb->entry[i].connector > high) - high = dcb->entry[i].connector; + dcb->entry[i].connector = dcb->connector.entries++; + if (i2c != 0xf) + i2c_conn[i2c] = dcb->connector.entries; } - for (i = 0; i < dcb->entries; i++) { - if (dcb->entry[i].i2c_index != 0xf) - continue; - - dcb->entry[i].connector = ++high; + /* Fake the connector table as well as just connector indices */ + for (i = 0; i < dcb->connector.entries; i++) { + dcb->connector.entry[i].index = i; + dcb->connector.entry[i].type = divine_connector_type(bios, i); + dcb->connector.entry[i].gpio_tag = 0xff; } } static void fixup_legacy_i2c(struct nvbios *bios) { - struct parsed_dcb *dcb = &bios->bdcb.dcb; + struct dcb_table *dcb = &bios->dcb; int i; for (i = 0; i < dcb->entries; i++) { @@ -5826,7 +5891,7 @@ static int load_nv17_hw_sequencer_ucode(struct drm_device *dev, uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nvbios *bios = &dev_priv->VBIOS; + struct nvbios *bios = &dev_priv->vbios; const uint8_t edid_sig[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; uint16_t offset = 0; @@ -5859,7 +5924,7 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table, struct dcb_entry *dcbent) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nvbios *bios = &dev_priv->VBIOS; + struct nvbios *bios = &dev_priv->vbios; struct init_exec iexec = { true, false }; mutex_lock(&bios->lock); @@ -5872,7 +5937,7 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table, static bool NVInitVBIOS(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nvbios *bios = &dev_priv->VBIOS; + struct nvbios *bios = &dev_priv->vbios; memset(bios, 0, sizeof(struct nvbios)); mutex_init(&bios->lock); @@ -5888,7 +5953,7 @@ static bool NVInitVBIOS(struct drm_device *dev) static int nouveau_parse_vbios_struct(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = 
dev->dev_private; - struct nvbios *bios = &dev_priv->VBIOS; + struct nvbios *bios = &dev_priv->vbios; const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' }; const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 }; int offset; @@ -5915,7 +5980,7 @@ int nouveau_run_vbios_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nvbios *bios = &dev_priv->VBIOS; + struct nvbios *bios = &dev_priv->vbios; int i, ret = 0; NVLockVgaCrtcs(dev, false); @@ -5946,9 +6011,9 @@ nouveau_run_vbios_init(struct drm_device *dev) } if (dev_priv->card_type >= NV_50) { - for (i = 0; i < bios->bdcb.dcb.entries; i++) { + for (i = 0; i < bios->dcb.entries; i++) { nouveau_bios_run_display_table(dev, - &bios->bdcb.dcb.entry[i], + &bios->dcb.entry[i], 0, 0); } } @@ -5962,11 +6027,11 @@ static void nouveau_bios_i2c_devices_takedown(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nvbios *bios = &dev_priv->VBIOS; + struct nvbios *bios = &dev_priv->vbios; struct dcb_i2c_entry *entry; int i; - entry = &bios->bdcb.dcb.i2c[0]; + entry = &bios->dcb.i2c[0]; for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++) nouveau_i2c_fini(dev, entry); } @@ -5975,13 +6040,11 @@ int nouveau_bios_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nvbios *bios = &dev_priv->VBIOS; + struct nvbios *bios = &dev_priv->vbios; uint32_t saved_nv_pextdev_boot_0; bool was_locked; int ret; - dev_priv->vbios = &bios->pub; - if (!NVInitVBIOS(dev)) return -ENODEV; @@ -6023,10 +6086,8 @@ nouveau_bios_init(struct drm_device *dev) bios_wr32(bios, NV_PEXTDEV_BOOT_0, saved_nv_pextdev_boot_0); ret = nouveau_run_vbios_init(dev); - if (ret) { - dev_priv->vbios = NULL; + if (ret) return ret; - } /* feature_byte on BMP is poor, but init always sets CR4B */ was_locked = NVLockVgaCrtcs(dev, false); diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h index fd94bd6dc26..9f688aa9a65 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.h +++ b/drivers/gpu/drm/nouveau/nouveau_bios.h @@ -34,9 +34,67 @@ #define DCB_LOC_ON_CHIP 0 +struct dcb_i2c_entry { + uint8_t port_type; + uint8_t read, write; + struct nouveau_i2c_chan *chan; +}; + +enum dcb_gpio_tag { + DCB_GPIO_TVDAC0 = 0xc, + DCB_GPIO_TVDAC1 = 0x2d, +}; + +struct dcb_gpio_entry { + enum dcb_gpio_tag tag; + int line; + bool invert; +}; + +struct dcb_gpio_table { + int entries; + struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES]; +}; + +enum dcb_connector_type { + DCB_CONNECTOR_VGA = 0x00, + DCB_CONNECTOR_TV_0 = 0x10, + DCB_CONNECTOR_TV_1 = 0x11, + DCB_CONNECTOR_TV_3 = 0x13, + DCB_CONNECTOR_DVI_I = 0x30, + DCB_CONNECTOR_DVI_D = 0x31, + DCB_CONNECTOR_LVDS = 0x40, + DCB_CONNECTOR_DP = 0x46, + DCB_CONNECTOR_eDP = 0x47, + DCB_CONNECTOR_HDMI_0 = 0x60, + DCB_CONNECTOR_HDMI_1 = 0x61, + DCB_CONNECTOR_NONE = 0xff +}; + +struct dcb_connector_table_entry { + uint32_t entry; + enum dcb_connector_type type; + uint8_t index; + uint8_t gpio_tag; +}; + +struct dcb_connector_table { + int entries; + struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES]; +}; + +enum dcb_type { + OUTPUT_ANALOG = 0, + OUTPUT_TV = 1, + OUTPUT_TMDS = 2, + OUTPUT_LVDS = 3, + OUTPUT_DP = 6, + OUTPUT_ANY = -1 +}; + struct dcb_entry { int index; /* may not be raw dcb index if merging has happened */ - uint8_t type; + enum dcb_type type; uint8_t i2c_index; uint8_t heads; uint8_t connector; @@ -71,69 +129,22 @@ struct dcb_entry { bool i2c_upper_default; }; -struct 
dcb_i2c_entry { - uint8_t port_type; - uint8_t read, write; - struct nouveau_i2c_chan *chan; -}; +struct dcb_table { + uint8_t version; -struct parsed_dcb { int entries; struct dcb_entry entry[DCB_MAX_NUM_ENTRIES]; - struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES]; -}; - -enum dcb_gpio_tag { - DCB_GPIO_TVDAC0 = 0xc, - DCB_GPIO_TVDAC1 = 0x2d, -}; - -struct dcb_gpio_entry { - enum dcb_gpio_tag tag; - int line; - bool invert; -}; - -struct parsed_dcb_gpio { - int entries; - struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES]; -}; - -struct dcb_connector_table_entry { - uint32_t entry; - uint8_t type; - uint8_t index; - uint8_t gpio_tag; -}; - -struct dcb_connector_table { - int entries; - struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES]; -}; - -struct bios_parsed_dcb { - uint8_t version; - - struct parsed_dcb dcb; uint8_t *i2c_table; uint8_t i2c_default_indices; + struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES]; uint16_t gpio_table_ptr; - struct parsed_dcb_gpio gpio; + struct dcb_gpio_table gpio; uint16_t connector_table_ptr; struct dcb_connector_table connector; }; -enum nouveau_encoder_type { - OUTPUT_ANALOG = 0, - OUTPUT_TV = 1, - OUTPUT_TMDS = 2, - OUTPUT_LVDS = 3, - OUTPUT_DP = 6, - OUTPUT_ANY = -1 -}; - enum nouveau_or { OUTPUT_A = (1 << 0), OUTPUT_B = (1 << 1), @@ -190,8 +201,8 @@ struct pll_lims { int refclk; }; -struct nouveau_bios_info { - struct parsed_dcb *dcb; +struct nvbios { + struct drm_device *dev; uint8_t chip_version; @@ -199,11 +210,6 @@ struct nouveau_bios_info { uint32_t tvdactestval; uint8_t digital_min_front_porch; bool fp_no_ddc; -}; - -struct nvbios { - struct drm_device *dev; - struct nouveau_bios_info pub; struct mutex lock; @@ -234,7 +240,7 @@ struct nvbios { uint16_t some_script_ptr; /* BIT I + 14 */ uint16_t init96_tbl_ptr; /* BIT I + 16 */ - struct bios_parsed_dcb bdcb; + struct dcb_table dcb; struct { int crtchead; diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c index ee2b84504d0..88f9bc0941e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_calc.c +++ b/drivers/gpu/drm/nouveau/nouveau_calc.c @@ -274,7 +274,7 @@ getMNP_single(struct drm_device *dev, struct pll_lims *pll_lim, int clk, * returns calculated clock */ struct drm_nouveau_private *dev_priv = dev->dev_private; - int cv = dev_priv->vbios->chip_version; + int cv = dev_priv->vbios.chip_version; int minvco = pll_lim->vco1.minfreq, maxvco = pll_lim->vco1.maxfreq; int minM = pll_lim->vco1.min_m, maxM = pll_lim->vco1.max_m; int minN = pll_lim->vco1.min_n, maxN = pll_lim->vco1.max_n; @@ -373,7 +373,7 @@ getMNP_double(struct drm_device *dev, struct pll_lims *pll_lim, int clk, * returns calculated clock */ struct drm_nouveau_private *dev_priv = dev->dev_private; - int chip_version = dev_priv->vbios->chip_version; + int chip_version = dev_priv->vbios.chip_version; int minvco1 = pll_lim->vco1.minfreq, maxvco1 = pll_lim->vco1.maxfreq; int minvco2 = pll_lim->vco2.minfreq, maxvco2 = pll_lim->vco2.maxfreq; int minU1 = pll_lim->vco1.min_inputfreq, minU2 = pll_lim->vco2.min_inputfreq; diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c index 2281f99da7f..6dfb425cbae 100644 --- a/drivers/gpu/drm/nouveau/nouveau_channel.c +++ b/drivers/gpu/drm/nouveau/nouveau_channel.c @@ -35,22 +35,27 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_bo *pb = chan->pushbuf_bo; struct nouveau_gpuobj *pushbuf = NULL; - uint32_t start = 
pb->bo.mem.mm_node->start << PAGE_SHIFT; int ret; + if (dev_priv->card_type >= NV_50) { + ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, + dev_priv->vm_end, NV_DMA_ACCESS_RO, + NV_DMA_TARGET_AGP, &pushbuf); + chan->pushbuf_base = pb->bo.offset; + } else if (pb->bo.mem.mem_type == TTM_PL_TT) { ret = nouveau_gpuobj_gart_dma_new(chan, 0, dev_priv->gart_info.aper_size, NV_DMA_ACCESS_RO, &pushbuf, NULL); - chan->pushbuf_base = start; + chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT; } else if (dev_priv->card_type != NV_04) { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, dev_priv->fb_available_size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_VIDMEM, &pushbuf); - chan->pushbuf_base = start; + chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT; } else { /* NV04 cmdbuf hack, from original ddx.. not sure of it's * exact reason for existing :) PCI access to cmdbuf in @@ -61,7 +66,7 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) dev_priv->fb_available_size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_PCI, &pushbuf); - chan->pushbuf_base = start; + chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT; } ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf); @@ -275,9 +280,18 @@ nouveau_channel_free(struct nouveau_channel *chan) */ nouveau_fence_fini(chan); - /* Ensure the channel is no longer active on the GPU */ + /* This will prevent pfifo from switching channels. */ pfifo->reassign(dev, false); + /* We want to give pgraph a chance to idle and get rid of all potential + * errors. We need to do this before the lock, otherwise the irq handler + * is unable to process them. + */ + if (pgraph->channel(dev) == chan) + nouveau_wait_for_idle(dev); + + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + pgraph->fifo_access(dev, false); if (pgraph->channel(dev) == chan) pgraph->unload_context(dev); @@ -293,6 +307,8 @@ nouveau_channel_free(struct nouveau_channel *chan) pfifo->reassign(dev, true); + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); + /* Release the channel's resources */ nouveau_gpuobj_ref_del(dev, &chan->pushbuf); if (chan->pushbuf_bo) { @@ -369,6 +385,14 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, return ret; init->channel = chan->id; + if (chan->dma.ib_max) + init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM | + NOUVEAU_GEM_DOMAIN_GART; + else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM) + init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM; + else + init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART; + init->subchan[0].handle = NvM2MF; if (dev_priv->card_type < NV_50) init->subchan[0].grclass = 0x0039; @@ -408,7 +432,6 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, ***********************************/ struct drm_ioctl_desc nouveau_ioctls[] = { - DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH), DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH), DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH), @@ -418,13 +441,9 @@ struct drm_ioctl_desc nouveau_ioctls[] = { DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH), DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH), DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH), - DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL, nouveau_gem_ioctl_pushbuf_call, DRM_AUTH), - 
DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PIN, nouveau_gem_ioctl_pin, DRM_AUTH), - DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_UNPIN, nouveau_gem_ioctl_unpin, DRM_AUTH), DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH), DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH), DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH), - DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL2, nouveau_gem_ioctl_pushbuf_call2, DRM_AUTH), }; int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index d2f63353ea9..24327f468c4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -218,7 +218,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector, connector->interlace_allowed = true; } - if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) { + if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) { drm_connector_property_set_value(connector, dev->mode_config.dvi_i_subconnector_property, nv_encoder->dcb->type == OUTPUT_TMDS ? @@ -236,15 +236,17 @@ nouveau_connector_detect(struct drm_connector *connector) struct nouveau_i2c_chan *i2c; int type, flags; - if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) + if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS) nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS); if (nv_encoder && nv_connector->native_mode) { + unsigned status = connector_status_connected; + #ifdef CONFIG_ACPI if (!nouveau_ignorelid && !acpi_lid_open()) - return connector_status_disconnected; + status = connector_status_unknown; #endif nouveau_connector_set_encoder(connector, nv_encoder); - return connector_status_connected; + return status; } /* Cleanup the previous EDID block. */ @@ -279,7 +281,7 @@ nouveau_connector_detect(struct drm_connector *connector) * same i2c channel so the value returned from ddc_detect * isn't necessarily correct. */ - if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) { + if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) { if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL) type = OUTPUT_TMDS; else @@ -321,11 +323,11 @@ detect_analog: static void nouveau_connector_force(struct drm_connector *connector) { - struct drm_device *dev = connector->dev; + struct nouveau_connector *nv_connector = nouveau_connector(connector); struct nouveau_encoder *nv_encoder; int type; - if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) { + if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) { if (connector->force == DRM_FORCE_ON_DIGITAL) type = OUTPUT_TMDS; else @@ -335,7 +337,7 @@ nouveau_connector_force(struct drm_connector *connector) nv_encoder = find_encoder_by_type(connector, type); if (!nv_encoder) { - NV_ERROR(dev, "can't find encoder to force %s on!\n", + NV_ERROR(connector->dev, "can't find encoder to force %s on!\n", drm_get_connector_name(connector)); connector->status = connector_status_disconnected; return; @@ -369,7 +371,7 @@ nouveau_connector_set_property(struct drm_connector *connector, } /* LVDS always needs gpu scaling */ - if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS && + if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS && value == DRM_MODE_SCALE_NONE) return -EINVAL; @@ -535,7 +537,7 @@ nouveau_connector_get_modes(struct drm_connector *connector) /* If we're not LVDS, destroy the previous native mode, the attached * monitor could have changed. 
*/ - if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && + if (nv_connector->dcb->type != DCB_CONNECTOR_LVDS && nv_connector->native_mode) { drm_mode_destroy(dev, nv_connector->native_mode); nv_connector->native_mode = NULL; @@ -563,7 +565,7 @@ nouveau_connector_get_modes(struct drm_connector *connector) ret = get_slave_funcs(nv_encoder)-> get_modes(to_drm_encoder(nv_encoder), connector); - if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) + if (nv_encoder->dcb->type == OUTPUT_LVDS) ret += nouveau_connector_scaler_modes_add(connector); return ret; @@ -613,6 +615,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector, clock *= 3; break; + default: + BUG_ON(1); + return MODE_BAD; } if (clock < min_clock) @@ -680,7 +685,7 @@ nouveau_connector_create_lvds(struct drm_device *dev, /* Firstly try getting EDID over DDC, if allowed and I2C channel * is available. */ - if (!dev_priv->VBIOS.pub.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf) + if (!dev_priv->vbios.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf) i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index); if (i2c) { @@ -695,7 +700,7 @@ nouveau_connector_create_lvds(struct drm_device *dev, */ if (!nv_connector->edid && nouveau_bios_fp_mode(dev, &native) && (nv_encoder->dcb->lvdsconf.use_straps_for_mode || - dev_priv->VBIOS.pub.fp_no_ddc)) { + dev_priv->vbios.fp_no_ddc)) { nv_connector->native_mode = drm_mode_duplicate(dev, &native); goto out; } @@ -704,7 +709,7 @@ nouveau_connector_create_lvds(struct drm_device *dev, * stored for the panel stored in them. */ if (!nv_connector->edid && !nv_connector->native_mode && - !dev_priv->VBIOS.pub.fp_no_ddc) { + !dev_priv->vbios.fp_no_ddc) { struct edid *edid = (struct edid *)nouveau_bios_embedded_edid(dev); if (edid) { @@ -739,46 +744,66 @@ out: } int -nouveau_connector_create(struct drm_device *dev, int index, int type) +nouveau_connector_create(struct drm_device *dev, + struct dcb_connector_table_entry *dcb) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_connector *nv_connector = NULL; struct drm_connector *connector; struct drm_encoder *encoder; - int ret; + int ret, type; NV_DEBUG_KMS(dev, "\n"); - nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL); - if (!nv_connector) - return -ENOMEM; - nv_connector->dcb = nouveau_bios_connector_entry(dev, index); - connector = &nv_connector->base; - - switch (type) { - case DRM_MODE_CONNECTOR_VGA: + switch (dcb->type) { + case DCB_CONNECTOR_NONE: + return 0; + case DCB_CONNECTOR_VGA: NV_INFO(dev, "Detected a VGA connector\n"); + type = DRM_MODE_CONNECTOR_VGA; break; - case DRM_MODE_CONNECTOR_DVID: - NV_INFO(dev, "Detected a DVI-D connector\n"); + case DCB_CONNECTOR_TV_0: + case DCB_CONNECTOR_TV_1: + case DCB_CONNECTOR_TV_3: + NV_INFO(dev, "Detected a TV connector\n"); + type = DRM_MODE_CONNECTOR_TV; break; - case DRM_MODE_CONNECTOR_DVII: + case DCB_CONNECTOR_DVI_I: NV_INFO(dev, "Detected a DVI-I connector\n"); + type = DRM_MODE_CONNECTOR_DVII; break; - case DRM_MODE_CONNECTOR_LVDS: - NV_INFO(dev, "Detected a LVDS connector\n"); + case DCB_CONNECTOR_DVI_D: + NV_INFO(dev, "Detected a DVI-D connector\n"); + type = DRM_MODE_CONNECTOR_DVID; break; - case DRM_MODE_CONNECTOR_TV: - NV_INFO(dev, "Detected a TV connector\n"); + case DCB_CONNECTOR_HDMI_0: + case DCB_CONNECTOR_HDMI_1: + NV_INFO(dev, "Detected a HDMI connector\n"); + type = DRM_MODE_CONNECTOR_HDMIA; + break; + case DCB_CONNECTOR_LVDS: + NV_INFO(dev, "Detected a LVDS connector\n"); + type = DRM_MODE_CONNECTOR_LVDS; break; - case 
DRM_MODE_CONNECTOR_DisplayPort: + case DCB_CONNECTOR_DP: NV_INFO(dev, "Detected a DisplayPort connector\n"); + type = DRM_MODE_CONNECTOR_DisplayPort; break; - default: - NV_ERROR(dev, "Unknown connector, this is not good.\n"); + case DCB_CONNECTOR_eDP: + NV_INFO(dev, "Detected an eDP connector\n"); + type = DRM_MODE_CONNECTOR_eDP; break; + default: + NV_ERROR(dev, "unknown connector type: 0x%02x!!\n", dcb->type); + return -EINVAL; } + nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL); + if (!nv_connector) + return -ENOMEM; + nv_connector->dcb = dcb; + connector = &nv_connector->base; + /* defaults, will get overridden in detect() */ connector->interlace_allowed = false; connector->doublescan_allowed = false; @@ -786,55 +811,65 @@ nouveau_connector_create(struct drm_device *dev, int index, int type) drm_connector_init(dev, connector, &nouveau_connector_funcs, type); drm_connector_helper_add(connector, &nouveau_connector_helper_funcs); + /* attach encoders */ + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { + struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); + + if (nv_encoder->dcb->connector != dcb->index) + continue; + + if (get_slave_funcs(nv_encoder)) + get_slave_funcs(nv_encoder)->create_resources(encoder, connector); + + drm_mode_connector_attach_encoder(connector, encoder); + } + + if (!connector->encoder_ids[0]) { + NV_WARN(dev, " no encoders, ignoring\n"); + drm_connector_cleanup(connector); + kfree(connector); + return 0; + } + /* Init DVI-I specific properties */ - if (type == DRM_MODE_CONNECTOR_DVII) { + if (dcb->type == DCB_CONNECTOR_DVI_I) { drm_mode_create_dvi_i_properties(dev); drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0); drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0); } - if (type != DRM_MODE_CONNECTOR_LVDS) + if (dcb->type != DCB_CONNECTOR_LVDS) nv_connector->use_dithering = false; - if (type == DRM_MODE_CONNECTOR_DVID || - type == DRM_MODE_CONNECTOR_DVII || - type == DRM_MODE_CONNECTOR_LVDS || - type == DRM_MODE_CONNECTOR_DisplayPort) { - nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN; - - drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, - nv_connector->scaling_mode); - drm_connector_attach_property(connector, dev->mode_config.dithering_mode_property, - nv_connector->use_dithering ? 
DRM_MODE_DITHERING_ON - : DRM_MODE_DITHERING_OFF); - - } else { - nv_connector->scaling_mode = DRM_MODE_SCALE_NONE; - - if (type == DRM_MODE_CONNECTOR_VGA && - dev_priv->card_type >= NV_50) { + switch (dcb->type) { + case DCB_CONNECTOR_VGA: + if (dev_priv->card_type >= NV_50) { drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, nv_connector->scaling_mode); } - } - - /* attach encoders */ - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { - struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - - if (nv_encoder->dcb->connector != index) - continue; - - if (get_slave_funcs(nv_encoder)) - get_slave_funcs(nv_encoder)->create_resources(encoder, connector); + /* fall-through */ + case DCB_CONNECTOR_TV_0: + case DCB_CONNECTOR_TV_1: + case DCB_CONNECTOR_TV_3: + nv_connector->scaling_mode = DRM_MODE_SCALE_NONE; + break; + default: + nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN; - drm_mode_connector_attach_encoder(connector, encoder); + drm_connector_attach_property(connector, + dev->mode_config.scaling_mode_property, + nv_connector->scaling_mode); + drm_connector_attach_property(connector, + dev->mode_config.dithering_mode_property, + nv_connector->use_dithering ? + DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF); + break; } drm_sysfs_connector_add(connector); - if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { + if (dcb->type == DCB_CONNECTOR_LVDS) { ret = nouveau_connector_create_lvds(dev, connector); if (ret) { connector->funcs->destroy(connector); diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h index 728b8090e5f..4ef38abc2d9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.h +++ b/drivers/gpu/drm/nouveau/nouveau_connector.h @@ -49,6 +49,7 @@ static inline struct nouveau_connector *nouveau_connector( return container_of(con, struct nouveau_connector, base); } -int nouveau_connector_create(struct drm_device *dev, int i2c_index, int type); +int nouveau_connector_create(struct drm_device *, + struct dcb_connector_table_entry *); #endif /* __NOUVEAU_CONNECTOR_H__ */ diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index d79db3698f1..8ff9ef5d4b4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -47,12 +47,23 @@ nouveau_debugfs_channel_info(struct seq_file *m, void *data) seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2); seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2); seq_printf(m, " free: 0x%08x\n", chan->dma.free << 2); + if (chan->dma.ib_max) { + seq_printf(m, " ib max: 0x%08x\n", chan->dma.ib_max); + seq_printf(m, " ib put: 0x%08x\n", chan->dma.ib_put); + seq_printf(m, " ib free: 0x%08x\n", chan->dma.ib_free); + } seq_printf(m, "gpu fifo state:\n"); seq_printf(m, " get: 0x%08x\n", nvchan_rd32(chan, chan->user_get)); seq_printf(m, " put: 0x%08x\n", nvchan_rd32(chan, chan->user_put)); + if (chan->dma.ib_max) { + seq_printf(m, " ib get: 0x%08x\n", + nvchan_rd32(chan, 0x88)); + seq_printf(m, " ib put: 0x%08x\n", + nvchan_rd32(chan, 0x8c)); + } seq_printf(m, "last fence : %d\n", chan->fence.sequence); seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack); @@ -133,9 +144,22 @@ nouveau_debugfs_memory_info(struct seq_file *m, void *data) return 0; } +static int +nouveau_debugfs_vbios_image(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_nouveau_private *dev_priv = 
node->minor->dev->dev_private; + int i; + + for (i = 0; i < dev_priv->vbios.length; i++) + seq_printf(m, "%c", dev_priv->vbios.data[i]); + return 0; +} + static struct drm_info_list nouveau_debugfs_list[] = { { "chipset", nouveau_debugfs_chipset_info, 0, NULL }, { "memory", nouveau_debugfs_memory_info, 0, NULL }, + { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL }, }; #define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list) diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index dfc94391d71..cf1c5c0a0ab 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -39,11 +39,8 @@ nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb) if (drm_fb->fbdev) nouveau_fbcon_remove(dev, drm_fb); - if (fb->nvbo) { - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(fb->nvbo->gem); - mutex_unlock(&dev->struct_mutex); - } + if (fb->nvbo) + drm_gem_object_unreference_unlocked(fb->nvbo->gem); drm_framebuffer_cleanup(drm_fb); kfree(fb); diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index 50d9e67745a..c8482a108a7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c @@ -32,7 +32,22 @@ void nouveau_dma_pre_init(struct nouveau_channel *chan) { - chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2; + struct drm_nouveau_private *dev_priv = chan->dev->dev_private; + struct nouveau_bo *pushbuf = chan->pushbuf_bo; + + if (dev_priv->card_type == NV_50) { + const int ib_size = pushbuf->bo.mem.size / 2; + + chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2; + chan->dma.ib_max = (ib_size / 8) - 1; + chan->dma.ib_put = 0; + chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put; + + chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2; + } else { + chan->dma.max = (pushbuf->bo.mem.size >> 2) - 2; + } + chan->dma.put = 0; chan->dma.cur = chan->dma.put; chan->dma.free = chan->dma.max - chan->dma.cur; @@ -162,12 +177,101 @@ READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout) return (val - chan->pushbuf_base) >> 2; } +void +nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo, + int delta, int length) +{ + struct nouveau_bo *pb = chan->pushbuf_bo; + uint64_t offset = bo->bo.offset + delta; + int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base; + + BUG_ON(chan->dma.ib_free < 1); + nouveau_bo_wr32(pb, ip++, lower_32_bits(offset)); + nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8); + + chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max; + nvchan_wr32(chan, 0x8c, chan->dma.ib_put); + chan->dma.ib_free--; +} + +static int +nv50_dma_push_wait(struct nouveau_channel *chan, int count) +{ + uint32_t cnt = 0, prev_get = 0; + + while (chan->dma.ib_free < count) { + uint32_t get = nvchan_rd32(chan, 0x88); + if (get != prev_get) { + prev_get = get; + cnt = 0; + } + + if ((++cnt & 0xff) == 0) { + DRM_UDELAY(1); + if (cnt > 100000) + return -EBUSY; + } + + chan->dma.ib_free = get - chan->dma.ib_put; + if (chan->dma.ib_free <= 0) + chan->dma.ib_free += chan->dma.ib_max + 1; + } + + return 0; +} + +static int +nv50_dma_wait(struct nouveau_channel *chan, int slots, int count) +{ + uint32_t cnt = 0, prev_get = 0; + int ret; + + ret = nv50_dma_push_wait(chan, slots + 1); + if (unlikely(ret)) + return ret; + + while (chan->dma.free < count) { + int get = READ_GET(chan, &prev_get, &cnt); + if (unlikely(get < 0)) { + if (get == -EINVAL) + continue; + + 
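nv50_dma_push() above encodes one indirect-buffer (IB) entry as two 32-bit words: the low half of the push segment's GPU offset, then the upper offset bits OR'd with the segment length shifted left by 8; the put pointer then wraps with a power-of-two mask and is written to the channel's 0x8c register. A small host-side sketch of just that arithmetic (the ring here is an ordinary array, not a mapped push buffer):

#include <stdint.h>
#include <stdio.h>

struct ib_ring {
	uint32_t *entries;   /* 2 dwords per slot */
	int max;             /* slots - 1; slot count must be a power of two */
	int put;
	int free;
};

/* Mirror of the encoding in nv50_dma_push(): offset low, then high | len << 8. */
static void ib_push(struct ib_ring *ib, uint64_t offset, int length)
{
	int ip = ib->put * 2;

	ib->entries[ip + 0] = (uint32_t)offset;
	ib->entries[ip + 1] = (uint32_t)(offset >> 32) | (uint32_t)length << 8;

	ib->put = (ib->put + 1) & ib->max;   /* wrap; slot count is 2^n */
	ib->free--;
	/* the driver then writes ib->put to the channel's 0x8c register */
}

int main(void)
{
	uint32_t slots[16 * 2] = { 0 };
	struct ib_ring ib = { slots, 15, 0, 15 };

	ib_push(&ib, 0x12345678abcULL, 0x40);
	printf("entry: %08x %08x, put=%d free=%d\n",
	       slots[0], slots[1], ib.put, ib.free);
	return 0;
}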
return get; + } + + if (get <= chan->dma.cur) { + chan->dma.free = chan->dma.max - chan->dma.cur; + if (chan->dma.free >= count) + break; + + FIRE_RING(chan); + do { + get = READ_GET(chan, &prev_get, &cnt); + if (unlikely(get < 0)) { + if (get == -EINVAL) + continue; + return get; + } + } while (get == 0); + chan->dma.cur = 0; + chan->dma.put = 0; + } + + chan->dma.free = get - chan->dma.cur - 1; + } + + return 0; +} + int -nouveau_dma_wait(struct nouveau_channel *chan, int size) +nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size) { uint32_t prev_get = 0, cnt = 0; int get; + if (chan->dma.ib_max) + return nv50_dma_wait(chan, slots, size); + while (chan->dma.free < size) { get = READ_GET(chan, &prev_get, &cnt); if (unlikely(get == -EBUSY)) diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h index dabfd655f93..8b05c15866d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.h +++ b/drivers/gpu/drm/nouveau/nouveau_dma.h @@ -31,6 +31,9 @@ #define NOUVEAU_DMA_DEBUG 0 #endif +void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *, + int delta, int length); + /* * There's a hw race condition where you can't jump to your PUT offset, * to avoid this we jump to offset + SKIPS and fill the difference with @@ -96,13 +99,11 @@ enum { static __must_check inline int RING_SPACE(struct nouveau_channel *chan, int size) { - if (chan->dma.free < size) { - int ret; + int ret; - ret = nouveau_dma_wait(chan, size); - if (ret) - return ret; - } + ret = nouveau_dma_wait(chan, 1, size); + if (ret) + return ret; chan->dma.free -= size; return 0; @@ -146,7 +147,13 @@ FIRE_RING(struct nouveau_channel *chan) return; chan->accel_done = true; - WRITE_PUT(chan->dma.cur); + if (chan->dma.ib_max) { + nv50_dma_push(chan, chan->pushbuf_bo, chan->dma.put << 2, + (chan->dma.cur - chan->dma.put) << 2); + } else { + WRITE_PUT(chan->dma.cur); + } + chan->dma.put = chan->dma.cur; } diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index da3b93b8450..30cc09e8a70 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c @@ -75,11 +75,11 @@ MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status"); int nouveau_ignorelid = 0; module_param_named(ignorelid, nouveau_ignorelid, int, 0400); -MODULE_PARM_DESC(noagp, "Disable all acceleration"); +MODULE_PARM_DESC(noaccel, "Disable all acceleration"); int nouveau_noaccel = 0; module_param_named(noaccel, nouveau_noaccel, int, 0400); -MODULE_PARM_DESC(noagp, "Disable fbcon acceleration"); +MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration"); int nouveau_nofbaccel = 0; module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400); @@ -135,7 +135,7 @@ nouveau_pci_remove(struct pci_dev *pdev) drm_put_dev(dev); } -static int +int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state) { struct drm_device *dev = pci_get_drvdata(pdev); @@ -233,7 +233,7 @@ out_abort: return ret; } -static int +int nouveau_pci_resume(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); @@ -402,8 +402,10 @@ static int __init nouveau_init(void) nouveau_modeset = 1; } - if (nouveau_modeset == 1) + if (nouveau_modeset == 1) { driver.driver_features |= DRIVER_MODESET; + nouveau_register_dsm_handler(); + } return drm_init(&driver); } @@ -411,6 +413,7 @@ static int __init nouveau_init(void) static void __exit nouveau_exit(void) { drm_exit(&driver); + nouveau_unregister_dsm_handler(); } module_init(nouveau_init); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h 
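Both wait loops above boil down to measuring the distance between the software PUT/CUR pointer and the hardware GET pointer of a circular buffer. A compact sketch of the two formulas used -- the IB ring counts whole slots and wraps by adding the slot count, while the main push buffer keeps one dword of slack so PUT never collides with GET (the FIRE_RING/wrap retry path is omitted here for brevity):

#include <stdio.h>

/* IB ring: free slots between put and get; the ring holds max+1 slots. */
static int ib_free(int get, int put, int max)
{
	int free = get - put;
	if (free <= 0)
		free += max + 1;
	return free;
}

/* Main push buffer: leave one dword of slack so "full" and "empty" are
 * distinguishable (mirrors chan->dma.free = get - cur - 1). */
static int pb_free(int get, int cur, int max)
{
	if (get > cur)
		return get - cur - 1;
	return max - cur;   /* space to the end; the driver fires and wraps */
}

int main(void)
{
	printf("ib : %d\n", ib_free(3, 10, 15));    /*  3 - 10 + 16 = 9  */
	printf("pb : %d\n", pb_free(100, 40, 512)); /* 100 - 40 - 1 = 59 */
	return 0;
}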
b/drivers/gpu/drm/nouveau/nouveau_drv.h index 1c15ef37b71..5f8d987af36 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -34,7 +34,7 @@ #define DRIVER_MAJOR 0 #define DRIVER_MINOR 0 -#define DRIVER_PATCHLEVEL 15 +#define DRIVER_PATCHLEVEL 16 #define NOUVEAU_FAMILY 0x0000FFFF #define NOUVEAU_FLAGS 0xFFFF0000 @@ -83,6 +83,7 @@ struct nouveau_bo { struct drm_file *reserved_by; struct list_head entry; int pbbo_index; + bool validate_mapped; struct nouveau_channel *channel; @@ -239,6 +240,11 @@ struct nouveau_channel { int cur; int put; /* access via pushbuf_bo */ + + int ib_base; + int ib_max; + int ib_free; + int ib_put; } dma; uint32_t sw_subchannel[8]; @@ -533,6 +539,9 @@ struct drm_nouveau_private { struct nouveau_engine engine; struct nouveau_channel *channel; + /* For PFIFO and PGRAPH. */ + spinlock_t context_switch_lock; + /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ struct nouveau_gpuobj *ramht; uint32_t ramin_rsvd_vram; @@ -596,8 +605,7 @@ struct drm_nouveau_private { struct list_head gpuobj_list; - struct nvbios VBIOS; - struct nouveau_bios_info *vbios; + struct nvbios vbios; struct nv04_mode_state mode_reg; struct nv04_mode_state saved_reg; @@ -614,7 +622,6 @@ struct drm_nouveau_private { } susres; struct backlight_device *backlight; - bool acpi_dsm; struct nouveau_channel *evo; @@ -682,6 +689,9 @@ extern int nouveau_ignorelid; extern int nouveau_nofbaccel; extern int nouveau_noaccel; +extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state); +extern int nouveau_pci_resume(struct pci_dev *pdev); + /* nouveau_state.c */ extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); extern int nouveau_load(struct drm_device *, unsigned long flags); @@ -696,12 +706,6 @@ extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout, uint32_t reg, uint32_t mask, uint32_t val); extern bool nouveau_wait_for_idle(struct drm_device *); extern int nouveau_card_init(struct drm_device *); -extern int nouveau_ioctl_card_init(struct drm_device *, void *data, - struct drm_file *); -extern int nouveau_ioctl_suspend(struct drm_device *, void *data, - struct drm_file *); -extern int nouveau_ioctl_resume(struct drm_device *, void *data, - struct drm_file *); /* nouveau_mem.c */ extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start, @@ -845,21 +849,15 @@ nouveau_debugfs_channel_fini(struct nouveau_channel *chan) /* nouveau_dma.c */ extern void nouveau_dma_pre_init(struct nouveau_channel *); extern int nouveau_dma_init(struct nouveau_channel *); -extern int nouveau_dma_wait(struct nouveau_channel *, int size); +extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size); /* nouveau_acpi.c */ -#ifdef CONFIG_ACPI -extern int nouveau_hybrid_setup(struct drm_device *dev); -extern bool nouveau_dsm_probe(struct drm_device *dev); +#if defined(CONFIG_ACPI) +void nouveau_register_dsm_handler(void); +void nouveau_unregister_dsm_handler(void); #else -static inline int nouveau_hybrid_setup(struct drm_device *dev) -{ - return 0; -} -static inline bool nouveau_dsm_probe(struct drm_device *dev) -{ - return false; -} +static inline void nouveau_register_dsm_handler(void) {} +static inline void nouveau_unregister_dsm_handler(void) {} #endif /* nouveau_backlight.c */ @@ -1027,6 +1025,7 @@ extern void nv50_graph_destroy_context(struct nouveau_channel *); extern int nv50_graph_load_context(struct nouveau_channel *); extern int nv50_graph_unload_context(struct drm_device *); extern void 
nv50_graph_context_switch(struct drm_device *); +extern int nv50_grctx_init(struct nouveau_grctx *); /* nouveau_grctx.c */ extern int nouveau_grctx_prog_load(struct drm_device *); @@ -1152,16 +1151,6 @@ extern int nouveau_gem_ioctl_new(struct drm_device *, void *, struct drm_file *); extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *, struct drm_file *); -extern int nouveau_gem_ioctl_pushbuf_call(struct drm_device *, void *, - struct drm_file *); -extern int nouveau_gem_ioctl_pushbuf_call2(struct drm_device *, void *, - struct drm_file *); -extern int nouveau_gem_ioctl_pin(struct drm_device *, void *, - struct drm_file *); -extern int nouveau_gem_ioctl_unpin(struct drm_device *, void *, - struct drm_file *); -extern int nouveau_gem_ioctl_tile(struct drm_device *, void *, - struct drm_file *); extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *, struct drm_file *); extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *, diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index ea879a2efef..68cedd9194f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -36,6 +36,7 @@ #include <linux/fb.h> #include <linux/init.h> #include <linux/screen_info.h> +#include <linux/vga_switcheroo.h> #include "drmP.h" #include "drm.h" @@ -370,6 +371,7 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, nvbo->bo.offset, nvbo); mutex_unlock(&dev->struct_mutex); + vga_switcheroo_client_fb_set(dev->pdev, info); return 0; out_unref: @@ -401,10 +403,8 @@ nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb) unregister_framebuffer(info); nouveau_bo_unmap(nouveau_fb->nvbo); - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(nouveau_fb->nvbo->gem); + drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem); nouveau_fb->nvbo = NULL; - mutex_unlock(&dev->struct_mutex); if (par) drm_fb_helper_free(&par->helper); framebuffer_release(info); diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 70cc30803e3..0d22f66f1c7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -167,12 +167,10 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data, ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle); out: - mutex_lock(&dev->struct_mutex); - drm_gem_object_handle_unreference(nvbo->gem); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_handle_unreference_unlocked(nvbo->gem); if (ret) - drm_gem_object_unreference(nvbo->gem); + drm_gem_object_unreference_unlocked(nvbo->gem); return ret; } @@ -243,6 +241,11 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence) nouveau_fence_unref((void *)&prev_fence); } + if (unlikely(nvbo->validate_mapped)) { + ttm_bo_kunmap(&nvbo->kmap); + nvbo->validate_mapped = false; + } + list_del(&nvbo->entry); nvbo->reserved_by = NULL; ttm_bo_unreserve(&nvbo->bo); @@ -302,11 +305,14 @@ retry: if (ret == -EAGAIN) ret = ttm_bo_wait_unreserved(&nvbo->bo, false); drm_gem_object_unreference(gem); - if (ret) + if (ret) { + NV_ERROR(dev, "fail reserve\n"); return ret; + } goto retry; } + b->user_priv = (uint64_t)(unsigned long)nvbo; nvbo->reserved_by = file_priv; nvbo->pbbo_index = i; if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) && @@ -336,8 +342,10 @@ retry: } ret = ttm_bo_wait_cpu(&nvbo->bo, false); - if (ret) + if (ret) { + NV_ERROR(dev, "fail wait_cpu\n"); return ret; + } goto retry; } } @@ -351,6 +359,7 
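The nouveau_acpi.c block in the header above is the usual compile-out idiom: real declarations when CONFIG_ACPI is enabled, empty static-inline stubs otherwise, so the module init/exit paths can call the DSM register/unregister hooks unconditionally. A generic sketch of the pattern (CONFIG_FEATURE_X and the function names are placeholders, not driver symbols):

#include <stdio.h>

/* Illustrative pattern only. With CONFIG_FEATURE_X defined, the real
 * implementations would be linked in; without it, the stubs compile away. */
#if defined(CONFIG_FEATURE_X)
void feature_x_register(void);
void feature_x_unregister(void);
#else
static inline void feature_x_register(void) { }
static inline void feature_x_unregister(void) { }
#endif

int main(void)
{
	/* callers never need their own #ifdef */
	feature_x_register();
	printf("driver loaded\n");
	feature_x_unregister();
	return 0;
}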
@@ validate_list(struct nouveau_channel *chan, struct list_head *list, { struct drm_nouveau_gem_pushbuf_bo __user *upbbo = (void __force __user *)(uintptr_t)user_pbbo_ptr; + struct drm_device *dev = chan->dev; struct nouveau_bo *nvbo; int ret, relocs = 0; @@ -362,39 +371,46 @@ validate_list(struct nouveau_channel *chan, struct list_head *list, spin_lock(&nvbo->bo.lock); ret = ttm_bo_wait(&nvbo->bo, false, false, false); spin_unlock(&nvbo->bo.lock); - if (unlikely(ret)) + if (unlikely(ret)) { + NV_ERROR(dev, "fail wait other chan\n"); return ret; + } } ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains, b->write_domains, b->valid_domains); - if (unlikely(ret)) + if (unlikely(ret)) { + NV_ERROR(dev, "fail set_domain\n"); return ret; + } nvbo->channel = chan; ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, false, false); nvbo->channel = NULL; - if (unlikely(ret)) + if (unlikely(ret)) { + NV_ERROR(dev, "fail ttm_validate\n"); return ret; + } - if (nvbo->bo.offset == b->presumed_offset && + if (nvbo->bo.offset == b->presumed.offset && ((nvbo->bo.mem.mem_type == TTM_PL_VRAM && - b->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM) || + b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) || (nvbo->bo.mem.mem_type == TTM_PL_TT && - b->presumed_domain & NOUVEAU_GEM_DOMAIN_GART))) + b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART))) continue; if (nvbo->bo.mem.mem_type == TTM_PL_TT) - b->presumed_domain = NOUVEAU_GEM_DOMAIN_GART; + b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART; else - b->presumed_domain = NOUVEAU_GEM_DOMAIN_VRAM; - b->presumed_offset = nvbo->bo.offset; - b->presumed_ok = 0; + b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM; + b->presumed.offset = nvbo->bo.offset; + b->presumed.valid = 0; relocs++; - if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index], b, sizeof(*b))) + if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed, + &b->presumed, sizeof(b->presumed))) return -EFAULT; } @@ -408,6 +424,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, uint64_t user_buffers, int nr_buffers, struct validate_op *op, int *apply_relocs) { + struct drm_device *dev = chan->dev; int ret, relocs = 0; INIT_LIST_HEAD(&op->vram_list); @@ -418,11 +435,14 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, return 0; ret = validate_init(chan, file_priv, pbbo, nr_buffers, op); - if (unlikely(ret)) + if (unlikely(ret)) { + NV_ERROR(dev, "validate_init\n"); return ret; + } ret = validate_list(chan, &op->vram_list, pbbo, user_buffers); if (unlikely(ret < 0)) { + NV_ERROR(dev, "validate vram_list\n"); validate_fini(op, NULL); return ret; } @@ -430,6 +450,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, ret = validate_list(chan, &op->gart_list, pbbo, user_buffers); if (unlikely(ret < 0)) { + NV_ERROR(dev, "validate gart_list\n"); validate_fini(op, NULL); return ret; } @@ -437,6 +458,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, ret = validate_list(chan, &op->both_list, pbbo, user_buffers); if (unlikely(ret < 0)) { + NV_ERROR(dev, "validate both_list\n"); validate_fini(op, NULL); return ret; } @@ -465,59 +487,82 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size) } static int -nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo, - struct drm_nouveau_gem_pushbuf_bo *bo, - unsigned nr_relocs, uint64_t ptr_relocs, - unsigned nr_dwords, unsigned first_dword, - uint32_t *pushbuf, bool is_iomem) +nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev, + struct drm_nouveau_gem_pushbuf *req, + struct drm_nouveau_gem_pushbuf_bo *bo) { struct 
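validate_list() above now copies back only the nested presumed {valid, domain, offset} block of each buffer entry rather than the whole drm_nouveau_gem_pushbuf_bo, so the kernel never rewrites the fields userspace owns. A userspace illustration of that partial write-back -- the struct layouts are invented for the example and memcpy() stands in for copy_to_user():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative layouts only -- not the real UAPI structs. */
struct presumed {
	uint32_t valid;
	uint32_t domain;
	uint64_t offset;
};

struct pushbuf_bo {
	uint32_t handle;
	uint32_t read_domains;
	uint32_t write_domains;
	uint32_t valid_domains;
	struct presumed presumed;
};

int main(void)
{
	struct pushbuf_bo user_copy = { .handle = 42 };   /* what userspace passed in */
	struct pushbuf_bo kernel_copy = user_copy;

	/* the kernel decides where the buffer really ended up */
	kernel_copy.presumed.domain = 1;
	kernel_copy.presumed.offset = 0x20000000;
	kernel_copy.presumed.valid  = 0;

	/* write back only the presumed sub-struct, as the new ioctl code does;
	 * memcpy stands in for copy_to_user() here */
	memcpy(&user_copy.presumed, &kernel_copy.presumed,
	       sizeof(kernel_copy.presumed));

	printf("handle %u untouched, presumed offset 0x%llx\n",
	       (unsigned)user_copy.handle,
	       (unsigned long long)user_copy.presumed.offset);
	return 0;
}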
drm_nouveau_gem_pushbuf_reloc *reloc = NULL; - struct drm_device *dev = chan->dev; int ret = 0; unsigned i; - reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc)); + reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc)); if (IS_ERR(reloc)) return PTR_ERR(reloc); - for (i = 0; i < nr_relocs; i++) { + for (i = 0; i < req->nr_relocs; i++) { struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i]; struct drm_nouveau_gem_pushbuf_bo *b; + struct nouveau_bo *nvbo; uint32_t data; - if (r->bo_index >= nr_bo || r->reloc_index < first_dword || - r->reloc_index >= first_dword + nr_dwords) { - NV_ERROR(dev, "Bad relocation %d\n", i); - NV_ERROR(dev, " bo: %d max %d\n", r->bo_index, nr_bo); - NV_ERROR(dev, " id: %d max %d\n", r->reloc_index, nr_dwords); + if (unlikely(r->bo_index > req->nr_buffers)) { + NV_ERROR(dev, "reloc bo index invalid\n"); ret = -EINVAL; break; } b = &bo[r->bo_index]; - if (b->presumed_ok) + if (b->presumed.valid) continue; + if (unlikely(r->reloc_bo_index > req->nr_buffers)) { + NV_ERROR(dev, "reloc container bo index invalid\n"); + ret = -EINVAL; + break; + } + nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv; + + if (unlikely(r->reloc_bo_offset + 4 > + nvbo->bo.mem.num_pages << PAGE_SHIFT)) { + NV_ERROR(dev, "reloc outside of bo\n"); + ret = -EINVAL; + break; + } + + if (!nvbo->kmap.virtual) { + ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, + &nvbo->kmap); + if (ret) { + NV_ERROR(dev, "failed kmap for reloc\n"); + break; + } + nvbo->validate_mapped = true; + } + if (r->flags & NOUVEAU_GEM_RELOC_LOW) - data = b->presumed_offset + r->data; + data = b->presumed.offset + r->data; else if (r->flags & NOUVEAU_GEM_RELOC_HIGH) - data = (b->presumed_offset + r->data) >> 32; + data = (b->presumed.offset + r->data) >> 32; else data = r->data; if (r->flags & NOUVEAU_GEM_RELOC_OR) { - if (b->presumed_domain == NOUVEAU_GEM_DOMAIN_GART) + if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART) data |= r->tor; else data |= r->vor; } - if (is_iomem) - iowrite32_native(data, (void __force __iomem *) - &pushbuf[r->reloc_index]); - else - pushbuf[r->reloc_index] = data; + spin_lock(&nvbo->bo.lock); + ret = ttm_bo_wait(&nvbo->bo, false, false, false); + spin_unlock(&nvbo->bo.lock); + if (ret) { + NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret); + break; + } + + nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data); } kfree(reloc); @@ -528,127 +573,50 @@ int nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_gem_pushbuf *req = data; - struct drm_nouveau_gem_pushbuf_bo *bo = NULL; + struct drm_nouveau_gem_pushbuf_push *push; + struct drm_nouveau_gem_pushbuf_bo *bo; struct nouveau_channel *chan; struct validate_op op; - struct nouveau_fence* fence = 0; - uint32_t *pushbuf = NULL; - int ret = 0, do_reloc = 0, i; + struct nouveau_fence *fence = 0; + int i, j, ret = 0, do_reloc = 0; NOUVEAU_CHECK_INITIALISED_WITH_RETURN; NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan); - if (req->nr_dwords >= chan->dma.max || - req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS || - req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) { - NV_ERROR(dev, "Pushbuf config exceeds limits:\n"); - NV_ERROR(dev, " dwords : %d max %d\n", req->nr_dwords, - chan->dma.max - 1); - NV_ERROR(dev, " buffers: %d max %d\n", req->nr_buffers, - NOUVEAU_GEM_MAX_BUFFERS); - NV_ERROR(dev, " relocs : %d max %d\n", req->nr_relocs, - NOUVEAU_GEM_MAX_RELOCS); - return -EINVAL; - } - - pushbuf = 
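The reworked nouveau_gem_pushbuf_reloc_apply() above computes each relocation value from the buffer's presumed offset: the LOW/HIGH flags select which 32-bit half of (offset + r->data) to use, and the OR flag mixes in r->vor or r->tor depending on whether the target ended up in VRAM or GART. A self-contained sketch of just that computation (the flag constants are placeholders; only the decision logic follows the hunk):

#include <stdint.h>
#include <stdio.h>

#define RELOC_LOW  0x1
#define RELOC_HIGH 0x2
#define RELOC_OR   0x4

struct reloc { uint32_t flags, data, vor, tor; };

static uint32_t reloc_value(const struct reloc *r, uint64_t presumed_offset,
			    int in_gart)
{
	uint32_t data;

	if (r->flags & RELOC_LOW)             /* low 32 bits of final address */
		data = (uint32_t)(presumed_offset + r->data);
	else if (r->flags & RELOC_HIGH)       /* high 32 bits */
		data = (uint32_t)((presumed_offset + r->data) >> 32);
	else
		data = r->data;

	if (r->flags & RELOC_OR)              /* OR in a domain-dependent token */
		data |= in_gart ? r->tor : r->vor;

	return data;
}

int main(void)
{
	struct reloc r = { RELOC_LOW | RELOC_OR, 0x100, 0x2, 0x3 };

	/* buffer presumed at 4 GiB + 0x1000, living in VRAM */
	printf("0x%08x\n", reloc_value(&r, 0x100001000ULL, 0));
	return 0;
}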
u_memcpya(req->dwords, req->nr_dwords, sizeof(uint32_t)); - if (IS_ERR(pushbuf)) - return PTR_ERR(pushbuf); - - bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo)); - if (IS_ERR(bo)) { - kfree(pushbuf); - return PTR_ERR(bo); - } - - mutex_lock(&dev->struct_mutex); - - /* Validate buffer list */ - ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers, - req->nr_buffers, &op, &do_reloc); - if (ret) - goto out; - - /* Apply any relocations that are required */ - if (do_reloc) { - ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers, - bo, req->nr_relocs, - req->relocs, - req->nr_dwords, 0, - pushbuf, false); - if (ret) - goto out; - } - - /* Emit push buffer to the hw - */ - ret = RING_SPACE(chan, req->nr_dwords); - if (ret) - goto out; - - OUT_RINGp(chan, pushbuf, req->nr_dwords); + req->vram_available = dev_priv->fb_aper_free; + req->gart_available = dev_priv->gart_info.aper_free; + if (unlikely(req->nr_push == 0)) + goto out_next; - ret = nouveau_fence_new(chan, &fence, true); - if (ret) { - NV_ERROR(dev, "error fencing pushbuf: %d\n", ret); - WIND_RING(chan); - goto out; + if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) { + NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n", + req->nr_push, NOUVEAU_GEM_MAX_PUSH); + return -EINVAL; } - if (nouveau_gem_pushbuf_sync(chan)) { - ret = nouveau_fence_wait(fence, NULL, false, false); - if (ret) { - for (i = 0; i < req->nr_dwords; i++) - NV_ERROR(dev, "0x%08x\n", pushbuf[i]); - NV_ERROR(dev, "^^ above push buffer is fail :(\n"); - } + if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) { + NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n", + req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS); + return -EINVAL; } -out: - validate_fini(&op, fence); - nouveau_fence_unref((void**)&fence); - mutex_unlock(&dev->struct_mutex); - kfree(pushbuf); - kfree(bo); - return ret; -} - -#define PUSHBUF_CAL (dev_priv->card_type >= NV_20) - -int -nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct drm_nouveau_gem_pushbuf_call *req = data; - struct drm_nouveau_gem_pushbuf_bo *bo = NULL; - struct nouveau_channel *chan; - struct drm_gem_object *gem; - struct nouveau_bo *pbbo; - struct validate_op op; - struct nouveau_fence* fence = 0; - int i, ret = 0, do_reloc = 0; - - NOUVEAU_CHECK_INITIALISED_WITH_RETURN; - NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan); - - if (unlikely(req->handle == 0)) - goto out_next; - - if (req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS || - req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) { - NV_ERROR(dev, "Pushbuf config exceeds limits:\n"); - NV_ERROR(dev, " buffers: %d max %d\n", req->nr_buffers, - NOUVEAU_GEM_MAX_BUFFERS); - NV_ERROR(dev, " relocs : %d max %d\n", req->nr_relocs, - NOUVEAU_GEM_MAX_RELOCS); + if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) { + NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n", + req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS); return -EINVAL; } + push = u_memcpya(req->push, req->nr_push, sizeof(*push)); + if (IS_ERR(push)) + return PTR_ERR(push); + bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo)); - if (IS_ERR(bo)) + if (IS_ERR(bo)) { + kfree(push); return PTR_ERR(bo); + } mutex_lock(&dev->struct_mutex); @@ -660,122 +628,84 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data, goto out; } - /* Validate DMA push buffer */ - gem = drm_gem_object_lookup(dev, file_priv, req->handle); - if (!gem) { - 
NV_ERROR(dev, "Unknown pb handle 0x%08x\n", req->handle); - ret = -EINVAL; - goto out; - } - pbbo = nouveau_gem_object(gem); - - if ((req->offset & 3) || req->nr_dwords < 2 || - (unsigned long)req->offset > (unsigned long)pbbo->bo.mem.size || - (unsigned long)req->nr_dwords > - ((unsigned long)(pbbo->bo.mem.size - req->offset ) >> 2)) { - NV_ERROR(dev, "pb call misaligned or out of bounds: " - "%d + %d * 4 > %ld\n", - req->offset, req->nr_dwords, pbbo->bo.mem.size); - ret = -EINVAL; - drm_gem_object_unreference(gem); - goto out; - } - - ret = ttm_bo_reserve(&pbbo->bo, false, false, true, - chan->fence.sequence); - if (ret) { - NV_ERROR(dev, "resv pb: %d\n", ret); - drm_gem_object_unreference(gem); - goto out; - } - - nouveau_bo_placement_set(pbbo, 1 << chan->pushbuf_bo->bo.mem.mem_type); - ret = ttm_bo_validate(&pbbo->bo, &pbbo->placement, false, false); - if (ret) { - NV_ERROR(dev, "validate pb: %d\n", ret); - ttm_bo_unreserve(&pbbo->bo); - drm_gem_object_unreference(gem); - goto out; - } - - list_add_tail(&pbbo->entry, &op.both_list); - - /* If presumed return address doesn't match, we need to map the - * push buffer and fix it.. - */ - if (!PUSHBUF_CAL) { - uint32_t retaddy; - - if (chan->dma.free < 4 + NOUVEAU_DMA_SKIPS) { - ret = nouveau_dma_wait(chan, 4 + NOUVEAU_DMA_SKIPS); - if (ret) { - NV_ERROR(dev, "jmp_space: %d\n", ret); - goto out; - } - } - - retaddy = chan->pushbuf_base + ((chan->dma.cur + 2) << 2); - retaddy |= 0x20000000; - if (retaddy != req->suffix0) { - req->suffix0 = retaddy; - do_reloc = 1; - } - } - /* Apply any relocations that are required */ if (do_reloc) { - void *pbvirt; - bool is_iomem; - ret = ttm_bo_kmap(&pbbo->bo, 0, pbbo->bo.mem.num_pages, - &pbbo->kmap); + ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo); if (ret) { - NV_ERROR(dev, "kmap pb: %d\n", ret); + NV_ERROR(dev, "reloc apply: %d\n", ret); goto out; } + } - pbvirt = ttm_kmap_obj_virtual(&pbbo->kmap, &is_iomem); - ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers, bo, - req->nr_relocs, - req->relocs, - req->nr_dwords, - req->offset / 4, - pbvirt, is_iomem); - - if (!PUSHBUF_CAL) { - nouveau_bo_wr32(pbbo, - req->offset / 4 + req->nr_dwords - 2, - req->suffix0); - } - - ttm_bo_kunmap(&pbbo->kmap); + if (chan->dma.ib_max) { + ret = nouveau_dma_wait(chan, req->nr_push + 1, 6); if (ret) { - NV_ERROR(dev, "reloc apply: %d\n", ret); + NV_INFO(dev, "nv50cal_space: %d\n", ret); goto out; } - } - if (PUSHBUF_CAL) { - ret = RING_SPACE(chan, 2); + for (i = 0; i < req->nr_push; i++) { + struct nouveau_bo *nvbo = (void *)(unsigned long) + bo[push[i].bo_index].user_priv; + + nv50_dma_push(chan, nvbo, push[i].offset, + push[i].length); + } + } else + if (dev_priv->card_type >= NV_20) { + ret = RING_SPACE(chan, req->nr_push * 2); if (ret) { NV_ERROR(dev, "cal_space: %d\n", ret); goto out; } - OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) + - req->offset) | 2); - OUT_RING(chan, 0); + + for (i = 0; i < req->nr_push; i++) { + struct nouveau_bo *nvbo = (void *)(unsigned long) + bo[push[i].bo_index].user_priv; + struct drm_mm_node *mem = nvbo->bo.mem.mm_node; + + OUT_RING(chan, ((mem->start << PAGE_SHIFT) + + push[i].offset) | 2); + OUT_RING(chan, 0); + } } else { - ret = RING_SPACE(chan, 2 + NOUVEAU_DMA_SKIPS); + ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS)); if (ret) { NV_ERROR(dev, "jmp_space: %d\n", ret); goto out; } - OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) + - req->offset) | 0x20000000); - OUT_RING(chan, 0); - /* Space the jumps apart with NOPs. 
*/ - for (i = 0; i < NOUVEAU_DMA_SKIPS; i++) + for (i = 0; i < req->nr_push; i++) { + struct nouveau_bo *nvbo = (void *)(unsigned long) + bo[push[i].bo_index].user_priv; + struct drm_mm_node *mem = nvbo->bo.mem.mm_node; + uint32_t cmd; + + cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2); + cmd |= 0x20000000; + if (unlikely(cmd != req->suffix0)) { + if (!nvbo->kmap.virtual) { + ret = ttm_bo_kmap(&nvbo->bo, 0, + nvbo->bo.mem. + num_pages, + &nvbo->kmap); + if (ret) { + WIND_RING(chan); + goto out; + } + nvbo->validate_mapped = true; + } + + nouveau_bo_wr32(nvbo, (push[i].offset + + push[i].length - 8) / 4, cmd); + } + + OUT_RING(chan, ((mem->start << PAGE_SHIFT) + + push[i].offset) | 0x20000000); OUT_RING(chan, 0); + for (j = 0; j < NOUVEAU_DMA_SKIPS; j++) + OUT_RING(chan, 0); + } } ret = nouveau_fence_new(chan, &fence, true); @@ -790,9 +720,14 @@ out: nouveau_fence_unref((void**)&fence); mutex_unlock(&dev->struct_mutex); kfree(bo); + kfree(push); out_next: - if (PUSHBUF_CAL) { + if (chan->dma.ib_max) { + req->suffix0 = 0x00000000; + req->suffix1 = 0x00000000; + } else + if (dev_priv->card_type >= NV_20) { req->suffix0 = 0x00020000; req->suffix1 = 0x00000000; } else { @@ -804,19 +739,6 @@ out_next: return ret; } -int -nouveau_gem_ioctl_pushbuf_call2(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_nouveau_private *dev_priv = dev->dev_private; - struct drm_nouveau_gem_pushbuf_call *req = data; - - req->vram_available = dev_priv->fb_aper_free; - req->gart_available = dev_priv->gart_info.aper_free; - - return nouveau_gem_ioctl_pushbuf_call(dev, data, file_priv); -} - static inline uint32_t domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain) { @@ -831,74 +753,6 @@ domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain) } int -nouveau_gem_ioctl_pin(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_nouveau_gem_pin *req = data; - struct drm_gem_object *gem; - struct nouveau_bo *nvbo; - int ret = 0; - - NOUVEAU_CHECK_INITIALISED_WITH_RETURN; - - if (drm_core_check_feature(dev, DRIVER_MODESET)) { - NV_ERROR(dev, "pin only allowed without kernel modesetting\n"); - return -EINVAL; - } - - if (!DRM_SUSER(DRM_CURPROC)) - return -EPERM; - - gem = drm_gem_object_lookup(dev, file_priv, req->handle); - if (!gem) - return -EINVAL; - nvbo = nouveau_gem_object(gem); - - ret = nouveau_bo_pin(nvbo, domain_to_ttm(nvbo, req->domain)); - if (ret) - goto out; - - req->offset = nvbo->bo.offset; - if (nvbo->bo.mem.mem_type == TTM_PL_TT) - req->domain = NOUVEAU_GEM_DOMAIN_GART; - else - req->domain = NOUVEAU_GEM_DOMAIN_VRAM; - -out: - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gem); - mutex_unlock(&dev->struct_mutex); - - return ret; -} - -int -nouveau_gem_ioctl_unpin(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_nouveau_gem_pin *req = data; - struct drm_gem_object *gem; - int ret; - - NOUVEAU_CHECK_INITIALISED_WITH_RETURN; - - if (drm_core_check_feature(dev, DRIVER_MODESET)) - return -EINVAL; - - gem = drm_gem_object_lookup(dev, file_priv, req->handle); - if (!gem) - return -EINVAL; - - ret = nouveau_bo_unpin(nouveau_gem_object(gem)); - - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gem); - mutex_unlock(&dev->struct_mutex); - - return ret; -} - -int nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -935,9 +789,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data, } out: - mutex_lock(&dev->struct_mutex); - 
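The pushbuf ioctl above now picks one of three submission paths per request: NV50 channels with an IB ring use nv50_dma_push(), NV20-and-newer cards emit a "call" method (buffer address | 2), and the oldest cards patch a jump-back into the tail of each segment and emit a jump (address | 0x20000000) padded with NOPs. A trivial sketch of that three-way selection (NV_20 here is a placeholder constant, not the driver's enum value):

#include <stdio.h>

enum submit_path { SUBMIT_IB, SUBMIT_CALL, SUBMIT_JUMP };
#define NV_20 0x20   /* placeholder card-type threshold */

static enum submit_path pick_path(int ib_max, int card_type)
{
	if (ib_max)                 /* NV50 indirect-buffer channels */
		return SUBMIT_IB;
	if (card_type >= NV_20)     /* cards with the "call" method */
		return SUBMIT_CALL;
	return SUBMIT_JUMP;         /* oldest path: jump in, patched jump back */
}

int main(void)
{
	printf("%d\n", pick_path(4095, NV_20));      /* IB ring present -> SUBMIT_IB   */
	printf("%d\n", pick_path(0, NV_20));         /* no IB, new enough -> SUBMIT_CALL */
	printf("%d\n", pick_path(0, NV_20 - 1));     /* oldest cards -> SUBMIT_JUMP    */
	return 0;
}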
drm_gem_object_unreference(gem); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(gem); return ret; } @@ -965,9 +817,7 @@ nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data, ret = 0; out: - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gem); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(gem); return ret; } @@ -986,9 +836,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data, return -EINVAL; ret = nouveau_gem_info(gem, req); - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gem); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(gem); return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c index dc46792a5c9..7855b35effc 100644 --- a/drivers/gpu/drm/nouveau/nouveau_hw.c +++ b/drivers/gpu/drm/nouveau/nouveau_hw.c @@ -160,7 +160,7 @@ static void setPLL_single(struct drm_device *dev, uint32_t reg, struct nouveau_pll_vals *pv) { struct drm_nouveau_private *dev_priv = dev->dev_private; - int chip_version = dev_priv->vbios->chip_version; + int chip_version = dev_priv->vbios.chip_version; uint32_t oldpll = NVReadRAMDAC(dev, 0, reg); int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff; uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1; @@ -216,7 +216,7 @@ setPLL_double_highregs(struct drm_device *dev, uint32_t reg1, struct nouveau_pll_vals *pv) { struct drm_nouveau_private *dev_priv = dev->dev_private; - int chip_version = dev_priv->vbios->chip_version; + int chip_version = dev_priv->vbios.chip_version; bool nv3035 = chip_version == 0x30 || chip_version == 0x35; uint32_t reg2 = reg1 + ((reg1 == NV_RAMDAC_VPLL2) ? 0x5c : 0x70); uint32_t oldpll1 = NVReadRAMDAC(dev, 0, reg1); @@ -374,7 +374,7 @@ nouveau_hw_setpll(struct drm_device *dev, uint32_t reg1, struct nouveau_pll_vals *pv) { struct drm_nouveau_private *dev_priv = dev->dev_private; - int cv = dev_priv->vbios->chip_version; + int cv = dev_priv->vbios.chip_version; if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 || cv >= 0x40) { diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c index 70e994d2812..88583e7bf65 100644 --- a/drivers/gpu/drm/nouveau/nouveau_i2c.c +++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c @@ -254,16 +254,16 @@ struct nouveau_i2c_chan * nouveau_i2c_find(struct drm_device *dev, int index) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct nvbios *bios = &dev_priv->VBIOS; + struct nvbios *bios = &dev_priv->vbios; - if (index > DCB_MAX_NUM_I2C_ENTRIES) + if (index >= DCB_MAX_NUM_I2C_ENTRIES) return NULL; - if (!bios->bdcb.dcb.i2c[index].chan) { - if (nouveau_i2c_init(dev, &bios->bdcb.dcb.i2c[index], index)) + if (!bios->dcb.i2c[index].chan) { + if (nouveau_i2c_init(dev, &bios->dcb.i2c[index], index)) return NULL; } - return bios->bdcb.dcb.i2c[index].chan; + return bios->dcb.i2c[index].chan; } diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c index 447f9f69d6b..95220ddebb4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_irq.c +++ b/drivers/gpu/drm/nouveau/nouveau_irq.c @@ -691,11 +691,14 @@ nouveau_irq_handler(DRM_IRQ_ARGS) struct drm_device *dev = (struct drm_device *)arg; struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t status, fbdev_flags = 0; + unsigned long flags; status = nv_rd32(dev, NV03_PMC_INTR_0); if (!status) return IRQ_NONE; + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + if (dev_priv->fbdev_info) { 
fbdev_flags = dev_priv->fbdev_info->flags; dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED; @@ -733,5 +736,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS) if (dev_priv->fbdev_info) dev_priv->fbdev_info->flags = fbdev_flags; + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); + return IRQ_HANDLED; } diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index d99dc087f9b..9537f3e3011 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c @@ -61,11 +61,8 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan) chan->notifier_bo = ntfy; out_err: - if (ret) { - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(ntfy->gem); - mutex_unlock(&dev->struct_mutex); - } + if (ret) + drm_gem_object_unreference_unlocked(ntfy->gem); return ret; } @@ -81,8 +78,8 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan) nouveau_bo_unmap(chan->notifier_bo); mutex_lock(&dev->struct_mutex); nouveau_bo_unpin(chan->notifier_bo); - drm_gem_object_unreference(chan->notifier_bo->gem); mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(chan->notifier_bo->gem); nouveau_mem_takedown(&chan->notifier_heap); } diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index a4851af5b05..eb8f084d5f5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -29,6 +29,7 @@ #include "drm_sarea.h" #include "drm_crtc_helper.h" #include <linux/vgaarb.h> +#include <linux/vga_switcheroo.h> #include "nouveau_drv.h" #include "nouveau_drm.h" @@ -371,6 +372,30 @@ out_err: return ret; } +static void nouveau_switcheroo_set_state(struct pci_dev *pdev, + enum vga_switcheroo_state state) +{ + pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; + if (state == VGA_SWITCHEROO_ON) { + printk(KERN_ERR "VGA switcheroo: switched nouveau on\n"); + nouveau_pci_resume(pdev); + } else { + printk(KERN_ERR "VGA switcheroo: switched nouveau off\n"); + nouveau_pci_suspend(pdev, pmm); + } +} + +static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + bool can_switch; + + spin_lock(&dev->count_lock); + can_switch = (dev->open_count == 0); + spin_unlock(&dev->count_lock); + return can_switch; +} + int nouveau_card_init(struct drm_device *dev) { @@ -384,6 +409,8 @@ nouveau_card_init(struct drm_device *dev) return 0; vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode); + vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state, + nouveau_switcheroo_can_switch); /* Initialise internal driver API hooks */ ret = nouveau_init_engine_ptrs(dev); @@ -391,6 +418,7 @@ nouveau_card_init(struct drm_device *dev) goto out; engine = &dev_priv->engine; dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED; + spin_lock_init(&dev_priv->context_switch_lock); /* Parse BIOS tables / Run init tables if card not POSTed */ if (drm_core_check_feature(dev, DRIVER_MODESET)) { @@ -617,11 +645,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags) NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n", dev->pci_vendor, dev->pci_device, dev->pdev->class); - dev_priv->acpi_dsm = nouveau_dsm_probe(dev); - - if (dev_priv->acpi_dsm) - nouveau_hybrid_setup(dev); - dev_priv->wq = create_workqueue("nouveau"); if (!dev_priv->wq) return -EINVAL; @@ -776,13 +799,6 @@ int nouveau_unload(struct drm_device *dev) return 0; } -int -nouveau_ioctl_card_init(struct drm_device *dev, 
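nouveau_switcheroo_can_switch() above only permits a vga_switcheroo switch while nothing has the DRM device open, sampling open_count under count_lock. The same pattern in plain userspace C, with a pthread mutex standing in for the kernel spinlock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Userspace stand-in for the can_switch check: sample open_count under the
 * lock that protects it, and only allow the GPU switch when idle. */
struct fake_dev {
	pthread_mutex_t count_lock;
	int open_count;
};

static bool can_switch(struct fake_dev *dev)
{
	bool ok;

	pthread_mutex_lock(&dev->count_lock);
	ok = (dev->open_count == 0);
	pthread_mutex_unlock(&dev->count_lock);
	return ok;
}

int main(void)
{
	struct fake_dev dev = { PTHREAD_MUTEX_INITIALIZER, 0 };

	printf("idle: %s\n", can_switch(&dev) ? "can switch" : "busy");
	dev.open_count = 1;
	printf("open: %s\n", can_switch(&dev) ? "can switch" : "busy");
	return 0;
}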
void *data, - struct drm_file *file_priv) -{ - return nouveau_card_init(dev); -} - int nouveau_ioctl_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) { diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index d2f143ed97c..a1d1ebb073d 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c @@ -926,9 +926,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset); nv_crtc->cursor.show(nv_crtc, true); out: - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gem); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(gem); return ret; } diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c index 1d73b15d70d..1cb19e3acb5 100644 --- a/drivers/gpu/drm/nouveau/nv04_dac.c +++ b/drivers/gpu/drm/nouveau/nv04_dac.c @@ -230,13 +230,13 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder) if (dcb->type == OUTPUT_TV) { testval = RGB_TEST_DATA(0xa0, 0xa0, 0xa0); - if (dev_priv->vbios->tvdactestval) - testval = dev_priv->vbios->tvdactestval; + if (dev_priv->vbios.tvdactestval) + testval = dev_priv->vbios.tvdactestval; } else { testval = RGB_TEST_DATA(0x140, 0x140, 0x140); /* 0x94050140 */ - if (dev_priv->vbios->dactestval) - testval = dev_priv->vbios->dactestval; + if (dev_priv->vbios.dactestval) + testval = dev_priv->vbios.dactestval; } saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c index 483f875bdb6..41634d4752f 100644 --- a/drivers/gpu/drm/nouveau/nv04_dfp.c +++ b/drivers/gpu/drm/nouveau/nv04_dfp.c @@ -269,10 +269,10 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder, regp->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1; if (!nv_gf4_disp_arch(dev) || (output_mode->hsync_start - output_mode->hdisplay) >= - dev_priv->vbios->digital_min_front_porch) + dev_priv->vbios.digital_min_front_porch) regp->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay; else - regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios->digital_min_front_porch - 1; + regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios.digital_min_front_porch - 1; regp->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1; regp->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1; regp->fp_horiz_regs[FP_VALID_START] = output_mode->hskew; diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c index ef77215fa5b..c7898b4f6df 100644 --- a/drivers/gpu/drm/nouveau/nv04_display.c +++ b/drivers/gpu/drm/nouveau/nv04_display.c @@ -93,10 +93,9 @@ int nv04_display_create(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct parsed_dcb *dcb = dev_priv->vbios->dcb; + struct dcb_table *dcb = &dev_priv->vbios.dcb; struct drm_encoder *encoder; struct drm_crtc *crtc; - uint16_t connector[16] = { 0 }; int i, ret; NV_DEBUG_KMS(dev, "\n"); @@ -154,52 +153,10 @@ nv04_display_create(struct drm_device *dev) if (ret) continue; - - connector[dcbent->connector] |= (1 << dcbent->type); } - for (i = 0; i < dcb->entries; i++) { - struct dcb_entry *dcbent = &dcb->entry[i]; - uint16_t encoders; - int type; - - encoders = connector[dcbent->connector]; - if (!(encoders & (1 << dcbent->type))) - continue; - connector[dcbent->connector] = 0; - - switch (dcbent->type) { - case OUTPUT_ANALOG: - if 
(!MULTIPLE_ENCODERS(encoders)) - type = DRM_MODE_CONNECTOR_VGA; - else - type = DRM_MODE_CONNECTOR_DVII; - break; - case OUTPUT_TMDS: - if (!MULTIPLE_ENCODERS(encoders)) - type = DRM_MODE_CONNECTOR_DVID; - else - type = DRM_MODE_CONNECTOR_DVII; - break; - case OUTPUT_LVDS: - type = DRM_MODE_CONNECTOR_LVDS; -#if 0 - /* don't create i2c adapter when lvds ddc not allowed */ - if (dcbent->lvdsconf.use_straps_for_mode || - dev_priv->vbios->fp_no_ddc) - i2c_index = 0xf; -#endif - break; - case OUTPUT_TV: - type = DRM_MODE_CONNECTOR_TV; - break; - default: - type = DRM_MODE_CONNECTOR_Unknown; - continue; - } - - nouveau_connector_create(dev, dcbent->connector, type); - } + for (i = 0; i < dcb->connector.entries; i++) + nouveau_connector_create(dev, &dcb->connector.entry[i]); /* Save previous state */ NVLockVgaCrtcs(dev, false); diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index fd01caabd5c..3da90c2c4e6 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c @@ -118,7 +118,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) return; } - width = (image->width + 31) & ~31; + width = ALIGN(image->width, 32); dsize = (width * image->height) >> 5; if (info->fix.visual == FB_VISUAL_TRUECOLOR || diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c index f31347b8c9b..66fe55983b6 100644 --- a/drivers/gpu/drm/nouveau/nv04_fifo.c +++ b/drivers/gpu/drm/nouveau/nv04_fifo.c @@ -117,6 +117,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; + unsigned long flags; int ret; ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0, @@ -127,6 +128,8 @@ nv04_fifo_create_context(struct nouveau_channel *chan) if (ret) return ret; + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + /* Setup initial state */ dev_priv->engine.instmem.prepare_access(dev, true); RAMFC_WR(DMA_PUT, chan->pushbuf_base); @@ -144,6 +147,8 @@ nv04_fifo_create_context(struct nouveau_channel *chan) /* enable the fifo dma operation */ nv_wr32(dev, NV04_PFIFO_MODE, nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id)); + + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); return 0; } diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c index 9c63099e9c4..c4e3404337d 100644 --- a/drivers/gpu/drm/nouveau/nv04_tv.c +++ b/drivers/gpu/drm/nouveau/nv04_tv.c @@ -262,7 +262,7 @@ int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry) nv_encoder->or = ffs(entry->or) - 1; /* Run the slave-specific initialization */ - adap = &dev_priv->vbios->dcb->i2c[i2c_index].chan->adapter; + adap = &dev_priv->vbios.dcb.i2c[i2c_index].chan->adapter; was_locked = NVLockVgaCrtcs(dev, false); diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c index 21ac6e49b6e..74c880374fb 100644 --- a/drivers/gpu/drm/nouveau/nv17_tv.c +++ b/drivers/gpu/drm/nouveau/nv17_tv.c @@ -45,8 +45,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder) #define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20) testval = RGB_TEST_DATA(0x82, 0xeb, 0x82); - if (dev_priv->vbios->tvdactestval) - testval = dev_priv->vbios->tvdactestval; + if (dev_priv->vbios.tvdactestval) + testval = dev_priv->vbios.tvdactestval; dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset); head = (dacclk & 0x100) >> 8; @@ -367,7 +367,7 @@ static void nv17_tv_prepare(struct 
drm_encoder *encoder) !enc->crtc && nv04_dfp_get_bound_head(dev, dcb) == head) { nv04_dfp_bind_head(dev, dcb, head ^ 1, - dev_priv->VBIOS.fp.dual_link); + dev_priv->vbios.fp.dual_link); } } diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c index b4f19ccb8b4..6b2ef4a9fce 100644 --- a/drivers/gpu/drm/nouveau/nv40_fifo.c +++ b/drivers/gpu/drm/nouveau/nv40_fifo.c @@ -37,6 +37,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan) struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t fc = NV40_RAMFC(chan->id); + unsigned long flags; int ret; ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0, @@ -45,6 +46,8 @@ nv40_fifo_create_context(struct nouveau_channel *chan) if (ret) return ret; + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + dev_priv->engine.instmem.prepare_access(dev, true); nv_wi32(dev, fc + 0, chan->pushbuf_base); nv_wi32(dev, fc + 4, chan->pushbuf_base); @@ -63,6 +66,8 @@ nv40_fifo_create_context(struct nouveau_channel *chan) /* enable the fifo dma operation */ nv_wr32(dev, NV04_PFIFO_MODE, nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id)); + + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); return 0; } diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index d1a651e3400..cfabeb974a5 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c @@ -358,9 +358,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, nv_crtc->cursor.show(nv_crtc, true); out: - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gem); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(gem); return ret; } diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c index f08f042a8e1..1fd9537beff 100644 --- a/drivers/gpu/drm/nouveau/nv50_dac.c +++ b/drivers/gpu/drm/nouveau/nv50_dac.c @@ -79,8 +79,8 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) } /* Use bios provided value if possible. 
*/ - if (dev_priv->vbios->dactestval) { - load_pattern = dev_priv->vbios->dactestval; + if (dev_priv->vbios.dactestval) { + load_pattern = dev_priv->vbios.dactestval; NV_DEBUG_KMS(dev, "Using bios provided load_pattern of %d\n", load_pattern); } else { diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 90f0bf59fbc..61a89f2dc55 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -370,9 +370,7 @@ nv50_display_init(struct drm_device *dev) struct nouveau_connector *conn = nouveau_connector(connector); struct dcb_gpio_entry *gpio; - if (connector->connector_type != DRM_MODE_CONNECTOR_DVII && - connector->connector_type != DRM_MODE_CONNECTOR_DVID && - connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) + if (conn->dcb->gpio_tag == 0xff) continue; gpio = nouveau_bios_gpio_entry(dev, conn->dcb->gpio_tag); @@ -465,8 +463,7 @@ static int nv50_display_disable(struct drm_device *dev) int nv50_display_create(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; - struct parsed_dcb *dcb = dev_priv->vbios->dcb; - uint32_t connector[16] = {}; + struct dcb_table *dcb = &dev_priv->vbios.dcb; int ret, i; NV_DEBUG_KMS(dev, "\n"); @@ -522,44 +519,13 @@ int nv50_display_create(struct drm_device *dev) NV_WARN(dev, "DCB encoder %d unknown\n", entry->type); continue; } - - connector[entry->connector] |= (1 << entry->type); } - /* It appears that DCB 3.0+ VBIOS has a connector table, however, - * I'm not 100% certain how to decode it correctly yet so just - * look at what encoders are present on each connector index and - * attempt to derive the connector type from that. - */ - for (i = 0 ; i < dcb->entries; i++) { - struct dcb_entry *entry = &dcb->entry[i]; - uint16_t encoders; - int type; - - encoders = connector[entry->connector]; - if (!(encoders & (1 << entry->type))) + for (i = 0 ; i < dcb->connector.entries; i++) { + if (i != 0 && dcb->connector.entry[i].index == + dcb->connector.entry[i - 1].index) continue; - connector[entry->connector] = 0; - - if (encoders & (1 << OUTPUT_DP)) { - type = DRM_MODE_CONNECTOR_DisplayPort; - } else if (encoders & (1 << OUTPUT_TMDS)) { - if (encoders & (1 << OUTPUT_ANALOG)) - type = DRM_MODE_CONNECTOR_DVII; - else - type = DRM_MODE_CONNECTOR_DVID; - } else if (encoders & (1 << OUTPUT_ANALOG)) { - type = DRM_MODE_CONNECTOR_VGA; - } else if (encoders & (1 << OUTPUT_LVDS)) { - type = DRM_MODE_CONNECTOR_LVDS; - } else { - type = DRM_MODE_CONNECTOR_Unknown; - } - - if (type == DRM_MODE_CONNECTOR_Unknown) - continue; - - nouveau_connector_create(dev, entry->connector, type); + nouveau_connector_create(dev, &dcb->connector.entry[i]); } ret = nv50_display_init(dev); @@ -667,8 +633,8 @@ nv50_display_irq_head(struct drm_device *dev, int *phead, return -1; } - for (i = 0; i < dev_priv->vbios->dcb->entries; i++) { - struct dcb_entry *dcbent = &dev_priv->vbios->dcb->entry[i]; + for (i = 0; i < dev_priv->vbios.dcb.entries; i++) { + struct dcb_entry *dcbent = &dev_priv->vbios.dcb.entry[i]; if (dcbent->type != type) continue; @@ -692,7 +658,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent, struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_connector *nv_connector = NULL; struct drm_encoder *encoder; - struct nvbios *bios = &dev_priv->VBIOS; + struct nvbios *bios = &dev_priv->vbios; uint32_t mc, script = 0, or; list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { @@ -710,7 +676,7 @@ 
nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent, switch (dcbent->type) { case OUTPUT_LVDS: script = (mc >> 8) & 0xf; - if (bios->pub.fp_no_ddc) { + if (bios->fp_no_ddc) { if (bios->fp.dual_link) script |= 0x0100; if (bios->fp.if_is_24bit) diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 0f57cdf7ccb..993c7126fbd 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c @@ -109,7 +109,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) return; } - width = (image->width + 31) & ~31; + width = ALIGN(image->width, 32); dwords = (width * image->height) >> 5; BEGIN_RING(chan, NvSub2D, 0x0814, 2); diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index 204a79ff10f..e20c0e2474f 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c @@ -243,6 +243,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan) struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_gpuobj *ramfc = NULL; + unsigned long flags; int ret; NV_DEBUG(dev, "ch%d\n", chan->id); @@ -278,19 +279,21 @@ nv50_fifo_create_context(struct nouveau_channel *chan) return ret; } + spin_lock_irqsave(&dev_priv->context_switch_lock, flags); + dev_priv->engine.instmem.prepare_access(dev, true); - nv_wo32(dev, ramfc, 0x08/4, chan->pushbuf_base); - nv_wo32(dev, ramfc, 0x10/4, chan->pushbuf_base); nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4); nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4)); - nv_wo32(dev, ramfc, 0x3c/4, 0x00086078); nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff); nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff); nv_wo32(dev, ramfc, 0x40/4, 0x00000000); nv_wo32(dev, ramfc, 0x7c/4, 0x30000001); nv_wo32(dev, ramfc, 0x78/4, 0x00000000); - nv_wo32(dev, ramfc, 0x4c/4, 0xffffffff); + nv_wo32(dev, ramfc, 0x3c/4, 0x403f6078); + nv_wo32(dev, ramfc, 0x50/4, chan->pushbuf_base + + chan->dma.ib_base * 4); + nv_wo32(dev, ramfc, 0x54/4, drm_order(chan->dma.ib_max + 1) << 16); if (!IS_G80) { nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id); @@ -306,10 +309,12 @@ nv50_fifo_create_context(struct nouveau_channel *chan) ret = nv50_fifo_channel_enable(dev, chan->id, false); if (ret) { NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret); + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); nouveau_gpuobj_ref_del(dev, &chan->ramfc); return ret; } + spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); return 0; } diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 6d504801b51..857a09671a3 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c @@ -28,30 +28,7 @@ #include "drm.h" #include "nouveau_drv.h" -MODULE_FIRMWARE("nouveau/nv50.ctxprog"); -MODULE_FIRMWARE("nouveau/nv50.ctxvals"); -MODULE_FIRMWARE("nouveau/nv84.ctxprog"); -MODULE_FIRMWARE("nouveau/nv84.ctxvals"); -MODULE_FIRMWARE("nouveau/nv86.ctxprog"); -MODULE_FIRMWARE("nouveau/nv86.ctxvals"); -MODULE_FIRMWARE("nouveau/nv92.ctxprog"); -MODULE_FIRMWARE("nouveau/nv92.ctxvals"); -MODULE_FIRMWARE("nouveau/nv94.ctxprog"); -MODULE_FIRMWARE("nouveau/nv94.ctxvals"); -MODULE_FIRMWARE("nouveau/nv96.ctxprog"); -MODULE_FIRMWARE("nouveau/nv96.ctxvals"); -MODULE_FIRMWARE("nouveau/nv98.ctxprog"); -MODULE_FIRMWARE("nouveau/nv98.ctxvals"); -MODULE_FIRMWARE("nouveau/nva0.ctxprog"); -MODULE_FIRMWARE("nouveau/nva0.ctxvals"); 
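Two bits of arithmetic recur in the hunks above: the fbcon imageblit paths replace the open-coded (width + 31) & ~31 with ALIGN(width, 32), and the NV50 RAMFC setup stores the IB ring size as a power-of-two order, drm_order(ib_max + 1) << 16, at offset 0x54. A quick standalone check of both helpers (my_order() is a plain ceil-log2 stand-in for drm_order(), and the ib_max value is an example only):

#include <assert.h>
#include <stdio.h>

/* align up to a power-of-two boundary -- what ALIGN(x, 32) expands to */
#define MY_ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))

/* ceil(log2(size)) -- the shape of the RAMFC ring-size field */
static int my_order(unsigned long size)
{
	int order = 0;
	unsigned long s = 1;

	while (s < size) {
		s <<= 1;
		order++;
	}
	return order;
}

int main(void)
{
	/* the imageblit hunks: pad the image width to a multiple of 32 pixels */
	assert(MY_ALIGN(33, 32) == 64);
	assert(MY_ALIGN(64, 32) == 64);

	/* the RAMFC hunk: ib_max + 1 slots, encoded as a power-of-two order */
	int ib_max = 4095;
	printf("ring order field: 0x%x\n", my_order(ib_max + 1) << 16);
	return 0;
}

Note the mask trick only yields an alignment when the boundary is a power of two, which is why the ring slot count is kept at 2^n.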
-MODULE_FIRMWARE("nouveau/nva5.ctxprog"); -MODULE_FIRMWARE("nouveau/nva5.ctxvals"); -MODULE_FIRMWARE("nouveau/nva8.ctxprog"); -MODULE_FIRMWARE("nouveau/nva8.ctxvals"); -MODULE_FIRMWARE("nouveau/nvaa.ctxprog"); -MODULE_FIRMWARE("nouveau/nvaa.ctxvals"); -MODULE_FIRMWARE("nouveau/nvac.ctxprog"); -MODULE_FIRMWARE("nouveau/nvac.ctxvals"); +#include "nouveau_grctx.h" #define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50) @@ -111,9 +88,34 @@ nv50_graph_init_ctxctl(struct drm_device *dev) NV_DEBUG(dev, "\n"); - nouveau_grctx_prog_load(dev); - if (!dev_priv->engine.graph.ctxprog) - dev_priv->engine.graph.accel_blocked = true; + if (nouveau_ctxfw) { + nouveau_grctx_prog_load(dev); + dev_priv->engine.graph.grctx_size = 0x70000; + } + if (!dev_priv->engine.graph.ctxprog) { + struct nouveau_grctx ctx = {}; + uint32_t *cp = kmalloc(512 * 4, GFP_KERNEL); + int i; + if (!cp) { + NV_ERROR(dev, "Couldn't alloc ctxprog! Disabling acceleration.\n"); + dev_priv->engine.graph.accel_blocked = true; + return 0; + } + ctx.dev = dev; + ctx.mode = NOUVEAU_GRCTX_PROG; + ctx.data = cp; + ctx.ctxprog_max = 512; + if (!nv50_grctx_init(&ctx)) { + dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4; + + nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); + for (i = 0; i < ctx.ctxprog_len; i++) + nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]); + } else { + dev_priv->engine.graph.accel_blocked = true; + } + kfree(cp); + } nv_wr32(dev, 0x400320, 4); nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0); @@ -193,13 +195,13 @@ nv50_graph_create_context(struct nouveau_channel *chan) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_gpuobj *ramin = chan->ramin->gpuobj; struct nouveau_gpuobj *ctx; - uint32_t grctx_size = 0x70000; + struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; int hdr, ret; NV_DEBUG(dev, "ch%d\n", chan->id); - ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, grctx_size, 0x1000, - NVOBJ_FLAG_ZERO_ALLOC | + ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size, + 0x1000, NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); if (ret) return ret; @@ -209,7 +211,7 @@ nv50_graph_create_context(struct nouveau_channel *chan) dev_priv->engine.instmem.prepare_access(dev, true); nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002); nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance + - grctx_size - 1); + pgraph->grctx_size - 1); nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance); nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0); nv_wo32(dev, ramin, (hdr + 0x10)/4, 0); @@ -217,7 +219,15 @@ nv50_graph_create_context(struct nouveau_channel *chan) dev_priv->engine.instmem.finish_access(dev); dev_priv->engine.instmem.prepare_access(dev, true); - nouveau_grctx_vals_load(dev, ctx); + if (!pgraph->ctxprog) { + struct nouveau_grctx ctx = {}; + ctx.dev = chan->dev; + ctx.mode = NOUVEAU_GRCTX_VALS; + ctx.data = chan->ramin_grctx->gpuobj; + nv50_grctx_init(&ctx); + } else { + nouveau_grctx_vals_load(dev, ctx); + } nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12); if ((dev_priv->chipset & 0xf0) == 0xa0) nv_wo32(dev, ctx, 0x00004/4, 0x00000000); diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c new file mode 100644 index 00000000000..d105fcd42ca --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv50_grctx.c @@ -0,0 +1,2367 @@ +/* + * Copyright 2009 Marcin KoÅ›cielnicki + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * 
to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#define CP_FLAG_CLEAR 0 +#define CP_FLAG_SET 1 +#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0) +#define CP_FLAG_SWAP_DIRECTION_LOAD 0 +#define CP_FLAG_SWAP_DIRECTION_SAVE 1 +#define CP_FLAG_UNK01 ((0 * 32) + 1) +#define CP_FLAG_UNK01_CLEAR 0 +#define CP_FLAG_UNK01_SET 1 +#define CP_FLAG_UNK03 ((0 * 32) + 3) +#define CP_FLAG_UNK03_CLEAR 0 +#define CP_FLAG_UNK03_SET 1 +#define CP_FLAG_USER_SAVE ((0 * 32) + 5) +#define CP_FLAG_USER_SAVE_NOT_PENDING 0 +#define CP_FLAG_USER_SAVE_PENDING 1 +#define CP_FLAG_USER_LOAD ((0 * 32) + 6) +#define CP_FLAG_USER_LOAD_NOT_PENDING 0 +#define CP_FLAG_USER_LOAD_PENDING 1 +#define CP_FLAG_UNK0B ((0 * 32) + 0xb) +#define CP_FLAG_UNK0B_CLEAR 0 +#define CP_FLAG_UNK0B_SET 1 +#define CP_FLAG_UNK1D ((0 * 32) + 0x1d) +#define CP_FLAG_UNK1D_CLEAR 0 +#define CP_FLAG_UNK1D_SET 1 +#define CP_FLAG_UNK20 ((1 * 32) + 0) +#define CP_FLAG_UNK20_CLEAR 0 +#define CP_FLAG_UNK20_SET 1 +#define CP_FLAG_STATUS ((2 * 32) + 0) +#define CP_FLAG_STATUS_BUSY 0 +#define CP_FLAG_STATUS_IDLE 1 +#define CP_FLAG_AUTO_SAVE ((2 * 32) + 4) +#define CP_FLAG_AUTO_SAVE_NOT_PENDING 0 +#define CP_FLAG_AUTO_SAVE_PENDING 1 +#define CP_FLAG_AUTO_LOAD ((2 * 32) + 5) +#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0 +#define CP_FLAG_AUTO_LOAD_PENDING 1 +#define CP_FLAG_XFER ((2 * 32) + 11) +#define CP_FLAG_XFER_IDLE 0 +#define CP_FLAG_XFER_BUSY 1 +#define CP_FLAG_NEWCTX ((2 * 32) + 12) +#define CP_FLAG_NEWCTX_BUSY 0 +#define CP_FLAG_NEWCTX_DONE 1 +#define CP_FLAG_ALWAYS ((2 * 32) + 13) +#define CP_FLAG_ALWAYS_FALSE 0 +#define CP_FLAG_ALWAYS_TRUE 1 + +#define CP_CTX 0x00100000 +#define CP_CTX_COUNT 0x000f0000 +#define CP_CTX_COUNT_SHIFT 16 +#define CP_CTX_REG 0x00003fff +#define CP_LOAD_SR 0x00200000 +#define CP_LOAD_SR_VALUE 0x000fffff +#define CP_BRA 0x00400000 +#define CP_BRA_IP 0x0001ff00 +#define CP_BRA_IP_SHIFT 8 +#define CP_BRA_IF_CLEAR 0x00000080 +#define CP_BRA_FLAG 0x0000007f +#define CP_WAIT 0x00500000 +#define CP_WAIT_SET 0x00000080 +#define CP_WAIT_FLAG 0x0000007f +#define CP_SET 0x00700000 +#define CP_SET_1 0x00000080 +#define CP_SET_FLAG 0x0000007f +#define CP_NEWCTX 0x00600004 +#define CP_NEXT_TO_SWAP 0x00600005 +#define CP_SET_CONTEXT_POINTER 0x00600006 +#define CP_SET_XFER_POINTER 0x00600007 +#define CP_ENABLE 0x00600009 +#define CP_END 0x0060000c +#define CP_NEXT_TO_CURRENT 0x0060000d +#define CP_DISABLE1 0x0090ffff +#define CP_DISABLE2 0x0091ffff +#define CP_XFER_1 0x008000ff +#define CP_XFER_2 0x008800ff +#define CP_SEEK_1 0x00c000ff +#define CP_SEEK_2 0x00c800ff + +#include "drmP.h" +#include "nouveau_drv.h" +#include "nouveau_grctx.h" + +/* + * This code deals with 
PGRAPH contexts on NV50 family cards. Like NV40, it's + * the GPU itself that does context-switching, but it needs a special + * microcode to do it. And it's the driver's task to supply this microcode, + * further known as ctxprog, as well as the initial context values, known + * as ctxvals. + * + * Without ctxprog, you cannot switch contexts. Not even in software, since + * the majority of context [xfer strands] isn't accessible directly. You're + * stuck with a single channel, and you also suffer all the problems resulting + * from missing ctxvals, since you cannot load them. + * + * Without ctxvals, you're stuck with PGRAPH's default context. It's enough to + * run 2d operations, but trying to utilise 3d or CUDA will just lock you up, + * since you don't have... some sort of needed setup. + * + * Nouveau will just disable acceleration if not given ctxprog + ctxvals, since + * it's too much hassle to handle no-ctxprog as a special case. + */ + +/* + * How ctxprogs work. + * + * The ctxprog is written in its own kind of microcode, with a very small and + * crappy set of available commands. You upload it to a small [512 insns] + * area of memory on PGRAPH, and it'll be run when PFIFO wants PGRAPH to + * switch channels, or when the driver explicitly requests it. Stuff visible + * to ctxprog consists of: PGRAPH MMIO registers, PGRAPH context strands, + * the per-channel context save area in VRAM [known as ctxvals or grctx], + * 4 flags registers, a scratch register, two grctx pointers, plus many + * random poorly-understood details. + * + * When ctxprog runs, it's supposed to check what operations are asked of it, + * save the old context if requested, optionally reset PGRAPH and switch to the + * new channel, and load the new context. Context consists of three major + * parts: a subset of MMIO registers and two "xfer areas". + */ + +/* TODO: + * - document unimplemented bits compared to nvidia + * - NVAx: make a TP subroutine, use it. + * - use 0x4008fc instead of 0x1540? + */ + +enum cp_label { + cp_check_load = 1, + cp_setup_auto_load, + cp_setup_load, + cp_setup_save, + cp_swap_state, + cp_prepare_exit, + cp_exit, +}; + +static void nv50_graph_construct_mmio(struct nouveau_grctx *ctx); +static void nv50_graph_construct_xfer1(struct nouveau_grctx *ctx); +static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx); + +/* Main function: construct the ctxprog skeleton, call the other functions. */ + +int +nv50_grctx_init(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + + switch (dev_priv->chipset) { + case 0x50: + case 0x84: + case 0x86: + case 0x92: + case 0x94: + case 0x96: + case 0x98: + case 0xa0: + case 0xa5: + case 0xa8: + case 0xaa: + case 0xac: + break; + default: + NV_ERROR(ctx->dev, "I don't know how to make a ctxprog for " + "your NV%x card.\n", dev_priv->chipset); + NV_ERROR(ctx->dev, "Disabling acceleration. 
Please contact " + "the devs.\n"); + return -ENOSYS; + } + /* decide whether we're loading/unloading the context */ + cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save); + cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save); + + cp_name(ctx, cp_check_load); + cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load); + cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load); + cp_bra (ctx, ALWAYS, TRUE, cp_exit); + + /* setup for context load */ + cp_name(ctx, cp_setup_auto_load); + cp_out (ctx, CP_DISABLE1); + cp_out (ctx, CP_DISABLE2); + cp_out (ctx, CP_ENABLE); + cp_out (ctx, CP_NEXT_TO_SWAP); + cp_set (ctx, UNK01, SET); + cp_name(ctx, cp_setup_load); + cp_out (ctx, CP_NEWCTX); + cp_wait(ctx, NEWCTX, BUSY); + cp_set (ctx, UNK1D, CLEAR); + cp_set (ctx, SWAP_DIRECTION, LOAD); + cp_bra (ctx, UNK0B, SET, cp_prepare_exit); + cp_bra (ctx, ALWAYS, TRUE, cp_swap_state); + + /* setup for context save */ + cp_name(ctx, cp_setup_save); + cp_set (ctx, UNK1D, SET); + cp_wait(ctx, STATUS, BUSY); + cp_set (ctx, UNK01, SET); + cp_set (ctx, SWAP_DIRECTION, SAVE); + + /* general PGRAPH state */ + cp_name(ctx, cp_swap_state); + cp_set (ctx, UNK03, SET); + cp_pos (ctx, 0x00004/4); + cp_ctx (ctx, 0x400828, 1); /* needed. otherwise, flickering happens. */ + cp_pos (ctx, 0x00100/4); + nv50_graph_construct_mmio(ctx); + nv50_graph_construct_xfer1(ctx); + nv50_graph_construct_xfer2(ctx); + + cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load); + + cp_set (ctx, UNK20, SET); + cp_set (ctx, SWAP_DIRECTION, SAVE); /* no idea why this is needed, but fixes at least one lockup. */ + cp_lsr (ctx, ctx->ctxvals_base); + cp_out (ctx, CP_SET_XFER_POINTER); + cp_lsr (ctx, 4); + cp_out (ctx, CP_SEEK_1); + cp_out (ctx, CP_XFER_1); + cp_wait(ctx, XFER, BUSY); + + /* pre-exit state updates */ + cp_name(ctx, cp_prepare_exit); + cp_set (ctx, UNK01, CLEAR); + cp_set (ctx, UNK03, CLEAR); + cp_set (ctx, UNK1D, CLEAR); + + cp_bra (ctx, USER_SAVE, PENDING, cp_exit); + cp_out (ctx, CP_NEXT_TO_CURRENT); + + cp_name(ctx, cp_exit); + cp_set (ctx, USER_SAVE, NOT_PENDING); + cp_set (ctx, USER_LOAD, NOT_PENDING); + cp_out (ctx, CP_END); + ctx->ctxvals_pos += 0x400; /* padding... no idea why you need it */ + + return 0; +} + +/* + * Constructs MMIO part of ctxprog and ctxvals. Just a matter of knowing which + * registers to save/restore and the default values for them. 
+ */ + +static void +nv50_graph_construct_mmio(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + int i, j; + int offset, base; + uint32_t units = nv_rd32 (ctx->dev, 0x1540); + + /* 0800 */ + cp_ctx(ctx, 0x400808, 7); + gr_def(ctx, 0x400814, 0x00000030); + cp_ctx(ctx, 0x400834, 0x32); + if (dev_priv->chipset == 0x50) { + gr_def(ctx, 0x400834, 0xff400040); + gr_def(ctx, 0x400838, 0xfff00080); + gr_def(ctx, 0x40083c, 0xfff70090); + gr_def(ctx, 0x400840, 0xffe806a8); + } + gr_def(ctx, 0x400844, 0x00000002); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + gr_def(ctx, 0x400894, 0x00001000); + gr_def(ctx, 0x4008e8, 0x00000003); + gr_def(ctx, 0x4008ec, 0x00001000); + if (dev_priv->chipset == 0x50) + cp_ctx(ctx, 0x400908, 0xb); + else if (dev_priv->chipset < 0xa0) + cp_ctx(ctx, 0x400908, 0xc); + else + cp_ctx(ctx, 0x400908, 0xe); + + if (dev_priv->chipset >= 0xa0) + cp_ctx(ctx, 0x400b00, 0x1); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { + cp_ctx(ctx, 0x400b10, 0x1); + gr_def(ctx, 0x400b10, 0x0001629d); + cp_ctx(ctx, 0x400b20, 0x1); + gr_def(ctx, 0x400b20, 0x0001629d); + } + + /* 0C00 */ + cp_ctx(ctx, 0x400c08, 0x2); + gr_def(ctx, 0x400c08, 0x0000fe0c); + + /* 1000 */ + if (dev_priv->chipset < 0xa0) { + cp_ctx(ctx, 0x401008, 0x4); + gr_def(ctx, 0x401014, 0x00001000); + } else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) { + cp_ctx(ctx, 0x401008, 0x5); + gr_def(ctx, 0x401018, 0x00001000); + } else { + cp_ctx(ctx, 0x401008, 0x5); + gr_def(ctx, 0x401018, 0x00004000); + } + + /* 1400 */ + cp_ctx(ctx, 0x401400, 0x8); + cp_ctx(ctx, 0x401424, 0x3); + if (dev_priv->chipset == 0x50) + gr_def(ctx, 0x40142c, 0x0001fd87); + else + gr_def(ctx, 0x40142c, 0x00000187); + cp_ctx(ctx, 0x401540, 0x5); + gr_def(ctx, 0x401550, 0x00001018); + + /* 1800 */ + cp_ctx(ctx, 0x401814, 0x1); + gr_def(ctx, 0x401814, 0x000000ff); + if (dev_priv->chipset == 0x50) { + cp_ctx(ctx, 0x40181c, 0xe); + gr_def(ctx, 0x401850, 0x00000004); + } else if (dev_priv->chipset < 0xa0) { + cp_ctx(ctx, 0x40181c, 0xf); + gr_def(ctx, 0x401854, 0x00000004); + } else { + cp_ctx(ctx, 0x40181c, 0x13); + gr_def(ctx, 0x401864, 0x00000004); + } + + /* 1C00 */ + cp_ctx(ctx, 0x401c00, 0x1); + switch (dev_priv->chipset) { + case 0x50: + gr_def(ctx, 0x401c00, 0x0001005f); + break; + case 0x84: + case 0x86: + case 0x94: + gr_def(ctx, 0x401c00, 0x044d00df); + break; + case 0x92: + case 0x96: + case 0x98: + case 0xa0: + case 0xaa: + case 0xac: + gr_def(ctx, 0x401c00, 0x042500df); + break; + case 0xa5: + case 0xa8: + gr_def(ctx, 0x401c00, 0x142500df); + break; + } + + /* 2400 */ + cp_ctx(ctx, 0x402400, 0x1); + if (dev_priv->chipset == 0x50) + cp_ctx(ctx, 0x402408, 0x1); + else + cp_ctx(ctx, 0x402408, 0x2); + gr_def(ctx, 0x402408, 0x00000600); + + /* 2800 */ + cp_ctx(ctx, 0x402800, 0x1); + if (dev_priv->chipset == 0x50) + gr_def(ctx, 0x402800, 0x00000006); + + /* 2C00 */ + cp_ctx(ctx, 0x402c08, 0x6); + if (dev_priv->chipset != 0x50) + gr_def(ctx, 0x402c14, 0x01000000); + gr_def(ctx, 0x402c18, 0x000000ff); + if (dev_priv->chipset == 0x50) + cp_ctx(ctx, 0x402ca0, 0x1); + else + cp_ctx(ctx, 0x402ca0, 0x2); + if (dev_priv->chipset < 0xa0) + gr_def(ctx, 0x402ca0, 0x00000400); + else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) + gr_def(ctx, 0x402ca0, 0x00000800); + else + gr_def(ctx, 0x402ca0, 0x00000400); + cp_ctx(ctx, 0x402cac, 0x4); + + /* 3000 */ + cp_ctx(ctx, 0x403004, 0x1); + gr_def(ctx, 0x403004, 0x00000001); + + /* 3404 */ + if (dev_priv->chipset >= 0xa0) { 
+ cp_ctx(ctx, 0x403404, 0x1); + gr_def(ctx, 0x403404, 0x00000001); + } + + /* 5000 */ + cp_ctx(ctx, 0x405000, 0x1); + switch (dev_priv->chipset) { + case 0x50: + gr_def(ctx, 0x405000, 0x00300080); + break; + case 0x84: + case 0xa0: + case 0xa5: + case 0xa8: + case 0xaa: + case 0xac: + gr_def(ctx, 0x405000, 0x000e0080); + break; + case 0x86: + case 0x92: + case 0x94: + case 0x96: + case 0x98: + gr_def(ctx, 0x405000, 0x00000080); + break; + } + cp_ctx(ctx, 0x405014, 0x1); + gr_def(ctx, 0x405014, 0x00000004); + cp_ctx(ctx, 0x40501c, 0x1); + cp_ctx(ctx, 0x405024, 0x1); + cp_ctx(ctx, 0x40502c, 0x1); + + /* 5400 or maybe 4800 */ + if (dev_priv->chipset == 0x50) { + offset = 0x405400; + cp_ctx(ctx, 0x405400, 0xea); + } else if (dev_priv->chipset < 0x94) { + offset = 0x405400; + cp_ctx(ctx, 0x405400, 0xcb); + } else if (dev_priv->chipset < 0xa0) { + offset = 0x405400; + cp_ctx(ctx, 0x405400, 0xcc); + } else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { + offset = 0x404800; + cp_ctx(ctx, 0x404800, 0xda); + } else { + offset = 0x405400; + cp_ctx(ctx, 0x405400, 0xd4); + } + gr_def(ctx, offset + 0x0c, 0x00000002); + gr_def(ctx, offset + 0x10, 0x00000001); + if (dev_priv->chipset >= 0x94) + offset += 4; + gr_def(ctx, offset + 0x1c, 0x00000001); + gr_def(ctx, offset + 0x20, 0x00000100); + gr_def(ctx, offset + 0x38, 0x00000002); + gr_def(ctx, offset + 0x3c, 0x00000001); + gr_def(ctx, offset + 0x40, 0x00000001); + gr_def(ctx, offset + 0x50, 0x00000001); + gr_def(ctx, offset + 0x54, 0x003fffff); + gr_def(ctx, offset + 0x58, 0x00001fff); + gr_def(ctx, offset + 0x60, 0x00000001); + gr_def(ctx, offset + 0x64, 0x00000001); + gr_def(ctx, offset + 0x6c, 0x00000001); + gr_def(ctx, offset + 0x70, 0x00000001); + gr_def(ctx, offset + 0x74, 0x00000001); + gr_def(ctx, offset + 0x78, 0x00000004); + gr_def(ctx, offset + 0x7c, 0x00000001); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + offset += 4; + gr_def(ctx, offset + 0x80, 0x00000001); + gr_def(ctx, offset + 0x84, 0x00000001); + gr_def(ctx, offset + 0x88, 0x00000007); + gr_def(ctx, offset + 0x8c, 0x00000001); + gr_def(ctx, offset + 0x90, 0x00000007); + gr_def(ctx, offset + 0x94, 0x00000001); + gr_def(ctx, offset + 0x98, 0x00000001); + gr_def(ctx, offset + 0x9c, 0x00000001); + if (dev_priv->chipset == 0x50) { + gr_def(ctx, offset + 0xb0, 0x00000001); + gr_def(ctx, offset + 0xb4, 0x00000001); + gr_def(ctx, offset + 0xbc, 0x00000001); + gr_def(ctx, offset + 0xc0, 0x0000000a); + gr_def(ctx, offset + 0xd0, 0x00000040); + gr_def(ctx, offset + 0xd8, 0x00000002); + gr_def(ctx, offset + 0xdc, 0x00000100); + gr_def(ctx, offset + 0xe0, 0x00000001); + gr_def(ctx, offset + 0xe4, 0x00000100); + gr_def(ctx, offset + 0x100, 0x00000001); + gr_def(ctx, offset + 0x124, 0x00000004); + gr_def(ctx, offset + 0x13c, 0x00000001); + gr_def(ctx, offset + 0x140, 0x00000100); + gr_def(ctx, offset + 0x148, 0x00000001); + gr_def(ctx, offset + 0x154, 0x00000100); + gr_def(ctx, offset + 0x158, 0x00000001); + gr_def(ctx, offset + 0x15c, 0x00000100); + gr_def(ctx, offset + 0x164, 0x00000001); + gr_def(ctx, offset + 0x170, 0x00000100); + gr_def(ctx, offset + 0x174, 0x00000001); + gr_def(ctx, offset + 0x17c, 0x00000001); + gr_def(ctx, offset + 0x188, 0x00000002); + gr_def(ctx, offset + 0x190, 0x00000001); + gr_def(ctx, offset + 0x198, 0x00000001); + gr_def(ctx, offset + 0x1ac, 0x00000003); + offset += 0xd0; + } else { + gr_def(ctx, offset + 0xb0, 0x00000001); + gr_def(ctx, offset + 0xb4, 0x00000100); + gr_def(ctx, offset + 0xbc, 0x00000001); + gr_def(ctx, offset + 0xc8, 
0x00000100); + gr_def(ctx, offset + 0xcc, 0x00000001); + gr_def(ctx, offset + 0xd0, 0x00000100); + gr_def(ctx, offset + 0xd8, 0x00000001); + gr_def(ctx, offset + 0xe4, 0x00000100); + } + gr_def(ctx, offset + 0xf8, 0x00000004); + gr_def(ctx, offset + 0xfc, 0x00000070); + gr_def(ctx, offset + 0x100, 0x00000080); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + offset += 4; + gr_def(ctx, offset + 0x114, 0x0000000c); + if (dev_priv->chipset == 0x50) + offset -= 4; + gr_def(ctx, offset + 0x11c, 0x00000008); + gr_def(ctx, offset + 0x120, 0x00000014); + if (dev_priv->chipset == 0x50) { + gr_def(ctx, offset + 0x124, 0x00000026); + offset -= 0x18; + } else { + gr_def(ctx, offset + 0x128, 0x00000029); + gr_def(ctx, offset + 0x12c, 0x00000027); + gr_def(ctx, offset + 0x130, 0x00000026); + gr_def(ctx, offset + 0x134, 0x00000008); + gr_def(ctx, offset + 0x138, 0x00000004); + gr_def(ctx, offset + 0x13c, 0x00000027); + } + gr_def(ctx, offset + 0x148, 0x00000001); + gr_def(ctx, offset + 0x14c, 0x00000002); + gr_def(ctx, offset + 0x150, 0x00000003); + gr_def(ctx, offset + 0x154, 0x00000004); + gr_def(ctx, offset + 0x158, 0x00000005); + gr_def(ctx, offset + 0x15c, 0x00000006); + gr_def(ctx, offset + 0x160, 0x00000007); + gr_def(ctx, offset + 0x164, 0x00000001); + gr_def(ctx, offset + 0x1a8, 0x000000cf); + if (dev_priv->chipset == 0x50) + offset -= 4; + gr_def(ctx, offset + 0x1d8, 0x00000080); + gr_def(ctx, offset + 0x1dc, 0x00000004); + gr_def(ctx, offset + 0x1e0, 0x00000004); + if (dev_priv->chipset == 0x50) + offset -= 4; + else + gr_def(ctx, offset + 0x1e4, 0x00000003); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { + gr_def(ctx, offset + 0x1ec, 0x00000003); + offset += 8; + } + gr_def(ctx, offset + 0x1e8, 0x00000001); + if (dev_priv->chipset == 0x50) + offset -= 4; + gr_def(ctx, offset + 0x1f4, 0x00000012); + gr_def(ctx, offset + 0x1f8, 0x00000010); + gr_def(ctx, offset + 0x1fc, 0x0000000c); + gr_def(ctx, offset + 0x200, 0x00000001); + gr_def(ctx, offset + 0x210, 0x00000004); + gr_def(ctx, offset + 0x214, 0x00000002); + gr_def(ctx, offset + 0x218, 0x00000004); + if (dev_priv->chipset >= 0xa0) + offset += 4; + gr_def(ctx, offset + 0x224, 0x003fffff); + gr_def(ctx, offset + 0x228, 0x00001fff); + if (dev_priv->chipset == 0x50) + offset -= 0x20; + else if (dev_priv->chipset >= 0xa0) { + gr_def(ctx, offset + 0x250, 0x00000001); + gr_def(ctx, offset + 0x254, 0x00000001); + gr_def(ctx, offset + 0x258, 0x00000002); + offset += 0x10; + } + gr_def(ctx, offset + 0x250, 0x00000004); + gr_def(ctx, offset + 0x254, 0x00000014); + gr_def(ctx, offset + 0x258, 0x00000001); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + offset += 4; + gr_def(ctx, offset + 0x264, 0x00000002); + if (dev_priv->chipset >= 0xa0) + offset += 8; + gr_def(ctx, offset + 0x270, 0x00000001); + gr_def(ctx, offset + 0x278, 0x00000002); + gr_def(ctx, offset + 0x27c, 0x00001000); + if (dev_priv->chipset == 0x50) + offset -= 0xc; + else { + gr_def(ctx, offset + 0x280, 0x00000e00); + gr_def(ctx, offset + 0x284, 0x00001000); + gr_def(ctx, offset + 0x288, 0x00001e00); + } + gr_def(ctx, offset + 0x290, 0x00000001); + gr_def(ctx, offset + 0x294, 0x00000001); + gr_def(ctx, offset + 0x298, 0x00000001); + gr_def(ctx, offset + 0x29c, 0x00000001); + gr_def(ctx, offset + 0x2a0, 0x00000001); + gr_def(ctx, offset + 0x2b0, 0x00000200); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { + gr_def(ctx, offset + 0x2b4, 0x00000200); + offset += 4; + } + if (dev_priv->chipset < 0xa0) { + gr_def(ctx, offset + 0x2b8, 
0x00000001); + gr_def(ctx, offset + 0x2bc, 0x00000070); + gr_def(ctx, offset + 0x2c0, 0x00000080); + gr_def(ctx, offset + 0x2cc, 0x00000001); + gr_def(ctx, offset + 0x2d0, 0x00000070); + gr_def(ctx, offset + 0x2d4, 0x00000080); + } else { + gr_def(ctx, offset + 0x2b8, 0x00000001); + gr_def(ctx, offset + 0x2bc, 0x000000f0); + gr_def(ctx, offset + 0x2c0, 0x000000ff); + gr_def(ctx, offset + 0x2cc, 0x00000001); + gr_def(ctx, offset + 0x2d0, 0x000000f0); + gr_def(ctx, offset + 0x2d4, 0x000000ff); + gr_def(ctx, offset + 0x2dc, 0x00000009); + offset += 4; + } + gr_def(ctx, offset + 0x2e4, 0x00000001); + gr_def(ctx, offset + 0x2e8, 0x000000cf); + gr_def(ctx, offset + 0x2f0, 0x00000001); + gr_def(ctx, offset + 0x300, 0x000000cf); + gr_def(ctx, offset + 0x308, 0x00000002); + gr_def(ctx, offset + 0x310, 0x00000001); + gr_def(ctx, offset + 0x318, 0x00000001); + gr_def(ctx, offset + 0x320, 0x000000cf); + gr_def(ctx, offset + 0x324, 0x000000cf); + gr_def(ctx, offset + 0x328, 0x00000001); + + /* 6000? */ + if (dev_priv->chipset == 0x50) + cp_ctx(ctx, 0x4063e0, 0x1); + + /* 6800 */ + if (dev_priv->chipset < 0x90) { + cp_ctx(ctx, 0x406814, 0x2b); + gr_def(ctx, 0x406818, 0x00000f80); + gr_def(ctx, 0x406860, 0x007f0080); + gr_def(ctx, 0x40689c, 0x007f0080); + } else { + cp_ctx(ctx, 0x406814, 0x4); + if (dev_priv->chipset == 0x98) + gr_def(ctx, 0x406818, 0x00000f80); + else + gr_def(ctx, 0x406818, 0x00001f80); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + gr_def(ctx, 0x40681c, 0x00000030); + cp_ctx(ctx, 0x406830, 0x3); + } + + /* 7000: per-ROP group state */ + for (i = 0; i < 8; i++) { + if (units & (1<<(i+16))) { + cp_ctx(ctx, 0x407000 + (i<<8), 3); + if (dev_priv->chipset == 0x50) + gr_def(ctx, 0x407000 + (i<<8), 0x1b74f820); + else if (dev_priv->chipset != 0xa5) + gr_def(ctx, 0x407000 + (i<<8), 0x3b74f821); + else + gr_def(ctx, 0x407000 + (i<<8), 0x7b74f821); + gr_def(ctx, 0x407004 + (i<<8), 0x89058001); + + if (dev_priv->chipset == 0x50) { + cp_ctx(ctx, 0x407010 + (i<<8), 1); + } else if (dev_priv->chipset < 0xa0) { + cp_ctx(ctx, 0x407010 + (i<<8), 2); + gr_def(ctx, 0x407010 + (i<<8), 0x00001000); + gr_def(ctx, 0x407014 + (i<<8), 0x0000001f); + } else { + cp_ctx(ctx, 0x407010 + (i<<8), 3); + gr_def(ctx, 0x407010 + (i<<8), 0x00001000); + if (dev_priv->chipset != 0xa5) + gr_def(ctx, 0x407014 + (i<<8), 0x000000ff); + else + gr_def(ctx, 0x407014 + (i<<8), 0x000001ff); + } + + cp_ctx(ctx, 0x407080 + (i<<8), 4); + if (dev_priv->chipset != 0xa5) + gr_def(ctx, 0x407080 + (i<<8), 0x027c10fa); + else + gr_def(ctx, 0x407080 + (i<<8), 0x827c10fa); + if (dev_priv->chipset == 0x50) + gr_def(ctx, 0x407084 + (i<<8), 0x000000c0); + else + gr_def(ctx, 0x407084 + (i<<8), 0x400000c0); + gr_def(ctx, 0x407088 + (i<<8), 0xb7892080); + + if (dev_priv->chipset < 0xa0) + cp_ctx(ctx, 0x407094 + (i<<8), 1); + else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) + cp_ctx(ctx, 0x407094 + (i<<8), 3); + else { + cp_ctx(ctx, 0x407094 + (i<<8), 4); + gr_def(ctx, 0x4070a0 + (i<<8), 1); + } + } + } + + cp_ctx(ctx, 0x407c00, 0x3); + if (dev_priv->chipset < 0x90) + gr_def(ctx, 0x407c00, 0x00010040); + else if (dev_priv->chipset < 0xa0) + gr_def(ctx, 0x407c00, 0x00390040); + else + gr_def(ctx, 0x407c00, 0x003d0040); + gr_def(ctx, 0x407c08, 0x00000022); + if (dev_priv->chipset >= 0xa0) { + cp_ctx(ctx, 0x407c10, 0x3); + cp_ctx(ctx, 0x407c20, 0x1); + cp_ctx(ctx, 0x407c2c, 0x1); + } + + if (dev_priv->chipset < 0xa0) { + cp_ctx(ctx, 0x407d00, 0x9); + } else { + cp_ctx(ctx, 0x407d00, 0x15); + } + if (dev_priv->chipset == 
0x98) + gr_def(ctx, 0x407d08, 0x00380040); + else { + if (dev_priv->chipset < 0x90) + gr_def(ctx, 0x407d08, 0x00010040); + else if (dev_priv->chipset < 0xa0) + gr_def(ctx, 0x407d08, 0x00390040); + else + gr_def(ctx, 0x407d08, 0x003d0040); + gr_def(ctx, 0x407d0c, 0x00000022); + } + + /* 8000+: per-TP state */ + for (i = 0; i < 10; i++) { + if (units & (1<<i)) { + if (dev_priv->chipset < 0xa0) + base = 0x408000 + (i<<12); + else + base = 0x408000 + (i<<11); + if (dev_priv->chipset < 0xa0) + offset = base + 0xc00; + else + offset = base + 0x80; + cp_ctx(ctx, offset + 0x00, 1); + gr_def(ctx, offset + 0x00, 0x0000ff0a); + cp_ctx(ctx, offset + 0x08, 1); + + /* per-MP state */ + for (j = 0; j < (dev_priv->chipset < 0xa0 ? 2 : 4); j++) { + if (!(units & (1 << (j+24)))) continue; + if (dev_priv->chipset < 0xa0) + offset = base + 0x200 + (j<<7); + else + offset = base + 0x100 + (j<<7); + cp_ctx(ctx, offset, 0x20); + gr_def(ctx, offset + 0x00, 0x01800000); + gr_def(ctx, offset + 0x04, 0x00160000); + gr_def(ctx, offset + 0x08, 0x01800000); + gr_def(ctx, offset + 0x18, 0x0003ffff); + switch (dev_priv->chipset) { + case 0x50: + gr_def(ctx, offset + 0x1c, 0x00080000); + break; + case 0x84: + gr_def(ctx, offset + 0x1c, 0x00880000); + break; + case 0x86: + gr_def(ctx, offset + 0x1c, 0x008c0000); + break; + case 0x92: + case 0x96: + case 0x98: + gr_def(ctx, offset + 0x1c, 0x118c0000); + break; + case 0x94: + gr_def(ctx, offset + 0x1c, 0x10880000); + break; + case 0xa0: + case 0xa5: + gr_def(ctx, offset + 0x1c, 0x310c0000); + break; + case 0xa8: + case 0xaa: + case 0xac: + gr_def(ctx, offset + 0x1c, 0x300c0000); + break; + } + gr_def(ctx, offset + 0x40, 0x00010401); + if (dev_priv->chipset == 0x50) + gr_def(ctx, offset + 0x48, 0x00000040); + else + gr_def(ctx, offset + 0x48, 0x00000078); + gr_def(ctx, offset + 0x50, 0x000000bf); + gr_def(ctx, offset + 0x58, 0x00001210); + if (dev_priv->chipset == 0x50) + gr_def(ctx, offset + 0x5c, 0x00000080); + else + gr_def(ctx, offset + 0x5c, 0x08000080); + if (dev_priv->chipset >= 0xa0) + gr_def(ctx, offset + 0x68, 0x0000003e); + } + + if (dev_priv->chipset < 0xa0) + cp_ctx(ctx, base + 0x300, 0x4); + else + cp_ctx(ctx, base + 0x300, 0x5); + if (dev_priv->chipset == 0x50) + gr_def(ctx, base + 0x304, 0x00007070); + else if (dev_priv->chipset < 0xa0) + gr_def(ctx, base + 0x304, 0x00027070); + else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) + gr_def(ctx, base + 0x304, 0x01127070); + else + gr_def(ctx, base + 0x304, 0x05127070); + + if (dev_priv->chipset < 0xa0) + cp_ctx(ctx, base + 0x318, 1); + else + cp_ctx(ctx, base + 0x320, 1); + if (dev_priv->chipset == 0x50) + gr_def(ctx, base + 0x318, 0x0003ffff); + else if (dev_priv->chipset < 0xa0) + gr_def(ctx, base + 0x318, 0x03ffffff); + else + gr_def(ctx, base + 0x320, 0x07ffffff); + + if (dev_priv->chipset < 0xa0) + cp_ctx(ctx, base + 0x324, 5); + else + cp_ctx(ctx, base + 0x328, 4); + + if (dev_priv->chipset < 0xa0) { + cp_ctx(ctx, base + 0x340, 9); + offset = base + 0x340; + } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) { + cp_ctx(ctx, base + 0x33c, 0xb); + offset = base + 0x344; + } else { + cp_ctx(ctx, base + 0x33c, 0xd); + offset = base + 0x344; + } + gr_def(ctx, offset + 0x0, 0x00120407); + gr_def(ctx, offset + 0x4, 0x05091507); + if (dev_priv->chipset == 0x84) + gr_def(ctx, offset + 0x8, 0x05100202); + else + gr_def(ctx, offset + 0x8, 0x05010202); + gr_def(ctx, offset + 0xc, 0x00030201); + + cp_ctx(ctx, base + 0x400, 2); + gr_def(ctx, base + 0x404, 0x00000040); + cp_ctx(ctx, base + 
0x40c, 2); + gr_def(ctx, base + 0x40c, 0x0d0c0b0a); + gr_def(ctx, base + 0x410, 0x00141210); + + if (dev_priv->chipset < 0xa0) + offset = base + 0x800; + else + offset = base + 0x500; + cp_ctx(ctx, offset, 6); + gr_def(ctx, offset + 0x0, 0x000001f0); + gr_def(ctx, offset + 0x4, 0x00000001); + gr_def(ctx, offset + 0x8, 0x00000003); + if (dev_priv->chipset == 0x50 || dev_priv->chipset >= 0xaa) + gr_def(ctx, offset + 0xc, 0x00008000); + gr_def(ctx, offset + 0x14, 0x00039e00); + cp_ctx(ctx, offset + 0x1c, 2); + if (dev_priv->chipset == 0x50) + gr_def(ctx, offset + 0x1c, 0x00000040); + else + gr_def(ctx, offset + 0x1c, 0x00000100); + gr_def(ctx, offset + 0x20, 0x00003800); + + if (dev_priv->chipset >= 0xa0) { + cp_ctx(ctx, base + 0x54c, 2); + if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) + gr_def(ctx, base + 0x54c, 0x003fe006); + else + gr_def(ctx, base + 0x54c, 0x003fe007); + gr_def(ctx, base + 0x550, 0x003fe000); + } + + if (dev_priv->chipset < 0xa0) + offset = base + 0xa00; + else + offset = base + 0x680; + cp_ctx(ctx, offset, 1); + gr_def(ctx, offset, 0x00404040); + + if (dev_priv->chipset < 0xa0) + offset = base + 0xe00; + else + offset = base + 0x700; + cp_ctx(ctx, offset, 2); + if (dev_priv->chipset < 0xa0) + gr_def(ctx, offset, 0x0077f005); + else if (dev_priv->chipset == 0xa5) + gr_def(ctx, offset, 0x6cf7f007); + else if (dev_priv->chipset == 0xa8) + gr_def(ctx, offset, 0x6cfff007); + else if (dev_priv->chipset == 0xac) + gr_def(ctx, offset, 0x0cfff007); + else + gr_def(ctx, offset, 0x0cf7f007); + if (dev_priv->chipset == 0x50) + gr_def(ctx, offset + 0x4, 0x00007fff); + else if (dev_priv->chipset < 0xa0) + gr_def(ctx, offset + 0x4, 0x003f7fff); + else + gr_def(ctx, offset + 0x4, 0x02bf7fff); + cp_ctx(ctx, offset + 0x2c, 1); + if (dev_priv->chipset == 0x50) { + cp_ctx(ctx, offset + 0x50, 9); + gr_def(ctx, offset + 0x54, 0x000003ff); + gr_def(ctx, offset + 0x58, 0x00000003); + gr_def(ctx, offset + 0x5c, 0x00000003); + gr_def(ctx, offset + 0x60, 0x000001ff); + gr_def(ctx, offset + 0x64, 0x0000001f); + gr_def(ctx, offset + 0x68, 0x0000000f); + gr_def(ctx, offset + 0x6c, 0x0000000f); + } else if(dev_priv->chipset < 0xa0) { + cp_ctx(ctx, offset + 0x50, 1); + cp_ctx(ctx, offset + 0x70, 1); + } else { + cp_ctx(ctx, offset + 0x50, 1); + cp_ctx(ctx, offset + 0x60, 5); + } + } + } +} + +/* + * xfer areas. These are a pain. + * + * There are 2 xfer areas: the first one is big and contains all sorts of + * stuff, the second is small and contains some per-TP context. + * + * Each area is split into 8 "strands". The areas, when saved to grctx, + * are made of 8-word blocks. Each block contains a single word from + * each strand. The strands are independent of each other, their + * addresses are unrelated to each other, and data in them is closely + * packed together. The strand layout varies a bit between cards: here + * and there, a single word is thrown out in the middle and the whole + * strand is offset by a bit from corresponding one on another chipset. + * For this reason, addresses of stuff in strands are almost useless. + * Knowing sequence of stuff and size of gaps between them is much more + * useful, and that's how we build the strands in our generator. + * + * NVA0 takes this mess to a whole new level by cutting the old strands + * into a few dozen pieces [known as genes], rearranging them randomly, + * and putting them back together to make new strands. Hopefully these + * genes correspond more or less directly to the same PGRAPH subunits + * as in 400040 register. 
+ * + * The most common value in default context is 0, and when the genes + * are separated by 0's, gene boundaries are quite speculative... + * some of them can be clearly deduced, others can be guessed, and yet + * others won't be resolved without figuring out the real meaning of + * a given ctxval. For the same reason, the ending point of each strand + * is unknown. Except for strand 0, which is the longest strand and + * its end corresponds to the end of the whole xfer. + * + * An unsolved mystery is the seek instruction: it takes an argument + * in bits 8-18, and that argument is clearly the place in strands to + * seek to... but the offsets don't seem to correspond to offsets as + * seen in grctx. Perhaps there's another, real, not randomly-changing + * addressing in strands, and the xfer insn just happens to skip over + * the unused bits? NV10-NV30 PIPE comes to mind... + * + * As far as I know, there's no way to access the xfer areas directly + * without the help of ctxprog. + */ + +static inline void +xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val) { + int i; + if (val && ctx->mode == NOUVEAU_GRCTX_VALS) + for (i = 0; i < num; i++) + nv_wo32(ctx->dev, ctx->data, ctx->ctxvals_pos + (i << 3), val); + ctx->ctxvals_pos += num << 3; +} + +/* Gene declarations... */ + +static void nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx); +static void nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx); +static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx); + +static void +nv50_graph_construct_xfer1(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + int i; + int offset; + int size = 0; + uint32_t units = nv_rd32 (ctx->dev, 0x1540); + + offset = (ctx->ctxvals_pos+0x3f)&~0x3f; + ctx->ctxvals_base = offset; + + if (dev_priv->chipset < 0xa0) { + /* Strand 0 */ + ctx->ctxvals_pos = offset; + switch (dev_priv->chipset) { + case 0x50: + xf_emit(ctx, 0x99, 0); + break; + case 0x84: + case 0x86: + xf_emit(ctx, 0x384, 0); + break; + case 0x92: + case 0x94: + case 0x96: + case 0x98: + xf_emit(ctx, 0x380, 0); + break; + } + nv50_graph_construct_gene_m2mf (ctx); + switch (dev_priv->chipset) { + case 0x50: + case 0x84: + case 0x86: + case 0x98: + xf_emit(ctx, 0x4c4, 0); + break; + case 0x92: + case 0x94: + case 0x96: + xf_emit(ctx, 0x984, 0); + break; + } + nv50_graph_construct_gene_unk5(ctx); + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 0xa, 0); + else + xf_emit(ctx, 0xb, 0); + nv50_graph_construct_gene_unk4(ctx); + nv50_graph_construct_gene_unk3(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strand 1 */ + ctx->ctxvals_pos = offset + 0x1; + nv50_graph_construct_gene_unk6(ctx); + nv50_graph_construct_gene_unk7(ctx); + nv50_graph_construct_gene_unk8(ctx); + switch 
(dev_priv->chipset) { + case 0x50: + case 0x92: + xf_emit(ctx, 0xfb, 0); + break; + case 0x84: + xf_emit(ctx, 0xd3, 0); + break; + case 0x94: + case 0x96: + xf_emit(ctx, 0xab, 0); + break; + case 0x86: + case 0x98: + xf_emit(ctx, 0x6b, 0); + break; + } + xf_emit(ctx, 2, 0x4e3bfdf); + xf_emit(ctx, 4, 0); + xf_emit(ctx, 1, 0x0fac6881); + xf_emit(ctx, 0xb, 0); + xf_emit(ctx, 2, 0x4e3bfdf); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strand 2 */ + ctx->ctxvals_pos = offset + 0x2; + switch (dev_priv->chipset) { + case 0x50: + case 0x92: + xf_emit(ctx, 0xa80, 0); + break; + case 0x84: + xf_emit(ctx, 0xa7e, 0); + break; + case 0x94: + case 0x96: + xf_emit(ctx, 0xa7c, 0); + break; + case 0x86: + case 0x98: + xf_emit(ctx, 0xa7a, 0); + break; + } + xf_emit(ctx, 1, 0x3fffff); + xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 0x1fff); + xf_emit(ctx, 0xe, 0); + nv50_graph_construct_gene_unk9(ctx); + nv50_graph_construct_gene_unk2(ctx); + nv50_graph_construct_gene_unk1(ctx); + nv50_graph_construct_gene_unk10(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strand 3: per-ROP group state */ + ctx->ctxvals_pos = offset + 3; + for (i = 0; i < 6; i++) + if (units & (1 << (i + 16))) + nv50_graph_construct_gene_ropc(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strands 4-7: per-TP state */ + for (i = 0; i < 4; i++) { + ctx->ctxvals_pos = offset + 4 + i; + if (units & (1 << (2 * i))) + nv50_graph_construct_xfer_tp(ctx); + if (units & (1 << (2 * i + 1))) + nv50_graph_construct_xfer_tp(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + } + } else { + /* Strand 0 */ + ctx->ctxvals_pos = offset; + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 0x385, 0); + else + xf_emit(ctx, 0x384, 0); + nv50_graph_construct_gene_m2mf(ctx); + xf_emit(ctx, 0x950, 0); + nv50_graph_construct_gene_unk10(ctx); + xf_emit(ctx, 1, 0x0fac6881); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { + xf_emit(ctx, 1, 1); + xf_emit(ctx, 3, 0); + } + nv50_graph_construct_gene_unk8(ctx); + if (dev_priv->chipset == 0xa0) + xf_emit(ctx, 0x189, 0); + else if (dev_priv->chipset < 0xa8) + xf_emit(ctx, 0x99, 0); + else if (dev_priv->chipset == 0xaa) + xf_emit(ctx, 0x65, 0); + else + xf_emit(ctx, 0x6d, 0); + nv50_graph_construct_gene_unk9(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strand 1 */ + ctx->ctxvals_pos = offset + 1; + nv50_graph_construct_gene_unk1(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strand 2 */ + ctx->ctxvals_pos = offset + 2; + if (dev_priv->chipset == 0xa0) { + nv50_graph_construct_gene_unk2(ctx); + } + xf_emit(ctx, 0x36, 0); + nv50_graph_construct_gene_unk5(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strand 3 */ + ctx->ctxvals_pos = offset + 3; + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 1); + nv50_graph_construct_gene_unk6(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strand 4 */ + ctx->ctxvals_pos = offset + 4; + if (dev_priv->chipset == 0xa0) + xf_emit(ctx, 0xa80, 0); + else + xf_emit(ctx, 0xa7a, 0); + xf_emit(ctx, 1, 0x3fffff); + xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 0x1fff); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strand 5 */ + ctx->ctxvals_pos = offset + 5; + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 
0x0fac6881); + xf_emit(ctx, 0xb, 0); + xf_emit(ctx, 2, 0x4e3bfdf); + xf_emit(ctx, 3, 0); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 1, 0x11); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 2, 0x4e3bfdf); + xf_emit(ctx, 2, 0); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 1, 0x11); + xf_emit(ctx, 1, 0); + for (i = 0; i < 8; i++) + if (units & (1<<(i+16))) + nv50_graph_construct_gene_ropc(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strand 6 */ + ctx->ctxvals_pos = offset + 6; + nv50_graph_construct_gene_unk3(ctx); + xf_emit(ctx, 0xb, 0); + nv50_graph_construct_gene_unk4(ctx); + nv50_graph_construct_gene_unk7(ctx); + if (units & (1 << 0)) + nv50_graph_construct_xfer_tp(ctx); + if (units & (1 << 1)) + nv50_graph_construct_xfer_tp(ctx); + if (units & (1 << 2)) + nv50_graph_construct_xfer_tp(ctx); + if (units & (1 << 3)) + nv50_graph_construct_xfer_tp(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strand 7 */ + ctx->ctxvals_pos = offset + 7; + if (dev_priv->chipset == 0xa0) { + if (units & (1 << 4)) + nv50_graph_construct_xfer_tp(ctx); + if (units & (1 << 5)) + nv50_graph_construct_xfer_tp(ctx); + if (units & (1 << 6)) + nv50_graph_construct_xfer_tp(ctx); + if (units & (1 << 7)) + nv50_graph_construct_xfer_tp(ctx); + if (units & (1 << 8)) + nv50_graph_construct_xfer_tp(ctx); + if (units & (1 << 9)) + nv50_graph_construct_xfer_tp(ctx); + } else { + nv50_graph_construct_gene_unk2(ctx); + } + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + } + + ctx->ctxvals_pos = offset + size * 8; + ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f; + cp_lsr (ctx, offset); + cp_out (ctx, CP_SET_XFER_POINTER); + cp_lsr (ctx, size); + cp_out (ctx, CP_SEEK_1); + cp_out (ctx, CP_XFER_1); + cp_wait(ctx, XFER, BUSY); +} + +/* + * non-trivial demagiced parts of ctx init go here + */ + +static void +nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx) +{ + /* m2mf state */ + xf_emit (ctx, 1, 0); /* DMA_NOTIFY instance >> 4 */ + xf_emit (ctx, 1, 0); /* DMA_BUFFER_IN instance >> 4 */ + xf_emit (ctx, 1, 0); /* DMA_BUFFER_OUT instance >> 4 */ + xf_emit (ctx, 1, 0); /* OFFSET_IN */ + xf_emit (ctx, 1, 0); /* OFFSET_OUT */ + xf_emit (ctx, 1, 0); /* PITCH_IN */ + xf_emit (ctx, 1, 0); /* PITCH_OUT */ + xf_emit (ctx, 1, 0); /* LINE_LENGTH */ + xf_emit (ctx, 1, 0); /* LINE_COUNT */ + xf_emit (ctx, 1, 0x21); /* FORMAT: bits 0-4 INPUT_INC, bits 5-9 OUTPUT_INC */ + xf_emit (ctx, 1, 1); /* LINEAR_IN */ + xf_emit (ctx, 1, 0x2); /* TILING_MODE_IN: bits 0-2 y tiling, bits 3-5 z tiling */ + xf_emit (ctx, 1, 0x100); /* TILING_PITCH_IN */ + xf_emit (ctx, 1, 0x100); /* TILING_HEIGHT_IN */ + xf_emit (ctx, 1, 1); /* TILING_DEPTH_IN */ + xf_emit (ctx, 1, 0); /* TILING_POSITION_IN_Z */ + xf_emit (ctx, 1, 0); /* TILING_POSITION_IN */ + xf_emit (ctx, 1, 1); /* LINEAR_OUT */ + xf_emit (ctx, 1, 0x2); /* TILING_MODE_OUT: bits 0-2 y tiling, bits 3-5 z tiling */ + xf_emit (ctx, 1, 0x100); /* TILING_PITCH_OUT */ + xf_emit (ctx, 1, 0x100); /* TILING_HEIGHT_OUT */ + xf_emit (ctx, 1, 1); /* TILING_DEPTH_OUT */ + xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT_Z */ + xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT */ + xf_emit (ctx, 1, 0); /* OFFSET_IN_HIGH */ + xf_emit (ctx, 1, 0); /* OFFSET_OUT_HIGH */ +} + +static void +nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + /* end of area 2 on pre-NVA0, area 1 on 
NVAx */ + xf_emit(ctx, 2, 4); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 0x80); + xf_emit(ctx, 1, 4); + xf_emit(ctx, 1, 0x80c14); + xf_emit(ctx, 1, 0); + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 1, 0x3ff); + else + xf_emit(ctx, 1, 0x7ff); + switch (dev_priv->chipset) { + case 0x50: + case 0x86: + case 0x98: + case 0xaa: + case 0xac: + xf_emit(ctx, 0x542, 0); + break; + case 0x84: + case 0x92: + case 0x94: + case 0x96: + xf_emit(ctx, 0x942, 0); + break; + case 0xa0: + xf_emit(ctx, 0x2042, 0); + break; + case 0xa5: + case 0xa8: + xf_emit(ctx, 0x842, 0); + break; + } + xf_emit(ctx, 2, 4); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 0x80); + xf_emit(ctx, 1, 4); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 0x27); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 0x26); + xf_emit(ctx, 3, 0); +} + +static void +nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx) +{ + /* end of area 2 on pre-NVA0, area 1 on NVAx */ + xf_emit(ctx, 0x10, 0x04000000); + xf_emit(ctx, 0x24, 0); + xf_emit(ctx, 2, 0x04e3bfdf); + xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 0x1fe21); +} + +static void +nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + /* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */ + if (dev_priv->chipset != 0x50) { + xf_emit(ctx, 5, 0); + xf_emit(ctx, 1, 0x80c14); + xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 0x804); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 2, 4); + xf_emit(ctx, 1, 0x8100c12); + } + xf_emit(ctx, 1, 0); + xf_emit(ctx, 2, 4); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 0x10); + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 3, 0); + else + xf_emit(ctx, 4, 0); + xf_emit(ctx, 1, 0x804); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0x1a); + if (dev_priv->chipset != 0x50) + xf_emit(ctx, 1, 0x7f); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0x80c14); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 0x8100c12); + xf_emit(ctx, 2, 4); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 0x10); + xf_emit(ctx, 3, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0x8100c12); + xf_emit(ctx, 6, 0); + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 1, 0x3ff); + else + xf_emit(ctx, 1, 0x7ff); + xf_emit(ctx, 1, 0x80c14); + xf_emit(ctx, 0x38, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 0x10); + xf_emit(ctx, 0x38, 0); + xf_emit(ctx, 2, 0x88); + xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 4); + xf_emit(ctx, 0x16, 0); + xf_emit(ctx, 1, 0x26); + xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 0x3f800000); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 4, 0); + else + xf_emit(ctx, 3, 0); + xf_emit(ctx, 1, 0x1a); + xf_emit(ctx, 1, 0x10); + if (dev_priv->chipset != 0x50) + xf_emit(ctx, 0x28, 0); + else + xf_emit(ctx, 0x25, 0); + xf_emit(ctx, 1, 0x52); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 0x26); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 2, 4); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 0x1a); + xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 0x00ffff00); + xf_emit(ctx, 1, 0); +} + +static void +nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + /* end of area 0 on pre-NVA0, beginning of area 6 on NVAx */ + xf_emit(ctx, 1, 0x3f); + xf_emit(ctx, 0xa, 0); + xf_emit(ctx, 1, 2); + xf_emit(ctx, 2, 0x04000000); + xf_emit(ctx, 8, 0); + xf_emit(ctx, 1, 4); + xf_emit(ctx, 3, 0); + xf_emit(ctx, 1, 4); + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 0x10, 0); + else + xf_emit(ctx, 0x11, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0x1001); + 
xf_emit(ctx, 4, 0xffff); + xf_emit(ctx, 0x20, 0); + xf_emit(ctx, 0x10, 0x3f800000); + xf_emit(ctx, 1, 0x10); + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 1, 0); + else + xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 3); + xf_emit(ctx, 2, 0); +} + +static void +nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx) +{ + /* middle of area 0 on pre-NVA0, middle of area 6 on NVAx */ + xf_emit(ctx, 2, 0x04000000); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 0x80); + xf_emit(ctx, 3, 0); + xf_emit(ctx, 1, 0x80); + xf_emit(ctx, 1, 0); +} + +static void +nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + /* middle of area 0 on pre-NVA0 [after m2mf], end of area 2 on NVAx */ + xf_emit(ctx, 2, 4); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 0x1c4d, 0); + else + xf_emit(ctx, 0x1c4b, 0); + xf_emit(ctx, 2, 4); + xf_emit(ctx, 1, 0x8100c12); + if (dev_priv->chipset != 0x50) + xf_emit(ctx, 1, 3); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 0x8100c12); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 0x80c14); + xf_emit(ctx, 1, 1); + if (dev_priv->chipset >= 0xa0) + xf_emit(ctx, 2, 4); + xf_emit(ctx, 1, 0x80c14); + xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 0x8100c12); + xf_emit(ctx, 1, 0x27); + xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 0x3c1, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 0x16, 0); + xf_emit(ctx, 1, 0x8100c12); + xf_emit(ctx, 1, 0); +} + +static void +nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + /* beginning of area 1 on pre-NVA0 [after m2mf], area 3 on NVAx */ + xf_emit(ctx, 4, 0); + xf_emit(ctx, 1, 0xf); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 8, 0); + else + xf_emit(ctx, 4, 0); + xf_emit(ctx, 1, 0x20); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 0x11, 0); + else if (dev_priv->chipset >= 0xa0) + xf_emit(ctx, 0xf, 0); + else + xf_emit(ctx, 0xe, 0); + xf_emit(ctx, 1, 0x1a); + xf_emit(ctx, 0xd, 0); + xf_emit(ctx, 2, 4); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 4); + xf_emit(ctx, 1, 8); + xf_emit(ctx, 1, 0); + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 1, 0x3ff); + else + xf_emit(ctx, 1, 0x7ff); + if (dev_priv->chipset == 0xa8) + xf_emit(ctx, 1, 0x1e00); + xf_emit(ctx, 0xc, 0); + xf_emit(ctx, 1, 0xf); + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 0x125, 0); + else if (dev_priv->chipset < 0xa0) + xf_emit(ctx, 0x126, 0); + else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) + xf_emit(ctx, 0x124, 0); + else + xf_emit(ctx, 0x1f7, 0); + xf_emit(ctx, 1, 0xf); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 3, 0); + else + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 1); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 0xa1, 0); + else + xf_emit(ctx, 0x5a, 0); + xf_emit(ctx, 1, 0xf); + if (dev_priv->chipset < 0xa0) + xf_emit(ctx, 0x834, 0); + else if (dev_priv->chipset == 0xa0) + xf_emit(ctx, 0x1873, 0); + else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 0x8ba, 0); + else + xf_emit(ctx, 0x833, 0); + xf_emit(ctx, 1, 0xf); + xf_emit(ctx, 0xf, 0); +} + +static void +nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 6 on NVAx */ + xf_emit(ctx, 2, 0); + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 2, 1); + else + xf_emit(ctx, 2, 0); + 
xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 2, 0x100); + xf_emit(ctx, 1, 0x11); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 8); + xf_emit(ctx, 5, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 3, 1); + xf_emit(ctx, 1, 0xcf); + xf_emit(ctx, 1, 2); + xf_emit(ctx, 6, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 3, 1); + xf_emit(ctx, 4, 0); + xf_emit(ctx, 1, 4); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0x15); + xf_emit(ctx, 3, 0); + xf_emit(ctx, 1, 0x4444480); + xf_emit(ctx, 0x37, 0); +} + +static void +nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx) +{ + /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 0 on NVAx */ + xf_emit(ctx, 4, 0); + xf_emit(ctx, 1, 0x8100c12); + xf_emit(ctx, 4, 0); + xf_emit(ctx, 1, 0x100); + xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 0x10001); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 0x10001); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0x10001); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 4); + xf_emit(ctx, 1, 2); +} + +static void +nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + /* middle of area 2 on pre-NVA0 [after m2mf], end of area 0 on NVAx */ + xf_emit(ctx, 1, 0x3f800000); + xf_emit(ctx, 6, 0); + xf_emit(ctx, 1, 4); + xf_emit(ctx, 1, 0x1a); + xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 0x12, 0); + xf_emit(ctx, 1, 0x00ffff00); + xf_emit(ctx, 6, 0); + xf_emit(ctx, 1, 0xf); + xf_emit(ctx, 7, 0); + xf_emit(ctx, 1, 0x0fac6881); + xf_emit(ctx, 1, 0x11); + xf_emit(ctx, 0xf, 0); + xf_emit(ctx, 1, 4); + xf_emit(ctx, 2, 0); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 1, 3); + else if (dev_priv->chipset >= 0xa0) + xf_emit(ctx, 1, 1); + xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 2); + xf_emit(ctx, 2, 0x04000000); + xf_emit(ctx, 3, 0); + xf_emit(ctx, 1, 5); + xf_emit(ctx, 1, 0x52); + if (dev_priv->chipset == 0x50) { + xf_emit(ctx, 0x13, 0); + } else { + xf_emit(ctx, 4, 0); + xf_emit(ctx, 1, 1); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 0x11, 0); + else + xf_emit(ctx, 0x10, 0); + } + xf_emit(ctx, 0x10, 0x3f800000); + xf_emit(ctx, 1, 0x10); + xf_emit(ctx, 0x26, 0); + xf_emit(ctx, 1, 0x8100c12); + xf_emit(ctx, 1, 5); + xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 4, 0xffff); + if (dev_priv->chipset != 0x50) + xf_emit(ctx, 1, 3); + if (dev_priv->chipset < 0xa0) + xf_emit(ctx, 0x1f, 0); + else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 0xc, 0); + else + xf_emit(ctx, 3, 0); + xf_emit(ctx, 1, 0x00ffff00); + xf_emit(ctx, 1, 0x1a); + if (dev_priv->chipset != 0x50) { + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 3); + } + if (dev_priv->chipset < 0xa0) + xf_emit(ctx, 0x26, 0); + else + xf_emit(ctx, 0x3c, 0); + xf_emit(ctx, 1, 0x102); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 4, 4); + if (dev_priv->chipset >= 0xa0) + xf_emit(ctx, 8, 0); + xf_emit(ctx, 2, 4); + xf_emit(ctx, 1, 0); + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 1, 0x3ff); + else + xf_emit(ctx, 1, 0x7ff); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 0x102); + xf_emit(ctx, 9, 0); + xf_emit(ctx, 4, 4); + xf_emit(ctx, 0x2c, 0); +} + +static void +nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + int magic2; + if (dev_priv->chipset == 0x50) { + magic2 = 0x00003e60; + } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) { + magic2 = 0x001ffe67; + } else { + magic2 = 
0x00087e67; + } + xf_emit(ctx, 8, 0); + xf_emit(ctx, 1, 2); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, magic2); + xf_emit(ctx, 4, 0); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 1, 1); + xf_emit(ctx, 7, 0); + if (dev_priv->chipset >= 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 1, 0x15); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0x10); + xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 4, 0); + if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa0) { + xf_emit(ctx, 1, 4); + xf_emit(ctx, 1, 0x400); + xf_emit(ctx, 1, 0x300); + xf_emit(ctx, 1, 0x1001); + if (dev_priv->chipset != 0xa0) { + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 1, 0); + else + xf_emit(ctx, 1, 0x15); + } + xf_emit(ctx, 3, 0); + } + xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 2); + xf_emit(ctx, 8, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0x10); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 0x13, 0); + xf_emit(ctx, 1, 0x10); + xf_emit(ctx, 0x10, 0); + xf_emit(ctx, 0x10, 0x3f800000); + xf_emit(ctx, 0x19, 0); + xf_emit(ctx, 1, 0x10); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 0x3f); + xf_emit(ctx, 6, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 1); + if (dev_priv->chipset >= 0xa0) { + xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 0x1001); + xf_emit(ctx, 0xb, 0); + } else { + xf_emit(ctx, 0xc, 0); + } + xf_emit(ctx, 1, 0x11); + xf_emit(ctx, 7, 0); + xf_emit(ctx, 1, 0xf); + xf_emit(ctx, 7, 0); + xf_emit(ctx, 1, 0x11); + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 4, 0); + else + xf_emit(ctx, 6, 0); + xf_emit(ctx, 3, 1); + xf_emit(ctx, 1, 2); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 2); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, magic2); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 0x0fac6881); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { + xf_emit(ctx, 1, 0); + xf_emit(ctx, 0x18, 1); + xf_emit(ctx, 8, 2); + xf_emit(ctx, 8, 1); + xf_emit(ctx, 8, 2); + xf_emit(ctx, 8, 1); + xf_emit(ctx, 3, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 5, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 0x16, 0); + } else { + if (dev_priv->chipset >= 0xa0) + xf_emit(ctx, 0x1b, 0); + else + xf_emit(ctx, 0x15, 0); + } + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 2); + xf_emit(ctx, 2, 1); + xf_emit(ctx, 1, 2); + xf_emit(ctx, 2, 1); + if (dev_priv->chipset >= 0xa0) + xf_emit(ctx, 4, 0); + else + xf_emit(ctx, 3, 0); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { + xf_emit(ctx, 0x10, 1); + xf_emit(ctx, 8, 2); + xf_emit(ctx, 0x10, 1); + xf_emit(ctx, 8, 2); + xf_emit(ctx, 8, 1); + xf_emit(ctx, 3, 0); + } + xf_emit(ctx, 1, 0x11); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 0x5b, 0); +} + +static void +nv50_graph_construct_xfer_tp_x1(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + int magic3; + if (dev_priv->chipset == 0x50) + magic3 = 0x1000; + else if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8) + magic3 = 0x1e00; + else + magic3 = 0; + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 4); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 0x24, 0); + else if (dev_priv->chipset >= 0xa0) + xf_emit(ctx, 0x14, 0); + else + xf_emit(ctx, 0x15, 0); + xf_emit(ctx, 2, 4); + if (dev_priv->chipset >= 0xa0) + xf_emit(ctx, 1, 0x03020100); + else + xf_emit(ctx, 1, 0x00608080); + xf_emit(ctx, 4, 0); + xf_emit(ctx, 1, 4); + xf_emit(ctx, 
2, 0); + xf_emit(ctx, 2, 4); + xf_emit(ctx, 1, 0x80); + if (magic3) + xf_emit(ctx, 1, magic3); + xf_emit(ctx, 1, 4); + xf_emit(ctx, 0x24, 0); + xf_emit(ctx, 1, 4); + xf_emit(ctx, 1, 0x80); + xf_emit(ctx, 1, 4); + xf_emit(ctx, 1, 0x03020100); + xf_emit(ctx, 1, 3); + if (magic3) + xf_emit(ctx, 1, magic3); + xf_emit(ctx, 1, 4); + xf_emit(ctx, 4, 0); + xf_emit(ctx, 1, 4); + xf_emit(ctx, 1, 3); + xf_emit(ctx, 3, 0); + xf_emit(ctx, 1, 4); + if (dev_priv->chipset == 0x94 || dev_priv->chipset == 0x96) + xf_emit(ctx, 0x1024, 0); + else if (dev_priv->chipset < 0xa0) + xf_emit(ctx, 0xa24, 0); + else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) + xf_emit(ctx, 0x214, 0); + else + xf_emit(ctx, 0x414, 0); + xf_emit(ctx, 1, 4); + xf_emit(ctx, 1, 3); + xf_emit(ctx, 2, 0); +} + +static void +nv50_graph_construct_xfer_tp_x2(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + int magic1, magic2; + if (dev_priv->chipset == 0x50) { + magic1 = 0x3ff; + magic2 = 0x00003e60; + } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) { + magic1 = 0x7ff; + magic2 = 0x001ffe67; + } else { + magic1 = 0x7ff; + magic2 = 0x00087e67; + } + xf_emit(ctx, 3, 0); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 1, 1); + xf_emit(ctx, 0xc, 0); + xf_emit(ctx, 1, 0xf); + xf_emit(ctx, 0xb, 0); + xf_emit(ctx, 1, 4); + xf_emit(ctx, 4, 0xffff); + xf_emit(ctx, 8, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 3, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 5, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 2, 0); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { + xf_emit(ctx, 1, 3); + xf_emit(ctx, 1, 0); + } else if (dev_priv->chipset >= 0xa0) + xf_emit(ctx, 1, 1); + xf_emit(ctx, 0xa, 0); + xf_emit(ctx, 2, 1); + xf_emit(ctx, 1, 2); + xf_emit(ctx, 2, 1); + xf_emit(ctx, 1, 2); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { + xf_emit(ctx, 1, 0); + xf_emit(ctx, 0x18, 1); + xf_emit(ctx, 8, 2); + xf_emit(ctx, 8, 1); + xf_emit(ctx, 8, 2); + xf_emit(ctx, 8, 1); + xf_emit(ctx, 1, 0); + } + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 0x11); + xf_emit(ctx, 7, 0); + xf_emit(ctx, 1, 0x0fac6881); + xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 4); + xf_emit(ctx, 3, 0); + xf_emit(ctx, 1, 0x11); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 3, 0xcf); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 1, 1); + xf_emit(ctx, 0xa, 0); + xf_emit(ctx, 2, 1); + xf_emit(ctx, 1, 2); + xf_emit(ctx, 2, 1); + xf_emit(ctx, 1, 2); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 8, 1); + xf_emit(ctx, 1, 0x11); + xf_emit(ctx, 7, 0); + xf_emit(ctx, 1, 0x0fac6881); + xf_emit(ctx, 1, 0xf); + xf_emit(ctx, 7, 0); + xf_emit(ctx, 1, magic2); + xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 0x11); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 2, 1); + else + xf_emit(ctx, 1, 1); + if(dev_priv->chipset == 0x50) + xf_emit(ctx, 1, 0); + else + xf_emit(ctx, 3, 0); + xf_emit(ctx, 1, 4); + xf_emit(ctx, 5, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 4, 0); + xf_emit(ctx, 1, 0x11); + xf_emit(ctx, 7, 0); + xf_emit(ctx, 1, 0x0fac6881); + xf_emit(ctx, 3, 0); + xf_emit(ctx, 1, 0x11); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, magic1); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 2, 0); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + 
xf_emit(ctx, 1, 1); + xf_emit(ctx, 0x28, 0); + xf_emit(ctx, 8, 8); + xf_emit(ctx, 1, 0x11); + xf_emit(ctx, 7, 0); + xf_emit(ctx, 1, 0x0fac6881); + xf_emit(ctx, 8, 0x400); + xf_emit(ctx, 8, 0x300); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0xf); + xf_emit(ctx, 7, 0); + xf_emit(ctx, 1, 0x20); + xf_emit(ctx, 1, 0x11); + xf_emit(ctx, 1, 0x100); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 0x40); + xf_emit(ctx, 1, 0x100); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 3); + xf_emit(ctx, 4, 0); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, magic2); + xf_emit(ctx, 3, 0); + xf_emit(ctx, 1, 2); + xf_emit(ctx, 1, 0x0fac6881); + xf_emit(ctx, 9, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 4, 0); + xf_emit(ctx, 1, 4); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0x400); + xf_emit(ctx, 1, 0x300); + xf_emit(ctx, 1, 0x1001); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 4, 0); + else + xf_emit(ctx, 3, 0); + xf_emit(ctx, 1, 0x11); + xf_emit(ctx, 7, 0); + xf_emit(ctx, 1, 0x0fac6881); + xf_emit(ctx, 1, 0xf); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { + xf_emit(ctx, 0x15, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 3, 0); + } else + xf_emit(ctx, 0x17, 0); + if (dev_priv->chipset >= 0xa0) + xf_emit(ctx, 1, 0x0fac6881); + xf_emit(ctx, 1, magic2); + xf_emit(ctx, 3, 0); + xf_emit(ctx, 1, 0x11); + xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 4); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 2, 1); + xf_emit(ctx, 3, 0); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 2, 1); + else + xf_emit(ctx, 1, 1); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 2, 0); + else if (dev_priv->chipset != 0x50) + xf_emit(ctx, 1, 0); +} + +static void +nv50_graph_construct_xfer_tp_x3(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + xf_emit(ctx, 3, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 1); + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 2, 0); + else + xf_emit(ctx, 3, 0); + xf_emit(ctx, 1, 0x2a712488); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 0x4085c000); + xf_emit(ctx, 1, 0x40); + xf_emit(ctx, 1, 0x100); + xf_emit(ctx, 1, 0x10100); + xf_emit(ctx, 1, 0x02800000); +} + +static void +nv50_graph_construct_xfer_tp_x4(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + xf_emit(ctx, 2, 0x04e3bfdf); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 0x00ffff00); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 2, 1); + else + xf_emit(ctx, 1, 1); + xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 0x00ffff00); + xf_emit(ctx, 8, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0x30201000); + xf_emit(ctx, 1, 0x70605040); + xf_emit(ctx, 1, 0xb8a89888); + xf_emit(ctx, 1, 0xf8e8d8c8); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 0x1a); +} + +static void +nv50_graph_construct_xfer_tp_x5(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + xf_emit(ctx, 3, 0); + xf_emit(ctx, 1, 0xfac6881); + xf_emit(ctx, 4, 0); + xf_emit(ctx, 1, 4); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 2, 1); + xf_emit(ctx, 2, 0); + xf_emit(ctx, 1, 1); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 0xb, 0); + else + xf_emit(ctx, 0xa, 0); + xf_emit(ctx, 8, 1); + xf_emit(ctx, 1, 0x11); + xf_emit(ctx, 7, 0); + xf_emit(ctx, 1, 0xfac6881); + xf_emit(ctx, 1, 0xf); + 
xf_emit(ctx, 7, 0); + xf_emit(ctx, 1, 0x11); + xf_emit(ctx, 1, 1); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { + xf_emit(ctx, 6, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 6, 0); + } else { + xf_emit(ctx, 0xb, 0); + } +} + +static void +nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + if (dev_priv->chipset < 0xa0) { + nv50_graph_construct_xfer_tp_x1(ctx); + nv50_graph_construct_xfer_tp_x2(ctx); + nv50_graph_construct_xfer_tp_x3(ctx); + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 0xf, 0); + else + xf_emit(ctx, 0x12, 0); + nv50_graph_construct_xfer_tp_x4(ctx); + } else { + nv50_graph_construct_xfer_tp_x3(ctx); + if (dev_priv->chipset < 0xaa) + xf_emit(ctx, 0xc, 0); + else + xf_emit(ctx, 0xa, 0); + nv50_graph_construct_xfer_tp_x2(ctx); + nv50_graph_construct_xfer_tp_x5(ctx); + nv50_graph_construct_xfer_tp_x4(ctx); + nv50_graph_construct_xfer_tp_x1(ctx); + } +} + +static void +nv50_graph_construct_xfer_tp2(struct nouveau_grctx *ctx) +{ + struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; + int i, mpcnt; + if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa) + mpcnt = 1; + else if (dev_priv->chipset < 0xa0 || dev_priv->chipset >= 0xa8) + mpcnt = 2; + else + mpcnt = 3; + for (i = 0; i < mpcnt; i++) { + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 0x80); + xf_emit(ctx, 1, 0x80007004); + xf_emit(ctx, 1, 0x04000400); + if (dev_priv->chipset >= 0xa0) + xf_emit(ctx, 1, 0xc0); + xf_emit(ctx, 1, 0x1000); + xf_emit(ctx, 2, 0); + if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8) { + xf_emit(ctx, 1, 0xe00); + xf_emit(ctx, 1, 0x1e00); + } + xf_emit(ctx, 1, 1); + xf_emit(ctx, 2, 0); + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 2, 0x1000); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 4); + xf_emit(ctx, 1, 2); + if (dev_priv->chipset >= 0xaa) + xf_emit(ctx, 0xb, 0); + else if (dev_priv->chipset >= 0xa0) + xf_emit(ctx, 0xc, 0); + else + xf_emit(ctx, 0xa, 0); + } + xf_emit(ctx, 1, 0x08100c12); + xf_emit(ctx, 1, 0); + if (dev_priv->chipset >= 0xa0) { + xf_emit(ctx, 1, 0x1fe21); + } + xf_emit(ctx, 5, 0); + xf_emit(ctx, 4, 0xffff); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 2, 0x10001); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 0x1fe21); + xf_emit(ctx, 1, 0); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 1, 1); + xf_emit(ctx, 4, 0); + xf_emit(ctx, 1, 0x08100c12); + xf_emit(ctx, 1, 4); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 2); + xf_emit(ctx, 1, 0x11); + xf_emit(ctx, 8, 0); + xf_emit(ctx, 1, 0xfac6881); + xf_emit(ctx, 1, 0); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) + xf_emit(ctx, 1, 3); + xf_emit(ctx, 3, 0); + xf_emit(ctx, 1, 4); + xf_emit(ctx, 9, 0); + xf_emit(ctx, 1, 2); + xf_emit(ctx, 2, 1); + xf_emit(ctx, 1, 2); + xf_emit(ctx, 3, 1); + xf_emit(ctx, 1, 0); + if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) { + xf_emit(ctx, 8, 2); + xf_emit(ctx, 0x10, 1); + xf_emit(ctx, 8, 2); + xf_emit(ctx, 0x18, 1); + xf_emit(ctx, 3, 0); + } + xf_emit(ctx, 1, 4); + if (dev_priv->chipset == 0x50) + xf_emit(ctx, 0x3a0, 0); + else if (dev_priv->chipset < 0x94) + xf_emit(ctx, 0x3a2, 0); + else if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa) + xf_emit(ctx, 0x39f, 0); + else + xf_emit(ctx, 0x3a3, 0); + xf_emit(ctx, 1, 0x11); + xf_emit(ctx, 1, 0); + xf_emit(ctx, 1, 1); + xf_emit(ctx, 0x2d, 0); +} + +static void +nv50_graph_construct_xfer2(struct nouveau_grctx *ctx) +{ + struct 
drm_nouveau_private *dev_priv = ctx->dev->dev_private; + int i; + uint32_t offset; + uint32_t units = nv_rd32 (ctx->dev, 0x1540); + int size = 0; + + offset = (ctx->ctxvals_pos+0x3f)&~0x3f; + + if (dev_priv->chipset < 0xa0) { + for (i = 0; i < 8; i++) { + ctx->ctxvals_pos = offset + i; + if (i == 0) + xf_emit(ctx, 1, 0x08100c12); + if (units & (1 << i)) + nv50_graph_construct_xfer_tp2(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + } + } else { + /* Strand 0: TPs 0, 1 */ + ctx->ctxvals_pos = offset; + xf_emit(ctx, 1, 0x08100c12); + if (units & (1 << 0)) + nv50_graph_construct_xfer_tp2(ctx); + if (units & (1 << 1)) + nv50_graph_construct_xfer_tp2(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strand 1: TPs 2, 3 */ + ctx->ctxvals_pos = offset + 1; + if (units & (1 << 2)) + nv50_graph_construct_xfer_tp2(ctx); + if (units & (1 << 3)) + nv50_graph_construct_xfer_tp2(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strand 2: TPs 4, 5, 6 */ + ctx->ctxvals_pos = offset + 2; + if (units & (1 << 4)) + nv50_graph_construct_xfer_tp2(ctx); + if (units & (1 << 5)) + nv50_graph_construct_xfer_tp2(ctx); + if (units & (1 << 6)) + nv50_graph_construct_xfer_tp2(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + + /* Strand 3: TPs 7, 8, 9 */ + ctx->ctxvals_pos = offset + 3; + if (units & (1 << 7)) + nv50_graph_construct_xfer_tp2(ctx); + if (units & (1 << 8)) + nv50_graph_construct_xfer_tp2(ctx); + if (units & (1 << 9)) + nv50_graph_construct_xfer_tp2(ctx); + if ((ctx->ctxvals_pos-offset)/8 > size) + size = (ctx->ctxvals_pos-offset)/8; + } + ctx->ctxvals_pos = offset + size * 8; + ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f; + cp_lsr (ctx, offset); + cp_out (ctx, CP_SET_XFER_POINTER); + cp_lsr (ctx, size); + cp_out (ctx, CP_SEEK_2); + cp_out (ctx, CP_XFER_2); + cp_wait(ctx, XFER, BUSY); +} diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index f0dc4e36ef0..de1f5b0062c 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c @@ -390,7 +390,7 @@ nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, if (gpuobj->im_backing) return -EINVAL; - *sz = (*sz + (NV50_INSTMEM_PAGE_SIZE-1)) & ~(NV50_INSTMEM_PAGE_SIZE-1); + *sz = ALIGN(*sz, NV50_INSTMEM_PAGE_SIZE); if (*sz == 0) return -EINVAL; diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 1cc7b937b1e..ed38262d998 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -30,6 +30,9 @@ $(obj)/r420_reg_safe.h: $(src)/reg_srcs/r420 $(obj)/mkregtable $(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable $(call if_changed,mkregtable) +$(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable + $(call if_changed,mkregtable) + $(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h $(obj)/r200.o: $(obj)/r200_reg_safe.h @@ -42,6 +45,8 @@ $(obj)/r420.o: $(obj)/r420_reg_safe.h $(obj)/rs600.o: $(obj)/rs600_reg_safe.h +$(obj)/r600_cs.o: $(obj)/r600_reg_safe.h + radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \ radeon_irq.o r300_cmdbuf.o r600_cp.o # add KMS driver @@ -54,8 +59,10 @@ radeon-y += radeon_device.o radeon_kms.o \ radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \ rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \ r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o
r600_blit_shaders.o \ - r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o + r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \ + evergreen.o radeon-$(CONFIG_COMPAT) += radeon_ioc32.o +radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o obj-$(CONFIG_DRM_RADEON)+= radeon.o diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index 7f152f66f19..d75788feac6 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -881,8 +881,6 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) uint8_t attr = U8((*ptr)++), shift; uint32_t saved, dst; int dptr = *ptr; - attr &= 0x38; - attr |= atom_def_dst[attr >> 3] << 6; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); shift = atom_get_src(ctx, attr, ptr); @@ -897,8 +895,6 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg) uint8_t attr = U8((*ptr)++), shift; uint32_t saved, dst; int dptr = *ptr; - attr &= 0x38; - attr |= atom_def_dst[attr >> 3] << 6; SDEBUG(" dst: "); dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); shift = atom_get_src(ctx, attr, ptr); diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h index 91ad0d1c1b1..6732b5dd8ff 100644 --- a/drivers/gpu/drm/radeon/atombios.h +++ b/drivers/gpu/drm/radeon/atombios.h @@ -1,5 +1,5 @@ /* - * Copyright 2006-2007 Advanced Micro Devices, Inc. + * Copyright 2006-2007 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -20,10 +20,12 @@ * OTHER DEALINGS IN THE SOFTWARE. */ -/****************************************************************************/ + +/****************************************************************************/ /*Portion I: Definitions shared between VBIOS and Driver */ /****************************************************************************/ + #ifndef _ATOMBIOS_H #define _ATOMBIOS_H @@ -40,39 +42,46 @@ #endif #ifdef _H2INC -#ifndef ULONG -typedef unsigned long ULONG; -#endif + #ifndef ULONG + typedef unsigned long ULONG; + #endif -#ifndef UCHAR -typedef unsigned char UCHAR; -#endif + #ifndef UCHAR + typedef unsigned char UCHAR; + #endif -#ifndef USHORT -typedef unsigned short USHORT; -#endif + #ifndef USHORT + typedef unsigned short USHORT; + #endif #endif - -#define ATOM_DAC_A 0 + +#define ATOM_DAC_A 0 #define ATOM_DAC_B 1 #define ATOM_EXT_DAC 2 #define ATOM_CRTC1 0 #define ATOM_CRTC2 1 +#define ATOM_CRTC3 2 +#define ATOM_CRTC4 3 +#define ATOM_CRTC5 4 +#define ATOM_CRTC6 5 +#define ATOM_CRTC_INVALID 0xFF #define ATOM_DIGA 0 #define ATOM_DIGB 1 #define ATOM_PPLL1 0 #define ATOM_PPLL2 1 +#define ATOM_DCPLL 2 +#define ATOM_PPLL_INVALID 0xFF #define ATOM_SCALER1 0 #define ATOM_SCALER2 1 -#define ATOM_SCALER_DISABLE 0 -#define ATOM_SCALER_CENTER 1 -#define ATOM_SCALER_EXPANSION 2 -#define ATOM_SCALER_MULTI_EX 3 +#define ATOM_SCALER_DISABLE 0 +#define ATOM_SCALER_CENTER 1 +#define ATOM_SCALER_EXPANSION 2 +#define ATOM_SCALER_MULTI_EX 3 #define ATOM_DISABLE 0 #define ATOM_ENABLE 1 @@ -82,6 +91,7 @@ typedef unsigned short USHORT; #define ATOM_LCD_SELFTEST_START (ATOM_DISABLE+5) #define ATOM_LCD_SELFTEST_STOP (ATOM_ENABLE+5) #define ATOM_ENCODER_INIT (ATOM_DISABLE+7) +#define ATOM_GET_STATUS (ATOM_DISABLE+8) #define ATOM_BLANKING 1 #define ATOM_BLANKING_OFF 0 @@ -114,7 +124,7 @@ typedef unsigned short USHORT; #define ATOM_DAC2_CV ATOM_DAC1_CV #define ATOM_DAC2_NTSC ATOM_DAC1_NTSC #define 
ATOM_DAC2_PAL ATOM_DAC1_PAL - + #define ATOM_PM_ON 0 #define ATOM_PM_STANDBY 1 #define ATOM_PM_SUSPEND 2 @@ -134,6 +144,7 @@ typedef unsigned short USHORT; #define ATOM_PANEL_MISC_TEMPORAL 0x00000040 #define ATOM_PANEL_MISC_API_ENABLED 0x00000080 + #define MEMTYPE_DDR1 "DDR1" #define MEMTYPE_DDR2 "DDR2" #define MEMTYPE_DDR3 "DDR3" @@ -145,19 +156,19 @@ typedef unsigned short USHORT; /* Maximum size of that FireGL flag string */ -#define ATOM_FIREGL_FLAG_STRING "FGL" /* Flag used to enable FireGL Support */ -#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 3 /* sizeof( ATOM_FIREGL_FLAG_STRING ) */ +#define ATOM_FIREGL_FLAG_STRING "FGL" //Flag used to enable FireGL Support +#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 3 //sizeof( ATOM_FIREGL_FLAG_STRING ) -#define ATOM_FAKE_DESKTOP_STRING "DSK" /* Flag used to enable mobile ASIC on Desktop */ -#define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING +#define ATOM_FAKE_DESKTOP_STRING "DSK" //Flag used to enable mobile ASIC on Desktop +#define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING -#define ATOM_M54T_FLAG_STRING "M54T" /* Flag used to enable M54T Support */ -#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING 4 /* sizeof( ATOM_M54T_FLAG_STRING ) */ +#define ATOM_M54T_FLAG_STRING "M54T" //Flag used to enable M54T Support +#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING 4 //sizeof( ATOM_M54T_FLAG_STRING ) #define HW_ASSISTED_I2C_STATUS_FAILURE 2 #define HW_ASSISTED_I2C_STATUS_SUCCESS 1 -#pragma pack(1) /* BIOS data must use byte aligment */ +#pragma pack(1) /* BIOS data must use byte aligment */ /* Define offset to location of ROM header. */ @@ -165,367 +176,410 @@ typedef unsigned short USHORT; #define OFFSET_TO_ATOM_ROM_IMAGE_SIZE 0x00000002L #define OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE 0x94 -#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE 20 /* including the terminator 0x0! */ +#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE 20 /* including the terminator 0x0! */ #define OFFSET_TO_GET_ATOMBIOS_STRINGS_NUMBER 0x002f #define OFFSET_TO_GET_ATOMBIOS_STRINGS_START 0x006e /* Common header for all ROM Data tables. - Every table pointed _ATOM_MASTER_DATA_TABLE has this common header. + Every table pointed _ATOM_MASTER_DATA_TABLE has this common header. And the pointer actually points to this header. */ -typedef struct _ATOM_COMMON_TABLE_HEADER { - USHORT usStructureSize; - UCHAR ucTableFormatRevision; /*Change it when the Parser is not backward compatible */ - UCHAR ucTableContentRevision; /*Change it only when the table needs to change but the firmware */ - /*Image can't be updated, while Driver needs to carry the new table! 
*/ -} ATOM_COMMON_TABLE_HEADER; - -typedef struct _ATOM_ROM_HEADER { - ATOM_COMMON_TABLE_HEADER sHeader; - UCHAR uaFirmWareSignature[4]; /*Signature to distinguish between Atombios and non-atombios, - atombios should init it as "ATOM", don't change the position */ - USHORT usBiosRuntimeSegmentAddress; - USHORT usProtectedModeInfoOffset; - USHORT usConfigFilenameOffset; - USHORT usCRC_BlockOffset; - USHORT usBIOS_BootupMessageOffset; - USHORT usInt10Offset; - USHORT usPciBusDevInitCode; - USHORT usIoBaseAddress; - USHORT usSubsystemVendorID; - USHORT usSubsystemID; - USHORT usPCI_InfoOffset; - USHORT usMasterCommandTableOffset; /*Offset for SW to get all command table offsets, Don't change the position */ - USHORT usMasterDataTableOffset; /*Offset for SW to get all data table offsets, Don't change the position */ - UCHAR ucExtendedFunctionCode; - UCHAR ucReserved; -} ATOM_ROM_HEADER; +typedef struct _ATOM_COMMON_TABLE_HEADER +{ + USHORT usStructureSize; + UCHAR ucTableFormatRevision; /*Change it when the Parser is not backward compatible */ + UCHAR ucTableContentRevision; /*Change it only when the table needs to change but the firmware */ + /*Image can't be updated, while Driver needs to carry the new table! */ +}ATOM_COMMON_TABLE_HEADER; + +typedef struct _ATOM_ROM_HEADER +{ + ATOM_COMMON_TABLE_HEADER sHeader; + UCHAR uaFirmWareSignature[4]; /*Signature to distinguish between Atombios and non-atombios, + atombios should init it as "ATOM", don't change the position */ + USHORT usBiosRuntimeSegmentAddress; + USHORT usProtectedModeInfoOffset; + USHORT usConfigFilenameOffset; + USHORT usCRC_BlockOffset; + USHORT usBIOS_BootupMessageOffset; + USHORT usInt10Offset; + USHORT usPciBusDevInitCode; + USHORT usIoBaseAddress; + USHORT usSubsystemVendorID; + USHORT usSubsystemID; + USHORT usPCI_InfoOffset; + USHORT usMasterCommandTableOffset; /*Offset for SW to get all command table offsets, Don't change the position */ + USHORT usMasterDataTableOffset; /*Offset for SW to get all data table offsets, Don't change the position */ + UCHAR ucExtendedFunctionCode; + UCHAR ucReserved; +}ATOM_ROM_HEADER; /*==============================Command Table Portion==================================== */ #ifdef UEFI_BUILD -#define UTEMP USHORT -#define USHORT void* + #define UTEMP USHORT + #define USHORT void* #endif -typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES { - USHORT ASIC_Init; /* Function Table, used by various SW components,latest version 1.1 */ - USHORT GetDisplaySurfaceSize; /* Atomic Table, Used by Bios when enabling HW ICON */ - USHORT ASIC_RegistersInit; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */ - USHORT VRAM_BlockVenderDetection; /* Atomic Table, used only by Bios */ - USHORT DIGxEncoderControl; /* Only used by Bios */ - USHORT MemoryControllerInit; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */ - USHORT EnableCRTCMemReq; /* Function Table,directly used by various SW components,latest version 2.1 */ - USHORT MemoryParamAdjust; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock if needed */ - USHORT DVOEncoderControl; /* Function Table,directly used by various SW components,latest version 1.2 */ - USHORT GPIOPinControl; /* Atomic Table, only used by Bios */ - USHORT SetEngineClock; /*Function Table,directly used by various SW components,latest version 1.1 */ - USHORT SetMemoryClock; /* Function Table,directly used by various SW components,latest version 1.1 */ - USHORT SetPixelClock; 
/*Function Table,directly used by various SW components,latest version 1.2 */ - USHORT DynamicClockGating; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */ - USHORT ResetMemoryDLL; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ - USHORT ResetMemoryDevice; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ - USHORT MemoryPLLInit; - USHORT AdjustDisplayPll; /* only used by Bios */ - USHORT AdjustMemoryController; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ - USHORT EnableASIC_StaticPwrMgt; /* Atomic Table, only used by Bios */ - USHORT ASIC_StaticPwrMgtStatusChange; /* Obsolete, only used by Bios */ - USHORT DAC_LoadDetection; /* Atomic Table, directly used by various SW components,latest version 1.2 */ - USHORT LVTMAEncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.3 */ - USHORT LCD1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ - USHORT DAC1EncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ - USHORT DAC2EncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ - USHORT DVOOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ - USHORT CV1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ - USHORT GetConditionalGoldenSetting; /* only used by Bios */ - USHORT TVEncoderControl; /* Function Table,directly used by various SW components,latest version 1.1 */ - USHORT TMDSAEncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.3 */ - USHORT LVDSEncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.3 */ - USHORT TV1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ - USHORT EnableScaler; /* Atomic Table, used only by Bios */ - USHORT BlankCRTC; /* Atomic Table, directly used by various SW components,latest version 1.1 */ - USHORT EnableCRTC; /* Atomic Table, directly used by various SW components,latest version 1.1 */ - USHORT GetPixelClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */ - USHORT EnableVGA_Render; /* Function Table,directly used by various SW components,latest version 1.1 */ - USHORT EnableVGA_Access; /* Obsolete , only used by Bios */ - USHORT SetCRTC_Timing; /* Atomic Table, directly used by various SW components,latest version 1.1 */ - USHORT SetCRTC_OverScan; /* Atomic Table, used by various SW components,latest version 1.1 */ - USHORT SetCRTC_Replication; /* Atomic Table, used only by Bios */ - USHORT SelectCRTC_Source; /* Atomic Table, directly used by various SW components,latest version 1.1 */ - USHORT EnableGraphSurfaces; /* Atomic Table, used only by Bios */ - USHORT UpdateCRTC_DoubleBufferRegisters; - USHORT LUT_AutoFill; /* Atomic Table, only used by Bios */ - USHORT EnableHW_IconCursor; /* Atomic Table, only used by Bios */ - USHORT GetMemoryClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */ - USHORT GetEngineClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */ - USHORT SetCRTC_UsingDTDTiming; /* Atomic Table, directly used by various SW components,latest version 1.1 */ - USHORT ExternalEncoderControl; /* Atomic Table, directly used by various SW components,latest version 
2.1 */ - USHORT LVTMAOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ - USHORT VRAM_BlockDetectionByStrap; /* Atomic Table, used only by Bios */ - USHORT MemoryCleanUp; /* Atomic Table, only used by Bios */ - USHORT ProcessI2cChannelTransaction; /* Function Table,only used by Bios */ - USHORT WriteOneByteToHWAssistedI2C; /* Function Table,indirectly used by various SW components */ - USHORT ReadHWAssistedI2CStatus; /* Atomic Table, indirectly used by various SW components */ - USHORT SpeedFanControl; /* Function Table,indirectly used by various SW components,called from ASIC_Init */ - USHORT PowerConnectorDetection; /* Atomic Table, directly used by various SW components,latest version 1.1 */ - USHORT MC_Synchronization; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ - USHORT ComputeMemoryEnginePLL; /* Atomic Table, indirectly used by various SW components,called from SetMemory/EngineClock */ - USHORT MemoryRefreshConversion; /* Atomic Table, indirectly used by various SW components,called from SetMemory or SetEngineClock */ - USHORT VRAM_GetCurrentInfoBlock; /* Atomic Table, used only by Bios */ - USHORT DynamicMemorySettings; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ - USHORT MemoryTraining; /* Atomic Table, used only by Bios */ - USHORT EnableSpreadSpectrumOnPPLL; /* Atomic Table, directly used by various SW components,latest version 1.2 */ - USHORT TMDSAOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ - USHORT SetVoltage; /* Function Table,directly and/or indirectly used by various SW components,latest version 1.1 */ - USHORT DAC1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ - USHORT DAC2OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */ - USHORT SetupHWAssistedI2CStatus; /* Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C" */ - USHORT ClockSource; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */ - USHORT MemoryDeviceInit; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */ - USHORT EnableYUV; /* Atomic Table, indirectly used by various SW components,called from EnableVGARender */ - USHORT DIG1EncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */ - USHORT DIG2EncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */ - USHORT DIG1TransmitterControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */ - USHORT DIG2TransmitterControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */ - USHORT ProcessAuxChannelTransaction; /* Function Table,only used by Bios */ - USHORT DPEncoderService; /* Function Table,only used by Bios */ -} ATOM_MASTER_LIST_OF_COMMAND_TABLES; - -/* For backward compatible */ +typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{ + USHORT ASIC_Init; //Function Table, used by various SW components,latest version 1.1 + USHORT GetDisplaySurfaceSize; //Atomic Table, Used by Bios when enabling HW ICON + USHORT ASIC_RegistersInit; //Atomic Table, indirectly used by various SW components,called from ASIC_Init + USHORT VRAM_BlockVenderDetection; //Atomic Table, used only by Bios + USHORT DIGxEncoderControl; //Only used by Bios + USHORT MemoryControllerInit; //Atomic Table, indirectly used 
by various SW components,called from ASIC_Init + USHORT EnableCRTCMemReq; //Function Table,directly used by various SW components,latest version 2.1 + USHORT MemoryParamAdjust; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock if needed + USHORT DVOEncoderControl; //Function Table,directly used by various SW components,latest version 1.2 + USHORT GPIOPinControl; //Atomic Table, only used by Bios + USHORT SetEngineClock; //Function Table,directly used by various SW components,latest version 1.1 + USHORT SetMemoryClock; //Function Table,directly used by various SW components,latest version 1.1 + USHORT SetPixelClock; //Function Table,directly used by various SW components,latest version 1.2 + USHORT DynamicClockGating; //Atomic Table, indirectly used by various SW components,called from ASIC_Init + USHORT ResetMemoryDLL; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock + USHORT ResetMemoryDevice; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock + USHORT MemoryPLLInit; + USHORT AdjustDisplayPll; //only used by Bios + USHORT AdjustMemoryController; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock + USHORT EnableASIC_StaticPwrMgt; //Atomic Table, only used by Bios + USHORT ASIC_StaticPwrMgtStatusChange; //Obsolete , only used by Bios + USHORT DAC_LoadDetection; //Atomic Table, directly used by various SW components,latest version 1.2 + USHORT LVTMAEncoderControl; //Atomic Table,directly used by various SW components,latest version 1.3 + USHORT LCD1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1 + USHORT DAC1EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1 + USHORT DAC2EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1 + USHORT DVOOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1 + USHORT CV1OutputControl; //Atomic Table, Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead + USHORT GetConditionalGoldenSetting; //only used by Bios + USHORT TVEncoderControl; //Function Table,directly used by various SW components,latest version 1.1 + USHORT TMDSAEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3 + USHORT LVDSEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3 + USHORT TV1OutputControl; //Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead + USHORT EnableScaler; //Atomic Table, used only by Bios + USHORT BlankCRTC; //Atomic Table, directly used by various SW components,latest version 1.1 + USHORT EnableCRTC; //Atomic Table, directly used by various SW components,latest version 1.1 + USHORT GetPixelClock; //Atomic Table, directly used by various SW components,latest version 1.1 + USHORT EnableVGA_Render; //Function Table,directly used by various SW components,latest version 1.1 + USHORT GetSCLKOverMCLKRatio; //Atomic Table, only used by Bios + USHORT SetCRTC_Timing; //Atomic Table, directly used by various SW components,latest version 1.1 + USHORT SetCRTC_OverScan; //Atomic Table, used by various SW components,latest version 1.1 + USHORT SetCRTC_Replication; //Atomic Table, used only by Bios + USHORT SelectCRTC_Source; //Atomic Table, directly used by various SW components,latest version 1.1 + USHORT EnableGraphSurfaces; //Atomic Table, used only by Bios + USHORT UpdateCRTC_DoubleBufferRegisters; + USHORT 
LUT_AutoFill; //Atomic Table, only used by Bios + USHORT EnableHW_IconCursor; //Atomic Table, only used by Bios + USHORT GetMemoryClock; //Atomic Table, directly used by various SW components,latest version 1.1 + USHORT GetEngineClock; //Atomic Table, directly used by various SW components,latest version 1.1 + USHORT SetCRTC_UsingDTDTiming; //Atomic Table, directly used by various SW components,latest version 1.1 + USHORT ExternalEncoderControl; //Atomic Table, directly used by various SW components,latest version 2.1 + USHORT LVTMAOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1 + USHORT VRAM_BlockDetectionByStrap; //Atomic Table, used only by Bios + USHORT MemoryCleanUp; //Atomic Table, only used by Bios + USHORT ProcessI2cChannelTransaction; //Function Table,only used by Bios + USHORT WriteOneByteToHWAssistedI2C; //Function Table,indirectly used by various SW components + USHORT ReadHWAssistedI2CStatus; //Atomic Table, indirectly used by various SW components + USHORT SpeedFanControl; //Function Table,indirectly used by various SW components,called from ASIC_Init + USHORT PowerConnectorDetection; //Atomic Table, directly used by various SW components,latest version 1.1 + USHORT MC_Synchronization; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock + USHORT ComputeMemoryEnginePLL; //Atomic Table, indirectly used by various SW components,called from SetMemory/EngineClock + USHORT MemoryRefreshConversion; //Atomic Table, indirectly used by various SW components,called from SetMemory or SetEngineClock + USHORT VRAM_GetCurrentInfoBlock; //Atomic Table, used only by Bios + USHORT DynamicMemorySettings; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock + USHORT MemoryTraining; //Atomic Table, used only by Bios + USHORT EnableSpreadSpectrumOnPPLL; //Atomic Table, directly used by various SW components,latest version 1.2 + USHORT TMDSAOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1 + USHORT SetVoltage; //Function Table,directly and/or indirectly used by various SW components,latest version 1.1 + USHORT DAC1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1 + USHORT DAC2OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1 + USHORT SetupHWAssistedI2CStatus; //Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C" + USHORT ClockSource; //Atomic Table, indirectly used by various SW components,called from ASIC_Init + USHORT MemoryDeviceInit; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock + USHORT EnableYUV; //Atomic Table, indirectly used by various SW components,called from EnableVGARender + USHORT DIG1EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1 + USHORT DIG2EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1 + USHORT DIG1TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1 + USHORT DIG2TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1 + USHORT ProcessAuxChannelTransaction; //Function Table,only used by Bios + USHORT DPEncoderService; //Function Table,only used by Bios +}ATOM_MASTER_LIST_OF_COMMAND_TABLES; + +// For backward compatible #define ReadEDIDFromHWAssistedI2C ProcessI2cChannelTransaction #define UNIPHYTransmitterControl 
DIG1TransmitterControl #define LVTMATransmitterControl DIG2TransmitterControl #define SetCRTC_DPM_State GetConditionalGoldenSetting #define SetUniphyInstance ASIC_StaticPwrMgtStatusChange +#define HPDInterruptService ReadHWAssistedI2CStatus +#define EnableVGA_Access GetSCLKOverMCLKRatio -typedef struct _ATOM_MASTER_COMMAND_TABLE { - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables; -} ATOM_MASTER_COMMAND_TABLE; - -/****************************************************************************/ -/* Structures used in every command table */ -/****************************************************************************/ -typedef struct _ATOM_TABLE_ATTRIBUTE { +typedef struct _ATOM_MASTER_COMMAND_TABLE +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables; +}ATOM_MASTER_COMMAND_TABLE; + +/****************************************************************************/ +// Structures used in every command table +/****************************************************************************/ +typedef struct _ATOM_TABLE_ATTRIBUTE +{ #if ATOM_BIG_ENDIAN - USHORT UpdatedByUtility:1; /* [15]=Table updated by utility flag */ - USHORT PS_SizeInBytes:7; /* [14:8]=Size of parameter space in Bytes (multiple of a dword), */ - USHORT WS_SizeInBytes:8; /* [7:0]=Size of workspace in Bytes (in multiple of a dword), */ + USHORT UpdatedByUtility:1; //[15]=Table updated by utility flag + USHORT PS_SizeInBytes:7; //[14:8]=Size of parameter space in Bytes (multiple of a dword), + USHORT WS_SizeInBytes:8; //[7:0]=Size of workspace in Bytes (in multiple of a dword), #else - USHORT WS_SizeInBytes:8; /* [7:0]=Size of workspace in Bytes (in multiple of a dword), */ - USHORT PS_SizeInBytes:7; /* [14:8]=Size of parameter space in Bytes (multiple of a dword), */ - USHORT UpdatedByUtility:1; /* [15]=Table updated by utility flag */ + USHORT WS_SizeInBytes:8; //[7:0]=Size of workspace in Bytes (in multiple of a dword), + USHORT PS_SizeInBytes:7; //[14:8]=Size of parameter space in Bytes (multiple of a dword), + USHORT UpdatedByUtility:1; //[15]=Table updated by utility flag #endif -} ATOM_TABLE_ATTRIBUTE; - -typedef union _ATOM_TABLE_ATTRIBUTE_ACCESS { - ATOM_TABLE_ATTRIBUTE sbfAccess; - USHORT susAccess; -} ATOM_TABLE_ATTRIBUTE_ACCESS; +}ATOM_TABLE_ATTRIBUTE; -/****************************************************************************/ -/* Common header for all command tables. */ -/* Every table pointed by _ATOM_MASTER_COMMAND_TABLE has this common header. */ -/* And the pointer actually points to this header. */ -/****************************************************************************/ -typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER { - ATOM_COMMON_TABLE_HEADER CommonHeader; - ATOM_TABLE_ATTRIBUTE TableAttribute; -} ATOM_COMMON_ROM_COMMAND_TABLE_HEADER; +typedef union _ATOM_TABLE_ATTRIBUTE_ACCESS +{ + ATOM_TABLE_ATTRIBUTE sbfAccess; + USHORT susAccess; +}ATOM_TABLE_ATTRIBUTE_ACCESS; + +/****************************************************************************/ +// Common header for all command tables. +// Every table pointed by _ATOM_MASTER_COMMAND_TABLE has this common header. +// And the pointer actually points to this header. 
+/****************************************************************************/ +typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER +{ + ATOM_COMMON_TABLE_HEADER CommonHeader; + ATOM_TABLE_ATTRIBUTE TableAttribute; +}ATOM_COMMON_ROM_COMMAND_TABLE_HEADER; -/****************************************************************************/ -/* Structures used by ComputeMemoryEnginePLLTable */ -/****************************************************************************/ +/****************************************************************************/ +// Structures used by ComputeMemoryEnginePLLTable +/****************************************************************************/ #define COMPUTE_MEMORY_PLL_PARAM 1 #define COMPUTE_ENGINE_PLL_PARAM 2 -typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS { - ULONG ulClock; /* When returen, it's the re-calculated clock based on given Fb_div Post_Div and ref_div */ - UCHAR ucAction; /* 0:reserved //1:Memory //2:Engine */ - UCHAR ucReserved; /* may expand to return larger Fbdiv later */ - UCHAR ucFbDiv; /* return value */ - UCHAR ucPostDiv; /* return value */ -} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS; - -typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 { - ULONG ulClock; /* When return, [23:0] return real clock */ - UCHAR ucAction; /* 0:reserved;COMPUTE_MEMORY_PLL_PARAM:Memory;COMPUTE_ENGINE_PLL_PARAM:Engine. it return ref_div to be written to register */ - USHORT usFbDiv; /* return Feedback value to be written to register */ - UCHAR ucPostDiv; /* return post div to be written to register */ -} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2; +typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS +{ + ULONG ulClock; //When returen, it's the re-calculated clock based on given Fb_div Post_Div and ref_div + UCHAR ucAction; //0:reserved //1:Memory //2:Engine + UCHAR ucReserved; //may expand to return larger Fbdiv later + UCHAR ucFbDiv; //return value + UCHAR ucPostDiv; //return value +}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS; + +typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 +{ + ULONG ulClock; //When return, [23:0] return real clock + UCHAR ucAction; //0:reserved;COMPUTE_MEMORY_PLL_PARAM:Memory;COMPUTE_ENGINE_PLL_PARAM:Engine. 
it return ref_div to be written to register + USHORT usFbDiv; //return Feedback value to be written to register + UCHAR ucPostDiv; //return post div to be written to register +}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2; #define COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS -#define SET_CLOCK_FREQ_MASK 0x00FFFFFF /* Clock change tables only take bit [23:0] as the requested clock value */ -#define USE_NON_BUS_CLOCK_MASK 0x01000000 /* Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) */ -#define USE_MEMORY_SELF_REFRESH_MASK 0x02000000 /* Only applicable to memory clock change, when set, using memory self refresh during clock transition */ -#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04000000 /* Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change */ -#define FIRST_TIME_CHANGE_CLOCK 0x08000000 /* Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup */ -#define SKIP_SW_PROGRAM_PLL 0x10000000 /* Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL */ + +#define SET_CLOCK_FREQ_MASK 0x00FFFFFF //Clock change tables only take bit [23:0] as the requested clock value +#define USE_NON_BUS_CLOCK_MASK 0x01000000 //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) +#define USE_MEMORY_SELF_REFRESH_MASK 0x02000000 //Only applicable to memory clock change, when set, using memory self refresh during clock transition +#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04000000 //Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change +#define FIRST_TIME_CHANGE_CLOCK 0x08000000 //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup +#define SKIP_SW_PROGRAM_PLL 0x10000000 //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL #define USE_SS_ENABLED_PIXEL_CLOCK USE_NON_BUS_CLOCK_MASK -#define b3USE_NON_BUS_CLOCK_MASK 0x01 /* Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) */ -#define b3USE_MEMORY_SELF_REFRESH 0x02 /* Only applicable to memory clock change, when set, using memory self refresh during clock transition */ -#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04 /* Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change */ -#define b3FIRST_TIME_CHANGE_CLOCK 0x08 /* Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup */ -#define b3SKIP_SW_PROGRAM_PLL 0x10 /* Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL */ +#define b3USE_NON_BUS_CLOCK_MASK 0x01 //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) +#define b3USE_MEMORY_SELF_REFRESH 0x02 //Only applicable to memory clock change, when set, using memory self refresh during clock transition +#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04 //Only applicable to memory clock change, when set, the table will skip 
predefined internal memory parameter change +#define b3FIRST_TIME_CHANGE_CLOCK 0x08 //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup +#define b3SKIP_SW_PROGRAM_PLL 0x10 //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL -typedef struct _ATOM_COMPUTE_CLOCK_FREQ { +typedef struct _ATOM_COMPUTE_CLOCK_FREQ +{ #if ATOM_BIG_ENDIAN - ULONG ulComputeClockFlag:8; /* =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM */ - ULONG ulClockFreq:24; /* in unit of 10kHz */ + ULONG ulComputeClockFlag:8; // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM + ULONG ulClockFreq:24; // in unit of 10kHz #else - ULONG ulClockFreq:24; /* in unit of 10kHz */ - ULONG ulComputeClockFlag:8; /* =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM */ + ULONG ulClockFreq:24; // in unit of 10kHz + ULONG ulComputeClockFlag:8; // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM #endif -} ATOM_COMPUTE_CLOCK_FREQ; - -typedef struct _ATOM_S_MPLL_FB_DIVIDER { - USHORT usFbDivFrac; - USHORT usFbDiv; -} ATOM_S_MPLL_FB_DIVIDER; +}ATOM_COMPUTE_CLOCK_FREQ; -typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 { - union { - ATOM_COMPUTE_CLOCK_FREQ ulClock; /* Input Parameter */ - ATOM_S_MPLL_FB_DIVIDER ulFbDiv; /* Output Parameter */ - }; - UCHAR ucRefDiv; /* Output Parameter */ - UCHAR ucPostDiv; /* Output Parameter */ - UCHAR ucCntlFlag; /* Output Parameter */ - UCHAR ucReserved; -} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3; +typedef struct _ATOM_S_MPLL_FB_DIVIDER +{ + USHORT usFbDivFrac; + USHORT usFbDiv; +}ATOM_S_MPLL_FB_DIVIDER; -/* ucCntlFlag */ +typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 +{ + union + { + ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter + ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter + }; + UCHAR ucRefDiv; //Output Parameter + UCHAR ucPostDiv; //Output Parameter + UCHAR ucCntlFlag; //Output Parameter + UCHAR ucReserved; +}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3; + +// ucCntlFlag #define ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN 1 #define ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE 2 #define ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE 4 +#define ATOM_PLL_CNTL_FLAG_SPLL_ISPARE_9 8 -typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER { - ATOM_COMPUTE_CLOCK_FREQ ulClock; - ULONG ulReserved[2]; -} DYNAMICE_MEMORY_SETTINGS_PARAMETER; - -typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER { - ATOM_COMPUTE_CLOCK_FREQ ulClock; - ULONG ulMemoryClock; - ULONG ulReserved; -} DYNAMICE_ENGINE_SETTINGS_PARAMETER; - -/****************************************************************************/ -/* Structures used by SetEngineClockTable */ -/****************************************************************************/ -typedef struct _SET_ENGINE_CLOCK_PARAMETERS { - ULONG ulTargetEngineClock; /* In 10Khz unit */ -} SET_ENGINE_CLOCK_PARAMETERS; -typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION { - ULONG ulTargetEngineClock; /* In 10Khz unit */ - COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved; -} SET_ENGINE_CLOCK_PS_ALLOCATION; +// V4 are only used for APU which PLL outside GPU +typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 +{ +#if ATOM_BIG_ENDIAN + ULONG ucPostDiv; //return parameter: post divider which is used to program to register directly + ULONG ulClock:24; //Input= target clock, output = actual clock +#else + ULONG ulClock:24; //Input= target clock, output = actual clock + ULONG ucPostDiv; //return parameter: post divider which is used to program 
to register directly +#endif +}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4; -/****************************************************************************/ -/* Structures used by SetMemoryClockTable */ -/****************************************************************************/ -typedef struct _SET_MEMORY_CLOCK_PARAMETERS { - ULONG ulTargetMemoryClock; /* In 10Khz unit */ -} SET_MEMORY_CLOCK_PARAMETERS; +typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER +{ + ATOM_COMPUTE_CLOCK_FREQ ulClock; + ULONG ulReserved[2]; +}DYNAMICE_MEMORY_SETTINGS_PARAMETER; -typedef struct _SET_MEMORY_CLOCK_PS_ALLOCATION { - ULONG ulTargetMemoryClock; /* In 10Khz unit */ - COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved; -} SET_MEMORY_CLOCK_PS_ALLOCATION; +typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER +{ + ATOM_COMPUTE_CLOCK_FREQ ulClock; + ULONG ulMemoryClock; + ULONG ulReserved; +}DYNAMICE_ENGINE_SETTINGS_PARAMETER; + +/****************************************************************************/ +// Structures used by SetEngineClockTable +/****************************************************************************/ +typedef struct _SET_ENGINE_CLOCK_PARAMETERS +{ + ULONG ulTargetEngineClock; //In 10Khz unit +}SET_ENGINE_CLOCK_PARAMETERS; -/****************************************************************************/ -/* Structures used by ASIC_Init.ctb */ -/****************************************************************************/ -typedef struct _ASIC_INIT_PARAMETERS { - ULONG ulDefaultEngineClock; /* In 10Khz unit */ - ULONG ulDefaultMemoryClock; /* In 10Khz unit */ -} ASIC_INIT_PARAMETERS; +typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION +{ + ULONG ulTargetEngineClock; //In 10Khz unit + COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved; +}SET_ENGINE_CLOCK_PS_ALLOCATION; + +/****************************************************************************/ +// Structures used by SetMemoryClockTable +/****************************************************************************/ +typedef struct _SET_MEMORY_CLOCK_PARAMETERS +{ + ULONG ulTargetMemoryClock; //In 10Khz unit +}SET_MEMORY_CLOCK_PARAMETERS; -typedef struct _ASIC_INIT_PS_ALLOCATION { - ASIC_INIT_PARAMETERS sASICInitClocks; - SET_ENGINE_CLOCK_PS_ALLOCATION sReserved; /* Caller doesn't need to init this structure */ -} ASIC_INIT_PS_ALLOCATION; +typedef struct _SET_MEMORY_CLOCK_PS_ALLOCATION +{ + ULONG ulTargetMemoryClock; //In 10Khz unit + COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved; +}SET_MEMORY_CLOCK_PS_ALLOCATION; + +/****************************************************************************/ +// Structures used by ASIC_Init.ctb +/****************************************************************************/ +typedef struct _ASIC_INIT_PARAMETERS +{ + ULONG ulDefaultEngineClock; //In 10Khz unit + ULONG ulDefaultMemoryClock; //In 10Khz unit +}ASIC_INIT_PARAMETERS; -/****************************************************************************/ -/* Structure used by DynamicClockGatingTable.ctb */ -/****************************************************************************/ -typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS { - UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ - UCHAR ucPadding[3]; -} DYNAMIC_CLOCK_GATING_PARAMETERS; +typedef struct _ASIC_INIT_PS_ALLOCATION +{ + ASIC_INIT_PARAMETERS sASICInitClocks; + SET_ENGINE_CLOCK_PS_ALLOCATION sReserved; //Caller doesn't need to init this structure +}ASIC_INIT_PS_ALLOCATION; + +/****************************************************************************/ 
+// Structure used by DynamicClockGatingTable.ctb +/****************************************************************************/ +typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS +{ + UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE + UCHAR ucPadding[3]; +}DYNAMIC_CLOCK_GATING_PARAMETERS; #define DYNAMIC_CLOCK_GATING_PS_ALLOCATION DYNAMIC_CLOCK_GATING_PARAMETERS -/****************************************************************************/ -/* Structure used by EnableASIC_StaticPwrMgtTable.ctb */ -/****************************************************************************/ -typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS { - UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ - UCHAR ucPadding[3]; -} ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS; +/****************************************************************************/ +// Structure used by EnableASIC_StaticPwrMgtTable.ctb +/****************************************************************************/ +typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS +{ + UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE + UCHAR ucPadding[3]; +}ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS; #define ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS -/****************************************************************************/ -/* Structures used by DAC_LoadDetectionTable.ctb */ -/****************************************************************************/ -typedef struct _DAC_LOAD_DETECTION_PARAMETERS { - USHORT usDeviceID; /* {ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT} */ - UCHAR ucDacType; /* {ATOM_DAC_A,ATOM_DAC_B, ATOM_EXT_DAC} */ - UCHAR ucMisc; /* Valid only when table revision =1.3 and above */ -} DAC_LOAD_DETECTION_PARAMETERS; +/****************************************************************************/ +// Structures used by DAC_LoadDetectionTable.ctb +/****************************************************************************/ +typedef struct _DAC_LOAD_DETECTION_PARAMETERS +{ + USHORT usDeviceID; //{ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT} + UCHAR ucDacType; //{ATOM_DAC_A,ATOM_DAC_B, ATOM_EXT_DAC} + UCHAR ucMisc; //Valid only when table revision =1.3 and above +}DAC_LOAD_DETECTION_PARAMETERS; -/* DAC_LOAD_DETECTION_PARAMETERS.ucMisc */ +// DAC_LOAD_DETECTION_PARAMETERS.ucMisc #define DAC_LOAD_MISC_YPrPb 0x01 -typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION { - DAC_LOAD_DETECTION_PARAMETERS sDacload; - ULONG Reserved[2]; /* Don't set this one, allocation for EXT DAC */ -} DAC_LOAD_DETECTION_PS_ALLOCATION; - -/****************************************************************************/ -/* Structures used by DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb */ -/****************************************************************************/ -typedef struct _DAC_ENCODER_CONTROL_PARAMETERS { - USHORT usPixelClock; /* in 10KHz; for bios convenient */ - UCHAR ucDacStandard; /* See definition of ATOM_DACx_xxx, For DEC3.0, bit 7 used as internal flag to indicate DAC2 (==1) or DAC1 (==0) */ - UCHAR ucAction; /* 0: turn off encoder */ - /* 1: setup and turn on encoder */ - /* 7: ATOM_ENCODER_INIT Initialize DAC */ -} DAC_ENCODER_CONTROL_PARAMETERS; +typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION +{ + DAC_LOAD_DETECTION_PARAMETERS sDacload; + ULONG Reserved[2];// Don't set this one, allocation for EXT DAC +}DAC_LOAD_DETECTION_PS_ALLOCATION; + +/****************************************************************************/ +// Structures used by 
DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb +/****************************************************************************/ +typedef struct _DAC_ENCODER_CONTROL_PARAMETERS +{ + USHORT usPixelClock; // in 10KHz; for bios convenient + UCHAR ucDacStandard; // See definition of ATOM_DACx_xxx, For DEC3.0, bit 7 used as internal flag to indicate DAC2 (==1) or DAC1 (==0) + UCHAR ucAction; // 0: turn off encoder + // 1: setup and turn on encoder + // 7: ATOM_ENCODER_INIT Initialize DAC +}DAC_ENCODER_CONTROL_PARAMETERS; #define DAC_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PARAMETERS -/****************************************************************************/ -/* Structures used by DIG1EncoderControlTable */ -/* DIG2EncoderControlTable */ -/* ExternalEncoderControlTable */ -/****************************************************************************/ -typedef struct _DIG_ENCODER_CONTROL_PARAMETERS { - USHORT usPixelClock; /* in 10KHz; for bios convenient */ - UCHAR ucConfig; - /* [2] Link Select: */ - /* =0: PHY linkA if bfLane<3 */ - /* =1: PHY linkB if bfLanes<3 */ - /* =0: PHY linkA+B if bfLanes=3 */ - /* [3] Transmitter Sel */ - /* =0: UNIPHY or PCIEPHY */ - /* =1: LVTMA */ - UCHAR ucAction; /* =0: turn off encoder */ - /* =1: turn on encoder */ - UCHAR ucEncoderMode; - /* =0: DP encoder */ - /* =1: LVDS encoder */ - /* =2: DVI encoder */ - /* =3: HDMI encoder */ - /* =4: SDVO encoder */ - UCHAR ucLaneNum; /* how many lanes to enable */ - UCHAR ucReserved[2]; -} DIG_ENCODER_CONTROL_PARAMETERS; +/****************************************************************************/ +// Structures used by DIG1EncoderControlTable +// DIG2EncoderControlTable +// ExternalEncoderControlTable +/****************************************************************************/ +typedef struct _DIG_ENCODER_CONTROL_PARAMETERS +{ + USHORT usPixelClock; // in 10KHz; for bios convenient + UCHAR ucConfig; + // [2] Link Select: + // =0: PHY linkA if bfLane<3 + // =1: PHY linkB if bfLanes<3 + // =0: PHY linkA+B if bfLanes=3 + // [3] Transmitter Sel + // =0: UNIPHY or PCIEPHY + // =1: LVTMA + UCHAR ucAction; // =0: turn off encoder + // =1: turn on encoder + UCHAR ucEncoderMode; + // =0: DP encoder + // =1: LVDS encoder + // =2: DVI encoder + // =3: HDMI encoder + // =4: SDVO encoder + UCHAR ucLaneNum; // how many lanes to enable + UCHAR ucReserved[2]; +}DIG_ENCODER_CONTROL_PARAMETERS; #define DIG_ENCODER_CONTROL_PS_ALLOCATION DIG_ENCODER_CONTROL_PARAMETERS #define EXTERNAL_ENCODER_CONTROL_PARAMETER DIG_ENCODER_CONTROL_PARAMETERS -/* ucConfig */ +//ucConfig #define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK 0x01 #define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ 0x00 #define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ 0x01 @@ -539,52 +593,57 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS { #define ATOM_ENCODER_CONFIG_LVTMA 0x08 #define ATOM_ENCODER_CONFIG_TRANSMITTER1 0x00 #define ATOM_ENCODER_CONFIG_TRANSMITTER2 0x08 -#define ATOM_ENCODER_CONFIG_DIGB 0x80 /* VBIOS Internal use, outside SW should set this bit=0 */ -/* ucAction */ -/* ATOM_ENABLE: Enable Encoder */ -/* ATOM_DISABLE: Disable Encoder */ +#define ATOM_ENCODER_CONFIG_DIGB 0x80 // VBIOS Internal use, outside SW should set this bit=0 +// ucAction +// ATOM_ENABLE: Enable Encoder +// ATOM_DISABLE: Disable Encoder -/* ucEncoderMode */ +//ucEncoderMode #define ATOM_ENCODER_MODE_DP 0 #define ATOM_ENCODER_MODE_LVDS 1 #define ATOM_ENCODER_MODE_DVI 2 #define ATOM_ENCODER_MODE_HDMI 3 #define ATOM_ENCODER_MODE_SDVO 4 +#define ATOM_ENCODER_MODE_DP_AUDIO 5 
#define ATOM_ENCODER_MODE_TV 13 #define ATOM_ENCODER_MODE_CV 14 #define ATOM_ENCODER_MODE_CRT 15 -typedef struct _ATOM_DIG_ENCODER_CONFIG_V2 { +typedef struct _ATOM_DIG_ENCODER_CONFIG_V2 +{ #if ATOM_BIG_ENDIAN - UCHAR ucReserved1:2; - UCHAR ucTransmitterSel:2; /* =0: UniphyAB, =1: UniphyCD =2: UniphyEF */ - UCHAR ucLinkSel:1; /* =0: linkA/C/E =1: linkB/D/F */ - UCHAR ucReserved:1; - UCHAR ucDPLinkRate:1; /* =0: 1.62Ghz, =1: 2.7Ghz */ + UCHAR ucReserved1:2; + UCHAR ucTransmitterSel:2; // =0: UniphyAB, =1: UniphyCD =2: UniphyEF + UCHAR ucLinkSel:1; // =0: linkA/C/E =1: linkB/D/F + UCHAR ucReserved:1; + UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz #else - UCHAR ucDPLinkRate:1; /* =0: 1.62Ghz, =1: 2.7Ghz */ - UCHAR ucReserved:1; - UCHAR ucLinkSel:1; /* =0: linkA/C/E =1: linkB/D/F */ - UCHAR ucTransmitterSel:2; /* =0: UniphyAB, =1: UniphyCD =2: UniphyEF */ - UCHAR ucReserved1:2; + UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz + UCHAR ucReserved:1; + UCHAR ucLinkSel:1; // =0: linkA/C/E =1: linkB/D/F + UCHAR ucTransmitterSel:2; // =0: UniphyAB, =1: UniphyCD =2: UniphyEF + UCHAR ucReserved1:2; #endif -} ATOM_DIG_ENCODER_CONFIG_V2; +}ATOM_DIG_ENCODER_CONFIG_V2; -typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 { - USHORT usPixelClock; /* in 10KHz; for bios convenient */ - ATOM_DIG_ENCODER_CONFIG_V2 acConfig; - UCHAR ucAction; - UCHAR ucEncoderMode; - /* =0: DP encoder */ - /* =1: LVDS encoder */ - /* =2: DVI encoder */ - /* =3: HDMI encoder */ - /* =4: SDVO encoder */ - UCHAR ucLaneNum; /* how many lanes to enable */ - UCHAR ucReserved[2]; -} DIG_ENCODER_CONTROL_PARAMETERS_V2; -/* ucConfig */ +typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 +{ + USHORT usPixelClock; // in 10KHz; for bios convenient + ATOM_DIG_ENCODER_CONFIG_V2 acConfig; + UCHAR ucAction; + UCHAR ucEncoderMode; + // =0: DP encoder + // =1: LVDS encoder + // =2: DVI encoder + // =3: HDMI encoder + // =4: SDVO encoder + UCHAR ucLaneNum; // how many lanes to enable + UCHAR ucStatus; // = DP_LINK_TRAINING_COMPLETE or DP_LINK_TRAINING_INCOMPLETE, only used by VBIOS with command ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS + UCHAR ucReserved; +}DIG_ENCODER_CONTROL_PARAMETERS_V2; + +//ucConfig #define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_MASK 0x01 #define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_1_62GHZ 0x00 #define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_2_70GHZ 0x01 @@ -596,58 +655,122 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 { #define ATOM_ENCODER_CONFIG_V2_TRANSMITTER2 0x08 #define ATOM_ENCODER_CONFIG_V2_TRANSMITTER3 0x10 -/****************************************************************************/ -/* Structures used by UNIPHYTransmitterControlTable */ -/* LVTMATransmitterControlTable */ -/* DVOOutputControlTable */ -/****************************************************************************/ -typedef struct _ATOM_DP_VS_MODE { - UCHAR ucLaneSel; - UCHAR ucLaneSet; -} ATOM_DP_VS_MODE; - -typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS { - union { - USHORT usPixelClock; /* in 10KHz; for bios convenient */ - USHORT usInitInfo; /* when init uniphy,lower 8bit is used for connector type defined in objectid.h */ - ATOM_DP_VS_MODE asMode; /* DP Voltage swing mode */ +// ucAction: +// ATOM_DISABLE +// ATOM_ENABLE +#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START 0x08 +#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1 0x09 +#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2 0x0a +#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE 0x0b +#define ATOM_ENCODER_CMD_DP_VIDEO_OFF 0x0c +#define ATOM_ENCODER_CMD_DP_VIDEO_ON 0x0d +#define 
ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS 0x0e +#define ATOM_ENCODER_CMD_SETUP 0x0f + +// ucStatus +#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10 +#define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE 0x00 + +// Following function ENABLE sub-function will be used by driver when TMDS/HDMI/LVDS is used, disable function will be used by driver +typedef struct _ATOM_DIG_ENCODER_CONFIG_V3 +{ +#if ATOM_BIG_ENDIAN + UCHAR ucReserved1:1; + UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F + UCHAR ucReserved:3; + UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz +#else + UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz + UCHAR ucReserved:3; + UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F + UCHAR ucReserved1:1; +#endif +}ATOM_DIG_ENCODER_CONFIG_V3; + +#define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL 0x70 + + +typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3 +{ + USHORT usPixelClock; // in 10KHz; for bios convenient + ATOM_DIG_ENCODER_CONFIG_V3 acConfig; + UCHAR ucAction; + UCHAR ucEncoderMode; + // =0: DP encoder + // =1: LVDS encoder + // =2: DVI encoder + // =3: HDMI encoder + // =4: SDVO encoder + // =5: DP audio + UCHAR ucLaneNum; // how many lanes to enable + UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP + UCHAR ucReserved; +}DIG_ENCODER_CONTROL_PARAMETERS_V3; + + +// define ucBitPerColor: +#define PANEL_BPC_UNDEFINE 0x00 +#define PANEL_6BIT_PER_COLOR 0x01 +#define PANEL_8BIT_PER_COLOR 0x02 +#define PANEL_10BIT_PER_COLOR 0x03 +#define PANEL_12BIT_PER_COLOR 0x04 +#define PANEL_16BIT_PER_COLOR 0x05 + +/****************************************************************************/ +// Structures used by UNIPHYTransmitterControlTable +// LVTMATransmitterControlTable +// DVOOutputControlTable +/****************************************************************************/ +typedef struct _ATOM_DP_VS_MODE +{ + UCHAR ucLaneSel; + UCHAR ucLaneSet; +}ATOM_DP_VS_MODE; + +typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS +{ + union + { + USHORT usPixelClock; // in 10KHz; for bios convenient + USHORT usInitInfo; // when init uniphy,lower 8bit is used for connector type defined in objectid.h + ATOM_DP_VS_MODE asMode; // DP Voltage swing mode }; - UCHAR ucConfig; - /* [0]=0: 4 lane Link, */ - /* =1: 8 lane Link ( Dual Links TMDS ) */ - /* [1]=0: InCoherent mode */ - /* =1: Coherent Mode */ - /* [2] Link Select: */ - /* =0: PHY linkA if bfLane<3 */ - /* =1: PHY linkB if bfLanes<3 */ - /* =0: PHY linkA+B if bfLanes=3 */ - /* [5:4]PCIE lane Sel */ - /* =0: lane 0~3 or 0~7 */ - /* =1: lane 4~7 */ - /* =2: lane 8~11 or 8~15 */ - /* =3: lane 12~15 */ - UCHAR ucAction; /* =0: turn off encoder */ - /* =1: turn on encoder */ - UCHAR ucReserved[4]; -} DIG_TRANSMITTER_CONTROL_PARAMETERS; - -#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PARAMETERS - -/* ucInitInfo */ -#define ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK 0x00ff - -/* ucConfig */ + UCHAR ucConfig; + // [0]=0: 4 lane Link, + // =1: 8 lane Link ( Dual Links TMDS ) + // [1]=0: InCoherent mode + // =1: Coherent Mode + // [2] Link Select: + // =0: PHY linkA if bfLane<3 + // =1: PHY linkB if bfLanes<3 + // =0: PHY linkA+B if bfLanes=3 + // [5:4]PCIE lane Sel + // =0: lane 0~3 or 0~7 + // =1: lane 4~7 + // =2: lane 8~11 or 8~15 + // =3: lane 12~15 + UCHAR ucAction; // =0: turn off encoder + // =1: turn on encoder + UCHAR ucReserved[4]; +}DIG_TRANSMITTER_CONTROL_PARAMETERS; + +#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PARAMETERS + +//ucInitInfo +#define 
ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK 0x00ff + +//ucConfig #define ATOM_TRANSMITTER_CONFIG_8LANE_LINK 0x01 #define ATOM_TRANSMITTER_CONFIG_COHERENT 0x02 #define ATOM_TRANSMITTER_CONFIG_LINK_SEL_MASK 0x04 #define ATOM_TRANSMITTER_CONFIG_LINKA 0x00 #define ATOM_TRANSMITTER_CONFIG_LINKB 0x04 -#define ATOM_TRANSMITTER_CONFIG_LINKA_B 0x00 +#define ATOM_TRANSMITTER_CONFIG_LINKA_B 0x00 #define ATOM_TRANSMITTER_CONFIG_LINKB_A 0x04 -#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK 0x08 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */ -#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER 0x00 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */ -#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER 0x08 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */ +#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK 0x08 // only used when ATOM_TRANSMITTER_ACTION_ENABLE +#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER 0x00 // only used when ATOM_TRANSMITTER_ACTION_ENABLE +#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER 0x08 // only used when ATOM_TRANSMITTER_ACTION_ENABLE #define ATOM_TRANSMITTER_CONFIG_CLKSRC_MASK 0x30 #define ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL 0x00 @@ -661,7 +784,7 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS { #define ATOM_TRANSMITTER_CONFIG_LANE_8_15 0x80 #define ATOM_TRANSMITTER_CONFIG_LANE_12_15 0xc0 -/* ucAction */ +//ucAction #define ATOM_TRANSMITTER_ACTION_DISABLE 0 #define ATOM_TRANSMITTER_ACTION_ENABLE 1 #define ATOM_TRANSMITTER_ACTION_LCD_BLOFF 2 @@ -674,93 +797,168 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS { #define ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT 9 #define ATOM_TRANSMITTER_ACTION_SETUP 10 #define ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH 11 +#define ATOM_TRANSMITTER_ACTION_POWER_ON 12 +#define ATOM_TRANSMITTER_ACTION_POWER_OFF 13 -/* Following are used for DigTransmitterControlTable ver1.2 */ -typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2 { +// Following are used for DigTransmitterControlTable ver1.2 +typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2 +{ #if ATOM_BIG_ENDIAN - UCHAR ucTransmitterSel:2; /* bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) */ - /* =1 Dig Transmitter 2 ( Uniphy CD ) */ - /* =2 Dig Transmitter 3 ( Uniphy EF ) */ - UCHAR ucReserved:1; - UCHAR fDPConnector:1; /* bit4=0: DP connector =1: None DP connector */ - UCHAR ucEncoderSel:1; /* bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) */ - UCHAR ucLinkSel:1; /* bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E */ - /* =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F */ - - UCHAR fCoherentMode:1; /* bit1=1: Coherent Mode ( for DVI/HDMI mode ) */ - UCHAR fDualLinkConnector:1; /* bit0=1: Dual Link DVI connector */ + UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) + // =1 Dig Transmitter 2 ( Uniphy CD ) + // =2 Dig Transmitter 3 ( Uniphy EF ) + UCHAR ucReserved:1; + UCHAR fDPConnector:1; //bit4=0: DP connector =1: None DP connector + UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) + UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E + // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. 
when fDualLinkConnector=1, it means master link of dual link is B or D or F + + UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode ) + UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector #else - UCHAR fDualLinkConnector:1; /* bit0=1: Dual Link DVI connector */ - UCHAR fCoherentMode:1; /* bit1=1: Coherent Mode ( for DVI/HDMI mode ) */ - UCHAR ucLinkSel:1; /* bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E */ - /* =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F */ - UCHAR ucEncoderSel:1; /* bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) */ - UCHAR fDPConnector:1; /* bit4=0: DP connector =1: None DP connector */ - UCHAR ucReserved:1; - UCHAR ucTransmitterSel:2; /* bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) */ - /* =1 Dig Transmitter 2 ( Uniphy CD ) */ - /* =2 Dig Transmitter 3 ( Uniphy EF ) */ + UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector + UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode ) + UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E + // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F + UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) + UCHAR fDPConnector:1; //bit4=0: DP connector =1: None DP connector + UCHAR ucReserved:1; + UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) + // =1 Dig Transmitter 2 ( Uniphy CD ) + // =2 Dig Transmitter 3 ( Uniphy EF ) #endif -} ATOM_DIG_TRANSMITTER_CONFIG_V2; +}ATOM_DIG_TRANSMITTER_CONFIG_V2; -/* ucConfig */ -/* Bit0 */ +//ucConfig +//Bit0 #define ATOM_TRANSMITTER_CONFIG_V2_DUAL_LINK_CONNECTOR 0x01 -/* Bit1 */ +//Bit1 #define ATOM_TRANSMITTER_CONFIG_V2_COHERENT 0x02 -/* Bit2 */ +//Bit2 #define ATOM_TRANSMITTER_CONFIG_V2_LINK_SEL_MASK 0x04 -#define ATOM_TRANSMITTER_CONFIG_V2_LINKA 0x00 +#define ATOM_TRANSMITTER_CONFIG_V2_LINKA 0x00 #define ATOM_TRANSMITTER_CONFIG_V2_LINKB 0x04 -/* Bit3 */ +// Bit3 #define ATOM_TRANSMITTER_CONFIG_V2_ENCODER_SEL_MASK 0x08 -#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER 0x00 /* only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP */ -#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER 0x08 /* only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP */ +#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER 0x00 // only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP +#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER 0x08 // only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP -/* Bit4 */ +// Bit4 #define ATOM_TRASMITTER_CONFIG_V2_DP_CONNECTOR 0x10 -/* Bit7:6 */ +// Bit7:6 #define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER_SEL_MASK 0xC0 -#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1 0x00 /* AB */ -#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2 0x40 /* CD */ -#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3 0x80 /* EF */ - -typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 { - union { - USHORT usPixelClock; /* in 10KHz; for bios convenient */ - USHORT usInitInfo; /* when init uniphy,lower 8bit is used for connector type 
defined in objectid.h */ - ATOM_DP_VS_MODE asMode; /* DP Voltage swing mode */ +#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1 0x00 //AB +#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2 0x40 //CD +#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3 0x80 //EF + +typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 +{ + union + { + USHORT usPixelClock; // in 10KHz; for bios convenient + USHORT usInitInfo; // when init uniphy,lower 8bit is used for connector type defined in objectid.h + ATOM_DP_VS_MODE asMode; // DP Voltage swing mode }; - ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig; - UCHAR ucAction; /* define as ATOM_TRANSMITER_ACTION_XXX */ - UCHAR ucReserved[4]; -} DIG_TRANSMITTER_CONTROL_PARAMETERS_V2; + ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig; + UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_XXX + UCHAR ucReserved[4]; +}DIG_TRANSMITTER_CONTROL_PARAMETERS_V2; -/****************************************************************************/ -/* Structures used by DAC1OuputControlTable */ -/* DAC2OuputControlTable */ -/* LVTMAOutputControlTable (Before DEC30) */ -/* TMDSAOutputControlTable (Before DEC30) */ -/****************************************************************************/ -typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS { - UCHAR ucAction; /* Possible input:ATOM_ENABLE||ATOMDISABLE */ - /* When the display is LCD, in addition to above: */ - /* ATOM_LCD_BLOFF|| ATOM_LCD_BLON ||ATOM_LCD_BL_BRIGHTNESS_CONTROL||ATOM_LCD_SELFTEST_START|| */ - /* ATOM_LCD_SELFTEST_STOP */ +typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V3 +{ +#if ATOM_BIG_ENDIAN + UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) + // =1 Dig Transmitter 2 ( Uniphy CD ) + // =2 Dig Transmitter 3 ( Uniphy EF ) + UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, EXT_CLK=2 + UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F + UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E + // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F + UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode ) + UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector +#else + UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector + UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode ) + UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E + // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F + UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. 
=1: Data/clk path source from DIGB/D/F + UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, EXT_CLK=2 + UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) + // =1 Dig Transmitter 2 ( Uniphy CD ) + // =2 Dig Transmitter 3 ( Uniphy EF ) +#endif +}ATOM_DIG_TRANSMITTER_CONFIG_V3; - UCHAR aucPadding[3]; /* padding to DWORD aligned */ -} DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS; +typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 +{ + union + { + USHORT usPixelClock; // in 10KHz; for bios convenient + USHORT usInitInfo; // when init uniphy,lower 8bit is used for connector type defined in objectid.h + ATOM_DP_VS_MODE asMode; // DP Voltage swing mode + }; + ATOM_DIG_TRANSMITTER_CONFIG_V3 acConfig; + UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_XXX + UCHAR ucLaneNum; + UCHAR ucReserved[3]; +}DIG_TRANSMITTER_CONTROL_PARAMETERS_V3; + +//ucConfig +//Bit0 +#define ATOM_TRANSMITTER_CONFIG_V3_DUAL_LINK_CONNECTOR 0x01 + +//Bit1 +#define ATOM_TRANSMITTER_CONFIG_V3_COHERENT 0x02 + +//Bit2 +#define ATOM_TRANSMITTER_CONFIG_V3_LINK_SEL_MASK 0x04 +#define ATOM_TRANSMITTER_CONFIG_V3_LINKA 0x00 +#define ATOM_TRANSMITTER_CONFIG_V3_LINKB 0x04 + +// Bit3 +#define ATOM_TRANSMITTER_CONFIG_V3_ENCODER_SEL_MASK 0x08 +#define ATOM_TRANSMITTER_CONFIG_V3_DIG1_ENCODER 0x00 +#define ATOM_TRANSMITTER_CONFIG_V3_DIG2_ENCODER 0x08 + +// Bit5:4 +#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SEL_MASK 0x30 +#define ATOM_TRASMITTER_CONFIG_V3_P1PLL 0x00 +#define ATOM_TRASMITTER_CONFIG_V3_P2PLL 0x10 +#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SRC_EXT 0x20 + +// Bit7:6 +#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER_SEL_MASK 0xC0 +#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER1 0x00 //AB +#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2 0x40 //CD +#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3 0x80 //EF + +/****************************************************************************/ +// Structures used by DAC1OuputControlTable +// DAC2OuputControlTable +// LVTMAOutputControlTable (Before DEC30) +// TMDSAOutputControlTable (Before DEC30) +/****************************************************************************/ +typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS +{ + UCHAR ucAction; // Possible input:ATOM_ENABLE||ATOMDISABLE + // When the display is LCD, in addition to above: + // ATOM_LCD_BLOFF|| ATOM_LCD_BLON ||ATOM_LCD_BL_BRIGHTNESS_CONTROL||ATOM_LCD_SELFTEST_START|| + // ATOM_LCD_SELFTEST_STOP + + UCHAR aucPadding[3]; // padding to DWORD aligned +}DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS; #define DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS -#define CRT1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS + +#define CRT1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS #define CRT1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION -#define CRT2_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS +#define CRT2_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS #define CRT2_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION #define CV1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS @@ -782,397 +980,550 @@ typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS { #define DVO_OUTPUT_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PS_ALLOCATION #define DVO_OUTPUT_CONTROL_PARAMETERS_V3 DIG_TRANSMITTER_CONTROL_PARAMETERS -/****************************************************************************/ -/* Structures used by BlankCRTCTable */ 
-/****************************************************************************/ -typedef struct _BLANK_CRTC_PARAMETERS { - UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ - UCHAR ucBlanking; /* ATOM_BLANKING or ATOM_BLANKINGOFF */ - USHORT usBlackColorRCr; - USHORT usBlackColorGY; - USHORT usBlackColorBCb; -} BLANK_CRTC_PARAMETERS; +/****************************************************************************/ +// Structures used by BlankCRTCTable +/****************************************************************************/ +typedef struct _BLANK_CRTC_PARAMETERS +{ + UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2 + UCHAR ucBlanking; // ATOM_BLANKING or ATOM_BLANKINGOFF + USHORT usBlackColorRCr; + USHORT usBlackColorGY; + USHORT usBlackColorBCb; +}BLANK_CRTC_PARAMETERS; #define BLANK_CRTC_PS_ALLOCATION BLANK_CRTC_PARAMETERS -/****************************************************************************/ -/* Structures used by EnableCRTCTable */ -/* EnableCRTCMemReqTable */ -/* UpdateCRTC_DoubleBufferRegistersTable */ -/****************************************************************************/ -typedef struct _ENABLE_CRTC_PARAMETERS { - UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ - UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ - UCHAR ucPadding[2]; -} ENABLE_CRTC_PARAMETERS; +/****************************************************************************/ +// Structures used by EnableCRTCTable +// EnableCRTCMemReqTable +// UpdateCRTC_DoubleBufferRegistersTable +/****************************************************************************/ +typedef struct _ENABLE_CRTC_PARAMETERS +{ + UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2 + UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE + UCHAR ucPadding[2]; +}ENABLE_CRTC_PARAMETERS; #define ENABLE_CRTC_PS_ALLOCATION ENABLE_CRTC_PARAMETERS -/****************************************************************************/ -/* Structures used by SetCRTC_OverScanTable */ -/****************************************************************************/ -typedef struct _SET_CRTC_OVERSCAN_PARAMETERS { - USHORT usOverscanRight; /* right */ - USHORT usOverscanLeft; /* left */ - USHORT usOverscanBottom; /* bottom */ - USHORT usOverscanTop; /* top */ - UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ - UCHAR ucPadding[3]; -} SET_CRTC_OVERSCAN_PARAMETERS; +/****************************************************************************/ +// Structures used by SetCRTC_OverScanTable +/****************************************************************************/ +typedef struct _SET_CRTC_OVERSCAN_PARAMETERS +{ + USHORT usOverscanRight; // right + USHORT usOverscanLeft; // left + USHORT usOverscanBottom; // bottom + USHORT usOverscanTop; // top + UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2 + UCHAR ucPadding[3]; +}SET_CRTC_OVERSCAN_PARAMETERS; #define SET_CRTC_OVERSCAN_PS_ALLOCATION SET_CRTC_OVERSCAN_PARAMETERS -/****************************************************************************/ -/* Structures used by SetCRTC_ReplicationTable */ -/****************************************************************************/ -typedef struct _SET_CRTC_REPLICATION_PARAMETERS { - UCHAR ucH_Replication; /* horizontal replication */ - UCHAR ucV_Replication; /* vertical replication */ - UCHAR usCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ - UCHAR ucPadding; -} SET_CRTC_REPLICATION_PARAMETERS; +/****************************************************************************/ +// Structures used by SetCRTC_ReplicationTable +/****************************************************************************/ +typedef 
struct _SET_CRTC_REPLICATION_PARAMETERS +{ + UCHAR ucH_Replication; // horizontal replication + UCHAR ucV_Replication; // vertical replication + UCHAR usCRTC; // ATOM_CRTC1 or ATOM_CRTC2 + UCHAR ucPadding; +}SET_CRTC_REPLICATION_PARAMETERS; #define SET_CRTC_REPLICATION_PS_ALLOCATION SET_CRTC_REPLICATION_PARAMETERS -/****************************************************************************/ -/* Structures used by SelectCRTC_SourceTable */ -/****************************************************************************/ -typedef struct _SELECT_CRTC_SOURCE_PARAMETERS { - UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ - UCHAR ucDevice; /* ATOM_DEVICE_CRT1|ATOM_DEVICE_CRT2|.... */ - UCHAR ucPadding[2]; -} SELECT_CRTC_SOURCE_PARAMETERS; +/****************************************************************************/ +// Structures used by SelectCRTC_SourceTable +/****************************************************************************/ +typedef struct _SELECT_CRTC_SOURCE_PARAMETERS +{ + UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2 + UCHAR ucDevice; // ATOM_DEVICE_CRT1|ATOM_DEVICE_CRT2|.... + UCHAR ucPadding[2]; +}SELECT_CRTC_SOURCE_PARAMETERS; #define SELECT_CRTC_SOURCE_PS_ALLOCATION SELECT_CRTC_SOURCE_PARAMETERS -typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2 { - UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ - UCHAR ucEncoderID; /* DAC1/DAC2/TVOUT/DIG1/DIG2/DVO */ - UCHAR ucEncodeMode; /* Encoding mode, only valid when using DIG1/DIG2/DVO */ - UCHAR ucPadding; -} SELECT_CRTC_SOURCE_PARAMETERS_V2; - -/* ucEncoderID */ -/* #define ASIC_INT_DAC1_ENCODER_ID 0x00 */ -/* #define ASIC_INT_TV_ENCODER_ID 0x02 */ -/* #define ASIC_INT_DIG1_ENCODER_ID 0x03 */ -/* #define ASIC_INT_DAC2_ENCODER_ID 0x04 */ -/* #define ASIC_EXT_TV_ENCODER_ID 0x06 */ -/* #define ASIC_INT_DVO_ENCODER_ID 0x07 */ -/* #define ASIC_INT_DIG2_ENCODER_ID 0x09 */ -/* #define ASIC_EXT_DIG_ENCODER_ID 0x05 */ - -/* ucEncodeMode */ -/* #define ATOM_ENCODER_MODE_DP 0 */ -/* #define ATOM_ENCODER_MODE_LVDS 1 */ -/* #define ATOM_ENCODER_MODE_DVI 2 */ -/* #define ATOM_ENCODER_MODE_HDMI 3 */ -/* #define ATOM_ENCODER_MODE_SDVO 4 */ -/* #define ATOM_ENCODER_MODE_TV 13 */ -/* #define ATOM_ENCODER_MODE_CV 14 */ -/* #define ATOM_ENCODER_MODE_CRT 15 */ - -/****************************************************************************/ -/* Structures used by SetPixelClockTable */ -/* GetPixelClockTable */ -/****************************************************************************/ -/* Major revision=1., Minor revision=1 */ -typedef struct _PIXEL_CLOCK_PARAMETERS { - USHORT usPixelClock; /* in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) */ - /* 0 means disable PPLL */ - USHORT usRefDiv; /* Reference divider */ - USHORT usFbDiv; /* feedback divider */ - UCHAR ucPostDiv; /* post divider */ - UCHAR ucFracFbDiv; /* fractional feedback divider */ - UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPL2 */ - UCHAR ucRefDivSrc; /* ATOM_PJITTER or ATO_NONPJITTER */ - UCHAR ucCRTC; /* Which CRTC uses this Ppll */ - UCHAR ucPadding; -} PIXEL_CLOCK_PARAMETERS; - -/* Major revision=1., Minor revision=2, add ucMiscIfno */ -/* ucMiscInfo: */ +typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2 +{ + UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2 + UCHAR ucEncoderID; // DAC1/DAC2/TVOUT/DIG1/DIG2/DVO + UCHAR ucEncodeMode; // Encoding mode, only valid when using DIG1/DIG2/DVO + UCHAR ucPadding; +}SELECT_CRTC_SOURCE_PARAMETERS_V2; + +//ucEncoderID +//#define ASIC_INT_DAC1_ENCODER_ID 0x00 +//#define ASIC_INT_TV_ENCODER_ID 0x02 +//#define ASIC_INT_DIG1_ENCODER_ID 0x03 
+//#define ASIC_INT_DAC2_ENCODER_ID 0x04 +//#define ASIC_EXT_TV_ENCODER_ID 0x06 +//#define ASIC_INT_DVO_ENCODER_ID 0x07 +//#define ASIC_INT_DIG2_ENCODER_ID 0x09 +//#define ASIC_EXT_DIG_ENCODER_ID 0x05 + +//ucEncodeMode +//#define ATOM_ENCODER_MODE_DP 0 +//#define ATOM_ENCODER_MODE_LVDS 1 +//#define ATOM_ENCODER_MODE_DVI 2 +//#define ATOM_ENCODER_MODE_HDMI 3 +//#define ATOM_ENCODER_MODE_SDVO 4 +//#define ATOM_ENCODER_MODE_TV 13 +//#define ATOM_ENCODER_MODE_CV 14 +//#define ATOM_ENCODER_MODE_CRT 15 + +/****************************************************************************/ +// Structures used by SetPixelClockTable +// GetPixelClockTable +/****************************************************************************/ +//Major revision=1., Minor revision=1 +typedef struct _PIXEL_CLOCK_PARAMETERS +{ + USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) + // 0 means disable PPLL + USHORT usRefDiv; // Reference divider + USHORT usFbDiv; // feedback divider + UCHAR ucPostDiv; // post divider + UCHAR ucFracFbDiv; // fractional feedback divider + UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPL2 + UCHAR ucRefDivSrc; // ATOM_PJITTER or ATO_NONPJITTER + UCHAR ucCRTC; // Which CRTC uses this Ppll + UCHAR ucPadding; +}PIXEL_CLOCK_PARAMETERS; + +//Major revision=1., Minor revision=2, add ucMiscIfno +//ucMiscInfo: #define MISC_FORCE_REPROG_PIXEL_CLOCK 0x1 #define MISC_DEVICE_INDEX_MASK 0xF0 #define MISC_DEVICE_INDEX_SHIFT 4 -typedef struct _PIXEL_CLOCK_PARAMETERS_V2 { - USHORT usPixelClock; /* in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) */ - /* 0 means disable PPLL */ - USHORT usRefDiv; /* Reference divider */ - USHORT usFbDiv; /* feedback divider */ - UCHAR ucPostDiv; /* post divider */ - UCHAR ucFracFbDiv; /* fractional feedback divider */ - UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPL2 */ - UCHAR ucRefDivSrc; /* ATOM_PJITTER or ATO_NONPJITTER */ - UCHAR ucCRTC; /* Which CRTC uses this Ppll */ - UCHAR ucMiscInfo; /* Different bits for different purpose, bit [7:4] as device index, bit[0]=Force prog */ -} PIXEL_CLOCK_PARAMETERS_V2; - -/* Major revision=1., Minor revision=3, structure/definition change */ -/* ucEncoderMode: */ -/* ATOM_ENCODER_MODE_DP */ -/* ATOM_ENOCDER_MODE_LVDS */ -/* ATOM_ENOCDER_MODE_DVI */ -/* ATOM_ENOCDER_MODE_HDMI */ -/* ATOM_ENOCDER_MODE_SDVO */ -/* ATOM_ENCODER_MODE_TV 13 */ -/* ATOM_ENCODER_MODE_CV 14 */ -/* ATOM_ENCODER_MODE_CRT 15 */ - -/* ucDVOConfig */ -/* #define DVO_ENCODER_CONFIG_RATE_SEL 0x01 */ -/* #define DVO_ENCODER_CONFIG_DDR_SPEED 0x00 */ -/* #define DVO_ENCODER_CONFIG_SDR_SPEED 0x01 */ -/* #define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c */ -/* #define DVO_ENCODER_CONFIG_LOW12BIT 0x00 */ -/* #define DVO_ENCODER_CONFIG_UPPER12BIT 0x04 */ -/* #define DVO_ENCODER_CONFIG_24BIT 0x08 */ - -/* ucMiscInfo: also changed, see below */ +typedef struct _PIXEL_CLOCK_PARAMETERS_V2 +{ + USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) + // 0 means disable PPLL + USHORT usRefDiv; // Reference divider + USHORT usFbDiv; // feedback divider + UCHAR ucPostDiv; // post divider + UCHAR ucFracFbDiv; // fractional feedback divider + UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPL2 + UCHAR ucRefDivSrc; // ATOM_PJITTER or ATO_NONPJITTER + UCHAR ucCRTC; // Which CRTC uses this Ppll + UCHAR ucMiscInfo; // Different bits for different purpose, bit [7:4] as device index, bit[0]=Force prog +}PIXEL_CLOCK_PARAMETERS_V2; + +//Major revision=1., Minor revision=3, structure/definition change 
+//ucEncoderMode: +//ATOM_ENCODER_MODE_DP +//ATOM_ENOCDER_MODE_LVDS +//ATOM_ENOCDER_MODE_DVI +//ATOM_ENOCDER_MODE_HDMI +//ATOM_ENOCDER_MODE_SDVO +//ATOM_ENCODER_MODE_TV 13 +//ATOM_ENCODER_MODE_CV 14 +//ATOM_ENCODER_MODE_CRT 15 + +//ucDVOConfig +//#define DVO_ENCODER_CONFIG_RATE_SEL 0x01 +//#define DVO_ENCODER_CONFIG_DDR_SPEED 0x00 +//#define DVO_ENCODER_CONFIG_SDR_SPEED 0x01 +//#define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c +//#define DVO_ENCODER_CONFIG_LOW12BIT 0x00 +//#define DVO_ENCODER_CONFIG_UPPER12BIT 0x04 +//#define DVO_ENCODER_CONFIG_24BIT 0x08 + +//ucMiscInfo: also changed, see below #define PIXEL_CLOCK_MISC_FORCE_PROG_PPLL 0x01 #define PIXEL_CLOCK_MISC_VGA_MODE 0x02 #define PIXEL_CLOCK_MISC_CRTC_SEL_MASK 0x04 #define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1 0x00 #define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2 0x04 #define PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK 0x08 +#define PIXEL_CLOCK_MISC_REF_DIV_SRC 0x10 +// V1.4 for RoadRunner +#define PIXEL_CLOCK_V4_MISC_SS_ENABLE 0x10 +#define PIXEL_CLOCK_V4_MISC_COHERENT_MODE 0x20 -typedef struct _PIXEL_CLOCK_PARAMETERS_V3 { - USHORT usPixelClock; /* in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) */ - /* 0 means disable PPLL. For VGA PPLL,make sure this value is not 0. */ - USHORT usRefDiv; /* Reference divider */ - USHORT usFbDiv; /* feedback divider */ - UCHAR ucPostDiv; /* post divider */ - UCHAR ucFracFbDiv; /* fractional feedback divider */ - UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPL2 */ - UCHAR ucTransmitterId; /* graphic encoder id defined in objectId.h */ - union { - UCHAR ucEncoderMode; /* encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI/ */ - UCHAR ucDVOConfig; /* when use DVO, need to know SDR/DDR, 12bit or 24bit */ +typedef struct _PIXEL_CLOCK_PARAMETERS_V3 +{ + USHORT usPixelClock; // in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) + // 0 means disable PPLL. For VGA PPLL,make sure this value is not 0. + USHORT usRefDiv; // Reference divider + USHORT usFbDiv; // feedback divider + UCHAR ucPostDiv; // post divider + UCHAR ucFracFbDiv; // fractional feedback divider + UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPL2 + UCHAR ucTransmitterId; // graphic encoder id defined in objectId.h + union + { + UCHAR ucEncoderMode; // encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI/ + UCHAR ucDVOConfig; // when use DVO, need to know SDR/DDR, 12bit or 24bit }; - UCHAR ucMiscInfo; /* bit[0]=Force program, bit[1]= set pclk for VGA, b[2]= CRTC sel */ - /* bit[3]=0:use PPLL for dispclk source, =1: use engine clock for dispclock source */ -} PIXEL_CLOCK_PARAMETERS_V3; + UCHAR ucMiscInfo; // bit[0]=Force program, bit[1]= set pclk for VGA, b[2]= CRTC sel + // bit[3]=0:use PPLL for dispclk source, =1: use engine clock for dispclock source + // bit[4]=0:use XTALIN as the source of reference divider,=1 use the pre-defined clock as the source of reference divider +}PIXEL_CLOCK_PARAMETERS_V3; #define PIXEL_CLOCK_PARAMETERS_LAST PIXEL_CLOCK_PARAMETERS_V2 #define GET_PIXEL_CLOCK_PS_ALLOCATION PIXEL_CLOCK_PARAMETERS_LAST -/****************************************************************************/ -/* Structures used by AdjustDisplayPllTable */ -/****************************************************************************/ -typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS { +typedef struct _PIXEL_CLOCK_PARAMETERS_V5 +{ + UCHAR ucCRTC; // ATOM_CRTC1~6, indicate the CRTC controller to + // drive the pixel clock. not used for DCPLL case. + union{ + UCHAR ucReserved; + UCHAR ucFracFbDiv; // [gphan] temporary to prevent build problem. 
remove it after driver code is changed. + }; + USHORT usPixelClock; // target the pixel clock to drive the CRTC timing + // 0 means disable PPLL/DCPLL. + USHORT usFbDiv; // feedback divider integer part. + UCHAR ucPostDiv; // post divider. + UCHAR ucRefDiv; // Reference divider + UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL + UCHAR ucTransmitterID; // ASIC encoder id defined in objectId.h, + // indicate which graphic encoder will be used. + UCHAR ucEncoderMode; // Encoder mode: + UCHAR ucMiscInfo; // bit[0]= Force program PPLL + // bit[1]= when VGA timing is used. + // bit[3:2]= HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:32bpp + // bit[4]= RefClock source for PPLL. + // =0: XTLAIN( default mode ) + // =1: other external clock source, which is pre-defined + // by VBIOS depend on the feature required. + // bit[7:5]: reserved. + ULONG ulFbDivDecFrac; // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 ) + +}PIXEL_CLOCK_PARAMETERS_V5; + +#define PIXEL_CLOCK_V5_MISC_FORCE_PROG_PPLL 0x01 +#define PIXEL_CLOCK_V5_MISC_VGA_MODE 0x02 +#define PIXEL_CLOCK_V5_MISC_HDMI_BPP_MASK 0x0c +#define PIXEL_CLOCK_V5_MISC_HDMI_24BPP 0x00 +#define PIXEL_CLOCK_V5_MISC_HDMI_30BPP 0x04 +#define PIXEL_CLOCK_V5_MISC_HDMI_32BPP 0x08 +#define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC 0x10 + +typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2 +{ + PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput; +}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2; + +typedef struct _GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2 +{ + UCHAR ucStatus; + UCHAR ucRefDivSrc; // =1: reference clock source from XTALIN, =0: source from PCIE ref clock + UCHAR ucReserved[2]; +}GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2; + +typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3 +{ + PIXEL_CLOCK_PARAMETERS_V5 sDispClkInput; +}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3; + +/****************************************************************************/ +// Structures used by AdjustDisplayPllTable +/****************************************************************************/ +typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS +{ USHORT usPixelClock; UCHAR ucTransmitterID; UCHAR ucEncodeMode; - union { - UCHAR ucDVOConfig; /* if DVO, need passing link rate and output 12bitlow or 24bit */ - UCHAR ucConfig; /* if none DVO, not defined yet */ + union + { + UCHAR ucDVOConfig; //if DVO, need passing link rate and output 12bitlow or 24bit + UCHAR ucConfig; //if none DVO, not defined yet }; UCHAR ucReserved[3]; -} ADJUST_DISPLAY_PLL_PARAMETERS; +}ADJUST_DISPLAY_PLL_PARAMETERS; #define ADJUST_DISPLAY_CONFIG_SS_ENABLE 0x10 - #define ADJUST_DISPLAY_PLL_PS_ALLOCATION ADJUST_DISPLAY_PLL_PARAMETERS -/****************************************************************************/ -/* Structures used by EnableYUVTable */ -/****************************************************************************/ -typedef struct _ENABLE_YUV_PARAMETERS { - UCHAR ucEnable; /* ATOM_ENABLE:Enable YUV or ATOM_DISABLE:Disable YUV (RGB) */ - UCHAR ucCRTC; /* Which CRTC needs this YUV or RGB format */ - UCHAR ucPadding[2]; -} ENABLE_YUV_PARAMETERS; +typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3 +{ + USHORT usPixelClock; // target pixel clock + UCHAR ucTransmitterID; // transmitter id defined in objectid.h + UCHAR ucEncodeMode; // encoder mode: CRT, LVDS, DP, TMDS or HDMI + UCHAR ucDispPllConfig; // display pll configure parameter defined as following DISPPLL_CONFIG_XXXX + UCHAR ucReserved[3]; +}ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3; + +// usDispPllConfig v1.2 for 
RoadRunner +#define DISPPLL_CONFIG_DVO_RATE_SEL 0x0001 // need only when ucTransmitterID = DVO +#define DISPPLL_CONFIG_DVO_DDR_SPEED 0x0000 // need only when ucTransmitterID = DVO +#define DISPPLL_CONFIG_DVO_SDR_SPEED 0x0001 // need only when ucTransmitterID = DVO +#define DISPPLL_CONFIG_DVO_OUTPUT_SEL 0x000c // need only when ucTransmitterID = DVO +#define DISPPLL_CONFIG_DVO_LOW12BIT 0x0000 // need only when ucTransmitterID = DVO +#define DISPPLL_CONFIG_DVO_UPPER12BIT 0x0004 // need only when ucTransmitterID = DVO +#define DISPPLL_CONFIG_DVO_24BIT 0x0008 // need only when ucTransmitterID = DVO +#define DISPPLL_CONFIG_SS_ENABLE 0x0010 // Only used when ucEncoderMode = DP or LVDS +#define DISPPLL_CONFIG_COHERENT_MODE 0x0020 // Only used when ucEncoderMode = TMDS or HDMI +#define DISPPLL_CONFIG_DUAL_LINK 0x0040 // Only used when ucEncoderMode = TMDS or LVDS + + +typedef struct _ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3 +{ + ULONG ulDispPllFreq; // return display PPLL freq which is used to generate the pixclock, and related idclk, symclk etc + UCHAR ucRefDiv; // if it is none-zero, it is used to be calculated the other ppll parameter fb_divider and post_div ( if it is not given ) + UCHAR ucPostDiv; // if it is none-zero, it is used to be calculated the other ppll parameter fb_divider + UCHAR ucReserved[2]; +}ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3; + +typedef struct _ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 +{ + union + { + ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3 sInput; + ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3 sOutput; + }; +} ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3; + +/****************************************************************************/ +// Structures used by EnableYUVTable +/****************************************************************************/ +typedef struct _ENABLE_YUV_PARAMETERS +{ + UCHAR ucEnable; // ATOM_ENABLE:Enable YUV or ATOM_DISABLE:Disable YUV (RGB) + UCHAR ucCRTC; // Which CRTC needs this YUV or RGB format + UCHAR ucPadding[2]; +}ENABLE_YUV_PARAMETERS; #define ENABLE_YUV_PS_ALLOCATION ENABLE_YUV_PARAMETERS -/****************************************************************************/ -/* Structures used by GetMemoryClockTable */ -/****************************************************************************/ -typedef struct _GET_MEMORY_CLOCK_PARAMETERS { - ULONG ulReturnMemoryClock; /* current memory speed in 10KHz unit */ +/****************************************************************************/ +// Structures used by GetMemoryClockTable +/****************************************************************************/ +typedef struct _GET_MEMORY_CLOCK_PARAMETERS +{ + ULONG ulReturnMemoryClock; // current memory speed in 10KHz unit } GET_MEMORY_CLOCK_PARAMETERS; #define GET_MEMORY_CLOCK_PS_ALLOCATION GET_MEMORY_CLOCK_PARAMETERS -/****************************************************************************/ -/* Structures used by GetEngineClockTable */ -/****************************************************************************/ -typedef struct _GET_ENGINE_CLOCK_PARAMETERS { - ULONG ulReturnEngineClock; /* current engine speed in 10KHz unit */ +/****************************************************************************/ +// Structures used by GetEngineClockTable +/****************************************************************************/ +typedef struct _GET_ENGINE_CLOCK_PARAMETERS +{ + ULONG ulReturnEngineClock; // current engine speed in 10KHz unit } GET_ENGINE_CLOCK_PARAMETERS; #define GET_ENGINE_CLOCK_PS_ALLOCATION GET_ENGINE_CLOCK_PARAMETERS 
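/*
 * Illustrative sketch, not driver code and not part of the patch: the
 * ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 block added above overlays its input and
 * output views in a single union; the caller fills sInput, the AdjustDisplayPll
 * command table runs, and the same bytes are then read back through sOutput.
 * A minimal standalone C illustration of that calling pattern follows.  The
 * UCHAR/USHORT/ULONG typedefs, the locally renamed mirror structs and the 0x42
 * transmitter id are assumptions made for this example only; a real caller
 * includes the ATOM headers, takes the id from ObjectID.h and hands the block
 * to the VBIOS command-table interpreter instead of printing it.
 */
#include <stdio.h>
#include <string.h>

typedef unsigned char  UCHAR;
typedef unsigned short USHORT;
typedef unsigned int   ULONG;   /* ATOM ULONG is 32 bit */

#define ATOM_ENCODER_MODE_HDMI       3       /* from the encoder mode list above */
#define DISPPLL_CONFIG_COHERENT_MODE 0x0020  /* only used for TMDS or HDMI */

typedef struct {                 /* mirrors ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3 */
	USHORT usPixelClock;     /* target pixel clock, 10 kHz units */
	UCHAR  ucTransmitterID;  /* transmitter id defined in objectid.h */
	UCHAR  ucEncodeMode;     /* CRT, LVDS, DP, TMDS or HDMI */
	UCHAR  ucDispPllConfig;  /* DISPPLL_CONFIG_XXXX flags */
	UCHAR  ucReserved[3];
} INPUT_V3;

typedef struct {                 /* mirrors ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3 */
	ULONG ulDispPllFreq;     /* returned display PPLL frequency */
	UCHAR ucRefDiv;          /* non-zero: use it when deriving fb_div/post_div */
	UCHAR ucPostDiv;
	UCHAR ucReserved[2];
} OUTPUT_V3;

typedef union {                  /* mirrors ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 */
	INPUT_V3  sInput;
	OUTPUT_V3 sOutput;
} PS_ALLOC_V3;

int main(void)
{
	PS_ALLOC_V3 args;

	memset(&args, 0, sizeof(args));
	args.sInput.usPixelClock    = 14850;  /* 148.5 MHz in 10 kHz units */
	args.sInput.ucTransmitterID = 0x42;   /* placeholder id for illustration only */
	args.sInput.ucEncodeMode    = ATOM_ENCODER_MODE_HDMI;
	args.sInput.ucDispPllConfig = DISPPLL_CONFIG_COHERENT_MODE;

	/* A real driver would execute the AdjustDisplayPll command table here;
	 * afterwards the very same memory is interpreted through the output view. */
	printf("requested pclk: %u (10 kHz)\n", (unsigned)args.sInput.usPixelClock);
	printf("output view:    freq=%u refdiv=%u postdiv=%u\n",
	       (unsigned)args.sOutput.ulDispPllFreq,
	       (unsigned)args.sOutput.ucRefDiv,
	       (unsigned)args.sOutput.ucPostDiv);
	return 0;
}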
-/****************************************************************************/ -/* Following Structures and constant may be obsolete */ -/****************************************************************************/ -/* Maxium 8 bytes,the data read in will be placed in the parameter space. */ -/* Read operaion successeful when the paramter space is non-zero, otherwise read operation failed */ -typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS { - USHORT usPrescale; /* Ratio between Engine clock and I2C clock */ - USHORT usVRAMAddress; /* Adress in Frame Buffer where to pace raw EDID */ - USHORT usStatus; /* When use output: lower byte EDID checksum, high byte hardware status */ - /* WHen use input: lower byte as 'byte to read':currently limited to 128byte or 1byte */ - UCHAR ucSlaveAddr; /* Read from which slave */ - UCHAR ucLineNumber; /* Read from which HW assisted line */ -} READ_EDID_FROM_HW_I2C_DATA_PARAMETERS; +/****************************************************************************/ +// Following Structures and constant may be obsolete +/****************************************************************************/ +//Maxium 8 bytes,the data read in will be placed in the parameter space. +//Read operaion successeful when the paramter space is non-zero, otherwise read operation failed +typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS +{ + USHORT usPrescale; //Ratio between Engine clock and I2C clock + USHORT usVRAMAddress; //Adress in Frame Buffer where to pace raw EDID + USHORT usStatus; //When use output: lower byte EDID checksum, high byte hardware status + //WHen use input: lower byte as 'byte to read':currently limited to 128byte or 1byte + UCHAR ucSlaveAddr; //Read from which slave + UCHAR ucLineNumber; //Read from which HW assisted line +}READ_EDID_FROM_HW_I2C_DATA_PARAMETERS; #define READ_EDID_FROM_HW_I2C_DATA_PS_ALLOCATION READ_EDID_FROM_HW_I2C_DATA_PARAMETERS + #define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSDATABYTE 0 #define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSTWODATABYTES 1 #define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_PSOFFSET_IDDATABLOCK 2 #define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_IDOFFSET_PLUS_IDDATABLOCK 3 #define ATOM_WRITE_I2C_FORMAT_IDCOUNTER_IDOFFSET_IDDATABLOCK 4 -typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS { - USHORT usPrescale; /* Ratio between Engine clock and I2C clock */ - USHORT usByteOffset; /* Write to which byte */ - /* Upper portion of usByteOffset is Format of data */ - /* 1bytePS+offsetPS */ - /* 2bytesPS+offsetPS */ - /* blockID+offsetPS */ - /* blockID+offsetID */ - /* blockID+counterID+offsetID */ - UCHAR ucData; /* PS data1 */ - UCHAR ucStatus; /* Status byte 1=success, 2=failure, Also is used as PS data2 */ - UCHAR ucSlaveAddr; /* Write to which slave */ - UCHAR ucLineNumber; /* Write from which HW assisted line */ -} WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS; +typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS +{ + USHORT usPrescale; //Ratio between Engine clock and I2C clock + USHORT usByteOffset; //Write to which byte + //Upper portion of usByteOffset is Format of data + //1bytePS+offsetPS + //2bytesPS+offsetPS + //blockID+offsetPS + //blockID+offsetID + //blockID+counterID+offsetID + UCHAR ucData; //PS data1 + UCHAR ucStatus; //Status byte 1=success, 2=failure, Also is used as PS data2 + UCHAR ucSlaveAddr; //Write to which slave + UCHAR ucLineNumber; //Write from which HW assisted line +}WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS; #define WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS -typedef struct 
_SET_UP_HW_I2C_DATA_PARAMETERS { - USHORT usPrescale; /* Ratio between Engine clock and I2C clock */ - UCHAR ucSlaveAddr; /* Write to which slave */ - UCHAR ucLineNumber; /* Write from which HW assisted line */ -} SET_UP_HW_I2C_DATA_PARAMETERS; +typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS +{ + USHORT usPrescale; //Ratio between Engine clock and I2C clock + UCHAR ucSlaveAddr; //Write to which slave + UCHAR ucLineNumber; //Write from which HW assisted line +}SET_UP_HW_I2C_DATA_PARAMETERS; + /**************************************************************************/ #define SPEED_FAN_CONTROL_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS -/****************************************************************************/ -/* Structures used by PowerConnectorDetectionTable */ -/****************************************************************************/ -typedef struct _POWER_CONNECTOR_DETECTION_PARAMETERS { - UCHAR ucPowerConnectorStatus; /* Used for return value 0: detected, 1:not detected */ - UCHAR ucPwrBehaviorId; - USHORT usPwrBudget; /* how much power currently boot to in unit of watt */ -} POWER_CONNECTOR_DETECTION_PARAMETERS; - -typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION { - UCHAR ucPowerConnectorStatus; /* Used for return value 0: detected, 1:not detected */ - UCHAR ucReserved; - USHORT usPwrBudget; /* how much power currently boot to in unit of watt */ - WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; -} POWER_CONNECTOR_DETECTION_PS_ALLOCATION; +/****************************************************************************/ +// Structures used by PowerConnectorDetectionTable +/****************************************************************************/ +typedef struct _POWER_CONNECTOR_DETECTION_PARAMETERS +{ + UCHAR ucPowerConnectorStatus; //Used for return value 0: detected, 1:not detected + UCHAR ucPwrBehaviorId; + USHORT usPwrBudget; //how much power currently boot to in unit of watt +}POWER_CONNECTOR_DETECTION_PARAMETERS; + +typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION +{ + UCHAR ucPowerConnectorStatus; //Used for return value 0: detected, 1:not detected + UCHAR ucReserved; + USHORT usPwrBudget; //how much power currently boot to in unit of watt + WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; +}POWER_CONNECTOR_DETECTION_PS_ALLOCATION; /****************************LVDS SS Command Table Definitions**********************/ -/****************************************************************************/ -/* Structures used by EnableSpreadSpectrumOnPPLLTable */ -/****************************************************************************/ -typedef struct _ENABLE_LVDS_SS_PARAMETERS { - USHORT usSpreadSpectrumPercentage; - UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */ - UCHAR ucSpreadSpectrumStepSize_Delay; /* bits3:2 SS_STEP_SIZE; bit 6:4 SS_DELAY */ - UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ - UCHAR ucPadding[3]; -} ENABLE_LVDS_SS_PARAMETERS; - -/* ucTableFormatRevision=1,ucTableContentRevision=2 */ -typedef struct _ENABLE_LVDS_SS_PARAMETERS_V2 { - USHORT usSpreadSpectrumPercentage; - UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. 
Others:TBD */ - UCHAR ucSpreadSpectrumStep; /* */ - UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ - UCHAR ucSpreadSpectrumDelay; - UCHAR ucSpreadSpectrumRange; - UCHAR ucPadding; -} ENABLE_LVDS_SS_PARAMETERS_V2; - -/* This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS. */ -typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL { - USHORT usSpreadSpectrumPercentage; - UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */ - UCHAR ucSpreadSpectrumStep; /* */ - UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ - UCHAR ucSpreadSpectrumDelay; - UCHAR ucSpreadSpectrumRange; - UCHAR ucPpll; /* ATOM_PPLL1/ATOM_PPLL2 */ -} ENABLE_SPREAD_SPECTRUM_ON_PPLL; +/****************************************************************************/ +// Structures used by EnableSpreadSpectrumOnPPLLTable +/****************************************************************************/ +typedef struct _ENABLE_LVDS_SS_PARAMETERS +{ + USHORT usSpreadSpectrumPercentage; + UCHAR ucSpreadSpectrumType; //Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD + UCHAR ucSpreadSpectrumStepSize_Delay; //bits3:2 SS_STEP_SIZE; bit 6:4 SS_DELAY + UCHAR ucEnable; //ATOM_ENABLE or ATOM_DISABLE + UCHAR ucPadding[3]; +}ENABLE_LVDS_SS_PARAMETERS; + +//ucTableFormatRevision=1,ucTableContentRevision=2 +typedef struct _ENABLE_LVDS_SS_PARAMETERS_V2 +{ + USHORT usSpreadSpectrumPercentage; + UCHAR ucSpreadSpectrumType; //Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD + UCHAR ucSpreadSpectrumStep; // + UCHAR ucEnable; //ATOM_ENABLE or ATOM_DISABLE + UCHAR ucSpreadSpectrumDelay; + UCHAR ucSpreadSpectrumRange; + UCHAR ucPadding; +}ENABLE_LVDS_SS_PARAMETERS_V2; + +//This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS. +typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL +{ + USHORT usSpreadSpectrumPercentage; + UCHAR ucSpreadSpectrumType; // Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD + UCHAR ucSpreadSpectrumStep; // + UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE + UCHAR ucSpreadSpectrumDelay; + UCHAR ucSpreadSpectrumRange; + UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2 +}ENABLE_SPREAD_SPECTRUM_ON_PPLL; + +typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 +{ + USHORT usSpreadSpectrumPercentage; + UCHAR ucSpreadSpectrumType; // Bit[0]: 0-Down Spread,1-Center Spread. + // Bit[1]: 1-Ext. 0-Int. 
+ // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL + // Bits[7:4] reserved + UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE + USHORT usSpreadSpectrumAmount; // Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8] + USHORT usSpreadSpectrumStep; // SS_STEP_SIZE_DSFRAC +}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2; + +#define ATOM_PPLL_SS_TYPE_V2_DOWN_SPREAD 0x00 +#define ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD 0x01 +#define ATOM_PPLL_SS_TYPE_V2_EXT_SPREAD 0x02 +#define ATOM_PPLL_SS_TYPE_V2_PPLL_SEL_MASK 0x0c +#define ATOM_PPLL_SS_TYPE_V2_P1PLL 0x00 +#define ATOM_PPLL_SS_TYPE_V2_P2PLL 0x04 +#define ATOM_PPLL_SS_TYPE_V2_DCPLL 0x08 +#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK 0x00FF +#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT 0 +#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK 0x0F00 +#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT 8 #define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL /**************************************************************************/ -typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION { - PIXEL_CLOCK_PARAMETERS sPCLKInput; - ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved; /* Caller doesn't need to init this portion */ -} SET_PIXEL_CLOCK_PS_ALLOCATION; +typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION +{ + PIXEL_CLOCK_PARAMETERS sPCLKInput; + ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;//Caller doesn't need to init this portion +}SET_PIXEL_CLOCK_PS_ALLOCATION; #define ENABLE_VGA_RENDER_PS_ALLOCATION SET_PIXEL_CLOCK_PS_ALLOCATION -/****************************************************************************/ -/* Structures used by ### */ -/****************************************************************************/ -typedef struct _MEMORY_TRAINING_PARAMETERS { - ULONG ulTargetMemoryClock; /* In 10Khz unit */ -} MEMORY_TRAINING_PARAMETERS; +/****************************************************************************/ +// Structures used by ### +/****************************************************************************/ +typedef struct _MEMORY_TRAINING_PARAMETERS +{ + ULONG ulTargetMemoryClock; //In 10Khz unit +}MEMORY_TRAINING_PARAMETERS; #define MEMORY_TRAINING_PS_ALLOCATION MEMORY_TRAINING_PARAMETERS + /****************************LVDS and other encoder command table definitions **********************/ -/****************************************************************************/ -/* Structures used by LVDSEncoderControlTable (Before DCE30) */ -/* LVTMAEncoderControlTable (Before DCE30) */ -/* TMDSAEncoderControlTable (Before DCE30) */ -/****************************************************************************/ -typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS { - USHORT usPixelClock; /* in 10KHz; for bios convenient */ - UCHAR ucMisc; /* bit0=0: Enable single link */ - /* =1: Enable dual link */ - /* Bit1=0: 666RGB */ - /* =1: 888RGB */ - UCHAR ucAction; /* 0: turn off encoder */ - /* 1: setup and turn on encoder */ -} LVDS_ENCODER_CONTROL_PARAMETERS; -#define LVDS_ENCODER_CONTROL_PS_ALLOCATION LVDS_ENCODER_CONTROL_PARAMETERS +/****************************************************************************/ +// Structures used by LVDSEncoderControlTable (Before DCE30) +// LVTMAEncoderControlTable (Before DCE30) +// TMDSAEncoderControlTable (Before DCE30) +/****************************************************************************/ +typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS +{ + USHORT usPixelClock; // in 10KHz; for bios convenient + UCHAR ucMisc; // bit0=0: Enable single link + // =1: Enable dual link + // Bit1=0: 666RGB + // =1: 888RGB + UCHAR ucAction; // 0: turn off 
encoder + // 1: setup and turn on encoder +}LVDS_ENCODER_CONTROL_PARAMETERS; +#define LVDS_ENCODER_CONTROL_PS_ALLOCATION LVDS_ENCODER_CONTROL_PARAMETERS + #define TMDS1_ENCODER_CONTROL_PARAMETERS LVDS_ENCODER_CONTROL_PARAMETERS #define TMDS1_ENCODER_CONTROL_PS_ALLOCATION TMDS1_ENCODER_CONTROL_PARAMETERS #define TMDS2_ENCODER_CONTROL_PARAMETERS TMDS1_ENCODER_CONTROL_PARAMETERS #define TMDS2_ENCODER_CONTROL_PS_ALLOCATION TMDS2_ENCODER_CONTROL_PARAMETERS -/* ucTableFormatRevision=1,ucTableContentRevision=2 */ -typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 { - USHORT usPixelClock; /* in 10KHz; for bios convenient */ - UCHAR ucMisc; /* see PANEL_ENCODER_MISC_xx definitions below */ - UCHAR ucAction; /* 0: turn off encoder */ - /* 1: setup and turn on encoder */ - UCHAR ucTruncate; /* bit0=0: Disable truncate */ - /* =1: Enable truncate */ - /* bit4=0: 666RGB */ - /* =1: 888RGB */ - UCHAR ucSpatial; /* bit0=0: Disable spatial dithering */ - /* =1: Enable spatial dithering */ - /* bit4=0: 666RGB */ - /* =1: 888RGB */ - UCHAR ucTemporal; /* bit0=0: Disable temporal dithering */ - /* =1: Enable temporal dithering */ - /* bit4=0: 666RGB */ - /* =1: 888RGB */ - /* bit5=0: Gray level 2 */ - /* =1: Gray level 4 */ - UCHAR ucFRC; /* bit4=0: 25FRC_SEL pattern E */ - /* =1: 25FRC_SEL pattern F */ - /* bit6:5=0: 50FRC_SEL pattern A */ - /* =1: 50FRC_SEL pattern B */ - /* =2: 50FRC_SEL pattern C */ - /* =3: 50FRC_SEL pattern D */ - /* bit7=0: 75FRC_SEL pattern E */ - /* =1: 75FRC_SEL pattern F */ -} LVDS_ENCODER_CONTROL_PARAMETERS_V2; -#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2 +//ucTableFormatRevision=1,ucTableContentRevision=2 +typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 +{ + USHORT usPixelClock; // in 10KHz; for bios convenient + UCHAR ucMisc; // see PANEL_ENCODER_MISC_xx defintions below + UCHAR ucAction; // 0: turn off encoder + // 1: setup and turn on encoder + UCHAR ucTruncate; // bit0=0: Disable truncate + // =1: Enable truncate + // bit4=0: 666RGB + // =1: 888RGB + UCHAR ucSpatial; // bit0=0: Disable spatial dithering + // =1: Enable spatial dithering + // bit4=0: 666RGB + // =1: 888RGB + UCHAR ucTemporal; // bit0=0: Disable temporal dithering + // =1: Enable temporal dithering + // bit4=0: 666RGB + // =1: 888RGB + // bit5=0: Gray level 2 + // =1: Gray level 4 + UCHAR ucFRC; // bit4=0: 25FRC_SEL pattern E + // =1: 25FRC_SEL pattern F + // bit6:5=0: 50FRC_SEL pattern A + // =1: 50FRC_SEL pattern B + // =2: 50FRC_SEL pattern C + // =3: 50FRC_SEL pattern D + // bit7=0: 75FRC_SEL pattern E + // =1: 75FRC_SEL pattern F +}LVDS_ENCODER_CONTROL_PARAMETERS_V2; +#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2 + #define TMDS1_ENCODER_CONTROL_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2 #define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2 - + #define TMDS2_ENCODER_CONTROL_PARAMETERS_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2 #define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS2_ENCODER_CONTROL_PARAMETERS_V2 @@ -1185,38 +1536,42 @@ typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 { #define TMDS2_ENCODER_CONTROL_PARAMETERS_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V3 #define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS2_ENCODER_CONTROL_PARAMETERS_V3 -/****************************************************************************/ -/* Structures used by ### */ -/****************************************************************************/ -typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS { - UCHAR 
ucEnable; /* Enable or Disable External TMDS encoder */ - UCHAR ucMisc; /* Bit0=0:Enable Single link;=1:Enable Dual link;Bit1 {=0:666RGB, =1:888RGB} */ - UCHAR ucPadding[2]; -} ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS; - -typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION { - ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS sXTmdsEncoder; - WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Caller doesn't need to init this portion */ -} ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION; +/****************************************************************************/ +// Structures used by ### +/****************************************************************************/ +typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS +{ + UCHAR ucEnable; // Enable or Disable External TMDS encoder + UCHAR ucMisc; // Bit0=0:Enable Single link;=1:Enable Dual link;Bit1 {=0:666RGB, =1:888RGB} + UCHAR ucPadding[2]; +}ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS; + +typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION +{ + ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS sXTmdsEncoder; + WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion +}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION; #define ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2 -typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2 { - ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 sXTmdsEncoder; - WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Caller doesn't need to init this portion */ -} ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2; +typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2 +{ + ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 sXTmdsEncoder; + WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion +}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2; -typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION { - DIG_ENCODER_CONTROL_PARAMETERS sDigEncoder; - WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; -} EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION; +typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION +{ + DIG_ENCODER_CONTROL_PARAMETERS sDigEncoder; + WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; +}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION; -/****************************************************************************/ -/* Structures used by DVOEncoderControlTable */ -/****************************************************************************/ -/* ucTableFormatRevision=1,ucTableContentRevision=3 */ +/****************************************************************************/ +// Structures used by DVOEncoderControlTable +/****************************************************************************/ +//ucTableFormatRevision=1,ucTableContentRevision=3 -/* ucDVOConfig: */ +//ucDVOConfig: #define DVO_ENCODER_CONFIG_RATE_SEL 0x01 #define DVO_ENCODER_CONFIG_DDR_SPEED 0x00 #define DVO_ENCODER_CONFIG_SDR_SPEED 0x01 @@ -1225,21 +1580,22 @@ typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION { #define DVO_ENCODER_CONFIG_UPPER12BIT 0x04 #define DVO_ENCODER_CONFIG_24BIT 0x08 -typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 { - USHORT usPixelClock; - UCHAR ucDVOConfig; - UCHAR ucAction; /* ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT */ - UCHAR ucReseved[4]; -} DVO_ENCODER_CONTROL_PARAMETERS_V3; +typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 +{ + USHORT usPixelClock; + UCHAR ucDVOConfig; + UCHAR ucAction; //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT + UCHAR ucReseved[4]; +}DVO_ENCODER_CONTROL_PARAMETERS_V3; #define 
DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 DVO_ENCODER_CONTROL_PARAMETERS_V3 -/* ucTableFormatRevision=1 */ -/* ucTableContentRevision=3 structure is not changed but usMisc add bit 1 as another input for */ -/* bit1=0: non-coherent mode */ -/* =1: coherent mode */ +//ucTableFormatRevision=1 +//ucTableContentRevision=3 structure is not changed but usMisc add bit 1 as another input for +// bit1=0: non-coherent mode +// =1: coherent mode -/* ========================================================================================== */ -/* Only change is here next time when changing encoder parameter definitions again! */ +//========================================================================================== +//Only change is here next time when changing encoder parameter definitions again! #define LVDS_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3 #define LVDS_ENCODER_CONTROL_PS_ALLOCATION_LAST LVDS_ENCODER_CONTROL_PARAMETERS_LAST @@ -1252,7 +1608,7 @@ typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 { #define DVO_ENCODER_CONTROL_PARAMETERS_LAST DVO_ENCODER_CONTROL_PARAMETERS #define DVO_ENCODER_CONTROL_PS_ALLOCATION_LAST DVO_ENCODER_CONTROL_PS_ALLOCATION -/* ========================================================================================== */ +//========================================================================================== #define PANEL_ENCODER_MISC_DUAL 0x01 #define PANEL_ENCODER_MISC_COHERENT 0x02 #define PANEL_ENCODER_MISC_TMDS_LINKB 0x04 @@ -1281,159 +1637,159 @@ typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 { #define PANEL_ENCODER_75FRC_E 0x00 #define PANEL_ENCODER_75FRC_F 0x80 -/****************************************************************************/ -/* Structures used by SetVoltageTable */ -/****************************************************************************/ +/****************************************************************************/ +// Structures used by SetVoltageTable +/****************************************************************************/ #define SET_VOLTAGE_TYPE_ASIC_VDDC 1 #define SET_VOLTAGE_TYPE_ASIC_MVDDC 2 #define SET_VOLTAGE_TYPE_ASIC_MVDDQ 3 #define SET_VOLTAGE_TYPE_ASIC_VDDCI 4 #define SET_VOLTAGE_INIT_MODE 5 -#define SET_VOLTAGE_GET_MAX_VOLTAGE 6 /* Gets the Max. voltage for the soldered Asic */ +#define SET_VOLTAGE_GET_MAX_VOLTAGE 6 //Gets the Max. voltage for the soldered Asic #define SET_ASIC_VOLTAGE_MODE_ALL_SOURCE 0x1 #define SET_ASIC_VOLTAGE_MODE_SOURCE_A 0x2 #define SET_ASIC_VOLTAGE_MODE_SOURCE_B 0x4 #define SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE 0x0 -#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL 0x1 +#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL 0x1 #define SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK 0x2 -typedef struct _SET_VOLTAGE_PARAMETERS { - UCHAR ucVoltageType; /* To tell which voltage to set up, VDDC/MVDDC/MVDDQ */ - UCHAR ucVoltageMode; /* To set all, to set source A or source B or ... 
*/ - UCHAR ucVoltageIndex; /* An index to tell which voltage level */ - UCHAR ucReserved; -} SET_VOLTAGE_PARAMETERS; - -typedef struct _SET_VOLTAGE_PARAMETERS_V2 { - UCHAR ucVoltageType; /* To tell which voltage to set up, VDDC/MVDDC/MVDDQ */ - UCHAR ucVoltageMode; /* Not used, maybe use for state machine for differen power mode */ - USHORT usVoltageLevel; /* real voltage level */ -} SET_VOLTAGE_PARAMETERS_V2; - -typedef struct _SET_VOLTAGE_PS_ALLOCATION { - SET_VOLTAGE_PARAMETERS sASICSetVoltage; - WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; -} SET_VOLTAGE_PS_ALLOCATION; - -/****************************************************************************/ -/* Structures used by TVEncoderControlTable */ -/****************************************************************************/ -typedef struct _TV_ENCODER_CONTROL_PARAMETERS { - USHORT usPixelClock; /* in 10KHz; for bios convenient */ - UCHAR ucTvStandard; /* See definition "ATOM_TV_NTSC ..." */ - UCHAR ucAction; /* 0: turn off encoder */ - /* 1: setup and turn on encoder */ -} TV_ENCODER_CONTROL_PARAMETERS; - -typedef struct _TV_ENCODER_CONTROL_PS_ALLOCATION { - TV_ENCODER_CONTROL_PARAMETERS sTVEncoder; - WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Don't set this one */ -} TV_ENCODER_CONTROL_PS_ALLOCATION; - -/* ==============================Data Table Portion==================================== */ - -#ifdef UEFI_BUILD -#define UTEMP USHORT -#define USHORT void* -#endif - -/****************************************************************************/ -/* Structure used in Data.mtb */ -/****************************************************************************/ -typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES { - USHORT UtilityPipeLine; /* Offest for the utility to get parser info,Don't change this position! 
*/ - USHORT MultimediaCapabilityInfo; /* Only used by MM Lib,latest version 1.1, not configuable from Bios, need to include the table to build Bios */ - USHORT MultimediaConfigInfo; /* Only used by MM Lib,latest version 2.1, not configuable from Bios, need to include the table to build Bios */ - USHORT StandardVESA_Timing; /* Only used by Bios */ - USHORT FirmwareInfo; /* Shared by various SW components,latest version 1.4 */ - USHORT DAC_Info; /* Will be obsolete from R600 */ - USHORT LVDS_Info; /* Shared by various SW components,latest version 1.1 */ - USHORT TMDS_Info; /* Will be obsolete from R600 */ - USHORT AnalogTV_Info; /* Shared by various SW components,latest version 1.1 */ - USHORT SupportedDevicesInfo; /* Will be obsolete from R600 */ - USHORT GPIO_I2C_Info; /* Shared by various SW components,latest version 1.2 will be used from R600 */ - USHORT VRAM_UsageByFirmware; /* Shared by various SW components,latest version 1.3 will be used from R600 */ - USHORT GPIO_Pin_LUT; /* Shared by various SW components,latest version 1.1 */ - USHORT VESA_ToInternalModeLUT; /* Only used by Bios */ - USHORT ComponentVideoInfo; /* Shared by various SW components,latest version 2.1 will be used from R600 */ - USHORT PowerPlayInfo; /* Shared by various SW components,latest version 2.1,new design from R600 */ - USHORT CompassionateData; /* Will be obsolete from R600 */ - USHORT SaveRestoreInfo; /* Only used by Bios */ - USHORT PPLL_SS_Info; /* Shared by various SW components,latest version 1.2, used to call SS_Info, change to new name because of int ASIC SS info */ - USHORT OemInfo; /* Defined and used by external SW, should be obsolete soon */ - USHORT XTMDS_Info; /* Will be obsolete from R600 */ - USHORT MclkSS_Info; /* Shared by various SW components,latest version 1.1, only enabled when ext SS chip is used */ - USHORT Object_Header; /* Shared by various SW components,latest version 1.1 */ - USHORT IndirectIOAccess; /* Only used by Bios,this table position can't change at all!! */ - USHORT MC_InitParameter; /* Only used by command table */ - USHORT ASIC_VDDC_Info; /* Will be obsolete from R600 */ - USHORT ASIC_InternalSS_Info; /* New tabel name from R600, used to be called "ASIC_MVDDC_Info" */ - USHORT TV_VideoMode; /* Only used by command table */ - USHORT VRAM_Info; /* Only used by command table, latest version 1.3 */ - USHORT MemoryTrainingInfo; /* Used for VBIOS and Diag utility for memory training purpose since R600. the new table rev start from 2.1 */ - USHORT IntegratedSystemInfo; /* Shared by various SW components */ - USHORT ASIC_ProfilingInfo; /* New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600 */ - USHORT VoltageObjectInfo; /* Shared by various SW components, latest version 1.1 */ - USHORT PowerSourceInfo; /* Shared by various SW components, latest versoin 1.1 */ -} ATOM_MASTER_LIST_OF_DATA_TABLES; - -#ifdef UEFI_BUILD -#define USHORT UTEMP -#endif +typedef struct _SET_VOLTAGE_PARAMETERS +{ + UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ + UCHAR ucVoltageMode; // To set all, to set source A or source B or ... 
+ UCHAR ucVoltageIndex; // An index to tell which voltage level + UCHAR ucReserved; +}SET_VOLTAGE_PARAMETERS; -typedef struct _ATOM_MASTER_DATA_TABLE { - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables; -} ATOM_MASTER_DATA_TABLE; +typedef struct _SET_VOLTAGE_PARAMETERS_V2 +{ + UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ + UCHAR ucVoltageMode; // Not used, maybe use for state machine for differen power mode + USHORT usVoltageLevel; // real voltage level +}SET_VOLTAGE_PARAMETERS_V2; -/****************************************************************************/ -/* Structure used in MultimediaCapabilityInfoTable */ -/****************************************************************************/ -typedef struct _ATOM_MULTIMEDIA_CAPABILITY_INFO { - ATOM_COMMON_TABLE_HEADER sHeader; - ULONG ulSignature; /* HW info table signature string "$ATI" */ - UCHAR ucI2C_Type; /* I2C type (normal GP_IO, ImpactTV GP_IO, Dedicated I2C pin, etc) */ - UCHAR ucTV_OutInfo; /* Type of TV out supported (3:0) and video out crystal frequency (6:4) and TV data port (7) */ - UCHAR ucVideoPortInfo; /* Provides the video port capabilities */ - UCHAR ucHostPortInfo; /* Provides host port configuration information */ -} ATOM_MULTIMEDIA_CAPABILITY_INFO; +typedef struct _SET_VOLTAGE_PS_ALLOCATION +{ + SET_VOLTAGE_PARAMETERS sASICSetVoltage; + WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; +}SET_VOLTAGE_PS_ALLOCATION; + +/****************************************************************************/ +// Structures used by TVEncoderControlTable +/****************************************************************************/ +typedef struct _TV_ENCODER_CONTROL_PARAMETERS +{ + USHORT usPixelClock; // in 10KHz; for bios convenient + UCHAR ucTvStandard; // See definition "ATOM_TV_NTSC ..." 
+ UCHAR ucAction; // 0: turn off encoder + // 1: setup and turn on encoder +}TV_ENCODER_CONTROL_PARAMETERS; -/****************************************************************************/ -/* Structure used in MultimediaConfigInfoTable */ -/****************************************************************************/ -typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO { - ATOM_COMMON_TABLE_HEADER sHeader; - ULONG ulSignature; /* MM info table signature sting "$MMT" */ - UCHAR ucTunerInfo; /* Type of tuner installed on the adapter (4:0) and video input for tuner (7:5) */ - UCHAR ucAudioChipInfo; /* List the audio chip type (3:0) product type (4) and OEM revision (7:5) */ - UCHAR ucProductID; /* Defines as OEM ID or ATI board ID dependent on product type setting */ - UCHAR ucMiscInfo1; /* Tuner voltage (1:0) HW teletext support (3:2) FM audio decoder (5:4) reserved (6) audio scrambling (7) */ - UCHAR ucMiscInfo2; /* I2S input config (0) I2S output config (1) I2S Audio Chip (4:2) SPDIF Output Config (5) reserved (7:6) */ - UCHAR ucMiscInfo3; /* Video Decoder Type (3:0) Video In Standard/Crystal (7:4) */ - UCHAR ucMiscInfo4; /* Video Decoder Host Config (2:0) reserved (7:3) */ - UCHAR ucVideoInput0Info; /* Video Input 0 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */ - UCHAR ucVideoInput1Info; /* Video Input 1 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */ - UCHAR ucVideoInput2Info; /* Video Input 2 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */ - UCHAR ucVideoInput3Info; /* Video Input 3 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */ - UCHAR ucVideoInput4Info; /* Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */ -} ATOM_MULTIMEDIA_CONFIG_INFO; +typedef struct _TV_ENCODER_CONTROL_PS_ALLOCATION +{ + TV_ENCODER_CONTROL_PARAMETERS sTVEncoder; + WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; // Don't set this one +}TV_ENCODER_CONTROL_PS_ALLOCATION; -/****************************************************************************/ -/* Structures used in FirmwareInfoTable */ -/****************************************************************************/ +//==============================Data Table Portion==================================== -/* usBIOSCapability Definition: */ -/* Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted; */ -/* Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported; */ -/* Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported; */ -/* Others: Reserved */ +/****************************************************************************/ +// Structure used in Data.mtb +/****************************************************************************/ +typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES +{ + USHORT UtilityPipeLine; // Offest for the utility to get parser info,Don't change this position! 
+ USHORT MultimediaCapabilityInfo; // Only used by MM Lib,latest version 1.1, not configuable from Bios, need to include the table to build Bios + USHORT MultimediaConfigInfo; // Only used by MM Lib,latest version 2.1, not configuable from Bios, need to include the table to build Bios + USHORT StandardVESA_Timing; // Only used by Bios + USHORT FirmwareInfo; // Shared by various SW components,latest version 1.4 + USHORT DAC_Info; // Will be obsolete from R600 + USHORT LVDS_Info; // Shared by various SW components,latest version 1.1 + USHORT TMDS_Info; // Will be obsolete from R600 + USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1 + USHORT SupportedDevicesInfo; // Will be obsolete from R600 + USHORT GPIO_I2C_Info; // Shared by various SW components,latest version 1.2 will be used from R600 + USHORT VRAM_UsageByFirmware; // Shared by various SW components,latest version 1.3 will be used from R600 + USHORT GPIO_Pin_LUT; // Shared by various SW components,latest version 1.1 + USHORT VESA_ToInternalModeLUT; // Only used by Bios + USHORT ComponentVideoInfo; // Shared by various SW components,latest version 2.1 will be used from R600 + USHORT PowerPlayInfo; // Shared by various SW components,latest version 2.1,new design from R600 + USHORT CompassionateData; // Will be obsolete from R600 + USHORT SaveRestoreInfo; // Only used by Bios + USHORT PPLL_SS_Info; // Shared by various SW components,latest version 1.2, used to call SS_Info, change to new name because of int ASIC SS info + USHORT OemInfo; // Defined and used by external SW, should be obsolete soon + USHORT XTMDS_Info; // Will be obsolete from R600 + USHORT MclkSS_Info; // Shared by various SW components,latest version 1.1, only enabled when ext SS chip is used + USHORT Object_Header; // Shared by various SW components,latest version 1.1 + USHORT IndirectIOAccess; // Only used by Bios,this table position can't change at all!! + USHORT MC_InitParameter; // Only used by command table + USHORT ASIC_VDDC_Info; // Will be obsolete from R600 + USHORT ASIC_InternalSS_Info; // New tabel name from R600, used to be called "ASIC_MVDDC_Info" + USHORT TV_VideoMode; // Only used by command table + USHORT VRAM_Info; // Only used by command table, latest version 1.3 + USHORT MemoryTrainingInfo; // Used for VBIOS and Diag utility for memory training purpose since R600. 
the new table rev start from 2.1 + USHORT IntegratedSystemInfo; // Shared by various SW components + USHORT ASIC_ProfilingInfo; // New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600 + USHORT VoltageObjectInfo; // Shared by various SW components, latest version 1.1 + USHORT PowerSourceInfo; // Shared by various SW components, latest versoin 1.1 +}ATOM_MASTER_LIST_OF_DATA_TABLES; + +typedef struct _ATOM_MASTER_DATA_TABLE +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables; +}ATOM_MASTER_DATA_TABLE; + +/****************************************************************************/ +// Structure used in MultimediaCapabilityInfoTable +/****************************************************************************/ +typedef struct _ATOM_MULTIMEDIA_CAPABILITY_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ULONG ulSignature; // HW info table signature string "$ATI" + UCHAR ucI2C_Type; // I2C type (normal GP_IO, ImpactTV GP_IO, Dedicated I2C pin, etc) + UCHAR ucTV_OutInfo; // Type of TV out supported (3:0) and video out crystal frequency (6:4) and TV data port (7) + UCHAR ucVideoPortInfo; // Provides the video port capabilities + UCHAR ucHostPortInfo; // Provides host port configuration information +}ATOM_MULTIMEDIA_CAPABILITY_INFO; + +/****************************************************************************/ +// Structure used in MultimediaConfigInfoTable +/****************************************************************************/ +typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ULONG ulSignature; // MM info table signature sting "$MMT" + UCHAR ucTunerInfo; // Type of tuner installed on the adapter (4:0) and video input for tuner (7:5) + UCHAR ucAudioChipInfo; // List the audio chip type (3:0) product type (4) and OEM revision (7:5) + UCHAR ucProductID; // Defines as OEM ID or ATI board ID dependent on product type setting + UCHAR ucMiscInfo1; // Tuner voltage (1:0) HW teletext support (3:2) FM audio decoder (5:4) reserved (6) audio scrambling (7) + UCHAR ucMiscInfo2; // I2S input config (0) I2S output config (1) I2S Audio Chip (4:2) SPDIF Output Config (5) reserved (7:6) + UCHAR ucMiscInfo3; // Video Decoder Type (3:0) Video In Standard/Crystal (7:4) + UCHAR ucMiscInfo4; // Video Decoder Host Config (2:0) reserved (7:3) + UCHAR ucVideoInput0Info;// Video Input 0 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) + UCHAR ucVideoInput1Info;// Video Input 1 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) + UCHAR ucVideoInput2Info;// Video Input 2 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) + UCHAR ucVideoInput3Info;// Video Input 3 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) + UCHAR ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) +}ATOM_MULTIMEDIA_CONFIG_INFO; + +/****************************************************************************/ +// Structures used in FirmwareInfoTable +/****************************************************************************/ + +// usBIOSCapability Defintion: +// Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted; +// Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported; +// Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported; +// Others: Reserved #define ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED 0x0001 #define ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT 0x0002 #define 
ATOM_BIOS_INFO_EXTENDED_DESKTOP_SUPPORT 0x0004 -#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT 0x0008 -#define ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT 0x0010 +#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT 0x0008 // (valid from v1.1 ~v1.4):=1: memclk SS enable, =0 memclk SS disable. +#define ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT 0x0010 // (valid from v1.1 ~v1.4):=1: engclk SS enable, =0 engclk SS disable. #define ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU 0x0020 #define ATOM_BIOS_INFO_WMI_SUPPORT 0x0040 #define ATOM_BIOS_INFO_PPMODE_ASSIGNGED_BY_SYSTEM 0x0080 @@ -1441,242 +1797,292 @@ typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO { #define ATOM_BIOS_INFO_HYPERMEMORY_SIZE_MASK 0x1E00 #define ATOM_BIOS_INFO_VPOST_WITHOUT_FIRST_MODE_SET 0x2000 #define ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE 0x4000 +#define ATOM_BIOS_INFO_MEMORY_CLOCK_EXT_SS_SUPPORT 0x0008 // (valid from v2.1 ): =1: memclk ss enable with external ss chip +#define ATOM_BIOS_INFO_ENGINE_CLOCK_EXT_SS_SUPPORT 0x0010 // (valid from v2.1 ): =1: engclk ss enable with external ss chip #ifndef _H2INC -/* Please don't add or expand this bitfield structure below, this one will retire soon.! */ -typedef struct _ATOM_FIRMWARE_CAPABILITY { +//Please don't add or expand this bitfield structure below, this one will retire soon.! +typedef struct _ATOM_FIRMWARE_CAPABILITY +{ #if ATOM_BIG_ENDIAN - USHORT Reserved:3; - USHORT HyperMemory_Size:4; - USHORT HyperMemory_Support:1; - USHORT PPMode_Assigned:1; - USHORT WMI_SUPPORT:1; - USHORT GPUControlsBL:1; - USHORT EngineClockSS_Support:1; - USHORT MemoryClockSS_Support:1; - USHORT ExtendedDesktopSupport:1; - USHORT DualCRTC_Support:1; - USHORT FirmwarePosted:1; + USHORT Reserved:3; + USHORT HyperMemory_Size:4; + USHORT HyperMemory_Support:1; + USHORT PPMode_Assigned:1; + USHORT WMI_SUPPORT:1; + USHORT GPUControlsBL:1; + USHORT EngineClockSS_Support:1; + USHORT MemoryClockSS_Support:1; + USHORT ExtendedDesktopSupport:1; + USHORT DualCRTC_Support:1; + USHORT FirmwarePosted:1; #else - USHORT FirmwarePosted:1; - USHORT DualCRTC_Support:1; - USHORT ExtendedDesktopSupport:1; - USHORT MemoryClockSS_Support:1; - USHORT EngineClockSS_Support:1; - USHORT GPUControlsBL:1; - USHORT WMI_SUPPORT:1; - USHORT PPMode_Assigned:1; - USHORT HyperMemory_Support:1; - USHORT HyperMemory_Size:4; - USHORT Reserved:3; + USHORT FirmwarePosted:1; + USHORT DualCRTC_Support:1; + USHORT ExtendedDesktopSupport:1; + USHORT MemoryClockSS_Support:1; + USHORT EngineClockSS_Support:1; + USHORT GPUControlsBL:1; + USHORT WMI_SUPPORT:1; + USHORT PPMode_Assigned:1; + USHORT HyperMemory_Support:1; + USHORT HyperMemory_Size:4; + USHORT Reserved:3; #endif -} ATOM_FIRMWARE_CAPABILITY; +}ATOM_FIRMWARE_CAPABILITY; -typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS { - ATOM_FIRMWARE_CAPABILITY sbfAccess; - USHORT susAccess; -} ATOM_FIRMWARE_CAPABILITY_ACCESS; +typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS +{ + ATOM_FIRMWARE_CAPABILITY sbfAccess; + USHORT susAccess; +}ATOM_FIRMWARE_CAPABILITY_ACCESS; #else -typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS { - USHORT susAccess; -} ATOM_FIRMWARE_CAPABILITY_ACCESS; +typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS +{ + USHORT susAccess; +}ATOM_FIRMWARE_CAPABILITY_ACCESS; #endif -typedef struct _ATOM_FIRMWARE_INFO { - ATOM_COMMON_TABLE_HEADER sHeader; - ULONG ulFirmwareRevision; - ULONG ulDefaultEngineClock; /* In 10Khz unit */ - ULONG ulDefaultMemoryClock; /* In 10Khz unit */ - ULONG ulDriverTargetEngineClock; /* In 10Khz unit */ - ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */ - ULONG 
ulMaxEngineClockPLL_Output; /* In 10Khz unit */ - ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */ - ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */ - ULONG ulASICMaxEngineClock; /* In 10Khz unit */ - ULONG ulASICMaxMemoryClock; /* In 10Khz unit */ - UCHAR ucASICMaxTemperature; - UCHAR ucPadding[3]; /* Don't use them */ - ULONG aulReservedForBIOS[3]; /* Don't use them */ - USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */ - USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */ - USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */ - USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */ - USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */ - USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */ - USHORT usMaxPixelClock; /* In 10Khz unit, Max. Pclk */ - USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */ - USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */ - USHORT usMinPixelClockPLL_Output; /* In 10Khz unit, the definitions above can't change!!! */ - ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; - USHORT usReferenceClock; /* In 10Khz unit */ - USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */ - UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */ - UCHAR ucDesign_ID; /* Indicate what is the board design */ - UCHAR ucMemoryModule_ID; /* Indicate what is the board design */ -} ATOM_FIRMWARE_INFO; - -typedef struct _ATOM_FIRMWARE_INFO_V1_2 { - ATOM_COMMON_TABLE_HEADER sHeader; - ULONG ulFirmwareRevision; - ULONG ulDefaultEngineClock; /* In 10Khz unit */ - ULONG ulDefaultMemoryClock; /* In 10Khz unit */ - ULONG ulDriverTargetEngineClock; /* In 10Khz unit */ - ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */ - ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */ - ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */ - ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */ - ULONG ulASICMaxEngineClock; /* In 10Khz unit */ - ULONG ulASICMaxMemoryClock; /* In 10Khz unit */ - UCHAR ucASICMaxTemperature; - UCHAR ucMinAllowedBL_Level; - UCHAR ucPadding[2]; /* Don't use them */ - ULONG aulReservedForBIOS[2]; /* Don't use them */ - ULONG ulMinPixelClockPLL_Output; /* In 10Khz unit */ - USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */ - USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */ - USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */ - USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */ - USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */ - USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */ - USHORT usMaxPixelClock; /* In 10Khz unit, Max. 
Pclk */ - USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */ - USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */ - USHORT usMinPixelClockPLL_Output; /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */ - ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; - USHORT usReferenceClock; /* In 10Khz unit */ - USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */ - UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */ - UCHAR ucDesign_ID; /* Indicate what is the board design */ - UCHAR ucMemoryModule_ID; /* Indicate what is the board design */ -} ATOM_FIRMWARE_INFO_V1_2; - -typedef struct _ATOM_FIRMWARE_INFO_V1_3 { - ATOM_COMMON_TABLE_HEADER sHeader; - ULONG ulFirmwareRevision; - ULONG ulDefaultEngineClock; /* In 10Khz unit */ - ULONG ulDefaultMemoryClock; /* In 10Khz unit */ - ULONG ulDriverTargetEngineClock; /* In 10Khz unit */ - ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */ - ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */ - ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */ - ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */ - ULONG ulASICMaxEngineClock; /* In 10Khz unit */ - ULONG ulASICMaxMemoryClock; /* In 10Khz unit */ - UCHAR ucASICMaxTemperature; - UCHAR ucMinAllowedBL_Level; - UCHAR ucPadding[2]; /* Don't use them */ - ULONG aulReservedForBIOS; /* Don't use them */ - ULONG ul3DAccelerationEngineClock; /* In 10Khz unit */ - ULONG ulMinPixelClockPLL_Output; /* In 10Khz unit */ - USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */ - USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */ - USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */ - USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */ - USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */ - USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */ - USHORT usMaxPixelClock; /* In 10Khz unit, Max. 
Pclk */ - USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */ - USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */ - USHORT usMinPixelClockPLL_Output; /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */ - ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; - USHORT usReferenceClock; /* In 10Khz unit */ - USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */ - UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */ - UCHAR ucDesign_ID; /* Indicate what is the board design */ - UCHAR ucMemoryModule_ID; /* Indicate what is the board design */ -} ATOM_FIRMWARE_INFO_V1_3; - -typedef struct _ATOM_FIRMWARE_INFO_V1_4 { - ATOM_COMMON_TABLE_HEADER sHeader; - ULONG ulFirmwareRevision; - ULONG ulDefaultEngineClock; /* In 10Khz unit */ - ULONG ulDefaultMemoryClock; /* In 10Khz unit */ - ULONG ulDriverTargetEngineClock; /* In 10Khz unit */ - ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */ - ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */ - ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */ - ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */ - ULONG ulASICMaxEngineClock; /* In 10Khz unit */ - ULONG ulASICMaxMemoryClock; /* In 10Khz unit */ - UCHAR ucASICMaxTemperature; - UCHAR ucMinAllowedBL_Level; - USHORT usBootUpVDDCVoltage; /* In MV unit */ - USHORT usLcdMinPixelClockPLL_Output; /* In MHz unit */ - USHORT usLcdMaxPixelClockPLL_Output; /* In MHz unit */ - ULONG ul3DAccelerationEngineClock; /* In 10Khz unit */ - ULONG ulMinPixelClockPLL_Output; /* In 10Khz unit */ - USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */ - USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */ - USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */ - USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */ - USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */ - USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */ - USHORT usMaxPixelClock; /* In 10Khz unit, Max. 
Pclk */ - USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */ - USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */ - USHORT usMinPixelClockPLL_Output; /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */ - ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; - USHORT usReferenceClock; /* In 10Khz unit */ - USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */ - UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */ - UCHAR ucDesign_ID; /* Indicate what is the board design */ - UCHAR ucMemoryModule_ID; /* Indicate what is the board design */ -} ATOM_FIRMWARE_INFO_V1_4; - -#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V1_4 - -/****************************************************************************/ -/* Structures used in IntegratedSystemInfoTable */ -/****************************************************************************/ +typedef struct _ATOM_FIRMWARE_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ULONG ulFirmwareRevision; + ULONG ulDefaultEngineClock; //In 10Khz unit + ULONG ulDefaultMemoryClock; //In 10Khz unit + ULONG ulDriverTargetEngineClock; //In 10Khz unit + ULONG ulDriverTargetMemoryClock; //In 10Khz unit + ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit + ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit + ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit + ULONG ulASICMaxEngineClock; //In 10Khz unit + ULONG ulASICMaxMemoryClock; //In 10Khz unit + UCHAR ucASICMaxTemperature; + UCHAR ucPadding[3]; //Don't use them + ULONG aulReservedForBIOS[3]; //Don't use them + USHORT usMinEngineClockPLL_Input; //In 10Khz unit + USHORT usMaxEngineClockPLL_Input; //In 10Khz unit + USHORT usMinEngineClockPLL_Output; //In 10Khz unit + USHORT usMinMemoryClockPLL_Input; //In 10Khz unit + USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit + USHORT usMinMemoryClockPLL_Output; //In 10Khz unit + USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk + USHORT usMinPixelClockPLL_Input; //In 10Khz unit + USHORT usMaxPixelClockPLL_Input; //In 10Khz unit + USHORT usMinPixelClockPLL_Output; //In 10Khz unit, the definitions above can't change!!! 
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; + USHORT usReferenceClock; //In 10Khz unit + USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit + UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit + UCHAR ucDesign_ID; //Indicate what is the board design + UCHAR ucMemoryModule_ID; //Indicate what is the board design +}ATOM_FIRMWARE_INFO; + +typedef struct _ATOM_FIRMWARE_INFO_V1_2 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ULONG ulFirmwareRevision; + ULONG ulDefaultEngineClock; //In 10Khz unit + ULONG ulDefaultMemoryClock; //In 10Khz unit + ULONG ulDriverTargetEngineClock; //In 10Khz unit + ULONG ulDriverTargetMemoryClock; //In 10Khz unit + ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit + ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit + ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit + ULONG ulASICMaxEngineClock; //In 10Khz unit + ULONG ulASICMaxMemoryClock; //In 10Khz unit + UCHAR ucASICMaxTemperature; + UCHAR ucMinAllowedBL_Level; + UCHAR ucPadding[2]; //Don't use them + ULONG aulReservedForBIOS[2]; //Don't use them + ULONG ulMinPixelClockPLL_Output; //In 10Khz unit + USHORT usMinEngineClockPLL_Input; //In 10Khz unit + USHORT usMaxEngineClockPLL_Input; //In 10Khz unit + USHORT usMinEngineClockPLL_Output; //In 10Khz unit + USHORT usMinMemoryClockPLL_Input; //In 10Khz unit + USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit + USHORT usMinMemoryClockPLL_Output; //In 10Khz unit + USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk + USHORT usMinPixelClockPLL_Input; //In 10Khz unit + USHORT usMaxPixelClockPLL_Input; //In 10Khz unit + USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output + ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; + USHORT usReferenceClock; //In 10Khz unit + USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit + UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit + UCHAR ucDesign_ID; //Indicate what is the board design + UCHAR ucMemoryModule_ID; //Indicate what is the board design +}ATOM_FIRMWARE_INFO_V1_2; + +typedef struct _ATOM_FIRMWARE_INFO_V1_3 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ULONG ulFirmwareRevision; + ULONG ulDefaultEngineClock; //In 10Khz unit + ULONG ulDefaultMemoryClock; //In 10Khz unit + ULONG ulDriverTargetEngineClock; //In 10Khz unit + ULONG ulDriverTargetMemoryClock; //In 10Khz unit + ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit + ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit + ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit + ULONG ulASICMaxEngineClock; //In 10Khz unit + ULONG ulASICMaxMemoryClock; //In 10Khz unit + UCHAR ucASICMaxTemperature; + UCHAR ucMinAllowedBL_Level; + UCHAR ucPadding[2]; //Don't use them + ULONG aulReservedForBIOS; //Don't use them + ULONG ul3DAccelerationEngineClock;//In 10Khz unit + ULONG ulMinPixelClockPLL_Output; //In 10Khz unit + USHORT usMinEngineClockPLL_Input; //In 10Khz unit + USHORT usMaxEngineClockPLL_Input; //In 10Khz unit + USHORT usMinEngineClockPLL_Output; //In 10Khz unit + USHORT usMinMemoryClockPLL_Input; //In 10Khz unit + USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit + USHORT usMinMemoryClockPLL_Output; //In 10Khz unit + USHORT usMaxPixelClock; //In 10Khz unit, Max. 
Pclk + USHORT usMinPixelClockPLL_Input; //In 10Khz unit + USHORT usMaxPixelClockPLL_Input; //In 10Khz unit + USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output + ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; + USHORT usReferenceClock; //In 10Khz unit + USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit + UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit + UCHAR ucDesign_ID; //Indicate what is the board design + UCHAR ucMemoryModule_ID; //Indicate what is the board design +}ATOM_FIRMWARE_INFO_V1_3; + +typedef struct _ATOM_FIRMWARE_INFO_V1_4 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ULONG ulFirmwareRevision; + ULONG ulDefaultEngineClock; //In 10Khz unit + ULONG ulDefaultMemoryClock; //In 10Khz unit + ULONG ulDriverTargetEngineClock; //In 10Khz unit + ULONG ulDriverTargetMemoryClock; //In 10Khz unit + ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit + ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit + ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit + ULONG ulASICMaxEngineClock; //In 10Khz unit + ULONG ulASICMaxMemoryClock; //In 10Khz unit + UCHAR ucASICMaxTemperature; + UCHAR ucMinAllowedBL_Level; + USHORT usBootUpVDDCVoltage; //In MV unit + USHORT usLcdMinPixelClockPLL_Output; // In MHz unit + USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit + ULONG ul3DAccelerationEngineClock;//In 10Khz unit + ULONG ulMinPixelClockPLL_Output; //In 10Khz unit + USHORT usMinEngineClockPLL_Input; //In 10Khz unit + USHORT usMaxEngineClockPLL_Input; //In 10Khz unit + USHORT usMinEngineClockPLL_Output; //In 10Khz unit + USHORT usMinMemoryClockPLL_Input; //In 10Khz unit + USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit + USHORT usMinMemoryClockPLL_Output; //In 10Khz unit + USHORT usMaxPixelClock; //In 10Khz unit, Max. 
Pclk + USHORT usMinPixelClockPLL_Input; //In 10Khz unit + USHORT usMaxPixelClockPLL_Input; //In 10Khz unit + USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output + ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; + USHORT usReferenceClock; //In 10Khz unit + USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit + UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit + UCHAR ucDesign_ID; //Indicate what is the board design + UCHAR ucMemoryModule_ID; //Indicate what is the board design +}ATOM_FIRMWARE_INFO_V1_4; + +//the structure below to be used from Cypress +typedef struct _ATOM_FIRMWARE_INFO_V2_1 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ULONG ulFirmwareRevision; + ULONG ulDefaultEngineClock; //In 10Khz unit + ULONG ulDefaultMemoryClock; //In 10Khz unit + ULONG ulReserved1; + ULONG ulReserved2; + ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit + ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit + ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit + ULONG ulBinaryAlteredInfo; //Was ulASICMaxEngineClock + ULONG ulDefaultDispEngineClkFreq; //In 10Khz unit + UCHAR ucReserved1; //Was ucASICMaxTemperature; + UCHAR ucMinAllowedBL_Level; + USHORT usBootUpVDDCVoltage; //In MV unit + USHORT usLcdMinPixelClockPLL_Output; // In MHz unit + USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit + ULONG ulReserved4; //Was ulAsicMaximumVoltage + ULONG ulMinPixelClockPLL_Output; //In 10Khz unit + USHORT usMinEngineClockPLL_Input; //In 10Khz unit + USHORT usMaxEngineClockPLL_Input; //In 10Khz unit + USHORT usMinEngineClockPLL_Output; //In 10Khz unit + USHORT usMinMemoryClockPLL_Input; //In 10Khz unit + USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit + USHORT usMinMemoryClockPLL_Output; //In 10Khz unit + USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk + USHORT usMinPixelClockPLL_Input; //In 10Khz unit + USHORT usMaxPixelClockPLL_Input; //In 10Khz unit + USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output + ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability; + USHORT usCoreReferenceClock; //In 10Khz unit + USHORT usMemoryReferenceClock; //In 10Khz unit + USHORT usUniphyDPModeExtClkFreq; //In 10Khz unit, if it is 0, In DP Mode Uniphy Input clock from internal PPLL, otherwise Input clock from external Spread clock + UCHAR ucMemoryModule_ID; //Indicate what is the board design + UCHAR ucReserved4[3]; +}ATOM_FIRMWARE_INFO_V2_1; + + +#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_1 + +/****************************************************************************/ +// Structures used in IntegratedSystemInfoTable +/****************************************************************************/ #define IGP_CAP_FLAG_DYNAMIC_CLOCK_EN 0x2 #define IGP_CAP_FLAG_AC_CARD 0x4 #define IGP_CAP_FLAG_SDVO_CARD 0x8 #define IGP_CAP_FLAG_POSTDIV_BY_2_MODE 0x10 -typedef struct _ATOM_INTEGRATED_SYSTEM_INFO { - ATOM_COMMON_TABLE_HEADER sHeader; - ULONG ulBootUpEngineClock; /* in 10kHz unit */ - ULONG ulBootUpMemoryClock; /* in 10kHz unit */ - ULONG ulMaxSystemMemoryClock; /* in 10kHz unit */ - ULONG ulMinSystemMemoryClock; /* in 10kHz unit */ - UCHAR ucNumberOfCyclesInPeriodHi; - UCHAR ucLCDTimingSel; /* =0:not valid.!=0 sel this timing descriptor from LCD EDID. 
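// Illustrative sketch only (not part of this header): the clock fields in the
// ATOM_FIRMWARE_INFO_* tables above are stored in 10 kHz units and the table
// data is little-endian, so a driver-side reader would typically byte-swap and
// scale as below.  The helper name, the u32/u16 types from <linux/types.h> and
// the kHz/mV output units are assumptions made for this example.
static inline void example_read_boot_clocks(const ATOM_FIRMWARE_INFO_V2_1 *fw,
                                            u32 *sclk_khz, u32 *mclk_khz,
                                            u16 *vddc_mv)
{
	*sclk_khz = le32_to_cpu(fw->ulDefaultEngineClock) * 10; // 10 kHz -> kHz
	*mclk_khz = le32_to_cpu(fw->ulDefaultMemoryClock) * 10; // 10 kHz -> kHz
	*vddc_mv  = le16_to_cpu(fw->usBootUpVDDCVoltage);       // already in mV
}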
*/ - USHORT usReserved1; - USHORT usInterNBVoltageLow; /* An intermidiate PMW value to set the voltage */ - USHORT usInterNBVoltageHigh; /* Another intermidiate PMW value to set the voltage */ - ULONG ulReserved[2]; - - USHORT usFSBClock; /* In MHz unit */ - USHORT usCapabilityFlag; /* Bit0=1 indicates the fake HDMI support,Bit1=0/1 for Dynamic clocking dis/enable */ - /* Bit[3:2]== 0:No PCIE card, 1:AC card, 2:SDVO card */ - /* Bit[4]==1: P/2 mode, ==0: P/1 mode */ - USHORT usPCIENBCfgReg7; /* bit[7:0]=MUX_Sel, bit[9:8]=MUX_SEL_LEVEL2, bit[10]=Lane_Reversal */ - USHORT usK8MemoryClock; /* in MHz unit */ - USHORT usK8SyncStartDelay; /* in 0.01 us unit */ - USHORT usK8DataReturnTime; /* in 0.01 us unit */ - UCHAR ucMaxNBVoltage; - UCHAR ucMinNBVoltage; - UCHAR ucMemoryType; /* [7:4]=1:DDR1;=2:DDR2;=3:DDR3.[3:0] is reserved */ - UCHAR ucNumberOfCyclesInPeriod; /* CG.FVTHROT_PWM_CTRL_REG0.NumberOfCyclesInPeriod */ - UCHAR ucStartingPWM_HighTime; /* CG.FVTHROT_PWM_CTRL_REG0.StartingPWM_HighTime */ - UCHAR ucHTLinkWidth; /* 16 bit vs. 8 bit */ - UCHAR ucMaxNBVoltageHigh; - UCHAR ucMinNBVoltageHigh; -} ATOM_INTEGRATED_SYSTEM_INFO; +typedef struct _ATOM_INTEGRATED_SYSTEM_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ULONG ulBootUpEngineClock; //in 10kHz unit + ULONG ulBootUpMemoryClock; //in 10kHz unit + ULONG ulMaxSystemMemoryClock; //in 10kHz unit + ULONG ulMinSystemMemoryClock; //in 10kHz unit + UCHAR ucNumberOfCyclesInPeriodHi; + UCHAR ucLCDTimingSel; //=0:not valid.!=0 sel this timing descriptor from LCD EDID. + USHORT usReserved1; + USHORT usInterNBVoltageLow; //An intermidiate PMW value to set the voltage + USHORT usInterNBVoltageHigh; //Another intermidiate PMW value to set the voltage + ULONG ulReserved[2]; + + USHORT usFSBClock; //In MHz unit + USHORT usCapabilityFlag; //Bit0=1 indicates the fake HDMI support,Bit1=0/1 for Dynamic clocking dis/enable + //Bit[3:2]== 0:No PCIE card, 1:AC card, 2:SDVO card + //Bit[4]==1: P/2 mode, ==0: P/1 mode + USHORT usPCIENBCfgReg7; //bit[7:0]=MUX_Sel, bit[9:8]=MUX_SEL_LEVEL2, bit[10]=Lane_Reversal + USHORT usK8MemoryClock; //in MHz unit + USHORT usK8SyncStartDelay; //in 0.01 us unit + USHORT usK8DataReturnTime; //in 0.01 us unit + UCHAR ucMaxNBVoltage; + UCHAR ucMinNBVoltage; + UCHAR ucMemoryType; //[7:4]=1:DDR1;=2:DDR2;=3:DDR3.[3:0] is reserved + UCHAR ucNumberOfCyclesInPeriod; //CG.FVTHROT_PWM_CTRL_REG0.NumberOfCyclesInPeriod + UCHAR ucStartingPWM_HighTime; //CG.FVTHROT_PWM_CTRL_REG0.StartingPWM_HighTime + UCHAR ucHTLinkWidth; //16 bit vs. 8 bit + UCHAR ucMaxNBVoltageHigh; + UCHAR ucMinNBVoltageHigh; +}ATOM_INTEGRATED_SYSTEM_INFO; /* Explanation on entries in ATOM_INTEGRATED_SYSTEM_INFO -ulBootUpMemoryClock: For Intel IGP,it's the UMA system memory clock +ulBootUpMemoryClock: For Intel IGP,it's the UMA system memory clock For AMD IGP,it's 0 if no SidePort memory installed or it's the boot-up SidePort memory clock ulMaxSystemMemoryClock: For Intel IGP,it's the Max freq from memory SPD if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0 For AMD IGP,for now this can be 0 -ulMinSystemMemoryClock: For Intel IGP,it's 133MHz if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0 +ulMinSystemMemoryClock: For Intel IGP,it's 133MHz if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0 For AMD IGP,for now this can be 0 -usFSBClock: For Intel IGP,it's FSB Freq +usFSBClock: For Intel IGP,it's FSB Freq For AMD IGP,it's HT Link Speed usK8MemoryClock: For AMD IGP only. 
For RevF CPU, set it to 200 @@ -1687,98 +2093,113 @@ VC:Voltage Control ucMaxNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all. ucMinNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all. -ucNumberOfCyclesInPeriod: Indicate how many cycles when PWM duty is 100%. low 8 bits of the value. -ucNumberOfCyclesInPeriodHi: Indicate how many cycles when PWM duty is 100%. high 8 bits of the value.If the PWM has an inverter,set bit [7]==1,otherwise set it 0 +ucNumberOfCyclesInPeriod: Indicate how many cycles when PWM duty is 100%. low 8 bits of the value. +ucNumberOfCyclesInPeriodHi: Indicate how many cycles when PWM duty is 100%. high 8 bits of the value.If the PWM has an inverter,set bit [7]==1,otherwise set it 0 ucMaxNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all. ucMinNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all. + usInterNBVoltageLow: Voltage regulator dependent PWM value. The value makes the the voltage >=Min NB voltage but <=InterNBVoltageHigh. Set this to 0x0000 if VC without PWM or no VC at all. usInterNBVoltageHigh: Voltage regulator dependent PWM value. The value makes the the voltage >=InterNBVoltageLow but <=Max NB voltage.Set this to 0x0000 if VC without PWM or no VC at all. */ + /* The following IGP table is introduced from RS780, which is supposed to be put by SBIOS in FB before IGP VBIOS starts VPOST; -Then VBIOS will copy the whole structure to its image so all GPU SW components can access this data structure to get whatever they need. +Then VBIOS will copy the whole structure to its image so all GPU SW components can access this data structure to get whatever they need. The enough reservation should allow us to never change table revisions. Whenever needed, a GPU SW component can use reserved portion for new data entries. 
SW components can access the IGP system infor structure in the same way as before */ -typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 { - ATOM_COMMON_TABLE_HEADER sHeader; - ULONG ulBootUpEngineClock; /* in 10kHz unit */ - ULONG ulReserved1[2]; /* must be 0x0 for the reserved */ - ULONG ulBootUpUMAClock; /* in 10kHz unit */ - ULONG ulBootUpSidePortClock; /* in 10kHz unit */ - ULONG ulMinSidePortClock; /* in 10kHz unit */ - ULONG ulReserved2[6]; /* must be 0x0 for the reserved */ - ULONG ulSystemConfig; /* see explanation below */ - ULONG ulBootUpReqDisplayVector; - ULONG ulOtherDisplayMisc; - ULONG ulDDISlot1Config; - ULONG ulDDISlot2Config; - UCHAR ucMemoryType; /* [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved */ - UCHAR ucUMAChannelNumber; - UCHAR ucDockingPinBit; - UCHAR ucDockingPinPolarity; - ULONG ulDockingPinCFGInfo; - ULONG ulCPUCapInfo; - USHORT usNumberOfCyclesInPeriod; - USHORT usMaxNBVoltage; - USHORT usMinNBVoltage; - USHORT usBootUpNBVoltage; - ULONG ulHTLinkFreq; /* in 10Khz */ - USHORT usMinHTLinkWidth; - USHORT usMaxHTLinkWidth; - USHORT usUMASyncStartDelay; - USHORT usUMADataReturnTime; - USHORT usLinkStatusZeroTime; - USHORT usReserved; - ULONG ulHighVoltageHTLinkFreq; /* in 10Khz */ - ULONG ulLowVoltageHTLinkFreq; /* in 10Khz */ - USHORT usMaxUpStreamHTLinkWidth; - USHORT usMaxDownStreamHTLinkWidth; - USHORT usMinUpStreamHTLinkWidth; - USHORT usMinDownStreamHTLinkWidth; - ULONG ulReserved3[97]; /* must be 0x0 */ -} ATOM_INTEGRATED_SYSTEM_INFO_V2; + +typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ULONG ulBootUpEngineClock; //in 10kHz unit + ULONG ulReserved1[2]; //must be 0x0 for the reserved + ULONG ulBootUpUMAClock; //in 10kHz unit + ULONG ulBootUpSidePortClock; //in 10kHz unit + ULONG ulMinSidePortClock; //in 10kHz unit + ULONG ulReserved2[6]; //must be 0x0 for the reserved + ULONG ulSystemConfig; //see explanation below + ULONG ulBootUpReqDisplayVector; + ULONG ulOtherDisplayMisc; + ULONG ulDDISlot1Config; + ULONG ulDDISlot2Config; + UCHAR ucMemoryType; //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved + UCHAR ucUMAChannelNumber; + UCHAR ucDockingPinBit; + UCHAR ucDockingPinPolarity; + ULONG ulDockingPinCFGInfo; + ULONG ulCPUCapInfo; + USHORT usNumberOfCyclesInPeriod; + USHORT usMaxNBVoltage; + USHORT usMinNBVoltage; + USHORT usBootUpNBVoltage; + ULONG ulHTLinkFreq; //in 10Khz + USHORT usMinHTLinkWidth; + USHORT usMaxHTLinkWidth; + USHORT usUMASyncStartDelay; + USHORT usUMADataReturnTime; + USHORT usLinkStatusZeroTime; + USHORT usDACEfuse; //for storing badgap value (for RS880 only) + ULONG ulHighVoltageHTLinkFreq; // in 10Khz + ULONG ulLowVoltageHTLinkFreq; // in 10Khz + USHORT usMaxUpStreamHTLinkWidth; + USHORT usMaxDownStreamHTLinkWidth; + USHORT usMinUpStreamHTLinkWidth; + USHORT usMinDownStreamHTLinkWidth; + USHORT usFirmwareVersion; //0 means FW is not supported. Otherwise it's the FW version loaded by SBIOS and driver should enable FW. + USHORT usFullT0Time; // Input to calculate minimum HT link change time required by NB P-State. Unit is 0.01us. 
+ ULONG ulReserved3[96]; //must be 0x0 +}ATOM_INTEGRATED_SYSTEM_INFO_V2; /* ulBootUpEngineClock: Boot-up Engine Clock in 10Khz; ulBootUpUMAClock: Boot-up UMA Clock in 10Khz; it must be 0x0 when UMA is not present ulBootUpSidePortClock: Boot-up SidePort Clock in 10Khz; it must be 0x0 when SidePort Memory is not present,this could be equal to or less than maximum supported Sideport memory clock -ulSystemConfig: -Bit[0]=1: PowerExpress mode =0 Non-PowerExpress mode; +ulSystemConfig: +Bit[0]=1: PowerExpress mode =0 Non-PowerExpress mode; Bit[1]=1: system boots up at AMD overdrived state or user customized mode. In this case, driver will just stick to this boot-up mode. No other PowerPlay state =0: system boots up at driver control state. Power state depends on PowerPlay table. Bit[2]=1: PWM method is used on NB voltage control. =0: GPIO method is used. Bit[3]=1: Only one power state(Performance) will be supported. =0: Multiple power states supported from PowerPlay table. -Bit[4]=1: CLMC is supported and enabled on current system. - =0: CLMC is not supported or enabled on current system. SBIOS need to support HT link/freq change through ATIF interface. -Bit[5]=1: Enable CDLW for all driver control power states. Max HT width is from SBIOS, while Min HT width is determined by display requirement. +Bit[4]=1: CLMC is supported and enabled on current system. + =0: CLMC is not supported or enabled on current system. SBIOS need to support HT link/freq change through ATIF interface. +Bit[5]=1: Enable CDLW for all driver control power states. Max HT width is from SBIOS, while Min HT width is determined by display requirement. =0: CDLW is disabled. If CLMC is enabled case, Min HT width will be set equal to Max HT width. If CLMC disabled case, Max HT width will be applied. Bit[6]=1: High Voltage requested for all power states. In this case, voltage will be forced at 1.1v and powerplay table voltage drop/throttling request will be ignored. =0: Voltage settings is determined by powerplay table. Bit[7]=1: Enable CLMC as hybrid Mode. CDLD and CILR will be disabled in this case and we're using legacy C1E. This is workaround for CPU(Griffin) performance issue. =0: Enable CLMC as regular mode, CDLD and CILR will be enabled. +Bit[8]=1: CDLF is supported and enabled on current system. + =0: CDLF is not supported or enabled on current system. +Bit[9]=1: DLL Shut Down feature is enabled on current system. + =0: DLL Shut Down feature is not enabled or supported on current system. ulBootUpReqDisplayVector: This dword is a bit vector indicates what display devices are requested during boot-up. Refer to ATOM_DEVICE_xxx_SUPPORT for the bit vector definitions. ulOtherDisplayMisc: [15:8]- Bootup LCD Expansion selection; 0-center, 1-full panel size expansion; - [7:0] - BootupTV standard selection; This is a bit vector to indicate what TV standards are supported by the system. Refer to ucTVSuppportedStd definition; + [7:0] - BootupTV standard selection; This is a bit vector to indicate what TV standards are supported by the system. Refer to ucTVSupportedStd definition; ulDDISlot1Config: Describes the PCIE lane configuration on this DDI PCIE slot (ADD2 card) or connector (Mobile design). 
[3:0] - Bit vector to indicate PCIE lane config of the DDI slot/connector on chassis (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12) - [7:4] - Bit vector to indicate PCIE lane config of the same DDI slot/connector on docking station (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12) - [15:8] - Lane configuration attribute; + [7:4] - Bit vector to indicate PCIE lane config of the same DDI slot/connector on docking station (bit 4=1 lane 3:0; bit 5=1 lane 7:4; bit 6=1 lane 11:8; bit 7=1 lane 15:12) + When a DDI connector is not "paired" (meaming two connections mutualexclusive on chassis or docking, only one of them can be connected at one time. + in both chassis and docking, SBIOS has to duplicate the same PCIE lane info from chassis to docking or vice versa. For example: + one DDI connector is only populated in docking with PCIE lane 8-11, but there is no paired connection on chassis, SBIOS has to copy bit 6 to bit 2. + + [15:8] - Lane configuration attribute; [23:16]- Connector type, possible value: CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D CONNECTOR_OBJECT_ID_HDMI_TYPE_A CONNECTOR_OBJECT_ID_DISPLAYPORT + CONNECTOR_OBJECT_ID_eDP [31:24]- Reserved ulDDISlot2Config: Same as Slot1. @@ -1787,29 +2208,31 @@ For IGP, Hypermemory is the only memory type showed in CCC. ucUMAChannelNumber: how many channels for the UMA; -ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin +ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin ucDockingPinBit: which bit in this register to read the pin status; ucDockingPinPolarity:Polarity of the pin when docked; ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0 usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%. -usMaxNBVoltage:Max. voltage control value in either PWM or GPIO mode. + +usMaxNBVoltage:Max. voltage control value in either PWM or GPIO mode. usMinNBVoltage:Min. voltage control value in either PWM or GPIO mode. GPIO mode: both usMaxNBVoltage & usMinNBVoltage have a valid value ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=0 PWM mode: both usMaxNBVoltage & usMinNBVoltage have a valid value ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=1 GPU SW don't control mode: usMaxNBVoltage & usMinNBVoltage=0 and no care about ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE + usBootUpNBVoltage:Boot-up voltage regulator dependent PWM value. ulHTLinkFreq: Bootup HT link Frequency in 10Khz. -usMinHTLinkWidth: Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth. - If CDLW enabled, both upstream and downstream width should be the same during bootup. -usMaxHTLinkWidth: Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth. +usMinHTLinkWidth: Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth. If CDLW enabled, both upstream and downstream width should be the same during bootup. +usMaxHTLinkWidth: Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth. + If CDLW enabled, both upstream and downstream width should be the same during bootup. 
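// Illustrative decode of the ulDDISlot1Config layout described above; the
// helper is hypothetical and only restates the documented bit positions,
// consistent with the IGP_DDI_SLOT_LANE_CONFIG_MASK and
// IGP_DDI_SLOT_CONNECTOR_TYPE_MASK definitions further below.
static inline void example_decode_ddi_slot_config(u32 slot_cfg,
                                                  u8 *chassis_lanes,
                                                  u8 *dock_lanes,
                                                  u8 *attribute,
                                                  u8 *connector_type)
{
	*chassis_lanes  = slot_cfg & 0x0f;         // [3:0]   lane vector on chassis
	*dock_lanes     = (slot_cfg >> 4) & 0x0f;  // [7:4]   lane vector on docking station
	*attribute      = (slot_cfg >> 8) & 0xff;  // [15:8]  lane configuration attribute
	*connector_type = (slot_cfg >> 16) & 0xff; // [23:16] CONNECTOR_OBJECT_ID_xxx
	                                           // [31:24] reserved
}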
-usUMASyncStartDelay: Memory access latency, required for watermark calculation +usUMASyncStartDelay: Memory access latency, required for watermark calculation usUMADataReturnTime: Memory access latency, required for watermark calculation -usLinkStatusZeroTime:Memory access latency required for watermark calculation, set this to 0x0 for K8 CPU, set a proper value in 0.01 the unit of us +usLinkStatusZeroTime:Memory access latency required for watermark calculation, set this to 0x0 for K8 CPU, set a proper value in 0.01 the unit of us for Griffin or Greyhound. SBIOS needs to convert to actual time by: if T0Ttime [5:4]=00b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.1us (0.0 to 1.5us) if T0Ttime [5:4]=01b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.5us (0.0 to 7.5us) @@ -1817,7 +2240,7 @@ for Griffin or Greyhound. SBIOS needs to convert to actual time by: if T0Ttime [5:4]=11b, and T0Ttime [3:0]=0x0 to 0xa, then usLinkStatusZeroTime=T0Ttime [3:0]*20us (0.0 to 200us) ulHighVoltageHTLinkFreq: HT link frequency for power state with low voltage. If boot up runs in HT1, this must be 0. - This must be less than or equal to ulHTLinkFreq(bootup frequency). + This must be less than or equal to ulHTLinkFreq(bootup frequency). ulLowVoltageHTLinkFreq: HT link frequency for power state with low voltage or voltage scaling 1.0v~1.1v. If boot up runs in HT1, this must be 0. This must be less than or equal to ulHighVoltageHTLinkFreq. @@ -1827,14 +2250,17 @@ usMinUpStreamHTLinkWidth: Asymmetric link width support in the future, to rep usMinDownStreamHTLinkWidth: same as above. */ + #define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001 #define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002 -#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE 0x00000004 +#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE 0x00000004 #define SYSTEM_CONFIG_PERFORMANCE_POWERSTATE_ONLY 0x00000008 #define SYSTEM_CONFIG_CLMC_ENABLED 0x00000010 #define SYSTEM_CONFIG_CDLW_ENABLED 0x00000020 #define SYSTEM_CONFIG_HIGH_VOLTAGE_REQUESTED 0x00000040 #define SYSTEM_CONFIG_CLMC_HYBRID_MODE_ENABLED 0x00000080 +#define SYSTEM_CONFIG_CDLF_ENABLED 0x00000100 +#define SYSTEM_CONFIG_DLL_SHUTDOWN_ENABLED 0x00000200 #define IGP_DDI_SLOT_LANE_CONFIG_MASK 0x000000FF @@ -1851,6 +2277,41 @@ usMinDownStreamHTLinkWidth: same as above. #define IGP_DDI_SLOT_CONNECTOR_TYPE_MASK 0x00FF0000 +// IntegratedSystemInfoTable new Rev is V5 after V2, because of the real rev of V2 is v1.4. This rev is used for RR +typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ULONG ulBootUpEngineClock; //in 10kHz unit + ULONG ulDentistVCOFreq; //Dentist VCO clock in 10kHz unit, the source of GPU SCLK, LCLK, UCLK and VCLK. 
+ ULONG ulLClockFreq; //GPU Lclk freq in 10kHz unit, have relationship with NCLK in NorthBridge + ULONG ulBootUpUMAClock; //in 10kHz unit + ULONG ulReserved1[8]; //must be 0x0 for the reserved + ULONG ulBootUpReqDisplayVector; + ULONG ulOtherDisplayMisc; + ULONG ulReserved2[4]; //must be 0x0 for the reserved + ULONG ulSystemConfig; //TBD + ULONG ulCPUCapInfo; //TBD + USHORT usMaxNBVoltage; //high NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse; + USHORT usMinNBVoltage; //low NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse; + USHORT usBootUpNBVoltage; //boot up NB voltage + UCHAR ucHtcTmpLmt; //bit [22:16] of D24F3x64 Hardware Thermal Control (HTC) Register, may not be needed, TBD + UCHAR ucTjOffset; //bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed, TBD + ULONG ulReserved3[4]; //must be 0x0 for the reserved + ULONG ulDDISlot1Config; //see above ulDDISlot1Config definition + ULONG ulDDISlot2Config; + ULONG ulDDISlot3Config; + ULONG ulDDISlot4Config; + ULONG ulReserved4[4]; //must be 0x0 for the reserved + UCHAR ucMemoryType; //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved + UCHAR ucUMAChannelNumber; + USHORT usReserved; + ULONG ulReserved5[4]; //must be 0x0 for the reserved + ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10];//arrays with values for CSR M3 arbiter for default + ULONG ulCSR_M3_ARB_CNTL_UVD[10]; //arrays with values for CSR M3 arbiter for UVD playback + ULONG ulCSR_M3_ARB_CNTL_FS3D[10];//arrays with values for CSR M3 arbiter for Full Screen 3D applications + ULONG ulReserved6[61]; //must be 0x0 +}ATOM_INTEGRATED_SYSTEM_INFO_V5; + #define ATOM_CRT_INT_ENCODER1_INDEX 0x00000000 #define ATOM_LCD_INT_ENCODER1_INDEX 0x00000001 #define ATOM_TV_INT_ENCODER1_INDEX 0x00000002 @@ -1866,8 +2327,8 @@ usMinDownStreamHTLinkWidth: same as above. #define ATOM_DFP_INT_ENCODER3_INDEX 0x0000000C #define ATOM_DFP_INT_ENCODER4_INDEX 0x0000000D -/* define ASIC internal encoder id ( bit vector ) */ -#define ASIC_INT_DAC1_ENCODER_ID 0x00 +// define ASIC internal encoder id ( bit vector ), used for CRTC_SourceSelTable +#define ASIC_INT_DAC1_ENCODER_ID 0x00 #define ASIC_INT_TV_ENCODER_ID 0x02 #define ASIC_INT_DIG1_ENCODER_ID 0x03 #define ASIC_INT_DAC2_ENCODER_ID 0x04 @@ -1875,10 +2336,24 @@ usMinDownStreamHTLinkWidth: same as above. #define ASIC_INT_DVO_ENCODER_ID 0x07 #define ASIC_INT_DIG2_ENCODER_ID 0x09 #define ASIC_EXT_DIG_ENCODER_ID 0x05 +#define ASIC_EXT_DIG2_ENCODER_ID 0x08 +#define ASIC_INT_DIG3_ENCODER_ID 0x0a +#define ASIC_INT_DIG4_ENCODER_ID 0x0b +#define ASIC_INT_DIG5_ENCODER_ID 0x0c +#define ASIC_INT_DIG6_ENCODER_ID 0x0d -/* define Encoder attribute */ +//define Encoder attribute #define ATOM_ANALOG_ENCODER 0 -#define ATOM_DIGITAL_ENCODER 1 +#define ATOM_DIGITAL_ENCODER 1 +#define ATOM_DP_ENCODER 2 + +#define ATOM_ENCODER_ENUM_MASK 0x70 +#define ATOM_ENCODER_ENUM_ID1 0x00 +#define ATOM_ENCODER_ENUM_ID2 0x10 +#define ATOM_ENCODER_ENUM_ID3 0x20 +#define ATOM_ENCODER_ENUM_ID4 0x30 +#define ATOM_ENCODER_ENUM_ID5 0x40 +#define ATOM_ENCODER_ENUM_ID6 0x50 #define ATOM_DEVICE_CRT1_INDEX 0x00000000 #define ATOM_DEVICE_LCD1_INDEX 0x00000001 @@ -1886,45 +2361,40 @@ usMinDownStreamHTLinkWidth: same as above. 
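One note on the ATOM_ENCODER_ENUM_* additions in the hunk above: the IDs are spaced 0x10 apart under a 0x70 mask, so an encoder's instance number can be recovered with a mask and shift. The helper below is a hypothetical illustration, not something added by this patch.

#define ATOM_ENCODER_ENUM_MASK 0x70  /* as added above */

/* ENUM_ID1..ENUM_ID6 are 0x00, 0x10, ... 0x50, so bits [6:4] hold the
 * zero-based instance: 0 for ENUM_ID1, 1 for ENUM_ID2, and so on. */
static inline unsigned int atom_encoder_instance(unsigned char enum_flags)
{
	return (enum_flags & ATOM_ENCODER_ENUM_MASK) >> 4;
}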
#define ATOM_DEVICE_DFP1_INDEX 0x00000003 #define ATOM_DEVICE_CRT2_INDEX 0x00000004 #define ATOM_DEVICE_LCD2_INDEX 0x00000005 -#define ATOM_DEVICE_TV2_INDEX 0x00000006 +#define ATOM_DEVICE_DFP6_INDEX 0x00000006 #define ATOM_DEVICE_DFP2_INDEX 0x00000007 #define ATOM_DEVICE_CV_INDEX 0x00000008 -#define ATOM_DEVICE_DFP3_INDEX 0x00000009 -#define ATOM_DEVICE_DFP4_INDEX 0x0000000A -#define ATOM_DEVICE_DFP5_INDEX 0x0000000B +#define ATOM_DEVICE_DFP3_INDEX 0x00000009 +#define ATOM_DEVICE_DFP4_INDEX 0x0000000A +#define ATOM_DEVICE_DFP5_INDEX 0x0000000B + #define ATOM_DEVICE_RESERVEDC_INDEX 0x0000000C #define ATOM_DEVICE_RESERVEDD_INDEX 0x0000000D #define ATOM_DEVICE_RESERVEDE_INDEX 0x0000000E #define ATOM_DEVICE_RESERVEDF_INDEX 0x0000000F #define ATOM_MAX_SUPPORTED_DEVICE_INFO (ATOM_DEVICE_DFP3_INDEX+1) #define ATOM_MAX_SUPPORTED_DEVICE_INFO_2 ATOM_MAX_SUPPORTED_DEVICE_INFO -#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3 (ATOM_DEVICE_DFP5_INDEX + 1) +#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3 (ATOM_DEVICE_DFP5_INDEX + 1 ) #define ATOM_MAX_SUPPORTED_DEVICE (ATOM_DEVICE_RESERVEDF_INDEX+1) -#define ATOM_DEVICE_CRT1_SUPPORT (0x1L << ATOM_DEVICE_CRT1_INDEX) -#define ATOM_DEVICE_LCD1_SUPPORT (0x1L << ATOM_DEVICE_LCD1_INDEX) -#define ATOM_DEVICE_TV1_SUPPORT (0x1L << ATOM_DEVICE_TV1_INDEX) -#define ATOM_DEVICE_DFP1_SUPPORT (0x1L << ATOM_DEVICE_DFP1_INDEX) -#define ATOM_DEVICE_CRT2_SUPPORT (0x1L << ATOM_DEVICE_CRT2_INDEX) -#define ATOM_DEVICE_LCD2_SUPPORT (0x1L << ATOM_DEVICE_LCD2_INDEX) -#define ATOM_DEVICE_TV2_SUPPORT (0x1L << ATOM_DEVICE_TV2_INDEX) -#define ATOM_DEVICE_DFP2_SUPPORT (0x1L << ATOM_DEVICE_DFP2_INDEX) -#define ATOM_DEVICE_CV_SUPPORT (0x1L << ATOM_DEVICE_CV_INDEX) -#define ATOM_DEVICE_DFP3_SUPPORT (0x1L << ATOM_DEVICE_DFP3_INDEX) -#define ATOM_DEVICE_DFP4_SUPPORT (0x1L << ATOM_DEVICE_DFP4_INDEX ) -#define ATOM_DEVICE_DFP5_SUPPORT (0x1L << ATOM_DEVICE_DFP5_INDEX) - -#define ATOM_DEVICE_CRT_SUPPORT \ - (ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT) -#define ATOM_DEVICE_DFP_SUPPORT \ - (ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT | \ - ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | \ - ATOM_DEVICE_DFP5_SUPPORT) -#define ATOM_DEVICE_TV_SUPPORT \ - (ATOM_DEVICE_TV1_SUPPORT | ATOM_DEVICE_TV2_SUPPORT) -#define ATOM_DEVICE_LCD_SUPPORT \ - (ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_LCD2_SUPPORT) +#define ATOM_DEVICE_CRT1_SUPPORT (0x1L << ATOM_DEVICE_CRT1_INDEX ) +#define ATOM_DEVICE_LCD1_SUPPORT (0x1L << ATOM_DEVICE_LCD1_INDEX ) +#define ATOM_DEVICE_TV1_SUPPORT (0x1L << ATOM_DEVICE_TV1_INDEX ) +#define ATOM_DEVICE_DFP1_SUPPORT (0x1L << ATOM_DEVICE_DFP1_INDEX ) +#define ATOM_DEVICE_CRT2_SUPPORT (0x1L << ATOM_DEVICE_CRT2_INDEX ) +#define ATOM_DEVICE_LCD2_SUPPORT (0x1L << ATOM_DEVICE_LCD2_INDEX ) +#define ATOM_DEVICE_DFP6_SUPPORT (0x1L << ATOM_DEVICE_DFP6_INDEX ) +#define ATOM_DEVICE_DFP2_SUPPORT (0x1L << ATOM_DEVICE_DFP2_INDEX ) +#define ATOM_DEVICE_CV_SUPPORT (0x1L << ATOM_DEVICE_CV_INDEX ) +#define ATOM_DEVICE_DFP3_SUPPORT (0x1L << ATOM_DEVICE_DFP3_INDEX ) +#define ATOM_DEVICE_DFP4_SUPPORT (0x1L << ATOM_DEVICE_DFP4_INDEX ) +#define ATOM_DEVICE_DFP5_SUPPORT (0x1L << ATOM_DEVICE_DFP5_INDEX ) + +#define ATOM_DEVICE_CRT_SUPPORT (ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT) +#define ATOM_DEVICE_DFP_SUPPORT (ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT | ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | ATOM_DEVICE_DFP5_SUPPORT | ATOM_DEVICE_DFP6_SUPPORT) +#define ATOM_DEVICE_TV_SUPPORT (ATOM_DEVICE_TV1_SUPPORT) +#define ATOM_DEVICE_LCD_SUPPORT (ATOM_DEVICE_LCD1_SUPPORT | 
ATOM_DEVICE_LCD2_SUPPORT) #define ATOM_DEVICE_CONNECTOR_TYPE_MASK 0x000000F0 #define ATOM_DEVICE_CONNECTOR_TYPE_SHIFT 0x00000004 @@ -1942,6 +2412,7 @@ usMinDownStreamHTLinkWidth: same as above. #define ATOM_DEVICE_CONNECTOR_CASE_1 0x0000000E #define ATOM_DEVICE_CONNECTOR_DISPLAYPORT 0x0000000F + #define ATOM_DEVICE_DAC_INFO_MASK 0x0000000F #define ATOM_DEVICE_DAC_INFO_SHIFT 0x00000000 #define ATOM_DEVICE_DAC_INFO_NODAC 0x00000000 @@ -1958,139 +2429,150 @@ usMinDownStreamHTLinkWidth: same as above. #define ATOM_DEVICE_I2C_ID_SHIFT 0x00000004 #define ATOM_DEVICE_I2C_ID_IS_FOR_NON_MM_USE 0x00000001 #define ATOM_DEVICE_I2C_ID_IS_FOR_MM_USE 0x00000002 -#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE 0x00000003 /* For IGP RS600 */ -#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL 0x00000004 /* For IGP RS690 */ +#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE 0x00000003 //For IGP RS600 +#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL 0x00000004 //For IGP RS690 #define ATOM_DEVICE_I2C_HARDWARE_CAP_MASK 0x00000080 #define ATOM_DEVICE_I2C_HARDWARE_CAP_SHIFT 0x00000007 #define ATOM_DEVICE_USES_SOFTWARE_ASSISTED_I2C 0x00000000 #define ATOM_DEVICE_USES_HARDWARE_ASSISTED_I2C 0x00000001 -/* usDeviceSupport: */ -/* Bits0 = 0 - no CRT1 support= 1- CRT1 is supported */ -/* Bit 1 = 0 - no LCD1 support= 1- LCD1 is supported */ -/* Bit 2 = 0 - no TV1 support= 1- TV1 is supported */ -/* Bit 3 = 0 - no DFP1 support= 1- DFP1 is supported */ -/* Bit 4 = 0 - no CRT2 support= 1- CRT2 is supported */ -/* Bit 5 = 0 - no LCD2 support= 1- LCD2 is supported */ -/* Bit 6 = 0 - no TV2 support= 1- TV2 is supported */ -/* Bit 7 = 0 - no DFP2 support= 1- DFP2 is supported */ -/* Bit 8 = 0 - no CV support= 1- CV is supported */ -/* Bit 9 = 0 - no DFP3 support= 1- DFP3 is supported */ -/* Byte1 (Supported Device Info) */ -/* Bit 0 = = 0 - no CV support= 1- CV is supported */ -/* */ -/* */ - -/* ucI2C_ConfigID */ -/* [7:0] - I2C LINE Associate ID */ -/* = 0 - no I2C */ -/* [7] - HW_Cap = 1, [6:0]=HW assisted I2C ID(HW line selection) */ -/* = 0, [6:0]=SW assisted I2C ID */ -/* [6-4] - HW_ENGINE_ID = 1, HW engine for NON multimedia use */ -/* = 2, HW engine for Multimedia use */ -/* = 3-7 Reserved for future I2C engines */ -/* [3-0] - I2C_LINE_MUX = A Mux number when it's HW assisted I2C or GPIO ID when it's SW I2C */ - -typedef struct _ATOM_I2C_ID_CONFIG { -#if ATOM_BIG_ENDIAN - UCHAR bfHW_Capable:1; - UCHAR bfHW_EngineID:3; - UCHAR bfI2C_LineMux:4; -#else - UCHAR bfI2C_LineMux:4; - UCHAR bfHW_EngineID:3; - UCHAR bfHW_Capable:1; -#endif -} ATOM_I2C_ID_CONFIG; - -typedef union _ATOM_I2C_ID_CONFIG_ACCESS { - ATOM_I2C_ID_CONFIG sbfAccess; - UCHAR ucAccess; -} ATOM_I2C_ID_CONFIG_ACCESS; +// usDeviceSupport: +// Bits0 = 0 - no CRT1 support= 1- CRT1 is supported +// Bit 1 = 0 - no LCD1 support= 1- LCD1 is supported +// Bit 2 = 0 - no TV1 support= 1- TV1 is supported +// Bit 3 = 0 - no DFP1 support= 1- DFP1 is supported +// Bit 4 = 0 - no CRT2 support= 1- CRT2 is supported +// Bit 5 = 0 - no LCD2 support= 1- LCD2 is supported +// Bit 6 = 0 - no DFP6 support= 1- DFP6 is supported +// Bit 7 = 0 - no DFP2 support= 1- DFP2 is supported +// Bit 8 = 0 - no CV support= 1- CV is supported +// Bit 9 = 0 - no DFP3 support= 1- DFP3 is supported +// Bit 10 = 0 - no DFP4 support= 1- DFP4 is supported +// Bit 11 = 0 - no DFP5 support= 1- DFP5 is supported +// +// /****************************************************************************/ -/* Structure used in GPIO_I2C_InfoTable */ +/* Structure used in MclkSS_InfoTable */ 
/****************************************************************************/ -typedef struct _ATOM_GPIO_I2C_ASSIGMENT { - USHORT usClkMaskRegisterIndex; - USHORT usClkEnRegisterIndex; - USHORT usClkY_RegisterIndex; - USHORT usClkA_RegisterIndex; - USHORT usDataMaskRegisterIndex; - USHORT usDataEnRegisterIndex; - USHORT usDataY_RegisterIndex; - USHORT usDataA_RegisterIndex; - ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; - UCHAR ucClkMaskShift; - UCHAR ucClkEnShift; - UCHAR ucClkY_Shift; - UCHAR ucClkA_Shift; - UCHAR ucDataMaskShift; - UCHAR ucDataEnShift; - UCHAR ucDataY_Shift; - UCHAR ucDataA_Shift; - UCHAR ucReserved1; - UCHAR ucReserved2; -} ATOM_GPIO_I2C_ASSIGMENT; - -typedef struct _ATOM_GPIO_I2C_INFO { - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_GPIO_I2C_ASSIGMENT asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE]; -} ATOM_GPIO_I2C_INFO; +// ucI2C_ConfigID +// [7:0] - I2C LINE Associate ID +// = 0 - no I2C +// [7] - HW_Cap = 1, [6:0]=HW assisted I2C ID(HW line selection) +// = 0, [6:0]=SW assisted I2C ID +// [6-4] - HW_ENGINE_ID = 1, HW engine for NON multimedia use +// = 2, HW engine for Multimedia use +// = 3-7 Reserved for future I2C engines +// [3-0] - I2C_LINE_MUX = A Mux number when it's HW assisted I2C or GPIO ID when it's SW I2C + +typedef struct _ATOM_I2C_ID_CONFIG +{ +#if ATOM_BIG_ENDIAN + UCHAR bfHW_Capable:1; + UCHAR bfHW_EngineID:3; + UCHAR bfI2C_LineMux:4; +#else + UCHAR bfI2C_LineMux:4; + UCHAR bfHW_EngineID:3; + UCHAR bfHW_Capable:1; +#endif +}ATOM_I2C_ID_CONFIG; -/****************************************************************************/ -/* Common Structure used in other structures */ -/****************************************************************************/ +typedef union _ATOM_I2C_ID_CONFIG_ACCESS +{ + ATOM_I2C_ID_CONFIG sbfAccess; + UCHAR ucAccess; +}ATOM_I2C_ID_CONFIG_ACCESS; + + +/****************************************************************************/ +// Structure used in GPIO_I2C_InfoTable +/****************************************************************************/ +typedef struct _ATOM_GPIO_I2C_ASSIGMENT +{ + USHORT usClkMaskRegisterIndex; + USHORT usClkEnRegisterIndex; + USHORT usClkY_RegisterIndex; + USHORT usClkA_RegisterIndex; + USHORT usDataMaskRegisterIndex; + USHORT usDataEnRegisterIndex; + USHORT usDataY_RegisterIndex; + USHORT usDataA_RegisterIndex; + ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; + UCHAR ucClkMaskShift; + UCHAR ucClkEnShift; + UCHAR ucClkY_Shift; + UCHAR ucClkA_Shift; + UCHAR ucDataMaskShift; + UCHAR ucDataEnShift; + UCHAR ucDataY_Shift; + UCHAR ucDataA_Shift; + UCHAR ucReserved1; + UCHAR ucReserved2; +}ATOM_GPIO_I2C_ASSIGMENT; + +typedef struct _ATOM_GPIO_I2C_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_GPIO_I2C_ASSIGMENT asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE]; +}ATOM_GPIO_I2C_INFO; + +/****************************************************************************/ +// Common Structure used in other structures +/****************************************************************************/ #ifndef _H2INC - -/* Please don't add or expand this bitfield structure below, this one will retire soon.! */ -typedef struct _ATOM_MODE_MISC_INFO { + +//Please don't add or expand this bitfield structure below, this one will retire soon.! 
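The ucI2C_ConfigID layout documented above (bit 7 = HW capable, bits [6:4] = HW engine ID, bits [3:0] = line mux for HW-assisted I2C or GPIO ID for SW I2C, zero meaning no I2C line) is exactly what the ATOM_I2C_ID_CONFIG bitfield and its ACCESS union express. A minimal decoding sketch, illustrative only and written with plain masks so it does not depend on the endian-specific bitfield order:

#include <stdio.h>

/* Decode ucI2C_ConfigID by hand; the masks mirror ATOM_I2C_ID_CONFIG above. */
static void decode_i2c_config_id(unsigned char ucI2C_ConfigID)
{
	if (ucI2C_ConfigID == 0) {
		printf("no I2C line associated\n");
		return;
	}
	printf("%s I2C, engine %u, line mux / GPIO id %u\n",
	       (ucI2C_ConfigID & 0x80) ? "HW-assisted" : "SW-assisted",
	       (unsigned int)((ucI2C_ConfigID >> 4) & 0x7),
	       (unsigned int)(ucI2C_ConfigID & 0x0F));
}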
+typedef struct _ATOM_MODE_MISC_INFO +{ #if ATOM_BIG_ENDIAN - USHORT Reserved:6; - USHORT RGB888:1; - USHORT DoubleClock:1; - USHORT Interlace:1; - USHORT CompositeSync:1; - USHORT V_ReplicationBy2:1; - USHORT H_ReplicationBy2:1; - USHORT VerticalCutOff:1; - USHORT VSyncPolarity:1; /* 0=Active High, 1=Active Low */ - USHORT HSyncPolarity:1; /* 0=Active High, 1=Active Low */ - USHORT HorizontalCutOff:1; + USHORT Reserved:6; + USHORT RGB888:1; + USHORT DoubleClock:1; + USHORT Interlace:1; + USHORT CompositeSync:1; + USHORT V_ReplicationBy2:1; + USHORT H_ReplicationBy2:1; + USHORT VerticalCutOff:1; + USHORT VSyncPolarity:1; //0=Active High, 1=Active Low + USHORT HSyncPolarity:1; //0=Active High, 1=Active Low + USHORT HorizontalCutOff:1; #else - USHORT HorizontalCutOff:1; - USHORT HSyncPolarity:1; /* 0=Active High, 1=Active Low */ - USHORT VSyncPolarity:1; /* 0=Active High, 1=Active Low */ - USHORT VerticalCutOff:1; - USHORT H_ReplicationBy2:1; - USHORT V_ReplicationBy2:1; - USHORT CompositeSync:1; - USHORT Interlace:1; - USHORT DoubleClock:1; - USHORT RGB888:1; - USHORT Reserved:6; + USHORT HorizontalCutOff:1; + USHORT HSyncPolarity:1; //0=Active High, 1=Active Low + USHORT VSyncPolarity:1; //0=Active High, 1=Active Low + USHORT VerticalCutOff:1; + USHORT H_ReplicationBy2:1; + USHORT V_ReplicationBy2:1; + USHORT CompositeSync:1; + USHORT Interlace:1; + USHORT DoubleClock:1; + USHORT RGB888:1; + USHORT Reserved:6; #endif -} ATOM_MODE_MISC_INFO; - -typedef union _ATOM_MODE_MISC_INFO_ACCESS { - ATOM_MODE_MISC_INFO sbfAccess; - USHORT usAccess; -} ATOM_MODE_MISC_INFO_ACCESS; - +}ATOM_MODE_MISC_INFO; + +typedef union _ATOM_MODE_MISC_INFO_ACCESS +{ + ATOM_MODE_MISC_INFO sbfAccess; + USHORT usAccess; +}ATOM_MODE_MISC_INFO_ACCESS; + #else - -typedef union _ATOM_MODE_MISC_INFO_ACCESS { - USHORT usAccess; -} ATOM_MODE_MISC_INFO_ACCESS; - + +typedef union _ATOM_MODE_MISC_INFO_ACCESS +{ + USHORT usAccess; +}ATOM_MODE_MISC_INFO_ACCESS; + #endif -/* usModeMiscInfo- */ +// usModeMiscInfo- #define ATOM_H_CUTOFF 0x01 -#define ATOM_HSYNC_POLARITY 0x02 /* 0=Active High, 1=Active Low */ -#define ATOM_VSYNC_POLARITY 0x04 /* 0=Active High, 1=Active Low */ +#define ATOM_HSYNC_POLARITY 0x02 //0=Active High, 1=Active Low +#define ATOM_VSYNC_POLARITY 0x04 //0=Active High, 1=Active Low #define ATOM_V_CUTOFF 0x08 #define ATOM_H_REPLICATIONBY2 0x10 #define ATOM_V_REPLICATIONBY2 0x20 @@ -2099,10 +2581,10 @@ typedef union _ATOM_MODE_MISC_INFO_ACCESS { #define ATOM_DOUBLE_CLOCK_MODE 0x100 #define ATOM_RGB888_MODE 0x200 -/* usRefreshRate- */ +//usRefreshRate- #define ATOM_REFRESH_43 43 #define ATOM_REFRESH_47 47 -#define ATOM_REFRESH_56 56 +#define ATOM_REFRESH_56 56 #define ATOM_REFRESH_60 60 #define ATOM_REFRESH_65 65 #define ATOM_REFRESH_70 70 @@ -2110,192 +2592,233 @@ typedef union _ATOM_MODE_MISC_INFO_ACCESS { #define ATOM_REFRESH_75 75 #define ATOM_REFRESH_85 85 -/* ATOM_MODE_TIMING data are exactly the same as VESA timing data. */ -/* Translation from EDID to ATOM_MODE_TIMING, use the following formula. 
*/ -/* */ -/* VESA_HTOTAL = VESA_ACTIVE + 2* VESA_BORDER + VESA_BLANK */ -/* = EDID_HA + EDID_HBL */ -/* VESA_HDISP = VESA_ACTIVE = EDID_HA */ -/* VESA_HSYNC_START = VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH */ -/* = EDID_HA + EDID_HSO */ -/* VESA_HSYNC_WIDTH = VESA_HSYNC_TIME = EDID_HSPW */ -/* VESA_BORDER = EDID_BORDER */ - -/****************************************************************************/ -/* Structure used in SetCRTC_UsingDTDTimingTable */ -/****************************************************************************/ -typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS { - USHORT usH_Size; - USHORT usH_Blanking_Time; - USHORT usV_Size; - USHORT usV_Blanking_Time; - USHORT usH_SyncOffset; - USHORT usH_SyncWidth; - USHORT usV_SyncOffset; - USHORT usV_SyncWidth; - ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; - UCHAR ucH_Border; /* From DFP EDID */ - UCHAR ucV_Border; - UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ - UCHAR ucPadding[3]; -} SET_CRTC_USING_DTD_TIMING_PARAMETERS; - -/****************************************************************************/ -/* Structure used in SetCRTC_TimingTable */ -/****************************************************************************/ -typedef struct _SET_CRTC_TIMING_PARAMETERS { - USHORT usH_Total; /* horizontal total */ - USHORT usH_Disp; /* horizontal display */ - USHORT usH_SyncStart; /* horozontal Sync start */ - USHORT usH_SyncWidth; /* horizontal Sync width */ - USHORT usV_Total; /* vertical total */ - USHORT usV_Disp; /* vertical display */ - USHORT usV_SyncStart; /* vertical Sync start */ - USHORT usV_SyncWidth; /* vertical Sync width */ - ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; - UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */ - UCHAR ucOverscanRight; /* right */ - UCHAR ucOverscanLeft; /* left */ - UCHAR ucOverscanBottom; /* bottom */ - UCHAR ucOverscanTop; /* top */ - UCHAR ucReserved; -} SET_CRTC_TIMING_PARAMETERS; +// ATOM_MODE_TIMING data are exactly the same as VESA timing data. +// Translation from EDID to ATOM_MODE_TIMING, use the following formula. 
+// +// VESA_HTOTAL = VESA_ACTIVE + 2* VESA_BORDER + VESA_BLANK +// = EDID_HA + EDID_HBL +// VESA_HDISP = VESA_ACTIVE = EDID_HA +// VESA_HSYNC_START = VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH +// = EDID_HA + EDID_HSO +// VESA_HSYNC_WIDTH = VESA_HSYNC_TIME = EDID_HSPW +// VESA_BORDER = EDID_BORDER + +/****************************************************************************/ +// Structure used in SetCRTC_UsingDTDTimingTable +/****************************************************************************/ +typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS +{ + USHORT usH_Size; + USHORT usH_Blanking_Time; + USHORT usV_Size; + USHORT usV_Blanking_Time; + USHORT usH_SyncOffset; + USHORT usH_SyncWidth; + USHORT usV_SyncOffset; + USHORT usV_SyncWidth; + ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; + UCHAR ucH_Border; // From DFP EDID + UCHAR ucV_Border; + UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2 + UCHAR ucPadding[3]; +}SET_CRTC_USING_DTD_TIMING_PARAMETERS; + +/****************************************************************************/ +// Structure used in SetCRTC_TimingTable +/****************************************************************************/ +typedef struct _SET_CRTC_TIMING_PARAMETERS +{ + USHORT usH_Total; // horizontal total + USHORT usH_Disp; // horizontal display + USHORT usH_SyncStart; // horozontal Sync start + USHORT usH_SyncWidth; // horizontal Sync width + USHORT usV_Total; // vertical total + USHORT usV_Disp; // vertical display + USHORT usV_SyncStart; // vertical Sync start + USHORT usV_SyncWidth; // vertical Sync width + ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; + UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2 + UCHAR ucOverscanRight; // right + UCHAR ucOverscanLeft; // left + UCHAR ucOverscanBottom; // bottom + UCHAR ucOverscanTop; // top + UCHAR ucReserved; +}SET_CRTC_TIMING_PARAMETERS; #define SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION SET_CRTC_TIMING_PARAMETERS -/****************************************************************************/ -/* Structure used in StandardVESA_TimingTable */ -/* AnalogTV_InfoTable */ -/* ComponentVideoInfoTable */ -/****************************************************************************/ -typedef struct _ATOM_MODE_TIMING { - USHORT usCRTC_H_Total; - USHORT usCRTC_H_Disp; - USHORT usCRTC_H_SyncStart; - USHORT usCRTC_H_SyncWidth; - USHORT usCRTC_V_Total; - USHORT usCRTC_V_Disp; - USHORT usCRTC_V_SyncStart; - USHORT usCRTC_V_SyncWidth; - USHORT usPixelClock; /* in 10Khz unit */ - ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; - USHORT usCRTC_OverscanRight; - USHORT usCRTC_OverscanLeft; - USHORT usCRTC_OverscanBottom; - USHORT usCRTC_OverscanTop; - USHORT usReserve; - UCHAR ucInternalModeNumber; - UCHAR ucRefreshRate; -} ATOM_MODE_TIMING; - -typedef struct _ATOM_DTD_FORMAT { - USHORT usPixClk; - USHORT usHActive; - USHORT usHBlanking_Time; - USHORT usVActive; - USHORT usVBlanking_Time; - USHORT usHSyncOffset; - USHORT usHSyncWidth; - USHORT usVSyncOffset; - USHORT usVSyncWidth; - USHORT usImageHSize; - USHORT usImageVSize; - UCHAR ucHBorder; - UCHAR ucVBorder; - ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; - UCHAR ucInternalModeNumber; - UCHAR ucRefreshRate; -} ATOM_DTD_FORMAT; - -/****************************************************************************/ -/* Structure used in LVDS_InfoTable */ -/* * Need a document to describe this table */ -/****************************************************************************/ +/****************************************************************************/ +// Structure used in 
StandardVESA_TimingTable +// AnalogTV_InfoTable +// ComponentVideoInfoTable +/****************************************************************************/ +typedef struct _ATOM_MODE_TIMING +{ + USHORT usCRTC_H_Total; + USHORT usCRTC_H_Disp; + USHORT usCRTC_H_SyncStart; + USHORT usCRTC_H_SyncWidth; + USHORT usCRTC_V_Total; + USHORT usCRTC_V_Disp; + USHORT usCRTC_V_SyncStart; + USHORT usCRTC_V_SyncWidth; + USHORT usPixelClock; //in 10Khz unit + ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; + USHORT usCRTC_OverscanRight; + USHORT usCRTC_OverscanLeft; + USHORT usCRTC_OverscanBottom; + USHORT usCRTC_OverscanTop; + USHORT usReserve; + UCHAR ucInternalModeNumber; + UCHAR ucRefreshRate; +}ATOM_MODE_TIMING; + +typedef struct _ATOM_DTD_FORMAT +{ + USHORT usPixClk; + USHORT usHActive; + USHORT usHBlanking_Time; + USHORT usVActive; + USHORT usVBlanking_Time; + USHORT usHSyncOffset; + USHORT usHSyncWidth; + USHORT usVSyncOffset; + USHORT usVSyncWidth; + USHORT usImageHSize; + USHORT usImageVSize; + UCHAR ucHBorder; + UCHAR ucVBorder; + ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo; + UCHAR ucInternalModeNumber; + UCHAR ucRefreshRate; +}ATOM_DTD_FORMAT; + +/****************************************************************************/ +// Structure used in LVDS_InfoTable +// * Need a document to describe this table +/****************************************************************************/ #define SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004 #define SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008 #define SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010 #define SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020 -/* Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12. */ -/* Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL */ -#define LCDPANEL_CAP_READ_EDID 0x1 - -/* ucTableFormatRevision=1 */ -/* ucTableContentRevision=1 */ -typedef struct _ATOM_LVDS_INFO { - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_DTD_FORMAT sLCDTiming; - USHORT usModePatchTableOffset; - USHORT usSupportedRefreshRate; /* Refer to panel info table in ATOMBIOS extension Spec. */ - USHORT usOffDelayInMs; - UCHAR ucPowerSequenceDigOntoDEin10Ms; - UCHAR ucPowerSequenceDEtoBLOnin10Ms; - UCHAR ucLVDS_Misc; /* Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} */ - /* Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} */ - /* Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} */ - /* Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} */ - UCHAR ucPanelDefaultRefreshRate; - UCHAR ucPanelIdentification; - UCHAR ucSS_Id; -} ATOM_LVDS_INFO; - -/* ucTableFormatRevision=1 */ -/* ucTableContentRevision=2 */ -typedef struct _ATOM_LVDS_INFO_V12 { - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_DTD_FORMAT sLCDTiming; - USHORT usExtInfoTableOffset; - USHORT usSupportedRefreshRate; /* Refer to panel info table in ATOMBIOS extension Spec. 
*/ - USHORT usOffDelayInMs; - UCHAR ucPowerSequenceDigOntoDEin10Ms; - UCHAR ucPowerSequenceDEtoBLOnin10Ms; - UCHAR ucLVDS_Misc; /* Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} */ - /* Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} */ - /* Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} */ - /* Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} */ - UCHAR ucPanelDefaultRefreshRate; - UCHAR ucPanelIdentification; - UCHAR ucSS_Id; - USHORT usLCDVenderID; - USHORT usLCDProductID; - UCHAR ucLCDPanel_SpecialHandlingCap; - UCHAR ucPanelInfoSize; /* start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable */ - UCHAR ucReserved[2]; -} ATOM_LVDS_INFO_V12; +//ucTableFormatRevision=1 +//ucTableContentRevision=1 +typedef struct _ATOM_LVDS_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_DTD_FORMAT sLCDTiming; + USHORT usModePatchTableOffset; + USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec. + USHORT usOffDelayInMs; + UCHAR ucPowerSequenceDigOntoDEin10Ms; + UCHAR ucPowerSequenceDEtoBLOnin10Ms; + UCHAR ucLVDS_Misc; // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} + // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} + // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} + // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} + UCHAR ucPanelDefaultRefreshRate; + UCHAR ucPanelIdentification; + UCHAR ucSS_Id; +}ATOM_LVDS_INFO; + +//ucTableFormatRevision=1 +//ucTableContentRevision=2 +typedef struct _ATOM_LVDS_INFO_V12 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_DTD_FORMAT sLCDTiming; + USHORT usExtInfoTableOffset; + USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec. + USHORT usOffDelayInMs; + UCHAR ucPowerSequenceDigOntoDEin10Ms; + UCHAR ucPowerSequenceDEtoBLOnin10Ms; + UCHAR ucLVDS_Misc; // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} + // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} + // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} + // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} + UCHAR ucPanelDefaultRefreshRate; + UCHAR ucPanelIdentification; + UCHAR ucSS_Id; + USHORT usLCDVenderID; + USHORT usLCDProductID; + UCHAR ucLCDPanel_SpecialHandlingCap; + UCHAR ucPanelInfoSize; // start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable + UCHAR ucReserved[2]; +}ATOM_LVDS_INFO_V12; + +//Definitions for ucLCDPanel_SpecialHandlingCap: + +//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12. +//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL +#define LCDPANEL_CAP_READ_EDID 0x1 + +//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or EDP), this cap is set in ucLCDPanel_SpecialHandlingCap together +//with multiple supported refresh rates@usSupportedRefreshRate. This cap should not be set when only slow refresh rate is supported (static +//refresh rate switch by SW. This is only valid from ATOM_LVDS_INFO_V12 +#define LCDPANEL_CAP_DRR_SUPPORTED 0x2 + +//Use this cap bit for a quick reference whether an embadded panel (LCD1 ) is LVDS or eDP. 
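The LVDS tables above carry the panel timing as an ATOM_DTD_FORMAT, and the EDID-to-VESA formulas quoted earlier in this section describe how that maps onto the CRTC fields of ATOM_MODE_TIMING. Below is a horizontal-only sketch of that conversion, illustrative only, using stand-in structs and assuming a zero border; the vertical side follows the same pattern.

#include <stdint.h>

/* Minimal stand-ins for the horizontal fields of the two structures above. */
struct dtd_h {
	uint16_t usHActive;
	uint16_t usHBlanking_Time;
	uint16_t usHSyncOffset;
	uint16_t usHSyncWidth;
};

struct crtc_h {
	uint16_t usCRTC_H_Total;
	uint16_t usCRTC_H_Disp;
	uint16_t usCRTC_H_SyncStart;
	uint16_t usCRTC_H_SyncWidth;
};

/* VESA_HTOTAL = HA + HBL, VESA_HDISP = HA, VESA_HSYNC_START = HA + HSO,
 * VESA_HSYNC_WIDTH = HSPW (border taken as zero). */
static void dtd_to_crtc_h(const struct dtd_h *dtd, struct crtc_h *crtc)
{
	crtc->usCRTC_H_Disp      = dtd->usHActive;
	crtc->usCRTC_H_Total     = dtd->usHActive + dtd->usHBlanking_Time;
	crtc->usCRTC_H_SyncStart = dtd->usHActive + dtd->usHSyncOffset;
	crtc->usCRTC_H_SyncWidth = dtd->usHSyncWidth;
}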
+#define LCDPANEL_CAP_eDP 0x4 + + +//Color Bit Depth definition in EDID V1.4 @BYTE 14h +//Bit 6 5 4 + // 0 0 0 - Color bit depth is undefined + // 0 0 1 - 6 Bits per Primary Color + // 0 1 0 - 8 Bits per Primary Color + // 0 1 1 - 10 Bits per Primary Color + // 1 0 0 - 12 Bits per Primary Color + // 1 0 1 - 14 Bits per Primary Color + // 1 1 0 - 16 Bits per Primary Color + // 1 1 1 - Reserved + +#define PANEL_COLOR_BIT_DEPTH_MASK 0x70 + +// Bit7:{=0:Random Dithering disabled;1 Random Dithering enabled} +#define PANEL_RANDOM_DITHER 0x80 +#define PANEL_RANDOM_DITHER_MASK 0x80 + #define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12 -typedef struct _ATOM_PATCH_RECORD_MODE { - UCHAR ucRecordType; - USHORT usHDisp; - USHORT usVDisp; -} ATOM_PATCH_RECORD_MODE; +typedef struct _ATOM_PATCH_RECORD_MODE +{ + UCHAR ucRecordType; + USHORT usHDisp; + USHORT usVDisp; +}ATOM_PATCH_RECORD_MODE; -typedef struct _ATOM_LCD_RTS_RECORD { - UCHAR ucRecordType; - UCHAR ucRTSValue; -} ATOM_LCD_RTS_RECORD; +typedef struct _ATOM_LCD_RTS_RECORD +{ + UCHAR ucRecordType; + UCHAR ucRTSValue; +}ATOM_LCD_RTS_RECORD; -/* !! If the record below exits, it shoud always be the first record for easy use in command table!!! */ -typedef struct _ATOM_LCD_MODE_CONTROL_CAP { - UCHAR ucRecordType; - USHORT usLCDCap; -} ATOM_LCD_MODE_CONTROL_CAP; +//!! If the record below exits, it shoud always be the first record for easy use in command table!!! +// The record below is only used when LVDS_Info is present. From ATOM_LVDS_INFO_V12, use ucLCDPanel_SpecialHandlingCap instead. +typedef struct _ATOM_LCD_MODE_CONTROL_CAP +{ + UCHAR ucRecordType; + USHORT usLCDCap; +}ATOM_LCD_MODE_CONTROL_CAP; #define LCD_MODE_CAP_BL_OFF 1 #define LCD_MODE_CAP_CRTC_OFF 2 #define LCD_MODE_CAP_PANEL_OFF 4 -typedef struct _ATOM_FAKE_EDID_PATCH_RECORD { - UCHAR ucRecordType; - UCHAR ucFakeEDIDLength; - UCHAR ucFakeEDIDString[1]; /* This actually has ucFakeEdidLength elements. */ +typedef struct _ATOM_FAKE_EDID_PATCH_RECORD +{ + UCHAR ucRecordType; + UCHAR ucFakeEDIDLength; + UCHAR ucFakeEDIDString[1]; // This actually has ucFakeEdidLength elements. } ATOM_FAKE_EDID_PATCH_RECORD; -typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD { - UCHAR ucRecordType; - USHORT usHSize; - USHORT usVSize; -} ATOM_PANEL_RESOLUTION_PATCH_RECORD; +typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD +{ + UCHAR ucRecordType; + USHORT usHSize; + USHORT usVSize; +}ATOM_PANEL_RESOLUTION_PATCH_RECORD; #define LCD_MODE_PATCH_RECORD_MODE_TYPE 1 #define LCD_RTS_RECORD_TYPE 2 @@ -2306,21 +2829,25 @@ typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD { /****************************Spread Spectrum Info Table Definitions **********************/ -/* ucTableFormatRevision=1 */ -/* ucTableContentRevision=2 */ -typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT { - USHORT usSpreadSpectrumPercentage; - UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */ - UCHAR ucSS_Step; - UCHAR ucSS_Delay; - UCHAR ucSS_Id; - UCHAR ucRecommendedRef_Div; - UCHAR ucSS_Range; /* it was reserved for V11 */ -} ATOM_SPREAD_SPECTRUM_ASSIGNMENT; +//ucTableFormatRevision=1 +//ucTableContentRevision=2 +typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT +{ + USHORT usSpreadSpectrumPercentage; + UCHAR ucSpreadSpectrumType; //Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. 
Bit2=1: PCIE REFCLK SS =0 iternal PPLL SS Others:TBD + UCHAR ucSS_Step; + UCHAR ucSS_Delay; + UCHAR ucSS_Id; + UCHAR ucRecommendedRef_Div; + UCHAR ucSS_Range; //it was reserved for V11 +}ATOM_SPREAD_SPECTRUM_ASSIGNMENT; #define ATOM_MAX_SS_ENTRY 16 -#define ATOM_DP_SS_ID1 0x0f1 /* SS modulation freq=30k */ -#define ATOM_DP_SS_ID2 0x0f2 /* SS modulation freq=33k */ +#define ATOM_DP_SS_ID1 0x0f1 // SS ID for internal DP stream at 2.7Ghz. if ATOM_DP_SS_ID2 does not exist in SS_InfoTable, it is used for internal DP stream at 1.62Ghz as well. +#define ATOM_DP_SS_ID2 0x0f2 // SS ID for internal DP stream at 1.62Ghz, if it exists in SS_InfoTable. +#define ATOM_LVLINK_2700MHz_SS_ID 0x0f3 // SS ID for LV link translator chip at 2.7Ghz +#define ATOM_LVLINK_1620MHz_SS_ID 0x0f4 // SS ID for LV link translator chip at 1.62Ghz + #define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000 #define ATOM_SS_DOWN_SPREAD_MODE 0x00000000 @@ -2329,29 +2856,30 @@ typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT { #define ATOM_INTERNAL_SS_MASK 0x00000000 #define ATOM_EXTERNAL_SS_MASK 0x00000002 #define EXEC_SS_STEP_SIZE_SHIFT 2 -#define EXEC_SS_DELAY_SHIFT 4 +#define EXEC_SS_DELAY_SHIFT 4 #define ACTIVEDATA_TO_BLON_DELAY_SHIFT 4 -typedef struct _ATOM_SPREAD_SPECTRUM_INFO { - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_SPREAD_SPECTRUM_ASSIGNMENT asSS_Info[ATOM_MAX_SS_ENTRY]; -} ATOM_SPREAD_SPECTRUM_INFO; - -/****************************************************************************/ -/* Structure used in AnalogTV_InfoTable (Top level) */ -/****************************************************************************/ -/* ucTVBootUpDefaultStd definiton: */ - -/* ATOM_TV_NTSC 1 */ -/* ATOM_TV_NTSCJ 2 */ -/* ATOM_TV_PAL 3 */ -/* ATOM_TV_PALM 4 */ -/* ATOM_TV_PALCN 5 */ -/* ATOM_TV_PALN 6 */ -/* ATOM_TV_PAL60 7 */ -/* ATOM_TV_SECAM 8 */ - -/* ucTVSuppportedStd definition: */ +typedef struct _ATOM_SPREAD_SPECTRUM_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_SPREAD_SPECTRUM_ASSIGNMENT asSS_Info[ATOM_MAX_SS_ENTRY]; +}ATOM_SPREAD_SPECTRUM_INFO; + +/****************************************************************************/ +// Structure used in AnalogTV_InfoTable (Top level) +/****************************************************************************/ +//ucTVBootUpDefaultStd definiton: + +//ATOM_TV_NTSC 1 +//ATOM_TV_NTSCJ 2 +//ATOM_TV_PAL 3 +//ATOM_TV_PALM 4 +//ATOM_TV_PALCN 5 +//ATOM_TV_PALN 6 +//ATOM_TV_PAL60 7 +//ATOM_TV_SECAM 8 + +//ucTVSupportedStd definition: #define NTSC_SUPPORT 0x1 #define NTSCJ_SUPPORT 0x2 @@ -2364,46 +2892,58 @@ typedef struct _ATOM_SPREAD_SPECTRUM_INFO { #define MAX_SUPPORTED_TV_TIMING 2 -typedef struct _ATOM_ANALOG_TV_INFO { - ATOM_COMMON_TABLE_HEADER sHeader; - UCHAR ucTV_SupportedStandard; - UCHAR ucTV_BootUpDefaultStandard; - UCHAR ucExt_TV_ASIC_ID; - UCHAR ucExt_TV_ASIC_SlaveAddr; - /*ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING]; */ - ATOM_MODE_TIMING aModeTimings[MAX_SUPPORTED_TV_TIMING]; -} ATOM_ANALOG_TV_INFO; +typedef struct _ATOM_ANALOG_TV_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + UCHAR ucTV_SupportedStandard; + UCHAR ucTV_BootUpDefaultStandard; + UCHAR ucExt_TV_ASIC_ID; + UCHAR ucExt_TV_ASIC_SlaveAddr; + /*ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING];*/ + ATOM_MODE_TIMING aModeTimings[MAX_SUPPORTED_TV_TIMING]; +}ATOM_ANALOG_TV_INFO; #define MAX_SUPPORTED_TV_TIMING_V1_2 3 -typedef struct _ATOM_ANALOG_TV_INFO_V1_2 { - ATOM_COMMON_TABLE_HEADER sHeader; - UCHAR ucTV_SupportedStandard; - UCHAR ucTV_BootUpDefaultStandard; - UCHAR ucExt_TV_ASIC_ID; - UCHAR 
ucExt_TV_ASIC_SlaveAddr; - ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING]; -} ATOM_ANALOG_TV_INFO_V1_2; +typedef struct _ATOM_ANALOG_TV_INFO_V1_2 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + UCHAR ucTV_SupportedStandard; + UCHAR ucTV_BootUpDefaultStandard; + UCHAR ucExt_TV_ASIC_ID; + UCHAR ucExt_TV_ASIC_SlaveAddr; + ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING]; +}ATOM_ANALOG_TV_INFO_V1_2; + +typedef struct _ATOM_DPCD_INFO +{ + UCHAR ucRevisionNumber; //10h : Revision 1.0; 11h : Revision 1.1 + UCHAR ucMaxLinkRate; //06h : 1.62Gbps per lane; 0Ah = 2.7Gbps per lane + UCHAR ucMaxLane; //Bits 4:0 = MAX_LANE_COUNT (1/2/4). Bit 7 = ENHANCED_FRAME_CAP + UCHAR ucMaxDownSpread; //Bit0 = 0: No Down spread; Bit0 = 1: 0.5% (Subject to change according to DP spec) +}ATOM_DPCD_INFO; + +#define ATOM_DPCD_MAX_LANE_MASK 0x1F /**************************************************************************/ -/* VRAM usage and their definitions */ +// VRAM usage and their defintions -/* One chunk of VRAM used by Bios are for HWICON surfaces,EDID data. */ -/* Current Mode timing and Dail Timing and/or STD timing data EACH device. They can be broken down as below. */ -/* All the addresses below are the offsets from the frame buffer start.They all MUST be Dword aligned! */ -/* To driver: The physical address of this memory portion=mmFB_START(4K aligned)+ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR */ -/* To Bios: ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR->MM_INDEX */ +// One chunk of VRAM used by Bios are for HWICON surfaces,EDID data. +// Current Mode timing and Dail Timing and/or STD timing data EACH device. They can be broken down as below. +// All the addresses below are the offsets from the frame buffer start.They all MUST be Dword aligned! +// To driver: The physical address of this memory portion=mmFB_START(4K aligned)+ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR +// To Bios: ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR->MM_INDEX #ifndef VESA_MEMORY_IN_64K_BLOCK -#define VESA_MEMORY_IN_64K_BLOCK 0x100 /* 256*64K=16Mb (Max. VESA memory is 16Mb!) */ +#define VESA_MEMORY_IN_64K_BLOCK 0x100 //256*64K=16Mb (Max. VESA memory is 16Mb!) 
#endif -#define ATOM_EDID_RAW_DATASIZE 256 /* In Bytes */ -#define ATOM_HWICON_SURFACE_SIZE 4096 /* In Bytes */ +#define ATOM_EDID_RAW_DATASIZE 256 //In Bytes +#define ATOM_HWICON_SURFACE_SIZE 4096 //In Bytes #define ATOM_HWICON_INFOTABLE_SIZE 32 #define MAX_DTD_MODE_IN_VRAM 6 -#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) /* 28= (SIZEOF ATOM_DTD_FORMAT) */ -#define ATOM_STD_MODE_SUPPORT_TBL_SIZE (32*8) /* 32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT) */ +#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) //28= (SIZEOF ATOM_DTD_FORMAT) +#define ATOM_STD_MODE_SUPPORT_TBL_SIZE 32*8 //32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT) #define DFP_ENCODER_TYPE_OFFSET 0x80 #define DP_ENCODER_LANE_NUM_OFFSET 0x84 #define DP_ENCODER_LINK_RATE_OFFSET 0x88 @@ -2417,7 +2957,7 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 { #define ATOM_LCD1_EDID_ADDR (ATOM_CRT1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) #define ATOM_LCD1_DTD_MODE_TBL_ADDR (ATOM_LCD1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) -#define ATOM_LCD1_STD_MODE_TBL_ADDR (ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) +#define ATOM_LCD1_STD_MODE_TBL_ADDR (ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) #define ATOM_TV1_DTD_MODE_TBL_ADDR (ATOM_LCD1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) @@ -2431,13 +2971,13 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 { #define ATOM_LCD2_EDID_ADDR (ATOM_CRT2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) #define ATOM_LCD2_DTD_MODE_TBL_ADDR (ATOM_LCD2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) -#define ATOM_LCD2_STD_MODE_TBL_ADDR (ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) +#define ATOM_LCD2_STD_MODE_TBL_ADDR (ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) -#define ATOM_TV2_EDID_ADDR (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) -#define ATOM_TV2_DTD_MODE_TBL_ADDR (ATOM_TV2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) -#define ATOM_TV2_STD_MODE_TBL_ADDR (ATOM_TV2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) +#define ATOM_DFP6_EDID_ADDR (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) +#define ATOM_DFP6_DTD_MODE_TBL_ADDR (ATOM_DFP6_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) +#define ATOM_DFP6_STD_MODE_TBL_ADDR (ATOM_DFP6_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) -#define ATOM_DFP2_EDID_ADDR (ATOM_TV2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) +#define ATOM_DFP2_EDID_ADDR (ATOM_DFP6_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE) #define ATOM_DFP2_DTD_MODE_TBL_ADDR (ATOM_DFP2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) #define ATOM_DFP2_STD_MODE_TBL_ADDR (ATOM_DFP2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) @@ -2457,533 +2997,850 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 { #define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE) #define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE) -#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE) +#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE) -#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR + 256) -#define ATOM_STACK_STORAGE_END (ATOM_STACK_STORAGE_START + 512) +#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR+256) +#define ATOM_STACK_STORAGE_END ATOM_STACK_STORAGE_START+512 -/* The size below is in Kb! */ +//The size below is in Kb! 
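The VRAM usage defines above chain each display's scratch area as raw EDID, then a DTD mode table, then an STD mode table, with the next device's block starting right after; note that not every device carries all three pieces (TV1, for instance, begins directly with a DTD table in the defines above). A small sketch of that arithmetic, illustrative only, with the sizes copied from the defines so it stands alone:

#include <stdint.h>

#define EDID_RAW_DATASIZE         256        /* ATOM_EDID_RAW_DATASIZE         */
#define DTD_MODE_SUPPORT_TBL_SIZE (6 * 28)   /* ATOM_DTD_MODE_SUPPORT_TBL_SIZE */
#define STD_MODE_SUPPORT_TBL_SIZE (32 * 8)   /* ATOM_STD_MODE_SUPPORT_TBL_SIZE */

struct atom_vram_block {
	uint32_t edid_addr;   /* raw EDID for this device                  */
	uint32_t dtd_addr;    /* detailed (DTD) mode table                 */
	uint32_t std_addr;    /* standard (STD) mode table                 */
	uint32_t next_base;   /* where the next device's block would start */
};

static struct atom_vram_block atom_vram_block_at(uint32_t base)
{
	struct atom_vram_block b;

	b.edid_addr = base;
	b.dtd_addr  = b.edid_addr + EDID_RAW_DATASIZE;
	b.std_addr  = b.dtd_addr + DTD_MODE_SUPPORT_TBL_SIZE;
	b.next_base = b.std_addr + STD_MODE_SUPPORT_TBL_SIZE;
	return b;
}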
#define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC) - + #define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L #define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30 #define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1 #define ATOM_VRAM_BLOCK_NEEDS_RESERVATION 0x0 -/***********************************************************************************/ -/* Structure used in VRAM_UsageByFirmwareTable */ -/* Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm */ -/* at running time. */ -/* note2: From RV770, the memory is more than 32bit addressable, so we will change */ -/* ucTableFormatRevision=1,ucTableContentRevision=4, the strcuture remains */ -/* exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware */ -/* (in offset to start of memory address) is KB aligned instead of byte aligend. */ -/***********************************************************************************/ +/***********************************************************************************/ +// Structure used in VRAM_UsageByFirmwareTable +// Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm +// at running time. +// note2: From RV770, the memory is more than 32bit addressable, so we will change +// ucTableFormatRevision=1,ucTableContentRevision=4, the strcuture remains +// exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware +// (in offset to start of memory address) is KB aligned instead of byte aligend. +/***********************************************************************************/ +// Note3: +/* If we change usReserved to "usFBUsedbyDrvInKB", then to VBIOS this usFBUsedbyDrvInKB is a predefined, unchanged constant across VGA or non VGA adapter, +for CAIL, The size of FB access area is known, only thing missing is the Offset of FB Access area, so we can have: + +If (ulStartAddrUsedByFirmware!=0) +FBAccessAreaOffset= ulStartAddrUsedByFirmware - usFBUsedbyDrvInKB; +Reserved area has been claimed by VBIOS including this FB access area; CAIL doesn't need to reserve any extra area for this purpose +else //Non VGA case + if (FB_Size<=2Gb) + FBAccessAreaOffset= FB_Size - usFBUsedbyDrvInKB; + else + FBAccessAreaOffset= Aper_Size - usFBUsedbyDrvInKB + +CAIL needs to claim an reserved area defined by FBAccessAreaOffset and usFBUsedbyDrvInKB in non VGA case.*/ + #define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO 1 -typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO { - ULONG ulStartAddrUsedByFirmware; - USHORT usFirmwareUseInKb; - USHORT usReserved; -} ATOM_FIRMWARE_VRAM_RESERVE_INFO; +typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO +{ + ULONG ulStartAddrUsedByFirmware; + USHORT usFirmwareUseInKb; + USHORT usReserved; +}ATOM_FIRMWARE_VRAM_RESERVE_INFO; -typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE { - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_FIRMWARE_VRAM_RESERVE_INFO - asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO]; -} ATOM_VRAM_USAGE_BY_FIRMWARE; +typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_FIRMWARE_VRAM_RESERVE_INFO asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO]; +}ATOM_VRAM_USAGE_BY_FIRMWARE; -/****************************************************************************/ -/* Structure used in GPIO_Pin_LUTTable */ -/****************************************************************************/ -typedef struct _ATOM_GPIO_PIN_ASSIGNMENT { - USHORT usGpioPin_AIndex; - UCHAR ucGpioPinBitShift; - UCHAR ucGPIO_ID; -} 
ATOM_GPIO_PIN_ASSIGNMENT; +// change verion to 1.5, when allow driver to allocate the vram area for command table access. +typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5 +{ + ULONG ulStartAddrUsedByFirmware; + USHORT usFirmwareUseInKb; + USHORT usFBUsedByDrvInKb; +}ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5; -typedef struct _ATOM_GPIO_PIN_LUT { - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1]; -} ATOM_GPIO_PIN_LUT; +typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5 asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO]; +}ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5; + +/****************************************************************************/ +// Structure used in GPIO_Pin_LUTTable +/****************************************************************************/ +typedef struct _ATOM_GPIO_PIN_ASSIGNMENT +{ + USHORT usGpioPin_AIndex; + UCHAR ucGpioPinBitShift; + UCHAR ucGPIO_ID; +}ATOM_GPIO_PIN_ASSIGNMENT; -/****************************************************************************/ -/* Structure used in ComponentVideoInfoTable */ -/****************************************************************************/ +typedef struct _ATOM_GPIO_PIN_LUT +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1]; +}ATOM_GPIO_PIN_LUT; + +/****************************************************************************/ +// Structure used in ComponentVideoInfoTable +/****************************************************************************/ #define GPIO_PIN_ACTIVE_HIGH 0x1 #define MAX_SUPPORTED_CV_STANDARDS 5 -/* definitions for ATOM_D_INFO.ucSettings */ -#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK 0x1F /* [4:0] */ -#define ATOM_GPIO_SETTINGS_RESERVED_MASK 0x60 /* [6:5] = must be zeroed out */ -#define ATOM_GPIO_SETTINGS_ACTIVE_MASK 0x80 /* [7] */ +// definitions for ATOM_D_INFO.ucSettings +#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK 0x1F // [4:0] +#define ATOM_GPIO_SETTINGS_RESERVED_MASK 0x60 // [6:5] = must be zeroed out +#define ATOM_GPIO_SETTINGS_ACTIVE_MASK 0x80 // [7] -typedef struct _ATOM_GPIO_INFO { - USHORT usAOffset; - UCHAR ucSettings; - UCHAR ucReserved; -} ATOM_GPIO_INFO; +typedef struct _ATOM_GPIO_INFO +{ + USHORT usAOffset; + UCHAR ucSettings; + UCHAR ucReserved; +}ATOM_GPIO_INFO; -/* definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector) */ +// definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector) #define ATOM_CV_RESTRICT_FORMAT_SELECTION 0x2 -/* definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i */ -#define ATOM_GPIO_DEFAULT_MODE_EN 0x80 /* [7]; */ -#define ATOM_GPIO_SETTING_PERMODE_MASK 0x7F /* [6:0] */ - -/* definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode */ -/* Line 3 out put 5V. 
*/ -#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A 0x01 /* represent gpio 3 state for 16:9 */ -#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B 0x02 /* represent gpio 4 state for 16:9 */ -#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT 0x0 - -/* Line 3 out put 2.2V */ -#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04 /* represent gpio 3 state for 4:3 Letter box */ -#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08 /* represent gpio 4 state for 4:3 Letter box */ -#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2 - -/* Line 3 out put 0V */ -#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A 0x10 /* represent gpio 3 state for 4:3 */ -#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B 0x20 /* represent gpio 4 state for 4:3 */ -#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT 0x4 - -#define ATOM_CV_LINE3_ASPECTRATIO_MASK 0x3F /* bit [5:0] */ - -#define ATOM_CV_LINE3_ASPECTRATIO_EXIST 0x80 /* bit 7 */ - -/* GPIO bit index in gpio setting per mode value, also represend the block no. in gpio blocks. */ -#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A 3 /* bit 3 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode. */ -#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B 4 /* bit 4 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode. */ - -typedef struct _ATOM_COMPONENT_VIDEO_INFO { - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usMask_PinRegisterIndex; - USHORT usEN_PinRegisterIndex; - USHORT usY_PinRegisterIndex; - USHORT usA_PinRegisterIndex; - UCHAR ucBitShift; - UCHAR ucPinActiveState; /* ucPinActiveState: Bit0=1 active high, =0 active low */ - ATOM_DTD_FORMAT sReserved; /* must be zeroed out */ - UCHAR ucMiscInfo; - UCHAR uc480i; - UCHAR uc480p; - UCHAR uc720p; - UCHAR uc1080i; - UCHAR ucLetterBoxMode; - UCHAR ucReserved[3]; - UCHAR ucNumOfWbGpioBlocks; /* For Component video D-Connector support. If zere, NTSC type connector */ - ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS]; - ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS]; -} ATOM_COMPONENT_VIDEO_INFO; - -/* ucTableFormatRevision=2 */ -/* ucTableContentRevision=1 */ -typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21 { - ATOM_COMMON_TABLE_HEADER sHeader; - UCHAR ucMiscInfo; - UCHAR uc480i; - UCHAR uc480p; - UCHAR uc720p; - UCHAR uc1080i; - UCHAR ucReserved; - UCHAR ucLetterBoxMode; - UCHAR ucNumOfWbGpioBlocks; /* For Component video D-Connector support. If zere, NTSC type connector */ - ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS]; - ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS]; -} ATOM_COMPONENT_VIDEO_INFO_V21; +// definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i +#define ATOM_GPIO_DEFAULT_MODE_EN 0x80 //[7]; +#define ATOM_GPIO_SETTING_PERMODE_MASK 0x7F //[6:0] + +// definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode +//Line 3 out put 5V. 
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A 0x01 //represent gpio 3 state for 16:9 +#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B 0x02 //represent gpio 4 state for 16:9 +#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT 0x0 + +//Line 3 out put 2.2V +#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04 //represent gpio 3 state for 4:3 Letter box +#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08 //represent gpio 4 state for 4:3 Letter box +#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2 + +//Line 3 out put 0V +#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A 0x10 //represent gpio 3 state for 4:3 +#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B 0x20 //represent gpio 4 state for 4:3 +#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT 0x4 + +#define ATOM_CV_LINE3_ASPECTRATIO_MASK 0x3F // bit [5:0] + +#define ATOM_CV_LINE3_ASPECTRATIO_EXIST 0x80 //bit 7 + +//GPIO bit index in gpio setting per mode value, also represend the block no. in gpio blocks. +#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A 3 //bit 3 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode. +#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B 4 //bit 4 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode. + + +typedef struct _ATOM_COMPONENT_VIDEO_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usMask_PinRegisterIndex; + USHORT usEN_PinRegisterIndex; + USHORT usY_PinRegisterIndex; + USHORT usA_PinRegisterIndex; + UCHAR ucBitShift; + UCHAR ucPinActiveState; //ucPinActiveState: Bit0=1 active high, =0 active low + ATOM_DTD_FORMAT sReserved; // must be zeroed out + UCHAR ucMiscInfo; + UCHAR uc480i; + UCHAR uc480p; + UCHAR uc720p; + UCHAR uc1080i; + UCHAR ucLetterBoxMode; + UCHAR ucReserved[3]; + UCHAR ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zere, NTSC type connector + ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS]; + ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS]; +}ATOM_COMPONENT_VIDEO_INFO; + +//ucTableFormatRevision=2 +//ucTableContentRevision=1 +typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + UCHAR ucMiscInfo; + UCHAR uc480i; + UCHAR uc480p; + UCHAR uc720p; + UCHAR uc1080i; + UCHAR ucReserved; + UCHAR ucLetterBoxMode; + UCHAR ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zere, NTSC type connector + ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS]; + ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS]; +}ATOM_COMPONENT_VIDEO_INFO_V21; #define ATOM_COMPONENT_VIDEO_INFO_LAST ATOM_COMPONENT_VIDEO_INFO_V21 -/****************************************************************************/ -/* Structure used in object_InfoTable */ -/****************************************************************************/ -typedef struct _ATOM_OBJECT_HEADER { - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usDeviceSupport; - USHORT usConnectorObjectTableOffset; - USHORT usRouterObjectTableOffset; - USHORT usEncoderObjectTableOffset; - USHORT usProtectionObjectTableOffset; /* only available when Protection block is independent. */ - USHORT usDisplayPathTableOffset; -} ATOM_OBJECT_HEADER; - -typedef struct _ATOM_DISPLAY_OBJECT_PATH { - USHORT usDeviceTag; /* supported device */ - USHORT usSize; /* the size of ATOM_DISPLAY_OBJECT_PATH */ - USHORT usConnObjectId; /* Connector Object ID */ - USHORT usGPUObjectId; /* GPU ID */ - USHORT usGraphicObjIds[1]; /* 1st Encoder Obj source from GPU to last Graphic Obj destinate to connector. 
*/ -} ATOM_DISPLAY_OBJECT_PATH; - -typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE { - UCHAR ucNumOfDispPath; - UCHAR ucVersion; - UCHAR ucPadding[2]; - ATOM_DISPLAY_OBJECT_PATH asDispPath[1]; -} ATOM_DISPLAY_OBJECT_PATH_TABLE; - -typedef struct _ATOM_OBJECT /* each object has this structure */ -{ - USHORT usObjectID; - USHORT usSrcDstTableOffset; - USHORT usRecordOffset; /* this pointing to a bunch of records defined below */ - USHORT usReserved; -} ATOM_OBJECT; - -typedef struct _ATOM_OBJECT_TABLE /* Above 4 object table offset pointing to a bunch of objects all have this structure */ -{ - UCHAR ucNumberOfObjects; - UCHAR ucPadding[3]; - ATOM_OBJECT asObjects[1]; -} ATOM_OBJECT_TABLE; - -typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT /* usSrcDstTableOffset pointing to this structure */ -{ - UCHAR ucNumberOfSrc; - USHORT usSrcObjectID[1]; - UCHAR ucNumberOfDst; - USHORT usDstObjectID[1]; -} ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT; - -/* Related definitions, all records are differnt but they have a commond header */ -typedef struct _ATOM_COMMON_RECORD_HEADER { - UCHAR ucRecordType; /* An emun to indicate the record type */ - UCHAR ucRecordSize; /* The size of the whole record in byte */ -} ATOM_COMMON_RECORD_HEADER; - -#define ATOM_I2C_RECORD_TYPE 1 +/****************************************************************************/ +// Structure used in object_InfoTable +/****************************************************************************/ +typedef struct _ATOM_OBJECT_HEADER +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usDeviceSupport; + USHORT usConnectorObjectTableOffset; + USHORT usRouterObjectTableOffset; + USHORT usEncoderObjectTableOffset; + USHORT usProtectionObjectTableOffset; //only available when Protection block is independent. + USHORT usDisplayPathTableOffset; +}ATOM_OBJECT_HEADER; + +typedef struct _ATOM_OBJECT_HEADER_V3 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usDeviceSupport; + USHORT usConnectorObjectTableOffset; + USHORT usRouterObjectTableOffset; + USHORT usEncoderObjectTableOffset; + USHORT usProtectionObjectTableOffset; //only available when Protection block is independent. + USHORT usDisplayPathTableOffset; + USHORT usMiscObjectTableOffset; +}ATOM_OBJECT_HEADER_V3; + +typedef struct _ATOM_DISPLAY_OBJECT_PATH +{ + USHORT usDeviceTag; //supported device + USHORT usSize; //the size of ATOM_DISPLAY_OBJECT_PATH + USHORT usConnObjectId; //Connector Object ID + USHORT usGPUObjectId; //GPU ID + USHORT usGraphicObjIds[1]; //1st Encoder Obj source from GPU to last Graphic Obj destinate to connector. 
+}ATOM_DISPLAY_OBJECT_PATH; + +typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE +{ + UCHAR ucNumOfDispPath; + UCHAR ucVersion; + UCHAR ucPadding[2]; + ATOM_DISPLAY_OBJECT_PATH asDispPath[1]; +}ATOM_DISPLAY_OBJECT_PATH_TABLE; + + +typedef struct _ATOM_OBJECT //each object has this structure +{ + USHORT usObjectID; + USHORT usSrcDstTableOffset; + USHORT usRecordOffset; //this pointing to a bunch of records defined below + USHORT usReserved; +}ATOM_OBJECT; + +typedef struct _ATOM_OBJECT_TABLE //Above 4 object table offset pointing to a bunch of objects all have this structure +{ + UCHAR ucNumberOfObjects; + UCHAR ucPadding[3]; + ATOM_OBJECT asObjects[1]; +}ATOM_OBJECT_TABLE; + +typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT //usSrcDstTableOffset pointing to this structure +{ + UCHAR ucNumberOfSrc; + USHORT usSrcObjectID[1]; + UCHAR ucNumberOfDst; + USHORT usDstObjectID[1]; +}ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT; + + +//Two definitions below are for OPM on MXM module designs + +#define EXT_HPDPIN_LUTINDEX_0 0 +#define EXT_HPDPIN_LUTINDEX_1 1 +#define EXT_HPDPIN_LUTINDEX_2 2 +#define EXT_HPDPIN_LUTINDEX_3 3 +#define EXT_HPDPIN_LUTINDEX_4 4 +#define EXT_HPDPIN_LUTINDEX_5 5 +#define EXT_HPDPIN_LUTINDEX_6 6 +#define EXT_HPDPIN_LUTINDEX_7 7 +#define MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES (EXT_HPDPIN_LUTINDEX_7+1) + +#define EXT_AUXDDC_LUTINDEX_0 0 +#define EXT_AUXDDC_LUTINDEX_1 1 +#define EXT_AUXDDC_LUTINDEX_2 2 +#define EXT_AUXDDC_LUTINDEX_3 3 +#define EXT_AUXDDC_LUTINDEX_4 4 +#define EXT_AUXDDC_LUTINDEX_5 5 +#define EXT_AUXDDC_LUTINDEX_6 6 +#define EXT_AUXDDC_LUTINDEX_7 7 +#define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES (EXT_AUXDDC_LUTINDEX_7+1) + +typedef struct _EXT_DISPLAY_PATH +{ + USHORT usDeviceTag; //A bit vector to show what devices are supported + USHORT usDeviceACPIEnum; //16bit device ACPI id. + USHORT usDeviceConnector; //A physical connector for displays to plug in, using object connector definitions + UCHAR ucExtAUXDDCLutIndex; //An index into external AUX/DDC channel LUT + UCHAR ucExtHPDPINLutIndex; //An index into external HPD pin LUT + USHORT usExtEncoderObjId; //external encoder object id + USHORT usReserved[3]; +}EXT_DISPLAY_PATH; + +#define NUMBER_OF_UCHAR_FOR_GUID 16 +#define MAX_NUMBER_OF_EXT_DISPLAY_PATH 7 + +typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + UCHAR ucGuid [NUMBER_OF_UCHAR_FOR_GUID]; // a GUID is a 16 byte long string + EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries. + UCHAR ucChecksum; // a simple Checksum of the sum of whole structure equal to 0x0. 
+ UCHAR Reserved [7]; // for potential expansion +}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO; + +//Related definitions, all records are differnt but they have a commond header +typedef struct _ATOM_COMMON_RECORD_HEADER +{ + UCHAR ucRecordType; //An emun to indicate the record type + UCHAR ucRecordSize; //The size of the whole record in byte +}ATOM_COMMON_RECORD_HEADER; + + +#define ATOM_I2C_RECORD_TYPE 1 #define ATOM_HPD_INT_RECORD_TYPE 2 #define ATOM_OUTPUT_PROTECTION_RECORD_TYPE 3 #define ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE 4 -#define ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE 5 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */ -#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE 6 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */ +#define ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE 5 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE +#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE 6 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE #define ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD_TYPE 7 -#define ATOM_JTAG_RECORD_TYPE 8 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */ +#define ATOM_JTAG_RECORD_TYPE 8 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE #define ATOM_OBJECT_GPIO_CNTL_RECORD_TYPE 9 #define ATOM_ENCODER_DVO_CF_RECORD_TYPE 10 #define ATOM_CONNECTOR_CF_RECORD_TYPE 11 #define ATOM_CONNECTOR_HARDCODE_DTD_RECORD_TYPE 12 #define ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE 13 -#define ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE 14 -#define ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE 15 - -/* Must be updated when new record type is added,equal to that record definition! */ -#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_CF_RECORD_TYPE - -typedef struct _ATOM_I2C_RECORD { - ATOM_COMMON_RECORD_HEADER sheader; - ATOM_I2C_ID_CONFIG sucI2cId; - UCHAR ucI2CAddr; /* The slave address, it's 0 when the record is attached to connector for DDC */ -} ATOM_I2C_RECORD; - -typedef struct _ATOM_HPD_INT_RECORD { - ATOM_COMMON_RECORD_HEADER sheader; - UCHAR ucHPDIntGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */ - UCHAR ucPlugged_PinState; -} ATOM_HPD_INT_RECORD; - -typedef struct _ATOM_OUTPUT_PROTECTION_RECORD { - ATOM_COMMON_RECORD_HEADER sheader; - UCHAR ucProtectionFlag; - UCHAR ucReserved; -} ATOM_OUTPUT_PROTECTION_RECORD; - -typedef struct _ATOM_CONNECTOR_DEVICE_TAG { - ULONG ulACPIDeviceEnum; /* Reserved for now */ - USHORT usDeviceID; /* This Id is same as "ATOM_DEVICE_XXX_SUPPORT" */ - USHORT usPadding; -} ATOM_CONNECTOR_DEVICE_TAG; - -typedef struct _ATOM_CONNECTOR_DEVICE_TAG_RECORD { - ATOM_COMMON_RECORD_HEADER sheader; - UCHAR ucNumberOfDevice; - UCHAR ucReserved; - ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1]; /* This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation */ -} ATOM_CONNECTOR_DEVICE_TAG_RECORD; - -typedef struct _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD { - ATOM_COMMON_RECORD_HEADER sheader; - UCHAR ucConfigGPIOID; - UCHAR ucConfigGPIOState; /* Set to 1 when it's active high to enable external flow in */ - UCHAR ucFlowinGPIPID; - UCHAR ucExtInGPIPID; -} ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD; - -typedef struct _ATOM_ENCODER_FPGA_CONTROL_RECORD { - ATOM_COMMON_RECORD_HEADER sheader; - UCHAR ucCTL1GPIO_ID; - UCHAR ucCTL1GPIOState; /* Set to 1 when it's active high */ - UCHAR ucCTL2GPIO_ID; - UCHAR ucCTL2GPIOState; /* Set to 1 when it's active high */ - UCHAR ucCTL3GPIO_ID; - UCHAR ucCTL3GPIOState; /* Set to 1 when it's active high */ - UCHAR ucCTLFPGA_IN_ID; - UCHAR ucPadding[3]; -} ATOM_ENCODER_FPGA_CONTROL_RECORD; - -typedef struct 
_ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD { - ATOM_COMMON_RECORD_HEADER sheader; - UCHAR ucGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */ - UCHAR ucTVActiveState; /* Indicating when the pin==0 or 1 when TV is connected */ -} ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD; - -typedef struct _ATOM_JTAG_RECORD { - ATOM_COMMON_RECORD_HEADER sheader; - UCHAR ucTMSGPIO_ID; - UCHAR ucTMSGPIOState; /* Set to 1 when it's active high */ - UCHAR ucTCKGPIO_ID; - UCHAR ucTCKGPIOState; /* Set to 1 when it's active high */ - UCHAR ucTDOGPIO_ID; - UCHAR ucTDOGPIOState; /* Set to 1 when it's active high */ - UCHAR ucTDIGPIO_ID; - UCHAR ucTDIGPIOState; /* Set to 1 when it's active high */ - UCHAR ucPadding[2]; -} ATOM_JTAG_RECORD; - -/* The following generic object gpio pin control record type will replace JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD above gradually */ -typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR { - UCHAR ucGPIOID; /* GPIO_ID, find the corresponding ID in GPIO_LUT table */ - UCHAR ucGPIO_PinState; /* Pin state showing how to set-up the pin */ -} ATOM_GPIO_PIN_CONTROL_PAIR; - -typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD { - ATOM_COMMON_RECORD_HEADER sheader; - UCHAR ucFlags; /* Future expnadibility */ - UCHAR ucNumberOfPins; /* Number of GPIO pins used to control the object */ - ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1]; /* the real gpio pin pair determined by number of pins ucNumberOfPins */ -} ATOM_OBJECT_GPIO_CNTL_RECORD; - -/* Definitions for GPIO pin state */ +#define ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE 14 +#define ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE 15 +#define ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE 16 //This is for the case when connectors are not known to object table +#define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE 17 //This is for the case when connectors are not known to object table +#define ATOM_OBJECT_LINK_RECORD_TYPE 18 //Once this record is present under one object, it indicats the oobject is linked to another obj described by the record +#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19 + + +//Must be updated when new record type is added,equal to that record definition! 
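Every record hanging off an ATOM_OBJECT begins with the common ATOM_COMMON_RECORD_HEADER above, so a parser can hop from record to record by adding ucRecordSize and stop once the type falls outside the known range. A minimal sketch of that walk against the definitions above (it assumes a little-endian host and a terminating zero ucRecordType; the base pointer and the handle_record callback are illustrative, not part of this header):

static void walk_object_records(const unsigned char *base, const ATOM_OBJECT *obj,
                                void (*handle_record)(const ATOM_COMMON_RECORD_HEADER *rec))
{
        /* usRecordOffset is taken relative to whatever table base the caller
         * resolved earlier; byte-order conversion is omitted for brevity. */
        const ATOM_COMMON_RECORD_HEADER *rec =
                (const ATOM_COMMON_RECORD_HEADER *)(base + obj->usRecordOffset);

        while (rec->ucRecordType > 0 &&
               rec->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER &&
               rec->ucRecordSize >= sizeof(*rec)) {
                handle_record(rec);     /* e.g. pick out ATOM_I2C_RECORD_TYPE entries */
                rec = (const ATOM_COMMON_RECORD_HEADER *)((const unsigned char *)rec +
                                                          rec->ucRecordSize);
        }
}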
+#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE + +typedef struct _ATOM_I2C_RECORD +{ + ATOM_COMMON_RECORD_HEADER sheader; + ATOM_I2C_ID_CONFIG sucI2cId; + UCHAR ucI2CAddr; //The slave address, it's 0 when the record is attached to connector for DDC +}ATOM_I2C_RECORD; + +typedef struct _ATOM_HPD_INT_RECORD +{ + ATOM_COMMON_RECORD_HEADER sheader; + UCHAR ucHPDIntGPIOID; //Corresponding block in GPIO_PIN_INFO table gives the pin info + UCHAR ucPlugged_PinState; +}ATOM_HPD_INT_RECORD; + + +typedef struct _ATOM_OUTPUT_PROTECTION_RECORD +{ + ATOM_COMMON_RECORD_HEADER sheader; + UCHAR ucProtectionFlag; + UCHAR ucReserved; +}ATOM_OUTPUT_PROTECTION_RECORD; + +typedef struct _ATOM_CONNECTOR_DEVICE_TAG +{ + ULONG ulACPIDeviceEnum; //Reserved for now + USHORT usDeviceID; //This Id is same as "ATOM_DEVICE_XXX_SUPPORT" + USHORT usPadding; +}ATOM_CONNECTOR_DEVICE_TAG; + +typedef struct _ATOM_CONNECTOR_DEVICE_TAG_RECORD +{ + ATOM_COMMON_RECORD_HEADER sheader; + UCHAR ucNumberOfDevice; + UCHAR ucReserved; + ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1]; //This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation +}ATOM_CONNECTOR_DEVICE_TAG_RECORD; + + +typedef struct _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD +{ + ATOM_COMMON_RECORD_HEADER sheader; + UCHAR ucConfigGPIOID; + UCHAR ucConfigGPIOState; //Set to 1 when it's active high to enable external flow in + UCHAR ucFlowinGPIPID; + UCHAR ucExtInGPIPID; +}ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD; + +typedef struct _ATOM_ENCODER_FPGA_CONTROL_RECORD +{ + ATOM_COMMON_RECORD_HEADER sheader; + UCHAR ucCTL1GPIO_ID; + UCHAR ucCTL1GPIOState; //Set to 1 when it's active high + UCHAR ucCTL2GPIO_ID; + UCHAR ucCTL2GPIOState; //Set to 1 when it's active high + UCHAR ucCTL3GPIO_ID; + UCHAR ucCTL3GPIOState; //Set to 1 when it's active high + UCHAR ucCTLFPGA_IN_ID; + UCHAR ucPadding[3]; +}ATOM_ENCODER_FPGA_CONTROL_RECORD; + +typedef struct _ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD +{ + ATOM_COMMON_RECORD_HEADER sheader; + UCHAR ucGPIOID; //Corresponding block in GPIO_PIN_INFO table gives the pin info + UCHAR ucTVActiveState; //Indicating when the pin==0 or 1 when TV is connected +}ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD; + +typedef struct _ATOM_JTAG_RECORD +{ + ATOM_COMMON_RECORD_HEADER sheader; + UCHAR ucTMSGPIO_ID; + UCHAR ucTMSGPIOState; //Set to 1 when it's active high + UCHAR ucTCKGPIO_ID; + UCHAR ucTCKGPIOState; //Set to 1 when it's active high + UCHAR ucTDOGPIO_ID; + UCHAR ucTDOGPIOState; //Set to 1 when it's active high + UCHAR ucTDIGPIO_ID; + UCHAR ucTDIGPIOState; //Set to 1 when it's active high + UCHAR ucPadding[2]; +}ATOM_JTAG_RECORD; + + +//The following generic object gpio pin control record type will replace JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD above gradually +typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR +{ + UCHAR ucGPIOID; // GPIO_ID, find the corresponding ID in GPIO_LUT table + UCHAR ucGPIO_PinState; // Pin state showing how to set-up the pin +}ATOM_GPIO_PIN_CONTROL_PAIR; + +typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD +{ + ATOM_COMMON_RECORD_HEADER sheader; + UCHAR ucFlags; // Future expnadibility + UCHAR ucNumberOfPins; // Number of GPIO pins used to control the object + ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1]; // the real gpio pin pair determined by number of pins ucNumberOfPins +}ATOM_OBJECT_GPIO_CNTL_RECORD; + +//Definitions for GPIO pin state #define GPIO_PIN_TYPE_INPUT 0x00 #define GPIO_PIN_TYPE_OUTPUT 0x10 #define GPIO_PIN_TYPE_HW_CONTROL 0x20 -/* For GPIO_PIN_TYPE_OUTPUT the following is defined */ +//For 
GPIO_PIN_TYPE_OUTPUT the following is defined #define GPIO_PIN_OUTPUT_STATE_MASK 0x01 #define GPIO_PIN_OUTPUT_STATE_SHIFT 0 #define GPIO_PIN_STATE_ACTIVE_LOW 0x0 #define GPIO_PIN_STATE_ACTIVE_HIGH 0x1 -typedef struct _ATOM_ENCODER_DVO_CF_RECORD { - ATOM_COMMON_RECORD_HEADER sheader; - ULONG ulStrengthControl; /* DVOA strength control for CF */ - UCHAR ucPadding[2]; -} ATOM_ENCODER_DVO_CF_RECORD; +// Indexes to GPIO array in GLSync record +#define ATOM_GPIO_INDEX_GLSYNC_REFCLK 0 +#define ATOM_GPIO_INDEX_GLSYNC_HSYNC 1 +#define ATOM_GPIO_INDEX_GLSYNC_VSYNC 2 +#define ATOM_GPIO_INDEX_GLSYNC_SWAP_REQ 3 +#define ATOM_GPIO_INDEX_GLSYNC_SWAP_GNT 4 +#define ATOM_GPIO_INDEX_GLSYNC_INTERRUPT 5 +#define ATOM_GPIO_INDEX_GLSYNC_V_RESET 6 +#define ATOM_GPIO_INDEX_GLSYNC_MAX 7 + +typedef struct _ATOM_ENCODER_DVO_CF_RECORD +{ + ATOM_COMMON_RECORD_HEADER sheader; + ULONG ulStrengthControl; // DVOA strength control for CF + UCHAR ucPadding[2]; +}ATOM_ENCODER_DVO_CF_RECORD; -/* value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle */ +// value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle #define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1 #define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2 -typedef struct _ATOM_CONNECTOR_CF_RECORD { - ATOM_COMMON_RECORD_HEADER sheader; - USHORT usMaxPixClk; - UCHAR ucFlowCntlGpioId; - UCHAR ucSwapCntlGpioId; - UCHAR ucConnectedDvoBundle; - UCHAR ucPadding; -} ATOM_CONNECTOR_CF_RECORD; - -typedef struct _ATOM_CONNECTOR_HARDCODE_DTD_RECORD { - ATOM_COMMON_RECORD_HEADER sheader; - ATOM_DTD_FORMAT asTiming; -} ATOM_CONNECTOR_HARDCODE_DTD_RECORD; - -typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD { - ATOM_COMMON_RECORD_HEADER sheader; /* ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE */ - UCHAR ucSubConnectorType; /* CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A */ - UCHAR ucReserved; -} ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD; - -typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD { - ATOM_COMMON_RECORD_HEADER sheader; - UCHAR ucMuxType; /* decide the number of ucMuxState, =0, no pin state, =1: single state with complement, >1: multiple state */ - UCHAR ucMuxControlPin; - UCHAR ucMuxState[2]; /* for alligment purpose */ -} ATOM_ROUTER_DDC_PATH_SELECT_RECORD; - -typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD { - ATOM_COMMON_RECORD_HEADER sheader; - UCHAR ucMuxType; - UCHAR ucMuxControlPin; - UCHAR ucMuxState[2]; /* for alligment purpose */ -} ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD; - -/* define ucMuxType */ +typedef struct _ATOM_CONNECTOR_CF_RECORD +{ + ATOM_COMMON_RECORD_HEADER sheader; + USHORT usMaxPixClk; + UCHAR ucFlowCntlGpioId; + UCHAR ucSwapCntlGpioId; + UCHAR ucConnectedDvoBundle; + UCHAR ucPadding; +}ATOM_CONNECTOR_CF_RECORD; + +typedef struct _ATOM_CONNECTOR_HARDCODE_DTD_RECORD +{ + ATOM_COMMON_RECORD_HEADER sheader; + ATOM_DTD_FORMAT asTiming; +}ATOM_CONNECTOR_HARDCODE_DTD_RECORD; + +typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD +{ + ATOM_COMMON_RECORD_HEADER sheader; //ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE + UCHAR ucSubConnectorType; //CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A + UCHAR ucReserved; +}ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD; + + +typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD +{ + ATOM_COMMON_RECORD_HEADER sheader; + UCHAR ucMuxType; //decide the number of ucMuxState, =0, no pin state, =1: single state with complement, >1: multiple state + UCHAR ucMuxControlPin; + UCHAR ucMuxState[2]; //for alligment purpose 
+}ATOM_ROUTER_DDC_PATH_SELECT_RECORD; + +typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD +{ + ATOM_COMMON_RECORD_HEADER sheader; + UCHAR ucMuxType; + UCHAR ucMuxControlPin; + UCHAR ucMuxState[2]; //for alligment purpose +}ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD; + +// define ucMuxType #define ATOM_ROUTER_MUX_PIN_STATE_MASK 0x0f #define ATOM_ROUTER_MUX_PIN_SINGLE_STATE_COMPLEMENT 0x01 -/****************************************************************************/ -/* ASIC voltage data table */ -/****************************************************************************/ -typedef struct _ATOM_VOLTAGE_INFO_HEADER { - USHORT usVDDCBaseLevel; /* In number of 50mv unit */ - USHORT usReserved; /* For possible extension table offset */ - UCHAR ucNumOfVoltageEntries; - UCHAR ucBytesPerVoltageEntry; - UCHAR ucVoltageStep; /* Indicating in how many mv increament is one step, 0.5mv unit */ - UCHAR ucDefaultVoltageEntry; - UCHAR ucVoltageControlI2cLine; - UCHAR ucVoltageControlAddress; - UCHAR ucVoltageControlOffset; -} ATOM_VOLTAGE_INFO_HEADER; - -typedef struct _ATOM_VOLTAGE_INFO { - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_VOLTAGE_INFO_HEADER viHeader; - UCHAR ucVoltageEntries[64]; /* 64 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries*ucBytesPerVoltageEntry */ -} ATOM_VOLTAGE_INFO; - -typedef struct _ATOM_VOLTAGE_FORMULA { - USHORT usVoltageBaseLevel; /* In number of 1mv unit */ - USHORT usVoltageStep; /* Indicating in how many mv increament is one step, 1mv unit */ - UCHAR ucNumOfVoltageEntries; /* Number of Voltage Entry, which indicate max Voltage */ - UCHAR ucFlag; /* bit0=0 :step is 1mv =1 0.5mv */ - UCHAR ucBaseVID; /* if there is no lookup table, VID= BaseVID + ( Vol - BaseLevle ) /VoltageStep */ - UCHAR ucReserved; - UCHAR ucVIDAdjustEntries[32]; /* 32 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries */ -} ATOM_VOLTAGE_FORMULA; - -typedef struct _ATOM_VOLTAGE_CONTROL { - UCHAR ucVoltageControlId; /* Indicate it is controlled by I2C or GPIO or HW state machine */ - UCHAR ucVoltageControlI2cLine; - UCHAR ucVoltageControlAddress; - UCHAR ucVoltageControlOffset; - USHORT usGpioPin_AIndex; /* GPIO_PAD register index */ - UCHAR ucGpioPinBitShift[9]; /* at most 8 pin support 255 VIDs, termintate with 0xff */ - UCHAR ucReserved; -} ATOM_VOLTAGE_CONTROL; - -/* Define ucVoltageControlId */ +typedef struct _ATOM_CONNECTOR_HPDPIN_LUT_RECORD //record for ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE +{ + ATOM_COMMON_RECORD_HEADER sheader; + UCHAR ucHPDPINMap[MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES]; //An fixed size array which maps external pins to internal GPIO_PIN_INFO table +}ATOM_CONNECTOR_HPDPIN_LUT_RECORD; + +typedef struct _ATOM_CONNECTOR_AUXDDC_LUT_RECORD //record for ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE +{ + ATOM_COMMON_RECORD_HEADER sheader; + ATOM_I2C_ID_CONFIG ucAUXDDCMap[MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES]; //An fixed size array which maps external pins to internal DDC ID +}ATOM_CONNECTOR_AUXDDC_LUT_RECORD; + +typedef struct _ATOM_OBJECT_LINK_RECORD +{ + ATOM_COMMON_RECORD_HEADER sheader; + USHORT usObjectID; //could be connector, encorder or other object in object.h +}ATOM_OBJECT_LINK_RECORD; + +typedef struct _ATOM_CONNECTOR_REMOTE_CAP_RECORD +{ + ATOM_COMMON_RECORD_HEADER sheader; + USHORT usReserved; +}ATOM_CONNECTOR_REMOTE_CAP_RECORD; + +/****************************************************************************/ +// ASIC voltage data table 
+/****************************************************************************/ +typedef struct _ATOM_VOLTAGE_INFO_HEADER +{ + USHORT usVDDCBaseLevel; //In number of 50mv unit + USHORT usReserved; //For possible extension table offset + UCHAR ucNumOfVoltageEntries; + UCHAR ucBytesPerVoltageEntry; + UCHAR ucVoltageStep; //Indicating in how many mv increament is one step, 0.5mv unit + UCHAR ucDefaultVoltageEntry; + UCHAR ucVoltageControlI2cLine; + UCHAR ucVoltageControlAddress; + UCHAR ucVoltageControlOffset; +}ATOM_VOLTAGE_INFO_HEADER; + +typedef struct _ATOM_VOLTAGE_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_VOLTAGE_INFO_HEADER viHeader; + UCHAR ucVoltageEntries[64]; //64 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries*ucBytesPerVoltageEntry +}ATOM_VOLTAGE_INFO; + + +typedef struct _ATOM_VOLTAGE_FORMULA +{ + USHORT usVoltageBaseLevel; // In number of 1mv unit + USHORT usVoltageStep; // Indicating in how many mv increament is one step, 1mv unit + UCHAR ucNumOfVoltageEntries; // Number of Voltage Entry, which indicate max Voltage + UCHAR ucFlag; // bit0=0 :step is 1mv =1 0.5mv + UCHAR ucBaseVID; // if there is no lookup table, VID= BaseVID + ( Vol - BaseLevle ) /VoltageStep + UCHAR ucReserved; + UCHAR ucVIDAdjustEntries[32]; // 32 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries +}ATOM_VOLTAGE_FORMULA; + +typedef struct _VOLTAGE_LUT_ENTRY +{ + USHORT usVoltageCode; // The Voltage ID, either GPIO or I2C code + USHORT usVoltageValue; // The corresponding Voltage Value, in mV +}VOLTAGE_LUT_ENTRY; + +typedef struct _ATOM_VOLTAGE_FORMULA_V2 +{ + UCHAR ucNumOfVoltageEntries; // Number of Voltage Entry, which indicate max Voltage + UCHAR ucReserved[3]; + VOLTAGE_LUT_ENTRY asVIDAdjustEntries[32];// 32 is for allocation, the actual number of entries is in ucNumOfVoltageEntries +}ATOM_VOLTAGE_FORMULA_V2; + +typedef struct _ATOM_VOLTAGE_CONTROL +{ + UCHAR ucVoltageControlId; //Indicate it is controlled by I2C or GPIO or HW state machine + UCHAR ucVoltageControlI2cLine; + UCHAR ucVoltageControlAddress; + UCHAR ucVoltageControlOffset; + USHORT usGpioPin_AIndex; //GPIO_PAD register index + UCHAR ucGpioPinBitShift[9]; //at most 8 pin support 255 VIDs, termintate with 0xff + UCHAR ucReserved; +}ATOM_VOLTAGE_CONTROL; + +// Define ucVoltageControlId #define VOLTAGE_CONTROLLED_BY_HW 0x00 #define VOLTAGE_CONTROLLED_BY_I2C_MASK 0x7F #define VOLTAGE_CONTROLLED_BY_GPIO 0x80 -#define VOLTAGE_CONTROL_ID_LM64 0x01 /* I2C control, used for R5xx Core Voltage */ -#define VOLTAGE_CONTROL_ID_DAC 0x02 /* I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI */ -#define VOLTAGE_CONTROL_ID_VT116xM 0x03 /* I2C control, used for R6xx Core Voltage */ -#define VOLTAGE_CONTROL_ID_DS4402 0x04 - -typedef struct _ATOM_VOLTAGE_OBJECT { - UCHAR ucVoltageType; /* Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI */ - UCHAR ucSize; /* Size of Object */ - ATOM_VOLTAGE_CONTROL asControl; /* describ how to control */ - ATOM_VOLTAGE_FORMULA asFormula; /* Indicate How to convert real Voltage to VID */ -} ATOM_VOLTAGE_OBJECT; - -typedef struct _ATOM_VOLTAGE_OBJECT_INFO { - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_VOLTAGE_OBJECT asVoltageObj[3]; /* Info for Voltage control */ -} ATOM_VOLTAGE_OBJECT_INFO; - -typedef struct _ATOM_LEAKID_VOLTAGE { - UCHAR ucLeakageId; - UCHAR ucReserved; - USHORT usVoltage; -} ATOM_LEAKID_VOLTAGE; - -typedef struct _ATOM_ASIC_PROFILE_VOLTAGE { - UCHAR ucProfileId; - UCHAR ucReserved; - USHORT usSize; - USHORT usEfuseSpareStartAddr; - 
USHORT usFuseIndex[8]; /* from LSB to MSB, Max 8bit,end of 0xffff if less than 8 efuse id, */ - ATOM_LEAKID_VOLTAGE asLeakVol[2]; /* Leakid and relatd voltage */ -} ATOM_ASIC_PROFILE_VOLTAGE; - -/* ucProfileId */ -#define ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE 1 +#define VOLTAGE_CONTROL_ID_LM64 0x01 //I2C control, used for R5xx Core Voltage +#define VOLTAGE_CONTROL_ID_DAC 0x02 //I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI +#define VOLTAGE_CONTROL_ID_VT116xM 0x03 //I2C control, used for R6xx Core Voltage +#define VOLTAGE_CONTROL_ID_DS4402 0x04 + +typedef struct _ATOM_VOLTAGE_OBJECT +{ + UCHAR ucVoltageType; //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI + UCHAR ucSize; //Size of Object + ATOM_VOLTAGE_CONTROL asControl; //describ how to control + ATOM_VOLTAGE_FORMULA asFormula; //Indicate How to convert real Voltage to VID +}ATOM_VOLTAGE_OBJECT; + +typedef struct _ATOM_VOLTAGE_OBJECT_V2 +{ + UCHAR ucVoltageType; //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI + UCHAR ucSize; //Size of Object + ATOM_VOLTAGE_CONTROL asControl; //describ how to control + ATOM_VOLTAGE_FORMULA_V2 asFormula; //Indicate How to convert real Voltage to VID +}ATOM_VOLTAGE_OBJECT_V2; + +typedef struct _ATOM_VOLTAGE_OBJECT_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_VOLTAGE_OBJECT asVoltageObj[3]; //Info for Voltage control +}ATOM_VOLTAGE_OBJECT_INFO; + +typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V2 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_VOLTAGE_OBJECT_V2 asVoltageObj[3]; //Info for Voltage control +}ATOM_VOLTAGE_OBJECT_INFO_V2; + +typedef struct _ATOM_LEAKID_VOLTAGE +{ + UCHAR ucLeakageId; + UCHAR ucReserved; + USHORT usVoltage; +}ATOM_LEAKID_VOLTAGE; + +typedef struct _ATOM_ASIC_PROFILE_VOLTAGE +{ + UCHAR ucProfileId; + UCHAR ucReserved; + USHORT usSize; + USHORT usEfuseSpareStartAddr; + USHORT usFuseIndex[8]; //from LSB to MSB, Max 8bit,end of 0xffff if less than 8 efuse id, + ATOM_LEAKID_VOLTAGE asLeakVol[2]; //Leakid and relatd voltage +}ATOM_ASIC_PROFILE_VOLTAGE; + +//ucProfileId +#define ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE 1 #define ATOM_ASIC_PROFILE_ID_EFUSE_PERFORMANCE_VOLTAGE 1 #define ATOM_ASIC_PROFILE_ID_EFUSE_THERMAL_VOLTAGE 2 -typedef struct _ATOM_ASIC_PROFILING_INFO { - ATOM_COMMON_TABLE_HEADER asHeader; - ATOM_ASIC_PROFILE_VOLTAGE asVoltage; -} ATOM_ASIC_PROFILING_INFO; - -typedef struct _ATOM_POWER_SOURCE_OBJECT { - UCHAR ucPwrSrcId; /* Power source */ - UCHAR ucPwrSensorType; /* GPIO, I2C or none */ - UCHAR ucPwrSensId; /* if GPIO detect, it is GPIO id, if I2C detect, it is I2C id */ - UCHAR ucPwrSensSlaveAddr; /* Slave address if I2C detect */ - UCHAR ucPwrSensRegIndex; /* I2C register Index if I2C detect */ - UCHAR ucPwrSensRegBitMask; /* detect which bit is used if I2C detect */ - UCHAR ucPwrSensActiveState; /* high active or low active */ - UCHAR ucReserve[3]; /* reserve */ - USHORT usSensPwr; /* in unit of watt */ -} ATOM_POWER_SOURCE_OBJECT; - -typedef struct _ATOM_POWER_SOURCE_INFO { - ATOM_COMMON_TABLE_HEADER asHeader; - UCHAR asPwrbehave[16]; - ATOM_POWER_SOURCE_OBJECT asPwrObj[1]; -} ATOM_POWER_SOURCE_INFO; - -/* Define ucPwrSrcId */ +typedef struct _ATOM_ASIC_PROFILING_INFO +{ + ATOM_COMMON_TABLE_HEADER asHeader; + ATOM_ASIC_PROFILE_VOLTAGE asVoltage; +}ATOM_ASIC_PROFILING_INFO; + +typedef struct _ATOM_POWER_SOURCE_OBJECT +{ + UCHAR ucPwrSrcId; // Power source + UCHAR ucPwrSensorType; // GPIO, I2C or none + UCHAR ucPwrSensId; // if GPIO detect, it is GPIO id, if I2C detect, it is I2C id + UCHAR ucPwrSensSlaveAddr; // Slave address if I2C detect + UCHAR 
ucPwrSensRegIndex; // I2C register Index if I2C detect + UCHAR ucPwrSensRegBitMask; // detect which bit is used if I2C detect + UCHAR ucPwrSensActiveState; // high active or low active + UCHAR ucReserve[3]; // reserve + USHORT usSensPwr; // in unit of watt +}ATOM_POWER_SOURCE_OBJECT; + +typedef struct _ATOM_POWER_SOURCE_INFO +{ + ATOM_COMMON_TABLE_HEADER asHeader; + UCHAR asPwrbehave[16]; + ATOM_POWER_SOURCE_OBJECT asPwrObj[1]; +}ATOM_POWER_SOURCE_INFO; + + +//Define ucPwrSrcId #define POWERSOURCE_PCIE_ID1 0x00 #define POWERSOURCE_6PIN_CONNECTOR_ID1 0x01 #define POWERSOURCE_8PIN_CONNECTOR_ID1 0x02 #define POWERSOURCE_6PIN_CONNECTOR_ID2 0x04 #define POWERSOURCE_8PIN_CONNECTOR_ID2 0x08 -/* define ucPwrSensorId */ +//define ucPwrSensorId #define POWER_SENSOR_ALWAYS 0x00 #define POWER_SENSOR_GPIO 0x01 #define POWER_SENSOR_I2C 0x02 +typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ULONG ulBootUpEngineClock; + ULONG ulDentistVCOFreq; + ULONG ulBootUpUMAClock; + ULONG ulReserved1[8]; + ULONG ulBootUpReqDisplayVector; + ULONG ulOtherDisplayMisc; + ULONG ulGPUCapInfo; + ULONG ulReserved2[3]; + ULONG ulSystemConfig; + ULONG ulCPUCapInfo; + USHORT usMaxNBVoltage; + USHORT usMinNBVoltage; + USHORT usBootUpNBVoltage; + USHORT usExtDispConnInfoOffset; + UCHAR ucHtcTmpLmt; + UCHAR ucTjOffset; + UCHAR ucMemoryType; + UCHAR ucUMAChannelNumber; + ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10]; + ULONG ulCSR_M3_ARB_CNTL_UVD[10]; + ULONG ulCSR_M3_ARB_CNTL_FS3D[10]; + ULONG ulReserved3[42]; + ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo; +}ATOM_INTEGRATED_SYSTEM_INFO_V6; + +/********************************************************************************************************************** +// ATOM_INTEGRATED_SYSTEM_INFO_V6 Description +//ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. +//ulDentistVCOFreq: Dentist VCO clock in 10kHz unit. +//ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit. +//ulReserved1[8] Reserved by now, must be 0x0. +//ulBootUpReqDisplayVector VBIOS boot up display IDs +// ATOM_DEVICE_CRT1_SUPPORT 0x0001 +// ATOM_DEVICE_CRT2_SUPPORT 0x0010 +// ATOM_DEVICE_DFP1_SUPPORT 0x0008 +// ATOM_DEVICE_DFP6_SUPPORT 0x0040 +// ATOM_DEVICE_DFP2_SUPPORT 0x0080 +// ATOM_DEVICE_DFP3_SUPPORT 0x0200 +// ATOM_DEVICE_DFP4_SUPPORT 0x0400 +// ATOM_DEVICE_DFP5_SUPPORT 0x0800 +// ATOM_DEVICE_LCD1_SUPPORT 0x0002 +//ulOtherDisplayMisc Other display related flags, not defined yet. +//ulGPUCapInfo TBD +//ulReserved2[3] must be 0x0 for the reserved. +//ulSystemConfig TBD +//ulCPUCapInfo TBD +//usMaxNBVoltage High NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse. +//usMinNBVoltage Low NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse. +//usBootUpNBVoltage Boot up NB voltage in unit of mv. +//ucHtcTmpLmt Bit [22:16] of D24F3x64 Thermal Control (HTC) Register. +//ucTjOffset Bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed. +//ucMemoryType [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved. +//ucUMAChannelNumber System memory channel numbers. +//usExtDispConnectionInfoOffset ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO offset relative to beginning of this table. +//ulCSR_M3_ARB_CNTL_DEFAULT[10] Arrays with values for CSR M3 arbiter for default +//ulCSR_M3_ARB_CNTL_UVD[10] Arrays with values for CSR M3 arbiter for UVD playback. +//ulCSR_M3_ARB_CNTL_FS3D[10] Arrays with values for CSR M3 arbiter for Full Screen 3D applications. 
+**********************************************************************************************************************/ + /**************************************************************************/ -/* This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design */ -/* Memory SS Info Table */ -/* Define Memory Clock SS chip ID */ +// This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design +//Memory SS Info Table +//Define Memory Clock SS chip ID #define ICS91719 1 #define ICS91720 2 -/* Define one structure to inform SW a "block of data" writing to external SS chip via I2C protocol */ -typedef struct _ATOM_I2C_DATA_RECORD { - UCHAR ucNunberOfBytes; /* Indicates how many bytes SW needs to write to the external ASIC for one block, besides to "Start" and "Stop" */ - UCHAR ucI2CData[1]; /* I2C data in bytes, should be less than 16 bytes usually */ -} ATOM_I2C_DATA_RECORD; - -/* Define one structure to inform SW how many blocks of data writing to external SS chip via I2C protocol, in addition to other information */ -typedef struct _ATOM_I2C_DEVICE_SETUP_INFO { - ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; /* I2C line and HW/SW assisted cap. */ - UCHAR ucSSChipID; /* SS chip being used */ - UCHAR ucSSChipSlaveAddr; /* Slave Address to set up this SS chip */ - UCHAR ucNumOfI2CDataRecords; /* number of data block */ - ATOM_I2C_DATA_RECORD asI2CData[1]; -} ATOM_I2C_DEVICE_SETUP_INFO; - -/* ========================================================================================== */ -typedef struct _ATOM_ASIC_MVDD_INFO { - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1]; -} ATOM_ASIC_MVDD_INFO; - -/* ========================================================================================== */ +//Define one structure to inform SW a "block of data" writing to external SS chip via I2C protocol +typedef struct _ATOM_I2C_DATA_RECORD +{ + UCHAR ucNunberOfBytes; //Indicates how many bytes SW needs to write to the external ASIC for one block, besides to "Start" and "Stop" + UCHAR ucI2CData[1]; //I2C data in bytes, should be less than 16 bytes usually +}ATOM_I2C_DATA_RECORD; + + +//Define one structure to inform SW how many blocks of data writing to external SS chip via I2C protocol, in addition to other information +typedef struct _ATOM_I2C_DEVICE_SETUP_INFO +{ + ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; //I2C line and HW/SW assisted cap. 
+ UCHAR ucSSChipID; //SS chip being used + UCHAR ucSSChipSlaveAddr; //Slave Address to set up this SS chip + UCHAR ucNumOfI2CDataRecords; //number of data block + ATOM_I2C_DATA_RECORD asI2CData[1]; +}ATOM_I2C_DEVICE_SETUP_INFO; + +//========================================================================================== +typedef struct _ATOM_ASIC_MVDD_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1]; +}ATOM_ASIC_MVDD_INFO; + +//========================================================================================== #define ATOM_MCLK_SS_INFO ATOM_ASIC_MVDD_INFO -/* ========================================================================================== */ +//========================================================================================== /**************************************************************************/ -typedef struct _ATOM_ASIC_SS_ASSIGNMENT { - ULONG ulTargetClockRange; /* Clock Out frequence (VCO ), in unit of 10Khz */ - USHORT usSpreadSpectrumPercentage; /* in unit of 0.01% */ - USHORT usSpreadRateInKhz; /* in unit of kHz, modulation freq */ - UCHAR ucClockIndication; /* Indicate which clock source needs SS */ - UCHAR ucSpreadSpectrumMode; /* Bit1=0 Down Spread,=1 Center Spread. */ - UCHAR ucReserved[2]; -} ATOM_ASIC_SS_ASSIGNMENT; - -/* Define ucSpreadSpectrumType */ +typedef struct _ATOM_ASIC_SS_ASSIGNMENT +{ + ULONG ulTargetClockRange; //Clock Out frequence (VCO ), in unit of 10Khz + USHORT usSpreadSpectrumPercentage; //in unit of 0.01% + USHORT usSpreadRateInKhz; //in unit of kHz, modulation freq + UCHAR ucClockIndication; //Indicate which clock source needs SS + UCHAR ucSpreadSpectrumMode; //Bit1=0 Down Spread,=1 Center Spread. + UCHAR ucReserved[2]; +}ATOM_ASIC_SS_ASSIGNMENT; + +//Define ucClockIndication, SW uses the IDs below to search if the SS is requried/enabled on a clock branch/signal type. +//SS is not required or enabled if a match is not found. 
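In other words, software scans the spread-spectrum assignment entries for one whose ucClockIndication matches the clock it is about to program, and treats a miss as "no SS". A minimal lookup sketch over the ATOM_ASIC_SS_ASSIGNMENT structure above (the caller is assumed to pass the entry array and count taken from the enclosing table, and the clock-range comparison simply follows the ulTargetClockRange comment; byte-order handling is omitted):

static const ATOM_ASIC_SS_ASSIGNMENT *
find_asic_ss(const ATOM_ASIC_SS_ASSIGNMENT *ss, int num_entries,
             UCHAR clock_id, ULONG clock_10khz)
{
        int i;

        for (i = 0; i < num_entries; i++) {
                /* match on clock branch ID, then check the VCO/clock range */
                if (ss[i].ucClockIndication == clock_id &&
                    clock_10khz <= ss[i].ulTargetClockRange)
                        return &ss[i];
        }
        return NULL;    /* no match: SS is neither required nor enabled for this clock */
}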
#define ASIC_INTERNAL_MEMORY_SS 1 #define ASIC_INTERNAL_ENGINE_SS 2 -#define ASIC_INTERNAL_UVD_SS 3 +#define ASIC_INTERNAL_UVD_SS 3 +#define ASIC_INTERNAL_SS_ON_TMDS 4 +#define ASIC_INTERNAL_SS_ON_HDMI 5 +#define ASIC_INTERNAL_SS_ON_LVDS 6 +#define ASIC_INTERNAL_SS_ON_DP 7 +#define ASIC_INTERNAL_SS_ON_DCPLL 8 + +typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2 +{ + ULONG ulTargetClockRange; //For mem/engine/uvd, Clock Out frequence (VCO ), in unit of 10Khz + //For TMDS/HDMI/LVDS, it is pixel clock , for DP, it is link clock ( 27000 or 16200 ) + USHORT usSpreadSpectrumPercentage; //in unit of 0.01% + USHORT usSpreadRateIn10Hz; //in unit of 10Hz, modulation freq + UCHAR ucClockIndication; //Indicate which clock source needs SS + UCHAR ucSpreadSpectrumMode; //Bit0=0 Down Spread,=1 Center Spread, bit1=0: internal SS bit1=1: external SS + UCHAR ucReserved[2]; +}ATOM_ASIC_SS_ASSIGNMENT_V2; + +//ucSpreadSpectrumMode +//#define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000 +//#define ATOM_SS_DOWN_SPREAD_MODE 0x00000000 +//#define ATOM_SS_CENTRE_SPREAD_MODE_MASK 0x00000001 +//#define ATOM_SS_CENTRE_SPREAD_MODE 0x00000001 +//#define ATOM_INTERNAL_SS_MASK 0x00000000 +//#define ATOM_EXTERNAL_SS_MASK 0x00000002 + +typedef struct _ATOM_ASIC_INTERNAL_SS_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_ASIC_SS_ASSIGNMENT asSpreadSpectrum[4]; +}ATOM_ASIC_INTERNAL_SS_INFO; -typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_ASIC_SS_ASSIGNMENT asSpreadSpectrum[4]; -} ATOM_ASIC_INTERNAL_SS_INFO; +typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_ASIC_SS_ASSIGNMENT_V2 asSpreadSpectrum[1]; //this is point only. +}ATOM_ASIC_INTERNAL_SS_INFO_V2; -/* ==============================Scratch Pad Definition Portion=============================== */ +typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3 +{ + ULONG ulTargetClockRange; //For mem/engine/uvd, Clock Out frequence (VCO ), in unit of 10Khz + //For TMDS/HDMI/LVDS, it is pixel clock , for DP, it is link clock ( 27000 or 16200 ) + USHORT usSpreadSpectrumPercentage; //in unit of 0.01% + USHORT usSpreadRateIn10Hz; //in unit of 10Hz, modulation freq + UCHAR ucClockIndication; //Indicate which clock source needs SS + UCHAR ucSpreadSpectrumMode; //Bit0=0 Down Spread,=1 Center Spread, bit1=0: internal SS bit1=1: external SS + UCHAR ucReserved[2]; +}ATOM_ASIC_SS_ASSIGNMENT_V3; + +typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_ASIC_SS_ASSIGNMENT_V3 asSpreadSpectrum[1]; //this is pointer only. 
+}ATOM_ASIC_INTERNAL_SS_INFO_V3; + + +//==============================Scratch Pad Definition Portion=============================== #define ATOM_DEVICE_CONNECT_INFO_DEF 0 #define ATOM_ROM_LOCATION_DEF 1 #define ATOM_TV_STANDARD_DEF 2 @@ -2995,7 +3852,8 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { #define ATOM_I2C_CHANNEL_STATUS_DEF 8 #define ATOM_I2C_CHANNEL_STATUS1_DEF 9 -/* BIOS_0_SCRATCH Definition */ + +// BIOS_0_SCRATCH Definition #define ATOM_S0_CRT1_MONO 0x00000001L #define ATOM_S0_CRT1_COLOR 0x00000002L #define ATOM_S0_CRT1_MASK (ATOM_S0_CRT1_MONO+ATOM_S0_CRT1_COLOR) @@ -3008,6 +3866,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { #define ATOM_S0_CV_DIN_A 0x00000020L #define ATOM_S0_CV_MASK_A (ATOM_S0_CV_A+ATOM_S0_CV_DIN_A) + #define ATOM_S0_CRT2_MONO 0x00000100L #define ATOM_S0_CRT2_COLOR 0x00000200L #define ATOM_S0_CRT2_MASK (ATOM_S0_CRT2_MONO+ATOM_S0_CRT2_COLOR) @@ -3025,28 +3884,27 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { #define ATOM_S0_DFP2 0x00020000L #define ATOM_S0_LCD1 0x00040000L #define ATOM_S0_LCD2 0x00080000L -#define ATOM_S0_TV2 0x00100000L -#define ATOM_S0_DFP3 0x00200000L -#define ATOM_S0_DFP4 0x00400000L -#define ATOM_S0_DFP5 0x00800000L +#define ATOM_S0_DFP6 0x00100000L +#define ATOM_S0_DFP3 0x00200000L +#define ATOM_S0_DFP4 0x00400000L +#define ATOM_S0_DFP5 0x00800000L -#define ATOM_S0_DFP_MASK \ - (ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5) +#define ATOM_S0_DFP_MASK ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5 | ATOM_S0_DFP6 -#define ATOM_S0_FAD_REGISTER_BUG 0x02000000L /* If set, indicates we are running a PCIE asic with */ - /* the FAD/HDP reg access bug. Bit is read by DAL */ +#define ATOM_S0_FAD_REGISTER_BUG 0x02000000L // If set, indicates we are running a PCIE asic with + // the FAD/HDP reg access bug. 
Bit is read by DAL, this is obsolete from RV5xx #define ATOM_S0_THERMAL_STATE_MASK 0x1C000000L #define ATOM_S0_THERMAL_STATE_SHIFT 26 #define ATOM_S0_SYSTEM_POWER_STATE_MASK 0xE0000000L -#define ATOM_S0_SYSTEM_POWER_STATE_SHIFT 29 +#define ATOM_S0_SYSTEM_POWER_STATE_SHIFT 29 #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1 #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2 #define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3 -/* Byte aligned definition for BIOS usage */ +//Byte aligned defintion for BIOS usage #define ATOM_S0_CRT1_MONOb0 0x01 #define ATOM_S0_CRT1_COLORb0 0x02 #define ATOM_S0_CRT1_MASKb0 (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0) @@ -3076,8 +3934,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { #define ATOM_S0_DFP2b2 0x02 #define ATOM_S0_LCD1b2 0x04 #define ATOM_S0_LCD2b2 0x08 -#define ATOM_S0_TV2b2 0x10 -#define ATOM_S0_DFP3b2 0x20 +#define ATOM_S0_DFP6b2 0x10 +#define ATOM_S0_DFP3b2 0x20 +#define ATOM_S0_DFP4b2 0x40 +#define ATOM_S0_DFP5b2 0x80 + #define ATOM_S0_THERMAL_STATE_MASKb3 0x1C #define ATOM_S0_THERMAL_STATE_SHIFTb3 2 @@ -3085,43 +3946,20 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { #define ATOM_S0_SYSTEM_POWER_STATE_MASKb3 0xE0 #define ATOM_S0_LCD1_SHIFT 18 -/* BIOS_1_SCRATCH Definition */ +// BIOS_1_SCRATCH Definition #define ATOM_S1_ROM_LOCATION_MASK 0x0000FFFFL #define ATOM_S1_PCI_BUS_DEV_MASK 0xFFFF0000L -/* BIOS_2_SCRATCH Definition */ +// BIOS_2_SCRATCH Definition #define ATOM_S2_TV1_STANDARD_MASK 0x0000000FL #define ATOM_S2_CURRENT_BL_LEVEL_MASK 0x0000FF00L #define ATOM_S2_CURRENT_BL_LEVEL_SHIFT 8 -#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L -#define ATOM_S2_LCD1_DPMS_STATE 0x00020000L -#define ATOM_S2_TV1_DPMS_STATE 0x00040000L -#define ATOM_S2_DFP1_DPMS_STATE 0x00080000L -#define ATOM_S2_CRT2_DPMS_STATE 0x00100000L -#define ATOM_S2_LCD2_DPMS_STATE 0x00200000L -#define ATOM_S2_TV2_DPMS_STATE 0x00400000L -#define ATOM_S2_DFP2_DPMS_STATE 0x00800000L -#define ATOM_S2_CV_DPMS_STATE 0x01000000L -#define ATOM_S2_DFP3_DPMS_STATE 0x02000000L -#define ATOM_S2_DFP4_DPMS_STATE 0x04000000L -#define ATOM_S2_DFP5_DPMS_STATE 0x08000000L - -#define ATOM_S2_DFP_DPM_STATE \ - (ATOM_S2_DFP1_DPMS_STATE | ATOM_S2_DFP2_DPMS_STATE | \ - ATOM_S2_DFP3_DPMS_STATE | ATOM_S2_DFP4_DPMS_STATE | \ - ATOM_S2_DFP5_DPMS_STATE) - -#define ATOM_S2_DEVICE_DPMS_STATE \ - (ATOM_S2_CRT1_DPMS_STATE + ATOM_S2_LCD1_DPMS_STATE + \ - ATOM_S2_TV1_DPMS_STATE + ATOM_S2_DFP_DPMS_STATE + \ - ATOM_S2_CRT2_DPMS_STATE + ATOM_S2_LCD2_DPMS_STATE + \ - ATOM_S2_TV2_DPMS_STATE + ATOM_S2_CV_DPMS_STATE) - #define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK 0x0C000000L #define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK_SHIFT 26 #define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGE 0x10000000L +#define ATOM_S2_DEVICE_DPMS_STATE 0x00010000L #define ATOM_S2_VRI_BRIGHT_ENABLE 0x20000000L #define ATOM_S2_DISPLAY_ROTATION_0_DEGREE 0x0 @@ -3131,21 +3969,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { #define ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT 30 #define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK 0xC0000000L -/* Byte aligned definition for BIOS usage */ + +//Byte aligned defintion for BIOS usage #define ATOM_S2_TV1_STANDARD_MASKb0 0x0F #define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF -#define ATOM_S2_CRT1_DPMS_STATEb2 0x01 -#define ATOM_S2_LCD1_DPMS_STATEb2 0x02 -#define ATOM_S2_TV1_DPMS_STATEb2 0x04 -#define ATOM_S2_DFP1_DPMS_STATEb2 0x08 -#define ATOM_S2_CRT2_DPMS_STATEb2 0x10 -#define ATOM_S2_LCD2_DPMS_STATEb2 0x20 -#define ATOM_S2_TV2_DPMS_STATEb2 0x40 -#define ATOM_S2_DFP2_DPMS_STATEb2 0x80 -#define ATOM_S2_CV_DPMS_STATEb3 0x01 -#define 
ATOM_S2_DFP3_DPMS_STATEb3 0x02 -#define ATOM_S2_DFP4_DPMS_STATEb3 0x04 -#define ATOM_S2_DFP5_DPMS_STATEb3 0x08 +#define ATOM_S2_DEVICE_DPMS_STATEb2 0x01 #define ATOM_S2_DEVICE_DPMS_MASKw1 0x3FF #define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASKb3 0x0C @@ -3153,21 +3981,22 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { #define ATOM_S2_VRI_BRIGHT_ENABLEb3 0x20 #define ATOM_S2_ROTATION_STATE_MASKb3 0xC0 -/* BIOS_3_SCRATCH Definition */ + +// BIOS_3_SCRATCH Definition #define ATOM_S3_CRT1_ACTIVE 0x00000001L #define ATOM_S3_LCD1_ACTIVE 0x00000002L #define ATOM_S3_TV1_ACTIVE 0x00000004L #define ATOM_S3_DFP1_ACTIVE 0x00000008L #define ATOM_S3_CRT2_ACTIVE 0x00000010L #define ATOM_S3_LCD2_ACTIVE 0x00000020L -#define ATOM_S3_TV2_ACTIVE 0x00000040L +#define ATOM_S3_DFP6_ACTIVE 0x00000040L #define ATOM_S3_DFP2_ACTIVE 0x00000080L #define ATOM_S3_CV_ACTIVE 0x00000100L #define ATOM_S3_DFP3_ACTIVE 0x00000200L #define ATOM_S3_DFP4_ACTIVE 0x00000400L #define ATOM_S3_DFP5_ACTIVE 0x00000800L -#define ATOM_S3_DEVICE_ACTIVE_MASK 0x000003FFL +#define ATOM_S3_DEVICE_ACTIVE_MASK 0x00000FFFL #define ATOM_S3_LCD_FULLEXPANSION_ACTIVE 0x00001000L #define ATOM_S3_LCD_EXPANSION_ASPEC_RATIO_ACTIVE 0x00002000L @@ -3178,7 +4007,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { #define ATOM_S3_DFP1_CRTC_ACTIVE 0x00080000L #define ATOM_S3_CRT2_CRTC_ACTIVE 0x00100000L #define ATOM_S3_LCD2_CRTC_ACTIVE 0x00200000L -#define ATOM_S3_TV2_CRTC_ACTIVE 0x00400000L +#define ATOM_S3_DFP6_CRTC_ACTIVE 0x00400000L #define ATOM_S3_DFP2_CRTC_ACTIVE 0x00800000L #define ATOM_S3_CV_CRTC_ACTIVE 0x01000000L #define ATOM_S3_DFP3_CRTC_ACTIVE 0x02000000L @@ -3187,17 +4016,18 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { #define ATOM_S3_DEVICE_CRTC_ACTIVE_MASK 0x0FFF0000L #define ATOM_S3_ASIC_GUI_ENGINE_HUNG 0x20000000L +//Below two definitions are not supported in pplib, but in the old powerplay in DAL #define ATOM_S3_ALLOW_FAST_PWR_SWITCH 0x40000000L #define ATOM_S3_RQST_GPU_USE_MIN_PWR 0x80000000L -/* Byte aligned definition for BIOS usage */ +//Byte aligned defintion for BIOS usage #define ATOM_S3_CRT1_ACTIVEb0 0x01 #define ATOM_S3_LCD1_ACTIVEb0 0x02 #define ATOM_S3_TV1_ACTIVEb0 0x04 #define ATOM_S3_DFP1_ACTIVEb0 0x08 #define ATOM_S3_CRT2_ACTIVEb0 0x10 #define ATOM_S3_LCD2_ACTIVEb0 0x20 -#define ATOM_S3_TV2_ACTIVEb0 0x40 +#define ATOM_S3_DFP6_ACTIVEb0 0x40 #define ATOM_S3_DFP2_ACTIVEb0 0x80 #define ATOM_S3_CV_ACTIVEb1 0x01 #define ATOM_S3_DFP3_ACTIVEb1 0x02 @@ -3212,7 +4042,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { #define ATOM_S3_DFP1_CRTC_ACTIVEb2 0x08 #define ATOM_S3_CRT2_CRTC_ACTIVEb2 0x10 #define ATOM_S3_LCD2_CRTC_ACTIVEb2 0x20 -#define ATOM_S3_TV2_CRTC_ACTIVEb2 0x40 +#define ATOM_S3_DFP6_CRTC_ACTIVEb2 0x40 #define ATOM_S3_DFP2_CRTC_ACTIVEb2 0x80 #define ATOM_S3_CV_CRTC_ACTIVEb3 0x01 #define ATOM_S3_DFP3_CRTC_ACTIVEb3 0x02 @@ -3221,35 +4051,31 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { #define ATOM_S3_ACTIVE_CRTC2w1 0xFFF -#define ATOM_S3_ASIC_GUI_ENGINE_HUNGb3 0x20 -#define ATOM_S3_ALLOW_FAST_PWR_SWITCHb3 0x40 -#define ATOM_S3_RQST_GPU_USE_MIN_PWRb3 0x80 - -/* BIOS_4_SCRATCH Definition */ +// BIOS_4_SCRATCH Definition #define ATOM_S4_LCD1_PANEL_ID_MASK 0x000000FFL #define ATOM_S4_LCD1_REFRESH_MASK 0x0000FF00L #define ATOM_S4_LCD1_REFRESH_SHIFT 8 -/* Byte aligned definition for BIOS usage */ +//Byte aligned defintion for BIOS usage #define ATOM_S4_LCD1_PANEL_ID_MASKb0 0x0FF #define ATOM_S4_LCD1_REFRESH_MASKb1 ATOM_S4_LCD1_PANEL_ID_MASKb0 #define ATOM_S4_VRAM_INFO_MASKb2 ATOM_S4_LCD1_PANEL_ID_MASKb0 -/* BIOS_5_SCRATCH Definition, 
BIOS_5_SCRATCH is used by Firmware only !!!! */ +// BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!! #define ATOM_S5_DOS_REQ_CRT1b0 0x01 #define ATOM_S5_DOS_REQ_LCD1b0 0x02 #define ATOM_S5_DOS_REQ_TV1b0 0x04 #define ATOM_S5_DOS_REQ_DFP1b0 0x08 #define ATOM_S5_DOS_REQ_CRT2b0 0x10 #define ATOM_S5_DOS_REQ_LCD2b0 0x20 -#define ATOM_S5_DOS_REQ_TV2b0 0x40 +#define ATOM_S5_DOS_REQ_DFP6b0 0x40 #define ATOM_S5_DOS_REQ_DFP2b0 0x80 #define ATOM_S5_DOS_REQ_CVb1 0x01 #define ATOM_S5_DOS_REQ_DFP3b1 0x02 #define ATOM_S5_DOS_REQ_DFP4b1 0x04 #define ATOM_S5_DOS_REQ_DFP5b1 0x08 -#define ATOM_S5_DOS_REQ_DEVICEw0 0x03FF +#define ATOM_S5_DOS_REQ_DEVICEw0 0x0FFF #define ATOM_S5_DOS_REQ_CRT1 0x0001 #define ATOM_S5_DOS_REQ_LCD1 0x0002 @@ -3257,22 +4083,21 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { #define ATOM_S5_DOS_REQ_DFP1 0x0008 #define ATOM_S5_DOS_REQ_CRT2 0x0010 #define ATOM_S5_DOS_REQ_LCD2 0x0020 -#define ATOM_S5_DOS_REQ_TV2 0x0040 +#define ATOM_S5_DOS_REQ_DFP6 0x0040 #define ATOM_S5_DOS_REQ_DFP2 0x0080 #define ATOM_S5_DOS_REQ_CV 0x0100 -#define ATOM_S5_DOS_REQ_DFP3 0x0200 -#define ATOM_S5_DOS_REQ_DFP4 0x0400 -#define ATOM_S5_DOS_REQ_DFP5 0x0800 +#define ATOM_S5_DOS_REQ_DFP3 0x0200 +#define ATOM_S5_DOS_REQ_DFP4 0x0400 +#define ATOM_S5_DOS_REQ_DFP5 0x0800 #define ATOM_S5_DOS_FORCE_CRT1b2 ATOM_S5_DOS_REQ_CRT1b0 #define ATOM_S5_DOS_FORCE_TV1b2 ATOM_S5_DOS_REQ_TV1b0 #define ATOM_S5_DOS_FORCE_CRT2b2 ATOM_S5_DOS_REQ_CRT2b0 #define ATOM_S5_DOS_FORCE_CVb3 ATOM_S5_DOS_REQ_CVb1 -#define ATOM_S5_DOS_FORCE_DEVICEw1 \ - (ATOM_S5_DOS_FORCE_CRT1b2 + ATOM_S5_DOS_FORCE_TV1b2 + \ - ATOM_S5_DOS_FORCE_CRT2b2 + (ATOM_S5_DOS_FORCE_CVb3 << 8)) +#define ATOM_S5_DOS_FORCE_DEVICEw1 (ATOM_S5_DOS_FORCE_CRT1b2+ATOM_S5_DOS_FORCE_TV1b2+ATOM_S5_DOS_FORCE_CRT2b2+\ + (ATOM_S5_DOS_FORCE_CVb3<<8)) -/* BIOS_6_SCRATCH Definition */ +// BIOS_6_SCRATCH Definition #define ATOM_S6_DEVICE_CHANGE 0x00000001L #define ATOM_S6_SCALER_CHANGE 0x00000002L #define ATOM_S6_LID_CHANGE 0x00000004L @@ -3285,11 +4110,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { #define ATOM_S6_HW_I2C_BUSY_STATE 0x00000200L #define ATOM_S6_THERMAL_STATE_CHANGE 0x00000400L #define ATOM_S6_INTERRUPT_SET_BY_BIOS 0x00000800L -#define ATOM_S6_REQ_LCD_EXPANSION_FULL 0x00001000L /* Normal expansion Request bit for LCD */ -#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO 0x00002000L /* Aspect ratio expansion Request bit for LCD */ +#define ATOM_S6_REQ_LCD_EXPANSION_FULL 0x00001000L //Normal expansion Request bit for LCD +#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO 0x00002000L //Aspect ratio expansion Request bit for LCD -#define ATOM_S6_DISPLAY_STATE_CHANGE 0x00004000L /* This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_H_expansion */ -#define ATOM_S6_I2C_STATE_CHANGE 0x00008000L /* This bit is recycled,when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_V_expansion */ +#define ATOM_S6_DISPLAY_STATE_CHANGE 0x00004000L //This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_H_expansion +#define ATOM_S6_I2C_STATE_CHANGE 0x00008000L //This bit is recycled,when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_V_expansion #define ATOM_S6_ACC_REQ_CRT1 0x00010000L #define ATOM_S6_ACC_REQ_LCD1 0x00020000L @@ -3297,7 +4122,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { #define ATOM_S6_ACC_REQ_DFP1 0x00080000L #define ATOM_S6_ACC_REQ_CRT2 0x00100000L #define ATOM_S6_ACC_REQ_LCD2 0x00200000L -#define ATOM_S6_ACC_REQ_TV2 
0x00400000L +#define ATOM_S6_ACC_REQ_DFP6 0x00400000L #define ATOM_S6_ACC_REQ_DFP2 0x00800000L #define ATOM_S6_ACC_REQ_CV 0x01000000L #define ATOM_S6_ACC_REQ_DFP3 0x02000000L @@ -3310,7 +4135,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { #define ATOM_S6_VRI_BRIGHTNESS_CHANGE 0x40000000L #define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK 0x80000000L -/* Byte aligned definition for BIOS usage */ +//Byte aligned defintion for BIOS usage #define ATOM_S6_DEVICE_CHANGEb0 0x01 #define ATOM_S6_SCALER_CHANGEb0 0x02 #define ATOM_S6_LID_CHANGEb0 0x04 @@ -3320,11 +4145,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { #define ATOM_S6_LID_STATEb0 0x40 #define ATOM_S6_DOCK_STATEb0 0x80 #define ATOM_S6_CRITICAL_STATEb1 0x01 -#define ATOM_S6_HW_I2C_BUSY_STATEb1 0x02 +#define ATOM_S6_HW_I2C_BUSY_STATEb1 0x02 #define ATOM_S6_THERMAL_STATE_CHANGEb1 0x04 #define ATOM_S6_INTERRUPT_SET_BY_BIOSb1 0x08 -#define ATOM_S6_REQ_LCD_EXPANSION_FULLb1 0x10 -#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20 +#define ATOM_S6_REQ_LCD_EXPANSION_FULLb1 0x10 +#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20 #define ATOM_S6_ACC_REQ_CRT1b2 0x01 #define ATOM_S6_ACC_REQ_LCD1b2 0x02 @@ -3332,12 +4157,12 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { #define ATOM_S6_ACC_REQ_DFP1b2 0x08 #define ATOM_S6_ACC_REQ_CRT2b2 0x10 #define ATOM_S6_ACC_REQ_LCD2b2 0x20 -#define ATOM_S6_ACC_REQ_TV2b2 0x40 +#define ATOM_S6_ACC_REQ_DFP6b2 0x40 #define ATOM_S6_ACC_REQ_DFP2b2 0x80 #define ATOM_S6_ACC_REQ_CVb3 0x01 -#define ATOM_S6_ACC_REQ_DFP3b3 0x02 -#define ATOM_S6_ACC_REQ_DFP4b3 0x04 -#define ATOM_S6_ACC_REQ_DFP5b3 0x08 +#define ATOM_S6_ACC_REQ_DFP3b3 0x02 +#define ATOM_S6_ACC_REQ_DFP4b3 0x04 +#define ATOM_S6_ACC_REQ_DFP5b3 0x08 #define ATOM_S6_ACC_REQ_DEVICEw1 ATOM_S5_DOS_REQ_DEVICEw0 #define ATOM_S6_SYSTEM_POWER_MODE_CHANGEb3 0x10 @@ -3366,7 +4191,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { #define ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT 30 #define ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT 31 -/* BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!! */ +// BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!! 
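All of these BIOS_x_SCRATCH definitions are plain mask/shift pairs over 32-bit scratch registers, and the "b0"/"b1"/"b2"/"b3" variants are the same bits expressed per byte, as the "Byte aligned definition for BIOS usage" comments note. As one illustrative sketch of the mask/shift usage (the register accessor name is hypothetical, not part of this header), the current backlight level can be recovered from BIOS_2_SCRATCH like this:

static UCHAR get_backlight_level(void)
{
        /* read_scratch_reg() stands in for whatever MMIO helper the driver
         * uses to read BIOS_2_SCRATCH; only the mask/shift usage matters here. */
        ULONG s2 = read_scratch_reg(2);

        return (UCHAR)((s2 & ATOM_S2_CURRENT_BL_LEVEL_MASK) >>
                       ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
}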
#define ATOM_S7_DOS_MODE_TYPEb0 0x03 #define ATOM_S7_DOS_MODE_VGAb0 0x00 #define ATOM_S7_DOS_MODE_VESAb0 0x01 @@ -3378,220 +4203,194 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO { #define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT 8 -/* BIOS_8_SCRATCH Definition */ +// BIOS_8_SCRATCH Definition #define ATOM_S8_I2C_CHANNEL_BUSY_MASK 0x00000FFFF -#define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK 0x0FFFF0000 +#define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK 0x0FFFF0000 #define ATOM_S8_I2C_CHANNEL_BUSY_SHIFT 0 #define ATOM_S8_I2C_ENGINE_BUSY_SHIFT 16 -/* BIOS_9_SCRATCH Definition */ -#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK +// BIOS_9_SCRATCH Definition +#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK #define ATOM_S9_I2C_CHANNEL_COMPLETED_MASK 0x0000FFFF #endif -#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK +#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK #define ATOM_S9_I2C_CHANNEL_ABORTED_MASK 0xFFFF0000 #endif -#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT +#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT #define ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT 0 #endif -#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT +#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT #define ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT 16 #endif + #define ATOM_FLAG_SET 0x20 #define ATOM_FLAG_CLEAR 0 -#define CLEAR_ATOM_S6_ACC_MODE \ - ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ - ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR) -#define SET_ATOM_S6_DEVICE_CHANGE \ - ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ - ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET) -#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE \ - ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ - ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET) -#define SET_ATOM_S6_SCALER_CHANGE \ - ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ - ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET) -#define SET_ATOM_S6_LID_CHANGE \ - ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ - ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET) - -#define SET_ATOM_S6_LID_STATE \ - ((ATOM_ACC_CHANGE_INFO_DEF << 8) |\ - ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET) -#define CLEAR_ATOM_S6_LID_STATE \ - ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ - ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR) - -#define SET_ATOM_S6_DOCK_CHANGE \ - ((ATOM_ACC_CHANGE_INFO_DEF << 8)| \ - ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET) -#define SET_ATOM_S6_DOCK_STATE \ - ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ - ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET) -#define CLEAR_ATOM_S6_DOCK_STATE \ - ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ - ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR) - -#define SET_ATOM_S6_THERMAL_STATE_CHANGE \ - ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ - ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET) -#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE \ - ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ - ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET) -#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS \ - ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ - ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET) - -#define SET_ATOM_S6_CRITICAL_STATE \ - ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ - ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET) -#define CLEAR_ATOM_S6_CRITICAL_STATE \ - ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ - ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR) - -#define SET_ATOM_S6_REQ_SCALER \ - ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ - ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET) -#define CLEAR_ATOM_S6_REQ_SCALER \ - ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ - ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR ) - -#define SET_ATOM_S6_REQ_SCALER_ARATIO \ - ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ - ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET ) -#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO \ - ((ATOM_ACC_CHANGE_INFO_DEF 
<< 8) | \ - ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR ) - -#define SET_ATOM_S6_I2C_STATE_CHANGE \ - ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ - ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET ) - -#define SET_ATOM_S6_DISPLAY_STATE_CHANGE \ - ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ - ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET ) - -#define SET_ATOM_S6_DEVICE_RECONFIG \ - ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \ - ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET) -#define CLEAR_ATOM_S0_LCD1 \ - ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 ) | \ - ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR ) -#define SET_ATOM_S7_DOS_8BIT_DAC_EN \ - ((ATOM_DOS_MODE_INFO_DEF << 8) | \ - ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET ) -#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN \ - ((ATOM_DOS_MODE_INFO_DEF << 8) | \ - ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR ) +#define CLEAR_ATOM_S6_ACC_MODE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR) +#define SET_ATOM_S6_DEVICE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET) +#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET) +#define SET_ATOM_S6_SCALER_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET) +#define SET_ATOM_S6_LID_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET) -/****************************************************************************/ -/* Portion II: Definitinos only used in Driver */ +#define SET_ATOM_S6_LID_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET) +#define CLEAR_ATOM_S6_LID_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR) + +#define SET_ATOM_S6_DOCK_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET) +#define SET_ATOM_S6_DOCK_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET) +#define CLEAR_ATOM_S6_DOCK_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR) + +#define SET_ATOM_S6_THERMAL_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET) +#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET) +#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET) + +#define SET_ATOM_S6_CRITICAL_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET) +#define CLEAR_ATOM_S6_CRITICAL_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR) + +#define SET_ATOM_S6_REQ_SCALER ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET) +#define CLEAR_ATOM_S6_REQ_SCALER ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR ) + +#define SET_ATOM_S6_REQ_SCALER_ARATIO ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET ) +#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR ) + +#define SET_ATOM_S6_I2C_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET ) + +#define SET_ATOM_S6_DISPLAY_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET ) + +#define SET_ATOM_S6_DEVICE_RECONFIG ((ATOM_ACC_CHANGE_INFO_DEF << 8 
)|ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET) +#define CLEAR_ATOM_S0_LCD1 ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 )| ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR ) +#define SET_ATOM_S7_DOS_8BIT_DAC_EN ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET ) +#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR ) + +/****************************************************************************/ +//Portion II: Definitinos only used in Driver /****************************************************************************/ -/* Macros used by driver */ +// Macros used by driver +#ifdef __cplusplus +#define GetIndexIntoMasterTable(MasterOrData, FieldName) ((reinterpret_cast<char*>(&(static_cast<ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*>(0))->FieldName)-static_cast<char*>(0))/sizeof(USHORT)) -#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char *)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES *)0)->FieldName)-(char *)0)/sizeof(USHORT)) +#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableFormatRevision )&0x3F) +#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableContentRevision)&0x3F) +#else // not __cplusplus +#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char*)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*)0)->FieldName)-(char*)0)/sizeof(USHORT)) #define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F) #define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F) +#endif // __cplusplus #define GET_DATA_TABLE_MAJOR_REVISION GET_COMMAND_TABLE_COMMANDSET_REVISION #define GET_DATA_TABLE_MINOR_REVISION GET_COMMAND_TABLE_PARAMETER_REVISION -/****************************************************************************/ -/* Portion III: Definitinos only used in VBIOS */ +/****************************************************************************/ +//Portion III: Definitinos only used in VBIOS /****************************************************************************/ #define ATOM_DAC_SRC 0x80 #define ATOM_SRC_DAC1 0 #define ATOM_SRC_DAC2 0x80 -#ifdef UEFI_BUILD -#define USHORT UTEMP -#endif - -typedef struct _MEMORY_PLLINIT_PARAMETERS { - ULONG ulTargetMemoryClock; /* In 10Khz unit */ - UCHAR ucAction; /* not define yet */ - UCHAR ucFbDiv_Hi; /* Fbdiv Hi byte */ - UCHAR ucFbDiv; /* FB value */ - UCHAR ucPostDiv; /* Post div */ -} MEMORY_PLLINIT_PARAMETERS; +typedef struct _MEMORY_PLLINIT_PARAMETERS +{ + ULONG ulTargetMemoryClock; //In 10Khz unit + UCHAR ucAction; //not define yet + UCHAR ucFbDiv_Hi; //Fbdiv Hi byte + UCHAR ucFbDiv; //FB value + UCHAR ucPostDiv; //Post div +}MEMORY_PLLINIT_PARAMETERS; #define MEMORY_PLLINIT_PS_ALLOCATION MEMORY_PLLINIT_PARAMETERS -#define GPIO_PIN_WRITE 0x01 + +#define GPIO_PIN_WRITE 0x01 #define GPIO_PIN_READ 0x00 -typedef struct _GPIO_PIN_CONTROL_PARAMETERS { - UCHAR ucGPIO_ID; /* return value, read from GPIO pins */ - UCHAR ucGPIOBitShift; /* define which bit in uGPIOBitVal need to be update */ - UCHAR ucGPIOBitVal; /* Set/Reset corresponding bit defined in ucGPIOBitMask */ - UCHAR ucAction; /* =GPIO_PIN_WRITE: Read; =GPIO_PIN_READ: Write */ -} GPIO_PIN_CONTROL_PARAMETERS; - -typedef struct 
_ENABLE_SCALER_PARAMETERS { - UCHAR ucScaler; /* ATOM_SCALER1, ATOM_SCALER2 */ - UCHAR ucEnable; /* ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION */ - UCHAR ucTVStandard; /* */ - UCHAR ucPadding[1]; -} ENABLE_SCALER_PARAMETERS; -#define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS - -/* ucEnable: */ +typedef struct _GPIO_PIN_CONTROL_PARAMETERS +{ + UCHAR ucGPIO_ID; //return value, read from GPIO pins + UCHAR ucGPIOBitShift; //define which bit in uGPIOBitVal need to be update + UCHAR ucGPIOBitVal; //Set/Reset corresponding bit defined in ucGPIOBitMask + UCHAR ucAction; //=GPIO_PIN_WRITE: Read; =GPIO_PIN_READ: Write +}GPIO_PIN_CONTROL_PARAMETERS; + +typedef struct _ENABLE_SCALER_PARAMETERS +{ + UCHAR ucScaler; // ATOM_SCALER1, ATOM_SCALER2 + UCHAR ucEnable; // ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION + UCHAR ucTVStandard; // + UCHAR ucPadding[1]; +}ENABLE_SCALER_PARAMETERS; +#define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS + +//ucEnable: #define SCALER_BYPASS_AUTO_CENTER_NO_REPLICATION 0 #define SCALER_BYPASS_AUTO_CENTER_AUTO_REPLICATION 1 #define SCALER_ENABLE_2TAP_ALPHA_MODE 2 #define SCALER_ENABLE_MULTITAP_MODE 3 -typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS { - ULONG usHWIconHorzVertPosn; /* Hardware Icon Vertical position */ - UCHAR ucHWIconVertOffset; /* Hardware Icon Vertical offset */ - UCHAR ucHWIconHorzOffset; /* Hardware Icon Horizontal offset */ - UCHAR ucSelection; /* ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2 */ - UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ -} ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS; - -typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION { - ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS sEnableIcon; - ENABLE_CRTC_PARAMETERS sReserved; -} ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION; - -typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS { - USHORT usHight; /* Image Hight */ - USHORT usWidth; /* Image Width */ - UCHAR ucSurface; /* Surface 1 or 2 */ - UCHAR ucPadding[3]; -} ENABLE_GRAPH_SURFACE_PARAMETERS; - -typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2 { - USHORT usHight; /* Image Hight */ - USHORT usWidth; /* Image Width */ - UCHAR ucSurface; /* Surface 1 or 2 */ - UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */ - UCHAR ucPadding[2]; -} ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2; - -typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION { - ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface; - ENABLE_YUV_PS_ALLOCATION sReserved; /* Don't set this one */ -} ENABLE_GRAPH_SURFACE_PS_ALLOCATION; - -typedef struct _MEMORY_CLEAN_UP_PARAMETERS { - USHORT usMemoryStart; /* in 8Kb boundry, offset from memory base address */ - USHORT usMemorySize; /* 8Kb blocks aligned */ -} MEMORY_CLEAN_UP_PARAMETERS; +typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS +{ + ULONG usHWIconHorzVertPosn; // Hardware Icon Vertical position + UCHAR ucHWIconVertOffset; // Hardware Icon Vertical offset + UCHAR ucHWIconHorzOffset; // Hardware Icon Horizontal offset + UCHAR ucSelection; // ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2 + UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE +}ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS; + +typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION +{ + ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS sEnableIcon; + ENABLE_CRTC_PARAMETERS sReserved; +}ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION; + +typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS +{ + USHORT usHight; // Image Hight + USHORT usWidth; // Image Width + UCHAR ucSurface; // Surface 1 or 2 + UCHAR ucPadding[3]; 
+}ENABLE_GRAPH_SURFACE_PARAMETERS; + +typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2 +{ + USHORT usHight; // Image Hight + USHORT usWidth; // Image Width + UCHAR ucSurface; // Surface 1 or 2 + UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE + UCHAR ucPadding[2]; +}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2; + +typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3 +{ + USHORT usHight; // Image Hight + USHORT usWidth; // Image Width + UCHAR ucSurface; // Surface 1 or 2 + UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE + USHORT usDeviceId; // Active Device Id for this surface. If no device, set to 0. +}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3; + +typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION +{ + ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface; + ENABLE_YUV_PS_ALLOCATION sReserved; // Don't set this one +}ENABLE_GRAPH_SURFACE_PS_ALLOCATION; + +typedef struct _MEMORY_CLEAN_UP_PARAMETERS +{ + USHORT usMemoryStart; //in 8Kb boundry, offset from memory base address + USHORT usMemorySize; //8Kb blocks aligned +}MEMORY_CLEAN_UP_PARAMETERS; #define MEMORY_CLEAN_UP_PS_ALLOCATION MEMORY_CLEAN_UP_PARAMETERS -typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS { - USHORT usX_Size; /* When use as input parameter, usX_Size indicates which CRTC */ - USHORT usY_Size; -} GET_DISPLAY_SURFACE_SIZE_PARAMETERS; +typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS +{ + USHORT usX_Size; //When use as input parameter, usX_Size indicates which CRTC + USHORT usY_Size; +}GET_DISPLAY_SURFACE_SIZE_PARAMETERS; -typedef struct _INDIRECT_IO_ACCESS { - ATOM_COMMON_TABLE_HEADER sHeader; - UCHAR IOAccessSequence[256]; +typedef struct _INDIRECT_IO_ACCESS +{ + ATOM_COMMON_TABLE_HEADER sHeader; + UCHAR IOAccessSequence[256]; } INDIRECT_IO_ACCESS; #define INDIRECT_READ 0x00 @@ -3615,93 +4414,108 @@ typedef struct _INDIRECT_IO_ACCESS { #define INDIRECT_IO_NBMISC_READ INDIRECT_IO_NBMISC | INDIRECT_READ #define INDIRECT_IO_NBMISC_WRITE INDIRECT_IO_NBMISC | INDIRECT_WRITE -typedef struct _ATOM_OEM_INFO { - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; -} ATOM_OEM_INFO; - -typedef struct _ATOM_TV_MODE { - UCHAR ucVMode_Num; /* Video mode number */ - UCHAR ucTV_Mode_Num; /* Internal TV mode number */ -} ATOM_TV_MODE; - -typedef struct _ATOM_BIOS_INT_TVSTD_MODE { - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usTV_Mode_LUT_Offset; /* Pointer to standard to internal number conversion table */ - USHORT usTV_FIFO_Offset; /* Pointer to FIFO entry table */ - USHORT usNTSC_Tbl_Offset; /* Pointer to SDTV_Mode_NTSC table */ - USHORT usPAL_Tbl_Offset; /* Pointer to SDTV_Mode_PAL table */ - USHORT usCV_Tbl_Offset; /* Pointer to SDTV_Mode_PAL table */ -} ATOM_BIOS_INT_TVSTD_MODE; - -typedef struct _ATOM_TV_MODE_SCALER_PTR { - USHORT ucFilter0_Offset; /* Pointer to filter format 0 coefficients */ - USHORT usFilter1_Offset; /* Pointer to filter format 0 coefficients */ - UCHAR ucTV_Mode_Num; -} ATOM_TV_MODE_SCALER_PTR; - -typedef struct _ATOM_STANDARD_VESA_TIMING { - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_DTD_FORMAT aModeTimings[16]; /* 16 is not the real array number, just for initial allocation */ -} ATOM_STANDARD_VESA_TIMING; - -typedef struct _ATOM_STD_FORMAT { - USHORT usSTD_HDisp; - USHORT usSTD_VDisp; - USHORT usSTD_RefreshRate; - USHORT usReserved; -} ATOM_STD_FORMAT; - -typedef struct _ATOM_VESA_TO_EXTENDED_MODE { - USHORT usVESA_ModeNumber; - USHORT usExtendedModeNumber; -} ATOM_VESA_TO_EXTENDED_MODE; - -typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT { - ATOM_COMMON_TABLE_HEADER sHeader; - ATOM_VESA_TO_EXTENDED_MODE 
asVESA_ToExtendedModeInfo[76]; -} ATOM_VESA_TO_INTENAL_MODE_LUT; +typedef struct _ATOM_OEM_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; +}ATOM_OEM_INFO; + +typedef struct _ATOM_TV_MODE +{ + UCHAR ucVMode_Num; //Video mode number + UCHAR ucTV_Mode_Num; //Internal TV mode number +}ATOM_TV_MODE; + +typedef struct _ATOM_BIOS_INT_TVSTD_MODE +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usTV_Mode_LUT_Offset; // Pointer to standard to internal number conversion table + USHORT usTV_FIFO_Offset; // Pointer to FIFO entry table + USHORT usNTSC_Tbl_Offset; // Pointer to SDTV_Mode_NTSC table + USHORT usPAL_Tbl_Offset; // Pointer to SDTV_Mode_PAL table + USHORT usCV_Tbl_Offset; // Pointer to SDTV_Mode_PAL table +}ATOM_BIOS_INT_TVSTD_MODE; + + +typedef struct _ATOM_TV_MODE_SCALER_PTR +{ + USHORT ucFilter0_Offset; //Pointer to filter format 0 coefficients + USHORT usFilter1_Offset; //Pointer to filter format 0 coefficients + UCHAR ucTV_Mode_Num; +}ATOM_TV_MODE_SCALER_PTR; + +typedef struct _ATOM_STANDARD_VESA_TIMING +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_DTD_FORMAT aModeTimings[16]; // 16 is not the real array number, just for initial allocation +}ATOM_STANDARD_VESA_TIMING; + + +typedef struct _ATOM_STD_FORMAT +{ + USHORT usSTD_HDisp; + USHORT usSTD_VDisp; + USHORT usSTD_RefreshRate; + USHORT usReserved; +}ATOM_STD_FORMAT; + +typedef struct _ATOM_VESA_TO_EXTENDED_MODE +{ + USHORT usVESA_ModeNumber; + USHORT usExtendedModeNumber; +}ATOM_VESA_TO_EXTENDED_MODE; + +typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT +{ + ATOM_COMMON_TABLE_HEADER sHeader; + ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76]; +}ATOM_VESA_TO_INTENAL_MODE_LUT; /*************** ATOM Memory Related Data Structure ***********************/ -typedef struct _ATOM_MEMORY_VENDOR_BLOCK { - UCHAR ucMemoryType; - UCHAR ucMemoryVendor; - UCHAR ucAdjMCId; - UCHAR ucDynClkId; - ULONG ulDllResetClkRange; -} ATOM_MEMORY_VENDOR_BLOCK; - -typedef struct _ATOM_MEMORY_SETTING_ID_CONFIG { +typedef struct _ATOM_MEMORY_VENDOR_BLOCK{ + UCHAR ucMemoryType; + UCHAR ucMemoryVendor; + UCHAR ucAdjMCId; + UCHAR ucDynClkId; + ULONG ulDllResetClkRange; +}ATOM_MEMORY_VENDOR_BLOCK; + + +typedef struct _ATOM_MEMORY_SETTING_ID_CONFIG{ #if ATOM_BIG_ENDIAN - ULONG ucMemBlkId:8; - ULONG ulMemClockRange:24; + ULONG ucMemBlkId:8; + ULONG ulMemClockRange:24; #else - ULONG ulMemClockRange:24; - ULONG ucMemBlkId:8; + ULONG ulMemClockRange:24; + ULONG ucMemBlkId:8; #endif -} ATOM_MEMORY_SETTING_ID_CONFIG; - -typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS { - ATOM_MEMORY_SETTING_ID_CONFIG slAccess; - ULONG ulAccess; -} ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS; - -typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK { - ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ulMemoryID; - ULONG aulMemData[1]; -} ATOM_MEMORY_SETTING_DATA_BLOCK; - -typedef struct _ATOM_INIT_REG_INDEX_FORMAT { - USHORT usRegIndex; /* MC register index */ - UCHAR ucPreRegDataLength; /* offset in ATOM_INIT_REG_DATA_BLOCK.saRegDataBuf */ -} ATOM_INIT_REG_INDEX_FORMAT; - -typedef struct _ATOM_INIT_REG_BLOCK { - USHORT usRegIndexTblSize; /* size of asRegIndexBuf */ - USHORT usRegDataBlkSize; /* size of ATOM_MEMORY_SETTING_DATA_BLOCK */ - ATOM_INIT_REG_INDEX_FORMAT asRegIndexBuf[1]; - ATOM_MEMORY_SETTING_DATA_BLOCK asRegDataBuf[1]; -} ATOM_INIT_REG_BLOCK; +}ATOM_MEMORY_SETTING_ID_CONFIG; + +typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS +{ + ATOM_MEMORY_SETTING_ID_CONFIG slAccess; + ULONG ulAccess; +}ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS; + + +typedef struct 
_ATOM_MEMORY_SETTING_DATA_BLOCK{ + ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ulMemoryID; + ULONG aulMemData[1]; +}ATOM_MEMORY_SETTING_DATA_BLOCK; + + +typedef struct _ATOM_INIT_REG_INDEX_FORMAT{ + USHORT usRegIndex; // MC register index + UCHAR ucPreRegDataLength; // offset in ATOM_INIT_REG_DATA_BLOCK.saRegDataBuf +}ATOM_INIT_REG_INDEX_FORMAT; + + +typedef struct _ATOM_INIT_REG_BLOCK{ + USHORT usRegIndexTblSize; //size of asRegIndexBuf + USHORT usRegDataBlkSize; //size of ATOM_MEMORY_SETTING_DATA_BLOCK + ATOM_INIT_REG_INDEX_FORMAT asRegIndexBuf[1]; + ATOM_MEMORY_SETTING_DATA_BLOCK asRegDataBuf[1]; +}ATOM_INIT_REG_BLOCK; #define END_OF_REG_INDEX_BLOCK 0x0ffff #define END_OF_REG_DATA_BLOCK 0x00000000 @@ -3716,16 +4530,19 @@ typedef struct _ATOM_INIT_REG_BLOCK { #define INDEX_ACCESS_RANGE_END (INDEX_ACCESS_RANGE_BEGIN + 1) #define VALUE_INDEX_ACCESS_SINGLE (INDEX_ACCESS_RANGE_END + 1) -typedef struct _ATOM_MC_INIT_PARAM_TABLE { - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usAdjustARB_SEQDataOffset; - USHORT usMCInitMemTypeTblOffset; - USHORT usMCInitCommonTblOffset; - USHORT usMCInitPowerDownTblOffset; - ULONG ulARB_SEQDataBuf[32]; - ATOM_INIT_REG_BLOCK asMCInitMemType; - ATOM_INIT_REG_BLOCK asMCInitCommon; -} ATOM_MC_INIT_PARAM_TABLE; + +typedef struct _ATOM_MC_INIT_PARAM_TABLE +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usAdjustARB_SEQDataOffset; + USHORT usMCInitMemTypeTblOffset; + USHORT usMCInitCommonTblOffset; + USHORT usMCInitPowerDownTblOffset; + ULONG ulARB_SEQDataBuf[32]; + ATOM_INIT_REG_BLOCK asMCInitMemType; + ATOM_INIT_REG_BLOCK asMCInitCommon; +}ATOM_MC_INIT_PARAM_TABLE; + #define _4Mx16 0x2 #define _4Mx32 0x3 @@ -3751,221 +4568,272 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE { #define QIMONDA INFINEON #define PROMOS MOSEL +#define KRETON INFINEON -/* ///////////Support for GDDR5 MC uCode to reside in upper 64K of ROM///////////// */ +/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM///////////// #define UCODE_ROM_START_ADDRESS 0x1c000 -#define UCODE_SIGNATURE 0x4375434d /* 'MCuC' - MC uCode */ - -/* uCode block header for reference */ - -typedef struct _MCuCodeHeader { - ULONG ulSignature; - UCHAR ucRevision; - UCHAR ucChecksum; - UCHAR ucReserved1; - UCHAR ucReserved2; - USHORT usParametersLength; - USHORT usUCodeLength; - USHORT usReserved1; - USHORT usReserved2; +#define UCODE_SIGNATURE 0x4375434d // 'MCuC' - MC uCode + +//uCode block header for reference + +typedef struct _MCuCodeHeader +{ + ULONG ulSignature; + UCHAR ucRevision; + UCHAR ucChecksum; + UCHAR ucReserved1; + UCHAR ucReserved2; + USHORT usParametersLength; + USHORT usUCodeLength; + USHORT usReserved1; + USHORT usReserved2; } MCuCodeHeader; -/* //////////////////////////////////////////////////////////////////////////////// */ +////////////////////////////////////////////////////////////////////////////////// #define ATOM_MAX_NUMBER_OF_VRAM_MODULE 16 #define ATOM_VRAM_MODULE_MEMORY_VENDOR_ID_MASK 0xF -typedef struct _ATOM_VRAM_MODULE_V1 { - ULONG ulReserved; - USHORT usEMRSValue; - USHORT usMRSValue; - USHORT usReserved; - UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */ - UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved; */ - UCHAR ucMemoryVenderID; /* Predefined,never change across designs or memory type/vender */ - UCHAR ucMemoryDeviceCfg; /* [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... 
*/ - UCHAR ucRow; /* Number of Row,in power of 2; */ - UCHAR ucColumn; /* Number of Column,in power of 2; */ - UCHAR ucBank; /* Nunber of Bank; */ - UCHAR ucRank; /* Number of Rank, in power of 2 */ - UCHAR ucChannelNum; /* Number of channel; */ - UCHAR ucChannelConfig; /* [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2 */ - UCHAR ucDefaultMVDDQ_ID; /* Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; */ - UCHAR ucDefaultMVDDC_ID; /* Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; */ - UCHAR ucReserved[2]; -} ATOM_VRAM_MODULE_V1; - -typedef struct _ATOM_VRAM_MODULE_V2 { - ULONG ulReserved; - ULONG ulFlags; /* To enable/disable functionalities based on memory type */ - ULONG ulEngineClock; /* Override of default engine clock for particular memory type */ - ULONG ulMemoryClock; /* Override of default memory clock for particular memory type */ - USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */ - USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */ - USHORT usEMRSValue; - USHORT usMRSValue; - USHORT usReserved; - UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */ - UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; */ - UCHAR ucMemoryVenderID; /* Predefined,never change across designs or memory type/vender. If not predefined, vendor detection table gets executed */ - UCHAR ucMemoryDeviceCfg; /* [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... */ - UCHAR ucRow; /* Number of Row,in power of 2; */ - UCHAR ucColumn; /* Number of Column,in power of 2; */ - UCHAR ucBank; /* Nunber of Bank; */ - UCHAR ucRank; /* Number of Rank, in power of 2 */ - UCHAR ucChannelNum; /* Number of channel; */ - UCHAR ucChannelConfig; /* [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2 */ - UCHAR ucDefaultMVDDQ_ID; /* Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; */ - UCHAR ucDefaultMVDDC_ID; /* Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; */ - UCHAR ucRefreshRateFactor; - UCHAR ucReserved[3]; -} ATOM_VRAM_MODULE_V2; - -typedef struct _ATOM_MEMORY_TIMING_FORMAT { - ULONG ulClkRange; /* memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing */ - union { - USHORT usMRS; /* mode register */ - USHORT usDDR3_MR0; - }; - union { - USHORT usEMRS; /* extended mode register */ - USHORT usDDR3_MR1; - }; - UCHAR ucCL; /* CAS latency */ - UCHAR ucWL; /* WRITE Latency */ - UCHAR uctRAS; /* tRAS */ - UCHAR uctRC; /* tRC */ - UCHAR uctRFC; /* tRFC */ - UCHAR uctRCDR; /* tRCDR */ - UCHAR uctRCDW; /* tRCDW */ - UCHAR uctRP; /* tRP */ - UCHAR uctRRD; /* tRRD */ - UCHAR uctWR; /* tWR */ - UCHAR uctWTR; /* tWTR */ - UCHAR uctPDIX; /* tPDIX */ - UCHAR uctFAW; /* tFAW */ - UCHAR uctAOND; /* tAOND */ - union { - struct { - UCHAR ucflag; /* flag to control memory timing calculation. 
bit0= control EMRS2 Infineon */ - UCHAR ucReserved; - }; - USHORT usDDR3_MR2; - }; -} ATOM_MEMORY_TIMING_FORMAT; - -typedef struct _ATOM_MEMORY_TIMING_FORMAT_V1 { - ULONG ulClkRange; /* memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing */ - USHORT usMRS; /* mode register */ - USHORT usEMRS; /* extended mode register */ - UCHAR ucCL; /* CAS latency */ - UCHAR ucWL; /* WRITE Latency */ - UCHAR uctRAS; /* tRAS */ - UCHAR uctRC; /* tRC */ - UCHAR uctRFC; /* tRFC */ - UCHAR uctRCDR; /* tRCDR */ - UCHAR uctRCDW; /* tRCDW */ - UCHAR uctRP; /* tRP */ - UCHAR uctRRD; /* tRRD */ - UCHAR uctWR; /* tWR */ - UCHAR uctWTR; /* tWTR */ - UCHAR uctPDIX; /* tPDIX */ - UCHAR uctFAW; /* tFAW */ - UCHAR uctAOND; /* tAOND */ - UCHAR ucflag; /* flag to control memory timing calculation. bit0= control EMRS2 Infineon */ -/* ///////////////////////GDDR parameters/////////////////////////////////// */ - UCHAR uctCCDL; /* */ - UCHAR uctCRCRL; /* */ - UCHAR uctCRCWL; /* */ - UCHAR uctCKE; /* */ - UCHAR uctCKRSE; /* */ - UCHAR uctCKRSX; /* */ - UCHAR uctFAW32; /* */ - UCHAR ucReserved1; /* */ - UCHAR ucReserved2; /* */ - UCHAR ucTerminator; -} ATOM_MEMORY_TIMING_FORMAT_V1; - -typedef struct _ATOM_MEMORY_FORMAT { - ULONG ulDllDisClock; /* memory DLL will be disable when target memory clock is below this clock */ - union { - USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */ - USHORT usDDR3_Reserved; /* Not used for DDR3 memory */ - }; - union { - USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */ - USHORT usDDR3_MR3; /* Used for DDR3 memory */ - }; - UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; */ - UCHAR ucMemoryVenderID; /* Predefined,never change across designs or memory type/vender. 
If not predefined, vendor detection table gets executed */ - UCHAR ucRow; /* Number of Row,in power of 2; */ - UCHAR ucColumn; /* Number of Column,in power of 2; */ - UCHAR ucBank; /* Nunber of Bank; */ - UCHAR ucRank; /* Number of Rank, in power of 2 */ - UCHAR ucBurstSize; /* burst size, 0= burst size=4 1= burst size=8 */ - UCHAR ucDllDisBit; /* position of DLL Enable/Disable bit in EMRS ( Extended Mode Register ) */ - UCHAR ucRefreshRateFactor; /* memory refresh rate in unit of ms */ - UCHAR ucDensity; /* _8Mx32, _16Mx32, _16Mx16, _32Mx16 */ - UCHAR ucPreamble; /* [7:4] Write Preamble, [3:0] Read Preamble */ - UCHAR ucMemAttrib; /* Memory Device Addribute, like RDBI/WDBI etc */ - ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; /* Memory Timing block sort from lower clock to higher clock */ -} ATOM_MEMORY_FORMAT; - -typedef struct _ATOM_VRAM_MODULE_V3 { - ULONG ulChannelMapCfg; /* board dependent paramenter:Channel combination */ - USHORT usSize; /* size of ATOM_VRAM_MODULE_V3 */ - USHORT usDefaultMVDDQ; /* board dependent parameter:Default Memory Core Voltage */ - USHORT usDefaultMVDDC; /* board dependent parameter:Default Memory IO Voltage */ - UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */ - UCHAR ucChannelNum; /* board dependent parameter:Number of channel; */ - UCHAR ucChannelSize; /* board dependent parameter:32bit or 64bit */ - UCHAR ucVREFI; /* board dependnt parameter: EXT or INT +160mv to -140mv */ - UCHAR ucNPL_RT; /* board dependent parameter:NPL round trip delay, used for calculate memory timing parameters */ - UCHAR ucFlag; /* To enable/disable functionalities based on memory type */ - ATOM_MEMORY_FORMAT asMemory; /* describ all of video memory parameters from memory spec */ -} ATOM_VRAM_MODULE_V3; - -/* ATOM_VRAM_MODULE_V3.ucNPL_RT */ +typedef struct _ATOM_VRAM_MODULE_V1 +{ + ULONG ulReserved; + USHORT usEMRSValue; + USHORT usMRSValue; + USHORT usReserved; + UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module + UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved; + UCHAR ucMemoryVenderID; // Predefined,never change across designs or memory type/vender + UCHAR ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... 
+ UCHAR ucRow; // Number of Row,in power of 2; + UCHAR ucColumn; // Number of Column,in power of 2; + UCHAR ucBank; // Nunber of Bank; + UCHAR ucRank; // Number of Rank, in power of 2 + UCHAR ucChannelNum; // Number of channel; + UCHAR ucChannelConfig; // [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2 + UCHAR ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; + UCHAR ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; + UCHAR ucReserved[2]; +}ATOM_VRAM_MODULE_V1; + + +typedef struct _ATOM_VRAM_MODULE_V2 +{ + ULONG ulReserved; + ULONG ulFlags; // To enable/disable functionalities based on memory type + ULONG ulEngineClock; // Override of default engine clock for particular memory type + ULONG ulMemoryClock; // Override of default memory clock for particular memory type + USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type + USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type + USHORT usEMRSValue; + USHORT usMRSValue; + USHORT usReserved; + UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module + UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; + UCHAR ucMemoryVenderID; // Predefined,never change across designs or memory type/vender. If not predefined, vendor detection table gets executed + UCHAR ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... + UCHAR ucRow; // Number of Row,in power of 2; + UCHAR ucColumn; // Number of Column,in power of 2; + UCHAR ucBank; // Nunber of Bank; + UCHAR ucRank; // Number of Rank, in power of 2 + UCHAR ucChannelNum; // Number of channel; + UCHAR ucChannelConfig; // [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2 + UCHAR ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; + UCHAR ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; + UCHAR ucRefreshRateFactor; + UCHAR ucReserved[3]; +}ATOM_VRAM_MODULE_V2; + + +typedef struct _ATOM_MEMORY_TIMING_FORMAT +{ + ULONG ulClkRange; // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing + union{ + USHORT usMRS; // mode register + USHORT usDDR3_MR0; + }; + union{ + USHORT usEMRS; // extended mode register + USHORT usDDR3_MR1; + }; + UCHAR ucCL; // CAS latency + UCHAR ucWL; // WRITE Latency + UCHAR uctRAS; // tRAS + UCHAR uctRC; // tRC + UCHAR uctRFC; // tRFC + UCHAR uctRCDR; // tRCDR + UCHAR uctRCDW; // tRCDW + UCHAR uctRP; // tRP + UCHAR uctRRD; // tRRD + UCHAR uctWR; // tWR + UCHAR uctWTR; // tWTR + UCHAR uctPDIX; // tPDIX + UCHAR uctFAW; // tFAW + UCHAR uctAOND; // tAOND + union + { + struct { + UCHAR ucflag; // flag to control memory timing calculation. 
bit0= control EMRS2 Infineon + UCHAR ucReserved; + }; + USHORT usDDR3_MR2; + }; +}ATOM_MEMORY_TIMING_FORMAT; + + +typedef struct _ATOM_MEMORY_TIMING_FORMAT_V1 +{ + ULONG ulClkRange; // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing + USHORT usMRS; // mode register + USHORT usEMRS; // extended mode register + UCHAR ucCL; // CAS latency + UCHAR ucWL; // WRITE Latency + UCHAR uctRAS; // tRAS + UCHAR uctRC; // tRC + UCHAR uctRFC; // tRFC + UCHAR uctRCDR; // tRCDR + UCHAR uctRCDW; // tRCDW + UCHAR uctRP; // tRP + UCHAR uctRRD; // tRRD + UCHAR uctWR; // tWR + UCHAR uctWTR; // tWTR + UCHAR uctPDIX; // tPDIX + UCHAR uctFAW; // tFAW + UCHAR uctAOND; // tAOND + UCHAR ucflag; // flag to control memory timing calculation. bit0= control EMRS2 Infineon +////////////////////////////////////GDDR parameters/////////////////////////////////// + UCHAR uctCCDL; // + UCHAR uctCRCRL; // + UCHAR uctCRCWL; // + UCHAR uctCKE; // + UCHAR uctCKRSE; // + UCHAR uctCKRSX; // + UCHAR uctFAW32; // + UCHAR ucMR5lo; // + UCHAR ucMR5hi; // + UCHAR ucTerminator; +}ATOM_MEMORY_TIMING_FORMAT_V1; + +typedef struct _ATOM_MEMORY_TIMING_FORMAT_V2 +{ + ULONG ulClkRange; // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing + USHORT usMRS; // mode register + USHORT usEMRS; // extended mode register + UCHAR ucCL; // CAS latency + UCHAR ucWL; // WRITE Latency + UCHAR uctRAS; // tRAS + UCHAR uctRC; // tRC + UCHAR uctRFC; // tRFC + UCHAR uctRCDR; // tRCDR + UCHAR uctRCDW; // tRCDW + UCHAR uctRP; // tRP + UCHAR uctRRD; // tRRD + UCHAR uctWR; // tWR + UCHAR uctWTR; // tWTR + UCHAR uctPDIX; // tPDIX + UCHAR uctFAW; // tFAW + UCHAR uctAOND; // tAOND + UCHAR ucflag; // flag to control memory timing calculation. bit0= control EMRS2 Infineon +////////////////////////////////////GDDR parameters/////////////////////////////////// + UCHAR uctCCDL; // + UCHAR uctCRCRL; // + UCHAR uctCRCWL; // + UCHAR uctCKE; // + UCHAR uctCKRSE; // + UCHAR uctCKRSX; // + UCHAR uctFAW32; // + UCHAR ucMR4lo; // + UCHAR ucMR4hi; // + UCHAR ucMR5lo; // + UCHAR ucMR5hi; // + UCHAR ucTerminator; + UCHAR ucReserved; +}ATOM_MEMORY_TIMING_FORMAT_V2; + +typedef struct _ATOM_MEMORY_FORMAT +{ + ULONG ulDllDisClock; // memory DLL will be disable when target memory clock is below this clock + union{ + USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type + USHORT usDDR3_Reserved; // Not used for DDR3 memory + }; + union{ + USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type + USHORT usDDR3_MR3; // Used for DDR3 memory + }; + UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; + UCHAR ucMemoryVenderID; // Predefined,never change across designs or memory type/vender. 
If not predefined, vendor detection table gets executed + UCHAR ucRow; // Number of Row,in power of 2; + UCHAR ucColumn; // Number of Column,in power of 2; + UCHAR ucBank; // Nunber of Bank; + UCHAR ucRank; // Number of Rank, in power of 2 + UCHAR ucBurstSize; // burst size, 0= burst size=4 1= burst size=8 + UCHAR ucDllDisBit; // position of DLL Enable/Disable bit in EMRS ( Extended Mode Register ) + UCHAR ucRefreshRateFactor; // memory refresh rate in unit of ms + UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16 + UCHAR ucPreamble; //[7:4] Write Preamble, [3:0] Read Preamble + UCHAR ucMemAttrib; // Memory Device Addribute, like RDBI/WDBI etc + ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; //Memory Timing block sort from lower clock to higher clock +}ATOM_MEMORY_FORMAT; + + +typedef struct _ATOM_VRAM_MODULE_V3 +{ + ULONG ulChannelMapCfg; // board dependent paramenter:Channel combination + USHORT usSize; // size of ATOM_VRAM_MODULE_V3 + USHORT usDefaultMVDDQ; // board dependent parameter:Default Memory Core Voltage + USHORT usDefaultMVDDC; // board dependent parameter:Default Memory IO Voltage + UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module + UCHAR ucChannelNum; // board dependent parameter:Number of channel; + UCHAR ucChannelSize; // board dependent parameter:32bit or 64bit + UCHAR ucVREFI; // board dependnt parameter: EXT or INT +160mv to -140mv + UCHAR ucNPL_RT; // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters + UCHAR ucFlag; // To enable/disable functionalities based on memory type + ATOM_MEMORY_FORMAT asMemory; // describ all of video memory parameters from memory spec +}ATOM_VRAM_MODULE_V3; + + +//ATOM_VRAM_MODULE_V3.ucNPL_RT #define NPL_RT_MASK 0x0f #define BATTERY_ODT_MASK 0xc0 #define ATOM_VRAM_MODULE ATOM_VRAM_MODULE_V3 -typedef struct _ATOM_VRAM_MODULE_V4 { - ULONG ulChannelMapCfg; /* board dependent parameter: Channel combination */ - USHORT usModuleSize; /* size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE */ - USHORT usPrivateReserved; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */ - /* MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) */ - USHORT usReserved; - UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */ - UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; */ - UCHAR ucChannelNum; /* Number of channels present in this module config */ - UCHAR ucChannelWidth; /* 0 - 32 bits; 1 - 64 bits */ - UCHAR ucDensity; /* _8Mx32, _16Mx32, _16Mx16, _32Mx16 */ - UCHAR ucFlag; /* To enable/disable functionalities based on memory type */ - UCHAR ucMisc; /* bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 */ - UCHAR ucVREFI; /* board dependent parameter */ - UCHAR ucNPL_RT; /* board dependent parameter:NPL round trip delay, used for calculate memory timing parameters */ - UCHAR ucPreamble; /* [7:4] Write Preamble, [3:0] Read Preamble */ - UCHAR ucMemorySize; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! 
*/ - /* Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros */ - UCHAR ucReserved[3]; - -/* compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level */ - union { - USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */ - USHORT usDDR3_Reserved; - }; - union { - USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */ - USHORT usDDR3_MR3; /* Used for DDR3 memory */ - }; - UCHAR ucMemoryVenderID; /* Predefined, If not predefined, vendor detection table gets executed */ - UCHAR ucRefreshRateFactor; /* [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) */ - UCHAR ucReserved2[2]; - ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; /* Memory Timing block sort from lower clock to higher clock */ -} ATOM_VRAM_MODULE_V4; +typedef struct _ATOM_VRAM_MODULE_V4 +{ + ULONG ulChannelMapCfg; // board dependent parameter: Channel combination + USHORT usModuleSize; // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE + USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! + // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) + USHORT usReserved; + UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module + UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; + UCHAR ucChannelNum; // Number of channels present in this module config + UCHAR ucChannelWidth; // 0 - 32 bits; 1 - 64 bits + UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16 + UCHAR ucFlag; // To enable/disable functionalities based on memory type + UCHAR ucMisc; // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 + UCHAR ucVREFI; // board dependent parameter + UCHAR ucNPL_RT; // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters + UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble + UCHAR ucMemorySize; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! 
+ // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros + UCHAR ucReserved[3]; + +//compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level + union{ + USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type + USHORT usDDR3_Reserved; + }; + union{ + USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type + USHORT usDDR3_MR3; // Used for DDR3 memory + }; + UCHAR ucMemoryVenderID; // Predefined, If not predefined, vendor detection table gets executed + UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) + UCHAR ucReserved2[2]; + ATOM_MEMORY_TIMING_FORMAT asMemTiming[5];//Memory Timing block sort from lower clock to higher clock +}ATOM_VRAM_MODULE_V4; #define VRAM_MODULE_V4_MISC_RANK_MASK 0x3 #define VRAM_MODULE_V4_MISC_DUAL_RANK 0x1 @@ -3973,96 +4841,139 @@ typedef struct _ATOM_VRAM_MODULE_V4 { #define VRAM_MODULE_V4_MISC_BL8 0x4 #define VRAM_MODULE_V4_MISC_DUAL_CS 0x10 -typedef struct _ATOM_VRAM_MODULE_V5 { - ULONG ulChannelMapCfg; /* board dependent parameter: Channel combination */ - USHORT usModuleSize; /* size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE */ - USHORT usPrivateReserved; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */ - /* MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) */ - USHORT usReserved; - UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */ - UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; */ - UCHAR ucChannelNum; /* Number of channels present in this module config */ - UCHAR ucChannelWidth; /* 0 - 32 bits; 1 - 64 bits */ - UCHAR ucDensity; /* _8Mx32, _16Mx32, _16Mx16, _32Mx16 */ - UCHAR ucFlag; /* To enable/disable functionalities based on memory type */ - UCHAR ucMisc; /* bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 */ - UCHAR ucVREFI; /* board dependent parameter */ - UCHAR ucNPL_RT; /* board dependent parameter:NPL round trip delay, used for calculate memory timing parameters */ - UCHAR ucPreamble; /* [7:4] Write Preamble, [3:0] Read Preamble */ - UCHAR ucMemorySize; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */ - /* Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros */ - UCHAR ucReserved[3]; +typedef struct _ATOM_VRAM_MODULE_V5 +{ + ULONG ulChannelMapCfg; // board dependent parameter: Channel combination + USHORT usModuleSize; // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE + USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! 
+ // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) + USHORT usReserved; + UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module + UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; + UCHAR ucChannelNum; // Number of channels present in this module config + UCHAR ucChannelWidth; // 0 - 32 bits; 1 - 64 bits + UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16 + UCHAR ucFlag; // To enable/disable functionalities based on memory type + UCHAR ucMisc; // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 + UCHAR ucVREFI; // board dependent parameter + UCHAR ucNPL_RT; // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters + UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble + UCHAR ucMemorySize; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! + // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros + UCHAR ucReserved[3]; + +//compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level + USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type + USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type + UCHAR ucMemoryVenderID; // Predefined, If not predefined, vendor detection table gets executed + UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) + UCHAR ucFIFODepth; // FIFO depth supposes to be detected during vendor detection, but if we dont do vendor detection we have to hardcode FIFO Depth + UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth + ATOM_MEMORY_TIMING_FORMAT_V1 asMemTiming[5];//Memory Timing block sort from lower clock to higher clock +}ATOM_VRAM_MODULE_V5; + +typedef struct _ATOM_VRAM_MODULE_V6 +{ + ULONG ulChannelMapCfg; // board dependent parameter: Channel combination + USHORT usModuleSize; // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE + USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! + // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) + USHORT usReserved; + UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module + UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; + UCHAR ucChannelNum; // Number of channels present in this module config + UCHAR ucChannelWidth; // 0 - 32 bits; 1 - 64 bits + UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16 + UCHAR ucFlag; // To enable/disable functionalities based on memory type + UCHAR ucMisc; // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 + UCHAR ucVREFI; // board dependent parameter + UCHAR ucNPL_RT; // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters + UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble + UCHAR ucMemorySize; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! 
+ // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros + UCHAR ucReserved[3]; + +//compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level + USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type + USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type + UCHAR ucMemoryVenderID; // Predefined, If not predefined, vendor detection table gets executed + UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) + UCHAR ucFIFODepth; // FIFO depth supposes to be detected during vendor detection, but if we dont do vendor detection we have to hardcode FIFO Depth + UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth + ATOM_MEMORY_TIMING_FORMAT_V2 asMemTiming[5];//Memory Timing block sort from lower clock to higher clock +}ATOM_VRAM_MODULE_V6; + + + +typedef struct _ATOM_VRAM_INFO_V2 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + UCHAR ucNumOfVRAMModule; + ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule; +}ATOM_VRAM_INFO_V2; -/* compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level */ - USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */ - USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */ - UCHAR ucMemoryVenderID; /* Predefined, If not predefined, vendor detection table gets executed */ - UCHAR ucRefreshRateFactor; /* [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) */ - UCHAR ucFIFODepth; /* FIFO depth supposes to be detected during vendor detection, but if we dont do vendor detection we have to hardcode FIFO Depth */ - UCHAR ucCDR_Bandwidth; /* [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth */ - ATOM_MEMORY_TIMING_FORMAT_V1 asMemTiming[5]; /* Memory Timing block sort from lower clock to higher clock */ -} ATOM_VRAM_MODULE_V5; - -typedef struct _ATOM_VRAM_INFO_V2 { - ATOM_COMMON_TABLE_HEADER sHeader; - UCHAR ucNumOfVRAMModule; - ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; /* just for allocation, real number of blocks is in ucNumOfVRAMModule; */ -} ATOM_VRAM_INFO_V2; - -typedef struct _ATOM_VRAM_INFO_V3 { - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usMemAdjustTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting */ - USHORT usMemClkPatchTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting */ - USHORT usRerseved; - UCHAR aVID_PinsShift[9]; /* 8 bit strap maximum+terminator */ - UCHAR ucNumOfVRAMModule; - ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; /* just for allocation, real number of blocks is in ucNumOfVRAMModule; */ - ATOM_INIT_REG_BLOCK asMemPatch; /* for allocation */ - /* ATOM_INIT_REG_BLOCK aMemAdjust; */ -} ATOM_VRAM_INFO_V3; +typedef struct _ATOM_VRAM_INFO_V3 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting + USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting + USHORT usRerseved; + UCHAR aVID_PinsShift[9]; // 8 bit strap maximum+terminator + UCHAR ucNumOfVRAMModule; + ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule; + ATOM_INIT_REG_BLOCK asMemPatch; // for allocation + // 
ATOM_INIT_REG_BLOCK aMemAdjust; +}ATOM_VRAM_INFO_V3; #define ATOM_VRAM_INFO_LAST ATOM_VRAM_INFO_V3 -typedef struct _ATOM_VRAM_INFO_V4 { - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usMemAdjustTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting */ - USHORT usMemClkPatchTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting */ - USHORT usRerseved; - UCHAR ucMemDQ7_0ByteRemap; /* DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3 */ - ULONG ulMemDQ7_0BitRemap; /* each DQ line ( 7~0) use 3bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... DQ7:[23:21] */ - UCHAR ucReservde[4]; - UCHAR ucNumOfVRAMModule; - ATOM_VRAM_MODULE_V4 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; /* just for allocation, real number of blocks is in ucNumOfVRAMModule; */ - ATOM_INIT_REG_BLOCK asMemPatch; /* for allocation */ - /* ATOM_INIT_REG_BLOCK aMemAdjust; */ -} ATOM_VRAM_INFO_V4; - -typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO { - ATOM_COMMON_TABLE_HEADER sHeader; - UCHAR aVID_PinsShift[9]; /* 8 bit strap maximum+terminator */ -} ATOM_VRAM_GPIO_DETECTION_INFO; - -typedef struct _ATOM_MEMORY_TRAINING_INFO { - ATOM_COMMON_TABLE_HEADER sHeader; - UCHAR ucTrainingLoop; - UCHAR ucReserved[3]; - ATOM_INIT_REG_BLOCK asMemTrainingSetting; -} ATOM_MEMORY_TRAINING_INFO; - -typedef struct SW_I2C_CNTL_DATA_PARAMETERS { - UCHAR ucControl; - UCHAR ucData; - UCHAR ucSatus; - UCHAR ucTemp; +typedef struct _ATOM_VRAM_INFO_V4 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting + USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting + USHORT usRerseved; + UCHAR ucMemDQ7_0ByteRemap; // DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3 + ULONG ulMemDQ7_0BitRemap; // each DQ line ( 7~0) use 3bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... 
DQ7:[23:21] + UCHAR ucReservde[4]; + UCHAR ucNumOfVRAMModule; + ATOM_VRAM_MODULE_V4 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule; + ATOM_INIT_REG_BLOCK asMemPatch; // for allocation + // ATOM_INIT_REG_BLOCK aMemAdjust; +}ATOM_VRAM_INFO_V4; + +typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + UCHAR aVID_PinsShift[9]; //8 bit strap maximum+terminator +}ATOM_VRAM_GPIO_DETECTION_INFO; + + +typedef struct _ATOM_MEMORY_TRAINING_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + UCHAR ucTrainingLoop; + UCHAR ucReserved[3]; + ATOM_INIT_REG_BLOCK asMemTrainingSetting; +}ATOM_MEMORY_TRAINING_INFO; + + +typedef struct SW_I2C_CNTL_DATA_PARAMETERS +{ + UCHAR ucControl; + UCHAR ucData; + UCHAR ucSatus; + UCHAR ucTemp; } SW_I2C_CNTL_DATA_PARAMETERS; #define SW_I2C_CNTL_DATA_PS_ALLOCATION SW_I2C_CNTL_DATA_PARAMETERS -typedef struct _SW_I2C_IO_DATA_PARAMETERS { - USHORT GPIO_Info; - UCHAR ucAct; - UCHAR ucData; -} SW_I2C_IO_DATA_PARAMETERS; +typedef struct _SW_I2C_IO_DATA_PARAMETERS +{ + USHORT GPIO_Info; + UCHAR ucAct; + UCHAR ucData; + } SW_I2C_IO_DATA_PARAMETERS; #define SW_I2C_IO_DATA_PS_ALLOCATION SW_I2C_IO_DATA_PARAMETERS @@ -4087,127 +4998,136 @@ typedef struct _SW_I2C_IO_DATA_PARAMETERS { #define SW_I2C_CNTL_CLOSE 5 #define SW_I2C_CNTL_WRITE1BIT 6 -/* ==============================VESA definition Portion=============================== */ +//==============================VESA definition Portion=============================== #define VESA_OEM_PRODUCT_REV '01.00' -#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB /* refer to VBE spec p.32, no TTY support */ +#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support #define VESA_MODE_WIN_ATTRIBUTE 7 #define VESA_WIN_SIZE 64 -typedef struct _PTR_32_BIT_STRUCTURE { - USHORT Offset16; - USHORT Segment16; +typedef struct _PTR_32_BIT_STRUCTURE +{ + USHORT Offset16; + USHORT Segment16; } PTR_32_BIT_STRUCTURE; -typedef union _PTR_32_BIT_UNION { - PTR_32_BIT_STRUCTURE SegmentOffset; - ULONG Ptr32_Bit; +typedef union _PTR_32_BIT_UNION +{ + PTR_32_BIT_STRUCTURE SegmentOffset; + ULONG Ptr32_Bit; } PTR_32_BIT_UNION; -typedef struct _VBE_1_2_INFO_BLOCK_UPDATABLE { - UCHAR VbeSignature[4]; - USHORT VbeVersion; - PTR_32_BIT_UNION OemStringPtr; - UCHAR Capabilities[4]; - PTR_32_BIT_UNION VideoModePtr; - USHORT TotalMemory; +typedef struct _VBE_1_2_INFO_BLOCK_UPDATABLE +{ + UCHAR VbeSignature[4]; + USHORT VbeVersion; + PTR_32_BIT_UNION OemStringPtr; + UCHAR Capabilities[4]; + PTR_32_BIT_UNION VideoModePtr; + USHORT TotalMemory; } VBE_1_2_INFO_BLOCK_UPDATABLE; -typedef struct _VBE_2_0_INFO_BLOCK_UPDATABLE { - VBE_1_2_INFO_BLOCK_UPDATABLE CommonBlock; - USHORT OemSoftRev; - PTR_32_BIT_UNION OemVendorNamePtr; - PTR_32_BIT_UNION OemProductNamePtr; - PTR_32_BIT_UNION OemProductRevPtr; + +typedef struct _VBE_2_0_INFO_BLOCK_UPDATABLE +{ + VBE_1_2_INFO_BLOCK_UPDATABLE CommonBlock; + USHORT OemSoftRev; + PTR_32_BIT_UNION OemVendorNamePtr; + PTR_32_BIT_UNION OemProductNamePtr; + PTR_32_BIT_UNION OemProductRevPtr; } VBE_2_0_INFO_BLOCK_UPDATABLE; -typedef union _VBE_VERSION_UNION { - VBE_2_0_INFO_BLOCK_UPDATABLE VBE_2_0_InfoBlock; - VBE_1_2_INFO_BLOCK_UPDATABLE VBE_1_2_InfoBlock; +typedef union _VBE_VERSION_UNION +{ + VBE_2_0_INFO_BLOCK_UPDATABLE VBE_2_0_InfoBlock; + VBE_1_2_INFO_BLOCK_UPDATABLE VBE_1_2_InfoBlock; } VBE_VERSION_UNION; -typedef struct _VBE_INFO_BLOCK { - VBE_VERSION_UNION UpdatableVBE_Info; - UCHAR Reserved[222]; - UCHAR OemData[256]; 
+typedef struct _VBE_INFO_BLOCK +{ + VBE_VERSION_UNION UpdatableVBE_Info; + UCHAR Reserved[222]; + UCHAR OemData[256]; } VBE_INFO_BLOCK; -typedef struct _VBE_FP_INFO { - USHORT HSize; - USHORT VSize; - USHORT FPType; - UCHAR RedBPP; - UCHAR GreenBPP; - UCHAR BlueBPP; - UCHAR ReservedBPP; - ULONG RsvdOffScrnMemSize; - ULONG RsvdOffScrnMEmPtr; - UCHAR Reserved[14]; +typedef struct _VBE_FP_INFO +{ + USHORT HSize; + USHORT VSize; + USHORT FPType; + UCHAR RedBPP; + UCHAR GreenBPP; + UCHAR BlueBPP; + UCHAR ReservedBPP; + ULONG RsvdOffScrnMemSize; + ULONG RsvdOffScrnMEmPtr; + UCHAR Reserved[14]; } VBE_FP_INFO; -typedef struct _VESA_MODE_INFO_BLOCK { -/* Mandatory information for all VBE revisions */ - USHORT ModeAttributes; /* dw ? ; mode attributes */ - UCHAR WinAAttributes; /* db ? ; window A attributes */ - UCHAR WinBAttributes; /* db ? ; window B attributes */ - USHORT WinGranularity; /* dw ? ; window granularity */ - USHORT WinSize; /* dw ? ; window size */ - USHORT WinASegment; /* dw ? ; window A start segment */ - USHORT WinBSegment; /* dw ? ; window B start segment */ - ULONG WinFuncPtr; /* dd ? ; real mode pointer to window function */ - USHORT BytesPerScanLine; /* dw ? ; bytes per scan line */ - -/* ; Mandatory information for VBE 1.2 and above */ - USHORT XResolution; /* dw ? ; horizontal resolution in pixels or characters */ - USHORT YResolution; /* dw ? ; vertical resolution in pixels or characters */ - UCHAR XCharSize; /* db ? ; character cell width in pixels */ - UCHAR YCharSize; /* db ? ; character cell height in pixels */ - UCHAR NumberOfPlanes; /* db ? ; number of memory planes */ - UCHAR BitsPerPixel; /* db ? ; bits per pixel */ - UCHAR NumberOfBanks; /* db ? ; number of banks */ - UCHAR MemoryModel; /* db ? ; memory model type */ - UCHAR BankSize; /* db ? ; bank size in KB */ - UCHAR NumberOfImagePages; /* db ? ; number of images */ - UCHAR ReservedForPageFunction; /* db 1 ; reserved for page function */ - -/* ; Direct Color fields(required for direct/6 and YUV/7 memory models) */ - UCHAR RedMaskSize; /* db ? ; size of direct color red mask in bits */ - UCHAR RedFieldPosition; /* db ? ; bit position of lsb of red mask */ - UCHAR GreenMaskSize; /* db ? ; size of direct color green mask in bits */ - UCHAR GreenFieldPosition; /* db ? ; bit position of lsb of green mask */ - UCHAR BlueMaskSize; /* db ? ; size of direct color blue mask in bits */ - UCHAR BlueFieldPosition; /* db ? ; bit position of lsb of blue mask */ - UCHAR RsvdMaskSize; /* db ? ; size of direct color reserved mask in bits */ - UCHAR RsvdFieldPosition; /* db ? ; bit position of lsb of reserved mask */ - UCHAR DirectColorModeInfo; /* db ? ; direct color mode attributes */ - -/* ; Mandatory information for VBE 2.0 and above */ - ULONG PhysBasePtr; /* dd ? ; physical address for flat memory frame buffer */ - ULONG Reserved_1; /* dd 0 ; reserved - always set to 0 */ - USHORT Reserved_2; /* dw 0 ; reserved - always set to 0 */ - -/* ; Mandatory information for VBE 3.0 and above */ - USHORT LinBytesPerScanLine; /* dw ? ; bytes per scan line for linear modes */ - UCHAR BnkNumberOfImagePages; /* db ? ; number of images for banked modes */ - UCHAR LinNumberOfImagPages; /* db ? ; number of images for linear modes */ - UCHAR LinRedMaskSize; /* db ? ; size of direct color red mask(linear modes) */ - UCHAR LinRedFieldPosition; /* db ? ; bit position of lsb of red mask(linear modes) */ - UCHAR LinGreenMaskSize; /* db ? ; size of direct color green mask(linear modes) */ - UCHAR LinGreenFieldPosition; /* db ? 
; bit position of lsb of green mask(linear modes) */ - UCHAR LinBlueMaskSize; /* db ? ; size of direct color blue mask(linear modes) */ - UCHAR LinBlueFieldPosition; /* db ? ; bit position of lsb of blue mask(linear modes) */ - UCHAR LinRsvdMaskSize; /* db ? ; size of direct color reserved mask(linear modes) */ - UCHAR LinRsvdFieldPosition; /* db ? ; bit position of lsb of reserved mask(linear modes) */ - ULONG MaxPixelClock; /* dd ? ; maximum pixel clock(in Hz) for graphics mode */ - UCHAR Reserved; /* db 190 dup (0) */ +typedef struct _VESA_MODE_INFO_BLOCK +{ +// Mandatory information for all VBE revisions + USHORT ModeAttributes; // dw ? ; mode attributes + UCHAR WinAAttributes; // db ? ; window A attributes + UCHAR WinBAttributes; // db ? ; window B attributes + USHORT WinGranularity; // dw ? ; window granularity + USHORT WinSize; // dw ? ; window size + USHORT WinASegment; // dw ? ; window A start segment + USHORT WinBSegment; // dw ? ; window B start segment + ULONG WinFuncPtr; // dd ? ; real mode pointer to window function + USHORT BytesPerScanLine;// dw ? ; bytes per scan line + +//; Mandatory information for VBE 1.2 and above + USHORT XResolution; // dw ? ; horizontal resolution in pixels or characters + USHORT YResolution; // dw ? ; vertical resolution in pixels or characters + UCHAR XCharSize; // db ? ; character cell width in pixels + UCHAR YCharSize; // db ? ; character cell height in pixels + UCHAR NumberOfPlanes; // db ? ; number of memory planes + UCHAR BitsPerPixel; // db ? ; bits per pixel + UCHAR NumberOfBanks; // db ? ; number of banks + UCHAR MemoryModel; // db ? ; memory model type + UCHAR BankSize; // db ? ; bank size in KB + UCHAR NumberOfImagePages;// db ? ; number of images + UCHAR ReservedForPageFunction;//db 1 ; reserved for page function + +//; Direct Color fields(required for direct/6 and YUV/7 memory models) + UCHAR RedMaskSize; // db ? ; size of direct color red mask in bits + UCHAR RedFieldPosition; // db ? ; bit position of lsb of red mask + UCHAR GreenMaskSize; // db ? ; size of direct color green mask in bits + UCHAR GreenFieldPosition; // db ? ; bit position of lsb of green mask + UCHAR BlueMaskSize; // db ? ; size of direct color blue mask in bits + UCHAR BlueFieldPosition; // db ? ; bit position of lsb of blue mask + UCHAR RsvdMaskSize; // db ? ; size of direct color reserved mask in bits + UCHAR RsvdFieldPosition; // db ? ; bit position of lsb of reserved mask + UCHAR DirectColorModeInfo;// db ? ; direct color mode attributes + +//; Mandatory information for VBE 2.0 and above + ULONG PhysBasePtr; // dd ? ; physical address for flat memory frame buffer + ULONG Reserved_1; // dd 0 ; reserved - always set to 0 + USHORT Reserved_2; // dw 0 ; reserved - always set to 0 + +//; Mandatory information for VBE 3.0 and above + USHORT LinBytesPerScanLine; // dw ? ; bytes per scan line for linear modes + UCHAR BnkNumberOfImagePages;// db ? ; number of images for banked modes + UCHAR LinNumberOfImagPages; // db ? ; number of images for linear modes + UCHAR LinRedMaskSize; // db ? ; size of direct color red mask(linear modes) + UCHAR LinRedFieldPosition; // db ? ; bit position of lsb of red mask(linear modes) + UCHAR LinGreenMaskSize; // db ? ; size of direct color green mask(linear modes) + UCHAR LinGreenFieldPosition;// db ? ; bit position of lsb of green mask(linear modes) + UCHAR LinBlueMaskSize; // db ? ; size of direct color blue mask(linear modes) + UCHAR LinBlueFieldPosition; // db ? 
; bit position of lsb of blue mask(linear modes) + UCHAR LinRsvdMaskSize; // db ? ; size of direct color reserved mask(linear modes) + UCHAR LinRsvdFieldPosition; // db ? ; bit position of lsb of reserved mask(linear modes) + ULONG MaxPixelClock; // dd ? ; maximum pixel clock(in Hz) for graphics mode + UCHAR Reserved; // db 190 dup (0) } VESA_MODE_INFO_BLOCK; -/* BIOS function CALLS */ -#define ATOM_BIOS_EXTENDED_FUNCTION_CODE 0xA0 /* ATI Extended Function code */ +// BIOS function CALLS +#define ATOM_BIOS_EXTENDED_FUNCTION_CODE 0xA0 // ATI Extended Function code #define ATOM_BIOS_FUNCTION_COP_MODE 0x00 #define ATOM_BIOS_FUNCTION_SHORT_QUERY1 0x04 #define ATOM_BIOS_FUNCTION_SHORT_QUERY2 0x05 #define ATOM_BIOS_FUNCTION_SHORT_QUERY3 0x06 -#define ATOM_BIOS_FUNCTION_GET_DDC 0x0B +#define ATOM_BIOS_FUNCTION_GET_DDC 0x0B #define ATOM_BIOS_FUNCTION_ASIC_DSTATE 0x0E #define ATOM_BIOS_FUNCTION_DEBUG_PLAY 0x0F #define ATOM_BIOS_FUNCTION_STV_STD 0x16 @@ -4217,100 +5137,135 @@ typedef struct _VESA_MODE_INFO_BLOCK { #define ATOM_BIOS_FUNCTION_PANEL_CONTROL 0x82 #define ATOM_BIOS_FUNCTION_OLD_DEVICE_DET 0x83 #define ATOM_BIOS_FUNCTION_OLD_DEVICE_SWITCH 0x84 -#define ATOM_BIOS_FUNCTION_HW_ICON 0x8A +#define ATOM_BIOS_FUNCTION_HW_ICON 0x8A #define ATOM_BIOS_FUNCTION_SET_CMOS 0x8B -#define SUB_FUNCTION_UPDATE_DISPLAY_INFO 0x8000 /* Sub function 80 */ -#define SUB_FUNCTION_UPDATE_EXPANSION_INFO 0x8100 /* Sub function 80 */ +#define SUB_FUNCTION_UPDATE_DISPLAY_INFO 0x8000 // Sub function 80 +#define SUB_FUNCTION_UPDATE_EXPANSION_INFO 0x8100 // Sub function 80 #define ATOM_BIOS_FUNCTION_DISPLAY_INFO 0x8D #define ATOM_BIOS_FUNCTION_DEVICE_ON_OFF 0x8E -#define ATOM_BIOS_FUNCTION_VIDEO_STATE 0x8F -#define ATOM_SUB_FUNCTION_GET_CRITICAL_STATE 0x0300 /* Sub function 03 */ -#define ATOM_SUB_FUNCTION_GET_LIDSTATE 0x0700 /* Sub function 7 */ -#define ATOM_SUB_FUNCTION_THERMAL_STATE_NOTICE 0x1400 /* Notify caller the current thermal state */ -#define ATOM_SUB_FUNCTION_CRITICAL_STATE_NOTICE 0x8300 /* Notify caller the current critical state */ -#define ATOM_SUB_FUNCTION_SET_LIDSTATE 0x8500 /* Sub function 85 */ -#define ATOM_SUB_FUNCTION_GET_REQ_DISPLAY_FROM_SBIOS_MODE 0x8900 /* Sub function 89 */ -#define ATOM_SUB_FUNCTION_INFORM_ADC_SUPPORT 0x9400 /* Notify caller that ADC is supported */ - -#define ATOM_BIOS_FUNCTION_VESA_DPMS 0x4F10 /* Set DPMS */ -#define ATOM_SUB_FUNCTION_SET_DPMS 0x0001 /* BL: Sub function 01 */ -#define ATOM_SUB_FUNCTION_GET_DPMS 0x0002 /* BL: Sub function 02 */ -#define ATOM_PARAMETER_VESA_DPMS_ON 0x0000 /* BH Parameter for DPMS ON. 
*/ -#define ATOM_PARAMETER_VESA_DPMS_STANDBY 0x0100 /* BH Parameter for DPMS STANDBY */ -#define ATOM_PARAMETER_VESA_DPMS_SUSPEND 0x0200 /* BH Parameter for DPMS SUSPEND */ -#define ATOM_PARAMETER_VESA_DPMS_OFF 0x0400 /* BH Parameter for DPMS OFF */ -#define ATOM_PARAMETER_VESA_DPMS_REDUCE_ON 0x0800 /* BH Parameter for DPMS REDUCE ON (NOT SUPPORTED) */ +#define ATOM_BIOS_FUNCTION_VIDEO_STATE 0x8F +#define ATOM_SUB_FUNCTION_GET_CRITICAL_STATE 0x0300 // Sub function 03 +#define ATOM_SUB_FUNCTION_GET_LIDSTATE 0x0700 // Sub function 7 +#define ATOM_SUB_FUNCTION_THERMAL_STATE_NOTICE 0x1400 // Notify caller the current thermal state +#define ATOM_SUB_FUNCTION_CRITICAL_STATE_NOTICE 0x8300 // Notify caller the current critical state +#define ATOM_SUB_FUNCTION_SET_LIDSTATE 0x8500 // Sub function 85 +#define ATOM_SUB_FUNCTION_GET_REQ_DISPLAY_FROM_SBIOS_MODE 0x8900// Sub function 89 +#define ATOM_SUB_FUNCTION_INFORM_ADC_SUPPORT 0x9400 // Notify caller that ADC is supported + + +#define ATOM_BIOS_FUNCTION_VESA_DPMS 0x4F10 // Set DPMS +#define ATOM_SUB_FUNCTION_SET_DPMS 0x0001 // BL: Sub function 01 +#define ATOM_SUB_FUNCTION_GET_DPMS 0x0002 // BL: Sub function 02 +#define ATOM_PARAMETER_VESA_DPMS_ON 0x0000 // BH Parameter for DPMS ON. +#define ATOM_PARAMETER_VESA_DPMS_STANDBY 0x0100 // BH Parameter for DPMS STANDBY +#define ATOM_PARAMETER_VESA_DPMS_SUSPEND 0x0200 // BH Parameter for DPMS SUSPEND +#define ATOM_PARAMETER_VESA_DPMS_OFF 0x0400 // BH Parameter for DPMS OFF +#define ATOM_PARAMETER_VESA_DPMS_REDUCE_ON 0x0800 // BH Parameter for DPMS REDUCE ON (NOT SUPPORTED) #define ATOM_BIOS_RETURN_CODE_MASK 0x0000FF00L #define ATOM_BIOS_REG_HIGH_MASK 0x0000FF00L #define ATOM_BIOS_REG_LOW_MASK 0x000000FFL -/* structure used for VBIOS only */ +// structure used for VBIOS only -/* DispOutInfoTable */ -typedef struct _ASIC_TRANSMITTER_INFO { +//DispOutInfoTable +typedef struct _ASIC_TRANSMITTER_INFO +{ USHORT usTransmitterObjId; USHORT usSupportDevice; - UCHAR ucTransmitterCmdTblId; - UCHAR ucConfig; - UCHAR ucEncoderID; /* available 1st encoder ( default ) */ - UCHAR ucOptionEncoderID; /* available 2nd encoder ( optional ) */ - UCHAR uc2ndEncoderID; - UCHAR ucReserved; -} ASIC_TRANSMITTER_INFO; - -typedef struct _ASIC_ENCODER_INFO { + UCHAR ucTransmitterCmdTblId; + UCHAR ucConfig; + UCHAR ucEncoderID; //available 1st encoder ( default ) + UCHAR ucOptionEncoderID; //available 2nd encoder ( optional ) + UCHAR uc2ndEncoderID; + UCHAR ucReserved; +}ASIC_TRANSMITTER_INFO; + +typedef struct _ASIC_ENCODER_INFO +{ UCHAR ucEncoderID; UCHAR ucEncoderConfig; - USHORT usEncoderCmdTblId; -} ASIC_ENCODER_INFO; + USHORT usEncoderCmdTblId; +}ASIC_ENCODER_INFO; + +typedef struct _ATOM_DISP_OUT_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT ptrTransmitterInfo; + USHORT ptrEncoderInfo; + ASIC_TRANSMITTER_INFO asTransmitterInfo[1]; + ASIC_ENCODER_INFO asEncoderInfo[1]; +}ATOM_DISP_OUT_INFO; -typedef struct _ATOM_DISP_OUT_INFO { - ATOM_COMMON_TABLE_HEADER sHeader; +typedef struct _ATOM_DISP_OUT_INFO_V2 +{ + ATOM_COMMON_TABLE_HEADER sHeader; USHORT ptrTransmitterInfo; USHORT ptrEncoderInfo; - ASIC_TRANSMITTER_INFO asTransmitterInfo[1]; - ASIC_ENCODER_INFO asEncoderInfo[1]; -} ATOM_DISP_OUT_INFO; + USHORT ptrMainCallParserFar; // direct address of main parser call in VBIOS binary. 
+ ASIC_TRANSMITTER_INFO asTransmitterInfo[1]; + ASIC_ENCODER_INFO asEncoderInfo[1]; +}ATOM_DISP_OUT_INFO_V2; -/* DispDevicePriorityInfo */ -typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO { - ATOM_COMMON_TABLE_HEADER sHeader; +// DispDevicePriorityInfo +typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; USHORT asDevicePriority[16]; -} ATOM_DISPLAY_DEVICE_PRIORITY_INFO; - -/* ProcessAuxChannelTransactionTable */ -typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS { - USHORT lpAuxRequest; - USHORT lpDataOut; - UCHAR ucChannelID; - union { - UCHAR ucReplyStatus; - UCHAR ucDelay; +}ATOM_DISPLAY_DEVICE_PRIORITY_INFO; + +//ProcessAuxChannelTransactionTable +typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS +{ + USHORT lpAuxRequest; + USHORT lpDataOut; + UCHAR ucChannelID; + union + { + UCHAR ucReplyStatus; + UCHAR ucDelay; + }; + UCHAR ucDataOutLen; + UCHAR ucReserved; +}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS; + +//ProcessAuxChannelTransactionTable +typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 +{ + USHORT lpAuxRequest; + USHORT lpDataOut; + UCHAR ucChannelID; + union + { + UCHAR ucReplyStatus; + UCHAR ucDelay; }; - UCHAR ucDataOutLen; - UCHAR ucReserved; -} PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS; + UCHAR ucDataOutLen; + UCHAR ucHPD_ID; //=0: HPD1, =1: HPD2, =2: HPD3, =3: HPD4, =4: HPD5, =5: HPD6 +}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2; #define PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS -/* GetSinkType */ +//GetSinkType -typedef struct _DP_ENCODER_SERVICE_PARAMETERS { +typedef struct _DP_ENCODER_SERVICE_PARAMETERS +{ USHORT ucLinkClock; - union { - UCHAR ucConfig; /* for DP training command */ - UCHAR ucI2cId; /* use for GET_SINK_TYPE command */ + union + { + UCHAR ucConfig; // for DP training command + UCHAR ucI2cId; // use for GET_SINK_TYPE command }; UCHAR ucAction; UCHAR ucStatus; UCHAR ucLaneNum; UCHAR ucReserved[2]; -} DP_ENCODER_SERVICE_PARAMETERS; +}DP_ENCODER_SERVICE_PARAMETERS; -/* ucAction */ +// ucAction #define ATOM_DP_ACTION_GET_SINK_TYPE 0x01 +/* obselete */ #define ATOM_DP_ACTION_TRAINING_START 0x02 #define ATOM_DP_ACTION_TRAINING_COMPLETE 0x03 #define ATOM_DP_ACTION_TRAINING_PATTERN_SEL 0x04 @@ -4318,7 +5273,7 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS { #define ATOM_DP_ACTION_GET_VSWING_PREEMP 0x06 #define ATOM_DP_ACTION_BLANKING 0x07 -/* ucConfig */ +// ucConfig #define ATOM_DP_CONFIG_ENCODER_SEL_MASK 0x03 #define ATOM_DP_CONFIG_DIG1_ENCODER 0x00 #define ATOM_DP_CONFIG_DIG2_ENCODER 0x01 @@ -4326,14 +5281,14 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS { #define ATOM_DP_CONFIG_LINK_SEL_MASK 0x04 #define ATOM_DP_CONFIG_LINK_A 0x00 #define ATOM_DP_CONFIG_LINK_B 0x04 - +/* /obselete */ #define DP_ENCODER_SERVICE_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS -/* DP_TRAINING_TABLE */ -#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR +// DP_TRAINING_TABLE +#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR #define DPCD_SET_SS_CNTL_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 8 ) -#define DPCD_SET_LANE_VSWING_PREEMP_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 16) -#define DPCD_SET_TRAINING_PATTERN0_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 24) +#define DPCD_SET_LANE_VSWING_PREEMP_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 16 ) +#define DPCD_SET_TRAINING_PATTERN0_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 24 ) #define DPCD_SET_TRAINING_PATTERN2_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 32) #define 
DPCD_GET_LINKRATE_LANENUM_SS_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 40) #define DPCD_GET_LANE_STATUS_ADJUST_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 48) @@ -4341,183 +5296,241 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS { #define DP_I2C_AUX_DDC_WRITE_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 64) #define DP_I2C_AUX_DDC_READ_START_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 72) #define DP_I2C_AUX_DDC_READ_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 76) -#define DP_I2C_AUX_DDC_READ_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 80) +#define DP_I2C_AUX_DDC_WRITE_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 80) +#define DP_I2C_AUX_DDC_READ_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 84) -typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS { - UCHAR ucI2CSpeed; - union { - UCHAR ucRegIndex; - UCHAR ucStatus; +typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS +{ + UCHAR ucI2CSpeed; + union + { + UCHAR ucRegIndex; + UCHAR ucStatus; }; - USHORT lpI2CDataOut; - UCHAR ucFlag; - UCHAR ucTransBytes; - UCHAR ucSlaveAddr; - UCHAR ucLineNumber; -} PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS; + USHORT lpI2CDataOut; + UCHAR ucFlag; + UCHAR ucTransBytes; + UCHAR ucSlaveAddr; + UCHAR ucLineNumber; +}PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS; #define PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS -/* ucFlag */ +//ucFlag #define HW_I2C_WRITE 1 #define HW_I2C_READ 0 +#define I2C_2BYTE_ADDR 0x02 +typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2 +{ + UCHAR ucHWBlkInst; // HW block instance, 0, 1, 2, ... + UCHAR ucReserved[3]; +}SET_HWBLOCK_INSTANCE_PARAMETER_V2; + +#define HWBLKINST_INSTANCE_MASK 0x07 +#define HWBLKINST_HWBLK_MASK 0xF0 +#define HWBLKINST_HWBLK_SHIFT 0x04 + +//ucHWBlock +#define SELECT_DISP_ENGINE 0 +#define SELECT_DISP_PLL 1 +#define SELECT_DCIO_UNIPHY_LINK0 2 +#define SELECT_DCIO_UNIPHY_LINK1 3 +#define SELECT_DCIO_IMPCAL 4 +#define SELECT_DCIO_DIG 6 +#define SELECT_CRTC_PIXEL_RATE 7 + +/****************************************************************************/ +//Portion VI: Definitinos for vbios MC scratch registers that driver used /****************************************************************************/ -/* Portion VI: Definitinos being oboselete */ + +#define MC_MISC0__MEMORY_TYPE_MASK 0xF0000000 +#define MC_MISC0__MEMORY_TYPE__GDDR1 0x10000000 +#define MC_MISC0__MEMORY_TYPE__DDR2 0x20000000 +#define MC_MISC0__MEMORY_TYPE__GDDR3 0x30000000 +#define MC_MISC0__MEMORY_TYPE__GDDR4 0x40000000 +#define MC_MISC0__MEMORY_TYPE__GDDR5 0x50000000 +#define MC_MISC0__MEMORY_TYPE__DDR3 0xB0000000 + +/****************************************************************************/ +//Portion VI: Definitinos being oboselete /****************************************************************************/ -/* ========================================================================================== */ -/* Remove the definitions below when driver is ready! 
*/ -typedef struct _ATOM_DAC_INFO { - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usMaxFrequency; /* in 10kHz unit */ - USHORT usReserved; -} ATOM_DAC_INFO; - -typedef struct _COMPASSIONATE_DATA { - ATOM_COMMON_TABLE_HEADER sHeader; - - /* ============================== DAC1 portion */ - UCHAR ucDAC1_BG_Adjustment; - UCHAR ucDAC1_DAC_Adjustment; - USHORT usDAC1_FORCE_Data; - /* ============================== DAC2 portion */ - UCHAR ucDAC2_CRT2_BG_Adjustment; - UCHAR ucDAC2_CRT2_DAC_Adjustment; - USHORT usDAC2_CRT2_FORCE_Data; - USHORT usDAC2_CRT2_MUX_RegisterIndex; - UCHAR ucDAC2_CRT2_MUX_RegisterInfo; /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */ - UCHAR ucDAC2_NTSC_BG_Adjustment; - UCHAR ucDAC2_NTSC_DAC_Adjustment; - USHORT usDAC2_TV1_FORCE_Data; - USHORT usDAC2_TV1_MUX_RegisterIndex; - UCHAR ucDAC2_TV1_MUX_RegisterInfo; /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */ - UCHAR ucDAC2_CV_BG_Adjustment; - UCHAR ucDAC2_CV_DAC_Adjustment; - USHORT usDAC2_CV_FORCE_Data; - USHORT usDAC2_CV_MUX_RegisterIndex; - UCHAR ucDAC2_CV_MUX_RegisterInfo; /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */ - UCHAR ucDAC2_PAL_BG_Adjustment; - UCHAR ucDAC2_PAL_DAC_Adjustment; - USHORT usDAC2_TV2_FORCE_Data; -} COMPASSIONATE_DATA; +//========================================================================================== +//Remove the definitions below when driver is ready! +typedef struct _ATOM_DAC_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usMaxFrequency; // in 10kHz unit + USHORT usReserved; +}ATOM_DAC_INFO; + + +typedef struct _COMPASSIONATE_DATA +{ + ATOM_COMMON_TABLE_HEADER sHeader; + + //============================== DAC1 portion + UCHAR ucDAC1_BG_Adjustment; + UCHAR ucDAC1_DAC_Adjustment; + USHORT usDAC1_FORCE_Data; + //============================== DAC2 portion + UCHAR ucDAC2_CRT2_BG_Adjustment; + UCHAR ucDAC2_CRT2_DAC_Adjustment; + USHORT usDAC2_CRT2_FORCE_Data; + USHORT usDAC2_CRT2_MUX_RegisterIndex; + UCHAR ucDAC2_CRT2_MUX_RegisterInfo; //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low + UCHAR ucDAC2_NTSC_BG_Adjustment; + UCHAR ucDAC2_NTSC_DAC_Adjustment; + USHORT usDAC2_TV1_FORCE_Data; + USHORT usDAC2_TV1_MUX_RegisterIndex; + UCHAR ucDAC2_TV1_MUX_RegisterInfo; //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low + UCHAR ucDAC2_CV_BG_Adjustment; + UCHAR ucDAC2_CV_DAC_Adjustment; + USHORT usDAC2_CV_FORCE_Data; + USHORT usDAC2_CV_MUX_RegisterIndex; + UCHAR ucDAC2_CV_MUX_RegisterInfo; //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low + UCHAR ucDAC2_PAL_BG_Adjustment; + UCHAR ucDAC2_PAL_DAC_Adjustment; + USHORT usDAC2_TV2_FORCE_Data; +}COMPASSIONATE_DATA; /****************************Supported Device Info Table Definitions**********************/ -/* ucConnectInfo: */ -/* [7:4] - connector type */ -/* = 1 - VGA connector */ -/* = 2 - DVI-I */ -/* = 3 - DVI-D */ -/* = 4 - DVI-A */ -/* = 5 - SVIDEO */ -/* = 6 - COMPOSITE */ -/* = 7 - LVDS */ -/* = 8 - DIGITAL LINK */ -/* = 9 - SCART */ -/* = 0xA - HDMI_type A */ -/* = 0xB - HDMI_type B */ -/* = 0xE - Special case1 (DVI+DIN) */ -/* Others=TBD */ -/* [3:0] - DAC Associated */ -/* = 0 - no DAC */ -/* = 1 - DACA */ -/* = 2 - DACB */ -/* = 3 - External DAC */ -/* Others=TBD */ -/* */ - -typedef struct _ATOM_CONNECTOR_INFO { +// ucConnectInfo: +// [7:4] - connector type +// = 1 - VGA connector +// = 2 - DVI-I +// = 3 - DVI-D +// = 4 - DVI-A +// = 5 - SVIDEO +// = 6 - COMPOSITE +// = 7 - LVDS +// = 8 - DIGITAL LINK +// = 9 - SCART +// = 0xA - HDMI_type A +// = 0xB - HDMI_type B +// 
= 0xE - Special case1 (DVI+DIN) +// Others=TBD +// [3:0] - DAC Associated +// = 0 - no DAC +// = 1 - DACA +// = 2 - DACB +// = 3 - External DAC +// Others=TBD +// + +typedef struct _ATOM_CONNECTOR_INFO +{ #if ATOM_BIG_ENDIAN - UCHAR bfConnectorType:4; - UCHAR bfAssociatedDAC:4; + UCHAR bfConnectorType:4; + UCHAR bfAssociatedDAC:4; #else - UCHAR bfAssociatedDAC:4; - UCHAR bfConnectorType:4; + UCHAR bfAssociatedDAC:4; + UCHAR bfConnectorType:4; #endif -} ATOM_CONNECTOR_INFO; +}ATOM_CONNECTOR_INFO; + +typedef union _ATOM_CONNECTOR_INFO_ACCESS +{ + ATOM_CONNECTOR_INFO sbfAccess; + UCHAR ucAccess; +}ATOM_CONNECTOR_INFO_ACCESS; -typedef union _ATOM_CONNECTOR_INFO_ACCESS { - ATOM_CONNECTOR_INFO sbfAccess; - UCHAR ucAccess; -} ATOM_CONNECTOR_INFO_ACCESS; +typedef struct _ATOM_CONNECTOR_INFO_I2C +{ + ATOM_CONNECTOR_INFO_ACCESS sucConnectorInfo; + ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; +}ATOM_CONNECTOR_INFO_I2C; -typedef struct _ATOM_CONNECTOR_INFO_I2C { - ATOM_CONNECTOR_INFO_ACCESS sucConnectorInfo; - ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; -} ATOM_CONNECTOR_INFO_I2C; -typedef struct _ATOM_SUPPORTED_DEVICES_INFO { - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usDeviceSupport; - ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO]; -} ATOM_SUPPORTED_DEVICES_INFO; +typedef struct _ATOM_SUPPORTED_DEVICES_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usDeviceSupport; + ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO]; +}ATOM_SUPPORTED_DEVICES_INFO; #define NO_INT_SRC_MAPPED 0xFF -typedef struct _ATOM_CONNECTOR_INC_SRC_BITMAP { - UCHAR ucIntSrcBitmap; -} ATOM_CONNECTOR_INC_SRC_BITMAP; - -typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2 { - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usDeviceSupport; - ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2]; - ATOM_CONNECTOR_INC_SRC_BITMAP - asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2]; -} ATOM_SUPPORTED_DEVICES_INFO_2; - -typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 { - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usDeviceSupport; - ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE]; - ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE]; -} ATOM_SUPPORTED_DEVICES_INFO_2d1; +typedef struct _ATOM_CONNECTOR_INC_SRC_BITMAP +{ + UCHAR ucIntSrcBitmap; +}ATOM_CONNECTOR_INC_SRC_BITMAP; + +typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usDeviceSupport; + ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2]; + ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2]; +}ATOM_SUPPORTED_DEVICES_INFO_2; + +typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usDeviceSupport; + ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE]; + ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE]; +}ATOM_SUPPORTED_DEVICES_INFO_2d1; #define ATOM_SUPPORTED_DEVICES_INFO_LAST ATOM_SUPPORTED_DEVICES_INFO_2d1 -typedef struct _ATOM_MISC_CONTROL_INFO { - USHORT usFrequency; - UCHAR ucPLL_ChargePump; /* PLL charge-pump gain control */ - UCHAR ucPLL_DutyCycle; /* PLL duty cycle control */ - UCHAR ucPLL_VCO_Gain; /* PLL VCO gain control */ - UCHAR ucPLL_VoltageSwing; /* PLL driver voltage swing control */ -} ATOM_MISC_CONTROL_INFO; + + +typedef struct _ATOM_MISC_CONTROL_INFO +{ + USHORT usFrequency; + UCHAR ucPLL_ChargePump; // PLL charge-pump gain control + UCHAR ucPLL_DutyCycle; // PLL duty cycle control + UCHAR ucPLL_VCO_Gain; // PLL VCO gain control + UCHAR 
ucPLL_VoltageSwing; // PLL driver voltage swing control +}ATOM_MISC_CONTROL_INFO; + #define ATOM_MAX_MISC_INFO 4 -typedef struct _ATOM_TMDS_INFO { - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usMaxFrequency; /* in 10Khz */ - ATOM_MISC_CONTROL_INFO asMiscInfo[ATOM_MAX_MISC_INFO]; -} ATOM_TMDS_INFO; +typedef struct _ATOM_TMDS_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usMaxFrequency; // in 10Khz + ATOM_MISC_CONTROL_INFO asMiscInfo[ATOM_MAX_MISC_INFO]; +}ATOM_TMDS_INFO; + + +typedef struct _ATOM_ENCODER_ANALOG_ATTRIBUTE +{ + UCHAR ucTVStandard; //Same as TV standards defined above, + UCHAR ucPadding[1]; +}ATOM_ENCODER_ANALOG_ATTRIBUTE; -typedef struct _ATOM_ENCODER_ANALOG_ATTRIBUTE { - UCHAR ucTVStandard; /* Same as TV standards defined above, */ - UCHAR ucPadding[1]; -} ATOM_ENCODER_ANALOG_ATTRIBUTE; +typedef struct _ATOM_ENCODER_DIGITAL_ATTRIBUTE +{ + UCHAR ucAttribute; //Same as other digital encoder attributes defined above + UCHAR ucPadding[1]; +}ATOM_ENCODER_DIGITAL_ATTRIBUTE; -typedef struct _ATOM_ENCODER_DIGITAL_ATTRIBUTE { - UCHAR ucAttribute; /* Same as other digital encoder attributes defined above */ - UCHAR ucPadding[1]; -} ATOM_ENCODER_DIGITAL_ATTRIBUTE; +typedef union _ATOM_ENCODER_ATTRIBUTE +{ + ATOM_ENCODER_ANALOG_ATTRIBUTE sAlgAttrib; + ATOM_ENCODER_DIGITAL_ATTRIBUTE sDigAttrib; +}ATOM_ENCODER_ATTRIBUTE; -typedef union _ATOM_ENCODER_ATTRIBUTE { - ATOM_ENCODER_ANALOG_ATTRIBUTE sAlgAttrib; - ATOM_ENCODER_DIGITAL_ATTRIBUTE sDigAttrib; -} ATOM_ENCODER_ATTRIBUTE; -typedef struct _DVO_ENCODER_CONTROL_PARAMETERS { - USHORT usPixelClock; - USHORT usEncoderID; - UCHAR ucDeviceType; /* Use ATOM_DEVICE_xxx1_Index to indicate device type only. */ - UCHAR ucAction; /* ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT */ - ATOM_ENCODER_ATTRIBUTE usDevAttr; -} DVO_ENCODER_CONTROL_PARAMETERS; +typedef struct _DVO_ENCODER_CONTROL_PARAMETERS +{ + USHORT usPixelClock; + USHORT usEncoderID; + UCHAR ucDeviceType; //Use ATOM_DEVICE_xxx1_Index to indicate device type only. + UCHAR ucAction; //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT + ATOM_ENCODER_ATTRIBUTE usDevAttr; +}DVO_ENCODER_CONTROL_PARAMETERS; + +typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION +{ + DVO_ENCODER_CONTROL_PARAMETERS sDVOEncoder; + WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion +}DVO_ENCODER_CONTROL_PS_ALLOCATION; -typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION { - DVO_ENCODER_CONTROL_PARAMETERS sDVOEncoder; - WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Caller doesn't need to init this portion */ -} DVO_ENCODER_CONTROL_PS_ALLOCATION; #define ATOM_XTMDS_ASIC_SI164_ID 1 #define ATOM_XTMDS_ASIC_SI178_ID 2 @@ -4526,27 +5539,30 @@ typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION { #define ATOM_XTMDS_SUPPORTED_DUALLINK 0x00000002 #define ATOM_XTMDS_MVPU_FPGA 0x00000004 -typedef struct _ATOM_XTMDS_INFO { - ATOM_COMMON_TABLE_HEADER sHeader; - USHORT usSingleLinkMaxFrequency; - ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; /* Point the ID on which I2C is used to control external chip */ - UCHAR ucXtransimitterID; - UCHAR ucSupportedLink; /* Bit field, bit0=1, single link supported;bit1=1,dual link supported */ - UCHAR ucSequnceAlterID; /* Even with the same external TMDS asic, it's possible that the program seqence alters */ - /* due to design. This ID is used to alert driver that the sequence is not "standard"! 
*/ - UCHAR ucMasterAddress; /* Address to control Master xTMDS Chip */ - UCHAR ucSlaveAddress; /* Address to control Slave xTMDS Chip */ -} ATOM_XTMDS_INFO; - -typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS { - UCHAR ucEnable; /* ATOM_ENABLE=On or ATOM_DISABLE=Off */ - UCHAR ucDevice; /* ATOM_DEVICE_DFP1_INDEX.... */ - UCHAR ucPadding[2]; -} DFP_DPMS_STATUS_CHANGE_PARAMETERS; + +typedef struct _ATOM_XTMDS_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + USHORT usSingleLinkMaxFrequency; + ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; //Point the ID on which I2C is used to control external chip + UCHAR ucXtransimitterID; + UCHAR ucSupportedLink; // Bit field, bit0=1, single link supported;bit1=1,dual link supported + UCHAR ucSequnceAlterID; // Even with the same external TMDS asic, it's possible that the program seqence alters + // due to design. This ID is used to alert driver that the sequence is not "standard"! + UCHAR ucMasterAddress; // Address to control Master xTMDS Chip + UCHAR ucSlaveAddress; // Address to control Slave xTMDS Chip +}ATOM_XTMDS_INFO; + +typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS +{ + UCHAR ucEnable; // ATOM_ENABLE=On or ATOM_DISABLE=Off + UCHAR ucDevice; // ATOM_DEVICE_DFP1_INDEX.... + UCHAR ucPadding[2]; +}DFP_DPMS_STATUS_CHANGE_PARAMETERS; /****************************Legacy Power Play Table Definitions **********************/ -/* Definitions for ulPowerPlayMiscInfo */ +//Definitions for ulPowerPlayMiscInfo #define ATOM_PM_MISCINFO_SPLIT_CLOCK 0x00000000L #define ATOM_PM_MISCINFO_USING_MCLK_SRC 0x00000001L #define ATOM_PM_MISCINFO_USING_SCLK_SRC 0x00000002L @@ -4558,8 +5574,8 @@ typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS { #define ATOM_PM_MISCINFO_ENGINE_CLOCK_CONTRL_EN 0x00000020L #define ATOM_PM_MISCINFO_MEMORY_CLOCK_CONTRL_EN 0x00000040L -#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE 0x00000080L /* When this bit set, ucVoltageDropIndex is not an index for GPIO pin, but a voltage ID that SW needs program */ - +#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE 0x00000080L //When this bit set, ucVoltageDropIndex is not an index for GPIO pin, but a voltage ID that SW needs program + #define ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN 0x00000100L #define ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN 0x00000200L #define ATOM_PM_MISCINFO_ASIC_SLEEP_MODE_EN 0x00000400L @@ -4569,22 +5585,22 @@ typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS { #define ATOM_PM_MISCINFO_LOW_LCD_REFRESH_RATE 0x00004000L #define ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE 0x00008000L -#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE 0x00010000L +#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE 0x00010000L #define ATOM_PM_MISCINFO_OVER_DRIVE_MODE 0x00020000L #define ATOM_PM_MISCINFO_POWER_SAVING_MODE 0x00040000L #define ATOM_PM_MISCINFO_THERMAL_DIODE_MODE 0x00080000L -#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK 0x00300000L /* 0-FM Disable, 1-2 level FM, 2-4 level FM, 3-Reserved */ -#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT 20 +#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK 0x00300000L //0-FM Disable, 1-2 level FM, 2-4 level FM, 3-Reserved +#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT 20 #define ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE 0x00400000L #define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2 0x00800000L #define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4 0x01000000L -#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN 0x02000000L /* When set, Dynamic */ -#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN 0x04000000L /* When set, Dynamic */ -#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN 0x08000000L /* When set, This mode is for 
acceleated 3D mode */ +#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN 0x02000000L //When set, Dynamic +#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN 0x04000000L //When set, Dynamic +#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN 0x08000000L //When set, This mode is for acceleated 3D mode -#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK 0x70000000L /* 1-Optimal Battery Life Group, 2-High Battery, 3-Balanced, 4-High Performance, 5- Optimal Performance (Default state with Default clocks) */ +#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK 0x70000000L //1-Optimal Battery Life Group, 2-High Battery, 3-Balanced, 4-High Performance, 5- Optimal Performance (Default state with Default clocks) #define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT 28 #define ATOM_PM_MISCINFO_ENABLE_BACK_BIAS 0x80000000L @@ -4594,55 +5610,59 @@ typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS { #define ATOM_PM_MISCINFO2_FS3D_OVERDRIVE_INFO 0x00000008L #define ATOM_PM_MISCINFO2_FORCEDLOWPWR_MODE 0x00000010L #define ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN 0x00000020L -#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE 0x00000040L /* If this bit is set in multi-pp mode, then driver will pack up one with the minior power consumption. */ - /* If it's not set in any pp mode, driver will use its default logic to pick a pp mode in video playback */ +#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE 0x00000040L //If this bit is set in multi-pp mode, then driver will pack up one with the minior power consumption. + //If it's not set in any pp mode, driver will use its default logic to pick a pp mode in video playback #define ATOM_PM_MISCINFO2_NOT_VALID_ON_DC 0x00000080L #define ATOM_PM_MISCINFO2_STUTTER_MODE_EN 0x00000100L -#define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE 0x00000200L - -/* ucTableFormatRevision=1 */ -/* ucTableContentRevision=1 */ -typedef struct _ATOM_POWERMODE_INFO { - ULONG ulMiscInfo; /* The power level should be arranged in ascending order */ - ULONG ulReserved1; /* must set to 0 */ - ULONG ulReserved2; /* must set to 0 */ - USHORT usEngineClock; - USHORT usMemoryClock; - UCHAR ucVoltageDropIndex; /* index to GPIO table */ - UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */ - UCHAR ucMinTemperature; - UCHAR ucMaxTemperature; - UCHAR ucNumPciELanes; /* number of PCIE lanes */ -} ATOM_POWERMODE_INFO; - -/* ucTableFormatRevision=2 */ -/* ucTableContentRevision=1 */ -typedef struct _ATOM_POWERMODE_INFO_V2 { - ULONG ulMiscInfo; /* The power level should be arranged in ascending order */ - ULONG ulMiscInfo2; - ULONG ulEngineClock; - ULONG ulMemoryClock; - UCHAR ucVoltageDropIndex; /* index to GPIO table */ - UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */ - UCHAR ucMinTemperature; - UCHAR ucMaxTemperature; - UCHAR ucNumPciELanes; /* number of PCIE lanes */ -} ATOM_POWERMODE_INFO_V2; - -/* ucTableFormatRevision=2 */ -/* ucTableContentRevision=2 */ -typedef struct _ATOM_POWERMODE_INFO_V3 { - ULONG ulMiscInfo; /* The power level should be arranged in ascending order */ - ULONG ulMiscInfo2; - ULONG ulEngineClock; - ULONG ulMemoryClock; - UCHAR ucVoltageDropIndex; /* index to Core (VDDC) votage table */ - UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */ - UCHAR ucMinTemperature; - UCHAR ucMaxTemperature; - UCHAR ucNumPciELanes; /* number of PCIE lanes */ - UCHAR ucVDDCI_VoltageDropIndex; /* index to VDDCI votage table */ -} ATOM_POWERMODE_INFO_V3; +#define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE 0x00000200L + +//ucTableFormatRevision=1 +//ucTableContentRevision=1 +typedef struct 
_ATOM_POWERMODE_INFO +{ + ULONG ulMiscInfo; //The power level should be arranged in ascending order + ULONG ulReserved1; // must set to 0 + ULONG ulReserved2; // must set to 0 + USHORT usEngineClock; + USHORT usMemoryClock; + UCHAR ucVoltageDropIndex; // index to GPIO table + UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate + UCHAR ucMinTemperature; + UCHAR ucMaxTemperature; + UCHAR ucNumPciELanes; // number of PCIE lanes +}ATOM_POWERMODE_INFO; + +//ucTableFormatRevision=2 +//ucTableContentRevision=1 +typedef struct _ATOM_POWERMODE_INFO_V2 +{ + ULONG ulMiscInfo; //The power level should be arranged in ascending order + ULONG ulMiscInfo2; + ULONG ulEngineClock; + ULONG ulMemoryClock; + UCHAR ucVoltageDropIndex; // index to GPIO table + UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate + UCHAR ucMinTemperature; + UCHAR ucMaxTemperature; + UCHAR ucNumPciELanes; // number of PCIE lanes +}ATOM_POWERMODE_INFO_V2; + +//ucTableFormatRevision=2 +//ucTableContentRevision=2 +typedef struct _ATOM_POWERMODE_INFO_V3 +{ + ULONG ulMiscInfo; //The power level should be arranged in ascending order + ULONG ulMiscInfo2; + ULONG ulEngineClock; + ULONG ulMemoryClock; + UCHAR ucVoltageDropIndex; // index to Core (VDDC) votage table + UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate + UCHAR ucMinTemperature; + UCHAR ucMaxTemperature; + UCHAR ucNumPciELanes; // number of PCIE lanes + UCHAR ucVDDCI_VoltageDropIndex; // index to VDDCI votage table +}ATOM_POWERMODE_INFO_V3; + #define ATOM_MAX_NUMBEROF_POWER_BLOCK 8 @@ -4655,40 +5675,44 @@ typedef struct _ATOM_POWERMODE_INFO_V3 { #define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_MUA6649 0x04 #define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM64 0x05 #define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_F75375 0x06 -#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512 0x07 /* Andigilog */ - -typedef struct _ATOM_POWERPLAY_INFO { - ATOM_COMMON_TABLE_HEADER sHeader; - UCHAR ucOverdriveThermalController; - UCHAR ucOverdriveI2cLine; - UCHAR ucOverdriveIntBitmap; - UCHAR ucOverdriveControllerAddress; - UCHAR ucSizeOfPowerModeEntry; - UCHAR ucNumOfPowerModeEntries; - ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; -} ATOM_POWERPLAY_INFO; - -typedef struct _ATOM_POWERPLAY_INFO_V2 { - ATOM_COMMON_TABLE_HEADER sHeader; - UCHAR ucOverdriveThermalController; - UCHAR ucOverdriveI2cLine; - UCHAR ucOverdriveIntBitmap; - UCHAR ucOverdriveControllerAddress; - UCHAR ucSizeOfPowerModeEntry; - UCHAR ucNumOfPowerModeEntries; - ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; -} ATOM_POWERPLAY_INFO_V2; - -typedef struct _ATOM_POWERPLAY_INFO_V3 { - ATOM_COMMON_TABLE_HEADER sHeader; - UCHAR ucOverdriveThermalController; - UCHAR ucOverdriveI2cLine; - UCHAR ucOverdriveIntBitmap; - UCHAR ucOverdriveControllerAddress; - UCHAR ucSizeOfPowerModeEntry; - UCHAR ucNumOfPowerModeEntries; - ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; -} ATOM_POWERPLAY_INFO_V3; +#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512 0x07 // Andigilog + + +typedef struct _ATOM_POWERPLAY_INFO +{ + ATOM_COMMON_TABLE_HEADER sHeader; + UCHAR ucOverdriveThermalController; + UCHAR ucOverdriveI2cLine; + UCHAR ucOverdriveIntBitmap; + UCHAR ucOverdriveControllerAddress; + UCHAR ucSizeOfPowerModeEntry; + UCHAR ucNumOfPowerModeEntries; + ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; +}ATOM_POWERPLAY_INFO; + +typedef struct _ATOM_POWERPLAY_INFO_V2 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + UCHAR ucOverdriveThermalController; + UCHAR 
ucOverdriveI2cLine; + UCHAR ucOverdriveIntBitmap; + UCHAR ucOverdriveControllerAddress; + UCHAR ucSizeOfPowerModeEntry; + UCHAR ucNumOfPowerModeEntries; + ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; +}ATOM_POWERPLAY_INFO_V2; + +typedef struct _ATOM_POWERPLAY_INFO_V3 +{ + ATOM_COMMON_TABLE_HEADER sHeader; + UCHAR ucOverdriveThermalController; + UCHAR ucOverdriveI2cLine; + UCHAR ucOverdriveIntBitmap; + UCHAR ucOverdriveControllerAddress; + UCHAR ucSizeOfPowerModeEntry; + UCHAR ucNumOfPowerModeEntries; + ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; +}ATOM_POWERPLAY_INFO_V3; /* New PPlib */ /**************************************************************************/ @@ -4873,40 +5897,42 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16} UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requriement. USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200). - ULONG ulFlags; + ULONG ulFlags; } ATOM_PPLIB_RS780_CLOCK_INFO; -#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0 -#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1 -#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2 -#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3 +#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0 +#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1 +#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2 +#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3 #define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is. #define ATOM_PPLIB_RS780_SPMCLK_LOW 1 #define ATOM_PPLIB_RS780_SPMCLK_HIGH 2 -#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0 -#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1 -#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2 +#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0 +#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1 +#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2 /**************************************************************************/ -/* Following definitions are for compatiblity issue in different SW components. */ + +// Following definitions are for compatiblity issue in different SW components. 
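The legacy PowerPlay tables above are self-describing: ucNumOfPowerModeEntries gives the number of power modes and ucSizeOfPowerModeEntry gives the byte stride of one entry, which differs between the V1, V2 and V3 layouts. Below is a minimal sketch of how a format-revision-1 table could be walked; it is illustrative only and not part of the patch, the helper name max_engine_clock is made up, the stride-based stepping is an assumption drawn from the field names, and it presumes the typedefs above (UCHAR/USHORT and the table structs) are in scope.

static USHORT max_engine_clock(const ATOM_POWERPLAY_INFO *info)
{
        const UCHAR *entry = (const UCHAR *)info->asPowerPlayInfo;
        USHORT best = 0;
        int i;

        /* cap at both the recorded count and the fixed array size */
        for (i = 0; i < info->ucNumOfPowerModeEntries &&
                    i < ATOM_MAX_NUMBEROF_POWER_BLOCK; i++) {
                const ATOM_POWERMODE_INFO *mode =
                        (const ATOM_POWERMODE_INFO *)entry;

                if (mode->usEngineClock > best)
                        best = mode->usEngineClock;

                /* step by the stride recorded in the table header; V2/V3
                 * entries are larger (ULONG clocks), so sizeof() on the
                 * rev-1 struct would be wrong for those layouts */
                entry += info->ucSizeOfPowerModeEntry;
        }
        return best;
}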
#define ATOM_MASTER_DATA_TABLE_REVISION 0x01 -#define Object_Info Object_Header +#define Object_Info Object_Header #define AdjustARB_SEQ MC_InitParameter #define VRAM_GPIO_DetectionInfo VoltageObjectInfo -#define ASIC_VDDCI_Info ASIC_ProfilingInfo +#define ASIC_VDDCI_Info ASIC_ProfilingInfo #define ASIC_MVDDQ_Info MemoryTrainingInfo -#define SS_Info PPLL_SS_Info +#define SS_Info PPLL_SS_Info #define ASIC_MVDDC_Info ASIC_InternalSS_Info #define DispDevicePriorityInfo SaveRestoreInfo #define DispOutInfo TV_VideoMode + #define ATOM_ENCODER_OBJECT_TABLE ATOM_OBJECT_TABLE #define ATOM_CONNECTOR_OBJECT_TABLE ATOM_OBJECT_TABLE -/* New device naming, remove them when both DAL/VBIOS is ready */ +//New device naming, remove them when both DAL/VBIOS is ready #define DFP2I_OUTPUT_CONTROL_PARAMETERS CRT1_OUTPUT_CONTROL_PARAMETERS #define DFP2I_OUTPUT_CONTROL_PS_ALLOCATION DFP2I_OUTPUT_CONTROL_PARAMETERS @@ -4921,7 +5947,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO #define ATOM_DEVICE_DFP1I_INDEX ATOM_DEVICE_DFP1_INDEX #define ATOM_DEVICE_DFP1X_INDEX ATOM_DEVICE_DFP2_INDEX - + #define ATOM_DEVICE_DFP2I_INDEX 0x00000009 #define ATOM_DEVICE_DFP2I_SUPPORT (0x1L << ATOM_DEVICE_DFP2I_INDEX) @@ -4939,7 +5965,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO #define ATOM_S3_DFP2I_ACTIVEb1 0x02 -#define ATOM_S3_DFP1I_ACTIVE ATOM_S3_DFP1_ACTIVE +#define ATOM_S3_DFP1I_ACTIVE ATOM_S3_DFP1_ACTIVE #define ATOM_S3_DFP1X_ACTIVE ATOM_S3_DFP2_ACTIVE #define ATOM_S3_DFP2I_ACTIVE 0x00000200L @@ -4958,14 +5984,14 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO #define ATOM_S6_ACC_REQ_DFP2Ib3 0x02 #define ATOM_S6_ACC_REQ_DFP2I 0x02000000L -#define TMDS1XEncoderControl DVOEncoderControl +#define TMDS1XEncoderControl DVOEncoderControl #define DFP1XOutputControl DVOOutputControl #define ExternalDFPOutputControl DFP1XOutputControl #define EnableExternalTMDS_Encoder TMDS1XEncoderControl #define DFP1IOutputControl TMDSAOutputControl -#define DFP2IOutputControl LVTMAOutputControl +#define DFP2IOutputControl LVTMAOutputControl #define DAC1_ENCODER_CONTROL_PARAMETERS DAC_ENCODER_CONTROL_PARAMETERS #define DAC1_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION @@ -4974,7 +6000,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO #define DAC2_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION #define ucDac1Standard ucDacStandard -#define ucDac2Standard ucDacStandard +#define ucDac2Standard ucDacStandard #define TMDS1EncoderControl TMDSAEncoderControl #define TMDS2EncoderControl LVTMAEncoderControl @@ -4984,12 +6010,56 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO #define CRT1OutputControl DAC1OutputControl #define CRT2OutputControl DAC2OutputControl -/* These two lines will be removed for sure in a few days, will follow up with Michael V. */ +//These two lines will be removed for sure in a few days, will follow up with Michael V. 
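Each ATOM_DEVICE_xxx_SUPPORT mask above is simply 1 shifted left by the matching ATOM_DEVICE_xxx_INDEX, and usDeviceSupport in the supported-devices tables earlier in this header is a bitmap over those indices. A short sketch of the intended test follows; it is illustrative only and not part of the patch, and device_is_supported is a made-up helper name.

static int device_is_supported(const ATOM_SUPPORTED_DEVICES_INFO *info,
                               int device_index)
{
        /* usDeviceSupport is a bitmap indexed by ATOM_DEVICE_xxx_INDEX,
         * so ATOM_DEVICE_DFP2I_SUPPORT == (1 << ATOM_DEVICE_DFP2I_INDEX) */
        return (info->usDeviceSupport & (1 << device_index)) != 0;
}

/* device_is_supported(info, ATOM_DEVICE_DFP2I_INDEX) is equivalent to
 * testing info->usDeviceSupport & ATOM_DEVICE_DFP2I_SUPPORT */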
#define EnableLVDS_SS EnableSpreadSpectrumOnPPLL -#define ENABLE_LVDS_SS_PARAMETERS_V3 ENABLE_SPREAD_SPECTRUM_ON_PPLL +#define ENABLE_LVDS_SS_PARAMETERS_V3 ENABLE_SPREAD_SPECTRUM_ON_PPLL + +//#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L +//#define ATOM_S2_LCD1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE +//#define ATOM_S2_TV1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE +//#define ATOM_S2_DFP1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE +//#define ATOM_S2_CRT2_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE + +#define ATOM_S6_ACC_REQ_TV2 0x00400000L +#define ATOM_DEVICE_TV2_INDEX 0x00000006 +#define ATOM_DEVICE_TV2_SUPPORT (0x1L << ATOM_DEVICE_TV2_INDEX) +#define ATOM_S0_TV2 0x00100000L +#define ATOM_S3_TV2_ACTIVE ATOM_S3_DFP6_ACTIVE +#define ATOM_S3_TV2_CRTC_ACTIVE ATOM_S3_DFP6_CRTC_ACTIVE + +// +#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L +#define ATOM_S2_LCD1_DPMS_STATE 0x00020000L +#define ATOM_S2_TV1_DPMS_STATE 0x00040000L +#define ATOM_S2_DFP1_DPMS_STATE 0x00080000L +#define ATOM_S2_CRT2_DPMS_STATE 0x00100000L +#define ATOM_S2_LCD2_DPMS_STATE 0x00200000L +#define ATOM_S2_TV2_DPMS_STATE 0x00400000L +#define ATOM_S2_DFP2_DPMS_STATE 0x00800000L +#define ATOM_S2_CV_DPMS_STATE 0x01000000L +#define ATOM_S2_DFP3_DPMS_STATE 0x02000000L +#define ATOM_S2_DFP4_DPMS_STATE 0x04000000L +#define ATOM_S2_DFP5_DPMS_STATE 0x08000000L + +#define ATOM_S2_CRT1_DPMS_STATEb2 0x01 +#define ATOM_S2_LCD1_DPMS_STATEb2 0x02 +#define ATOM_S2_TV1_DPMS_STATEb2 0x04 +#define ATOM_S2_DFP1_DPMS_STATEb2 0x08 +#define ATOM_S2_CRT2_DPMS_STATEb2 0x10 +#define ATOM_S2_LCD2_DPMS_STATEb2 0x20 +#define ATOM_S2_TV2_DPMS_STATEb2 0x40 +#define ATOM_S2_DFP2_DPMS_STATEb2 0x80 +#define ATOM_S2_CV_DPMS_STATEb3 0x01 +#define ATOM_S2_DFP3_DPMS_STATEb3 0x02 +#define ATOM_S2_DFP4_DPMS_STATEb3 0x04 +#define ATOM_S2_DFP5_DPMS_STATEb3 0x08 + +#define ATOM_S3_ASIC_GUI_ENGINE_HUNGb3 0x20 +#define ATOM_S3_ALLOW_FAST_PWR_SWITCHb3 0x40 +#define ATOM_S3_RQST_GPU_USE_MIN_PWRb3 0x80 /*********************************************************************************/ -#pragma pack() /* BIOS data must use byte aligment */ +#pragma pack() // BIOS data must use byte aligment #endif /* _ATOMBIOS_H */ diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index af464e351fb..dd9fdf56061 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -245,21 +245,25 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) switch (mode) { case DRM_MODE_DPMS_ON: - atombios_enable_crtc(crtc, 1); + atombios_enable_crtc(crtc, ATOM_ENABLE); if (ASIC_IS_DCE3(rdev)) - atombios_enable_crtc_memreq(crtc, 1); - atombios_blank_crtc(crtc, 0); - drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); + atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); + atombios_blank_crtc(crtc, ATOM_DISABLE); + /* XXX re-enable when interrupt support is added */ + if (!ASIC_IS_DCE4(rdev)) + drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); radeon_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); - atombios_blank_crtc(crtc, 1); + /* XXX re-enable when interrupt support is added */ + if (!ASIC_IS_DCE4(rdev)) + drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); + atombios_blank_crtc(crtc, ATOM_ENABLE); if (ASIC_IS_DCE3(rdev)) - atombios_enable_crtc_memreq(crtc, 0); - atombios_enable_crtc(crtc, 0); + atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); + atombios_enable_crtc(crtc, ATOM_DISABLE); break; } } @@ -349,6 +353,11 @@ static void 
atombios_crtc_set_timing(struct drm_crtc *crtc, atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } +union atom_enable_ss { + ENABLE_LVDS_SS_PARAMETERS legacy; + ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1; +}; + static void atombios_set_ss(struct drm_crtc *crtc, int enable) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); @@ -358,11 +367,14 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable) struct radeon_encoder *radeon_encoder = NULL; struct radeon_encoder_atom_dig *dig = NULL; int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL); - ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION args; - ENABLE_LVDS_SS_PARAMETERS legacy_args; + union atom_enable_ss args; uint16_t percentage = 0; uint8_t type = 0, step = 0, delay = 0, range = 0; + /* XXX add ss support for DCE4 */ + if (ASIC_IS_DCE4(rdev)) + return; + list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc == crtc) { radeon_encoder = to_radeon_encoder(encoder); @@ -386,29 +398,28 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable) if (!radeon_encoder) return; + memset(&args, 0, sizeof(args)); if (ASIC_IS_AVIVO(rdev)) { - memset(&args, 0, sizeof(args)); - args.usSpreadSpectrumPercentage = cpu_to_le16(percentage); - args.ucSpreadSpectrumType = type; - args.ucSpreadSpectrumStep = step; - args.ucSpreadSpectrumDelay = delay; - args.ucSpreadSpectrumRange = range; - args.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; - args.ucEnable = enable; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + args.v1.usSpreadSpectrumPercentage = cpu_to_le16(percentage); + args.v1.ucSpreadSpectrumType = type; + args.v1.ucSpreadSpectrumStep = step; + args.v1.ucSpreadSpectrumDelay = delay; + args.v1.ucSpreadSpectrumRange = range; + args.v1.ucPpll = radeon_crtc->crtc_id ? 
ATOM_PPLL2 : ATOM_PPLL1; + args.v1.ucEnable = enable; } else { - memset(&legacy_args, 0, sizeof(legacy_args)); - legacy_args.usSpreadSpectrumPercentage = cpu_to_le16(percentage); - legacy_args.ucSpreadSpectrumType = type; - legacy_args.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2; - legacy_args.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4; - legacy_args.ucEnable = enable; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&legacy_args); + args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage); + args.legacy.ucSpreadSpectrumType = type; + args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2; + args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4; + args.legacy.ucEnable = enable; } + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } union adjust_pixel_clock { ADJUST_DISPLAY_PLL_PS_ALLOCATION v1; + ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 v3; }; static u32 atombios_adjust_pll(struct drm_crtc *crtc, @@ -420,10 +431,24 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, struct drm_encoder *encoder = NULL; struct radeon_encoder *radeon_encoder = NULL; u32 adjusted_clock = mode->clock; + int encoder_mode = 0; /* reset the pll flags */ pll->flags = 0; + /* select the PLL algo */ + if (ASIC_IS_AVIVO(rdev)) { + if (radeon_new_pll == 0) + pll->algo = PLL_ALGO_LEGACY; + else + pll->algo = PLL_ALGO_NEW; + } else { + if (radeon_new_pll == 1) + pll->algo = PLL_ALGO_NEW; + else + pll->algo = PLL_ALGO_LEGACY; + } + if (ASIC_IS_AVIVO(rdev)) { if ((rdev->family == CHIP_RS600) || (rdev->family == CHIP_RS690) || @@ -448,10 +473,16 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc == crtc) { radeon_encoder = to_radeon_encoder(encoder); + encoder_mode = atombios_get_encoder_mode(encoder); if (ASIC_IS_AVIVO(rdev)) { /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) adjusted_clock = mode->clock * 2; + /* LVDS PLL quirks */ + if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + pll->algo = dig->pll_algo; + } } else { if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; @@ -468,14 +499,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, */ if (ASIC_IS_DCE3(rdev)) { union adjust_pixel_clock args; - struct radeon_encoder_atom_dig *dig; u8 frev, crev; int index; - if (!radeon_encoder->enc_priv) - return adjusted_clock; - dig = radeon_encoder->enc_priv; - index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll); atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); @@ -489,12 +515,51 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, case 2: args.v1.usPixelClock = cpu_to_le16(mode->clock / 10); args.v1.ucTransmitterID = radeon_encoder->encoder_id; - args.v1.ucEncodeMode = atombios_get_encoder_mode(encoder); + args.v1.ucEncodeMode = encoder_mode; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10; break; + case 3: + args.v3.sInput.usPixelClock = cpu_to_le16(mode->clock / 10); + args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id; + args.v3.sInput.ucEncodeMode = encoder_mode; + args.v3.sInput.ucDispPllConfig = 0; + if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { + struct radeon_encoder_atom_dig *dig = 
radeon_encoder->enc_priv; + + if (encoder_mode == ATOM_ENCODER_MODE_DP) + args.v3.sInput.ucDispPllConfig |= + DISPPLL_CONFIG_COHERENT_MODE; + else { + if (dig->coherent_mode) + args.v3.sInput.ucDispPllConfig |= + DISPPLL_CONFIG_COHERENT_MODE; + if (mode->clock > 165000) + args.v3.sInput.ucDispPllConfig |= + DISPPLL_CONFIG_DUAL_LINK; + } + } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { + /* may want to enable SS on DP/eDP eventually */ + args.v3.sInput.ucDispPllConfig |= + DISPPLL_CONFIG_SS_ENABLE; + if (mode->clock > 165000) + args.v3.sInput.ucDispPllConfig |= + DISPPLL_CONFIG_DUAL_LINK; + } + atom_execute_table(rdev->mode_info.atom_context, + index, (uint32_t *)&args); + adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; + if (args.v3.sOutput.ucRefDiv) { + pll->flags |= RADEON_PLL_USE_REF_DIV; + pll->reference_div = args.v3.sOutput.ucRefDiv; + } + if (args.v3.sOutput.ucPostDiv) { + pll->flags |= RADEON_PLL_USE_POST_DIV; + pll->post_div = args.v3.sOutput.ucPostDiv; + } + break; default: DRM_ERROR("Unknown table version %d %d\n", frev, crev); return adjusted_clock; @@ -513,9 +578,47 @@ union set_pixel_clock { PIXEL_CLOCK_PARAMETERS v1; PIXEL_CLOCK_PARAMETERS_V2 v2; PIXEL_CLOCK_PARAMETERS_V3 v3; + PIXEL_CLOCK_PARAMETERS_V5 v5; }; -void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) +static void atombios_crtc_set_dcpll(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + u8 frev, crev; + int index; + union set_pixel_clock args; + + memset(&args, 0, sizeof(args)); + + index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); + atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, + &crev); + + switch (frev) { + case 1: + switch (crev) { + case 5: + /* if the default dcpll clock is specified, + * SetPixelClock provides the dividers + */ + args.v5.ucCRTC = ATOM_CRTC_INVALID; + args.v5.usPixelClock = rdev->clock.default_dispclk; + args.v5.ucPpll = ATOM_DCPLL; + break; + default: + DRM_ERROR("Unknown table version %d %d\n", frev, crev); + return; + } + break; + default: + DRM_ERROR("Unknown table version %d %d\n", frev, crev); + return; + } + atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); +} + +static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; @@ -529,12 +632,14 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0; struct radeon_pll *pll; u32 adjusted_clock; + int encoder_mode = 0; memset(&args, 0, sizeof(args)); list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (encoder->crtc == crtc) { radeon_encoder = to_radeon_encoder(encoder); + encoder_mode = atombios_get_encoder_mode(encoder); break; } } @@ -542,26 +647,24 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) if (!radeon_encoder) return; - if (radeon_crtc->crtc_id == 0) + switch (radeon_crtc->pll_id) { + case ATOM_PPLL1: pll = &rdev->clock.p1pll; - else + break; + case ATOM_PPLL2: pll = &rdev->clock.p2pll; + break; + case ATOM_DCPLL: + case ATOM_PPLL_INVALID: + pll = &rdev->clock.dcpll; + break; + } /* adjust pixel clock as needed */ adjusted_clock = atombios_adjust_pll(crtc, mode, pll); - if (ASIC_IS_AVIVO(rdev)) { - if (radeon_new_pll) - radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, - &fb_div, 
&frac_fb_div, - &ref_div, &post_div); - else - radeon_compute_pll(pll, adjusted_clock, &pll_clock, - &fb_div, &frac_fb_div, - &ref_div, &post_div); - } else - radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, - &ref_div, &post_div); + radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, + &ref_div, &post_div); index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, @@ -576,8 +679,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) args.v1.usFbDiv = cpu_to_le16(fb_div); args.v1.ucFracFbDiv = frac_fb_div; args.v1.ucPostDiv = post_div; - args.v1.ucPpll = - radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; + args.v1.ucPpll = radeon_crtc->pll_id; args.v1.ucCRTC = radeon_crtc->crtc_id; args.v1.ucRefDivSrc = 1; break; @@ -587,8 +689,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) args.v2.usFbDiv = cpu_to_le16(fb_div); args.v2.ucFracFbDiv = frac_fb_div; args.v2.ucPostDiv = post_div; - args.v2.ucPpll = - radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; + args.v2.ucPpll = radeon_crtc->pll_id; args.v2.ucCRTC = radeon_crtc->crtc_id; args.v2.ucRefDivSrc = 1; break; @@ -598,12 +699,22 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) args.v3.usFbDiv = cpu_to_le16(fb_div); args.v3.ucFracFbDiv = frac_fb_div; args.v3.ucPostDiv = post_div; - args.v3.ucPpll = - radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1; - args.v3.ucMiscInfo = (radeon_crtc->crtc_id << 2); + args.v3.ucPpll = radeon_crtc->pll_id; + args.v3.ucMiscInfo = (radeon_crtc->pll_id << 2); args.v3.ucTransmitterId = radeon_encoder->encoder_id; - args.v3.ucEncoderMode = - atombios_get_encoder_mode(encoder); + args.v3.ucEncoderMode = encoder_mode; + break; + case 5: + args.v5.ucCRTC = radeon_crtc->crtc_id; + args.v5.usPixelClock = cpu_to_le16(mode->clock / 10); + args.v5.ucRefDiv = ref_div; + args.v5.usFbDiv = cpu_to_le16(fb_div); + args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000); + args.v5.ucPostDiv = post_div; + args.v5.ucMiscInfo = 0; /* HDMI depth, etc. 
*/ + args.v5.ucTransmitterID = radeon_encoder->encoder_id; + args.v5.ucEncoderMode = encoder_mode; + args.v5.ucPpll = radeon_crtc->pll_id; break; default: DRM_ERROR("Unknown table version %d %d\n", frev, crev); @@ -618,6 +729,140 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } +static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_framebuffer *radeon_fb; + struct drm_gem_object *obj; + struct radeon_bo *rbo; + uint64_t fb_location; + uint32_t fb_format, fb_pitch_pixels, tiling_flags; + int r; + + /* no fb bound */ + if (!crtc->fb) { + DRM_DEBUG("No FB bound\n"); + return 0; + } + + radeon_fb = to_radeon_framebuffer(crtc->fb); + + /* Pin framebuffer & get tilling informations */ + obj = radeon_fb->obj; + rbo = obj->driver_private; + r = radeon_bo_reserve(rbo, false); + if (unlikely(r != 0)) + return r; + r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location); + if (unlikely(r != 0)) { + radeon_bo_unreserve(rbo); + return -EINVAL; + } + radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); + radeon_bo_unreserve(rbo); + + switch (crtc->fb->bits_per_pixel) { + case 8: + fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) | + EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED)); + break; + case 15: + fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | + EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555)); + break; + case 16: + fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) | + EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565)); + break; + case 24: + case 32: + fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) | + EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888)); + break; + default: + DRM_ERROR("Unsupported screen depth %d\n", + crtc->fb->bits_per_pixel); + return -EINVAL; + } + + switch (radeon_crtc->crtc_id) { + case 0: + WREG32(AVIVO_D1VGA_CONTROL, 0); + break; + case 1: + WREG32(AVIVO_D2VGA_CONTROL, 0); + break; + case 2: + WREG32(EVERGREEN_D3VGA_CONTROL, 0); + break; + case 3: + WREG32(EVERGREEN_D4VGA_CONTROL, 0); + break; + case 4: + WREG32(EVERGREEN_D5VGA_CONTROL, 0); + break; + case 5: + WREG32(EVERGREEN_D6VGA_CONTROL, 0); + break; + default: + break; + } + + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, + upper_32_bits(fb_location)); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, + upper_32_bits(fb_location)); + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, + (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset, + (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK); + WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format); + + WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0); + WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0); + WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0); + WREG32(EVERGREEN_GRPH_Y_START + radeon_crtc->crtc_offset, 0); + WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width); + WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height); + + fb_pitch_pixels = crtc->fb->pitch / 
(crtc->fb->bits_per_pixel / 8); + WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels); + WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1); + + WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset, + crtc->mode.vdisplay); + x &= ~3; + y &= ~1; + WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset, + (x << 16) | y); + WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, + (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay); + + if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) + WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, + EVERGREEN_INTERLEAVE_EN); + else + WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0); + + if (old_fb && old_fb != crtc->fb) { + radeon_fb = to_radeon_framebuffer(old_fb); + rbo = radeon_fb->obj->driver_private; + r = radeon_bo_reserve(rbo, false); + if (unlikely(r != 0)) + return r; + radeon_bo_unpin(rbo); + radeon_bo_unreserve(rbo); + } + + /* Bytes per pixel may have changed */ + radeon_bandwidth_update(rdev); + + return 0; +} + static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { @@ -755,7 +1000,9 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; - if (ASIC_IS_AVIVO(rdev)) + if (ASIC_IS_DCE4(rdev)) + return evergreen_crtc_set_base(crtc, x, y, old_fb); + else if (ASIC_IS_AVIVO(rdev)) return avivo_crtc_set_base(crtc, x, y, old_fb); else return radeon_crtc_set_base(crtc, x, y, old_fb); @@ -785,6 +1032,46 @@ static void radeon_legacy_atom_fixup(struct drm_crtc *crtc) } } +static int radeon_atom_pick_pll(struct drm_crtc *crtc) +{ + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + struct drm_encoder *test_encoder; + struct drm_crtc *test_crtc; + uint32_t pll_in_use = 0; + + if (ASIC_IS_DCE4(rdev)) { + /* if crtc is driving DP and we have an ext clock, use that */ + list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) { + if (test_encoder->crtc && (test_encoder->crtc == crtc)) { + if (atombios_get_encoder_mode(test_encoder) == ATOM_ENCODER_MODE_DP) { + if (rdev->clock.dp_extclk) + return ATOM_PPLL_INVALID; + } + } + } + + /* otherwise, pick one of the plls */ + list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) { + struct radeon_crtc *radeon_test_crtc; + + if (crtc == test_crtc) + continue; + + radeon_test_crtc = to_radeon_crtc(test_crtc); + if ((radeon_test_crtc->pll_id >= ATOM_PPLL1) && + (radeon_test_crtc->pll_id <= ATOM_PPLL2)) + pll_in_use |= (1 << radeon_test_crtc->pll_id); + } + if (!(pll_in_use & 1)) + return ATOM_PPLL1; + return ATOM_PPLL2; + } else + return radeon_crtc->crtc_id; + +} + int atombios_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, @@ -796,19 +1083,27 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc, /* TODO color tiling */ + /* pick pll */ + radeon_crtc->pll_id = radeon_atom_pick_pll(crtc); + atombios_set_ss(crtc, 0); + /* always set DCPLL */ + if (ASIC_IS_DCE4(rdev)) + atombios_crtc_set_dcpll(crtc); atombios_crtc_set_pll(crtc, adjusted_mode); atombios_set_ss(crtc, 1); - atombios_crtc_set_timing(crtc, adjusted_mode); - if (ASIC_IS_AVIVO(rdev)) - atombios_crtc_set_base(crtc, x, y, old_fb); + if (ASIC_IS_DCE4(rdev)) + atombios_set_crtc_dtd_timing(crtc, adjusted_mode); + else if (ASIC_IS_AVIVO(rdev)) + atombios_crtc_set_timing(crtc, adjusted_mode); 
else { + atombios_crtc_set_timing(crtc, adjusted_mode); if (radeon_crtc->crtc_id == 0) atombios_set_crtc_dtd_timing(crtc, adjusted_mode); - atombios_crtc_set_base(crtc, x, y, old_fb); radeon_legacy_atom_fixup(crtc); } + atombios_crtc_set_base(crtc, x, y, old_fb); atombios_overscan_setup(crtc, mode, adjusted_mode); atombios_scaler_setup(crtc); return 0; @@ -825,14 +1120,14 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, static void atombios_crtc_prepare(struct drm_crtc *crtc) { - atombios_lock_crtc(crtc, 1); + atombios_lock_crtc(crtc, ATOM_ENABLE); atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); } static void atombios_crtc_commit(struct drm_crtc *crtc) { atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON); - atombios_lock_crtc(crtc, 0); + atombios_lock_crtc(crtc, ATOM_DISABLE); } static const struct drm_crtc_helper_funcs atombios_helper_funcs = { @@ -848,8 +1143,37 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = { void radeon_atombios_init_crtc(struct drm_device *dev, struct radeon_crtc *radeon_crtc) { - if (radeon_crtc->crtc_id == 1) - radeon_crtc->crtc_offset = - AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL; + struct radeon_device *rdev = dev->dev_private; + + if (ASIC_IS_DCE4(rdev)) { + switch (radeon_crtc->crtc_id) { + case 0: + default: + radeon_crtc->crtc_offset = EVERGREEN_CRTC0_REGISTER_OFFSET; + break; + case 1: + radeon_crtc->crtc_offset = EVERGREEN_CRTC1_REGISTER_OFFSET; + break; + case 2: + radeon_crtc->crtc_offset = EVERGREEN_CRTC2_REGISTER_OFFSET; + break; + case 3: + radeon_crtc->crtc_offset = EVERGREEN_CRTC3_REGISTER_OFFSET; + break; + case 4: + radeon_crtc->crtc_offset = EVERGREEN_CRTC4_REGISTER_OFFSET; + break; + case 5: + radeon_crtc->crtc_offset = EVERGREEN_CRTC5_REGISTER_OFFSET; + break; + } + } else { + if (radeon_crtc->crtc_id == 1) + radeon_crtc->crtc_offset = + AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL; + else + radeon_crtc->crtc_offset = 0; + } + radeon_crtc->pll_id = -1; drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs); } diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 99915a682d5..8a133bda00a 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -321,6 +321,10 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE], train_set[lane] = v | p; } +union aux_channel_transaction { + PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1; + PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2; +}; /* radeon aux chan functions */ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, @@ -329,7 +333,7 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, { struct drm_device *dev = chan->dev; struct radeon_device *rdev = dev->dev_private; - PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args; + union aux_channel_transaction args; int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction); unsigned char *base; int retry_count = 0; @@ -341,31 +345,33 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes, retry: memcpy(base, req_bytes, num_bytes); - args.lpAuxRequest = 0; - args.lpDataOut = 16; - args.ucDataOutLen = 0; - args.ucChannelID = chan->rec.i2c_id; - args.ucDelay = delay / 10; + args.v1.lpAuxRequest = 0; + args.v1.lpDataOut = 16; + args.v1.ucDataOutLen = 0; + args.v1.ucChannelID = chan->rec.i2c_id; + args.v1.ucDelay = delay / 10; + if (ASIC_IS_DCE4(rdev)) + args.v2.ucHPD_ID = chan->rec.hpd_id; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t 
*)&args); - if (args.ucReplyStatus && !args.ucDataOutLen) { - if (args.ucReplyStatus == 0x20 && retry_count++ < 10) + if (args.v1.ucReplyStatus && !args.v1.ucDataOutLen) { + if (args.v1.ucReplyStatus == 0x20 && retry_count++ < 10) goto retry; DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n", req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3], - chan->rec.i2c_id, args.ucReplyStatus, retry_count); + chan->rec.i2c_id, args.v1.ucReplyStatus, retry_count); return false; } - if (args.ucDataOutLen && read_byte && read_buf_len) { - if (read_buf_len < args.ucDataOutLen) { + if (args.v1.ucDataOutLen && read_byte && read_buf_len) { + if (read_buf_len < args.v1.ucDataOutLen) { DRM_ERROR("Buffer to small for return answer %d %d\n", - read_buf_len, args.ucDataOutLen); + read_buf_len, args.v1.ucDataOutLen); return false; } { - int len = min(read_buf_len, args.ucDataOutLen); + int len = min(read_buf_len, args.v1.ucDataOutLen); memcpy(read_byte, base + 16, len); } } @@ -626,12 +632,19 @@ void dp_link_train(struct drm_encoder *encoder, dp_set_link_bw_lanes(radeon_connector, link_configuration); /* disable downspread on the sink */ dp_set_downspread(radeon_connector, 0); - /* start training on the source */ - radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START, - dig_connector->dp_clock, enc_id, 0); - /* set training pattern 1 on the source */ - radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL, - dig_connector->dp_clock, enc_id, 0); + if (ASIC_IS_DCE4(rdev)) { + /* start training on the source */ + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_START); + /* set training pattern 1 on the source */ + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1); + } else { + /* start training on the source */ + radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START, + dig_connector->dp_clock, enc_id, 0); + /* set training pattern 1 on the source */ + radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL, + dig_connector->dp_clock, enc_id, 0); + } /* set initial vs/emph */ memset(train_set, 0, 4); @@ -691,8 +704,11 @@ void dp_link_train(struct drm_encoder *encoder, /* set training pattern 2 on the sink */ dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2); /* set training pattern 2 on the source */ - radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL, - dig_connector->dp_clock, enc_id, 1); + if (ASIC_IS_DCE4(rdev)) + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2); + else + radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL, + dig_connector->dp_clock, enc_id, 1); /* channel equalization loop */ tries = 0; @@ -729,7 +745,11 @@ void dp_link_train(struct drm_encoder *encoder, >> DP_TRAIN_PRE_EMPHASIS_SHIFT); /* disable the training pattern on the sink */ - dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE); + if (ASIC_IS_DCE4(rdev)) + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE); + else + radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE, + dig_connector->dp_clock, enc_id, 0); radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE, dig_connector->dp_clock, enc_id, 0); diff --git a/drivers/gpu/drm/radeon/avivod.h b/drivers/gpu/drm/radeon/avivod.h index d4e6e6e4a93..3c391e7e9fd 100644 --- a/drivers/gpu/drm/radeon/avivod.h +++ b/drivers/gpu/drm/radeon/avivod.h @@ -30,11 +30,13 @@ #define D1CRTC_CONTROL 0x6080 #define CRTC_EN (1 << 0) +#define 
D1CRTC_STATUS 0x609c #define D1CRTC_UPDATE_LOCK 0x60E8 #define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110 #define D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118 #define D2CRTC_CONTROL 0x6880 +#define D2CRTC_STATUS 0x689c #define D2CRTC_UPDATE_LOCK 0x68E8 #define D2GRPH_PRIMARY_SURFACE_ADDRESS 0x6910 #define D2GRPH_SECONDARY_SURFACE_ADDRESS 0x6918 diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c new file mode 100644 index 00000000000..bd2e7aa85c1 --- /dev/null +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -0,0 +1,767 @@ +/* + * Copyright 2010 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Alex Deucher + */ +#include <linux/firmware.h> +#include <linux/platform_device.h> +#include "drmP.h" +#include "radeon.h" +#include "radeon_drm.h" +#include "rv770d.h" +#include "atom.h" +#include "avivod.h" +#include "evergreen_reg.h" + +static void evergreen_gpu_init(struct radeon_device *rdev); +void evergreen_fini(struct radeon_device *rdev); + +bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) +{ + bool connected = false; + /* XXX */ + return connected; +} + +void evergreen_hpd_set_polarity(struct radeon_device *rdev, + enum radeon_hpd_id hpd) +{ + /* XXX */ +} + +void evergreen_hpd_init(struct radeon_device *rdev) +{ + /* XXX */ +} + + +void evergreen_bandwidth_update(struct radeon_device *rdev) +{ + /* XXX */ +} + +void evergreen_hpd_fini(struct radeon_device *rdev) +{ + /* XXX */ +} + +static int evergreen_mc_wait_for_idle(struct radeon_device *rdev) +{ + unsigned i; + u32 tmp; + + for (i = 0; i < rdev->usec_timeout; i++) { + /* read MC_STATUS */ + tmp = RREG32(SRBM_STATUS) & 0x1F00; + if (!tmp) + return 0; + udelay(1); + } + return -1; +} + +/* + * GART + */ +int evergreen_pcie_gart_enable(struct radeon_device *rdev) +{ + u32 tmp; + int r, i; + + if (rdev->gart.table.vram.robj == NULL) { + dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); + return -EINVAL; + } + r = radeon_gart_table_vram_pin(rdev); + if (r) + return r; + radeon_gart_restore(rdev); + /* Setup L2 cache */ + WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | + ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | + EFFECTIVE_L2_QUEUE_SIZE(7)); + WREG32(VM_L2_CNTL2, 0); + WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2)); + /* Setup TLB control */ + tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING | + SYSTEM_ACCESS_MODE_NOT_IN_SYS | + SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU | + 
EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5); + WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); + WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); + WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); + WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); + WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); + WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); + WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); + WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); + WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); + WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | + RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); + WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, + (u32)(rdev->dummy_page.addr >> 12)); + for (i = 1; i < 7; i++) + WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); + + r600_pcie_gart_tlb_flush(rdev); + rdev->gart.ready = true; + return 0; +} + +void evergreen_pcie_gart_disable(struct radeon_device *rdev) +{ + u32 tmp; + int i, r; + + /* Disable all tables */ + for (i = 0; i < 7; i++) + WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); + + /* Setup L2 cache */ + WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING | + EFFECTIVE_L2_QUEUE_SIZE(7)); + WREG32(VM_L2_CNTL2, 0); + WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2)); + /* Setup TLB control */ + tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5); + WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); + WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); + WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); + WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); + WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); + WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); + WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); + if (rdev->gart.table.vram.robj) { + r = radeon_bo_reserve(rdev->gart.table.vram.robj, false); + if (likely(r == 0)) { + radeon_bo_kunmap(rdev->gart.table.vram.robj); + radeon_bo_unpin(rdev->gart.table.vram.robj); + radeon_bo_unreserve(rdev->gart.table.vram.robj); + } + } +} + +void evergreen_pcie_gart_fini(struct radeon_device *rdev) +{ + evergreen_pcie_gart_disable(rdev); + radeon_gart_table_vram_free(rdev); + radeon_gart_fini(rdev); +} + + +void evergreen_agp_enable(struct radeon_device *rdev) +{ + u32 tmp; + int i; + + /* Setup L2 cache */ + WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | + ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | + EFFECTIVE_L2_QUEUE_SIZE(7)); + WREG32(VM_L2_CNTL2, 0); + WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2)); + /* Setup TLB control */ + tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING | + SYSTEM_ACCESS_MODE_NOT_IN_SYS | + SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU | + EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5); + WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); + WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); + WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); + WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); + WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); + WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); + WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); + for (i = 0; i < 7; i++) + WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); +} + +static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) +{ + save->vga_control[0] = RREG32(D1VGA_CONTROL); + save->vga_control[1] = RREG32(D2VGA_CONTROL); + save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL); + save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL); + save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL); + save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL); + save->vga_render_control = RREG32(VGA_RENDER_CONTROL); + save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); + save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + 
EVERGREEN_CRTC0_REGISTER_OFFSET); + save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); + save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET); + save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET); + save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET); + save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); + + /* Stop all video */ + WREG32(VGA_RENDER_CONTROL, 0); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + + WREG32(D1VGA_CONTROL, 0); + WREG32(D2VGA_CONTROL, 0); + WREG32(EVERGREEN_D3VGA_CONTROL, 0); + WREG32(EVERGREEN_D4VGA_CONTROL, 0); + WREG32(EVERGREEN_D5VGA_CONTROL, 0); + WREG32(EVERGREEN_D6VGA_CONTROL, 0); +} + +static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save) +{ + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + 
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, + upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, + (u32)rdev->mc.vram_start); + + WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); + /* Unlock host access */ + WREG32(VGA_HDP_CONTROL, save->vga_hdp_control); + mdelay(1); + /* Restore video state */ + WREG32(D1VGA_CONTROL, save->vga_control[0]); + WREG32(D2VGA_CONTROL, save->vga_control[1]); + WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]); + WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]); + WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]); + WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]); + WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); + 
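evergreen_mc_stop() and evergreen_mc_resume() above follow the usual save / blank / reprogram-MC / restore sequence, spelling out the same register writes once per display controller. Because every Evergreen CRTC block sits at a fixed offset (see the EVERGREEN_CRTCn_REGISTER_OFFSET values in evergreen_reg.h later in this diff), the long run of WREG32() calls is equivalent to looping over a six-entry offset table. Below is a hedged sketch of that idea only, with a printf stub standing in for the driver's WREG32() MMIO helper.

#include <stdint.h>
#include <stdio.h>

/* Register numbers and per-CRTC offsets taken from evergreen_reg.h in this
 * diff; wreg32() is only a stand-in for the driver's WREG32(). */
#define CRTC_CONTROL		0x6e70
#define CRTC_UPDATE_LOCK	0x6ed4

static const uint32_t crtc_offset[6] = {
	0x0, 0xc00, 0x9800, 0xa400, 0xb000, 0xbc00
};

static void wreg32(uint32_t reg, uint32_t val)
{
	printf("WREG32(0x%05x, 0x%08x)\n", reg, val);
}

/* Lock the double-buffered CRTC registers, clear the master enable on all
 * six controllers, then drop the locks -- the same sequence the stop path
 * above writes out long-hand. */
static void blank_all_crtcs(void)
{
	int i;

	for (i = 0; i < 6; i++)
		wreg32(CRTC_UPDATE_LOCK + crtc_offset[i], 1);
	for (i = 0; i < 6; i++)
		wreg32(CRTC_CONTROL + crtc_offset[i], 0);
	for (i = 0; i < 6; i++)
		wreg32(CRTC_UPDATE_LOCK + crtc_offset[i], 0);
}

int main(void)
{
	blank_all_crtcs();
	return 0;
}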
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); + WREG32(VGA_RENDER_CONTROL, save->vga_render_control); +} + +static void evergreen_mc_program(struct radeon_device *rdev) +{ + struct evergreen_mc_save save; + u32 tmp; + int i, j; + + /* Initialize HDP */ + for (i = 0, j = 0; i < 32; i++, j += 0x18) { + WREG32((0x2c14 + j), 0x00000000); + WREG32((0x2c18 + j), 0x00000000); + WREG32((0x2c1c + j), 0x00000000); + WREG32((0x2c20 + j), 0x00000000); + WREG32((0x2c24 + j), 0x00000000); + } + WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); + + evergreen_mc_stop(rdev, &save); + if (evergreen_mc_wait_for_idle(rdev)) { + dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); + } + /* Lockout access through VGA aperture*/ + WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); + /* Update configuration */ + if (rdev->flags & RADEON_IS_AGP) { + if (rdev->mc.vram_start < rdev->mc.gtt_start) { + /* VRAM before AGP */ + WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, + rdev->mc.vram_start >> 12); + WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, + rdev->mc.gtt_end >> 12); + } else { + /* VRAM after AGP */ + WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, + rdev->mc.gtt_start >> 12); + WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, + rdev->mc.vram_end >> 12); + } + } else { + WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, + rdev->mc.vram_start >> 12); + WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, + rdev->mc.vram_end >> 12); + } + WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); + tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; + tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); + WREG32(MC_VM_FB_LOCATION, tmp); + WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); + WREG32(HDP_NONSURFACE_INFO, (2 << 7)); + WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); + if (rdev->flags & RADEON_IS_AGP) { + WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); + WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); + WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22); + } else { + WREG32(MC_VM_AGP_BASE, 0); + WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); + WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); + } + if (evergreen_mc_wait_for_idle(rdev)) { + dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); + } + evergreen_mc_resume(rdev, &save); + /* we need to own VRAM, so turn off the VGA renderer here + * to stop it overwriting our objects */ + rv515_vga_render_disable(rdev); +} + +#if 0 +/* + * CP. + */ +static void evergreen_cp_stop(struct radeon_device *rdev) +{ + /* XXX */ +} + + +static int evergreen_cp_load_microcode(struct radeon_device *rdev) +{ + /* XXX */ + + return 0; +} + + +/* + * Core functions + */ +static u32 evergreen_get_tile_pipe_to_backend_map(u32 num_tile_pipes, + u32 num_backends, + u32 backend_disable_mask) +{ + u32 backend_map = 0; + + return backend_map; +} +#endif + +static void evergreen_gpu_init(struct radeon_device *rdev) +{ + /* XXX */ +} + +int evergreen_mc_init(struct radeon_device *rdev) +{ + fixed20_12 a; + u32 tmp; + int chansize, numchan; + + /* Get VRAM informations */ + rdev->mc.vram_is_ddr = true; + tmp = RREG32(MC_ARB_RAMCFG); + if (tmp & CHANSIZE_OVERRIDE) { + chansize = 16; + } else if (tmp & CHANSIZE_MASK) { + chansize = 64; + } else { + chansize = 32; + } + tmp = RREG32(MC_SHARED_CHMAP); + switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { + case 0: + default: + numchan = 1; + break; + case 1: + numchan = 2; + break; + case 2: + numchan = 4; + break; + case 3: + numchan = 8; + break; + } + rdev->mc.vram_width = numchan * chansize; + /* Could aper size report 0 ? 
*/ + rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); + rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); + /* Setup GPU memory space */ + /* size in MB on evergreen */ + rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; + rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; + rdev->mc.visible_vram_size = rdev->mc.aper_size; + /* FIXME remove this once we support unmappable VRAM */ + if (rdev->mc.mc_vram_size > rdev->mc.aper_size) { + rdev->mc.mc_vram_size = rdev->mc.aper_size; + rdev->mc.real_vram_size = rdev->mc.aper_size; + } + r600_vram_gtt_location(rdev, &rdev->mc); + /* FIXME: we should enforce default clock in case GPU is not in + * default setup + */ + a.full = rfixed_const(100); + rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); + rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); + return 0; +} + +int evergreen_gpu_reset(struct radeon_device *rdev) +{ + /* FIXME: implement for evergreen */ + return 0; +} + +static int evergreen_startup(struct radeon_device *rdev) +{ +#if 0 + int r; + + if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { + r = r600_init_microcode(rdev); + if (r) { + DRM_ERROR("Failed to load firmware!\n"); + return r; + } + } +#endif + evergreen_mc_program(rdev); +#if 0 + if (rdev->flags & RADEON_IS_AGP) { + evergreem_agp_enable(rdev); + } else { + r = evergreen_pcie_gart_enable(rdev); + if (r) + return r; + } +#endif + evergreen_gpu_init(rdev); +#if 0 + if (!rdev->r600_blit.shader_obj) { + r = r600_blit_init(rdev); + if (r) { + DRM_ERROR("radeon: failed blitter (%d).\n", r); + return r; + } + } + + r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); + if (unlikely(r != 0)) + return r; + r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, + &rdev->r600_blit.shader_gpu_addr); + radeon_bo_unreserve(rdev->r600_blit.shader_obj); + if (r) { + DRM_ERROR("failed to pin blit object %d\n", r); + return r; + } + + /* Enable IRQ */ + r = r600_irq_init(rdev); + if (r) { + DRM_ERROR("radeon: IH init failed (%d).\n", r); + radeon_irq_kms_fini(rdev); + return r; + } + r600_irq_set(rdev); + + r = radeon_ring_init(rdev, rdev->cp.ring_size); + if (r) + return r; + r = evergreen_cp_load_microcode(rdev); + if (r) + return r; + r = r600_cp_resume(rdev); + if (r) + return r; + /* write back buffer are not vital so don't worry about failure */ + r600_wb_enable(rdev); +#endif + return 0; +} + +int evergreen_resume(struct radeon_device *rdev) +{ + int r; + + /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw, + * posting will perform necessary task to bring back GPU into good + * shape. 
+ */ + /* post card */ + atom_asic_init(rdev->mode_info.atom_context); + /* Initialize clocks */ + r = radeon_clocks_init(rdev); + if (r) { + return r; + } + + r = evergreen_startup(rdev); + if (r) { + DRM_ERROR("r600 startup failed on resume\n"); + return r; + } +#if 0 + r = r600_ib_test(rdev); + if (r) { + DRM_ERROR("radeon: failled testing IB (%d).\n", r); + return r; + } +#endif + return r; + +} + +int evergreen_suspend(struct radeon_device *rdev) +{ +#if 0 + int r; + + /* FIXME: we should wait for ring to be empty */ + r700_cp_stop(rdev); + rdev->cp.ready = false; + r600_wb_disable(rdev); + evergreen_pcie_gart_disable(rdev); + /* unpin shaders bo */ + r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); + if (likely(r == 0)) { + radeon_bo_unpin(rdev->r600_blit.shader_obj); + radeon_bo_unreserve(rdev->r600_blit.shader_obj); + } +#endif + return 0; +} + +static bool evergreen_card_posted(struct radeon_device *rdev) +{ + u32 reg; + + /* first check CRTCs */ + reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); + if (reg & EVERGREEN_CRTC_MASTER_EN) + return true; + + /* then check MEM_SIZE, in case the crtcs are off */ + if (RREG32(CONFIG_MEMSIZE)) + return true; + + return false; +} + +/* Plan is to move initialization in that function and use + * helper function so that radeon_device_init pretty much + * do nothing more than calling asic specific function. This + * should also allow to remove a bunch of callback function + * like vram_info. + */ +int evergreen_init(struct radeon_device *rdev) +{ + int r; + + r = radeon_dummy_page_init(rdev); + if (r) + return r; + /* This don't do much */ + r = radeon_gem_init(rdev); + if (r) + return r; + /* Read BIOS */ + if (!radeon_get_bios(rdev)) { + if (ASIC_IS_AVIVO(rdev)) + return -EINVAL; + } + /* Must be an ATOMBIOS */ + if (!rdev->is_atom_bios) { + dev_err(rdev->dev, "Expecting atombios for R600 GPU\n"); + return -EINVAL; + } + r = radeon_atombios_init(rdev); + if (r) + return r; + /* Post card if necessary */ + if (!evergreen_card_posted(rdev)) { + if (!rdev->bios) { + dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n"); + return -EINVAL; + } + DRM_INFO("GPU not posted. 
posting now...\n"); + atom_asic_init(rdev->mode_info.atom_context); + } + /* Initialize scratch registers */ + r600_scratch_init(rdev); + /* Initialize surface registers */ + radeon_surface_init(rdev); + /* Initialize clocks */ + radeon_get_clock_info(rdev->ddev); + r = radeon_clocks_init(rdev); + if (r) + return r; + /* Initialize power management */ + radeon_pm_init(rdev); + /* Fence driver */ + r = radeon_fence_driver_init(rdev); + if (r) + return r; + /* initialize AGP */ + if (rdev->flags & RADEON_IS_AGP) { + r = radeon_agp_init(rdev); + if (r) + radeon_agp_disable(rdev); + } + /* initialize memory controller */ + r = evergreen_mc_init(rdev); + if (r) + return r; + /* Memory manager */ + r = radeon_bo_init(rdev); + if (r) + return r; +#if 0 + r = radeon_irq_kms_init(rdev); + if (r) + return r; + + rdev->cp.ring_obj = NULL; + r600_ring_init(rdev, 1024 * 1024); + + rdev->ih.ring_obj = NULL; + r600_ih_ring_init(rdev, 64 * 1024); + + r = r600_pcie_gart_init(rdev); + if (r) + return r; +#endif + rdev->accel_working = false; + r = evergreen_startup(rdev); + if (r) { + evergreen_suspend(rdev); + /*r600_wb_fini(rdev);*/ + /*radeon_ring_fini(rdev);*/ + /*evergreen_pcie_gart_fini(rdev);*/ + rdev->accel_working = false; + } + if (rdev->accel_working) { + r = radeon_ib_pool_init(rdev); + if (r) { + DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r); + rdev->accel_working = false; + } + r = r600_ib_test(rdev); + if (r) { + DRM_ERROR("radeon: failed testing IB (%d).\n", r); + rdev->accel_working = false; + } + } + return 0; +} + +void evergreen_fini(struct radeon_device *rdev) +{ + evergreen_suspend(rdev); +#if 0 + r600_blit_fini(rdev); + r600_irq_fini(rdev); + radeon_irq_kms_fini(rdev); + radeon_ring_fini(rdev); + r600_wb_fini(rdev); + evergreen_pcie_gart_fini(rdev); +#endif + radeon_gem_fini(rdev); + radeon_fence_driver_fini(rdev); + radeon_clocks_fini(rdev); + radeon_agp_fini(rdev); + radeon_bo_fini(rdev); + radeon_atombios_fini(rdev); + kfree(rdev->bios); + rdev->bios = NULL; + radeon_dummy_page_fini(rdev); +} diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h new file mode 100644 index 00000000000..f7c7c964343 --- /dev/null +++ b/drivers/gpu/drm/radeon/evergreen_reg.h @@ -0,0 +1,176 @@ +/* + * Copyright 2010 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Alex Deucher + */ +#ifndef __EVERGREEN_REG_H__ +#define __EVERGREEN_REG_H__ + +/* evergreen */ +#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0x310 +#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0x324 +#define EVERGREEN_D3VGA_CONTROL 0x3e0 +#define EVERGREEN_D4VGA_CONTROL 0x3e4 +#define EVERGREEN_D5VGA_CONTROL 0x3e8 +#define EVERGREEN_D6VGA_CONTROL 0x3ec + +#define EVERGREEN_P1PLL_SS_CNTL 0x414 +#define EVERGREEN_P2PLL_SS_CNTL 0x454 +# define EVERGREEN_PxPLL_SS_EN (1 << 12) +/* GRPH blocks at 0x6800, 0x7400, 0x10000, 0x10c00, 0x11800, 0x12400 */ +#define EVERGREEN_GRPH_ENABLE 0x6800 +#define EVERGREEN_GRPH_CONTROL 0x6804 +# define EVERGREEN_GRPH_DEPTH(x) (((x) & 0x3) << 0) +# define EVERGREEN_GRPH_DEPTH_8BPP 0 +# define EVERGREEN_GRPH_DEPTH_16BPP 1 +# define EVERGREEN_GRPH_DEPTH_32BPP 2 +# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8) +/* 8 BPP */ +# define EVERGREEN_GRPH_FORMAT_INDEXED 0 +/* 16 BPP */ +# define EVERGREEN_GRPH_FORMAT_ARGB1555 0 +# define EVERGREEN_GRPH_FORMAT_ARGB565 1 +# define EVERGREEN_GRPH_FORMAT_ARGB4444 2 +# define EVERGREEN_GRPH_FORMAT_AI88 3 +# define EVERGREEN_GRPH_FORMAT_MONO16 4 +# define EVERGREEN_GRPH_FORMAT_BGRA5551 5 +/* 32 BPP */ +# define EVERGREEN_GRPH_FORMAT_ARGB8888 0 +# define EVERGREEN_GRPH_FORMAT_ARGB2101010 1 +# define EVERGREEN_GRPH_FORMAT_32BPP_DIG 2 +# define EVERGREEN_GRPH_FORMAT_8B_ARGB2101010 3 +# define EVERGREEN_GRPH_FORMAT_BGRA1010102 4 +# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5 +# define EVERGREEN_GRPH_FORMAT_RGB111110 6 +# define EVERGREEN_GRPH_FORMAT_BGR101111 7 +#define EVERGREEN_GRPH_SWAP_CONTROL 0x680c +# define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0) +# define EVERGREEN_GRPH_ENDIAN_NONE 0 +# define EVERGREEN_GRPH_ENDIAN_8IN16 1 +# define EVERGREEN_GRPH_ENDIAN_8IN32 2 +# define EVERGREEN_GRPH_ENDIAN_8IN64 3 +# define EVERGREEN_GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4) +# define EVERGREEN_GRPH_RED_SEL_R 0 +# define EVERGREEN_GRPH_RED_SEL_G 1 +# define EVERGREEN_GRPH_RED_SEL_B 2 +# define EVERGREEN_GRPH_RED_SEL_A 3 +# define EVERGREEN_GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6) +# define EVERGREEN_GRPH_GREEN_SEL_G 0 +# define EVERGREEN_GRPH_GREEN_SEL_B 1 +# define EVERGREEN_GRPH_GREEN_SEL_A 2 +# define EVERGREEN_GRPH_GREEN_SEL_R 3 +# define EVERGREEN_GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8) +# define EVERGREEN_GRPH_BLUE_SEL_B 0 +# define EVERGREEN_GRPH_BLUE_SEL_A 1 +# define EVERGREEN_GRPH_BLUE_SEL_R 2 +# define EVERGREEN_GRPH_BLUE_SEL_G 3 +# define EVERGREEN_GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10) +# define EVERGREEN_GRPH_ALPHA_SEL_A 0 +# define EVERGREEN_GRPH_ALPHA_SEL_R 1 +# define EVERGREEN_GRPH_ALPHA_SEL_G 2 +# define EVERGREEN_GRPH_ALPHA_SEL_B 3 +#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS 0x6810 +#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS 0x6814 +# define EVERGREEN_GRPH_DFQ_ENABLE (1 << 0) +# define EVERGREEN_GRPH_SURFACE_ADDRESS_MASK 0xffffff00 +#define EVERGREEN_GRPH_PITCH 0x6818 +#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x681c +#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x6820 +#define EVERGREEN_GRPH_SURFACE_OFFSET_X 0x6824 +#define EVERGREEN_GRPH_SURFACE_OFFSET_Y 0x6828 +#define EVERGREEN_GRPH_X_START 0x682c +#define EVERGREEN_GRPH_Y_START 0x6830 +#define EVERGREEN_GRPH_X_END 0x6834 +#define EVERGREEN_GRPH_Y_END 0x6838 + +/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */ +#define EVERGREEN_CUR_CONTROL 0x6998 +# define EVERGREEN_CURSOR_EN (1 << 0) +# define EVERGREEN_CURSOR_MODE(x) (((x) & 0x3) << 8) +# define EVERGREEN_CURSOR_MONO 0 +# define 
EVERGREEN_CURSOR_24_1 1 +# define EVERGREEN_CURSOR_24_8_PRE_MULT 2 +# define EVERGREEN_CURSOR_24_8_UNPRE_MULT 3 +# define EVERGREEN_CURSOR_2X_MAGNIFY (1 << 16) +# define EVERGREEN_CURSOR_FORCE_MC_ON (1 << 20) +# define EVERGREEN_CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24) +# define EVERGREEN_CURSOR_URGENT_ALWAYS 0 +# define EVERGREEN_CURSOR_URGENT_1_8 1 +# define EVERGREEN_CURSOR_URGENT_1_4 2 +# define EVERGREEN_CURSOR_URGENT_3_8 3 +# define EVERGREEN_CURSOR_URGENT_1_2 4 +#define EVERGREEN_CUR_SURFACE_ADDRESS 0x699c +# define EVERGREEN_CUR_SURFACE_ADDRESS_MASK 0xfffff000 +#define EVERGREEN_CUR_SIZE 0x69a0 +#define EVERGREEN_CUR_SURFACE_ADDRESS_HIGH 0x69a4 +#define EVERGREEN_CUR_POSITION 0x69a8 +#define EVERGREEN_CUR_HOT_SPOT 0x69ac +#define EVERGREEN_CUR_COLOR1 0x69b0 +#define EVERGREEN_CUR_COLOR2 0x69b4 +#define EVERGREEN_CUR_UPDATE 0x69b8 +# define EVERGREEN_CURSOR_UPDATE_PENDING (1 << 0) +# define EVERGREEN_CURSOR_UPDATE_TAKEN (1 << 1) +# define EVERGREEN_CURSOR_UPDATE_LOCK (1 << 16) +# define EVERGREEN_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24) + +/* LUT blocks at 0x69e0, 0x75e0, 0x101e0, 0x10de0, 0x119e0, 0x125e0 */ +#define EVERGREEN_DC_LUT_RW_MODE 0x69e0 +#define EVERGREEN_DC_LUT_RW_INDEX 0x69e4 +#define EVERGREEN_DC_LUT_SEQ_COLOR 0x69e8 +#define EVERGREEN_DC_LUT_PWL_DATA 0x69ec +#define EVERGREEN_DC_LUT_30_COLOR 0x69f0 +#define EVERGREEN_DC_LUT_VGA_ACCESS_ENABLE 0x69f4 +#define EVERGREEN_DC_LUT_WRITE_EN_MASK 0x69f8 +#define EVERGREEN_DC_LUT_AUTOFILL 0x69fc +#define EVERGREEN_DC_LUT_CONTROL 0x6a00 +#define EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE 0x6a04 +#define EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN 0x6a08 +#define EVERGREEN_DC_LUT_BLACK_OFFSET_RED 0x6a0c +#define EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE 0x6a10 +#define EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN 0x6a14 +#define EVERGREEN_DC_LUT_WHITE_OFFSET_RED 0x6a18 + +#define EVERGREEN_DATA_FORMAT 0x6b00 +# define EVERGREEN_INTERLEAVE_EN (1 << 0) +#define EVERGREEN_DESKTOP_HEIGHT 0x6b04 + +#define EVERGREEN_VIEWPORT_START 0x6d70 +#define EVERGREEN_VIEWPORT_SIZE 0x6d74 + +/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. 
*/ +#define EVERGREEN_CRTC0_REGISTER_OFFSET (0x6df0 - 0x6df0) +#define EVERGREEN_CRTC1_REGISTER_OFFSET (0x79f0 - 0x6df0) +#define EVERGREEN_CRTC2_REGISTER_OFFSET (0x105f0 - 0x6df0) +#define EVERGREEN_CRTC3_REGISTER_OFFSET (0x111f0 - 0x6df0) +#define EVERGREEN_CRTC4_REGISTER_OFFSET (0x11df0 - 0x6df0) +#define EVERGREEN_CRTC5_REGISTER_OFFSET (0x129f0 - 0x6df0) + +/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */ +#define EVERGREEN_CRTC_CONTROL 0x6e70 +# define EVERGREEN_CRTC_MASTER_EN (1 << 0) +#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 + +#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0 +#define EVERGREEN_DC_GPIO_HPD_A 0x64b4 +#define EVERGREEN_DC_GPIO_HPD_EN 0x64b8 +#define EVERGREEN_DC_GPIO_HPD_Y 0x64bc + +#endif diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index c0d4650cdb7..91eb762eb3f 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -197,13 +197,13 @@ int r100_pci_gart_enable(struct radeon_device *rdev) { uint32_t tmp; + radeon_gart_restore(rdev); /* discard memory request outside of configured range */ tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS; WREG32(RADEON_AIC_CNTL, tmp); /* set address range for PCI address translate */ - WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location); - tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; - WREG32(RADEON_AIC_HI_ADDR, tmp); + WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start); + WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end); /* set PCI GART page-table base address */ WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr); tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN; @@ -312,9 +312,11 @@ int r100_irq_process(struct radeon_device *rdev) /* Vertical blank interrupts */ if (status & RADEON_CRTC_VBLANK_STAT) { drm_handle_vblank(rdev->ddev, 0); + wake_up(&rdev->irq.vblank_queue); } if (status & RADEON_CRTC2_VBLANK_STAT) { drm_handle_vblank(rdev->ddev, 1); + wake_up(&rdev->irq.vblank_queue); } if (status & RADEON_FP_DETECT_STAT) { queue_hotplug = true; @@ -366,8 +368,8 @@ void r100_fence_ring_emit(struct radeon_device *rdev, radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0)); radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL); /* Wait until IDLE & CLEAN */ - radeon_ring_write(rdev, PACKET0(0x1720, 0)); - radeon_ring_write(rdev, (1 << 16) | (1 << 17)); + radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); + radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN); radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0)); radeon_ring_write(rdev, rdev->config.r100.hdp_cntl | RADEON_HDP_READ_BUFFER_INVALIDATE); @@ -1701,7 +1703,7 @@ int r100_gui_wait_for_idle(struct radeon_device *rdev) } for (i = 0; i < rdev->usec_timeout; i++) { tmp = RREG32(RADEON_RBBM_STATUS); - if (!(tmp & (1 << 31))) { + if (!(tmp & RADEON_RBBM_ACTIVE)) { return 0; } DRM_UDELAY(1); @@ -1716,8 +1718,8 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev) for (i = 0; i < rdev->usec_timeout; i++) { /* read MC_STATUS */ - tmp = RREG32(0x0150); - if (tmp & (1 << 2)) { + tmp = RREG32(RADEON_MC_STATUS); + if (tmp & RADEON_MC_IDLE) { return 0; } DRM_UDELAY(1); @@ -1790,7 +1792,7 @@ int r100_gpu_reset(struct radeon_device *rdev) } /* Check if GPU is idle */ status = RREG32(RADEON_RBBM_STATUS); - if (status & (1 << 31)) { + if (status & RADEON_RBBM_ACTIVE) { DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); return -1; } @@ -1800,6 +1802,9 @@ int r100_gpu_reset(struct radeon_device *rdev) void r100_set_common_regs(struct radeon_device 
*rdev) { + struct drm_device *dev = rdev->ddev; + bool force_dac2 = false; + /* set these so they don't interfere with anything */ WREG32(RADEON_OV0_SCALE_CNTL, 0); WREG32(RADEON_SUBPIC_CNTL, 0); @@ -1808,6 +1813,68 @@ void r100_set_common_regs(struct radeon_device *rdev) WREG32(RADEON_DVI_I2C_CNTL_1, 0); WREG32(RADEON_CAP0_TRIG_CNTL, 0); WREG32(RADEON_CAP1_TRIG_CNTL, 0); + + /* always set up dac2 on rn50 and some rv100 as lots + * of servers seem to wire it up to a VGA port but + * don't report it in the bios connector + * table. + */ + switch (dev->pdev->device) { + /* RN50 */ + case 0x515e: + case 0x5969: + force_dac2 = true; + break; + /* RV100*/ + case 0x5159: + case 0x515a: + /* DELL triple head servers */ + if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) && + ((dev->pdev->subsystem_device == 0x016c) || + (dev->pdev->subsystem_device == 0x016d) || + (dev->pdev->subsystem_device == 0x016e) || + (dev->pdev->subsystem_device == 0x016f) || + (dev->pdev->subsystem_device == 0x0170) || + (dev->pdev->subsystem_device == 0x017d) || + (dev->pdev->subsystem_device == 0x017e) || + (dev->pdev->subsystem_device == 0x0183) || + (dev->pdev->subsystem_device == 0x018a) || + (dev->pdev->subsystem_device == 0x019a))) + force_dac2 = true; + break; + } + + if (force_dac2) { + u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); + u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); + u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2); + + /* For CRT on DAC2, don't turn it on if BIOS didn't + enable it, even it's detected. + */ + + /* force it to crtc0 */ + dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL; + dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL; + disp_hw_debug |= RADEON_CRT2_DISP1_SEL; + + /* set up the TV DAC */ + tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL | + RADEON_TV_DAC_STD_MASK | + RADEON_TV_DAC_RDACPD | + RADEON_TV_DAC_GDACPD | + RADEON_TV_DAC_BDACPD | + RADEON_TV_DAC_BGADJ_MASK | + RADEON_TV_DAC_DACADJ_MASK); + tv_dac_cntl |= (RADEON_TV_DAC_NBLANK | + RADEON_TV_DAC_NHOLD | + RADEON_TV_DAC_STD_PS2 | + (0x58 << 16)); + + WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); + WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); + WREG32(RADEON_DAC_CNTL2, dac2_cntl); + } } /* @@ -1889,17 +1956,20 @@ static u32 r100_get_accessible_vram(struct radeon_device *rdev) void r100_vram_init_sizes(struct radeon_device *rdev) { u64 config_aper_size; - u32 accessible; + /* work out accessible VRAM */ + rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); + rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); + rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev); + /* FIXME we don't use the second aperture yet when we could use it */ + if (rdev->mc.visible_vram_size > rdev->mc.aper_size) + rdev->mc.visible_vram_size = rdev->mc.aper_size; config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); - if (rdev->flags & RADEON_IS_IGP) { uint32_t tom; /* read NB_TOM to get the amount of ram stolen for the GPU */ tom = RREG32(RADEON_NB_TOM); rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); - /* for IGPs we need to keep VRAM where it was put by the BIOS */ - rdev->mc.vram_location = (tom & 0xffff) << 16; WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); rdev->mc.mc_vram_size = rdev->mc.real_vram_size; } else { @@ -1911,30 +1981,19 @@ void r100_vram_init_sizes(struct radeon_device *rdev) rdev->mc.real_vram_size = 8192 * 1024; WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); } - /* let driver place VRAM */ - rdev->mc.vram_location = 0xFFFFFFFFUL; - /* Fix for RN50, M6, M7 with 8/16/32(??) 
MBs of VRAM - - * Novell bug 204882 + along with lots of ubuntu ones */ + /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - + * Novell bug 204882 + along with lots of ubuntu ones + */ if (config_aper_size > rdev->mc.real_vram_size) rdev->mc.mc_vram_size = config_aper_size; else rdev->mc.mc_vram_size = rdev->mc.real_vram_size; } - - /* work out accessible VRAM */ - accessible = r100_get_accessible_vram(rdev); - - rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); - rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); - - if (accessible > rdev->mc.aper_size) - accessible = rdev->mc.aper_size; - - if (rdev->mc.mc_vram_size > rdev->mc.aper_size) + /* FIXME remove this once we support unmappable VRAM */ + if (rdev->mc.mc_vram_size > rdev->mc.aper_size) { rdev->mc.mc_vram_size = rdev->mc.aper_size; - - if (rdev->mc.real_vram_size > rdev->mc.aper_size) rdev->mc.real_vram_size = rdev->mc.aper_size; + } } void r100_vga_set_state(struct radeon_device *rdev, bool state) @@ -1951,11 +2010,18 @@ void r100_vga_set_state(struct radeon_device *rdev, bool state) WREG32(RADEON_CONFIG_CNTL, temp); } -void r100_vram_info(struct radeon_device *rdev) +void r100_mc_init(struct radeon_device *rdev) { - r100_vram_get_type(rdev); + u64 base; + r100_vram_get_type(rdev); r100_vram_init_sizes(rdev); + base = rdev->mc.aper_base; + if (rdev->flags & RADEON_IS_IGP) + base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; + radeon_vram_location(rdev, &rdev->mc, base); + if (!(rdev->flags & RADEON_IS_AGP)) + radeon_gtt_location(rdev, &rdev->mc); } @@ -3226,10 +3292,9 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save) void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save) { /* Update base address for crtc */ - WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_location); + WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start); if (!(rdev->flags & RADEON_SINGLE_CRTC)) { - WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, - rdev->mc.vram_location); + WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start); } /* Restore CRTC registers */ WREG8(R_0003C2_GENMO_WT, save->GENMO_WT); @@ -3390,32 +3455,6 @@ void r100_fini(struct radeon_device *rdev) rdev->bios = NULL; } -int r100_mc_init(struct radeon_device *rdev) -{ - int r; - u32 tmp; - - /* Setup GPU memory space */ - rdev->mc.vram_location = 0xFFFFFFFFUL; - rdev->mc.gtt_location = 0xFFFFFFFFUL; - if (rdev->flags & RADEON_IS_IGP) { - tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM)); - rdev->mc.vram_location = tmp << 16; - } - if (rdev->flags & RADEON_IS_AGP) { - r = radeon_agp_init(rdev); - if (r) { - radeon_agp_disable(rdev); - } else { - rdev->mc.gtt_location = rdev->mc.agp_base; - } - } - r = radeon_mc_setup(rdev); - if (r) - return r; - return 0; -} - int r100_init(struct radeon_device *rdev) { int r; @@ -3458,12 +3497,15 @@ int r100_init(struct radeon_device *rdev) radeon_get_clock_info(rdev->ddev); /* Initialize power management */ radeon_pm_init(rdev); - /* Get vram informations */ - r100_vram_info(rdev); - /* Initialize memory controller (also test AGP) */ - r = r100_mc_init(rdev); - if (r) - return r; + /* initialize AGP */ + if (rdev->flags & RADEON_IS_AGP) { + r = radeon_agp_init(rdev); + if (r) { + radeon_agp_disable(rdev); + } + } + /* initialize VRAM */ + r100_mc_init(rdev); /* Fence driver */ r = radeon_fence_driver_init(rdev); if (r) diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index ff1e0cd608b..1146c9909c2 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ 
b/drivers/gpu/drm/radeon/r200.c @@ -31,6 +31,7 @@ #include "radeon_reg.h" #include "radeon.h" +#include "r100d.h" #include "r200_reg_safe.h" #include "r100_track.h" @@ -79,6 +80,51 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0) return vtx_size; } +int r200_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, + uint64_t dst_offset, + unsigned num_pages, + struct radeon_fence *fence) +{ + uint32_t size; + uint32_t cur_size; + int i, num_loops; + int r = 0; + + /* radeon pitch is /64 */ + size = num_pages << PAGE_SHIFT; + num_loops = DIV_ROUND_UP(size, 0x1FFFFF); + r = radeon_ring_lock(rdev, num_loops * 4 + 64); + if (r) { + DRM_ERROR("radeon: moving bo (%d).\n", r); + return r; + } + /* Must wait for 2D idle & clean before DMA or hangs might happen */ + radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); + radeon_ring_write(rdev, (1 << 16)); + for (i = 0; i < num_loops; i++) { + cur_size = size; + if (cur_size > 0x1FFFFF) { + cur_size = 0x1FFFFF; + } + size -= cur_size; + radeon_ring_write(rdev, PACKET0(0x720, 2)); + radeon_ring_write(rdev, src_offset); + radeon_ring_write(rdev, dst_offset); + radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30)); + src_offset += cur_size; + dst_offset += cur_size; + } + radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); + radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE); + if (fence) { + r = radeon_fence_emit(rdev, fence); + } + radeon_ring_unlock_commit(rdev); + return r; +} + + static int r200_get_vtx_size_1(uint32_t vtx_fmt_1) { int vtx_size, i, tex_size; diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 43b55a030b4..4cef90cd74e 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -117,18 +117,19 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev) r = radeon_gart_table_vram_pin(rdev); if (r) return r; + radeon_gart_restore(rdev); /* discard memory request outside of configured range */ tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); - WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location); - tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - RADEON_GPU_PAGE_SIZE; + WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start); + tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK; WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp); WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0); WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0); table_addr = rdev->gart.table_addr; WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr); /* FIXME: setup default page */ - WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location); + WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start); WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0); /* Clear error */ WREG32_PCIE(0x18, 0); @@ -174,18 +175,20 @@ void r300_fence_ring_emit(struct radeon_device *rdev, /* Who ever call radeon_fence_emit should call ring_lock and ask * for enough space (today caller are ib schedule and buffer move) */ /* Write SC register so SC & US assert idle */ - radeon_ring_write(rdev, PACKET0(0x43E0, 0)); + radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0)); radeon_ring_write(rdev, 0); - radeon_ring_write(rdev, PACKET0(0x43E4, 0)); + radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0)); radeon_ring_write(rdev, 0); /* Flush 3D cache */ - radeon_ring_write(rdev, PACKET0(0x4E4C, 0)); - radeon_ring_write(rdev, (2 << 0)); - radeon_ring_write(rdev, PACKET0(0x4F18, 0)); - radeon_ring_write(rdev, (1 << 0)); + radeon_ring_write(rdev, 
PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); + radeon_ring_write(rdev, R300_RB3D_DC_FLUSH); + radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); + radeon_ring_write(rdev, R300_ZC_FLUSH); /* Wait until IDLE & CLEAN */ - radeon_ring_write(rdev, PACKET0(0x1720, 0)); - radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9)); + radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); + radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN | + RADEON_WAIT_2D_IDLECLEAN | + RADEON_WAIT_DMA_GUI_IDLE)); radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0)); radeon_ring_write(rdev, rdev->config.r300.hdp_cntl | RADEON_HDP_READ_BUFFER_INVALIDATE); @@ -198,50 +201,6 @@ void r300_fence_ring_emit(struct radeon_device *rdev, radeon_ring_write(rdev, RADEON_SW_INT_FIRE); } -int r300_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, - uint64_t dst_offset, - unsigned num_pages, - struct radeon_fence *fence) -{ - uint32_t size; - uint32_t cur_size; - int i, num_loops; - int r = 0; - - /* radeon pitch is /64 */ - size = num_pages << PAGE_SHIFT; - num_loops = DIV_ROUND_UP(size, 0x1FFFFF); - r = radeon_ring_lock(rdev, num_loops * 4 + 64); - if (r) { - DRM_ERROR("radeon: moving bo (%d).\n", r); - return r; - } - /* Must wait for 2D idle & clean before DMA or hangs might happen */ - radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0 )); - radeon_ring_write(rdev, (1 << 16)); - for (i = 0; i < num_loops; i++) { - cur_size = size; - if (cur_size > 0x1FFFFF) { - cur_size = 0x1FFFFF; - } - size -= cur_size; - radeon_ring_write(rdev, PACKET0(0x720, 2)); - radeon_ring_write(rdev, src_offset); - radeon_ring_write(rdev, dst_offset); - radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30)); - src_offset += cur_size; - dst_offset += cur_size; - } - radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); - radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE); - if (fence) { - r = radeon_fence_emit(rdev, fence); - } - radeon_ring_unlock_commit(rdev); - return r; -} - void r300_ring_start(struct radeon_device *rdev) { unsigned gb_tile_config; @@ -281,8 +240,8 @@ void r300_ring_start(struct radeon_device *rdev) radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN); - radeon_ring_write(rdev, PACKET0(0x170C, 0)); - radeon_ring_write(rdev, 1 << 31); + radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0)); + radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG); radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0)); radeon_ring_write(rdev, 0); radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0)); @@ -349,8 +308,8 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev) for (i = 0; i < rdev->usec_timeout; i++) { /* read MC_STATUS */ - tmp = RREG32(0x0150); - if (tmp & (1 << 4)) { + tmp = RREG32(RADEON_MC_STATUS); + if (tmp & R300_MC_IDLE) { return 0; } DRM_UDELAY(1); @@ -395,8 +354,8 @@ void r300_gpu_init(struct radeon_device *rdev) "programming pipes. 
Bad things might happen.\n"); } - tmp = RREG32(0x170C); - WREG32(0x170C, tmp | (1 << 31)); + tmp = RREG32(R300_DST_PIPE_CONFIG); + WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG); WREG32(R300_RB2D_DSTCACHE_MODE, R300_DC_AUTOFLUSH_ENABLE | @@ -437,8 +396,8 @@ int r300_ga_reset(struct radeon_device *rdev) /* GA still busy soft reset it */ WREG32(0x429C, 0x200); WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0); - WREG32(0x43E0, 0); - WREG32(0x43E4, 0); + WREG32(R300_RE_SCISSORS_TL, 0); + WREG32(R300_RE_SCISSORS_BR, 0); WREG32(0x24AC, 0); } /* Wait to prevent race in RBBM_STATUS */ @@ -488,7 +447,7 @@ int r300_gpu_reset(struct radeon_device *rdev) } /* Check if GPU is idle */ status = RREG32(RADEON_RBBM_STATUS); - if (status & (1 << 31)) { + if (status & RADEON_RBBM_ACTIVE) { DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); return -1; } @@ -500,13 +459,13 @@ int r300_gpu_reset(struct radeon_device *rdev) /* * r300,r350,rv350,rv380 VRAM info */ -void r300_vram_info(struct radeon_device *rdev) +void r300_mc_init(struct radeon_device *rdev) { - uint32_t tmp; + u64 base; + u32 tmp; /* DDR for all card after R300 & IGP */ rdev->mc.vram_is_ddr = true; - tmp = RREG32(RADEON_MEM_CNTL); tmp &= R300_MEM_NUM_CHANNELS_MASK; switch (tmp) { @@ -515,8 +474,13 @@ void r300_vram_info(struct radeon_device *rdev) case 2: rdev->mc.vram_width = 256; break; default: rdev->mc.vram_width = 128; break; } - r100_vram_init_sizes(rdev); + base = rdev->mc.aper_base; + if (rdev->flags & RADEON_IS_IGP) + base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; + radeon_vram_location(rdev, &rdev->mc, base); + if (!(rdev->flags & RADEON_IS_AGP)) + radeon_gtt_location(rdev, &rdev->mc); } void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) @@ -578,6 +542,40 @@ void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) } +int rv370_get_pcie_lanes(struct radeon_device *rdev) +{ + u32 link_width_cntl; + + if (rdev->flags & RADEON_IS_IGP) + return 0; + + if (!(rdev->flags & RADEON_IS_PCIE)) + return 0; + + /* FIXME wait for idle */ + + if (rdev->family < CHIP_R600) + link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL); + else + link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL); + + switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) { + case RADEON_PCIE_LC_LINK_WIDTH_X0: + return 0; + case RADEON_PCIE_LC_LINK_WIDTH_X1: + return 1; + case RADEON_PCIE_LC_LINK_WIDTH_X2: + return 2; + case RADEON_PCIE_LC_LINK_WIDTH_X4: + return 4; + case RADEON_PCIE_LC_LINK_WIDTH_X8: + return 8; + case RADEON_PCIE_LC_LINK_WIDTH_X16: + default: + return 16; + } +} + #if defined(CONFIG_DEBUG_FS) static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data) { @@ -707,6 +705,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, tile_flags |= R300_TXO_MACRO_TILE; if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) tile_flags |= R300_TXO_MICRO_TILE; + else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) + tile_flags |= R300_TXO_MICRO_TILE_SQUARE; tmp = idx_value + ((u32)reloc->lobj.gpu_offset); tmp |= tile_flags; @@ -757,6 +757,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p, tile_flags |= R300_COLOR_TILE_ENABLE; if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) tile_flags |= R300_COLOR_MICROTILE_ENABLE; + else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) + tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE; tmp = idx_value & ~(0x7 << 16); tmp |= tile_flags; @@ -828,7 +830,9 @@ static int 
r300_packet0_check(struct radeon_cs_parser *p, if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) tile_flags |= R300_DEPTHMACROTILE_ENABLE; if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) - tile_flags |= R300_DEPTHMICROTILE_TILED;; + tile_flags |= R300_DEPTHMICROTILE_TILED; + else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) + tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE; tmp = idx_value & ~(0x7 << 16); tmp |= tile_flags; @@ -1387,12 +1391,15 @@ int r300_init(struct radeon_device *rdev) radeon_get_clock_info(rdev->ddev); /* Initialize power management */ radeon_pm_init(rdev); - /* Get vram informations */ - r300_vram_info(rdev); - /* Initialize memory controller (also test AGP) */ - r = r420_mc_init(rdev); - if (r) - return r; + /* initialize AGP */ + if (rdev->flags & RADEON_IS_AGP) { + r = radeon_agp_init(rdev); + if (r) { + radeon_agp_disable(rdev); + } + } + /* initialize memory controller */ + r300_mc_init(rdev); /* Fence driver */ r = radeon_fence_driver_init(rdev); if (r) diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c index 34bffa0e4b7..ea46d558e8f 100644 --- a/drivers/gpu/drm/radeon/r300_cmdbuf.c +++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c @@ -33,6 +33,7 @@ #include "drmP.h" #include "drm.h" +#include "drm_buffer.h" #include "radeon_drm.h" #include "radeon_drv.h" #include "r300_reg.h" @@ -299,46 +300,42 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t * int reg; int sz; int i; - int values[64]; + u32 *value; RING_LOCALS; sz = header.packet0.count; reg = (header.packet0.reghi << 8) | header.packet0.reglo; if ((sz > 64) || (sz < 0)) { - DRM_ERROR - ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n", - reg, sz); + DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n", + reg, sz); return -EINVAL; } + for (i = 0; i < sz; i++) { - values[i] = ((int *)cmdbuf->buf)[i]; switch (r300_reg_flags[(reg >> 2) + i]) { case MARK_SAFE: break; case MARK_CHECK_OFFSET: - if (!radeon_check_offset(dev_priv, (u32) values[i])) { - DRM_ERROR - ("Offset failed range check (reg=%04x sz=%d)\n", - reg, sz); + value = drm_buffer_pointer_to_dword(cmdbuf->buffer, i); + if (!radeon_check_offset(dev_priv, *value)) { + DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n", + reg, sz); return -EINVAL; } break; default: DRM_ERROR("Register %04x failed check as flag=%02x\n", - reg + i * 4, r300_reg_flags[(reg >> 2) + i]); + reg + i * 4, r300_reg_flags[(reg >> 2) + i]); return -EINVAL; } } BEGIN_RING(1 + sz); OUT_RING(CP_PACKET0(reg, sz - 1)); - OUT_RING_TABLE(values, sz); + OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz); ADVANCE_RING(); - cmdbuf->buf += sz * 4; - cmdbuf->bufsz -= sz * 4; - return 0; } @@ -362,7 +359,7 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv, if (!sz) return 0; - if (sz * 4 > cmdbuf->bufsz) + if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer)) return -EINVAL; if (reg + sz * 4 >= 0x10000) { @@ -380,12 +377,9 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv, BEGIN_RING(1 + sz); OUT_RING(CP_PACKET0(reg, sz - 1)); - OUT_RING_TABLE((int *)cmdbuf->buf, sz); + OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz); ADVANCE_RING(); - cmdbuf->buf += sz * 4; - cmdbuf->bufsz -= sz * 4; - return 0; } @@ -407,7 +401,7 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv, if (!sz) return 0; - if (sz * 16 > cmdbuf->bufsz) + if (sz * 16 > drm_buffer_unprocessed(cmdbuf->buffer)) return -EINVAL; /* VAP is very sensitive so we purge cache 
before we program it @@ -426,7 +420,7 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv, BEGIN_RING(3 + sz * 4); OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr); OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1)); - OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4); + OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * 4); ADVANCE_RING(); BEGIN_RING(2); @@ -434,9 +428,6 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv, OUT_RING(0); ADVANCE_RING(); - cmdbuf->buf += sz * 16; - cmdbuf->bufsz -= sz * 16; - return 0; } @@ -449,14 +440,14 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv, { RING_LOCALS; - if (8 * 4 > cmdbuf->bufsz) + if (8 * 4 > drm_buffer_unprocessed(cmdbuf->buffer)) return -EINVAL; BEGIN_RING(10); OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8)); OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING | (1 << R300_PRIM_NUM_VERTICES_SHIFT)); - OUT_RING_TABLE((int *)cmdbuf->buf, 8); + OUT_RING_DRM_BUFFER(cmdbuf->buffer, 8); ADVANCE_RING(); BEGIN_RING(4); @@ -468,9 +459,6 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv, /* set flush flag */ dev_priv->track_flush |= RADEON_FLUSH_EMITED; - cmdbuf->buf += 8 * 4; - cmdbuf->bufsz -= 8 * 4; - return 0; } @@ -480,28 +468,29 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv, { int count, i, k; #define MAX_ARRAY_PACKET 64 - u32 payload[MAX_ARRAY_PACKET]; + u32 *data; u32 narrays; RING_LOCALS; - count = (header >> 16) & 0x3fff; + count = (header & RADEON_CP_PACKET_COUNT_MASK) >> 16; if ((count + 1) > MAX_ARRAY_PACKET) { DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", count); return -EINVAL; } - memset(payload, 0, MAX_ARRAY_PACKET * 4); - memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4); - /* carefully check packet contents */ - narrays = payload[0]; + /* We have already read the header so advance the buffer. 
*/ + drm_buffer_advance(cmdbuf->buffer, 4); + + narrays = *(u32 *)drm_buffer_pointer_to_dword(cmdbuf->buffer, 0); k = 0; i = 1; while ((k < narrays) && (i < (count + 1))) { i++; /* skip attribute field */ - if (!radeon_check_offset(dev_priv, payload[i])) { + data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i); + if (!radeon_check_offset(dev_priv, *data)) { DRM_ERROR ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i); @@ -512,7 +501,8 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv, if (k == narrays) break; /* have one more to process, they come in pairs */ - if (!radeon_check_offset(dev_priv, payload[i])) { + data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i); + if (!radeon_check_offset(dev_priv, *data)) { DRM_ERROR ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i); @@ -533,30 +523,30 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv, BEGIN_RING(count + 2); OUT_RING(header); - OUT_RING_TABLE(payload, count + 1); + OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 1); ADVANCE_RING(); - cmdbuf->buf += (count + 2) * 4; - cmdbuf->bufsz -= (count + 2) * 4; - return 0; } static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv, drm_radeon_kcmd_buffer_t *cmdbuf) { - u32 *cmd = (u32 *) cmdbuf->buf; + u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0); int count, ret; RING_LOCALS; - count=(cmd[0]>>16) & 0x3fff; - if (cmd[0] & 0x8000) { - u32 offset; + count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16; - if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL + if (*cmd & 0x8000) { + u32 offset; + u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1); + if (*cmd1 & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { - offset = cmd[2] << 10; + + u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2); + offset = *cmd2 << 10; ret = !radeon_check_offset(dev_priv, offset); if (ret) { DRM_ERROR("Invalid bitblt first offset is %08X\n", offset); @@ -564,9 +554,10 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv, } } - if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && - (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { - offset = cmd[3] << 10; + if ((*cmd1 & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && + (*cmd1 & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { + u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3); + offset = *cmd3 << 10; ret = !radeon_check_offset(dev_priv, offset); if (ret) { DRM_ERROR("Invalid bitblt second offset is %08X\n", offset); @@ -577,28 +568,25 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv, } BEGIN_RING(count+2); - OUT_RING(cmd[0]); - OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1); + OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2); ADVANCE_RING(); - cmdbuf->buf += (count+2)*4; - cmdbuf->bufsz -= (count+2)*4; - return 0; } static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv, drm_radeon_kcmd_buffer_t *cmdbuf) { - u32 *cmd; + u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0); + u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1); int count; int expected_count; RING_LOCALS; - cmd = (u32 *) cmdbuf->buf; - count = (cmd[0]>>16) & 0x3fff; - expected_count = cmd[1] >> 16; - if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit)) + count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16; + + expected_count = *cmd1 >> 16; + if (!(*cmd1 & R300_VAP_VF_CNTL__INDEX_SIZE_32bit)) expected_count = (expected_count+1)/2; if (count && 
count != expected_count) { @@ -608,55 +596,53 @@ static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv, } BEGIN_RING(count+2); - OUT_RING(cmd[0]); - OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1); + OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2); ADVANCE_RING(); - cmdbuf->buf += (count+2)*4; - cmdbuf->bufsz -= (count+2)*4; - if (!count) { - drm_r300_cmd_header_t header; + drm_r300_cmd_header_t stack_header, *header; + u32 *cmd1, *cmd2, *cmd3; - if (cmdbuf->bufsz < 4*4 + sizeof(header)) { + if (drm_buffer_unprocessed(cmdbuf->buffer) + < 4*4 + sizeof(stack_header)) { DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n"); return -EINVAL; } - header.u = *(unsigned int *)cmdbuf->buf; + header = drm_buffer_read_object(cmdbuf->buffer, + sizeof(stack_header), &stack_header); - cmdbuf->buf += sizeof(header); - cmdbuf->bufsz -= sizeof(header); - cmd = (u32 *) cmdbuf->buf; + cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0); + cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1); + cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2); + cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3); - if (header.header.cmd_type != R300_CMD_PACKET3 || - header.packet3.packet != R300_CMD_PACKET3_RAW || - cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) { + if (header->header.cmd_type != R300_CMD_PACKET3 || + header->packet3.packet != R300_CMD_PACKET3_RAW || + *cmd != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) { DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n"); return -EINVAL; } - if ((cmd[1] & 0x8000ffff) != 0x80000810) { - DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); + if ((*cmd1 & 0x8000ffff) != 0x80000810) { + DRM_ERROR("Invalid indx_buffer reg address %08X\n", + *cmd1); return -EINVAL; } - if (!radeon_check_offset(dev_priv, cmd[2])) { - DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); + if (!radeon_check_offset(dev_priv, *cmd2)) { + DRM_ERROR("Invalid indx_buffer offset is %08X\n", + *cmd2); return -EINVAL; } - if (cmd[3] != expected_count) { + if (*cmd3 != expected_count) { DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n", - cmd[3], expected_count); + *cmd3, expected_count); return -EINVAL; } BEGIN_RING(4); - OUT_RING(cmd[0]); - OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3); + OUT_RING_DRM_BUFFER(cmdbuf->buffer, 4); ADVANCE_RING(); - - cmdbuf->buf += 4*4; - cmdbuf->bufsz -= 4*4; } return 0; @@ -665,39 +651,39 @@ static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv, static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv, drm_radeon_kcmd_buffer_t *cmdbuf) { - u32 header; + u32 *header; int count; RING_LOCALS; - if (4 > cmdbuf->bufsz) + if (4 > drm_buffer_unprocessed(cmdbuf->buffer)) return -EINVAL; /* Fixme !! This simply emits a packet without much checking. We need to be smarter. */ /* obtain first word - actual packet3 header */ - header = *(u32 *) cmdbuf->buf; + header = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0); /* Is it packet 3 ? 
*/ - if ((header >> 30) != 0x3) { - DRM_ERROR("Not a packet3 header (0x%08x)\n", header); + if ((*header >> 30) != 0x3) { + DRM_ERROR("Not a packet3 header (0x%08x)\n", *header); return -EINVAL; } - count = (header >> 16) & 0x3fff; + count = (*header >> 16) & 0x3fff; /* Check again now that we know how much data to expect */ - if ((count + 2) * 4 > cmdbuf->bufsz) { + if ((count + 2) * 4 > drm_buffer_unprocessed(cmdbuf->buffer)) { DRM_ERROR ("Expected packet3 of length %d but have only %d bytes left\n", - (count + 2) * 4, cmdbuf->bufsz); + (count + 2) * 4, drm_buffer_unprocessed(cmdbuf->buffer)); return -EINVAL; } /* Is it a packet type we know about ? */ - switch (header & 0xff00) { + switch (*header & 0xff00) { case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */ - return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header); + return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, *header); case RADEON_CNTL_BITBLT_MULTI: return r300_emit_bitblt_multi(dev_priv, cmdbuf); @@ -723,18 +709,14 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv, /* these packets are safe */ break; default: - DRM_ERROR("Unknown packet3 header (0x%08x)\n", header); + DRM_ERROR("Unknown packet3 header (0x%08x)\n", *header); return -EINVAL; } BEGIN_RING(count + 2); - OUT_RING(header); - OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1); + OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2); ADVANCE_RING(); - cmdbuf->buf += (count + 2) * 4; - cmdbuf->bufsz -= (count + 2) * 4; - return 0; } @@ -748,8 +730,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv, { int n; int ret; - char *orig_buf = cmdbuf->buf; - int orig_bufsz = cmdbuf->bufsz; + int orig_iter = cmdbuf->buffer->iterator; /* This is a do-while-loop so that we run the interior at least once, * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale. 
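[Illustrative note, not part of the patch: the r300_cmdbuf.c hunks above convert the command-stream parser from raw cmdbuf->buf / cmdbuf->bufsz pointer arithmetic to the drm_buffer iterator helpers (drm_buffer_unprocessed, drm_buffer_pointer_to_dword, drm_buffer_advance, drm_buffer_read_object) and save/restore only the iterator when replaying a packet per cliprect. The sketch below is a minimal, self-contained model of that access pattern; the struct and function names are simplified stand-ins, not the kernel's drm_buffer implementation.]

#include <stddef.h>
#include <stdint.h>

struct sketch_buffer {
	const uint8_t *data;	/* command stream copied from user space */
	size_t size;		/* total length in bytes */
	size_t iterator;	/* current read position in bytes */
};

/* Bytes not yet consumed (stands in for drm_buffer_unprocessed()). */
static size_t sketch_unprocessed(const struct sketch_buffer *buf)
{
	return buf->size - buf->iterator;
}

/* Peek at dword idx past the current position without consuming anything. */
static const uint32_t *sketch_pointer_to_dword(const struct sketch_buffer *buf,
					       size_t idx)
{
	return (const uint32_t *)(buf->data + buf->iterator) + idx;
}

/* Consume bytes from the stream (stands in for drm_buffer_advance()). */
static void sketch_advance(struct sketch_buffer *buf, size_t bytes)
{
	buf->iterator += bytes;
}

/*
 * Replaying the same packet once per cliprect only requires saving and
 * restoring the iterator, which is what the r300_emit_packet3 hunk does
 * with orig_iter instead of stashing a raw pointer and byte count.
 */
static void sketch_replay_per_cliprect(struct sketch_buffer *buf, int nbox)
{
	size_t orig_iter = buf->iterator;
	int n = 0;

	do {
		buf->iterator = orig_iter;	/* parse the same packet again */
		if (sketch_unprocessed(buf) < 4)
			return;			/* stream too short, bail out */
		if ((*sketch_pointer_to_dword(buf, 0) >> 30) != 0x3)
			return;			/* not a packet3 header */
		/* ... emit the packet for cliprect n here ... */
		sketch_advance(buf, 4);
	} while (++n < nbox);
}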
@@ -761,8 +742,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv, if (ret) return ret; - cmdbuf->buf = orig_buf; - cmdbuf->bufsz = orig_bufsz; + cmdbuf->buffer->iterator = orig_iter; } switch (header.packet3.packet) { @@ -785,9 +765,9 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv, break; default: - DRM_ERROR("bad packet3 type %i at %p\n", + DRM_ERROR("bad packet3 type %i at byte %d\n", header.packet3.packet, - cmdbuf->buf - sizeof(header)); + cmdbuf->buffer->iterator - (int)sizeof(header)); return -EINVAL; } @@ -923,12 +903,13 @@ static int r300_scratch(drm_radeon_private_t *dev_priv, drm_r300_cmd_header_t header) { u32 *ref_age_base; - u32 i, buf_idx, h_pending; - u64 ptr_addr; + u32 i, *buf_idx, h_pending; + u64 *ptr_addr; + u64 stack_ptr_addr; RING_LOCALS; - if (cmdbuf->bufsz < - (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) { + if (drm_buffer_unprocessed(cmdbuf->buffer) < + (sizeof(u64) + header.scratch.n_bufs * sizeof(*buf_idx))) { return -EINVAL; } @@ -938,36 +919,35 @@ static int r300_scratch(drm_radeon_private_t *dev_priv, dev_priv->scratch_ages[header.scratch.reg]++; - ptr_addr = get_unaligned((u64 *)cmdbuf->buf); - ref_age_base = (u32 *)(unsigned long)ptr_addr; - - cmdbuf->buf += sizeof(u64); - cmdbuf->bufsz -= sizeof(u64); + ptr_addr = drm_buffer_read_object(cmdbuf->buffer, + sizeof(stack_ptr_addr), &stack_ptr_addr); + ref_age_base = (u32 *)(unsigned long)*ptr_addr; for (i=0; i < header.scratch.n_bufs; i++) { - buf_idx = *(u32 *)cmdbuf->buf; - buf_idx *= 2; /* 8 bytes per buf */ + buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0); + *buf_idx *= 2; /* 8 bytes per buf */ - if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) { + if (DRM_COPY_TO_USER(ref_age_base + *buf_idx, + &dev_priv->scratch_ages[header.scratch.reg], + sizeof(u32))) return -EINVAL; - } - if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) { + if (DRM_COPY_FROM_USER(&h_pending, + ref_age_base + *buf_idx + 1, + sizeof(u32))) return -EINVAL; - } - if (h_pending == 0) { + if (h_pending == 0) return -EINVAL; - } h_pending--; - if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) { + if (DRM_COPY_TO_USER(ref_age_base + *buf_idx + 1, + &h_pending, + sizeof(u32))) return -EINVAL; - } - cmdbuf->buf += sizeof(buf_idx); - cmdbuf->bufsz -= sizeof(buf_idx); + drm_buffer_advance(cmdbuf->buffer, sizeof(*buf_idx)); } BEGIN_RING(2); @@ -1009,19 +989,16 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv, DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type); if (!sz) return 0; - if (sz * stride * 4 > cmdbuf->bufsz) + if (sz * stride * 4 > drm_buffer_unprocessed(cmdbuf->buffer)) return -EINVAL; BEGIN_RING(3 + sz * stride); OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr); OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1)); - OUT_RING_TABLE((int *)cmdbuf->buf, sz * stride); + OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * stride); ADVANCE_RING(); - cmdbuf->buf += sz * stride * 4; - cmdbuf->bufsz -= sz * stride * 4; - return 0; } @@ -1053,19 +1030,18 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, goto cleanup; } - while (cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) { + while (drm_buffer_unprocessed(cmdbuf->buffer) + >= sizeof(drm_r300_cmd_header_t)) { int idx; - drm_r300_cmd_header_t header; - - header.u = *(unsigned int *)cmdbuf->buf; + drm_r300_cmd_header_t *header, stack_header; - cmdbuf->buf += sizeof(header); - cmdbuf->bufsz -= 
sizeof(header); + header = drm_buffer_read_object(cmdbuf->buffer, + sizeof(stack_header), &stack_header); - switch (header.header.cmd_type) { + switch (header->header.cmd_type) { case R300_CMD_PACKET0: DRM_DEBUG("R300_CMD_PACKET0\n"); - ret = r300_emit_packet0(dev_priv, cmdbuf, header); + ret = r300_emit_packet0(dev_priv, cmdbuf, *header); if (ret) { DRM_ERROR("r300_emit_packet0 failed\n"); goto cleanup; @@ -1074,7 +1050,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, case R300_CMD_VPU: DRM_DEBUG("R300_CMD_VPU\n"); - ret = r300_emit_vpu(dev_priv, cmdbuf, header); + ret = r300_emit_vpu(dev_priv, cmdbuf, *header); if (ret) { DRM_ERROR("r300_emit_vpu failed\n"); goto cleanup; @@ -1083,7 +1059,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, case R300_CMD_PACKET3: DRM_DEBUG("R300_CMD_PACKET3\n"); - ret = r300_emit_packet3(dev_priv, cmdbuf, header); + ret = r300_emit_packet3(dev_priv, cmdbuf, *header); if (ret) { DRM_ERROR("r300_emit_packet3 failed\n"); goto cleanup; @@ -1117,8 +1093,8 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, int i; RING_LOCALS; - BEGIN_RING(header.delay.count); - for (i = 0; i < header.delay.count; i++) + BEGIN_RING(header->delay.count); + for (i = 0; i < header->delay.count; i++) OUT_RING(RADEON_CP_PACKET2); ADVANCE_RING(); } @@ -1126,7 +1102,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, case R300_CMD_DMA_DISCARD: DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n"); - idx = header.dma.buf_idx; + idx = header->dma.buf_idx; if (idx < 0 || idx >= dma->buf_count) { DRM_ERROR("buffer index %d (of %d max)\n", idx, dma->buf_count - 1); @@ -1149,12 +1125,12 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, case R300_CMD_WAIT: DRM_DEBUG("R300_CMD_WAIT\n"); - r300_cmd_wait(dev_priv, header); + r300_cmd_wait(dev_priv, *header); break; case R300_CMD_SCRATCH: DRM_DEBUG("R300_CMD_SCRATCH\n"); - ret = r300_scratch(dev_priv, cmdbuf, header); + ret = r300_scratch(dev_priv, cmdbuf, *header); if (ret) { DRM_ERROR("r300_scratch failed\n"); goto cleanup; @@ -1168,16 +1144,16 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, goto cleanup; } DRM_DEBUG("R300_CMD_R500FP\n"); - ret = r300_emit_r500fp(dev_priv, cmdbuf, header); + ret = r300_emit_r500fp(dev_priv, cmdbuf, *header); if (ret) { DRM_ERROR("r300_emit_r500fp failed\n"); goto cleanup; } break; default: - DRM_ERROR("bad cmd_type %i at %p\n", - header.header.cmd_type, - cmdbuf->buf - sizeof(header)); + DRM_ERROR("bad cmd_type %i at byte %d\n", + header->header.cmd_type, + cmdbuf->buffer->iterator - (int)sizeof(*header)); ret = -EINVAL; goto cleanup; } diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h index 1735a2b6958..1a0d5362cd7 100644 --- a/drivers/gpu/drm/radeon/r300_reg.h +++ b/drivers/gpu/drm/radeon/r300_reg.h @@ -952,6 +952,7 @@ # define R300_TXO_ENDIAN_HALFDW_SWAP (3 << 0) # define R300_TXO_MACRO_TILE (1 << 2) # define R300_TXO_MICRO_TILE (1 << 3) +# define R300_TXO_MICRO_TILE_SQUARE (2 << 3) # define R300_TXO_OFFSET_MASK 0xffffffe0 # define R300_TXO_OFFSET_SHIFT 5 /* END: Guess from R200 */ @@ -1360,6 +1361,7 @@ # define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */ # define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */ # define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */ +# define R300_COLOR_MICROTILE_SQUARE_ENABLE (2 << 17) # define R300_COLOR_ENDIAN_NO_SWAP (0 << 18) /* GUESS */ # define R300_COLOR_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */ # define R300_COLOR_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */ diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 
d9373246c97..c7593b8f58e 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -40,28 +40,6 @@ static void r420_set_reg_safe(struct radeon_device *rdev) rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm); } -int r420_mc_init(struct radeon_device *rdev) -{ - int r; - - /* Setup GPU memory space */ - rdev->mc.vram_location = 0xFFFFFFFFUL; - rdev->mc.gtt_location = 0xFFFFFFFFUL; - if (rdev->flags & RADEON_IS_AGP) { - r = radeon_agp_init(rdev); - if (r) { - radeon_agp_disable(rdev); - } else { - rdev->mc.gtt_location = rdev->mc.agp_base; - } - } - r = radeon_mc_setup(rdev); - if (r) { - return r; - } - return 0; -} - void r420_pipes_init(struct radeon_device *rdev) { unsigned tmp; @@ -69,7 +47,8 @@ void r420_pipes_init(struct radeon_device *rdev) unsigned num_pipes; /* GA_ENHANCE workaround TCL deadlock issue */ - WREG32(0x4274, (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3)); + WREG32(R300_GA_ENHANCE, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL | + (1 << 2) | (1 << 3)); /* add idle wait as per freedesktop.org bug 24041 */ if (r100_gui_wait_for_idle(rdev)) { printk(KERN_WARNING "Failed to wait GUI idle while " @@ -97,17 +76,17 @@ void r420_pipes_init(struct radeon_device *rdev) tmp = (7 << 1); break; } - WREG32(0x42C8, (1 << num_pipes) - 1); + WREG32(R500_SU_REG_DEST, (1 << num_pipes) - 1); /* Sub pixel 1/12 so we can have 4K rendering according to doc */ - tmp |= (1 << 4) | (1 << 0); - WREG32(0x4018, tmp); + tmp |= R300_TILE_SIZE_16 | R300_ENABLE_TILING; + WREG32(R300_GB_TILE_CONFIG, tmp); if (r100_gui_wait_for_idle(rdev)) { printk(KERN_WARNING "Failed to wait GUI idle while " "programming pipes. Bad things might happen.\n"); } - tmp = RREG32(0x170C); - WREG32(0x170C, tmp | (1 << 31)); + tmp = RREG32(R300_DST_PIPE_CONFIG); + WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG); WREG32(R300_RB2D_DSTCACHE_MODE, RREG32(R300_RB2D_DSTCACHE_MODE) | @@ -348,13 +327,15 @@ int r420_init(struct radeon_device *rdev) radeon_get_clock_info(rdev->ddev); /* Initialize power management */ radeon_pm_init(rdev); - /* Get vram informations */ - r300_vram_info(rdev); - /* Initialize memory controller (also test AGP) */ - r = r420_mc_init(rdev); - if (r) { - return r; + /* initialize AGP */ + if (rdev->flags & RADEON_IS_AGP) { + r = radeon_agp_init(rdev); + if (r) { + radeon_agp_disable(rdev); + } } + /* initialize memory controller */ + r300_mc_init(rdev); r420_debugfs(rdev); /* Fence driver */ r = radeon_fence_driver_init(rdev); diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h index 74ad89bdf2b..0cf2ad2a558 100644 --- a/drivers/gpu/drm/radeon/r500_reg.h +++ b/drivers/gpu/drm/radeon/r500_reg.h @@ -717,54 +717,62 @@ #define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988 #define AVIVO_DC_GPIO_HPD_A 0x7e94 - -#define AVIVO_GPIO_0 0x7e30 -#define AVIVO_GPIO_1 0x7e40 -#define AVIVO_GPIO_2 0x7e50 -#define AVIVO_GPIO_3 0x7e60 - #define AVIVO_DC_GPIO_HPD_Y 0x7e9c -#define AVIVO_I2C_STATUS 0x7d30 -# define AVIVO_I2C_STATUS_DONE (1 << 0) -# define AVIVO_I2C_STATUS_NACK (1 << 1) -# define AVIVO_I2C_STATUS_HALT (1 << 2) -# define AVIVO_I2C_STATUS_GO (1 << 3) -# define AVIVO_I2C_STATUS_MASK 0x7 -/* If radeon_mm_i2c is to be believed, this is HALT, NACK, and maybe - * DONE? 
*/ -# define AVIVO_I2C_STATUS_CMD_RESET 0x7 -# define AVIVO_I2C_STATUS_CMD_WAIT (1 << 3) -#define AVIVO_I2C_STOP 0x7d34 -#define AVIVO_I2C_START_CNTL 0x7d38 -# define AVIVO_I2C_START (1 << 8) -# define AVIVO_I2C_CONNECTOR0 (0 << 16) -# define AVIVO_I2C_CONNECTOR1 (1 << 16) -#define R520_I2C_START (1<<0) -#define R520_I2C_STOP (1<<1) -#define R520_I2C_RX (1<<2) -#define R520_I2C_EN (1<<8) -#define R520_I2C_DDC1 (0<<16) -#define R520_I2C_DDC2 (1<<16) -#define R520_I2C_DDC3 (2<<16) -#define R520_I2C_DDC_MASK (3<<16) -#define AVIVO_I2C_CONTROL2 0x7d3c -# define AVIVO_I2C_7D3C_SIZE_SHIFT 8 -# define AVIVO_I2C_7D3C_SIZE_MASK (0xf << 8) -#define AVIVO_I2C_CONTROL3 0x7d40 -/* Reading is done 4 bytes at a time: read the bottom 8 bits from - * 7d44, four times in a row. - * Writing is a little more complex. First write DATA with - * 0xnnnnnnzz, then 0xnnnnnnyy, where nnnnnn is some non-deterministic - * magic number, zz is, I think, the slave address, and yy is the byte - * you want to write. */ -#define AVIVO_I2C_DATA 0x7d44 -#define R520_I2C_ADDR_COUNT_MASK (0x7) -#define R520_I2C_DATA_COUNT_SHIFT (8) -#define R520_I2C_DATA_COUNT_MASK (0xF00) -#define AVIVO_I2C_CNTL 0x7d50 -# define AVIVO_I2C_EN (1 << 0) -# define AVIVO_I2C_RESET (1 << 8) +#define AVIVO_DC_I2C_STATUS1 0x7d30 +# define AVIVO_DC_I2C_DONE (1 << 0) +# define AVIVO_DC_I2C_NACK (1 << 1) +# define AVIVO_DC_I2C_HALT (1 << 2) +# define AVIVO_DC_I2C_GO (1 << 3) +#define AVIVO_DC_I2C_RESET 0x7d34 +# define AVIVO_DC_I2C_SOFT_RESET (1 << 0) +# define AVIVO_DC_I2C_ABORT (1 << 8) +#define AVIVO_DC_I2C_CONTROL1 0x7d38 +# define AVIVO_DC_I2C_START (1 << 0) +# define AVIVO_DC_I2C_STOP (1 << 1) +# define AVIVO_DC_I2C_RECEIVE (1 << 2) +# define AVIVO_DC_I2C_EN (1 << 8) +# define AVIVO_DC_I2C_PIN_SELECT(x) ((x) << 16) +# define AVIVO_SEL_DDC1 0 +# define AVIVO_SEL_DDC2 1 +# define AVIVO_SEL_DDC3 2 +#define AVIVO_DC_I2C_CONTROL2 0x7d3c +# define AVIVO_DC_I2C_ADDR_COUNT(x) ((x) << 0) +# define AVIVO_DC_I2C_DATA_COUNT(x) ((x) << 8) +#define AVIVO_DC_I2C_CONTROL3 0x7d40 +# define AVIVO_DC_I2C_DATA_DRIVE_EN (1 << 0) +# define AVIVO_DC_I2C_DATA_DRIVE_SEL (1 << 1) +# define AVIVO_DC_I2C_CLK_DRIVE_EN (1 << 7) +# define AVIVO_DC_I2C_RD_INTRA_BYTE_DELAY(x) ((x) << 8) +# define AVIVO_DC_I2C_WR_INTRA_BYTE_DELAY(x) ((x) << 16) +# define AVIVO_DC_I2C_TIME_LIMIT(x) ((x) << 24) +#define AVIVO_DC_I2C_DATA 0x7d44 +#define AVIVO_DC_I2C_INTERRUPT_CONTROL 0x7d48 +# define AVIVO_DC_I2C_INTERRUPT_STATUS (1 << 0) +# define AVIVO_DC_I2C_INTERRUPT_AK (1 << 8) +# define AVIVO_DC_I2C_INTERRUPT_ENABLE (1 << 16) +#define AVIVO_DC_I2C_ARBITRATION 0x7d50 +# define AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C (1 << 0) +# define AVIVO_DC_I2C_SW_CAN_USE_I2C (1 << 1) +# define AVIVO_DC_I2C_SW_DONE_USING_I2C (1 << 8) +# define AVIVO_DC_I2C_HW_NEEDS_I2C (1 << 9) +# define AVIVO_DC_I2C_ABORT_HDCP_I2C (1 << 16) +# define AVIVO_DC_I2C_HW_USING_I2C (1 << 17) + +#define AVIVO_DC_GPIO_DDC1_MASK 0x7e40 +#define AVIVO_DC_GPIO_DDC1_A 0x7e44 +#define AVIVO_DC_GPIO_DDC1_EN 0x7e48 +#define AVIVO_DC_GPIO_DDC1_Y 0x7e4c + +#define AVIVO_DC_GPIO_DDC2_MASK 0x7e50 +#define AVIVO_DC_GPIO_DDC2_A 0x7e54 +#define AVIVO_DC_GPIO_DDC2_EN 0x7e58 +#define AVIVO_DC_GPIO_DDC2_Y 0x7e5c + +#define AVIVO_DC_GPIO_DDC3_MASK 0x7e60 +#define AVIVO_DC_GPIO_DDC3_A 0x7e64 +#define AVIVO_DC_GPIO_DDC3_EN 0x7e68 +#define AVIVO_DC_GPIO_DDC3_Y 0x7e6c #define AVIVO_DISP_INTERRUPT_STATUS 0x7edc # define AVIVO_D1_VBLANK_INTERRUPT (1 << 4) diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index ddf5731eba0..2b8a5dd1351 
100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c @@ -119,13 +119,15 @@ static void r520_vram_get_type(struct radeon_device *rdev) rdev->mc.vram_width *= 2; } -void r520_vram_info(struct radeon_device *rdev) +void r520_mc_init(struct radeon_device *rdev) { fixed20_12 a; r520_vram_get_type(rdev); - r100_vram_init_sizes(rdev); + radeon_vram_location(rdev, &rdev->mc, 0); + if (!(rdev->flags & RADEON_IS_AGP)) + radeon_gtt_location(rdev, &rdev->mc); /* FIXME: we should enforce default clock in case GPU is not in * default setup */ @@ -267,12 +269,15 @@ int r520_init(struct radeon_device *rdev) radeon_get_clock_info(rdev->ddev); /* Initialize power management */ radeon_pm_init(rdev); - /* Get vram informations */ - r520_vram_info(rdev); - /* Initialize memory controller (also test AGP) */ - r = r420_mc_init(rdev); - if (r) - return r; + /* initialize AGP */ + if (rdev->flags & RADEON_IS_AGP) { + r = radeon_agp_init(rdev); + if (r) { + radeon_agp_disable(rdev); + } + } + /* initialize memory controller */ + r520_mc_init(rdev); rv515_debugfs(rdev); /* Fence driver */ r = radeon_fence_driver_init(rdev); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 2ffcf5a0355..c5229019729 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -353,23 +353,14 @@ void r600_hpd_fini(struct radeon_device *rdev) /* * R600 PCIE GART */ -int r600_gart_clear_page(struct radeon_device *rdev, int i) -{ - void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; - u64 pte; - - if (i < 0 || i > rdev->gart.num_gpu_pages) - return -EINVAL; - pte = 0; - writeq(pte, ((void __iomem *)ptr) + (i * 8)); - return 0; -} - void r600_pcie_gart_tlb_flush(struct radeon_device *rdev) { unsigned i; u32 tmp; + /* flush hdp cache so updates hit vram */ + WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1); + WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12); WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12); WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1)); @@ -416,6 +407,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev) r = radeon_gart_table_vram_pin(rdev); if (r) return r; + radeon_gart_restore(rdev); /* Setup L2 cache */ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | @@ -619,6 +611,68 @@ static void r600_mc_program(struct radeon_device *rdev) rv515_vga_render_disable(rdev); } +/** + * r600_vram_gtt_location - try to find VRAM & GTT location + * @rdev: radeon device structure holding all necessary informations + * @mc: memory controller structure holding memory informations + * + * Function will place try to place VRAM at same place as in CPU (PCI) + * address space as some GPU seems to have issue when we reprogram at + * different address space. + * + * If there is not enough space to fit the unvisible VRAM after the + * aperture then we limit the VRAM size to the aperture. + * + * If we are using AGP then place VRAM adjacent to AGP aperture are we need + * them to be in one from GPU point of view so that we can program GPU to + * catch access outside them (weird GPU policy see ??). + * + * This function will never fails, worst case are limiting VRAM or GTT. + * + * Note: GTT start, end, size should be initialized before calling this + * function on AGP platform. 
+ */ +void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) +{ + u64 size_bf, size_af; + + if (mc->mc_vram_size > 0xE0000000) { + /* leave room for at least 512M GTT */ + dev_warn(rdev->dev, "limiting VRAM\n"); + mc->real_vram_size = 0xE0000000; + mc->mc_vram_size = 0xE0000000; + } + if (rdev->flags & RADEON_IS_AGP) { + size_bf = mc->gtt_start; + size_af = 0xFFFFFFFF - mc->gtt_end + 1; + if (size_bf > size_af) { + if (mc->mc_vram_size > size_bf) { + dev_warn(rdev->dev, "limiting VRAM\n"); + mc->real_vram_size = size_bf; + mc->mc_vram_size = size_bf; + } + mc->vram_start = mc->gtt_start - mc->mc_vram_size; + } else { + if (mc->mc_vram_size > size_af) { + dev_warn(rdev->dev, "limiting VRAM\n"); + mc->real_vram_size = size_af; + mc->mc_vram_size = size_af; + } + mc->vram_start = mc->gtt_end; + } + mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; + dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n", + mc->mc_vram_size >> 20, mc->vram_start, + mc->vram_end, mc->real_vram_size >> 20); + } else { + u64 base = 0; + if (rdev->flags & RADEON_IS_IGP) + base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; + radeon_vram_location(rdev, &rdev->mc, base); + radeon_gtt_location(rdev, mc); + } +} + int r600_mc_init(struct radeon_device *rdev) { fixed20_12 a; @@ -658,75 +712,21 @@ int r600_mc_init(struct radeon_device *rdev) /* Setup GPU memory space */ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); - - if (rdev->mc.mc_vram_size > rdev->mc.aper_size) + rdev->mc.visible_vram_size = rdev->mc.aper_size; + /* FIXME remove this once we support unmappable VRAM */ + if (rdev->mc.mc_vram_size > rdev->mc.aper_size) { rdev->mc.mc_vram_size = rdev->mc.aper_size; - - if (rdev->mc.real_vram_size > rdev->mc.aper_size) rdev->mc.real_vram_size = rdev->mc.aper_size; - - if (rdev->flags & RADEON_IS_AGP) { - /* gtt_size is setup by radeon_agp_init */ - rdev->mc.gtt_location = rdev->mc.agp_base; - tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size; - /* Try to put vram before or after AGP because we - * we want SYSTEM_APERTURE to cover both VRAM and - * AGP so that GPU can catch out of VRAM/AGP access - */ - if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) { - /* Enough place before */ - rdev->mc.vram_location = rdev->mc.gtt_location - - rdev->mc.mc_vram_size; - } else if (tmp > rdev->mc.mc_vram_size) { - /* Enough place after */ - rdev->mc.vram_location = rdev->mc.gtt_location + - rdev->mc.gtt_size; - } else { - /* Try to setup VRAM then AGP might not - * not work on some card - */ - rdev->mc.vram_location = 0x00000000UL; - rdev->mc.gtt_location = rdev->mc.mc_vram_size; - } - } else { - rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; - rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) & - 0xFFFF) << 24; - tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size; - if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) { - /* Enough place after vram */ - rdev->mc.gtt_location = tmp; - } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) { - /* Enough place before vram */ - rdev->mc.gtt_location = 0; - } else { - /* Not enough place after or before shrink - * gart size - */ - if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) { - rdev->mc.gtt_location = 0; - rdev->mc.gtt_size = rdev->mc.vram_location; - } else { - rdev->mc.gtt_location = tmp; - rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp; - } - } - rdev->mc.gtt_location = rdev->mc.mc_vram_size; } - rdev->mc.vram_start = rdev->mc.vram_location; - rdev->mc.vram_end = 
rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; - rdev->mc.gtt_start = rdev->mc.gtt_location; - rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; + r600_vram_gtt_location(rdev, &rdev->mc); /* FIXME: we should enforce default clock in case GPU is not in * default setup */ a.full = rfixed_const(100); rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); - if (rdev->flags & RADEON_IS_IGP) rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); - return 0; } @@ -981,6 +981,9 @@ void r600_gpu_init(struct radeon_device *rdev) { u32 tiling_config; u32 ramcfg; + u32 backend_map; + u32 cc_rb_backend_disable; + u32 cc_gc_shader_pipe_config; u32 tmp; int i, j; u32 sq_config; @@ -1090,8 +1093,11 @@ void r600_gpu_init(struct radeon_device *rdev) default: break; } + rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes; + rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); tiling_config |= GROUP_SIZE(0); + rdev->config.r600.tiling_group_size = 256; tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT; if (tmp > 3) { tiling_config |= ROW_TILING(3); @@ -1101,24 +1107,33 @@ void r600_gpu_init(struct radeon_device *rdev) tiling_config |= SAMPLE_SPLIT(tmp); } tiling_config |= BANK_SWAPS(1); - tmp = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes, - rdev->config.r600.max_backends, - (0xff << rdev->config.r600.max_backends) & 0xff); - tiling_config |= BACKEND_MAP(tmp); + + cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000; + cc_rb_backend_disable |= + BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK); + + cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00; + cc_gc_shader_pipe_config |= + INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK); + cc_gc_shader_pipe_config |= + INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK); + + backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes, + (R6XX_MAX_BACKENDS - + r600_count_pipe_bits((cc_rb_backend_disable & + R6XX_MAX_BACKENDS_MASK) >> 16)), + (cc_rb_backend_disable >> 16)); + + tiling_config |= BACKEND_MAP(backend_map); WREG32(GB_TILING_CONFIG, tiling_config); WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff); - tmp = BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK); - WREG32(CC_RB_BACKEND_DISABLE, tmp); - /* Setup pipes */ - tmp = INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK); - tmp |= INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK); - WREG32(CC_GC_SHADER_PIPE_CONFIG, tmp); - WREG32(GC_USER_SHADER_PIPE_CONFIG, tmp); + WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); + WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); - tmp = R6XX_MAX_BACKENDS - r600_count_pipe_bits(tmp & INACTIVE_QD_PIPES_MASK); + tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK); @@ -1783,12 +1798,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence) { 
/* Also consider EVENT_WRITE_EOP. it handles the interrupts + timestamps + events */ + + radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0)); + radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT); + /* wait for 3D idle clean */ + radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); + radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); + radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit); /* Emit fence sequence & fire IRQ */ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); radeon_ring_write(rdev, fence->seq); - radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0)); - radeon_ring_write(rdev, 1); /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */ radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0)); radeon_ring_write(rdev, RB_INT_STAT); @@ -2745,6 +2765,7 @@ restart_ih: case 0: /* D1 vblank */ if (disp_int & LB_D1_VBLANK_INTERRUPT) { drm_handle_vblank(rdev->ddev, 0); + wake_up(&rdev->irq.vblank_queue); disp_int &= ~LB_D1_VBLANK_INTERRUPT; DRM_DEBUG("IH: D1 vblank\n"); } @@ -2765,6 +2786,7 @@ restart_ih: case 0: /* D2 vblank */ if (disp_int & LB_D2_VBLANK_INTERRUPT) { drm_handle_vblank(rdev->ddev, 1); + wake_up(&rdev->irq.vblank_queue); disp_int &= ~LB_D2_VBLANK_INTERRUPT; DRM_DEBUG("IH: D2 vblank\n"); } diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c index 0dcb6904c4f..db928016d03 100644 --- a/drivers/gpu/drm/radeon/r600_audio.c +++ b/drivers/gpu/drm/radeon/r600_audio.c @@ -35,7 +35,7 @@ */ static int r600_audio_chipset_supported(struct radeon_device *rdev) { - return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710) + return rdev->family >= CHIP_R600 || rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740; @@ -147,15 +147,23 @@ static void r600_audio_update_hdmi(unsigned long param) } /* + * turn on/off audio engine + */ +static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable) +{ + DRM_INFO("%s audio support", enable ? "Enabling" : "Disabling"); + WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000); +} + +/* * initialize the audio vars and register the update timer */ int r600_audio_init(struct radeon_device *rdev) { - if (!r600_audio_chipset_supported(rdev)) + if (!radeon_audio || !r600_audio_chipset_supported(rdev)) return 0; - DRM_INFO("%s audio support", radeon_audio ? "Enabling" : "Disabling"); - WREG32_P(R600_AUDIO_ENABLE, radeon_audio ? 
0x81000000 : 0x0, ~0x81000000); + r600_audio_engine_enable(rdev, true); rdev->audio_channels = -1; rdev->audio_rate = -1; @@ -258,9 +266,10 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock) */ void r600_audio_fini(struct radeon_device *rdev) { - if (!r600_audio_chipset_supported(rdev)) + if (!radeon_audio || !r600_audio_chipset_supported(rdev)) return; del_timer(&rdev->audio_timer); - WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000); + + r600_audio_engine_enable(rdev, false); } diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c index 5ea43234758..f4fb88ece2b 100644 --- a/drivers/gpu/drm/radeon/r600_blit.c +++ b/drivers/gpu/drm/radeon/r600_blit.c @@ -49,7 +49,7 @@ set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64 RING_LOCALS; DRM_DEBUG("\n"); - h = (h + 7) & ~7; + h = ALIGN(h, 8); if (h < 8) h = 8; diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 446b765ac72..f6c6c77db7e 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c @@ -25,7 +25,7 @@ set_render_target(struct radeon_device *rdev, int format, u32 cb_color_info; int pitch, slice; - h = (h + 7) & ~7; + h = ALIGN(h, 8); if (h < 8) h = 8; @@ -396,15 +396,13 @@ set_default_state(struct radeon_device *rdev) NUM_ES_STACK_ENTRIES(num_es_stack_entries)); /* emit an IB pointing at default state */ - dwords = (rdev->r600_blit.state_len + 0xf) & ~0xf; + dwords = ALIGN(rdev->r600_blit.state_len, 0x10); gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC); radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF); radeon_ring_write(rdev, dwords); - radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0)); - radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT); /* SQ config */ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6)); radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); @@ -578,9 +576,9 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes) ring_size = num_loops * dwords_per_loop; /* set default + shaders */ ring_size += 40; /* shaders + def state */ - ring_size += 7; /* fence emit for VB IB */ + ring_size += 10; /* fence emit for VB IB */ ring_size += 5; /* done copy */ - ring_size += 7; /* fence emit for done copy */ + ring_size += 10; /* fence emit for done copy */ r = radeon_ring_lock(rdev, ring_size); if (r) return r; @@ -594,13 +592,6 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence) { int r; - radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0)); - radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT); - /* wait for 3D idle clean */ - radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); - radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); - radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit); - if (rdev->r600_blit.vb_ib) r600_vb_ib_put(rdev); diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c index d745e815c2e..a112c59f9d8 100644 --- a/drivers/gpu/drm/radeon/r600_blit_shaders.c +++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c @@ -9,11 +9,6 @@ const u32 r6xx_default_state[] = 0xc0012800, 0x80000000, 0x80000000, - 0xc0004600, - 0x00000016, - 0xc0016800, - 0x00000010, - 0x00028000, 0xc0016800, 0x00000010, 0x00008000, @@ -531,11 +526,6 @@ const u32 
r7xx_default_state[] = 0xc0012800, 0x80000000, 0x80000000, - 0xc0004600, - 0x00000016, - 0xc0016800, - 0x00000010, - 0x00028000, 0xc0016800, 0x00000010, 0x00008000, diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c index 75bcf35a093..40416c068d9 100644 --- a/drivers/gpu/drm/radeon/r600_cp.c +++ b/drivers/gpu/drm/radeon/r600_cp.c @@ -734,8 +734,8 @@ static void r600_gfx_init(struct drm_device *dev, u32 hdp_host_path_cntl; u32 backend_map; u32 gb_tiling_config = 0; - u32 cc_rb_backend_disable = 0; - u32 cc_gc_shader_pipe_config = 0; + u32 cc_rb_backend_disable; + u32 cc_gc_shader_pipe_config; u32 ramcfg; /* setup chip specs */ @@ -857,29 +857,44 @@ static void r600_gfx_init(struct drm_device *dev, gb_tiling_config |= R600_BANK_SWAPS(1); - backend_map = r600_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes, - dev_priv->r600_max_backends, - (0xff << dev_priv->r600_max_backends) & 0xff); - gb_tiling_config |= R600_BACKEND_MAP(backend_map); + cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000; + cc_rb_backend_disable |= + R600_BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R6XX_MAX_BACKENDS_MASK); - cc_gc_shader_pipe_config = + cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00; + cc_gc_shader_pipe_config |= R600_INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R6XX_MAX_PIPES_MASK); cc_gc_shader_pipe_config |= R600_INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R6XX_MAX_SIMDS_MASK); - cc_rb_backend_disable = - R600_BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R6XX_MAX_BACKENDS_MASK); + backend_map = r600_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes, + (R6XX_MAX_BACKENDS - + r600_count_pipe_bits((cc_rb_backend_disable & + R6XX_MAX_BACKENDS_MASK) >> 16)), + (cc_rb_backend_disable >> 16)); + gb_tiling_config |= R600_BACKEND_MAP(backend_map); RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config); RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); + if (gb_tiling_config & 0xc0) { + dev_priv->r600_group_size = 512; + } else { + dev_priv->r600_group_size = 256; + } + dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7); + if (gb_tiling_config & 0x30) { + dev_priv->r600_nbanks = 8; + } else { + dev_priv->r600_nbanks = 4; + } RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); num_qd_pipes = - R6XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK); + R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8); RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK); RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK); @@ -1151,7 +1166,8 @@ static void r600_gfx_init(struct drm_device *dev, } -static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes, +static u32 r700_get_tile_pipe_to_backend_map(drm_radeon_private_t *dev_priv, + u32 num_tile_pipes, u32 num_backends, u32 backend_disable_mask) { @@ -1162,6 +1178,7 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes, u32 swizzle_pipe[R7XX_MAX_PIPES]; u32 cur_backend; u32 i; + bool force_no_swizzle; if (num_tile_pipes > 
R7XX_MAX_PIPES) num_tile_pipes = R7XX_MAX_PIPES; @@ -1191,6 +1208,18 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes, if (enabled_backends_count != num_backends) num_backends = enabled_backends_count; + switch (dev_priv->flags & RADEON_FAMILY_MASK) { + case CHIP_RV770: + case CHIP_RV730: + force_no_swizzle = false; + break; + case CHIP_RV710: + case CHIP_RV740: + default: + force_no_swizzle = true; + break; + } + memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES); switch (num_tile_pipes) { case 1: @@ -1201,49 +1230,100 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes, swizzle_pipe[1] = 1; break; case 3: - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 1; + if (force_no_swizzle) { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 1; + swizzle_pipe[2] = 2; + } else { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 2; + swizzle_pipe[2] = 1; + } break; case 4: - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 3; - swizzle_pipe[3] = 1; + if (force_no_swizzle) { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 1; + swizzle_pipe[2] = 2; + swizzle_pipe[3] = 3; + } else { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 2; + swizzle_pipe[2] = 3; + swizzle_pipe[3] = 1; + } break; case 5: - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 4; - swizzle_pipe[3] = 1; - swizzle_pipe[4] = 3; + if (force_no_swizzle) { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 1; + swizzle_pipe[2] = 2; + swizzle_pipe[3] = 3; + swizzle_pipe[4] = 4; + } else { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 2; + swizzle_pipe[2] = 4; + swizzle_pipe[3] = 1; + swizzle_pipe[4] = 3; + } break; case 6: - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 4; - swizzle_pipe[3] = 5; - swizzle_pipe[4] = 3; - swizzle_pipe[5] = 1; + if (force_no_swizzle) { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 1; + swizzle_pipe[2] = 2; + swizzle_pipe[3] = 3; + swizzle_pipe[4] = 4; + swizzle_pipe[5] = 5; + } else { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 2; + swizzle_pipe[2] = 4; + swizzle_pipe[3] = 5; + swizzle_pipe[4] = 3; + swizzle_pipe[5] = 1; + } break; case 7: - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 4; - swizzle_pipe[3] = 6; - swizzle_pipe[4] = 3; - swizzle_pipe[5] = 1; - swizzle_pipe[6] = 5; + if (force_no_swizzle) { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 1; + swizzle_pipe[2] = 2; + swizzle_pipe[3] = 3; + swizzle_pipe[4] = 4; + swizzle_pipe[5] = 5; + swizzle_pipe[6] = 6; + } else { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 2; + swizzle_pipe[2] = 4; + swizzle_pipe[3] = 6; + swizzle_pipe[4] = 3; + swizzle_pipe[5] = 1; + swizzle_pipe[6] = 5; + } break; case 8: - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 4; - swizzle_pipe[3] = 6; - swizzle_pipe[4] = 3; - swizzle_pipe[5] = 1; - swizzle_pipe[6] = 7; - swizzle_pipe[7] = 5; + if (force_no_swizzle) { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 1; + swizzle_pipe[2] = 2; + swizzle_pipe[3] = 3; + swizzle_pipe[4] = 4; + swizzle_pipe[5] = 5; + swizzle_pipe[6] = 6; + swizzle_pipe[7] = 7; + } else { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 2; + swizzle_pipe[2] = 4; + swizzle_pipe[3] = 6; + swizzle_pipe[4] = 3; + swizzle_pipe[5] = 1; + swizzle_pipe[6] = 7; + swizzle_pipe[7] = 5; + } break; } @@ -1264,8 +1344,10 @@ static void r700_gfx_init(struct drm_device *dev, drm_radeon_private_t *dev_priv) { int i, j, num_qd_pipes; + u32 ta_aux_cntl; u32 sx_debug_1; u32 smx_dc_ctl0; + u32 db_debug3; u32 num_gs_verts_per_thread; u32 vgt_gs_per_es; u32 gs_prim_buffer_depth = 0; @@ 
-1276,8 +1358,8 @@ static void r700_gfx_init(struct drm_device *dev, u32 sq_dyn_gpr_size_simd_ab_0; u32 backend_map; u32 gb_tiling_config = 0; - u32 cc_rb_backend_disable = 0; - u32 cc_gc_shader_pipe_config = 0; + u32 cc_rb_backend_disable; + u32 cc_gc_shader_pipe_config; u32 mc_arb_ramcfg; u32 db_debug4; @@ -1428,38 +1510,51 @@ static void r700_gfx_init(struct drm_device *dev, gb_tiling_config |= R600_BANK_SWAPS(1); - if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740) - backend_map = 0x28; - else - backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes, - dev_priv->r600_max_backends, - (0xff << dev_priv->r600_max_backends) & 0xff); - gb_tiling_config |= R600_BACKEND_MAP(backend_map); + cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000; + cc_rb_backend_disable |= + R600_BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R7XX_MAX_BACKENDS_MASK); - cc_gc_shader_pipe_config = + cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00; + cc_gc_shader_pipe_config |= R600_INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R7XX_MAX_PIPES_MASK); cc_gc_shader_pipe_config |= R600_INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R7XX_MAX_SIMDS_MASK); - cc_rb_backend_disable = - R600_BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R7XX_MAX_BACKENDS_MASK); + if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740) + backend_map = 0x28; + else + backend_map = r700_get_tile_pipe_to_backend_map(dev_priv, + dev_priv->r600_max_tile_pipes, + (R7XX_MAX_BACKENDS - + r600_count_pipe_bits((cc_rb_backend_disable & + R7XX_MAX_BACKENDS_MASK) >> 16)), + (cc_rb_backend_disable >> 16)); + gb_tiling_config |= R600_BACKEND_MAP(backend_map); RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config); RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); + if (gb_tiling_config & 0xc0) { + dev_priv->r600_group_size = 512; + } else { + dev_priv->r600_group_size = 256; + } + dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7); + if (gb_tiling_config & 0x30) { + dev_priv->r600_nbanks = 8; + } else { + dev_priv->r600_nbanks = 4; + } RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); - RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0); RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0); - RADEON_WRITE(R700_CGTS_USER_SYS_TCC_DISABLE, 0); - RADEON_WRITE(R700_CGTS_USER_TCC_DISABLE, 0); num_qd_pipes = - R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK); + R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8); RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK); RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK); @@ -1469,10 +1564,8 @@ static void r700_gfx_init(struct drm_device *dev, RADEON_WRITE(R600_CP_MEQ_THRESHOLDS, R700_STQ_SPLIT(0x30)); - RADEON_WRITE(R600_TA_CNTL_AUX, (R600_DISABLE_CUBE_ANISO | - R600_SYNC_GRADIENT | - R600_SYNC_WALKER | - R600_SYNC_ALIGNER)); + ta_aux_cntl = RADEON_READ(R600_TA_CNTL_AUX); + RADEON_WRITE(R600_TA_CNTL_AUX, ta_aux_cntl | R600_DISABLE_CUBE_ANISO); sx_debug_1 = 
RADEON_READ(R700_SX_DEBUG_1); sx_debug_1 |= R700_ENABLE_NEW_SMX_ADDRESS; @@ -1483,14 +1576,28 @@ static void r700_gfx_init(struct drm_device *dev, smx_dc_ctl0 |= R700_CACHE_DEPTH((dev_priv->r700_sx_num_of_sets * 64) - 1); RADEON_WRITE(R600_SMX_DC_CTL0, smx_dc_ctl0); - RADEON_WRITE(R700_SMX_EVENT_CTL, (R700_ES_FLUSH_CTL(4) | - R700_GS_FLUSH_CTL(4) | - R700_ACK_FLUSH_CTL(3) | - R700_SYNC_FLUSH_CTL)); + if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV740) + RADEON_WRITE(R700_SMX_EVENT_CTL, (R700_ES_FLUSH_CTL(4) | + R700_GS_FLUSH_CTL(4) | + R700_ACK_FLUSH_CTL(3) | + R700_SYNC_FLUSH_CTL)); - if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV770) - RADEON_WRITE(R700_DB_DEBUG3, R700_DB_CLK_OFF_DELAY(0x1f)); - else { + db_debug3 = RADEON_READ(R700_DB_DEBUG3); + db_debug3 &= ~R700_DB_CLK_OFF_DELAY(0x1f); + switch (dev_priv->flags & RADEON_FAMILY_MASK) { + case CHIP_RV770: + case CHIP_RV740: + db_debug3 |= R700_DB_CLK_OFF_DELAY(0x1f); + break; + case CHIP_RV710: + case CHIP_RV730: + default: + db_debug3 |= R700_DB_CLK_OFF_DELAY(2); + break; + } + RADEON_WRITE(R700_DB_DEBUG3, db_debug3); + + if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV770) { db_debug4 = RADEON_READ(RV700_DB_DEBUG4); db_debug4 |= RV700_DISABLE_TILE_COVERED_FOR_PS_ITER; RADEON_WRITE(RV700_DB_DEBUG4, db_debug4); @@ -1519,10 +1626,10 @@ static void r700_gfx_init(struct drm_device *dev, R600_ALU_UPDATE_FIFO_HIWATER(0x8)); switch (dev_priv->flags & RADEON_FAMILY_MASK) { case CHIP_RV770: - sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x1); - break; case CHIP_RV730: case CHIP_RV710: + sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x1); + break; case CHIP_RV740: default: sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x4); @@ -2529,3 +2636,12 @@ out: mutex_unlock(&dev_priv->cs_mutex); return r; } + +void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size) +{ + struct drm_radeon_private *dev_priv = dev->dev_private; + + *npipes = dev_priv->r600_npipes; + *nbanks = dev_priv->r600_nbanks; + *group_size = dev_priv->r600_group_size; +} diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index e4c45ec1650..cd2c63bce50 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -28,6 +28,7 @@ #include "drmP.h" #include "radeon.h" #include "r600d.h" +#include "r600_reg_safe.h" static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, struct radeon_cs_reloc **cs_reloc); @@ -35,11 +36,313 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, struct radeon_cs_reloc **cs_reloc); typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**); static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm; +extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size); + struct r600_cs_track { - u32 cb_color0_base_last; + /* configuration we miror so that we use same code btw kms/ums */ + u32 group_size; + u32 nbanks; + u32 npipes; + /* value we track */ + u32 nsamples; + u32 cb_color_base_last[8]; + struct radeon_bo *cb_color_bo[8]; + u32 cb_color_bo_offset[8]; + struct radeon_bo *cb_color_frag_bo[8]; + struct radeon_bo *cb_color_tile_bo[8]; + u32 cb_color_info[8]; + u32 cb_color_size_idx[8]; + u32 cb_target_mask; + u32 cb_shader_mask; + u32 cb_color_size[8]; + u32 vgt_strmout_en; + u32 vgt_strmout_buffer_en; + u32 db_depth_control; + u32 db_depth_info; + u32 db_depth_size_idx; + u32 db_depth_view; + u32 db_depth_size; + u32 db_offset; 
+ struct radeon_bo *db_bo; }; +static inline int r600_bpe_from_format(u32 *bpe, u32 format) +{ + switch (format) { + case V_038004_COLOR_8: + case V_038004_COLOR_4_4: + case V_038004_COLOR_3_3_2: + case V_038004_FMT_1: + *bpe = 1; + break; + case V_038004_COLOR_16: + case V_038004_COLOR_16_FLOAT: + case V_038004_COLOR_8_8: + case V_038004_COLOR_5_6_5: + case V_038004_COLOR_6_5_5: + case V_038004_COLOR_1_5_5_5: + case V_038004_COLOR_4_4_4_4: + case V_038004_COLOR_5_5_5_1: + *bpe = 2; + break; + case V_038004_FMT_8_8_8: + *bpe = 3; + break; + case V_038004_COLOR_32: + case V_038004_COLOR_32_FLOAT: + case V_038004_COLOR_16_16: + case V_038004_COLOR_16_16_FLOAT: + case V_038004_COLOR_8_24: + case V_038004_COLOR_8_24_FLOAT: + case V_038004_COLOR_24_8: + case V_038004_COLOR_24_8_FLOAT: + case V_038004_COLOR_10_11_11: + case V_038004_COLOR_10_11_11_FLOAT: + case V_038004_COLOR_11_11_10: + case V_038004_COLOR_11_11_10_FLOAT: + case V_038004_COLOR_2_10_10_10: + case V_038004_COLOR_8_8_8_8: + case V_038004_COLOR_10_10_10_2: + case V_038004_FMT_5_9_9_9_SHAREDEXP: + case V_038004_FMT_32_AS_8: + case V_038004_FMT_32_AS_8_8: + *bpe = 4; + break; + case V_038004_COLOR_X24_8_32_FLOAT: + case V_038004_COLOR_32_32: + case V_038004_COLOR_32_32_FLOAT: + case V_038004_COLOR_16_16_16_16: + case V_038004_COLOR_16_16_16_16_FLOAT: + *bpe = 8; + break; + case V_038004_FMT_16_16_16: + case V_038004_FMT_16_16_16_FLOAT: + *bpe = 6; + break; + case V_038004_FMT_32_32_32: + case V_038004_FMT_32_32_32_FLOAT: + *bpe = 12; + break; + case V_038004_COLOR_32_32_32_32: + case V_038004_COLOR_32_32_32_32_FLOAT: + *bpe = 16; + break; + case V_038004_FMT_GB_GR: + case V_038004_FMT_BG_RG: + case V_038004_COLOR_INVALID: + *bpe = 16; + return -EINVAL; + } + return 0; +} + +static void r600_cs_track_init(struct r600_cs_track *track) +{ + int i; + + for (i = 0; i < 8; i++) { + track->cb_color_base_last[i] = 0; + track->cb_color_size[i] = 0; + track->cb_color_size_idx[i] = 0; + track->cb_color_info[i] = 0; + track->cb_color_bo[i] = NULL; + track->cb_color_bo_offset[i] = 0xFFFFFFFF; + } + track->cb_target_mask = 0xFFFFFFFF; + track->cb_shader_mask = 0xFFFFFFFF; + track->db_bo = NULL; + /* assume the biggest format and that htile is enabled */ + track->db_depth_info = 7 | (1 << 25); + track->db_depth_view = 0xFFFFC000; + track->db_depth_size = 0xFFFFFFFF; + track->db_depth_size_idx = 0; + track->db_depth_control = 0xFFFFFFFF; +} + +static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) +{ + struct r600_cs_track *track = p->track; + u32 bpe = 0, pitch, slice_tile_max, size, tmp, height; + volatile u32 *ib = p->ib->ptr; + + if (G_0280A0_TILE_MODE(track->cb_color_info[i])) { + dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n"); + return -EINVAL; + } + size = radeon_bo_size(track->cb_color_bo[i]); + if (r600_bpe_from_format(&bpe, G_0280A0_FORMAT(track->cb_color_info[i]))) { + dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n", + __func__, __LINE__, G_0280A0_FORMAT(track->cb_color_info[i]), + i, track->cb_color_info[i]); + return -EINVAL; + } + pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) << 3; + slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1; + if (!pitch) { + dev_warn(p->dev, "%s:%d cb pitch (%d) for %d invalid (0x%08X)\n", + __func__, __LINE__, pitch, i, track->cb_color_size[i]); + return -EINVAL; + } + height = size / (pitch * bpe); + if (height > 8192) + height = 8192; + switch (G_0280A0_ARRAY_MODE(track->cb_color_info[i])) { + case 
V_0280A0_ARRAY_LINEAR_GENERAL: + case V_0280A0_ARRAY_LINEAR_ALIGNED: + if (pitch & 0x3f) { + dev_warn(p->dev, "%s:%d cb pitch (%d x %d = %d) invalid\n", + __func__, __LINE__, pitch, bpe, pitch * bpe); + return -EINVAL; + } + if ((pitch * bpe) & (track->group_size - 1)) { + dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", + __func__, __LINE__, pitch); + return -EINVAL; + } + break; + case V_0280A0_ARRAY_1D_TILED_THIN1: + if ((pitch * 8 * bpe * track->nsamples) & (track->group_size - 1)) { + dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", + __func__, __LINE__, pitch); + return -EINVAL; + } + height &= ~0x7; + if (!height) + height = 8; + break; + case V_0280A0_ARRAY_2D_TILED_THIN1: + if (pitch & ((8 * track->nbanks) - 1)) { + dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", + __func__, __LINE__, pitch); + return -EINVAL; + } + tmp = pitch * 8 * bpe * track->nsamples; + tmp = tmp / track->nbanks; + if (tmp & (track->group_size - 1)) { + dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", + __func__, __LINE__, pitch); + return -EINVAL; + } + height &= ~((16 * track->npipes) - 1); + if (!height) + height = 16 * track->npipes; + break; + default: + dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__, + G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i, + track->cb_color_info[i]); + return -EINVAL; + } + /* check offset */ + tmp = height * pitch; + if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { + dev_warn(p->dev, "%s offset[%d] %d to big\n", __func__, i, track->cb_color_bo_offset[i]); + return -EINVAL; + } + /* limit max tile */ + tmp = (height * pitch) >> 6; + if (tmp < slice_tile_max) + slice_tile_max = tmp; + tmp = S_028060_PITCH_TILE_MAX((pitch >> 3) - 1) | + S_028060_SLICE_TILE_MAX(slice_tile_max - 1); + ib[track->cb_color_size_idx[i]] = tmp; + return 0; +} + +static int r600_cs_track_check(struct radeon_cs_parser *p) +{ + struct r600_cs_track *track = p->track; + u32 tmp; + int r, i; + volatile u32 *ib = p->ib->ptr; + + /* on legacy kernel we don't perform advanced check */ + if (p->rdev == NULL) + return 0; + /* we don't support out buffer yet */ + if (track->vgt_strmout_en || track->vgt_strmout_buffer_en) { + dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n"); + return -EINVAL; + } + /* check that we have a cb for each enabled target, we don't check + * shader_mask because it seems mesa isn't always setting it :( + */ + tmp = track->cb_target_mask; + for (i = 0; i < 8; i++) { + if ((tmp >> (i * 4)) & 0xF) { + /* at least one component is enabled */ + if (track->cb_color_bo[i] == NULL) { + dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", + __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i); + return -EINVAL; + } + /* perform rewrite of CB_COLOR[0-7]_SIZE */ + r = r600_cs_track_validate_cb(p, i); + if (r) + return r; + } + } + /* Check depth buffer */ + if (G_028800_STENCIL_ENABLE(track->db_depth_control) || + G_028800_Z_ENABLE(track->db_depth_control)) { + u32 nviews, bpe, ntiles; + if (track->db_bo == NULL) { + dev_warn(p->dev, "z/stencil with no depth buffer\n"); + return -EINVAL; + } + if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) { + dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n"); + return -EINVAL; + } + switch (G_028010_FORMAT(track->db_depth_info)) { + case V_028010_DEPTH_16: + bpe = 2; + break; + case V_028010_DEPTH_X8_24: + case V_028010_DEPTH_8_24: + case V_028010_DEPTH_X8_24_FLOAT: + case V_028010_DEPTH_8_24_FLOAT: + case V_028010_DEPTH_32_FLOAT: + bpe 
= 4; + break; + case V_028010_DEPTH_X24_8_32_FLOAT: + bpe = 8; + break; + default: + dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info)); + return -EINVAL; + } + if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) { + if (!track->db_depth_size_idx) { + dev_warn(p->dev, "z/stencil buffer size not set\n"); + return -EINVAL; + } + printk_once(KERN_WARNING "You have old & broken userspace please consider updating mesa\n"); + tmp = radeon_bo_size(track->db_bo) - track->db_offset; + tmp = (tmp / bpe) >> 6; + if (!tmp) { + dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n", + track->db_depth_size, bpe, track->db_offset, + radeon_bo_size(track->db_bo)); + return -EINVAL; + } + ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF); + } else { + ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; + nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; + tmp = ntiles * bpe * 64 * nviews; + if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { + dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %d have %ld)\n", + track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, + radeon_bo_size(track->db_bo)); + return -EINVAL; + } + } + } + return 0; +} + /** * r600_cs_packet_parse() - parse cp packet and point ib index to next packet * @parser: parser structure holding parsing context. @@ -359,6 +662,334 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p, return 0; } +/** + * r600_cs_check_reg() - check if register is authorized or not + * @parser: parser structure holding parsing context + * @reg: register we are testing + * @idx: index into the cs buffer + * + * This function will test against r600_reg_safe_bm and return 0 + * if register is safe. If register is not flag as safe this function + * will test it against a list of register needind special handling. 
+ */ +static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) +{ + struct r600_cs_track *track = (struct r600_cs_track *)p->track; + struct radeon_cs_reloc *reloc; + u32 last_reg = ARRAY_SIZE(r600_reg_safe_bm); + u32 m, i, tmp, *ib; + int r; + + i = (reg >> 7); + if (i > last_reg) { + dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); + return -EINVAL; + } + m = 1 << ((reg >> 2) & 31); + if (!(r600_reg_safe_bm[i] & m)) + return 0; + ib = p->ib->ptr; + switch (reg) { + /* force following reg to 0 in an attemp to disable out buffer + * which will need us to better understand how it works to perform + * security check on it (Jerome) + */ + case R_0288A8_SQ_ESGS_RING_ITEMSIZE: + case R_008C44_SQ_ESGS_RING_SIZE: + case R_0288B0_SQ_ESTMP_RING_ITEMSIZE: + case R_008C54_SQ_ESTMP_RING_SIZE: + case R_0288C0_SQ_FBUF_RING_ITEMSIZE: + case R_008C74_SQ_FBUF_RING_SIZE: + case R_0288B4_SQ_GSTMP_RING_ITEMSIZE: + case R_008C5C_SQ_GSTMP_RING_SIZE: + case R_0288AC_SQ_GSVS_RING_ITEMSIZE: + case R_008C4C_SQ_GSVS_RING_SIZE: + case R_0288BC_SQ_PSTMP_RING_ITEMSIZE: + case R_008C6C_SQ_PSTMP_RING_SIZE: + case R_0288C4_SQ_REDUC_RING_ITEMSIZE: + case R_008C7C_SQ_REDUC_RING_SIZE: + case R_0288B8_SQ_VSTMP_RING_ITEMSIZE: + case R_008C64_SQ_VSTMP_RING_SIZE: + case R_0288C8_SQ_GS_VERT_ITEMSIZE: + /* get value to populate the IB don't remove */ + tmp =radeon_get_ib_value(p, idx); + ib[idx] = 0; + break; + case R_028800_DB_DEPTH_CONTROL: + track->db_depth_control = radeon_get_ib_value(p, idx); + break; + case R_028010_DB_DEPTH_INFO: + track->db_depth_info = radeon_get_ib_value(p, idx); + break; + case R_028004_DB_DEPTH_VIEW: + track->db_depth_view = radeon_get_ib_value(p, idx); + break; + case R_028000_DB_DEPTH_SIZE: + track->db_depth_size = radeon_get_ib_value(p, idx); + track->db_depth_size_idx = idx; + break; + case R_028AB0_VGT_STRMOUT_EN: + track->vgt_strmout_en = radeon_get_ib_value(p, idx); + break; + case R_028B20_VGT_STRMOUT_BUFFER_EN: + track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx); + break; + case R_028238_CB_TARGET_MASK: + track->cb_target_mask = radeon_get_ib_value(p, idx); + break; + case R_02823C_CB_SHADER_MASK: + track->cb_shader_mask = radeon_get_ib_value(p, idx); + break; + case R_028C04_PA_SC_AA_CONFIG: + tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx)); + track->nsamples = 1 << tmp; + break; + case R_0280A0_CB_COLOR0_INFO: + case R_0280A4_CB_COLOR1_INFO: + case R_0280A8_CB_COLOR2_INFO: + case R_0280AC_CB_COLOR3_INFO: + case R_0280B0_CB_COLOR4_INFO: + case R_0280B4_CB_COLOR5_INFO: + case R_0280B8_CB_COLOR6_INFO: + case R_0280BC_CB_COLOR7_INFO: + tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4; + track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); + break; + case R_028060_CB_COLOR0_SIZE: + case R_028064_CB_COLOR1_SIZE: + case R_028068_CB_COLOR2_SIZE: + case R_02806C_CB_COLOR3_SIZE: + case R_028070_CB_COLOR4_SIZE: + case R_028074_CB_COLOR5_SIZE: + case R_028078_CB_COLOR6_SIZE: + case R_02807C_CB_COLOR7_SIZE: + tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4; + track->cb_color_size[tmp] = radeon_get_ib_value(p, idx); + track->cb_color_size_idx[tmp] = idx; + break; + /* This register were added late, there is userspace + * which does provide relocation for those but set + * 0 offset. In order to avoid breaking old userspace + * we detect this and set address to point to last + * CB_COLOR0_BASE, note that if userspace doesn't set + * CB_COLOR0_BASE before this register we will report + * error. 
Old userspace always set CB_COLOR0_BASE + * before any of this. + */ + case R_0280E0_CB_COLOR0_FRAG: + case R_0280E4_CB_COLOR1_FRAG: + case R_0280E8_CB_COLOR2_FRAG: + case R_0280EC_CB_COLOR3_FRAG: + case R_0280F0_CB_COLOR4_FRAG: + case R_0280F4_CB_COLOR5_FRAG: + case R_0280F8_CB_COLOR6_FRAG: + case R_0280FC_CB_COLOR7_FRAG: + tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4; + if (!r600_cs_packet_next_is_pkt3_nop(p)) { + if (!track->cb_color_base_last[tmp]) { + dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg); + return -EINVAL; + } + ib[idx] = track->cb_color_base_last[tmp]; + printk_once(KERN_WARNING "You have old & broken userspace " + "please consider updating mesa & xf86-video-ati\n"); + track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp]; + } else { + r = r600_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); + return -EINVAL; + } + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + track->cb_color_frag_bo[tmp] = reloc->robj; + } + break; + case R_0280C0_CB_COLOR0_TILE: + case R_0280C4_CB_COLOR1_TILE: + case R_0280C8_CB_COLOR2_TILE: + case R_0280CC_CB_COLOR3_TILE: + case R_0280D0_CB_COLOR4_TILE: + case R_0280D4_CB_COLOR5_TILE: + case R_0280D8_CB_COLOR6_TILE: + case R_0280DC_CB_COLOR7_TILE: + tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4; + if (!r600_cs_packet_next_is_pkt3_nop(p)) { + if (!track->cb_color_base_last[tmp]) { + dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg); + return -EINVAL; + } + ib[idx] = track->cb_color_base_last[tmp]; + printk_once(KERN_WARNING "You have old & broken userspace " + "please consider updating mesa & xf86-video-ati\n"); + track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp]; + } else { + r = r600_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); + return -EINVAL; + } + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + track->cb_color_tile_bo[tmp] = reloc->robj; + } + break; + case CB_COLOR0_BASE: + case CB_COLOR1_BASE: + case CB_COLOR2_BASE: + case CB_COLOR3_BASE: + case CB_COLOR4_BASE: + case CB_COLOR5_BASE: + case CB_COLOR6_BASE: + case CB_COLOR7_BASE: + r = r600_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + tmp = (reg - CB_COLOR0_BASE) / 4; + track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx); + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + track->cb_color_base_last[tmp] = ib[idx]; + track->cb_color_bo[tmp] = reloc->robj; + break; + case DB_DEPTH_BASE: + r = r600_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + track->db_offset = radeon_get_ib_value(p, idx); + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + track->db_bo = reloc->robj; + break; + case DB_HTILE_DATA_BASE: + case SQ_PGM_START_FS: + case SQ_PGM_START_ES: + case SQ_PGM_START_VS: + case SQ_PGM_START_GS: + case SQ_PGM_START_PS: + r = r600_cs_packet_next_reloc(p, &reloc); + if (r) { + dev_warn(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); + return -EINVAL; + } + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + break; + default: + dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); + return -EINVAL; + } + return 0; +} + +static inline unsigned minify(unsigned size, unsigned levels) +{ + size = size >> levels; + if (size < 1) 
+ size = 1; + return size; +} + +static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels, + unsigned w0, unsigned h0, unsigned d0, unsigned bpe, + unsigned *l0_size, unsigned *mipmap_size) +{ + unsigned offset, i, level, face; + unsigned width, height, depth, rowstride, size; + + w0 = minify(w0, 0); + h0 = minify(h0, 0); + d0 = minify(d0, 0); + for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) { + width = minify(w0, i); + height = minify(h0, i); + depth = minify(d0, i); + for(face = 0; face < nfaces; face++) { + rowstride = ((width * bpe) + 255) & ~255; + size = height * rowstride * depth; + offset += size; + offset = (offset + 0x1f) & ~0x1f; + } + } + *l0_size = (((w0 * bpe) + 255) & ~255) * h0 * d0; + *mipmap_size = offset; + if (!blevel) + *mipmap_size -= *l0_size; + if (!nlevels) + *mipmap_size = *l0_size; +} + +/** + * r600_check_texture_resource() - check if register is authorized or not + * @p: parser structure holding parsing context + * @idx: index into the cs buffer + * @texture: texture's bo structure + * @mipmap: mipmap's bo structure + * + * This function will check that the resource has valid field and that + * the texture and mipmap bo object are big enough to cover this resource. + */ +static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, + struct radeon_bo *texture, + struct radeon_bo *mipmap) +{ + u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0; + u32 word0, word1, l0_size, mipmap_size; + + /* on legacy kernel we don't perform advanced check */ + if (p->rdev == NULL) + return 0; + word0 = radeon_get_ib_value(p, idx + 0); + word1 = radeon_get_ib_value(p, idx + 1); + w0 = G_038000_TEX_WIDTH(word0) + 1; + h0 = G_038004_TEX_HEIGHT(word1) + 1; + d0 = G_038004_TEX_DEPTH(word1); + nfaces = 1; + switch (G_038000_DIM(word0)) { + case V_038000_SQ_TEX_DIM_1D: + case V_038000_SQ_TEX_DIM_2D: + case V_038000_SQ_TEX_DIM_3D: + break; + case V_038000_SQ_TEX_DIM_CUBEMAP: + nfaces = 6; + break; + case V_038000_SQ_TEX_DIM_1D_ARRAY: + case V_038000_SQ_TEX_DIM_2D_ARRAY: + case V_038000_SQ_TEX_DIM_2D_MSAA: + case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA: + default: + dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0)); + return -EINVAL; + } + if (r600_bpe_from_format(&bpe, G_038004_DATA_FORMAT(word1))) { + dev_warn(p->dev, "%s:%d texture invalid format %d\n", + __func__, __LINE__, G_038004_DATA_FORMAT(word1)); + return -EINVAL; + } + word0 = radeon_get_ib_value(p, idx + 4); + word1 = radeon_get_ib_value(p, idx + 5); + blevel = G_038010_BASE_LEVEL(word0); + nlevels = G_038014_LAST_LEVEL(word1); + r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe, &l0_size, &mipmap_size); + /* using get ib will give us the offset into the texture bo */ + word0 = radeon_get_ib_value(p, idx + 2); + if ((l0_size + word0) > radeon_bo_size(texture)) { + dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n", + w0, h0, bpe, word0, l0_size, radeon_bo_size(texture)); + return -EINVAL; + } + /* using get ib will give us the offset into the mipmap bo */ + word0 = radeon_get_ib_value(p, idx + 3); + if ((mipmap_size + word0) > radeon_bo_size(mipmap)) { + dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", + w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture)); + return -EINVAL; + } + return 0; +} + static int r600_packet3_check(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt) { @@ -408,12 +1039,22 @@ static int r600_packet3_check(struct 
radeon_cs_parser *p, } ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff); ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; + r = r600_cs_track_check(p); + if (r) { + dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + return r; + } break; case PACKET3_DRAW_INDEX_AUTO: if (pkt->count != 1) { DRM_ERROR("bad DRAW_INDEX_AUTO\n"); return -EINVAL; } + r = r600_cs_track_check(p); + if (r) { + dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); + return r; + } break; case PACKET3_DRAW_INDEX_IMMD_BE: case PACKET3_DRAW_INDEX_IMMD: @@ -421,6 +1062,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p, DRM_ERROR("bad DRAW_INDEX_IMMD\n"); return -EINVAL; } + r = r600_cs_track_check(p); + if (r) { + dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + return r; + } break; case PACKET3_WAIT_REG_MEM: if (pkt->count != 5) { @@ -493,30 +1139,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p, } for (i = 0; i < pkt->count; i++) { reg = start_reg + (4 * i); - switch (reg) { - case SQ_ESGS_RING_BASE: - case SQ_GSVS_RING_BASE: - case SQ_ESTMP_RING_BASE: - case SQ_GSTMP_RING_BASE: - case SQ_VSTMP_RING_BASE: - case SQ_PSTMP_RING_BASE: - case SQ_FBUF_RING_BASE: - case SQ_REDUC_RING_BASE: - case SX_MEMORY_EXPORT_BASE: - r = r600_cs_packet_next_reloc(p, &reloc); - if (r) { - DRM_ERROR("bad SET_CONFIG_REG " - "0x%04X\n", reg); - return -EINVAL; - } - ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); - break; - case CP_COHER_BASE: - /* use PACKET3_SURFACE_SYNC */ - return -EINVAL; - default: - break; - } + r = r600_cs_check_reg(p, reg, idx+1+i); + if (r) + return r; } break; case PACKET3_SET_CONTEXT_REG: @@ -530,106 +1155,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p, } for (i = 0; i < pkt->count; i++) { reg = start_reg + (4 * i); - switch (reg) { - /* This register were added late, there is userspace - * which does provide relocation for those but set - * 0 offset. In order to avoid breaking old userspace - * we detect this and set address to point to last - * CB_COLOR0_BASE, note that if userspace doesn't set - * CB_COLOR0_BASE before this register we will report - * error. Old userspace always set CB_COLOR0_BASE - * before any of this. - */ - case R_0280E0_CB_COLOR0_FRAG: - case R_0280E4_CB_COLOR1_FRAG: - case R_0280E8_CB_COLOR2_FRAG: - case R_0280EC_CB_COLOR3_FRAG: - case R_0280F0_CB_COLOR4_FRAG: - case R_0280F4_CB_COLOR5_FRAG: - case R_0280F8_CB_COLOR6_FRAG: - case R_0280FC_CB_COLOR7_FRAG: - case R_0280C0_CB_COLOR0_TILE: - case R_0280C4_CB_COLOR1_TILE: - case R_0280C8_CB_COLOR2_TILE: - case R_0280CC_CB_COLOR3_TILE: - case R_0280D0_CB_COLOR4_TILE: - case R_0280D4_CB_COLOR5_TILE: - case R_0280D8_CB_COLOR6_TILE: - case R_0280DC_CB_COLOR7_TILE: - if (!r600_cs_packet_next_is_pkt3_nop(p)) { - if (!track->cb_color0_base_last) { - dev_err(p->dev, "Broken old userspace ? 
no cb_color0_base supplied before trying to write 0x%08X\n", reg); - return -EINVAL; - } - ib[idx+1+i] = track->cb_color0_base_last; - printk_once(KERN_WARNING "radeon: You have old & broken userspace " - "please consider updating mesa & xf86-video-ati\n"); - } else { - r = r600_cs_packet_next_reloc(p, &reloc); - if (r) { - dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); - return -EINVAL; - } - ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); - } - break; - case DB_DEPTH_BASE: - case DB_HTILE_DATA_BASE: - case CB_COLOR0_BASE: - r = r600_cs_packet_next_reloc(p, &reloc); - if (r) { - DRM_ERROR("bad SET_CONTEXT_REG " - "0x%04X\n", reg); - return -EINVAL; - } - ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); - track->cb_color0_base_last = ib[idx+1+i]; - break; - case CB_COLOR1_BASE: - case CB_COLOR2_BASE: - case CB_COLOR3_BASE: - case CB_COLOR4_BASE: - case CB_COLOR5_BASE: - case CB_COLOR6_BASE: - case CB_COLOR7_BASE: - case SQ_PGM_START_FS: - case SQ_PGM_START_ES: - case SQ_PGM_START_VS: - case SQ_PGM_START_GS: - case SQ_PGM_START_PS: - r = r600_cs_packet_next_reloc(p, &reloc); - if (r) { - DRM_ERROR("bad SET_CONTEXT_REG " - "0x%04X\n", reg); - return -EINVAL; - } - ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); - break; - case VGT_DMA_BASE: - case VGT_DMA_BASE_HI: - /* These should be handled by DRAW_INDEX packet 3 */ - case VGT_STRMOUT_BASE_OFFSET_0: - case VGT_STRMOUT_BASE_OFFSET_1: - case VGT_STRMOUT_BASE_OFFSET_2: - case VGT_STRMOUT_BASE_OFFSET_3: - case VGT_STRMOUT_BASE_OFFSET_HI_0: - case VGT_STRMOUT_BASE_OFFSET_HI_1: - case VGT_STRMOUT_BASE_OFFSET_HI_2: - case VGT_STRMOUT_BASE_OFFSET_HI_3: - case VGT_STRMOUT_BUFFER_BASE_0: - case VGT_STRMOUT_BUFFER_BASE_1: - case VGT_STRMOUT_BUFFER_BASE_2: - case VGT_STRMOUT_BUFFER_BASE_3: - case VGT_STRMOUT_BUFFER_OFFSET_0: - case VGT_STRMOUT_BUFFER_OFFSET_1: - case VGT_STRMOUT_BUFFER_OFFSET_2: - case VGT_STRMOUT_BUFFER_OFFSET_3: - /* These should be handled by STRMOUT_BUFFER packet 3 */ - DRM_ERROR("bad context reg: 0x%08x\n", reg); - return -EINVAL; - default: - break; - } + r = r600_cs_check_reg(p, reg, idx+1+i); + if (r) + return r; } break; case PACKET3_SET_RESOURCE: @@ -646,6 +1174,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p, return -EINVAL; } for (i = 0; i < (pkt->count / 7); i++) { + struct radeon_bo *texture, *mipmap; + u32 size, offset; + switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) { case SQ_TEX_VTX_VALID_TEXTURE: /* tex base */ @@ -655,6 +1186,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p, return -EINVAL; } ib[idx+1+(i*7)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + texture = reloc->robj; /* tex mip base */ r = r600_cs_packet_next_reloc(p, &reloc); if (r) { @@ -662,6 +1194,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p, return -EINVAL; } ib[idx+1+(i*7)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + mipmap = reloc->robj; + r = r600_check_texture_resource(p, idx+(i*7)+1, + texture, mipmap); + if (r) + return r; break; case SQ_TEX_VTX_VALID_BUFFER: /* vtx base */ @@ -670,6 +1207,13 @@ static int r600_packet3_check(struct radeon_cs_parser *p, DRM_ERROR("bad SET_RESOURCE\n"); return -EINVAL; } + offset = radeon_get_ib_value(p, idx+1+(i*7)+0); + size = radeon_get_ib_value(p, idx+1+(i*7)+1); + if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) { + /* force size to size of the buffer */ + dev_warn(p->dev, "vbo resource seems too big for the bo\n"); + ib[idx+1+(i*7)+1] = 
radeon_bo_size(reloc->robj); + } ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff); ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff; break; @@ -760,11 +1304,28 @@ int r600_cs_parse(struct radeon_cs_parser *p) struct r600_cs_track *track; int r; - track = kzalloc(sizeof(*track), GFP_KERNEL); - p->track = track; + if (p->track == NULL) { + /* initialize tracker, we are in kms */ + track = kzalloc(sizeof(*track), GFP_KERNEL); + if (track == NULL) + return -ENOMEM; + r600_cs_track_init(track); + if (p->rdev->family < CHIP_RV770) { + track->npipes = p->rdev->config.r600.tiling_npipes; + track->nbanks = p->rdev->config.r600.tiling_nbanks; + track->group_size = p->rdev->config.r600.tiling_group_size; + } else if (p->rdev->family <= CHIP_RV740) { + track->npipes = p->rdev->config.rv770.tiling_npipes; + track->nbanks = p->rdev->config.rv770.tiling_nbanks; + track->group_size = p->rdev->config.rv770.tiling_group_size; + } + p->track = track; + } do { r = r600_cs_packet_parse(p, &pkt, p->idx); if (r) { + kfree(p->track); + p->track = NULL; return r; } p->idx += pkt.count + 2; @@ -779,9 +1340,13 @@ int r600_cs_parse(struct radeon_cs_parser *p) break; default: DRM_ERROR("Unknown packet type %d !\n", pkt.type); + kfree(p->track); + p->track = NULL; return -EINVAL; } if (r) { + kfree(p->track); + p->track = NULL; return r; } } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); @@ -791,6 +1356,8 @@ int r600_cs_parse(struct radeon_cs_parser *p) mdelay(1); } #endif + kfree(p->track); + p->track = NULL; return 0; } @@ -833,9 +1400,16 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp, { struct radeon_cs_parser parser; struct radeon_cs_chunk *ib_chunk; - struct radeon_ib fake_ib; + struct radeon_ib fake_ib; + struct r600_cs_track *track; int r; + /* initialize tracker */ + track = kzalloc(sizeof(*track), GFP_KERNEL); + if (track == NULL) + return -ENOMEM; + r600_cs_track_init(track); + r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size); /* initialize parser */ memset(&parser, 0, sizeof(struct radeon_cs_parser)); parser.filp = filp; @@ -843,6 +1417,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp, parser.rdev = NULL; parser.family = family; parser.ib = &fake_ib; + parser.track = track; fake_ib.ptr = ib; r = radeon_cs_parser_init(&parser, data); if (r) { diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 30480881aed..5b2e4d44282 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -883,6 +883,16 @@ #define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 +#define R_028C04_PA_SC_AA_CONFIG 0x028C04 +#define S_028C04_MSAA_NUM_SAMPLES(x) (((x) & 0x3) << 0) +#define G_028C04_MSAA_NUM_SAMPLES(x) (((x) >> 0) & 0x3) +#define C_028C04_MSAA_NUM_SAMPLES 0xFFFFFFFC +#define S_028C04_AA_MASK_CENTROID_DTMN(x) (((x) & 0x1) << 4) +#define G_028C04_AA_MASK_CENTROID_DTMN(x) (((x) >> 4) & 0x1) +#define C_028C04_AA_MASK_CENTROID_DTMN 0xFFFFFFEF +#define S_028C04_MAX_SAMPLE_DIST(x) (((x) & 0xF) << 13) +#define G_028C04_MAX_SAMPLE_DIST(x) (((x) >> 13) & 0xF) +#define C_028C04_MAX_SAMPLE_DIST 0xFFFE1FFF #define R_0280E0_CB_COLOR0_FRAG 0x0280E0 #define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0) #define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF) @@ -905,6 +915,461 @@ #define R_0280D4_CB_COLOR5_TILE 0x0280D4 #define R_0280D8_CB_COLOR6_TILE 0x0280D8 #define R_0280DC_CB_COLOR7_TILE 0x0280DC - +#define R_0280A0_CB_COLOR0_INFO 0x0280A0 
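As a quick illustration of the S_/G_/C_ bitfield convention used by the new r600d.h definitions (S_ packs a field, G_ extracts it, C_ is the clear mask), here is a minimal sketch with hypothetical helper names; it only assumes the S_028C04_/G_028C04_/C_028C04_MSAA_NUM_SAMPLES macros added above plus the kernel u32 type, and is not part of the patch itself:

static u32 pa_sc_aa_config_set_log2_samples(u32 reg_val, u32 log2_samples)
{
	/* clear the old MSAA_NUM_SAMPLES field, then OR in the new value */
	reg_val &= C_028C04_MSAA_NUM_SAMPLES;
	reg_val |= S_028C04_MSAA_NUM_SAMPLES(log2_samples);
	return reg_val;
}

static u32 pa_sc_aa_config_get_samples(u32 reg_val)
{
	/* the field stores log2(samples); 1 << log2 mirrors how
	 * r600_cs_check_reg() derives track->nsamples */
	return 1u << G_028C04_MSAA_NUM_SAMPLES(reg_val);
}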
+#define S_0280A0_ENDIAN(x) (((x) & 0x3) << 0) +#define G_0280A0_ENDIAN(x) (((x) >> 0) & 0x3) +#define C_0280A0_ENDIAN 0xFFFFFFFC +#define S_0280A0_FORMAT(x) (((x) & 0x3F) << 2) +#define G_0280A0_FORMAT(x) (((x) >> 2) & 0x3F) +#define C_0280A0_FORMAT 0xFFFFFF03 +#define V_0280A0_COLOR_INVALID 0x00000000 +#define V_0280A0_COLOR_8 0x00000001 +#define V_0280A0_COLOR_4_4 0x00000002 +#define V_0280A0_COLOR_3_3_2 0x00000003 +#define V_0280A0_COLOR_16 0x00000005 +#define V_0280A0_COLOR_16_FLOAT 0x00000006 +#define V_0280A0_COLOR_8_8 0x00000007 +#define V_0280A0_COLOR_5_6_5 0x00000008 +#define V_0280A0_COLOR_6_5_5 0x00000009 +#define V_0280A0_COLOR_1_5_5_5 0x0000000A +#define V_0280A0_COLOR_4_4_4_4 0x0000000B +#define V_0280A0_COLOR_5_5_5_1 0x0000000C +#define V_0280A0_COLOR_32 0x0000000D +#define V_0280A0_COLOR_32_FLOAT 0x0000000E +#define V_0280A0_COLOR_16_16 0x0000000F +#define V_0280A0_COLOR_16_16_FLOAT 0x00000010 +#define V_0280A0_COLOR_8_24 0x00000011 +#define V_0280A0_COLOR_8_24_FLOAT 0x00000012 +#define V_0280A0_COLOR_24_8 0x00000013 +#define V_0280A0_COLOR_24_8_FLOAT 0x00000014 +#define V_0280A0_COLOR_10_11_11 0x00000015 +#define V_0280A0_COLOR_10_11_11_FLOAT 0x00000016 +#define V_0280A0_COLOR_11_11_10 0x00000017 +#define V_0280A0_COLOR_11_11_10_FLOAT 0x00000018 +#define V_0280A0_COLOR_2_10_10_10 0x00000019 +#define V_0280A0_COLOR_8_8_8_8 0x0000001A +#define V_0280A0_COLOR_10_10_10_2 0x0000001B +#define V_0280A0_COLOR_X24_8_32_FLOAT 0x0000001C +#define V_0280A0_COLOR_32_32 0x0000001D +#define V_0280A0_COLOR_32_32_FLOAT 0x0000001E +#define V_0280A0_COLOR_16_16_16_16 0x0000001F +#define V_0280A0_COLOR_16_16_16_16_FLOAT 0x00000020 +#define V_0280A0_COLOR_32_32_32_32 0x00000022 +#define V_0280A0_COLOR_32_32_32_32_FLOAT 0x00000023 +#define S_0280A0_ARRAY_MODE(x) (((x) & 0xF) << 8) +#define G_0280A0_ARRAY_MODE(x) (((x) >> 8) & 0xF) +#define C_0280A0_ARRAY_MODE 0xFFFFF0FF +#define V_0280A0_ARRAY_LINEAR_GENERAL 0x00000000 +#define V_0280A0_ARRAY_LINEAR_ALIGNED 0x00000001 +#define V_0280A0_ARRAY_1D_TILED_THIN1 0x00000002 +#define V_0280A0_ARRAY_2D_TILED_THIN1 0x00000004 +#define S_0280A0_NUMBER_TYPE(x) (((x) & 0x7) << 12) +#define G_0280A0_NUMBER_TYPE(x) (((x) >> 12) & 0x7) +#define C_0280A0_NUMBER_TYPE 0xFFFF8FFF +#define S_0280A0_READ_SIZE(x) (((x) & 0x1) << 15) +#define G_0280A0_READ_SIZE(x) (((x) >> 15) & 0x1) +#define C_0280A0_READ_SIZE 0xFFFF7FFF +#define S_0280A0_COMP_SWAP(x) (((x) & 0x3) << 16) +#define G_0280A0_COMP_SWAP(x) (((x) >> 16) & 0x3) +#define C_0280A0_COMP_SWAP 0xFFFCFFFF +#define S_0280A0_TILE_MODE(x) (((x) & 0x3) << 18) +#define G_0280A0_TILE_MODE(x) (((x) >> 18) & 0x3) +#define C_0280A0_TILE_MODE 0xFFF3FFFF +#define S_0280A0_BLEND_CLAMP(x) (((x) & 0x1) << 20) +#define G_0280A0_BLEND_CLAMP(x) (((x) >> 20) & 0x1) +#define C_0280A0_BLEND_CLAMP 0xFFEFFFFF +#define S_0280A0_CLEAR_COLOR(x) (((x) & 0x1) << 21) +#define G_0280A0_CLEAR_COLOR(x) (((x) >> 21) & 0x1) +#define C_0280A0_CLEAR_COLOR 0xFFDFFFFF +#define S_0280A0_BLEND_BYPASS(x) (((x) & 0x1) << 22) +#define G_0280A0_BLEND_BYPASS(x) (((x) >> 22) & 0x1) +#define C_0280A0_BLEND_BYPASS 0xFFBFFFFF +#define S_0280A0_BLEND_FLOAT32(x) (((x) & 0x1) << 23) +#define G_0280A0_BLEND_FLOAT32(x) (((x) >> 23) & 0x1) +#define C_0280A0_BLEND_FLOAT32 0xFF7FFFFF +#define S_0280A0_SIMPLE_FLOAT(x) (((x) & 0x1) << 24) +#define G_0280A0_SIMPLE_FLOAT(x) (((x) >> 24) & 0x1) +#define C_0280A0_SIMPLE_FLOAT 0xFEFFFFFF +#define S_0280A0_ROUND_MODE(x) (((x) & 0x1) << 25) +#define G_0280A0_ROUND_MODE(x) (((x) >> 25) & 0x1) +#define C_0280A0_ROUND_MODE 
0xFDFFFFFF +#define S_0280A0_TILE_COMPACT(x) (((x) & 0x1) << 26) +#define G_0280A0_TILE_COMPACT(x) (((x) >> 26) & 0x1) +#define C_0280A0_TILE_COMPACT 0xFBFFFFFF +#define S_0280A0_SOURCE_FORMAT(x) (((x) & 0x1) << 27) +#define G_0280A0_SOURCE_FORMAT(x) (((x) >> 27) & 0x1) +#define C_0280A0_SOURCE_FORMAT 0xF7FFFFFF +#define R_0280A4_CB_COLOR1_INFO 0x0280A4 +#define R_0280A8_CB_COLOR2_INFO 0x0280A8 +#define R_0280AC_CB_COLOR3_INFO 0x0280AC +#define R_0280B0_CB_COLOR4_INFO 0x0280B0 +#define R_0280B4_CB_COLOR5_INFO 0x0280B4 +#define R_0280B8_CB_COLOR6_INFO 0x0280B8 +#define R_0280BC_CB_COLOR7_INFO 0x0280BC +#define R_028060_CB_COLOR0_SIZE 0x028060 +#define S_028060_PITCH_TILE_MAX(x) (((x) & 0x3FF) << 0) +#define G_028060_PITCH_TILE_MAX(x) (((x) >> 0) & 0x3FF) +#define C_028060_PITCH_TILE_MAX 0xFFFFFC00 +#define S_028060_SLICE_TILE_MAX(x) (((x) & 0xFFFFF) << 10) +#define G_028060_SLICE_TILE_MAX(x) (((x) >> 10) & 0xFFFFF) +#define C_028060_SLICE_TILE_MAX 0xC00003FF +#define R_028064_CB_COLOR1_SIZE 0x028064 +#define R_028068_CB_COLOR2_SIZE 0x028068 +#define R_02806C_CB_COLOR3_SIZE 0x02806C +#define R_028070_CB_COLOR4_SIZE 0x028070 +#define R_028074_CB_COLOR5_SIZE 0x028074 +#define R_028078_CB_COLOR6_SIZE 0x028078 +#define R_02807C_CB_COLOR7_SIZE 0x02807C +#define R_028238_CB_TARGET_MASK 0x028238 +#define S_028238_TARGET0_ENABLE(x) (((x) & 0xF) << 0) +#define G_028238_TARGET0_ENABLE(x) (((x) >> 0) & 0xF) +#define C_028238_TARGET0_ENABLE 0xFFFFFFF0 +#define S_028238_TARGET1_ENABLE(x) (((x) & 0xF) << 4) +#define G_028238_TARGET1_ENABLE(x) (((x) >> 4) & 0xF) +#define C_028238_TARGET1_ENABLE 0xFFFFFF0F +#define S_028238_TARGET2_ENABLE(x) (((x) & 0xF) << 8) +#define G_028238_TARGET2_ENABLE(x) (((x) >> 8) & 0xF) +#define C_028238_TARGET2_ENABLE 0xFFFFF0FF +#define S_028238_TARGET3_ENABLE(x) (((x) & 0xF) << 12) +#define G_028238_TARGET3_ENABLE(x) (((x) >> 12) & 0xF) +#define C_028238_TARGET3_ENABLE 0xFFFF0FFF +#define S_028238_TARGET4_ENABLE(x) (((x) & 0xF) << 16) +#define G_028238_TARGET4_ENABLE(x) (((x) >> 16) & 0xF) +#define C_028238_TARGET4_ENABLE 0xFFF0FFFF +#define S_028238_TARGET5_ENABLE(x) (((x) & 0xF) << 20) +#define G_028238_TARGET5_ENABLE(x) (((x) >> 20) & 0xF) +#define C_028238_TARGET5_ENABLE 0xFF0FFFFF +#define S_028238_TARGET6_ENABLE(x) (((x) & 0xF) << 24) +#define G_028238_TARGET6_ENABLE(x) (((x) >> 24) & 0xF) +#define C_028238_TARGET6_ENABLE 0xF0FFFFFF +#define S_028238_TARGET7_ENABLE(x) (((x) & 0xF) << 28) +#define G_028238_TARGET7_ENABLE(x) (((x) >> 28) & 0xF) +#define C_028238_TARGET7_ENABLE 0x0FFFFFFF +#define R_02823C_CB_SHADER_MASK 0x02823C +#define S_02823C_OUTPUT0_ENABLE(x) (((x) & 0xF) << 0) +#define G_02823C_OUTPUT0_ENABLE(x) (((x) >> 0) & 0xF) +#define C_02823C_OUTPUT0_ENABLE 0xFFFFFFF0 +#define S_02823C_OUTPUT1_ENABLE(x) (((x) & 0xF) << 4) +#define G_02823C_OUTPUT1_ENABLE(x) (((x) >> 4) & 0xF) +#define C_02823C_OUTPUT1_ENABLE 0xFFFFFF0F +#define S_02823C_OUTPUT2_ENABLE(x) (((x) & 0xF) << 8) +#define G_02823C_OUTPUT2_ENABLE(x) (((x) >> 8) & 0xF) +#define C_02823C_OUTPUT2_ENABLE 0xFFFFF0FF +#define S_02823C_OUTPUT3_ENABLE(x) (((x) & 0xF) << 12) +#define G_02823C_OUTPUT3_ENABLE(x) (((x) >> 12) & 0xF) +#define C_02823C_OUTPUT3_ENABLE 0xFFFF0FFF +#define S_02823C_OUTPUT4_ENABLE(x) (((x) & 0xF) << 16) +#define G_02823C_OUTPUT4_ENABLE(x) (((x) >> 16) & 0xF) +#define C_02823C_OUTPUT4_ENABLE 0xFFF0FFFF +#define S_02823C_OUTPUT5_ENABLE(x) (((x) & 0xF) << 20) +#define G_02823C_OUTPUT5_ENABLE(x) (((x) >> 20) & 0xF) +#define C_02823C_OUTPUT5_ENABLE 0xFF0FFFFF +#define 
S_02823C_OUTPUT6_ENABLE(x) (((x) & 0xF) << 24) +#define G_02823C_OUTPUT6_ENABLE(x) (((x) >> 24) & 0xF) +#define C_02823C_OUTPUT6_ENABLE 0xF0FFFFFF +#define S_02823C_OUTPUT7_ENABLE(x) (((x) & 0xF) << 28) +#define G_02823C_OUTPUT7_ENABLE(x) (((x) >> 28) & 0xF) +#define C_02823C_OUTPUT7_ENABLE 0x0FFFFFFF +#define R_028AB0_VGT_STRMOUT_EN 0x028AB0 +#define S_028AB0_STREAMOUT(x) (((x) & 0x1) << 0) +#define G_028AB0_STREAMOUT(x) (((x) >> 0) & 0x1) +#define C_028AB0_STREAMOUT 0xFFFFFFFE +#define R_028B20_VGT_STRMOUT_BUFFER_EN 0x028B20 +#define S_028B20_BUFFER_0_EN(x) (((x) & 0x1) << 0) +#define G_028B20_BUFFER_0_EN(x) (((x) >> 0) & 0x1) +#define C_028B20_BUFFER_0_EN 0xFFFFFFFE +#define S_028B20_BUFFER_1_EN(x) (((x) & 0x1) << 1) +#define G_028B20_BUFFER_1_EN(x) (((x) >> 1) & 0x1) +#define C_028B20_BUFFER_1_EN 0xFFFFFFFD +#define S_028B20_BUFFER_2_EN(x) (((x) & 0x1) << 2) +#define G_028B20_BUFFER_2_EN(x) (((x) >> 2) & 0x1) +#define C_028B20_BUFFER_2_EN 0xFFFFFFFB +#define S_028B20_BUFFER_3_EN(x) (((x) & 0x1) << 3) +#define G_028B20_BUFFER_3_EN(x) (((x) >> 3) & 0x1) +#define C_028B20_BUFFER_3_EN 0xFFFFFFF7 +#define S_028B20_SIZE(x) (((x) & 0xFFFFFFFF) << 0) +#define G_028B20_SIZE(x) (((x) >> 0) & 0xFFFFFFFF) +#define C_028B20_SIZE 0x00000000 +#define R_038000_SQ_TEX_RESOURCE_WORD0_0 0x038000 +#define S_038000_DIM(x) (((x) & 0x7) << 0) +#define G_038000_DIM(x) (((x) >> 0) & 0x7) +#define C_038000_DIM 0xFFFFFFF8 +#define V_038000_SQ_TEX_DIM_1D 0x00000000 +#define V_038000_SQ_TEX_DIM_2D 0x00000001 +#define V_038000_SQ_TEX_DIM_3D 0x00000002 +#define V_038000_SQ_TEX_DIM_CUBEMAP 0x00000003 +#define V_038000_SQ_TEX_DIM_1D_ARRAY 0x00000004 +#define V_038000_SQ_TEX_DIM_2D_ARRAY 0x00000005 +#define V_038000_SQ_TEX_DIM_2D_MSAA 0x00000006 +#define V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA 0x00000007 +#define S_038000_TILE_MODE(x) (((x) & 0xF) << 3) +#define G_038000_TILE_MODE(x) (((x) >> 3) & 0xF) +#define C_038000_TILE_MODE 0xFFFFFF87 +#define S_038000_TILE_TYPE(x) (((x) & 0x1) << 7) +#define G_038000_TILE_TYPE(x) (((x) >> 7) & 0x1) +#define C_038000_TILE_TYPE 0xFFFFFF7F +#define S_038000_PITCH(x) (((x) & 0x7FF) << 8) +#define G_038000_PITCH(x) (((x) >> 8) & 0x7FF) +#define C_038000_PITCH 0xFFF800FF +#define S_038000_TEX_WIDTH(x) (((x) & 0x1FFF) << 19) +#define G_038000_TEX_WIDTH(x) (((x) >> 19) & 0x1FFF) +#define C_038000_TEX_WIDTH 0x0007FFFF +#define R_038004_SQ_TEX_RESOURCE_WORD1_0 0x038004 +#define S_038004_TEX_HEIGHT(x) (((x) & 0x1FFF) << 0) +#define G_038004_TEX_HEIGHT(x) (((x) >> 0) & 0x1FFF) +#define C_038004_TEX_HEIGHT 0xFFFFE000 +#define S_038004_TEX_DEPTH(x) (((x) & 0x1FFF) << 13) +#define G_038004_TEX_DEPTH(x) (((x) >> 13) & 0x1FFF) +#define C_038004_TEX_DEPTH 0xFC001FFF +#define S_038004_DATA_FORMAT(x) (((x) & 0x3F) << 26) +#define G_038004_DATA_FORMAT(x) (((x) >> 26) & 0x3F) +#define C_038004_DATA_FORMAT 0x03FFFFFF +#define V_038004_COLOR_INVALID 0x00000000 +#define V_038004_COLOR_8 0x00000001 +#define V_038004_COLOR_4_4 0x00000002 +#define V_038004_COLOR_3_3_2 0x00000003 +#define V_038004_COLOR_16 0x00000005 +#define V_038004_COLOR_16_FLOAT 0x00000006 +#define V_038004_COLOR_8_8 0x00000007 +#define V_038004_COLOR_5_6_5 0x00000008 +#define V_038004_COLOR_6_5_5 0x00000009 +#define V_038004_COLOR_1_5_5_5 0x0000000A +#define V_038004_COLOR_4_4_4_4 0x0000000B +#define V_038004_COLOR_5_5_5_1 0x0000000C +#define V_038004_COLOR_32 0x0000000D +#define V_038004_COLOR_32_FLOAT 0x0000000E +#define V_038004_COLOR_16_16 0x0000000F +#define V_038004_COLOR_16_16_FLOAT 0x00000010 +#define V_038004_COLOR_8_24 0x00000011 
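To show how these V_038004_* format codes feed the checker's size math, here is a small worked sketch (illustrative numbers only, assuming the r600_bpe_from_format()/r600_texture_size() logic added to r600_cs.c above; not part of the patch):

/*
 * A 100x64 2D texture with DATA_FORMAT = V_038004_COLOR_8_8_8_8:
 * r600_bpe_from_format() returns bpe = 4, and the checker pads each
 * row to a 256-byte boundary, so for level 0
 *   rowstride = ((100 * 4) + 255) & ~255 = 512 bytes
 *   l0_size   = 512 * 64 * 1             = 32768 bytes
 * r600_check_texture_resource() then rejects the resource if l0_size
 * plus the offset taken from the texture resource words exceeds the
 * texture BO size (and likewise mipmap_size against the mipmap BO).
 */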
+#define V_038004_COLOR_8_24_FLOAT 0x00000012 +#define V_038004_COLOR_24_8 0x00000013 +#define V_038004_COLOR_24_8_FLOAT 0x00000014 +#define V_038004_COLOR_10_11_11 0x00000015 +#define V_038004_COLOR_10_11_11_FLOAT 0x00000016 +#define V_038004_COLOR_11_11_10 0x00000017 +#define V_038004_COLOR_11_11_10_FLOAT 0x00000018 +#define V_038004_COLOR_2_10_10_10 0x00000019 +#define V_038004_COLOR_8_8_8_8 0x0000001A +#define V_038004_COLOR_10_10_10_2 0x0000001B +#define V_038004_COLOR_X24_8_32_FLOAT 0x0000001C +#define V_038004_COLOR_32_32 0x0000001D +#define V_038004_COLOR_32_32_FLOAT 0x0000001E +#define V_038004_COLOR_16_16_16_16 0x0000001F +#define V_038004_COLOR_16_16_16_16_FLOAT 0x00000020 +#define V_038004_COLOR_32_32_32_32 0x00000022 +#define V_038004_COLOR_32_32_32_32_FLOAT 0x00000023 +#define V_038004_FMT_1 0x00000025 +#define V_038004_FMT_GB_GR 0x00000027 +#define V_038004_FMT_BG_RG 0x00000028 +#define V_038004_FMT_32_AS_8 0x00000029 +#define V_038004_FMT_32_AS_8_8 0x0000002A +#define V_038004_FMT_5_9_9_9_SHAREDEXP 0x0000002B +#define V_038004_FMT_8_8_8 0x0000002C +#define V_038004_FMT_16_16_16 0x0000002D +#define V_038004_FMT_16_16_16_FLOAT 0x0000002E +#define V_038004_FMT_32_32_32 0x0000002F +#define V_038004_FMT_32_32_32_FLOAT 0x00000030 +#define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010 +#define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0) +#define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3) +#define C_038010_FORMAT_COMP_X 0xFFFFFFFC +#define S_038010_FORMAT_COMP_Y(x) (((x) & 0x3) << 2) +#define G_038010_FORMAT_COMP_Y(x) (((x) >> 2) & 0x3) +#define C_038010_FORMAT_COMP_Y 0xFFFFFFF3 +#define S_038010_FORMAT_COMP_Z(x) (((x) & 0x3) << 4) +#define G_038010_FORMAT_COMP_Z(x) (((x) >> 4) & 0x3) +#define C_038010_FORMAT_COMP_Z 0xFFFFFFCF +#define S_038010_FORMAT_COMP_W(x) (((x) & 0x3) << 6) +#define G_038010_FORMAT_COMP_W(x) (((x) >> 6) & 0x3) +#define C_038010_FORMAT_COMP_W 0xFFFFFF3F +#define S_038010_NUM_FORMAT_ALL(x) (((x) & 0x3) << 8) +#define G_038010_NUM_FORMAT_ALL(x) (((x) >> 8) & 0x3) +#define C_038010_NUM_FORMAT_ALL 0xFFFFFCFF +#define S_038010_SRF_MODE_ALL(x) (((x) & 0x1) << 10) +#define G_038010_SRF_MODE_ALL(x) (((x) >> 10) & 0x1) +#define C_038010_SRF_MODE_ALL 0xFFFFFBFF +#define S_038010_FORCE_DEGAMMA(x) (((x) & 0x1) << 11) +#define G_038010_FORCE_DEGAMMA(x) (((x) >> 11) & 0x1) +#define C_038010_FORCE_DEGAMMA 0xFFFFF7FF +#define S_038010_ENDIAN_SWAP(x) (((x) & 0x3) << 12) +#define G_038010_ENDIAN_SWAP(x) (((x) >> 12) & 0x3) +#define C_038010_ENDIAN_SWAP 0xFFFFCFFF +#define S_038010_REQUEST_SIZE(x) (((x) & 0x3) << 14) +#define G_038010_REQUEST_SIZE(x) (((x) >> 14) & 0x3) +#define C_038010_REQUEST_SIZE 0xFFFF3FFF +#define S_038010_DST_SEL_X(x) (((x) & 0x7) << 16) +#define G_038010_DST_SEL_X(x) (((x) >> 16) & 0x7) +#define C_038010_DST_SEL_X 0xFFF8FFFF +#define S_038010_DST_SEL_Y(x) (((x) & 0x7) << 19) +#define G_038010_DST_SEL_Y(x) (((x) >> 19) & 0x7) +#define C_038010_DST_SEL_Y 0xFFC7FFFF +#define S_038010_DST_SEL_Z(x) (((x) & 0x7) << 22) +#define G_038010_DST_SEL_Z(x) (((x) >> 22) & 0x7) +#define C_038010_DST_SEL_Z 0xFE3FFFFF +#define S_038010_DST_SEL_W(x) (((x) & 0x7) << 25) +#define G_038010_DST_SEL_W(x) (((x) >> 25) & 0x7) +#define C_038010_DST_SEL_W 0xF1FFFFFF +#define S_038010_BASE_LEVEL(x) (((x) & 0xF) << 28) +#define G_038010_BASE_LEVEL(x) (((x) >> 28) & 0xF) +#define C_038010_BASE_LEVEL 0x0FFFFFFF +#define R_038014_SQ_TEX_RESOURCE_WORD5_0 0x038014 +#define S_038014_LAST_LEVEL(x) (((x) & 0xF) << 0) +#define G_038014_LAST_LEVEL(x) (((x) >> 0) & 0xF) +#define 
C_038014_LAST_LEVEL 0xFFFFFFF0 +#define S_038014_BASE_ARRAY(x) (((x) & 0x1FFF) << 4) +#define G_038014_BASE_ARRAY(x) (((x) >> 4) & 0x1FFF) +#define C_038014_BASE_ARRAY 0xFFFE000F +#define S_038014_LAST_ARRAY(x) (((x) & 0x1FFF) << 17) +#define G_038014_LAST_ARRAY(x) (((x) >> 17) & 0x1FFF) +#define C_038014_LAST_ARRAY 0xC001FFFF +#define R_0288A8_SQ_ESGS_RING_ITEMSIZE 0x0288A8 +#define S_0288A8_ITEMSIZE(x) (((x) & 0x7FFF) << 0) +#define G_0288A8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF) +#define C_0288A8_ITEMSIZE 0xFFFF8000 +#define R_008C44_SQ_ESGS_RING_SIZE 0x008C44 +#define S_008C44_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0) +#define G_008C44_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF) +#define C_008C44_MEM_SIZE 0x00000000 +#define R_0288B0_SQ_ESTMP_RING_ITEMSIZE 0x0288B0 +#define S_0288B0_ITEMSIZE(x) (((x) & 0x7FFF) << 0) +#define G_0288B0_ITEMSIZE(x) (((x) >> 0) & 0x7FFF) +#define C_0288B0_ITEMSIZE 0xFFFF8000 +#define R_008C54_SQ_ESTMP_RING_SIZE 0x008C54 +#define S_008C54_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0) +#define G_008C54_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF) +#define C_008C54_MEM_SIZE 0x00000000 +#define R_0288C0_SQ_FBUF_RING_ITEMSIZE 0x0288C0 +#define S_0288C0_ITEMSIZE(x) (((x) & 0x7FFF) << 0) +#define G_0288C0_ITEMSIZE(x) (((x) >> 0) & 0x7FFF) +#define C_0288C0_ITEMSIZE 0xFFFF8000 +#define R_008C74_SQ_FBUF_RING_SIZE 0x008C74 +#define S_008C74_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0) +#define G_008C74_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF) +#define C_008C74_MEM_SIZE 0x00000000 +#define R_0288B4_SQ_GSTMP_RING_ITEMSIZE 0x0288B4 +#define S_0288B4_ITEMSIZE(x) (((x) & 0x7FFF) << 0) +#define G_0288B4_ITEMSIZE(x) (((x) >> 0) & 0x7FFF) +#define C_0288B4_ITEMSIZE 0xFFFF8000 +#define R_008C5C_SQ_GSTMP_RING_SIZE 0x008C5C +#define S_008C5C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0) +#define G_008C5C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF) +#define C_008C5C_MEM_SIZE 0x00000000 +#define R_0288AC_SQ_GSVS_RING_ITEMSIZE 0x0288AC +#define S_0288AC_ITEMSIZE(x) (((x) & 0x7FFF) << 0) +#define G_0288AC_ITEMSIZE(x) (((x) >> 0) & 0x7FFF) +#define C_0288AC_ITEMSIZE 0xFFFF8000 +#define R_008C4C_SQ_GSVS_RING_SIZE 0x008C4C +#define S_008C4C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0) +#define G_008C4C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF) +#define C_008C4C_MEM_SIZE 0x00000000 +#define R_0288BC_SQ_PSTMP_RING_ITEMSIZE 0x0288BC +#define S_0288BC_ITEMSIZE(x) (((x) & 0x7FFF) << 0) +#define G_0288BC_ITEMSIZE(x) (((x) >> 0) & 0x7FFF) +#define C_0288BC_ITEMSIZE 0xFFFF8000 +#define R_008C6C_SQ_PSTMP_RING_SIZE 0x008C6C +#define S_008C6C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0) +#define G_008C6C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF) +#define C_008C6C_MEM_SIZE 0x00000000 +#define R_0288C4_SQ_REDUC_RING_ITEMSIZE 0x0288C4 +#define S_0288C4_ITEMSIZE(x) (((x) & 0x7FFF) << 0) +#define G_0288C4_ITEMSIZE(x) (((x) >> 0) & 0x7FFF) +#define C_0288C4_ITEMSIZE 0xFFFF8000 +#define R_008C7C_SQ_REDUC_RING_SIZE 0x008C7C +#define S_008C7C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0) +#define G_008C7C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF) +#define C_008C7C_MEM_SIZE 0x00000000 +#define R_0288B8_SQ_VSTMP_RING_ITEMSIZE 0x0288B8 +#define S_0288B8_ITEMSIZE(x) (((x) & 0x7FFF) << 0) +#define G_0288B8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF) +#define C_0288B8_ITEMSIZE 0xFFFF8000 +#define R_008C64_SQ_VSTMP_RING_SIZE 0x008C64 +#define S_008C64_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0) +#define G_008C64_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF) +#define C_008C64_MEM_SIZE 0x00000000 +#define R_0288C8_SQ_GS_VERT_ITEMSIZE 0x0288C8 +#define S_0288C8_ITEMSIZE(x) (((x) & 0x7FFF) << 0) +#define 
G_0288C8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF) +#define C_0288C8_ITEMSIZE 0xFFFF8000 +#define R_028010_DB_DEPTH_INFO 0x028010 +#define S_028010_FORMAT(x) (((x) & 0x7) << 0) +#define G_028010_FORMAT(x) (((x) >> 0) & 0x7) +#define C_028010_FORMAT 0xFFFFFFF8 +#define V_028010_DEPTH_INVALID 0x00000000 +#define V_028010_DEPTH_16 0x00000001 +#define V_028010_DEPTH_X8_24 0x00000002 +#define V_028010_DEPTH_8_24 0x00000003 +#define V_028010_DEPTH_X8_24_FLOAT 0x00000004 +#define V_028010_DEPTH_8_24_FLOAT 0x00000005 +#define V_028010_DEPTH_32_FLOAT 0x00000006 +#define V_028010_DEPTH_X24_8_32_FLOAT 0x00000007 +#define S_028010_READ_SIZE(x) (((x) & 0x1) << 3) +#define G_028010_READ_SIZE(x) (((x) >> 3) & 0x1) +#define C_028010_READ_SIZE 0xFFFFFFF7 +#define S_028010_ARRAY_MODE(x) (((x) & 0xF) << 15) +#define G_028010_ARRAY_MODE(x) (((x) >> 15) & 0xF) +#define C_028010_ARRAY_MODE 0xFFF87FFF +#define S_028010_TILE_SURFACE_ENABLE(x) (((x) & 0x1) << 25) +#define G_028010_TILE_SURFACE_ENABLE(x) (((x) >> 25) & 0x1) +#define C_028010_TILE_SURFACE_ENABLE 0xFDFFFFFF +#define S_028010_TILE_COMPACT(x) (((x) & 0x1) << 26) +#define G_028010_TILE_COMPACT(x) (((x) >> 26) & 0x1) +#define C_028010_TILE_COMPACT 0xFBFFFFFF +#define S_028010_ZRANGE_PRECISION(x) (((x) & 0x1) << 31) +#define G_028010_ZRANGE_PRECISION(x) (((x) >> 31) & 0x1) +#define C_028010_ZRANGE_PRECISION 0x7FFFFFFF +#define R_028000_DB_DEPTH_SIZE 0x028000 +#define S_028000_PITCH_TILE_MAX(x) (((x) & 0x3FF) << 0) +#define G_028000_PITCH_TILE_MAX(x) (((x) >> 0) & 0x3FF) +#define C_028000_PITCH_TILE_MAX 0xFFFFFC00 +#define S_028000_SLICE_TILE_MAX(x) (((x) & 0xFFFFF) << 10) +#define G_028000_SLICE_TILE_MAX(x) (((x) >> 10) & 0xFFFFF) +#define C_028000_SLICE_TILE_MAX 0xC00003FF +#define R_028004_DB_DEPTH_VIEW 0x028004 +#define S_028004_SLICE_START(x) (((x) & 0x7FF) << 0) +#define G_028004_SLICE_START(x) (((x) >> 0) & 0x7FF) +#define C_028004_SLICE_START 0xFFFFF800 +#define S_028004_SLICE_MAX(x) (((x) & 0x7FF) << 13) +#define G_028004_SLICE_MAX(x) (((x) >> 13) & 0x7FF) +#define C_028004_SLICE_MAX 0xFF001FFF +#define R_028800_DB_DEPTH_CONTROL 0x028800 +#define S_028800_STENCIL_ENABLE(x) (((x) & 0x1) << 0) +#define G_028800_STENCIL_ENABLE(x) (((x) >> 0) & 0x1) +#define C_028800_STENCIL_ENABLE 0xFFFFFFFE +#define S_028800_Z_ENABLE(x) (((x) & 0x1) << 1) +#define G_028800_Z_ENABLE(x) (((x) >> 1) & 0x1) +#define C_028800_Z_ENABLE 0xFFFFFFFD +#define S_028800_Z_WRITE_ENABLE(x) (((x) & 0x1) << 2) +#define G_028800_Z_WRITE_ENABLE(x) (((x) >> 2) & 0x1) +#define C_028800_Z_WRITE_ENABLE 0xFFFFFFFB +#define S_028800_ZFUNC(x) (((x) & 0x7) << 4) +#define G_028800_ZFUNC(x) (((x) >> 4) & 0x7) +#define C_028800_ZFUNC 0xFFFFFF8F +#define S_028800_BACKFACE_ENABLE(x) (((x) & 0x1) << 7) +#define G_028800_BACKFACE_ENABLE(x) (((x) >> 7) & 0x1) +#define C_028800_BACKFACE_ENABLE 0xFFFFFF7F +#define S_028800_STENCILFUNC(x) (((x) & 0x7) << 8) +#define G_028800_STENCILFUNC(x) (((x) >> 8) & 0x7) +#define C_028800_STENCILFUNC 0xFFFFF8FF +#define S_028800_STENCILFAIL(x) (((x) & 0x7) << 11) +#define G_028800_STENCILFAIL(x) (((x) >> 11) & 0x7) +#define C_028800_STENCILFAIL 0xFFFFC7FF +#define S_028800_STENCILZPASS(x) (((x) & 0x7) << 14) +#define G_028800_STENCILZPASS(x) (((x) >> 14) & 0x7) +#define C_028800_STENCILZPASS 0xFFFE3FFF +#define S_028800_STENCILZFAIL(x) (((x) & 0x7) << 17) +#define G_028800_STENCILZFAIL(x) (((x) >> 17) & 0x7) +#define C_028800_STENCILZFAIL 0xFFF1FFFF +#define S_028800_STENCILFUNC_BF(x) (((x) & 0x7) << 20) +#define G_028800_STENCILFUNC_BF(x) (((x) >> 20) & 0x7) +#define 
C_028800_STENCILFUNC_BF 0xFF8FFFFF +#define S_028800_STENCILFAIL_BF(x) (((x) & 0x7) << 23) +#define G_028800_STENCILFAIL_BF(x) (((x) >> 23) & 0x7) +#define C_028800_STENCILFAIL_BF 0xFC7FFFFF +#define S_028800_STENCILZPASS_BF(x) (((x) & 0x7) << 26) +#define G_028800_STENCILZPASS_BF(x) (((x) >> 26) & 0x7) +#define C_028800_STENCILZPASS_BF 0xE3FFFFFF +#define S_028800_STENCILZFAIL_BF(x) (((x) & 0x7) << 29) +#define G_028800_STENCILZFAIL_BF(x) (((x) >> 29) & 0x7) +#define C_028800_STENCILZFAIL_BF 0x1FFFFFFF #endif diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index c0356bb193e..829e26e8a4b 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -89,6 +89,7 @@ extern int radeon_testing; extern int radeon_connector_table; extern int radeon_tv; extern int radeon_new_pll; +extern int radeon_dynpm; extern int radeon_audio; /* @@ -118,6 +119,21 @@ struct radeon_device; /* * BIOS. */ +#define ATRM_BIOS_PAGE 4096 + +#if defined(CONFIG_VGA_SWITCHEROO) +bool radeon_atrm_supported(struct pci_dev *pdev); +int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len); +#else +static inline bool radeon_atrm_supported(struct pci_dev *pdev) +{ + return false; +} + +static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len){ + return -EINVAL; +} +#endif bool radeon_get_bios(struct radeon_device *rdev); @@ -138,17 +154,23 @@ void radeon_dummy_page_fini(struct radeon_device *rdev); struct radeon_clock { struct radeon_pll p1pll; struct radeon_pll p2pll; + struct radeon_pll dcpll; struct radeon_pll spll; struct radeon_pll mpll; /* 10 Khz units */ uint32_t default_mclk; uint32_t default_sclk; + uint32_t default_dispclk; + uint32_t dp_extclk; }; /* * Power management */ int radeon_pm_init(struct radeon_device *rdev); +void radeon_pm_compute_clocks(struct radeon_device *rdev); +void radeon_combios_get_power_modes(struct radeon_device *rdev); +void radeon_atombios_get_power_modes(struct radeon_device *rdev); /* * Fences. 
@@ -275,6 +297,7 @@ union radeon_gart_table { }; #define RADEON_GPU_PAGE_SIZE 4096 +#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1) struct radeon_gart { dma_addr_t table_addr; @@ -309,21 +332,19 @@ struct radeon_mc { /* for some chips with <= 32MB we need to lie * about vram size near mc fb location */ u64 mc_vram_size; - u64 gtt_location; + u64 visible_vram_size; u64 gtt_size; u64 gtt_start; u64 gtt_end; - u64 vram_location; u64 vram_start; u64 vram_end; unsigned vram_width; u64 real_vram_size; int vram_mtrr; bool vram_is_ddr; - bool igp_sideport_enabled; + bool igp_sideport_enabled; }; -int radeon_mc_setup(struct radeon_device *rdev); bool radeon_combios_sideport_present(struct radeon_device *rdev); bool radeon_atombios_sideport_present(struct radeon_device *rdev); @@ -348,6 +369,7 @@ struct radeon_irq { bool sw_int; /* FIXME: use a define max crtc rather than hardcode it */ bool crtc_vblank_int[2]; + wait_queue_head_t vblank_queue; /* FIXME: use defines for max hpd/dacs */ bool hpd[6]; spinlock_t sw_lock; @@ -379,6 +401,7 @@ struct radeon_ib { struct radeon_ib_pool { struct mutex mutex; struct radeon_bo *robj; + struct list_head bogus_ib; struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; bool ready; unsigned head_id; @@ -433,6 +456,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib); int radeon_ib_pool_init(struct radeon_device *rdev); void radeon_ib_pool_fini(struct radeon_device *rdev); int radeon_ib_test(struct radeon_device *rdev); +extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib); /* Ring access between begin & end cannot sleep */ void radeon_ring_free_size(struct radeon_device *rdev); int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw); @@ -570,7 +594,99 @@ struct radeon_wb { * Equation between gpu/memory clock and available bandwidth is hw dependent * (type of memory, bus size, efficiency, ...) 
*/ +enum radeon_pm_state { + PM_STATE_DISABLED, + PM_STATE_MINIMUM, + PM_STATE_PAUSED, + PM_STATE_ACTIVE +}; +enum radeon_pm_action { + PM_ACTION_NONE, + PM_ACTION_MINIMUM, + PM_ACTION_DOWNCLOCK, + PM_ACTION_UPCLOCK +}; + +enum radeon_voltage_type { + VOLTAGE_NONE = 0, + VOLTAGE_GPIO, + VOLTAGE_VDDC, + VOLTAGE_SW +}; + +enum radeon_pm_state_type { + POWER_STATE_TYPE_DEFAULT, + POWER_STATE_TYPE_POWERSAVE, + POWER_STATE_TYPE_BATTERY, + POWER_STATE_TYPE_BALANCED, + POWER_STATE_TYPE_PERFORMANCE, +}; + +enum radeon_pm_clock_mode_type { + POWER_MODE_TYPE_DEFAULT, + POWER_MODE_TYPE_LOW, + POWER_MODE_TYPE_MID, + POWER_MODE_TYPE_HIGH, +}; + +struct radeon_voltage { + enum radeon_voltage_type type; + /* gpio voltage */ + struct radeon_gpio_rec gpio; + u32 delay; /* delay in usec from voltage drop to sclk change */ + bool active_high; /* voltage drop is active when bit is high */ + /* VDDC voltage */ + u8 vddc_id; /* index into vddc voltage table */ + u8 vddci_id; /* index into vddci voltage table */ + bool vddci_enabled; + /* r6xx+ sw */ + u32 voltage; +}; + +struct radeon_pm_non_clock_info { + /* pcie lanes */ + int pcie_lanes; + /* standardized non-clock flags */ + u32 flags; +}; + +struct radeon_pm_clock_info { + /* memory clock */ + u32 mclk; + /* engine clock */ + u32 sclk; + /* voltage info */ + struct radeon_voltage voltage; + /* standardized clock flags - not sure we'll need these */ + u32 flags; +}; + +struct radeon_power_state { + enum radeon_pm_state_type type; + /* XXX: use a define for num clock modes */ + struct radeon_pm_clock_info clock_info[8]; + /* number of valid clock modes in this power state */ + int num_clock_modes; + struct radeon_pm_clock_info *default_clock_mode; + /* non clock info about this state */ + struct radeon_pm_non_clock_info non_clock_info; + bool voltage_drop_active; +}; + +/* + * Some modes are overclocked by very low value, accept them + */ +#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */ + struct radeon_pm { + struct mutex mutex; + struct delayed_work idle_work; + enum radeon_pm_state state; + enum radeon_pm_action planned_action; + unsigned long action_timeout; + bool downclocked; + int active_crtcs; + int req_vblank; fixed20_12 max_bandwidth; fixed20_12 igp_sideport_mclk; fixed20_12 igp_system_mclk; @@ -582,6 +698,15 @@ struct radeon_pm { fixed20_12 core_bandwidth; fixed20_12 sclk; fixed20_12 needed_bandwidth; + /* XXX: use a define for num power modes */ + struct radeon_power_state power_state[8]; + /* number of valid power states */ + int num_power_states; + struct radeon_power_state *current_power_state; + struct radeon_pm_clock_info *current_clock_mode; + struct radeon_power_state *requested_power_state; + struct radeon_pm_clock_info *requested_clock_mode; + struct radeon_power_state *default_power_state; }; @@ -651,6 +776,7 @@ struct radeon_asic { void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock); uint32_t (*get_memory_clock)(struct radeon_device *rdev); void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock); + int (*get_pcie_lanes)(struct radeon_device *rdev); void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); void (*set_clock_gating)(struct radeon_device *rdev, int enable); int (*set_surface_reg)(struct radeon_device *rdev, int reg, @@ -701,6 +827,9 @@ struct r600_asic { unsigned sx_max_export_pos_size; unsigned sx_max_export_smx_size; unsigned sq_num_cf_insts; + unsigned tiling_nbanks; + unsigned tiling_npipes; + unsigned tiling_group_size; }; struct rv770_asic { @@ -721,6 +850,9 @@ struct 
rv770_asic { unsigned sc_prim_fifo_size; unsigned sc_hiz_tile_fifo_size; unsigned sc_earlyz_tile_fifo_fize; + unsigned tiling_nbanks; + unsigned tiling_npipes; + unsigned tiling_group_size; }; union radeon_asic_config { @@ -830,6 +962,8 @@ struct radeon_device { struct r600_ih ih; /* r6/700 interrupt ring */ struct workqueue_struct *wq; struct work_struct hotplug_work; + int num_crtc; /* number of crtcs */ + struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ /* audio stuff */ struct timer_list audio_timer; @@ -838,6 +972,8 @@ struct radeon_device { int audio_bits_per_sample; uint8_t audio_status_bits; uint8_t audio_category_code; + + bool powered_down; }; int radeon_device_init(struct radeon_device *rdev, @@ -895,6 +1031,8 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32 #define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v)) #define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg)) #define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v)) +#define RREG32_PCIE_P(reg) rdev->pciep_rreg(rdev, (reg)) +#define WREG32_PCIE_P(reg, v) rdev->pciep_wreg(rdev, (reg), (v)) #define WREG32_P(reg, val, mask) \ do { \ uint32_t tmp_ = RREG32(reg); \ @@ -956,7 +1094,7 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); #define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600)) #define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620)) #define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730)) - +#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR)) /* * BIOS helpers. @@ -1015,6 +1153,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) #define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) #define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev)) #define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e)) +#define radeon_get_pcie_lanes(rdev) (rdev)->asic->get_pcie_lanes((rdev)) #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s))) @@ -1029,6 +1168,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) /* AGP */ extern void radeon_agp_disable(struct radeon_device *rdev); extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); +extern void radeon_gart_restore(struct radeon_device *rdev); extern int radeon_modeset_init(struct radeon_device *rdev); extern void radeon_modeset_fini(struct radeon_device *rdev); extern bool radeon_card_posted(struct radeon_device *rdev); @@ -1042,6 +1182,10 @@ extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enabl extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain); extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo); +extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base); +extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); +extern int radeon_resume_kms(struct drm_device *dev); +extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ struct r100_mc_save { @@ -1096,7 +1240,7 @@ extern void r200_set_safe_registers(struct radeon_device *rdev); /* 
r300,r350,rv350,rv370,rv380 */ extern void r300_set_reg_safe(struct radeon_device *rdev); extern void r300_mc_program(struct radeon_device *rdev); -extern void r300_vram_info(struct radeon_device *rdev); +extern void r300_mc_init(struct radeon_device *rdev); extern void r300_clock_startup(struct radeon_device *rdev); extern int r300_mc_wait_for_idle(struct radeon_device *rdev); extern int rv370_pcie_gart_init(struct radeon_device *rdev); @@ -1105,7 +1249,6 @@ extern int rv370_pcie_gart_enable(struct radeon_device *rdev); extern void rv370_pcie_gart_disable(struct radeon_device *rdev); /* r420,r423,rv410 */ -extern int r420_mc_init(struct radeon_device *rdev); extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg); extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v); extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev); @@ -1147,13 +1290,13 @@ extern void rs690_line_buffer_adjust(struct radeon_device *rdev, struct drm_display_mode *mode2); /* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */ +extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); extern bool r600_card_posted(struct radeon_device *rdev); extern void r600_cp_stop(struct radeon_device *rdev); extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); extern int r600_cp_resume(struct radeon_device *rdev); extern void r600_cp_fini(struct radeon_device *rdev); extern int r600_count_pipe_bits(uint32_t val); -extern int r600_gart_clear_page(struct radeon_device *rdev, int i); extern int r600_mc_wait_for_idle(struct radeon_device *rdev); extern int r600_pcie_gart_init(struct radeon_device *rdev); extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); @@ -1189,6 +1332,14 @@ extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder, uint8_t status_bits, uint8_t category_code); +/* evergreen */ +struct evergreen_mc_save { + u32 vga_control[6]; + u32 vga_render_control; + u32 vga_hdp_control; + u32 crtc_control[6]; +}; + #include "radeon_object.h" #endif diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c index c0681a5556d..c4457791dff 100644 --- a/drivers/gpu/drm/radeon/radeon_agp.c +++ b/drivers/gpu/drm/radeon/radeon_agp.c @@ -237,6 +237,10 @@ int radeon_agp_init(struct radeon_device *rdev) rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base; rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20; + rdev->mc.gtt_start = rdev->mc.agp_base; + rdev->mc.gtt_end = rdev->mc.gtt_start + rdev->mc.gtt_size - 1; + dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n", + rdev->mc.gtt_size >> 20, rdev->mc.gtt_start, rdev->mc.gtt_end); /* workaround some hw issues */ if (rdev->family < CHIP_R200) { diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 05ee1aeac3f..d3a157b2bcb 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -43,7 +43,7 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); /* - * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 + * r100,rv100,rs100,rv200,rs200 */ extern int r100_init(struct radeon_device *rdev); extern void r100_fini(struct radeon_device *rdev); @@ -108,6 +108,52 @@ static struct radeon_asic r100_asic = { .set_engine_clock = &radeon_legacy_set_engine_clock, .get_memory_clock = &radeon_legacy_get_memory_clock, .set_memory_clock = NULL, + .get_pcie_lanes = NULL, 
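The asic tables in this file gain a get_pcie_lanes hook next to set_pcie_lanes; ASICs that cannot report the link width leave it NULL, and the power-mode parsers added later in this diff test the pointer before going through the radeon_get_pcie_lanes() wrapper, falling back to 16 lanes. A reduced, self-contained sketch of that dispatch pattern (struct definitions trimmed to the one callback):

/* Reduced model of the NULL-guarded asic callback dispatch; only the new
 * get_pcie_lanes hook is kept, everything else is trimmed. */
#include <stdio.h>

struct radeon_device_model;

struct radeon_asic_model {
        int (*get_pcie_lanes)(struct radeon_device_model *rdev);
};

struct radeon_device_model {
        struct radeon_asic_model *asic;
};

#define radeon_get_pcie_lanes(rdev) (rdev)->asic->get_pcie_lanes((rdev))

static int rv370_get_pcie_lanes_stub(struct radeon_device_model *rdev)
{
        (void)rdev;
        return 8;       /* stand-in: the real hook reads the PCIE link-width register */
}

/* mirrors the power-mode parsers: a missing hook means "assume 16 lanes" */
static int report_pcie_lanes(struct radeon_device_model *rdev)
{
        if (rdev->asic->get_pcie_lanes)
                return radeon_get_pcie_lanes(rdev);
        return 16;
}

int main(void)
{
        struct radeon_asic_model r100_style = { .get_pcie_lanes = NULL };
        struct radeon_asic_model r300_style = { .get_pcie_lanes = rv370_get_pcie_lanes_stub };
        struct radeon_device_model a = { &r100_style }, b = { &r300_style };

        printf("no hook: %d lanes, with hook: %d lanes\n",
               report_pcie_lanes(&a), report_pcie_lanes(&b));
        return 0;
}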
+ .set_pcie_lanes = NULL, + .set_clock_gating = &radeon_legacy_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, + .bandwidth_update = &r100_bandwidth_update, + .hpd_init = &r100_hpd_init, + .hpd_fini = &r100_hpd_fini, + .hpd_sense = &r100_hpd_sense, + .hpd_set_polarity = &r100_hpd_set_polarity, + .ioctl_wait_idle = NULL, +}; + +/* + * r200,rv250,rs300,rv280 + */ +extern int r200_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, + uint64_t dst_offset, + unsigned num_pages, + struct radeon_fence *fence); +static struct radeon_asic r200_asic = { + .init = &r100_init, + .fini = &r100_fini, + .suspend = &r100_suspend, + .resume = &r100_resume, + .vga_set_state = &r100_vga_set_state, + .gpu_reset = &r100_gpu_reset, + .gart_tlb_flush = &r100_pci_gart_tlb_flush, + .gart_set_page = &r100_pci_gart_set_page, + .cp_commit = &r100_cp_commit, + .ring_start = &r100_ring_start, + .ring_test = &r100_ring_test, + .ring_ib_execute = &r100_ring_ib_execute, + .irq_set = &r100_irq_set, + .irq_process = &r100_irq_process, + .get_vblank_counter = &r100_get_vblank_counter, + .fence_ring_emit = &r100_fence_ring_emit, + .cs_parse = &r100_cs_parse, + .copy_blit = &r100_copy_blit, + .copy_dma = &r200_copy_dma, + .copy = &r100_copy_blit, + .get_engine_clock = &radeon_legacy_get_engine_clock, + .set_engine_clock = &radeon_legacy_set_engine_clock, + .get_memory_clock = &radeon_legacy_get_memory_clock, + .set_memory_clock = NULL, .set_pcie_lanes = NULL, .set_clock_gating = &radeon_legacy_set_clock_gating, .set_surface_reg = r100_set_surface_reg, @@ -138,11 +184,8 @@ extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg); extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); -extern int r300_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, - uint64_t dst_offset, - unsigned num_pages, - struct radeon_fence *fence); +extern int rv370_get_pcie_lanes(struct radeon_device *rdev); + static struct radeon_asic r300_asic = { .init = &r300_init, .fini = &r300_fini, @@ -162,7 +205,46 @@ static struct radeon_asic r300_asic = { .fence_ring_emit = &r300_fence_ring_emit, .cs_parse = &r300_cs_parse, .copy_blit = &r100_copy_blit, - .copy_dma = &r300_copy_dma, + .copy_dma = &r200_copy_dma, + .copy = &r100_copy_blit, + .get_engine_clock = &radeon_legacy_get_engine_clock, + .set_engine_clock = &radeon_legacy_set_engine_clock, + .get_memory_clock = &radeon_legacy_get_memory_clock, + .set_memory_clock = NULL, + .get_pcie_lanes = &rv370_get_pcie_lanes, + .set_pcie_lanes = &rv370_set_pcie_lanes, + .set_clock_gating = &radeon_legacy_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, + .bandwidth_update = &r100_bandwidth_update, + .hpd_init = &r100_hpd_init, + .hpd_fini = &r100_hpd_fini, + .hpd_sense = &r100_hpd_sense, + .hpd_set_polarity = &r100_hpd_set_polarity, + .ioctl_wait_idle = NULL, +}; + + +static struct radeon_asic r300_asic_pcie = { + .init = &r300_init, + .fini = &r300_fini, + .suspend = &r300_suspend, + .resume = &r300_resume, + .vga_set_state = &r100_vga_set_state, + .gpu_reset = &r300_gpu_reset, + .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, + .gart_set_page = &rv370_pcie_gart_set_page, + .cp_commit = &r100_cp_commit, + .ring_start = &r300_ring_start, + .ring_test = &r100_ring_test, + .ring_ib_execute 
= &r100_ring_ib_execute, + .irq_set = &r100_irq_set, + .irq_process = &r100_irq_process, + .get_vblank_counter = &r100_get_vblank_counter, + .fence_ring_emit = &r300_fence_ring_emit, + .cs_parse = &r300_cs_parse, + .copy_blit = &r100_copy_blit, + .copy_dma = &r200_copy_dma, .copy = &r100_copy_blit, .get_engine_clock = &radeon_legacy_get_engine_clock, .set_engine_clock = &radeon_legacy_set_engine_clock, @@ -206,12 +288,13 @@ static struct radeon_asic r420_asic = { .fence_ring_emit = &r300_fence_ring_emit, .cs_parse = &r300_cs_parse, .copy_blit = &r100_copy_blit, - .copy_dma = &r300_copy_dma, + .copy_dma = &r200_copy_dma, .copy = &r100_copy_blit, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = &radeon_atom_get_memory_clock, .set_memory_clock = &radeon_atom_set_memory_clock, + .get_pcie_lanes = &rv370_get_pcie_lanes, .set_pcie_lanes = &rv370_set_pcie_lanes, .set_clock_gating = &radeon_atom_set_clock_gating, .set_surface_reg = r100_set_surface_reg, @@ -255,12 +338,13 @@ static struct radeon_asic rs400_asic = { .fence_ring_emit = &r300_fence_ring_emit, .cs_parse = &r300_cs_parse, .copy_blit = &r100_copy_blit, - .copy_dma = &r300_copy_dma, + .copy_dma = &r200_copy_dma, .copy = &r100_copy_blit, .get_engine_clock = &radeon_legacy_get_engine_clock, .set_engine_clock = &radeon_legacy_set_engine_clock, .get_memory_clock = &radeon_legacy_get_memory_clock, .set_memory_clock = NULL, + .get_pcie_lanes = NULL, .set_pcie_lanes = NULL, .set_clock_gating = &radeon_legacy_set_clock_gating, .set_surface_reg = r100_set_surface_reg, @@ -314,14 +398,17 @@ static struct radeon_asic rs600_asic = { .fence_ring_emit = &r300_fence_ring_emit, .cs_parse = &r300_cs_parse, .copy_blit = &r100_copy_blit, - .copy_dma = &r300_copy_dma, + .copy_dma = &r200_copy_dma, .copy = &r100_copy_blit, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = &radeon_atom_get_memory_clock, .set_memory_clock = &radeon_atom_set_memory_clock, + .get_pcie_lanes = NULL, .set_pcie_lanes = NULL, .set_clock_gating = &radeon_atom_set_clock_gating, + .set_surface_reg = r100_set_surface_reg, + .clear_surface_reg = r100_clear_surface_reg, .bandwidth_update = &rs600_bandwidth_update, .hpd_init = &rs600_hpd_init, .hpd_fini = &rs600_hpd_fini, @@ -360,12 +447,13 @@ static struct radeon_asic rs690_asic = { .fence_ring_emit = &r300_fence_ring_emit, .cs_parse = &r300_cs_parse, .copy_blit = &r100_copy_blit, - .copy_dma = &r300_copy_dma, - .copy = &r300_copy_dma, + .copy_dma = &r200_copy_dma, + .copy = &r200_copy_dma, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = &radeon_atom_get_memory_clock, .set_memory_clock = &radeon_atom_set_memory_clock, + .get_pcie_lanes = NULL, .set_pcie_lanes = NULL, .set_clock_gating = &radeon_atom_set_clock_gating, .set_surface_reg = r100_set_surface_reg, @@ -412,12 +500,13 @@ static struct radeon_asic rv515_asic = { .fence_ring_emit = &r300_fence_ring_emit, .cs_parse = &r300_cs_parse, .copy_blit = &r100_copy_blit, - .copy_dma = &r300_copy_dma, + .copy_dma = &r200_copy_dma, .copy = &r100_copy_blit, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = &radeon_atom_get_memory_clock, .set_memory_clock = &radeon_atom_set_memory_clock, + .get_pcie_lanes = &rv370_get_pcie_lanes, .set_pcie_lanes = &rv370_set_pcie_lanes, .set_clock_gating = 
&radeon_atom_set_clock_gating, .set_surface_reg = r100_set_surface_reg, @@ -455,12 +544,13 @@ static struct radeon_asic r520_asic = { .fence_ring_emit = &r300_fence_ring_emit, .cs_parse = &r300_cs_parse, .copy_blit = &r100_copy_blit, - .copy_dma = &r300_copy_dma, + .copy_dma = &r200_copy_dma, .copy = &r100_copy_blit, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = &radeon_atom_get_memory_clock, .set_memory_clock = &radeon_atom_set_memory_clock, + .get_pcie_lanes = &rv370_get_pcie_lanes, .set_pcie_lanes = &rv370_set_pcie_lanes, .set_clock_gating = &radeon_atom_set_clock_gating, .set_surface_reg = r100_set_surface_reg, @@ -538,8 +628,9 @@ static struct radeon_asic r600_asic = { .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = &radeon_atom_get_memory_clock, .set_memory_clock = &radeon_atom_set_memory_clock, + .get_pcie_lanes = &rv370_get_pcie_lanes, .set_pcie_lanes = NULL, - .set_clock_gating = &radeon_atom_set_clock_gating, + .set_clock_gating = NULL, .set_surface_reg = r600_set_surface_reg, .clear_surface_reg = r600_clear_surface_reg, .bandwidth_update = &rv515_bandwidth_update, @@ -583,6 +674,7 @@ static struct radeon_asic rv770_asic = { .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = &radeon_atom_get_memory_clock, .set_memory_clock = &radeon_atom_set_memory_clock, + .get_pcie_lanes = &rv370_get_pcie_lanes, .set_pcie_lanes = NULL, .set_clock_gating = &radeon_atom_set_clock_gating, .set_surface_reg = r600_set_surface_reg, @@ -595,4 +687,54 @@ static struct radeon_asic rv770_asic = { .ioctl_wait_idle = r600_ioctl_wait_idle, }; +/* + * evergreen + */ +int evergreen_init(struct radeon_device *rdev); +void evergreen_fini(struct radeon_device *rdev); +int evergreen_suspend(struct radeon_device *rdev); +int evergreen_resume(struct radeon_device *rdev); +int evergreen_gpu_reset(struct radeon_device *rdev); +void evergreen_bandwidth_update(struct radeon_device *rdev); +void evergreen_hpd_init(struct radeon_device *rdev); +void evergreen_hpd_fini(struct radeon_device *rdev); +bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd); +void evergreen_hpd_set_polarity(struct radeon_device *rdev, + enum radeon_hpd_id hpd); + +static struct radeon_asic evergreen_asic = { + .init = &evergreen_init, + .fini = &evergreen_fini, + .suspend = &evergreen_suspend, + .resume = &evergreen_resume, + .cp_commit = NULL, + .gpu_reset = &evergreen_gpu_reset, + .vga_set_state = &r600_vga_set_state, + .gart_tlb_flush = &r600_pcie_gart_tlb_flush, + .gart_set_page = &rs600_gart_set_page, + .ring_test = NULL, + .ring_ib_execute = NULL, + .irq_set = NULL, + .irq_process = NULL, + .get_vblank_counter = NULL, + .fence_ring_emit = NULL, + .cs_parse = NULL, + .copy_blit = NULL, + .copy_dma = NULL, + .copy = NULL, + .get_engine_clock = &radeon_atom_get_engine_clock, + .set_engine_clock = &radeon_atom_set_engine_clock, + .get_memory_clock = &radeon_atom_get_memory_clock, + .set_memory_clock = &radeon_atom_set_memory_clock, + .set_pcie_lanes = NULL, + .set_clock_gating = NULL, + .set_surface_reg = r600_set_surface_reg, + .clear_surface_reg = r600_clear_surface_reg, + .bandwidth_update = &evergreen_bandwidth_update, + .hpd_init = &evergreen_hpd_init, + .hpd_fini = &evergreen_hpd_fini, + .hpd_sense = &evergreen_hpd_sense, + .hpd_set_polarity = &evergreen_hpd_set_polarity, +}; + #endif diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 
4d8831548a5..93783b15c81 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -159,8 +159,15 @@ static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device struct radeon_gpio_rec *gpio) { struct radeon_hpd hpd; + u32 reg; + + if (ASIC_IS_DCE4(rdev)) + reg = EVERGREEN_DC_GPIO_HPD_A; + else + reg = AVIVO_DC_GPIO_HPD_A; + hpd.gpio = *gpio; - if (gpio->reg == AVIVO_DC_GPIO_HPD_A) { + if (gpio->reg == reg) { switch(gpio->mask) { case (1 << 0): hpd.hpd = RADEON_HPD_1; @@ -574,6 +581,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) ddc_bus.valid = false; } + /* needed for aux chan transactions */ + ddc_bus.hpd_id = hpd.hpd ? (hpd.hpd - 1) : 0; + conn_id = le16_to_cpu(path->usConnObjectId); if (!radeon_atom_apply_quirks @@ -838,6 +848,7 @@ union firmware_info { ATOM_FIRMWARE_INFO_V1_2 info_12; ATOM_FIRMWARE_INFO_V1_3 info_13; ATOM_FIRMWARE_INFO_V1_4 info_14; + ATOM_FIRMWARE_INFO_V2_1 info_21; }; bool radeon_atom_get_clock_info(struct drm_device *dev) @@ -849,6 +860,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) uint8_t frev, crev; struct radeon_pll *p1pll = &rdev->clock.p1pll; struct radeon_pll *p2pll = &rdev->clock.p2pll; + struct radeon_pll *dcpll = &rdev->clock.dcpll; struct radeon_pll *spll = &rdev->clock.spll; struct radeon_pll *mpll = &rdev->clock.mpll; uint16_t data_offset; @@ -951,8 +963,19 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) rdev->clock.default_mclk = le32_to_cpu(firmware_info->info.ulDefaultMemoryClock); + if (ASIC_IS_DCE4(rdev)) { + rdev->clock.default_dispclk = + le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq); + if (rdev->clock.default_dispclk == 0) + rdev->clock.default_dispclk = 60000; /* 600 Mhz */ + rdev->clock.dp_extclk = + le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); + } + *dcpll = *p1pll; + return true; } + return false; } @@ -1091,6 +1114,30 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct return ss; } +static void radeon_atom_apply_lvds_quirks(struct drm_device *dev, + struct radeon_encoder_atom_dig *lvds) +{ + + /* Toshiba A300-1BU laptop panel doesn't like new pll divider algo */ + if ((dev->pdev->device == 0x95c4) && + (dev->pdev->subsystem_vendor == 0x1179) && + (dev->pdev->subsystem_device == 0xff50)) { + if ((lvds->native_mode.hdisplay == 1280) && + (lvds->native_mode.vdisplay == 800)) + lvds->pll_algo = PLL_ALGO_LEGACY; + } + + /* Dell Studio 15 laptop panel doesn't like new pll divider algo */ + if ((dev->pdev->device == 0x95c4) && + (dev->pdev->subsystem_vendor == 0x1028) && + (dev->pdev->subsystem_device == 0x029f)) { + if ((lvds->native_mode.hdisplay == 1280) && + (lvds->native_mode.vdisplay == 800)) + lvds->pll_algo = PLL_ALGO_LEGACY; + } + +} + union lvds_info { struct _ATOM_LVDS_INFO info; struct _ATOM_LVDS_INFO_V12 info_12; @@ -1161,6 +1208,21 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id); + if (ASIC_IS_AVIVO(rdev)) { + if (radeon_new_pll == 0) + lvds->pll_algo = PLL_ALGO_LEGACY; + else + lvds->pll_algo = PLL_ALGO_NEW; + } else { + if (radeon_new_pll == 1) + lvds->pll_algo = PLL_ALGO_NEW; + else + lvds->pll_algo = PLL_ALGO_LEGACY; + } + + /* LVDS quirks */ + radeon_atom_apply_lvds_quirks(dev, lvds); + encoder->native_mode = lvds->native_mode; } return lvds; @@ -1385,20 +1447,375 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder) return tv_dac; } -void 
radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable) +union power_info { + struct _ATOM_POWERPLAY_INFO info; + struct _ATOM_POWERPLAY_INFO_V2 info_2; + struct _ATOM_POWERPLAY_INFO_V3 info_3; + struct _ATOM_PPLIB_POWERPLAYTABLE info_4; +}; + +void radeon_atombios_get_power_modes(struct radeon_device *rdev) { - DYNAMIC_CLOCK_GATING_PS_ALLOCATION args; - int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating); + struct radeon_mode_info *mode_info = &rdev->mode_info; + int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); + u16 data_offset; + u8 frev, crev; + u32 misc, misc2 = 0, sclk, mclk; + union power_info *power_info; + struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; + struct _ATOM_PPLIB_STATE *power_state; + int num_modes = 0, i, j; + int state_index = 0, mode_index = 0; - args.ucEnable = enable; + atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); + power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); + + rdev->pm.default_power_state = NULL; + + if (power_info) { + if (frev < 4) { + num_modes = power_info->info.ucNumOfPowerModeEntries; + if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) + num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; + for (i = 0; i < num_modes; i++) { + rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; + switch (frev) { + case 1: + rdev->pm.power_state[state_index].num_clock_modes = 1; + rdev->pm.power_state[state_index].clock_info[0].mclk = + le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock); + rdev->pm.power_state[state_index].clock_info[0].sclk = + le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock); + /* skip invalid modes */ + if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || + (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) + continue; + /* skip overclock modes for now */ + if ((rdev->pm.power_state[state_index].clock_info[0].mclk > + rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || + (rdev->pm.power_state[state_index].clock_info[0].sclk > + rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)) + continue; + rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = + power_info->info.asPowerPlayInfo[i].ucNumPciELanes; + misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo); + if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { + rdev->pm.power_state[state_index].clock_info[0].voltage.type = + VOLTAGE_GPIO; + rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = + radeon_lookup_gpio(rdev, + power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex); + if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) + rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = + true; + else + rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = + false; + } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { + rdev->pm.power_state[state_index].clock_info[0].voltage.type = + VOLTAGE_VDDC; + rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = + power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex; + } + /* order matters! 
*/ + if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_POWERSAVE; + if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BATTERY; + if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BATTERY; + if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BALANCED; + if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_PERFORMANCE; + if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_DEFAULT; + rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; + rdev->pm.power_state[state_index].default_clock_mode = + &rdev->pm.power_state[state_index].clock_info[0]; + } + state_index++; + break; + case 2: + rdev->pm.power_state[state_index].num_clock_modes = 1; + rdev->pm.power_state[state_index].clock_info[0].mclk = + le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock); + rdev->pm.power_state[state_index].clock_info[0].sclk = + le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock); + /* skip invalid modes */ + if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || + (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) + continue; + /* skip overclock modes for now */ + if ((rdev->pm.power_state[state_index].clock_info[0].mclk > + rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || + (rdev->pm.power_state[state_index].clock_info[0].sclk > + rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)) + continue; + rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = + power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes; + misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo); + misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2); + if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { + rdev->pm.power_state[state_index].clock_info[0].voltage.type = + VOLTAGE_GPIO; + rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = + radeon_lookup_gpio(rdev, + power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex); + if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) + rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = + true; + else + rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = + false; + } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { + rdev->pm.power_state[state_index].clock_info[0].voltage.type = + VOLTAGE_VDDC; + rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = + power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex; + } + /* order matters! 
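The repeated "order matters!" comments mark that the ATOM_PM_MISCINFO_* bits are tested in a fixed sequence, so a later match overrides an earlier one and the driver-default bit wins last (and also records the state as default_power_state). A standalone model of that precedence, with made-up flag values standing in for the real atombios constants:

/* Standalone model of the power-state classification cascade above.
 * Flag values are illustrative, not the real ATOM_PM_MISCINFO_* encodings. */
#include <stdio.h>

enum radeon_pm_state_type {
        POWER_STATE_TYPE_DEFAULT,
        POWER_STATE_TYPE_POWERSAVE,
        POWER_STATE_TYPE_BATTERY,
        POWER_STATE_TYPE_BALANCED,
        POWER_STATE_TYPE_PERFORMANCE,
};

#define MISC_POWER_SAVING      (1 << 0)
#define MISC_DEFAULT_DC_ENTRY  (1 << 1)
#define MISC_LOAD_BALANCE      (1 << 2)
#define MISC_3D_ACCELERATION   (1 << 3)
#define MISC_DRIVER_DEFAULT    (1 << 4)

static enum radeon_pm_state_type classify(unsigned int misc)
{
        enum radeon_pm_state_type type = POWER_STATE_TYPE_DEFAULT;

        /* later tests overwrite earlier ones, exactly like the driver code */
        if (misc & MISC_POWER_SAVING)
                type = POWER_STATE_TYPE_POWERSAVE;
        if (misc & MISC_DEFAULT_DC_ENTRY)
                type = POWER_STATE_TYPE_BATTERY;
        if (misc & MISC_LOAD_BALANCE)
                type = POWER_STATE_TYPE_BALANCED;
        if (misc & MISC_3D_ACCELERATION)
                type = POWER_STATE_TYPE_PERFORMANCE;
        if (misc & MISC_DRIVER_DEFAULT)
                type = POWER_STATE_TYPE_DEFAULT; /* also becomes default_power_state */
        return type;
}

int main(void)
{
        /* a state marked both battery and 3D-accelerated classifies as performance */
        printf("%d\n", classify(MISC_DEFAULT_DC_ENTRY | MISC_3D_ACCELERATION));
        return 0;
}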
*/ + if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_POWERSAVE; + if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BATTERY; + if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BATTERY; + if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BALANCED; + if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_PERFORMANCE; + if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BALANCED; + if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_DEFAULT; + rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; + rdev->pm.power_state[state_index].default_clock_mode = + &rdev->pm.power_state[state_index].clock_info[0]; + } + state_index++; + break; + case 3: + rdev->pm.power_state[state_index].num_clock_modes = 1; + rdev->pm.power_state[state_index].clock_info[0].mclk = + le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock); + rdev->pm.power_state[state_index].clock_info[0].sclk = + le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock); + /* skip invalid modes */ + if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || + (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) + continue; + /* skip overclock modes for now */ + if ((rdev->pm.power_state[state_index].clock_info[0].mclk > + rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || + (rdev->pm.power_state[state_index].clock_info[0].sclk > + rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)) + continue; + rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = + power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes; + misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo); + misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2); + if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) { + rdev->pm.power_state[state_index].clock_info[0].voltage.type = + VOLTAGE_GPIO; + rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = + radeon_lookup_gpio(rdev, + power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex); + if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) + rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = + true; + else + rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = + false; + } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) { + rdev->pm.power_state[state_index].clock_info[0].voltage.type = + VOLTAGE_VDDC; + rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id = + power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex; + if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) { + rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled = + true; + rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id = + power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex; + } + } + /* order matters! 
*/ + if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_POWERSAVE; + if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BATTERY; + if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BATTERY; + if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BALANCED; + if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_PERFORMANCE; + if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE) + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BALANCED; + if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_DEFAULT; + rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; + rdev->pm.power_state[state_index].default_clock_mode = + &rdev->pm.power_state[state_index].clock_info[0]; + } + state_index++; + break; + } + } + } else if (frev == 4) { + for (i = 0; i < power_info->info_4.ucNumStates; i++) { + mode_index = 0; + power_state = (struct _ATOM_PPLIB_STATE *) + (mode_info->atom_context->bios + + data_offset + + le16_to_cpu(power_info->info_4.usStateArrayOffset) + + i * power_info->info_4.ucStateEntrySize); + non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) + (mode_info->atom_context->bios + + data_offset + + le16_to_cpu(power_info->info_4.usNonClockInfoArrayOffset) + + (power_state->ucNonClockStateIndex * + power_info->info_4.ucNonClockSize)); + for (j = 0; j < (power_info->info_4.ucStateEntrySize - 1); j++) { + if (rdev->flags & RADEON_IS_IGP) { + struct _ATOM_PPLIB_RS780_CLOCK_INFO *clock_info = + (struct _ATOM_PPLIB_RS780_CLOCK_INFO *) + (mode_info->atom_context->bios + + data_offset + + le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) + + (power_state->ucClockStateIndices[j] * + power_info->info_4.ucClockInfoSize)); + sclk = le16_to_cpu(clock_info->usLowEngineClockLow); + sclk |= clock_info->ucLowEngineClockHigh << 16; + rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; + /* skip invalid modes */ + if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0) + continue; + /* skip overclock modes for now */ + if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk > + rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN) + continue; + rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = + VOLTAGE_SW; + rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = + clock_info->usVDDC; + mode_index++; + } else { + struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info = + (struct _ATOM_PPLIB_R600_CLOCK_INFO *) + (mode_info->atom_context->bios + + data_offset + + le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) + + (power_state->ucClockStateIndices[j] * + power_info->info_4.ucClockInfoSize)); + sclk = le16_to_cpu(clock_info->usEngineClockLow); + sclk |= clock_info->ucEngineClockHigh << 16; + mclk = le16_to_cpu(clock_info->usMemoryClockLow); + mclk |= clock_info->ucMemoryClockHigh << 16; + rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk; + rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk; + /* skip invalid modes */ + if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) || + (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)) + continue; + /* skip overclock modes 
for now */ + if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk > + rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || + (rdev->pm.power_state[state_index].clock_info[mode_index].sclk > + rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)) + continue; + rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = + VOLTAGE_SW; + rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = + clock_info->usVDDC; + mode_index++; + } + } + rdev->pm.power_state[state_index].num_clock_modes = mode_index; + if (mode_index) { + misc = le32_to_cpu(non_clock_info->ulCapsAndSettings); + misc2 = le16_to_cpu(non_clock_info->usClassification); + rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = + ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> + ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; + switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { + case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BATTERY; + break; + case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED: + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BALANCED; + break; + case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_PERFORMANCE; + break; + } + if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) { + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_DEFAULT; + rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; + rdev->pm.power_state[state_index].default_clock_mode = + &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; + } + state_index++; + } + } + } + } else { + /* XXX figure out some good default low power mode for cards w/out power tables */ + } + + if (rdev->pm.default_power_state == NULL) { + /* add the default mode */ + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_DEFAULT; + rdev->pm.power_state[state_index].num_clock_modes = 1; + rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; + rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; + rdev->pm.power_state[state_index].default_clock_mode = + &rdev->pm.power_state[state_index].clock_info[0]; + rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; + if (rdev->asic->get_pcie_lanes) + rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev); + else + rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16; + rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; + state_index++; + } + rdev->pm.num_power_states = state_index; + + rdev->pm.current_power_state = rdev->pm.default_power_state; + rdev->pm.current_clock_mode = + rdev->pm.default_power_state->default_clock_mode; } -void radeon_atom_static_pwrmgt_setup(struct radeon_device *rdev, int enable) +void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable) { - ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION args; - int index = GetIndexIntoMasterTable(COMMAND, EnableASIC_StaticPwrMgt); + DYNAMIC_CLOCK_GATING_PS_ALLOCATION args; + int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating); args.ucEnable = enable; diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c new file mode 100644 index 00000000000..3f557c4151e --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -0,0 +1,257 @@ +/* + * Copyright (c) 2010 Red Hat Inc. 
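Both power-table parsers in this diff end the same way: whether or not the BIOS tables yielded usable states, a default state is synthesized from rdev->clock.default_mclk/default_sclk with no voltage control, the lane count comes from get_pcie_lanes() or is assumed to be 16, and current_power_state/current_clock_mode start out pointing at that default. A condensed sketch with trimmed, renamed types (only the fields the fallback touches are kept):

/* Trimmed model of the shared "add the default mode" fallback above. */
enum { MODEL_POWER_STATE_TYPE_DEFAULT };
enum { MODEL_VOLTAGE_NONE };

struct model_clock_info { unsigned mclk, sclk; int voltage_type; };

struct model_power_state {
        int type;
        int num_clock_modes;
        struct model_clock_info clock_info[8];
        struct model_clock_info *default_clock_mode;
        int pcie_lanes;
};

struct model_pm {
        struct model_power_state power_state[8];
        int num_power_states;
        struct model_power_state *default_power_state;
        struct model_power_state *current_power_state;
        struct model_clock_info *current_clock_mode;
};

static void model_add_default_state(struct model_pm *pm, int state_index,
                                    unsigned default_sclk, unsigned default_mclk,
                                    int pcie_lanes /* get_pcie_lanes() result or 16 */)
{
        struct model_power_state *ps = &pm->power_state[state_index];

        ps->type = MODEL_POWER_STATE_TYPE_DEFAULT;
        ps->num_clock_modes = 1;
        ps->clock_info[0].mclk = default_mclk;
        ps->clock_info[0].sclk = default_sclk;
        ps->clock_info[0].voltage_type = MODEL_VOLTAGE_NONE;
        ps->default_clock_mode = &ps->clock_info[0];
        ps->pcie_lanes = pcie_lanes;

        pm->default_power_state = ps;
        pm->num_power_states = state_index + 1;

        /* until reclocking runs, the current state/mode track the default */
        pm->current_power_state = pm->default_power_state;
        pm->current_clock_mode = pm->default_power_state->default_clock_mode;
}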
+ * Author : Dave Airlie <airlied@redhat.com> + * + * Licensed under GPLv2 + * + * ATPX support for both Intel/ATI + */ +#include <linux/vga_switcheroo.h> +#include <acpi/acpi.h> +#include <acpi/acpi_bus.h> +#include <linux/pci.h> + +#define ATPX_VERSION 0 +#define ATPX_GPU_PWR 2 +#define ATPX_MUX_SELECT 3 + +#define ATPX_INTEGRATED 0 +#define ATPX_DISCRETE 1 + +#define ATPX_MUX_IGD 0 +#define ATPX_MUX_DISCRETE 1 + +static struct radeon_atpx_priv { + bool atpx_detected; + /* handle for device - and atpx */ + acpi_handle dhandle; + acpi_handle atpx_handle; + acpi_handle atrm_handle; +} radeon_atpx_priv; + +/* retrieve the ROM in 4k blocks */ +static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios, + int offset, int len) +{ + acpi_status status; + union acpi_object atrm_arg_elements[2], *obj; + struct acpi_object_list atrm_arg; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; + + atrm_arg.count = 2; + atrm_arg.pointer = &atrm_arg_elements[0]; + + atrm_arg_elements[0].type = ACPI_TYPE_INTEGER; + atrm_arg_elements[0].integer.value = offset; + + atrm_arg_elements[1].type = ACPI_TYPE_INTEGER; + atrm_arg_elements[1].integer.value = len; + + status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer); + if (ACPI_FAILURE(status)) { + printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status)); + return -ENODEV; + } + + obj = (union acpi_object *)buffer.pointer; + memcpy(bios+offset, obj->buffer.pointer, len); + kfree(buffer.pointer); + return len; +} + +bool radeon_atrm_supported(struct pci_dev *pdev) +{ + /* get the discrete ROM only via ATRM */ + if (!radeon_atpx_priv.atpx_detected) + return false; + + if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) + return false; + return true; +} + + +int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len) +{ + return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios, offset, len); +} + +static int radeon_atpx_get_version(acpi_handle handle) +{ + acpi_status status; + union acpi_object atpx_arg_elements[2], *obj; + struct acpi_object_list atpx_arg; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + + atpx_arg.count = 2; + atpx_arg.pointer = &atpx_arg_elements[0]; + + atpx_arg_elements[0].type = ACPI_TYPE_INTEGER; + atpx_arg_elements[0].integer.value = ATPX_VERSION; + + atpx_arg_elements[1].type = ACPI_TYPE_INTEGER; + atpx_arg_elements[1].integer.value = ATPX_VERSION; + + status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer); + if (ACPI_FAILURE(status)) { + printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status)); + return -ENOSYS; + } + obj = (union acpi_object *)buffer.pointer; + if (obj && (obj->type == ACPI_TYPE_BUFFER)) + printk(KERN_INFO "radeon atpx: version is %d\n", *((u8 *)(obj->buffer.pointer) + 2)); + kfree(buffer.pointer); + return 0; +} + +static int radeon_atpx_execute(acpi_handle handle, int cmd_id, u16 value) +{ + acpi_status status; + union acpi_object atpx_arg_elements[2]; + struct acpi_object_list atpx_arg; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + uint8_t buf[4] = {0}; + + if (!handle) + return -EINVAL; + + atpx_arg.count = 2; + atpx_arg.pointer = &atpx_arg_elements[0]; + + atpx_arg_elements[0].type = ACPI_TYPE_INTEGER; + atpx_arg_elements[0].integer.value = cmd_id; + + buf[2] = value & 0xff; + buf[3] = (value >> 8) & 0xff; + + atpx_arg_elements[1].type = ACPI_TYPE_BUFFER; + atpx_arg_elements[1].buffer.length = 4; + atpx_arg_elements[1].buffer.pointer = buf; + + status = 
acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer); + if (ACPI_FAILURE(status)) { + printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status)); + return -ENOSYS; + } + kfree(buffer.pointer); + + return 0; +} + +static int radeon_atpx_set_discrete_state(acpi_handle handle, int state) +{ + return radeon_atpx_execute(handle, ATPX_GPU_PWR, state); +} + +static int radeon_atpx_switch_mux(acpi_handle handle, int mux_id) +{ + return radeon_atpx_execute(handle, ATPX_MUX_SELECT, mux_id); +} + + +static int radeon_atpx_switchto(enum vga_switcheroo_client_id id) +{ + if (id == VGA_SWITCHEROO_IGD) + radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 0); + else + radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 1); + return 0; +} + +static int radeon_atpx_power_state(enum vga_switcheroo_client_id id, + enum vga_switcheroo_state state) +{ + /* on w500 ACPI can't change intel gpu state */ + if (id == VGA_SWITCHEROO_IGD) + return 0; + + radeon_atpx_set_discrete_state(radeon_atpx_priv.atpx_handle, state); + return 0; +} + +static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) +{ + acpi_handle dhandle, atpx_handle, atrm_handle; + acpi_status status; + + dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + if (!dhandle) + return false; + + status = acpi_get_handle(dhandle, "ATPX", &atpx_handle); + if (ACPI_FAILURE(status)) + return false; + + status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); + if (ACPI_FAILURE(status)) + return false; + + radeon_atpx_priv.dhandle = dhandle; + radeon_atpx_priv.atpx_handle = atpx_handle; + radeon_atpx_priv.atrm_handle = atrm_handle; + return true; +} + +static int radeon_atpx_init(void) +{ + /* set up the ATPX handle */ + + radeon_atpx_get_version(radeon_atpx_priv.atpx_handle); + return 0; +} + +static int radeon_atpx_get_client_id(struct pci_dev *pdev) +{ + if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) + return VGA_SWITCHEROO_IGD; + else + return VGA_SWITCHEROO_DIS; +} + +static struct vga_switcheroo_handler radeon_atpx_handler = { + .switchto = radeon_atpx_switchto, + .power_state = radeon_atpx_power_state, + .init = radeon_atpx_init, + .get_client_id = radeon_atpx_get_client_id, +}; + +static bool radeon_atpx_detect(void) +{ + char acpi_method_name[255] = { 0 }; + struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; + struct pci_dev *pdev = NULL; + bool has_atpx = false; + int vga_count = 0; + + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { + vga_count++; + + has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true); + } + + if (has_atpx && vga_count == 2) { + acpi_get_name(radeon_atpx_priv.atpx_handle, ACPI_FULL_PATHNAME, &buffer); + printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n", + acpi_method_name); + radeon_atpx_priv.atpx_detected = true; + return true; + } + return false; +} + +void radeon_register_atpx_handler(void) +{ + bool r; + + /* detect if we have any ATPX + 2 VGA in the system */ + r = radeon_atpx_detect(); + if (!r) + return; + + vga_switcheroo_register_handler(&radeon_atpx_handler); +} + +void radeon_unregister_atpx_handler(void) +{ + vga_switcheroo_unregister_handler(); +} diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 906921740c6..55724046052 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -30,6 +30,7 @@ #include "radeon.h" #include "atom.h" +#include <linux/vga_switcheroo.h> /* * BIOS. 
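The new ATPX handler above boils down to two ACPI method calls: ATPX_MUX_SELECT to route the display mux and ATPX_GPU_PWR to switch the discrete GPU, with the integrated GPU's power deliberately left alone. The sketch below keeps only that id-to-command mapping and stubs out the ACPI evaluation (the real code packs the 16-bit value little-endian into bytes 2-3 of a 4-byte buffer argument):

/* Reduced model of the ATPX switcheroo callbacks; ACPI evaluation is stubbed. */
#include <stdio.h>

enum model_client_id { MODEL_CLIENT_IGD, MODEL_CLIENT_DIS }; /* VGA_SWITCHEROO_IGD/DIS */
enum { ATPX_GPU_PWR = 2, ATPX_MUX_SELECT = 3 };
enum { ATPX_MUX_IGD = 0, ATPX_MUX_DISCRETE = 1 };

static int model_atpx_execute(int cmd_id, unsigned short value)
{
        /* stand-in for radeon_atpx_execute(): the real code builds a two-element
         * ACPI argument list (integer command id + 4-byte buffer carrying 'value') */
        printf("ATPX cmd %d, value %u\n", cmd_id, value);
        return 0;
}

static int model_atpx_switchto(enum model_client_id id)
{
        return model_atpx_execute(ATPX_MUX_SELECT,
                                  id == MODEL_CLIENT_IGD ? ATPX_MUX_IGD
                                                         : ATPX_MUX_DISCRETE);
}

static int model_atpx_power_state(enum model_client_id id, int state)
{
        if (id == MODEL_CLIENT_IGD)     /* ACPI cannot switch the Intel GPU's power */
                return 0;
        return model_atpx_execute(ATPX_GPU_PWR, state);
}

int main(void)
{
        model_atpx_switchto(MODEL_CLIENT_IGD);       /* route the mux to the integrated GPU */
        model_atpx_power_state(MODEL_CLIENT_DIS, 0); /* then power the discrete GPU down */
        return 0;
}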
*/ @@ -62,7 +63,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev) iounmap(bios); return false; } - memcpy(rdev->bios, bios, size); + memcpy_fromio(rdev->bios, bios, size); iounmap(bios); return true; } @@ -93,6 +94,38 @@ static bool radeon_read_bios(struct radeon_device *rdev) return true; } +/* ATRM is used to get the BIOS on the discrete cards in + * dual-gpu systems. + */ +static bool radeon_atrm_get_bios(struct radeon_device *rdev) +{ + int ret; + int size = 64 * 1024; + int i; + + if (!radeon_atrm_supported(rdev->pdev)) + return false; + + rdev->bios = kmalloc(size, GFP_KERNEL); + if (!rdev->bios) { + DRM_ERROR("Unable to allocate bios\n"); + return false; + } + + for (i = 0; i < size / ATRM_BIOS_PAGE; i++) { + ret = radeon_atrm_get_bios_chunk(rdev->bios, + (i * ATRM_BIOS_PAGE), + ATRM_BIOS_PAGE); + if (ret <= 0) + break; + } + + if (i == 0 || rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) { + kfree(rdev->bios); + return false; + } + return true; +} static bool r700_read_disabled_bios(struct radeon_device *rdev) { uint32_t viph_control; @@ -388,16 +421,16 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev) return legacy_read_disabled_bios(rdev); } + bool radeon_get_bios(struct radeon_device *rdev) { bool r; uint16_t tmp; - if (rdev->flags & RADEON_IS_IGP) { + r = radeon_atrm_get_bios(rdev); + if (r == false) r = igp_read_bios_from_vram(rdev); - if (r == false) - r = radeon_read_bios(rdev); - } else + if (r == false) r = radeon_read_bios(rdev); if (r == false) { r = radeon_read_disabled_bios(rdev); @@ -408,6 +441,13 @@ bool radeon_get_bios(struct radeon_device *rdev) return false; } if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) { + printk("BIOS signature incorrect %x %x\n", rdev->bios[0], rdev->bios[1]); + goto free_bios; + } + + tmp = RBIOS16(0x18); + if (RBIOS8(tmp + 0x14) != 0x0) { + DRM_INFO("Not an x86 BIOS ROM, not using.\n"); goto free_bios; } diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c index 73c4405bf42..f64936cc4dd 100644 --- a/drivers/gpu/drm/radeon/radeon_clocks.c +++ b/drivers/gpu/drm/radeon/radeon_clocks.c @@ -96,6 +96,7 @@ void radeon_get_clock_info(struct drm_device *dev) struct radeon_device *rdev = dev->dev_private; struct radeon_pll *p1pll = &rdev->clock.p1pll; struct radeon_pll *p2pll = &rdev->clock.p2pll; + struct radeon_pll *dcpll = &rdev->clock.dcpll; struct radeon_pll *spll = &rdev->clock.spll; struct radeon_pll *mpll = &rdev->clock.mpll; int ret; @@ -204,6 +205,17 @@ void radeon_get_clock_info(struct drm_device *dev) p2pll->max_frac_feedback_div = 0; } + /* dcpll is DCE4 only */ + dcpll->min_post_div = 2; + dcpll->max_post_div = 0x7f; + dcpll->min_frac_feedback_div = 0; + dcpll->max_frac_feedback_div = 9; + dcpll->min_ref_div = 2; + dcpll->max_ref_div = 0x3ff; + dcpll->min_feedback_div = 4; + dcpll->max_feedback_div = 0xfff; + dcpll->best_vco = 0; + p1pll->min_ref_div = 2; p1pll->max_ref_div = 0x3ff; p1pll->min_feedback_div = 4; @@ -846,8 +858,10 @@ int radeon_static_clocks_init(struct drm_device *dev) /* XXX make sure engine is idle */ if (radeon_dynclks != -1) { - if (radeon_dynclks) - radeon_set_clock_gating(rdev, 1); + if (radeon_dynclks) { + if (rdev->asic->set_clock_gating) + radeon_set_clock_gating(rdev, 1); + } } radeon_apply_clock_quirks(rdev); return 0; diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 22d476160d5..e9ea38ece37 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ 
b/drivers/gpu/drm/radeon/radeon_combios.c @@ -150,6 +150,9 @@ static uint16_t combios_get_table_offset(struct drm_device *dev, int rev; uint16_t offset = 0, check_offset; + if (!rdev->bios) + return 0; + switch (table) { /* absolute offset tables */ case COMBIOS_ASIC_INIT_1_TABLE: @@ -443,6 +446,39 @@ static uint16_t combios_get_table_offset(struct drm_device *dev, } +bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) +{ + int edid_info; + struct edid *edid; + edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE); + if (!edid_info) + return false; + + edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1), + GFP_KERNEL); + if (edid == NULL) + return false; + + memcpy((unsigned char *)edid, + (unsigned char *)(rdev->bios + edid_info), EDID_LENGTH); + + if (!drm_edid_is_valid(edid)) { + kfree(edid); + return false; + } + + rdev->mode_info.bios_hardcoded_edid = edid; + return true; +} + +struct edid * +radeon_combios_get_hardcoded_edid(struct radeon_device *rdev) +{ + if (rdev->mode_info.bios_hardcoded_edid) + return rdev->mode_info.bios_hardcoded_edid; + return NULL; +} + static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev, int ddc_line) { @@ -486,9 +522,65 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde i2c.y_data_reg = ddc_line; } - if (rdev->family < CHIP_R200) - i2c.hw_capable = false; - else { + switch (rdev->family) { + case CHIP_R100: + case CHIP_RV100: + case CHIP_RS100: + case CHIP_RV200: + case CHIP_RS200: + case CHIP_RS300: + switch (ddc_line) { + case RADEON_GPIO_DVI_DDC: + /* in theory this should be hw capable, + * but it doesn't seem to work + */ + i2c.hw_capable = false; + break; + default: + i2c.hw_capable = false; + break; + } + break; + case CHIP_R200: + switch (ddc_line) { + case RADEON_GPIO_DVI_DDC: + case RADEON_GPIO_MONID: + i2c.hw_capable = true; + break; + default: + i2c.hw_capable = false; + break; + } + break; + case CHIP_RV250: + case CHIP_RV280: + switch (ddc_line) { + case RADEON_GPIO_VGA_DDC: + case RADEON_GPIO_DVI_DDC: + case RADEON_GPIO_CRT2_DDC: + i2c.hw_capable = true; + break; + default: + i2c.hw_capable = false; + break; + } + break; + case CHIP_R300: + case CHIP_R350: + switch (ddc_line) { + case RADEON_GPIO_VGA_DDC: + case RADEON_GPIO_DVI_DDC: + i2c.hw_capable = true; + break; + default: + i2c.hw_capable = false; + break; + } + break; + case CHIP_RV350: + case CHIP_RV380: + case CHIP_RS400: + case CHIP_RS480: switch (ddc_line) { case RADEON_GPIO_VGA_DDC: case RADEON_GPIO_DVI_DDC: @@ -504,9 +596,14 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde i2c.hw_capable = false; break; } + break; + default: + i2c.hw_capable = false; + break; } i2c.mm_i2c = false; i2c.i2c_id = 0; + i2c.hpd_id = 0; if (ddc_line) i2c.valid = true; @@ -527,9 +624,6 @@ bool radeon_combios_get_clock_info(struct drm_device *dev) int8_t rev; uint16_t sclk, mclk; - if (rdev->bios == NULL) - return false; - pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE); if (pll_info) { rev = RBIOS8(pll_info); @@ -654,9 +748,6 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct if (!p_dac) return NULL; - if (rdev->bios == NULL) - goto out; - /* check CRT table */ dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); if (dac_info) { @@ -673,7 +764,6 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct found = 1; } -out: if (!found) /* fallback to defaults */ 
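The combios changes in this file drop many per-function "if (rdev->bios == NULL)" early returns; that appears safe because combios_get_table_offset() above now carries the single guard and returns 0 without a BIOS, so every lookup already takes its existing "not found" fallback. A minimal sketch of the pattern (the helper body and offset are illustrative):

/* Model of the centralised guard; the table walk itself is illustrative. */
static unsigned short combios_get_table_offset_model(const unsigned char *bios,
                                                     int table)
{
        if (!bios)              /* the guard added at the top of this hunk */
                return 0;
        (void)table;            /* real code switches on the table id */
        return 0x100;           /* pretend offset for the example */
}

static void combios_caller_model(const unsigned char *bios)
{
        unsigned short dac_info = combios_get_table_offset_model(bios, 0);

        if (dac_info) {
                /* parse the CRT/DAC table at dac_info */
        } else {
                /* fall back to per-ASIC defaults, exactly as before */
        }
}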
radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac); @@ -687,9 +777,6 @@ radeon_combios_get_tv_info(struct radeon_device *rdev) uint16_t tv_info; enum radeon_tv_std tv_std = TV_STD_NTSC; - if (rdev->bios == NULL) - return tv_std; - tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); if (tv_info) { if (RBIOS8(tv_info + 6) == 'T') { @@ -793,9 +880,6 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct if (!tv_dac) return NULL; - if (rdev->bios == NULL) - goto out; - /* first check TV table */ dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); if (dac_info) { @@ -857,7 +941,6 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct } } -out: if (!found) /* fallback to defaults */ radeon_legacy_get_tv_dac_info_from_table(rdev, tv_dac); @@ -945,11 +1028,6 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder int tmp, i; struct radeon_encoder_lvds *lvds = NULL; - if (rdev->bios == NULL) { - lvds = radeon_legacy_get_lvds_info_from_regs(rdev); - goto out; - } - lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE); if (lcd_info) { @@ -1050,7 +1128,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder DRM_INFO("No panel info found in BIOS\n"); lvds = radeon_legacy_get_lvds_info_from_regs(rdev); } -out: + if (lvds) encoder->native_mode = lvds->native_mode; return lvds; @@ -1102,9 +1180,6 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder, int i, n; uint8_t ver; - if (rdev->bios == NULL) - return false; - tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE); if (tmds_info) { @@ -1184,9 +1259,6 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder enum radeon_combios_ddc gpio; struct radeon_i2c_bus_rec i2c_bus; - if (rdev->bios == NULL) - return false; - tmds->i2c_bus = NULL; if (rdev->flags & RADEON_IS_IGP) { offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE); @@ -1253,7 +1325,10 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO"); break; case DDC_LCD: /* MM i2c */ - DRM_ERROR("MM i2c requires hw i2c engine\n"); + i2c_bus.valid = true; + i2c_bus.hw_capable = true; + i2c_bus.mm_i2c = true; + tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO"); break; default: DRM_ERROR("Unsupported gpio %d\n", gpio); @@ -1909,9 +1984,6 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) struct radeon_i2c_bus_rec ddc_i2c; struct radeon_hpd hpd; - if (rdev->bios == NULL) - return false; - conn_info = combios_get_table_offset(dev, COMBIOS_CONNECTOR_INFO_TABLE); if (conn_info) { for (i = 0; i < 4; i++) { @@ -2278,6 +2350,115 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) return true; } +void radeon_combios_get_power_modes(struct radeon_device *rdev) +{ + struct drm_device *dev = rdev->ddev; + u16 offset, misc, misc2 = 0; + u8 rev, blocks, tmp; + int state_index = 0; + + rdev->pm.default_power_state = NULL; + + if (rdev->flags & RADEON_IS_MOBILITY) { + offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE); + if (offset) { + rev = RBIOS8(offset); + blocks = RBIOS8(offset + 0x2); + /* power mode 0 tends to be the only valid one */ + rdev->pm.power_state[state_index].num_clock_modes = 1; + rdev->pm.power_state[state_index].clock_info[0].mclk = RBIOS32(offset + 0x5 + 0x2); + rdev->pm.power_state[state_index].clock_info[0].sclk = RBIOS32(offset + 0x5 + 0x6); 
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) || + (rdev->pm.power_state[state_index].clock_info[0].sclk == 0)) + goto default_mode; + /* skip overclock modes for now */ + if ((rdev->pm.power_state[state_index].clock_info[0].mclk > + rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) || + (rdev->pm.power_state[state_index].clock_info[0].sclk > + rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)) + goto default_mode; + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_BATTERY; + misc = RBIOS16(offset + 0x5 + 0x0); + if (rev > 4) + misc2 = RBIOS16(offset + 0x5 + 0xe); + if (misc & 0x4) { + rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO; + if (misc & 0x8) + rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = + true; + else + rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = + false; + rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.valid = true; + if (rev < 6) { + rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.reg = + RBIOS16(offset + 0x5 + 0xb) * 4; + tmp = RBIOS8(offset + 0x5 + 0xd); + rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.mask = (1 << tmp); + } else { + u8 entries = RBIOS8(offset + 0x5 + 0xb); + u16 voltage_table_offset = RBIOS16(offset + 0x5 + 0xc); + if (entries && voltage_table_offset) { + rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.reg = + RBIOS16(voltage_table_offset) * 4; + tmp = RBIOS8(voltage_table_offset + 0x2); + rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.mask = (1 << tmp); + } else + rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.valid = false; + } + switch ((misc2 & 0x700) >> 8) { + case 0: + default: + rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 0; + break; + case 1: + rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 33; + break; + case 2: + rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 66; + break; + case 3: + rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 99; + break; + case 4: + rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 132; + break; + } + } else + rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; + if (rev > 6) + rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = + RBIOS8(offset + 0x5 + 0x10); + state_index++; + } else { + /* XXX figure out some good default low power mode for mobility cards w/out power tables */ + } + } else { + /* XXX figure out some good default low power mode for desktop cards */ + } + +default_mode: + /* add the default mode */ + rdev->pm.power_state[state_index].type = + POWER_STATE_TYPE_DEFAULT; + rdev->pm.power_state[state_index].num_clock_modes = 1; + rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk; + rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; + rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0]; + rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; + if (rdev->asic->get_pcie_lanes) + rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev); + else + rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16; + rdev->pm.default_power_state = &rdev->pm.power_state[state_index]; + rdev->pm.num_power_states = state_index + 1; + + rdev->pm.current_power_state = rdev->pm.default_power_state; + rdev->pm.current_clock_mode = + 
rdev->pm.default_power_state->default_clock_mode; +} + void radeon_external_tmds_setup(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); @@ -2289,23 +2470,21 @@ void radeon_external_tmds_setup(struct drm_encoder *encoder) switch (tmds->dvo_chip) { case DVO_SIL164: /* sil 164 */ - radeon_i2c_do_lock(tmds->i2c_bus, 1); - radeon_i2c_sw_put_byte(tmds->i2c_bus, - tmds->slave_addr, - 0x08, 0x30); - radeon_i2c_sw_put_byte(tmds->i2c_bus, + radeon_i2c_put_byte(tmds->i2c_bus, + tmds->slave_addr, + 0x08, 0x30); + radeon_i2c_put_byte(tmds->i2c_bus, tmds->slave_addr, 0x09, 0x00); - radeon_i2c_sw_put_byte(tmds->i2c_bus, - tmds->slave_addr, - 0x0a, 0x90); - radeon_i2c_sw_put_byte(tmds->i2c_bus, - tmds->slave_addr, - 0x0c, 0x89); - radeon_i2c_sw_put_byte(tmds->i2c_bus, + radeon_i2c_put_byte(tmds->i2c_bus, + tmds->slave_addr, + 0x0a, 0x90); + radeon_i2c_put_byte(tmds->i2c_bus, + tmds->slave_addr, + 0x0c, 0x89); + radeon_i2c_put_byte(tmds->i2c_bus, tmds->slave_addr, 0x08, 0x3b); - radeon_i2c_do_lock(tmds->i2c_bus, 0); break; case DVO_SIL1178: /* sil 1178 - untested */ @@ -2338,9 +2517,6 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder) uint32_t reg, val, and_mask, or_mask; struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv; - if (rdev->bios == NULL) - return false; - if (!tmds) return false; @@ -2390,11 +2566,9 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder) index++; val = RBIOS8(index); index++; - radeon_i2c_do_lock(tmds->i2c_bus, 1); - radeon_i2c_sw_put_byte(tmds->i2c_bus, - slave_addr, - reg, val); - radeon_i2c_do_lock(tmds->i2c_bus, 0); + radeon_i2c_put_byte(tmds->i2c_bus, + slave_addr, + reg, val); break; default: DRM_ERROR("Unknown id %d\n", id >> 13); @@ -2447,11 +2621,9 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder) reg = id & 0x1fff; val = RBIOS8(index); index += 1; - radeon_i2c_do_lock(tmds->i2c_bus, 1); - radeon_i2c_sw_put_byte(tmds->i2c_bus, - tmds->slave_addr, - reg, val); - radeon_i2c_do_lock(tmds->i2c_bus, 0); + radeon_i2c_put_byte(tmds->i2c_bus, + tmds->slave_addr, + reg, val); break; default: DRM_ERROR("Unknown id %d\n", id >> 13); diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 65f81942f39..ee0083f982d 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -479,10 +479,8 @@ static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connec ret = connector_status_connected; else { if (radeon_connector->ddc_bus) { - radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); - radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); if (radeon_connector->edid) ret = connector_status_connected; } @@ -587,19 +585,14 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect if (!encoder) ret = connector_status_disconnected; - if (radeon_connector->ddc_bus) { - radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); + if (radeon_connector->ddc_bus) dret = radeon_ddc_probe(radeon_connector); - radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); - } if (dret) { if (radeon_connector->edid) { kfree(radeon_connector->edid); radeon_connector->edid = NULL; } - radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); - 
radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); if (!radeon_connector->edid) { DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", @@ -744,19 +737,14 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect enum drm_connector_status ret = connector_status_disconnected; bool dret = false; - if (radeon_connector->ddc_bus) { - radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); + if (radeon_connector->ddc_bus) dret = radeon_ddc_probe(radeon_connector); - radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); - } if (dret) { if (radeon_connector->edid) { kfree(radeon_connector->edid); radeon_connector->edid = NULL; } - radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); - radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); if (!radeon_connector->edid) { DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", @@ -952,7 +940,7 @@ static void radeon_dp_connector_destroy(struct drm_connector *connector) if (radeon_connector->edid) kfree(radeon_connector->edid); if (radeon_dig_connector->dp_i2c_bus) - radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus); + radeon_i2c_destroy_dp(radeon_dig_connector->dp_i2c_bus); kfree(radeon_connector->con_priv); drm_sysfs_connector_remove(connector); drm_connector_cleanup(connector); @@ -988,12 +976,10 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto ret = connector_status_connected; } } else { - radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); if (radeon_ddc_probe(radeon_connector)) { radeon_dig_connector->dp_sink_type = sink_type; ret = connector_status_connected; } - radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); } return ret; diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index 06123ba31d3..dc6eba6b96d 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c @@ -1644,6 +1644,7 @@ static int radeon_do_resume_cp(struct drm_device *dev, struct drm_file *file_pri radeon_cp_load_microcode(dev_priv); radeon_cp_init_ring_buffer(dev, dev_priv, file_priv); + dev_priv->have_z_offset = 0; radeon_do_engine_reset(dev); radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1); diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index e9d085021c1..70ba02ed772 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -194,11 +194,8 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) } radeon_bo_list_unreserve(&parser->validated); for (i = 0; i < parser->nrelocs; i++) { - if (parser->relocs[i].gobj) { - mutex_lock(&parser->rdev->ddev->struct_mutex); - drm_gem_object_unreference(parser->relocs[i].gobj); - mutex_unlock(&parser->rdev->ddev->struct_mutex); - } + if (parser->relocs[i].gobj) + drm_gem_object_unreference_unlocked(parser->relocs[i].gobj); } kfree(parser->track); kfree(parser->relocs); diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 28772a37009..b7023fff89e 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -36,7 +36,14 @@ static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock) struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); uint32_t cur_lock; - if (ASIC_IS_AVIVO(rdev)) { + if (ASIC_IS_DCE4(rdev)) { + cur_lock = RREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset); + if (lock) + cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK; + else + 
cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK; + WREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock); + } else if (ASIC_IS_AVIVO(rdev)) { cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset); if (lock) cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK; @@ -58,7 +65,10 @@ static void radeon_hide_cursor(struct drm_crtc *crtc) struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_device *rdev = crtc->dev->dev_private; - if (ASIC_IS_AVIVO(rdev)) { + if (ASIC_IS_DCE4(rdev)) { + WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset); + WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT)); + } else if (ASIC_IS_AVIVO(rdev)) { WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT)); } else { @@ -81,10 +91,14 @@ static void radeon_show_cursor(struct drm_crtc *crtc) struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_device *rdev = crtc->dev->dev_private; - if (ASIC_IS_AVIVO(rdev)) { + if (ASIC_IS_DCE4(rdev)) { + WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset); + WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN | + EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT)); + } else if (ASIC_IS_AVIVO(rdev)) { WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN | - (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT)); + (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT)); } else { switch (radeon_crtc->crtc_id) { case 0: @@ -109,7 +123,10 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct radeon_device *rdev = crtc->dev->dev_private; - if (ASIC_IS_AVIVO(rdev)) { + if (ASIC_IS_DCE4(rdev)) { + WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, 0); + WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr); + } else if (ASIC_IS_AVIVO(rdev)) { if (rdev->family >= CHIP_RV770) { if (radeon_crtc->crtc_id) WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, 0); @@ -169,17 +186,13 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc, unpin: if (radeon_crtc->cursor_bo) { radeon_gem_object_unpin(radeon_crtc->cursor_bo); - mutex_lock(&crtc->dev->struct_mutex); - drm_gem_object_unreference(radeon_crtc->cursor_bo); - mutex_unlock(&crtc->dev->struct_mutex); + drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo); } radeon_crtc->cursor_bo = obj; return 0; fail: - mutex_lock(&crtc->dev->struct_mutex); - drm_gem_object_unreference(obj); - mutex_unlock(&crtc->dev->struct_mutex); + drm_gem_object_unreference_unlocked(obj); return 0; } @@ -201,7 +214,20 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, yorigin = CURSOR_HEIGHT - 1; radeon_lock_cursor(crtc, true); - if (ASIC_IS_AVIVO(rdev)) { + if (ASIC_IS_DCE4(rdev)) { + /* cursors are offset into the total surface */ + x += crtc->x; + y += crtc->y; + DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); + + /* XXX: check if evergreen has the same issues as avivo chips */ + WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, + ((xorigin ? 0 : x) << 16) | + (yorigin ? 
0 : y)); + WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); + WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset, + ((radeon_crtc->cursor_width - 1) << 16) | (radeon_crtc->cursor_height - 1)); + } else if (ASIC_IS_AVIVO(rdev)) { int w = radeon_crtc->cursor_width; int i = 0; struct drm_crtc *crtc_p; diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 768b1509fa0..e28e4ed5f72 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -30,6 +30,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/radeon_drm.h> #include <linux/vgaarb.h> +#include <linux/vga_switcheroo.h> #include "radeon_reg.h" #include "radeon.h" #include "radeon_asic.h" @@ -100,80 +101,103 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg) } } -/* - * MC common functions +/** + * radeon_vram_location - try to find VRAM location + * @rdev: radeon device structure holding all necessary information + * @mc: memory controller structure holding memory information + * @base: base address at which to put VRAM + * + * This function tries to place VRAM at the base address provided + * as a parameter (which is so far either the PCI aperture address or, + * for IGPs, the TOM base address). + * + * If there is not enough space to fit the invisible VRAM in the 32-bit + * address space then we limit the VRAM size to the aperture. + * + * If we are using AGP and the AGP aperture doesn't allow us to have + * room for all the VRAM then we restrict the VRAM to the PCI aperture + * size and print a warning. + * + * This function never fails; the worst case is that VRAM gets limited. + * + * Note: GTT start, end and size should be initialized before calling this + * function on AGP platforms. + * + * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size; + * this shouldn't be a problem as we are using the PCI aperture as a reference. + * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but + * not IGP. + * + * Note: we use mc_vram_size as on some boards we need to program the mc to + * cover the whole aperture even if VRAM size is smaller than the aperture size + * (Novell bug 204882, along with lots of Ubuntu ones). + * + * Note: when limiting VRAM it's safe to overwrite real_vram_size because + * we are not in the case where real_vram_size is smaller than mc_vram_size + * (i.e. not affected by the bogus hardware of Novell bug 204882 and the related + * Ubuntu ones). + * + * Note: the IGP TOM address should be the same as the aperture address; we don't + * explicitly check for that though. + * + * FIXME: when reducing VRAM size, align the new size on a power of 2.
+ */ +void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base) +{ + mc->vram_start = base; + if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) { + dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n"); + mc->real_vram_size = mc->aper_size; + mc->mc_vram_size = mc->aper_size; + } + mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; + if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) { + dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n"); + mc->real_vram_size = mc->aper_size; + mc->mc_vram_size = mc->aper_size; + } + mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; + dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n", + mc->mc_vram_size >> 20, mc->vram_start, + mc->vram_end, mc->real_vram_size >> 20); +} + +/** + * radeon_gtt_location - try to find GTT location + * @rdev: radeon device structure holding all necessary information + * @mc: memory controller structure holding memory information + * + * This function tries to place the GTT before or after VRAM. + * + * If the GTT size is bigger than the space left, then we adjust the GTT size. + * Thus this function never fails. + * + * FIXME: when reducing GTT size, align the new size on a power of 2. */ -int radeon_mc_setup(struct radeon_device *rdev) +void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) { - uint32_t tmp; + u64 size_af, size_bf; - /* Some chips have an "issue" with the memory controller, the - * location must be aligned to the size. We just align it down, - * too bad if we walk over the top of system memory, we don't - * use DMA without a remapped anyway. - * Affected chips are rv280, all r3xx, and all r4xx, but not IGP - */ - /* FGLRX seems to setup like this, VRAM a 0, then GART. - */ - /* - * Note: from R6xx the address space is 40bits but here we only - * use 32bits (still have to see a card which would exhaust 4G - * address space).
- */ - if (rdev->mc.vram_location != 0xFFFFFFFFUL) { - /* vram location was already setup try to put gtt after - * if it fits */ - tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size; - tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1); - if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) { - rdev->mc.gtt_location = tmp; - } else { - if (rdev->mc.gtt_size >= rdev->mc.vram_location) { - printk(KERN_ERR "[drm] GTT too big to fit " - "before or after vram location.\n"); - return -EINVAL; - } - rdev->mc.gtt_location = 0; - } - } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) { - /* gtt location was already setup try to put vram before - * if it fits */ - if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) { - rdev->mc.vram_location = 0; - } else { - tmp = rdev->mc.gtt_location + rdev->mc.gtt_size; - tmp += (rdev->mc.mc_vram_size - 1); - tmp &= ~(rdev->mc.mc_vram_size - 1); - if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) { - rdev->mc.vram_location = tmp; - } else { - printk(KERN_ERR "[drm] vram too big to fit " - "before or after GTT location.\n"); - return -EINVAL; - } + size_af = 0xFFFFFFFF - mc->vram_end; + size_bf = mc->vram_start; + if (size_bf > size_af) { + if (mc->gtt_size > size_bf) { + dev_warn(rdev->dev, "limiting GTT\n"); + mc->gtt_size = size_bf; } + mc->gtt_start = mc->vram_start - mc->gtt_size; } else { - rdev->mc.vram_location = 0; - tmp = rdev->mc.mc_vram_size; - tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1); - rdev->mc.gtt_location = tmp; - } - rdev->mc.vram_start = rdev->mc.vram_location; - rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; - rdev->mc.gtt_start = rdev->mc.gtt_location; - rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; - DRM_INFO("radeon: VRAM %uM\n", (unsigned)(rdev->mc.mc_vram_size >> 20)); - DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n", - (unsigned)rdev->mc.vram_location, - (unsigned)(rdev->mc.vram_location + rdev->mc.mc_vram_size - 1)); - DRM_INFO("radeon: GTT %uM\n", (unsigned)(rdev->mc.gtt_size >> 20)); - DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n", - (unsigned)rdev->mc.gtt_location, - (unsigned)(rdev->mc.gtt_location + rdev->mc.gtt_size - 1)); - return 0; + if (mc->gtt_size > size_af) { + dev_warn(rdev->dev, "limiting GTT\n"); + mc->gtt_size = size_af; + } + mc->gtt_start = mc->vram_end + 1; + } + mc->gtt_end = mc->gtt_start + mc->gtt_size - 1; + dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n", + mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end); } - /* * GPU helpers function. 
*/ @@ -182,7 +206,16 @@ bool radeon_card_posted(struct radeon_device *rdev) uint32_t reg; /* first check CRTCs */ - if (ASIC_IS_AVIVO(rdev)) { + if (ASIC_IS_DCE4(rdev)) { + reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); + if (reg & EVERGREEN_CRTC_MASTER_EN) + return true; + } else if (ASIC_IS_AVIVO(rdev)) { reg = RREG32(AVIVO_D1CRTC_CONTROL) | RREG32(AVIVO_D2CRTC_CONTROL); if (reg & AVIVO_CRTC_EN) { @@ -229,6 +262,8 @@ bool radeon_boot_test_post_card(struct radeon_device *rdev) int radeon_dummy_page_init(struct radeon_device *rdev) { + if (rdev->dummy_page.page) + return 0; rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO); if (rdev->dummy_page.page == NULL) return -ENOMEM; @@ -310,7 +345,7 @@ void radeon_register_accessor_init(struct radeon_device *rdev) rdev->mc_rreg = &rs600_mc_rreg; rdev->mc_wreg = &rs600_mc_wreg; } - if (rdev->family >= CHIP_R600) { + if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) { rdev->pciep_rreg = &r600_pciep_rreg; rdev->pciep_wreg = &r600_pciep_wreg; } @@ -329,21 +364,22 @@ int radeon_asic_init(struct radeon_device *rdev) case CHIP_RS100: case CHIP_RV200: case CHIP_RS200: + rdev->asic = &r100_asic; + break; case CHIP_R200: case CHIP_RV250: case CHIP_RS300: case CHIP_RV280: - rdev->asic = &r100_asic; + rdev->asic = &r200_asic; break; case CHIP_R300: case CHIP_R350: case CHIP_RV350: case CHIP_RV380: - rdev->asic = &r300_asic; - if (rdev->flags & RADEON_IS_PCIE) { - rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; - rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; - } + if (rdev->flags & RADEON_IS_PCIE) + rdev->asic = &r300_asic_pcie; + else + rdev->asic = &r300_asic; break; case CHIP_R420: case CHIP_R423: @@ -387,6 +423,13 @@ int radeon_asic_init(struct radeon_device *rdev) case CHIP_RV740: rdev->asic = &rv770_asic; break; + case CHIP_CEDAR: + case CHIP_REDWOOD: + case CHIP_JUNIPER: + case CHIP_CYPRESS: + case CHIP_HEMLOCK: + rdev->asic = &evergreen_asic; + break; default: /* FIXME: not supported yet */ return -EINVAL; @@ -613,6 +656,36 @@ void radeon_check_arguments(struct radeon_device *rdev) } } +static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct radeon_device *rdev = dev->dev_private; + pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; + if (state == VGA_SWITCHEROO_ON) { + printk(KERN_INFO "radeon: switched on\n"); + /* don't suspend or resume card normally */ + rdev->powered_down = false; + radeon_resume_kms(dev); + } else { + printk(KERN_INFO "radeon: switched off\n"); + radeon_suspend_kms(dev, pmm); + /* don't suspend or resume card normally */ + rdev->powered_down = true; + } +} + +static bool radeon_switcheroo_can_switch(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + bool can_switch; + + spin_lock(&dev->count_lock); + can_switch = (dev->open_count == 0); + spin_unlock(&dev->count_lock); + return can_switch; +} + + int radeon_device_init(struct radeon_device *rdev, struct drm_device *ddev, struct pci_dev *pdev, @@ -638,11 +711,14 @@ int radeon_device_init(struct radeon_device *rdev, 
mutex_init(&rdev->cs_mutex); mutex_init(&rdev->ib_pool.mutex); mutex_init(&rdev->cp.mutex); + mutex_init(&rdev->dc_hw_i2c_mutex); if (rdev->family >= CHIP_R600) spin_lock_init(&rdev->ih.lock); mutex_init(&rdev->gem.mutex); + mutex_init(&rdev->pm.mutex); rwlock_init(&rdev->fence_drv.lock); INIT_LIST_HEAD(&rdev->gem.objects); + init_waitqueue_head(&rdev->irq.vblank_queue); /* setup workqueue */ rdev->wq = create_workqueue("radeon"); @@ -692,6 +768,9 @@ int radeon_device_init(struct radeon_device *rdev, /* this will fail for cards that aren't VGA class devices, just * ignore it */ vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); + vga_switcheroo_register_client(rdev->pdev, + radeon_switcheroo_set_state, + radeon_switcheroo_can_switch); r = radeon_init(rdev); if (r) @@ -723,6 +802,7 @@ void radeon_device_fini(struct radeon_device *rdev) rdev->shutdown = true; radeon_fini(rdev); destroy_workqueue(rdev->wq); + vga_switcheroo_unregister_client(rdev->pdev); vga_client_register(rdev->pdev, NULL, NULL, NULL); iounmap(rdev->rmmio); rdev->rmmio = NULL; @@ -746,6 +826,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) } rdev = dev->dev_private; + if (rdev->powered_down) + return 0; /* unpin the front buffers */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb); @@ -791,6 +873,9 @@ int radeon_resume_kms(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; + if (rdev->powered_down) + return 0; + acquire_console_sem(); pci_set_power_state(dev->pdev, PCI_D0); pci_restore_state(dev->pdev); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 7e17a362b54..ba8d806dcf3 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -68,6 +68,36 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc) WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id); } +static void evergreen_crtc_load_lut(struct drm_crtc *crtc) +{ + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + int i; + + DRM_DEBUG("%d\n", radeon_crtc->crtc_id); + WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0); + + WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0); + WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0); + WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0); + + WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff); + WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff); + WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff); + + WREG32(EVERGREEN_DC_LUT_RW_MODE, radeon_crtc->crtc_id); + WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK, 0x00000007); + + WREG32(EVERGREEN_DC_LUT_RW_INDEX, 0); + for (i = 0; i < 256; i++) { + WREG32(EVERGREEN_DC_LUT_30_COLOR, + (radeon_crtc->lut_r[i] << 20) | + (radeon_crtc->lut_g[i] << 10) | + (radeon_crtc->lut_b[i] << 0)); + } +} + static void legacy_crtc_load_lut(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); @@ -100,7 +130,9 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc) if (!crtc->enabled) return; - if (ASIC_IS_AVIVO(rdev)) + if (ASIC_IS_DCE4(rdev)) + evergreen_crtc_load_lut(crtc); + else if (ASIC_IS_AVIVO(rdev)) avivo_crtc_load_lut(crtc); else legacy_crtc_load_lut(crtc); @@ -361,6 
+393,8 @@ static bool radeon_setup_enc_conn(struct drm_device *dev) int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) { + struct drm_device *dev = radeon_connector->base.dev; + struct radeon_device *rdev = dev->dev_private; int ret = 0; if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) || @@ -373,11 +407,11 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) if (!radeon_connector->ddc_bus) return -1; if (!radeon_connector->edid) { - radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); - radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); } - + /* some servers provide a hardcoded edid in rom for KVMs */ + if (!radeon_connector->edid) + radeon_connector->edid = radeon_combios_get_hardcoded_edid(rdev); if (radeon_connector->edid) { drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); @@ -395,9 +429,7 @@ static int radeon_ddc_dump(struct drm_connector *connector) if (!radeon_connector->ddc_bus) return -1; - radeon_i2c_do_lock(radeon_connector->ddc_bus, 1); edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter); - radeon_i2c_do_lock(radeon_connector->ddc_bus, 0); if (edid) { kfree(edid); } @@ -414,13 +446,13 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d) return n; } -void radeon_compute_pll(struct radeon_pll *pll, - uint64_t freq, - uint32_t *dot_clock_p, - uint32_t *fb_div_p, - uint32_t *frac_fb_div_p, - uint32_t *ref_div_p, - uint32_t *post_div_p) +static void radeon_compute_pll_legacy(struct radeon_pll *pll, + uint64_t freq, + uint32_t *dot_clock_p, + uint32_t *fb_div_p, + uint32_t *frac_fb_div_p, + uint32_t *ref_div_p, + uint32_t *post_div_p) { uint32_t min_ref_div = pll->min_ref_div; uint32_t max_ref_div = pll->max_ref_div; @@ -580,95 +612,194 @@ void radeon_compute_pll(struct radeon_pll *pll, *post_div_p = best_post_div; } -void radeon_compute_pll_avivo(struct radeon_pll *pll, - uint64_t freq, - uint32_t *dot_clock_p, - uint32_t *fb_div_p, - uint32_t *frac_fb_div_p, - uint32_t *ref_div_p, - uint32_t *post_div_p) +static bool +calc_fb_div(struct radeon_pll *pll, + uint32_t freq, + uint32_t post_div, + uint32_t ref_div, + uint32_t *fb_div, + uint32_t *fb_div_frac) { - fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq; - fixed20_12 pll_out_max, pll_out_min; - fixed20_12 pll_in_max, pll_in_min; - fixed20_12 reference_freq; - fixed20_12 error, ffreq, a, b; - - pll_out_max.full = rfixed_const(pll->pll_out_max); - pll_out_min.full = rfixed_const(pll->pll_out_min); - pll_in_max.full = rfixed_const(pll->pll_in_max); - pll_in_min.full = rfixed_const(pll->pll_in_min); - reference_freq.full = rfixed_const(pll->reference_freq); - do_div(freq, 10); + fixed20_12 feedback_divider, a, b; + u32 vco_freq; + + vco_freq = freq * post_div; + /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */ + a.full = rfixed_const(pll->reference_freq); + feedback_divider.full = rfixed_const(vco_freq); + feedback_divider.full = rfixed_div(feedback_divider, a); + a.full = rfixed_const(ref_div); + feedback_divider.full = rfixed_mul(feedback_divider, a); + + if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) { + /* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */ + a.full = rfixed_const(10); + feedback_divider.full = rfixed_mul(feedback_divider, a); + feedback_divider.full += rfixed_const_half(0); + 
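/*
 * Editor's note (illustration only, not part of this commit): the fixed-point
 * block above just evaluates
 *
 *     feedback_divider = (freq * post_div * ref_div) / pll->reference_freq
 *
 * and, when fractional feedback dividers are allowed, rounds it to one decimal
 * digit. For example, with reference_freq = 2700, ref_div = 2, post_div = 4
 * and freq = 15435 (all in the same units):
 *
 *     vco_freq         = 15435 * 4        = 61740
 *     feedback_divider = 61740 * 2 / 2700 = 45.7333...
 *     scale by 10, add 0.5, floor          -> 457  (i.e. 45.7)
 *
 * so the code below ends up returning *fb_div = 45 and *fb_div_frac = 7.
 */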
feedback_divider.full = rfixed_floor(feedback_divider); + feedback_divider.full = rfixed_div(feedback_divider, a); + + /* *fb_div = floor(feedback_divider); */ + a.full = rfixed_floor(feedback_divider); + *fb_div = rfixed_trunc(a); + /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */ + a.full = rfixed_const(10); + b.full = rfixed_mul(feedback_divider, a); + + feedback_divider.full = rfixed_floor(feedback_divider); + feedback_divider.full = rfixed_mul(feedback_divider, a); + feedback_divider.full = b.full - feedback_divider.full; + *fb_div_frac = rfixed_trunc(feedback_divider); + } else { + /* *fb_div = floor(feedback_divider + 0.5); */ + feedback_divider.full += rfixed_const_half(0); + feedback_divider.full = rfixed_floor(feedback_divider); + + *fb_div = rfixed_trunc(feedback_divider); + *fb_div_frac = 0; + } + + if (((*fb_div) < pll->min_feedback_div) || ((*fb_div) > pll->max_feedback_div)) + return false; + else + return true; +} + +static bool +calc_fb_ref_div(struct radeon_pll *pll, + uint32_t freq, + uint32_t post_div, + uint32_t *fb_div, + uint32_t *fb_div_frac, + uint32_t *ref_div) +{ + fixed20_12 ffreq, max_error, error, pll_out, a; + u32 vco; + ffreq.full = rfixed_const(freq); - error.full = rfixed_const(100 * 100); + /* max_error = ffreq * 0.0025; */ + a.full = rfixed_const(400); + max_error.full = rfixed_div(ffreq, a); - /* max p */ - p.full = rfixed_div(pll_out_max, ffreq); - p.full = rfixed_floor(p); + for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) { + if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) { + vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac)); + vco = vco / ((*ref_div) * 10); - /* min m */ - m.full = rfixed_div(reference_freq, pll_in_max); - m.full = rfixed_ceil(m); + if ((vco < pll->pll_out_min) || (vco > pll->pll_out_max)) + continue; - while (1) { - n.full = rfixed_div(ffreq, reference_freq); - n.full = rfixed_mul(n, m); - n.full = rfixed_mul(n, p); + /* pll_out = vco / post_div; */ + a.full = rfixed_const(post_div); + pll_out.full = rfixed_const(vco); + pll_out.full = rfixed_div(pll_out, a); - f_vco.full = rfixed_div(n, m); - f_vco.full = rfixed_mul(f_vco, reference_freq); + if (pll_out.full >= ffreq.full) { + error.full = pll_out.full - ffreq.full; + if (error.full <= max_error.full) + return true; + } + } + } + return false; +} - f_pclk.full = rfixed_div(f_vco, p); +static void radeon_compute_pll_new(struct radeon_pll *pll, + uint64_t freq, + uint32_t *dot_clock_p, + uint32_t *fb_div_p, + uint32_t *frac_fb_div_p, + uint32_t *ref_div_p, + uint32_t *post_div_p) +{ + u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0; + u32 best_freq = 0, vco_frequency; - if (f_pclk.full > ffreq.full) - error.full = f_pclk.full - ffreq.full; - else - error.full = ffreq.full - f_pclk.full; - error.full = rfixed_div(error, f_pclk); - a.full = rfixed_const(100 * 100); - error.full = rfixed_mul(error, a); - - a.full = rfixed_mul(m, p); - a.full = rfixed_div(n, a); - best_freq.full = rfixed_mul(reference_freq, a); - - if (rfixed_trunc(error) < 25) - break; - - a.full = rfixed_const(1); - m.full = m.full + a.full; - a.full = rfixed_div(reference_freq, m); - if (a.full >= pll_in_min.full) - continue; + /* freq = freq / 10; */ + do_div(freq, 10); - m.full = rfixed_div(reference_freq, pll_in_max); - m.full = rfixed_ceil(m); - a.full= rfixed_const(1); - p.full = p.full - a.full; - a.full = rfixed_mul(p, ffreq); - if (a.full >= pll_out_min.full) - continue; - else { - DRM_ERROR("Unable to find pll 
dividers\n"); - break; + if (pll->flags & RADEON_PLL_USE_POST_DIV) { + post_div = pll->post_div; + if ((post_div < pll->min_post_div) || (post_div > pll->max_post_div)) + goto done; + + vco_frequency = freq * post_div; + if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max)) + goto done; + + if (pll->flags & RADEON_PLL_USE_REF_DIV) { + ref_div = pll->reference_div; + if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div)) + goto done; + if (!calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac)) + goto done; + } + } else { + for (post_div = pll->max_post_div; post_div >= pll->min_post_div; --post_div) { + if (pll->flags & RADEON_PLL_LEGACY) { + if ((post_div == 5) || + (post_div == 7) || + (post_div == 9) || + (post_div == 10) || + (post_div == 11)) + continue; + } + + if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) + continue; + + vco_frequency = freq * post_div; + if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max)) + continue; + if (pll->flags & RADEON_PLL_USE_REF_DIV) { + ref_div = pll->reference_div; + if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div)) + goto done; + if (calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac)) + break; + } else { + if (calc_fb_ref_div(pll, freq, post_div, &fb_div, &fb_div_frac, &ref_div)) + break; + } } } - a.full = rfixed_const(10); - b.full = rfixed_mul(n, a); + best_freq = pll->reference_freq * 10 * fb_div; + best_freq += pll->reference_freq * fb_div_frac; + best_freq = best_freq / (ref_div * post_div); - frac_n.full = rfixed_floor(n); - frac_n.full = rfixed_mul(frac_n, a); - frac_n.full = b.full - frac_n.full; +done: + if (best_freq == 0) + DRM_ERROR("Couldn't find valid PLL dividers\n"); - *dot_clock_p = rfixed_trunc(best_freq); - *fb_div_p = rfixed_trunc(n); - *frac_fb_div_p = rfixed_trunc(frac_n); - *ref_div_p = rfixed_trunc(m); - *post_div_p = rfixed_trunc(p); + *dot_clock_p = best_freq / 10; + *fb_div_p = fb_div; + *frac_fb_div_p = fb_div_frac; + *ref_div_p = ref_div; + *post_div_p = post_div; - DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p); + DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p); +} + +void radeon_compute_pll(struct radeon_pll *pll, + uint64_t freq, + uint32_t *dot_clock_p, + uint32_t *fb_div_p, + uint32_t *frac_fb_div_p, + uint32_t *ref_div_p, + uint32_t *post_div_p) +{ + switch (pll->algo) { + case PLL_ALGO_NEW: + radeon_compute_pll_new(pll, freq, dot_clock_p, fb_div_p, + frac_fb_div_p, ref_div_p, post_div_p); + break; + case PLL_ALGO_LEGACY: + default: + radeon_compute_pll_legacy(pll, freq, dot_clock_p, fb_div_p, + frac_fb_div_p, ref_div_p, post_div_p); + break; + } } static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) @@ -679,11 +810,8 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) if (fb->fbdev) radeonfb_remove(dev, fb); - if (radeon_fb->obj) { - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(radeon_fb->obj); - mutex_unlock(&dev->struct_mutex); - } + if (radeon_fb->obj) + drm_gem_object_unreference_unlocked(radeon_fb->obj); drm_framebuffer_cleanup(fb); kfree(radeon_fb); } @@ -819,7 +947,7 @@ static int radeon_modeset_create_props(struct radeon_device *rdev) int radeon_modeset_init(struct radeon_device *rdev) { - int num_crtc = 2, i; + int i; int ret; drm_mode_config_init(rdev->ddev); @@ -842,11 +970,23 @@ int radeon_modeset_init(struct radeon_device 
*rdev) return ret; } + /* check combios for a valid hardcoded EDID - Sun servers */ + if (!rdev->is_atom_bios) { + /* check for hardcoded EDID in BIOS */ + radeon_combios_check_hardcoded_edid(rdev); + } + if (rdev->flags & RADEON_SINGLE_CRTC) - num_crtc = 1; + rdev->num_crtc = 1; + else { + if (ASIC_IS_DCE4(rdev)) + rdev->num_crtc = 6; + else + rdev->num_crtc = 2; + } /* allocate crtcs */ - for (i = 0; i < num_crtc; i++) { + for (i = 0; i < rdev->num_crtc; i++) { radeon_crtc_init(rdev->ddev, i); } @@ -863,6 +1003,8 @@ int radeon_modeset_init(struct radeon_device *rdev) void radeon_modeset_fini(struct radeon_device *rdev) { + kfree(rdev->mode_info.bios_hardcoded_edid); + if (rdev->mode_info.mode_config_initialized) { radeon_hpd_fini(rdev); drm_mode_config_cleanup(rdev->ddev); diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 8ba3de7994d..6eec0ece6a6 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -40,9 +40,11 @@ /* * KMS wrapper. + * - 2.0.0 - initial interface + * - 2.1.0 - add square tiling interface */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 0 +#define KMS_DRIVER_MINOR 1 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); @@ -86,7 +88,8 @@ int radeon_benchmarking = 0; int radeon_testing = 0; int radeon_connector_table = 0; int radeon_tv = 1; -int radeon_new_pll = 1; +int radeon_new_pll = -1; +int radeon_dynpm = -1; int radeon_audio = 1; MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); @@ -122,9 +125,12 @@ module_param_named(connector_table, radeon_connector_table, int, 0444); MODULE_PARM_DESC(tv, "TV enable (0 = disable)"); module_param_named(tv, radeon_tv, int, 0444); -MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips"); +MODULE_PARM_DESC(new_pll, "Select new PLL code"); module_param_named(new_pll, radeon_new_pll, int, 0444); +MODULE_PARM_DESC(dynpm, "Disable/Enable dynamic power management (1 = enable)"); +module_param_named(dynpm, radeon_dynpm, int, 0444); + MODULE_PARM_DESC(audio, "Audio enable (0 = disable)"); module_param_named(audio, radeon_audio, int, 0444); @@ -339,6 +345,7 @@ static int __init radeon_init(void) driver = &kms_driver; driver->driver_features |= DRIVER_MODESET; driver->num_ioctls = radeon_max_kms_ioctl; + radeon_register_atpx_handler(); } /* if the vga console setting is enabled still * let modprobe override it */ @@ -348,6 +355,7 @@ static int __init radeon_init(void) static void __exit radeon_exit(void) { drm_exit(driver); + radeon_unregister_atpx_handler(); } module_init(radeon_init); diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index c57ad606504..ec55f2b23c2 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h @@ -268,6 +268,8 @@ typedef struct drm_radeon_private { u32 scratch_ages[5]; + int have_z_offset; + /* starting from here on, data is preserved accross an open */ uint32_t flags; /* see radeon_chip_flags */ resource_size_t fb_aper_offset; @@ -295,6 +297,9 @@ typedef struct drm_radeon_private { int r700_sc_prim_fifo_size; int r700_sc_hiz_tile_fifo_size; int r700_sc_earlyz_tile_fifo_fize; + int r600_group_size; + int r600_npipes; + int r600_nbanks; struct mutex cs_mutex; u32 cs_id_scnt; @@ -310,9 +315,11 @@ typedef struct drm_radeon_buf_priv { u32 age; } drm_radeon_buf_priv_t; +struct drm_buffer; + typedef struct drm_radeon_kcmd_buffer { 
int bufsz; - char *buf; + struct drm_buffer *buffer; int nbox; struct drm_clip_rect __user *boxes; } drm_radeon_kcmd_buffer_t; @@ -455,6 +462,15 @@ extern void r600_blit_swap(struct drm_device *dev, int sx, int sy, int dx, int dy, int w, int h, int src_pitch, int dst_pitch, int cpp); +/* atpx handler */ +#if defined(CONFIG_VGA_SWITCHEROO) +void radeon_register_atpx_handler(void); +void radeon_unregister_atpx_handler(void); +#else +static inline void radeon_register_atpx_handler(void) {} +static inline void radeon_unregister_atpx_handler(void) {} +#endif + /* Flags for stats.boxes */ #define RADEON_BOX_DMA_IDLE 0x1 @@ -2122,4 +2138,32 @@ extern void radeon_commit_ring(drm_radeon_private_t *dev_priv); write &= mask; \ } while (0) +/** + * Copy given number of dwords from drm buffer to the ring buffer. + */ +#define OUT_RING_DRM_BUFFER(buf, sz) do { \ + int _size = (sz) * 4; \ + struct drm_buffer *_buf = (buf); \ + int _part_size; \ + while (_size > 0) { \ + _part_size = _size; \ + \ + if (write + _part_size/4 > mask) \ + _part_size = ((mask + 1) - write)*4; \ + \ + if (drm_buffer_index(_buf) + _part_size > PAGE_SIZE) \ + _part_size = PAGE_SIZE - drm_buffer_index(_buf);\ + \ + \ + \ + memcpy(ring + write, &_buf->data[drm_buffer_page(_buf)] \ + [drm_buffer_index(_buf)], _part_size); \ + \ + _size -= _part_size; \ + write = (write + _part_size/4) & mask; \ + drm_buffer_advance(_buf, _part_size); \ + } \ +} while (0) + + #endif /* __RADEON_DRV_H__ */ diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 3c91724457c..bc926ea0a53 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -53,7 +53,7 @@ static uint32_t radeon_encoder_clones(struct drm_encoder *encoder) /* DVO requires 2x ppll clocks depending on tmds chip */ if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) return index_mask; - + count = -1; list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) { struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder); @@ -228,6 +228,32 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder) return NULL; } +static struct radeon_connector_atom_dig * +radeon_get_atom_connector_priv_from_encoder(struct drm_encoder *encoder) +{ + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + struct drm_connector *connector; + struct radeon_connector *radeon_connector; + struct radeon_connector_atom_dig *dig_connector; + + if (!rdev->is_atom_bios) + return NULL; + + connector = radeon_get_connector_for_encoder(encoder); + if (!connector) + return NULL; + + radeon_connector = to_radeon_connector(connector); + + if (!radeon_connector->con_priv) + return NULL; + + dig_connector = radeon_connector->con_priv; + + return dig_connector; +} + static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) @@ -236,6 +262,9 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; + /* adjust pm to upcoming mode change */ + radeon_pm_compute_clocks(rdev); + /* set the active encoder to connector routing */ radeon_encoder_set_active_device(encoder); drm_mode_set_crtcinfo(adjusted_mode, 0); @@ -458,34 +487,20 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder 
*radeon_encoder = to_radeon_encoder(encoder); + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + struct radeon_connector_atom_dig *dig_connector = + radeon_get_atom_connector_priv_from_encoder(encoder); union lvds_encoder_control args; int index = 0; int hdmi_detected = 0; uint8_t frev, crev; - struct radeon_encoder_atom_dig *dig; - struct drm_connector *connector; - struct radeon_connector *radeon_connector; - struct radeon_connector_atom_dig *dig_connector; - connector = radeon_get_connector_for_encoder(encoder); - if (!connector) + if (!dig || !dig_connector) return; - radeon_connector = to_radeon_connector(connector); - - if (!radeon_encoder->enc_priv) - return; - - dig = radeon_encoder->enc_priv; - - if (!radeon_connector->con_priv) - return; - - if (drm_detect_hdmi_monitor(radeon_connector->edid)) + if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) hdmi_detected = 1; - dig_connector = radeon_connector->con_priv; - memset(&args, 0, sizeof(args)); switch (radeon_encoder->encoder_id) { @@ -586,7 +601,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) { struct drm_connector *connector; struct radeon_connector *radeon_connector; - struct radeon_connector_atom_dig *radeon_dig_connector; + struct radeon_connector_atom_dig *dig_connector; connector = radeon_get_connector_for_encoder(encoder); if (!connector) @@ -617,9 +632,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) break; case DRM_MODE_CONNECTOR_DisplayPort: case DRM_MODE_CONNECTOR_eDP: - radeon_dig_connector = radeon_connector->con_priv; - if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || - (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) + dig_connector = radeon_connector->con_priv; + if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || + (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) return ATOM_ENCODER_MODE_DP; else if (drm_detect_hdmi_monitor(radeon_connector->edid)) return ATOM_ENCODER_MODE_HDMI; @@ -656,6 +671,18 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) * - 2 DIG encoder blocks. * DIG1/2 can drive UNIPHY0/1/2 link A or link B * + * DCE 4.0 + * - 3 DIG transmitter blocks UNPHY0/1/2 (links A and B). + * Supports up to 6 digital outputs + * - 6 DIG encoder blocks. 
+ * - DIG to PHY mapping is hardcoded + * DIG1 drives UNIPHY0 link A, A+B + * DIG2 drives UNIPHY0 link B + * DIG3 drives UNIPHY1 link A, A+B + * DIG4 drives UNIPHY1 link B + * DIG5 drives UNIPHY2 link A, A+B + * DIG6 drives UNIPHY2 link B + * * Routing * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links) * Examples: @@ -664,88 +691,78 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI */ -static void + +union dig_encoder_control { + DIG_ENCODER_CONTROL_PS_ALLOCATION v1; + DIG_ENCODER_CONTROL_PARAMETERS_V2 v2; + DIG_ENCODER_CONTROL_PARAMETERS_V3 v3; +}; + +void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - DIG_ENCODER_CONTROL_PS_ALLOCATION args; + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + struct radeon_connector_atom_dig *dig_connector = + radeon_get_atom_connector_priv_from_encoder(encoder); + union dig_encoder_control args; int index = 0, num = 0; uint8_t frev, crev; - struct radeon_encoder_atom_dig *dig; - struct drm_connector *connector; - struct radeon_connector *radeon_connector; - struct radeon_connector_atom_dig *dig_connector; - connector = radeon_get_connector_for_encoder(encoder); - if (!connector) + if (!dig || !dig_connector) return; - radeon_connector = to_radeon_connector(connector); - - if (!radeon_connector->con_priv) - return; - - dig_connector = radeon_connector->con_priv; - - if (!radeon_encoder->enc_priv) - return; - - dig = radeon_encoder->enc_priv; - memset(&args, 0, sizeof(args)); - if (dig->dig_encoder) - index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); - else - index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); + if (ASIC_IS_DCE4(rdev)) + index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl); + else { + if (dig->dig_encoder) + index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); + else + index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); + } num = dig->dig_encoder + 1; atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev); - args.ucAction = action; - args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); + args.v1.ucAction = action; + args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); + args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder); - if (ASIC_IS_DCE32(rdev)) { + if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) { + if (dig_connector->dp_clock == 270000) + args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; + args.v1.ucLaneNum = dig_connector->dp_lane_count; + } else if (radeon_encoder->pixel_clock > 165000) + args.v1.ucLaneNum = 8; + else + args.v1.ucLaneNum = 4; + + if (ASIC_IS_DCE4(rdev)) { + args.v3.acConfig.ucDigSel = dig->dig_encoder; + args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR; + } else { switch (radeon_encoder->encoder_id) { case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1; + args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: - args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2; + case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: + args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2; break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: - args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3; - break; - } - } else { - switch 
(radeon_encoder->encoder_id) { - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: - args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER1; - break; - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER2; + args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3; break; } + if (dig_connector->linkb) + args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB; + else + args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA; } - args.ucEncoderMode = atombios_get_encoder_mode(encoder); - - if (args.ucEncoderMode == ATOM_ENCODER_MODE_DP) { - if (dig_connector->dp_clock == 270000) - args.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ; - args.ucLaneNum = dig_connector->dp_lane_count; - } else if (radeon_encoder->pixel_clock > 165000) - args.ucLaneNum = 8; - else - args.ucLaneNum = 4; - - if (dig_connector->linkb) - args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB; - else - args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA; - atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } @@ -753,6 +770,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) union dig_transmitter_control { DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1; DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2; + DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3; }; void @@ -761,37 +779,29 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; + struct radeon_connector_atom_dig *dig_connector = + radeon_get_atom_connector_priv_from_encoder(encoder); + struct drm_connector *connector; + struct radeon_connector *radeon_connector; union dig_transmitter_control args; int index = 0, num = 0; uint8_t frev, crev; - struct radeon_encoder_atom_dig *dig; - struct drm_connector *connector; - struct radeon_connector *radeon_connector; - struct radeon_connector_atom_dig *dig_connector; bool is_dp = false; + int pll_id = 0; - connector = radeon_get_connector_for_encoder(encoder); - if (!connector) + if (!dig || !dig_connector) return; + connector = radeon_get_connector_for_encoder(encoder); radeon_connector = to_radeon_connector(connector); - if (!radeon_encoder->enc_priv) - return; - - dig = radeon_encoder->enc_priv; - - if (!radeon_connector->con_priv) - return; - - dig_connector = radeon_connector->con_priv; - if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) is_dp = true; memset(&args, 0, sizeof(args)); - if (ASIC_IS_DCE32(rdev)) + if (ASIC_IS_DCE32(rdev) || ASIC_IS_DCE4(rdev)) index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl); else { switch (radeon_encoder->encoder_id) { @@ -821,7 +831,54 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t else args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); } - if (ASIC_IS_DCE32(rdev)) { + if (ASIC_IS_DCE4(rdev)) { + if (is_dp) + args.v3.ucLaneNum = dig_connector->dp_lane_count; + else if (radeon_encoder->pixel_clock > 165000) + args.v3.ucLaneNum = 8; + else + args.v3.ucLaneNum = 4; + + if (dig_connector->linkb) { + args.v3.acConfig.ucLinkSel = 1; + args.v3.acConfig.ucEncoderSel = 1; + } + + /* Select the PLL for the PHY + * DP PHY should be clocked from external src if there is + * one. 
+ */ + if (encoder->crtc) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); + pll_id = radeon_crtc->pll_id; + } + if (is_dp && rdev->clock.dp_extclk) + args.v3.acConfig.ucRefClkSource = 2; /* external src */ + else + args.v3.acConfig.ucRefClkSource = pll_id; + + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + args.v3.acConfig.ucTransmitterSel = 0; + num = 0; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + args.v3.acConfig.ucTransmitterSel = 1; + num = 1; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + args.v3.acConfig.ucTransmitterSel = 2; + num = 2; + break; + } + + if (is_dp) + args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */ + else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { + if (dig->coherent_mode) + args.v3.acConfig.fCoherentMode = 1; + } + } else if (ASIC_IS_DCE32(rdev)) { if (dig->dig_encoder == 1) args.v2.acConfig.ucEncoderSel = 1; if (dig_connector->linkb) @@ -849,7 +906,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t args.v2.acConfig.fCoherentMode = 1; } } else { - args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; if (dig->dig_encoder) @@ -1024,9 +1080,12 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); + + /* adjust pm to dpms change */ + radeon_pm_compute_clocks(rdev); } -union crtc_sourc_param { +union crtc_source_param { SELECT_CRTC_SOURCE_PS_ALLOCATION v1; SELECT_CRTC_SOURCE_PARAMETERS_V2 v2; }; @@ -1038,7 +1097,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); - union crtc_sourc_param args; + union crtc_source_param args; int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source); uint8_t frev, crev; struct radeon_encoder_atom_dig *dig; @@ -1107,10 +1166,26 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: dig = radeon_encoder->enc_priv; - if (dig->dig_encoder) - args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; - else + switch (dig->dig_encoder) { + case 0: args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; + break; + case 1: + args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; + break; + case 2: + args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID; + break; + case 3: + args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID; + break; + case 4: + args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID; + break; + case 5: + args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID; + break; + } break; case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID; @@ -1167,6 +1242,7 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder, } /* set scaler clears this on some chips */ + /* XXX check DCE4 */ if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) { if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE)) WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, @@ -1183,6 +1259,33 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) struct drm_encoder *test_encoder; struct radeon_encoder_atom_dig *dig; uint32_t dig_enc_in_use = 0; + + if (ASIC_IS_DCE4(rdev)) { + struct radeon_connector_atom_dig 
*dig_connector = + radeon_get_atom_connector_priv_from_encoder(encoder); + + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + if (dig_connector->linkb) + return 1; + else + return 0; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + if (dig_connector->linkb) + return 3; + else + return 2; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + if (dig_connector->linkb) + return 5; + else + return 4; + break; + } + } + /* on DCE32 and encoder can driver any block so just crtc id */ if (ASIC_IS_DCE32(rdev)) { return radeon_crtc->crtc_id; @@ -1254,15 +1357,26 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - /* disable the encoder and transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - atombios_dig_encoder_setup(encoder, ATOM_DISABLE); - - /* setup and enable the encoder and transmitter */ - atombios_dig_encoder_setup(encoder, ATOM_ENABLE); - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0); - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); + if (ASIC_IS_DCE4(rdev)) { + /* disable the transmitter */ + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); + /* setup and enable the encoder */ + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP); + + /* init and enable the transmitter */ + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); + } else { + /* disable the encoder and transmitter */ + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); + atombios_dig_encoder_setup(encoder, ATOM_DISABLE); + + /* setup and enable the encoder and transmitter */ + atombios_dig_encoder_setup(encoder, ATOM_ENABLE); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); + } break; case ENCODER_OBJECT_ID_INTERNAL_DDI: atombios_ddia_setup(encoder, ATOM_ENABLE); @@ -1282,7 +1396,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, } atombios_apply_encoder_quirks(encoder, adjusted_mode); - r600_hdmi_setmode(encoder, adjusted_mode); + /* XXX */ + if (!ASIC_IS_DCE4(rdev)) + r600_hdmi_setmode(encoder, adjusted_mode); } static bool @@ -1480,10 +1596,18 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su return; encoder = &radeon_encoder->base; - if (rdev->flags & RADEON_SINGLE_CRTC) + switch (rdev->num_crtc) { + case 1: encoder->possible_crtcs = 0x1; - else + break; + case 2: + default: encoder->possible_crtcs = 0x3; + break; + case 6: + encoder->possible_crtcs = 0x3f; + break; + } radeon_encoder->enc_priv = NULL; diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h index 797972e344a..93c7d5d4191 100644 --- a/drivers/gpu/drm/radeon/radeon_family.h +++ b/drivers/gpu/drm/radeon/radeon_family.h @@ -75,6 +75,11 @@ enum radeon_family { CHIP_RV730, CHIP_RV710, CHIP_RV740, + CHIP_CEDAR, + CHIP_REDWOOD, + CHIP_JUNIPER, + CHIP_CYPRESS, + CHIP_HEMLOCK, CHIP_LAST, }; diff --git a/drivers/gpu/drm/radeon/radeon_fb.c 
b/drivers/gpu/drm/radeon/radeon_fb.c index d71e346e9ab..8fccbf29235 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -39,6 +39,8 @@ #include "drm_fb_helper.h" +#include <linux/vga_switcheroo.h> + struct radeon_fb_device { struct drm_fb_helper helper; struct radeon_framebuffer *rfb; @@ -148,7 +150,6 @@ int radeonfb_create(struct drm_device *dev, unsigned long tmp; bool fb_tiled = false; /* useful for testing */ u32 tiling_flags = 0; - int crtc_count; mode_cmd.width = surface_width; mode_cmd.height = surface_height; @@ -239,11 +240,7 @@ int radeonfb_create(struct drm_device *dev, rfbdev = info->par; rfbdev->helper.funcs = &radeon_fb_helper_funcs; rfbdev->helper.dev = dev; - if (rdev->flags & RADEON_SINGLE_CRTC) - crtc_count = 1; - else - crtc_count = 2; - ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, crtc_count, + ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, rdev->num_crtc, RADEONFB_CONN_LIMIT); if (ret) goto out_unref; @@ -257,7 +254,7 @@ int radeonfb_create(struct drm_device *dev, info->flags = FBINFO_DEFAULT; info->fbops = &radeonfb_ops; - tmp = fb_gpuaddr - rdev->mc.vram_location; + tmp = fb_gpuaddr - rdev->mc.vram_start; info->fix.smem_start = rdev->mc.aper_base + tmp; info->fix.smem_len = size; info->screen_base = fbptr; @@ -291,6 +288,7 @@ int radeonfb_create(struct drm_device *dev, rfbdev->rdev = rdev; mutex_unlock(&rdev->ddev->struct_mutex); + vga_switcheroo_client_fb_set(rdev->ddev->pdev, info); return 0; out_unref: diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index e73d56e83fa..1770d3c07fd 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -139,6 +139,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, unsigned t; unsigned p; int i, j; + u64 page_base; if (!rdev->gart.ready) { WARN(1, "trying to unbind memory to unitialized GART !\n"); @@ -151,9 +152,11 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p], PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); rdev->gart.pages[p] = NULL; - rdev->gart.pages_addr[p] = 0; + rdev->gart.pages_addr[p] = rdev->dummy_page.addr; + page_base = rdev->gart.pages_addr[p]; for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { - radeon_gart_set_page(rdev, t, 0); + radeon_gart_set_page(rdev, t, page_base); + page_base += RADEON_GPU_PAGE_SIZE; } } } @@ -199,8 +202,26 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, return 0; } +void radeon_gart_restore(struct radeon_device *rdev) +{ + int i, j, t; + u64 page_base; + + for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) { + page_base = rdev->gart.pages_addr[i]; + for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) { + radeon_gart_set_page(rdev, t, page_base); + page_base += RADEON_GPU_PAGE_SIZE; + } + } + mb(); + radeon_gart_tlb_flush(rdev); +} + int radeon_gart_init(struct radeon_device *rdev) { + int r, i; + if (rdev->gart.pages) { return 0; } @@ -209,6 +230,9 @@ int radeon_gart_init(struct radeon_device *rdev) DRM_ERROR("Page size is smaller than GPU page size!\n"); return -EINVAL; } + r = radeon_dummy_page_init(rdev); + if (r) + return r; /* Compute table size */ rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE; rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE; @@ -227,6 +251,10 @@ int radeon_gart_init(struct radeon_device *rdev) radeon_gart_fini(rdev); return -ENOMEM; } + /* set GART entry to point to the 
dummy page by default */ + for (i = 0; i < rdev->gart.num_cpu_pages; i++) { + rdev->gart.pages_addr[i] = rdev->dummy_page.addr; + } return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index db8e9a355a0..ef92d147d8f 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -69,9 +69,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size, if (r != -ERESTARTSYS) DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n", size, initial_domain, alignment, r); - mutex_lock(&rdev->ddev->struct_mutex); - drm_gem_object_unreference(gobj); - mutex_unlock(&rdev->ddev->struct_mutex); + drm_gem_object_unreference_unlocked(gobj); return r; } gobj->driver_private = robj; @@ -202,14 +200,10 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data, } r = drm_gem_handle_create(filp, gobj, &handle); if (r) { - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gobj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(gobj); return r; } - mutex_lock(&dev->struct_mutex); - drm_gem_object_handle_unreference(gobj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_handle_unreference_unlocked(gobj); args->handle = handle; return 0; } @@ -236,9 +230,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain); - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gobj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(gobj); return r; } @@ -255,9 +247,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, } robj = gobj->driver_private; args->addr_ptr = radeon_bo_mmap_offset(robj); - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gobj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(gobj); return 0; } @@ -288,9 +278,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, default: break; } - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gobj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(gobj); return r; } @@ -311,9 +299,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, /* callback hw specific functions if any */ if (robj->rdev->asic->ioctl_wait_idle) robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj); - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gobj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(gobj); return r; } @@ -331,9 +317,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data, return -EINVAL; robj = gobj->driver_private; r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch); - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gobj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(gobj); return r; } @@ -356,8 +340,6 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data, radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch); radeon_bo_unreserve(rbo); out: - mutex_lock(&dev->struct_mutex); - drm_gem_object_unreference(gobj); - mutex_unlock(&dev->struct_mutex); + drm_gem_object_unreference_unlocked(gobj); return r; } diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index da3da1e89d0..4ae50c19589 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c @@ -26,6 +26,7 @@ #include "drmP.h" #include "radeon_drm.h" 
#include "radeon.h" +#include "atom.h" /** * radeon_ddc_probe @@ -59,7 +60,7 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector) } -void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state) +static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state) { struct radeon_device *rdev = i2c->dev->dev_private; struct radeon_i2c_bus_rec *rec = &i2c->rec; @@ -71,13 +72,25 @@ void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state) */ if (rec->hw_capable) { if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) { - if (rec->a_clk_reg == RADEON_GPIO_MONID) { + u32 reg; + + if (rdev->family >= CHIP_RV350) + reg = RADEON_GPIO_MONID; + else if ((rdev->family == CHIP_R300) || + (rdev->family == CHIP_R350)) + reg = RADEON_GPIO_DVI_DDC; + else + reg = RADEON_GPIO_CRT2_DDC; + + mutex_lock(&rdev->dc_hw_i2c_mutex); + if (rec->a_clk_reg == reg) { WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST | R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1))); } else { WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST | R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3))); } + mutex_unlock(&rdev->dc_hw_i2c_mutex); } } @@ -168,6 +181,692 @@ static void set_data(void *i2c_priv, int data) WREG32(rec->en_data_reg, val); } +static u32 radeon_get_i2c_prescale(struct radeon_device *rdev) +{ + struct radeon_pll *spll = &rdev->clock.spll; + u32 sclk = radeon_get_engine_clock(rdev); + u32 prescale = 0; + u32 n, m; + u8 loop; + int i2c_clock; + + switch (rdev->family) { + case CHIP_R100: + case CHIP_RV100: + case CHIP_RS100: + case CHIP_RV200: + case CHIP_RS200: + case CHIP_R200: + case CHIP_RV250: + case CHIP_RS300: + case CHIP_RV280: + case CHIP_R300: + case CHIP_R350: + case CHIP_RV350: + n = (spll->reference_freq) / (4 * 6); + for (loop = 1; loop < 255; loop++) { + if ((loop * (loop - 1)) > n) + break; + } + m = loop - 1; + prescale = m | (loop << 8); + break; + case CHIP_RV380: + case CHIP_RS400: + case CHIP_RS480: + case CHIP_R420: + case CHIP_R423: + case CHIP_RV410: + sclk = radeon_get_engine_clock(rdev); + prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128; + break; + case CHIP_RS600: + case CHIP_RS690: + case CHIP_RS740: + /* todo */ + break; + case CHIP_RV515: + case CHIP_R520: + case CHIP_RV530: + case CHIP_RV560: + case CHIP_RV570: + case CHIP_R580: + i2c_clock = 50; + sclk = radeon_get_engine_clock(rdev); + if (rdev->family == CHIP_R520) + prescale = (127 << 8) + ((sclk * 10) / (4 * 127 * i2c_clock)); + else + prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128; + break; + case CHIP_R600: + case CHIP_RV610: + case CHIP_RV630: + case CHIP_RV670: + /* todo */ + break; + case CHIP_RV620: + case CHIP_RV635: + case CHIP_RS780: + case CHIP_RS880: + case CHIP_RV770: + case CHIP_RV730: + case CHIP_RV710: + case CHIP_RV740: + /* todo */ + break; + case CHIP_CEDAR: + case CHIP_REDWOOD: + case CHIP_JUNIPER: + case CHIP_CYPRESS: + case CHIP_HEMLOCK: + /* todo */ + break; + default: + DRM_ERROR("i2c: unhandled radeon chip\n"); + break; + } + return prescale; +} + + +/* hw i2c engine for r1xx-4xx hardware + * hw can buffer up to 15 bytes + */ +static int r100_hw_i2c_xfer(struct i2c_adapter *i2c_adap, + struct i2c_msg *msgs, int num) +{ + struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); + struct radeon_device *rdev = i2c->dev->dev_private; + struct radeon_i2c_bus_rec *rec = &i2c->rec; + struct i2c_msg *p; + int i, j, k, ret = num; + u32 prescale; + u32 i2c_cntl_0, i2c_cntl_1, i2c_data; + u32 tmp, reg; + + mutex_lock(&rdev->dc_hw_i2c_mutex); + /* take the pm lock since we need a 
constant sclk */ + mutex_lock(&rdev->pm.mutex); + + prescale = radeon_get_i2c_prescale(rdev); + + reg = ((prescale << RADEON_I2C_PRESCALE_SHIFT) | + RADEON_I2C_START | + RADEON_I2C_STOP | + RADEON_I2C_GO); + + if (rdev->is_atom_bios) { + tmp = RREG32(RADEON_BIOS_6_SCRATCH); + WREG32(RADEON_BIOS_6_SCRATCH, tmp | ATOM_S6_HW_I2C_BUSY_STATE); + } + + if (rec->mm_i2c) { + i2c_cntl_0 = RADEON_I2C_CNTL_0; + i2c_cntl_1 = RADEON_I2C_CNTL_1; + i2c_data = RADEON_I2C_DATA; + } else { + i2c_cntl_0 = RADEON_DVI_I2C_CNTL_0; + i2c_cntl_1 = RADEON_DVI_I2C_CNTL_1; + i2c_data = RADEON_DVI_I2C_DATA; + + switch (rdev->family) { + case CHIP_R100: + case CHIP_RV100: + case CHIP_RS100: + case CHIP_RV200: + case CHIP_RS200: + case CHIP_RS300: + switch (rec->mask_clk_reg) { + case RADEON_GPIO_DVI_DDC: + /* no gpio select bit */ + break; + default: + DRM_ERROR("gpio not supported with hw i2c\n"); + ret = -EINVAL; + goto done; + } + break; + case CHIP_R200: + /* only bit 4 on r200 */ + switch (rec->mask_clk_reg) { + case RADEON_GPIO_DVI_DDC: + reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1); + break; + case RADEON_GPIO_MONID: + reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3); + break; + default: + DRM_ERROR("gpio not supported with hw i2c\n"); + ret = -EINVAL; + goto done; + } + break; + case CHIP_RV250: + case CHIP_RV280: + /* bits 3 and 4 */ + switch (rec->mask_clk_reg) { + case RADEON_GPIO_DVI_DDC: + reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1); + break; + case RADEON_GPIO_VGA_DDC: + reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC2); + break; + case RADEON_GPIO_CRT2_DDC: + reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3); + break; + default: + DRM_ERROR("gpio not supported with hw i2c\n"); + ret = -EINVAL; + goto done; + } + break; + case CHIP_R300: + case CHIP_R350: + /* only bit 4 on r300/r350 */ + switch (rec->mask_clk_reg) { + case RADEON_GPIO_VGA_DDC: + reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1); + break; + case RADEON_GPIO_DVI_DDC: + reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3); + break; + default: + DRM_ERROR("gpio not supported with hw i2c\n"); + ret = -EINVAL; + goto done; + } + break; + case CHIP_RV350: + case CHIP_RV380: + case CHIP_R420: + case CHIP_R423: + case CHIP_RV410: + case CHIP_RS400: + case CHIP_RS480: + /* bits 3 and 4 */ + switch (rec->mask_clk_reg) { + case RADEON_GPIO_VGA_DDC: + reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1); + break; + case RADEON_GPIO_DVI_DDC: + reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC2); + break; + case RADEON_GPIO_MONID: + reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3); + break; + default: + DRM_ERROR("gpio not supported with hw i2c\n"); + ret = -EINVAL; + goto done; + } + break; + default: + DRM_ERROR("unsupported asic\n"); + ret = -EINVAL; + goto done; + break; + } + } + + /* check for bus probe */ + p = &msgs[0]; + if ((num == 1) && (p->len == 0)) { + WREG32(i2c_cntl_0, (RADEON_I2C_DONE | + RADEON_I2C_NACK | + RADEON_I2C_HALT | + RADEON_I2C_SOFT_RST)); + WREG32(i2c_data, (p->addr << 1) & 0xff); + WREG32(i2c_data, 0); + WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) | + (1 << RADEON_I2C_ADDR_COUNT_SHIFT) | + RADEON_I2C_EN | + (48 << RADEON_I2C_TIME_LIMIT_SHIFT))); + WREG32(i2c_cntl_0, reg); + for (k = 0; k < 32; k++) { + udelay(10); + tmp = RREG32(i2c_cntl_0); + if (tmp & RADEON_I2C_GO) + continue; + tmp = RREG32(i2c_cntl_0); + if (tmp & RADEON_I2C_DONE) + break; + else { + DRM_DEBUG("i2c write error 0x%08x\n", tmp); + WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT); + ret = -EIO; + goto done; + } + } + goto done; + } + + for (i = 0; i < num; i++) { + p = &msgs[i]; + for (j = 0; j < p->len; j++) { 
+ if (p->flags & I2C_M_RD) { + WREG32(i2c_cntl_0, (RADEON_I2C_DONE | + RADEON_I2C_NACK | + RADEON_I2C_HALT | + RADEON_I2C_SOFT_RST)); + WREG32(i2c_data, ((p->addr << 1) & 0xff) | 0x1); + WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) | + (1 << RADEON_I2C_ADDR_COUNT_SHIFT) | + RADEON_I2C_EN | + (48 << RADEON_I2C_TIME_LIMIT_SHIFT))); + WREG32(i2c_cntl_0, reg | RADEON_I2C_RECEIVE); + for (k = 0; k < 32; k++) { + udelay(10); + tmp = RREG32(i2c_cntl_0); + if (tmp & RADEON_I2C_GO) + continue; + tmp = RREG32(i2c_cntl_0); + if (tmp & RADEON_I2C_DONE) + break; + else { + DRM_DEBUG("i2c read error 0x%08x\n", tmp); + WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT); + ret = -EIO; + goto done; + } + } + p->buf[j] = RREG32(i2c_data) & 0xff; + } else { + WREG32(i2c_cntl_0, (RADEON_I2C_DONE | + RADEON_I2C_NACK | + RADEON_I2C_HALT | + RADEON_I2C_SOFT_RST)); + WREG32(i2c_data, (p->addr << 1) & 0xff); + WREG32(i2c_data, p->buf[j]); + WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) | + (1 << RADEON_I2C_ADDR_COUNT_SHIFT) | + RADEON_I2C_EN | + (48 << RADEON_I2C_TIME_LIMIT_SHIFT))); + WREG32(i2c_cntl_0, reg); + for (k = 0; k < 32; k++) { + udelay(10); + tmp = RREG32(i2c_cntl_0); + if (tmp & RADEON_I2C_GO) + continue; + tmp = RREG32(i2c_cntl_0); + if (tmp & RADEON_I2C_DONE) + break; + else { + DRM_DEBUG("i2c write error 0x%08x\n", tmp); + WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT); + ret = -EIO; + goto done; + } + } + } + } + } + +done: + WREG32(i2c_cntl_0, 0); + WREG32(i2c_cntl_1, 0); + WREG32(i2c_cntl_0, (RADEON_I2C_DONE | + RADEON_I2C_NACK | + RADEON_I2C_HALT | + RADEON_I2C_SOFT_RST)); + + if (rdev->is_atom_bios) { + tmp = RREG32(RADEON_BIOS_6_SCRATCH); + tmp &= ~ATOM_S6_HW_I2C_BUSY_STATE; + WREG32(RADEON_BIOS_6_SCRATCH, tmp); + } + + mutex_unlock(&rdev->pm.mutex); + mutex_unlock(&rdev->dc_hw_i2c_mutex); + + return ret; +} + +/* hw i2c engine for r5xx hardware + * hw can buffer up to 15 bytes + */ +static int r500_hw_i2c_xfer(struct i2c_adapter *i2c_adap, + struct i2c_msg *msgs, int num) +{ + struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); + struct radeon_device *rdev = i2c->dev->dev_private; + struct radeon_i2c_bus_rec *rec = &i2c->rec; + struct i2c_msg *p; + int i, j, remaining, current_count, buffer_offset, ret = num; + u32 prescale; + u32 tmp, reg; + u32 saved1, saved2; + + mutex_lock(&rdev->dc_hw_i2c_mutex); + /* take the pm lock since we need a constant sclk */ + mutex_lock(&rdev->pm.mutex); + + prescale = radeon_get_i2c_prescale(rdev); + + /* clear gpio mask bits */ + tmp = RREG32(rec->mask_clk_reg); + tmp &= ~rec->mask_clk_mask; + WREG32(rec->mask_clk_reg, tmp); + tmp = RREG32(rec->mask_clk_reg); + + tmp = RREG32(rec->mask_data_reg); + tmp &= ~rec->mask_data_mask; + WREG32(rec->mask_data_reg, tmp); + tmp = RREG32(rec->mask_data_reg); + + /* clear pin values */ + tmp = RREG32(rec->a_clk_reg); + tmp &= ~rec->a_clk_mask; + WREG32(rec->a_clk_reg, tmp); + tmp = RREG32(rec->a_clk_reg); + + tmp = RREG32(rec->a_data_reg); + tmp &= ~rec->a_data_mask; + WREG32(rec->a_data_reg, tmp); + tmp = RREG32(rec->a_data_reg); + + /* set the pins to input */ + tmp = RREG32(rec->en_clk_reg); + tmp &= ~rec->en_clk_mask; + WREG32(rec->en_clk_reg, tmp); + tmp = RREG32(rec->en_clk_reg); + + tmp = RREG32(rec->en_data_reg); + tmp &= ~rec->en_data_mask; + WREG32(rec->en_data_reg, tmp); + tmp = RREG32(rec->en_data_reg); + + /* */ + tmp = RREG32(RADEON_BIOS_6_SCRATCH); + WREG32(RADEON_BIOS_6_SCRATCH, tmp | ATOM_S6_HW_I2C_BUSY_STATE); + saved1 = RREG32(AVIVO_DC_I2C_CONTROL1); + saved2 = RREG32(0x494); + 
WREG32(0x494, saved2 | 0x1); + + WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C); + for (i = 0; i < 50; i++) { + udelay(1); + if (RREG32(AVIVO_DC_I2C_ARBITRATION) & AVIVO_DC_I2C_SW_CAN_USE_I2C) + break; + } + if (i == 50) { + DRM_ERROR("failed to get i2c bus\n"); + ret = -EBUSY; + goto done; + } + + reg = AVIVO_DC_I2C_START | AVIVO_DC_I2C_STOP | AVIVO_DC_I2C_EN; + switch (rec->mask_clk_reg) { + case AVIVO_DC_GPIO_DDC1_MASK: + reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC1); + break; + case AVIVO_DC_GPIO_DDC2_MASK: + reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC2); + break; + case AVIVO_DC_GPIO_DDC3_MASK: + reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC3); + break; + default: + DRM_ERROR("gpio not supported with hw i2c\n"); + ret = -EINVAL; + goto done; + } + + /* check for bus probe */ + p = &msgs[0]; + if ((num == 1) && (p->len == 0)) { + WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE | + AVIVO_DC_I2C_NACK | + AVIVO_DC_I2C_HALT)); + WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET); + udelay(1); + WREG32(AVIVO_DC_I2C_RESET, 0); + + WREG32(AVIVO_DC_I2C_DATA, (p->addr << 1) & 0xff); + WREG32(AVIVO_DC_I2C_DATA, 0); + + WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48)); + WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) | + AVIVO_DC_I2C_DATA_COUNT(1) | + (prescale << 16))); + WREG32(AVIVO_DC_I2C_CONTROL1, reg); + WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO); + for (j = 0; j < 200; j++) { + udelay(50); + tmp = RREG32(AVIVO_DC_I2C_STATUS1); + if (tmp & AVIVO_DC_I2C_GO) + continue; + tmp = RREG32(AVIVO_DC_I2C_STATUS1); + if (tmp & AVIVO_DC_I2C_DONE) + break; + else { + DRM_DEBUG("i2c write error 0x%08x\n", tmp); + WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT); + ret = -EIO; + goto done; + } + } + goto done; + } + + for (i = 0; i < num; i++) { + p = &msgs[i]; + remaining = p->len; + buffer_offset = 0; + if (p->flags & I2C_M_RD) { + while (remaining) { + if (remaining > 15) + current_count = 15; + else + current_count = remaining; + WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE | + AVIVO_DC_I2C_NACK | + AVIVO_DC_I2C_HALT)); + WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET); + udelay(1); + WREG32(AVIVO_DC_I2C_RESET, 0); + + WREG32(AVIVO_DC_I2C_DATA, ((p->addr << 1) & 0xff) | 0x1); + WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48)); + WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) | + AVIVO_DC_I2C_DATA_COUNT(current_count) | + (prescale << 16))); + WREG32(AVIVO_DC_I2C_CONTROL1, reg | AVIVO_DC_I2C_RECEIVE); + WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO); + for (j = 0; j < 200; j++) { + udelay(50); + tmp = RREG32(AVIVO_DC_I2C_STATUS1); + if (tmp & AVIVO_DC_I2C_GO) + continue; + tmp = RREG32(AVIVO_DC_I2C_STATUS1); + if (tmp & AVIVO_DC_I2C_DONE) + break; + else { + DRM_DEBUG("i2c read error 0x%08x\n", tmp); + WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT); + ret = -EIO; + goto done; + } + } + for (j = 0; j < current_count; j++) + p->buf[buffer_offset + j] = RREG32(AVIVO_DC_I2C_DATA) & 0xff; + remaining -= current_count; + buffer_offset += current_count; + } + } else { + while (remaining) { + if (remaining > 15) + current_count = 15; + else + current_count = remaining; + WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE | + AVIVO_DC_I2C_NACK | + AVIVO_DC_I2C_HALT)); + WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET); + udelay(1); + WREG32(AVIVO_DC_I2C_RESET, 0); + + WREG32(AVIVO_DC_I2C_DATA, (p->addr << 1) & 0xff); + for (j = 0; j < current_count; j++) + WREG32(AVIVO_DC_I2C_DATA, p->buf[buffer_offset + j]); + + 
WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48)); + WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) | + AVIVO_DC_I2C_DATA_COUNT(current_count) | + (prescale << 16))); + WREG32(AVIVO_DC_I2C_CONTROL1, reg); + WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO); + for (j = 0; j < 200; j++) { + udelay(50); + tmp = RREG32(AVIVO_DC_I2C_STATUS1); + if (tmp & AVIVO_DC_I2C_GO) + continue; + tmp = RREG32(AVIVO_DC_I2C_STATUS1); + if (tmp & AVIVO_DC_I2C_DONE) + break; + else { + DRM_DEBUG("i2c write error 0x%08x\n", tmp); + WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT); + ret = -EIO; + goto done; + } + } + remaining -= current_count; + buffer_offset += current_count; + } + } + } + +done: + WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE | + AVIVO_DC_I2C_NACK | + AVIVO_DC_I2C_HALT)); + WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET); + udelay(1); + WREG32(AVIVO_DC_I2C_RESET, 0); + + WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_DONE_USING_I2C); + WREG32(AVIVO_DC_I2C_CONTROL1, saved1); + WREG32(0x494, saved2); + tmp = RREG32(RADEON_BIOS_6_SCRATCH); + tmp &= ~ATOM_S6_HW_I2C_BUSY_STATE; + WREG32(RADEON_BIOS_6_SCRATCH, tmp); + + mutex_unlock(&rdev->pm.mutex); + mutex_unlock(&rdev->dc_hw_i2c_mutex); + + return ret; +} + +static int radeon_sw_i2c_xfer(struct i2c_adapter *i2c_adap, + struct i2c_msg *msgs, int num) +{ + struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); + int ret; + + radeon_i2c_do_lock(i2c, 1); + ret = i2c_transfer(&i2c->algo.radeon.bit_adapter, msgs, num); + radeon_i2c_do_lock(i2c, 0); + + return ret; +} + +static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap, + struct i2c_msg *msgs, int num) +{ + struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); + struct radeon_device *rdev = i2c->dev->dev_private; + struct radeon_i2c_bus_rec *rec = &i2c->rec; + int ret; + + switch (rdev->family) { + case CHIP_R100: + case CHIP_RV100: + case CHIP_RS100: + case CHIP_RV200: + case CHIP_RS200: + case CHIP_R200: + case CHIP_RV250: + case CHIP_RS300: + case CHIP_RV280: + case CHIP_R300: + case CHIP_R350: + case CHIP_RV350: + case CHIP_RV380: + case CHIP_R420: + case CHIP_R423: + case CHIP_RV410: + case CHIP_RS400: + case CHIP_RS480: + if (rec->hw_capable) + ret = r100_hw_i2c_xfer(i2c_adap, msgs, num); + else + ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num); + break; + case CHIP_RS600: + case CHIP_RS690: + case CHIP_RS740: + /* XXX fill in hw i2c implementation */ + ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num); + break; + case CHIP_RV515: + case CHIP_R520: + case CHIP_RV530: + case CHIP_RV560: + case CHIP_RV570: + case CHIP_R580: + if (rec->hw_capable) { + if (rec->mm_i2c) + ret = r100_hw_i2c_xfer(i2c_adap, msgs, num); + else + ret = r500_hw_i2c_xfer(i2c_adap, msgs, num); + } else + ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num); + break; + case CHIP_R600: + case CHIP_RV610: + case CHIP_RV630: + case CHIP_RV670: + /* XXX fill in hw i2c implementation */ + ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num); + break; + case CHIP_RV620: + case CHIP_RV635: + case CHIP_RS780: + case CHIP_RS880: + case CHIP_RV770: + case CHIP_RV730: + case CHIP_RV710: + case CHIP_RV740: + /* XXX fill in hw i2c implementation */ + ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num); + break; + case CHIP_CEDAR: + case CHIP_REDWOOD: + case CHIP_JUNIPER: + case CHIP_CYPRESS: + case CHIP_HEMLOCK: + /* XXX fill in hw i2c implementation */ + ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num); + break; + default: + DRM_ERROR("i2c: unhandled radeon chip\n"); + ret = -EIO; + break; + } + + return ret; +} + +static u32 
radeon_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm radeon_i2c_algo = { + .master_xfer = radeon_i2c_xfer, + .functionality = radeon_i2c_func, +}; + struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, struct radeon_i2c_bus_rec *rec, const char *name) @@ -179,23 +878,36 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, if (i2c == NULL) return NULL; - i2c->adapter.owner = THIS_MODULE; - i2c->dev = dev; - i2c_set_adapdata(&i2c->adapter, i2c); - i2c->adapter.algo_data = &i2c->algo.bit; - i2c->algo.bit.setsda = set_data; - i2c->algo.bit.setscl = set_clock; - i2c->algo.bit.getsda = get_data; - i2c->algo.bit.getscl = get_clock; - i2c->algo.bit.udelay = 20; + /* set the internal bit adapter */ + i2c->algo.radeon.bit_adapter.owner = THIS_MODULE; + i2c_set_adapdata(&i2c->algo.radeon.bit_adapter, i2c); + sprintf(i2c->algo.radeon.bit_adapter.name, "Radeon internal i2c bit bus %s", name); + i2c->algo.radeon.bit_adapter.algo_data = &i2c->algo.radeon.bit_data; + i2c->algo.radeon.bit_data.setsda = set_data; + i2c->algo.radeon.bit_data.setscl = set_clock; + i2c->algo.radeon.bit_data.getsda = get_data; + i2c->algo.radeon.bit_data.getscl = get_clock; + i2c->algo.radeon.bit_data.udelay = 20; /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always * make this, 2 jiffies is a lot more reliable */ - i2c->algo.bit.timeout = 2; - i2c->algo.bit.data = i2c; + i2c->algo.radeon.bit_data.timeout = 2; + i2c->algo.radeon.bit_data.data = i2c; + ret = i2c_bit_add_bus(&i2c->algo.radeon.bit_adapter); + if (ret) { + DRM_ERROR("Failed to register internal bit i2c %s\n", name); + goto out_free; + } + /* set the radeon i2c adapter */ + i2c->dev = dev; i2c->rec = *rec; - ret = i2c_bit_add_bus(&i2c->adapter); + i2c->adapter.owner = THIS_MODULE; + i2c_set_adapdata(&i2c->adapter, i2c); + sprintf(i2c->adapter.name, "Radeon i2c %s", name); + i2c->adapter.algo_data = &i2c->algo.radeon; + i2c->adapter.algo = &radeon_i2c_algo; + ret = i2c_add_adapter(&i2c->adapter); if (ret) { - DRM_INFO("Failed to register i2c %s\n", name); + DRM_ERROR("Failed to register i2c %s\n", name); goto out_free; } @@ -237,11 +949,19 @@ out_free: } - void radeon_i2c_destroy(struct radeon_i2c_chan *i2c) { if (!i2c) return; + i2c_del_adapter(&i2c->algo.radeon.bit_adapter); + i2c_del_adapter(&i2c->adapter); + kfree(i2c); +} + +void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c) +{ + if (!i2c) + return; i2c_del_adapter(&i2c->adapter); kfree(i2c); @@ -252,10 +972,10 @@ struct drm_encoder *radeon_best_encoder(struct drm_connector *connector) return NULL; } -void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus, - u8 slave_addr, - u8 addr, - u8 *val) +void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus, + u8 slave_addr, + u8 addr, + u8 *val) { u8 out_buf[2]; u8 in_buf[2]; @@ -286,10 +1006,10 @@ void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus, } } -void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c_bus, - u8 slave_addr, - u8 addr, - u8 val) +void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus, + u8 slave_addr, + u8 addr, + u8 val) { uint8_t out_buf[2]; struct i2c_msg msg = { diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index f23b05606eb..20ec276e759 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -30,6 +30,8 @@ #include "radeon.h" #include "radeon_drm.h" +#include <linux/vga_switcheroo.h> + int radeon_driver_unload_kms(struct 
drm_device *dev) { struct radeon_device *rdev = dev->dev_private; @@ -136,6 +138,7 @@ int radeon_driver_firstopen_kms(struct drm_device *dev) void radeon_driver_lastclose_kms(struct drm_device *dev) { + vga_switcheroo_process_delayed_switch(); } int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) @@ -276,17 +279,17 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = { DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH), DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH), /* KMS */ - DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED), }; int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms); diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index b6d8081e124..df23d6a01d0 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -403,7 +403,7 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, /* if scanout was in GTT this really wouldn't work */ /* crtc offset is from display base addr not FB location */ - radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location; + radeon_crtc->legacy_display_base_addr = rdev->mc.vram_start; base -= radeon_crtc->legacy_display_base_addr; @@ -582,29 +582,6 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod ? 
RADEON_CRTC_V_SYNC_POL : 0)); - /* TODO -> Dell Server */ - if (0) { - uint32_t disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); - uint32_t tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); - uint32_t dac2_cntl = RREG32(RADEON_DAC_CNTL2); - uint32_t crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); - - dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL; - dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL; - - /* For CRT on DAC2, don't turn it on if BIOS didn't - enable it, even it's detected. - */ - disp_hw_debug |= RADEON_CRT2_DISP1_SEL; - tv_dac_cntl &= ~((1<<2) | (3<<8) | (7<<24) | (0xff<<16)); - tv_dac_cntl |= (0x03 | (2<<8) | (0x58<<16)); - - WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); - WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); - WREG32(RADEON_DAC_CNTL2, dac2_cntl); - WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); - } - if (radeon_crtc->crtc_id) { uint32_t crtc2_gen_cntl; uint32_t disp2_merge_cntl; @@ -726,6 +703,10 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode) pll = &rdev->clock.p1pll; pll->flags = RADEON_PLL_LEGACY; + if (radeon_new_pll == 1) + pll->algo = PLL_ALGO_NEW; + else + pll->algo = PLL_ALGO_LEGACY; if (mode->clock > 200000) /* range limits??? */ pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 38e45e231ef..cf389ce50a8 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -115,6 +115,9 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); else radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); + + /* adjust pm to dpms change */ + radeon_pm_compute_clocks(rdev); } static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder) @@ -214,6 +217,11 @@ static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *adjusted_mode) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct drm_device *dev = encoder->dev; + struct radeon_device *rdev = dev->dev_private; + + /* adjust pm to upcoming mode change */ + radeon_pm_compute_clocks(rdev); /* set the active encoder to connector routing */ radeon_encoder_set_active_device(encoder); @@ -285,6 +293,9 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); else radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); + + /* adjust pm to dpms change */ + radeon_pm_compute_clocks(rdev); } static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder) @@ -470,6 +481,9 @@ static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode) radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); else radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); + + /* adjust pm to dpms change */ + radeon_pm_compute_clocks(rdev); } static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder) @@ -635,6 +649,9 @@ static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode) radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); else radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? 
true : false); + + /* adjust pm to dpms change */ + radeon_pm_compute_clocks(rdev); } static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder) @@ -842,6 +859,9 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode) radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); else radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); + + /* adjust pm to dpms change */ + radeon_pm_compute_clocks(rdev); } static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index e81b2aeb6a8..1702b820aa4 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -83,6 +83,8 @@ struct radeon_i2c_bus_rec { bool valid; /* id used by atom */ uint8_t i2c_id; + /* id used by atom */ + uint8_t hpd_id; /* can be used with hw i2c engine */ bool hw_capable; /* uses multi-media i2c engine */ @@ -113,6 +115,7 @@ struct radeon_tmds_pll { #define RADEON_MAX_BIOS_CONNECTOR 16 +/* pll flags */ #define RADEON_PLL_USE_BIOS_DIVS (1 << 0) #define RADEON_PLL_NO_ODD_POST_DIV (1 << 1) #define RADEON_PLL_USE_REF_DIV (1 << 2) @@ -127,6 +130,12 @@ struct radeon_tmds_pll { #define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) #define RADEON_PLL_USE_POST_DIV (1 << 12) +/* pll algo */ +enum radeon_pll_algo { + PLL_ALGO_LEGACY, + PLL_ALGO_NEW +}; + struct radeon_pll { /* reference frequency */ uint32_t reference_freq; @@ -157,6 +166,13 @@ struct radeon_pll { /* pll id */ uint32_t id; + /* pll algo */ + enum radeon_pll_algo algo; +}; + +struct i2c_algo_radeon_data { + struct i2c_adapter bit_adapter; + struct i2c_algo_bit_data bit_data; }; struct radeon_i2c_chan { @@ -164,7 +180,7 @@ struct radeon_i2c_chan { struct drm_device *dev; union { struct i2c_algo_dp_aux_data dp; - struct i2c_algo_bit_data bit; + struct i2c_algo_radeon_data radeon; } algo; struct radeon_i2c_bus_rec rec; }; @@ -193,7 +209,7 @@ struct radeon_mode_info { struct card_info *atom_card_info; enum radeon_connector_table connector_table; bool mode_config_initialized; - struct radeon_crtc *crtcs[2]; + struct radeon_crtc *crtcs[6]; /* DVI-I properties */ struct drm_property *coherent_mode_property; /* DAC enable load detect */ @@ -202,7 +218,8 @@ struct radeon_mode_info { struct drm_property *tv_std_property; /* legacy TMDS PLL detect */ struct drm_property *tmds_pll_property; - + /* hardcoded DFP edid from BIOS */ + struct edid *bios_hardcoded_edid; }; #define MAX_H_CODE_TIMING_LEN 32 @@ -237,6 +254,7 @@ struct radeon_crtc { fixed20_12 vsc; fixed20_12 hsc; struct drm_display_mode native_mode; + int pll_id; }; struct radeon_encoder_primary_dac { @@ -303,6 +321,7 @@ struct radeon_encoder_atom_dig { /* atom lvds */ uint32_t lvds_misc; uint16_t panel_pwr_delay; + enum radeon_pll_algo pll_algo; struct radeon_atom_ss *ss; /* panel mode */ struct drm_display_mode native_mode; @@ -398,6 +417,7 @@ extern void dp_link_train(struct drm_encoder *encoder, struct drm_connector *connector); extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector); extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector); +extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action); extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set); @@ -411,14 +431,15 @@ extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, struct 
radeon_i2c_bus_rec *rec, const char *name); extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c); -extern void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus, - u8 slave_addr, - u8 addr, - u8 *val); -extern void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c, - u8 slave_addr, - u8 addr, - u8 val); +extern void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c); +extern void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus, + u8 slave_addr, + u8 addr, + u8 *val); +extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c, + u8 slave_addr, + u8 addr, + u8 val); extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); @@ -432,14 +453,6 @@ extern void radeon_compute_pll(struct radeon_pll *pll, uint32_t *ref_div_p, uint32_t *post_div_p); -extern void radeon_compute_pll_avivo(struct radeon_pll *pll, - uint64_t freq, - uint32_t *dot_clock_p, - uint32_t *fb_div_p, - uint32_t *frac_fb_div_p, - uint32_t *ref_div_p, - uint32_t *post_div_p); - extern void radeon_setup_encoder_clones(struct drm_device *dev); struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index); @@ -473,6 +486,9 @@ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, extern int radeon_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); +extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev); +extern struct edid * +radeon_combios_get_hardcoded_edid(struct radeon_device *rdev); extern bool radeon_atom_get_clock_info(struct drm_device *dev); extern bool radeon_combios_get_clock_info(struct drm_device *dev); extern struct radeon_encoder_atom_dig * @@ -531,7 +547,6 @@ void radeon_atombios_init_crtc(struct drm_device *dev, struct radeon_crtc *radeon_crtc); void radeon_legacy_init_crtc(struct drm_device *dev, struct radeon_crtc *radeon_crtc); -extern void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state); void radeon_get_clock_info(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index f1da370928e..fc9d00ac6b1 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -178,7 +178,6 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) { int r, i; - radeon_ttm_placement_from_domain(bo, domain); if (bo->pin_count) { bo->pin_count++; if (gpu_addr) @@ -186,6 +185,8 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) return 0; } radeon_ttm_placement_from_domain(bo, domain); + /* force to pin into visible video ram */ + bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; for (i = 0; i < bo->placement.num_placement; i++) bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 8bce64cdc32..d4d1c39a0e9 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -18,21 +18,413 @@ * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: RafaÅ‚ MiÅ‚ecki <zajec5@gmail.com> + * Alex Deucher <alexdeucher@gmail.com> */ #include "drmP.h" #include "radeon.h" +#include "avivod.h" -int radeon_debugfs_pm_init(struct radeon_device *rdev); +#define RADEON_IDLE_LOOP_MS 100 +#define RADEON_RECLOCK_DELAY_MS 200 +#define RADEON_WAIT_VBLANK_TIMEOUT 200 + +static void radeon_pm_set_clocks_locked(struct radeon_device *rdev); +static void radeon_pm_set_clocks(struct radeon_device *rdev); +static void radeon_pm_idle_work_handler(struct work_struct *work); +static int radeon_debugfs_pm_init(struct radeon_device *rdev); + +static const char *pm_state_names[4] = { + "PM_STATE_DISABLED", + "PM_STATE_MINIMUM", + "PM_STATE_PAUSED", + "PM_STATE_ACTIVE" +}; + +static const char *pm_state_types[5] = { + "Default", + "Powersave", + "Battery", + "Balanced", + "Performance", +}; + +static void radeon_print_power_mode_info(struct radeon_device *rdev) +{ + int i, j; + bool is_default; + + DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states); + for (i = 0; i < rdev->pm.num_power_states; i++) { + if (rdev->pm.default_power_state == &rdev->pm.power_state[i]) + is_default = true; + else + is_default = false; + DRM_INFO("State %d %s %s\n", i, + pm_state_types[rdev->pm.power_state[i].type], + is_default ? "(default)" : ""); + if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP)) + DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].non_clock_info.pcie_lanes); + DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes); + for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) { + if (rdev->flags & RADEON_IS_IGP) + DRM_INFO("\t\t%d engine: %d\n", + j, + rdev->pm.power_state[i].clock_info[j].sclk * 10); + else + DRM_INFO("\t\t%d engine/memory: %d/%d\n", + j, + rdev->pm.power_state[i].clock_info[j].sclk * 10, + rdev->pm.power_state[i].clock_info[j].mclk * 10); + } + } +} + +static struct radeon_power_state * radeon_pick_power_state(struct radeon_device *rdev, + enum radeon_pm_state_type type) +{ + int i, j; + enum radeon_pm_state_type wanted_types[2]; + int wanted_count; + + switch (type) { + case POWER_STATE_TYPE_DEFAULT: + default: + return rdev->pm.default_power_state; + case POWER_STATE_TYPE_POWERSAVE: + if (rdev->flags & RADEON_IS_MOBILITY) { + wanted_types[0] = POWER_STATE_TYPE_POWERSAVE; + wanted_types[1] = POWER_STATE_TYPE_BATTERY; + wanted_count = 2; + } else { + wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE; + wanted_count = 1; + } + break; + case POWER_STATE_TYPE_BATTERY: + if (rdev->flags & RADEON_IS_MOBILITY) { + wanted_types[0] = POWER_STATE_TYPE_BATTERY; + wanted_types[1] = POWER_STATE_TYPE_POWERSAVE; + wanted_count = 2; + } else { + wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE; + wanted_count = 1; + } + break; + case POWER_STATE_TYPE_BALANCED: + case POWER_STATE_TYPE_PERFORMANCE: + wanted_types[0] = type; + wanted_count = 1; + break; + } + + for (i = 0; i < wanted_count; i++) { + for (j = 0; j < rdev->pm.num_power_states; j++) { + if (rdev->pm.power_state[j].type == wanted_types[i]) + return &rdev->pm.power_state[j]; + } + } + + return rdev->pm.default_power_state; +} + +static struct radeon_pm_clock_info * radeon_pick_clock_mode(struct radeon_device *rdev, + struct radeon_power_state *power_state, + enum radeon_pm_clock_mode_type type) +{ + switch (type) { + case POWER_MODE_TYPE_DEFAULT: + default: + return power_state->default_clock_mode; + case POWER_MODE_TYPE_LOW: + return &power_state->clock_info[0]; + case POWER_MODE_TYPE_MID: + if (power_state->num_clock_modes > 2) + return 
&power_state->clock_info[1]; + else + return &power_state->clock_info[0]; + break; + case POWER_MODE_TYPE_HIGH: + return &power_state->clock_info[power_state->num_clock_modes - 1]; + } + +} + +static void radeon_get_power_state(struct radeon_device *rdev, + enum radeon_pm_action action) +{ + switch (action) { + case PM_ACTION_MINIMUM: + rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_BATTERY); + rdev->pm.requested_clock_mode = + radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_LOW); + break; + case PM_ACTION_DOWNCLOCK: + rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_POWERSAVE); + rdev->pm.requested_clock_mode = + radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_MID); + break; + case PM_ACTION_UPCLOCK: + rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_DEFAULT); + rdev->pm.requested_clock_mode = + radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_HIGH); + break; + case PM_ACTION_NONE: + default: + DRM_ERROR("Requested mode for not defined action\n"); + return; + } + DRM_INFO("Requested: e: %d m: %d p: %d\n", + rdev->pm.requested_clock_mode->sclk, + rdev->pm.requested_clock_mode->mclk, + rdev->pm.requested_power_state->non_clock_info.pcie_lanes); +} + +static void radeon_set_power_state(struct radeon_device *rdev) +{ + /* if *_clock_mode are the same, *_power_state are as well */ + if (rdev->pm.requested_clock_mode == rdev->pm.current_clock_mode) + return; + + DRM_INFO("Setting: e: %d m: %d p: %d\n", + rdev->pm.requested_clock_mode->sclk, + rdev->pm.requested_clock_mode->mclk, + rdev->pm.requested_power_state->non_clock_info.pcie_lanes); + /* set pcie lanes */ + /* set voltage */ + /* set engine clock */ + radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk); + /* set memory clock */ + + rdev->pm.current_power_state = rdev->pm.requested_power_state; + rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode; +} int radeon_pm_init(struct radeon_device *rdev) { + rdev->pm.state = PM_STATE_DISABLED; + rdev->pm.planned_action = PM_ACTION_NONE; + rdev->pm.downclocked = false; + + if (rdev->bios) { + if (rdev->is_atom_bios) + radeon_atombios_get_power_modes(rdev); + else + radeon_combios_get_power_modes(rdev); + radeon_print_power_mode_info(rdev); + } + if (radeon_debugfs_pm_init(rdev)) { DRM_ERROR("Failed to register debugfs file for PM!\n"); } + INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler); + + if (radeon_dynpm != -1 && radeon_dynpm) { + rdev->pm.state = PM_STATE_PAUSED; + DRM_INFO("radeon: dynamic power management enabled\n"); + } + + DRM_INFO("radeon: power management initialized\n"); + return 0; } +void radeon_pm_compute_clocks(struct radeon_device *rdev) +{ + struct drm_device *ddev = rdev->ddev; + struct drm_connector *connector; + struct radeon_crtc *radeon_crtc; + int count = 0; + + if (rdev->pm.state == PM_STATE_DISABLED) + return; + + mutex_lock(&rdev->pm.mutex); + + rdev->pm.active_crtcs = 0; + list_for_each_entry(connector, + &ddev->mode_config.connector_list, head) { + if (connector->encoder && + connector->dpms != DRM_MODE_DPMS_OFF) { + radeon_crtc = to_radeon_crtc(connector->encoder->crtc); + rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id); + ++count; + } + } + + if (count > 1) { + if (rdev->pm.state == PM_STATE_ACTIVE) { + cancel_delayed_work(&rdev->pm.idle_work); + + rdev->pm.state = PM_STATE_PAUSED; + rdev->pm.planned_action = PM_ACTION_UPCLOCK; + if 
(rdev->pm.downclocked) + radeon_pm_set_clocks(rdev); + + DRM_DEBUG("radeon: dynamic power management deactivated\n"); + } + } else if (count == 1) { + /* TODO: Increase clocks if needed for current mode */ + + if (rdev->pm.state == PM_STATE_MINIMUM) { + rdev->pm.state = PM_STATE_ACTIVE; + rdev->pm.planned_action = PM_ACTION_UPCLOCK; + radeon_pm_set_clocks(rdev); + + queue_delayed_work(rdev->wq, &rdev->pm.idle_work, + msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); + } + else if (rdev->pm.state == PM_STATE_PAUSED) { + rdev->pm.state = PM_STATE_ACTIVE; + queue_delayed_work(rdev->wq, &rdev->pm.idle_work, + msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); + DRM_DEBUG("radeon: dynamic power management activated\n"); + } + } + else { /* count == 0 */ + if (rdev->pm.state != PM_STATE_MINIMUM) { + cancel_delayed_work(&rdev->pm.idle_work); + + rdev->pm.state = PM_STATE_MINIMUM; + rdev->pm.planned_action = PM_ACTION_MINIMUM; + radeon_pm_set_clocks(rdev); + } + } + + mutex_unlock(&rdev->pm.mutex); +} + +static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish) +{ + u32 stat_crtc1 = 0, stat_crtc2 = 0; + bool in_vbl = true; + + if (ASIC_IS_AVIVO(rdev)) { + if (rdev->pm.active_crtcs & (1 << 0)) { + stat_crtc1 = RREG32(D1CRTC_STATUS); + if (!(stat_crtc1 & 1)) + in_vbl = false; + } + if (rdev->pm.active_crtcs & (1 << 1)) { + stat_crtc2 = RREG32(D2CRTC_STATUS); + if (!(stat_crtc2 & 1)) + in_vbl = false; + } + } + if (in_vbl == false) + DRM_INFO("not in vbl for pm change %08x %08x at %s\n", stat_crtc1, + stat_crtc2, finish ? "exit" : "entry"); + return in_vbl; +} +static void radeon_pm_set_clocks_locked(struct radeon_device *rdev) +{ + /*radeon_fence_wait_last(rdev);*/ + switch (rdev->pm.planned_action) { + case PM_ACTION_UPCLOCK: + rdev->pm.downclocked = false; + break; + case PM_ACTION_DOWNCLOCK: + rdev->pm.downclocked = true; + break; + case PM_ACTION_MINIMUM: + break; + case PM_ACTION_NONE: + DRM_ERROR("%s: PM_ACTION_NONE\n", __func__); + break; + } + + /* check if we are in vblank */ + radeon_pm_debug_check_in_vbl(rdev, false); + radeon_set_power_state(rdev); + radeon_pm_debug_check_in_vbl(rdev, true); + rdev->pm.planned_action = PM_ACTION_NONE; +} + +static void radeon_pm_set_clocks(struct radeon_device *rdev) +{ + radeon_get_power_state(rdev, rdev->pm.planned_action); + mutex_lock(&rdev->cp.mutex); + + if (rdev->pm.active_crtcs & (1 << 0)) { + rdev->pm.req_vblank |= (1 << 0); + drm_vblank_get(rdev->ddev, 0); + } + if (rdev->pm.active_crtcs & (1 << 1)) { + rdev->pm.req_vblank |= (1 << 1); + drm_vblank_get(rdev->ddev, 1); + } + if (rdev->pm.active_crtcs) + wait_event_interruptible_timeout( + rdev->irq.vblank_queue, 0, + msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT)); + if (rdev->pm.req_vblank & (1 << 0)) { + rdev->pm.req_vblank &= ~(1 << 0); + drm_vblank_put(rdev->ddev, 0); + } + if (rdev->pm.req_vblank & (1 << 1)) { + rdev->pm.req_vblank &= ~(1 << 1); + drm_vblank_put(rdev->ddev, 1); + } + + radeon_pm_set_clocks_locked(rdev); + mutex_unlock(&rdev->cp.mutex); +} + +static void radeon_pm_idle_work_handler(struct work_struct *work) +{ + struct radeon_device *rdev; + rdev = container_of(work, struct radeon_device, + pm.idle_work.work); + + mutex_lock(&rdev->pm.mutex); + if (rdev->pm.state == PM_STATE_ACTIVE) { + unsigned long irq_flags; + int not_processed = 0; + + read_lock_irqsave(&rdev->fence_drv.lock, irq_flags); + if (!list_empty(&rdev->fence_drv.emited)) { + struct list_head *ptr; + list_for_each(ptr, &rdev->fence_drv.emited) { + /* count up to 3, that's enought info */ + if (++not_processed 
>= 3) + break; + } + } + read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); + + if (not_processed >= 3) { /* should upclock */ + if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) { + rdev->pm.planned_action = PM_ACTION_NONE; + } else if (rdev->pm.planned_action == PM_ACTION_NONE && + rdev->pm.downclocked) { + rdev->pm.planned_action = + PM_ACTION_UPCLOCK; + rdev->pm.action_timeout = jiffies + + msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); + } + } else if (not_processed == 0) { /* should downclock */ + if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) { + rdev->pm.planned_action = PM_ACTION_NONE; + } else if (rdev->pm.planned_action == PM_ACTION_NONE && + !rdev->pm.downclocked) { + rdev->pm.planned_action = + PM_ACTION_DOWNCLOCK; + rdev->pm.action_timeout = jiffies + + msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS); + } + } + + if (rdev->pm.planned_action != PM_ACTION_NONE && + jiffies > rdev->pm.action_timeout) { + radeon_pm_set_clocks(rdev); + } + } + mutex_unlock(&rdev->pm.mutex); + + queue_delayed_work(rdev->wq, &rdev->pm.idle_work, + msecs_to_jiffies(RADEON_IDLE_LOOP_MS)); +} + /* * Debugfs info */ @@ -44,11 +436,14 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; struct radeon_device *rdev = dev->dev_private; + seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]); seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk); seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk); if (rdev->asic->get_memory_clock) seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); + if (rdev->asic->get_pcie_lanes) + seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev)); return 0; } @@ -58,7 +453,7 @@ static struct drm_info_list radeon_pm_info_list[] = { }; #endif -int radeon_debugfs_pm_init(struct radeon_device *rdev) +static int radeon_debugfs_pm_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list)); diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h index 6d0a009dd4a..5c0dc082d33 100644 --- a/drivers/gpu/drm/radeon/radeon_reg.h +++ b/drivers/gpu/drm/radeon/radeon_reg.h @@ -54,7 +54,7 @@ #include "r300_reg.h" #include "r500_reg.h" #include "r600_reg.h" - +#include "evergreen_reg.h" #define RADEON_MC_AGP_LOCATION 0x014c #define RADEON_MC_AGP_START_MASK 0x0000FFFF @@ -1060,32 +1060,38 @@ /* Multimedia I2C bus */ #define RADEON_I2C_CNTL_0 0x0090 -#define RADEON_I2C_DONE (1 << 0) -#define RADEON_I2C_NACK (1 << 1) -#define RADEON_I2C_HALT (1 << 2) -#define RADEON_I2C_SOFT_RST (1 << 5) -#define RADEON_I2C_DRIVE_EN (1 << 6) -#define RADEON_I2C_DRIVE_SEL (1 << 7) -#define RADEON_I2C_START (1 << 8) -#define RADEON_I2C_STOP (1 << 9) -#define RADEON_I2C_RECEIVE (1 << 10) -#define RADEON_I2C_ABORT (1 << 11) -#define RADEON_I2C_GO (1 << 12) -#define RADEON_I2C_PRESCALE_SHIFT 16 +# define RADEON_I2C_DONE (1 << 0) +# define RADEON_I2C_NACK (1 << 1) +# define RADEON_I2C_HALT (1 << 2) +# define RADEON_I2C_SOFT_RST (1 << 5) +# define RADEON_I2C_DRIVE_EN (1 << 6) +# define RADEON_I2C_DRIVE_SEL (1 << 7) +# define RADEON_I2C_START (1 << 8) +# define RADEON_I2C_STOP (1 << 9) +# define RADEON_I2C_RECEIVE (1 << 10) +# define RADEON_I2C_ABORT (1 << 11) +# define RADEON_I2C_GO (1 << 12) +# define RADEON_I2C_PRESCALE_SHIFT 16 #define RADEON_I2C_CNTL_1 0x0094 -#define 
RADEON_I2C_DATA_COUNT_SHIFT 0 -#define RADEON_I2C_ADDR_COUNT_SHIFT 4 -#define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8 -#define RADEON_I2C_SEL (1 << 16) -#define RADEON_I2C_EN (1 << 17) -#define RADEON_I2C_TIME_LIMIT_SHIFT 24 +# define RADEON_I2C_DATA_COUNT_SHIFT 0 +# define RADEON_I2C_ADDR_COUNT_SHIFT 4 +# define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8 +# define RADEON_I2C_SEL (1 << 16) +# define RADEON_I2C_EN (1 << 17) +# define RADEON_I2C_TIME_LIMIT_SHIFT 24 #define RADEON_I2C_DATA 0x0098 #define RADEON_DVI_I2C_CNTL_0 0x02e0 # define R200_DVI_I2C_PIN_SEL(x) ((x) << 3) -# define R200_SEL_DDC1 0 /* 0x60 - VGA_DDC */ -# define R200_SEL_DDC2 1 /* 0x64 - DVI_DDC */ -# define R200_SEL_DDC3 2 /* 0x68 - MONID_DDC */ +# define R200_SEL_DDC1 0 /* depends on asic */ +# define R200_SEL_DDC2 1 /* depends on asic */ +# define R200_SEL_DDC3 2 /* depends on asic */ +# define RADEON_SW_WANTS_TO_USE_DVI_I2C (1 << 13) +# define RADEON_SW_CAN_USE_DVI_I2C (1 << 13) +# define RADEON_SW_DONE_USING_DVI_I2C (1 << 14) +# define RADEON_HW_NEEDS_DVI_I2C (1 << 14) +# define RADEON_ABORT_HW_DVI_I2C (1 << 15) +# define RADEON_HW_USING_DVI_I2C (1 << 15) #define RADEON_DVI_I2C_CNTL_1 0x02e4 #define RADEON_DVI_I2C_DATA 0x02e8 diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 6579eb4c1f2..e50513a6273 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -34,6 +34,36 @@ int radeon_debugfs_ib_init(struct radeon_device *rdev); +void radeon_ib_bogus_cleanup(struct radeon_device *rdev) +{ + struct radeon_ib *ib, *n; + + list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) { + list_del(&ib->list); + vfree(ib->ptr); + kfree(ib); + } +} + +void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib) +{ + struct radeon_ib *bib; + + bib = kmalloc(sizeof(*bib), GFP_KERNEL); + if (bib == NULL) + return; + bib->ptr = vmalloc(ib->length_dw * 4); + if (bib->ptr == NULL) { + kfree(bib); + return; + } + memcpy(bib->ptr, ib->ptr, ib->length_dw * 4); + bib->length_dw = ib->length_dw; + mutex_lock(&rdev->ib_pool.mutex); + list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib); + mutex_unlock(&rdev->ib_pool.mutex); +} + /* * IB. 
*/ @@ -142,6 +172,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev) if (rdev->ib_pool.robj) return 0; + INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib); /* Allocate 1M object buffer */ r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, true, RADEON_GEM_DOMAIN_GTT, @@ -192,6 +223,8 @@ void radeon_ib_pool_fini(struct radeon_device *rdev) return; } mutex_lock(&rdev->ib_pool.mutex); + radeon_ib_bogus_cleanup(rdev); + if (rdev->ib_pool.robj) { r = radeon_bo_reserve(rdev->ib_pool.robj, false); if (likely(r == 0)) { @@ -349,15 +382,49 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data) return 0; } +static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct radeon_device *rdev = node->info_ent->data; + struct radeon_ib *ib; + unsigned i; + + mutex_lock(&rdev->ib_pool.mutex); + if (list_empty(&rdev->ib_pool.bogus_ib)) { + mutex_unlock(&rdev->ib_pool.mutex); + seq_printf(m, "no bogus IB recorded\n"); + return 0; + } + ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list); + list_del_init(&ib->list); + mutex_unlock(&rdev->ib_pool.mutex); + seq_printf(m, "IB size %05u dwords\n", ib->length_dw); + for (i = 0; i < ib->length_dw; i++) { + seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]); + } + vfree(ib->ptr); + kfree(ib); + return 0; +} + static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE]; static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32]; + +static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = { + {"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL}, +}; #endif int radeon_debugfs_ib_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) unsigned i; + int r; + radeon_debugfs_ib_bogus_info_list[0].data = rdev; + r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1); + if (r) + return r; for (i = 0; i < RADEON_IB_POOL_SIZE; i++) { sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i); radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i]; diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index 067167cb39c..3c32f840dcd 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c @@ -29,6 +29,7 @@ #include "drmP.h" #include "drm.h" +#include "drm_buffer.h" #include "drm_sarea.h" #include "radeon_drm.h" #include "radeon_drv.h" @@ -91,21 +92,27 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t * static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * dev_priv, struct drm_file *file_priv, - int id, u32 *data) + int id, struct drm_buffer *buf) { + u32 *data; switch (id) { case RADEON_EMIT_PP_MISC: - if (radeon_check_and_fixup_offset(dev_priv, file_priv, - &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) { + data = drm_buffer_pointer_to_dword(buf, + (RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4); + + if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) { DRM_ERROR("Invalid depth buffer offset\n"); return -EINVAL; } + dev_priv->have_z_offset = 1; break; case RADEON_EMIT_PP_CNTL: - if (radeon_check_and_fixup_offset(dev_priv, file_priv, - &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) { + data = drm_buffer_pointer_to_dword(buf, + (RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4); + + if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) { DRM_ERROR("Invalid colour buffer offset\n"); return -EINVAL; } @@ -117,8 +124,8 @@ static __inline__ int 
radeon_check_and_fixup_packets(drm_radeon_private_t * case R200_EMIT_PP_TXOFFSET_3: case R200_EMIT_PP_TXOFFSET_4: case R200_EMIT_PP_TXOFFSET_5: - if (radeon_check_and_fixup_offset(dev_priv, file_priv, - &data[0])) { + data = drm_buffer_pointer_to_dword(buf, 0); + if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) { DRM_ERROR("Invalid R200 texture offset\n"); return -EINVAL; } @@ -127,8 +134,9 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * case RADEON_EMIT_PP_TXFILTER_0: case RADEON_EMIT_PP_TXFILTER_1: case RADEON_EMIT_PP_TXFILTER_2: - if (radeon_check_and_fixup_offset(dev_priv, file_priv, - &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) { + data = drm_buffer_pointer_to_dword(buf, + (RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4); + if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) { DRM_ERROR("Invalid R100 texture offset\n"); return -EINVAL; } @@ -142,9 +150,10 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * case R200_EMIT_PP_CUBIC_OFFSETS_5:{ int i; for (i = 0; i < 5; i++) { + data = drm_buffer_pointer_to_dword(buf, i); if (radeon_check_and_fixup_offset(dev_priv, file_priv, - &data[i])) { + data)) { DRM_ERROR ("Invalid R200 cubic texture offset\n"); return -EINVAL; @@ -158,9 +167,10 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t * case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{ int i; for (i = 0; i < 5; i++) { + data = drm_buffer_pointer_to_dword(buf, i); if (radeon_check_and_fixup_offset(dev_priv, file_priv, - &data[i])) { + data)) { DRM_ERROR ("Invalid R100 cubic texture offset\n"); return -EINVAL; @@ -269,23 +279,24 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * cmdbuf, unsigned int *cmdsz) { - u32 *cmd = (u32 *) cmdbuf->buf; + u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0); u32 offset, narrays; int count, i, k; - *cmdsz = 2 + ((cmd[0] & RADEON_CP_PACKET_COUNT_MASK) >> 16); + count = ((*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16); + *cmdsz = 2 + count; - if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) { + if ((*cmd & 0xc0000000) != RADEON_CP_PACKET3) { DRM_ERROR("Not a type 3 packet\n"); return -EINVAL; } - if (4 * *cmdsz > cmdbuf->bufsz) { + if (4 * *cmdsz > drm_buffer_unprocessed(cmdbuf->buffer)) { DRM_ERROR("Packet size larger than size of data provided\n"); return -EINVAL; } - switch(cmd[0] & 0xff00) { + switch (*cmd & 0xff00) { /* XXX Are there old drivers needing other packets? 
*/ case RADEON_3D_DRAW_IMMD: @@ -312,7 +323,6 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * break; case RADEON_3D_LOAD_VBPNTR: - count = (cmd[0] >> 16) & 0x3fff; if (count > 18) { /* 12 arrays max */ DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", @@ -321,13 +331,16 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * } /* carefully check packet contents */ - narrays = cmd[1] & ~0xc000; + cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1); + + narrays = *cmd & ~0xc000; k = 0; i = 2; while ((k < narrays) && (i < (count + 2))) { i++; /* skip attribute field */ + cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i); if (radeon_check_and_fixup_offset(dev_priv, file_priv, - &cmd[i])) { + cmd)) { DRM_ERROR ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", k, i); @@ -338,8 +351,10 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * if (k == narrays) break; /* have one more to process, they come in pairs */ + cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i); + if (radeon_check_and_fixup_offset(dev_priv, - file_priv, &cmd[i])) + file_priv, cmd)) { DRM_ERROR ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n", @@ -363,7 +378,9 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * DRM_ERROR("Invalid 3d packet for r200-class chip\n"); return -EINVAL; } - if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[1])) { + + cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1); + if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) { DRM_ERROR("Invalid rndr_gen_indx offset\n"); return -EINVAL; } @@ -374,12 +391,15 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * DRM_ERROR("Invalid 3d packet for r100-class chip\n"); return -EINVAL; } - if ((cmd[1] & 0x8000ffff) != 0x80000810) { - DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]); + + cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1); + if ((*cmd & 0x8000ffff) != 0x80000810) { + DRM_ERROR("Invalid indx_buffer reg address %08X\n", *cmd); return -EINVAL; } - if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[2])) { - DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); + cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2); + if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) { + DRM_ERROR("Invalid indx_buffer offset is %08X\n", *cmd); return -EINVAL; } break; @@ -388,31 +408,34 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t * case RADEON_CNTL_PAINT_MULTI: case RADEON_CNTL_BITBLT_MULTI: /* MSB of opcode: next DWORD GUI_CNTL */ - if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL + cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1); + if (*cmd & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { - offset = cmd[2] << 10; + u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2); + offset = *cmd2 << 10; if (radeon_check_and_fixup_offset (dev_priv, file_priv, &offset)) { DRM_ERROR("Invalid first packet offset\n"); return -EINVAL; } - cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10; + *cmd2 = (*cmd2 & 0xffc00000) | offset >> 10; } - if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && - (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { - offset = cmd[3] << 10; + if ((*cmd & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && + (*cmd & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { + u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3); + offset = *cmd << 10; if (radeon_check_and_fixup_offset (dev_priv, file_priv, 
&offset)) { DRM_ERROR("Invalid second packet offset\n"); return -EINVAL; } - cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10; + *cmd3 = (*cmd3 & 0xffc00000) | offset >> 10; } break; default: - DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00); + DRM_ERROR("Invalid packet type %x\n", *cmd & 0xff00); return -EINVAL; } @@ -876,6 +899,11 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, if (tmp & RADEON_BACK) flags |= RADEON_FRONT; } + if (flags & (RADEON_DEPTH|RADEON_STENCIL)) { + if (!dev_priv->have_z_offset) + printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n"); + flags &= ~(RADEON_DEPTH | RADEON_STENCIL); + } if (flags & (RADEON_FRONT | RADEON_BACK)) { @@ -2611,7 +2639,6 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv, { int id = (int)header.packet.packet_id; int sz, reg; - int *data = (int *)cmdbuf->buf; RING_LOCALS; if (id >= RADEON_MAX_STATE_PACKETS) @@ -2620,23 +2647,22 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv, sz = packet[id].len; reg = packet[id].start; - if (sz * sizeof(int) > cmdbuf->bufsz) { + if (sz * sizeof(u32) > drm_buffer_unprocessed(cmdbuf->buffer)) { DRM_ERROR("Packet size provided larger than data provided\n"); return -EINVAL; } - if (radeon_check_and_fixup_packets(dev_priv, file_priv, id, data)) { + if (radeon_check_and_fixup_packets(dev_priv, file_priv, id, + cmdbuf->buffer)) { DRM_ERROR("Packet verification failed\n"); return -EINVAL; } BEGIN_RING(sz + 1); OUT_RING(CP_PACKET0(reg, (sz - 1))); - OUT_RING_TABLE(data, sz); + OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz); ADVANCE_RING(); - cmdbuf->buf += sz * sizeof(int); - cmdbuf->bufsz -= sz * sizeof(int); return 0; } @@ -2653,10 +2679,8 @@ static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv, OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0)); OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT)); OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1)); - OUT_RING_TABLE(cmdbuf->buf, sz); + OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz); ADVANCE_RING(); - cmdbuf->buf += sz * sizeof(int); - cmdbuf->bufsz -= sz * sizeof(int); return 0; } @@ -2675,10 +2699,8 @@ static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv, OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0)); OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT)); OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1)); - OUT_RING_TABLE(cmdbuf->buf, sz); + OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz); ADVANCE_RING(); - cmdbuf->buf += sz * sizeof(int); - cmdbuf->bufsz -= sz * sizeof(int); return 0; } @@ -2696,11 +2718,9 @@ static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv, OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0)); OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT)); OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1))); - OUT_RING_TABLE(cmdbuf->buf, sz); + OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz); ADVANCE_RING(); - cmdbuf->buf += sz * sizeof(int); - cmdbuf->bufsz -= sz * sizeof(int); return 0; } @@ -2714,7 +2734,7 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv, if (!sz) return 0; - if (sz * 4 > cmdbuf->bufsz) + if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer)) return -EINVAL; BEGIN_RING(5 + sz); @@ -2722,11 +2742,9 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv, OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0)); OUT_RING(start | (1 << 
RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT)); OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1))); - OUT_RING_TABLE(cmdbuf->buf, sz); + OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz); ADVANCE_RING(); - cmdbuf->buf += sz * sizeof(int); - cmdbuf->bufsz -= sz * sizeof(int); return 0; } @@ -2748,11 +2766,9 @@ static int radeon_emit_packet3(struct drm_device * dev, } BEGIN_RING(cmdsz); - OUT_RING_TABLE(cmdbuf->buf, cmdsz); + OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz); ADVANCE_RING(); - cmdbuf->buf += cmdsz * 4; - cmdbuf->bufsz -= cmdsz * 4; return 0; } @@ -2805,16 +2821,16 @@ static int radeon_emit_packet3_cliprect(struct drm_device *dev, } BEGIN_RING(cmdsz); - OUT_RING_TABLE(cmdbuf->buf, cmdsz); + OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz); ADVANCE_RING(); } while (++i < cmdbuf->nbox); if (cmdbuf->nbox == 1) cmdbuf->nbox = 0; + return 0; out: - cmdbuf->buf += cmdsz * 4; - cmdbuf->bufsz -= cmdsz * 4; + drm_buffer_advance(cmdbuf->buffer, cmdsz * 4); return 0; } @@ -2847,16 +2863,16 @@ static int radeon_emit_wait(struct drm_device * dev, int flags) return 0; } -static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv) +static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, + struct drm_file *file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf = NULL; + drm_radeon_cmd_header_t stack_header; int idx; drm_radeon_kcmd_buffer_t *cmdbuf = data; - drm_radeon_cmd_header_t header; - int orig_nbox, orig_bufsz; - char *kbuf = NULL; + int orig_nbox; LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -2871,17 +2887,16 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file * races between checking values and using those values in other code, * and simply to avoid a lot of function calls to copy in data. 
*/ - orig_bufsz = cmdbuf->bufsz; - if (orig_bufsz != 0) { - kbuf = kmalloc(cmdbuf->bufsz, GFP_KERNEL); - if (kbuf == NULL) - return -ENOMEM; - if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf->buf, - cmdbuf->bufsz)) { - kfree(kbuf); - return -EFAULT; - } - cmdbuf->buf = kbuf; + if (cmdbuf->bufsz != 0) { + int rv; + void __user *buffer = cmdbuf->buffer; + rv = drm_buffer_alloc(&cmdbuf->buffer, cmdbuf->bufsz); + if (rv) + return rv; + rv = drm_buffer_copy_from_user(cmdbuf->buffer, buffer, + cmdbuf->bufsz); + if (rv) + return rv; } orig_nbox = cmdbuf->nbox; @@ -2890,24 +2905,24 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file int temp; temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf); - if (orig_bufsz != 0) - kfree(kbuf); + if (cmdbuf->bufsz != 0) + drm_buffer_free(cmdbuf->buffer); return temp; } /* microcode_version != r300 */ - while (cmdbuf->bufsz >= sizeof(header)) { + while (drm_buffer_unprocessed(cmdbuf->buffer) >= sizeof(stack_header)) { - header.i = *(int *)cmdbuf->buf; - cmdbuf->buf += sizeof(header); - cmdbuf->bufsz -= sizeof(header); + drm_radeon_cmd_header_t *header; + header = drm_buffer_read_object(cmdbuf->buffer, + sizeof(stack_header), &stack_header); - switch (header.header.cmd_type) { + switch (header->header.cmd_type) { case RADEON_CMD_PACKET: DRM_DEBUG("RADEON_CMD_PACKET\n"); if (radeon_emit_packets - (dev_priv, file_priv, header, cmdbuf)) { + (dev_priv, file_priv, *header, cmdbuf)) { DRM_ERROR("radeon_emit_packets failed\n"); goto err; } @@ -2915,7 +2930,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file case RADEON_CMD_SCALARS: DRM_DEBUG("RADEON_CMD_SCALARS\n"); - if (radeon_emit_scalars(dev_priv, header, cmdbuf)) { + if (radeon_emit_scalars(dev_priv, *header, cmdbuf)) { DRM_ERROR("radeon_emit_scalars failed\n"); goto err; } @@ -2923,7 +2938,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file case RADEON_CMD_VECTORS: DRM_DEBUG("RADEON_CMD_VECTORS\n"); - if (radeon_emit_vectors(dev_priv, header, cmdbuf)) { + if (radeon_emit_vectors(dev_priv, *header, cmdbuf)) { DRM_ERROR("radeon_emit_vectors failed\n"); goto err; } @@ -2931,7 +2946,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file case RADEON_CMD_DMA_DISCARD: DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n"); - idx = header.dma.buf_idx; + idx = header->dma.buf_idx; if (idx < 0 || idx >= dma->buf_count) { DRM_ERROR("buffer index %d (of %d max)\n", idx, dma->buf_count - 1); @@ -2968,7 +2983,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file case RADEON_CMD_SCALARS2: DRM_DEBUG("RADEON_CMD_SCALARS2\n"); - if (radeon_emit_scalars2(dev_priv, header, cmdbuf)) { + if (radeon_emit_scalars2(dev_priv, *header, cmdbuf)) { DRM_ERROR("radeon_emit_scalars2 failed\n"); goto err; } @@ -2976,37 +2991,37 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file case RADEON_CMD_WAIT: DRM_DEBUG("RADEON_CMD_WAIT\n"); - if (radeon_emit_wait(dev, header.wait.flags)) { + if (radeon_emit_wait(dev, header->wait.flags)) { DRM_ERROR("radeon_emit_wait failed\n"); goto err; } break; case RADEON_CMD_VECLINEAR: DRM_DEBUG("RADEON_CMD_VECLINEAR\n"); - if (radeon_emit_veclinear(dev_priv, header, cmdbuf)) { + if (radeon_emit_veclinear(dev_priv, *header, cmdbuf)) { DRM_ERROR("radeon_emit_veclinear failed\n"); goto err; } break; default: - DRM_ERROR("bad cmd_type %d at %p\n", - header.header.cmd_type, - cmdbuf->buf - sizeof(header)); + DRM_ERROR("bad cmd_type %d at byte %d\n", + 
header->header.cmd_type, + cmdbuf->buffer->iterator); goto err; } } - if (orig_bufsz != 0) - kfree(kbuf); + if (cmdbuf->bufsz != 0) + drm_buffer_free(cmdbuf->buffer); DRM_DEBUG("DONE\n"); COMMIT_RING(); return 0; err: - if (orig_bufsz != 0) - kfree(kbuf); + if (cmdbuf->bufsz != 0) + drm_buffer_free(cmdbuf->buffer); return -EINVAL; } diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index 9f5e2f929da..313c96bc09d 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c @@ -186,7 +186,7 @@ void radeon_test_moves(struct radeon_device *rdev) radeon_bo_kunmap(gtt_obj[i]); DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n", - gtt_addr - rdev->mc.gtt_location); + gtt_addr - rdev->mc.gtt_start); } out_cleanup: diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 58b5adf974c..43c5ab34b63 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -150,7 +150,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->default_caching = TTM_PL_FLAG_CACHED; break; case TTM_PL_TT: - man->gpu_offset = rdev->mc.gtt_location; + man->gpu_offset = rdev->mc.gtt_start; man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; @@ -180,7 +180,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, break; case TTM_PL_VRAM: /* "On-card" video ram */ - man->gpu_offset = rdev->mc.vram_location; + man->gpu_offset = rdev->mc.vram_start; man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE; @@ -262,10 +262,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, switch (old_mem->mem_type) { case TTM_PL_VRAM: - old_start += rdev->mc.vram_location; + old_start += rdev->mc.vram_start; break; case TTM_PL_TT: - old_start += rdev->mc.gtt_location; + old_start += rdev->mc.gtt_start; break; default: DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); @@ -273,10 +273,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, } switch (new_mem->mem_type) { case TTM_PL_VRAM: - new_start += rdev->mc.vram_location; + new_start += rdev->mc.vram_start; break; case TTM_PL_TT: - new_start += rdev->mc.gtt_location; + new_start += rdev->mc.gtt_start; break; default: DRM_ERROR("Unknown placement %d\n", old_mem->mem_type); diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600 new file mode 100644 index 00000000000..8f414a5f520 --- /dev/null +++ b/drivers/gpu/drm/radeon/reg_srcs/r600 @@ -0,0 +1,837 @@ +r600 0x9400 +0x000287A0 R7xx_CB_SHADER_CONTROL +0x00028230 R7xx_PA_SC_EDGERULE +0x000286C8 R7xx_SPI_THREAD_GROUPING +0x00008D8C R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ +0x000088C4 VGT_CACHE_INVALIDATION +0x00028A50 VGT_ENHANCE +0x000088CC VGT_ES_PER_GS +0x00028A2C VGT_GROUP_DECR +0x00028A28 VGT_GROUP_FIRST_DECR +0x00028A24 VGT_GROUP_PRIM_TYPE +0x00028A30 VGT_GROUP_VECT_0_CNTL +0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL +0x00028A34 VGT_GROUP_VECT_1_CNTL +0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL +0x00028A40 VGT_GS_MODE +0x00028A6C VGT_GS_OUT_PRIM_TYPE +0x000088C8 VGT_GS_PER_ES +0x000088E8 VGT_GS_PER_VS +0x000088D4 VGT_GS_VERTEX_REUSE +0x00028A14 VGT_HOS_CNTL +0x00028A18 VGT_HOS_MAX_TESS_LEVEL +0x00028A1C VGT_HOS_MIN_TESS_LEVEL +0x00028A20 VGT_HOS_REUSE_DEPTH +0x0000895C VGT_INDEX_TYPE +0x00028408 VGT_INDX_OFFSET +0x00028AA0 VGT_INSTANCE_STEP_RATE_0 +0x00028AA4 
VGT_INSTANCE_STEP_RATE_1 +0x000088C0 VGT_LAST_COPY_STATE +0x00028400 VGT_MAX_VTX_INDX +0x000088D8 VGT_MC_LAT_CNTL +0x00028404 VGT_MIN_VTX_INDX +0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN +0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX +0x00008970 VGT_NUM_INDICES +0x00008974 VGT_NUM_INSTANCES +0x00028A10 VGT_OUTPUT_PATH_CNTL +0x00028C5C VGT_OUT_DEALLOC_CNTL +0x00028A84 VGT_PRIMITIVEID_EN +0x00008958 VGT_PRIMITIVE_TYPE +0x00028AB4 VGT_REUSE_OFF +0x00028C58 VGT_VERTEX_REUSE_BLOCK_CNTL +0x00028AB8 VGT_VTX_CNT_EN +0x000088B0 VGT_VTX_VECT_EJECT_REG +0x00028810 PA_CL_CLIP_CNTL +0x00008A14 PA_CL_ENHANCE +0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ +0x00028C18 PA_CL_GB_HORZ_DISC_ADJ +0x00028C0C PA_CL_GB_VERT_CLIP_ADJ +0x00028C10 PA_CL_GB_VERT_DISC_ADJ +0x00028820 PA_CL_NANINF_CNTL +0x00028E1C PA_CL_POINT_CULL_RAD +0x00028E18 PA_CL_POINT_SIZE +0x00028E10 PA_CL_POINT_X_RAD +0x00028E14 PA_CL_POINT_Y_RAD +0x00028E2C PA_CL_UCP_0_W +0x00028E3C PA_CL_UCP_1_W +0x00028E4C PA_CL_UCP_2_W +0x00028E5C PA_CL_UCP_3_W +0x00028E6C PA_CL_UCP_4_W +0x00028E7C PA_CL_UCP_5_W +0x00028E20 PA_CL_UCP_0_X +0x00028E30 PA_CL_UCP_1_X +0x00028E40 PA_CL_UCP_2_X +0x00028E50 PA_CL_UCP_3_X +0x00028E60 PA_CL_UCP_4_X +0x00028E70 PA_CL_UCP_5_X +0x00028E24 PA_CL_UCP_0_Y +0x00028E34 PA_CL_UCP_1_Y +0x00028E44 PA_CL_UCP_2_Y +0x00028E54 PA_CL_UCP_3_Y +0x00028E64 PA_CL_UCP_4_Y +0x00028E74 PA_CL_UCP_5_Y +0x00028E28 PA_CL_UCP_0_Z +0x00028E38 PA_CL_UCP_1_Z +0x00028E48 PA_CL_UCP_2_Z +0x00028E58 PA_CL_UCP_3_Z +0x00028E68 PA_CL_UCP_4_Z +0x00028E78 PA_CL_UCP_5_Z +0x00028440 PA_CL_VPORT_XOFFSET_0 +0x00028458 PA_CL_VPORT_XOFFSET_1 +0x00028470 PA_CL_VPORT_XOFFSET_2 +0x00028488 PA_CL_VPORT_XOFFSET_3 +0x000284A0 PA_CL_VPORT_XOFFSET_4 +0x000284B8 PA_CL_VPORT_XOFFSET_5 +0x000284D0 PA_CL_VPORT_XOFFSET_6 +0x000284E8 PA_CL_VPORT_XOFFSET_7 +0x00028500 PA_CL_VPORT_XOFFSET_8 +0x00028518 PA_CL_VPORT_XOFFSET_9 +0x00028530 PA_CL_VPORT_XOFFSET_10 +0x00028548 PA_CL_VPORT_XOFFSET_11 +0x00028560 PA_CL_VPORT_XOFFSET_12 +0x00028578 PA_CL_VPORT_XOFFSET_13 +0x00028590 PA_CL_VPORT_XOFFSET_14 +0x000285A8 PA_CL_VPORT_XOFFSET_15 +0x0002843C PA_CL_VPORT_XSCALE_0 +0x00028454 PA_CL_VPORT_XSCALE_1 +0x0002846C PA_CL_VPORT_XSCALE_2 +0x00028484 PA_CL_VPORT_XSCALE_3 +0x0002849C PA_CL_VPORT_XSCALE_4 +0x000284B4 PA_CL_VPORT_XSCALE_5 +0x000284CC PA_CL_VPORT_XSCALE_6 +0x000284E4 PA_CL_VPORT_XSCALE_7 +0x000284FC PA_CL_VPORT_XSCALE_8 +0x00028514 PA_CL_VPORT_XSCALE_9 +0x0002852C PA_CL_VPORT_XSCALE_10 +0x00028544 PA_CL_VPORT_XSCALE_11 +0x0002855C PA_CL_VPORT_XSCALE_12 +0x00028574 PA_CL_VPORT_XSCALE_13 +0x0002858C PA_CL_VPORT_XSCALE_14 +0x000285A4 PA_CL_VPORT_XSCALE_15 +0x00028448 PA_CL_VPORT_YOFFSET_0 +0x00028460 PA_CL_VPORT_YOFFSET_1 +0x00028478 PA_CL_VPORT_YOFFSET_2 +0x00028490 PA_CL_VPORT_YOFFSET_3 +0x000284A8 PA_CL_VPORT_YOFFSET_4 +0x000284C0 PA_CL_VPORT_YOFFSET_5 +0x000284D8 PA_CL_VPORT_YOFFSET_6 +0x000284F0 PA_CL_VPORT_YOFFSET_7 +0x00028508 PA_CL_VPORT_YOFFSET_8 +0x00028520 PA_CL_VPORT_YOFFSET_9 +0x00028538 PA_CL_VPORT_YOFFSET_10 +0x00028550 PA_CL_VPORT_YOFFSET_11 +0x00028568 PA_CL_VPORT_YOFFSET_12 +0x00028580 PA_CL_VPORT_YOFFSET_13 +0x00028598 PA_CL_VPORT_YOFFSET_14 +0x000285B0 PA_CL_VPORT_YOFFSET_15 +0x00028444 PA_CL_VPORT_YSCALE_0 +0x0002845C PA_CL_VPORT_YSCALE_1 +0x00028474 PA_CL_VPORT_YSCALE_2 +0x0002848C PA_CL_VPORT_YSCALE_3 +0x000284A4 PA_CL_VPORT_YSCALE_4 +0x000284BC PA_CL_VPORT_YSCALE_5 +0x000284D4 PA_CL_VPORT_YSCALE_6 +0x000284EC PA_CL_VPORT_YSCALE_7 +0x00028504 PA_CL_VPORT_YSCALE_8 +0x0002851C PA_CL_VPORT_YSCALE_9 +0x00028534 PA_CL_VPORT_YSCALE_10 +0x0002854C PA_CL_VPORT_YSCALE_11 +0x00028564 
PA_CL_VPORT_YSCALE_12 +0x0002857C PA_CL_VPORT_YSCALE_13 +0x00028594 PA_CL_VPORT_YSCALE_14 +0x000285AC PA_CL_VPORT_YSCALE_15 +0x00028450 PA_CL_VPORT_ZOFFSET_0 +0x00028468 PA_CL_VPORT_ZOFFSET_1 +0x00028480 PA_CL_VPORT_ZOFFSET_2 +0x00028498 PA_CL_VPORT_ZOFFSET_3 +0x000284B0 PA_CL_VPORT_ZOFFSET_4 +0x000284C8 PA_CL_VPORT_ZOFFSET_5 +0x000284E0 PA_CL_VPORT_ZOFFSET_6 +0x000284F8 PA_CL_VPORT_ZOFFSET_7 +0x00028510 PA_CL_VPORT_ZOFFSET_8 +0x00028528 PA_CL_VPORT_ZOFFSET_9 +0x00028540 PA_CL_VPORT_ZOFFSET_10 +0x00028558 PA_CL_VPORT_ZOFFSET_11 +0x00028570 PA_CL_VPORT_ZOFFSET_12 +0x00028588 PA_CL_VPORT_ZOFFSET_13 +0x000285A0 PA_CL_VPORT_ZOFFSET_14 +0x000285B8 PA_CL_VPORT_ZOFFSET_15 +0x0002844C PA_CL_VPORT_ZSCALE_0 +0x00028464 PA_CL_VPORT_ZSCALE_1 +0x0002847C PA_CL_VPORT_ZSCALE_2 +0x00028494 PA_CL_VPORT_ZSCALE_3 +0x000284AC PA_CL_VPORT_ZSCALE_4 +0x000284C4 PA_CL_VPORT_ZSCALE_5 +0x000284DC PA_CL_VPORT_ZSCALE_6 +0x000284F4 PA_CL_VPORT_ZSCALE_7 +0x0002850C PA_CL_VPORT_ZSCALE_8 +0x00028524 PA_CL_VPORT_ZSCALE_9 +0x0002853C PA_CL_VPORT_ZSCALE_10 +0x00028554 PA_CL_VPORT_ZSCALE_11 +0x0002856C PA_CL_VPORT_ZSCALE_12 +0x00028584 PA_CL_VPORT_ZSCALE_13 +0x0002859C PA_CL_VPORT_ZSCALE_14 +0x000285B4 PA_CL_VPORT_ZSCALE_15 +0x0002881C PA_CL_VS_OUT_CNTL +0x00028818 PA_CL_VTE_CNTL +0x00028C48 PA_SC_AA_MASK +0x00008B40 PA_SC_AA_SAMPLE_LOCS_2S +0x00008B44 PA_SC_AA_SAMPLE_LOCS_4S +0x00008B48 PA_SC_AA_SAMPLE_LOCS_8S_WD0 +0x00008B4C PA_SC_AA_SAMPLE_LOCS_8S_WD1 +0x00028C20 PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX +0x00028C1C PA_SC_AA_SAMPLE_LOCS_MCTX +0x00028214 PA_SC_CLIPRECT_0_BR +0x0002821C PA_SC_CLIPRECT_1_BR +0x00028224 PA_SC_CLIPRECT_2_BR +0x0002822C PA_SC_CLIPRECT_3_BR +0x00028210 PA_SC_CLIPRECT_0_TL +0x00028218 PA_SC_CLIPRECT_1_TL +0x00028220 PA_SC_CLIPRECT_2_TL +0x00028228 PA_SC_CLIPRECT_3_TL +0x0002820C PA_SC_CLIPRECT_RULE +0x00008BF0 PA_SC_ENHANCE +0x00028244 PA_SC_GENERIC_SCISSOR_BR +0x00028240 PA_SC_GENERIC_SCISSOR_TL +0x00028C00 PA_SC_LINE_CNTL +0x00028A0C PA_SC_LINE_STIPPLE +0x00008B10 PA_SC_LINE_STIPPLE_STATE +0x00028A4C PA_SC_MODE_CNTL +0x00028A48 PA_SC_MPASS_PS_CNTL +0x00008B20 PA_SC_MULTI_CHIP_CNTL +0x00028034 PA_SC_SCREEN_SCISSOR_BR +0x00028030 PA_SC_SCREEN_SCISSOR_TL +0x00028254 PA_SC_VPORT_SCISSOR_0_BR +0x0002825C PA_SC_VPORT_SCISSOR_1_BR +0x00028264 PA_SC_VPORT_SCISSOR_2_BR +0x0002826C PA_SC_VPORT_SCISSOR_3_BR +0x00028274 PA_SC_VPORT_SCISSOR_4_BR +0x0002827C PA_SC_VPORT_SCISSOR_5_BR +0x00028284 PA_SC_VPORT_SCISSOR_6_BR +0x0002828C PA_SC_VPORT_SCISSOR_7_BR +0x00028294 PA_SC_VPORT_SCISSOR_8_BR +0x0002829C PA_SC_VPORT_SCISSOR_9_BR +0x000282A4 PA_SC_VPORT_SCISSOR_10_BR +0x000282AC PA_SC_VPORT_SCISSOR_11_BR +0x000282B4 PA_SC_VPORT_SCISSOR_12_BR +0x000282BC PA_SC_VPORT_SCISSOR_13_BR +0x000282C4 PA_SC_VPORT_SCISSOR_14_BR +0x000282CC PA_SC_VPORT_SCISSOR_15_BR +0x00028250 PA_SC_VPORT_SCISSOR_0_TL +0x00028258 PA_SC_VPORT_SCISSOR_1_TL +0x00028260 PA_SC_VPORT_SCISSOR_2_TL +0x00028268 PA_SC_VPORT_SCISSOR_3_TL +0x00028270 PA_SC_VPORT_SCISSOR_4_TL +0x00028278 PA_SC_VPORT_SCISSOR_5_TL +0x00028280 PA_SC_VPORT_SCISSOR_6_TL +0x00028288 PA_SC_VPORT_SCISSOR_7_TL +0x00028290 PA_SC_VPORT_SCISSOR_8_TL +0x00028298 PA_SC_VPORT_SCISSOR_9_TL +0x000282A0 PA_SC_VPORT_SCISSOR_10_TL +0x000282A8 PA_SC_VPORT_SCISSOR_11_TL +0x000282B0 PA_SC_VPORT_SCISSOR_12_TL +0x000282B8 PA_SC_VPORT_SCISSOR_13_TL +0x000282C0 PA_SC_VPORT_SCISSOR_14_TL +0x000282C8 PA_SC_VPORT_SCISSOR_15_TL +0x000282D4 PA_SC_VPORT_ZMAX_0 +0x000282DC PA_SC_VPORT_ZMAX_1 +0x000282E4 PA_SC_VPORT_ZMAX_2 +0x000282EC PA_SC_VPORT_ZMAX_3 +0x000282F4 PA_SC_VPORT_ZMAX_4 +0x000282FC 
PA_SC_VPORT_ZMAX_5 +0x00028304 PA_SC_VPORT_ZMAX_6 +0x0002830C PA_SC_VPORT_ZMAX_7 +0x00028314 PA_SC_VPORT_ZMAX_8 +0x0002831C PA_SC_VPORT_ZMAX_9 +0x00028324 PA_SC_VPORT_ZMAX_10 +0x0002832C PA_SC_VPORT_ZMAX_11 +0x00028334 PA_SC_VPORT_ZMAX_12 +0x0002833C PA_SC_VPORT_ZMAX_13 +0x00028344 PA_SC_VPORT_ZMAX_14 +0x0002834C PA_SC_VPORT_ZMAX_15 +0x000282D0 PA_SC_VPORT_ZMIN_0 +0x000282D8 PA_SC_VPORT_ZMIN_1 +0x000282E0 PA_SC_VPORT_ZMIN_2 +0x000282E8 PA_SC_VPORT_ZMIN_3 +0x000282F0 PA_SC_VPORT_ZMIN_4 +0x000282F8 PA_SC_VPORT_ZMIN_5 +0x00028300 PA_SC_VPORT_ZMIN_6 +0x00028308 PA_SC_VPORT_ZMIN_7 +0x00028310 PA_SC_VPORT_ZMIN_8 +0x00028318 PA_SC_VPORT_ZMIN_9 +0x00028320 PA_SC_VPORT_ZMIN_10 +0x00028328 PA_SC_VPORT_ZMIN_11 +0x00028330 PA_SC_VPORT_ZMIN_12 +0x00028338 PA_SC_VPORT_ZMIN_13 +0x00028340 PA_SC_VPORT_ZMIN_14 +0x00028348 PA_SC_VPORT_ZMIN_15 +0x00028200 PA_SC_WINDOW_OFFSET +0x00028208 PA_SC_WINDOW_SCISSOR_BR +0x00028204 PA_SC_WINDOW_SCISSOR_TL +0x00028A08 PA_SU_LINE_CNTL +0x00028A04 PA_SU_POINT_MINMAX +0x00028A00 PA_SU_POINT_SIZE +0x00028E0C PA_SU_POLY_OFFSET_BACK_OFFSET +0x00028E08 PA_SU_POLY_OFFSET_BACK_SCALE +0x00028DFC PA_SU_POLY_OFFSET_CLAMP +0x00028DF8 PA_SU_POLY_OFFSET_DB_FMT_CNTL +0x00028E04 PA_SU_POLY_OFFSET_FRONT_OFFSET +0x00028E00 PA_SU_POLY_OFFSET_FRONT_SCALE +0x00028814 PA_SU_SC_MODE_CNTL +0x00028C08 PA_SU_VTX_CNTL +0x00008C00 SQ_CONFIG +0x00008C04 SQ_GPR_RESOURCE_MGMT_1 +0x00008C08 SQ_GPR_RESOURCE_MGMT_2 +0x00008C10 SQ_STACK_RESOURCE_MGMT_1 +0x00008C14 SQ_STACK_RESOURCE_MGMT_2 +0x00008C0C SQ_THREAD_RESOURCE_MGMT +0x00028380 SQ_VTX_SEMANTIC_0 +0x00028384 SQ_VTX_SEMANTIC_1 +0x00028388 SQ_VTX_SEMANTIC_2 +0x0002838C SQ_VTX_SEMANTIC_3 +0x00028390 SQ_VTX_SEMANTIC_4 +0x00028394 SQ_VTX_SEMANTIC_5 +0x00028398 SQ_VTX_SEMANTIC_6 +0x0002839C SQ_VTX_SEMANTIC_7 +0x000283A0 SQ_VTX_SEMANTIC_8 +0x000283A4 SQ_VTX_SEMANTIC_9 +0x000283A8 SQ_VTX_SEMANTIC_10 +0x000283AC SQ_VTX_SEMANTIC_11 +0x000283B0 SQ_VTX_SEMANTIC_12 +0x000283B4 SQ_VTX_SEMANTIC_13 +0x000283B8 SQ_VTX_SEMANTIC_14 +0x000283BC SQ_VTX_SEMANTIC_15 +0x000283C0 SQ_VTX_SEMANTIC_16 +0x000283C4 SQ_VTX_SEMANTIC_17 +0x000283C8 SQ_VTX_SEMANTIC_18 +0x000283CC SQ_VTX_SEMANTIC_19 +0x000283D0 SQ_VTX_SEMANTIC_20 +0x000283D4 SQ_VTX_SEMANTIC_21 +0x000283D8 SQ_VTX_SEMANTIC_22 +0x000283DC SQ_VTX_SEMANTIC_23 +0x000283E0 SQ_VTX_SEMANTIC_24 +0x000283E4 SQ_VTX_SEMANTIC_25 +0x000283E8 SQ_VTX_SEMANTIC_26 +0x000283EC SQ_VTX_SEMANTIC_27 +0x000283F0 SQ_VTX_SEMANTIC_28 +0x000283F4 SQ_VTX_SEMANTIC_29 +0x000283F8 SQ_VTX_SEMANTIC_30 +0x000283FC SQ_VTX_SEMANTIC_31 +0x000288E0 SQ_VTX_SEMANTIC_CLEAR +0x0003CFF4 SQ_VTX_START_INST_LOC +0x0003C000 SQ_TEX_SAMPLER_WORD0_0 +0x0003C004 SQ_TEX_SAMPLER_WORD1_0 +0x0003C008 SQ_TEX_SAMPLER_WORD2_0 +0x00030000 SQ_ALU_CONSTANT0_0 +0x00030004 SQ_ALU_CONSTANT1_0 +0x00030008 SQ_ALU_CONSTANT2_0 +0x0003000C SQ_ALU_CONSTANT3_0 +0x0003E380 SQ_BOOL_CONST_0 +0x0003E384 SQ_BOOL_CONST_1 +0x0003E388 SQ_BOOL_CONST_2 +0x0003E200 SQ_LOOP_CONST_0 +0x0003E200 SQ_LOOP_CONST_DX10_0 +0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0 +0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1 +0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2 +0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3 +0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4 +0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5 +0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6 +0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7 +0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8 +0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9 +0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10 +0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11 +0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12 +0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13 
+0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14 +0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15 +0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0 +0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1 +0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2 +0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3 +0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4 +0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5 +0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6 +0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7 +0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8 +0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9 +0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10 +0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11 +0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12 +0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13 +0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14 +0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15 +0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0 +0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1 +0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2 +0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3 +0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4 +0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5 +0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6 +0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7 +0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8 +0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9 +0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10 +0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11 +0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12 +0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13 +0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14 +0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15 +0x000289C0 SQ_ALU_CONST_CACHE_GS_0 +0x000289C4 SQ_ALU_CONST_CACHE_GS_1 +0x000289C8 SQ_ALU_CONST_CACHE_GS_2 +0x000289CC SQ_ALU_CONST_CACHE_GS_3 +0x000289D0 SQ_ALU_CONST_CACHE_GS_4 +0x000289D4 SQ_ALU_CONST_CACHE_GS_5 +0x000289D8 SQ_ALU_CONST_CACHE_GS_6 +0x000289DC SQ_ALU_CONST_CACHE_GS_7 +0x000289E0 SQ_ALU_CONST_CACHE_GS_8 +0x000289E4 SQ_ALU_CONST_CACHE_GS_9 +0x000289E8 SQ_ALU_CONST_CACHE_GS_10 +0x000289EC SQ_ALU_CONST_CACHE_GS_11 +0x000289F0 SQ_ALU_CONST_CACHE_GS_12 +0x000289F4 SQ_ALU_CONST_CACHE_GS_13 +0x000289F8 SQ_ALU_CONST_CACHE_GS_14 +0x000289FC SQ_ALU_CONST_CACHE_GS_15 +0x00028940 SQ_ALU_CONST_CACHE_PS_0 +0x00028944 SQ_ALU_CONST_CACHE_PS_1 +0x00028948 SQ_ALU_CONST_CACHE_PS_2 +0x0002894C SQ_ALU_CONST_CACHE_PS_3 +0x00028950 SQ_ALU_CONST_CACHE_PS_4 +0x00028954 SQ_ALU_CONST_CACHE_PS_5 +0x00028958 SQ_ALU_CONST_CACHE_PS_6 +0x0002895C SQ_ALU_CONST_CACHE_PS_7 +0x00028960 SQ_ALU_CONST_CACHE_PS_8 +0x00028964 SQ_ALU_CONST_CACHE_PS_9 +0x00028968 SQ_ALU_CONST_CACHE_PS_10 +0x0002896C SQ_ALU_CONST_CACHE_PS_11 +0x00028970 SQ_ALU_CONST_CACHE_PS_12 +0x00028974 SQ_ALU_CONST_CACHE_PS_13 +0x00028978 SQ_ALU_CONST_CACHE_PS_14 +0x0002897C SQ_ALU_CONST_CACHE_PS_15 +0x00028980 SQ_ALU_CONST_CACHE_VS_0 +0x00028984 SQ_ALU_CONST_CACHE_VS_1 +0x00028988 SQ_ALU_CONST_CACHE_VS_2 +0x0002898C SQ_ALU_CONST_CACHE_VS_3 +0x00028990 SQ_ALU_CONST_CACHE_VS_4 +0x00028994 SQ_ALU_CONST_CACHE_VS_5 +0x00028998 SQ_ALU_CONST_CACHE_VS_6 +0x0002899C SQ_ALU_CONST_CACHE_VS_7 +0x000289A0 SQ_ALU_CONST_CACHE_VS_8 +0x000289A4 SQ_ALU_CONST_CACHE_VS_9 +0x000289A8 SQ_ALU_CONST_CACHE_VS_10 +0x000289AC SQ_ALU_CONST_CACHE_VS_11 +0x000289B0 SQ_ALU_CONST_CACHE_VS_12 +0x000289B4 SQ_ALU_CONST_CACHE_VS_13 +0x000289B8 SQ_ALU_CONST_CACHE_VS_14 +0x000289BC SQ_ALU_CONST_CACHE_VS_15 +0x000288D8 SQ_PGM_CF_OFFSET_ES +0x000288DC SQ_PGM_CF_OFFSET_FS +0x000288D4 SQ_PGM_CF_OFFSET_GS +0x000288CC SQ_PGM_CF_OFFSET_PS +0x000288D0 SQ_PGM_CF_OFFSET_VS +0x00028854 SQ_PGM_EXPORTS_PS +0x00028890 SQ_PGM_RESOURCES_ES +0x000288A4 SQ_PGM_RESOURCES_FS +0x0002887C SQ_PGM_RESOURCES_GS +0x00028850 SQ_PGM_RESOURCES_PS +0x00028868 SQ_PGM_RESOURCES_VS +0x00009100 
SPI_CONFIG_CNTL +0x0000913C SPI_CONFIG_CNTL_1 +0x000286DC SPI_FOG_CNTL +0x000286E4 SPI_FOG_FUNC_BIAS +0x000286E0 SPI_FOG_FUNC_SCALE +0x000286D8 SPI_INPUT_Z +0x000286D4 SPI_INTERP_CONTROL_0 +0x00028644 SPI_PS_INPUT_CNTL_0 +0x00028648 SPI_PS_INPUT_CNTL_1 +0x0002864C SPI_PS_INPUT_CNTL_2 +0x00028650 SPI_PS_INPUT_CNTL_3 +0x00028654 SPI_PS_INPUT_CNTL_4 +0x00028658 SPI_PS_INPUT_CNTL_5 +0x0002865C SPI_PS_INPUT_CNTL_6 +0x00028660 SPI_PS_INPUT_CNTL_7 +0x00028664 SPI_PS_INPUT_CNTL_8 +0x00028668 SPI_PS_INPUT_CNTL_9 +0x0002866C SPI_PS_INPUT_CNTL_10 +0x00028670 SPI_PS_INPUT_CNTL_11 +0x00028674 SPI_PS_INPUT_CNTL_12 +0x00028678 SPI_PS_INPUT_CNTL_13 +0x0002867C SPI_PS_INPUT_CNTL_14 +0x00028680 SPI_PS_INPUT_CNTL_15 +0x00028684 SPI_PS_INPUT_CNTL_16 +0x00028688 SPI_PS_INPUT_CNTL_17 +0x0002868C SPI_PS_INPUT_CNTL_18 +0x00028690 SPI_PS_INPUT_CNTL_19 +0x00028694 SPI_PS_INPUT_CNTL_20 +0x00028698 SPI_PS_INPUT_CNTL_21 +0x0002869C SPI_PS_INPUT_CNTL_22 +0x000286A0 SPI_PS_INPUT_CNTL_23 +0x000286A4 SPI_PS_INPUT_CNTL_24 +0x000286A8 SPI_PS_INPUT_CNTL_25 +0x000286AC SPI_PS_INPUT_CNTL_26 +0x000286B0 SPI_PS_INPUT_CNTL_27 +0x000286B4 SPI_PS_INPUT_CNTL_28 +0x000286B8 SPI_PS_INPUT_CNTL_29 +0x000286BC SPI_PS_INPUT_CNTL_30 +0x000286C0 SPI_PS_INPUT_CNTL_31 +0x000286CC SPI_PS_IN_CONTROL_0 +0x000286D0 SPI_PS_IN_CONTROL_1 +0x000286C4 SPI_VS_OUT_CONFIG +0x00028614 SPI_VS_OUT_ID_0 +0x00028618 SPI_VS_OUT_ID_1 +0x0002861C SPI_VS_OUT_ID_2 +0x00028620 SPI_VS_OUT_ID_3 +0x00028624 SPI_VS_OUT_ID_4 +0x00028628 SPI_VS_OUT_ID_5 +0x0002862C SPI_VS_OUT_ID_6 +0x00028630 SPI_VS_OUT_ID_7 +0x00028634 SPI_VS_OUT_ID_8 +0x00028638 SPI_VS_OUT_ID_9 +0x00028438 SX_ALPHA_REF +0x00028410 SX_ALPHA_TEST_CONTROL +0x00028350 SX_MISC +0x0000A020 SMX_DC_CTL0 +0x0000A024 SMX_DC_CTL1 +0x0000A028 SMX_DC_CTL2 +0x00009608 TC_CNTL +0x00009604 TC_INVALIDATE +0x00009490 TD_CNTL +0x00009400 TD_FILTER4 +0x00009404 TD_FILTER4_1 +0x00009408 TD_FILTER4_2 +0x0000940C TD_FILTER4_3 +0x00009410 TD_FILTER4_4 +0x00009414 TD_FILTER4_5 +0x00009418 TD_FILTER4_6 +0x0000941C TD_FILTER4_7 +0x00009420 TD_FILTER4_8 +0x00009424 TD_FILTER4_9 +0x00009428 TD_FILTER4_10 +0x0000942C TD_FILTER4_11 +0x00009430 TD_FILTER4_12 +0x00009434 TD_FILTER4_13 +0x00009438 TD_FILTER4_14 +0x0000943C TD_FILTER4_15 +0x00009440 TD_FILTER4_16 +0x00009444 TD_FILTER4_17 +0x00009448 TD_FILTER4_18 +0x0000944C TD_FILTER4_19 +0x00009450 TD_FILTER4_20 +0x00009454 TD_FILTER4_21 +0x00009458 TD_FILTER4_22 +0x0000945C TD_FILTER4_23 +0x00009460 TD_FILTER4_24 +0x00009464 TD_FILTER4_25 +0x00009468 TD_FILTER4_26 +0x0000946C TD_FILTER4_27 +0x00009470 TD_FILTER4_28 +0x00009474 TD_FILTER4_29 +0x00009478 TD_FILTER4_30 +0x0000947C TD_FILTER4_31 +0x00009480 TD_FILTER4_32 +0x00009484 TD_FILTER4_33 +0x00009488 TD_FILTER4_34 +0x0000948C TD_FILTER4_35 +0x0000A80C TD_GS_SAMPLER0_BORDER_ALPHA +0x0000A81C TD_GS_SAMPLER1_BORDER_ALPHA +0x0000A82C TD_GS_SAMPLER2_BORDER_ALPHA +0x0000A83C TD_GS_SAMPLER3_BORDER_ALPHA +0x0000A84C TD_GS_SAMPLER4_BORDER_ALPHA +0x0000A85C TD_GS_SAMPLER5_BORDER_ALPHA +0x0000A86C TD_GS_SAMPLER6_BORDER_ALPHA +0x0000A87C TD_GS_SAMPLER7_BORDER_ALPHA +0x0000A88C TD_GS_SAMPLER8_BORDER_ALPHA +0x0000A89C TD_GS_SAMPLER9_BORDER_ALPHA +0x0000A8AC TD_GS_SAMPLER10_BORDER_ALPHA +0x0000A8BC TD_GS_SAMPLER11_BORDER_ALPHA +0x0000A8CC TD_GS_SAMPLER12_BORDER_ALPHA +0x0000A8DC TD_GS_SAMPLER13_BORDER_ALPHA +0x0000A8EC TD_GS_SAMPLER14_BORDER_ALPHA +0x0000A8FC TD_GS_SAMPLER15_BORDER_ALPHA +0x0000A90C TD_GS_SAMPLER16_BORDER_ALPHA +0x0000A91C TD_GS_SAMPLER17_BORDER_ALPHA +0x0000A808 TD_GS_SAMPLER0_BORDER_BLUE +0x0000A818 
TD_GS_SAMPLER1_BORDER_BLUE +0x0000A828 TD_GS_SAMPLER2_BORDER_BLUE +0x0000A838 TD_GS_SAMPLER3_BORDER_BLUE +0x0000A848 TD_GS_SAMPLER4_BORDER_BLUE +0x0000A858 TD_GS_SAMPLER5_BORDER_BLUE +0x0000A868 TD_GS_SAMPLER6_BORDER_BLUE +0x0000A878 TD_GS_SAMPLER7_BORDER_BLUE +0x0000A888 TD_GS_SAMPLER8_BORDER_BLUE +0x0000A898 TD_GS_SAMPLER9_BORDER_BLUE +0x0000A8A8 TD_GS_SAMPLER10_BORDER_BLUE +0x0000A8B8 TD_GS_SAMPLER11_BORDER_BLUE +0x0000A8C8 TD_GS_SAMPLER12_BORDER_BLUE +0x0000A8D8 TD_GS_SAMPLER13_BORDER_BLUE +0x0000A8E8 TD_GS_SAMPLER14_BORDER_BLUE +0x0000A8F8 TD_GS_SAMPLER15_BORDER_BLUE +0x0000A908 TD_GS_SAMPLER16_BORDER_BLUE +0x0000A918 TD_GS_SAMPLER17_BORDER_BLUE +0x0000A804 TD_GS_SAMPLER0_BORDER_GREEN +0x0000A814 TD_GS_SAMPLER1_BORDER_GREEN +0x0000A824 TD_GS_SAMPLER2_BORDER_GREEN +0x0000A834 TD_GS_SAMPLER3_BORDER_GREEN +0x0000A844 TD_GS_SAMPLER4_BORDER_GREEN +0x0000A854 TD_GS_SAMPLER5_BORDER_GREEN +0x0000A864 TD_GS_SAMPLER6_BORDER_GREEN +0x0000A874 TD_GS_SAMPLER7_BORDER_GREEN +0x0000A884 TD_GS_SAMPLER8_BORDER_GREEN +0x0000A894 TD_GS_SAMPLER9_BORDER_GREEN +0x0000A8A4 TD_GS_SAMPLER10_BORDER_GREEN +0x0000A8B4 TD_GS_SAMPLER11_BORDER_GREEN +0x0000A8C4 TD_GS_SAMPLER12_BORDER_GREEN +0x0000A8D4 TD_GS_SAMPLER13_BORDER_GREEN +0x0000A8E4 TD_GS_SAMPLER14_BORDER_GREEN +0x0000A8F4 TD_GS_SAMPLER15_BORDER_GREEN +0x0000A904 TD_GS_SAMPLER16_BORDER_GREEN +0x0000A914 TD_GS_SAMPLER17_BORDER_GREEN +0x0000A800 TD_GS_SAMPLER0_BORDER_RED +0x0000A810 TD_GS_SAMPLER1_BORDER_RED +0x0000A820 TD_GS_SAMPLER2_BORDER_RED +0x0000A830 TD_GS_SAMPLER3_BORDER_RED +0x0000A840 TD_GS_SAMPLER4_BORDER_RED +0x0000A850 TD_GS_SAMPLER5_BORDER_RED +0x0000A860 TD_GS_SAMPLER6_BORDER_RED +0x0000A870 TD_GS_SAMPLER7_BORDER_RED +0x0000A880 TD_GS_SAMPLER8_BORDER_RED +0x0000A890 TD_GS_SAMPLER9_BORDER_RED +0x0000A8A0 TD_GS_SAMPLER10_BORDER_RED +0x0000A8B0 TD_GS_SAMPLER11_BORDER_RED +0x0000A8C0 TD_GS_SAMPLER12_BORDER_RED +0x0000A8D0 TD_GS_SAMPLER13_BORDER_RED +0x0000A8E0 TD_GS_SAMPLER14_BORDER_RED +0x0000A8F0 TD_GS_SAMPLER15_BORDER_RED +0x0000A900 TD_GS_SAMPLER16_BORDER_RED +0x0000A910 TD_GS_SAMPLER17_BORDER_RED +0x0000A40C TD_PS_SAMPLER0_BORDER_ALPHA +0x0000A41C TD_PS_SAMPLER1_BORDER_ALPHA +0x0000A42C TD_PS_SAMPLER2_BORDER_ALPHA +0x0000A43C TD_PS_SAMPLER3_BORDER_ALPHA +0x0000A44C TD_PS_SAMPLER4_BORDER_ALPHA +0x0000A45C TD_PS_SAMPLER5_BORDER_ALPHA +0x0000A46C TD_PS_SAMPLER6_BORDER_ALPHA +0x0000A47C TD_PS_SAMPLER7_BORDER_ALPHA +0x0000A48C TD_PS_SAMPLER8_BORDER_ALPHA +0x0000A49C TD_PS_SAMPLER9_BORDER_ALPHA +0x0000A4AC TD_PS_SAMPLER10_BORDER_ALPHA +0x0000A4BC TD_PS_SAMPLER11_BORDER_ALPHA +0x0000A4CC TD_PS_SAMPLER12_BORDER_ALPHA +0x0000A4DC TD_PS_SAMPLER13_BORDER_ALPHA +0x0000A4EC TD_PS_SAMPLER14_BORDER_ALPHA +0x0000A4FC TD_PS_SAMPLER15_BORDER_ALPHA +0x0000A50C TD_PS_SAMPLER16_BORDER_ALPHA +0x0000A51C TD_PS_SAMPLER17_BORDER_ALPHA +0x0000A408 TD_PS_SAMPLER0_BORDER_BLUE +0x0000A418 TD_PS_SAMPLER1_BORDER_BLUE +0x0000A428 TD_PS_SAMPLER2_BORDER_BLUE +0x0000A438 TD_PS_SAMPLER3_BORDER_BLUE +0x0000A448 TD_PS_SAMPLER4_BORDER_BLUE +0x0000A458 TD_PS_SAMPLER5_BORDER_BLUE +0x0000A468 TD_PS_SAMPLER6_BORDER_BLUE +0x0000A478 TD_PS_SAMPLER7_BORDER_BLUE +0x0000A488 TD_PS_SAMPLER8_BORDER_BLUE +0x0000A498 TD_PS_SAMPLER9_BORDER_BLUE +0x0000A4A8 TD_PS_SAMPLER10_BORDER_BLUE +0x0000A4B8 TD_PS_SAMPLER11_BORDER_BLUE +0x0000A4C8 TD_PS_SAMPLER12_BORDER_BLUE +0x0000A4D8 TD_PS_SAMPLER13_BORDER_BLUE +0x0000A4E8 TD_PS_SAMPLER14_BORDER_BLUE +0x0000A4F8 TD_PS_SAMPLER15_BORDER_BLUE +0x0000A508 TD_PS_SAMPLER16_BORDER_BLUE +0x0000A518 TD_PS_SAMPLER17_BORDER_BLUE +0x0000A404 
TD_PS_SAMPLER0_BORDER_GREEN +0x0000A414 TD_PS_SAMPLER1_BORDER_GREEN +0x0000A424 TD_PS_SAMPLER2_BORDER_GREEN +0x0000A434 TD_PS_SAMPLER3_BORDER_GREEN +0x0000A444 TD_PS_SAMPLER4_BORDER_GREEN +0x0000A454 TD_PS_SAMPLER5_BORDER_GREEN +0x0000A464 TD_PS_SAMPLER6_BORDER_GREEN +0x0000A474 TD_PS_SAMPLER7_BORDER_GREEN +0x0000A484 TD_PS_SAMPLER8_BORDER_GREEN +0x0000A494 TD_PS_SAMPLER9_BORDER_GREEN +0x0000A4A4 TD_PS_SAMPLER10_BORDER_GREEN +0x0000A4B4 TD_PS_SAMPLER11_BORDER_GREEN +0x0000A4C4 TD_PS_SAMPLER12_BORDER_GREEN +0x0000A4D4 TD_PS_SAMPLER13_BORDER_GREEN +0x0000A4E4 TD_PS_SAMPLER14_BORDER_GREEN +0x0000A4F4 TD_PS_SAMPLER15_BORDER_GREEN +0x0000A504 TD_PS_SAMPLER16_BORDER_GREEN +0x0000A514 TD_PS_SAMPLER17_BORDER_GREEN +0x0000A400 TD_PS_SAMPLER0_BORDER_RED +0x0000A410 TD_PS_SAMPLER1_BORDER_RED +0x0000A420 TD_PS_SAMPLER2_BORDER_RED +0x0000A430 TD_PS_SAMPLER3_BORDER_RED +0x0000A440 TD_PS_SAMPLER4_BORDER_RED +0x0000A450 TD_PS_SAMPLER5_BORDER_RED +0x0000A460 TD_PS_SAMPLER6_BORDER_RED +0x0000A470 TD_PS_SAMPLER7_BORDER_RED +0x0000A480 TD_PS_SAMPLER8_BORDER_RED +0x0000A490 TD_PS_SAMPLER9_BORDER_RED +0x0000A4A0 TD_PS_SAMPLER10_BORDER_RED +0x0000A4B0 TD_PS_SAMPLER11_BORDER_RED +0x0000A4C0 TD_PS_SAMPLER12_BORDER_RED +0x0000A4D0 TD_PS_SAMPLER13_BORDER_RED +0x0000A4E0 TD_PS_SAMPLER14_BORDER_RED +0x0000A4F0 TD_PS_SAMPLER15_BORDER_RED +0x0000A500 TD_PS_SAMPLER16_BORDER_RED +0x0000A510 TD_PS_SAMPLER17_BORDER_RED +0x0000AA00 TD_PS_SAMPLER0_CLEARTYPE_KERNEL +0x0000AA04 TD_PS_SAMPLER1_CLEARTYPE_KERNEL +0x0000AA08 TD_PS_SAMPLER2_CLEARTYPE_KERNEL +0x0000AA0C TD_PS_SAMPLER3_CLEARTYPE_KERNEL +0x0000AA10 TD_PS_SAMPLER4_CLEARTYPE_KERNEL +0x0000AA14 TD_PS_SAMPLER5_CLEARTYPE_KERNEL +0x0000AA18 TD_PS_SAMPLER6_CLEARTYPE_KERNEL +0x0000AA1C TD_PS_SAMPLER7_CLEARTYPE_KERNEL +0x0000AA20 TD_PS_SAMPLER8_CLEARTYPE_KERNEL +0x0000AA24 TD_PS_SAMPLER9_CLEARTYPE_KERNEL +0x0000AA28 TD_PS_SAMPLER10_CLEARTYPE_KERNEL +0x0000AA2C TD_PS_SAMPLER11_CLEARTYPE_KERNEL +0x0000AA30 TD_PS_SAMPLER12_CLEARTYPE_KERNEL +0x0000AA34 TD_PS_SAMPLER13_CLEARTYPE_KERNEL +0x0000AA38 TD_PS_SAMPLER14_CLEARTYPE_KERNEL +0x0000AA3C TD_PS_SAMPLER15_CLEARTYPE_KERNEL +0x0000AA40 TD_PS_SAMPLER16_CLEARTYPE_KERNEL +0x0000AA44 TD_PS_SAMPLER17_CLEARTYPE_KERNEL +0x0000A60C TD_VS_SAMPLER0_BORDER_ALPHA +0x0000A61C TD_VS_SAMPLER1_BORDER_ALPHA +0x0000A62C TD_VS_SAMPLER2_BORDER_ALPHA +0x0000A63C TD_VS_SAMPLER3_BORDER_ALPHA +0x0000A64C TD_VS_SAMPLER4_BORDER_ALPHA +0x0000A65C TD_VS_SAMPLER5_BORDER_ALPHA +0x0000A66C TD_VS_SAMPLER6_BORDER_ALPHA +0x0000A67C TD_VS_SAMPLER7_BORDER_ALPHA +0x0000A68C TD_VS_SAMPLER8_BORDER_ALPHA +0x0000A69C TD_VS_SAMPLER9_BORDER_ALPHA +0x0000A6AC TD_VS_SAMPLER10_BORDER_ALPHA +0x0000A6BC TD_VS_SAMPLER11_BORDER_ALPHA +0x0000A6CC TD_VS_SAMPLER12_BORDER_ALPHA +0x0000A6DC TD_VS_SAMPLER13_BORDER_ALPHA +0x0000A6EC TD_VS_SAMPLER14_BORDER_ALPHA +0x0000A6FC TD_VS_SAMPLER15_BORDER_ALPHA +0x0000A70C TD_VS_SAMPLER16_BORDER_ALPHA +0x0000A71C TD_VS_SAMPLER17_BORDER_ALPHA +0x0000A608 TD_VS_SAMPLER0_BORDER_BLUE +0x0000A618 TD_VS_SAMPLER1_BORDER_BLUE +0x0000A628 TD_VS_SAMPLER2_BORDER_BLUE +0x0000A638 TD_VS_SAMPLER3_BORDER_BLUE +0x0000A648 TD_VS_SAMPLER4_BORDER_BLUE +0x0000A658 TD_VS_SAMPLER5_BORDER_BLUE +0x0000A668 TD_VS_SAMPLER6_BORDER_BLUE +0x0000A678 TD_VS_SAMPLER7_BORDER_BLUE +0x0000A688 TD_VS_SAMPLER8_BORDER_BLUE +0x0000A698 TD_VS_SAMPLER9_BORDER_BLUE +0x0000A6A8 TD_VS_SAMPLER10_BORDER_BLUE +0x0000A6B8 TD_VS_SAMPLER11_BORDER_BLUE +0x0000A6C8 TD_VS_SAMPLER12_BORDER_BLUE +0x0000A6D8 TD_VS_SAMPLER13_BORDER_BLUE +0x0000A6E8 TD_VS_SAMPLER14_BORDER_BLUE +0x0000A6F8 
TD_VS_SAMPLER15_BORDER_BLUE +0x0000A708 TD_VS_SAMPLER16_BORDER_BLUE +0x0000A718 TD_VS_SAMPLER17_BORDER_BLUE +0x0000A604 TD_VS_SAMPLER0_BORDER_GREEN +0x0000A614 TD_VS_SAMPLER1_BORDER_GREEN +0x0000A624 TD_VS_SAMPLER2_BORDER_GREEN +0x0000A634 TD_VS_SAMPLER3_BORDER_GREEN +0x0000A644 TD_VS_SAMPLER4_BORDER_GREEN +0x0000A654 TD_VS_SAMPLER5_BORDER_GREEN +0x0000A664 TD_VS_SAMPLER6_BORDER_GREEN +0x0000A674 TD_VS_SAMPLER7_BORDER_GREEN +0x0000A684 TD_VS_SAMPLER8_BORDER_GREEN +0x0000A694 TD_VS_SAMPLER9_BORDER_GREEN +0x0000A6A4 TD_VS_SAMPLER10_BORDER_GREEN +0x0000A6B4 TD_VS_SAMPLER11_BORDER_GREEN +0x0000A6C4 TD_VS_SAMPLER12_BORDER_GREEN +0x0000A6D4 TD_VS_SAMPLER13_BORDER_GREEN +0x0000A6E4 TD_VS_SAMPLER14_BORDER_GREEN +0x0000A6F4 TD_VS_SAMPLER15_BORDER_GREEN +0x0000A704 TD_VS_SAMPLER16_BORDER_GREEN +0x0000A714 TD_VS_SAMPLER17_BORDER_GREEN +0x0000A600 TD_VS_SAMPLER0_BORDER_RED +0x0000A610 TD_VS_SAMPLER1_BORDER_RED +0x0000A620 TD_VS_SAMPLER2_BORDER_RED +0x0000A630 TD_VS_SAMPLER3_BORDER_RED +0x0000A640 TD_VS_SAMPLER4_BORDER_RED +0x0000A650 TD_VS_SAMPLER5_BORDER_RED +0x0000A660 TD_VS_SAMPLER6_BORDER_RED +0x0000A670 TD_VS_SAMPLER7_BORDER_RED +0x0000A680 TD_VS_SAMPLER8_BORDER_RED +0x0000A690 TD_VS_SAMPLER9_BORDER_RED +0x0000A6A0 TD_VS_SAMPLER10_BORDER_RED +0x0000A6B0 TD_VS_SAMPLER11_BORDER_RED +0x0000A6C0 TD_VS_SAMPLER12_BORDER_RED +0x0000A6D0 TD_VS_SAMPLER13_BORDER_RED +0x0000A6E0 TD_VS_SAMPLER14_BORDER_RED +0x0000A6F0 TD_VS_SAMPLER15_BORDER_RED +0x0000A700 TD_VS_SAMPLER16_BORDER_RED +0x0000A710 TD_VS_SAMPLER17_BORDER_RED +0x00009508 TA_CNTL_AUX +0x0002802C DB_DEPTH_CLEAR +0x00028D24 DB_HTILE_SURFACE +0x00028D34 DB_PREFETCH_LIMIT +0x00028D30 DB_PRELOAD_CONTROL +0x00028D0C DB_RENDER_CONTROL +0x00028D10 DB_RENDER_OVERRIDE +0x0002880C DB_SHADER_CONTROL +0x00028D2C DB_SRESULTS_COMPARE_STATE1 +0x00028430 DB_STENCILREFMASK +0x00028434 DB_STENCILREFMASK_BF +0x00028028 DB_STENCIL_CLEAR +0x00028780 CB_BLEND0_CONTROL +0x00028784 CB_BLEND1_CONTROL +0x00028788 CB_BLEND2_CONTROL +0x0002878C CB_BLEND3_CONTROL +0x00028790 CB_BLEND4_CONTROL +0x00028794 CB_BLEND5_CONTROL +0x00028798 CB_BLEND6_CONTROL +0x0002879C CB_BLEND7_CONTROL +0x00028804 CB_BLEND_CONTROL +0x00028420 CB_BLEND_ALPHA +0x0002841C CB_BLEND_BLUE +0x00028418 CB_BLEND_GREEN +0x00028414 CB_BLEND_RED +0x0002812C CB_CLEAR_ALPHA +0x00028128 CB_CLEAR_BLUE +0x00028124 CB_CLEAR_GREEN +0x00028120 CB_CLEAR_RED +0x00028C30 CB_CLRCMP_CONTROL +0x00028C38 CB_CLRCMP_DST +0x00028C3C CB_CLRCMP_MSK +0x00028C34 CB_CLRCMP_SRC +0x00028100 CB_COLOR0_MASK +0x00028104 CB_COLOR1_MASK +0x00028108 CB_COLOR2_MASK +0x0002810C CB_COLOR3_MASK +0x00028110 CB_COLOR4_MASK +0x00028114 CB_COLOR5_MASK +0x00028118 CB_COLOR6_MASK +0x0002811C CB_COLOR7_MASK +0x00028080 CB_COLOR0_VIEW +0x00028084 CB_COLOR1_VIEW +0x00028088 CB_COLOR2_VIEW +0x0002808C CB_COLOR3_VIEW +0x00028090 CB_COLOR4_VIEW +0x00028094 CB_COLOR5_VIEW +0x00028098 CB_COLOR6_VIEW +0x0002809C CB_COLOR7_VIEW +0x00028808 CB_COLOR_CONTROL +0x0002842C CB_FOG_BLUE +0x00028428 CB_FOG_GREEN +0x00028424 CB_FOG_RED +0x00008040 WAIT_UNTIL +0x00008950 CC_GC_SHADER_PIPE_CONFIG +0x00008954 GC_USER_SHADER_PIPE_CONFIG +0x00009714 VC_ENHANCE +0x00009830 DB_DEBUG +0x00009838 DB_WATERMARKS +0x00028D28 DB_SRESULTS_COMPARE_STATE0 +0x00028D44 DB_ALPHA_TO_MASK +0x00009504 TA_CNTL +0x00009700 VC_CNTL +0x00009718 VC_CONFIG +0x0000A02C SMX_DC_MC_INTF_CTL diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 287fcebfb4e..626d51891ee 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -113,6 +113,7 @@ int 
rs400_gart_enable(struct radeon_device *rdev) uint32_t size_reg; uint32_t tmp; + radeon_gart_restore(rdev); tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH); tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS; WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp); @@ -150,9 +151,8 @@ int rs400_gart_enable(struct radeon_device *rdev) WREG32(RADEON_AGP_BASE, 0xFFFFFFFF); WREG32(RS480_AGP_BASE_2, 0); } - tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; - tmp = REG_SET(RS690_MC_AGP_TOP, tmp >> 16); - tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_location >> 16); + tmp = REG_SET(RS690_MC_AGP_TOP, rdev->mc.gtt_end >> 16); + tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_start >> 16); if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) { WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp); tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS; @@ -251,14 +251,19 @@ void rs400_gpu_init(struct radeon_device *rdev) } } -void rs400_vram_info(struct radeon_device *rdev) +void rs400_mc_init(struct radeon_device *rdev) { + u64 base; + rs400_gart_adjust_size(rdev); + rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev); /* DDR for all card after R300 & IGP */ rdev->mc.vram_is_ddr = true; rdev->mc.vram_width = 128; - r100_vram_init_sizes(rdev); + base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; + radeon_vram_location(rdev, &rdev->mc, base); + radeon_gtt_location(rdev, &rdev->mc); } uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) @@ -362,22 +367,6 @@ static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev) #endif } -static int rs400_mc_init(struct radeon_device *rdev) -{ - int r; - u32 tmp; - - /* Setup GPU memory space */ - tmp = RREG32(R_00015C_NB_TOM); - rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16; - rdev->mc.gtt_location = 0xFFFFFFFFUL; - r = radeon_mc_setup(rdev); - rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev); - if (r) - return r; - return 0; -} - void rs400_mc_program(struct radeon_device *rdev) { struct r100_mc_save save; @@ -516,12 +505,8 @@ int rs400_init(struct radeon_device *rdev) radeon_get_clock_info(rdev->ddev); /* Initialize power management */ radeon_pm_init(rdev); - /* Get vram informations */ - rs400_vram_info(rdev); - /* Initialize memory controller (also test AGP) */ - r = rs400_mc_init(rdev); - if (r) - return r; + /* initialize memory controller */ + rs400_mc_init(rdev); /* Fence driver */ r = radeon_fence_driver_init(rdev); if (r) diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index c3818562a13..47f046b78c6 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -45,23 +45,6 @@ void rs600_gpu_init(struct radeon_device *rdev); int rs600_mc_wait_for_idle(struct radeon_device *rdev); -int rs600_mc_init(struct radeon_device *rdev) -{ - /* read back the MC value from the hw */ - int r; - u32 tmp; - - /* Setup GPU memory space */ - tmp = RREG32_MC(R_000004_MC_FB_LOCATION); - rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16; - rdev->mc.gtt_location = 0xffffffffUL; - r = radeon_mc_setup(rdev); - rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); - if (r) - return r; - return 0; -} - /* hpd for digital panel detect/disconnect */ bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd) { @@ -213,6 +196,7 @@ int rs600_gart_enable(struct radeon_device *rdev) r = radeon_gart_table_vram_pin(rdev); if (r) return r; + radeon_gart_restore(rdev); /* Enable bus master */ tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS; 
WREG32(R_00004C_BUS_CNTL, tmp); @@ -406,10 +390,14 @@ int rs600_irq_process(struct radeon_device *rdev) if (G_000044_SW_INT(status)) radeon_fence_process(rdev); /* Vertical blank interrupts */ - if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) + if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) { drm_handle_vblank(rdev->ddev, 0); - if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) + wake_up(&rdev->irq.vblank_queue); + } + if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) { drm_handle_vblank(rdev->ddev, 1); + wake_up(&rdev->irq.vblank_queue); + } if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) { queue_hotplug = true; DRM_DEBUG("HPD1\n"); @@ -470,22 +458,22 @@ void rs600_gpu_init(struct radeon_device *rdev) dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); } -void rs600_vram_info(struct radeon_device *rdev) +void rs600_mc_init(struct radeon_device *rdev) { + u64 base; + + rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); + rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); rdev->mc.vram_is_ddr = true; rdev->mc.vram_width = 128; - rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); rdev->mc.mc_vram_size = rdev->mc.real_vram_size; - - rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); - rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); - - if (rdev->mc.mc_vram_size > rdev->mc.aper_size) - rdev->mc.mc_vram_size = rdev->mc.aper_size; - - if (rdev->mc.real_vram_size > rdev->mc.aper_size) - rdev->mc.real_vram_size = rdev->mc.aper_size; + rdev->mc.visible_vram_size = rdev->mc.aper_size; + rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); + base = RREG32_MC(R_000004_MC_FB_LOCATION); + base = G_000004_MC_FB_START(base) << 16; + radeon_vram_location(rdev, &rdev->mc, base); + radeon_gtt_location(rdev, &rdev->mc); } void rs600_bandwidth_update(struct radeon_device *rdev) @@ -661,12 +649,8 @@ int rs600_init(struct radeon_device *rdev) radeon_get_clock_info(rdev->ddev); /* Initialize power management */ radeon_pm_init(rdev); - /* Get vram informations */ - rs600_vram_info(rdev); - /* Initialize memory controller (also test AGP) */ - r = rs600_mc_init(rdev); - if (r) - return r; + /* initialize memory controller */ + rs600_mc_init(rdev); rs600_debugfs(rdev); /* Fence driver */ r = radeon_fence_driver_init(rdev); diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 06e2771aee5..83b9174f76f 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -129,27 +129,21 @@ void rs690_pm_info(struct radeon_device *rdev) rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp); } -void rs690_vram_info(struct radeon_device *rdev) +void rs690_mc_init(struct radeon_device *rdev) { fixed20_12 a; + u64 base; rs400_gart_adjust_size(rdev); - rdev->mc.vram_is_ddr = true; rdev->mc.vram_width = 128; - rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); rdev->mc.mc_vram_size = rdev->mc.real_vram_size; - rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); - - if (rdev->mc.mc_vram_size > rdev->mc.aper_size) - rdev->mc.mc_vram_size = rdev->mc.aper_size; - - if (rdev->mc.real_vram_size > rdev->mc.aper_size) - rdev->mc.real_vram_size = rdev->mc.aper_size; - + rdev->mc.visible_vram_size = rdev->mc.aper_size; + base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); + base = G_000100_MC_FB_START(base) << 16; rs690_pm_info(rdev); /* FIXME: we should enforce default clock in case GPU is not in * default 
setup @@ -160,22 +154,9 @@ void rs690_vram_info(struct radeon_device *rdev) a.full = rfixed_const(16); /* core_bandwidth = sclk(Mhz) * 16 */ rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); -} - -static int rs690_mc_init(struct radeon_device *rdev) -{ - int r; - u32 tmp; - - /* Setup GPU memory space */ - tmp = RREG32_MC(R_000100_MCCFG_FB_LOCATION); - rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16; - rdev->mc.gtt_location = 0xFFFFFFFFUL; - r = radeon_mc_setup(rdev); rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); - if (r) - return r; - return 0; + radeon_vram_location(rdev, &rdev->mc, base); + radeon_gtt_location(rdev, &rdev->mc); } void rs690_line_buffer_adjust(struct radeon_device *rdev, @@ -728,12 +709,8 @@ int rs690_init(struct radeon_device *rdev) radeon_get_clock_info(rdev->ddev); /* Initialize power management */ radeon_pm_init(rdev); - /* Get vram informations */ - rs690_vram_info(rdev); - /* Initialize memory controller (also test AGP) */ - r = rs690_mc_init(rdev); - if (r) - return r; + /* initialize memory controller */ + rs690_mc_init(rdev); rv515_debugfs(rdev); /* Fence driver */ r = radeon_fence_driver_init(rdev); diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 0e1e6b8632b..bea747da123 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -277,13 +277,15 @@ static void rv515_vram_get_type(struct radeon_device *rdev) } } -void rv515_vram_info(struct radeon_device *rdev) +void rv515_mc_init(struct radeon_device *rdev) { fixed20_12 a; rv515_vram_get_type(rdev); - r100_vram_init_sizes(rdev); + radeon_vram_location(rdev, &rdev->mc, 0); + if (!(rdev->flags & RADEON_IS_AGP)) + radeon_gtt_location(rdev, &rdev->mc); /* FIXME: we should enforce default clock in case GPU is not in * default setup */ @@ -587,12 +589,15 @@ int rv515_init(struct radeon_device *rdev) radeon_get_clock_info(rdev->ddev); /* Initialize power management */ radeon_pm_init(rdev); - /* Get vram informations */ - rv515_vram_info(rdev); - /* Initialize memory controller (also test AGP) */ - r = r420_mc_init(rdev); - if (r) - return r; + /* initialize AGP */ + if (rdev->flags & RADEON_IS_AGP) { + r = radeon_agp_init(rdev); + if (r) { + radeon_agp_disable(rdev); + } + } + /* initialize memory controller */ + rv515_mc_init(rdev); rv515_debugfs(rdev); /* Fence driver */ r = radeon_fence_driver_init(rdev); diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 03021674d09..37887dee12a 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -56,6 +56,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev) r = radeon_gart_table_vram_pin(rdev); if (r) return r; + radeon_gart_restore(rdev); /* Setup L2 cache */ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | @@ -273,9 +274,10 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev) /* * Core functions */ -static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes, - u32 num_backends, - u32 backend_disable_mask) +static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev, + u32 num_tile_pipes, + u32 num_backends, + u32 backend_disable_mask) { u32 backend_map = 0; u32 enabled_backends_mask; @@ -284,6 +286,7 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes, u32 swizzle_pipe[R7XX_MAX_PIPES]; u32 cur_backend; u32 i; + bool force_no_swizzle; if (num_tile_pipes > R7XX_MAX_PIPES) num_tile_pipes = 
R7XX_MAX_PIPES; @@ -313,6 +316,18 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes, if (enabled_backends_count != num_backends) num_backends = enabled_backends_count; + switch (rdev->family) { + case CHIP_RV770: + case CHIP_RV730: + force_no_swizzle = false; + break; + case CHIP_RV710: + case CHIP_RV740: + default: + force_no_swizzle = true; + break; + } + memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES); switch (num_tile_pipes) { case 1: @@ -323,49 +338,100 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes, swizzle_pipe[1] = 1; break; case 3: - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 1; + if (force_no_swizzle) { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 1; + swizzle_pipe[2] = 2; + } else { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 2; + swizzle_pipe[2] = 1; + } break; case 4: - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 3; - swizzle_pipe[3] = 1; + if (force_no_swizzle) { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 1; + swizzle_pipe[2] = 2; + swizzle_pipe[3] = 3; + } else { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 2; + swizzle_pipe[2] = 3; + swizzle_pipe[3] = 1; + } break; case 5: - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 4; - swizzle_pipe[3] = 1; - swizzle_pipe[4] = 3; + if (force_no_swizzle) { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 1; + swizzle_pipe[2] = 2; + swizzle_pipe[3] = 3; + swizzle_pipe[4] = 4; + } else { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 2; + swizzle_pipe[2] = 4; + swizzle_pipe[3] = 1; + swizzle_pipe[4] = 3; + } break; case 6: - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 4; - swizzle_pipe[3] = 5; - swizzle_pipe[4] = 3; - swizzle_pipe[5] = 1; + if (force_no_swizzle) { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 1; + swizzle_pipe[2] = 2; + swizzle_pipe[3] = 3; + swizzle_pipe[4] = 4; + swizzle_pipe[5] = 5; + } else { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 2; + swizzle_pipe[2] = 4; + swizzle_pipe[3] = 5; + swizzle_pipe[4] = 3; + swizzle_pipe[5] = 1; + } break; case 7: - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 4; - swizzle_pipe[3] = 6; - swizzle_pipe[4] = 3; - swizzle_pipe[5] = 1; - swizzle_pipe[6] = 5; + if (force_no_swizzle) { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 1; + swizzle_pipe[2] = 2; + swizzle_pipe[3] = 3; + swizzle_pipe[4] = 4; + swizzle_pipe[5] = 5; + swizzle_pipe[6] = 6; + } else { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 2; + swizzle_pipe[2] = 4; + swizzle_pipe[3] = 6; + swizzle_pipe[4] = 3; + swizzle_pipe[5] = 1; + swizzle_pipe[6] = 5; + } break; case 8: - swizzle_pipe[0] = 0; - swizzle_pipe[1] = 2; - swizzle_pipe[2] = 4; - swizzle_pipe[3] = 6; - swizzle_pipe[4] = 3; - swizzle_pipe[5] = 1; - swizzle_pipe[6] = 7; - swizzle_pipe[7] = 5; + if (force_no_swizzle) { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 1; + swizzle_pipe[2] = 2; + swizzle_pipe[3] = 3; + swizzle_pipe[4] = 4; + swizzle_pipe[5] = 5; + swizzle_pipe[6] = 6; + swizzle_pipe[7] = 7; + } else { + swizzle_pipe[0] = 0; + swizzle_pipe[1] = 2; + swizzle_pipe[2] = 4; + swizzle_pipe[3] = 6; + swizzle_pipe[4] = 3; + swizzle_pipe[5] = 1; + swizzle_pipe[6] = 7; + swizzle_pipe[7] = 5; + } break; } @@ -385,8 +451,10 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes, static void rv770_gpu_init(struct radeon_device *rdev) { int i, j, num_qd_pipes; + u32 ta_aux_cntl; u32 sx_debug_1; u32 smx_dc_ctl0; + u32 db_debug3; u32 num_gs_verts_per_thread; u32 vgt_gs_per_es; u32 gs_prim_buffer_depth = 0; @@ -515,6 +583,7 @@ static 
void rv770_gpu_init(struct radeon_device *rdev) switch (rdev->config.rv770.max_tile_pipes) { case 1: + default: gb_tiling_config |= PIPE_TILING(0); break; case 2: @@ -526,16 +595,17 @@ static void rv770_gpu_init(struct radeon_device *rdev) case 8: gb_tiling_config |= PIPE_TILING(3); break; - default: - break; } + rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes; if (rdev->family == CHIP_RV770) gb_tiling_config |= BANK_TILING(1); else gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); + rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3); gb_tiling_config |= GROUP_SIZE(0); + rdev->config.rv770.tiling_group_size = 256; if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) { gb_tiling_config |= ROW_TILING(3); @@ -549,21 +619,27 @@ static void rv770_gpu_init(struct radeon_device *rdev) gb_tiling_config |= BANK_SWAPS(1); - if (rdev->family == CHIP_RV740) - backend_map = 0x28; - else - backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes, - rdev->config.rv770.max_backends, - (0xff << rdev->config.rv770.max_backends) & 0xff); - gb_tiling_config |= BACKEND_MAP(backend_map); + cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000; + cc_rb_backend_disable |= + BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK); - cc_gc_shader_pipe_config = + cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00; + cc_gc_shader_pipe_config |= INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK); cc_gc_shader_pipe_config |= INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK); - cc_rb_backend_disable = - BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK); + if (rdev->family == CHIP_RV740) + backend_map = 0x28; + else + backend_map = r700_get_tile_pipe_to_backend_map(rdev, + rdev->config.rv770.max_tile_pipes, + (R7XX_MAX_BACKENDS - + r600_count_pipe_bits((cc_rb_backend_disable & + R7XX_MAX_BACKENDS_MASK) >> 16)), + (cc_rb_backend_disable >> 16)); + gb_tiling_config |= BACKEND_MAP(backend_map); + WREG32(GB_TILING_CONFIG, gb_tiling_config); WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); @@ -571,16 +647,13 @@ static void rv770_gpu_init(struct radeon_device *rdev) WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); - WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); + WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); - WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); WREG32(CGTS_SYS_TCC_DISABLE, 0); WREG32(CGTS_TCC_DISABLE, 0); - WREG32(CGTS_USER_SYS_TCC_DISABLE, 0); - WREG32(CGTS_USER_TCC_DISABLE, 0); num_qd_pipes = - R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK); + R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK); WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK); @@ -590,10 +663,8 @@ static void rv770_gpu_init(struct radeon_device *rdev) WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30)); - WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | - SYNC_GRADIENT | - SYNC_WALKER | - SYNC_ALIGNER)); + ta_aux_cntl = RREG32(TA_CNTL_AUX); + WREG32(TA_CNTL_AUX, ta_aux_cntl | DISABLE_CUBE_ANISO); sx_debug_1 = RREG32(SX_DEBUG_1); sx_debug_1 
|= ENABLE_NEW_SMX_ADDRESS; @@ -604,14 +675,28 @@ static void rv770_gpu_init(struct radeon_device *rdev) smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1); WREG32(SMX_DC_CTL0, smx_dc_ctl0); - WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) | - GS_FLUSH_CTL(4) | - ACK_FLUSH_CTL(3) | - SYNC_FLUSH_CTL)); + if (rdev->family != CHIP_RV740) + WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) | + GS_FLUSH_CTL(4) | + ACK_FLUSH_CTL(3) | + SYNC_FLUSH_CTL)); - if (rdev->family == CHIP_RV770) - WREG32(DB_DEBUG3, DB_CLK_OFF_DELAY(0x1f)); - else { + db_debug3 = RREG32(DB_DEBUG3); + db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f); + switch (rdev->family) { + case CHIP_RV770: + case CHIP_RV740: + db_debug3 |= DB_CLK_OFF_DELAY(0x1f); + break; + case CHIP_RV710: + case CHIP_RV730: + default: + db_debug3 |= DB_CLK_OFF_DELAY(2); + break; + } + WREG32(DB_DEBUG3, db_debug3); + + if (rdev->family != CHIP_RV770) { db_debug4 = RREG32(DB_DEBUG4); db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER; WREG32(DB_DEBUG4, db_debug4); @@ -640,10 +725,10 @@ static void rv770_gpu_init(struct radeon_device *rdev) ALU_UPDATE_FIFO_HIWATER(0x8)); switch (rdev->family) { case CHIP_RV770: - sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1); - break; case CHIP_RV730: case CHIP_RV710: + sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1); + break; case CHIP_RV740: default: sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4); @@ -816,45 +901,13 @@ int rv770_mc_init(struct radeon_device *rdev) /* Setup GPU memory space */ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); - - if (rdev->mc.mc_vram_size > rdev->mc.aper_size) + rdev->mc.visible_vram_size = rdev->mc.aper_size; + /* FIXME remove this once we support unmappable VRAM */ + if (rdev->mc.mc_vram_size > rdev->mc.aper_size) { rdev->mc.mc_vram_size = rdev->mc.aper_size; - - if (rdev->mc.real_vram_size > rdev->mc.aper_size) rdev->mc.real_vram_size = rdev->mc.aper_size; - - if (rdev->flags & RADEON_IS_AGP) { - /* gtt_size is setup by radeon_agp_init */ - rdev->mc.gtt_location = rdev->mc.agp_base; - tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size; - /* Try to put vram before or after AGP because we - * we want SYSTEM_APERTURE to cover both VRAM and - * AGP so that GPU can catch out of VRAM/AGP access - */ - if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) { - /* Enough place before */ - rdev->mc.vram_location = rdev->mc.gtt_location - - rdev->mc.mc_vram_size; - } else if (tmp > rdev->mc.mc_vram_size) { - /* Enough place after */ - rdev->mc.vram_location = rdev->mc.gtt_location + - rdev->mc.gtt_size; - } else { - /* Try to setup VRAM then AGP might not - * not work on some card - */ - rdev->mc.vram_location = 0x00000000UL; - rdev->mc.gtt_location = rdev->mc.mc_vram_size; - } - } else { - rdev->mc.vram_location = 0x00000000UL; - rdev->mc.gtt_location = rdev->mc.mc_vram_size; - rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; } - rdev->mc.vram_start = rdev->mc.vram_location; - rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; - rdev->mc.gtt_start = rdev->mc.gtt_location; - rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; + r600_vram_gtt_location(rdev, &rdev->mc); /* FIXME: we should enforce default clock in case GPU is not in * default setup */ @@ -863,6 +916,7 @@ int rv770_mc_init(struct radeon_device *rdev) rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); return 0; } + int rv770_gpu_reset(struct radeon_device *rdev) { /* FIXME: implement any rv770 specific bits */ @@ -1038,6 +1092,7 @@ int rv770_init(struct 
radeon_device *rdev) r = radeon_fence_driver_init(rdev); if (r) return r; + /* initialize AGP */ if (rdev->flags & RADEON_IS_AGP) { r = radeon_agp_init(rdev); if (r) diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h index a1367ab6f26..9506f8cb99e 100644 --- a/drivers/gpu/drm/radeon/rv770d.h +++ b/drivers/gpu/drm/radeon/rv770d.h @@ -343,4 +343,6 @@ #define WAIT_UNTIL 0x8040 +#define SRBM_STATUS 0x0E50 + #endif diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 3d47a2c1232..a759170763b 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -480,7 +480,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm) void *from_virtual; void *to_virtual; int i; - int ret; + int ret = -ENOMEM; if (ttm->page_flags & TTM_PAGE_FLAG_USER) { ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start, @@ -499,8 +499,10 @@ static int ttm_tt_swapin(struct ttm_tt *ttm) for (i = 0; i < ttm->num_pages; ++i) { from_page = read_mapping_page(swap_space, i, NULL); - if (IS_ERR(from_page)) + if (IS_ERR(from_page)) { + ret = PTR_ERR(from_page); goto out_err; + } to_page = __ttm_tt_get_page(ttm, i); if (unlikely(to_page == NULL)) goto out_err; @@ -523,7 +525,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm) return 0; out_err: ttm_tt_free_alloced_pages(ttm); - return -ENOMEM; + return ret; } int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage) @@ -535,6 +537,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage) void *from_virtual; void *to_virtual; int i; + int ret = -ENOMEM; BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated); BUG_ON(ttm->caching_state != tt_cached); @@ -557,7 +560,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage) 0); if (unlikely(IS_ERR(swap_storage))) { printk(KERN_ERR "Failed allocating swap storage.\n"); - return -ENOMEM; + return PTR_ERR(swap_storage); } } else swap_storage = persistant_swap_storage; @@ -569,9 +572,10 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage) if (unlikely(from_page == NULL)) continue; to_page = read_mapping_page(swap_space, i, NULL); - if (unlikely(to_page == NULL)) + if (unlikely(IS_ERR(to_page))) { + ret = PTR_ERR(to_page); goto out_err; - + } preempt_disable(); from_virtual = kmap_atomic(from_page, KM_USER0); to_virtual = kmap_atomic(to_page, KM_USER1); @@ -595,5 +599,5 @@ out_err: if (!persistant_swap_storage) fput(swap_storage); - return -ENOMEM; + return ret; } diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig index 0920492cea0..61ab4daf0bb 100644 --- a/drivers/gpu/vga/Kconfig +++ b/drivers/gpu/vga/Kconfig @@ -16,3 +16,14 @@ config VGA_ARB_MAX_GPUS help Reserves space in the kernel to maintain resource locking for multiple GPUS. The overhead for each GPU is very small. + +config VGA_SWITCHEROO + bool "Laptop Hybrid Grapics - GPU switching support" + depends on X86 + depends on ACPI + help + Many laptops released in 2008/9/10 have two gpus with a multiplxer + to switch between them. This adds support for dynamic switching when + X isn't running and delayed switching until the next logoff. This + features is called hybrid graphics, ATI PowerXpress, and Nvidia + HybridPower. 
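The recurring change in the rs600, rs690, rv515 and rv770 hunks above is that each ASIC's open-coded VRAM/GTT address-space setup is replaced by shared helpers (radeon_vram_location(), radeon_gtt_location(), r600_vram_gtt_location()). The standalone sketch below only illustrates the kind of placement decision the removed rv770_mc_init() block made explicitly; the helpers' internals are not part of this diff, so treat it as an approximation rather than the radeon implementation.

#include <stdint.h>
#include <stdio.h>

struct mc_layout {
	uint64_t vram_start, vram_end;
	uint64_t gtt_start, gtt_end;
};

/* Simplified version of the logic this series deletes from rv770_mc_init():
 * given a fixed GTT/AGP window, put VRAM below it, above it, or fall back
 * to VRAM at 0 with GTT right behind it.  Everything must stay inside the
 * GPU's 32-bit aperture. */
static void place_vram_gtt(struct mc_layout *mc, uint64_t vram_size,
			   uint64_t gtt_base, uint64_t gtt_size)
{
	uint64_t space_above = 0xFFFFFFFFULL - (gtt_base + gtt_size);

	if (gtt_base >= vram_size) {		/* room below the GTT window */
		mc->vram_start = gtt_base - vram_size;
	} else if (space_above >= vram_size) {	/* room above it */
		mc->vram_start = gtt_base + gtt_size;
	} else {				/* rebuild the layout from 0 */
		mc->vram_start = 0;
		gtt_base = vram_size;
	}
	mc->vram_end = mc->vram_start + vram_size - 1;
	mc->gtt_start = gtt_base;
	mc->gtt_end = gtt_base + gtt_size - 1;
}

int main(void)
{
	struct mc_layout mc;

	place_vram_gtt(&mc, 512ULL << 20, 0xD0000000ULL, 256ULL << 20);
	printf("VRAM 0x%llx-0x%llx, GTT 0x%llx-0x%llx\n",
	       (unsigned long long)mc.vram_start, (unsigned long long)mc.vram_end,
	       (unsigned long long)mc.gtt_start, (unsigned long long)mc.gtt_end);
	return 0;
}

On the IGP parts the base is not chosen but read back from the hardware, which is why the rs600/rs690 hunks decode G_000004_MC_FB_START()/G_000100_MC_FB_START() and shift the value left by 16 before handing it to radeon_vram_location().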
diff --git a/drivers/gpu/vga/Makefile b/drivers/gpu/vga/Makefile index 7cc8c1ed645..14ca30b75d0 100644 --- a/drivers/gpu/vga/Makefile +++ b/drivers/gpu/vga/Makefile @@ -1 +1,2 @@ obj-$(CONFIG_VGA_ARB) += vgaarb.o +obj-$(CONFIG_VGA_SWITCHEROO) += vga_switcheroo.o diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c new file mode 100644 index 00000000000..d6d1149d525 --- /dev/null +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -0,0 +1,450 @@ +/* + * Copyright (c) 2010 Red Hat Inc. + * Author : Dave Airlie <airlied@redhat.com> + * + * + * Licensed under GPLv2 + * + * vga_switcheroo.c - Support for laptop with dual GPU using one set of outputs + + Switcher interface - methods require for ATPX and DCM + - switchto - this throws the output MUX switch + - discrete_set_power - sets the power state for the discrete card + + GPU driver interface + - set_gpu_state - this should do the equiv of s/r for the card + - this should *not* set the discrete power state + - switch_check - check if the device is in a position to switch now + */ + +#include <linux/module.h> +#include <linux/dmi.h> +#include <linux/seq_file.h> +#include <linux/uaccess.h> +#include <linux/fs.h> +#include <linux/debugfs.h> +#include <linux/fb.h> + +#include <linux/pci.h> +#include <linux/vga_switcheroo.h> + +struct vga_switcheroo_client { + struct pci_dev *pdev; + struct fb_info *fb_info; + int pwr_state; + void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state); + bool (*can_switch)(struct pci_dev *pdev); + int id; + bool active; +}; + +static DEFINE_MUTEX(vgasr_mutex); + +struct vgasr_priv { + + bool active; + bool delayed_switch_active; + enum vga_switcheroo_client_id delayed_client_id; + + struct dentry *debugfs_root; + struct dentry *switch_file; + + int registered_clients; + struct vga_switcheroo_client clients[VGA_SWITCHEROO_MAX_CLIENTS]; + + struct vga_switcheroo_handler *handler; +}; + +static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv); +static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv); + +/* only one switcheroo per system */ +static struct vgasr_priv vgasr_priv; + +int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) +{ + mutex_lock(&vgasr_mutex); + if (vgasr_priv.handler) { + mutex_unlock(&vgasr_mutex); + return -EINVAL; + } + + vgasr_priv.handler = handler; + mutex_unlock(&vgasr_mutex); + return 0; +} +EXPORT_SYMBOL(vga_switcheroo_register_handler); + +void vga_switcheroo_unregister_handler(void) +{ + mutex_lock(&vgasr_mutex); + vgasr_priv.handler = NULL; + mutex_unlock(&vgasr_mutex); +} +EXPORT_SYMBOL(vga_switcheroo_unregister_handler); + +static void vga_switcheroo_enable(void) +{ + int i; + int ret; + /* call the handler to init */ + vgasr_priv.handler->init(); + + for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { + ret = vgasr_priv.handler->get_client_id(vgasr_priv.clients[i].pdev); + if (ret < 0) + return; + + vgasr_priv.clients[i].id = ret; + } + vga_switcheroo_debugfs_init(&vgasr_priv); + vgasr_priv.active = true; +} + +int vga_switcheroo_register_client(struct pci_dev *pdev, + void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state), + bool (*can_switch)(struct pci_dev *pdev)) +{ + int index; + + mutex_lock(&vgasr_mutex); + /* don't do IGD vs DIS here */ + if (vgasr_priv.registered_clients & 1) + index = 1; + else + index = 0; + + vgasr_priv.clients[index].pwr_state = VGA_SWITCHEROO_ON; + vgasr_priv.clients[index].pdev = pdev; + vgasr_priv.clients[index].set_gpu_state = set_gpu_state; + 
vgasr_priv.clients[index].can_switch = can_switch; + vgasr_priv.clients[index].id = -1; + if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW) + vgasr_priv.clients[index].active = true; + + vgasr_priv.registered_clients |= (1 << index); + + /* if we get two clients + handler */ + if (vgasr_priv.registered_clients == 0x3 && vgasr_priv.handler) { + printk(KERN_INFO "vga_switcheroo: enabled\n"); + vga_switcheroo_enable(); + } + mutex_unlock(&vgasr_mutex); + return 0; +} +EXPORT_SYMBOL(vga_switcheroo_register_client); + +void vga_switcheroo_unregister_client(struct pci_dev *pdev) +{ + int i; + + mutex_lock(&vgasr_mutex); + for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { + if (vgasr_priv.clients[i].pdev == pdev) { + vgasr_priv.registered_clients &= ~(1 << i); + break; + } + } + + printk(KERN_INFO "vga_switcheroo: disabled\n"); + vga_switcheroo_debugfs_fini(&vgasr_priv); + vgasr_priv.active = false; + mutex_unlock(&vgasr_mutex); +} +EXPORT_SYMBOL(vga_switcheroo_unregister_client); + +void vga_switcheroo_client_fb_set(struct pci_dev *pdev, + struct fb_info *info) +{ + int i; + + mutex_lock(&vgasr_mutex); + for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { + if (vgasr_priv.clients[i].pdev == pdev) { + vgasr_priv.clients[i].fb_info = info; + break; + } + } + mutex_unlock(&vgasr_mutex); +} +EXPORT_SYMBOL(vga_switcheroo_client_fb_set); + +static int vga_switcheroo_show(struct seq_file *m, void *v) +{ + int i; + mutex_lock(&vgasr_mutex); + for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { + seq_printf(m, "%d:%c:%s:%s\n", i, + vgasr_priv.clients[i].active ? '+' : ' ', + vgasr_priv.clients[i].pwr_state ? "Pwr" : "Off", + pci_name(vgasr_priv.clients[i].pdev)); + } + mutex_unlock(&vgasr_mutex); + return 0; +} + +static int vga_switcheroo_debugfs_open(struct inode *inode, struct file *file) +{ + return single_open(file, vga_switcheroo_show, NULL); +} + +static int vga_switchon(struct vga_switcheroo_client *client) +{ + int ret; + + ret = vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON); + /* call the driver callback to turn on device */ + client->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON); + client->pwr_state = VGA_SWITCHEROO_ON; + return 0; +} + +static int vga_switchoff(struct vga_switcheroo_client *client) +{ + /* call the driver callback to turn off device */ + client->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF); + vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF); + client->pwr_state = VGA_SWITCHEROO_OFF; + return 0; +} + +static int vga_switchto(struct vga_switcheroo_client *new_client) +{ + int ret; + int i; + struct vga_switcheroo_client *active = NULL; + + if (new_client->active == true) + return 0; + + for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { + if (vgasr_priv.clients[i].active == true) { + active = &vgasr_priv.clients[i]; + break; + } + } + if (!active) + return 0; + + /* power up the first device */ + ret = pci_enable_device(new_client->pdev); + if (ret) + return ret; + + if (new_client->pwr_state == VGA_SWITCHEROO_OFF) + vga_switchon(new_client); + + /* swap shadow resource to denote boot VGA device has changed so X starts on new device */ + active->active = false; + + active->pdev->resource[PCI_ROM_RESOURCE].flags &= ~IORESOURCE_ROM_SHADOW; + new_client->pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW; + + if (new_client->fb_info) { + struct fb_event event; + event.info = new_client->fb_info; + fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event); + } + + ret = vgasr_priv.handler->switchto(new_client->id); 
+ if (ret) + return ret; + + if (active->pwr_state == VGA_SWITCHEROO_ON) + vga_switchoff(active); + + new_client->active = true; + return 0; +} + +static ssize_t +vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + char usercmd[64]; + const char *pdev_name; + int i, ret; + bool delay = false, can_switch; + int client_id = -1; + struct vga_switcheroo_client *client = NULL; + + if (cnt > 63) + cnt = 63; + + if (copy_from_user(usercmd, ubuf, cnt)) + return -EFAULT; + + mutex_lock(&vgasr_mutex); + + if (!vgasr_priv.active) + return -EINVAL; + + /* pwr off the device not in use */ + if (strncmp(usercmd, "OFF", 3) == 0) { + for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { + if (vgasr_priv.clients[i].active) + continue; + if (vgasr_priv.clients[i].pwr_state == VGA_SWITCHEROO_ON) + vga_switchoff(&vgasr_priv.clients[i]); + } + goto out; + } + /* pwr on the device not in use */ + if (strncmp(usercmd, "ON", 2) == 0) { + for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { + if (vgasr_priv.clients[i].active) + continue; + if (vgasr_priv.clients[i].pwr_state == VGA_SWITCHEROO_OFF) + vga_switchon(&vgasr_priv.clients[i]); + } + goto out; + } + + /* request a delayed switch - test can we switch now */ + if (strncmp(usercmd, "DIGD", 4) == 0) { + client_id = VGA_SWITCHEROO_IGD; + delay = true; + } + + if (strncmp(usercmd, "DDIS", 4) == 0) { + client_id = VGA_SWITCHEROO_DIS; + delay = true; + } + + if (strncmp(usercmd, "IGD", 3) == 0) + client_id = VGA_SWITCHEROO_IGD; + + if (strncmp(usercmd, "DIS", 3) == 0) + client_id = VGA_SWITCHEROO_DIS; + + if (client_id == -1) + goto out; + + for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { + if (vgasr_priv.clients[i].id == client_id) { + client = &vgasr_priv.clients[i]; + break; + } + } + + vgasr_priv.delayed_switch_active = false; + /* okay we want a switch - test if devices are willing to switch */ + can_switch = true; + for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { + can_switch = vgasr_priv.clients[i].can_switch(vgasr_priv.clients[i].pdev); + if (can_switch == false) { + printk(KERN_ERR "vga_switcheroo: client %d refused switch\n", i); + break; + } + } + + if (can_switch == false && delay == false) + goto out; + + if (can_switch == true) { + pdev_name = pci_name(client->pdev); + ret = vga_switchto(client); + if (ret) + printk(KERN_ERR "vga_switcheroo: switching failed %d\n", ret); + } else { + printk(KERN_INFO "vga_switcheroo: setting delayed switch to client %d\n", client->id); + vgasr_priv.delayed_switch_active = true; + vgasr_priv.delayed_client_id = client_id; + + /* we should at least power up the card to + make the switch faster */ + if (client->pwr_state == VGA_SWITCHEROO_OFF) + vga_switchon(client); + } + +out: + mutex_unlock(&vgasr_mutex); + return cnt; +} + +static const struct file_operations vga_switcheroo_debugfs_fops = { + .owner = THIS_MODULE, + .open = vga_switcheroo_debugfs_open, + .write = vga_switcheroo_debugfs_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv) +{ + if (priv->switch_file) { + debugfs_remove(priv->switch_file); + priv->switch_file = NULL; + } + if (priv->debugfs_root) { + debugfs_remove(priv->debugfs_root); + priv->debugfs_root = NULL; + } +} + +static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv) +{ + /* already initialised */ + if (priv->debugfs_root) + return 0; + priv->debugfs_root = debugfs_create_dir("vgaswitcheroo", NULL); + + if (!priv->debugfs_root) { + 
printk(KERN_ERR "vga_switcheroo: Cannot create /sys/kernel/debug/vgaswitcheroo\n"); + goto fail; + } + + priv->switch_file = debugfs_create_file("switch", 0644, + priv->debugfs_root, NULL, &vga_switcheroo_debugfs_fops); + if (!priv->switch_file) { + printk(KERN_ERR "vga_switcheroo: cannot create /sys/kernel/debug/vgaswitcheroo/switch\n"); + goto fail; + } + return 0; +fail: + vga_switcheroo_debugfs_fini(priv); + return -1; +} + +int vga_switcheroo_process_delayed_switch(void) +{ + struct vga_switcheroo_client *client = NULL; + const char *pdev_name; + bool can_switch = true; + int i; + int ret; + int err = -EINVAL; + + mutex_lock(&vgasr_mutex); + if (!vgasr_priv.delayed_switch_active) + goto err; + + printk(KERN_INFO "vga_switcheroo: processing delayed switch to %d\n", vgasr_priv.delayed_client_id); + + for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) { + if (vgasr_priv.clients[i].id == vgasr_priv.delayed_client_id) + client = &vgasr_priv.clients[i]; + can_switch = vgasr_priv.clients[i].can_switch(vgasr_priv.clients[i].pdev); + if (can_switch == false) { + printk(KERN_ERR "vga_switcheroo: client %d refused switch\n", i); + break; + } + } + + if (can_switch == false || client == NULL) + goto err; + + pdev_name = pci_name(client->pdev); + ret = vga_switchto(client); + if (ret) + printk(KERN_ERR "vga_switcheroo: delayed switching failed %d\n", ret); + + vgasr_priv.delayed_switch_active = false; + err = 0; +err: + mutex_unlock(&vgasr_mutex); + return err; +} +EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch); + diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c index 867e08433e4..433602aed46 100644 --- a/drivers/hid/usbhid/hiddev.c +++ b/drivers/hid/usbhid/hiddev.c @@ -265,9 +265,10 @@ static int hiddev_release(struct inode * inode, struct file * file) static int hiddev_open(struct inode *inode, struct file *file) { struct hiddev_list *list; - int res; + int res, i; - int i = iminor(inode) - HIDDEV_MINOR_BASE; + lock_kernel(); + i = iminor(inode) - HIDDEV_MINOR_BASE; if (i >= HIDDEV_MINORS || i < 0 || !hiddev_table[i]) return -ENODEV; @@ -313,10 +314,12 @@ static int hiddev_open(struct inode *inode, struct file *file) usbhid_open(hid); } + unlock_kernel(); return 0; bail: file->private_data = NULL; kfree(list); + unlock_kernel(); return res; } diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig index 8d8a00e5a30..02ce9cff5fc 100644 --- a/drivers/i2c/Kconfig +++ b/drivers/i2c/Kconfig @@ -61,6 +61,16 @@ config I2C_HELPER_AUTO In doubt, say Y. +config I2C_SMBUS + tristate "SMBus-specific protocols" if !I2C_HELPER_AUTO + help + Say Y here if you want support for SMBus extensions to the I2C + specification. At the moment, the only supported extension is + the SMBus alert protocol. + + This support is also available as a module. If so, the module + will be called i2c-smbus. 
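The vga_switcheroo core above only becomes active once a platform handler and two GPU clients have registered, so the other half of the interface lives in the GPU drivers. The fragment below is a hypothetical driver-side sketch using just the entry points exported in this patch (vga_switcheroo_register_client(), vga_switcheroo_client_fb_set(), vga_switcheroo_unregister_client(), vga_switcheroo_process_delayed_switch()); all demo_* names are invented for illustration and the bodies are placeholders.

#include <linux/pci.h>
#include <linux/fb.h>
#include <linux/vga_switcheroo.h>

static struct fb_info *demo_fbinfo;	/* filled in by the driver's fbdev code */

/* Bring the GPU up or down when the mux moves away from us; the handler's
 * power_state() callback, not this hook, owns the actual supply rails. */
static void demo_set_gpu_state(struct pci_dev *pdev,
			       enum vga_switcheroo_state state)
{
	if (state == VGA_SWITCHEROO_ON) {
		pci_set_power_state(pdev, PCI_D0);
		/* ... driver-specific resume ... */
	} else {
		/* ... driver-specific suspend ... */
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

static bool demo_can_switch(struct pci_dev *pdev)
{
	/* a real driver refuses while the device is still open or busy */
	return true;
}

static int __devinit demo_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	/* ... normal setup, fbdev init filling demo_fbinfo ... */
	vga_switcheroo_register_client(pdev, demo_set_gpu_state,
				       demo_can_switch);
	vga_switcheroo_client_fb_set(pdev, demo_fbinfo);
	return 0;
}

static void __devexit demo_remove(struct pci_dev *pdev)
{
	vga_switcheroo_unregister_client(pdev);
}

/* e.g. from the DRM lastclose hook: complete a pending DIGD/DDIS request */
static void demo_lastclose(void)
{
	vga_switcheroo_process_delayed_switch();
}

Switching itself is then driven from userspace through the debugfs file created above: writing IGD or DIS to /sys/kernel/debug/vgaswitcheroo/switch switches immediately when every can_switch() callback agrees, DIGD/DDIS arm the delayed path that demo_lastclose() completes on the next release of the device, and ON/OFF merely power the currently inactive card.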
+ source drivers/i2c/algos/Kconfig source drivers/i2c/busses/Kconfig source drivers/i2c/chips/Kconfig diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile index ba26e6cbe74..acd0250c16a 100644 --- a/drivers/i2c/Makefile +++ b/drivers/i2c/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_I2C_BOARDINFO) += i2c-boardinfo.o obj-$(CONFIG_I2C) += i2c-core.o +obj-$(CONFIG_I2C_SMBUS) += i2c-smbus.o obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o obj-y += busses/ chips/ algos/ diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c index 78d42aae008..dcdaf8e675b 100644 --- a/drivers/i2c/algos/i2c-algo-pca.c +++ b/drivers/i2c/algos/i2c-algo-pca.c @@ -453,8 +453,6 @@ static int pca_init(struct i2c_adapter *adap) */ int raise_fall_time; - struct i2c_algo_pca_data *pca_data = adap->algo_data; - /* Ignore the reset function from the module, * we can use the parallel bus reset */ diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 737f05200b1..4cc3807bd31 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -77,7 +77,7 @@ config I2C_AMD8111 will be called i2c-amd8111. config I2C_I801 - tristate "Intel 82801 (ICH)" + tristate "Intel 82801 (ICH/PCH)" depends on PCI help If you say yes to this option, support will be included for the Intel @@ -97,7 +97,8 @@ config I2C_I801 ICH9 Tolapai ICH10 - PCH + 3400/5 Series (PCH) + Cougar Point (PCH) This driver can also be built as a module. If so, the module will be called i2c-i801. @@ -580,6 +581,7 @@ config I2C_PARPORT tristate "Parallel port adapter" depends on PARPORT select I2C_ALGOBIT + select I2C_SMBUS help This supports parallel port I2C adapters such as the ones made by Philips or Velleman, Analog Devices evaluation boards, and more. @@ -603,6 +605,7 @@ config I2C_PARPORT config I2C_PARPORT_LIGHT tristate "Parallel port adapter (light)" select I2C_ALGOBIT + select I2C_SMBUS help This supports parallel port I2C adapters such as the ones made by Philips or Velleman, Analog Devices evaluation boards, and more. 
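Most of the i2c bus-driver hunks that follow are a mechanical constification: the PCI ID tables gain const so they can be placed in read-only data, and tables that were tagged __devinitdata become __devinitconst so the const variant keeps its discard-after-init placement. A minimal sketch of the resulting idiom, using a device ID that already appears in this patch:

/* was: static struct pci_device_id demo_ids[] __devinitdata = { ... }; */
static const struct pci_device_id demo_ids[] __devinitconst = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
	{ 0, }
};

Tables without the init annotation (i801, piix4 and friends) simply become plain "static const struct pci_device_id".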
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c index 8de7d7b87bb..bd8f1e4d9e6 100644 --- a/drivers/i2c/busses/i2c-ali1535.c +++ b/drivers/i2c/busses/i2c-ali1535.c @@ -480,7 +480,7 @@ static struct i2c_adapter ali1535_adapter = { .algo = &smbus_algorithm, }; -static struct pci_device_id ali1535_ids[] = { +static const struct pci_device_id ali1535_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) }, { }, }; diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c index 4687af40dd5..a409cfcf062 100644 --- a/drivers/i2c/busses/i2c-ali1563.c +++ b/drivers/i2c/busses/i2c-ali1563.c @@ -417,7 +417,7 @@ static void __devexit ali1563_remove(struct pci_dev * dev) ali1563_shutdown(dev); } -static struct pci_device_id __devinitdata ali1563_id_table[] = { +static const struct pci_device_id ali1563_id_table[] __devinitconst = { { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1563) }, {}, }; diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c index e7e3205f128..659f63f5e4a 100644 --- a/drivers/i2c/busses/i2c-ali15x3.c +++ b/drivers/i2c/busses/i2c-ali15x3.c @@ -477,7 +477,7 @@ static struct i2c_adapter ali15x3_adapter = { .algo = &smbus_algorithm, }; -static struct pci_device_id ali15x3_ids[] = { +static const struct pci_device_id ali15x3_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) }, { 0, } }; diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c index 8f0b90ef8c7..c5a9fa488e7 100644 --- a/drivers/i2c/busses/i2c-amd756.c +++ b/drivers/i2c/busses/i2c-amd756.c @@ -308,7 +308,7 @@ static const char* chipname[] = { "nVidia nForce", "AMD8111", }; -static struct pci_device_id amd756_ids[] = { +static const struct pci_device_id amd756_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_740B), .driver_data = AMD756 }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7413), diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c index 5b4ad86ca16..d0dc970d737 100644 --- a/drivers/i2c/busses/i2c-amd8111.c +++ b/drivers/i2c/busses/i2c-amd8111.c @@ -351,7 +351,7 @@ static const struct i2c_algorithm smbus_algorithm = { }; -static struct pci_device_id amd8111_ids[] = { +static const struct pci_device_id amd8111_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS2) }, { 0, } }; diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c index bec9b845dd1..c767295ad1f 100644 --- a/drivers/i2c/busses/i2c-hydra.c +++ b/drivers/i2c/busses/i2c-hydra.c @@ -105,7 +105,7 @@ static struct i2c_adapter hydra_adap = { .algo_data = &hydra_bit_data, }; -static struct pci_device_id hydra_ids[] = { +static const struct pci_device_id hydra_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_HYDRA) }, { 0, } }; diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index df6ab553f97..9da5b05cdb5 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -41,7 +41,8 @@ Tolapai 0x5032 32 hard yes yes yes ICH10 0x3a30 32 hard yes yes yes ICH10 0x3a60 32 hard yes yes yes - PCH 0x3b30 32 hard yes yes yes + 3400/5 Series (PCH) 0x3b30 32 hard yes yes yes + Cougar Point (PCH) 0x1c22 32 hard yes yes yes Features supported by this driver: Software PEC no @@ -561,7 +562,7 @@ static struct i2c_adapter i801_adapter = { .algo = &smbus_algorithm, }; -static struct pci_device_id i801_ids[] = { +static const struct pci_device_id i801_ids[] = { { 
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_3) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_3) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_2) }, @@ -578,6 +579,7 @@ static struct pci_device_id i801_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CPT_SMBUS) }, { 0, } }; @@ -707,6 +709,7 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id case PCI_DEVICE_ID_INTEL_ICH10_4: case PCI_DEVICE_ID_INTEL_ICH10_5: case PCI_DEVICE_ID_INTEL_PCH_SMBUS: + case PCI_DEVICE_ID_INTEL_CPT_SMBUS: i801_features |= FEATURE_I2C_BLOCK_READ; /* fall through */ case PCI_DEVICE_ID_INTEL_82801DB_3: diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c index dba6eb053e2..69c22f79f23 100644 --- a/drivers/i2c/busses/i2c-isch.c +++ b/drivers/i2c/busses/i2c-isch.c @@ -256,7 +256,7 @@ static struct i2c_adapter sch_adapter = { .algo = &smbus_algorithm, }; -static struct pci_device_id sch_ids[] = { +static const struct pci_device_id sch_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) }, { 0, } }; diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c index ec11d1c4e77..4a700587ef1 100644 --- a/drivers/i2c/busses/i2c-nforce2.c +++ b/drivers/i2c/busses/i2c-nforce2.c @@ -308,7 +308,7 @@ static struct i2c_algorithm smbus_algorithm = { }; -static struct pci_device_id nforce2_ids[] = { +static const struct pci_device_id nforce2_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS) }, diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c index 322c5691e38..5f41ec0f72d 100644 --- a/drivers/i2c/busses/i2c-parport-light.c +++ b/drivers/i2c/busses/i2c-parport-light.c @@ -1,7 +1,7 @@ /* ------------------------------------------------------------------------ * * i2c-parport-light.c I2C bus over parallel port * * ------------------------------------------------------------------------ * - Copyright (C) 2003-2007 Jean Delvare <khali@linux-fr.org> + Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org> Based on older i2c-velleman.c driver Copyright (C) 1995-2000 Simon G. 
Vogl @@ -27,10 +27,12 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> +#include <linux/delay.h> #include <linux/platform_device.h> #include <linux/ioport.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> +#include <linux/i2c-smbus.h> #include <asm/io.h> #include "i2c-parport.h" @@ -43,6 +45,10 @@ static u16 base; module_param(base, ushort, 0); MODULE_PARM_DESC(base, "Base I/O address"); +static int irq; +module_param(irq, int, 0); +MODULE_PARM_DESC(irq, "IRQ (optional)"); + /* ----- Low-level parallel port access ----------------------------------- */ static inline void port_write(unsigned char p, unsigned char d) @@ -119,6 +125,16 @@ static struct i2c_adapter parport_adapter = { .name = "Parallel port adapter (light)", }; +/* SMBus alert support */ +static struct i2c_smbus_alert_setup alert_data = { + .alert_edge_triggered = 1, +}; +static struct i2c_client *ara; +static struct lineop parport_ctrl_irq = { + .val = (1 << 4), + .port = CTRL, +}; + static int __devinit i2c_parport_probe(struct platform_device *pdev) { int err; @@ -127,18 +143,39 @@ static int __devinit i2c_parport_probe(struct platform_device *pdev) parport_setsda(NULL, 1); parport_setscl(NULL, 1); /* Other init if needed (power on...) */ - if (adapter_parm[type].init.val) + if (adapter_parm[type].init.val) { line_set(1, &adapter_parm[type].init); + /* Give powered devices some time to settle */ + msleep(100); + } parport_adapter.dev.parent = &pdev->dev; err = i2c_bit_add_bus(&parport_adapter); - if (err) + if (err) { dev_err(&pdev->dev, "Unable to register with I2C\n"); - return err; + return err; + } + + /* Setup SMBus alert if supported */ + if (adapter_parm[type].smbus_alert && irq) { + alert_data.irq = irq; + ara = i2c_setup_smbus_alert(&parport_adapter, &alert_data); + if (ara) + line_set(1, &parport_ctrl_irq); + else + dev_warn(&pdev->dev, "Failed to register ARA client\n"); + } + + return 0; } static int __devexit i2c_parport_remove(struct platform_device *pdev) { + if (ara) { + line_set(0, &parport_ctrl_irq); + i2c_unregister_device(ara); + ara = NULL; + } i2c_del_adapter(&parport_adapter); /* Un-init if needed (power off...) */ @@ -205,6 +242,9 @@ static int __init i2c_parport_init(void) if (!request_region(base, 3, DRVNAME)) return -EBUSY; + if (irq != 0) + pr_info(DRVNAME ": using irq %d\n", irq); + if (!adapter_parm[type].getscl.val) parport_algo_data.getscl = NULL; diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c index 0d8998610c7..220fca7f23a 100644 --- a/drivers/i2c/busses/i2c-parport.c +++ b/drivers/i2c/busses/i2c-parport.c @@ -1,7 +1,7 @@ /* ------------------------------------------------------------------------ * * i2c-parport.c I2C bus over parallel port * * ------------------------------------------------------------------------ * - Copyright (C) 2003-2007 Jean Delvare <khali@linux-fr.org> + Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org> Based on older i2c-philips-par.c driver Copyright (C) 1995-2000 Simon G. 
Vogl @@ -27,9 +27,11 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> +#include <linux/delay.h> #include <linux/parport.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> +#include <linux/i2c-smbus.h> #include "i2c-parport.h" /* ----- Device list ------------------------------------------------------ */ @@ -38,6 +40,8 @@ struct i2c_par { struct pardevice *pdev; struct i2c_adapter adapter; struct i2c_algo_bit_data algo_data; + struct i2c_smbus_alert_setup alert_data; + struct i2c_client *ara; struct i2c_par *next; }; @@ -143,6 +147,19 @@ static struct i2c_algo_bit_data parport_algo_data = { /* ----- I2c and parallel port call-back functions and structures --------- */ +void i2c_parport_irq(void *data) +{ + struct i2c_par *adapter = data; + struct i2c_client *ara = adapter->ara; + + if (ara) { + dev_dbg(&ara->dev, "SMBus alert received\n"); + i2c_handle_smbus_alert(ara); + } else + dev_dbg(&adapter->adapter.dev, + "SMBus alert received but no ARA client!\n"); +} + static void i2c_parport_attach (struct parport *port) { struct i2c_par *adapter; @@ -154,8 +171,9 @@ static void i2c_parport_attach (struct parport *port) } pr_debug("i2c-parport: attaching to %s\n", port->name); + parport_disable_irq(port); adapter->pdev = parport_register_device(port, "i2c-parport", - NULL, NULL, NULL, PARPORT_FLAG_EXCL, NULL); + NULL, NULL, i2c_parport_irq, PARPORT_FLAG_EXCL, adapter); if (!adapter->pdev) { printk(KERN_ERR "i2c-parport: Unable to register with parport\n"); goto ERROR0; @@ -185,14 +203,29 @@ static void i2c_parport_attach (struct parport *port) parport_setsda(port, 1); parport_setscl(port, 1); /* Other init if needed (power on...) */ - if (adapter_parm[type].init.val) + if (adapter_parm[type].init.val) { line_set(port, 1, &adapter_parm[type].init); + /* Give powered devices some time to settle */ + msleep(100); + } if (i2c_bit_add_bus(&adapter->adapter) < 0) { printk(KERN_ERR "i2c-parport: Unable to register with I2C\n"); goto ERROR1; } + /* Setup SMBus alert if supported */ + if (adapter_parm[type].smbus_alert) { + adapter->alert_data.alert_edge_triggered = 1; + adapter->ara = i2c_setup_smbus_alert(&adapter->adapter, + &adapter->alert_data); + if (adapter->ara) + parport_enable_irq(port); + else + printk(KERN_WARNING "i2c-parport: Failed to register " + "ARA client\n"); + } + /* Add the new adapter to the list */ adapter->next = adapter_list; adapter_list = adapter; @@ -213,6 +246,10 @@ static void i2c_parport_detach (struct parport *port) for (prev = NULL, adapter = adapter_list; adapter; prev = adapter, adapter = adapter->next) { if (adapter->pdev->port == port) { + if (adapter->ara) { + parport_disable_irq(port); + i2c_unregister_device(adapter->ara); + } i2c_del_adapter(&adapter->adapter); /* Un-init if needed (power off...) 
*/ diff --git a/drivers/i2c/busses/i2c-parport.h b/drivers/i2c/busses/i2c-parport.h index ed69d846cb9..a9f66816546 100644 --- a/drivers/i2c/busses/i2c-parport.h +++ b/drivers/i2c/busses/i2c-parport.h @@ -1,7 +1,7 @@ /* ------------------------------------------------------------------------ * * i2c-parport.h I2C bus over parallel port * * ------------------------------------------------------------------------ * - Copyright (C) 2003-2004 Jean Delvare <khali@linux-fr.org> + Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -38,6 +38,7 @@ struct adapter_parm { struct lineop getsda; struct lineop getscl; struct lineop init; + unsigned int smbus_alert:1; }; static struct adapter_parm adapter_parm[] = { @@ -73,6 +74,7 @@ static struct adapter_parm adapter_parm[] = { .setscl = { 0x01, DATA, 1 }, .getsda = { 0x10, STAT, 1 }, .init = { 0xf0, DATA, 0 }, + .smbus_alert = 1, }, /* type 5: ADM1025, ADM1030 and ADM1031 evaluation boards */ { diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c index adf0fbb902f..0d20ff46a51 100644 --- a/drivers/i2c/busses/i2c-pasemi.c +++ b/drivers/i2c/busses/i2c-pasemi.c @@ -400,7 +400,7 @@ static void __devexit pasemi_smb_remove(struct pci_dev *dev) kfree(smbus); } -static struct pci_device_id pasemi_smb_ids[] = { +static const struct pci_device_id pasemi_smb_ids[] = { { PCI_DEVICE(0x1959, 0xa003) }, { 0, } }; diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index e56e4b6823c..ee9da6fcf69 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -472,7 +472,7 @@ static struct i2c_adapter piix4_adapter = { .algo = &smbus_algorithm, }; -static struct pci_device_id piix4_ids[] = { +static const struct pci_device_id piix4_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3) }, { PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_3) }, diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c index 844569f7d8b..55a71370c79 100644 --- a/drivers/i2c/busses/i2c-sis5595.c +++ b/drivers/i2c/busses/i2c-sis5595.c @@ -369,7 +369,7 @@ static struct i2c_adapter sis5595_adapter = { .algo = &smbus_algorithm, }; -static struct pci_device_id sis5595_ids[] __devinitdata = { +static const struct pci_device_id sis5595_ids[] __devinitconst = { { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) }, { 0, } }; diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c index 68cff7af701..2309c7f1bde 100644 --- a/drivers/i2c/busses/i2c-sis630.c +++ b/drivers/i2c/busses/i2c-sis630.c @@ -468,7 +468,7 @@ static struct i2c_adapter sis630_adapter = { .algo = &smbus_algorithm, }; -static struct pci_device_id sis630_ids[] __devinitdata = { +static const struct pci_device_id sis630_ids[] __devinitconst = { { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) }, { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) }, { 0, } diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c index 1649963b00d..d43d8f8943d 100644 --- a/drivers/i2c/busses/i2c-sis96x.c +++ b/drivers/i2c/busses/i2c-sis96x.c @@ -245,7 +245,7 @@ static struct i2c_adapter sis96x_adapter = { .algo = &smbus_algorithm, }; -static struct pci_device_id sis96x_ids[] = { +static const struct pci_device_id sis96x_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_SI, 
PCI_DEVICE_ID_SI_SMBUS) }, { 0, } }; diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c index e29b6d5ba8e..b5b1bbf37d3 100644 --- a/drivers/i2c/busses/i2c-tiny-usb.c +++ b/drivers/i2c/busses/i2c-tiny-usb.c @@ -31,11 +31,13 @@ #define CMD_I2C_IO_BEGIN (1<<0) #define CMD_I2C_IO_END (1<<1) -/* i2c bit delay, default is 10us -> 100kHz */ +/* i2c bit delay, default is 10us -> 100kHz max + (in practice, due to additional delays in the i2c bitbanging + code this results in a i2c clock of about 50kHz) */ static unsigned short delay = 10; module_param(delay, ushort, 0); -MODULE_PARM_DESC(delay, "bit delay in microseconds, " - "e.g. 10 for 100kHz (default is 100kHz)"); +MODULE_PARM_DESC(delay, "bit delay in microseconds " + "(default is 10us for 100kHz max)"); static int usb_read(struct i2c_adapter *adapter, int cmd, int value, int index, void *data, int len); @@ -137,7 +139,7 @@ static const struct i2c_algorithm usb_algorithm = { * Future Technology Devices International Ltd., later a pair was * bought from EZPrototypes */ -static struct usb_device_id i2c_tiny_usb_table [] = { +static const struct usb_device_id i2c_tiny_usb_table[] = { { USB_DEVICE(0x0403, 0xc631) }, /* FTDI */ { USB_DEVICE(0x1c40, 0x0534) }, /* EZPrototypes */ { } /* Terminating entry */ diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c index 8b24f192103..de78283bddb 100644 --- a/drivers/i2c/busses/i2c-via.c +++ b/drivers/i2c/busses/i2c-via.c @@ -89,7 +89,7 @@ static struct i2c_adapter vt586b_adapter = { }; -static struct pci_device_id vt586b_ids[] __devinitdata = { +static const struct pci_device_id vt586b_ids[] __devinitconst = { { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3) }, { 0, } }; diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c index a84a909e123..d57292e5dae 100644 --- a/drivers/i2c/busses/i2c-viapro.c +++ b/drivers/i2c/busses/i2c-viapro.c @@ -444,7 +444,7 @@ release_region: return error; } -static struct pci_device_id vt596_ids[] = { +static const struct pci_device_id vt596_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596_3), .driver_data = SMBBA1 }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596B_3), diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 10be7b5fbe9..3202a86f420 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -34,6 +34,7 @@ #include <linux/hardirq.h> #include <linux/irqflags.h> #include <linux/rwsem.h> +#include <linux/pm_runtime.h> #include <asm/uaccess.h> #include "i2c-core.h" @@ -184,6 +185,52 @@ static int i2c_device_pm_resume(struct device *dev) #define i2c_device_pm_resume NULL #endif +#ifdef CONFIG_PM_RUNTIME +static int i2c_device_runtime_suspend(struct device *dev) +{ + const struct dev_pm_ops *pm; + + if (!dev->driver) + return 0; + pm = dev->driver->pm; + if (!pm || !pm->runtime_suspend) + return 0; + return pm->runtime_suspend(dev); +} + +static int i2c_device_runtime_resume(struct device *dev) +{ + const struct dev_pm_ops *pm; + + if (!dev->driver) + return 0; + pm = dev->driver->pm; + if (!pm || !pm->runtime_resume) + return 0; + return pm->runtime_resume(dev); +} + +static int i2c_device_runtime_idle(struct device *dev) +{ + const struct dev_pm_ops *pm = NULL; + int ret; + + if (dev->driver) + pm = dev->driver->pm; + if (pm && pm->runtime_idle) { + ret = pm->runtime_idle(dev); + if (ret) + return ret; + } + + return pm_runtime_suspend(dev); +} +#else +#define i2c_device_runtime_suspend NULL +#define i2c_device_runtime_resume NULL 
+#define i2c_device_runtime_idle NULL +#endif + static int i2c_device_suspend(struct device *dev, pm_message_t mesg) { struct i2c_client *client = i2c_verify_client(dev); @@ -251,6 +298,9 @@ static const struct attribute_group *i2c_dev_attr_groups[] = { static const struct dev_pm_ops i2c_device_pm_ops = { .suspend = i2c_device_pm_suspend, .resume = i2c_device_pm_resume, + .runtime_suspend = i2c_device_runtime_suspend, + .runtime_resume = i2c_device_runtime_resume, + .runtime_idle = i2c_device_runtime_idle, }; struct bus_type i2c_bus_type = { @@ -1133,7 +1183,7 @@ EXPORT_SYMBOL(i2c_transfer); * i2c_master_send - issue a single I2C message in master transmit mode * @client: Handle to slave device * @buf: Data that will be written to the slave - * @count: How many bytes to write + * @count: How many bytes to write, must be less than 64k since msg.len is u16 * * Returns negative errno, or else the number of bytes written. */ @@ -1160,7 +1210,7 @@ EXPORT_SYMBOL(i2c_master_send); * i2c_master_recv - issue a single I2C message in master receive mode * @client: Handle to slave device * @buf: Where to store data read from slave - * @count: How many bytes to read + * @count: How many bytes to read, must be less than 64k since msg.len is u16 * * Returns negative errno, or else the number of bytes read. */ diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c new file mode 100644 index 00000000000..42127822124 --- /dev/null +++ b/drivers/i2c/i2c-smbus.c @@ -0,0 +1,263 @@ +/* + * i2c-smbus.c - SMBus extensions to the I2C protocol + * + * Copyright (C) 2008 David Brownell + * Copyright (C) 2010 Jean Delvare <khali@linux-fr.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/semaphore.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <linux/i2c.h> +#include <linux/i2c-smbus.h> + +struct i2c_smbus_alert { + unsigned int alert_edge_triggered:1; + int irq; + struct work_struct alert; + struct i2c_client *ara; /* Alert response address */ +}; + +struct alert_data { + unsigned short addr; + u8 flag:1; +}; + +/* If this is the alerting device, notify its driver */ +static int smbus_do_alert(struct device *dev, void *addrp) +{ + struct i2c_client *client = i2c_verify_client(dev); + struct alert_data *data = addrp; + + if (!client || client->addr != data->addr) + return 0; + if (client->flags & I2C_CLIENT_TEN) + return 0; + + /* + * Drivers should either disable alerts, or provide at least + * a minimal handler. Lock so client->driver won't change. 
+ */ + down(&dev->sem); + if (client->driver) { + if (client->driver->alert) + client->driver->alert(client, data->flag); + else + dev_warn(&client->dev, "no driver alert()!\n"); + } else + dev_dbg(&client->dev, "alert with no driver\n"); + up(&dev->sem); + + /* Stop iterating after we find the device */ + return -EBUSY; +} + +/* + * The alert IRQ handler needs to hand work off to a task which can issue + * SMBus calls, because those sleeping calls can't be made in IRQ context. + */ +static void smbus_alert(struct work_struct *work) +{ + struct i2c_smbus_alert *alert; + struct i2c_client *ara; + unsigned short prev_addr = 0; /* Not a valid address */ + + alert = container_of(work, struct i2c_smbus_alert, alert); + ara = alert->ara; + + for (;;) { + s32 status; + struct alert_data data; + + /* + * Devices with pending alerts reply in address order, low + * to high, because of slave transmit arbitration. After + * responding, an SMBus device stops asserting SMBALERT#. + * + * Note that SMBus 2.0 reserves 10-bit addresess for future + * use. We neither handle them, nor try to use PEC here. + */ + status = i2c_smbus_read_byte(ara); + if (status < 0) + break; + + data.flag = status & 1; + data.addr = status >> 1; + + if (data.addr == prev_addr) { + dev_warn(&ara->dev, "Duplicate SMBALERT# from dev " + "0x%02x, skipping\n", data.addr); + break; + } + dev_dbg(&ara->dev, "SMBALERT# from dev 0x%02x, flag %d\n", + data.addr, data.flag); + + /* Notify driver for the device which issued the alert */ + device_for_each_child(&ara->adapter->dev, &data, + smbus_do_alert); + prev_addr = data.addr; + } + + /* We handled all alerts; re-enable level-triggered IRQs */ + if (!alert->alert_edge_triggered) + enable_irq(alert->irq); +} + +static irqreturn_t smbalert_irq(int irq, void *d) +{ + struct i2c_smbus_alert *alert = d; + + /* Disable level-triggered IRQs until we handle them */ + if (!alert->alert_edge_triggered) + disable_irq_nosync(irq); + + schedule_work(&alert->alert); + return IRQ_HANDLED; +} + +/* Setup SMBALERT# infrastructure */ +static int smbalert_probe(struct i2c_client *ara, + const struct i2c_device_id *id) +{ + struct i2c_smbus_alert_setup *setup = ara->dev.platform_data; + struct i2c_smbus_alert *alert; + struct i2c_adapter *adapter = ara->adapter; + int res; + + alert = kzalloc(sizeof(struct i2c_smbus_alert), GFP_KERNEL); + if (!alert) + return -ENOMEM; + + alert->alert_edge_triggered = setup->alert_edge_triggered; + alert->irq = setup->irq; + INIT_WORK(&alert->alert, smbus_alert); + alert->ara = ara; + + if (setup->irq > 0) { + res = devm_request_irq(&ara->dev, setup->irq, smbalert_irq, + 0, "smbus_alert", alert); + if (res) { + kfree(alert); + return res; + } + } + + i2c_set_clientdata(ara, alert); + dev_info(&adapter->dev, "supports SMBALERT#, %s trigger\n", + setup->alert_edge_triggered ? 
"edge" : "level"); + + return 0; +} + +/* IRQ resource is managed so it is freed automatically */ +static int smbalert_remove(struct i2c_client *ara) +{ + struct i2c_smbus_alert *alert = i2c_get_clientdata(ara); + + cancel_work_sync(&alert->alert); + + i2c_set_clientdata(ara, NULL); + kfree(alert); + return 0; +} + +static const struct i2c_device_id smbalert_ids[] = { + { "smbus_alert", 0 }, + { /* LIST END */ } +}; +MODULE_DEVICE_TABLE(i2c, smbalert_ids); + +static struct i2c_driver smbalert_driver = { + .driver = { + .name = "smbus_alert", + }, + .probe = smbalert_probe, + .remove = smbalert_remove, + .id_table = smbalert_ids, +}; + +/** + * i2c_setup_smbus_alert - Setup SMBus alert support + * @adapter: the target adapter + * @setup: setup data for the SMBus alert handler + * Context: can sleep + * + * Setup handling of the SMBus alert protocol on a given I2C bus segment. + * + * Handling can be done either through our IRQ handler, or by the + * adapter (from its handler, periodic polling, or whatever). + * + * NOTE that if we manage the IRQ, we *MUST* know if it's level or + * edge triggered in order to hand it to the workqueue correctly. + * If triggering the alert seems to wedge the system, you probably + * should have said it's level triggered. + * + * This returns the ara client, which should be saved for later use with + * i2c_handle_smbus_alert() and ultimately i2c_unregister_device(); or NULL + * to indicate an error. + */ +struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter, + struct i2c_smbus_alert_setup *setup) +{ + struct i2c_board_info ara_board_info = { + I2C_BOARD_INFO("smbus_alert", 0x0c), + .platform_data = setup, + }; + + return i2c_new_device(adapter, &ara_board_info); +} +EXPORT_SYMBOL_GPL(i2c_setup_smbus_alert); + +/** + * i2c_handle_smbus_alert - Handle an SMBus alert + * @ara: the ARA client on the relevant adapter + * Context: can't sleep + * + * Helper function to be called from an I2C bus driver's interrupt + * handler. It will schedule the alert work, in turn calling the + * corresponding I2C device driver's alert function. + * + * It is assumed that ara is a valid i2c client previously returned by + * i2c_setup_smbus_alert(). 
+ */ +int i2c_handle_smbus_alert(struct i2c_client *ara) +{ + struct i2c_smbus_alert *alert = i2c_get_clientdata(ara); + + return schedule_work(&alert->alert); +} +EXPORT_SYMBOL_GPL(i2c_handle_smbus_alert); + +static int __init i2c_smbus_init(void) +{ + return i2c_add_driver(&smbalert_driver); +} + +static void __exit i2c_smbus_exit(void) +{ + i2c_del_driver(&smbalert_driver); +} + +module_init(i2c_smbus_init); +module_exit(i2c_smbus_exit); + +MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>"); +MODULE_DESCRIPTION("SMBus protocol extensions support"); +MODULE_LICENSE("GPL"); diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c index 878f8ec6dbe..57d00caefc8 100644 --- a/drivers/ide/aec62xx.c +++ b/drivers/ide/aec62xx.c @@ -81,15 +81,15 @@ static u8 pci_bus_clock_list_ultra (u8 speed, struct chipset_bus_clock_list_entr return chipset_table->ultra_settings; } -static void aec6210_set_mode(ide_drive_t *drive, const u8 speed) +static void aec6210_set_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); struct ide_host *host = pci_get_drvdata(dev); struct chipset_bus_clock_list_entry *bus_clock = host->host_priv; u16 d_conf = 0; u8 ultra = 0, ultra_conf = 0; u8 tmp0 = 0, tmp1 = 0, tmp2 = 0; + const u8 speed = drive->dma_mode; unsigned long flags; local_irq_save(flags); @@ -109,15 +109,15 @@ static void aec6210_set_mode(ide_drive_t *drive, const u8 speed) local_irq_restore(flags); } -static void aec6260_set_mode(ide_drive_t *drive, const u8 speed) +static void aec6260_set_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); struct ide_host *host = pci_get_drvdata(dev); struct chipset_bus_clock_list_entry *bus_clock = host->host_priv; u8 unit = drive->dn & 1; u8 tmp1 = 0, tmp2 = 0; u8 ultra = 0, drive_conf = 0, ultra_conf = 0; + const u8 speed = drive->dma_mode; unsigned long flags; local_irq_save(flags); @@ -134,9 +134,10 @@ static void aec6260_set_mode(ide_drive_t *drive, const u8 speed) local_irq_restore(flags); } -static void aec_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void aec_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - drive->hwif->port_ops->set_dma_mode(drive, pio + XFER_PIO_0); + drive->dma_mode = drive->pio_mode; + hwif->port_ops->set_dma_mode(hwif, drive); } static int init_chipset_aec62xx(struct pci_dev *dev) diff --git a/drivers/ide/ali14xx.c b/drivers/ide/ali14xx.c index 90da1f953ed..25b9fe3a9f8 100644 --- a/drivers/ide/ali14xx.c +++ b/drivers/ide/ali14xx.c @@ -109,13 +109,14 @@ static DEFINE_SPINLOCK(ali14xx_lock); * This function computes timing parameters * and sets controller registers accordingly. */ -static void ali14xx_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void ali14xx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { int driveNum; int time1, time2; u8 param1, param2, param3, param4; unsigned long flags; int bus_speed = ide_vlb_clk ? ide_vlb_clk : 50; + const u8 pio = drive->pio_mode - XFER_PIO_0; struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); /* calculate timing, according to PIO mode */ diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c index 0abc43f3101..2c8016ad0e2 100644 --- a/drivers/ide/alim15x3.c +++ b/drivers/ide/alim15x3.c @@ -8,7 +8,7 @@ * Copyright (C) 2002 Alan Cox * ALi (now ULi M5228) support by Clear Zhang <Clear.Zhang@ali.com.tw> * Copyright (C) 2007 MontaVista Software, Inc. 
<source@mvista.com> - * Copyright (C) 2007 Bartlomiej Zolnierkiewicz <bzolnier@gmail.com> + * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz * * (U)DMA capable version of ali 1533/1543(C), 1535(D) * @@ -48,61 +48,84 @@ static u8 m5229_revision; static u8 chip_is_1543c_e; static struct pci_dev *isa_dev; +static void ali_fifo_control(ide_hwif_t *hwif, ide_drive_t *drive, int on) +{ + struct pci_dev *pdev = to_pci_dev(hwif->dev); + int pio_fifo = 0x54 + hwif->channel; + u8 fifo; + int shift = 4 * (drive->dn & 1); + + pci_read_config_byte(pdev, pio_fifo, &fifo); + fifo &= ~(0x0F << shift); + fifo |= (on << shift); + pci_write_config_byte(pdev, pio_fifo, fifo); +} + +static void ali_program_timings(ide_hwif_t *hwif, ide_drive_t *drive, + struct ide_timing *t, u8 ultra) +{ + struct pci_dev *dev = to_pci_dev(hwif->dev); + int port = hwif->channel ? 0x5c : 0x58; + int udmat = 0x56 + hwif->channel; + u8 unit = drive->dn & 1, udma; + int shift = 4 * unit; + + /* Set up the UDMA */ + pci_read_config_byte(dev, udmat, &udma); + udma &= ~(0x0F << shift); + udma |= ultra << shift; + pci_write_config_byte(dev, udmat, udma); + + if (t == NULL) + return; + + t->setup = clamp_val(t->setup, 1, 8) & 7; + t->act8b = clamp_val(t->act8b, 1, 8) & 7; + t->rec8b = clamp_val(t->rec8b, 1, 16) & 15; + t->active = clamp_val(t->active, 1, 8) & 7; + t->recover = clamp_val(t->recover, 1, 16) & 15; + + pci_write_config_byte(dev, port, t->setup); + pci_write_config_byte(dev, port + 1, (t->act8b << 4) | t->rec8b); + pci_write_config_byte(dev, port + unit + 2, + (t->active << 4) | t->recover); +} + /** * ali_set_pio_mode - set host controller for PIO mode + * @hwif: port * @drive: drive - * @pio: PIO mode number * * Program the controller for the given PIO mode. */ -static void ali_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void ali_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; - struct pci_dev *dev = to_pci_dev(hwif->dev); - struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); - int s_time = t->setup, a_time = t->active, c_time = t->cycle; - u8 s_clc, a_clc, r_clc; - unsigned long flags; + ide_drive_t *pair = ide_get_pair_dev(drive); int bus_speed = ide_pci_clk ? ide_pci_clk : 33; - int port = hwif->channel ? 0x5c : 0x58; - int portFIFO = hwif->channel ? 
0x55 : 0x54; - u8 cd_dma_fifo = 0, unit = drive->dn & 1; - - if ((s_clc = (s_time * bus_speed + 999) / 1000) >= 8) - s_clc = 0; - if ((a_clc = (a_time * bus_speed + 999) / 1000) >= 8) - a_clc = 0; - - if (!(r_clc = (c_time * bus_speed + 999) / 1000 - a_clc - s_clc)) { - r_clc = 1; - } else { - if (r_clc >= 16) - r_clc = 0; + unsigned long T = 1000000 / bus_speed; /* PCI clock based */ + struct ide_timing t; + + ide_timing_compute(drive, drive->pio_mode, &t, T, 1); + if (pair) { + struct ide_timing p; + + ide_timing_compute(pair, pair->pio_mode, &p, T, 1); + ide_timing_merge(&p, &t, &t, + IDE_TIMING_SETUP | IDE_TIMING_8BIT); + if (pair->dma_mode) { + ide_timing_compute(pair, pair->dma_mode, &p, T, 1); + ide_timing_merge(&p, &t, &t, + IDE_TIMING_SETUP | IDE_TIMING_8BIT); + } } - local_irq_save(flags); - + /* * PIO mode => ATA FIFO on, ATAPI FIFO off */ - pci_read_config_byte(dev, portFIFO, &cd_dma_fifo); - if (drive->media==ide_disk) { - if (unit) { - pci_write_config_byte(dev, portFIFO, (cd_dma_fifo & 0x0F) | 0x50); - } else { - pci_write_config_byte(dev, portFIFO, (cd_dma_fifo & 0xF0) | 0x05); - } - } else { - if (unit) { - pci_write_config_byte(dev, portFIFO, cd_dma_fifo & 0x0F); - } else { - pci_write_config_byte(dev, portFIFO, cd_dma_fifo & 0xF0); - } - } - - pci_write_config_byte(dev, port, s_clc); - pci_write_config_byte(dev, port + unit + 2, (a_clc << 4) | r_clc); - local_irq_restore(flags); + ali_fifo_control(hwif, drive, (drive->media == ide_disk) ? 0x05 : 0x00); + + ali_program_timings(hwif, drive, &t, 0); } /** @@ -132,44 +155,42 @@ static u8 ali_udma_filter(ide_drive_t *drive) /** * ali_set_dma_mode - set host controller for DMA mode + * @hwif: port * @drive: drive - * @speed: DMA mode * * Configure the hardware for the desired IDE transfer mode. */ -static void ali_set_dma_mode(ide_drive_t *drive, const u8 speed) +static void ali_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; + static u8 udma_timing[7] = { 0xC, 0xB, 0xA, 0x9, 0x8, 0xF, 0xD }; struct pci_dev *dev = to_pci_dev(hwif->dev); - u8 speed1 = speed; - u8 unit = drive->dn & 1; + ide_drive_t *pair = ide_get_pair_dev(drive); + int bus_speed = ide_pci_clk ? ide_pci_clk : 33; + unsigned long T = 1000000 / bus_speed; /* PCI clock based */ + const u8 speed = drive->dma_mode; u8 tmpbyte = 0x00; - int m5229_udma = (hwif->channel) ? 0x57 : 0x56; - - if (speed == XFER_UDMA_6) - speed1 = 0x47; + struct ide_timing t; if (speed < XFER_UDMA_0) { - u8 ultra_enable = (unit) ? 0x7f : 0xf7; - /* - * clear "ultra enable" bit - */ - pci_read_config_byte(dev, m5229_udma, &tmpbyte); - tmpbyte &= ultra_enable; - pci_write_config_byte(dev, m5229_udma, tmpbyte); - - /* - * FIXME: Oh, my... DMA timings are never set. 
- */ + ide_timing_compute(drive, drive->dma_mode, &t, T, 1); + if (pair) { + struct ide_timing p; + + ide_timing_compute(pair, pair->pio_mode, &p, T, 1); + ide_timing_merge(&p, &t, &t, + IDE_TIMING_SETUP | IDE_TIMING_8BIT); + if (pair->dma_mode) { + ide_timing_compute(pair, pair->dma_mode, + &p, T, 1); + ide_timing_merge(&p, &t, &t, + IDE_TIMING_SETUP | IDE_TIMING_8BIT); + } + } + ali_program_timings(hwif, drive, &t, 0); } else { - pci_read_config_byte(dev, m5229_udma, &tmpbyte); - tmpbyte &= (0x0f << ((1-unit) << 2)); - /* - * enable ultra dma and set timing - */ - tmpbyte |= ((0x08 | ((4-speed1)&0x07)) << (unit << 2)); - pci_write_config_byte(dev, m5229_udma, tmpbyte); + ali_program_timings(hwif, drive, NULL, + udma_timing[speed - XFER_UDMA_0]); if (speed >= XFER_UDMA_3) { pci_read_config_byte(dev, 0x4b, &tmpbyte); tmpbyte |= 1; @@ -355,19 +376,13 @@ static int ali_cable_override(struct pci_dev *pdev) * * This checks if the controller and the cable are capable * of UDMA66 transfers. It doesn't check the drives. - * But see note 2 below! - * - * FIXME: frobs bits that are not defined on newer ALi devicea */ static u8 ali_cable_detect(ide_hwif_t *hwif) { struct pci_dev *dev = to_pci_dev(hwif->dev); - unsigned long flags; u8 cbl = ATA_CBL_PATA40, tmpbyte; - local_irq_save(flags); - if (m5229_revision >= 0xC2) { /* * m5229 80-pin cable detection (from Host View) @@ -387,8 +402,6 @@ static u8 ali_cable_detect(ide_hwif_t *hwif) } } - local_irq_restore(flags); - return cbl; } @@ -584,6 +597,6 @@ static void __exit ali15x3_ide_exit(void) module_init(ali15x3_ide_init); module_exit(ali15x3_ide_exit); -MODULE_AUTHOR("Michael Aubry, Andrzej Krzysztofowicz, CJ, Andre Hedrick, Alan Cox"); +MODULE_AUTHOR("Michael Aubry, Andrzej Krzysztofowicz, CJ, Andre Hedrick, Alan Cox, Bartlomiej Zolnierkiewicz"); MODULE_DESCRIPTION("PCI driver module for ALi 15x3 IDE"); MODULE_LICENSE("GPL"); diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c index 628cd2e5fed..3747b2561f0 100644 --- a/drivers/ide/amd74xx.c +++ b/drivers/ide/amd74xx.c @@ -3,7 +3,7 @@ * IDE driver for Linux. * * Copyright (c) 2000-2002 Vojtech Pavlik - * Copyright (c) 2007-2008 Bartlomiej Zolnierkiewicz + * Copyright (c) 2007-2010 Bartlomiej Zolnierkiewicz * * Based on the work of: * Andre Hedrick @@ -70,7 +70,8 @@ static void amd_set_speed(struct pci_dev *dev, u8 dn, u8 udma_mask, default: return; } - pci_write_config_byte(dev, AMD_UDMA_TIMING + offset + (3 - dn), t); + if (timing->udma) + pci_write_config_byte(dev, AMD_UDMA_TIMING + offset + 3 - dn, t); } /* @@ -78,14 +79,14 @@ static void amd_set_speed(struct pci_dev *dev, u8 dn, u8 udma_mask, * to a desired transfer mode. It also can be called by upper layers. */ -static void amd_set_drive(ide_drive_t *drive, const u8 speed) +static void amd_set_drive(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); ide_drive_t *peer = ide_get_pair_dev(drive); struct ide_timing t, p; int T, UT; u8 udma_mask = hwif->ultra_mask; + const u8 speed = drive->dma_mode; T = 1000000000 / amd_clock; UT = (udma_mask == ATA_UDMA2) ? 
T : (T / 2); @@ -93,7 +94,7 @@ static void amd_set_drive(ide_drive_t *drive, const u8 speed) ide_timing_compute(drive, speed, &t, T, UT); if (peer) { - ide_timing_compute(peer, peer->current_speed, &p, T, UT); + ide_timing_compute(peer, peer->pio_mode, &p, T, UT); ide_timing_merge(&p, &t, &t, IDE_TIMING_8BIT); } @@ -107,9 +108,10 @@ static void amd_set_drive(ide_drive_t *drive, const u8 speed) * amd_set_pio_mode() is a callback from upper layers for PIO-only tuning. */ -static void amd_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void amd_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - amd_set_drive(drive, XFER_PIO_0 + pio); + drive->dma_mode = drive->pio_mode; + amd_set_drive(hwif, drive); } static void amd7409_cable_detect(struct pci_dev *dev) @@ -340,6 +342,6 @@ static void __exit amd74xx_ide_exit(void) module_init(amd74xx_ide_init); module_exit(amd74xx_ide_exit); -MODULE_AUTHOR("Vojtech Pavlik"); +MODULE_AUTHOR("Vojtech Pavlik, Bartlomiej Zolnierkiewicz"); MODULE_DESCRIPTION("AMD PCI IDE driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/ide/at91_ide.c b/drivers/ide/at91_ide.c index 248219a89a6..000a78e5246 100644 --- a/drivers/ide/at91_ide.c +++ b/drivers/ide/at91_ide.c @@ -172,11 +172,12 @@ static void at91_ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, leave_16bit(chipselect, mode); } -static void at91_ide_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void at91_ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { struct ide_timing *timing; - u8 chipselect = drive->hwif->select_data; + u8 chipselect = hwif->select_data; int use_iordy = 0; + const u8 pio = drive->pio_mode - XFER_PIO_0; pdbg("chipselect %u pio %u\n", chipselect, pio); diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c index 837322b10a4..15f0ead89f5 100644 --- a/drivers/ide/atiixp.c +++ b/drivers/ide/atiixp.c @@ -42,19 +42,20 @@ static DEFINE_SPINLOCK(atiixp_lock); /** * atiixp_set_pio_mode - set host controller for PIO mode + * @hwif: port * @drive: drive - * @pio: PIO mode number * * Set the interface PIO mode. */ -static void atiixp_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void atiixp_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - struct pci_dev *dev = to_pci_dev(drive->hwif->dev); + struct pci_dev *dev = to_pci_dev(hwif->dev); unsigned long flags; int timing_shift = (drive->dn ^ 1) * 8; u32 pio_timing_data; u16 pio_mode_data; + const u8 pio = drive->pio_mode - XFER_PIO_0; spin_lock_irqsave(&atiixp_lock, flags); @@ -74,21 +75,22 @@ static void atiixp_set_pio_mode(ide_drive_t *drive, const u8 pio) /** * atiixp_set_dma_mode - set host controller for DMA mode + * @hwif: port * @drive: drive - * @speed: DMA mode * * Set a ATIIXP host controller to the desired DMA mode. This involves * programming the right timing data into the PCI configuration space. 
*/ -static void atiixp_set_dma_mode(ide_drive_t *drive, const u8 speed) +static void atiixp_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - struct pci_dev *dev = to_pci_dev(drive->hwif->dev); + struct pci_dev *dev = to_pci_dev(hwif->dev); unsigned long flags; int timing_shift = (drive->dn ^ 1) * 8; u32 tmp32; u16 tmp16; u16 udma_ctl = 0; + const u8 speed = drive->dma_mode; spin_lock_irqsave(&atiixp_lock, flags); diff --git a/drivers/ide/au1xxx-ide.c b/drivers/ide/au1xxx-ide.c index 349a67bf1a3..b26c23416fa 100644 --- a/drivers/ide/au1xxx-ide.c +++ b/drivers/ide/au1xxx-ide.c @@ -99,12 +99,11 @@ static void au1xxx_output_data(ide_drive_t *drive, struct ide_cmd *cmd, } #endif -static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void au1xxx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2); - /* set pio mode! */ - switch(pio) { + switch (drive->pio_mode - XFER_PIO_0) { case 0: mem_sttime = SBC_IDE_TIMING(PIO0); @@ -161,11 +160,11 @@ static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio) au_writel(mem_stcfg,MEM_STCFG2); } -static void auide_set_dma_mode(ide_drive_t *drive, const u8 speed) +static void auide_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2); - switch(speed) { + switch (drive->dma_mode) { #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA case XFER_MW_DMA_2: mem_sttime = SBC_IDE_TIMING(MDMA2); @@ -297,8 +296,8 @@ static int auide_dma_test_irq(ide_drive_t *drive) */ drive->waiting_for_dma++; if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) { - printk(KERN_WARNING "%s: timeout waiting for ddma to \ - complete\n", drive->name); + printk(KERN_WARNING "%s: timeout waiting for ddma to complete\n", + drive->name); return 1; } udelay(10); diff --git a/drivers/ide/cmd640.c b/drivers/ide/cmd640.c index 1a32d62ed86..d2b8b272bc2 100644 --- a/drivers/ide/cmd640.c +++ b/drivers/ide/cmd640.c @@ -572,9 +572,10 @@ static void cmd640_set_mode(ide_drive_t *drive, unsigned int index, program_drive_counts(drive, index); } -static void cmd640_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void cmd640_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { unsigned int index = 0, cycle_time; + const u8 pio = drive->pio_mode - XFER_PIO_0; u8 b; switch (pio) { @@ -605,7 +606,7 @@ static void cmd640_set_pio_mode(ide_drive_t *drive, const u8 pio) } #endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */ -static void cmd640_init_dev(ide_drive_t *drive) +static void __init cmd640_init_dev(ide_drive_t *drive) { unsigned int i = drive->hwif->channel * 2 + (drive->dn & 1); diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c index f2500c8826b..5f80312e636 100644 --- a/drivers/ide/cmd64x.c +++ b/drivers/ide/cmd64x.c @@ -7,6 +7,7 @@ * Copyright (C) 1998 David S. Miller (davem@redhat.com) * * Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org> + * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz * Copyright (C) 2007,2009 MontaVista Software, Inc. <source@mvista.com> */ @@ -50,72 +51,42 @@ #define UDIDETCR1 0x7B #define DTPR1 0x7C -static u8 quantize_timing(int timing, int quant) -{ - return (timing + quant - 1) / quant; -} - -/* - * This routine calculates active/recovery counts and then writes them into - * the chipset registers. 
- */ -static void program_cycle_times (ide_drive_t *drive, int cycle_time, int active_time) +static void cmd64x_program_timings(ide_drive_t *drive, u8 mode) { + ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(drive->hwif->dev); - int clock_time = 1000 / (ide_pci_clk ? ide_pci_clk : 33); - u8 cycle_count, active_count, recovery_count, drwtim; + int bus_speed = ide_pci_clk ? ide_pci_clk : 33; + const unsigned long T = 1000000 / bus_speed; static const u8 recovery_values[] = {15, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0}; + static const u8 setup_values[] = {0x40, 0x40, 0x40, 0x80, 0, 0xc0}; + static const u8 arttim_regs[4] = {ARTTIM0, ARTTIM1, ARTTIM23, ARTTIM23}; static const u8 drwtim_regs[4] = {DRWTIM0, DRWTIM1, DRWTIM2, DRWTIM3}; + struct ide_timing t; + u8 arttim = 0; - cycle_count = quantize_timing( cycle_time, clock_time); - active_count = quantize_timing(active_time, clock_time); - recovery_count = cycle_count - active_count; + ide_timing_compute(drive, mode, &t, T, 0); /* * In case we've got too long recovery phase, try to lengthen * the active phase */ - if (recovery_count > 16) { - active_count += recovery_count - 16; - recovery_count = 16; + if (t.recover > 16) { + t.active += t.recover - 16; + t.recover = 16; } - if (active_count > 16) /* shouldn't actually happen... */ - active_count = 16; + if (t.active > 16) /* shouldn't actually happen... */ + t.active = 16; /* * Convert values to internal chipset representation */ - recovery_count = recovery_values[recovery_count]; - active_count &= 0x0f; + t.recover = recovery_values[t.recover]; + t.active &= 0x0f; /* Program the active/recovery counts into the DRWTIM register */ - drwtim = (active_count << 4) | recovery_count; - (void) pci_write_config_byte(dev, drwtim_regs[drive->dn], drwtim); -} - -/* - * This routine writes into the chipset registers - * PIO setup/active/recovery timings. - */ -static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio) -{ - ide_hwif_t *hwif = drive->hwif; - struct pci_dev *dev = to_pci_dev(hwif->dev); - struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); - unsigned long setup_count; - unsigned int cycle_time; - u8 arttim = 0; - - static const u8 setup_values[] = {0x40, 0x40, 0x40, 0x80, 0, 0xc0}; - static const u8 arttim_regs[4] = {ARTTIM0, ARTTIM1, ARTTIM23, ARTTIM23}; - - cycle_time = ide_pio_cycle_time(drive, pio); - - program_cycle_times(drive, cycle_time, t->active); - - setup_count = quantize_timing(t->setup, - 1000 / (ide_pci_clk ? ide_pci_clk : 33)); + pci_write_config_byte(dev, drwtim_regs[drive->dn], + (t.active << 4) | t.recover); /* * The primary channel has individual address setup timing registers @@ -126,15 +97,21 @@ static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio) if (hwif->channel) { ide_drive_t *pair = ide_get_pair_dev(drive); - ide_set_drivedata(drive, (void *)setup_count); + if (pair) { + struct ide_timing tp; - if (pair) - setup_count = max_t(u8, setup_count, - (unsigned long)ide_get_drivedata(pair)); + ide_timing_compute(pair, pair->pio_mode, &tp, T, 0); + ide_timing_merge(&t, &tp, &t, IDE_TIMING_SETUP); + if (pair->dma_mode) { + ide_timing_compute(pair, pair->dma_mode, + &tp, T, 0); + ide_timing_merge(&tp, &t, &t, IDE_TIMING_SETUP); + } + } } - if (setup_count > 5) /* shouldn't actually happen... */ - setup_count = 5; + if (t.setup > 5) /* shouldn't actually happen... */ + t.setup = 5; /* * Program the address setup clocks into the ARTTIM registers. 
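The conversion above follows the pattern repeated throughout this series (ali15x3, amd74xx, cy82c693, and the cmd64x hunks here): the IDE core now stores the selected transfer mode in drive->pio_mode / drive->dma_mode before invoking the port hook, the hook receives the hwif explicitly, and timings for registers shared between master and slave are merged with the pair device's requirements via ide_timing_compute() and ide_timing_merge(). A minimal sketch of that hook shape, assuming only helpers that appear in this patch (ide_get_pair_dev, ide_pci_clk, ide_timing_compute, ide_timing_merge); the function name and the trailing pr_debug() are illustrative placeholders, not code taken from the patch:

#include <linux/kernel.h>
#include <linux/ide.h>

/* Illustrative only: the common shape of a ->set_pio_mode(hwif, drive) hook */
static void example_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	ide_drive_t *pair = ide_get_pair_dev(drive);
	int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
	unsigned long T = 1000000 / bus_speed;	/* PCI clock period in ns */
	struct ide_timing t;

	/* timings for the mode the core pre-selected for this device */
	ide_timing_compute(drive, drive->pio_mode, &t, T, 0);

	if (pair) {
		struct ide_timing p;

		/* registers shared with the pair device must suit it too */
		ide_timing_compute(pair, pair->pio_mode, &p, T, 0);
		ide_timing_merge(&p, &t, &t, IDE_TIMING_SETUP | IDE_TIMING_8BIT);
		if (pair->dma_mode) {
			ide_timing_compute(pair, pair->dma_mode, &p, T, 0);
			ide_timing_merge(&p, &t, &t,
					 IDE_TIMING_SETUP | IDE_TIMING_8BIT);
		}
	}

	/*
	 * A real driver clamps t.setup/t.active/t.recover and writes them
	 * into its chipset registers via hwif (the DRWTIM/ARTTIM writes in
	 * cmd64x_program_timings above); this sketch only reports the
	 * merged values.
	 */
	pr_debug("%s: setup %d active %d recover %d\n",
		 drive->name, t.setup, t.active, t.recover);
}

The same merge logic shows up for DMA modes as well (see ali_set_dma_mode above), which is why the core also records drive->dma_mode before calling ->set_dma_mode(hwif, drive).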
@@ -144,7 +121,7 @@ static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio) if (hwif->channel) arttim &= ~ARTTIM23_INTR_CH1; arttim &= ~0xc0; - arttim |= setup_values[setup_count]; + arttim |= setup_values[t.setup]; (void) pci_write_config_byte(dev, arttim_regs[drive->dn], arttim); } @@ -153,8 +130,10 @@ static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio) * Special cases are 8: prefetch off, 9: prefetch on (both never worked) */ -static void cmd64x_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void cmd64x_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { + const u8 pio = drive->pio_mode - XFER_PIO_0; + /* * Filter out the prefetch control values * to prevent PIO5 from being programmed @@ -162,20 +141,18 @@ static void cmd64x_set_pio_mode(ide_drive_t *drive, const u8 pio) if (pio == 8 || pio == 9) return; - cmd64x_tune_pio(drive, pio); + cmd64x_program_timings(drive, XFER_PIO_0 + pio); } -static void cmd64x_set_dma_mode(ide_drive_t *drive, const u8 speed) +static void cmd64x_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); u8 unit = drive->dn & 0x01; u8 regU = 0, pciU = hwif->channel ? UDIDETCR1 : UDIDETCR0; + const u8 speed = drive->dma_mode; - if (speed >= XFER_SW_DMA_0) { - (void) pci_read_config_byte(dev, pciU, ®U); - regU &= ~(unit ? 0xCA : 0x35); - } + pci_read_config_byte(dev, pciU, ®U); + regU &= ~(unit ? 0xCA : 0x35); switch(speed) { case XFER_UDMA_5: @@ -197,18 +174,13 @@ static void cmd64x_set_dma_mode(ide_drive_t *drive, const u8 speed) regU |= unit ? 0xC2 : 0x31; break; case XFER_MW_DMA_2: - program_cycle_times(drive, 120, 70); - break; case XFER_MW_DMA_1: - program_cycle_times(drive, 150, 80); - break; case XFER_MW_DMA_0: - program_cycle_times(drive, 480, 215); + cmd64x_program_timings(drive, speed); break; } - if (speed >= XFER_SW_DMA_0) - (void) pci_write_config_byte(dev, pciU, regU); + pci_write_config_byte(dev, pciU, regU); } static void cmd648_clear_irq(ide_drive_t *drive) @@ -471,6 +443,6 @@ static void __exit cmd64x_ide_exit(void) module_init(cmd64x_ide_init); module_exit(cmd64x_ide_exit); -MODULE_AUTHOR("Eddie Dost, David Miller, Andre Hedrick"); +MODULE_AUTHOR("Eddie Dost, David Miller, Andre Hedrick, Bartlomiej Zolnierkiewicz"); MODULE_DESCRIPTION("PCI driver module for CMD64x IDE"); MODULE_LICENSE("GPL"); diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c index 09f98ed0731..2c1e5f7cd26 100644 --- a/drivers/ide/cs5520.c +++ b/drivers/ide/cs5520.c @@ -57,11 +57,11 @@ static struct pio_clocks cs5520_pio_clocks[]={ {1, 2, 1} }; -static void cs5520_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void cs5520_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; struct pci_dev *pdev = to_pci_dev(hwif->dev); int controller = drive->dn > 1 ? 
1 : 0; + const u8 pio = drive->pio_mode - XFER_PIO_0; /* 8bit CAT/CRT - 8bit command timing for channel */ pci_write_config_byte(pdev, 0x62 + controller, @@ -81,11 +81,12 @@ static void cs5520_set_pio_mode(ide_drive_t *drive, const u8 pio) (cs5520_pio_clocks[pio].assert)); } -static void cs5520_set_dma_mode(ide_drive_t *drive, const u8 speed) +static void cs5520_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { printk(KERN_ERR "cs55x0: bad ide timing.\n"); - cs5520_set_pio_mode(drive, 0); + drive->pio_mode = XFER_PIO_0 + 0; + cs5520_set_pio_mode(hwif, drive); } static const struct ide_port_ops cs5520_port_ops = { diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c index 40bf05eddf6..4dc4eb92b07 100644 --- a/drivers/ide/cs5530.c +++ b/drivers/ide/cs5530.c @@ -41,8 +41,8 @@ static unsigned int cs5530_pio_timings[2][5] = { /** * cs5530_set_pio_mode - set host controller for PIO mode + * @hwif: port * @drive: drive - * @pio: PIO mode number * * Handles setting of PIO mode for the chipset. * @@ -50,10 +50,11 @@ static unsigned int cs5530_pio_timings[2][5] = { * will have valid default PIO timings set up before we get here. */ -static void cs5530_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void cs5530_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - unsigned long basereg = CS5530_BASEREG(drive->hwif); + unsigned long basereg = CS5530_BASEREG(hwif); unsigned int format = (inl(basereg + 4) >> 31) & 1; + const u8 pio = drive->pio_mode - XFER_PIO_0; outl(cs5530_pio_timings[format][pio], basereg + ((drive->dn & 1)<<3)); } @@ -99,12 +100,12 @@ out: return mask; } -static void cs5530_set_dma_mode(ide_drive_t *drive, const u8 mode) +static void cs5530_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { unsigned long basereg; unsigned int reg, timings = 0; - switch (mode) { + switch (drive->dma_mode) { case XFER_UDMA_0: timings = 0x00921250; break; case XFER_UDMA_1: timings = 0x00911140; break; case XFER_UDMA_2: timings = 0x00911030; break; @@ -112,7 +113,7 @@ static void cs5530_set_dma_mode(ide_drive_t *drive, const u8 mode) case XFER_MW_DMA_1: timings = 0x00012121; break; case XFER_MW_DMA_2: timings = 0x00002020; break; } - basereg = CS5530_BASEREG(drive->hwif); + basereg = CS5530_BASEREG(hwif); reg = inl(basereg + 4); /* get drive0 config register */ timings |= reg & 0x80000000; /* preserve PIO format bit */ if ((drive-> dn & 1) == 0) { /* are we configuring drive0? */ diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c index b883838adc2..5059fafadf2 100644 --- a/drivers/ide/cs5535.c +++ b/drivers/ide/cs5535.c @@ -86,7 +86,7 @@ static void cs5535_set_speed(ide_drive_t *drive, const u8 speed) cmd = pioa = speed - XFER_PIO_0; if (pair) { - u8 piob = ide_get_best_pio_mode(pair, 255, 4); + u8 piob = pair->pio_mode - XFER_PIO_0; if (piob < cmd) cmd = piob; @@ -129,28 +129,28 @@ static void cs5535_set_speed(ide_drive_t *drive, const u8 speed) /** * cs5535_set_dma_mode - set host controller for DMA mode + * @hwif: port * @drive: drive - * @speed: DMA mode * * Programs the chipset for DMA mode. */ -static void cs5535_set_dma_mode(ide_drive_t *drive, const u8 speed) +static void cs5535_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - cs5535_set_speed(drive, speed); + cs5535_set_speed(drive, drive->dma_mode); } /** * cs5535_set_pio_mode - set host controller for PIO mode + * @hwif: port * @drive: drive - * @pio: PIO mode number * * A callback from the upper layers for PIO-only tuning. 
*/ -static void cs5535_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void cs5535_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - cs5535_set_speed(drive, XFER_PIO_0 + pio); + cs5535_set_speed(drive, drive->pio_mode); } static u8 cs5535_cable_detect(ide_hwif_t *hwif) diff --git a/drivers/ide/cs5536.c b/drivers/ide/cs5536.c index 9623b852c61..24214ab60ac 100644 --- a/drivers/ide/cs5536.c +++ b/drivers/ide/cs5536.c @@ -125,11 +125,11 @@ static u8 cs5536_cable_detect(ide_hwif_t *hwif) /** * cs5536_set_pio_mode - PIO timing setup + * @hwif: ATA port * @drive: ATA device - * @pio: PIO mode number */ -static void cs5536_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void cs5536_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { static const u8 drv_timings[5] = { 0x98, 0x55, 0x32, 0x21, 0x20, @@ -143,15 +143,16 @@ static void cs5536_set_pio_mode(ide_drive_t *drive, const u8 pio) 0x99, 0x92, 0x90, 0x22, 0x20, }; - struct pci_dev *pdev = to_pci_dev(drive->hwif->dev); + struct pci_dev *pdev = to_pci_dev(hwif->dev); ide_drive_t *pair = ide_get_pair_dev(drive); int cshift = (drive->dn & 1) ? IDE_CAST_D1_SHIFT : IDE_CAST_D0_SHIFT; unsigned long timings = (unsigned long)ide_get_drivedata(drive); u32 cast; + const u8 pio = drive->pio_mode - XFER_PIO_0; u8 cmd_pio = pio; if (pair) - cmd_pio = min(pio, ide_get_best_pio_mode(pair, 255, 4)); + cmd_pio = min_t(u8, pio, pair->pio_mode - XFER_PIO_0); timings &= (IDE_DRV_MASK << 8); timings |= drv_timings[pio]; @@ -172,11 +173,11 @@ static void cs5536_set_pio_mode(ide_drive_t *drive, const u8 pio) /** * cs5536_set_dma_mode - DMA timing setup + * @hwif: ATA port * @drive: ATA device - * @mode: DMA mode */ -static void cs5536_set_dma_mode(ide_drive_t *drive, const u8 mode) +static void cs5536_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { static const u8 udma_timings[6] = { 0xc2, 0xc1, 0xc0, 0xc4, 0xc5, 0xc6, @@ -186,10 +187,11 @@ static void cs5536_set_dma_mode(ide_drive_t *drive, const u8 mode) 0x67, 0x21, 0x20, }; - struct pci_dev *pdev = to_pci_dev(drive->hwif->dev); + struct pci_dev *pdev = to_pci_dev(hwif->dev); int dshift = (drive->dn & 1) ? IDE_D1_SHIFT : IDE_D0_SHIFT; unsigned long timings = (unsigned long)ide_get_drivedata(drive); u32 etc; + const u8 mode = drive->dma_mode; cs5536_read(pdev, ETC, &etc); diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c index d6e2cbbc53a..9383f67deae 100644 --- a/drivers/ide/cy82c693.c +++ b/drivers/ide/cy82c693.c @@ -1,43 +1,11 @@ /* * Copyright (C) 1998-2000 Andreas S. Krebs (akrebs@altavista.net), Maintainer * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>, Integrator + * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz * * CYPRESS CY82C693 chipset IDE controller * * The CY82C693 chipset is used on Digital's PC-Alpha 164SX boards. - * Writing the driver was quite simple, since most of the job is - * done by the generic pci-ide support. - * The hard part was finding the CY82C693's datasheet on Cypress's - * web page :-(. But Altavista solved this problem :-). - * - * - * Notes: - * - I recently got a 16.8G IBM DTTA, so I was able to test it with - * a large and fast disk - the results look great, so I'd say the - * driver is working fine :-) - * hdparm -t reports 8.17 MB/sec at about 6% CPU usage for the DTTA - * - this is my first linux driver, so there's probably a lot of room - * for optimizations and bug fixing, so feel free to do it. - * - if using PIO mode it's a good idea to set the PIO mode and - * 32-bit I/O support (if possible), e.g. 
hdparm -p2 -c1 /dev/hda - * - I had some problems with my IBM DHEA with PIO modes < 2 - * (lost interrupts) ????? - * - first tests with DMA look okay, they seem to work, but there is a - * problem with sound - the BusMaster IDE TimeOut should fixed this - * - * Ancient History: - * AMH@1999-08-24: v0.34 init_cy82c693_chip moved to pci_init_cy82c693 - * ASK@1999-01-23: v0.33 made a few minor code clean ups - * removed DMA clock speed setting by default - * added boot message - * ASK@1998-11-01: v0.32 added support to set BusMaster IDE TimeOut - * added support to set DMA Controller Clock Speed - * ASK@1998-10-31: v0.31 fixed problem with setting to high DMA modes - * on some drives. - * ASK@1998-10-29: v0.3 added support to set DMA modes - * ASK@1998-10-28: v0.2 added support to set PIO modes - * ASK@1998-10-27: v0.1 first version - chipset detection - * */ #include <linux/module.h> @@ -81,87 +49,13 @@ #define CY82_INDEX_CHANNEL1 0x31 #define CY82_INDEX_TIMEOUT 0x32 -/* the min and max PCI bus speed in MHz - from datasheet */ -#define CY82C963_MIN_BUS_SPEED 25 -#define CY82C963_MAX_BUS_SPEED 33 - -/* the struct for the PIO mode timings */ -typedef struct pio_clocks_s { - u8 address_time; /* Address setup (clocks) */ - u8 time_16r; /* clocks for 16bit IOR (0xF0=Active/data, 0x0F=Recovery) */ - u8 time_16w; /* clocks for 16bit IOW (0xF0=Active/data, 0x0F=Recovery) */ - u8 time_8; /* clocks for 8bit (0xF0=Active/data, 0x0F=Recovery) */ -} pio_clocks_t; - -/* - * calc clocks using bus_speed - * returns (rounded up) time in bus clocks for time in ns - */ -static int calc_clk(int time, int bus_speed) -{ - int clocks; - - clocks = (time*bus_speed+999)/1000 - 1; - - if (clocks < 0) - clocks = 0; - - if (clocks > 0x0F) - clocks = 0x0F; - - return clocks; -} - -/* - * compute the values for the clock registers for PIO - * mode and pci_clk [MHz] speed - * - * NOTE: for mode 0,1 and 2 drives 8-bit IDE command control registers are used - * for mode 3 and 4 drives 8 and 16-bit timings are the same - * - */ -static void compute_clocks(u8 pio, pio_clocks_t *p_pclk) -{ - struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); - int clk1, clk2; - int bus_speed = ide_pci_clk ? ide_pci_clk : 33; - - /* we don't check against CY82C693's min and max speed, - * so you can play with the idebus=xx parameter - */ - - /* let's calc the address setup time clocks */ - p_pclk->address_time = (u8)calc_clk(t->setup, bus_speed); - - /* let's calc the active and recovery time clocks */ - clk1 = calc_clk(t->active, bus_speed); - - /* calc recovery timing */ - clk2 = t->cycle - t->active - t->setup; - - clk2 = calc_clk(clk2, bus_speed); - - clk1 = (clk1<<4)|clk2; /* combine active and recovery clocks */ - - /* note: we use the same values for 16bit IOR and IOW - * those are all the same, since I don't have other - * timings than those from ide-lib.c - */ - - p_pclk->time_16r = (u8)clk1; - p_pclk->time_16w = (u8)clk1; - - /* what are good values for 8bit ?? */ - p_pclk->time_8 = (u8)clk1; -} - /* * set DMA mode a specific channel for CY82C693 */ -static void cy82c693_set_dma_mode(ide_drive_t *drive, const u8 mode) +static void cy82c693_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; + const u8 mode = drive->dma_mode; u8 single = (mode & 0x10) >> 4, index = 0, data = 0; index = hwif->channel ? 
CY82_INDEX_CHANNEL1 : CY82_INDEX_CHANNEL0; @@ -186,12 +80,14 @@ static void cy82c693_set_dma_mode(ide_drive_t *drive, const u8 mode) outb(data, CY82_DATA_PORT); } -static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); - pio_clocks_t pclk; + int bus_speed = ide_pci_clk ? ide_pci_clk : 33; + const unsigned long T = 1000000 / bus_speed; unsigned int addrCtrl; + struct ide_timing t; + u8 time_16, time_8; /* select primary or secondary channel */ if (hwif->index > 0) { /* drive is on the secondary channel */ @@ -204,8 +100,12 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio) } } - /* let's calc the values for this PIO mode */ - compute_clocks(pio, &pclk); + ide_timing_compute(drive, drive->pio_mode, &t, T, 1); + + time_16 = clamp_val(t.recover - 1, 0, 15) | + (clamp_val(t.active - 1, 0, 15) << 4); + time_8 = clamp_val(t.act8b - 1, 0, 15) | + (clamp_val(t.rec8b - 1, 0, 15) << 4); /* now let's write the clocks registers */ if ((drive->dn & 1) == 0) { @@ -217,13 +117,13 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio) pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl); addrCtrl &= (~0xF); - addrCtrl |= (unsigned int)pclk.address_time; + addrCtrl |= clamp_val(t.setup - 1, 0, 15); pci_write_config_dword(dev, CY82_IDE_ADDRSETUP, addrCtrl); /* now let's set the remaining registers */ - pci_write_config_byte(dev, CY82_IDE_MASTER_IOR, pclk.time_16r); - pci_write_config_byte(dev, CY82_IDE_MASTER_IOW, pclk.time_16w); - pci_write_config_byte(dev, CY82_IDE_MASTER_8BIT, pclk.time_8); + pci_write_config_byte(dev, CY82_IDE_MASTER_IOR, time_16); + pci_write_config_byte(dev, CY82_IDE_MASTER_IOW, time_16); + pci_write_config_byte(dev, CY82_IDE_MASTER_8BIT, time_8); } else { /* * set slave drive @@ -233,13 +133,13 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio) pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl); addrCtrl &= (~0xF0); - addrCtrl |= ((unsigned int)pclk.address_time<<4); + addrCtrl |= (clamp_val(t.setup - 1, 0, 15) << 4); pci_write_config_dword(dev, CY82_IDE_ADDRSETUP, addrCtrl); /* now let's set the remaining registers */ - pci_write_config_byte(dev, CY82_IDE_SLAVE_IOR, pclk.time_16r); - pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, pclk.time_16w); - pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, pclk.time_8); + pci_write_config_byte(dev, CY82_IDE_SLAVE_IOR, time_16); + pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, time_16); + pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, time_8); } } @@ -325,6 +225,6 @@ static void __exit cy82c693_ide_exit(void) module_init(cy82c693_ide_init); module_exit(cy82c693_ide_exit); -MODULE_AUTHOR("Andreas Krebs, Andre Hedrick"); +MODULE_AUTHOR("Andreas Krebs, Andre Hedrick, Bartlomiej Zolnierkiewicz"); MODULE_DESCRIPTION("PCI driver module for the Cypress CY82C693 IDE"); MODULE_LICENSE("GPL"); diff --git a/drivers/ide/dtc2278.c b/drivers/ide/dtc2278.c index c6b13812298..6929f7fce93 100644 --- a/drivers/ide/dtc2278.c +++ b/drivers/ide/dtc2278.c @@ -68,11 +68,11 @@ static void sub22 (char b, char c) static DEFINE_SPINLOCK(dtc2278_lock); -static void dtc2278_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void dtc2278_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { unsigned long flags; - if (pio >= 3) { + if (drive->pio_mode >= XFER_PIO_3) { spin_lock_irqsave(&dtc2278_lock, flags); /* * This enables PIO mode4 (3?) 
on the first interface diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c index 4d90ac2dbb1..b885c1d548f 100644 --- a/drivers/ide/hpt366.c +++ b/drivers/ide/hpt366.c @@ -627,14 +627,14 @@ static u32 get_speed_setting(u8 speed, struct hpt_info *info) return info->timings->clock_table[info->clock][i]; } -static void hpt3xx_set_mode(ide_drive_t *drive, const u8 speed) +static void hpt3xx_set_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); struct hpt_info *info = hpt3xx_get_info(hwif->dev); struct hpt_timings *t = info->timings; u8 itr_addr = 0x40 + (drive->dn * 4); u32 old_itr = 0; + const u8 speed = drive->dma_mode; u32 new_itr = get_speed_setting(speed, info); u32 itr_mask = speed < XFER_MW_DMA_0 ? t->pio_mask : (speed < XFER_UDMA_0 ? t->dma_mask : @@ -651,9 +651,10 @@ static void hpt3xx_set_mode(ide_drive_t *drive, const u8 speed) pci_write_config_dword(dev, itr_addr, new_itr); } -static void hpt3xx_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void hpt3xx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - hpt3xx_set_mode(drive, XFER_PIO_0 + pio); + drive->dma_mode = drive->pio_mode; + hpt3xx_set_mode(hwif, drive); } static void hpt3xx_maskproc(ide_drive_t *drive, int mask) diff --git a/drivers/ide/ht6560b.c b/drivers/ide/ht6560b.c index aafed8060e1..d81e49680c3 100644 --- a/drivers/ide/ht6560b.c +++ b/drivers/ide/ht6560b.c @@ -279,9 +279,10 @@ static void ht_set_prefetch(ide_drive_t *drive, u8 state) #endif } -static void ht6560b_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void ht6560b_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { unsigned long flags, config; + const u8 pio = drive->pio_mode - XFER_PIO_0; u8 timing; switch (pio) { diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c index 0f67f1abbbd..4a697a238e2 100644 --- a/drivers/ide/icside.c +++ b/drivers/ide/icside.c @@ -65,6 +65,8 @@ static struct cardinfo icside_cardinfo_v6_2 = { }; struct icside_state { + unsigned int channel; + unsigned int enabled; void __iomem *irq_port; void __iomem *ioc_base; unsigned int sel; @@ -114,11 +116,18 @@ static void icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr) struct icside_state *state = ec->irq_data; void __iomem *base = state->irq_port; - writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1); - readb(base + ICS_ARCIN_V6_INTROFFSET_2); + state->enabled = 1; - writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2); - readb(base + ICS_ARCIN_V6_INTROFFSET_1); + switch (state->channel) { + case 0: + writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1); + readb(base + ICS_ARCIN_V6_INTROFFSET_2); + break; + case 1: + writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2); + readb(base + ICS_ARCIN_V6_INTROFFSET_1); + break; + } } /* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr) @@ -128,6 +137,8 @@ static void icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr) { struct icside_state *state = ec->irq_data; + state->enabled = 0; + readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1); readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2); } @@ -149,6 +160,44 @@ static const expansioncard_ops_t icside_ops_arcin_v6 = { .irqpending = icside_irqpending_arcin_v6, }; +/* + * Handle routing of interrupts. This is called before + * we write the command to the drive. 
+ */ +static void icside_maskproc(ide_drive_t *drive, int mask) +{ + ide_hwif_t *hwif = drive->hwif; + struct expansion_card *ec = ECARD_DEV(hwif->dev); + struct icside_state *state = ecard_get_drvdata(ec); + unsigned long flags; + + local_irq_save(flags); + + state->channel = hwif->channel; + + if (state->enabled && !mask) { + switch (hwif->channel) { + case 0: + writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_1); + readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2); + break; + case 1: + writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_2); + readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1); + break; + } + } else { + readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2); + readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1); + } + + local_irq_restore(flags); +} + +static const struct ide_port_ops icside_v6_no_dma_port_ops = { + .maskproc = icside_maskproc, +}; + #ifdef CONFIG_BLK_DEV_IDEDMA_ICS /* * SG-DMA support. @@ -185,10 +234,11 @@ static const expansioncard_ops_t icside_ops_arcin_v6 = { * MW1 80 50 50 150 C * MW2 70 25 25 120 C */ -static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode) +static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { unsigned long cycle_time; int use_dma_info = 0; + const u8 xfer_mode = drive->dma_mode; switch (xfer_mode) { case XFER_MW_DMA_2: @@ -228,6 +278,7 @@ static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode) static const struct ide_port_ops icside_v6_port_ops = { .set_dma_mode = icside_set_dma_mode, + .maskproc = icside_maskproc, }; static void icside_dma_host_set(ide_drive_t *drive, int on) @@ -272,6 +323,11 @@ static int icside_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd) BUG_ON(dma_channel_active(ec->dma)); /* + * Ensure that we have the right interrupt routed. + */ + icside_maskproc(drive, 0); + + /* * Route the DMA signals to the correct interface. 
*/ writeb(state->sel | hwif->channel, state->ioc_base); @@ -399,6 +455,7 @@ err_free: static const struct ide_port_info icside_v6_port_info __initdata = { .init_dma = icside_dma_off_init, + .port_ops = &icside_v6_no_dma_port_ops, .dma_ops = &icside_v6_dma_ops, .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO, .mwdma_mask = ATA_MWDMA2, diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c index dd6396384c2..ab87e4f7cec 100644 --- a/drivers/ide/ide-cs.c +++ b/drivers/ide/ide-cs.c @@ -121,19 +121,11 @@ static int ide_probe(struct pcmcia_device *link) static void ide_detach(struct pcmcia_device *link) { ide_info_t *info = link->priv; - ide_hwif_t *hwif = info->host->ports[0]; - unsigned long data_addr, ctl_addr; dev_dbg(&link->dev, "ide_detach(0x%p)\n", link); - data_addr = hwif->io_ports.data_addr; - ctl_addr = hwif->io_ports.ctl_addr; - ide_release(link); - release_region(ctl_addr, 1); - release_region(data_addr, 8); - kfree(info); } /* ide_detach */ @@ -354,12 +346,19 @@ static void ide_release(struct pcmcia_device *link) dev_dbg(&link->dev, "ide_release(0x%p)\n", link); - if (info->ndev) - /* FIXME: if this fails we need to queue the cleanup somehow - -- need to investigate the required PCMCIA magic */ + if (info->ndev) { + ide_hwif_t *hwif = host->ports[0]; + unsigned long data_addr, ctl_addr; + + data_addr = hwif->io_ports.data_addr; + ctl_addr = hwif->io_ports.ctl_addr; + ide_host_remove(host); + info->ndev = 0; - info->ndev = 0; + release_region(ctl_addr, 1); + release_region(data_addr, 8); + } pcmcia_disable_device(link); } /* ide_release */ diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c index 1099bf7cf96..c6935c78757 100644 --- a/drivers/ide/ide-devsets.c +++ b/drivers/ide/ide-devsets.c @@ -105,15 +105,17 @@ static int set_pio_mode(ide_drive_t *drive, int arg) return -ENOSYS; if (set_pio_mode_abuse(drive->hwif, arg)) { + drive->pio_mode = arg + XFER_PIO_0; + if (arg == 8 || arg == 9) { unsigned long flags; /* take lock for IDE_DFLAG_[NO_]UNMASK/[NO_]IO_32BIT */ spin_lock_irqsave(&hwif->lock, flags); - port_ops->set_pio_mode(drive, arg); + port_ops->set_pio_mode(hwif, drive); spin_unlock_irqrestore(&hwif->lock, flags); } else - port_ops->set_pio_mode(drive, arg); + port_ops->set_pio_mode(hwif, drive); } else { int keep_dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA); diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c index 222c1ef65fb..376f2dc410c 100644 --- a/drivers/ide/ide-iops.c +++ b/drivers/ide/ide-iops.c @@ -231,7 +231,7 @@ u8 eighty_ninty_three(ide_drive_t *drive) u16 *id = drive->id; int ivb = ide_in_drive_list(id, ivb_list); - if (hwif->cbl == ATA_CBL_PATA40_SHORT) + if (hwif->cbl == ATA_CBL_SATA || hwif->cbl == ATA_CBL_PATA40_SHORT) return 1; if (ivb) diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index f8c1ae6ad74..fbedd35feb4 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -1042,6 +1042,8 @@ static void ide_port_init_devices(ide_hwif_t *hwif) if (hwif->host_flags & IDE_HFLAG_NO_UNMASK_IRQS) drive->dev_flags |= IDE_DFLAG_NO_UNMASK; + drive->pio_mode = XFER_PIO_0; + if (port_ops && port_ops->init_dev) port_ops->init_dev(drive); } diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 6a0e6254216..b07232880ec 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c @@ -1365,7 +1365,7 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count) * supported here, and not in the corresponding block interface. Our own * ide-tape ioctls are supported on both interfaces. 
*/ -static int idetape_chrdev_ioctl(struct inode *inode, struct file *file, +static long do_idetape_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct ide_tape_obj *tape = file->private_data; @@ -1420,6 +1420,16 @@ static int idetape_chrdev_ioctl(struct inode *inode, struct file *file, } } +static long idetape_chrdev_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + long ret; + lock_kernel(); + ret = do_idetape_chrdev_ioctl(file, cmd, arg); + unlock_kernel(); + return ret; +} + /* * Do a mode sense page 0 with block descriptor and if it succeeds set the tape * block size with the reported value. @@ -1888,7 +1898,7 @@ static const struct file_operations idetape_fops = { .owner = THIS_MODULE, .read = idetape_chrdev_read, .write = idetape_chrdev_write, - .ioctl = idetape_chrdev_ioctl, + .unlocked_ioctl = idetape_chrdev_ioctl, .open = idetape_chrdev_open, .release = idetape_chrdev_release, }; diff --git a/drivers/ide/ide-timings.c b/drivers/ide/ide-timings.c index 001a56365be..0e05f75934c 100644 --- a/drivers/ide/ide-timings.c +++ b/drivers/ide/ide-timings.c @@ -166,12 +166,13 @@ int ide_timing_compute(ide_drive_t *drive, u8 speed, if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ memset(&p, 0, sizeof(p)); - if (speed <= XFER_PIO_2) - p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO]; - else if ((speed <= XFER_PIO_4) || - (speed == XFER_PIO_5 && !ata_id_is_cfa(id))) - p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY]; - else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) + if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) { + if (speed <= XFER_PIO_2) + p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO]; + else if ((speed <= XFER_PIO_4) || + (speed == XFER_PIO_5 && !ata_id_is_cfa(id))) + p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY]; + } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) p.cycle = id[ATA_ID_EIDE_DMA_MIN]; ide_timing_merge(&p, t, t, IDE_TIMING_CYCLE | IDE_TIMING_CYC8B); @@ -185,11 +186,10 @@ int ide_timing_compute(ide_drive_t *drive, u8 speed, /* * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, * S.M.A.R.T and some other commands. We have to ensure that the - * DMA cycle timing is slower/equal than the fastest PIO timing. + * DMA cycle timing is slower/equal than the current PIO timing. */ if (speed >= XFER_SW_DMA_0) { - u8 pio = ide_get_best_pio_mode(drive, 255, 5); - ide_timing_compute(drive, XFER_PIO_0 + pio, &p, T, UT); + ide_timing_compute(drive, drive->pio_mode, &p, T, UT); ide_timing_merge(&p, t, t, IDE_TIMING_ALL); } diff --git a/drivers/ide/ide-xfer-mode.c b/drivers/ide/ide-xfer-mode.c index 46d203ce60c..5fc8d5c17de 100644 --- a/drivers/ide/ide-xfer-mode.c +++ b/drivers/ide/ide-xfer-mode.c @@ -58,7 +58,7 @@ EXPORT_SYMBOL(ide_xfer_verbose); * This is used by most chipset support modules when "auto-tuning". */ -u8 ide_get_best_pio_mode(ide_drive_t *drive, u8 mode_wanted, u8 max_mode) +static u8 ide_get_best_pio_mode(ide_drive_t *drive, u8 mode_wanted, u8 max_mode) { u16 *id = drive->id; int pio_mode = -1, overridden = 0; @@ -105,7 +105,6 @@ u8 ide_get_best_pio_mode(ide_drive_t *drive, u8 mode_wanted, u8 max_mode) return pio_mode; } -EXPORT_SYMBOL_GPL(ide_get_best_pio_mode); int ide_pio_need_iordy(ide_drive_t *drive, const u8 pio) { @@ -135,17 +134,20 @@ int ide_set_pio_mode(ide_drive_t *drive, const u8 mode) * set transfer mode on the device in ->set_pio_mode method... 
*/ if (port_ops->set_dma_mode == NULL) { - port_ops->set_pio_mode(drive, mode - XFER_PIO_0); + drive->pio_mode = mode; + port_ops->set_pio_mode(hwif, drive); return 0; } if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) { if (ide_config_drive_speed(drive, mode)) return -1; - port_ops->set_pio_mode(drive, mode - XFER_PIO_0); + drive->pio_mode = mode; + port_ops->set_pio_mode(hwif, drive); return 0; } else { - port_ops->set_pio_mode(drive, mode - XFER_PIO_0); + drive->pio_mode = mode; + port_ops->set_pio_mode(hwif, drive); return ide_config_drive_speed(drive, mode); } } @@ -164,10 +166,12 @@ int ide_set_dma_mode(ide_drive_t *drive, const u8 mode) if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) { if (ide_config_drive_speed(drive, mode)) return -1; - port_ops->set_dma_mode(drive, mode); + drive->dma_mode = mode; + port_ops->set_dma_mode(hwif, drive); return 0; } else { - port_ops->set_dma_mode(drive, mode); + drive->dma_mode = mode; + port_ops->set_dma_mode(hwif, drive); return ide_config_drive_speed(drive, mode); } } diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c index 0d266a5b524..560e66d0765 100644 --- a/drivers/ide/it8172.c +++ b/drivers/ide/it8172.c @@ -37,12 +37,12 @@ #define DRV_NAME "IT8172" -static void it8172_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void it8172_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); u16 drive_enables; u32 drive_timing; + const u8 pio = drive->pio_mode - XFER_PIO_0; /* * The highest value of DIOR/DIOW pulse width and recovery time @@ -77,14 +77,14 @@ static void it8172_set_pio_mode(ide_drive_t *drive, const u8 pio) pci_write_config_dword(dev, 0x44, drive_timing); } -static void it8172_set_dma_mode(ide_drive_t *drive, const u8 speed) +static void it8172_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); int a_speed = 3 << (drive->dn * 4); int u_flag = 1 << drive->dn; int u_speed = 0; u8 reg48, reg4a; + const u8 speed = drive->dma_mode; pci_read_config_byte(dev, 0x48, ®48); pci_read_config_byte(dev, 0x4a, ®4a); @@ -98,14 +98,14 @@ static void it8172_set_dma_mode(ide_drive_t *drive, const u8 speed) pci_write_config_byte(dev, 0x4a, reg4a | u_speed); } else { const u8 mwdma_to_pio[] = { 0, 3, 4 }; - u8 pio; pci_write_config_byte(dev, 0x48, reg48 & ~u_flag); pci_write_config_byte(dev, 0x4a, reg4a & ~a_speed); - pio = mwdma_to_pio[speed - XFER_MW_DMA_0]; + drive->pio_mode = + mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0; - it8172_set_pio_mode(drive, pio); + it8172_set_pio_mode(hwif, drive); } } diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c index 47976167796..46816ba2641 100644 --- a/drivers/ide/it8213.c +++ b/drivers/ide/it8213.c @@ -17,15 +17,14 @@ /** * it8213_set_pio_mode - set host controller for PIO mode + * @hwif: port * @drive: drive - * @pio: PIO mode number * * Set the interface PIO mode. 
*/ -static void it8213_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void it8213_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); int is_slave = drive->dn & 1; int master_port = 0x40; @@ -35,6 +34,7 @@ static void it8213_set_pio_mode(ide_drive_t *drive, const u8 pio) u8 slave_data; static DEFINE_SPINLOCK(tune_lock); int control = 0; + const u8 pio = drive->pio_mode - XFER_PIO_0; static const u8 timings[][2] = { { 0, 0 }, @@ -74,15 +74,14 @@ static void it8213_set_pio_mode(ide_drive_t *drive, const u8 pio) /** * it8213_set_dma_mode - set host controller for DMA mode + * @hwif: port * @drive: drive - * @speed: DMA mode * * Tune the ITE chipset for the DMA mode. */ -static void it8213_set_dma_mode(ide_drive_t *drive, const u8 speed) +static void it8213_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); u8 maslave = 0x40; int a_speed = 3 << (drive->dn * 4); @@ -92,6 +91,7 @@ static void it8213_set_dma_mode(ide_drive_t *drive, const u8 speed) int u_speed = 0; u16 reg4042, reg4a; u8 reg48, reg54, reg55; + const u8 speed = drive->dma_mode; pci_read_config_word(dev, maslave, ®4042); pci_read_config_byte(dev, 0x48, ®48); @@ -120,7 +120,6 @@ static void it8213_set_dma_mode(ide_drive_t *drive, const u8 speed) pci_write_config_byte(dev, 0x54, reg54 & ~v_flag); } else { const u8 mwdma_to_pio[] = { 0, 3, 4 }; - u8 pio; if (reg48 & u_flag) pci_write_config_byte(dev, 0x48, reg48 & ~u_flag); @@ -132,11 +131,12 @@ static void it8213_set_dma_mode(ide_drive_t *drive, const u8 speed) pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); if (speed >= XFER_MW_DMA_0) - pio = mwdma_to_pio[speed - XFER_MW_DMA_0]; + drive->pio_mode = + mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0; else - pio = 2; /* only SWDMA2 is allowed */ + drive->pio_mode = XFER_PIO_2; /* for SWDMA2 */ - it8213_set_pio_mode(drive, pio); + it8213_set_pio_mode(hwif, drive); } } diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c index 51aa745246d..b2709c73348 100644 --- a/drivers/ide/it821x.c +++ b/drivers/ide/it821x.c @@ -228,18 +228,18 @@ static void it821x_clock_strategy(ide_drive_t *drive) /** * it821x_set_pio_mode - set host controller for PIO mode + * @hwif: port * @drive: drive - * @pio: PIO mode number * * Tune the host to the desired PIO mode taking into the consideration * the maximum PIO mode supported by the other device on the cable. */ -static void it821x_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void it821x_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; struct it821x_dev *itdev = ide_get_hwifdata(hwif); ide_drive_t *pair = ide_get_pair_dev(drive); + const u8 pio = drive->pio_mode - XFER_PIO_0; u8 unit = drive->dn & 1, set_pio = pio; /* Spec says 89 ref driver uses 88 */ @@ -252,7 +252,7 @@ static void it821x_set_pio_mode(ide_drive_t *drive, const u8 pio) * on the cable. */ if (pair) { - u8 pair_pio = ide_get_best_pio_mode(pair, 255, 4); + u8 pair_pio = pair->pio_mode - XFER_PIO_0; /* trim PIO to the slowest of the master/slave */ if (pair_pio < set_pio) set_pio = pair_pio; @@ -393,14 +393,16 @@ static int it821x_dma_end(ide_drive_t *drive) /** * it821x_set_dma_mode - set host controller for DMA mode + * @hwif: port * @drive: drive - * @speed: DMA mode * * Tune the ITE chipset for the desired DMA mode. 
*/ -static void it821x_set_dma_mode(ide_drive_t *drive, const u8 speed) +static void it821x_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { + const u8 speed = drive->dma_mode; + /* * MWDMA tuning is really hard because our MWDMA and PIO * timings are kept in the same place. We can switch in the diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c index bf2be6431b2..74c2c4a6d90 100644 --- a/drivers/ide/jmicron.c +++ b/drivers/ide/jmicron.c @@ -80,19 +80,19 @@ static u8 jmicron_cable_detect(ide_hwif_t *hwif) return ATA_CBL_PATA80; } -static void jmicron_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void jmicron_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { } /** * jmicron_set_dma_mode - set host controller for DMA mode + * @hwif: port * @drive: drive - * @mode: DMA mode * * As the JMicron snoops for timings we don't need to do anything here. */ -static void jmicron_set_dma_mode(ide_drive_t *drive, const u8 mode) +static void jmicron_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { } diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c index f1d70d6630f..1a53a4c375e 100644 --- a/drivers/ide/opti621.c +++ b/drivers/ide/opti621.c @@ -8,77 +8,6 @@ * Jan Harkes <jaharkes@cwi.nl>, * Mark Lord <mlord@pobox.com> * Some parts of code are from ali14xx.c and from rz1000.c. - * - * OPTi is trademark of OPTi, Octek is trademark of Octek. - * - * I used docs from OPTi databook, from ftp.opti.com, file 9123-0002.ps - * and disassembled/traced setupvic.exe (DOS program). - * It increases kernel code about 2 kB. - * I don't have this card no more, but I hope I can get some in case - * of needed development. - * My card is Octek PIDE 1.01 (on card) or OPTiViC (program). - * It has a place for a secondary connector in circuit, but nothing - * is there. Also BIOS says no address for - * secondary controller (see bellow in ide_init_opti621). - * I've only tested this on my system, which only has one disk. - * It's Western Digital WDAC2850, with PIO mode 3. The PCI bus - * is at 20 MHz (I have DX2/80, I tried PCI at 40, but I got random - * lockups). I tried the OCTEK double speed CD-ROM and - * it does not work! But I can't boot DOS also, so it's probably - * hardware fault. I have connected Conner 80MB, the Seagate 850MB (no - * problems) and Seagate 1GB (as slave, WD as master). My experiences - * with the third, 1GB drive: I got 3MB/s (hdparm), but sometimes - * it slows to about 100kB/s! I don't know why and I have - * not this drive now, so I can't try it again. - * I write this driver because I lost the paper ("manual") with - * settings of jumpers on the card and I have to boot Linux with - * Loadlin except LILO, cause I have to run the setupvic.exe program - * already or I get disk errors (my test: rpm -Vf - * /usr/X11R6/bin/XF86_SVGA - or any big file). - * Some numbers from hdparm -t /dev/hda: - * Timing buffer-cache reads: 32 MB in 3.02 seconds =10.60 MB/sec - * Timing buffered disk reads: 16 MB in 5.52 seconds = 2.90 MB/sec - * I have 4 Megs/s before, but I don't know why (maybe changes - * in hdparm test). - * After release of 0.1, I got some successful reports, so it might work. - * - * The main problem with OPTi is that some timings for master - * and slave must be the same. For example, if you have master - * PIO 3 and slave PIO 0, driver have to set some timings of - * master for PIO 0. Second problem is that opti621_set_pio_mode - * got only one drive to set, but have to set both drives. - * This is solved in compute_pios. 
If you don't set - * the second drive, compute_pios use ide_get_best_pio_mode - * for autoselect mode (you can change it to PIO 0, if you want). - * If you then set the second drive to another PIO, the old value - * (automatically selected) will be overrided by yours. - * There is a 25/33MHz switch in configuration - * register, but driver is written for use at any frequency. - * - * Version 0.1, Nov 8, 1996 - * by Jaromir Koutek, for 2.1.8. - * Initial version of driver. - * - * Version 0.2 - * Number 0.2 skipped. - * - * Version 0.3, Nov 29, 1997 - * by Mark Lord (probably), for 2.1.68 - * Updates for use with new IDE block driver. - * - * Version 0.4, Dec 14, 1997 - * by Jan Harkes - * Fixed some errors and cleaned the code. - * - * Version 0.5, Jan 2, 1998 - * by Jaromir Koutek - * Updates for use with (again) new IDE block driver. - * Update of documentation. - * - * Version 0.6, Jan 2, 1999 - * by Jaromir Koutek - * Reversed to version 0.3 of the driver, because - * 0.5 doesn't work. */ #include <linux/types.h> @@ -133,12 +62,12 @@ static u8 read_reg(int reg) return ret; } -static void opti621_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void opti621_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; ide_drive_t *pair = ide_get_pair_dev(drive); unsigned long flags; - unsigned long mode = XFER_PIO_0 + pio, pair_mode; + unsigned long mode = drive->pio_mode, pair_mode; + const u8 pio = mode - XFER_PIO_0; u8 tim, misc, addr_pio = pio, clk; /* DRDY is default 2 (by OPTi Databook) */ diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c index f8eddf05ecb..9e8f4e1b0cc 100644 --- a/drivers/ide/palm_bk3710.c +++ b/drivers/ide/palm_bk3710.c @@ -166,7 +166,7 @@ static void palm_bk3710_setpiomode(void __iomem *base, ide_drive_t *mate, writel(val32, base + BK3710_DATRCVR); if (mate) { - u8 mode2 = ide_get_best_pio_mode(mate, 255, 4); + u8 mode2 = mate->pio_mode - XFER_PIO_0; if (mode2 < mode) mode = mode2; @@ -188,10 +188,11 @@ static void palm_bk3710_setpiomode(void __iomem *base, ide_drive_t *mate, writel(val32, base + BK3710_REGRCVR); } -static void palm_bk3710_set_dma_mode(ide_drive_t *drive, u8 xferspeed) +static void palm_bk3710_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { int is_slave = drive->dn & 1; - void __iomem *base = (void *)drive->hwif->dma_base; + void __iomem *base = (void *)hwif->dma_base; + const u8 xferspeed = drive->dma_mode; if (xferspeed >= XFER_UDMA_0) { palm_bk3710_setudmamode(base, is_slave, @@ -203,12 +204,13 @@ static void palm_bk3710_set_dma_mode(ide_drive_t *drive, u8 xferspeed) } } -static void palm_bk3710_set_pio_mode(ide_drive_t *drive, u8 pio) +static void palm_bk3710_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { unsigned int cycle_time; int is_slave = drive->dn & 1; ide_drive_t *mate; - void __iomem *base = (void *)drive->hwif->dma_base; + void __iomem *base = (void *)hwif->dma_base; + const u8 pio = drive->pio_mode - XFER_PIO_0; /* * Obtain the drive PIO data for tuning the Palm Chip registers diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c index 65ba8239e7b..9546fe2a93f 100644 --- a/drivers/ide/pdc202xx_new.c +++ b/drivers/ide/pdc202xx_new.c @@ -129,11 +129,11 @@ static struct udma_timing { { 0x1a, 0x01, 0xcb }, /* UDMA mode 6 */ }; -static void pdcnew_set_dma_mode(ide_drive_t *drive, const u8 speed) +static void pdcnew_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); u8 adj = 
(drive->dn & 1) ? 0x08 : 0x00; + const u8 speed = drive->dma_mode; /* * IDE core issues SETFEATURES_XFER to the drive first (thanks to @@ -167,11 +167,11 @@ static void pdcnew_set_dma_mode(ide_drive_t *drive, const u8 speed) } } -static void pdcnew_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void pdcnew_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); u8 adj = (drive->dn & 1) ? 0x08 : 0x00; + const u8 pio = drive->pio_mode - XFER_PIO_0; if (max_dma_rate(dev) == 4) { set_indexed_reg(hwif, 0x0c + adj, pio_timings[pio].reg0c); diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c index 35161dd840a..c5f3841af36 100644 --- a/drivers/ide/pdc202xx_old.c +++ b/drivers/ide/pdc202xx_old.c @@ -1,7 +1,7 @@ /* * Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org> * Copyright (C) 2006-2007, 2009 MontaVista Software, Inc. - * Copyright (C) 2007 Bartlomiej Zolnierkiewicz + * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz * * Portions Copyright (C) 1999 Promise Technology, Inc. * Author: Frank Tiernan (frankt@promise.com) @@ -21,23 +21,15 @@ #define DRV_NAME "pdc202xx_old" -static void pdc_old_disable_66MHz_clock(ide_hwif_t *); - -static void pdc202xx_set_mode(ide_drive_t *drive, const u8 speed) +static void pdc202xx_set_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); u8 drive_pci = 0x60 + (drive->dn << 2); + const u8 speed = drive->dma_mode; u8 AP = 0, BP = 0, CP = 0; u8 TA = 0, TB = 0, TC = 0; - /* - * TODO: do this once per channel - */ - if (dev->device != PCI_DEVICE_ID_PROMISE_20246) - pdc_old_disable_66MHz_clock(hwif); - pci_read_config_byte(dev, drive_pci, &AP); pci_read_config_byte(dev, drive_pci + 1, &BP); pci_read_config_byte(dev, drive_pci + 2, &CP); @@ -84,9 +76,10 @@ static void pdc202xx_set_mode(ide_drive_t *drive, const u8 speed) } } -static void pdc202xx_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void pdc202xx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - pdc202xx_set_mode(drive, XFER_PIO_0 + pio); + drive->dma_mode = drive->pio_mode; + pdc202xx_set_mode(hwif, drive); } static int pdc202xx_test_irq(ide_hwif_t *hwif) @@ -100,13 +93,13 @@ static int pdc202xx_test_irq(ide_hwif_t *hwif) * bit 7: error, bit 6: interrupting, * bit 5: FIFO full, bit 4: FIFO empty */ - return ((sc1d & 0x50) == 0x40) ? 1 : 0; + return ((sc1d & 0x50) == 0x50) ? 1 : 0; } else { /* * bit 3: error, bit 2: interrupting, * bit 1: FIFO full, bit 0: FIFO empty */ - return ((sc1d & 0x05) == 0x04) ? 1 : 0; + return ((sc1d & 0x05) == 0x05) ? 1 : 0; } } @@ -145,6 +138,11 @@ static void pdc_old_disable_66MHz_clock(ide_hwif_t *hwif) outb(clock & ~(hwif->channel ? 
0x08 : 0x02), clock_reg); } +static void pdc2026x_init_hwif(ide_hwif_t *hwif) +{ + pdc_old_disable_66MHz_clock(hwif); +} + static void pdc202xx_dma_start(ide_drive_t *drive) { if (drive->current_speed > XFER_UDMA_2) @@ -261,6 +259,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = { { \ .name = DRV_NAME, \ .init_chipset = init_chipset_pdc202xx, \ + .init_hwif = pdc2026x_init_hwif, \ .port_ops = &pdc2026x_port_ops, \ .dma_ops = &pdc2026x_dma_ops, \ .host_flags = IDE_HFLAGS_PDC202XX, \ @@ -356,6 +355,6 @@ static void __exit pdc202xx_ide_exit(void) module_init(pdc202xx_ide_init); module_exit(pdc202xx_ide_exit); -MODULE_AUTHOR("Andre Hedrick, Frank Tiernan"); +MODULE_AUTHOR("Andre Hedrick, Frank Tiernan, Bartlomiej Zolnierkiewicz"); MODULE_DESCRIPTION("PCI driver module for older Promise IDE"); MODULE_LICENSE("GPL"); diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c index bf14f39bd3a..1bdca49e5a0 100644 --- a/drivers/ide/piix.c +++ b/drivers/ide/piix.c @@ -59,15 +59,14 @@ static int no_piix_dma; /** * piix_set_pio_mode - set host controller for PIO mode + * @port: port * @drive: drive - * @pio: PIO mode number * * Set the interface PIO mode based upon the settings done by AMI BIOS. */ -static void piix_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void piix_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); int is_slave = drive->dn & 1; int master_port = hwif->channel ? 0x42 : 0x40; @@ -77,6 +76,7 @@ static void piix_set_pio_mode(ide_drive_t *drive, const u8 pio) u8 slave_data; static DEFINE_SPINLOCK(tune_lock); int control = 0; + const u8 pio = drive->pio_mode - XFER_PIO_0; /* ISP RTC */ static const u8 timings[][2]= { @@ -127,16 +127,15 @@ static void piix_set_pio_mode(ide_drive_t *drive, const u8 pio) /** * piix_set_dma_mode - set host controller for DMA mode + * @hwif: port * @drive: drive - * @speed: DMA mode * * Set a PIIX host controller to the desired DMA mode. This involves * programming the right timing data into the PCI configuration space. */ -static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed) +static void piix_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); u8 maslave = hwif->channel ? 0x42 : 0x40; int a_speed = 3 << (drive->dn * 4); @@ -147,6 +146,7 @@ static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed) int sitre; u16 reg4042, reg4a; u8 reg48, reg54, reg55; + const u8 speed = drive->dma_mode; pci_read_config_word(dev, maslave, ®4042); sitre = (reg4042 & 0x4000) ? 
1 : 0; @@ -176,7 +176,6 @@ static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed) pci_write_config_byte(dev, 0x54, reg54 & ~v_flag); } else { const u8 mwdma_to_pio[] = { 0, 3, 4 }; - u8 pio; if (reg48 & u_flag) pci_write_config_byte(dev, 0x48, reg48 & ~u_flag); @@ -188,11 +187,12 @@ static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed) pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); if (speed >= XFER_MW_DMA_0) - pio = mwdma_to_pio[speed - XFER_MW_DMA_0]; + drive->pio_mode = + mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0; else - pio = 2; /* only SWDMA2 is allowed */ + drive->pio_mode = XFER_PIO_2; /* for SWDMA2 */ - piix_set_pio_mode(drive, pio); + piix_set_pio_mode(hwif, drive); } } diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c index 7a4e788cab2..850ee452e9b 100644 --- a/drivers/ide/pmac.c +++ b/drivers/ide/pmac.c @@ -496,12 +496,11 @@ static void pmac_write_devctl(ide_hwif_t *hwif, u8 ctl) /* * Old tuning functions (called on hdparm -p), sets up drive PIO timings */ -static void -pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void pmac_ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); + const u8 pio = drive->pio_mode - XFER_PIO_0; struct ide_timing *tim = ide_timing_find_mode(XFER_PIO_0 + pio); u32 *timings, t; unsigned accessTicks, recTicks; @@ -778,14 +777,14 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2, #endif } -static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed) +static void pmac_ide_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent); int ret = 0; u32 *timings, *timings2, tl[2]; u8 unit = drive->dn & 1; + const u8 speed = drive->dma_mode; timings = &pmif->timings[unit]; timings2 = &pmif->timings[unit+2]; @@ -1651,8 +1650,8 @@ pmac_ide_dma_test_irq (ide_drive_t *drive) if ((status & FLUSH) == 0) break; if (++timeout > 100) { - printk(KERN_WARNING "ide%d, ide_dma_test_irq \ - timeout flushing channel\n", hwif->index); + printk(KERN_WARNING "ide%d, ide_dma_test_irq timeout flushing channel\n", + hwif->index); break; } } diff --git a/drivers/ide/qd65xx.c b/drivers/ide/qd65xx.c index 74696edc8d1..3f0244fd8e6 100644 --- a/drivers/ide/qd65xx.c +++ b/drivers/ide/qd65xx.c @@ -189,15 +189,13 @@ static void qd_set_timing (ide_drive_t *drive, u8 timing) printk(KERN_DEBUG "%s: %#x\n", drive->name, timing); } -static void qd6500_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void qd6500_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { u16 *id = drive->id; int active_time = 175; int recovery_time = 415; /* worst case values from the dos driver */ - /* - * FIXME: use "pio" value - */ + /* FIXME: use drive->pio_mode value */ if (!qd_find_disk_type(drive, &active_time, &recovery_time) && (id[ATA_ID_OLD_PIO_MODES] & 0xff) && (id[ATA_ID_FIELD_VALID] & 2) && id[ATA_ID_EIDE_PIO] >= 240) { @@ -211,9 +209,9 @@ static void qd6500_set_pio_mode(ide_drive_t *drive, const u8 pio) active_time, recovery_time)); } -static void qd6580_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void qd6580_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; + const u8 pio = drive->pio_mode - XFER_PIO_0; struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio); unsigned int cycle_time; int active_time = 175; diff --git 
a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c index d467478d68d..134f1fd1386 100644 --- a/drivers/ide/sc1200.c +++ b/drivers/ide/sc1200.c @@ -122,13 +122,13 @@ out: return mask; } -static void sc1200_set_dma_mode(ide_drive_t *drive, const u8 mode) +static void sc1200_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); unsigned int reg, timings; unsigned short pci_clock; unsigned int basereg = hwif->channel ? 0x50 : 0x40; + const u8 mode = drive->dma_mode; static const u32 udma_timing[3][3] = { { 0x00921250, 0x00911140, 0x00911030 }, @@ -193,10 +193,10 @@ static int sc1200_dma_end(ide_drive_t *drive) * will have valid default PIO timings set up before we get here. */ -static void sc1200_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void sc1200_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; int mode = -1; + const u8 pio = drive->pio_mode - XFER_PIO_0; /* * bad abuse of ->set_pio_mode interface diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c index 1104bb301eb..b7f5b0c4310 100644 --- a/drivers/ide/scc_pata.c +++ b/drivers/ide/scc_pata.c @@ -199,16 +199,15 @@ scc_ide_outsl(unsigned long port, void *addr, u32 count) /** * scc_set_pio_mode - set host controller for PIO mode + * @hwif: port * @drive: drive - * @pio: PIO mode number * * Load the timing settings for this device mode into the * controller. */ -static void scc_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void scc_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; struct scc_ports *ports = ide_get_hwifdata(hwif); unsigned long ctl_base = ports->ctl; unsigned long cckctrl_port = ctl_base + 0xff0; @@ -216,6 +215,7 @@ static void scc_set_pio_mode(ide_drive_t *drive, const u8 pio) unsigned long pioct_port = ctl_base + 0x004; unsigned long reg; int offset; + const u8 pio = drive->pio_mode - XFER_PIO_0; reg = in_be32((void __iomem *)cckctrl_port); if (reg & CCKCTRL_ATACLKOEN) { @@ -231,16 +231,15 @@ static void scc_set_pio_mode(ide_drive_t *drive, const u8 pio) /** * scc_set_dma_mode - set host controller for DMA mode + * @hwif: port * @drive: drive - * @speed: DMA mode * * Load the timing settings for this device mode into the * controller. */ -static void scc_set_dma_mode(ide_drive_t *drive, const u8 speed) +static void scc_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; struct scc_ports *ports = ide_get_hwifdata(hwif); unsigned long ctl_base = ports->ctl; unsigned long cckctrl_port = ctl_base + 0xff0; @@ -254,6 +253,7 @@ static void scc_set_dma_mode(ide_drive_t *drive, const u8 speed) int offset, idx; unsigned long reg; unsigned long jcactsel; + const u8 speed = drive->dma_mode; reg = in_be32((void __iomem *)cckctrl_port); if (reg & CCKCTRL_ATACLKOEN) { @@ -872,20 +872,18 @@ static struct pci_driver scc_pci_driver = { .remove = __devexit_p(scc_remove), }; -static int scc_ide_init(void) +static int __init scc_ide_init(void) { return ide_pci_register_driver(&scc_pci_driver); } -module_init(scc_ide_init); -/* -- No exit code? 
-static void scc_ide_exit(void) +static void __exit scc_ide_exit(void) { - ide_pci_unregister_driver(&scc_pci_driver); + pci_unregister_driver(&scc_pci_driver); } -module_exit(scc_ide_exit); - */ +module_init(scc_ide_init); +module_exit(scc_ide_exit); MODULE_DESCRIPTION("PCI driver module for Toshiba SCC IDE"); MODULE_LICENSE("GPL"); diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c index b6554ef9271..35fb8dabb55 100644 --- a/drivers/ide/serverworks.c +++ b/drivers/ide/serverworks.c @@ -2,7 +2,7 @@ * Copyright (C) 1998-2000 Michel Aubry * Copyright (C) 1998-2000 Andrzej Krzysztofowicz * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org> - * Copyright (C) 2007 Bartlomiej Zolnierkiewicz + * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz * Portions copyright (c) 2001 Sun Microsystems * * @@ -52,8 +52,6 @@ static const char *svwks_bad_ata100[] = { NULL }; -static struct pci_dev *isa_dev; - static int check_in_drive_lists (ide_drive_t *drive, const char **list) { char *m = (char *)&drive->id[ATA_ID_PROD]; @@ -67,26 +65,14 @@ static int check_in_drive_lists (ide_drive_t *drive, const char **list) static u8 svwks_udma_filter(ide_drive_t *drive) { struct pci_dev *dev = to_pci_dev(drive->hwif->dev); - u8 mask = 0; - if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE) + if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE) { return 0x1f; - if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) { - u32 reg = 0; - if (isa_dev) - pci_read_config_dword(isa_dev, 0x64, ®); - - /* - * Don't enable UDMA on disk devices for the moment - */ - if(drive->media == ide_disk) - return 0; - /* Check the OSB4 DMA33 enable bit */ - return ((reg & 0x00004000) == 0x00004000) ? 0x07 : 0; } else if (dev->revision < SVWKS_CSB5_REVISION_NEW) { return 0x07; - } else if (dev->revision >= SVWKS_CSB5_REVISION_NEW) { - u8 btr = 0, mode; + } else { + u8 btr = 0, mode, mask; + pci_read_config_byte(dev, 0x5A, &btr); mode = btr & 0x3; @@ -101,13 +87,9 @@ static u8 svwks_udma_filter(ide_drive_t *drive) case 1: mask = 0x07; break; default: mask = 0x00; break; } - } - if (((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) || - (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) && - (!(PCI_FUNC(dev->devfn) & 1))) - mask = 0x1f; - return mask; + return mask; + } } static u8 svwks_csb_check (struct pci_dev *dev) @@ -124,12 +106,13 @@ static u8 svwks_csb_check (struct pci_dev *dev) return 0; } -static void svwks_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void svwks_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { static const u8 pio_modes[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 }; static const u8 drive_pci[] = { 0x41, 0x40, 0x43, 0x42 }; - struct pci_dev *dev = to_pci_dev(drive->hwif->dev); + struct pci_dev *dev = to_pci_dev(hwif->dev); + const u8 pio = drive->pio_mode - XFER_PIO_0; pci_write_config_byte(dev, drive_pci[drive->dn], pio_modes[pio]); @@ -145,14 +128,14 @@ static void svwks_set_pio_mode(ide_drive_t *drive, const u8 pio) } } -static void svwks_set_dma_mode(ide_drive_t *drive, const u8 speed) +static void svwks_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { static const u8 udma_modes[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 }; static const u8 dma_modes[] = { 0x77, 0x21, 0x20 }; static const u8 drive_pci2[] = { 0x45, 0x44, 0x47, 0x46 }; - ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); + const u8 speed = drive->dma_mode; u8 unit = drive->dn & 1; u8 ultra_enable = 0, ultra_timing = 0, dma_timing = 0; @@ -185,8 +168,9 @@ static int 
init_chipset_svwks(struct pci_dev *dev) /* OSB4 : South Bridge and IDE */ if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) { - isa_dev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS, - PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL); + struct pci_dev *isa_dev = + pci_get_device(PCI_VENDOR_ID_SERVERWORKS, + PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL); if (isa_dev) { pci_read_config_dword(isa_dev, 0x64, ®); reg &= ~0x00002000; /* disable 600ns interrupt mask */ @@ -195,6 +179,7 @@ static int init_chipset_svwks(struct pci_dev *dev) "enabled.\n", pci_name(dev)); reg |= 0x00004000; /* enable UDMA/33 support */ pci_write_config_dword(isa_dev, 0x64, reg); + pci_dev_put(isa_dev); } } @@ -343,7 +328,6 @@ static u8 svwks_cable_detect(ide_hwif_t *hwif) static const struct ide_port_ops osb4_port_ops = { .set_pio_mode = svwks_set_pio_mode, .set_dma_mode = svwks_set_dma_mode, - .udma_filter = svwks_udma_filter, }; static const struct ide_port_ops svwks_port_ops = { @@ -460,6 +444,6 @@ static void __exit svwks_ide_exit(void) module_init(svwks_ide_init); module_exit(svwks_ide_exit); -MODULE_AUTHOR("Michael Aubry. Andrzej Krzysztofowicz, Andre Hedrick"); +MODULE_AUTHOR("Michael Aubry. Andrzej Krzysztofowicz, Andre Hedrick, Bartlomiej Zolnierkiewicz"); MODULE_DESCRIPTION("PCI driver module for Serverworks OSB4/CSB5/CSB6 IDE"); MODULE_LICENSE("GPL"); diff --git a/drivers/ide/sgiioc4.c b/drivers/ide/sgiioc4.c index b7d61dc6409..e3ea591f66d 100644 --- a/drivers/ide/sgiioc4.c +++ b/drivers/ide/sgiioc4.c @@ -255,7 +255,7 @@ static int sgiioc4_dma_end(ide_drive_t *drive) return dma_stat; } -static void sgiioc4_set_dma_mode(ide_drive_t *drive, const u8 speed) +static void sgiioc4_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { } diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c index d95df528562..ddeda444a27 100644 --- a/drivers/ide/siimage.c +++ b/drivers/ide/siimage.c @@ -229,19 +229,18 @@ static u8 sil_sata_udma_filter(ide_drive_t *drive) /** * sil_set_pio_mode - set host controller for PIO mode + * @hwif: port * @drive: drive - * @pio: PIO mode number * * Load the timing settings for this device mode into the * controller. */ -static void sil_set_pio_mode(ide_drive_t *drive, u8 pio) +static void sil_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { static const u16 tf_speed[] = { 0x328a, 0x2283, 0x1281, 0x10c3, 0x10c1 }; static const u16 data_speed[] = { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 }; - ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); ide_drive_t *pair = ide_get_pair_dev(drive); u32 speedt = 0; @@ -249,6 +248,7 @@ static void sil_set_pio_mode(ide_drive_t *drive, u8 pio) unsigned long addr = siimage_seldev(drive, 0x04); unsigned long tfaddr = siimage_selreg(hwif, 0x02); unsigned long base = (unsigned long)hwif->hwif_data; + const u8 pio = drive->pio_mode - XFER_PIO_0; u8 tf_pio = pio; u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0; u8 addr_mask = hwif->channel ? (mmio ? 0xF4 : 0x84) @@ -258,7 +258,7 @@ static void sil_set_pio_mode(ide_drive_t *drive, u8 pio) /* trim *taskfile* PIO to the slowest of the master/slave */ if (pair) { - u8 pair_pio = ide_get_best_pio_mode(pair, 255, 4); + u8 pair_pio = pair->pio_mode - XFER_PIO_0; if (pair_pio < tf_pio) tf_pio = pair_pio; @@ -289,19 +289,18 @@ static void sil_set_pio_mode(ide_drive_t *drive, u8 pio) /** * sil_set_dma_mode - set host controller for DMA mode + * @hwif: port * @drive: drive - * @speed: DMA mode * * Tune the SiI chipset for the desired DMA mode. 
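In the serverworks hunk above, the OSB4 ISA bridge lookup becomes local to init_chipset_svwks(), and the reference taken by pci_get_device() is released once the bridge registers are programmed; the removed OSB4 branch of svwks_udma_filter() was the only other user of the old global isa_dev. The general shape of the now-balanced lookup:

        struct pci_dev *isa_dev =
                pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
                               PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL);

        if (isa_dev) {
                u32 reg;

                pci_read_config_dword(isa_dev, 0x64, &reg);
                /* ... adjust the OSB4 bits in 'reg' and write them back ... */
                pci_write_config_dword(isa_dev, 0x64, reg);

                pci_dev_put(isa_dev);   /* drop the pci_get_device() reference */
        }
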
*/ -static void sil_set_dma_mode(ide_drive_t *drive, const u8 speed) +static void sil_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { static const u8 ultra6[] = { 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 }; static const u8 ultra5[] = { 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01 }; static const u16 dma[] = { 0x2208, 0x10C2, 0x10C1 }; - ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); unsigned long base = (unsigned long)hwif->hwif_data; u16 ultra = 0, multi = 0; @@ -311,6 +310,7 @@ static void sil_set_dma_mode(ide_drive_t *drive, const u8 speed) : (mmio ? 0xB4 : 0x80); unsigned long ma = siimage_seldev(drive, 0x08); unsigned long ua = siimage_seldev(drive, 0x0C); + const u8 speed = drive->dma_mode; scsc = sil_ioread8 (dev, base + (mmio ? 0x4A : 0x8A)); mode = sil_ioread8 (dev, base + addr_mask); diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c index 468706082fb..db7f4e761db 100644 --- a/drivers/ide/sis5513.c +++ b/drivers/ide/sis5513.c @@ -290,10 +290,10 @@ static void config_drive_art_rwp(ide_drive_t *drive) pci_write_config_byte(dev, 0x4b, rw_prefetch); } -static void sis_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void sis_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { config_drive_art_rwp(drive); - sis_program_timings(drive, XFER_PIO_0 + pio); + sis_program_timings(drive, drive->pio_mode); } static void sis_ata133_program_udma_timings(ide_drive_t *drive, const u8 mode) @@ -340,8 +340,10 @@ static void sis_program_udma_timings(ide_drive_t *drive, const u8 mode) sis_ata33_program_udma_timings(drive, mode); } -static void sis_set_dma_mode(ide_drive_t *drive, const u8 speed) +static void sis_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { + const u8 speed = drive->dma_mode; + if (speed >= XFER_UDMA_0) sis_program_udma_timings(drive, speed); else diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c index 3c2bbf0057e..f21dc2ad768 100644 --- a/drivers/ide/sl82c105.c +++ b/drivers/ide/sl82c105.c @@ -63,12 +63,13 @@ static unsigned int get_pio_timings(ide_drive_t *drive, u8 pio) /* * Configure the chipset for PIO mode. */ -static void sl82c105_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void sl82c105_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - struct pci_dev *dev = to_pci_dev(drive->hwif->dev); + struct pci_dev *dev = to_pci_dev(hwif->dev); unsigned long timings = (unsigned long)ide_get_drivedata(drive); int reg = 0x44 + drive->dn * 4; u16 drv_ctrl; + const u8 pio = drive->pio_mode - XFER_PIO_0; drv_ctrl = get_pio_timings(drive, pio); @@ -91,11 +92,12 @@ static void sl82c105_set_pio_mode(ide_drive_t *drive, const u8 pio) /* * Configure the chipset for DMA mode. 
*/ -static void sl82c105_set_dma_mode(ide_drive_t *drive, const u8 speed) +static void sl82c105_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { static u16 mwdma_timings[] = {0x0707, 0x0201, 0x0200}; unsigned long timings = (unsigned long)ide_get_drivedata(drive); u16 drv_ctrl; + const u8 speed = drive->dma_mode; drv_ctrl = mwdma_timings[speed - XFER_MW_DMA_0]; diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c index 1ccfb40e721..864ffe0e26d 100644 --- a/drivers/ide/slc90e66.c +++ b/drivers/ide/slc90e66.c @@ -18,9 +18,8 @@ static DEFINE_SPINLOCK(slc90e66_lock); -static void slc90e66_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void slc90e66_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); int is_slave = drive->dn & 1; int master_port = hwif->channel ? 0x42 : 0x40; @@ -29,6 +28,8 @@ static void slc90e66_set_pio_mode(ide_drive_t *drive, const u8 pio) u16 master_data; u8 slave_data; int control = 0; + const u8 pio = drive->pio_mode - XFER_PIO_0; + /* ISP RTC */ static const u8 timings[][2] = { { 0, 0 }, @@ -71,14 +72,14 @@ static void slc90e66_set_pio_mode(ide_drive_t *drive, const u8 pio) spin_unlock_irqrestore(&slc90e66_lock, flags); } -static void slc90e66_set_dma_mode(ide_drive_t *drive, const u8 speed) +static void slc90e66_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); u8 maslave = hwif->channel ? 0x42 : 0x40; int sitre = 0, a_speed = 7 << (drive->dn * 4); int u_speed = 0, u_flag = 1 << drive->dn; u16 reg4042, reg44, reg48, reg4a; + const u8 speed = drive->dma_mode; pci_read_config_word(dev, maslave, ®4042); sitre = (reg4042 & 0x4000) ? 1 : 0; @@ -98,7 +99,6 @@ static void slc90e66_set_dma_mode(ide_drive_t *drive, const u8 speed) } } else { const u8 mwdma_to_pio[] = { 0, 3, 4 }; - u8 pio; if (reg48 & u_flag) pci_write_config_word(dev, 0x48, reg48 & ~u_flag); @@ -106,11 +106,12 @@ static void slc90e66_set_dma_mode(ide_drive_t *drive, const u8 speed) pci_write_config_word(dev, 0x4a, reg4a & ~a_speed); if (speed >= XFER_MW_DMA_0) - pio = mwdma_to_pio[speed - XFER_MW_DMA_0]; + drive->pio_mode = + mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0; else - pio = 2; /* only SWDMA2 is allowed */ + drive->pio_mode = XFER_PIO_2; /* for SWDMA2 */ - slc90e66_set_pio_mode(drive, pio); + slc90e66_set_pio_mode(hwif, drive); } } diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c index 05a93d6baec..e444d24934b 100644 --- a/drivers/ide/tc86c001.c +++ b/drivers/ide/tc86c001.c @@ -13,11 +13,11 @@ #define DRV_NAME "tc86c001" -static void tc86c001_set_mode(ide_drive_t *drive, const u8 speed) +static void tc86c001_set_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; unsigned long scr_port = hwif->config_data + (drive->dn ? 
0x02 : 0x00); u16 mode, scr = inw(scr_port); + const u8 speed = drive->dma_mode; switch (speed) { case XFER_UDMA_4: mode = 0x00c0; break; @@ -41,9 +41,10 @@ static void tc86c001_set_mode(ide_drive_t *drive, const u8 speed) outw(scr, scr_port); } -static void tc86c001_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void tc86c001_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - tc86c001_set_mode(drive, XFER_PIO_0 + pio); + drive->dma_mode = drive->pio_mode; + tc86c001_set_mode(hwif, drive); } /* diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c index 8773c3ba746..7953447eae0 100644 --- a/drivers/ide/triflex.c +++ b/drivers/ide/triflex.c @@ -34,9 +34,8 @@ #define DRV_NAME "triflex" -static void triflex_set_mode(ide_drive_t *drive, const u8 speed) +static void triflex_set_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; struct pci_dev *dev = to_pci_dev(hwif->dev); u32 triflex_timings = 0; u16 timing = 0; @@ -44,7 +43,7 @@ static void triflex_set_mode(ide_drive_t *drive, const u8 speed) pci_read_config_dword(dev, channel_offset, &triflex_timings); - switch(speed) { + switch (drive->dma_mode) { case XFER_MW_DMA_2: timing = 0x0103; break; @@ -82,9 +81,10 @@ static void triflex_set_mode(ide_drive_t *drive, const u8 speed) pci_write_config_dword(dev, channel_offset, triflex_timings); } -static void triflex_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void triflex_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - triflex_set_mode(drive, XFER_PIO_0 + pio); + drive->dma_mode = drive->pio_mode; + triflex_set_mode(hwif, drive); } static const struct ide_port_ops triflex_port_ops = { diff --git a/drivers/ide/tx4938ide.c b/drivers/ide/tx4938ide.c index fd59c0d235b..1d80f1fdbc9 100644 --- a/drivers/ide/tx4938ide.c +++ b/drivers/ide/tx4938ide.c @@ -56,16 +56,15 @@ static void tx4938ide_tune_ebusc(unsigned int ebus_ch, &tx4938_ebuscptr->cr[ebus_ch]); } -static void tx4938ide_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void tx4938ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; struct tx4938ide_platform_info *pdata = hwif->dev->platform_data; - u8 safe = pio; + u8 safe = drive->pio_mode - XFER_PIO_0; ide_drive_t *pair; pair = ide_get_pair_dev(drive); if (pair) - safe = min(safe, ide_get_best_pio_mode(pair, 255, 5)); + safe = min(safe, pair->pio_mode - XFER_PIO_0); tx4938ide_tune_ebusc(pdata->ebus_ch, pdata->gbus_clock, safe); } diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c index 64b58ecc3f0..3c736775187 100644 --- a/drivers/ide/tx4939ide.c +++ b/drivers/ide/tx4939ide.c @@ -104,17 +104,17 @@ static void tx4939ide_writeb(u8 val, void __iomem *base, u32 reg) #define TX4939IDE_BASE(hwif) ((void __iomem *)(hwif)->extra_base) -static void tx4939ide_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void tx4939ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; int is_slave = drive->dn; u32 mask, val; + const u8 pio = drive->pio_mode - XFER_PIO_0; u8 safe = pio; ide_drive_t *pair; pair = ide_get_pair_dev(drive); if (pair) - safe = min(safe, ide_get_best_pio_mode(pair, 255, 4)); + safe = min(safe, pair->pio_mode - XFER_PIO_0); /* * Update Command Transfer Mode for master/slave and Data * Transfer Mode for this drive. 
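Several of these conversions (pdc202xx, tc86c001, triflex above, via82cxxx below) keep a single combined set_mode() routine for both PIO and DMA; their PIO hooks now seed drive->dma_mode with the PIO transfer value before delegating. A sketch of the idiom with hypothetical names:

static void shared_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
        const u8 speed = drive->dma_mode;       /* XFER_PIO_* or XFER_*_DMA_* */

        /* ... program timings for 'speed' ... */
}

static void shared_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
        /* the shared routine reads drive->dma_mode, so mirror the PIO value */
        drive->dma_mode = drive->pio_mode;
        shared_set_mode(hwif, drive);
}
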
@@ -125,10 +125,10 @@ static void tx4939ide_set_pio_mode(ide_drive_t *drive, const u8 pio) /* tx4939ide_tf_load_fixup() will set the Sys_Ctl register */ } -static void tx4939ide_set_dma_mode(ide_drive_t *drive, const u8 mode) +static void tx4939ide_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; u32 mask, val; + const u8 mode = drive->dma_mode; /* Update Data Transfer Mode for this drive. */ if (mode >= XFER_UDMA_0) diff --git a/drivers/ide/umc8672.c b/drivers/ide/umc8672.c index 60f936e2319..47adcd09cb2 100644 --- a/drivers/ide/umc8672.c +++ b/drivers/ide/umc8672.c @@ -104,10 +104,11 @@ static void umc_set_speeds(u8 speeds[]) speeds[0], speeds[1], speeds[2], speeds[3]); } -static void umc_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void umc_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif, *mate = hwif->mate; + ide_hwif_t *mate = hwif->mate; unsigned long uninitialized_var(flags); + const u8 pio = drive->pio_mode - XFER_PIO_0; printk("%s: setting umc8672 to PIO mode%d (speed %d)\n", drive->name, pio, pio_to_umc[pio]); diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c index 028de26a25f..e65d010b708 100644 --- a/drivers/ide/via82cxxx.c +++ b/drivers/ide/via82cxxx.c @@ -6,7 +6,7 @@ * vt8235, vt8237, vt8237a * * Copyright (c) 2000-2002 Vojtech Pavlik - * Copyright (c) 2007 Bartlomiej Zolnierkiewicz + * Copyright (c) 2007-2010 Bartlomiej Zolnierkiewicz * * Based on the work of: * Michel Aubry @@ -54,6 +54,11 @@ #define VIA_NO_UNMASK 0x08 /* Doesn't work with IRQ unmasking on */ #define VIA_BAD_ID 0x10 /* Has wrong vendor ID (0x1107) */ #define VIA_BAD_AST 0x20 /* Don't touch Address Setup Timing */ +#define VIA_SATA_PATA 0x80 /* SATA/PATA combined configuration */ + +enum { + VIA_IDFLAG_SINGLE = (1 << 1), /* single channel controller */ +}; /* * VIA SouthBridge chips. 
@@ -67,11 +72,13 @@ static struct via_isa_bridge { u8 udma_mask; u8 flags; } via_isa_bridges[] = { - { "vx855", PCI_DEVICE_ID_VIA_VX855, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, - { "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, - { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, + { "vx855", PCI_DEVICE_ID_VIA_VX855, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA }, + { "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA }, + { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA }, + { "vt8261", PCI_DEVICE_ID_VIA_8261, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, { "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, + { "vt6415", PCI_DEVICE_ID_VIA_6410, 0x00, 0xff, ATA_UDMA6, VIA_BAD_AST }, { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, @@ -92,6 +99,7 @@ static struct via_isa_bridge { { "vt82c586", PCI_DEVICE_ID_VIA_82C586_0, 0x00, 0x0f, 0x00, VIA_SET_FIFO }, { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, 0x00, VIA_SET_FIFO | VIA_NO_UNMASK }, { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, 0x00, VIA_SET_FIFO | VIA_NO_UNMASK | VIA_BAD_ID }, + { "vtxxxx", PCI_DEVICE_ID_VIA_ANON, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST }, { NULL } }; @@ -102,6 +110,7 @@ struct via82cxxx_dev { struct via_isa_bridge *via_config; unsigned int via_80w; + u8 cached_device[2]; }; /** @@ -137,30 +146,45 @@ static void via_set_speed(ide_hwif_t *hwif, u8 dn, struct ide_timing *timing) case ATA_UDMA4: t = timing->udma ? (0xe8 | (clamp_val(timing->udma, 2, 9) - 2)) : 0x0f; break; case ATA_UDMA5: t = timing->udma ? (0xe0 | (clamp_val(timing->udma, 2, 9) - 2)) : 0x07; break; case ATA_UDMA6: t = timing->udma ? (0xe0 | (clamp_val(timing->udma, 2, 9) - 2)) : 0x07; break; - default: return; } - pci_write_config_byte(dev, VIA_UDMA_TIMING + (3 - dn), t); + /* Set UDMA unless device is not UDMA capable */ + if (vdev->via_config->udma_mask) { + u8 udma_etc; + + pci_read_config_byte(dev, VIA_UDMA_TIMING + 3 - dn, &udma_etc); + + /* clear transfer mode bit */ + udma_etc &= ~0x20; + + if (timing->udma) { + /* preserve 80-wire cable detection bit */ + udma_etc &= 0x10; + udma_etc |= t; + } + + pci_write_config_byte(dev, VIA_UDMA_TIMING + 3 - dn, udma_etc); + } } /** * via_set_drive - configure transfer mode + * @hwif: port * @drive: Drive to set up - * @speed: desired speed * * via_set_drive() computes timing values configures the chipset to * a desired transfer mode. It also can be called by upper layers. 
*/ -static void via_set_drive(ide_drive_t *drive, const u8 speed) +static void via_set_drive(ide_hwif_t *hwif, ide_drive_t *drive) { - ide_hwif_t *hwif = drive->hwif; ide_drive_t *peer = ide_get_pair_dev(drive); struct pci_dev *dev = to_pci_dev(hwif->dev); struct ide_host *host = pci_get_drvdata(dev); struct via82cxxx_dev *vdev = host->host_priv; struct ide_timing t, p; unsigned int T, UT; + const u8 speed = drive->dma_mode; T = 1000000000 / via_clock; @@ -175,7 +199,7 @@ static void via_set_drive(ide_drive_t *drive, const u8 speed) ide_timing_compute(drive, speed, &t, T, UT); if (peer) { - ide_timing_compute(peer, peer->current_speed, &p, T, UT); + ide_timing_compute(peer, peer->pio_mode, &p, T, UT); ide_timing_merge(&p, &t, &t, IDE_TIMING_8BIT); } @@ -184,22 +208,24 @@ static void via_set_drive(ide_drive_t *drive, const u8 speed) /** * via_set_pio_mode - set host controller for PIO mode + * @hwif: port * @drive: drive - * @pio: PIO mode number * * A callback from the upper layers for PIO-only tuning. */ -static void via_set_pio_mode(ide_drive_t *drive, const u8 pio) +static void via_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { - via_set_drive(drive, XFER_PIO_0 + pio); + drive->dma_mode = drive->pio_mode; + via_set_drive(hwif, drive); } static struct via_isa_bridge *via_config_find(struct pci_dev **isa) { struct via_isa_bridge *via_config; - for (via_config = via_isa_bridges; via_config->id; via_config++) + for (via_config = via_isa_bridges; + via_config->id != PCI_DEVICE_ID_VIA_ANON; via_config++) if ((*isa = pci_get_device(PCI_VENDOR_ID_VIA + !!(via_config->flags & VIA_BAD_ID), via_config->id, NULL))) { @@ -362,6 +388,9 @@ static u8 via82cxxx_cable_detect(ide_hwif_t *hwif) if (via_cable_override(pdev)) return ATA_CBL_PATA40_SHORT; + if ((vdev->via_config->flags & VIA_SATA_PATA) && hwif->channel == 0) + return ATA_CBL_SATA; + if ((vdev->via_80w >> hwif->channel) & 1) return ATA_CBL_PATA80; else @@ -374,10 +403,66 @@ static const struct ide_port_ops via_port_ops = { .cable_detect = via82cxxx_cable_detect, }; +static void via_write_devctl(ide_hwif_t *hwif, u8 ctl) +{ + struct via82cxxx_dev *vdev = hwif->host->host_priv; + + outb(ctl, hwif->io_ports.ctl_addr); + outb(vdev->cached_device[hwif->channel], hwif->io_ports.device_addr); +} + +static void __via_dev_select(ide_drive_t *drive, u8 select) +{ + ide_hwif_t *hwif = drive->hwif; + struct via82cxxx_dev *vdev = hwif->host->host_priv; + + outb(select, hwif->io_ports.device_addr); + vdev->cached_device[hwif->channel] = select; +} + +static void via_dev_select(ide_drive_t *drive) +{ + __via_dev_select(drive, drive->select | ATA_DEVICE_OBS); +} + +static void via_tf_load(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid) +{ + ide_hwif_t *hwif = drive->hwif; + struct ide_io_ports *io_ports = &hwif->io_ports; + + if (valid & IDE_VALID_FEATURE) + outb(tf->feature, io_ports->feature_addr); + if (valid & IDE_VALID_NSECT) + outb(tf->nsect, io_ports->nsect_addr); + if (valid & IDE_VALID_LBAL) + outb(tf->lbal, io_ports->lbal_addr); + if (valid & IDE_VALID_LBAM) + outb(tf->lbam, io_ports->lbam_addr); + if (valid & IDE_VALID_LBAH) + outb(tf->lbah, io_ports->lbah_addr); + if (valid & IDE_VALID_DEVICE) + __via_dev_select(drive, tf->device); +} + +const struct ide_tp_ops via_tp_ops = { + .exec_command = ide_exec_command, + .read_status = ide_read_status, + .read_altstatus = ide_read_altstatus, + .write_devctl = via_write_devctl, + + .dev_select = via_dev_select, + .tf_load = via_tf_load, + .tf_read = ide_tf_read, + + .input_data = 
ide_input_data, + .output_data = ide_output_data, +}; + static const struct ide_port_info via82cxxx_chipset __devinitdata = { .name = DRV_NAME, .init_chipset = init_chipset_via82cxxx, .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } }, + .tp_ops = &via_tp_ops, .port_ops = &via_port_ops, .host_flags = IDE_HFLAG_PIO_NO_BLACKLIST | IDE_HFLAG_POST_SET_MODE | @@ -402,11 +487,6 @@ static int __devinit via_init_one(struct pci_dev *dev, const struct pci_device_i * Find the ISA bridge and check we know what it is. */ via_config = via_config_find(&isa); - if (!via_config->id) { - printk(KERN_WARNING DRV_NAME " %s: unknown chipset, skipping\n", - pci_name(dev)); - return -ENODEV; - } /* * Print the boot message. @@ -436,10 +516,13 @@ static int __devinit via_init_one(struct pci_dev *dev, const struct pci_device_i via_clock = 33333; } - if (idx == 0) - d.host_flags |= IDE_HFLAG_NO_AUTODMA; - else + if (idx == 1) d.enablebits[1].reg = d.enablebits[0].reg = 0; + else + d.host_flags |= IDE_HFLAG_NO_AUTODMA; + + if (idx == VIA_IDFLAG_SINGLE) + d.host_flags |= IDE_HFLAG_SINGLE; if ((via_config->flags & VIA_NO_UNMASK) == 0) d.host_flags |= IDE_HFLAG_UNMASK_IRQS; @@ -475,8 +558,9 @@ static const struct pci_device_id via_pci_tbl[] = { { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C576_1), 0 }, { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C586_1), 0 }, { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_CX700_IDE), 0 }, - { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_VX855_IDE), 0 }, + { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_VX855_IDE), VIA_IDFLAG_SINGLE }, { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_6410), 1 }, + { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_6415), 1 }, { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_SATA_EIDE), 1 }, { 0, }, }; @@ -504,6 +588,6 @@ static void __exit via_ide_exit(void) module_init(via_ide_init); module_exit(via_ide_exit); -MODULE_AUTHOR("Vojtech Pavlik, Michel Aubry, Jeff Garzik, Andre Hedrick"); +MODULE_AUTHOR("Vojtech Pavlik, Bartlomiej Zolnierkiewicz, Michel Aubry, Jeff Garzik, Andre Hedrick"); MODULE_DESCRIPTION("PCI driver module for VIA IDE"); MODULE_LICENSE("GPL"); diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index dd0db67bf8d..975adce5f40 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig @@ -20,6 +20,7 @@ config INFINIBAND_USER_MAD config INFINIBAND_USER_ACCESS tristate "InfiniBand userspace access (verbs and CM)" + select ANON_INODES ---help--- Userspace InfiniBand access support. 
This enables the kernel side of userspace verbs and the userspace diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index f504c9b00c1..1b09b735c5a 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c @@ -1215,15 +1215,18 @@ static void ib_ucm_release_dev(struct device *dev) ucm_dev = container_of(dev, struct ib_ucm_device, dev); cdev_del(&ucm_dev->cdev); - clear_bit(ucm_dev->devnum, dev_map); + if (ucm_dev->devnum < IB_UCM_MAX_DEVICES) + clear_bit(ucm_dev->devnum, dev_map); + else + clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, dev_map); kfree(ucm_dev); } static const struct file_operations ucm_fops = { - .owner = THIS_MODULE, - .open = ib_ucm_open, + .owner = THIS_MODULE, + .open = ib_ucm_open, .release = ib_ucm_close, - .write = ib_ucm_write, + .write = ib_ucm_write, .poll = ib_ucm_poll, }; @@ -1237,8 +1240,32 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL); +static dev_t overflow_maj; +static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES); +static int find_overflow_devnum(void) +{ + int ret; + + if (!overflow_maj) { + ret = alloc_chrdev_region(&overflow_maj, 0, IB_UCM_MAX_DEVICES, + "infiniband_cm"); + if (ret) { + printk(KERN_ERR "ucm: couldn't register dynamic device number\n"); + return ret; + } + } + + ret = find_first_zero_bit(overflow_map, IB_UCM_MAX_DEVICES); + if (ret >= IB_UCM_MAX_DEVICES) + return -1; + + return ret; +} + static void ib_ucm_add_one(struct ib_device *device) { + int devnum; + dev_t base; struct ib_ucm_device *ucm_dev; if (!device->alloc_ucontext || @@ -1251,16 +1278,25 @@ static void ib_ucm_add_one(struct ib_device *device) ucm_dev->ib_dev = device; - ucm_dev->devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES); - if (ucm_dev->devnum >= IB_UCM_MAX_DEVICES) - goto err; - - set_bit(ucm_dev->devnum, dev_map); + devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES); + if (devnum >= IB_UCM_MAX_DEVICES) { + devnum = find_overflow_devnum(); + if (devnum < 0) + goto err; + + ucm_dev->devnum = devnum + IB_UCM_MAX_DEVICES; + base = devnum + overflow_maj; + set_bit(devnum, overflow_map); + } else { + ucm_dev->devnum = devnum; + base = devnum + IB_UCM_BASE_DEV; + set_bit(devnum, dev_map); + } cdev_init(&ucm_dev->cdev, &ucm_fops); ucm_dev->cdev.owner = THIS_MODULE; kobject_set_name(&ucm_dev->cdev.kobj, "ucm%d", ucm_dev->devnum); - if (cdev_add(&ucm_dev->cdev, IB_UCM_BASE_DEV + ucm_dev->devnum, 1)) + if (cdev_add(&ucm_dev->cdev, base, 1)) goto err; ucm_dev->dev.class = &cm_class; @@ -1281,7 +1317,10 @@ err_dev: device_unregister(&ucm_dev->dev); err_cdev: cdev_del(&ucm_dev->cdev); - clear_bit(ucm_dev->devnum, dev_map); + if (ucm_dev->devnum < IB_UCM_MAX_DEVICES) + clear_bit(devnum, dev_map); + else + clear_bit(devnum, overflow_map); err: kfree(ucm_dev); return; @@ -1340,6 +1379,8 @@ static void __exit ib_ucm_cleanup(void) ib_unregister_client(&ucm_client); class_remove_file(&cm_class, &class_attr_abi_version); unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES); + if (overflow_maj) + unregister_chrdev_region(overflow_maj, IB_UCM_MAX_DEVICES); idr_destroy(&ctx_id_table); } diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c index 8ec7876bedc..650b501eb14 100644 --- a/drivers/infiniband/core/ud_header.c +++ b/drivers/infiniband/core/ud_header.c @@ -181,6 +181,7 @@ static const struct ib_field deth_table[] = { * ib_ud_header_init - Initialize UD header structure * @payload_bytes:Length 
of packet payload * @grh_present:GRH flag (if non-zero, GRH will be included) + * @immediate_present: specify if immediate data should be used * @header:Structure to initialize * * ib_ud_header_init() initializes the lrh.link_version, lrh.link_next_header, @@ -191,21 +192,13 @@ static const struct ib_field deth_table[] = { */ void ib_ud_header_init(int payload_bytes, int grh_present, + int immediate_present, struct ib_ud_header *header) { - int header_len; u16 packet_length; memset(header, 0, sizeof *header); - header_len = - IB_LRH_BYTES + - IB_BTH_BYTES + - IB_DETH_BYTES; - if (grh_present) { - header_len += IB_GRH_BYTES; - } - header->lrh.link_version = 0; header->lrh.link_next_header = grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL; @@ -231,7 +224,8 @@ void ib_ud_header_init(int payload_bytes, header->lrh.packet_length = cpu_to_be16(packet_length); - if (header->immediate_present) + header->immediate_present = immediate_present; + if (immediate_present) header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; else header->bth.opcode = IB_OPCODE_UD_SEND_ONLY; diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 6f7c096abf1..4f906f0614f 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -136,7 +136,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, down_write(¤t->mm->mmap_sem); locked = npages + current->mm->locked_vm; - lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT; + lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) { ret = -ENOMEM; diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 7de02969ed7..02d360cfc2f 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -65,12 +65,9 @@ enum { }; /* - * Our lifetime rules for these structs are the following: each time a - * device special file is opened, we look up the corresponding struct - * ib_umad_port by minor in the umad_port[] table while holding the - * port_lock. If this lookup succeeds, we take a reference on the - * ib_umad_port's struct ib_umad_device while still holding the - * port_lock; if the lookup fails, we fail the open(). We drop these + * Our lifetime rules for these structs are the following: + * device special file is opened, we take a reference on the + * ib_umad_port's struct ib_umad_device. We drop these * references in the corresponding close(). * * In addition to references coming from open character devices, there @@ -78,19 +75,14 @@ enum { * module's reference taken when allocating the ib_umad_device in * ib_umad_add_one(). * - * When destroying an ib_umad_device, we clear all of its - * ib_umad_ports from umad_port[] while holding port_lock before - * dropping the module's reference to the ib_umad_device. This is - * always safe because any open() calls will either succeed and obtain - * a reference before we clear the umad_port[] entries, or fail after - * we clear the umad_port[] entries. + * When destroying an ib_umad_device, we drop the module's reference. 
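The ud_header change above gives ib_ud_header_init() an immediate_present parameter; callers pass the flag and the function now fills in header->immediate_present and the matching BTH opcode itself. A hedged call-site sketch (the caller-side variables are placeholders for whatever the ULP already has):

        struct ib_ud_header header;

        /* payload length, GRH present, immediate data present */
        ib_ud_header_init(payload_len, grh_present, has_imm, &header);
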
*/ struct ib_umad_port { - struct cdev *cdev; + struct cdev cdev; struct device *dev; - struct cdev *sm_cdev; + struct cdev sm_cdev; struct device *sm_dev; struct semaphore sm_sem; @@ -136,7 +128,6 @@ static struct class *umad_class; static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE); static DEFINE_SPINLOCK(port_lock); -static struct ib_umad_port *umad_port[IB_UMAD_MAX_PORTS]; static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS); static void ib_umad_add_one(struct ib_device *device); @@ -496,8 +487,8 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, ah_attr.ah_flags = IB_AH_GRH; memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16); ah_attr.grh.sgid_index = packet->mad.hdr.gid_index; - ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label); - ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit; + ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label); + ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit; ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class; } @@ -528,9 +519,9 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, goto err_ah; } - packet->msg->ah = ah; + packet->msg->ah = ah; packet->msg->timeout_ms = packet->mad.hdr.timeout_ms; - packet->msg->retries = packet->mad.hdr.retries; + packet->msg->retries = packet->mad.hdr.retries; packet->msg->context[0] = packet; /* Copy MAD header. Any RMPP header is already in place. */ @@ -779,15 +770,11 @@ static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd, /* * ib_umad_open() does not need the BKL: * - * - umad_port[] accesses are protected by port_lock, the - * ib_umad_port structures are properly reference counted, and + * - the ib_umad_port structures are properly reference counted, and * everything else is purely local to the file being created, so * races against other open calls are not a problem; * - the ioctl method does not affect any global state outside of the * file structure being operated on; - * - the port is added to umad_port[] as the last part of module - * initialization so the open method will either immediately run - * -ENXIO, or all required initialization will be done. 
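With struct cdev embedded in ib_umad_port (above) rather than allocated separately, the open() paths below recover the port straight from the inode with container_of(), which is what makes the old umad_port[] lookup table unnecessary. The idiom in isolation, with the real driver's reference counting and error handling omitted:

static int example_open(struct inode *inode, struct file *filp)
{
        struct ib_umad_port *port =
                container_of(inode->i_cdev, struct ib_umad_port, cdev);

        filp->private_data = port;      /* illustrative only */
        return 0;
}
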
*/ static int ib_umad_open(struct inode *inode, struct file *filp) { @@ -795,13 +782,10 @@ static int ib_umad_open(struct inode *inode, struct file *filp) struct ib_umad_file *file; int ret = 0; - spin_lock(&port_lock); - port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE]; + port = container_of(inode->i_cdev, struct ib_umad_port, cdev); if (port) kref_get(&port->umad_dev->ref); - spin_unlock(&port_lock); - - if (!port) + else return -ENXIO; mutex_lock(&port->file_mutex); @@ -872,16 +856,16 @@ static int ib_umad_close(struct inode *inode, struct file *filp) } static const struct file_operations umad_fops = { - .owner = THIS_MODULE, - .read = ib_umad_read, - .write = ib_umad_write, - .poll = ib_umad_poll, + .owner = THIS_MODULE, + .read = ib_umad_read, + .write = ib_umad_write, + .poll = ib_umad_poll, .unlocked_ioctl = ib_umad_ioctl, #ifdef CONFIG_COMPAT - .compat_ioctl = ib_umad_compat_ioctl, + .compat_ioctl = ib_umad_compat_ioctl, #endif - .open = ib_umad_open, - .release = ib_umad_close + .open = ib_umad_open, + .release = ib_umad_close }; static int ib_umad_sm_open(struct inode *inode, struct file *filp) @@ -892,13 +876,10 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp) }; int ret; - spin_lock(&port_lock); - port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE - IB_UMAD_MAX_PORTS]; + port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev); if (port) kref_get(&port->umad_dev->ref); - spin_unlock(&port_lock); - - if (!port) + else return -ENXIO; if (filp->f_flags & O_NONBLOCK) { @@ -949,8 +930,8 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp) } static const struct file_operations umad_sm_fops = { - .owner = THIS_MODULE, - .open = ib_umad_sm_open, + .owner = THIS_MODULE, + .open = ib_umad_sm_open, .release = ib_umad_sm_close }; @@ -990,16 +971,51 @@ static ssize_t show_abi_version(struct class *class, char *buf) } static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL); +static dev_t overflow_maj; +static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS); +static int find_overflow_devnum(void) +{ + int ret; + + if (!overflow_maj) { + ret = alloc_chrdev_region(&overflow_maj, 0, IB_UMAD_MAX_PORTS * 2, + "infiniband_mad"); + if (ret) { + printk(KERN_ERR "user_mad: couldn't register dynamic device number\n"); + return ret; + } + } + + ret = find_first_zero_bit(overflow_map, IB_UMAD_MAX_PORTS); + if (ret >= IB_UMAD_MAX_PORTS) + return -1; + + return ret; +} + static int ib_umad_init_port(struct ib_device *device, int port_num, struct ib_umad_port *port) { + int devnum; + dev_t base; + spin_lock(&port_lock); - port->dev_num = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS); - if (port->dev_num >= IB_UMAD_MAX_PORTS) { + devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS); + if (devnum >= IB_UMAD_MAX_PORTS) { spin_unlock(&port_lock); - return -1; + devnum = find_overflow_devnum(); + if (devnum < 0) + return -1; + + spin_lock(&port_lock); + port->dev_num = devnum + IB_UMAD_MAX_PORTS; + base = devnum + overflow_maj; + set_bit(devnum, overflow_map); + } else { + port->dev_num = devnum; + base = devnum + base_dev; + set_bit(devnum, dev_map); } - set_bit(port->dev_num, dev_map); spin_unlock(&port_lock); port->ib_dev = device; @@ -1008,17 +1024,14 @@ static int ib_umad_init_port(struct ib_device *device, int port_num, mutex_init(&port->file_mutex); INIT_LIST_HEAD(&port->file_list); - port->cdev = cdev_alloc(); - if (!port->cdev) - return -1; - port->cdev->owner = THIS_MODULE; - port->cdev->ops = &umad_fops; - 
kobject_set_name(&port->cdev->kobj, "umad%d", port->dev_num); - if (cdev_add(port->cdev, base_dev + port->dev_num, 1)) + cdev_init(&port->cdev, &umad_fops); + port->cdev.owner = THIS_MODULE; + kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num); + if (cdev_add(&port->cdev, base, 1)) goto err_cdev; port->dev = device_create(umad_class, device->dma_device, - port->cdev->dev, port, + port->cdev.dev, port, "umad%d", port->dev_num); if (IS_ERR(port->dev)) goto err_cdev; @@ -1028,17 +1041,15 @@ static int ib_umad_init_port(struct ib_device *device, int port_num, if (device_create_file(port->dev, &dev_attr_port)) goto err_dev; - port->sm_cdev = cdev_alloc(); - if (!port->sm_cdev) - goto err_dev; - port->sm_cdev->owner = THIS_MODULE; - port->sm_cdev->ops = &umad_sm_fops; - kobject_set_name(&port->sm_cdev->kobj, "issm%d", port->dev_num); - if (cdev_add(port->sm_cdev, base_dev + port->dev_num + IB_UMAD_MAX_PORTS, 1)) + base += IB_UMAD_MAX_PORTS; + cdev_init(&port->sm_cdev, &umad_sm_fops); + port->sm_cdev.owner = THIS_MODULE; + kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num); + if (cdev_add(&port->sm_cdev, base, 1)) goto err_sm_cdev; port->sm_dev = device_create(umad_class, device->dma_device, - port->sm_cdev->dev, port, + port->sm_cdev.dev, port, "issm%d", port->dev_num); if (IS_ERR(port->sm_dev)) goto err_sm_cdev; @@ -1048,24 +1059,23 @@ static int ib_umad_init_port(struct ib_device *device, int port_num, if (device_create_file(port->sm_dev, &dev_attr_port)) goto err_sm_dev; - spin_lock(&port_lock); - umad_port[port->dev_num] = port; - spin_unlock(&port_lock); - return 0; err_sm_dev: - device_destroy(umad_class, port->sm_cdev->dev); + device_destroy(umad_class, port->sm_cdev.dev); err_sm_cdev: - cdev_del(port->sm_cdev); + cdev_del(&port->sm_cdev); err_dev: - device_destroy(umad_class, port->cdev->dev); + device_destroy(umad_class, port->cdev.dev); err_cdev: - cdev_del(port->cdev); - clear_bit(port->dev_num, dev_map); + cdev_del(&port->cdev); + if (port->dev_num < IB_UMAD_MAX_PORTS) + clear_bit(devnum, dev_map); + else + clear_bit(devnum, overflow_map); return -1; } @@ -1079,15 +1089,11 @@ static void ib_umad_kill_port(struct ib_umad_port *port) dev_set_drvdata(port->dev, NULL); dev_set_drvdata(port->sm_dev, NULL); - device_destroy(umad_class, port->cdev->dev); - device_destroy(umad_class, port->sm_cdev->dev); + device_destroy(umad_class, port->cdev.dev); + device_destroy(umad_class, port->sm_cdev.dev); - cdev_del(port->cdev); - cdev_del(port->sm_cdev); - - spin_lock(&port_lock); - umad_port[port->dev_num] = NULL; - spin_unlock(&port_lock); + cdev_del(&port->cdev); + cdev_del(&port->sm_cdev); mutex_lock(&port->file_mutex); @@ -1106,7 +1112,10 @@ static void ib_umad_kill_port(struct ib_umad_port *port) mutex_unlock(&port->file_mutex); - clear_bit(port->dev_num, dev_map); + if (port->dev_num < IB_UMAD_MAX_PORTS) + clear_bit(port->dev_num, dev_map); + else + clear_bit(port->dev_num - IB_UMAD_MAX_PORTS, overflow_map); } static void ib_umad_add_one(struct ib_device *device) @@ -1214,6 +1223,8 @@ static void __exit ib_umad_cleanup(void) ib_unregister_client(&umad_client); class_destroy(umad_class); unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2); + if (overflow_maj) + unregister_chrdev_region(overflow_maj, IB_UMAD_MAX_PORTS * 2); } module_init(ib_umad_init); diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index b3ea9587dc8..a078e5624d2 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -41,6 +41,7 @@ 
#include <linux/idr.h> #include <linux/mutex.h> #include <linux/completion.h> +#include <linux/cdev.h> #include <rdma/ib_verbs.h> #include <rdma/ib_umem.h> @@ -69,23 +70,23 @@ struct ib_uverbs_device { struct kref ref; + int num_comp_vectors; struct completion comp; - int devnum; - struct cdev *cdev; struct device *dev; struct ib_device *ib_dev; - int num_comp_vectors; + int devnum; + struct cdev cdev; }; struct ib_uverbs_event_file { struct kref ref; + int is_async; struct ib_uverbs_file *uverbs_file; spinlock_t lock; + int is_closed; wait_queue_head_t poll_wait; struct fasync_struct *async_queue; struct list_head event_list; - int is_async; - int is_closed; }; struct ib_uverbs_file { @@ -145,7 +146,7 @@ extern struct idr ib_uverbs_srq_idr; void idr_remove_uobj(struct idr *idp, struct ib_uobject *uobj); struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file, - int is_async, int *fd); + int is_async); struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd); void ib_uverbs_release_ucq(struct ib_uverbs_file *file, diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 112d3970222..f71cf138d67 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -301,10 +301,15 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file, resp.num_comp_vectors = file->device->num_comp_vectors; - filp = ib_uverbs_alloc_event_file(file, 1, &resp.async_fd); + ret = get_unused_fd(); + if (ret < 0) + goto err_free; + resp.async_fd = ret; + + filp = ib_uverbs_alloc_event_file(file, 1); if (IS_ERR(filp)) { ret = PTR_ERR(filp); - goto err_free; + goto err_fd; } if (copy_to_user((void __user *) (unsigned long) cmd.response, @@ -332,9 +337,11 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file, return in_len; err_file: - put_unused_fd(resp.async_fd); fput(filp); +err_fd: + put_unused_fd(resp.async_fd); + err_free: ibdev->dealloc_ucontext(ucontext); @@ -715,6 +722,7 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file, struct ib_uverbs_create_comp_channel cmd; struct ib_uverbs_create_comp_channel_resp resp; struct file *filp; + int ret; if (out_len < sizeof resp) return -ENOSPC; @@ -722,9 +730,16 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file, if (copy_from_user(&cmd, buf, sizeof cmd)) return -EFAULT; - filp = ib_uverbs_alloc_event_file(file, 0, &resp.fd); - if (IS_ERR(filp)) + ret = get_unused_fd(); + if (ret < 0) + return ret; + resp.fd = ret; + + filp = ib_uverbs_alloc_event_file(file, 0); + if (IS_ERR(filp)) { + put_unused_fd(resp.fd); return PTR_ERR(filp); + } if (copy_to_user((void __user *) (unsigned long) cmd.response, &resp, sizeof resp)) { diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 5f284ffd430..4fa2e651644 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -42,8 +42,8 @@ #include <linux/poll.h> #include <linux/sched.h> #include <linux/file.h> -#include <linux/mount.h> #include <linux/cdev.h> +#include <linux/anon_inodes.h> #include <asm/uaccess.h> @@ -53,8 +53,6 @@ MODULE_AUTHOR("Roland Dreier"); MODULE_DESCRIPTION("InfiniBand userspace verbs access"); MODULE_LICENSE("Dual BSD/GPL"); -#define INFINIBANDEVENTFS_MAGIC 0x49426576 /* "IBev" */ - enum { IB_UVERBS_MAJOR = 231, IB_UVERBS_BASE_MINOR = 192, @@ -75,44 +73,41 @@ DEFINE_IDR(ib_uverbs_qp_idr); DEFINE_IDR(ib_uverbs_srq_idr); static DEFINE_SPINLOCK(map_lock); -static struct ib_uverbs_device 
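The uverbs_cmd.c hunks above pull fd allocation out of ib_uverbs_alloc_event_file(): the caller reserves a descriptor first, then creates the file, and unwinds with put_unused_fd()/fput() if anything later fails, installing the fd only once nothing else can fail. A hedged sketch of that ordering follows; setup_event_fd(), make_event_file() and copy_response_to_user() are hypothetical stand-ins, and get_unused_fd() is spelled as in the patch (newer kernels use get_unused_fd_flags(0)).

#include <linux/err.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/uaccess.h>

/* Stand-in for the real file constructor (ib_uverbs_alloc_event_file() in
 * the patch; an anon-inode sketch of it appears further down). */
static struct file *make_event_file(void *priv)
{
	return ERR_PTR(-ENOSYS);
}

static int copy_response_to_user(void __user *resp, int fd)
{
	/* placeholder response: just the fd itself */
	return copy_to_user(resp, &fd, sizeof(fd)) ? -EFAULT : 0;
}

static int setup_event_fd(void *priv, void __user *resp)
{
	struct file *filp;
	int fd, ret;

	fd = get_unused_fd();		/* reserve a number; nothing is visible yet */
	if (fd < 0)
		return fd;

	filp = make_event_file(priv);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	ret = copy_response_to_user(resp, fd);
	if (ret)
		goto err_file;

	fd_install(fd, filp);		/* publish only once nothing else can fail */
	return fd;

err_file:
	fput(filp);			/* never installed, so fput() alone drops it */
err_fd:
	put_unused_fd(fd);
	return ret;
}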
*dev_table[IB_UVERBS_MAX_DEVICES]; static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file, const char __user *buf, int in_len, int out_len) = { - [IB_USER_VERBS_CMD_GET_CONTEXT] = ib_uverbs_get_context, - [IB_USER_VERBS_CMD_QUERY_DEVICE] = ib_uverbs_query_device, - [IB_USER_VERBS_CMD_QUERY_PORT] = ib_uverbs_query_port, - [IB_USER_VERBS_CMD_ALLOC_PD] = ib_uverbs_alloc_pd, - [IB_USER_VERBS_CMD_DEALLOC_PD] = ib_uverbs_dealloc_pd, - [IB_USER_VERBS_CMD_REG_MR] = ib_uverbs_reg_mr, - [IB_USER_VERBS_CMD_DEREG_MR] = ib_uverbs_dereg_mr, + [IB_USER_VERBS_CMD_GET_CONTEXT] = ib_uverbs_get_context, + [IB_USER_VERBS_CMD_QUERY_DEVICE] = ib_uverbs_query_device, + [IB_USER_VERBS_CMD_QUERY_PORT] = ib_uverbs_query_port, + [IB_USER_VERBS_CMD_ALLOC_PD] = ib_uverbs_alloc_pd, + [IB_USER_VERBS_CMD_DEALLOC_PD] = ib_uverbs_dealloc_pd, + [IB_USER_VERBS_CMD_REG_MR] = ib_uverbs_reg_mr, + [IB_USER_VERBS_CMD_DEREG_MR] = ib_uverbs_dereg_mr, [IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL] = ib_uverbs_create_comp_channel, - [IB_USER_VERBS_CMD_CREATE_CQ] = ib_uverbs_create_cq, - [IB_USER_VERBS_CMD_RESIZE_CQ] = ib_uverbs_resize_cq, - [IB_USER_VERBS_CMD_POLL_CQ] = ib_uverbs_poll_cq, - [IB_USER_VERBS_CMD_REQ_NOTIFY_CQ] = ib_uverbs_req_notify_cq, - [IB_USER_VERBS_CMD_DESTROY_CQ] = ib_uverbs_destroy_cq, - [IB_USER_VERBS_CMD_CREATE_QP] = ib_uverbs_create_qp, - [IB_USER_VERBS_CMD_QUERY_QP] = ib_uverbs_query_qp, - [IB_USER_VERBS_CMD_MODIFY_QP] = ib_uverbs_modify_qp, - [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp, - [IB_USER_VERBS_CMD_POST_SEND] = ib_uverbs_post_send, - [IB_USER_VERBS_CMD_POST_RECV] = ib_uverbs_post_recv, - [IB_USER_VERBS_CMD_POST_SRQ_RECV] = ib_uverbs_post_srq_recv, - [IB_USER_VERBS_CMD_CREATE_AH] = ib_uverbs_create_ah, - [IB_USER_VERBS_CMD_DESTROY_AH] = ib_uverbs_destroy_ah, - [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast, - [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast, - [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq, - [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq, - [IB_USER_VERBS_CMD_QUERY_SRQ] = ib_uverbs_query_srq, - [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq, + [IB_USER_VERBS_CMD_CREATE_CQ] = ib_uverbs_create_cq, + [IB_USER_VERBS_CMD_RESIZE_CQ] = ib_uverbs_resize_cq, + [IB_USER_VERBS_CMD_POLL_CQ] = ib_uverbs_poll_cq, + [IB_USER_VERBS_CMD_REQ_NOTIFY_CQ] = ib_uverbs_req_notify_cq, + [IB_USER_VERBS_CMD_DESTROY_CQ] = ib_uverbs_destroy_cq, + [IB_USER_VERBS_CMD_CREATE_QP] = ib_uverbs_create_qp, + [IB_USER_VERBS_CMD_QUERY_QP] = ib_uverbs_query_qp, + [IB_USER_VERBS_CMD_MODIFY_QP] = ib_uverbs_modify_qp, + [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp, + [IB_USER_VERBS_CMD_POST_SEND] = ib_uverbs_post_send, + [IB_USER_VERBS_CMD_POST_RECV] = ib_uverbs_post_recv, + [IB_USER_VERBS_CMD_POST_SRQ_RECV] = ib_uverbs_post_srq_recv, + [IB_USER_VERBS_CMD_CREATE_AH] = ib_uverbs_create_ah, + [IB_USER_VERBS_CMD_DESTROY_AH] = ib_uverbs_destroy_ah, + [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast, + [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast, + [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq, + [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq, + [IB_USER_VERBS_CMD_QUERY_SRQ] = ib_uverbs_query_srq, + [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq, }; -static struct vfsmount *uverbs_event_mnt; - static void ib_uverbs_add_one(struct ib_device *device); static void ib_uverbs_remove_one(struct ib_device *device); @@ -370,7 +365,7 @@ static int 
ib_uverbs_event_close(struct inode *inode, struct file *filp) static const struct file_operations uverbs_event_fops = { .owner = THIS_MODULE, - .read = ib_uverbs_event_read, + .read = ib_uverbs_event_read, .poll = ib_uverbs_event_poll, .release = ib_uverbs_event_close, .fasync = ib_uverbs_event_fasync @@ -489,12 +484,10 @@ void ib_uverbs_event_handler(struct ib_event_handler *handler, } struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file, - int is_async, int *fd) + int is_async) { struct ib_uverbs_event_file *ev_file; - struct path path; struct file *filp; - int ret; ev_file = kmalloc(sizeof *ev_file, GFP_KERNEL); if (!ev_file) @@ -509,38 +502,12 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file, ev_file->is_async = is_async; ev_file->is_closed = 0; - *fd = get_unused_fd(); - if (*fd < 0) { - ret = *fd; - goto err; - } - - /* - * fops_get() can't fail here, because we're coming from a - * system call on a uverbs file, which will already have a - * module reference. - */ - path.mnt = uverbs_event_mnt; - path.dentry = uverbs_event_mnt->mnt_root; - path_get(&path); - filp = alloc_file(&path, FMODE_READ, fops_get(&uverbs_event_fops)); - if (!filp) { - ret = -ENFILE; - goto err_fd; - } - - filp->private_data = ev_file; + filp = anon_inode_getfile("[infinibandevent]", &uverbs_event_fops, + ev_file, O_RDONLY); + if (IS_ERR(filp)) + kfree(ev_file); return filp; - -err_fd: - fops_put(&uverbs_event_fops); - path_put(&path); - put_unused_fd(*fd); - -err: - kfree(ev_file); - return ERR_PTR(ret); } /* @@ -617,14 +584,12 @@ static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma) /* * ib_uverbs_open() does not need the BKL: * - * - dev_table[] accesses are protected by map_lock, the - * ib_uverbs_device structures are properly reference counted, and + * - the ib_uverbs_device structures are properly reference counted and * everything else is purely local to the file being created, so * races against other open calls are not a problem; * - there is no ioctl method to race against; - * - the device is added to dev_table[] as the last part of module - * initialization, the open method will either immediately run - * -ENXIO, or all required initialization will be done. + * - the open method will either immediately run -ENXIO, or all + * required initialization will be done. 
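ib_uverbs_alloc_event_file() above now gets its struct file from anon_inode_getfile() instead of allocating one on the private "infinibandeventfs" mount, which removes the get_sb_pseudo()/kern_mount() machinery entirely. A minimal sketch of that call, with a made-up my_event_file structure and fops standing in for the driver's own:

#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_event_file {			/* stand-in for ib_uverbs_event_file */
	spinlock_t lock;
	struct list_head event_list;
	int is_async;
};

static const struct file_operations my_event_fops;	/* read/poll/release as usual */

static struct file *my_alloc_event_file(int is_async)
{
	struct my_event_file *ev;
	struct file *filp;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&ev->lock);
	INIT_LIST_HEAD(&ev->event_list);
	ev->is_async = is_async;

	/* One call replaces the dentry/path juggling and fops_get() of the
	 * old pseudo filesystem; 'ev' ends up in filp->private_data. */
	filp = anon_inode_getfile("[my-event]", &my_event_fops, ev, O_RDONLY);
	if (IS_ERR(filp))
		kfree(ev);		/* nothing else owns it yet */

	return filp;
}

The helper no longer consumes an fd, so the caller pairs it with get_unused_fd()/fd_install() as in the previous sketch; on error the private data is freed here because no file ever took ownership of it.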
*/ static int ib_uverbs_open(struct inode *inode, struct file *filp) { @@ -632,13 +597,10 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp) struct ib_uverbs_file *file; int ret; - spin_lock(&map_lock); - dev = dev_table[iminor(inode) - IB_UVERBS_BASE_MINOR]; + dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev); if (dev) kref_get(&dev->ref); - spin_unlock(&map_lock); - - if (!dev) + else return -ENXIO; if (!try_module_get(dev->ib_dev->owner)) { @@ -685,17 +647,17 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp) } static const struct file_operations uverbs_fops = { - .owner = THIS_MODULE, - .write = ib_uverbs_write, - .open = ib_uverbs_open, + .owner = THIS_MODULE, + .write = ib_uverbs_write, + .open = ib_uverbs_open, .release = ib_uverbs_close }; static const struct file_operations uverbs_mmap_fops = { - .owner = THIS_MODULE, - .write = ib_uverbs_write, + .owner = THIS_MODULE, + .write = ib_uverbs_write, .mmap = ib_uverbs_mmap, - .open = ib_uverbs_open, + .open = ib_uverbs_open, .release = ib_uverbs_close }; @@ -735,8 +697,38 @@ static ssize_t show_abi_version(struct class *class, char *buf) } static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL); +static dev_t overflow_maj; +static DECLARE_BITMAP(overflow_map, IB_UVERBS_MAX_DEVICES); + +/* + * If we have more than IB_UVERBS_MAX_DEVICES, dynamically overflow by + * requesting a new major number and doubling the number of max devices we + * support. It's stupid, but simple. + */ +static int find_overflow_devnum(void) +{ + int ret; + + if (!overflow_maj) { + ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES, + "infiniband_verbs"); + if (ret) { + printk(KERN_ERR "user_verbs: couldn't register dynamic device number\n"); + return ret; + } + } + + ret = find_first_zero_bit(overflow_map, IB_UVERBS_MAX_DEVICES); + if (ret >= IB_UVERBS_MAX_DEVICES) + return -1; + + return ret; +} + static void ib_uverbs_add_one(struct ib_device *device) { + int devnum; + dev_t base; struct ib_uverbs_device *uverbs_dev; if (!device->alloc_ucontext) @@ -750,28 +742,36 @@ static void ib_uverbs_add_one(struct ib_device *device) init_completion(&uverbs_dev->comp); spin_lock(&map_lock); - uverbs_dev->devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES); - if (uverbs_dev->devnum >= IB_UVERBS_MAX_DEVICES) { + devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES); + if (devnum >= IB_UVERBS_MAX_DEVICES) { spin_unlock(&map_lock); - goto err; + devnum = find_overflow_devnum(); + if (devnum < 0) + goto err; + + spin_lock(&map_lock); + uverbs_dev->devnum = devnum + IB_UVERBS_MAX_DEVICES; + base = devnum + overflow_maj; + set_bit(devnum, overflow_map); + } else { + uverbs_dev->devnum = devnum; + base = devnum + IB_UVERBS_BASE_DEV; + set_bit(devnum, dev_map); } - set_bit(uverbs_dev->devnum, dev_map); spin_unlock(&map_lock); uverbs_dev->ib_dev = device; uverbs_dev->num_comp_vectors = device->num_comp_vectors; - uverbs_dev->cdev = cdev_alloc(); - if (!uverbs_dev->cdev) - goto err; - uverbs_dev->cdev->owner = THIS_MODULE; - uverbs_dev->cdev->ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops; - kobject_set_name(&uverbs_dev->cdev->kobj, "uverbs%d", uverbs_dev->devnum); - if (cdev_add(uverbs_dev->cdev, IB_UVERBS_BASE_DEV + uverbs_dev->devnum, 1)) + cdev_init(&uverbs_dev->cdev, NULL); + uverbs_dev->cdev.owner = THIS_MODULE; + uverbs_dev->cdev.ops = device->mmap ? 
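find_overflow_devnum() above, added to both uverbs and umad, falls back to a second, dynamically allocated chrdev region once the bitmap for the statically registered minors fills up. A hedged sketch of the allocation side follows; MY_MAX_DEVS, my_base and the function names are placeholders, and the fixed region is assumed to have been registered with register_chrdev_region() at module init.

#include <linux/bitops.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define MY_MAX_DEVS 32				/* placeholder for the fixed region size */

static const dev_t my_base = MKDEV(231, 0);	/* placeholder static base */
static DECLARE_BITMAP(dev_map, MY_MAX_DEVS);
static DECLARE_BITMAP(overflow_map, MY_MAX_DEVS);
static dev_t overflow_maj;			/* 0 until the overflow region exists */
static DEFINE_SPINLOCK(map_lock);

static int find_overflow_devnum(void)
{
	int ret;

	if (!overflow_maj) {
		ret = alloc_chrdev_region(&overflow_maj, 0, MY_MAX_DEVS, "my_overflow");
		if (ret)
			return ret;
	}

	ret = find_first_zero_bit(overflow_map, MY_MAX_DEVS);
	if (ret >= MY_MAX_DEVS)
		return -1;
	return ret;
}

/* Pick a dev_t for a new device: fixed region first, overflow second. */
static int pick_devnum(dev_t *base)
{
	int devnum;

	spin_lock(&map_lock);
	devnum = find_first_zero_bit(dev_map, MY_MAX_DEVS);
	if (devnum >= MY_MAX_DEVS) {
		spin_unlock(&map_lock);
		devnum = find_overflow_devnum();
		if (devnum < 0)
			return -1;
		spin_lock(&map_lock);
		set_bit(devnum, overflow_map);
		*base = overflow_maj + devnum;
		devnum += MY_MAX_DEVS;		/* keep device numbering unique */
	} else {
		set_bit(devnum, dev_map);
		*base = my_base + devnum;
	}
	spin_unlock(&map_lock);
	return devnum;
}

Device numbers at or above MY_MAX_DEVS mark entries from the overflow region; that is how the teardown paths in the patch know whether to clear dev_map or overflow_map, and why the module exit paths only unregister overflow_maj when it is non-zero.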
&uverbs_mmap_fops : &uverbs_fops; + kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum); + if (cdev_add(&uverbs_dev->cdev, base, 1)) goto err_cdev; uverbs_dev->dev = device_create(uverbs_class, device->dma_device, - uverbs_dev->cdev->dev, uverbs_dev, + uverbs_dev->cdev.dev, uverbs_dev, "uverbs%d", uverbs_dev->devnum); if (IS_ERR(uverbs_dev->dev)) goto err_cdev; @@ -781,20 +781,19 @@ static void ib_uverbs_add_one(struct ib_device *device) if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version)) goto err_class; - spin_lock(&map_lock); - dev_table[uverbs_dev->devnum] = uverbs_dev; - spin_unlock(&map_lock); - ib_set_client_data(device, &uverbs_client, uverbs_dev); return; err_class: - device_destroy(uverbs_class, uverbs_dev->cdev->dev); + device_destroy(uverbs_class, uverbs_dev->cdev.dev); err_cdev: - cdev_del(uverbs_dev->cdev); - clear_bit(uverbs_dev->devnum, dev_map); + cdev_del(&uverbs_dev->cdev); + if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES) + clear_bit(devnum, dev_map); + else + clear_bit(devnum, overflow_map); err: kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); @@ -811,35 +810,19 @@ static void ib_uverbs_remove_one(struct ib_device *device) return; dev_set_drvdata(uverbs_dev->dev, NULL); - device_destroy(uverbs_class, uverbs_dev->cdev->dev); - cdev_del(uverbs_dev->cdev); + device_destroy(uverbs_class, uverbs_dev->cdev.dev); + cdev_del(&uverbs_dev->cdev); - spin_lock(&map_lock); - dev_table[uverbs_dev->devnum] = NULL; - spin_unlock(&map_lock); - - clear_bit(uverbs_dev->devnum, dev_map); + if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES) + clear_bit(uverbs_dev->devnum, dev_map); + else + clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map); kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); wait_for_completion(&uverbs_dev->comp); kfree(uverbs_dev); } -static int uverbs_event_get_sb(struct file_system_type *fs_type, int flags, - const char *dev_name, void *data, - struct vfsmount *mnt) -{ - return get_sb_pseudo(fs_type, "infinibandevent:", NULL, - INFINIBANDEVENTFS_MAGIC, mnt); -} - -static struct file_system_type uverbs_event_fs = { - /* No owner field so module can be unloaded */ - .name = "infinibandeventfs", - .get_sb = uverbs_event_get_sb, - .kill_sb = kill_litter_super -}; - static int __init ib_uverbs_init(void) { int ret; @@ -864,33 +847,14 @@ static int __init ib_uverbs_init(void) goto out_class; } - ret = register_filesystem(&uverbs_event_fs); - if (ret) { - printk(KERN_ERR "user_verbs: couldn't register infinibandeventfs\n"); - goto out_class; - } - - uverbs_event_mnt = kern_mount(&uverbs_event_fs); - if (IS_ERR(uverbs_event_mnt)) { - ret = PTR_ERR(uverbs_event_mnt); - printk(KERN_ERR "user_verbs: couldn't mount infinibandeventfs\n"); - goto out_fs; - } - ret = ib_register_client(&uverbs_client); if (ret) { printk(KERN_ERR "user_verbs: couldn't register client\n"); - goto out_mnt; + goto out_class; } return 0; -out_mnt: - mntput(uverbs_event_mnt); - -out_fs: - unregister_filesystem(&uverbs_event_fs); - out_class: class_destroy(uverbs_class); @@ -904,10 +868,10 @@ out: static void __exit ib_uverbs_cleanup(void) { ib_unregister_client(&uverbs_client); - mntput(uverbs_event_mnt); - unregister_filesystem(&uverbs_event_fs); class_destroy(uverbs_class); unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES); + if (overflow_maj) + unregister_chrdev_region(overflow_maj, IB_UVERBS_MAX_DEVICES); idr_destroy(&ib_uverbs_pd_idr); idr_destroy(&ib_uverbs_mr_idr); idr_destroy(&ib_uverbs_mw_idr); diff --git 
a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c index 0677fc7dfd5..a28e862f2d6 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.c +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c @@ -109,7 +109,6 @@ int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq, while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) { udelay(1); if (i++ > 1000000) { - BUG_ON(1); printk(KERN_ERR "%s: stalled rnic\n", rdev_p->dev_name); return -EIO; @@ -155,7 +154,7 @@ static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid) return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb); } -int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq) +int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel) { struct rdma_cq_setup setup; int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe); @@ -163,12 +162,12 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq) cq->cqid = cxio_hal_get_cqid(rdev_p->rscp); if (!cq->cqid) return -ENOMEM; - cq->sw_queue = kzalloc(size, GFP_KERNEL); - if (!cq->sw_queue) - return -ENOMEM; - cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), - (1UL << (cq->size_log2)) * - sizeof(struct t3_cqe), + if (kernel) { + cq->sw_queue = kzalloc(size, GFP_KERNEL); + if (!cq->sw_queue) + return -ENOMEM; + } + cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size, &(cq->dma_addr), GFP_KERNEL); if (!cq->queue) { kfree(cq->sw_queue); diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h index f3d440cc68f..073373c2c56 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.h +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h @@ -53,7 +53,7 @@ #define T3_MAX_PBL_SIZE 256 #define T3_MAX_RQ_SIZE 1024 #define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1) -#define T3_MAX_CQ_DEPTH 8192 +#define T3_MAX_CQ_DEPTH 262144 #define T3_MAX_NUM_STAG (1<<15) #define T3_MAX_MR_SIZE 0x100000000ULL #define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */ @@ -157,7 +157,7 @@ int cxio_rdev_open(struct cxio_rdev *rdev); void cxio_rdev_close(struct cxio_rdev *rdev); int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq, enum t3_cq_opcode op, u32 credit); -int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq); +int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq, int kernel); int cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq); int cxio_resize_cq(struct cxio_rdev *rdev, struct t3_cq *cq); void cxio_release_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx); diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h index a197a5b7ac7..15073b2da1c 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_wr.h +++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h @@ -730,7 +730,22 @@ struct t3_cq { static inline void cxio_set_wq_in_error(struct t3_wq *wq) { - wq->queue->wq_in_err.err = 1; + wq->queue->wq_in_err.err |= 1; +} + +static inline void cxio_disable_wq_db(struct t3_wq *wq) +{ + wq->queue->wq_in_err.err |= 2; +} + +static inline void cxio_enable_wq_db(struct t3_wq *wq) +{ + wq->queue->wq_in_err.err &= ~2; +} + +static inline int cxio_wq_db_enabled(struct t3_wq *wq) +{ + return !(wq->queue->wq_in_err.err & 2); } static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq) diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c index b0ea0105ddf..ee1d8b4d454 100644 --- a/drivers/infiniband/hw/cxgb3/iwch.c +++ b/drivers/infiniband/hw/cxgb3/iwch.c @@ -65,6 +65,46 @@ struct cxgb3_client t3c_client = { static LIST_HEAD(dev_list); static 
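The cxio_wr.h hunk above turns wq_in_err.err into a two-bit flag word: bit 0 keeps its old "queue in error" meaning (now set with |= rather than =), and bit 1 means "doorbells held off"; the post paths then only ring the doorbell when cxio_wq_db_enabled() says so. A hedged sketch of the same flag discipline on a hypothetical queue (my_wq, the flag names and the writel() doorbell are illustrative):

#include <linux/io.h>
#include <linux/types.h>

#define WQ_FLAG_ERROR		0x1	/* queue is in error */
#define WQ_FLAG_DB_DISABLED	0x2	/* hold off hardware doorbells */

struct my_wq {
	unsigned int flags;
	unsigned int qpid;
	void __iomem *doorbell;
};

static inline void my_wq_set_error(struct my_wq *wq)
{
	wq->flags |= WQ_FLAG_ERROR;	/* OR, so the doorbell bit survives */
}

static inline void my_wq_disable_db(struct my_wq *wq)
{
	wq->flags |= WQ_FLAG_DB_DISABLED;
}

static inline void my_wq_enable_db(struct my_wq *wq)
{
	wq->flags &= ~WQ_FLAG_DB_DISABLED;
}

static inline int my_wq_db_enabled(struct my_wq *wq)
{
	return !(wq->flags & WQ_FLAG_DB_DISABLED);
}

/* In the post path, queue the WR as usual but skip the MMIO write while
 * doorbells are held off; the re-enable path rings once to catch up. */
static void my_post_wr(struct my_wq *wq)
{
	if (my_wq_db_enabled(wq))
		writel(wq->qpid, wq->doorbell);	/* placeholder doorbell write */
}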
DEFINE_MUTEX(dev_mutex); +static int disable_qp_db(int id, void *p, void *data) +{ + struct iwch_qp *qhp = p; + + cxio_disable_wq_db(&qhp->wq); + return 0; +} + +static int enable_qp_db(int id, void *p, void *data) +{ + struct iwch_qp *qhp = p; + + if (data) + ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell, qhp->wq.qpid); + cxio_enable_wq_db(&qhp->wq); + return 0; +} + +static void disable_dbs(struct iwch_dev *rnicp) +{ + spin_lock_irq(&rnicp->lock); + idr_for_each(&rnicp->qpidr, disable_qp_db, NULL); + spin_unlock_irq(&rnicp->lock); +} + +static void enable_dbs(struct iwch_dev *rnicp, int ring_db) +{ + spin_lock_irq(&rnicp->lock); + idr_for_each(&rnicp->qpidr, enable_qp_db, + (void *)(unsigned long)ring_db); + spin_unlock_irq(&rnicp->lock); +} + +static void iwch_db_drop_task(struct work_struct *work) +{ + struct iwch_dev *rnicp = container_of(work, struct iwch_dev, + db_drop_task.work); + enable_dbs(rnicp, 1); +} + static void rnic_init(struct iwch_dev *rnicp) { PDBG("%s iwch_dev %p\n", __func__, rnicp); @@ -72,6 +112,7 @@ static void rnic_init(struct iwch_dev *rnicp) idr_init(&rnicp->qpidr); idr_init(&rnicp->mmidr); spin_lock_init(&rnicp->lock); + INIT_DELAYED_WORK(&rnicp->db_drop_task, iwch_db_drop_task); rnicp->attr.max_qps = T3_MAX_NUM_QP - 32; rnicp->attr.max_wrs = T3_MAX_QP_DEPTH; @@ -147,6 +188,8 @@ static void close_rnic_dev(struct t3cdev *tdev) mutex_lock(&dev_mutex); list_for_each_entry_safe(dev, tmp, &dev_list, entry) { if (dev->rdev.t3cdev_p == tdev) { + dev->rdev.flags = CXIO_ERROR_FATAL; + cancel_delayed_work_sync(&dev->db_drop_task); list_del(&dev->entry); iwch_unregister_device(dev); cxio_rdev_close(&dev->rdev); @@ -165,7 +208,8 @@ static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id) struct cxio_rdev *rdev = tdev->ulp; struct iwch_dev *rnicp; struct ib_event event; - u32 portnum = port_id + 1; + u32 portnum = port_id + 1; + int dispatch = 0; if (!rdev) return; @@ -174,21 +218,49 @@ static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id) case OFFLOAD_STATUS_DOWN: { rdev->flags = CXIO_ERROR_FATAL; event.event = IB_EVENT_DEVICE_FATAL; + dispatch = 1; break; } case OFFLOAD_PORT_DOWN: { event.event = IB_EVENT_PORT_ERR; + dispatch = 1; break; } case OFFLOAD_PORT_UP: { event.event = IB_EVENT_PORT_ACTIVE; + dispatch = 1; + break; + } + case OFFLOAD_DB_FULL: { + disable_dbs(rnicp); + break; + } + case OFFLOAD_DB_EMPTY: { + enable_dbs(rnicp, 1); + break; + } + case OFFLOAD_DB_DROP: { + unsigned long delay = 1000; + unsigned short r; + + disable_dbs(rnicp); + get_random_bytes(&r, 2); + delay += r & 1023; + + /* + * delay is between 1000-2023 usecs. 
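The iwch.c hunks above handle doorbell-FIFO pressure: on OFFLOAD_DB_FULL/DB_DROP every QP in the device's idr has its doorbell switched off, and a delayed work item turns them back on after a randomized 1000-2023 usec backoff so the functions do not all retry at once. A sketch of that walk-and-delay pattern, assuming a per-device idr, lock and delayed_work (all names are placeholders):

#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct my_qp {
	unsigned int flags;		/* bit 1 = doorbells held off, as in the sketch above */
};

struct my_dev {
	struct idr qpidr;		/* all QPs, indexed by QP id */
	spinlock_t lock;
	struct delayed_work db_drop_task;
};

static int disable_qp_db(int id, void *p, void *data)
{
	struct my_qp *qp = p;

	qp->flags |= 0x2;		/* stop ringing this QP's doorbell */
	return 0;			/* non-zero would abort the idr walk */
}

static int enable_qp_db(int id, void *p, void *data)
{
	struct my_qp *qp = p;

	/* the real driver also re-rings the doorbell here to catch up */
	qp->flags &= ~0x2;
	return 0;
}

static void disable_all_dbs(struct my_dev *dev)
{
	spin_lock_irq(&dev->lock);
	idr_for_each(&dev->qpidr, disable_qp_db, NULL);
	spin_unlock_irq(&dev->lock);
}

static void enable_all_dbs(struct my_dev *dev)
{
	spin_lock_irq(&dev->lock);
	idr_for_each(&dev->qpidr, enable_qp_db, NULL);
	spin_unlock_irq(&dev->lock);
}

static void db_drop_task(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, db_drop_task.work);

	enable_all_dbs(dev);
}

static void handle_db_drop(struct my_dev *dev)
{
	unsigned long delay_us = 1000;
	unsigned short r;

	disable_all_dbs(dev);

	get_random_bytes(&r, sizeof(r));
	delay_us += r & 1023;		/* 1000..2023 usecs, so functions do not retry in sync */
	schedule_delayed_work(&dev->db_drop_task, usecs_to_jiffies(delay_us));
}

db_drop_task is assumed to have been set up with INIT_DELAYED_WORK() at init time and cancelled with cancel_delayed_work_sync() on teardown, mirroring the rnic_init()/close_rnic_dev() changes above.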
+ */ + schedule_delayed_work(&rnicp->db_drop_task, + usecs_to_jiffies(delay)); break; } } - event.device = &rnicp->ibdev; - event.element.port_num = portnum; - ib_dispatch_event(&event); + if (dispatch) { + event.device = &rnicp->ibdev; + event.element.port_num = portnum; + ib_dispatch_event(&event); + } return; } diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h index 84735506333..a1c44578e03 100644 --- a/drivers/infiniband/hw/cxgb3/iwch.h +++ b/drivers/infiniband/hw/cxgb3/iwch.h @@ -36,6 +36,7 @@ #include <linux/list.h> #include <linux/spinlock.h> #include <linux/idr.h> +#include <linux/workqueue.h> #include <rdma/ib_verbs.h> @@ -110,6 +111,7 @@ struct iwch_dev { struct idr mmidr; spinlock_t lock; struct list_head entry; + struct delayed_work db_drop_task; }; static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev) diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index ed7175549eb..47b35c6608d 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -187,7 +187,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve entries = roundup_pow_of_two(entries); chp->cq.size_log2 = ilog2(entries); - if (cxio_create_cq(&rhp->rdev, &chp->cq)) { + if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) { kfree(chp); return ERR_PTR(-ENOMEM); } diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c index 3eb8cecf81d..b4d893de365 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_qp.c +++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c @@ -452,7 +452,8 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, ++(qhp->wq.sq_wptr); } spin_unlock_irqrestore(&qhp->lock, flag); - ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid); + if (cxio_wq_db_enabled(&qhp->wq)) + ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid); out: if (err) @@ -514,7 +515,8 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, num_wrs--; } spin_unlock_irqrestore(&qhp->lock, flag); - ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid); + if (cxio_wq_db_enabled(&qhp->wq)) + ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid); out: if (err) @@ -597,7 +599,8 @@ int iwch_bind_mw(struct ib_qp *qp, ++(qhp->wq.sq_wptr); spin_unlock_irqrestore(&qhp->lock, flag); - ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid); + if (cxio_wq_db_enabled(&qhp->wq)) + ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid); return err; } diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c index 42be0b15084..b2b6fea2b14 100644 --- a/drivers/infiniband/hw/ehca/ehca_irq.c +++ b/drivers/infiniband/hw/ehca/ehca_irq.c @@ -548,11 +548,10 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq) struct ehca_eq *eq = &shca->eq; struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache; u64 eqe_value, ret; - unsigned long flags; int eqe_cnt, i; int eq_empty = 0; - spin_lock_irqsave(&eq->irq_spinlock, flags); + spin_lock(&eq->irq_spinlock); if (is_irq) { const int max_query_cnt = 100; int query_cnt = 0; @@ -643,7 +642,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq) } while (1); unlock_irq_spinlock: - spin_unlock_irqrestore(&eq->irq_spinlock, flags); + spin_unlock(&eq->irq_spinlock); } void ehca_tasklet_eq(unsigned long data) diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c index 0338f1fabe8..b105f664d3e 100644 --- a/drivers/infiniband/hw/ehca/ehca_qp.c +++ 
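cxio_create_cq() above gains a "kernel" flag, which iwch_create_cq() computes as !ucontext, so the software shadow queue is only allocated for kernel-owned CQs while user CQs get just the DMA ring. A rough sketch of that shape; the 64-byte entry size and all names are placeholders:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

struct my_cq {
	unsigned int size_log2;
	void *sw_queue;			/* kernel-only shadow of the HW ring */
	void *queue;			/* DMA-coherent ring shared with HW */
	dma_addr_t dma_addr;
};

/* 'kernel' is typically computed by the caller as !ucontext. */
static int my_create_cq(struct device *dmadev, struct my_cq *cq, int kernel)
{
	size_t size = (1UL << cq->size_log2) * 64;	/* placeholder entry size */

	if (kernel) {
		cq->sw_queue = kzalloc(size, GFP_KERNEL);
		if (!cq->sw_queue)
			return -ENOMEM;
	}

	cq->queue = dma_alloc_coherent(dmadev, size, &cq->dma_addr, GFP_KERNEL);
	if (!cq->queue) {
		kfree(cq->sw_queue);	/* kfree(NULL) is a no-op for the user case */
		return -ENOMEM;
	}
	return 0;
}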
b/drivers/infiniband/hw/ehca/ehca_qp.c @@ -55,9 +55,7 @@ static struct kmem_cache *qp_cache; /* * attributes not supported by query qp */ -#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_MAX_DEST_RD_ATOMIC | \ - IB_QP_MAX_QP_RD_ATOMIC | \ - IB_QP_ACCESS_FLAGS | \ +#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_ACCESS_FLAGS | \ IB_QP_EN_SQD_ASYNC_NOTIFY) /* diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c index 8c1213f8916..dba8f9f8b99 100644 --- a/drivers/infiniband/hw/ehca/ehca_sqp.c +++ b/drivers/infiniband/hw/ehca/ehca_sqp.c @@ -222,7 +222,7 @@ int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, { int ret; - if (!port_num || port_num > ibdev->phys_port_cnt) + if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc) return IB_MAD_RESULT_FAILURE; /* accept only pma request */ diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c index 82878e34862..eb7d59abd12 100644 --- a/drivers/infiniband/hw/ipath/ipath_user_pages.c +++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c @@ -59,8 +59,7 @@ static int __get_user_pages(unsigned long start_page, size_t num_pages, size_t got; int ret; - lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> - PAGE_SHIFT; + lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; if (num_pages > lock_limit) { ret = -ENOMEM; diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 2a97c964b9e..ae75389937d 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -1214,7 +1214,7 @@ out: static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, void *wqe, unsigned *mlx_seg_len) { - struct ib_device *ib_dev = &to_mdev(sqp->qp.ibqp.device)->ib_dev; + struct ib_device *ib_dev = sqp->qp.ibqp.device; struct mlx4_wqe_mlx_seg *mlx = wqe; struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); @@ -1228,7 +1228,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, for (i = 0; i < wr->num_sge; ++i) send_size += wr->sg_list[i].length; - ib_ud_header_init(send_size, mlx4_ib_ah_grh_present(ah), &sqp->ud_header); + ib_ud_header_init(send_size, mlx4_ib_ah_grh_present(ah), 0, &sqp->ud_header); sqp->ud_header.lrh.service_level = be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 28; diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index c10576fa60c..d2d172e6289 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c @@ -1494,7 +1494,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp, u16 pkey; ib_ud_header_init(256, /* assume a MAD */ - mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), + mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 0, &sqp->ud_header); err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header); diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index b9d09bafd6c..4272c52e38a 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c @@ -110,6 +110,7 @@ static unsigned int sysfs_idx_addr; static struct pci_device_id nes_pci_table[] = { {PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020, PCI_ANY_ID, PCI_ANY_ID}, + {PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020_KR, PCI_ANY_ID, PCI_ANY_ID}, {0} }; diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h index 98840564bb2..cc78fee1dd5 100644 --- a/drivers/infiniband/hw/nes/nes.h 
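The ipath hunk above replaces the open-coded current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur access with the rlimit() helper when bounding how many pages a caller may pin. A one-function sketch of the check; the header that provides rlimit() varies between kernel versions:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/sched.h>

/* Returns 0 if pinning 'num_pages' stays within the caller's memlock limit. */
static int check_memlock_limit(size_t num_pages)
{
	unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if (num_pages > lock_limit)
		return -ENOMEM;
	return 0;
}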
+++ b/drivers/infiniband/hw/nes/nes.h @@ -64,8 +64,9 @@ * NetEffect PCI vendor id and NE010 PCI device id. */ #ifndef PCI_VENDOR_ID_NETEFFECT /* not in pci.ids yet */ -#define PCI_VENDOR_ID_NETEFFECT 0x1678 -#define PCI_DEVICE_ID_NETEFFECT_NE020 0x0100 +#define PCI_VENDOR_ID_NETEFFECT 0x1678 +#define PCI_DEVICE_ID_NETEFFECT_NE020 0x0100 +#define PCI_DEVICE_ID_NETEFFECT_NE020_KR 0x0110 #endif #define NE020_REV 4 @@ -193,8 +194,8 @@ extern u32 cm_packets_created; extern u32 cm_packets_received; extern u32 cm_packets_dropped; extern u32 cm_packets_retrans; -extern u32 cm_listens_created; -extern u32 cm_listens_destroyed; +extern atomic_t cm_listens_created; +extern atomic_t cm_listens_destroyed; extern u32 cm_backlog_drops; extern atomic_t cm_loopbacks; extern atomic_t cm_nodes_created; diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 39468c27703..2a49ee40b52 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -67,8 +67,8 @@ u32 cm_packets_dropped; u32 cm_packets_retrans; u32 cm_packets_created; u32 cm_packets_received; -u32 cm_listens_created; -u32 cm_listens_destroyed; +atomic_t cm_listens_created; +atomic_t cm_listens_destroyed; u32 cm_backlog_drops; atomic_t cm_loopbacks; atomic_t cm_nodes_created; @@ -1011,9 +1011,10 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, event.cm_info.loc_port = loopback->loc_port; event.cm_info.cm_id = loopback->cm_id; + add_ref_cm_node(loopback); + loopback->state = NES_CM_STATE_CLOSED; cm_event_connect_error(&event); cm_node->state = NES_CM_STATE_LISTENER_DESTROYED; - loopback->state = NES_CM_STATE_CLOSED; rem_ref_cm_node(cm_node->cm_core, cm_node); @@ -1042,7 +1043,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core, kfree(listener); listener = NULL; ret = 0; - cm_listens_destroyed++; + atomic_inc(&cm_listens_destroyed); } else { spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); } @@ -3172,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog) g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node); return err; } - cm_listens_created++; + atomic_inc(&cm_listens_created); } cm_id->add_ref(cm_id); diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index b1c2cbb88f0..ce7f5383357 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -748,16 +748,28 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count, if (hw_rev != NE020_REV) { /* init serdes 0 */ - if (wide_ppm_offset && (nesadapter->phy_type[0] == NES_PHY_TYPE_CX4)) - nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000FFFAA); - else + switch (nesadapter->phy_type[0]) { + case NES_PHY_TYPE_CX4: + if (wide_ppm_offset) + nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000FFFAA); + else + nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF); + break; + case NES_PHY_TYPE_KR: + nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF); + nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x00000000); + break; + case NES_PHY_TYPE_PUMA_1G: nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF); - - if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) { sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0); sds |= 0x00000100; nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds); + break; + default: + nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF); 
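cm_listens_created/cm_listens_destroyed above change from plain u32 counters bumped with ++ to atomic_t, presumably because listeners come and go from more than one context while the ethtool stats path reads the counters concurrently; the readers switch to atomic_read(). A minimal sketch:

#include <linux/atomic.h>
#include <linux/types.h>

static atomic_t listens_created = ATOMIC_INIT(0);
static atomic_t listens_destroyed = ATOMIC_INIT(0);

static void on_listen_created(void)
{
	atomic_inc(&listens_created);	/* safe against concurrent create/destroy */
}

static void on_listen_destroyed(void)
{
	atomic_inc(&listens_destroyed);
}

static void fill_stats(u64 *stats)
{
	/* readers (e.g. an ethtool stats dump) just snapshot the counters */
	stats[0] = atomic_read(&listens_created);
	stats[1] = atomic_read(&listens_destroyed);
}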
+ break; } + if (!OneG_Mode) nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000); @@ -778,6 +790,9 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count, if (wide_ppm_offset) nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000FFFAA); break; + case NES_PHY_TYPE_KR: + nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x00000000); + break; case NES_PHY_TYPE_PUMA_1G: sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1); sds |= 0x000000100; @@ -1279,115 +1294,115 @@ int nes_destroy_cqp(struct nes_device *nesdev) /** - * nes_init_phy + * nes_init_1g_phy */ -int nes_init_phy(struct nes_device *nesdev) +int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index) { - struct nes_adapter *nesadapter = nesdev->nesadapter; u32 counter = 0; - u32 sds; - u32 mac_index = nesdev->mac_index; - u32 tx_config = 0; u16 phy_data; - u32 temp_phy_data = 0; - u32 temp_phy_data2 = 0; - u8 phy_type = nesadapter->phy_type[mac_index]; - u8 phy_index = nesadapter->phy_index[mac_index]; - - if ((nesadapter->OneG_Mode) && - (phy_type != NES_PHY_TYPE_PUMA_1G)) { - nes_debug(NES_DBG_PHY, "1G PHY, mac_index = %d.\n", mac_index); - if (phy_type == NES_PHY_TYPE_1G) { - tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG); - tx_config &= 0xFFFFFFE3; - tx_config |= 0x04; - nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config); - } + int ret = 0; - nes_read_1G_phy_reg(nesdev, 1, phy_index, &phy_data); - nes_write_1G_phy_reg(nesdev, 23, phy_index, 0xb000); + nes_read_1G_phy_reg(nesdev, 1, phy_index, &phy_data); + nes_write_1G_phy_reg(nesdev, 23, phy_index, 0xb000); - /* Reset the PHY */ - nes_write_1G_phy_reg(nesdev, 0, phy_index, 0x8000); - udelay(100); - counter = 0; - do { - nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); - if (counter++ > 100) - break; - } while (phy_data & 0x8000); - - /* Setting no phy loopback */ - phy_data &= 0xbfff; - phy_data |= 0x1140; - nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data); + /* Reset the PHY */ + nes_write_1G_phy_reg(nesdev, 0, phy_index, 0x8000); + udelay(100); + counter = 0; + do { nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); - nes_read_1G_phy_reg(nesdev, 0x17, phy_index, &phy_data); - nes_read_1G_phy_reg(nesdev, 0x1e, phy_index, &phy_data); - - /* Setting the interrupt mask */ - nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data); - nes_write_1G_phy_reg(nesdev, 0x19, phy_index, 0xffee); - nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data); + if (counter++ > 100) { + ret = -1; + break; + } + } while (phy_data & 0x8000); + + /* Setting no phy loopback */ + phy_data &= 0xbfff; + phy_data |= 0x1140; + nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data); + nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); + nes_read_1G_phy_reg(nesdev, 0x17, phy_index, &phy_data); + nes_read_1G_phy_reg(nesdev, 0x1e, phy_index, &phy_data); + + /* Setting the interrupt mask */ + nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data); + nes_write_1G_phy_reg(nesdev, 0x19, phy_index, 0xffee); + nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data); + + /* turning on flow control */ + nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data); + nes_write_1G_phy_reg(nesdev, 4, phy_index, (phy_data & ~(0x03E0)) | 0xc00); + nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data); + + /* Clear Half duplex */ + nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data); + nes_write_1G_phy_reg(nesdev, 9, phy_index, phy_data & ~(0x0100)); + nes_read_1G_phy_reg(nesdev, 9, phy_index, 
&phy_data); + + nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); + nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data | 0x0300); + + return ret; +} - /* turning on flow control */ - nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data); - nes_write_1G_phy_reg(nesdev, 4, phy_index, (phy_data & ~(0x03E0)) | 0xc00); - nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data); - /* Clear Half duplex */ - nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data); - nes_write_1G_phy_reg(nesdev, 9, phy_index, phy_data & ~(0x0100)); - nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data); +/** + * nes_init_2025_phy + */ +int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index) +{ + u32 temp_phy_data = 0; + u32 temp_phy_data2 = 0; + u32 counter = 0; + u32 sds; + u32 mac_index = nesdev->mac_index; + int ret = 0; + unsigned int first_attempt = 1; - nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data); - nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data | 0x0300); + /* Check firmware heartbeat */ + nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); + temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); + udelay(1500); + nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); + temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); - return 0; + if (temp_phy_data != temp_phy_data2) { + nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd); + temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); + if ((temp_phy_data & 0xff) > 0x20) + return 0; + printk(PFX "Reinitialize external PHY\n"); } - if ((phy_type == NES_PHY_TYPE_IRIS) || - (phy_type == NES_PHY_TYPE_ARGUS) || - (phy_type == NES_PHY_TYPE_SFP_D)) { - /* setup 10G MDIO operation */ - tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG); - tx_config &= 0xFFFFFFE3; - tx_config |= 0x15; - nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config); - } - if ((phy_type == NES_PHY_TYPE_ARGUS) || - (phy_type == NES_PHY_TYPE_SFP_D)) { - u32 first_time = 1; + /* no heartbeat, configure the PHY */ + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0x0000, 0x8000); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0000); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052); - /* Check firmware heartbeat */ - nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); - temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); - udelay(1500); - nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); - temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); + switch (phy_type) { + case NES_PHY_TYPE_ARGUS: + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0008); + nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0001); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0098); + nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00); - if (temp_phy_data != temp_phy_data2) { - nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd); - temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); - if ((temp_phy_data & 0xff) > 0x20) - return 0; - printk(PFX "Reinitializing PHY\n"); - } + /* setup LEDs */ + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x0007); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x000A); + 
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0009); + break; - /* no heartbeat, configure the PHY */ - nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0x0000, 0x8000); - nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0000); + case NES_PHY_TYPE_SFP_D: nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052); - if (phy_type == NES_PHY_TYPE_ARGUS) { - nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C); - nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0008); - nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0001); - } else { - nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x0004); - nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0038); - nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0013); - } + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x0004); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0038); + nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0013); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0098); nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00); @@ -1395,71 +1410,136 @@ int nes_init_phy(struct nes_device *nesdev) nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x0007); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x000A); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0009); + break; + + case NES_PHY_TYPE_KR: + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0010); + nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0013); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0080); + nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00); + + /* setup LEDs */ + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x000B); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x0003); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0004); - nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0028, 0xA528); + nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0022, 0x406D); + nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0023, 0x0020); + break; + } + + nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0028, 0xA528); - /* Bring PHY out of reset */ - nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0002); + /* Bring PHY out of reset */ + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0002); - /* Check for heartbeat */ - counter = 0; - mdelay(690); + /* Check for heartbeat */ + counter = 0; + mdelay(690); + nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); + temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); + do { + if (counter++ > 150) { + printk(PFX "No PHY heartbeat\n"); + break; + } + mdelay(1); nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); + temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); + } while ((temp_phy_data2 == temp_phy_data)); + + /* wait for tracking */ + counter = 0; + do { + nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd); temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); - do { - if (counter++ > 150) { - printk(PFX "No PHY heartbeat\n"); + if (counter++ > 300) { + if (((temp_phy_data & 0xff) == 0x0) && first_attempt) { + first_attempt = 0; + counter = 0; + /* reset AMCC PHY and try again */ + nes_write_10G_phy_reg(nesdev, 
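nes_init_2025_phy() above decides whether the external PHY firmware is alive by sampling a heartbeat register twice about 1.5 ms apart: if the value moved, the PHY is left alone; otherwise it is reprogrammed and the code polls until the heartbeat starts ticking or a retry budget runs out. A hedged sketch of those two loops; read_hb_reg() is only a stand-in for the nes_read_10G_phy_reg()/NES_IDX_MAC_MDIO_CONTROL pair:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Placeholder: a real driver reads the PHY firmware heartbeat register here. */
static u16 read_hb_reg(void *dev)
{
	return 0;
}

/* Heartbeat check: sample twice, ~1.5 ms apart; a changing value means the
 * PHY firmware is already running and should be left alone. */
static bool phy_firmware_alive(void *dev)
{
	u16 first = read_hb_reg(dev);

	udelay(1500);
	return read_hb_reg(dev) != first;
}

/* After (re)programming the PHY, wait for the heartbeat to start moving. */
static int wait_for_heartbeat(void *dev)
{
	u16 prev = read_hb_reg(dev);
	int tries = 0;

	do {
		if (tries++ > 150)
			return -ETIMEDOUT;	/* the driver logs "No PHY heartbeat" */
		mdelay(1);
	} while (read_hb_reg(dev) == prev);

	return 0;
}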
phy_index, 0x3, 0xe854, 0x00c0); + nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x0040); + continue; + } else { + ret = 1; break; } - mdelay(1); - nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee); - temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); - } while ((temp_phy_data2 == temp_phy_data)); - - /* wait for tracking */ - counter = 0; - do { - nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd); - temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); - if (counter++ > 300) { - if (((temp_phy_data & 0xff) == 0x0) && first_time) { - first_time = 0; - counter = 0; - /* reset AMCC PHY and try again */ - nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x00c0); - nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x0040); - continue; - } else { - printk(PFX "PHY did not track\n"); - break; - } - } - mdelay(10); - } while ((temp_phy_data & 0xff) < 0x30); - - /* setup signal integrity */ - nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd003, 0x0000); - nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00D, 0x00FE); - nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00E, 0x0032); + } + mdelay(10); + } while ((temp_phy_data & 0xff) < 0x30); + + /* setup signal integrity */ + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd003, 0x0000); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00D, 0x00FE); + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00E, 0x0032); + if (phy_type == NES_PHY_TYPE_KR) { + nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00F, 0x000C); + } else { nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00F, 0x0002); nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc314, 0x0063); + } + + /* reset serdes */ + sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200); + sds |= 0x1; + nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200, sds); + sds &= 0xfffffffe; + nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200, sds); + + counter = 0; + while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040) + && (counter++ < 5000)) + ; + + return ret; +} + - /* reset serdes */ - sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + - mac_index * 0x200); - sds |= 0x1; - nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + - mac_index * 0x200, sds); - sds &= 0xfffffffe; - nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + - mac_index * 0x200, sds); - - counter = 0; - while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040) - && (counter++ < 5000)) - ; +/** + * nes_init_phy + */ +int nes_init_phy(struct nes_device *nesdev) +{ + struct nes_adapter *nesadapter = nesdev->nesadapter; + u32 mac_index = nesdev->mac_index; + u32 tx_config = 0; + unsigned long flags; + u8 phy_type = nesadapter->phy_type[mac_index]; + u8 phy_index = nesadapter->phy_index[mac_index]; + int ret = 0; + + tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG); + if (phy_type == NES_PHY_TYPE_1G) { + /* setup 1G MDIO operation */ + tx_config &= 0xFFFFFFE3; + tx_config |= 0x04; + } else { + /* setup 10G MDIO operation */ + tx_config &= 0xFFFFFFE3; + tx_config |= 0x15; } - return 0; + nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config); + + spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags); + + switch (phy_type) { + case NES_PHY_TYPE_1G: + ret = nes_init_1g_phy(nesdev, phy_type, phy_index); + break; + case NES_PHY_TYPE_ARGUS: + case NES_PHY_TYPE_SFP_D: + case 
NES_PHY_TYPE_KR: + ret = nes_init_2025_phy(nesdev, phy_type, phy_index); + break; + } + + spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags); + + return ret; } @@ -2460,23 +2540,9 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) } } else { switch (nesadapter->phy_type[mac_index]) { - case NES_PHY_TYPE_IRIS: - nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1); - temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); - u32temp = 20; - do { - nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1); - phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL); - if ((phy_data == temp_phy_data) || (!(--u32temp))) - break; - temp_phy_data = phy_data; - } while (1); - nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n", - __func__, phy_data, nesadapter->mac_link_down[mac_index] ? "DOWN" : "UP"); - break; - case NES_PHY_TYPE_ARGUS: case NES_PHY_TYPE_SFP_D: + case NES_PHY_TYPE_KR: /* clear the alarms */ nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0x0008); nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc001); @@ -3352,8 +3418,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, u16 async_event_id; u8 tcp_state; u8 iwarp_state; - int must_disconn = 1; - int must_terminate = 0; struct ib_event ibevent; nes_debug(NES_DBG_AEQ, "\n"); @@ -3367,6 +3431,8 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, BUG_ON(!context); } + /* context is nesqp unless async_event_id == CQ ERROR */ + nesqp = (struct nes_qp *)(unsigned long)context; async_event_id = (u16)aeq_info; tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT; iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT; @@ -3378,8 +3444,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, switch (async_event_id) { case NES_AEQE_AEID_LLP_FIN_RECEIVED: - nesqp = (struct nes_qp *)(unsigned long)context; - if (nesqp->term_flags) return; /* Ignore it, wait for close complete */ @@ -3394,79 +3458,48 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, async_event_id, nesqp->last_aeq, tcp_state); } - if ((tcp_state != NES_AEQE_TCP_STATE_CLOSE_WAIT) || - (nesqp->ibqp_state != IB_QPS_RTS)) { - /* FIN Received but tcp state or IB state moved on, - should expect a close complete */ - return; - } - + break; case NES_AEQE_AEID_LLP_CLOSE_COMPLETE: - nesqp = (struct nes_qp *)(unsigned long)context; if (nesqp->term_flags) { nes_terminate_done(nesqp, 0); return; } + spin_lock_irqsave(&nesqp->lock, flags); + nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING; + spin_unlock_irqrestore(&nesqp->lock, flags); + nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_CLOSING, 0, 0); + nes_cm_disconn(nesqp); + break; - case NES_AEQE_AEID_LLP_CONNECTION_RESET: case NES_AEQE_AEID_RESET_SENT: - nesqp = (struct nes_qp *)(unsigned long)context; - if (async_event_id == NES_AEQE_AEID_RESET_SENT) { - tcp_state = NES_AEQE_TCP_STATE_CLOSED; - } + tcp_state = NES_AEQE_TCP_STATE_CLOSED; spin_lock_irqsave(&nesqp->lock, flags); nesqp->hw_iwarp_state = iwarp_state; nesqp->hw_tcp_state = tcp_state; nesqp->last_aeq = async_event_id; - - if ((tcp_state == NES_AEQE_TCP_STATE_CLOSED) || - (tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT)) { - nesqp->hte_added = 0; - next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE; - } - - if ((nesqp->ibqp_state == IB_QPS_RTS) && - ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) || - 
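The tail of the nes_hw.c change splits the old monolithic nes_init_phy() into per-family helpers plus a small dispatcher that sets up MDIO access, takes phy_lock and switches on phy_type, so adding a new PHY family (the KR parts here) mostly means adding a case. A hedged sketch of the dispatcher shape; the enum, struct and stub helpers are placeholders:

#include <linux/spinlock.h>
#include <linux/types.h>

enum my_phy_type { MY_PHY_1G, MY_PHY_ARGUS, MY_PHY_SFP_D, MY_PHY_KR };

struct my_adapter {
	spinlock_t phy_lock;
};

/* Assumed per-family helpers, mirroring nes_init_1g_phy()/nes_init_2025_phy(). */
static int init_1g_phy(struct my_adapter *a, u8 phy_index)  { return 0; }
static int init_10g_phy(struct my_adapter *a, u8 phy_index) { return 0; }

static int my_init_phy(struct my_adapter *a, enum my_phy_type type, u8 phy_index)
{
	unsigned long flags;
	int ret = 0;

	/* MDIO clock/mode setup for 1G vs 10G would go here (the tx_config bits). */

	spin_lock_irqsave(&a->phy_lock, flags);
	switch (type) {
	case MY_PHY_1G:
		ret = init_1g_phy(a, phy_index);
		break;
	case MY_PHY_ARGUS:
	case MY_PHY_SFP_D:
	case MY_PHY_KR:
		ret = init_10g_phy(a, phy_index);
		break;
	}
	spin_unlock_irqrestore(&a->phy_lock, flags);

	return ret;
}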
(async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) { - switch (nesqp->hw_iwarp_state) { - case NES_AEQE_IWARP_STATE_RTS: - next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING; - nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING; - break; - case NES_AEQE_IWARP_STATE_TERMINATE: - must_disconn = 0; /* terminate path takes care of disconn */ - if (nesqp->term_flags == 0) - must_terminate = 1; - break; - } - } else { - if (async_event_id == NES_AEQE_AEID_LLP_FIN_RECEIVED) { - /* FIN Received but ib state not RTS, - close complete will be on its way */ - must_disconn = 0; - } - } + nesqp->hte_added = 0; spin_unlock_irqrestore(&nesqp->lock, flags); + next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE; + nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0); + nes_cm_disconn(nesqp); + break; - if (must_terminate) - nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL); - else if (must_disconn) { - if (next_iwarp_state) { - nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X\n", - nesqp->hwqp.qp_id, next_iwarp_state); - nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0); - } - nes_cm_disconn(nesqp); - } + case NES_AEQE_AEID_LLP_CONNECTION_RESET: + if (atomic_read(&nesqp->close_timer_started)) + return; + spin_lock_irqsave(&nesqp->lock, flags); + nesqp->hw_iwarp_state = iwarp_state; + nesqp->hw_tcp_state = tcp_state; + nesqp->last_aeq = async_event_id; + spin_unlock_irqrestore(&nesqp->lock, flags); + nes_cm_disconn(nesqp); break; case NES_AEQE_AEID_TERMINATE_SENT: - nesqp = (struct nes_qp *)(unsigned long)context; nes_terminate_send_fin(nesdev, nesqp, aeqe); break; case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED: - nesqp = (struct nes_qp *)(unsigned long)context; nes_terminate_received(nesdev, nesqp, aeqe); break; @@ -3480,7 +3513,8 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION: case NES_AEQE_AEID_AMP_TO_WRAP: - nesqp = (struct nes_qp *)(unsigned long)context; + printk(KERN_ERR PFX "QP[%u] async_event_id=0x%04X IB_EVENT_QP_ACCESS_ERR\n", + nesqp->hwqp.qp_id, async_event_id); nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_ACCESS_ERR); break; @@ -3488,7 +3522,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL: case NES_AEQE_AEID_DDP_UBE_INVALID_MO: case NES_AEQE_AEID_DDP_UBE_INVALID_QN: - nesqp = (struct nes_qp *)(unsigned long)context; if (iwarp_opcode(nesqp, aeq_info) > IWARP_OPCODE_TERM) { aeq_info &= 0xffff0000; aeq_info |= NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE; @@ -3530,7 +3563,8 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev, case NES_AEQE_AEID_STAG_ZERO_INVALID: case NES_AEQE_AEID_ROE_INVALID_RDMA_READ_REQUEST: case NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP: - nesqp = (struct nes_qp *)(unsigned long)context; + printk(KERN_ERR PFX "QP[%u] async_event_id=0x%04X IB_EVENT_QP_FATAL\n", + nesqp->hwqp.qp_id, async_event_id); nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL); break; diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h index 084be0ee689..9b1e7f869d8 100644 --- a/drivers/infiniband/hw/nes/nes_hw.h +++ b/drivers/infiniband/hw/nes/nes_hw.h @@ -37,12 +37,12 @@ #define NES_PHY_TYPE_CX4 1 #define NES_PHY_TYPE_1G 2 -#define NES_PHY_TYPE_IRIS 3 #define NES_PHY_TYPE_ARGUS 4 #define NES_PHY_TYPE_PUMA_1G 5 #define NES_PHY_TYPE_PUMA_10G 6 #define NES_PHY_TYPE_GLADIUS 7 
#define NES_PHY_TYPE_SFP_D 8 +#define NES_PHY_TYPE_KR 9 #define NES_MULTICAST_PF_MAX 8 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 9384f5d3d33..a1d79b6856a 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -1243,8 +1243,8 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev, target_stat_values[++index] = cm_packets_received; target_stat_values[++index] = cm_packets_dropped; target_stat_values[++index] = cm_packets_retrans; - target_stat_values[++index] = cm_listens_created; - target_stat_values[++index] = cm_listens_destroyed; + target_stat_values[++index] = atomic_read(&cm_listens_created); + target_stat_values[++index] = atomic_read(&cm_listens_destroyed); target_stat_values[++index] = cm_backlog_drops; target_stat_values[++index] = atomic_read(&cm_loopbacks); target_stat_values[++index] = atomic_read(&cm_nodes_created); @@ -1474,9 +1474,9 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd } return 0; } - if ((phy_type == NES_PHY_TYPE_IRIS) || - (phy_type == NES_PHY_TYPE_ARGUS) || - (phy_type == NES_PHY_TYPE_SFP_D)) { + if ((phy_type == NES_PHY_TYPE_ARGUS) || + (phy_type == NES_PHY_TYPE_SFP_D) || + (phy_type == NES_PHY_TYPE_KR)) { et_cmd->transceiver = XCVR_EXTERNAL; et_cmd->port = PORT_FIBRE; et_cmd->supported = SUPPORTED_FIBRE; @@ -1596,8 +1596,7 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev, struct net_device *netdev; struct nic_qp_map *curr_qp_map; u32 u32temp; - u16 phy_data; - u16 temp_phy_data; + u8 phy_type = nesdev->nesadapter->phy_type[nesdev->mac_index]; netdev = alloc_etherdev(sizeof(struct nes_vnic)); if (!netdev) { @@ -1705,65 +1704,23 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev, if ((nesdev->netdev_count == 0) && ((PCI_FUNC(nesdev->pcidev->devfn) == nesdev->mac_index) || - ((nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_PUMA_1G) && + ((phy_type == NES_PHY_TYPE_PUMA_1G) && (((PCI_FUNC(nesdev->pcidev->devfn) == 1) && (nesdev->mac_index == 2)) || ((PCI_FUNC(nesdev->pcidev->devfn) == 2) && (nesdev->mac_index == 1)))))) { - /* - * nes_debug(NES_DBG_INIT, "Setting up PHY interrupt mask. 
Using register index 0x%04X\n", - * NES_IDX_PHY_PCS_CONTROL_STATUS0 + (0x200 * (nesvnic->logical_port & 1))); - */ u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + (0x200 * (nesdev->mac_index & 1))); - if (nesdev->nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_PUMA_1G) { + if (phy_type != NES_PHY_TYPE_PUMA_1G) { u32temp |= 0x00200000; nes_write_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + (0x200 * (nesdev->mac_index & 1)), u32temp); } - u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 + - (0x200 * (nesdev->mac_index & 1))); - - if ((u32temp&0x0f1f0000) == 0x0f0f0000) { - if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_IRIS) { - nes_init_phy(nesdev); - nes_read_10G_phy_reg(nesdev, nesdev->nesadapter->phy_index[nesdev->mac_index], 1, 1); - temp_phy_data = (u16)nes_read_indexed(nesdev, - NES_IDX_MAC_MDIO_CONTROL); - u32temp = 20; - do { - nes_read_10G_phy_reg(nesdev, nesdev->nesadapter->phy_index[nesdev->mac_index], 1, 1); - phy_data = (u16)nes_read_indexed(nesdev, - NES_IDX_MAC_MDIO_CONTROL); - if ((phy_data == temp_phy_data) || (!(--u32temp))) - break; - temp_phy_data = phy_data; - } while (1); - if (phy_data & 4) { - nes_debug(NES_DBG_INIT, "The Link is UP!!.\n"); - nesvnic->linkup = 1; - } else { - nes_debug(NES_DBG_INIT, "The Link is DOWN!!.\n"); - } - } else { - nes_debug(NES_DBG_INIT, "The Link is UP!!.\n"); - nesvnic->linkup = 1; - } - } else if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_PUMA_1G) { - nes_debug(NES_DBG_INIT, "mac_index=%d, logical_port=%d, u32temp=0x%04X, PCI_FUNC=%d\n", - nesdev->mac_index, nesvnic->logical_port, u32temp, PCI_FUNC(nesdev->pcidev->devfn)); - if (((nesdev->mac_index < 2) && ((u32temp&0x01010000) == 0x01010000)) || - ((nesdev->mac_index > 1) && ((u32temp&0x02020000) == 0x02020000))) { - nes_debug(NES_DBG_INIT, "The Link is UP!!.\n"); - nesvnic->linkup = 1; - } - } /* clear the MAC interrupt status, assumes direct logical to physical mapping */ u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index)); nes_debug(NES_DBG_INIT, "Phy interrupt status = 0x%X.\n", u32temp); nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index), u32temp); - if (nesdev->nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_IRIS) - nes_init_phy(nesdev); + nes_init_phy(nesdev); } diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 64d3136e374..815725f886c 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -228,7 +228,7 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw, /* Check for SQ overflow */ if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) { spin_unlock_irqrestore(&nesqp->lock, flags); - return -EINVAL; + return -ENOMEM; } wqe = &nesqp->hwqp.sq_vbase[head]; @@ -3294,7 +3294,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr, /* Check for SQ overflow */ if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) { - err = -EINVAL; + err = -ENOMEM; break; } @@ -3577,7 +3577,7 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr, } /* Check for RQ overflow */ if (((head + (2 * qsize) - nesqp->hwqp.rq_tail) % qsize) == (qsize - 1)) { - err = -EINVAL; + err = -ENOMEM; break; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c index e9795f60e5d..d10b4ec68d2 100644 --- 
a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c @@ -55,9 +55,7 @@ static int ipoib_get_coalesce(struct net_device *dev, struct ipoib_dev_priv *priv = netdev_priv(dev); coal->rx_coalesce_usecs = priv->ethtool.coalesce_usecs; - coal->tx_coalesce_usecs = priv->ethtool.coalesce_usecs; coal->rx_max_coalesced_frames = priv->ethtool.max_coalesced_frames; - coal->tx_max_coalesced_frames = priv->ethtool.max_coalesced_frames; return 0; } @@ -69,10 +67,8 @@ static int ipoib_set_coalesce(struct net_device *dev, int ret; /* - * Since IPoIB uses a single CQ for both rx and tx, we assume - * that rx params dictate the configuration. These values are - * saved in the private data and returned when ipoib_get_coalesce() - * is called. + * These values are saved in the private data and returned + * when ipoib_get_coalesce() is called */ if (coal->rx_coalesce_usecs > 0xffff || coal->rx_max_coalesced_frames > 0xffff) @@ -85,8 +81,6 @@ static int ipoib_set_coalesce(struct net_device *dev, return ret; } - coal->tx_coalesce_usecs = coal->rx_coalesce_usecs; - coal->tx_max_coalesced_frames = coal->rx_max_coalesced_frames; priv->ethtool.coalesce_usecs = coal->rx_coalesce_usecs; priv->ethtool.max_coalesced_frames = coal->rx_max_coalesced_frames; diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 5f7a6fca0a4..71237f8f78f 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -128,6 +128,28 @@ static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode) return 0; } +int iser_initialize_task_headers(struct iscsi_task *task, + struct iser_tx_desc *tx_desc) +{ + struct iscsi_iser_conn *iser_conn = task->conn->dd_data; + struct iser_device *device = iser_conn->ib_conn->device; + struct iscsi_iser_task *iser_task = task->dd_data; + u64 dma_addr; + + dma_addr = ib_dma_map_single(device->ib_device, (void *)tx_desc, + ISER_HEADERS_LEN, DMA_TO_DEVICE); + if (ib_dma_mapping_error(device->ib_device, dma_addr)) + return -ENOMEM; + + tx_desc->dma_addr = dma_addr; + tx_desc->tx_sg[0].addr = tx_desc->dma_addr; + tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; + tx_desc->tx_sg[0].lkey = device->mr->lkey; + + iser_task->headers_initialized = 1; + iser_task->iser_conn = iser_conn; + return 0; +} /** * iscsi_iser_task_init - Initialize task * @task: iscsi task @@ -137,17 +159,17 @@ static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode) static int iscsi_iser_task_init(struct iscsi_task *task) { - struct iscsi_iser_conn *iser_conn = task->conn->dd_data; struct iscsi_iser_task *iser_task = task->dd_data; + if (!iser_task->headers_initialized) + if (iser_initialize_task_headers(task, &iser_task->desc)) + return -ENOMEM; + /* mgmt task */ - if (!task->sc) { - iser_task->desc.data = task->data; + if (!task->sc) return 0; - } iser_task->command_sent = 0; - iser_task->iser_conn = iser_conn; iser_task_rdma_init(iser_task); return 0; } @@ -168,7 +190,7 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) { int error = 0; - iser_dbg("task deq [cid %d itt 0x%x]\n", conn->id, task->itt); + iser_dbg("mtask xmit [cid %d itt 0x%x]\n", conn->id, task->itt); error = iser_send_control(conn, task); @@ -178,9 +200,6 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) * - if yes, the task is recycled at iscsi_complete_pdu * - if no, the task is recycled at iser_snd_completion */ - if (error && error != -ENOBUFS) - iscsi_conn_failure(conn, 
ISCSI_ERR_CONN_FAILED); - return error; } @@ -232,7 +251,7 @@ iscsi_iser_task_xmit(struct iscsi_task *task) task->imm_count, task->unsol_r2t.data_length); } - iser_dbg("task deq [cid %d itt 0x%x]\n", + iser_dbg("ctask xmit [cid %d itt 0x%x]\n", conn->id, task->itt); /* Send the cmd PDU */ @@ -248,8 +267,6 @@ iscsi_iser_task_xmit(struct iscsi_task *task) error = iscsi_iser_task_xmit_unsol_data(conn, task); iscsi_iser_task_xmit_exit: - if (error && error != -ENOBUFS) - iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); return error; } @@ -283,7 +300,7 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx) * due to issues with the login code re iser sematics * this not set in iscsi_conn_setup - FIXME */ - conn->max_recv_dlength = 128; + conn->max_recv_dlength = ISER_RECV_DATA_SEG_LEN; iser_conn = conn->dd_data; conn->dd_data = iser_conn; @@ -401,7 +418,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep, struct Scsi_Host *shost; struct iser_conn *ib_conn; - shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 1); + shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0); if (!shost) return NULL; shost->transportt = iscsi_iser_scsi_transport; @@ -675,7 +692,7 @@ static int __init iser_init(void) memset(&ig, 0, sizeof(struct iser_global)); ig.desc_cache = kmem_cache_create("iser_descriptors", - sizeof (struct iser_desc), + sizeof(struct iser_tx_desc), 0, SLAB_HWCACHE_ALIGN, NULL); if (ig.desc_cache == NULL) diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 9d529cae1f0..036934cdcb9 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -102,9 +102,9 @@ #define ISER_MAX_TX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), * * SCSI_TMFUNC(2), LOGOUT(1) */ -#define ISER_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX + \ - ISER_MAX_RX_MISC_PDUS + \ - ISER_MAX_TX_MISC_PDUS) +#define ISER_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX) + +#define ISER_MIN_POSTED_RX (ISCSI_DEF_XMIT_CMDS_MAX >> 2) /* the max TX (send) WR supported by the iSER QP is defined by * * max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect * @@ -132,6 +132,12 @@ struct iser_hdr { __be64 read_va; } __attribute__((packed)); +/* Constant PDU lengths calculations */ +#define ISER_HEADERS_LEN (sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr)) + +#define ISER_RECV_DATA_SEG_LEN 128 +#define ISER_RX_PAYLOAD_SIZE (ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN) +#define ISER_RX_LOGIN_SIZE (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN) /* Length of an object name string */ #define ISER_OBJECT_NAME_SIZE 64 @@ -187,51 +193,43 @@ struct iser_regd_buf { struct iser_mem_reg reg; /* memory registration info */ void *virt_addr; struct iser_device *device; /* device->device for dma_unmap */ - u64 dma_addr; /* if non zero, addr for dma_unmap */ enum dma_data_direction direction; /* direction for dma_unmap */ unsigned int data_size; - atomic_t ref_count; /* refcount, freed when dec to 0 */ -}; - -#define MAX_REGD_BUF_VECTOR_LEN 2 - -struct iser_dto { - struct iscsi_iser_task *task; - struct iser_conn *ib_conn; - int notify_enable; - - /* vector of registered buffers */ - unsigned int regd_vector_len; - struct iser_regd_buf *regd[MAX_REGD_BUF_VECTOR_LEN]; - - /* offset into the registered buffer may be specified */ - unsigned int offset[MAX_REGD_BUF_VECTOR_LEN]; - - /* a smaller size may be specified, if 0, then full size is used */ - unsigned int used_sz[MAX_REGD_BUF_VECTOR_LEN]; }; enum iser_desc_type { - ISCSI_RX, ISCSI_TX_CONTROL , 
ISCSI_TX_SCSI_COMMAND, ISCSI_TX_DATAOUT }; -struct iser_desc { +struct iser_tx_desc { struct iser_hdr iser_header; struct iscsi_hdr iscsi_header; - struct iser_regd_buf hdr_regd_buf; - void *data; /* used by RX & TX_CONTROL */ - struct iser_regd_buf data_regd_buf; /* used by RX & TX_CONTROL */ enum iser_desc_type type; - struct iser_dto dto; + u64 dma_addr; + /* sg[0] points to iser/iscsi headers, sg[1] optionally points to either + of immediate data, unsolicited data-out or control (login,text) */ + struct ib_sge tx_sg[2]; + int num_sge; }; +#define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \ + sizeof(u64) + sizeof(struct ib_sge))) +struct iser_rx_desc { + struct iser_hdr iser_header; + struct iscsi_hdr iscsi_header; + char data[ISER_RECV_DATA_SEG_LEN]; + u64 dma_addr; + struct ib_sge rx_sg; + char pad[ISER_RX_PAD_SIZE]; +} __attribute__((packed)); + struct iser_device { struct ib_device *ib_device; struct ib_pd *pd; - struct ib_cq *cq; + struct ib_cq *rx_cq; + struct ib_cq *tx_cq; struct ib_mr *mr; struct tasklet_struct cq_tasklet; struct list_head ig_list; /* entry in ig devices list */ @@ -250,15 +248,18 @@ struct iser_conn { struct ib_fmr_pool *fmr_pool; /* pool of IB FMRs */ int disc_evt_flag; /* disconn event delivered */ wait_queue_head_t wait; /* waitq for conn/disconn */ - atomic_t post_recv_buf_count; /* posted rx count */ + int post_recv_buf_count; /* posted rx count */ atomic_t post_send_buf_count; /* posted tx count */ - atomic_t unexpected_pdu_count;/* count of received * - * unexpected pdus * - * not yet retired */ char name[ISER_OBJECT_NAME_SIZE]; struct iser_page_vec *page_vec; /* represents SG to fmr maps* * maps serialized as tx is*/ struct list_head conn_list; /* entry in ig conn list */ + + char *login_buf; + u64 login_dma; + unsigned int rx_desc_head; + struct iser_rx_desc *rx_descs; + struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX]; }; struct iscsi_iser_conn { @@ -267,7 +268,7 @@ struct iscsi_iser_conn { }; struct iscsi_iser_task { - struct iser_desc desc; + struct iser_tx_desc desc; struct iscsi_iser_conn *iser_conn; enum iser_task_status status; int command_sent; /* set if command sent */ @@ -275,6 +276,7 @@ struct iscsi_iser_task { struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];/* regd rdma buf */ struct iser_data_buf data[ISER_DIRS_NUM]; /* orig. data des*/ struct iser_data_buf data_copy[ISER_DIRS_NUM];/* contig. 
copy */ + int headers_initialized; }; struct iser_page_vec { @@ -322,22 +324,17 @@ void iser_conn_put(struct iser_conn *ib_conn); void iser_conn_terminate(struct iser_conn *ib_conn); -void iser_rcv_completion(struct iser_desc *desc, - unsigned long dto_xfer_len); +void iser_rcv_completion(struct iser_rx_desc *desc, + unsigned long dto_xfer_len, + struct iser_conn *ib_conn); -void iser_snd_completion(struct iser_desc *desc); +void iser_snd_completion(struct iser_tx_desc *desc, struct iser_conn *ib_conn); void iser_task_rdma_init(struct iscsi_iser_task *task); void iser_task_rdma_finalize(struct iscsi_iser_task *task); -void iser_dto_buffs_release(struct iser_dto *dto); - -int iser_regd_buff_release(struct iser_regd_buf *regd_buf); - -void iser_reg_single(struct iser_device *device, - struct iser_regd_buf *regd_buf, - enum dma_data_direction direction); +void iser_free_rx_descriptors(struct iser_conn *ib_conn); void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task, enum iser_data_dir cmd_dir); @@ -356,11 +353,9 @@ int iser_reg_page_vec(struct iser_conn *ib_conn, void iser_unreg_mem(struct iser_mem_reg *mem_reg); -int iser_post_recv(struct iser_desc *rx_desc); -int iser_post_send(struct iser_desc *tx_desc); - -int iser_conn_state_comp(struct iser_conn *ib_conn, - enum iser_ib_conn_state comp); +int iser_post_recvl(struct iser_conn *ib_conn); +int iser_post_recvm(struct iser_conn *ib_conn, int count); +int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc); int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, struct iser_data_buf *data, @@ -368,4 +363,6 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, enum dma_data_direction dma_dir); void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task); +int iser_initialize_task_headers(struct iscsi_task *task, + struct iser_tx_desc *tx_desc); #endif diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 9de640200ad..0b9ef071658 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -39,29 +39,6 @@ #include "iscsi_iser.h" -/* Constant PDU lengths calculations */ -#define ISER_TOTAL_HEADERS_LEN (sizeof (struct iser_hdr) + \ - sizeof (struct iscsi_hdr)) - -/* iser_dto_add_regd_buff - increments the reference count for * - * the registered buffer & adds it to the DTO object */ -static void iser_dto_add_regd_buff(struct iser_dto *dto, - struct iser_regd_buf *regd_buf, - unsigned long use_offset, - unsigned long use_size) -{ - int add_idx; - - atomic_inc(®d_buf->ref_count); - - add_idx = dto->regd_vector_len; - dto->regd[add_idx] = regd_buf; - dto->used_sz[add_idx] = use_size; - dto->offset[add_idx] = use_offset; - - dto->regd_vector_len++; -} - /* Register user buffer memory and initialize passive rdma * dto descriptor. 
Total data size is stored in * iser_task->data[ISER_DIR_IN].data_len @@ -122,9 +99,9 @@ iser_prepare_write_cmd(struct iscsi_task *task, struct iscsi_iser_task *iser_task = task->dd_data; struct iser_regd_buf *regd_buf; int err; - struct iser_dto *send_dto = &iser_task->desc.dto; struct iser_hdr *hdr = &iser_task->desc.iser_header; struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT]; + struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1]; err = iser_dma_map_task_data(iser_task, buf_out, @@ -163,135 +140,100 @@ iser_prepare_write_cmd(struct iscsi_task *task, if (imm_sz > 0) { iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n", task->itt, imm_sz); - iser_dto_add_regd_buff(send_dto, - regd_buf, - 0, - imm_sz); + tx_dsg->addr = regd_buf->reg.va; + tx_dsg->length = imm_sz; + tx_dsg->lkey = regd_buf->reg.lkey; + iser_task->desc.num_sge = 2; } return 0; } -/** - * iser_post_receive_control - allocates, initializes and posts receive DTO. - */ -static int iser_post_receive_control(struct iscsi_conn *conn) +/* creates a new tx descriptor and adds header regd buffer */ +static void iser_create_send_desc(struct iser_conn *ib_conn, + struct iser_tx_desc *tx_desc) { - struct iscsi_iser_conn *iser_conn = conn->dd_data; - struct iser_desc *rx_desc; - struct iser_regd_buf *regd_hdr; - struct iser_regd_buf *regd_data; - struct iser_dto *recv_dto = NULL; - struct iser_device *device = iser_conn->ib_conn->device; - int rx_data_size, err; - int posts, outstanding_unexp_pdus; - - /* for the login sequence we must support rx of upto 8K; login is done - * after conn create/bind (connect) and conn stop/bind (reconnect), - * what's common for both schemes is that the connection is not started - */ - if (conn->c_stage != ISCSI_CONN_STARTED) - rx_data_size = ISCSI_DEF_MAX_RECV_SEG_LEN; - else /* FIXME till user space sets conn->max_recv_dlength correctly */ - rx_data_size = 128; - - outstanding_unexp_pdus = - atomic_xchg(&iser_conn->ib_conn->unexpected_pdu_count, 0); - - /* - * in addition to the response buffer, replace those consumed by - * unexpected pdus. 
- */ - for (posts = 0; posts < 1 + outstanding_unexp_pdus; posts++) { - rx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO); - if (rx_desc == NULL) { - iser_err("Failed to alloc desc for post recv %d\n", - posts); - err = -ENOMEM; - goto post_rx_cache_alloc_failure; - } - rx_desc->type = ISCSI_RX; - rx_desc->data = kmalloc(rx_data_size, GFP_NOIO); - if (rx_desc->data == NULL) { - iser_err("Failed to alloc data buf for post recv %d\n", - posts); - err = -ENOMEM; - goto post_rx_kmalloc_failure; - } - - recv_dto = &rx_desc->dto; - recv_dto->ib_conn = iser_conn->ib_conn; - recv_dto->regd_vector_len = 0; + struct iser_device *device = ib_conn->device; - regd_hdr = &rx_desc->hdr_regd_buf; - memset(regd_hdr, 0, sizeof(struct iser_regd_buf)); - regd_hdr->device = device; - regd_hdr->virt_addr = rx_desc; /* == &rx_desc->iser_header */ - regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN; + ib_dma_sync_single_for_cpu(device->ib_device, + tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); - iser_reg_single(device, regd_hdr, DMA_FROM_DEVICE); - - iser_dto_add_regd_buff(recv_dto, regd_hdr, 0, 0); + memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr)); + tx_desc->iser_header.flags = ISER_VER; - regd_data = &rx_desc->data_regd_buf; - memset(regd_data, 0, sizeof(struct iser_regd_buf)); - regd_data->device = device; - regd_data->virt_addr = rx_desc->data; - regd_data->data_size = rx_data_size; + tx_desc->num_sge = 1; - iser_reg_single(device, regd_data, DMA_FROM_DEVICE); + if (tx_desc->tx_sg[0].lkey != device->mr->lkey) { + tx_desc->tx_sg[0].lkey = device->mr->lkey; + iser_dbg("sdesc %p lkey mismatch, fixing\n", tx_desc); + } +} - iser_dto_add_regd_buff(recv_dto, regd_data, 0, 0); - err = iser_post_recv(rx_desc); - if (err) { - iser_err("Failed iser_post_recv for post %d\n", posts); - goto post_rx_post_recv_failure; - } +int iser_alloc_rx_descriptors(struct iser_conn *ib_conn) +{ + int i, j; + u64 dma_addr; + struct iser_rx_desc *rx_desc; + struct ib_sge *rx_sg; + struct iser_device *device = ib_conn->device; + + ib_conn->rx_descs = kmalloc(ISER_QP_MAX_RECV_DTOS * + sizeof(struct iser_rx_desc), GFP_KERNEL); + if (!ib_conn->rx_descs) + goto rx_desc_alloc_fail; + + rx_desc = ib_conn->rx_descs; + + for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++) { + dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc, + ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + if (ib_dma_mapping_error(device->ib_device, dma_addr)) + goto rx_desc_dma_map_failed; + + rx_desc->dma_addr = dma_addr; + + rx_sg = &rx_desc->rx_sg; + rx_sg->addr = rx_desc->dma_addr; + rx_sg->length = ISER_RX_PAYLOAD_SIZE; + rx_sg->lkey = device->mr->lkey; } - /* all posts successful */ - return 0; -post_rx_post_recv_failure: - iser_dto_buffs_release(recv_dto); - kfree(rx_desc->data); -post_rx_kmalloc_failure: - kmem_cache_free(ig.desc_cache, rx_desc); -post_rx_cache_alloc_failure: - if (posts > 0) { - /* - * response buffer posted, but did not replace all unexpected - * pdu recv bufs. 
Ignore error, retry occurs next send - */ - outstanding_unexp_pdus -= (posts - 1); - err = 0; - } - atomic_add(outstanding_unexp_pdus, - &iser_conn->ib_conn->unexpected_pdu_count); + ib_conn->rx_desc_head = 0; + return 0; - return err; +rx_desc_dma_map_failed: + rx_desc = ib_conn->rx_descs; + for (j = 0; j < i; j++, rx_desc++) + ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr, + ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + kfree(ib_conn->rx_descs); + ib_conn->rx_descs = NULL; +rx_desc_alloc_fail: + iser_err("failed allocating rx descriptors / data buffers\n"); + return -ENOMEM; } -/* creates a new tx descriptor and adds header regd buffer */ -static void iser_create_send_desc(struct iscsi_iser_conn *iser_conn, - struct iser_desc *tx_desc) +void iser_free_rx_descriptors(struct iser_conn *ib_conn) { - struct iser_regd_buf *regd_hdr = &tx_desc->hdr_regd_buf; - struct iser_dto *send_dto = &tx_desc->dto; + int i; + struct iser_rx_desc *rx_desc; + struct iser_device *device = ib_conn->device; - memset(regd_hdr, 0, sizeof(struct iser_regd_buf)); - regd_hdr->device = iser_conn->ib_conn->device; - regd_hdr->virt_addr = tx_desc; /* == &tx_desc->iser_header */ - regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN; + if (ib_conn->login_buf) { + ib_dma_unmap_single(device->ib_device, ib_conn->login_dma, + ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE); + kfree(ib_conn->login_buf); + } - send_dto->ib_conn = iser_conn->ib_conn; - send_dto->notify_enable = 1; - send_dto->regd_vector_len = 0; + if (!ib_conn->rx_descs) + return; - memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr)); - tx_desc->iser_header.flags = ISER_VER; - - iser_dto_add_regd_buff(send_dto, regd_hdr, 0, 0); + rx_desc = ib_conn->rx_descs; + for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++) + ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr, + ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); + kfree(ib_conn->rx_descs); } /** @@ -301,46 +243,23 @@ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn) { struct iscsi_iser_conn *iser_conn = conn->dd_data; - int i; - /* - * FIXME this value should be declared to the target during login with - * the MaxOutstandingUnexpectedPDUs key when supported - */ - int initial_post_recv_bufs_num = ISER_MAX_RX_MISC_PDUS; - - iser_dbg("Initially post: %d\n", initial_post_recv_bufs_num); + iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX); /* Check that there is no posted recv or send buffers left - */ /* they must be consumed during the login phase */ - BUG_ON(atomic_read(&iser_conn->ib_conn->post_recv_buf_count) != 0); + BUG_ON(iser_conn->ib_conn->post_recv_buf_count != 0); BUG_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0); - /* Initial post receive buffers */ - for (i = 0; i < initial_post_recv_bufs_num; i++) { - if (iser_post_receive_control(conn) != 0) { - iser_err("Failed to post recv bufs at:%d conn:0x%p\n", - i, conn); - return -ENOMEM; - } - } - iser_dbg("Posted %d post recv bufs, conn:0x%p\n", i, conn); - return 0; -} + if (iser_alloc_rx_descriptors(iser_conn->ib_conn)) + return -ENOMEM; -static int -iser_check_xmit(struct iscsi_conn *conn, void *task) -{ - struct iscsi_iser_conn *iser_conn = conn->dd_data; + /* Initial post receive buffers */ + if (iser_post_recvm(iser_conn->ib_conn, ISER_MIN_POSTED_RX)) + return -ENOMEM; - if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) == - ISER_QP_MAX_REQ_DTOS) { - iser_dbg("%ld can't xmit task %p\n",jiffies,task); - return -ENOBUFS; - } return 0; } - /** * iser_send_command - send command PDU */ @@ -349,27 +268,18 @@ int 
iser_send_command(struct iscsi_conn *conn, { struct iscsi_iser_conn *iser_conn = conn->dd_data; struct iscsi_iser_task *iser_task = task->dd_data; - struct iser_dto *send_dto = NULL; unsigned long edtl; - int err = 0; + int err; struct iser_data_buf *data_buf; struct iscsi_cmd *hdr = (struct iscsi_cmd *)task->hdr; struct scsi_cmnd *sc = task->sc; - - if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) { - iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn); - return -EPERM; - } - if (iser_check_xmit(conn, task)) - return -ENOBUFS; + struct iser_tx_desc *tx_desc = &iser_task->desc; edtl = ntohl(hdr->data_length); /* build the tx desc regd header and add it to the tx desc dto */ - iser_task->desc.type = ISCSI_TX_SCSI_COMMAND; - send_dto = &iser_task->desc.dto; - send_dto->task = iser_task; - iser_create_send_desc(iser_conn, &iser_task->desc); + tx_desc->type = ISCSI_TX_SCSI_COMMAND; + iser_create_send_desc(iser_conn->ib_conn, tx_desc); if (hdr->flags & ISCSI_FLAG_CMD_READ) data_buf = &iser_task->data[ISER_DIR_IN]; @@ -398,23 +308,13 @@ int iser_send_command(struct iscsi_conn *conn, goto send_command_error; } - iser_reg_single(iser_conn->ib_conn->device, - send_dto->regd[0], DMA_TO_DEVICE); - - if (iser_post_receive_control(conn) != 0) { - iser_err("post_recv failed!\n"); - err = -ENOMEM; - goto send_command_error; - } - iser_task->status = ISER_TASK_STATUS_STARTED; - err = iser_post_send(&iser_task->desc); + err = iser_post_send(iser_conn->ib_conn, tx_desc); if (!err) return 0; send_command_error: - iser_dto_buffs_release(send_dto); iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err); return err; } @@ -428,20 +328,13 @@ int iser_send_data_out(struct iscsi_conn *conn, { struct iscsi_iser_conn *iser_conn = conn->dd_data; struct iscsi_iser_task *iser_task = task->dd_data; - struct iser_desc *tx_desc = NULL; - struct iser_dto *send_dto = NULL; + struct iser_tx_desc *tx_desc = NULL; + struct iser_regd_buf *regd_buf; unsigned long buf_offset; unsigned long data_seg_len; uint32_t itt; int err = 0; - - if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) { - iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn); - return -EPERM; - } - - if (iser_check_xmit(conn, task)) - return -ENOBUFS; + struct ib_sge *tx_dsg; itt = (__force uint32_t)hdr->itt; data_seg_len = ntoh24(hdr->dlength); @@ -450,28 +343,25 @@ int iser_send_data_out(struct iscsi_conn *conn, iser_dbg("%s itt %d dseg_len %d offset %d\n", __func__,(int)itt,(int)data_seg_len,(int)buf_offset); - tx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO); + tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC); if (tx_desc == NULL) { iser_err("Failed to alloc desc for post dataout\n"); return -ENOMEM; } tx_desc->type = ISCSI_TX_DATAOUT; + tx_desc->iser_header.flags = ISER_VER; memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr)); - /* build the tx desc regd header and add it to the tx desc dto */ - send_dto = &tx_desc->dto; - send_dto->task = iser_task; - iser_create_send_desc(iser_conn, tx_desc); - - iser_reg_single(iser_conn->ib_conn->device, - send_dto->regd[0], DMA_TO_DEVICE); + /* build the tx desc */ + iser_initialize_task_headers(task, tx_desc); - /* all data was registered for RDMA, we can use the lkey */ - iser_dto_add_regd_buff(send_dto, - &iser_task->rdma_regd[ISER_DIR_OUT], - buf_offset, - data_seg_len); + regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT]; + tx_dsg = &tx_desc->tx_sg[1]; + tx_dsg->addr = regd_buf->reg.va + buf_offset; + tx_dsg->length = 
data_seg_len; + tx_dsg->lkey = regd_buf->reg.lkey; + tx_desc->num_sge = 2; if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) { iser_err("Offset:%ld & DSL:%ld in Data-Out " @@ -485,12 +375,11 @@ int iser_send_data_out(struct iscsi_conn *conn, itt, buf_offset, data_seg_len); - err = iser_post_send(tx_desc); + err = iser_post_send(iser_conn->ib_conn, tx_desc); if (!err) return 0; send_data_out_error: - iser_dto_buffs_release(send_dto); kmem_cache_free(ig.desc_cache, tx_desc); iser_err("conn %p failed err %d\n",conn, err); return err; @@ -501,64 +390,44 @@ int iser_send_control(struct iscsi_conn *conn, { struct iscsi_iser_conn *iser_conn = conn->dd_data; struct iscsi_iser_task *iser_task = task->dd_data; - struct iser_desc *mdesc = &iser_task->desc; - struct iser_dto *send_dto = NULL; + struct iser_tx_desc *mdesc = &iser_task->desc; unsigned long data_seg_len; int err = 0; - struct iser_regd_buf *regd_buf; struct iser_device *device; - unsigned char opcode; - - if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) { - iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn); - return -EPERM; - } - - if (iser_check_xmit(conn, task)) - return -ENOBUFS; /* build the tx desc regd header and add it to the tx desc dto */ mdesc->type = ISCSI_TX_CONTROL; - send_dto = &mdesc->dto; - send_dto->task = NULL; - iser_create_send_desc(iser_conn, mdesc); + iser_create_send_desc(iser_conn->ib_conn, mdesc); device = iser_conn->ib_conn->device; - iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE); - data_seg_len = ntoh24(task->hdr->dlength); if (data_seg_len > 0) { - regd_buf = &mdesc->data_regd_buf; - memset(regd_buf, 0, sizeof(struct iser_regd_buf)); - regd_buf->device = device; - regd_buf->virt_addr = task->data; - regd_buf->data_size = task->data_count; - iser_reg_single(device, regd_buf, - DMA_TO_DEVICE); - iser_dto_add_regd_buff(send_dto, regd_buf, - 0, - data_seg_len); + struct ib_sge *tx_dsg = &mdesc->tx_sg[1]; + if (task != conn->login_task) { + iser_err("data present on non login task!!!\n"); + goto send_control_error; + } + memcpy(iser_conn->ib_conn->login_buf, task->data, + task->data_count); + tx_dsg->addr = iser_conn->ib_conn->login_dma; + tx_dsg->length = data_seg_len; + tx_dsg->lkey = device->mr->lkey; + mdesc->num_sge = 2; } - opcode = task->hdr->opcode & ISCSI_OPCODE_MASK; - - /* post recv buffer for response if one is expected */ - if (!(opcode == ISCSI_OP_NOOP_OUT && task->hdr->itt == RESERVED_ITT)) { - if (iser_post_receive_control(conn) != 0) { - iser_err("post_rcv_buff failed!\n"); - err = -ENOMEM; + if (task == conn->login_task) { + err = iser_post_recvl(iser_conn->ib_conn); + if (err) goto send_control_error; - } } - err = iser_post_send(mdesc); + err = iser_post_send(iser_conn->ib_conn, mdesc); if (!err) return 0; send_control_error: - iser_dto_buffs_release(send_dto); iser_err("conn %p failed err %d\n",conn, err); return err; } @@ -566,104 +435,71 @@ send_control_error: /** * iser_rcv_dto_completion - recv DTO completion */ -void iser_rcv_completion(struct iser_desc *rx_desc, - unsigned long dto_xfer_len) +void iser_rcv_completion(struct iser_rx_desc *rx_desc, + unsigned long rx_xfer_len, + struct iser_conn *ib_conn) { - struct iser_dto *dto = &rx_desc->dto; - struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn; - struct iscsi_task *task; - struct iscsi_iser_task *iser_task; + struct iscsi_iser_conn *conn = ib_conn->iser_conn; struct iscsi_hdr *hdr; - char *rx_data = NULL; - int rx_data_len = 0; - unsigned char opcode; - - hdr = 
&rx_desc->iscsi_header; + u64 rx_dma; + int rx_buflen, outstanding, count, err; + + /* differentiate between login to all other PDUs */ + if ((char *)rx_desc == ib_conn->login_buf) { + rx_dma = ib_conn->login_dma; + rx_buflen = ISER_RX_LOGIN_SIZE; + } else { + rx_dma = rx_desc->dma_addr; + rx_buflen = ISER_RX_PAYLOAD_SIZE; + } - iser_dbg("op 0x%x itt 0x%x\n", hdr->opcode,hdr->itt); + ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, rx_dma, + rx_buflen, DMA_FROM_DEVICE); - if (dto_xfer_len > ISER_TOTAL_HEADERS_LEN) { /* we have data */ - rx_data_len = dto_xfer_len - ISER_TOTAL_HEADERS_LEN; - rx_data = dto->regd[1]->virt_addr; - rx_data += dto->offset[1]; - } + hdr = &rx_desc->iscsi_header; - opcode = hdr->opcode & ISCSI_OPCODE_MASK; - - if (opcode == ISCSI_OP_SCSI_CMD_RSP) { - spin_lock(&conn->iscsi_conn->session->lock); - task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt); - if (task) - __iscsi_get_task(task); - spin_unlock(&conn->iscsi_conn->session->lock); - - if (!task) - iser_err("itt can't be matched to task!!! " - "conn %p opcode %d itt %d\n", - conn->iscsi_conn, opcode, hdr->itt); - else { - iser_task = task->dd_data; - iser_dbg("itt %d task %p\n",hdr->itt, task); - iser_task->status = ISER_TASK_STATUS_COMPLETED; - iser_task_rdma_finalize(iser_task); - iscsi_put_task(task); - } - } - iser_dto_buffs_release(dto); + iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode, + hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN)); - iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len); + iscsi_iser_recv(conn->iscsi_conn, hdr, + rx_desc->data, rx_xfer_len - ISER_HEADERS_LEN); - kfree(rx_desc->data); - kmem_cache_free(ig.desc_cache, rx_desc); + ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma, + rx_buflen, DMA_FROM_DEVICE); /* decrementing conn->post_recv_buf_count only --after-- freeing the * * task eliminates the need to worry on tasks which are completed in * * parallel to the execution of iser_conn_term. 
So the code that waits * * for the posted rx bufs refcount to become zero handles everything */ - atomic_dec(&conn->ib_conn->post_recv_buf_count); + conn->ib_conn->post_recv_buf_count--; - /* - * if an unexpected PDU was received then the recv wr consumed must - * be replaced, this is done in the next send of a control-type PDU - */ - if (opcode == ISCSI_OP_NOOP_IN && hdr->itt == RESERVED_ITT) { - /* nop-in with itt = 0xffffffff */ - atomic_inc(&conn->ib_conn->unexpected_pdu_count); - } - else if (opcode == ISCSI_OP_ASYNC_EVENT) { - /* asyncronous message */ - atomic_inc(&conn->ib_conn->unexpected_pdu_count); + if (rx_dma == ib_conn->login_dma) + return; + + outstanding = ib_conn->post_recv_buf_count; + if (outstanding + ISER_MIN_POSTED_RX <= ISER_QP_MAX_RECV_DTOS) { + count = min(ISER_QP_MAX_RECV_DTOS - outstanding, + ISER_MIN_POSTED_RX); + err = iser_post_recvm(ib_conn, count); + if (err) + iser_err("posting %d rx bufs err %d\n", count, err); } - /* a reject PDU consumes the recv buf posted for the response */ } -void iser_snd_completion(struct iser_desc *tx_desc) +void iser_snd_completion(struct iser_tx_desc *tx_desc, + struct iser_conn *ib_conn) { - struct iser_dto *dto = &tx_desc->dto; - struct iser_conn *ib_conn = dto->ib_conn; - struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn; - struct iscsi_conn *conn = iser_conn->iscsi_conn; struct iscsi_task *task; - int resume_tx = 0; - - iser_dbg("Initiator, Data sent dto=0x%p\n", dto); - - iser_dto_buffs_release(dto); + struct iser_device *device = ib_conn->device; - if (tx_desc->type == ISCSI_TX_DATAOUT) + if (tx_desc->type == ISCSI_TX_DATAOUT) { + ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr, + ISER_HEADERS_LEN, DMA_TO_DEVICE); kmem_cache_free(ig.desc_cache, tx_desc); - - if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) == - ISER_QP_MAX_REQ_DTOS) - resume_tx = 1; + } atomic_dec(&ib_conn->post_send_buf_count); - if (resume_tx) { - iser_dbg("%ld resuming tx\n",jiffies); - iscsi_conn_queue_work(conn); - } - if (tx_desc->type == ISCSI_TX_CONTROL) { /* this arithmetic is legal by libiscsi dd_data allocation */ task = (void *) ((long)(void *)tx_desc - @@ -692,7 +528,6 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task) void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task) { - int deferred; int is_rdma_aligned = 1; struct iser_regd_buf *regd; @@ -710,32 +545,17 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task) if (iser_task->dir[ISER_DIR_IN]) { regd = &iser_task->rdma_regd[ISER_DIR_IN]; - deferred = iser_regd_buff_release(regd); - if (deferred) { - iser_err("%d references remain for BUF-IN rdma reg\n", - atomic_read(®d->ref_count)); - } + if (regd->reg.is_fmr) + iser_unreg_mem(®d->reg); } if (iser_task->dir[ISER_DIR_OUT]) { regd = &iser_task->rdma_regd[ISER_DIR_OUT]; - deferred = iser_regd_buff_release(regd); - if (deferred) { - iser_err("%d references remain for BUF-OUT rdma reg\n", - atomic_read(®d->ref_count)); - } + if (regd->reg.is_fmr) + iser_unreg_mem(®d->reg); } /* if the data was unaligned, it was already unmapped and then copied */ if (is_rdma_aligned) iser_dma_unmap_task_data(iser_task); } - -void iser_dto_buffs_release(struct iser_dto *dto) -{ - int i; - - for (i = 0; i < dto->regd_vector_len; i++) - iser_regd_buff_release(dto->regd[i]); -} - diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 274c883ef3e..fb88d6896b6 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c 
@@ -41,62 +41,6 @@ #define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */ /** - * Decrements the reference count for the - * registered buffer & releases it - * - * returns 0 if released, 1 if deferred - */ -int iser_regd_buff_release(struct iser_regd_buf *regd_buf) -{ - struct ib_device *dev; - - if ((atomic_read(®d_buf->ref_count) == 0) || - atomic_dec_and_test(®d_buf->ref_count)) { - /* if we used the dma mr, unreg is just NOP */ - if (regd_buf->reg.is_fmr) - iser_unreg_mem(®d_buf->reg); - - if (regd_buf->dma_addr) { - dev = regd_buf->device->ib_device; - ib_dma_unmap_single(dev, - regd_buf->dma_addr, - regd_buf->data_size, - regd_buf->direction); - } - /* else this regd buf is associated with task which we */ - /* dma_unmap_single/sg later */ - return 0; - } else { - iser_dbg("Release deferred, regd.buff: 0x%p\n", regd_buf); - return 1; - } -} - -/** - * iser_reg_single - fills registered buffer descriptor with - * registration information - */ -void iser_reg_single(struct iser_device *device, - struct iser_regd_buf *regd_buf, - enum dma_data_direction direction) -{ - u64 dma_addr; - - dma_addr = ib_dma_map_single(device->ib_device, - regd_buf->virt_addr, - regd_buf->data_size, direction); - BUG_ON(ib_dma_mapping_error(device->ib_device, dma_addr)); - - regd_buf->reg.lkey = device->mr->lkey; - regd_buf->reg.len = regd_buf->data_size; - regd_buf->reg.va = dma_addr; - regd_buf->reg.is_fmr = 0; - - regd_buf->dma_addr = dma_addr; - regd_buf->direction = direction; -} - -/** * iser_start_rdma_unaligned_sg */ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, @@ -109,10 +53,10 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, unsigned long cmd_data_len = data->data_len; if (cmd_data_len > ISER_KMALLOC_THRESHOLD) - mem = (void *)__get_free_pages(GFP_NOIO, + mem = (void *)__get_free_pages(GFP_ATOMIC, ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT); else - mem = kmalloc(cmd_data_len, GFP_NOIO); + mem = kmalloc(cmd_data_len, GFP_ATOMIC); if (mem == NULL) { iser_err("Failed to allocate mem size %d %d for copying sglist\n", @@ -474,9 +418,5 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task, return err; } } - - /* take a reference on this regd buf such that it will not be released * - * (eg in send dto completion) before we get the scsi response */ - atomic_inc(®d_buf->ref_count); return 0; } diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 8579f32ce38..308d17bb514 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -37,9 +37,8 @@ #include "iscsi_iser.h" #define ISCSI_ISER_MAX_CONN 8 -#define ISER_MAX_CQ_LEN ((ISER_QP_MAX_RECV_DTOS + \ - ISER_QP_MAX_REQ_DTOS) * \ - ISCSI_ISER_MAX_CONN) +#define ISER_MAX_RX_CQ_LEN (ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN) +#define ISER_MAX_TX_CQ_LEN (ISER_QP_MAX_REQ_DTOS * ISCSI_ISER_MAX_CONN) static void iser_cq_tasklet_fn(unsigned long data); static void iser_cq_callback(struct ib_cq *cq, void *cq_context); @@ -67,15 +66,23 @@ static int iser_create_device_ib_res(struct iser_device *device) if (IS_ERR(device->pd)) goto pd_err; - device->cq = ib_create_cq(device->ib_device, + device->rx_cq = ib_create_cq(device->ib_device, iser_cq_callback, iser_cq_event_callback, (void *)device, - ISER_MAX_CQ_LEN, 0); - if (IS_ERR(device->cq)) - goto cq_err; + ISER_MAX_RX_CQ_LEN, 0); + if (IS_ERR(device->rx_cq)) + goto rx_cq_err; - if (ib_req_notify_cq(device->cq, IB_CQ_NEXT_COMP)) + 
device->tx_cq = ib_create_cq(device->ib_device, + NULL, iser_cq_event_callback, + (void *)device, + ISER_MAX_TX_CQ_LEN, 0); + + if (IS_ERR(device->tx_cq)) + goto tx_cq_err; + + if (ib_req_notify_cq(device->rx_cq, IB_CQ_NEXT_COMP)) goto cq_arm_err; tasklet_init(&device->cq_tasklet, @@ -93,8 +100,10 @@ static int iser_create_device_ib_res(struct iser_device *device) dma_mr_err: tasklet_kill(&device->cq_tasklet); cq_arm_err: - ib_destroy_cq(device->cq); -cq_err: + ib_destroy_cq(device->tx_cq); +tx_cq_err: + ib_destroy_cq(device->rx_cq); +rx_cq_err: ib_dealloc_pd(device->pd); pd_err: iser_err("failed to allocate an IB resource\n"); @@ -112,11 +121,13 @@ static void iser_free_device_ib_res(struct iser_device *device) tasklet_kill(&device->cq_tasklet); (void)ib_dereg_mr(device->mr); - (void)ib_destroy_cq(device->cq); + (void)ib_destroy_cq(device->tx_cq); + (void)ib_destroy_cq(device->rx_cq); (void)ib_dealloc_pd(device->pd); device->mr = NULL; - device->cq = NULL; + device->tx_cq = NULL; + device->rx_cq = NULL; device->pd = NULL; } @@ -129,13 +140,23 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn) { struct iser_device *device; struct ib_qp_init_attr init_attr; - int ret; + int ret = -ENOMEM; struct ib_fmr_pool_param params; BUG_ON(ib_conn->device == NULL); device = ib_conn->device; + ib_conn->login_buf = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL); + if (!ib_conn->login_buf) { + goto alloc_err; + ret = -ENOMEM; + } + + ib_conn->login_dma = ib_dma_map_single(ib_conn->device->ib_device, + (void *)ib_conn->login_buf, ISER_RX_LOGIN_SIZE, + DMA_FROM_DEVICE); + ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) + (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)), GFP_KERNEL); @@ -169,12 +190,12 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn) init_attr.event_handler = iser_qp_event_callback; init_attr.qp_context = (void *)ib_conn; - init_attr.send_cq = device->cq; - init_attr.recv_cq = device->cq; + init_attr.send_cq = device->tx_cq; + init_attr.recv_cq = device->rx_cq; init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS; init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS; - init_attr.cap.max_send_sge = MAX_REGD_BUF_VECTOR_LEN; - init_attr.cap.max_recv_sge = 2; + init_attr.cap.max_send_sge = 2; + init_attr.cap.max_recv_sge = 1; init_attr.sq_sig_type = IB_SIGNAL_REQ_WR; init_attr.qp_type = IB_QPT_RC; @@ -192,6 +213,7 @@ qp_err: (void)ib_destroy_fmr_pool(ib_conn->fmr_pool); fmr_pool_err: kfree(ib_conn->page_vec); + kfree(ib_conn->login_buf); alloc_err: iser_err("unable to alloc mem or create resource, err %d\n", ret); return ret; @@ -278,17 +300,6 @@ static void iser_device_try_release(struct iser_device *device) mutex_unlock(&ig.device_list_mutex); } -int iser_conn_state_comp(struct iser_conn *ib_conn, - enum iser_ib_conn_state comp) -{ - int ret; - - spin_lock_bh(&ib_conn->lock); - ret = (ib_conn->state == comp); - spin_unlock_bh(&ib_conn->lock); - return ret; -} - static int iser_conn_state_comp_exch(struct iser_conn *ib_conn, enum iser_ib_conn_state comp, enum iser_ib_conn_state exch) @@ -314,7 +325,7 @@ static void iser_conn_release(struct iser_conn *ib_conn) mutex_lock(&ig.connlist_mutex); list_del(&ib_conn->conn_list); mutex_unlock(&ig.connlist_mutex); - + iser_free_rx_descriptors(ib_conn); iser_free_ib_conn_res(ib_conn); ib_conn->device = NULL; /* on EVENT_ADDR_ERROR there's no device yet for this conn */ @@ -442,7 +453,7 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id) ISCSI_ERR_CONN_FAILED); /* Complete the termination process if no posts are pending 
*/ - if ((atomic_read(&ib_conn->post_recv_buf_count) == 0) && + if (ib_conn->post_recv_buf_count == 0 && (atomic_read(&ib_conn->post_send_buf_count) == 0)) { ib_conn->state = ISER_CONN_DOWN; wake_up_interruptible(&ib_conn->wait); @@ -489,9 +500,8 @@ void iser_conn_init(struct iser_conn *ib_conn) { ib_conn->state = ISER_CONN_INIT; init_waitqueue_head(&ib_conn->wait); - atomic_set(&ib_conn->post_recv_buf_count, 0); + ib_conn->post_recv_buf_count = 0; atomic_set(&ib_conn->post_send_buf_count, 0); - atomic_set(&ib_conn->unexpected_pdu_count, 0); atomic_set(&ib_conn->refcount, 1); INIT_LIST_HEAD(&ib_conn->conn_list); spin_lock_init(&ib_conn->lock); @@ -626,136 +636,97 @@ void iser_unreg_mem(struct iser_mem_reg *reg) reg->mem_h = NULL; } -/** - * iser_dto_to_iov - builds IOV from a dto descriptor - */ -static void iser_dto_to_iov(struct iser_dto *dto, struct ib_sge *iov, int iov_len) +int iser_post_recvl(struct iser_conn *ib_conn) { - int i; - struct ib_sge *sge; - struct iser_regd_buf *regd_buf; - - if (dto->regd_vector_len > iov_len) { - iser_err("iov size %d too small for posting dto of len %d\n", - iov_len, dto->regd_vector_len); - BUG(); - } + struct ib_recv_wr rx_wr, *rx_wr_failed; + struct ib_sge sge; + int ib_ret; - for (i = 0; i < dto->regd_vector_len; i++) { - sge = &iov[i]; - regd_buf = dto->regd[i]; - - sge->addr = regd_buf->reg.va; - sge->length = regd_buf->reg.len; - sge->lkey = regd_buf->reg.lkey; - - if (dto->used_sz[i] > 0) /* Adjust size */ - sge->length = dto->used_sz[i]; - - /* offset and length should not exceed the regd buf length */ - if (sge->length + dto->offset[i] > regd_buf->reg.len) { - iser_err("Used len:%ld + offset:%d, exceed reg.buf.len:" - "%ld in dto:0x%p [%d], va:0x%08lX\n", - (unsigned long)sge->length, dto->offset[i], - (unsigned long)regd_buf->reg.len, dto, i, - (unsigned long)sge->addr); - BUG(); - } + sge.addr = ib_conn->login_dma; + sge.length = ISER_RX_LOGIN_SIZE; + sge.lkey = ib_conn->device->mr->lkey; - sge->addr += dto->offset[i]; /* Adjust offset */ + rx_wr.wr_id = (unsigned long)ib_conn->login_buf; + rx_wr.sg_list = &sge; + rx_wr.num_sge = 1; + rx_wr.next = NULL; + + ib_conn->post_recv_buf_count++; + ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed); + if (ib_ret) { + iser_err("ib_post_recv failed ret=%d\n", ib_ret); + ib_conn->post_recv_buf_count--; } + return ib_ret; } -/** - * iser_post_recv - Posts a receive buffer. 
- * - * returns 0 on success, -1 on failure - */ -int iser_post_recv(struct iser_desc *rx_desc) +int iser_post_recvm(struct iser_conn *ib_conn, int count) { - int ib_ret, ret_val = 0; - struct ib_recv_wr recv_wr, *recv_wr_failed; - struct ib_sge iov[2]; - struct iser_conn *ib_conn; - struct iser_dto *recv_dto = &rx_desc->dto; - - /* Retrieve conn */ - ib_conn = recv_dto->ib_conn; - - iser_dto_to_iov(recv_dto, iov, 2); + struct ib_recv_wr *rx_wr, *rx_wr_failed; + int i, ib_ret; + unsigned int my_rx_head = ib_conn->rx_desc_head; + struct iser_rx_desc *rx_desc; + + for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) { + rx_desc = &ib_conn->rx_descs[my_rx_head]; + rx_wr->wr_id = (unsigned long)rx_desc; + rx_wr->sg_list = &rx_desc->rx_sg; + rx_wr->num_sge = 1; + rx_wr->next = rx_wr + 1; + my_rx_head = (my_rx_head + 1) & (ISER_QP_MAX_RECV_DTOS - 1); + } - recv_wr.next = NULL; - recv_wr.sg_list = iov; - recv_wr.num_sge = recv_dto->regd_vector_len; - recv_wr.wr_id = (unsigned long)rx_desc; + rx_wr--; + rx_wr->next = NULL; /* mark end of work requests list */ - atomic_inc(&ib_conn->post_recv_buf_count); - ib_ret = ib_post_recv(ib_conn->qp, &recv_wr, &recv_wr_failed); + ib_conn->post_recv_buf_count += count; + ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed); if (ib_ret) { iser_err("ib_post_recv failed ret=%d\n", ib_ret); - atomic_dec(&ib_conn->post_recv_buf_count); - ret_val = -1; - } - - return ret_val; + ib_conn->post_recv_buf_count -= count; + } else + ib_conn->rx_desc_head = my_rx_head; + return ib_ret; } + /** * iser_start_send - Initiate a Send DTO operation * * returns 0 on success, -1 on failure */ -int iser_post_send(struct iser_desc *tx_desc) +int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc) { - int ib_ret, ret_val = 0; + int ib_ret; struct ib_send_wr send_wr, *send_wr_failed; - struct ib_sge iov[MAX_REGD_BUF_VECTOR_LEN]; - struct iser_conn *ib_conn; - struct iser_dto *dto = &tx_desc->dto; - ib_conn = dto->ib_conn; - - iser_dto_to_iov(dto, iov, MAX_REGD_BUF_VECTOR_LEN); + ib_dma_sync_single_for_device(ib_conn->device->ib_device, + tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); send_wr.next = NULL; send_wr.wr_id = (unsigned long)tx_desc; - send_wr.sg_list = iov; - send_wr.num_sge = dto->regd_vector_len; + send_wr.sg_list = tx_desc->tx_sg; + send_wr.num_sge = tx_desc->num_sge; send_wr.opcode = IB_WR_SEND; - send_wr.send_flags = dto->notify_enable ? 
IB_SEND_SIGNALED : 0; + send_wr.send_flags = IB_SEND_SIGNALED; atomic_inc(&ib_conn->post_send_buf_count); ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed); if (ib_ret) { - iser_err("Failed to start SEND DTO, dto: 0x%p, IOV len: %d\n", - dto, dto->regd_vector_len); iser_err("ib_post_send failed, ret:%d\n", ib_ret); atomic_dec(&ib_conn->post_send_buf_count); - ret_val = -1; } - - return ret_val; + return ib_ret; } -static void iser_handle_comp_error(struct iser_desc *desc) +static void iser_handle_comp_error(struct iser_tx_desc *desc, + struct iser_conn *ib_conn) { - struct iser_dto *dto = &desc->dto; - struct iser_conn *ib_conn = dto->ib_conn; - - iser_dto_buffs_release(dto); - - if (desc->type == ISCSI_RX) { - kfree(desc->data); + if (desc && desc->type == ISCSI_TX_DATAOUT) kmem_cache_free(ig.desc_cache, desc); - atomic_dec(&ib_conn->post_recv_buf_count); - } else { /* type is TX control/command/dataout */ - if (desc->type == ISCSI_TX_DATAOUT) - kmem_cache_free(ig.desc_cache, desc); - atomic_dec(&ib_conn->post_send_buf_count); - } - if (atomic_read(&ib_conn->post_recv_buf_count) == 0 && + if (ib_conn->post_recv_buf_count == 0 && atomic_read(&ib_conn->post_send_buf_count) == 0) { /* getting here when the state is UP means that the conn is * * being terminated asynchronously from the iSCSI layer's * @@ -774,32 +745,74 @@ static void iser_handle_comp_error(struct iser_desc *desc) } } +static int iser_drain_tx_cq(struct iser_device *device) +{ + struct ib_cq *cq = device->tx_cq; + struct ib_wc wc; + struct iser_tx_desc *tx_desc; + struct iser_conn *ib_conn; + int completed_tx = 0; + + while (ib_poll_cq(cq, 1, &wc) == 1) { + tx_desc = (struct iser_tx_desc *) (unsigned long) wc.wr_id; + ib_conn = wc.qp->qp_context; + if (wc.status == IB_WC_SUCCESS) { + if (wc.opcode == IB_WC_SEND) + iser_snd_completion(tx_desc, ib_conn); + else + iser_err("expected opcode %d got %d\n", + IB_WC_SEND, wc.opcode); + } else { + iser_err("tx id %llx status %d vend_err %x\n", + wc.wr_id, wc.status, wc.vendor_err); + atomic_dec(&ib_conn->post_send_buf_count); + iser_handle_comp_error(tx_desc, ib_conn); + } + completed_tx++; + } + return completed_tx; +} + + static void iser_cq_tasklet_fn(unsigned long data) { struct iser_device *device = (struct iser_device *)data; - struct ib_cq *cq = device->cq; + struct ib_cq *cq = device->rx_cq; struct ib_wc wc; - struct iser_desc *desc; + struct iser_rx_desc *desc; unsigned long xfer_len; + struct iser_conn *ib_conn; + int completed_tx, completed_rx; + completed_tx = completed_rx = 0; while (ib_poll_cq(cq, 1, &wc) == 1) { - desc = (struct iser_desc *) (unsigned long) wc.wr_id; + desc = (struct iser_rx_desc *) (unsigned long) wc.wr_id; BUG_ON(desc == NULL); - + ib_conn = wc.qp->qp_context; if (wc.status == IB_WC_SUCCESS) { - if (desc->type == ISCSI_RX) { + if (wc.opcode == IB_WC_RECV) { xfer_len = (unsigned long)wc.byte_len; - iser_rcv_completion(desc, xfer_len); - } else /* type == ISCSI_TX_CONTROL/SCSI_CMD/DOUT */ - iser_snd_completion(desc); + iser_rcv_completion(desc, xfer_len, ib_conn); + } else + iser_err("expected opcode %d got %d\n", + IB_WC_RECV, wc.opcode); } else { - iser_err("comp w. 
error op %d status %d\n",desc->type,wc.status); - iser_handle_comp_error(desc); + if (wc.status != IB_WC_WR_FLUSH_ERR) + iser_err("rx id %llx status %d vend_err %x\n", + wc.wr_id, wc.status, wc.vendor_err); + ib_conn->post_recv_buf_count--; + iser_handle_comp_error(NULL, ib_conn); } + completed_rx++; + if (!(completed_rx & 63)) + completed_tx += iser_drain_tx_cq(device); } /* #warning "it is assumed here that arming CQ only once its empty" * * " would not cause interrupts to be missed" */ ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); + + completed_tx += iser_drain_tx_cq(device); + iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx); } static void iser_cq_callback(struct ib_cq *cq, void *cq_context) diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 54c8fe25c42..ed3f9ebae88 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -80,7 +80,8 @@ MODULE_PARM_DESC(mellanox_workarounds, static void srp_add_one(struct ib_device *device); static void srp_remove_one(struct ib_device *device); -static void srp_completion(struct ib_cq *cq, void *target_ptr); +static void srp_recv_completion(struct ib_cq *cq, void *target_ptr); +static void srp_send_completion(struct ib_cq *cq, void *target_ptr); static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event); static struct scsi_transport_template *ib_srp_transport_template; @@ -227,14 +228,21 @@ static int srp_create_target_ib(struct srp_target_port *target) if (!init_attr) return -ENOMEM; - target->cq = ib_create_cq(target->srp_host->srp_dev->dev, - srp_completion, NULL, target, SRP_CQ_SIZE, 0); - if (IS_ERR(target->cq)) { - ret = PTR_ERR(target->cq); - goto out; + target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev, + srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0); + if (IS_ERR(target->recv_cq)) { + ret = PTR_ERR(target->recv_cq); + goto err; } - ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP); + target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev, + srp_send_completion, NULL, target, SRP_SQ_SIZE, 0); + if (IS_ERR(target->send_cq)) { + ret = PTR_ERR(target->send_cq); + goto err_recv_cq; + } + + ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP); init_attr->event_handler = srp_qp_event; init_attr->cap.max_send_wr = SRP_SQ_SIZE; @@ -243,24 +251,32 @@ static int srp_create_target_ib(struct srp_target_port *target) init_attr->cap.max_send_sge = 1; init_attr->sq_sig_type = IB_SIGNAL_ALL_WR; init_attr->qp_type = IB_QPT_RC; - init_attr->send_cq = target->cq; - init_attr->recv_cq = target->cq; + init_attr->send_cq = target->send_cq; + init_attr->recv_cq = target->recv_cq; target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr); if (IS_ERR(target->qp)) { ret = PTR_ERR(target->qp); - ib_destroy_cq(target->cq); - goto out; + goto err_send_cq; } ret = srp_init_qp(target, target->qp); - if (ret) { - ib_destroy_qp(target->qp); - ib_destroy_cq(target->cq); - goto out; - } + if (ret) + goto err_qp; -out: + kfree(init_attr); + return 0; + +err_qp: + ib_destroy_qp(target->qp); + +err_send_cq: + ib_destroy_cq(target->send_cq); + +err_recv_cq: + ib_destroy_cq(target->recv_cq); + +err: kfree(init_attr); return ret; } @@ -270,7 +286,8 @@ static void srp_free_target_ib(struct srp_target_port *target) int i; ib_destroy_qp(target->qp); - ib_destroy_cq(target->cq); + ib_destroy_cq(target->send_cq); + ib_destroy_cq(target->recv_cq); for (i = 0; i < SRP_RQ_SIZE; ++i) srp_free_iu(target->srp_host, target->rx_ring[i]); @@ -568,7 +585,9 @@ 
static int srp_reconnect_target(struct srp_target_port *target) if (ret) goto err; - while (ib_poll_cq(target->cq, 1, &wc) > 0) + while (ib_poll_cq(target->recv_cq, 1, &wc) > 0) + ; /* nothing */ + while (ib_poll_cq(target->send_cq, 1, &wc) > 0) ; /* nothing */ spin_lock_irq(target->scsi_host->host_lock); @@ -851,7 +870,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) struct srp_iu *iu; u8 opcode; - iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV]; + iu = target->rx_ring[wc->wr_id]; dev = target->srp_host->srp_dev->dev; ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len, @@ -898,7 +917,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) DMA_FROM_DEVICE); } -static void srp_completion(struct ib_cq *cq, void *target_ptr) +static void srp_recv_completion(struct ib_cq *cq, void *target_ptr) { struct srp_target_port *target = target_ptr; struct ib_wc wc; @@ -907,17 +926,31 @@ static void srp_completion(struct ib_cq *cq, void *target_ptr) while (ib_poll_cq(cq, 1, &wc) > 0) { if (wc.status) { shost_printk(KERN_ERR, target->scsi_host, - PFX "failed %s status %d\n", - wc.wr_id & SRP_OP_RECV ? "receive" : "send", + PFX "failed receive status %d\n", wc.status); target->qp_in_error = 1; break; } - if (wc.wr_id & SRP_OP_RECV) - srp_handle_recv(target, &wc); - else - ++target->tx_tail; + srp_handle_recv(target, &wc); + } +} + +static void srp_send_completion(struct ib_cq *cq, void *target_ptr) +{ + struct srp_target_port *target = target_ptr; + struct ib_wc wc; + + while (ib_poll_cq(cq, 1, &wc) > 0) { + if (wc.status) { + shost_printk(KERN_ERR, target->scsi_host, + PFX "failed send status %d\n", + wc.status); + target->qp_in_error = 1; + break; + } + + ++target->tx_tail; } } @@ -930,7 +963,7 @@ static int __srp_post_recv(struct srp_target_port *target) int ret; next = target->rx_head & (SRP_RQ_SIZE - 1); - wr.wr_id = next | SRP_OP_RECV; + wr.wr_id = next; iu = target->rx_ring[next]; list.addr = iu->dma; @@ -970,6 +1003,8 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target, { s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 
1 : 2; + srp_send_completion(target->send_cq, target); + if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE) return NULL; diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h index e185b907fc1..5a80eac6fda 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.h +++ b/drivers/infiniband/ulp/srp/ib_srp.h @@ -60,7 +60,6 @@ enum { SRP_RQ_SHIFT = 6, SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT, SRP_SQ_SIZE = SRP_RQ_SIZE - 1, - SRP_CQ_SIZE = SRP_SQ_SIZE + SRP_RQ_SIZE, SRP_TAG_TSK_MGMT = 1 << (SRP_RQ_SHIFT + 1), @@ -69,8 +68,6 @@ enum { SRP_FMR_DIRTY_SIZE = SRP_FMR_POOL_SIZE / 4 }; -#define SRP_OP_RECV (1 << 31) - enum srp_target_state { SRP_TARGET_LIVE, SRP_TARGET_CONNECTING, @@ -133,7 +130,8 @@ struct srp_target_port { int path_query_id; struct ib_cm_id *cm_id; - struct ib_cq *cq; + struct ib_cq *recv_cq; + struct ib_cq *send_cq; struct ib_qp *qp; int max_ti_iu_len; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 509c8f3dd9a..70ffbd071b2 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -4680,7 +4680,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf) { unsigned long cpu; struct page *spare_page; - struct raid5_percpu *allcpus; + struct raid5_percpu __percpu *allcpus; void *scribble; int err; diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index dd708359b45..0f86f5e3672 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -405,7 +405,7 @@ struct raid5_private_data { * lists and performing address * conversions */ - } *percpu; + } __percpu *percpu; size_t scribble_len; /* size of scribble region must be * associated with conf to handle * cpu hotplug while reshaping diff --git a/drivers/media/dvb/firewire/firedtv-fw.c b/drivers/media/dvb/firewire/firedtv-fw.c index 7a3de16fba0..75afe4f81e3 100644 --- a/drivers/media/dvb/firewire/firedtv-fw.c +++ b/drivers/media/dvb/firewire/firedtv-fw.c @@ -239,47 +239,18 @@ static const struct fw_address_region fcp_region = { }; /* Adjust the template string if models with longer names appear. */ -#define MAX_MODEL_NAME_LEN ((int)DIV_ROUND_UP(sizeof("FireDTV ????"), 4)) - -static size_t model_name(u32 *directory, __be32 *buffer) -{ - struct fw_csr_iterator ci; - int i, length, key, value, last_key = 0; - u32 *block = NULL; - - fw_csr_iterator_init(&ci, directory); - while (fw_csr_iterator_next(&ci, &key, &value)) { - if (last_key == CSR_MODEL && - key == (CSR_DESCRIPTOR | CSR_LEAF)) - block = ci.p - 1 + value; - last_key = key; - } - - if (block == NULL) - return 0; - - length = min((int)(block[0] >> 16) - 2, MAX_MODEL_NAME_LEN); - if (length <= 0) - return 0; - - /* fast-forward to text string */ - block += 3; - - for (i = 0; i < length; i++) - buffer[i] = cpu_to_be32(block[i]); - - return length * 4; -} +#define MAX_MODEL_NAME_LEN sizeof("FireDTV ????") static int node_probe(struct device *dev) { struct firedtv *fdtv; - __be32 name[MAX_MODEL_NAME_LEN]; + char name[MAX_MODEL_NAME_LEN]; int name_len, err; - name_len = model_name(fw_unit(dev)->directory, name); + name_len = fw_csr_string(fw_unit(dev)->directory, CSR_MODEL, + name, sizeof(name)); - fdtv = fdtv_alloc(dev, &backend, (char *)name, name_len); + fdtv = fdtv_alloc(dev, &backend, name, name_len >= 0 ? 
name_len : 0); if (!fdtv) return -ENOMEM; diff --git a/drivers/media/video/dabusb.c b/drivers/media/video/dabusb.c index 9b413a35e04..0f505086774 100644 --- a/drivers/media/video/dabusb.c +++ b/drivers/media/video/dabusb.c @@ -616,10 +616,12 @@ static int dabusb_open (struct inode *inode, struct file *file) { int devnum = iminor(inode); pdabusb_t s; + int r; if (devnum < DABUSB_MINOR || devnum >= (DABUSB_MINOR + NRDABUSB)) return -EIO; + lock_kernel(); s = &dabusb[devnum - DABUSB_MINOR]; dbg("dabusb_open"); @@ -634,6 +636,7 @@ static int dabusb_open (struct inode *inode, struct file *file) msleep_interruptible(500); if (signal_pending (current)) { + unlock_kernel(); return -EAGAIN; } mutex_lock(&s->mutex); @@ -641,6 +644,7 @@ static int dabusb_open (struct inode *inode, struct file *file) if (usb_set_interface (s->usbdev, _DABUSB_IF, 1) < 0) { mutex_unlock(&s->mutex); dev_err(&s->usbdev->dev, "set_interface failed\n"); + unlock_kernel(); return -EINVAL; } s->opened = 1; @@ -649,7 +653,9 @@ static int dabusb_open (struct inode *inode, struct file *file) file->f_pos = 0; file->private_data = s; - return nonseekable_open(inode, file); + r = nonseekable_open(inode, file); + unlock_kernel(); + return r; } static int dabusb_release (struct inode *inode, struct file *file) diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c index f53755533e7..3fab78ba895 100644 --- a/drivers/mmc/card/sdio_uart.c +++ b/drivers/mmc/card/sdio_uart.c @@ -37,6 +37,7 @@ #include <linux/gfp.h> #include <linux/tty.h> #include <linux/tty_flip.h> +#include <linux/kfifo.h> #include <linux/mmc/core.h> #include <linux/mmc/card.h> @@ -47,19 +48,9 @@ #define UART_NR 8 /* Number of UARTs this driver can handle */ -#define UART_XMIT_SIZE PAGE_SIZE +#define FIFO_SIZE PAGE_SIZE #define WAKEUP_CHARS 256 -#define circ_empty(circ) ((circ)->head == (circ)->tail) -#define circ_clear(circ) ((circ)->head = (circ)->tail = 0) - -#define circ_chars_pending(circ) \ - (CIRC_CNT((circ)->head, (circ)->tail, UART_XMIT_SIZE)) - -#define circ_chars_free(circ) \ - (CIRC_SPACE((circ)->head, (circ)->tail, UART_XMIT_SIZE)) - - struct uart_icount { __u32 cts; __u32 dsr; @@ -82,7 +73,7 @@ struct sdio_uart_port { struct mutex func_lock; struct task_struct *in_sdio_uart_irq; unsigned int regs_offset; - struct circ_buf xmit; + struct kfifo xmit_fifo; spinlock_t write_lock; struct uart_icount icount; unsigned int uartclk; @@ -105,6 +96,8 @@ static int sdio_uart_add_port(struct sdio_uart_port *port) kref_init(&port->kref); mutex_init(&port->func_lock); spin_lock_init(&port->write_lock); + if (kfifo_alloc(&port->xmit_fifo, FIFO_SIZE, GFP_KERNEL)) + return -ENOMEM; spin_lock(&sdio_uart_table_lock); for (index = 0; index < UART_NR; index++) { @@ -140,6 +133,7 @@ static void sdio_uart_port_destroy(struct kref *kref) { struct sdio_uart_port *port = container_of(kref, struct sdio_uart_port, kref); + kfifo_free(&port->xmit_fifo); kfree(port); } @@ -456,9 +450,11 @@ static void sdio_uart_receive_chars(struct sdio_uart_port *port, static void sdio_uart_transmit_chars(struct sdio_uart_port *port) { - struct circ_buf *xmit = &port->xmit; + struct kfifo *xmit = &port->xmit_fifo; int count; struct tty_struct *tty; + u8 iobuf[16]; + int len; if (port->x_char) { sdio_out(port, UART_TX, port->x_char); @@ -469,27 +465,25 @@ static void sdio_uart_transmit_chars(struct sdio_uart_port *port) tty = tty_port_tty_get(&port->port); - if (tty == NULL || circ_empty(xmit) || + if (tty == NULL || !kfifo_len(xmit) || tty->stopped || tty->hw_stopped) { 
sdio_uart_stop_tx(port); tty_kref_put(tty); return; } - count = 16; - do { - sdio_out(port, UART_TX, xmit->buf[xmit->tail]); - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + len = kfifo_out_locked(xmit, iobuf, 16, &port->write_lock); + for (count = 0; count < len; count++) { + sdio_out(port, UART_TX, iobuf[count]); port->icount.tx++; - if (circ_empty(xmit)) - break; - } while (--count > 0); + } - if (circ_chars_pending(xmit) < WAKEUP_CHARS) + len = kfifo_len(xmit); + if (len < WAKEUP_CHARS) { tty_wakeup(tty); - - if (circ_empty(xmit)) - sdio_uart_stop_tx(port); + if (len == 0) + sdio_uart_stop_tx(port); + } tty_kref_put(tty); } @@ -632,7 +626,6 @@ static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty) { struct sdio_uart_port *port = container_of(tport, struct sdio_uart_port, port); - unsigned long page; int ret; /* @@ -641,22 +634,17 @@ static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty) */ set_bit(TTY_IO_ERROR, &tty->flags); - /* Initialise and allocate the transmit buffer. */ - page = __get_free_page(GFP_KERNEL); - if (!page) - return -ENOMEM; - port->xmit.buf = (unsigned char *)page; - circ_clear(&port->xmit); + kfifo_reset(&port->xmit_fifo); ret = sdio_uart_claim_func(port); if (ret) - goto err1; + return ret; ret = sdio_enable_func(port->func); if (ret) - goto err2; + goto err1; ret = sdio_claim_irq(port->func, sdio_uart_irq); if (ret) - goto err3; + goto err2; /* * Clear the FIFO buffers and disable them. @@ -700,12 +688,10 @@ static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty) sdio_uart_release_func(port); return 0; -err3: - sdio_disable_func(port->func); err2: - sdio_uart_release_func(port); + sdio_disable_func(port->func); err1: - free_page((unsigned long)port->xmit.buf); + sdio_uart_release_func(port); return ret; } @@ -727,7 +713,7 @@ static void sdio_uart_shutdown(struct tty_port *tport) ret = sdio_uart_claim_func(port); if (ret) - goto skip; + return; sdio_uart_stop_rx(port); @@ -749,10 +735,6 @@ static void sdio_uart_shutdown(struct tty_port *tport) sdio_disable_func(port->func); sdio_uart_release_func(port); - -skip: - /* Free the transmit buffer page. */ - free_page((unsigned long)port->xmit.buf); } /** @@ -822,27 +804,12 @@ static int sdio_uart_write(struct tty_struct *tty, const unsigned char *buf, int count) { struct sdio_uart_port *port = tty->driver_data; - struct circ_buf *circ = &port->xmit; - int c, ret = 0; + int ret; if (!port->func) return -ENODEV; - spin_lock(&port->write_lock); - while (1) { - c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE); - if (count < c) - c = count; - if (c <= 0) - break; - memcpy(circ->buf + circ->head, buf, c); - circ->head = (circ->head + c) & (UART_XMIT_SIZE - 1); - buf += c; - count -= c; - ret += c; - } - spin_unlock(&port->write_lock); - + ret = kfifo_in_locked(&port->xmit_fifo, buf, count, &port->write_lock); if (!(port->ier & UART_IER_THRI)) { int err = sdio_uart_claim_func(port); if (!err) { @@ -859,13 +826,13 @@ static int sdio_uart_write(struct tty_struct *tty, const unsigned char *buf, static int sdio_uart_write_room(struct tty_struct *tty) { struct sdio_uart_port *port = tty->driver_data; - return port ? circ_chars_free(&port->xmit) : 0; + return FIFO_SIZE - kfifo_len(&port->xmit_fifo); } static int sdio_uart_chars_in_buffer(struct tty_struct *tty) { struct sdio_uart_port *port = tty->driver_data; - return port ? 
circ_chars_pending(&port->xmit) : 0;
+	return kfifo_len(&port->xmit_fifo);
 }
 
 static void sdio_uart_send_xchar(struct tty_struct *tty, char ch)
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 3e8618b4efb..4cd7f420766 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -264,6 +264,10 @@ struct adapter {
 	struct work_struct fatal_error_handler_task;
 	struct work_struct link_fault_handler_task;
 
+	struct work_struct db_full_task;
+	struct work_struct db_empty_task;
+	struct work_struct db_drop_task;
+
 	struct dentry *debugfs_root;
 
 	struct mutex mdio_lock;
@@ -335,6 +339,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
 		unsigned char *data);
 irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
+extern struct workqueue_struct *cxgb3_wq;
 
 int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size);
 
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 6fd968abb07..3e453e1d97e 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -45,6 +45,7 @@
 #include <linux/firmware.h>
 #include <linux/log2.h>
 #include <linux/stringify.h>
+#include <linux/sched.h>
 #include <asm/uaccess.h>
 
 #include "common.h"
@@ -140,7 +141,7 @@ MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete. Get our own work queue to solve this.
 */
-static struct workqueue_struct *cxgb3_wq;
+struct workqueue_struct *cxgb3_wq;
 
 /**
 * link_report - show link status and link speed/duplex
@@ -586,6 +587,19 @@ static void setup_rss(struct adapter *adap)
 		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
 }
 
+static void ring_dbs(struct adapter *adap)
+{
+	int i, j;
+
+	for (i = 0; i < SGE_QSETS; i++) {
+		struct sge_qset *qs = &adap->sge.qs[i];
+
+		if (qs->adap)
+			for (j = 0; j < SGE_TXQ_PER_SET; j++)
+				t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
+	}
+}
+
 static void init_napi(struct adapter *adap)
 {
 	int i;
@@ -2750,6 +2764,42 @@ static void t3_adap_check_task(struct work_struct *work)
 	spin_unlock_irq(&adapter->work_lock);
 }
 
+static void db_full_task(struct work_struct *work)
+{
+	struct adapter *adapter = container_of(work, struct adapter,
+					       db_full_task);
+
+	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
+}
+
+static void db_empty_task(struct work_struct *work)
+{
+	struct adapter *adapter = container_of(work, struct adapter,
+					       db_empty_task);
+
+	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
+}
+
+static void db_drop_task(struct work_struct *work)
+{
+	struct adapter *adapter = container_of(work, struct adapter,
+					       db_drop_task);
+	unsigned long delay = 1000;
+	unsigned short r;
+
+	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
+
+	/*
+	 * Sleep a while before ringing the driver qset dbs.
+	 * The delay is between 1000-2023 usecs.
+	 */
+	get_random_bytes(&r, 2);
+	delay += r & 1023;
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	schedule_timeout(usecs_to_jiffies(delay));
+	ring_dbs(adapter);
+}
+
 /*
 * Processes external (PHY) interrupts in process context.
 */
@@ -3218,6 +3268,11 @@ static int __devinit init_one(struct pci_dev *pdev,
 	INIT_LIST_HEAD(&adapter->adapter_list);
 	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
 	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
+
+	INIT_WORK(&adapter->db_full_task, db_full_task);
+	INIT_WORK(&adapter->db_empty_task, db_empty_task);
+	INIT_WORK(&adapter->db_drop_task, db_drop_task);
+
 	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
 
 	for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
index 670aa62042d..929c298115c 100644
--- a/drivers/net/cxgb3/cxgb3_offload.h
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -73,7 +73,10 @@ enum {
 	OFFLOAD_STATUS_UP,
 	OFFLOAD_STATUS_DOWN,
 	OFFLOAD_PORT_DOWN,
-	OFFLOAD_PORT_UP
+	OFFLOAD_PORT_UP,
+	OFFLOAD_DB_FULL,
+	OFFLOAD_DB_EMPTY,
+	OFFLOAD_DB_DROP
 };
 
 struct cxgb3_client {
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index 1b5327b5a96..cb42353c9fd 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -254,6 +254,22 @@
 #define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR)
 #define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U)
 
+#define S_HIPRIORITYDBFULL 7
+#define V_HIPRIORITYDBFULL(x) ((x) << S_HIPRIORITYDBFULL)
+#define F_HIPRIORITYDBFULL V_HIPRIORITYDBFULL(1U)
+
+#define S_HIPRIORITYDBEMPTY 6
+#define V_HIPRIORITYDBEMPTY(x) ((x) << S_HIPRIORITYDBEMPTY)
+#define F_HIPRIORITYDBEMPTY V_HIPRIORITYDBEMPTY(1U)
+
+#define S_LOPRIORITYDBFULL 5
+#define V_LOPRIORITYDBFULL(x) ((x) << S_LOPRIORITYDBFULL)
+#define F_LOPRIORITYDBFULL V_LOPRIORITYDBFULL(1U)
+
+#define S_LOPRIORITYDBEMPTY 4
+#define V_LOPRIORITYDBEMPTY(x) ((x) << S_LOPRIORITYDBEMPTY)
+#define F_LOPRIORITYDBEMPTY V_LOPRIORITYDBEMPTY(1U)
+
 #define S_RSPQDISABLED 3
 #define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED)
 #define F_RSPQDISABLED V_RSPQDISABLED(1U)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 04820590374..78e265b484b 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -42,6 +42,7 @@
 #include "sge_defs.h"
 #include "t3_cpl.h"
 #include "firmware_exports.h"
+#include "cxgb3_offload.h"
 
 #define USE_GTS 0
 
@@ -2841,8 +2842,13 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
 	}
 
 	if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
-		CH_ALERT(adapter, "SGE dropped %s priority doorbell\n",
-			 status & F_HIPIODRBDROPERR ?
"high" : "lo"); + queue_work(cxgb3_wq, &adapter->db_drop_task); + + if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL)) + queue_work(cxgb3_wq, &adapter->db_full_task); + + if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY)) + queue_work(cxgb3_wq, &adapter->db_empty_task); t3_write_reg(adapter, A_SG_INT_CAUSE, status); if (status & SGE_FATALERR) diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c index 3ab9f51918a..95a8ba0759f 100644 --- a/drivers/net/cxgb3/t3_hw.c +++ b/drivers/net/cxgb3/t3_hw.c @@ -1433,7 +1433,10 @@ static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg, F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \ V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \ F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \ - F_HIRCQPARITYERROR) + F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \ + F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \ + F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \ + F_LOPIODRBDROPERR) #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \ F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \ F_NFASRCHFAIL) diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index ad113b0f62d..0950fa40684 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c @@ -2908,6 +2908,7 @@ enum parport_pc_pci_cards { netmos_9805, netmos_9815, netmos_9901, + netmos_9865, quatech_sppxp100, }; @@ -2989,6 +2990,7 @@ static struct parport_pc_pci { /* netmos_9805 */ { 1, { { 0, -1 }, } }, /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } }, /* netmos_9901 */ { 1, { { 0, -1 }, } }, + /* netmos_9865 */ { 1, { { 0, -1 }, } }, /* quatech_sppxp100 */ { 1, { { 0, 1 }, } }, }; @@ -3092,6 +3094,10 @@ static const struct pci_device_id parport_pc_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9815 }, { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901, 0xA000, 0x2000, 0, 0, netmos_9901 }, + { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, + 0xA000, 0x1000, 0, 0, netmos_9865 }, + { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, + 0xA000, 0x2000, 0, 0, netmos_9865 }, /* Quatech SPPXP-100 Parallel port PCI ExpressCard */ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 }, diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c index ec73294d1fa..e2dc289f767 100644 --- a/drivers/pci/hotplug/ibmphp_res.c +++ b/drivers/pci/hotplug/ibmphp_res.c @@ -40,7 +40,7 @@ static void update_resources (struct bus_node *bus_cur, int type, int rangeno); static int once_over (void); static int remove_ranges (struct bus_node *, struct bus_node *); static int update_bridge_ranges (struct bus_node **); -static int add_range (int type, struct range_node *, struct bus_node *); +static int add_bus_range (int type, struct range_node *, struct bus_node *); static void fix_resources (struct bus_node *); static struct bus_node *find_bus_wprev (u8, struct bus_node **, u8); @@ -133,7 +133,7 @@ static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node newrange->rangeno = 1; else { /* need to insert our range */ - add_range (flag, newrange, newbus); + add_bus_range (flag, newrange, newbus); debug ("%d resource Primary Bus inserted on bus %x [%x - %x]\n", flag, newbus->busno, newrange->start, newrange->end); } @@ -384,7 +384,7 @@ int __init ibmphp_rsrc_init (void) * Input: type of the resource, range to add, current bus * Output: 0 or -1, bus and range ptrs ********************************************************************************/ -static 
int add_range (int type, struct range_node *range, struct bus_node *bus_cur) +static int add_bus_range (int type, struct range_node *range, struct bus_node *bus_cur) { struct range_node *range_cur = NULL; struct range_node *range_prev; @@ -455,7 +455,7 @@ static int add_range (int type, struct range_node *range, struct bus_node *bus_c /******************************************************************************* * This routine goes through the list of resources of type 'type' and updates - * the range numbers that they correspond to. It was called from add_range fnc + * the range numbers that they correspond to. It was called from add_bus_range fnc * * Input: bus, type of the resource, the rangeno starting from which to update ******************************************************************************/ @@ -1999,7 +1999,7 @@ static int __init update_bridge_ranges (struct bus_node **bus) if (bus_sec->noIORanges > 0) { if (!range_exists_already (range, bus_sec, IO)) { - add_range (IO, range, bus_sec); + add_bus_range (IO, range, bus_sec); ++bus_sec->noIORanges; } else { kfree (range); @@ -2048,7 +2048,7 @@ static int __init update_bridge_ranges (struct bus_node **bus) if (bus_sec->noMemRanges > 0) { if (!range_exists_already (range, bus_sec, MEM)) { - add_range (MEM, range, bus_sec); + add_bus_range (MEM, range, bus_sec); ++bus_sec->noMemRanges; } else { kfree (range); @@ -2102,7 +2102,7 @@ static int __init update_bridge_ranges (struct bus_node **bus) if (bus_sec->noPFMemRanges > 0) { if (!range_exists_already (range, bus_sec, PFMEM)) { - add_range (PFMEM, range, bus_sec); + add_bus_range (PFMEM, range, bus_sec); ++bus_sec->noPFMemRanges; } else { kfree (range); diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig index 0a6601c7680..d189e4743e6 100644 --- a/drivers/pcmcia/Kconfig +++ b/drivers/pcmcia/Kconfig @@ -51,17 +51,23 @@ config PCMCIA_LOAD_CIS config PCMCIA_IOCTL bool "PCMCIA control ioctl (obsolete)" - depends on PCMCIA + depends on PCMCIA && ARM && !SMP && !PREEMPT default y help If you say Y here, the deprecated ioctl interface to the PCMCIA - subsystem will be built. It is needed by cardmgr and cardctl - (pcmcia-cs) to function properly. + subsystem will be built. It is needed by the deprecated pcmcia-cs + tools (cardmgr, cardctl) to function properly. You should use the new pcmciautils package instead (see <file:Documentation/Changes> for location and details). - If unsure, say Y. + This config option will most likely be removed from kernel 2.6.35, + the associated code from kernel 2.6.36. + + As the PCMCIA ioctl is not locking safe, it depends on !SMP and + !PREEMPT. + + If unsure, say N. config CARDBUS bool "32-bit CardBus support" diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c index ac0686efbf7..e6ab2a47d8c 100644 --- a/drivers/pcmcia/cardbus.c +++ b/drivers/pcmcia/cardbus.c @@ -71,7 +71,7 @@ int __ref cb_alloc(struct pcmcia_socket *s) unsigned int max, pass; s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0)); - pci_fixup_cardbus(bus); + pci_fixup_cardbus(bus); max = bus->secondary; for (pass = 0; pass < 2; pass++) diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c index 2f3622dd4b6..f230f6543bf 100644 --- a/drivers/pcmcia/cistpl.c +++ b/drivers/pcmcia/cistpl.c @@ -54,46 +54,44 @@ static const u_int exponent[] = { /* Upper limit on reasonable # of tuples */ #define MAX_TUPLES 200 -/*====================================================================*/ - -/* Parameters that can be set with 'insmod' */ - /* 16-bit CIS? 
*/ static int cis_width; module_param(cis_width, int, 0444); void release_cis_mem(struct pcmcia_socket *s) { - mutex_lock(&s->ops_mutex); - if (s->cis_mem.flags & MAP_ACTIVE) { - s->cis_mem.flags &= ~MAP_ACTIVE; - s->ops->set_mem_map(s, &s->cis_mem); - if (s->cis_mem.res) { - release_resource(s->cis_mem.res); - kfree(s->cis_mem.res); - s->cis_mem.res = NULL; + mutex_lock(&s->ops_mutex); + if (s->cis_mem.flags & MAP_ACTIVE) { + s->cis_mem.flags &= ~MAP_ACTIVE; + s->ops->set_mem_map(s, &s->cis_mem); + if (s->cis_mem.res) { + release_resource(s->cis_mem.res); + kfree(s->cis_mem.res); + s->cis_mem.res = NULL; + } + iounmap(s->cis_virt); + s->cis_virt = NULL; } - iounmap(s->cis_virt); - s->cis_virt = NULL; - } - mutex_unlock(&s->ops_mutex); + mutex_unlock(&s->ops_mutex); } -/* - * Map the card memory at "card_offset" into virtual space. +/** + * set_cis_map() - map the card memory at "card_offset" into virtual space. + * * If flags & MAP_ATTRIB, map the attribute space, otherwise * map the memory space. * * Must be called with ops_mutex held. */ -static void __iomem * -set_cis_map(struct pcmcia_socket *s, unsigned int card_offset, unsigned int flags) +static void __iomem *set_cis_map(struct pcmcia_socket *s, + unsigned int card_offset, unsigned int flags) { pccard_mem_map *mem = &s->cis_mem; int ret; if (!(s->features & SS_CAP_STATIC_MAP) && (mem->res == NULL)) { - mem->res = pcmcia_find_mem_region(0, s->map_size, s->map_size, 0, s); + mem->res = pcmcia_find_mem_region(0, s->map_size, + s->map_size, 0, s); if (mem->res == NULL) { dev_printk(KERN_NOTICE, &s->dev, "cs: unable to map card memory!\n"); @@ -124,165 +122,170 @@ set_cis_map(struct pcmcia_socket *s, unsigned int card_offset, unsigned int flag return s->cis_virt; } -/*====================================================================== - - Low-level functions to read and write CIS memory. I think the - write routine is only useful for writing one-byte registers. - -======================================================================*/ /* Bits in attr field */ #define IS_ATTR 1 #define IS_INDIRECT 8 +/** + * pcmcia_read_cis_mem() - low-level function to read CIS memory + */ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr, u_int len, void *ptr) { - void __iomem *sys, *end; - unsigned char *buf = ptr; - - dev_dbg(&s->dev, "pcmcia_read_cis_mem(%d, %#x, %u)\n", attr, addr, len); - - mutex_lock(&s->ops_mutex); - if (attr & IS_INDIRECT) { - /* Indirect accesses use a bunch of special registers at fixed - locations in common memory */ - u_char flags = ICTRL0_COMMON|ICTRL0_AUTOINC|ICTRL0_BYTEGRAN; - if (attr & IS_ATTR) { - addr *= 2; - flags = ICTRL0_AUTOINC; - } + void __iomem *sys, *end; + unsigned char *buf = ptr; - sys = set_cis_map(s, 0, MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0)); - if (!sys) { - dev_dbg(&s->dev, "could not map memory\n"); - memset(ptr, 0xff, len); - mutex_unlock(&s->ops_mutex); - return -1; - } + dev_dbg(&s->dev, "pcmcia_read_cis_mem(%d, %#x, %u)\n", attr, addr, len); - writeb(flags, sys+CISREG_ICTRL0); - writeb(addr & 0xff, sys+CISREG_IADDR0); - writeb((addr>>8) & 0xff, sys+CISREG_IADDR1); - writeb((addr>>16) & 0xff, sys+CISREG_IADDR2); - writeb((addr>>24) & 0xff, sys+CISREG_IADDR3); - for ( ; len > 0; len--, buf++) - *buf = readb(sys+CISREG_IDATA0); - } else { - u_int inc = 1, card_offset, flags; - - if (addr > CISTPL_MAX_CIS_SIZE) - dev_dbg(&s->dev, "attempt to read CIS mem at addr %#x", addr); - - flags = MAP_ACTIVE | ((cis_width) ? 
MAP_16BIT : 0); - if (attr) { - flags |= MAP_ATTRIB; - inc++; - addr *= 2; - } + mutex_lock(&s->ops_mutex); + if (attr & IS_INDIRECT) { + /* Indirect accesses use a bunch of special registers at fixed + locations in common memory */ + u_char flags = ICTRL0_COMMON|ICTRL0_AUTOINC|ICTRL0_BYTEGRAN; + if (attr & IS_ATTR) { + addr *= 2; + flags = ICTRL0_AUTOINC; + } - card_offset = addr & ~(s->map_size-1); - while (len) { - sys = set_cis_map(s, card_offset, flags); - if (!sys) { - dev_dbg(&s->dev, "could not map memory\n"); - memset(ptr, 0xff, len); - mutex_unlock(&s->ops_mutex); - return -1; - } - end = sys + s->map_size; - sys = sys + (addr & (s->map_size-1)); - for ( ; len > 0; len--, buf++, sys += inc) { - if (sys == end) - break; - *buf = readb(sys); - } - card_offset += s->map_size; - addr = 0; + sys = set_cis_map(s, 0, MAP_ACTIVE | + ((cis_width) ? MAP_16BIT : 0)); + if (!sys) { + dev_dbg(&s->dev, "could not map memory\n"); + memset(ptr, 0xff, len); + mutex_unlock(&s->ops_mutex); + return -1; + } + + writeb(flags, sys+CISREG_ICTRL0); + writeb(addr & 0xff, sys+CISREG_IADDR0); + writeb((addr>>8) & 0xff, sys+CISREG_IADDR1); + writeb((addr>>16) & 0xff, sys+CISREG_IADDR2); + writeb((addr>>24) & 0xff, sys+CISREG_IADDR3); + for ( ; len > 0; len--, buf++) + *buf = readb(sys+CISREG_IDATA0); + } else { + u_int inc = 1, card_offset, flags; + + if (addr > CISTPL_MAX_CIS_SIZE) + dev_dbg(&s->dev, + "attempt to read CIS mem at addr %#x", addr); + + flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0); + if (attr) { + flags |= MAP_ATTRIB; + inc++; + addr *= 2; + } + + card_offset = addr & ~(s->map_size-1); + while (len) { + sys = set_cis_map(s, card_offset, flags); + if (!sys) { + dev_dbg(&s->dev, "could not map memory\n"); + memset(ptr, 0xff, len); + mutex_unlock(&s->ops_mutex); + return -1; + } + end = sys + s->map_size; + sys = sys + (addr & (s->map_size-1)); + for ( ; len > 0; len--, buf++, sys += inc) { + if (sys == end) + break; + *buf = readb(sys); + } + card_offset += s->map_size; + addr = 0; + } } - } - mutex_unlock(&s->ops_mutex); - dev_dbg(&s->dev, " %#2.2x %#2.2x %#2.2x %#2.2x ...\n", - *(u_char *)(ptr+0), *(u_char *)(ptr+1), - *(u_char *)(ptr+2), *(u_char *)(ptr+3)); - return 0; + mutex_unlock(&s->ops_mutex); + dev_dbg(&s->dev, " %#2.2x %#2.2x %#2.2x %#2.2x ...\n", + *(u_char *)(ptr+0), *(u_char *)(ptr+1), + *(u_char *)(ptr+2), *(u_char *)(ptr+3)); + return 0; } +/** + * pcmcia_write_cis_mem() - low-level function to write CIS memory + * + * Probably only useful for writing one-byte registers. + */ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr, u_int len, void *ptr) { - void __iomem *sys, *end; - unsigned char *buf = ptr; - - dev_dbg(&s->dev, "pcmcia_write_cis_mem(%d, %#x, %u)\n", attr, addr, len); - - mutex_lock(&s->ops_mutex); - if (attr & IS_INDIRECT) { - /* Indirect accesses use a bunch of special registers at fixed - locations in common memory */ - u_char flags = ICTRL0_COMMON|ICTRL0_AUTOINC|ICTRL0_BYTEGRAN; - if (attr & IS_ATTR) { - addr *= 2; - flags = ICTRL0_AUTOINC; - } + void __iomem *sys, *end; + unsigned char *buf = ptr; - sys = set_cis_map(s, 0, MAP_ACTIVE | ((cis_width) ? 
MAP_16BIT : 0)); - if (!sys) { - dev_dbg(&s->dev, "could not map memory\n"); - mutex_unlock(&s->ops_mutex); - return; /* FIXME: Error */ - } + dev_dbg(&s->dev, + "pcmcia_write_cis_mem(%d, %#x, %u)\n", attr, addr, len); - writeb(flags, sys+CISREG_ICTRL0); - writeb(addr & 0xff, sys+CISREG_IADDR0); - writeb((addr>>8) & 0xff, sys+CISREG_IADDR1); - writeb((addr>>16) & 0xff, sys+CISREG_IADDR2); - writeb((addr>>24) & 0xff, sys+CISREG_IADDR3); - for ( ; len > 0; len--, buf++) - writeb(*buf, sys+CISREG_IDATA0); - } else { - u_int inc = 1, card_offset, flags; - - flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0); - if (attr & IS_ATTR) { - flags |= MAP_ATTRIB; - inc++; - addr *= 2; - } + mutex_lock(&s->ops_mutex); + if (attr & IS_INDIRECT) { + /* Indirect accesses use a bunch of special registers at fixed + locations in common memory */ + u_char flags = ICTRL0_COMMON|ICTRL0_AUTOINC|ICTRL0_BYTEGRAN; + if (attr & IS_ATTR) { + addr *= 2; + flags = ICTRL0_AUTOINC; + } - card_offset = addr & ~(s->map_size-1); - while (len) { - sys = set_cis_map(s, card_offset, flags); - if (!sys) { - dev_dbg(&s->dev, "could not map memory\n"); - mutex_unlock(&s->ops_mutex); - return; /* FIXME: error */ - } - - end = sys + s->map_size; - sys = sys + (addr & (s->map_size-1)); - for ( ; len > 0; len--, buf++, sys += inc) { - if (sys == end) - break; - writeb(*buf, sys); - } - card_offset += s->map_size; - addr = 0; - } - } - mutex_unlock(&s->ops_mutex); -} + sys = set_cis_map(s, 0, MAP_ACTIVE | + ((cis_width) ? MAP_16BIT : 0)); + if (!sys) { + dev_dbg(&s->dev, "could not map memory\n"); + mutex_unlock(&s->ops_mutex); + return; /* FIXME: Error */ + } + + writeb(flags, sys+CISREG_ICTRL0); + writeb(addr & 0xff, sys+CISREG_IADDR0); + writeb((addr>>8) & 0xff, sys+CISREG_IADDR1); + writeb((addr>>16) & 0xff, sys+CISREG_IADDR2); + writeb((addr>>24) & 0xff, sys+CISREG_IADDR3); + for ( ; len > 0; len--, buf++) + writeb(*buf, sys+CISREG_IDATA0); + } else { + u_int inc = 1, card_offset, flags; + flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0); + if (attr & IS_ATTR) { + flags |= MAP_ATTRIB; + inc++; + addr *= 2; + } -/*====================================================================== + card_offset = addr & ~(s->map_size-1); + while (len) { + sys = set_cis_map(s, card_offset, flags); + if (!sys) { + dev_dbg(&s->dev, "could not map memory\n"); + mutex_unlock(&s->ops_mutex); + return; /* FIXME: error */ + } - This is a wrapper around read_cis_mem, with the same interface, - but which caches information, for cards whose CIS may not be - readable all the time. + end = sys + s->map_size; + sys = sys + (addr & (s->map_size-1)); + for ( ; len > 0; len--, buf++, sys += inc) { + if (sys == end) + break; + writeb(*buf, sys); + } + card_offset += s->map_size; + addr = 0; + } + } + mutex_unlock(&s->ops_mutex); +} -======================================================================*/ +/** + * read_cis_cache() - read CIS memory or its associated cache + * + * This is a wrapper around read_cis_mem, with the same interface, + * but which caches information, for cards whose CIS may not be + * readable all the time. + */ static int read_cis_cache(struct pcmcia_socket *s, int attr, u_int addr, size_t len, void *ptr) { @@ -353,7 +356,6 @@ remove_cis_cache(struct pcmcia_socket *s, int attr, u_int addr, u_int len) * This destroys the CIS cache but keeps any fake CIS alive. Must be * called with ops_mutex held. 
*/ - void destroy_cis_cache(struct pcmcia_socket *s) { struct list_head *l, *n; @@ -366,13 +368,9 @@ void destroy_cis_cache(struct pcmcia_socket *s) } } -/*====================================================================== - - This verifies if the CIS of a card matches what is in the CIS - cache. - -======================================================================*/ - +/** + * verify_cis_cache() - does the CIS match what is in the CIS cache? + */ int verify_cis_cache(struct pcmcia_socket *s) { struct cis_cache_entry *cis; @@ -404,13 +402,12 @@ int verify_cis_cache(struct pcmcia_socket *s) return 0; } -/*====================================================================== - - For really bad cards, we provide a facility for uploading a - replacement CIS. - -======================================================================*/ - +/** + * pcmcia_replace_cis() - use a replacement CIS instead of the card's CIS + * + * For really bad cards, we provide a facility for uploading a + * replacement CIS. + */ int pcmcia_replace_cis(struct pcmcia_socket *s, const u8 *data, const size_t len) { @@ -433,17 +430,13 @@ int pcmcia_replace_cis(struct pcmcia_socket *s, return 0; } -/*====================================================================== - - The high-level CIS tuple services - -======================================================================*/ +/* The high-level CIS tuple services */ typedef struct tuple_flags { - u_int link_space:4; - u_int has_link:1; - u_int mfc_fn:3; - u_int space:4; + u_int link_space:4; + u_int has_link:1; + u_int mfc_fn:3; + u_int space:4; } tuple_flags; #define LINK_SPACE(f) (((tuple_flags *)(&(f)))->link_space) @@ -451,982 +444,961 @@ typedef struct tuple_flags { #define MFC_FN(f) (((tuple_flags *)(&(f)))->mfc_fn) #define SPACE(f) (((tuple_flags *)(&(f)))->space) -int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function, tuple_t *tuple) +int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function, + tuple_t *tuple) { - if (!s) - return -EINVAL; - - if (!(s->state & SOCKET_PRESENT) || (s->state & SOCKET_CARDBUS)) - return -ENODEV; - tuple->TupleLink = tuple->Flags = 0; - - /* Assume presence of a LONGLINK_C to address 0 */ - tuple->CISOffset = tuple->LinkOffset = 0; - SPACE(tuple->Flags) = HAS_LINK(tuple->Flags) = 1; - - if ((s->functions > 1) && !(tuple->Attributes & TUPLE_RETURN_COMMON)) { - cisdata_t req = tuple->DesiredTuple; - tuple->DesiredTuple = CISTPL_LONGLINK_MFC; - if (pccard_get_next_tuple(s, function, tuple) == 0) { - tuple->DesiredTuple = CISTPL_LINKTARGET; - if (pccard_get_next_tuple(s, function, tuple) != 0) - return -ENOSPC; - } else - tuple->CISOffset = tuple->TupleLink = 0; - tuple->DesiredTuple = req; - } - return pccard_get_next_tuple(s, function, tuple); + if (!s) + return -EINVAL; + + if (!(s->state & SOCKET_PRESENT) || (s->state & SOCKET_CARDBUS)) + return -ENODEV; + tuple->TupleLink = tuple->Flags = 0; + + /* Assume presence of a LONGLINK_C to address 0 */ + tuple->CISOffset = tuple->LinkOffset = 0; + SPACE(tuple->Flags) = HAS_LINK(tuple->Flags) = 1; + + if ((s->functions > 1) && !(tuple->Attributes & TUPLE_RETURN_COMMON)) { + cisdata_t req = tuple->DesiredTuple; + tuple->DesiredTuple = CISTPL_LONGLINK_MFC; + if (pccard_get_next_tuple(s, function, tuple) == 0) { + tuple->DesiredTuple = CISTPL_LINKTARGET; + if (pccard_get_next_tuple(s, function, tuple) != 0) + return -ENOSPC; + } else + tuple->CISOffset = tuple->TupleLink = 0; + tuple->DesiredTuple = req; + } + return pccard_get_next_tuple(s, 
function, tuple); } static int follow_link(struct pcmcia_socket *s, tuple_t *tuple) { - u_char link[5]; - u_int ofs; - int ret; - - if (MFC_FN(tuple->Flags)) { - /* Get indirect link from the MFC tuple */ - ret = read_cis_cache(s, LINK_SPACE(tuple->Flags), - tuple->LinkOffset, 5, link); - if (ret) + u_char link[5]; + u_int ofs; + int ret; + + if (MFC_FN(tuple->Flags)) { + /* Get indirect link from the MFC tuple */ + ret = read_cis_cache(s, LINK_SPACE(tuple->Flags), + tuple->LinkOffset, 5, link); + if (ret) + return -1; + ofs = get_unaligned_le32(link + 1); + SPACE(tuple->Flags) = (link[0] == CISTPL_MFC_ATTR); + /* Move to the next indirect link */ + tuple->LinkOffset += 5; + MFC_FN(tuple->Flags)--; + } else if (HAS_LINK(tuple->Flags)) { + ofs = tuple->LinkOffset; + SPACE(tuple->Flags) = LINK_SPACE(tuple->Flags); + HAS_LINK(tuple->Flags) = 0; + } else return -1; - ofs = get_unaligned_le32(link + 1); - SPACE(tuple->Flags) = (link[0] == CISTPL_MFC_ATTR); - /* Move to the next indirect link */ - tuple->LinkOffset += 5; - MFC_FN(tuple->Flags)--; - } else if (HAS_LINK(tuple->Flags)) { - ofs = tuple->LinkOffset; - SPACE(tuple->Flags) = LINK_SPACE(tuple->Flags); - HAS_LINK(tuple->Flags) = 0; - } else { - return -1; - } - if (SPACE(tuple->Flags)) { - /* This is ugly, but a common CIS error is to code the long - link offset incorrectly, so we check the right spot... */ + + if (SPACE(tuple->Flags)) { + /* This is ugly, but a common CIS error is to code the long + link offset incorrectly, so we check the right spot... */ + ret = read_cis_cache(s, SPACE(tuple->Flags), ofs, 5, link); + if (ret) + return -1; + if ((link[0] == CISTPL_LINKTARGET) && (link[1] >= 3) && + (strncmp(link+2, "CIS", 3) == 0)) + return ofs; + remove_cis_cache(s, SPACE(tuple->Flags), ofs, 5); + /* Then, we try the wrong spot... */ + ofs = ofs >> 1; + } ret = read_cis_cache(s, SPACE(tuple->Flags), ofs, 5, link); if (ret) return -1; if ((link[0] == CISTPL_LINKTARGET) && (link[1] >= 3) && - (strncmp(link+2, "CIS", 3) == 0)) - return ofs; + (strncmp(link+2, "CIS", 3) == 0)) + return ofs; remove_cis_cache(s, SPACE(tuple->Flags), ofs, 5); - /* Then, we try the wrong spot... */ - ofs = ofs >> 1; - } - ret = read_cis_cache(s, SPACE(tuple->Flags), ofs, 5, link); - if (ret) - return -1; - if ((link[0] == CISTPL_LINKTARGET) && (link[1] >= 3) && - (strncmp(link+2, "CIS", 3) == 0)) - return ofs; - remove_cis_cache(s, SPACE(tuple->Flags), ofs, 5); - return -1; + return -1; } -int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, tuple_t *tuple) +int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, + tuple_t *tuple) { - u_char link[2], tmp; - int ofs, i, attr; - int ret; - - if (!s) - return -EINVAL; - if (!(s->state & SOCKET_PRESENT) || (s->state & SOCKET_CARDBUS)) - return -ENODEV; - - link[1] = tuple->TupleLink; - ofs = tuple->CISOffset + tuple->TupleLink; - attr = SPACE(tuple->Flags); - - for (i = 0; i < MAX_TUPLES; i++) { - if (link[1] == 0xff) { - link[0] = CISTPL_END; - } else { - ret = read_cis_cache(s, attr, ofs, 2, link); - if (ret) - return -1; - if (link[0] == CISTPL_NULL) { - ofs++; continue; - } - } + u_char link[2], tmp; + int ofs, i, attr; + int ret; - /* End of chain? 
Follow long link if possible */ - if (link[0] == CISTPL_END) { - ofs = follow_link(s, tuple); - if (ofs < 0) - return -ENOSPC; - attr = SPACE(tuple->Flags); - ret = read_cis_cache(s, attr, ofs, 2, link); - if (ret) - return -1; - } + if (!s) + return -EINVAL; + if (!(s->state & SOCKET_PRESENT) || (s->state & SOCKET_CARDBUS)) + return -ENODEV; - /* Is this a link tuple? Make a note of it */ - if ((link[0] == CISTPL_LONGLINK_A) || - (link[0] == CISTPL_LONGLINK_C) || - (link[0] == CISTPL_LONGLINK_MFC) || - (link[0] == CISTPL_LINKTARGET) || - (link[0] == CISTPL_INDIRECT) || - (link[0] == CISTPL_NO_LINK)) { - switch (link[0]) { - case CISTPL_LONGLINK_A: - HAS_LINK(tuple->Flags) = 1; - LINK_SPACE(tuple->Flags) = attr | IS_ATTR; - ret = read_cis_cache(s, attr, ofs+2, 4, &tuple->LinkOffset); - if (ret) - return -1; - break; - case CISTPL_LONGLINK_C: - HAS_LINK(tuple->Flags) = 1; - LINK_SPACE(tuple->Flags) = attr & ~IS_ATTR; - ret = read_cis_cache(s, attr, ofs+2, 4, &tuple->LinkOffset); - if (ret) - return -1; - break; - case CISTPL_INDIRECT: - HAS_LINK(tuple->Flags) = 1; - LINK_SPACE(tuple->Flags) = IS_ATTR | IS_INDIRECT; - tuple->LinkOffset = 0; - break; - case CISTPL_LONGLINK_MFC: - tuple->LinkOffset = ofs + 3; - LINK_SPACE(tuple->Flags) = attr; - if (function == BIND_FN_ALL) { - /* Follow all the MFC links */ - ret = read_cis_cache(s, attr, ofs+2, 1, &tmp); - if (ret) - return -1; - MFC_FN(tuple->Flags) = tmp; - } else { - /* Follow exactly one of the links */ - MFC_FN(tuple->Flags) = 1; - tuple->LinkOffset += function * 5; + link[1] = tuple->TupleLink; + ofs = tuple->CISOffset + tuple->TupleLink; + attr = SPACE(tuple->Flags); + + for (i = 0; i < MAX_TUPLES; i++) { + if (link[1] == 0xff) + link[0] = CISTPL_END; + else { + ret = read_cis_cache(s, attr, ofs, 2, link); + if (ret) + return -1; + if (link[0] == CISTPL_NULL) { + ofs++; + continue; + } } - break; - case CISTPL_NO_LINK: - HAS_LINK(tuple->Flags) = 0; - break; - } - if ((tuple->Attributes & TUPLE_RETURN_LINK) && - (tuple->DesiredTuple == RETURN_FIRST_TUPLE)) - break; - } else - if (tuple->DesiredTuple == RETURN_FIRST_TUPLE) - break; - if (link[0] == tuple->DesiredTuple) - break; - ofs += link[1] + 2; - } - if (i == MAX_TUPLES) { - dev_dbg(&s->dev, "cs: overrun in pcmcia_get_next_tuple\n"); - return -ENOSPC; - } - - tuple->TupleCode = link[0]; - tuple->TupleLink = link[1]; - tuple->CISOffset = ofs + 2; - return 0; -} + /* End of chain? Follow long link if possible */ + if (link[0] == CISTPL_END) { + ofs = follow_link(s, tuple); + if (ofs < 0) + return -ENOSPC; + attr = SPACE(tuple->Flags); + ret = read_cis_cache(s, attr, ofs, 2, link); + if (ret) + return -1; + } -/*====================================================================*/ + /* Is this a link tuple? 
Make a note of it */ + if ((link[0] == CISTPL_LONGLINK_A) || + (link[0] == CISTPL_LONGLINK_C) || + (link[0] == CISTPL_LONGLINK_MFC) || + (link[0] == CISTPL_LINKTARGET) || + (link[0] == CISTPL_INDIRECT) || + (link[0] == CISTPL_NO_LINK)) { + switch (link[0]) { + case CISTPL_LONGLINK_A: + HAS_LINK(tuple->Flags) = 1; + LINK_SPACE(tuple->Flags) = attr | IS_ATTR; + ret = read_cis_cache(s, attr, ofs+2, 4, + &tuple->LinkOffset); + if (ret) + return -1; + break; + case CISTPL_LONGLINK_C: + HAS_LINK(tuple->Flags) = 1; + LINK_SPACE(tuple->Flags) = attr & ~IS_ATTR; + ret = read_cis_cache(s, attr, ofs+2, 4, + &tuple->LinkOffset); + if (ret) + return -1; + break; + case CISTPL_INDIRECT: + HAS_LINK(tuple->Flags) = 1; + LINK_SPACE(tuple->Flags) = IS_ATTR | + IS_INDIRECT; + tuple->LinkOffset = 0; + break; + case CISTPL_LONGLINK_MFC: + tuple->LinkOffset = ofs + 3; + LINK_SPACE(tuple->Flags) = attr; + if (function == BIND_FN_ALL) { + /* Follow all the MFC links */ + ret = read_cis_cache(s, attr, ofs+2, + 1, &tmp); + if (ret) + return -1; + MFC_FN(tuple->Flags) = tmp; + } else { + /* Follow exactly one of the links */ + MFC_FN(tuple->Flags) = 1; + tuple->LinkOffset += function * 5; + } + break; + case CISTPL_NO_LINK: + HAS_LINK(tuple->Flags) = 0; + break; + } + if ((tuple->Attributes & TUPLE_RETURN_LINK) && + (tuple->DesiredTuple == RETURN_FIRST_TUPLE)) + break; + } else + if (tuple->DesiredTuple == RETURN_FIRST_TUPLE) + break; + + if (link[0] == tuple->DesiredTuple) + break; + ofs += link[1] + 2; + } + if (i == MAX_TUPLES) { + dev_dbg(&s->dev, "cs: overrun in pcmcia_get_next_tuple\n"); + return -ENOSPC; + } -#define _MIN(a, b) (((a) < (b)) ? (a) : (b)) + tuple->TupleCode = link[0]; + tuple->TupleLink = link[1]; + tuple->CISOffset = ofs + 2; + return 0; +} int pccard_get_tuple_data(struct pcmcia_socket *s, tuple_t *tuple) { - u_int len; - int ret; + u_int len; + int ret; - if (!s) - return -EINVAL; + if (!s) + return -EINVAL; - if (tuple->TupleLink < tuple->TupleOffset) - return -ENOSPC; - len = tuple->TupleLink - tuple->TupleOffset; - tuple->TupleDataLen = tuple->TupleLink; - if (len == 0) + if (tuple->TupleLink < tuple->TupleOffset) + return -ENOSPC; + len = tuple->TupleLink - tuple->TupleOffset; + tuple->TupleDataLen = tuple->TupleLink; + if (len == 0) + return 0; + ret = read_cis_cache(s, SPACE(tuple->Flags), + tuple->CISOffset + tuple->TupleOffset, + min(len, (u_int) tuple->TupleDataMax), + tuple->TupleData); + if (ret) + return -1; return 0; - ret = read_cis_cache(s, SPACE(tuple->Flags), - tuple->CISOffset + tuple->TupleOffset, - _MIN(len, tuple->TupleDataMax), tuple->TupleData); - if (ret) - return -1; - return 0; } -/*====================================================================== - - Parsing routines for individual tuples - -======================================================================*/ +/* Parsing routines for individual tuples */ static int parse_device(tuple_t *tuple, cistpl_device_t *device) { - int i; - u_char scale; - u_char *p, *q; + int i; + u_char scale; + u_char *p, *q; - p = (u_char *)tuple->TupleData; - q = p + tuple->TupleDataLen; + p = (u_char *)tuple->TupleData; + q = p + tuple->TupleDataLen; - device->ndev = 0; - for (i = 0; i < CISTPL_MAX_DEVICES; i++) { + device->ndev = 0; + for (i = 0; i < CISTPL_MAX_DEVICES; i++) { - if (*p == 0xff) - break; - device->dev[i].type = (*p >> 4); - device->dev[i].wp = (*p & 0x08) ? 
1 : 0; - switch (*p & 0x07) { - case 0: - device->dev[i].speed = 0; - break; - case 1: - device->dev[i].speed = 250; - break; - case 2: - device->dev[i].speed = 200; - break; - case 3: - device->dev[i].speed = 150; - break; - case 4: - device->dev[i].speed = 100; - break; - case 7: - if (++p == q) - return -EINVAL; - device->dev[i].speed = SPEED_CVT(*p); - while (*p & 0x80) + if (*p == 0xff) + break; + device->dev[i].type = (*p >> 4); + device->dev[i].wp = (*p & 0x08) ? 1 : 0; + switch (*p & 0x07) { + case 0: + device->dev[i].speed = 0; + break; + case 1: + device->dev[i].speed = 250; + break; + case 2: + device->dev[i].speed = 200; + break; + case 3: + device->dev[i].speed = 150; + break; + case 4: + device->dev[i].speed = 100; + break; + case 7: if (++p == q) return -EINVAL; - break; - default: - return -EINVAL; - } + device->dev[i].speed = SPEED_CVT(*p); + while (*p & 0x80) + if (++p == q) + return -EINVAL; + break; + default: + return -EINVAL; + } - if (++p == q) - return -EINVAL; - if (*p == 0xff) - break; - scale = *p & 7; - if (scale == 7) - return -EINVAL; - device->dev[i].size = ((*p >> 3) + 1) * (512 << (scale*2)); - device->ndev++; - if (++p == q) - break; - } + if (++p == q) + return -EINVAL; + if (*p == 0xff) + break; + scale = *p & 7; + if (scale == 7) + return -EINVAL; + device->dev[i].size = ((*p >> 3) + 1) * (512 << (scale*2)); + device->ndev++; + if (++p == q) + break; + } - return 0; + return 0; } -/*====================================================================*/ static int parse_checksum(tuple_t *tuple, cistpl_checksum_t *csum) { - u_char *p; - if (tuple->TupleDataLen < 5) - return -EINVAL; - p = (u_char *) tuple->TupleData; - csum->addr = tuple->CISOffset + get_unaligned_le16(p) - 2; - csum->len = get_unaligned_le16(p + 2); - csum->sum = *(p + 4); - return 0; + u_char *p; + if (tuple->TupleDataLen < 5) + return -EINVAL; + p = (u_char *) tuple->TupleData; + csum->addr = tuple->CISOffset + get_unaligned_le16(p) - 2; + csum->len = get_unaligned_le16(p + 2); + csum->sum = *(p + 4); + return 0; } -/*====================================================================*/ static int parse_longlink(tuple_t *tuple, cistpl_longlink_t *link) { - if (tuple->TupleDataLen < 4) - return -EINVAL; - link->addr = get_unaligned_le32(tuple->TupleData); - return 0; + if (tuple->TupleDataLen < 4) + return -EINVAL; + link->addr = get_unaligned_le32(tuple->TupleData); + return 0; } -/*====================================================================*/ -static int parse_longlink_mfc(tuple_t *tuple, - cistpl_longlink_mfc_t *link) +static int parse_longlink_mfc(tuple_t *tuple, cistpl_longlink_mfc_t *link) { - u_char *p; - int i; - - p = (u_char *)tuple->TupleData; - - link->nfn = *p; p++; - if (tuple->TupleDataLen <= link->nfn*5) - return -EINVAL; - for (i = 0; i < link->nfn; i++) { - link->fn[i].space = *p; p++; - link->fn[i].addr = get_unaligned_le32(p); - p += 4; - } - return 0; + u_char *p; + int i; + + p = (u_char *)tuple->TupleData; + + link->nfn = *p; p++; + if (tuple->TupleDataLen <= link->nfn*5) + return -EINVAL; + for (i = 0; i < link->nfn; i++) { + link->fn[i].space = *p; p++; + link->fn[i].addr = get_unaligned_le32(p); + p += 4; + } + return 0; } -/*====================================================================*/ static int parse_strings(u_char *p, u_char *q, int max, char *s, u_char *ofs, u_char *found) { - int i, j, ns; + int i, j, ns; - if (p == q) - return -EINVAL; - ns = 0; j = 0; - for (i = 0; i < max; i++) { - if (*p == 0xff) - break; - ofs[i] = j; - ns++; - for 
(;;) { - s[j++] = (*p == 0xff) ? '\0' : *p; - if ((*p == '\0') || (*p == 0xff)) - break; - if (++p == q) - return -EINVAL; + if (p == q) + return -EINVAL; + ns = 0; j = 0; + for (i = 0; i < max; i++) { + if (*p == 0xff) + break; + ofs[i] = j; + ns++; + for (;;) { + s[j++] = (*p == 0xff) ? '\0' : *p; + if ((*p == '\0') || (*p == 0xff)) + break; + if (++p == q) + return -EINVAL; + } + if ((*p == 0xff) || (++p == q)) + break; } - if ((*p == 0xff) || (++p == q)) - break; - } - if (found) { - *found = ns; - return 0; - } else { + if (found) { + *found = ns; + return 0; + } + return (ns == max) ? 0 : -EINVAL; - } } -/*====================================================================*/ static int parse_vers_1(tuple_t *tuple, cistpl_vers_1_t *vers_1) { - u_char *p, *q; + u_char *p, *q; - p = (u_char *)tuple->TupleData; - q = p + tuple->TupleDataLen; + p = (u_char *)tuple->TupleData; + q = p + tuple->TupleDataLen; - vers_1->major = *p; p++; - vers_1->minor = *p; p++; - if (p >= q) - return -EINVAL; + vers_1->major = *p; p++; + vers_1->minor = *p; p++; + if (p >= q) + return -EINVAL; - return parse_strings(p, q, CISTPL_VERS_1_MAX_PROD_STRINGS, - vers_1->str, vers_1->ofs, &vers_1->ns); + return parse_strings(p, q, CISTPL_VERS_1_MAX_PROD_STRINGS, + vers_1->str, vers_1->ofs, &vers_1->ns); } -/*====================================================================*/ static int parse_altstr(tuple_t *tuple, cistpl_altstr_t *altstr) { - u_char *p, *q; + u_char *p, *q; - p = (u_char *)tuple->TupleData; - q = p + tuple->TupleDataLen; + p = (u_char *)tuple->TupleData; + q = p + tuple->TupleDataLen; - return parse_strings(p, q, CISTPL_MAX_ALTSTR_STRINGS, - altstr->str, altstr->ofs, &altstr->ns); + return parse_strings(p, q, CISTPL_MAX_ALTSTR_STRINGS, + altstr->str, altstr->ofs, &altstr->ns); } -/*====================================================================*/ static int parse_jedec(tuple_t *tuple, cistpl_jedec_t *jedec) { - u_char *p, *q; - int nid; + u_char *p, *q; + int nid; - p = (u_char *)tuple->TupleData; - q = p + tuple->TupleDataLen; + p = (u_char *)tuple->TupleData; + q = p + tuple->TupleDataLen; - for (nid = 0; nid < CISTPL_MAX_DEVICES; nid++) { - if (p > q-2) - break; - jedec->id[nid].mfr = p[0]; - jedec->id[nid].info = p[1]; - p += 2; - } - jedec->nid = nid; - return 0; + for (nid = 0; nid < CISTPL_MAX_DEVICES; nid++) { + if (p > q-2) + break; + jedec->id[nid].mfr = p[0]; + jedec->id[nid].info = p[1]; + p += 2; + } + jedec->nid = nid; + return 0; } -/*====================================================================*/ static int parse_manfid(tuple_t *tuple, cistpl_manfid_t *m) { - if (tuple->TupleDataLen < 4) - return -EINVAL; - m->manf = get_unaligned_le16(tuple->TupleData); - m->card = get_unaligned_le16(tuple->TupleData + 2); - return 0; + if (tuple->TupleDataLen < 4) + return -EINVAL; + m->manf = get_unaligned_le16(tuple->TupleData); + m->card = get_unaligned_le16(tuple->TupleData + 2); + return 0; } -/*====================================================================*/ static int parse_funcid(tuple_t *tuple, cistpl_funcid_t *f) { - u_char *p; - if (tuple->TupleDataLen < 2) - return -EINVAL; - p = (u_char *)tuple->TupleData; - f->func = p[0]; - f->sysinit = p[1]; - return 0; + u_char *p; + if (tuple->TupleDataLen < 2) + return -EINVAL; + p = (u_char *)tuple->TupleData; + f->func = p[0]; + f->sysinit = p[1]; + return 0; } -/*====================================================================*/ static int parse_funce(tuple_t *tuple, cistpl_funce_t *f) { - u_char *p; - int i; - if 
(tuple->TupleDataLen < 1) - return -EINVAL; - p = (u_char *)tuple->TupleData; - f->type = p[0]; - for (i = 1; i < tuple->TupleDataLen; i++) - f->data[i-1] = p[i]; - return 0; + u_char *p; + int i; + if (tuple->TupleDataLen < 1) + return -EINVAL; + p = (u_char *)tuple->TupleData; + f->type = p[0]; + for (i = 1; i < tuple->TupleDataLen; i++) + f->data[i-1] = p[i]; + return 0; } -/*====================================================================*/ static int parse_config(tuple_t *tuple, cistpl_config_t *config) { - int rasz, rmsz, i; - u_char *p; - - p = (u_char *)tuple->TupleData; - rasz = *p & 0x03; - rmsz = (*p & 0x3c) >> 2; - if (tuple->TupleDataLen < rasz+rmsz+4) - return -EINVAL; - config->last_idx = *(++p); - p++; - config->base = 0; - for (i = 0; i <= rasz; i++) - config->base += p[i] << (8*i); - p += rasz+1; - for (i = 0; i < 4; i++) - config->rmask[i] = 0; - for (i = 0; i <= rmsz; i++) - config->rmask[i>>2] += p[i] << (8*(i%4)); - config->subtuples = tuple->TupleDataLen - (rasz+rmsz+4); - return 0; + int rasz, rmsz, i; + u_char *p; + + p = (u_char *)tuple->TupleData; + rasz = *p & 0x03; + rmsz = (*p & 0x3c) >> 2; + if (tuple->TupleDataLen < rasz+rmsz+4) + return -EINVAL; + config->last_idx = *(++p); + p++; + config->base = 0; + for (i = 0; i <= rasz; i++) + config->base += p[i] << (8*i); + p += rasz+1; + for (i = 0; i < 4; i++) + config->rmask[i] = 0; + for (i = 0; i <= rmsz; i++) + config->rmask[i>>2] += p[i] << (8*(i%4)); + config->subtuples = tuple->TupleDataLen - (rasz+rmsz+4); + return 0; } -/*====================================================================== +/* The following routines are all used to parse the nightmarish + * config table entries. + */ + +static u_char *parse_power(u_char *p, u_char *q, cistpl_power_t *pwr) +{ + int i; + u_int scale; - The following routines are all used to parse the nightmarish - config table entries. 
+ if (p == q) + return NULL; + pwr->present = *p; + pwr->flags = 0; + p++; + for (i = 0; i < 7; i++) + if (pwr->present & (1<<i)) { + if (p == q) + return NULL; + pwr->param[i] = POWER_CVT(*p); + scale = POWER_SCALE(*p); + while (*p & 0x80) { + if (++p == q) + return NULL; + if ((*p & 0x7f) < 100) + pwr->param[i] += + (*p & 0x7f) * scale / 100; + else if (*p == 0x7d) + pwr->flags |= CISTPL_POWER_HIGHZ_OK; + else if (*p == 0x7e) + pwr->param[i] = 0; + else if (*p == 0x7f) + pwr->flags |= CISTPL_POWER_HIGHZ_REQ; + else + return NULL; + } + p++; + } + return p; +} -======================================================================*/ -static u_char *parse_power(u_char *p, u_char *q, - cistpl_power_t *pwr) +static u_char *parse_timing(u_char *p, u_char *q, cistpl_timing_t *timing) { - int i; - u_int scale; - - if (p == q) - return NULL; - pwr->present = *p; - pwr->flags = 0; - p++; - for (i = 0; i < 7; i++) - if (pwr->present & (1<<i)) { - if (p == q) - return NULL; - pwr->param[i] = POWER_CVT(*p); - scale = POWER_SCALE(*p); - while (*p & 0x80) { + u_char scale; + + if (p == q) + return NULL; + scale = *p; + if ((scale & 3) != 3) { if (++p == q) return NULL; - if ((*p & 0x7f) < 100) - pwr->param[i] += (*p & 0x7f) * scale / 100; - else if (*p == 0x7d) - pwr->flags |= CISTPL_POWER_HIGHZ_OK; - else if (*p == 0x7e) - pwr->param[i] = 0; - else if (*p == 0x7f) - pwr->flags |= CISTPL_POWER_HIGHZ_REQ; - else - return NULL; - } - p++; - } - return p; + timing->wait = SPEED_CVT(*p); + timing->waitscale = exponent[scale & 3]; + } else + timing->wait = 0; + scale >>= 2; + if ((scale & 7) != 7) { + if (++p == q) + return NULL; + timing->ready = SPEED_CVT(*p); + timing->rdyscale = exponent[scale & 7]; + } else + timing->ready = 0; + scale >>= 3; + if (scale != 7) { + if (++p == q) + return NULL; + timing->reserved = SPEED_CVT(*p); + timing->rsvscale = exponent[scale]; + } else + timing->reserved = 0; + p++; + return p; } -/*====================================================================*/ -static u_char *parse_timing(u_char *p, u_char *q, - cistpl_timing_t *timing) +static u_char *parse_io(u_char *p, u_char *q, cistpl_io_t *io) { - u_char scale; + int i, j, bsz, lsz; - if (p == q) - return NULL; - scale = *p; - if ((scale & 3) != 3) { - if (++p == q) - return NULL; - timing->wait = SPEED_CVT(*p); - timing->waitscale = exponent[scale & 3]; - } else - timing->wait = 0; - scale >>= 2; - if ((scale & 7) != 7) { - if (++p == q) + if (p == q) return NULL; - timing->ready = SPEED_CVT(*p); - timing->rdyscale = exponent[scale & 7]; - } else - timing->ready = 0; - scale >>= 3; - if (scale != 7) { + io->flags = *p; + + if (!(*p & 0x80)) { + io->nwin = 1; + io->win[0].base = 0; + io->win[0].len = (1 << (io->flags & CISTPL_IO_LINES_MASK)); + return p+1; + } + if (++p == q) return NULL; - timing->reserved = SPEED_CVT(*p); - timing->rsvscale = exponent[scale]; - } else - timing->reserved = 0; - p++; - return p; -} - -/*====================================================================*/ + io->nwin = (*p & 0x0f) + 1; + bsz = (*p & 0x30) >> 4; + if (bsz == 3) + bsz++; + lsz = (*p & 0xc0) >> 6; + if (lsz == 3) + lsz++; + p++; -static u_char *parse_io(u_char *p, u_char *q, cistpl_io_t *io) -{ - int i, j, bsz, lsz; - - if (p == q) - return NULL; - io->flags = *p; - - if (!(*p & 0x80)) { - io->nwin = 1; - io->win[0].base = 0; - io->win[0].len = (1 << (io->flags & CISTPL_IO_LINES_MASK)); - return p+1; - } - - if (++p == q) - return NULL; - io->nwin = (*p & 0x0f) + 1; - bsz = (*p & 0x30) >> 4; - if (bsz == 3) - bsz++; - 
lsz = (*p & 0xc0) >> 6; - if (lsz == 3) - lsz++; - p++; - - for (i = 0; i < io->nwin; i++) { - io->win[i].base = 0; - io->win[i].len = 1; - for (j = 0; j < bsz; j++, p++) { - if (p == q) - return NULL; - io->win[i].base += *p << (j*8); - } - for (j = 0; j < lsz; j++, p++) { - if (p == q) - return NULL; - io->win[i].len += *p << (j*8); + for (i = 0; i < io->nwin; i++) { + io->win[i].base = 0; + io->win[i].len = 1; + for (j = 0; j < bsz; j++, p++) { + if (p == q) + return NULL; + io->win[i].base += *p << (j*8); + } + for (j = 0; j < lsz; j++, p++) { + if (p == q) + return NULL; + io->win[i].len += *p << (j*8); + } } - } - return p; + return p; } -/*====================================================================*/ static u_char *parse_mem(u_char *p, u_char *q, cistpl_mem_t *mem) { - int i, j, asz, lsz, has_ha; - u_int len, ca, ha; - - if (p == q) - return NULL; - - mem->nwin = (*p & 0x07) + 1; - lsz = (*p & 0x18) >> 3; - asz = (*p & 0x60) >> 5; - has_ha = (*p & 0x80); - if (++p == q) - return NULL; - - for (i = 0; i < mem->nwin; i++) { - len = ca = ha = 0; - for (j = 0; j < lsz; j++, p++) { - if (p == q) - return NULL; - len += *p << (j*8); - } - for (j = 0; j < asz; j++, p++) { - if (p == q) - return NULL; - ca += *p << (j*8); + int i, j, asz, lsz, has_ha; + u_int len, ca, ha; + + if (p == q) + return NULL; + + mem->nwin = (*p & 0x07) + 1; + lsz = (*p & 0x18) >> 3; + asz = (*p & 0x60) >> 5; + has_ha = (*p & 0x80); + if (++p == q) + return NULL; + + for (i = 0; i < mem->nwin; i++) { + len = ca = ha = 0; + for (j = 0; j < lsz; j++, p++) { + if (p == q) + return NULL; + len += *p << (j*8); + } + for (j = 0; j < asz; j++, p++) { + if (p == q) + return NULL; + ca += *p << (j*8); + } + if (has_ha) + for (j = 0; j < asz; j++, p++) { + if (p == q) + return NULL; + ha += *p << (j*8); + } + mem->win[i].len = len << 8; + mem->win[i].card_addr = ca << 8; + mem->win[i].host_addr = ha << 8; } - if (has_ha) - for (j = 0; j < asz; j++, p++) { - if (p == q) - return NULL; - ha += *p << (j*8); - } - mem->win[i].len = len << 8; - mem->win[i].card_addr = ca << 8; - mem->win[i].host_addr = ha << 8; - } - return p; + return p; } -/*====================================================================*/ static u_char *parse_irq(u_char *p, u_char *q, cistpl_irq_t *irq) { - if (p == q) - return NULL; - irq->IRQInfo1 = *p; p++; - if (irq->IRQInfo1 & IRQ_INFO2_VALID) { - if (p+2 > q) + if (p == q) return NULL; - irq->IRQInfo2 = (p[1]<<8) + p[0]; - p += 2; - } - return p; + irq->IRQInfo1 = *p; p++; + if (irq->IRQInfo1 & IRQ_INFO2_VALID) { + if (p+2 > q) + return NULL; + irq->IRQInfo2 = (p[1]<<8) + p[0]; + p += 2; + } + return p; } -/*====================================================================*/ static int parse_cftable_entry(tuple_t *tuple, cistpl_cftable_entry_t *entry) { - u_char *p, *q, features; - - p = tuple->TupleData; - q = p + tuple->TupleDataLen; - entry->index = *p & 0x3f; - entry->flags = 0; - if (*p & 0x40) - entry->flags |= CISTPL_CFTABLE_DEFAULT; - if (*p & 0x80) { - if (++p == q) - return -EINVAL; - if (*p & 0x10) - entry->flags |= CISTPL_CFTABLE_BVDS; - if (*p & 0x20) - entry->flags |= CISTPL_CFTABLE_WP; + u_char *p, *q, features; + + p = tuple->TupleData; + q = p + tuple->TupleDataLen; + entry->index = *p & 0x3f; + entry->flags = 0; if (*p & 0x40) - entry->flags |= CISTPL_CFTABLE_RDYBSY; - if (*p & 0x80) - entry->flags |= CISTPL_CFTABLE_MWAIT; - entry->interface = *p & 0x0f; - } else - entry->interface = 0; - - /* Process optional features */ - if (++p == q) - return -EINVAL; - features = 
*p; p++; - - /* Power options */ - if ((features & 3) > 0) { - p = parse_power(p, q, &entry->vcc); - if (p == NULL) - return -EINVAL; - } else - entry->vcc.present = 0; - if ((features & 3) > 1) { - p = parse_power(p, q, &entry->vpp1); - if (p == NULL) - return -EINVAL; - } else - entry->vpp1.present = 0; - if ((features & 3) > 2) { - p = parse_power(p, q, &entry->vpp2); - if (p == NULL) - return -EINVAL; - } else - entry->vpp2.present = 0; + entry->flags |= CISTPL_CFTABLE_DEFAULT; + if (*p & 0x80) { + if (++p == q) + return -EINVAL; + if (*p & 0x10) + entry->flags |= CISTPL_CFTABLE_BVDS; + if (*p & 0x20) + entry->flags |= CISTPL_CFTABLE_WP; + if (*p & 0x40) + entry->flags |= CISTPL_CFTABLE_RDYBSY; + if (*p & 0x80) + entry->flags |= CISTPL_CFTABLE_MWAIT; + entry->interface = *p & 0x0f; + } else + entry->interface = 0; - /* Timing options */ - if (features & 0x04) { - p = parse_timing(p, q, &entry->timing); - if (p == NULL) - return -EINVAL; - } else { - entry->timing.wait = 0; - entry->timing.ready = 0; - entry->timing.reserved = 0; - } - - /* I/O window options */ - if (features & 0x08) { - p = parse_io(p, q, &entry->io); - if (p == NULL) + /* Process optional features */ + if (++p == q) return -EINVAL; - } else - entry->io.nwin = 0; + features = *p; p++; - /* Interrupt options */ - if (features & 0x10) { - p = parse_irq(p, q, &entry->irq); - if (p == NULL) - return -EINVAL; - } else - entry->irq.IRQInfo1 = 0; - - switch (features & 0x60) { - case 0x00: - entry->mem.nwin = 0; - break; - case 0x20: - entry->mem.nwin = 1; - entry->mem.win[0].len = get_unaligned_le16(p) << 8; - entry->mem.win[0].card_addr = 0; - entry->mem.win[0].host_addr = 0; - p += 2; - if (p > q) - return -EINVAL; - break; - case 0x40: - entry->mem.nwin = 1; - entry->mem.win[0].len = get_unaligned_le16(p) << 8; - entry->mem.win[0].card_addr = get_unaligned_le16(p + 2) << 8; - entry->mem.win[0].host_addr = 0; - p += 4; - if (p > q) - return -EINVAL; - break; - case 0x60: - p = parse_mem(p, q, &entry->mem); - if (p == NULL) - return -EINVAL; - break; - } + /* Power options */ + if ((features & 3) > 0) { + p = parse_power(p, q, &entry->vcc); + if (p == NULL) + return -EINVAL; + } else + entry->vcc.present = 0; + if ((features & 3) > 1) { + p = parse_power(p, q, &entry->vpp1); + if (p == NULL) + return -EINVAL; + } else + entry->vpp1.present = 0; + if ((features & 3) > 2) { + p = parse_power(p, q, &entry->vpp2); + if (p == NULL) + return -EINVAL; + } else + entry->vpp2.present = 0; - /* Misc features */ - if (features & 0x80) { - if (p == q) - return -EINVAL; - entry->flags |= (*p << 8); - while (*p & 0x80) - if (++p == q) - return -EINVAL; - p++; - } + /* Timing options */ + if (features & 0x04) { + p = parse_timing(p, q, &entry->timing); + if (p == NULL) + return -EINVAL; + } else { + entry->timing.wait = 0; + entry->timing.ready = 0; + entry->timing.reserved = 0; + } - entry->subtuples = q-p; + /* I/O window options */ + if (features & 0x08) { + p = parse_io(p, q, &entry->io); + if (p == NULL) + return -EINVAL; + } else + entry->io.nwin = 0; + + /* Interrupt options */ + if (features & 0x10) { + p = parse_irq(p, q, &entry->irq); + if (p == NULL) + return -EINVAL; + } else + entry->irq.IRQInfo1 = 0; + + switch (features & 0x60) { + case 0x00: + entry->mem.nwin = 0; + break; + case 0x20: + entry->mem.nwin = 1; + entry->mem.win[0].len = get_unaligned_le16(p) << 8; + entry->mem.win[0].card_addr = 0; + entry->mem.win[0].host_addr = 0; + p += 2; + if (p > q) + return -EINVAL; + break; + case 0x40: + entry->mem.nwin = 1; + 
entry->mem.win[0].len = get_unaligned_le16(p) << 8; + entry->mem.win[0].card_addr = get_unaligned_le16(p + 2) << 8; + entry->mem.win[0].host_addr = 0; + p += 4; + if (p > q) + return -EINVAL; + break; + case 0x60: + p = parse_mem(p, q, &entry->mem); + if (p == NULL) + return -EINVAL; + break; + } + + /* Misc features */ + if (features & 0x80) { + if (p == q) + return -EINVAL; + entry->flags |= (*p << 8); + while (*p & 0x80) + if (++p == q) + return -EINVAL; + p++; + } + + entry->subtuples = q-p; - return 0; + return 0; } -/*====================================================================*/ static int parse_device_geo(tuple_t *tuple, cistpl_device_geo_t *geo) { - u_char *p, *q; - int n; + u_char *p, *q; + int n; - p = (u_char *)tuple->TupleData; - q = p + tuple->TupleDataLen; + p = (u_char *)tuple->TupleData; + q = p + tuple->TupleDataLen; - for (n = 0; n < CISTPL_MAX_DEVICES; n++) { - if (p > q-6) - break; - geo->geo[n].buswidth = p[0]; - geo->geo[n].erase_block = 1 << (p[1]-1); - geo->geo[n].read_block = 1 << (p[2]-1); - geo->geo[n].write_block = 1 << (p[3]-1); - geo->geo[n].partition = 1 << (p[4]-1); - geo->geo[n].interleave = 1 << (p[5]-1); - p += 6; - } - geo->ngeo = n; - return 0; + for (n = 0; n < CISTPL_MAX_DEVICES; n++) { + if (p > q-6) + break; + geo->geo[n].buswidth = p[0]; + geo->geo[n].erase_block = 1 << (p[1]-1); + geo->geo[n].read_block = 1 << (p[2]-1); + geo->geo[n].write_block = 1 << (p[3]-1); + geo->geo[n].partition = 1 << (p[4]-1); + geo->geo[n].interleave = 1 << (p[5]-1); + p += 6; + } + geo->ngeo = n; + return 0; } -/*====================================================================*/ static int parse_vers_2(tuple_t *tuple, cistpl_vers_2_t *v2) { - u_char *p, *q; - - if (tuple->TupleDataLen < 10) - return -EINVAL; - - p = tuple->TupleData; - q = p + tuple->TupleDataLen; - - v2->vers = p[0]; - v2->comply = p[1]; - v2->dindex = get_unaligned_le16(p + 2); - v2->vspec8 = p[6]; - v2->vspec9 = p[7]; - v2->nhdr = p[8]; - p += 9; - return parse_strings(p, q, 2, v2->str, &v2->vendor, NULL); + u_char *p, *q; + + if (tuple->TupleDataLen < 10) + return -EINVAL; + + p = tuple->TupleData; + q = p + tuple->TupleDataLen; + + v2->vers = p[0]; + v2->comply = p[1]; + v2->dindex = get_unaligned_le16(p + 2); + v2->vspec8 = p[6]; + v2->vspec9 = p[7]; + v2->nhdr = p[8]; + p += 9; + return parse_strings(p, q, 2, v2->str, &v2->vendor, NULL); } -/*====================================================================*/ static int parse_org(tuple_t *tuple, cistpl_org_t *org) { - u_char *p, *q; - int i; - - p = tuple->TupleData; - q = p + tuple->TupleDataLen; - if (p == q) - return -EINVAL; - org->data_org = *p; - if (++p == q) - return -EINVAL; - for (i = 0; i < 30; i++) { - org->desc[i] = *p; - if (*p == '\0') - break; + u_char *p, *q; + int i; + + p = tuple->TupleData; + q = p + tuple->TupleDataLen; + if (p == q) + return -EINVAL; + org->data_org = *p; if (++p == q) return -EINVAL; - } - return 0; + for (i = 0; i < 30; i++) { + org->desc[i] = *p; + if (*p == '\0') + break; + if (++p == q) + return -EINVAL; + } + return 0; } -/*====================================================================*/ static int parse_format(tuple_t *tuple, cistpl_format_t *fmt) { - u_char *p; + u_char *p; - if (tuple->TupleDataLen < 10) - return -EINVAL; + if (tuple->TupleDataLen < 10) + return -EINVAL; - p = tuple->TupleData; + p = tuple->TupleData; - fmt->type = p[0]; - fmt->edc = p[1]; - fmt->offset = get_unaligned_le32(p + 2); - fmt->length = get_unaligned_le32(p + 6); + fmt->type = p[0]; + fmt->edc = 
p[1]; + fmt->offset = get_unaligned_le32(p + 2); + fmt->length = get_unaligned_le32(p + 6); - return 0; + return 0; } -/*====================================================================*/ int pcmcia_parse_tuple(tuple_t *tuple, cisparse_t *parse) { - int ret = 0; - - if (tuple->TupleDataLen > tuple->TupleDataMax) - return -EINVAL; - switch (tuple->TupleCode) { - case CISTPL_DEVICE: - case CISTPL_DEVICE_A: - ret = parse_device(tuple, &parse->device); - break; - case CISTPL_CHECKSUM: - ret = parse_checksum(tuple, &parse->checksum); - break; - case CISTPL_LONGLINK_A: - case CISTPL_LONGLINK_C: - ret = parse_longlink(tuple, &parse->longlink); - break; - case CISTPL_LONGLINK_MFC: - ret = parse_longlink_mfc(tuple, &parse->longlink_mfc); - break; - case CISTPL_VERS_1: - ret = parse_vers_1(tuple, &parse->version_1); - break; - case CISTPL_ALTSTR: - ret = parse_altstr(tuple, &parse->altstr); - break; - case CISTPL_JEDEC_A: - case CISTPL_JEDEC_C: - ret = parse_jedec(tuple, &parse->jedec); - break; - case CISTPL_MANFID: - ret = parse_manfid(tuple, &parse->manfid); - break; - case CISTPL_FUNCID: - ret = parse_funcid(tuple, &parse->funcid); - break; - case CISTPL_FUNCE: - ret = parse_funce(tuple, &parse->funce); - break; - case CISTPL_CONFIG: - ret = parse_config(tuple, &parse->config); - break; - case CISTPL_CFTABLE_ENTRY: - ret = parse_cftable_entry(tuple, &parse->cftable_entry); - break; - case CISTPL_DEVICE_GEO: - case CISTPL_DEVICE_GEO_A: - ret = parse_device_geo(tuple, &parse->device_geo); - break; - case CISTPL_VERS_2: - ret = parse_vers_2(tuple, &parse->vers_2); - break; - case CISTPL_ORG: - ret = parse_org(tuple, &parse->org); - break; - case CISTPL_FORMAT: - case CISTPL_FORMAT_A: - ret = parse_format(tuple, &parse->format); - break; - case CISTPL_NO_LINK: - case CISTPL_LINKTARGET: - ret = 0; - break; - default: - ret = -EINVAL; - break; - } - if (ret) - pr_debug("parse_tuple failed %d\n", ret); - return ret; + int ret = 0; + + if (tuple->TupleDataLen > tuple->TupleDataMax) + return -EINVAL; + switch (tuple->TupleCode) { + case CISTPL_DEVICE: + case CISTPL_DEVICE_A: + ret = parse_device(tuple, &parse->device); + break; + case CISTPL_CHECKSUM: + ret = parse_checksum(tuple, &parse->checksum); + break; + case CISTPL_LONGLINK_A: + case CISTPL_LONGLINK_C: + ret = parse_longlink(tuple, &parse->longlink); + break; + case CISTPL_LONGLINK_MFC: + ret = parse_longlink_mfc(tuple, &parse->longlink_mfc); + break; + case CISTPL_VERS_1: + ret = parse_vers_1(tuple, &parse->version_1); + break; + case CISTPL_ALTSTR: + ret = parse_altstr(tuple, &parse->altstr); + break; + case CISTPL_JEDEC_A: + case CISTPL_JEDEC_C: + ret = parse_jedec(tuple, &parse->jedec); + break; + case CISTPL_MANFID: + ret = parse_manfid(tuple, &parse->manfid); + break; + case CISTPL_FUNCID: + ret = parse_funcid(tuple, &parse->funcid); + break; + case CISTPL_FUNCE: + ret = parse_funce(tuple, &parse->funce); + break; + case CISTPL_CONFIG: + ret = parse_config(tuple, &parse->config); + break; + case CISTPL_CFTABLE_ENTRY: + ret = parse_cftable_entry(tuple, &parse->cftable_entry); + break; + case CISTPL_DEVICE_GEO: + case CISTPL_DEVICE_GEO_A: + ret = parse_device_geo(tuple, &parse->device_geo); + break; + case CISTPL_VERS_2: + ret = parse_vers_2(tuple, &parse->vers_2); + break; + case CISTPL_ORG: + ret = parse_org(tuple, &parse->org); + break; + case CISTPL_FORMAT: + case CISTPL_FORMAT_A: + ret = parse_format(tuple, &parse->format); + break; + case CISTPL_NO_LINK: + case CISTPL_LINKTARGET: + ret = 0; + break; + default: + ret = -EINVAL; + 
break; + } + if (ret) + pr_debug("parse_tuple failed %d\n", ret); + return ret; } EXPORT_SYMBOL(pcmcia_parse_tuple); -/*====================================================================== - This is used internally by Card Services to look up CIS stuff. - -======================================================================*/ - -int pccard_read_tuple(struct pcmcia_socket *s, unsigned int function, cisdata_t code, void *parse) +/** + * pccard_read_tuple() - internal CIS tuple access + * @s: the struct pcmcia_socket where the card is inserted + * @function: the device function we loop for + * @code: which CIS code shall we look for? + * @parse: buffer where the tuple shall be parsed (or NULL, if no parse) + * + * pccard_read_tuple() reads out one tuple and attempts to parse it + */ +int pccard_read_tuple(struct pcmcia_socket *s, unsigned int function, + cisdata_t code, void *parse) { - tuple_t tuple; - cisdata_t *buf; - int ret; - - buf = kmalloc(256, GFP_KERNEL); - if (buf == NULL) { - dev_printk(KERN_WARNING, &s->dev, "no memory to read tuple\n"); - return -ENOMEM; - } - tuple.DesiredTuple = code; - tuple.Attributes = 0; - if (function == BIND_FN_ALL) - tuple.Attributes = TUPLE_RETURN_COMMON; - ret = pccard_get_first_tuple(s, function, &tuple); - if (ret != 0) - goto done; - tuple.TupleData = buf; - tuple.TupleOffset = 0; - tuple.TupleDataMax = 255; - ret = pccard_get_tuple_data(s, &tuple); - if (ret != 0) - goto done; - ret = pcmcia_parse_tuple(&tuple, parse); + tuple_t tuple; + cisdata_t *buf; + int ret; + + buf = kmalloc(256, GFP_KERNEL); + if (buf == NULL) { + dev_printk(KERN_WARNING, &s->dev, "no memory to read tuple\n"); + return -ENOMEM; + } + tuple.DesiredTuple = code; + tuple.Attributes = 0; + if (function == BIND_FN_ALL) + tuple.Attributes = TUPLE_RETURN_COMMON; + ret = pccard_get_first_tuple(s, function, &tuple); + if (ret != 0) + goto done; + tuple.TupleData = buf; + tuple.TupleOffset = 0; + tuple.TupleDataMax = 255; + ret = pccard_get_tuple_data(s, &tuple); + if (ret != 0) + goto done; + ret = pcmcia_parse_tuple(&tuple, parse); done: - kfree(buf); - return ret; + kfree(buf); + return ret; } diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c index 3889cf07d6c..9254ab0b29b 100644 --- a/drivers/pcmcia/db1xxx_ss.c +++ b/drivers/pcmcia/db1xxx_ss.c @@ -42,7 +42,6 @@ struct db1x_pcmcia_sock { int nr; /* socket number */ void *virt_io; - /* the "pseudo" addresses of the PCMCIA space. */ phys_addr_t phys_io; phys_addr_t phys_attr; phys_addr_t phys_mem; @@ -437,7 +436,7 @@ static int __devinit db1x_pcmcia_socket_probe(struct platform_device *pdev) * This includes IRQs for Carddetection/ejection, the card * itself and optional status change detection. * Also, the memory areas covered by a socket. For these - * we require the 32bit "pseudo" addresses (see the au1000.h + * we require the real 36bit addresses (see the au1000.h * header for more information). */ @@ -459,11 +458,7 @@ static int __devinit db1x_pcmcia_socket_probe(struct platform_device *pdev) ret = -ENODEV; - /* - * pseudo-attr: The 32bit address of the PCMCIA attribute space - * for this socket (usually the 36bit address shifted 4 to the - * right). 
- */ + /* 36bit PCMCIA Attribute area address */ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-attr"); if (!r) { printk(KERN_ERR "pcmcia%d has no 'pseudo-attr' resource!\n", @@ -472,10 +467,7 @@ static int __devinit db1x_pcmcia_socket_probe(struct platform_device *pdev) } sock->phys_attr = r->start; - /* - * pseudo-mem: The 32bit address of the PCMCIA memory space for - * this socket (usually the 36bit address shifted 4 to the right) - */ + /* 36bit PCMCIA Memory area address */ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-mem"); if (!r) { printk(KERN_ERR "pcmcia%d has no 'pseudo-mem' resource!\n", @@ -484,10 +476,7 @@ static int __devinit db1x_pcmcia_socket_probe(struct platform_device *pdev) } sock->phys_mem = r->start; - /* - * pseudo-io: The 32bit address of the PCMCIA IO space for this - * socket (usually the 36bit address shifted 4 to the right). - */ + /* 36bit PCMCIA IO area address */ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-io"); if (!r) { printk(KERN_ERR "pcmcia%d has no 'pseudo-io' resource!\n", diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c index e1741cd875a..7c204910a77 100644 --- a/drivers/pcmcia/pd6729.c +++ b/drivers/pcmcia/pd6729.c @@ -48,23 +48,13 @@ MODULE_AUTHOR("Jun Komuro <komurojun-mbn@nifty.com>"); * Specifies the interrupt delivery mode. The default (1) is to use PCI * interrupts; a value of 0 selects ISA interrupts. This must be set for * correct operation of PCI card readers. - * - * irq_list=i,j,... - * This list limits the set of interrupts that can be used by PCMCIA - * cards. - * The default list is 3,4,5,7,9,10,11. - * (irq_list parameter is not used, if irq_mode = 1) */ static int irq_mode = 1; /* 0 = ISA interrupt, 1 = PCI interrupt */ -static int irq_list[16]; -static unsigned int irq_list_count = 0; module_param(irq_mode, int, 0444); -module_param_array(irq_list, int, &irq_list_count, 0444); MODULE_PARM_DESC(irq_mode, "interrupt delivery mode. 0 = ISA, 1 = PCI. default is 1"); -MODULE_PARM_DESC(irq_list, "interrupts that can be used by PCMCIA cards"); static DEFINE_SPINLOCK(port_lock); @@ -605,13 +595,7 @@ static u_int __devinit pd6729_isa_scan(void) return 0; } - if (irq_list_count == 0) - mask0 = 0xffff; - else - for (i = mask0 = 0; i < irq_list_count; i++) - mask0 |= (1<<irq_list[i]); - - mask0 &= PD67_MASK; + mask0 = PD67_MASK; /* just find interrupts that aren't in use */ for (i = 0; i < 16; i++) diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c index e6f7d410aed..452c83b512c 100644 --- a/drivers/pcmcia/rsrc_mgr.c +++ b/drivers/pcmcia/rsrc_mgr.c @@ -79,9 +79,8 @@ static resource_size_t pcmcia_align(void *align_data, #ifdef CONFIG_X86 if (res->flags & IORESOURCE_IO) { - if (start & 0x300) { + if (start & 0x300) start = (start + 0x3ff) & ~0x3ff; - } } #endif diff --git a/drivers/pcmcia/xxs1500_ss.c b/drivers/pcmcia/xxs1500_ss.c index 61560cd6e28..f9009d34254 100644 --- a/drivers/pcmcia/xxs1500_ss.c +++ b/drivers/pcmcia/xxs1500_ss.c @@ -218,11 +218,7 @@ static int __devinit xxs1500_pcmcia_probe(struct platform_device *pdev) ret = -ENODEV; - /* - * pseudo-attr: The 32bit address of the PCMCIA attribute space - * for this socket (usually the 36bit address shifted 4 to the - * right). 
- */ + /* 36bit PCMCIA Attribute area address */ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-attr"); if (!r) { dev_err(&pdev->dev, "missing 'pcmcia-attr' resource!\n"); @@ -230,10 +226,7 @@ static int __devinit xxs1500_pcmcia_probe(struct platform_device *pdev) } sock->phys_attr = r->start; - /* - * pseudo-mem: The 32bit address of the PCMCIA memory space for - * this socket (usually the 36bit address shifted 4 to the right) - */ + /* 36bit PCMCIA Memory area address */ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-mem"); if (!r) { dev_err(&pdev->dev, "missing 'pcmcia-mem' resource!\n"); @@ -241,10 +234,7 @@ static int __devinit xxs1500_pcmcia_probe(struct platform_device *pdev) } sock->phys_mem = r->start; - /* - * pseudo-io: The 32bit address of the PCMCIA IO space for this - * socket (usually the 36bit address shifted 4 to the right). - */ + /* 36bit PCMCIA IO area address */ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-io"); if (!r) { dev_err(&pdev->dev, "missing 'pcmcia-io' resource!\n"); diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c index b85375f8762..967c766f53b 100644 --- a/drivers/pcmcia/yenta_socket.c +++ b/drivers/pcmcia/yenta_socket.c @@ -1408,10 +1408,10 @@ static struct pci_device_id yenta_table[] = { CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7510, TI12XX), CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7610, TI12XX), - CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_710, TI12XX), - CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_712, TI12XX), - CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_720, TI12XX), - CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_722, TI12XX), + CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_710, ENE), + CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_712, ENE), + CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_720, ENE), + CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_722, ENE), CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1211, ENE), CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1225, ENE), CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1410, ENE), diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 6848f213eb5..cd2ee6fce1b 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -59,6 +59,8 @@ config ASUS_LAPTOP select NEW_LEDS select BACKLIGHT_CLASS_DEVICE depends on INPUT + depends on RFKILL || RFKILL = n + select INPUT_SPARSEKMAP ---help--- This is the new Linux driver for Asus laptops. It may also support some MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate @@ -177,6 +179,7 @@ config COMPAL_LAPTOP tristate "Compal Laptop Extras" depends on ACPI depends on BACKLIGHT_CLASS_DEVICE + depends on RFKILL ---help--- This is a driver for laptops built by Compal: @@ -320,9 +323,15 @@ config THINKPAD_ACPI_VIDEO server running, phase of the moon, and the current mood of Schroedinger's cat. If you can use X.org's RandR to control your ThinkPad's video output ports instead of this feature, - don't think twice: do it and say N here to save some memory. + don't think twice: do it and say N here to save memory and avoid + bad interactions with X.org. + + NOTE: access to this feature is limited to processes with the + CAP_SYS_ADMIN capability, to avoid local DoS issues in platforms + where it interacts badly with X.org. - If you are not sure, say Y here. + If you are not sure, say Y here but do try to check if you could + be using X.org RandR instead. 
config THINKPAD_ACPI_HOTKEY_POLL bool "Support NVRAM polling for hot keys" diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index 61a1c750365..791fcf32150 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c @@ -45,58 +45,23 @@ #include <linux/fb.h> #include <linux/leds.h> #include <linux/platform_device.h> +#include <linux/uaccess.h> +#include <linux/input.h> +#include <linux/input/sparse-keymap.h> +#include <linux/rfkill.h> #include <acpi/acpi_drivers.h> #include <acpi/acpi_bus.h> -#include <asm/uaccess.h> -#include <linux/input.h> - -#define ASUS_LAPTOP_VERSION "0.42" - -#define ASUS_HOTK_NAME "Asus Laptop Support" -#define ASUS_HOTK_CLASS "hotkey" -#define ASUS_HOTK_DEVICE_NAME "Hotkey" -#define ASUS_HOTK_FILE KBUILD_MODNAME -#define ASUS_HOTK_PREFIX "\\_SB.ATKD." +#define ASUS_LAPTOP_VERSION "0.42" -/* - * Some events we use, same for all Asus - */ -#define ATKD_BR_UP 0x10 -#define ATKD_BR_DOWN 0x20 -#define ATKD_LCD_ON 0x33 -#define ATKD_LCD_OFF 0x34 - -/* - * Known bits returned by \_SB.ATKD.HWRS - */ -#define WL_HWRS 0x80 -#define BT_HWRS 0x100 - -/* - * Flags for hotk status - * WL_ON and BT_ON are also used for wireless_status() - */ -#define WL_ON 0x01 /* internal Wifi */ -#define BT_ON 0x02 /* internal Bluetooth */ -#define MLED_ON 0x04 /* mail LED */ -#define TLED_ON 0x08 /* touchpad LED */ -#define RLED_ON 0x10 /* Record LED */ -#define PLED_ON 0x20 /* Phone LED */ -#define GLED_ON 0x40 /* Gaming LED */ -#define LCD_ON 0x80 /* LCD backlight */ -#define GPS_ON 0x100 /* GPS */ -#define KEY_ON 0x200 /* Keyboard backlight */ - -#define ASUS_LOG ASUS_HOTK_FILE ": " -#define ASUS_ERR KERN_ERR ASUS_LOG -#define ASUS_WARNING KERN_WARNING ASUS_LOG -#define ASUS_NOTICE KERN_NOTICE ASUS_LOG -#define ASUS_INFO KERN_INFO ASUS_LOG -#define ASUS_DEBUG KERN_DEBUG ASUS_LOG +#define ASUS_LAPTOP_NAME "Asus Laptop Support" +#define ASUS_LAPTOP_CLASS "hotkey" +#define ASUS_LAPTOP_DEVICE_NAME "Hotkey" +#define ASUS_LAPTOP_FILE KBUILD_MODNAME +#define ASUS_LAPTOP_PREFIX "\\_SB.ATKD." MODULE_AUTHOR("Julien Lerouge, Karol Kozimor, Corentin Chary"); -MODULE_DESCRIPTION(ASUS_HOTK_NAME); +MODULE_DESCRIPTION(ASUS_LAPTOP_NAME); MODULE_LICENSE("GPL"); /* @@ -113,225 +78,209 @@ static uint wapf = 1; module_param(wapf, uint, 0644); MODULE_PARM_DESC(wapf, "WAPF value"); -#define ASUS_HANDLE(object, paths...) \ - static acpi_handle object##_handle = NULL; \ - static char *object##_paths[] = { paths } +static uint wlan_status = 1; +static uint bluetooth_status = 1; + +module_param(wlan_status, uint, 0644); +MODULE_PARM_DESC(wlan_status, "Set the wireless status on boot " + "(0 = disabled, 1 = enabled, -1 = don't do anything). " + "default is 1"); + +module_param(bluetooth_status, uint, 0644); +MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot " + "(0 = disabled, 1 = enabled, -1 = don't do anything). 
" + "default is 1"); + +/* + * Some events we use, same for all Asus + */ +#define ATKD_BR_UP 0x10 /* (event & ~ATKD_BR_UP) = brightness level */ +#define ATKD_BR_DOWN 0x20 /* (event & ~ATKD_BR_DOWN) = britghness level */ +#define ATKD_BR_MIN ATKD_BR_UP +#define ATKD_BR_MAX (ATKD_BR_DOWN | 0xF) /* 0x2f */ +#define ATKD_LCD_ON 0x33 +#define ATKD_LCD_OFF 0x34 + +/* + * Known bits returned by \_SB.ATKD.HWRS + */ +#define WL_HWRS 0x80 +#define BT_HWRS 0x100 + +/* + * Flags for hotk status + * WL_ON and BT_ON are also used for wireless_status() + */ +#define WL_RSTS 0x01 /* internal Wifi */ +#define BT_RSTS 0x02 /* internal Bluetooth */ /* LED */ -ASUS_HANDLE(mled_set, ASUS_HOTK_PREFIX "MLED"); -ASUS_HANDLE(tled_set, ASUS_HOTK_PREFIX "TLED"); -ASUS_HANDLE(rled_set, ASUS_HOTK_PREFIX "RLED"); /* W1JC */ -ASUS_HANDLE(pled_set, ASUS_HOTK_PREFIX "PLED"); /* A7J */ -ASUS_HANDLE(gled_set, ASUS_HOTK_PREFIX "GLED"); /* G1, G2 (probably) */ +#define METHOD_MLED "MLED" +#define METHOD_TLED "TLED" +#define METHOD_RLED "RLED" /* W1JC */ +#define METHOD_PLED "PLED" /* A7J */ +#define METHOD_GLED "GLED" /* G1, G2 (probably) */ /* LEDD */ -ASUS_HANDLE(ledd_set, ASUS_HOTK_PREFIX "SLCM"); +#define METHOD_LEDD "SLCM" /* * Bluetooth and WLAN * WLED and BLED are not handled like other XLED, because in some dsdt * they also control the WLAN/Bluetooth device. */ -ASUS_HANDLE(wl_switch, ASUS_HOTK_PREFIX "WLED"); -ASUS_HANDLE(bt_switch, ASUS_HOTK_PREFIX "BLED"); -ASUS_HANDLE(wireless_status, ASUS_HOTK_PREFIX "RSTS"); /* All new models */ +#define METHOD_WLAN "WLED" +#define METHOD_BLUETOOTH "BLED" +#define METHOD_WL_STATUS "RSTS" /* Brightness */ -ASUS_HANDLE(brightness_set, ASUS_HOTK_PREFIX "SPLV"); -ASUS_HANDLE(brightness_get, ASUS_HOTK_PREFIX "GPLV"); +#define METHOD_BRIGHTNESS_SET "SPLV" +#define METHOD_BRIGHTNESS_GET "GPLV" /* Backlight */ -ASUS_HANDLE(lcd_switch, "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */ - "\\_SB.PCI0.ISA.EC0._Q10", /* A1x */ - "\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */ - "\\_SB.PCI0.PX40.EC0.Q10", /* M1A */ - "\\_SB.PCI0.LPCB.EC0._Q10", /* P30 */ - "\\_SB.PCI0.LPCB.EC0._Q0E", /* P30/P35 */ - "\\_SB.PCI0.PX40.Q10", /* S1x */ - "\\Q10"); /* A2x, L2D, L3D, M2E */ +static acpi_handle lcd_switch_handle; +static const char *lcd_switch_paths[] = { + "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */ + "\\_SB.PCI0.ISA.EC0._Q10", /* A1x */ + "\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */ + "\\_SB.PCI0.PX40.EC0.Q10", /* M1A */ + "\\_SB.PCI0.LPCB.EC0._Q10", /* P30 */ + "\\_SB.PCI0.LPCB.EC0._Q0E", /* P30/P35 */ + "\\_SB.PCI0.PX40.Q10", /* S1x */ + "\\Q10"}; /* A2x, L2D, L3D, M2E */ /* Display */ -ASUS_HANDLE(display_set, ASUS_HOTK_PREFIX "SDSP"); -ASUS_HANDLE(display_get, - /* A6B, A6K A6R A7D F3JM L4R M6R A3G M6A M6V VX-1 V6J V6V W3Z */ - "\\_SB.PCI0.P0P1.VGA.GETD", - /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V S5A M5A z33A W1Jc W2V G1 */ - "\\_SB.PCI0.P0P2.VGA.GETD", - /* A6V A6Q */ - "\\_SB.PCI0.P0P3.VGA.GETD", - /* A6T, A6M */ - "\\_SB.PCI0.P0PA.VGA.GETD", - /* L3C */ - "\\_SB.PCI0.PCI1.VGAC.NMAP", - /* Z96F */ - "\\_SB.PCI0.VGA.GETD", - /* A2D */ - "\\ACTD", - /* A4G Z71A W1N W5A W5F M2N M3N M5N M6N S1N S5N */ - "\\ADVG", - /* P30 */ - "\\DNXT", - /* A2H D1 L2D L3D L3H L2E L5D L5C M1A M2E L4L W3V */ - "\\INFB", - /* A3F A6F A3N A3L M6N W3N W6A */ - "\\SSTE"); - -ASUS_HANDLE(ls_switch, ASUS_HOTK_PREFIX "ALSC"); /* Z71A Z71V */ -ASUS_HANDLE(ls_level, ASUS_HOTK_PREFIX "ALSL"); /* Z71A Z71V */ +#define METHOD_SWITCH_DISPLAY "SDSP" + +static acpi_handle display_get_handle; +static const char *display_get_paths[] = { 
+ /* A6B, A6K A6R A7D F3JM L4R M6R A3G M6A M6V VX-1 V6J V6V W3Z */ + "\\_SB.PCI0.P0P1.VGA.GETD", + /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V S5A M5A z33A W1Jc W2V G1 */ + "\\_SB.PCI0.P0P2.VGA.GETD", + /* A6V A6Q */ + "\\_SB.PCI0.P0P3.VGA.GETD", + /* A6T, A6M */ + "\\_SB.PCI0.P0PA.VGA.GETD", + /* L3C */ + "\\_SB.PCI0.PCI1.VGAC.NMAP", + /* Z96F */ + "\\_SB.PCI0.VGA.GETD", + /* A2D */ + "\\ACTD", + /* A4G Z71A W1N W5A W5F M2N M3N M5N M6N S1N S5N */ + "\\ADVG", + /* P30 */ + "\\DNXT", + /* A2H D1 L2D L3D L3H L2E L5D L5C M1A M2E L4L W3V */ + "\\INFB", + /* A3F A6F A3N A3L M6N W3N W6A */ + "\\SSTE"}; + +#define METHOD_ALS_CONTROL "ALSC" /* Z71A Z71V */ +#define METHOD_ALS_LEVEL "ALSL" /* Z71A Z71V */ /* GPS */ /* R2H use different handle for GPS on/off */ -ASUS_HANDLE(gps_on, ASUS_HOTK_PREFIX "SDON"); /* R2H */ -ASUS_HANDLE(gps_off, ASUS_HOTK_PREFIX "SDOF"); /* R2H */ -ASUS_HANDLE(gps_status, ASUS_HOTK_PREFIX "GPST"); +#define METHOD_GPS_ON "SDON" +#define METHOD_GPS_OFF "SDOF" +#define METHOD_GPS_STATUS "GPST" /* Keyboard light */ -ASUS_HANDLE(kled_set, ASUS_HOTK_PREFIX "SLKB"); -ASUS_HANDLE(kled_get, ASUS_HOTK_PREFIX "GLKB"); +#define METHOD_KBD_LIGHT_SET "SLKB" +#define METHOD_KBD_LIGHT_GET "GLKB" /* - * This is the main structure, we can use it to store anything interesting - * about the hotk device + * Define a specific led structure to keep the main structure clean */ -struct asus_hotk { - char *name; /* laptop name */ - struct acpi_device *device; /* the device we are in */ - acpi_handle handle; /* the handle of the hotk device */ - char status; /* status of the hotk, for LEDs, ... */ - u32 ledd_status; /* status of the LED display */ - u8 light_level; /* light sensor level */ - u8 light_switch; /* light sensor switch value */ - u16 event_count[128]; /* count for each event TODO make this better */ - struct input_dev *inputdev; - u16 *keycode_map; +struct asus_led { + int wk; + struct work_struct work; + struct led_classdev led; + struct asus_laptop *asus; + const char *method; }; /* - * This header is made available to allow proper configuration given model, - * revision number , ... 
this info cannot go in struct asus_hotk because it is - * available before the hotk - */ -static struct acpi_table_header *asus_info; - -/* The actual device the driver binds to */ -static struct asus_hotk *hotk; - -/* - * The hotkey driver declaration + * This is the main structure, we can use it to store anything interesting + * about the hotk device */ -static const struct acpi_device_id asus_device_ids[] = { - {"ATK0100", 0}, - {"ATK0101", 0}, - {"", 0}, -}; -MODULE_DEVICE_TABLE(acpi, asus_device_ids); +struct asus_laptop { + char *name; /* laptop name */ -static int asus_hotk_add(struct acpi_device *device); -static int asus_hotk_remove(struct acpi_device *device, int type); -static void asus_hotk_notify(struct acpi_device *device, u32 event); + struct acpi_table_header *dsdt_info; + struct platform_device *platform_device; + struct acpi_device *device; /* the device we are in */ + struct backlight_device *backlight_device; -static struct acpi_driver asus_hotk_driver = { - .name = ASUS_HOTK_NAME, - .class = ASUS_HOTK_CLASS, - .owner = THIS_MODULE, - .ids = asus_device_ids, - .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS, - .ops = { - .add = asus_hotk_add, - .remove = asus_hotk_remove, - .notify = asus_hotk_notify, - }, -}; + struct input_dev *inputdev; + struct key_entry *keymap; -/* The backlight device /sys/class/backlight */ -static struct backlight_device *asus_backlight_device; + struct asus_led mled; + struct asus_led tled; + struct asus_led rled; + struct asus_led pled; + struct asus_led gled; + struct asus_led kled; + struct workqueue_struct *led_workqueue; -/* - * The backlight class declaration - */ -static int read_brightness(struct backlight_device *bd); -static int update_bl_status(struct backlight_device *bd); -static struct backlight_ops asusbl_ops = { - .get_brightness = read_brightness, - .update_status = update_bl_status, -}; + int wireless_status; + bool have_rsts; + int lcd_state; -/* - * These functions actually update the LED's, and are called from a - * workqueue. By doing this as separate work rather than when the LED - * subsystem asks, we avoid messing with the Asus ACPI stuff during a - * potentially bad time, such as a timer interrupt. 
- */ -static struct workqueue_struct *led_workqueue; - -#define ASUS_LED(object, ledname, max) \ - static void object##_led_set(struct led_classdev *led_cdev, \ - enum led_brightness value); \ - static enum led_brightness object##_led_get( \ - struct led_classdev *led_cdev); \ - static void object##_led_update(struct work_struct *ignored); \ - static int object##_led_wk; \ - static DECLARE_WORK(object##_led_work, object##_led_update); \ - static struct led_classdev object##_led = { \ - .name = "asus::" ledname, \ - .brightness_set = object##_led_set, \ - .brightness_get = object##_led_get, \ - .max_brightness = max \ - } + struct rfkill *gps_rfkill; -ASUS_LED(mled, "mail", 1); -ASUS_LED(tled, "touchpad", 1); -ASUS_LED(rled, "record", 1); -ASUS_LED(pled, "phone", 1); -ASUS_LED(gled, "gaming", 1); -ASUS_LED(kled, "kbd_backlight", 3); - -struct key_entry { - char type; - u8 code; - u16 keycode; + acpi_handle handle; /* the handle of the hotk device */ + u32 ledd_status; /* status of the LED display */ + u8 light_level; /* light sensor level */ + u8 light_switch; /* light sensor switch value */ + u16 event_count[128]; /* count for each event TODO make this better */ + u16 *keycode_map; }; -enum { KE_KEY, KE_END }; - -static struct key_entry asus_keymap[] = { - {KE_KEY, 0x02, KEY_SCREENLOCK}, - {KE_KEY, 0x05, KEY_WLAN}, - {KE_KEY, 0x08, KEY_F13}, - {KE_KEY, 0x17, KEY_ZOOM}, - {KE_KEY, 0x1f, KEY_BATTERY}, - {KE_KEY, 0x30, KEY_VOLUMEUP}, - {KE_KEY, 0x31, KEY_VOLUMEDOWN}, - {KE_KEY, 0x32, KEY_MUTE}, - {KE_KEY, 0x33, KEY_SWITCHVIDEOMODE}, - {KE_KEY, 0x34, KEY_SWITCHVIDEOMODE}, - {KE_KEY, 0x40, KEY_PREVIOUSSONG}, - {KE_KEY, 0x41, KEY_NEXTSONG}, - {KE_KEY, 0x43, KEY_STOPCD}, - {KE_KEY, 0x45, KEY_PLAYPAUSE}, - {KE_KEY, 0x4c, KEY_MEDIA}, - {KE_KEY, 0x50, KEY_EMAIL}, - {KE_KEY, 0x51, KEY_WWW}, - {KE_KEY, 0x55, KEY_CALC}, - {KE_KEY, 0x5C, KEY_SCREENLOCK}, /* Screenlock */ - {KE_KEY, 0x5D, KEY_WLAN}, - {KE_KEY, 0x5E, KEY_WLAN}, - {KE_KEY, 0x5F, KEY_WLAN}, - {KE_KEY, 0x60, KEY_SWITCHVIDEOMODE}, - {KE_KEY, 0x61, KEY_SWITCHVIDEOMODE}, - {KE_KEY, 0x62, KEY_SWITCHVIDEOMODE}, - {KE_KEY, 0x63, KEY_SWITCHVIDEOMODE}, - {KE_KEY, 0x6B, KEY_F13}, /* Lock Touchpad */ - {KE_KEY, 0x82, KEY_CAMERA}, - {KE_KEY, 0x88, KEY_WLAN }, - {KE_KEY, 0x8A, KEY_PROG1}, - {KE_KEY, 0x95, KEY_MEDIA}, - {KE_KEY, 0x99, KEY_PHONE}, - {KE_KEY, 0xc4, KEY_KBDILLUMUP}, - {KE_KEY, 0xc5, KEY_KBDILLUMDOWN}, +static const struct key_entry asus_keymap[] = { + /* Lenovo SL Specific keycodes */ + {KE_KEY, 0x02, { KEY_SCREENLOCK } }, + {KE_KEY, 0x05, { KEY_WLAN } }, + {KE_KEY, 0x08, { KEY_F13 } }, + {KE_KEY, 0x17, { KEY_ZOOM } }, + {KE_KEY, 0x1f, { KEY_BATTERY } }, + /* End of Lenovo SL Specific keycodes */ + {KE_KEY, 0x30, { KEY_VOLUMEUP } }, + {KE_KEY, 0x31, { KEY_VOLUMEDOWN } }, + {KE_KEY, 0x32, { KEY_MUTE } }, + {KE_KEY, 0x33, { KEY_SWITCHVIDEOMODE } }, + {KE_KEY, 0x34, { KEY_SWITCHVIDEOMODE } }, + {KE_KEY, 0x40, { KEY_PREVIOUSSONG } }, + {KE_KEY, 0x41, { KEY_NEXTSONG } }, + {KE_KEY, 0x43, { KEY_STOPCD } }, + {KE_KEY, 0x45, { KEY_PLAYPAUSE } }, + {KE_KEY, 0x4c, { KEY_MEDIA } }, + {KE_KEY, 0x50, { KEY_EMAIL } }, + {KE_KEY, 0x51, { KEY_WWW } }, + {KE_KEY, 0x55, { KEY_CALC } }, + {KE_KEY, 0x5C, { KEY_SCREENLOCK } }, /* Screenlock */ + {KE_KEY, 0x5D, { KEY_WLAN } }, + {KE_KEY, 0x5E, { KEY_WLAN } }, + {KE_KEY, 0x5F, { KEY_WLAN } }, + {KE_KEY, 0x60, { KEY_SWITCHVIDEOMODE } }, + {KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } }, + {KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } }, + {KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } }, + {KE_KEY, 0x6B, { KEY_F13 } }, /* Lock Touchpad 
*/ + {KE_KEY, 0x7E, { KEY_BLUETOOTH } }, + {KE_KEY, 0x7D, { KEY_BLUETOOTH } }, + {KE_KEY, 0x82, { KEY_CAMERA } }, + {KE_KEY, 0x88, { KEY_WLAN } }, + {KE_KEY, 0x8A, { KEY_PROG1 } }, + {KE_KEY, 0x95, { KEY_MEDIA } }, + {KE_KEY, 0x99, { KEY_PHONE } }, + {KE_KEY, 0xc4, { KEY_KBDILLUMUP } }, + {KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } }, {KE_END, 0}, }; + /* * This function evaluates an ACPI method, given an int as parameter, the * method is searched within the scope of the handle, can be NULL. The output @@ -339,8 +288,8 @@ static struct key_entry asus_keymap[] = { * * returns 0 if write is successful, -1 else. */ -static int write_acpi_int(acpi_handle handle, const char *method, int val, - struct acpi_buffer *output) +static int write_acpi_int_ret(acpi_handle handle, const char *method, int val, + struct acpi_buffer *output) { struct acpi_object_list params; /* list of input parameters (an int) */ union acpi_object in_obj; /* the only param we use */ @@ -361,102 +310,82 @@ static int write_acpi_int(acpi_handle handle, const char *method, int val, return -1; } -static int read_wireless_status(int mask) +static int write_acpi_int(acpi_handle handle, const char *method, int val) { - unsigned long long status; - acpi_status rv = AE_OK; + return write_acpi_int_ret(handle, method, val, NULL); +} + +static int acpi_check_handle(acpi_handle handle, const char *method, + acpi_handle *ret) +{ + acpi_status status; - if (!wireless_status_handle) - return (hotk->status & mask) ? 1 : 0; + if (method == NULL) + return -ENODEV; - rv = acpi_evaluate_integer(wireless_status_handle, NULL, NULL, &status); - if (ACPI_FAILURE(rv)) - pr_warning("Error reading Wireless status\n"); - else - return (status & mask) ? 1 : 0; + if (ret) + status = acpi_get_handle(handle, (char *)method, + ret); + else { + acpi_handle dummy; - return (hotk->status & mask) ? 1 : 0; + status = acpi_get_handle(handle, (char *)method, + &dummy); + } + + if (status != AE_OK) { + if (ret) + pr_warning("Error finding %s\n", method); + return -ENODEV; + } + return 0; } -static int read_gps_status(void) +/* Generic LED function */ +static int asus_led_set(struct asus_laptop *asus, const char *method, + int value) { - unsigned long long status; - acpi_status rv = AE_OK; - - rv = acpi_evaluate_integer(gps_status_handle, NULL, NULL, &status); - if (ACPI_FAILURE(rv)) - pr_warning("Error reading GPS status\n"); + if (!strcmp(method, METHOD_MLED)) + value = !value; + else if (!strcmp(method, METHOD_GLED)) + value = !value + 1; else - return status ? 1 : 0; + value = !!value; - return (hotk->status & GPS_ON) ? 1 : 0; + return write_acpi_int(asus->handle, method, value); } -/* Generic LED functions */ -static int read_status(int mask) +/* + * LEDs + */ +/* /sys/class/led handlers */ +static void asus_led_cdev_set(struct led_classdev *led_cdev, + enum led_brightness value) { - /* There is a special method for both wireless devices */ - if (mask == BT_ON || mask == WL_ON) - return read_wireless_status(mask); - else if (mask == GPS_ON) - return read_gps_status(); + struct asus_led *led = container_of(led_cdev, struct asus_led, led); + struct asus_laptop *asus = led->asus; - return (hotk->status & mask) ? 1 : 0; + led->wk = !!value; + queue_work(asus->led_workqueue, &led->work); } -static void write_status(acpi_handle handle, int out, int mask) +static void asus_led_cdev_update(struct work_struct *work) { - hotk->status = (out) ? 
(hotk->status | mask) : (hotk->status & ~mask); - - switch (mask) { - case MLED_ON: - out = !(out & 0x1); - break; - case GLED_ON: - out = (out & 0x1) + 1; - break; - case GPS_ON: - handle = (out) ? gps_on_handle : gps_off_handle; - out = 0x02; - break; - default: - out &= 0x1; - break; - } + struct asus_led *led = container_of(work, struct asus_led, work); + struct asus_laptop *asus = led->asus; - if (write_acpi_int(handle, NULL, out, NULL)) - pr_warning(" write failed %x\n", mask); + asus_led_set(asus, led->method, led->wk); } -/* /sys/class/led handlers */ -#define ASUS_LED_HANDLER(object, mask) \ - static void object##_led_set(struct led_classdev *led_cdev, \ - enum led_brightness value) \ - { \ - object##_led_wk = (value > 0) ? 1 : 0; \ - queue_work(led_workqueue, &object##_led_work); \ - } \ - static void object##_led_update(struct work_struct *ignored) \ - { \ - int value = object##_led_wk; \ - write_status(object##_set_handle, value, (mask)); \ - } \ - static enum led_brightness object##_led_get( \ - struct led_classdev *led_cdev) \ - { \ - return led_cdev->brightness; \ - } - -ASUS_LED_HANDLER(mled, MLED_ON); -ASUS_LED_HANDLER(pled, PLED_ON); -ASUS_LED_HANDLER(rled, RLED_ON); -ASUS_LED_HANDLER(tled, TLED_ON); -ASUS_LED_HANDLER(gled, GLED_ON); +static enum led_brightness asus_led_cdev_get(struct led_classdev *led_cdev) +{ + return led_cdev->brightness; +} /* - * Keyboard backlight + * Keyboard backlight (also a LED) */ -static int get_kled_lvl(void) +static int asus_kled_lvl(struct asus_laptop *asus) { unsigned long long kblv; struct acpi_object_list params; @@ -468,75 +397,183 @@ static int get_kled_lvl(void) in_obj.type = ACPI_TYPE_INTEGER; in_obj.integer.value = 2; - rv = acpi_evaluate_integer(kled_get_handle, NULL, ¶ms, &kblv); + rv = acpi_evaluate_integer(asus->handle, METHOD_KBD_LIGHT_GET, + ¶ms, &kblv); if (ACPI_FAILURE(rv)) { pr_warning("Error reading kled level\n"); - return 0; + return -ENODEV; } return kblv; } -static int set_kled_lvl(int kblv) +static int asus_kled_set(struct asus_laptop *asus, int kblv) { if (kblv > 0) kblv = (1 << 7) | (kblv & 0x7F); else kblv = 0; - if (write_acpi_int(kled_set_handle, NULL, kblv, NULL)) { + if (write_acpi_int(asus->handle, METHOD_KBD_LIGHT_SET, kblv)) { pr_warning("Keyboard LED display write failed\n"); return -EINVAL; } return 0; } -static void kled_led_set(struct led_classdev *led_cdev, - enum led_brightness value) +static void asus_kled_cdev_set(struct led_classdev *led_cdev, + enum led_brightness value) { - kled_led_wk = value; - queue_work(led_workqueue, &kled_led_work); + struct asus_led *led = container_of(led_cdev, struct asus_led, led); + struct asus_laptop *asus = led->asus; + + led->wk = value; + queue_work(asus->led_workqueue, &led->work); } -static void kled_led_update(struct work_struct *ignored) +static void asus_kled_cdev_update(struct work_struct *work) { - set_kled_lvl(kled_led_wk); + struct asus_led *led = container_of(work, struct asus_led, work); + struct asus_laptop *asus = led->asus; + + asus_kled_set(asus, led->wk); } -static enum led_brightness kled_led_get(struct led_classdev *led_cdev) +static enum led_brightness asus_kled_cdev_get(struct led_classdev *led_cdev) { - return get_kled_lvl(); + struct asus_led *led = container_of(led_cdev, struct asus_led, led); + struct asus_laptop *asus = led->asus; + + return asus_kled_lvl(asus); } -static int get_lcd_state(void) +static void asus_led_exit(struct asus_laptop *asus) { - return read_status(LCD_ON); + if (asus->mled.led.dev) + 
led_classdev_unregister(&asus->mled.led); + if (asus->tled.led.dev) + led_classdev_unregister(&asus->tled.led); + if (asus->pled.led.dev) + led_classdev_unregister(&asus->pled.led); + if (asus->rled.led.dev) + led_classdev_unregister(&asus->rled.led); + if (asus->gled.led.dev) + led_classdev_unregister(&asus->gled.led); + if (asus->kled.led.dev) + led_classdev_unregister(&asus->kled.led); + if (asus->led_workqueue) { + destroy_workqueue(asus->led_workqueue); + asus->led_workqueue = NULL; + } } -static int set_lcd_state(int value) +/* Ugly macro, need to fix that later */ +static int asus_led_register(struct asus_laptop *asus, + struct asus_led *led, + const char *name, const char *method) +{ + struct led_classdev *led_cdev = &led->led; + + if (!method || acpi_check_handle(asus->handle, method, NULL)) + return 0; /* Led not present */ + + led->asus = asus; + led->method = method; + + INIT_WORK(&led->work, asus_led_cdev_update); + led_cdev->name = name; + led_cdev->brightness_set = asus_led_cdev_set; + led_cdev->brightness_get = asus_led_cdev_get; + led_cdev->max_brightness = 1; + return led_classdev_register(&asus->platform_device->dev, led_cdev); +} + +static int asus_led_init(struct asus_laptop *asus) +{ + int r; + + /* + * Functions that actually update the LED's are called from a + * workqueue. By doing this as separate work rather than when the LED + * subsystem asks, we avoid messing with the Asus ACPI stuff during a + * potentially bad time, such as a timer interrupt. + */ + asus->led_workqueue = create_singlethread_workqueue("led_workqueue"); + if (!asus->led_workqueue) + return -ENOMEM; + + r = asus_led_register(asus, &asus->mled, "asus::mail", METHOD_MLED); + if (r) + goto error; + r = asus_led_register(asus, &asus->tled, "asus::touchpad", METHOD_TLED); + if (r) + goto error; + r = asus_led_register(asus, &asus->rled, "asus::record", METHOD_RLED); + if (r) + goto error; + r = asus_led_register(asus, &asus->pled, "asus::phone", METHOD_PLED); + if (r) + goto error; + r = asus_led_register(asus, &asus->gled, "asus::gaming", METHOD_GLED); + if (r) + goto error; + if (!acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_SET, NULL) && + !acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_GET, NULL)) { + struct asus_led *led = &asus->kled; + struct led_classdev *cdev = &led->led; + + led->asus = asus; + + INIT_WORK(&led->work, asus_kled_cdev_update); + cdev->name = "asus::kbd_backlight"; + cdev->brightness_set = asus_kled_cdev_set; + cdev->brightness_get = asus_kled_cdev_get; + cdev->max_brightness = 3; + r = led_classdev_register(&asus->platform_device->dev, cdev); + } +error: + if (r) + asus_led_exit(asus); + return r; +} + +/* + * Backlight device + */ +static int asus_lcd_status(struct asus_laptop *asus) +{ + return asus->lcd_state; +} + +static int asus_lcd_set(struct asus_laptop *asus, int value) { int lcd = 0; acpi_status status = 0; - lcd = value ? 
1 : 0; + lcd = !!value; - if (lcd == get_lcd_state()) + if (lcd == asus_lcd_status(asus)) return 0; - if (lcd_switch_handle) { - status = acpi_evaluate_object(lcd_switch_handle, - NULL, NULL, NULL); + if (!lcd_switch_handle) + return -ENODEV; + + status = acpi_evaluate_object(lcd_switch_handle, + NULL, NULL, NULL); - if (ACPI_FAILURE(status)) - pr_warning("Error switching LCD\n"); + if (ACPI_FAILURE(status)) { + pr_warning("Error switching LCD\n"); + return -ENODEV; } - write_status(NULL, lcd, LCD_ON); + asus->lcd_state = lcd; return 0; } -static void lcd_blank(int blank) +static void lcd_blank(struct asus_laptop *asus, int blank) { - struct backlight_device *bd = asus_backlight_device; + struct backlight_device *bd = asus->backlight_device; + + asus->lcd_state = (blank == FB_BLANK_UNBLANK); if (bd) { bd->props.power = blank; @@ -544,44 +581,91 @@ static void lcd_blank(int blank) } } -static int read_brightness(struct backlight_device *bd) +static int asus_read_brightness(struct backlight_device *bd) { + struct asus_laptop *asus = bl_get_data(bd); unsigned long long value; acpi_status rv = AE_OK; - rv = acpi_evaluate_integer(brightness_get_handle, NULL, NULL, &value); + rv = acpi_evaluate_integer(asus->handle, METHOD_BRIGHTNESS_GET, + NULL, &value); if (ACPI_FAILURE(rv)) pr_warning("Error reading brightness\n"); return value; } -static int set_brightness(struct backlight_device *bd, int value) +static int asus_set_brightness(struct backlight_device *bd, int value) { - int ret = 0; - - value = (0 < value) ? ((15 < value) ? 15 : value) : 0; - /* 0 <= value <= 15 */ + struct asus_laptop *asus = bl_get_data(bd); - if (write_acpi_int(brightness_set_handle, NULL, value, NULL)) { + if (write_acpi_int(asus->handle, METHOD_BRIGHTNESS_SET, value)) { pr_warning("Error changing brightness\n"); - ret = -EIO; + return -EIO; } - - return ret; + return 0; } static int update_bl_status(struct backlight_device *bd) { + struct asus_laptop *asus = bl_get_data(bd); int rv; int value = bd->props.brightness; - rv = set_brightness(bd, value); + rv = asus_set_brightness(bd, value); if (rv) return rv; value = (bd->props.power == FB_BLANK_UNBLANK) ? 
1 : 0; - return set_lcd_state(value); + return asus_lcd_set(asus, value); +} + +static struct backlight_ops asusbl_ops = { + .get_brightness = asus_read_brightness, + .update_status = update_bl_status, +}; + +static int asus_backlight_notify(struct asus_laptop *asus) +{ + struct backlight_device *bd = asus->backlight_device; + int old = bd->props.brightness; + + backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY); + + return old; +} + +static int asus_backlight_init(struct asus_laptop *asus) +{ + struct backlight_device *bd; + struct device *dev = &asus->platform_device->dev; + + if (!acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_GET, NULL) && + !acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_SET, NULL) && + lcd_switch_handle) { + bd = backlight_device_register(ASUS_LAPTOP_FILE, dev, + asus, &asusbl_ops); + if (IS_ERR(bd)) { + pr_err("Could not register asus backlight device\n"); + asus->backlight_device = NULL; + return PTR_ERR(bd); + } + + asus->backlight_device = bd; + + bd->props.max_brightness = 15; + bd->props.power = FB_BLANK_UNBLANK; + bd->props.brightness = asus_read_brightness(bd); + backlight_update_status(bd); + } + return 0; +} + +static void asus_backlight_exit(struct asus_laptop *asus) +{ + if (asus->backlight_device) + backlight_device_unregister(asus->backlight_device); + asus->backlight_device = NULL; } /* @@ -596,25 +680,26 @@ static int update_bl_status(struct backlight_device *bd) static ssize_t show_infos(struct device *dev, struct device_attribute *attr, char *page) { + struct asus_laptop *asus = dev_get_drvdata(dev); int len = 0; unsigned long long temp; char buf[16]; /* enough for all info */ acpi_status rv = AE_OK; /* - * We use the easy way, we don't care of off and count, so we don't set eof - * to 1 + * We use the easy way, we don't care of off and count, + * so we don't set eof to 1 */ - len += sprintf(page, ASUS_HOTK_NAME " " ASUS_LAPTOP_VERSION "\n"); - len += sprintf(page + len, "Model reference : %s\n", hotk->name); + len += sprintf(page, ASUS_LAPTOP_NAME " " ASUS_LAPTOP_VERSION "\n"); + len += sprintf(page + len, "Model reference : %s\n", asus->name); /* * The SFUN method probably allows the original driver to get the list * of features supported by a given model. For now, 0x0100 or 0x0800 * bit signifies that the laptop is equipped with a Wi-Fi MiniPCI card. * The significance of others is yet to be found. */ - rv = acpi_evaluate_integer(hotk->handle, "SFUN", NULL, &temp); + rv = acpi_evaluate_integer(asus->handle, "SFUN", NULL, &temp); if (!ACPI_FAILURE(rv)) len += sprintf(page + len, "SFUN value : %#x\n", (uint) temp); @@ -624,7 +709,7 @@ static ssize_t show_infos(struct device *dev, * The significance of others is yet to be found. * If we don't find the method, we assume the device are present. */ - rv = acpi_evaluate_integer(hotk->handle, "HRWS", NULL, &temp); + rv = acpi_evaluate_integer(asus->handle, "HRWS", NULL, &temp); if (!ACPI_FAILURE(rv)) len += sprintf(page + len, "HRWS value : %#x\n", (uint) temp); @@ -635,26 +720,26 @@ static ssize_t show_infos(struct device *dev, * Note: since not all the laptops provide this method, errors are * silently ignored. 
*/ - rv = acpi_evaluate_integer(hotk->handle, "ASYM", NULL, &temp); + rv = acpi_evaluate_integer(asus->handle, "ASYM", NULL, &temp); if (!ACPI_FAILURE(rv)) len += sprintf(page + len, "ASYM value : %#x\n", (uint) temp); - if (asus_info) { - snprintf(buf, 16, "%d", asus_info->length); + if (asus->dsdt_info) { + snprintf(buf, 16, "%d", asus->dsdt_info->length); len += sprintf(page + len, "DSDT length : %s\n", buf); - snprintf(buf, 16, "%d", asus_info->checksum); + snprintf(buf, 16, "%d", asus->dsdt_info->checksum); len += sprintf(page + len, "DSDT checksum : %s\n", buf); - snprintf(buf, 16, "%d", asus_info->revision); + snprintf(buf, 16, "%d", asus->dsdt_info->revision); len += sprintf(page + len, "DSDT revision : %s\n", buf); - snprintf(buf, 7, "%s", asus_info->oem_id); + snprintf(buf, 7, "%s", asus->dsdt_info->oem_id); len += sprintf(page + len, "OEM id : %s\n", buf); - snprintf(buf, 9, "%s", asus_info->oem_table_id); + snprintf(buf, 9, "%s", asus->dsdt_info->oem_table_id); len += sprintf(page + len, "OEM table id : %s\n", buf); - snprintf(buf, 16, "%x", asus_info->oem_revision); + snprintf(buf, 16, "%x", asus->dsdt_info->oem_revision); len += sprintf(page + len, "OEM revision : 0x%s\n", buf); - snprintf(buf, 5, "%s", asus_info->asl_compiler_id); + snprintf(buf, 5, "%s", asus->dsdt_info->asl_compiler_id); len += sprintf(page + len, "ASL comp vendor id : %s\n", buf); - snprintf(buf, 16, "%x", asus_info->asl_compiler_revision); + snprintf(buf, 16, "%x", asus->dsdt_info->asl_compiler_revision); len += sprintf(page + len, "ASL comp revision : 0x%s\n", buf); } @@ -672,8 +757,9 @@ static int parse_arg(const char *buf, unsigned long count, int *val) return count; } -static ssize_t store_status(const char *buf, size_t count, - acpi_handle handle, int mask) +static ssize_t sysfs_acpi_set(struct asus_laptop *asus, + const char *buf, size_t count, + const char *method) { int rv, value; int out = 0; @@ -682,8 +768,8 @@ static ssize_t store_status(const char *buf, size_t count, if (rv > 0) out = value ? 1 : 0; - write_status(handle, out, mask); - + if (write_acpi_int(asus->handle, method, value)) + return -ENODEV; return rv; } @@ -693,67 +779,116 @@ static ssize_t store_status(const char *buf, size_t count, static ssize_t show_ledd(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "0x%08x\n", hotk->ledd_status); + struct asus_laptop *asus = dev_get_drvdata(dev); + + return sprintf(buf, "0x%08x\n", asus->ledd_status); } static ssize_t store_ledd(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct asus_laptop *asus = dev_get_drvdata(dev); int rv, value; rv = parse_arg(buf, count, &value); if (rv > 0) { - if (write_acpi_int(ledd_set_handle, NULL, value, NULL)) + if (write_acpi_int(asus->handle, METHOD_LEDD, value)) pr_warning("LED display write failed\n"); else - hotk->ledd_status = (u32) value; + asus->ledd_status = (u32) value; } return rv; } /* + * Wireless + */ +static int asus_wireless_status(struct asus_laptop *asus, int mask) +{ + unsigned long long status; + acpi_status rv = AE_OK; + + if (!asus->have_rsts) + return (asus->wireless_status & mask) ? 
1 : 0; + + rv = acpi_evaluate_integer(asus->handle, METHOD_WL_STATUS, + NULL, &status); + if (ACPI_FAILURE(rv)) { + pr_warning("Error reading Wireless status\n"); + return -EINVAL; + } + return !!(status & mask); +} + +/* * WLAN */ +static int asus_wlan_set(struct asus_laptop *asus, int status) +{ + if (write_acpi_int(asus->handle, METHOD_WLAN, !!status)) { + pr_warning("Error setting wlan status to %d", status); + return -EIO; + } + return 0; +} + static ssize_t show_wlan(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%d\n", read_status(WL_ON)); + struct asus_laptop *asus = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", asus_wireless_status(asus, WL_RSTS)); } static ssize_t store_wlan(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - return store_status(buf, count, wl_switch_handle, WL_ON); + struct asus_laptop *asus = dev_get_drvdata(dev); + + return sysfs_acpi_set(asus, buf, count, METHOD_WLAN); } /* * Bluetooth */ +static int asus_bluetooth_set(struct asus_laptop *asus, int status) +{ + if (write_acpi_int(asus->handle, METHOD_BLUETOOTH, !!status)) { + pr_warning("Error setting bluetooth status to %d", status); + return -EIO; + } + return 0; +} + static ssize_t show_bluetooth(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%d\n", read_status(BT_ON)); + struct asus_laptop *asus = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", asus_wireless_status(asus, BT_RSTS)); } static ssize_t store_bluetooth(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - return store_status(buf, count, bt_switch_handle, BT_ON); + struct asus_laptop *asus = dev_get_drvdata(dev); + + return sysfs_acpi_set(asus, buf, count, METHOD_BLUETOOTH); } /* * Display */ -static void set_display(int value) +static void asus_set_display(struct asus_laptop *asus, int value) { /* no sanity check needed for now */ - if (write_acpi_int(display_set_handle, NULL, value, NULL)) + if (write_acpi_int(asus->handle, METHOD_SWITCH_DISPLAY, value)) pr_warning("Error setting display\n"); return; } -static int read_display(void) +static int read_display(struct asus_laptop *asus) { unsigned long long value = 0; acpi_status rv = AE_OK; @@ -769,7 +904,7 @@ static int read_display(void) pr_warning("Error reading display status\n"); } - value &= 0x0F; /* needed for some models, shouldn't hurt others */ + value &= 0x0F; /* needed for some models, shouldn't hurt others */ return value; } @@ -781,7 +916,11 @@ static int read_display(void) static ssize_t show_disp(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%d\n", read_display()); + struct asus_laptop *asus = dev_get_drvdata(dev); + + if (!display_get_handle) + return -ENODEV; + return sprintf(buf, "%d\n", read_display(asus)); } /* @@ -794,65 +933,72 @@ static ssize_t show_disp(struct device *dev, static ssize_t store_disp(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct asus_laptop *asus = dev_get_drvdata(dev); int rv, value; rv = parse_arg(buf, count, &value); if (rv > 0) - set_display(value); + asus_set_display(asus, value); return rv; } /* * Light Sens */ -static void set_light_sens_switch(int value) +static void asus_als_switch(struct asus_laptop *asus, int value) { - if (write_acpi_int(ls_switch_handle, NULL, value, NULL)) + if (write_acpi_int(asus->handle, METHOD_ALS_CONTROL, value)) pr_warning("Error setting light sensor switch\n"); - 
hotk->light_switch = value; + asus->light_switch = value; } static ssize_t show_lssw(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%d\n", hotk->light_switch); + struct asus_laptop *asus = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", asus->light_switch); } static ssize_t store_lssw(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct asus_laptop *asus = dev_get_drvdata(dev); int rv, value; rv = parse_arg(buf, count, &value); if (rv > 0) - set_light_sens_switch(value ? 1 : 0); + asus_als_switch(asus, value ? 1 : 0); return rv; } -static void set_light_sens_level(int value) +static void asus_als_level(struct asus_laptop *asus, int value) { - if (write_acpi_int(ls_level_handle, NULL, value, NULL)) + if (write_acpi_int(asus->handle, METHOD_ALS_LEVEL, value)) pr_warning("Error setting light sensor level\n"); - hotk->light_level = value; + asus->light_level = value; } static ssize_t show_lslvl(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%d\n", hotk->light_level); + struct asus_laptop *asus = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", asus->light_level); } static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct asus_laptop *asus = dev_get_drvdata(dev); int rv, value; rv = parse_arg(buf, count, &value); if (rv > 0) { value = (0 < value) ? ((15 < value) ? 15 : value) : 0; /* 0 <= value <= 15 */ - set_light_sens_level(value); + asus_als_level(asus, value); } return rv; @@ -861,197 +1007,309 @@ static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr, /* * GPS */ +static int asus_gps_status(struct asus_laptop *asus) +{ + unsigned long long status; + acpi_status rv = AE_OK; + + rv = acpi_evaluate_integer(asus->handle, METHOD_GPS_STATUS, + NULL, &status); + if (ACPI_FAILURE(rv)) { + pr_warning("Error reading GPS status\n"); + return -ENODEV; + } + return !!status; +} + +static int asus_gps_switch(struct asus_laptop *asus, int status) +{ + const char *meth = status ? 
METHOD_GPS_ON : METHOD_GPS_OFF; + + if (write_acpi_int(asus->handle, meth, 0x02)) + return -ENODEV; + return 0; +} + static ssize_t show_gps(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%d\n", read_status(GPS_ON)); + struct asus_laptop *asus = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", asus_gps_status(asus)); } static ssize_t store_gps(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - return store_status(buf, count, NULL, GPS_ON); + struct asus_laptop *asus = dev_get_drvdata(dev); + int rv, value; + int ret; + + rv = parse_arg(buf, count, &value); + if (rv <= 0) + return -EINVAL; + ret = asus_gps_switch(asus, !!value); + if (ret) + return ret; + rfkill_set_sw_state(asus->gps_rfkill, !value); + return rv; } /* - * Hotkey functions + * rfkill */ -static struct key_entry *asus_get_entry_by_scancode(int code) +static int asus_gps_rfkill_set(void *data, bool blocked) { - struct key_entry *key; - - for (key = asus_keymap; key->type != KE_END; key++) - if (code == key->code) - return key; + acpi_handle handle = data; - return NULL; + return asus_gps_switch(handle, !blocked); } -static struct key_entry *asus_get_entry_by_keycode(int code) -{ - struct key_entry *key; - - for (key = asus_keymap; key->type != KE_END; key++) - if (code == key->keycode && key->type == KE_KEY) - return key; +static const struct rfkill_ops asus_gps_rfkill_ops = { + .set_block = asus_gps_rfkill_set, +}; - return NULL; +static void asus_rfkill_exit(struct asus_laptop *asus) +{ + if (asus->gps_rfkill) { + rfkill_unregister(asus->gps_rfkill); + rfkill_destroy(asus->gps_rfkill); + asus->gps_rfkill = NULL; + } } -static int asus_getkeycode(struct input_dev *dev, int scancode, int *keycode) +static int asus_rfkill_init(struct asus_laptop *asus) { - struct key_entry *key = asus_get_entry_by_scancode(scancode); + int result; - if (key && key->type == KE_KEY) { - *keycode = key->keycode; + if (acpi_check_handle(asus->handle, METHOD_GPS_ON, NULL) || + acpi_check_handle(asus->handle, METHOD_GPS_OFF, NULL) || + acpi_check_handle(asus->handle, METHOD_GPS_STATUS, NULL)) return 0; + + asus->gps_rfkill = rfkill_alloc("asus-gps", &asus->platform_device->dev, + RFKILL_TYPE_GPS, + &asus_gps_rfkill_ops, NULL); + if (!asus->gps_rfkill) + return -EINVAL; + + result = rfkill_register(asus->gps_rfkill); + if (result) { + rfkill_destroy(asus->gps_rfkill); + asus->gps_rfkill = NULL; } - return -EINVAL; + return result; } -static int asus_setkeycode(struct input_dev *dev, int scancode, int keycode) +/* + * Input device (i.e. 
hotkeys) + */ +static void asus_input_notify(struct asus_laptop *asus, int event) { - struct key_entry *key; - int old_keycode; + if (asus->inputdev) + sparse_keymap_report_event(asus->inputdev, event, 1, true); +} - if (keycode < 0 || keycode > KEY_MAX) - return -EINVAL; +static int asus_input_init(struct asus_laptop *asus) +{ + struct input_dev *input; + int error; - key = asus_get_entry_by_scancode(scancode); - if (key && key->type == KE_KEY) { - old_keycode = key->keycode; - key->keycode = keycode; - set_bit(keycode, dev->keybit); - if (!asus_get_entry_by_keycode(old_keycode)) - clear_bit(old_keycode, dev->keybit); + input = input_allocate_device(); + if (!input) { + pr_info("Unable to allocate input device\n"); return 0; } + input->name = "Asus Laptop extra buttons"; + input->phys = ASUS_LAPTOP_FILE "/input0"; + input->id.bustype = BUS_HOST; + input->dev.parent = &asus->platform_device->dev; + input_set_drvdata(input, asus); + + error = sparse_keymap_setup(input, asus_keymap, NULL); + if (error) { + pr_err("Unable to setup input device keymap\n"); + goto err_keymap; + } + error = input_register_device(input); + if (error) { + pr_info("Unable to register input device\n"); + goto err_device; + } + + asus->inputdev = input; + return 0; - return -EINVAL; +err_keymap: + sparse_keymap_free(input); +err_device: + input_free_device(input); + return error; } -static void asus_hotk_notify(struct acpi_device *device, u32 event) +static void asus_input_exit(struct asus_laptop *asus) { - static struct key_entry *key; - u16 count; + if (asus->inputdev) { + sparse_keymap_free(asus->inputdev); + input_unregister_device(asus->inputdev); + } +} - /* TODO Find a better way to handle events count. */ - if (!hotk) - return; +/* + * ACPI driver + */ +static void asus_acpi_notify(struct acpi_device *device, u32 event) +{ + struct asus_laptop *asus = acpi_driver_data(device); + u16 count; /* * We need to tell the backlight device when the backlight power is * switched */ - if (event == ATKD_LCD_ON) { - write_status(NULL, 1, LCD_ON); - lcd_blank(FB_BLANK_UNBLANK); - } else if (event == ATKD_LCD_OFF) { - write_status(NULL, 0, LCD_ON); - lcd_blank(FB_BLANK_POWERDOWN); - } + if (event == ATKD_LCD_ON) + lcd_blank(asus, FB_BLANK_UNBLANK); + else if (event == ATKD_LCD_OFF) + lcd_blank(asus, FB_BLANK_POWERDOWN); - count = hotk->event_count[event % 128]++; - acpi_bus_generate_proc_event(hotk->device, event, count); - acpi_bus_generate_netlink_event(hotk->device->pnp.device_class, - dev_name(&hotk->device->dev), event, + /* TODO Find a better way to handle events count. */ + count = asus->event_count[event % 128]++; + acpi_bus_generate_proc_event(asus->device, event, count); + acpi_bus_generate_netlink_event(asus->device->pnp.device_class, + dev_name(&asus->device->dev), event, count); - if (hotk->inputdev) { - key = asus_get_entry_by_scancode(event); - if (!key) - return ; - - switch (key->type) { - case KE_KEY: - input_report_key(hotk->inputdev, key->keycode, 1); - input_sync(hotk->inputdev); - input_report_key(hotk->inputdev, key->keycode, 0); - input_sync(hotk->inputdev); - break; + /* Brightness events are special */ + if (event >= ATKD_BR_MIN && event <= ATKD_BR_MAX) { + + /* Ignore them completely if the acpi video driver is used */ + if (asus->backlight_device != NULL) { + /* Update the backlight device. 
*/ + asus_backlight_notify(asus); } + return ; } + asus_input_notify(asus, event); } -#define ASUS_CREATE_DEVICE_ATTR(_name) \ - struct device_attribute dev_attr_##_name = { \ - .attr = { \ - .name = __stringify(_name), \ - .mode = 0 }, \ - .show = NULL, \ - .store = NULL, \ +static DEVICE_ATTR(infos, S_IRUGO, show_infos, NULL); +static DEVICE_ATTR(wlan, S_IRUGO | S_IWUSR, show_wlan, store_wlan); +static DEVICE_ATTR(bluetooth, S_IRUGO | S_IWUSR, show_bluetooth, + store_bluetooth); +static DEVICE_ATTR(display, S_IRUGO | S_IWUSR, show_disp, store_disp); +static DEVICE_ATTR(ledd, S_IRUGO | S_IWUSR, show_ledd, store_ledd); +static DEVICE_ATTR(ls_level, S_IRUGO | S_IWUSR, show_lslvl, store_lslvl); +static DEVICE_ATTR(ls_switch, S_IRUGO | S_IWUSR, show_lssw, store_lssw); +static DEVICE_ATTR(gps, S_IRUGO | S_IWUSR, show_gps, store_gps); + +static void asus_sysfs_exit(struct asus_laptop *asus) +{ + struct platform_device *device = asus->platform_device; + + device_remove_file(&device->dev, &dev_attr_infos); + device_remove_file(&device->dev, &dev_attr_wlan); + device_remove_file(&device->dev, &dev_attr_bluetooth); + device_remove_file(&device->dev, &dev_attr_display); + device_remove_file(&device->dev, &dev_attr_ledd); + device_remove_file(&device->dev, &dev_attr_ls_switch); + device_remove_file(&device->dev, &dev_attr_ls_level); + device_remove_file(&device->dev, &dev_attr_gps); +} + +static int asus_sysfs_init(struct asus_laptop *asus) +{ + struct platform_device *device = asus->platform_device; + int err; + + err = device_create_file(&device->dev, &dev_attr_infos); + if (err) + return err; + + if (!acpi_check_handle(asus->handle, METHOD_WLAN, NULL)) { + err = device_create_file(&device->dev, &dev_attr_wlan); + if (err) + return err; } -#define ASUS_SET_DEVICE_ATTR(_name, _mode, _show, _store) \ - do { \ - dev_attr_##_name.attr.mode = _mode; \ - dev_attr_##_name.show = _show; \ - dev_attr_##_name.store = _store; \ - } while(0) - -static ASUS_CREATE_DEVICE_ATTR(infos); -static ASUS_CREATE_DEVICE_ATTR(wlan); -static ASUS_CREATE_DEVICE_ATTR(bluetooth); -static ASUS_CREATE_DEVICE_ATTR(display); -static ASUS_CREATE_DEVICE_ATTR(ledd); -static ASUS_CREATE_DEVICE_ATTR(ls_switch); -static ASUS_CREATE_DEVICE_ATTR(ls_level); -static ASUS_CREATE_DEVICE_ATTR(gps); - -static struct attribute *asuspf_attributes[] = { - &dev_attr_infos.attr, - &dev_attr_wlan.attr, - &dev_attr_bluetooth.attr, - &dev_attr_display.attr, - &dev_attr_ledd.attr, - &dev_attr_ls_switch.attr, - &dev_attr_ls_level.attr, - &dev_attr_gps.attr, - NULL -}; + if (!acpi_check_handle(asus->handle, METHOD_BLUETOOTH, NULL)) { + err = device_create_file(&device->dev, &dev_attr_bluetooth); + if (err) + return err; + } -static struct attribute_group asuspf_attribute_group = { - .attrs = asuspf_attributes -}; + if (!acpi_check_handle(asus->handle, METHOD_SWITCH_DISPLAY, NULL)) { + err = device_create_file(&device->dev, &dev_attr_display); + if (err) + return err; + } -static struct platform_driver asuspf_driver = { - .driver = { - .name = ASUS_HOTK_FILE, - .owner = THIS_MODULE, - } -}; + if (!acpi_check_handle(asus->handle, METHOD_LEDD, NULL)) { + err = device_create_file(&device->dev, &dev_attr_ledd); + if (err) + return err; + } -static struct platform_device *asuspf_device; + if (!acpi_check_handle(asus->handle, METHOD_ALS_CONTROL, NULL) && + !acpi_check_handle(asus->handle, METHOD_ALS_LEVEL, NULL)) { + err = device_create_file(&device->dev, &dev_attr_ls_switch); + if (err) + return err; + err = device_create_file(&device->dev, 
&dev_attr_ls_level); + if (err) + return err; + } -static void asus_hotk_add_fs(void) -{ - ASUS_SET_DEVICE_ATTR(infos, 0444, show_infos, NULL); + if (!acpi_check_handle(asus->handle, METHOD_GPS_ON, NULL) && + !acpi_check_handle(asus->handle, METHOD_GPS_OFF, NULL) && + !acpi_check_handle(asus->handle, METHOD_GPS_STATUS, NULL)) { + err = device_create_file(&device->dev, &dev_attr_gps); + if (err) + return err; + } - if (wl_switch_handle) - ASUS_SET_DEVICE_ATTR(wlan, 0644, show_wlan, store_wlan); + return err; +} + +static int asus_platform_init(struct asus_laptop *asus) +{ + int err; - if (bt_switch_handle) - ASUS_SET_DEVICE_ATTR(bluetooth, 0644, - show_bluetooth, store_bluetooth); + asus->platform_device = platform_device_alloc(ASUS_LAPTOP_FILE, -1); + if (!asus->platform_device) + return -ENOMEM; + platform_set_drvdata(asus->platform_device, asus); - if (display_set_handle && display_get_handle) - ASUS_SET_DEVICE_ATTR(display, 0644, show_disp, store_disp); - else if (display_set_handle) - ASUS_SET_DEVICE_ATTR(display, 0200, NULL, store_disp); + err = platform_device_add(asus->platform_device); + if (err) + goto fail_platform_device; - if (ledd_set_handle) - ASUS_SET_DEVICE_ATTR(ledd, 0644, show_ledd, store_ledd); + err = asus_sysfs_init(asus); + if (err) + goto fail_sysfs; + return 0; - if (ls_switch_handle && ls_level_handle) { - ASUS_SET_DEVICE_ATTR(ls_level, 0644, show_lslvl, store_lslvl); - ASUS_SET_DEVICE_ATTR(ls_switch, 0644, show_lssw, store_lssw); - } +fail_sysfs: + asus_sysfs_exit(asus); + platform_device_del(asus->platform_device); +fail_platform_device: + platform_device_put(asus->platform_device); + return err; +} - if (gps_status_handle && gps_on_handle && gps_off_handle) - ASUS_SET_DEVICE_ATTR(gps, 0644, show_gps, store_gps); +static void asus_platform_exit(struct asus_laptop *asus) +{ + asus_sysfs_exit(asus); + platform_device_unregister(asus->platform_device); } +static struct platform_driver platform_driver = { + .driver = { + .name = ASUS_LAPTOP_FILE, + .owner = THIS_MODULE, + } +}; + static int asus_handle_init(char *name, acpi_handle * handle, char **paths, int num_paths) { @@ -1073,10 +1331,11 @@ static int asus_handle_init(char *name, acpi_handle * handle, ARRAY_SIZE(object##_paths)) /* - * This function is used to initialize the hotk with right values. In this - * method, we can make all the detection we want, and modify the hotk struct + * This function is used to initialize the context with right values. In this + * method, we can make all the detection we want, and modify the asus_laptop + * struct */ -static int asus_hotk_get_info(void) +static int asus_laptop_get_info(struct asus_laptop *asus) { struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *model = NULL; @@ -1089,22 +1348,21 @@ static int asus_hotk_get_info(void) * models, but late enough to allow acpi_bus_register_driver() to fail * before doing anything ACPI-specific. Should we encounter a machine, * which needs special handling (i.e. its hotkey device has a different - * HID), this bit will be moved. A global variable asus_info contains - * the DSDT header. + * HID), this bit will be moved. 
*/ - status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info); + status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus->dsdt_info); if (ACPI_FAILURE(status)) pr_warning("Couldn't get the DSDT table header\n"); /* We have to write 0 on init this far for all ASUS models */ - if (write_acpi_int(hotk->handle, "INIT", 0, &buffer)) { + if (write_acpi_int_ret(asus->handle, "INIT", 0, &buffer)) { pr_err("Hotkey initialization failed\n"); return -ENODEV; } /* This needs to be called for some laptops to init properly */ status = - acpi_evaluate_integer(hotk->handle, "BSTS", NULL, &bsts_result); + acpi_evaluate_integer(asus->handle, "BSTS", NULL, &bsts_result); if (ACPI_FAILURE(status)) pr_warning("Error calling BSTS\n"); else if (bsts_result) @@ -1112,8 +1370,8 @@ static int asus_hotk_get_info(void) (uint) bsts_result); /* This too ... */ - write_acpi_int(hotk->handle, "CWAP", wapf, NULL); - + if (write_acpi_int(asus->handle, "CWAP", wapf)) + pr_err("Error calling CWAP(%d)\n", wapf); /* * Try to match the object returned by INIT to the specific model. * Handle every possible object (or the lack of thereof) the DSDT @@ -1134,397 +1392,210 @@ static int asus_hotk_get_info(void) break; } } - hotk->name = kstrdup(string, GFP_KERNEL); - if (!hotk->name) + asus->name = kstrdup(string, GFP_KERNEL); + if (!asus->name) return -ENOMEM; if (*string) pr_notice(" %s model detected\n", string); - ASUS_HANDLE_INIT(mled_set); - ASUS_HANDLE_INIT(tled_set); - ASUS_HANDLE_INIT(rled_set); - ASUS_HANDLE_INIT(pled_set); - ASUS_HANDLE_INIT(gled_set); - - ASUS_HANDLE_INIT(ledd_set); - - ASUS_HANDLE_INIT(kled_set); - ASUS_HANDLE_INIT(kled_get); - /* * The HWRS method return informations about the hardware. * 0x80 bit is for WLAN, 0x100 for Bluetooth. * The significance of others is yet to be found. - * If we don't find the method, we assume the device are present. */ status = - acpi_evaluate_integer(hotk->handle, "HRWS", NULL, &hwrs_result); - if (ACPI_FAILURE(status)) - hwrs_result = WL_HWRS | BT_HWRS; - - if (hwrs_result & WL_HWRS) - ASUS_HANDLE_INIT(wl_switch); - if (hwrs_result & BT_HWRS) - ASUS_HANDLE_INIT(bt_switch); - - ASUS_HANDLE_INIT(wireless_status); + acpi_evaluate_integer(asus->handle, "HRWS", NULL, &hwrs_result); + if (!ACPI_FAILURE(status)) + pr_notice(" HRWS returned %x", (int)hwrs_result); - ASUS_HANDLE_INIT(brightness_set); - ASUS_HANDLE_INIT(brightness_get); + if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL)) + asus->have_rsts = true; + /* Scheduled for removal */ ASUS_HANDLE_INIT(lcd_switch); - - ASUS_HANDLE_INIT(display_set); ASUS_HANDLE_INIT(display_get); - /* - * There is a lot of models with "ALSL", but a few get - * a real light sens, so we need to check it. 
- */ - if (!ASUS_HANDLE_INIT(ls_switch)) - ASUS_HANDLE_INIT(ls_level); - - ASUS_HANDLE_INIT(gps_on); - ASUS_HANDLE_INIT(gps_off); - ASUS_HANDLE_INIT(gps_status); - kfree(model); return AE_OK; } -static int asus_input_init(void) -{ - const struct key_entry *key; - int result; +static bool asus_device_present; - hotk->inputdev = input_allocate_device(); - if (!hotk->inputdev) { - pr_info("Unable to allocate input device\n"); - return 0; - } - hotk->inputdev->name = "Asus Laptop extra buttons"; - hotk->inputdev->phys = ASUS_HOTK_FILE "/input0"; - hotk->inputdev->id.bustype = BUS_HOST; - hotk->inputdev->getkeycode = asus_getkeycode; - hotk->inputdev->setkeycode = asus_setkeycode; - - for (key = asus_keymap; key->type != KE_END; key++) { - switch (key->type) { - case KE_KEY: - set_bit(EV_KEY, hotk->inputdev->evbit); - set_bit(key->keycode, hotk->inputdev->keybit); - break; - } - } - result = input_register_device(hotk->inputdev); - if (result) { - pr_info("Unable to register input device\n"); - input_free_device(hotk->inputdev); - } - return result; -} - -static int asus_hotk_check(void) +static int __devinit asus_acpi_init(struct asus_laptop *asus) { int result = 0; - result = acpi_bus_get_status(hotk->device); + result = acpi_bus_get_status(asus->device); if (result) return result; - - if (hotk->device->status.present) { - result = asus_hotk_get_info(); - } else { + if (!asus->device->status.present) { pr_err("Hotkey device not present, aborting\n"); - return -EINVAL; + return -ENODEV; } - return result; -} - -static int asus_hotk_found; - -static int asus_hotk_add(struct acpi_device *device) -{ - int result; - - pr_notice("Asus Laptop Support version %s\n", - ASUS_LAPTOP_VERSION); - - hotk = kzalloc(sizeof(struct asus_hotk), GFP_KERNEL); - if (!hotk) - return -ENOMEM; - - hotk->handle = device->handle; - strcpy(acpi_device_name(device), ASUS_HOTK_DEVICE_NAME); - strcpy(acpi_device_class(device), ASUS_HOTK_CLASS); - device->driver_data = hotk; - hotk->device = device; - - result = asus_hotk_check(); + result = asus_laptop_get_info(asus); if (result) - goto end; - - asus_hotk_add_fs(); - - asus_hotk_found = 1; + return result; /* WLED and BLED are on by default */ - write_status(bt_switch_handle, 1, BT_ON); - write_status(wl_switch_handle, 1, WL_ON); - - /* If the h/w switch is off, we need to check the real status */ - write_status(NULL, read_status(BT_ON), BT_ON); - write_status(NULL, read_status(WL_ON), WL_ON); + if (bluetooth_status >= 0) + asus_bluetooth_set(asus, !!bluetooth_status); - /* LCD Backlight is on by default */ - write_status(NULL, 1, LCD_ON); + if (wlan_status >= 0) + asus_wlan_set(asus, !!wlan_status); /* Keyboard Backlight is on by default */ - if (kled_set_handle) - set_kled_lvl(1); + if (!acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_SET, NULL)) + asus_kled_set(asus, 1); /* LED display is off by default */ - hotk->ledd_status = 0xFFF; + asus->ledd_status = 0xFFF; /* Set initial values of light sensor and level */ - hotk->light_switch = 0; /* Default to light sensor disabled */ - hotk->light_level = 5; /* level 5 for sensor sensitivity */ + asus->light_switch = 0; /* Default to light sensor disabled */ + asus->light_level = 5; /* level 5 for sensor sensitivity */ - if (ls_switch_handle) - set_light_sens_switch(hotk->light_switch); - - if (ls_level_handle) - set_light_sens_level(hotk->light_level); - - /* GPS is on by default */ - write_status(NULL, 1, GPS_ON); - -end: - if (result) { - kfree(hotk->name); - kfree(hotk); + if (!acpi_check_handle(asus->handle, 
METHOD_ALS_CONTROL, NULL) && + !acpi_check_handle(asus->handle, METHOD_ALS_LEVEL, NULL)) { + asus_als_switch(asus, asus->light_switch); + asus_als_level(asus, asus->light_level); } + asus->lcd_state = 1; /* LCD should be on when the module load */ return result; } -static int asus_hotk_remove(struct acpi_device *device, int type) -{ - kfree(hotk->name); - kfree(hotk); - - return 0; -} - -static void asus_backlight_exit(void) +static int __devinit asus_acpi_add(struct acpi_device *device) { - if (asus_backlight_device) - backlight_device_unregister(asus_backlight_device); -} - -#define ASUS_LED_UNREGISTER(object) \ - if (object##_led.dev) \ - led_classdev_unregister(&object##_led) + struct asus_laptop *asus; + int result; -static void asus_led_exit(void) -{ - destroy_workqueue(led_workqueue); - ASUS_LED_UNREGISTER(mled); - ASUS_LED_UNREGISTER(tled); - ASUS_LED_UNREGISTER(pled); - ASUS_LED_UNREGISTER(rled); - ASUS_LED_UNREGISTER(gled); - ASUS_LED_UNREGISTER(kled); -} + pr_notice("Asus Laptop Support version %s\n", + ASUS_LAPTOP_VERSION); + asus = kzalloc(sizeof(struct asus_laptop), GFP_KERNEL); + if (!asus) + return -ENOMEM; + asus->handle = device->handle; + strcpy(acpi_device_name(device), ASUS_LAPTOP_DEVICE_NAME); + strcpy(acpi_device_class(device), ASUS_LAPTOP_CLASS); + device->driver_data = asus; + asus->device = device; -static void asus_input_exit(void) -{ - if (hotk->inputdev) - input_unregister_device(hotk->inputdev); -} + result = asus_acpi_init(asus); + if (result) + goto fail_platform; -static void __exit asus_laptop_exit(void) -{ - asus_backlight_exit(); - asus_led_exit(); - asus_input_exit(); + /* + * Register the platform device first. It is used as a parent for the + * sub-devices below. + */ + result = asus_platform_init(asus); + if (result) + goto fail_platform; - acpi_bus_unregister_driver(&asus_hotk_driver); - sysfs_remove_group(&asuspf_device->dev.kobj, &asuspf_attribute_group); - platform_device_unregister(asuspf_device); - platform_driver_unregister(&asuspf_driver); -} + if (!acpi_video_backlight_support()) { + result = asus_backlight_init(asus); + if (result) + goto fail_backlight; + } else + pr_info("Backlight controlled by ACPI video driver\n"); -static int asus_backlight_init(struct device *dev) -{ - struct backlight_device *bd; + result = asus_input_init(asus); + if (result) + goto fail_input; - if (brightness_set_handle && lcd_switch_handle) { - bd = backlight_device_register(ASUS_HOTK_FILE, dev, - NULL, &asusbl_ops); - if (IS_ERR(bd)) { - pr_err("Could not register asus backlight device\n"); - asus_backlight_device = NULL; - return PTR_ERR(bd); - } + result = asus_led_init(asus); + if (result) + goto fail_led; - asus_backlight_device = bd; + result = asus_rfkill_init(asus); + if (result) + goto fail_rfkill; - bd->props.max_brightness = 15; - bd->props.brightness = read_brightness(NULL); - bd->props.power = FB_BLANK_UNBLANK; - backlight_update_status(bd); - } + asus_device_present = true; return 0; -} -static int asus_led_register(acpi_handle handle, - struct led_classdev *ldev, struct device *dev) -{ - if (!handle) - return 0; +fail_rfkill: + asus_led_exit(asus); +fail_led: + asus_input_exit(asus); +fail_input: + asus_backlight_exit(asus); +fail_backlight: + asus_platform_exit(asus); +fail_platform: + kfree(asus->name); + kfree(asus); - return led_classdev_register(dev, ldev); + return result; } -#define ASUS_LED_REGISTER(object, device) \ - asus_led_register(object##_set_handle, &object##_led, device) - -static int asus_led_init(struct device *dev) +static int 
asus_acpi_remove(struct acpi_device *device, int type) { - int rv; - - rv = ASUS_LED_REGISTER(mled, dev); - if (rv) - goto out; - - rv = ASUS_LED_REGISTER(tled, dev); - if (rv) - goto out1; - - rv = ASUS_LED_REGISTER(rled, dev); - if (rv) - goto out2; - - rv = ASUS_LED_REGISTER(pled, dev); - if (rv) - goto out3; - - rv = ASUS_LED_REGISTER(gled, dev); - if (rv) - goto out4; + struct asus_laptop *asus = acpi_driver_data(device); - if (kled_set_handle && kled_get_handle) - rv = ASUS_LED_REGISTER(kled, dev); - if (rv) - goto out5; - - led_workqueue = create_singlethread_workqueue("led_workqueue"); - if (!led_workqueue) - goto out6; + asus_backlight_exit(asus); + asus_rfkill_exit(asus); + asus_led_exit(asus); + asus_input_exit(asus); + asus_platform_exit(asus); + kfree(asus->name); + kfree(asus); return 0; -out6: - rv = -ENOMEM; - ASUS_LED_UNREGISTER(kled); -out5: - ASUS_LED_UNREGISTER(gled); -out4: - ASUS_LED_UNREGISTER(pled); -out3: - ASUS_LED_UNREGISTER(rled); -out2: - ASUS_LED_UNREGISTER(tled); -out1: - ASUS_LED_UNREGISTER(mled); -out: - return rv; } +static const struct acpi_device_id asus_device_ids[] = { + {"ATK0100", 0}, + {"ATK0101", 0}, + {"", 0}, +}; +MODULE_DEVICE_TABLE(acpi, asus_device_ids); + +static struct acpi_driver asus_acpi_driver = { + .name = ASUS_LAPTOP_NAME, + .class = ASUS_LAPTOP_CLASS, + .owner = THIS_MODULE, + .ids = asus_device_ids, + .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS, + .ops = { + .add = asus_acpi_add, + .remove = asus_acpi_remove, + .notify = asus_acpi_notify, + }, +}; + static int __init asus_laptop_init(void) { int result; - result = acpi_bus_register_driver(&asus_hotk_driver); + result = platform_driver_register(&platform_driver); if (result < 0) return result; - /* - * This is a bit of a kludge. We only want this module loaded - * for ASUS systems, but there's currently no way to probe the - * ACPI namespace for ASUS HIDs. So we just return failure if - * we didn't find one, which will cause the module to be - * unloaded. 
- */ - if (!asus_hotk_found) { - acpi_bus_unregister_driver(&asus_hotk_driver); - return -ENODEV; - } - - result = asus_input_init(); - if (result) - goto fail_input; - - /* Register platform stuff */ - result = platform_driver_register(&asuspf_driver); - if (result) - goto fail_platform_driver; - - asuspf_device = platform_device_alloc(ASUS_HOTK_FILE, -1); - if (!asuspf_device) { - result = -ENOMEM; - goto fail_platform_device1; + result = acpi_bus_register_driver(&asus_acpi_driver); + if (result < 0) + goto fail_acpi_driver; + if (!asus_device_present) { + result = -ENODEV; + goto fail_no_device; } - - result = platform_device_add(asuspf_device); - if (result) - goto fail_platform_device2; - - result = sysfs_create_group(&asuspf_device->dev.kobj, - &asuspf_attribute_group); - if (result) - goto fail_sysfs; - - result = asus_led_init(&asuspf_device->dev); - if (result) - goto fail_led; - - if (!acpi_video_backlight_support()) { - result = asus_backlight_init(&asuspf_device->dev); - if (result) - goto fail_backlight; - } else - pr_info("Brightness ignored, must be controlled by " - "ACPI video driver\n"); - return 0; -fail_backlight: - asus_led_exit(); - -fail_led: - sysfs_remove_group(&asuspf_device->dev.kobj, - &asuspf_attribute_group); - -fail_sysfs: - platform_device_del(asuspf_device); - -fail_platform_device2: - platform_device_put(asuspf_device); - -fail_platform_device1: - platform_driver_unregister(&asuspf_driver); - -fail_platform_driver: - asus_input_exit(); - -fail_input: - +fail_no_device: + acpi_bus_unregister_driver(&asus_acpi_driver); +fail_acpi_driver: + platform_driver_unregister(&platform_driver); return result; } +static void __exit asus_laptop_exit(void) +{ + acpi_bus_unregister_driver(&asus_acpi_driver); + platform_driver_unregister(&platform_driver); +} + module_init(asus_laptop_init); module_exit(asus_laptop_exit); diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c index c1d2aeeea94..1381430e110 100644 --- a/drivers/platform/x86/asus_acpi.c +++ b/drivers/platform/x86/asus_acpi.c @@ -1225,9 +1225,8 @@ static int asus_model_match(char *model) else if (strncmp(model, "M2N", 3) == 0 || strncmp(model, "M3N", 3) == 0 || strncmp(model, "M5N", 3) == 0 || - strncmp(model, "M6N", 3) == 0 || strncmp(model, "S1N", 3) == 0 || - strncmp(model, "S5N", 3) == 0 || strncmp(model, "W1N", 3) == 0) + strncmp(model, "S5N", 3) == 0) return xxN; else if (strncmp(model, "M1", 2) == 0) return M1A; diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c index 8cb20e45bad..035a7dd65a3 100644 --- a/drivers/platform/x86/classmate-laptop.c +++ b/drivers/platform/x86/classmate-laptop.c @@ -507,6 +507,10 @@ static int cmpc_keys_codes[] = { KEY_BRIGHTNESSDOWN, KEY_BRIGHTNESSUP, KEY_VENDOR, + KEY_UNKNOWN, + KEY_CAMERA, + KEY_BACK, + KEY_FORWARD, KEY_MAX }; diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index b7f4d270591..ef614979afe 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -132,8 +132,8 @@ static struct dmi_system_id __devinitdata dell_blacklist[] = { }; static struct calling_interface_buffer *buffer; -struct page *bufferpage; -DEFINE_MUTEX(buffer_mutex); +static struct page *bufferpage; +static DEFINE_MUTEX(buffer_mutex); static int hwswitch_state; @@ -580,6 +580,7 @@ static int __init dell_init(void) fail_backlight: i8042_remove_filter(dell_laptop_i8042_filter); + cancel_delayed_work_sync(&dell_rfkill_work); fail_filter: dell_cleanup_rfkill(); 
fail_rfkill: @@ -597,12 +598,12 @@ fail_platform_driver: static void __exit dell_exit(void) { - cancel_delayed_work_sync(&dell_rfkill_work); i8042_remove_filter(dell_laptop_i8042_filter); + cancel_delayed_work_sync(&dell_rfkill_work); backlight_device_unregister(dell_backlight_device); dell_cleanup_rfkill(); if (platform_device) { - platform_device_del(platform_device); + platform_device_unregister(platform_device); platform_driver_unregister(&platform_driver); } kfree(da_tokens); diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index e2be6bb33d9..9a844caa375 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c @@ -578,6 +578,8 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc) struct pci_dev *dev; struct pci_bus *bus; bool blocked = eeepc_wlan_rfkill_blocked(eeepc); + bool absent; + u32 l; if (eeepc->wlan_rfkill) rfkill_set_sw_state(eeepc->wlan_rfkill, blocked); @@ -591,6 +593,22 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc) goto out_unlock; } + if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) { + pr_err("Unable to read PCI config space?\n"); + goto out_unlock; + } + absent = (l == 0xffffffff); + + if (blocked != absent) { + pr_warning("BIOS says wireless lan is %s, " + "but the pci device is %s\n", + blocked ? "blocked" : "unblocked", + absent ? "absent" : "present"); + pr_warning("skipped wireless hotplug as probably " + "inappropriate for this model\n"); + goto out_unlock; + } + if (!blocked) { dev = pci_get_slot(bus, 0); if (dev) { @@ -1277,7 +1295,8 @@ static void eeepc_dmi_check(struct eeepc_laptop *eeepc) * hotplug code. In fact, current hotplug code seems to unplug another * device... */ - if (strcmp(model, "1005HA") == 0 || strcmp(model, "1201N") == 0) { + if (strcmp(model, "1005HA") == 0 || strcmp(model, "1201N") == 0 || + strcmp(model, "1005PE") == 0) { eeepc->hotplug_disabled = true; pr_info("wlan hotplug disabled\n"); } diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index eb603f1d55c..e7b0c3bcef8 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -286,6 +286,7 @@ struct ibm_init_struct { char param[32]; int (*init) (struct ibm_init_struct *); + mode_t base_procfs_mode; struct ibm_struct *data; }; @@ -2082,6 +2083,7 @@ static struct attribute_set *hotkey_dev_attributes; static void tpacpi_driver_event(const unsigned int hkey_event); static void hotkey_driver_event(const unsigned int scancode); +static void hotkey_poll_setup(const bool may_warn); /* HKEY.MHKG() return bits */ #define TP_HOTKEY_TABLET_MASK (1 << 3) @@ -2264,6 +2266,8 @@ static int tpacpi_hotkey_driver_mask_set(const u32 mask) rc = hotkey_mask_set((hotkey_acpi_mask | hotkey_driver_mask) & ~hotkey_source_mask); + hotkey_poll_setup(true); + mutex_unlock(&hotkey_mutex); return rc; @@ -2548,7 +2552,7 @@ static void hotkey_poll_stop_sync(void) } /* call with hotkey_mutex held */ -static void hotkey_poll_setup(bool may_warn) +static void hotkey_poll_setup(const bool may_warn) { const u32 poll_driver_mask = hotkey_driver_mask & hotkey_source_mask; const u32 poll_user_mask = hotkey_user_mask & hotkey_source_mask; @@ -2579,7 +2583,7 @@ static void hotkey_poll_setup(bool may_warn) } } -static void hotkey_poll_setup_safe(bool may_warn) +static void hotkey_poll_setup_safe(const bool may_warn) { mutex_lock(&hotkey_mutex); hotkey_poll_setup(may_warn); @@ -2597,7 +2601,11 @@ static void hotkey_poll_set_freq(unsigned int freq) 
#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */ -static void hotkey_poll_setup_safe(bool __unused) +static void hotkey_poll_setup(const bool __unused) +{ +} + +static void hotkey_poll_setup_safe(const bool __unused) { } @@ -2607,16 +2615,11 @@ static int hotkey_inputdev_open(struct input_dev *dev) { switch (tpacpi_lifecycle) { case TPACPI_LIFE_INIT: - /* - * hotkey_init will call hotkey_poll_setup_safe - * at the appropriate moment - */ - return 0; - case TPACPI_LIFE_EXITING: - return -EBUSY; case TPACPI_LIFE_RUNNING: hotkey_poll_setup_safe(false); return 0; + case TPACPI_LIFE_EXITING: + return -EBUSY; } /* Should only happen if tpacpi_lifecycle is corrupt */ @@ -2627,7 +2630,7 @@ static int hotkey_inputdev_open(struct input_dev *dev) static void hotkey_inputdev_close(struct input_dev *dev) { /* disable hotkey polling when possible */ - if (tpacpi_lifecycle == TPACPI_LIFE_RUNNING && + if (tpacpi_lifecycle != TPACPI_LIFE_EXITING && !(hotkey_source_mask & hotkey_driver_mask)) hotkey_poll_setup_safe(false); } @@ -3655,13 +3658,19 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event) break; case 3: /* 0x3000-0x3FFF: bay-related wakeups */ - if (hkey == TP_HKEY_EV_BAYEJ_ACK) { + switch (hkey) { + case TP_HKEY_EV_BAYEJ_ACK: hotkey_autosleep_ack = 1; printk(TPACPI_INFO "bay ejected\n"); hotkey_wakeup_hotunplug_complete_notify_change(); known_ev = true; - } else { + break; + case TP_HKEY_EV_OPTDRV_EJ: + /* FIXME: kick libata if SATA link offline */ + known_ev = true; + break; + default: known_ev = false; } break; @@ -3870,7 +3879,7 @@ enum { TP_ACPI_BLUETOOTH_HWPRESENT = 0x01, /* Bluetooth hw available */ TP_ACPI_BLUETOOTH_RADIOSSW = 0x02, /* Bluetooth radio enabled */ TP_ACPI_BLUETOOTH_RESUMECTRL = 0x04, /* Bluetooth state at resume: - off / last state */ + 0 = disable, 1 = enable */ }; enum { @@ -3916,10 +3925,11 @@ static int bluetooth_set_status(enum tpacpi_rfkill_state state) } #endif - /* We make sure to keep TP_ACPI_BLUETOOTH_RESUMECTRL off */ - status = TP_ACPI_BLUETOOTH_RESUMECTRL; if (state == TPACPI_RFK_RADIO_ON) - status |= TP_ACPI_BLUETOOTH_RADIOSSW; + status = TP_ACPI_BLUETOOTH_RADIOSSW + | TP_ACPI_BLUETOOTH_RESUMECTRL; + else + status = 0; if (!acpi_evalf(hkey_handle, NULL, "SBDC", "vd", status)) return -EIO; @@ -4070,7 +4080,7 @@ enum { TP_ACPI_WANCARD_HWPRESENT = 0x01, /* Wan hw available */ TP_ACPI_WANCARD_RADIOSSW = 0x02, /* Wan radio enabled */ TP_ACPI_WANCARD_RESUMECTRL = 0x04, /* Wan state at resume: - off / last state */ + 0 = disable, 1 = enable */ }; #define TPACPI_RFK_WWAN_SW_NAME "tpacpi_wwan_sw" @@ -4107,10 +4117,11 @@ static int wan_set_status(enum tpacpi_rfkill_state state) } #endif - /* We make sure to set TP_ACPI_WANCARD_RESUMECTRL */ - status = TP_ACPI_WANCARD_RESUMECTRL; if (state == TPACPI_RFK_RADIO_ON) - status |= TP_ACPI_WANCARD_RADIOSSW; + status = TP_ACPI_WANCARD_RADIOSSW + | TP_ACPI_WANCARD_RESUMECTRL; + else + status = 0; if (!acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status)) return -EIO; @@ -4619,6 +4630,10 @@ static int video_read(struct seq_file *m) return 0; } + /* Even reads can crash X.org, so... */ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + status = video_outputsw_get(); if (status < 0) return status; @@ -4652,6 +4667,10 @@ static int video_write(char *buf) if (video_supported == TPACPI_VIDEO_NONE) return -ENODEV; + /* Even reads can crash X.org, let alone writes... 
*/ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + enable = 0; disable = 0; @@ -6133,13 +6152,13 @@ static const struct tpacpi_quirk brightness_quirk_table[] __initconst = { TPACPI_Q_IBM('1', 'Y', TPACPI_BRGHT_Q_EC), /* T43/p ATI */ /* Models with ATI GPUs that can use ECNVRAM */ - TPACPI_Q_IBM('1', 'R', TPACPI_BRGHT_Q_EC), + TPACPI_Q_IBM('1', 'R', TPACPI_BRGHT_Q_EC), /* R50,51 T40-42 */ TPACPI_Q_IBM('1', 'Q', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), - TPACPI_Q_IBM('7', '6', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), + TPACPI_Q_IBM('7', '6', TPACPI_BRGHT_Q_EC), /* R52 */ TPACPI_Q_IBM('7', '8', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), /* Models with Intel Extreme Graphics 2 */ - TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_NOEC), + TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_NOEC), /* X40 */ TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC), @@ -6522,7 +6541,8 @@ static int volume_set_status(const u8 status) return volume_set_status_ec(status); } -static int volume_set_mute_ec(const bool mute) +/* returns < 0 on error, 0 on no change, 1 on change */ +static int __volume_set_mute_ec(const bool mute) { int rc; u8 s, n; @@ -6537,22 +6557,37 @@ static int volume_set_mute_ec(const bool mute) n = (mute) ? s | TP_EC_AUDIO_MUTESW_MSK : s & ~TP_EC_AUDIO_MUTESW_MSK; - if (n != s) + if (n != s) { rc = volume_set_status_ec(n); + if (!rc) + rc = 1; + } unlock: mutex_unlock(&volume_mutex); return rc; } +static int volume_alsa_set_mute(const bool mute) +{ + dbg_printk(TPACPI_DBG_MIXER, "ALSA: trying to %smute\n", + (mute) ? "" : "un"); + return __volume_set_mute_ec(mute); +} + static int volume_set_mute(const bool mute) { + int rc; + dbg_printk(TPACPI_DBG_MIXER, "trying to %smute\n", (mute) ? "" : "un"); - return volume_set_mute_ec(mute); + + rc = __volume_set_mute_ec(mute); + return (rc < 0) ? 
rc : 0; } -static int volume_set_volume_ec(const u8 vol) +/* returns < 0 on error, 0 on no change, 1 on change */ +static int __volume_set_volume_ec(const u8 vol) { int rc; u8 s, n; @@ -6569,19 +6604,22 @@ static int volume_set_volume_ec(const u8 vol) n = (s & ~TP_EC_AUDIO_LVL_MSK) | vol; - if (n != s) + if (n != s) { rc = volume_set_status_ec(n); + if (!rc) + rc = 1; + } unlock: mutex_unlock(&volume_mutex); return rc; } -static int volume_set_volume(const u8 vol) +static int volume_alsa_set_volume(const u8 vol) { dbg_printk(TPACPI_DBG_MIXER, - "trying to set volume level to %hu\n", vol); - return volume_set_volume_ec(vol); + "ALSA: trying to set volume level to %hu\n", vol); + return __volume_set_volume_ec(vol); } static void volume_alsa_notify_change(void) @@ -6628,7 +6666,7 @@ static int volume_alsa_vol_get(struct snd_kcontrol *kcontrol, static int volume_alsa_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { - return volume_set_volume(ucontrol->value.integer.value[0]); + return volume_alsa_set_volume(ucontrol->value.integer.value[0]); } #define volume_alsa_mute_info snd_ctl_boolean_mono_info @@ -6651,7 +6689,7 @@ static int volume_alsa_mute_get(struct snd_kcontrol *kcontrol, static int volume_alsa_mute_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { - return volume_set_mute(!ucontrol->value.integer.value[0]); + return volume_alsa_set_mute(!ucontrol->value.integer.value[0]); } static struct snd_kcontrol_new volume_alsa_control_vol __devinitdata = { @@ -8477,9 +8515,10 @@ static int __init ibm_init(struct ibm_init_struct *iibm) "%s installed\n", ibm->name); if (ibm->read) { - mode_t mode; + mode_t mode = iibm->base_procfs_mode; - mode = S_IRUGO; + if (!mode) + mode = S_IRUGO; if (ibm->write) mode |= S_IWUSR; entry = proc_create_data(ibm->name, mode, proc_dir, @@ -8670,6 +8709,7 @@ static struct ibm_init_struct ibms_init[] __initdata = { #ifdef CONFIG_THINKPAD_ACPI_VIDEO { .init = video_init, + .base_procfs_mode = S_IRUSR, .data = &video_driver_data, }, #endif @@ -9032,6 +9072,9 @@ static int __init thinkpad_acpi_module_init(void) return ret; } } + + tpacpi_lifecycle = TPACPI_LIFE_RUNNING; + ret = input_register_device(tpacpi_inputdev); if (ret < 0) { printk(TPACPI_ERR "unable to register input device\n"); @@ -9041,7 +9084,6 @@ static int __init thinkpad_acpi_module_init(void) tp_features.input_device_registered = 1; } - tpacpi_lifecycle = TPACPI_LIFE_RUNNING; return 0; } diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 26c211724ac..405b969734d 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -814,21 +814,23 @@ static void toshiba_acpi_notify(acpi_handle handle, u32 event, void *context) if (hci_result == HCI_SUCCESS) { if (value == 0x100) continue; - else if (value & 0x80) { - key = toshiba_acpi_get_entry_by_scancode - (value & ~0x80); - if (!key) { - printk(MY_INFO "Unknown key %x\n", - value & ~0x80); - continue; - } - input_report_key(toshiba_acpi.hotkey_dev, - key->keycode, 1); - input_sync(toshiba_acpi.hotkey_dev); - input_report_key(toshiba_acpi.hotkey_dev, - key->keycode, 0); - input_sync(toshiba_acpi.hotkey_dev); + /* act on key press; ignore key release */ + if (value & 0x80) + continue; + + key = toshiba_acpi_get_entry_by_scancode + (value); + if (!key) { + printk(MY_INFO "Unknown key %x\n", + value); + continue; } + input_report_key(toshiba_acpi.hotkey_dev, + key->keycode, 1); + input_sync(toshiba_acpi.hotkey_dev); + 
input_report_key(toshiba_acpi.hotkey_dev, + key->keycode, 0); + input_sync(toshiba_acpi.hotkey_dev); } else if (hci_result == HCI_NOT_SUPPORTED) { /* This is a workaround for an unresolved issue on * some machines where system events sporadically diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig index d4b3d67f054..bf146721395 100644 --- a/drivers/power/Kconfig +++ b/drivers/power/Kconfig @@ -98,10 +98,10 @@ config BATTERY_WM97XX Say Y to enable support for battery measured by WM97xx aux port. config BATTERY_BQ27x00 - tristate "BQ27200 battery driver" + tristate "BQ27x00 battery driver" depends on I2C help - Say Y here to enable support for batteries with BQ27200(I2C) chip. + Say Y here to enable support for batteries with BQ27x00 (I2C) chips. config BATTERY_DA9030 tristate "DA9030 battery driver" diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c index 62bb98124e2..bece33ed873 100644 --- a/drivers/power/bq27x00_battery.c +++ b/drivers/power/bq27x00_battery.c @@ -26,13 +26,22 @@ #include <linux/i2c.h> #include <asm/unaligned.h> -#define DRIVER_VERSION "1.0.0" +#define DRIVER_VERSION "1.1.0" #define BQ27x00_REG_TEMP 0x06 #define BQ27x00_REG_VOLT 0x08 -#define BQ27x00_REG_RSOC 0x0B /* Relative State-of-Charge */ #define BQ27x00_REG_AI 0x14 #define BQ27x00_REG_FLAGS 0x0A +#define BQ27x00_REG_TTE 0x16 +#define BQ27x00_REG_TTF 0x18 +#define BQ27x00_REG_TTECP 0x26 + +#define BQ27000_REG_RSOC 0x0B /* Relative State-of-Charge */ +#define BQ27000_FLAG_CHGS BIT(7) + +#define BQ27500_REG_SOC 0x2c +#define BQ27500_FLAG_DSC BIT(0) +#define BQ27500_FLAG_FC BIT(9) /* If the system has several batteries we need a different name for each * of them... @@ -46,25 +55,28 @@ struct bq27x00_access_methods { struct bq27x00_device_info *di); }; +enum bq27x00_chip { BQ27000, BQ27500 }; + struct bq27x00_device_info { struct device *dev; int id; - int voltage_uV; - int current_uA; - int temp_C; - int charge_rsoc; struct bq27x00_access_methods *bus; struct power_supply bat; + enum bq27x00_chip chip; struct i2c_client *client; }; static enum power_supply_property bq27x00_battery_props[] = { + POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, + POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, }; /* @@ -74,16 +86,11 @@ static enum power_supply_property bq27x00_battery_props[] = { static int bq27x00_read(u8 reg, int *rt_value, int b_single, struct bq27x00_device_info *di) { - int ret; - - ret = di->bus->read(reg, rt_value, b_single, di); - *rt_value = be16_to_cpu(*rt_value); - - return ret; + return di->bus->read(reg, rt_value, b_single, di); } /* - * Return the battery temperature in Celsius degrees + * Return the battery temperature in tenths of degree Celsius * Or < 0 if something fails. 
*/ static int bq27x00_battery_temperature(struct bq27x00_device_info *di) @@ -97,7 +104,10 @@ static int bq27x00_battery_temperature(struct bq27x00_device_info *di) return ret; } - return (temp >> 2) - 273; + if (di->chip == BQ27500) + return temp - 2731; + else + return ((temp >> 2) - 273) * 10; } /* @@ -115,7 +125,7 @@ static int bq27x00_battery_voltage(struct bq27x00_device_info *di) return ret; } - return volt; + return volt * 1000; } /* @@ -134,16 +144,23 @@ static int bq27x00_battery_current(struct bq27x00_device_info *di) dev_err(di->dev, "error reading current\n"); return 0; } - ret = bq27x00_read(BQ27x00_REG_FLAGS, &flags, 0, di); - if (ret < 0) { - dev_err(di->dev, "error reading flags\n"); - return 0; - } - if ((flags & (1 << 7)) != 0) { - dev_dbg(di->dev, "negative current!\n"); - return -curr; + + if (di->chip == BQ27500) { + /* bq27500 returns signed value */ + curr = (int)(s16)curr; + } else { + ret = bq27x00_read(BQ27x00_REG_FLAGS, &flags, 0, di); + if (ret < 0) { + dev_err(di->dev, "error reading flags\n"); + return 0; + } + if (flags & BQ27000_FLAG_CHGS) { + dev_dbg(di->dev, "negative current!\n"); + curr = -curr; + } } - return curr; + + return curr * 1000; } /* @@ -155,13 +172,70 @@ static int bq27x00_battery_rsoc(struct bq27x00_device_info *di) int ret; int rsoc = 0; - ret = bq27x00_read(BQ27x00_REG_RSOC, &rsoc, 1, di); + if (di->chip == BQ27500) + ret = bq27x00_read(BQ27500_REG_SOC, &rsoc, 0, di); + else + ret = bq27x00_read(BQ27000_REG_RSOC, &rsoc, 1, di); if (ret) { dev_err(di->dev, "error reading relative State-of-Charge\n"); return ret; } - return rsoc >> 8; + return rsoc; +} + +static int bq27x00_battery_status(struct bq27x00_device_info *di, + union power_supply_propval *val) +{ + int flags = 0; + int status; + int ret; + + ret = bq27x00_read(BQ27x00_REG_FLAGS, &flags, 0, di); + if (ret < 0) { + dev_err(di->dev, "error reading flags\n"); + return ret; + } + + if (di->chip == BQ27500) { + if (flags & BQ27500_FLAG_FC) + status = POWER_SUPPLY_STATUS_FULL; + else if (flags & BQ27500_FLAG_DSC) + status = POWER_SUPPLY_STATUS_DISCHARGING; + else + status = POWER_SUPPLY_STATUS_CHARGING; + } else { + if (flags & BQ27000_FLAG_CHGS) + status = POWER_SUPPLY_STATUS_CHARGING; + else + status = POWER_SUPPLY_STATUS_DISCHARGING; + } + + val->intval = status; + return 0; +} + +/* + * Read a time register. + * Return < 0 if something fails. 
+ */ +static int bq27x00_battery_time(struct bq27x00_device_info *di, int reg, + union power_supply_propval *val) +{ + int tval = 0; + int ret; + + ret = bq27x00_read(reg, &tval, 0, di); + if (ret) { + dev_err(di->dev, "error reading register %02x\n", reg); + return ret; + } + + if (tval == 65535) + return -ENODATA; + + val->intval = tval * 60; + return 0; } #define to_bq27x00_device_info(x) container_of((x), \ @@ -171,9 +245,13 @@ static int bq27x00_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { + int ret = 0; struct bq27x00_device_info *di = to_bq27x00_device_info(psy); switch (psp) { + case POWER_SUPPLY_PROP_STATUS: + ret = bq27x00_battery_status(di, val); + break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: case POWER_SUPPLY_PROP_PRESENT: val->intval = bq27x00_battery_voltage(di); @@ -189,11 +267,20 @@ static int bq27x00_battery_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_TEMP: val->intval = bq27x00_battery_temperature(di); break; + case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW: + ret = bq27x00_battery_time(di, BQ27x00_REG_TTE, val); + break; + case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: + ret = bq27x00_battery_time(di, BQ27x00_REG_TTECP, val); + break; + case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW: + ret = bq27x00_battery_time(di, BQ27x00_REG_TTF, val); + break; default: return -EINVAL; } - return 0; + return ret; } static void bq27x00_powersupply_init(struct bq27x00_device_info *di) @@ -206,10 +293,10 @@ static void bq27x00_powersupply_init(struct bq27x00_device_info *di) } /* - * BQ27200 specific code + * i2c specific code */ -static int bq27200_read(u8 reg, int *rt_value, int b_single, +static int bq27x00_read_i2c(u8 reg, int *rt_value, int b_single, struct bq27x00_device_info *di) { struct i2c_client *client = di->client; @@ -238,7 +325,7 @@ static int bq27200_read(u8 reg, int *rt_value, int b_single, err = i2c_transfer(client->adapter, msg, 1); if (err >= 0) { if (!b_single) - *rt_value = get_unaligned_be16(data); + *rt_value = get_unaligned_le16(data); else *rt_value = data[0]; @@ -248,7 +335,7 @@ static int bq27200_read(u8 reg, int *rt_value, int b_single, return err; } -static int bq27200_battery_probe(struct i2c_client *client, +static int bq27x00_battery_probe(struct i2c_client *client, const struct i2c_device_id *id) { char *name; @@ -267,7 +354,7 @@ static int bq27200_battery_probe(struct i2c_client *client, if (retval < 0) return retval; - name = kasprintf(GFP_KERNEL, "bq27200-%d", num); + name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num); if (!name) { dev_err(&client->dev, "failed to allocate device name\n"); retval = -ENOMEM; @@ -281,6 +368,7 @@ static int bq27200_battery_probe(struct i2c_client *client, goto batt_failed_2; } di->id = num; + di->chip = id->driver_data; bus = kzalloc(sizeof(*bus), GFP_KERNEL); if (!bus) { @@ -293,7 +381,7 @@ static int bq27200_battery_probe(struct i2c_client *client, i2c_set_clientdata(client, di); di->dev = &client->dev; di->bat.name = name; - bus->read = &bq27200_read; + bus->read = &bq27x00_read_i2c; di->bus = bus; di->client = client; @@ -323,7 +411,7 @@ batt_failed_1: return retval; } -static int bq27200_battery_remove(struct i2c_client *client) +static int bq27x00_battery_remove(struct i2c_client *client) { struct bq27x00_device_info *di = i2c_get_clientdata(client); @@ -344,27 +432,28 @@ static int bq27200_battery_remove(struct i2c_client *client) * Module stuff */ -static const struct i2c_device_id bq27200_id[] = { - { "bq27200", 0 }, +static const struct 
i2c_device_id bq27x00_id[] = { + { "bq27200", BQ27000 }, /* bq27200 is the same as bq27000, but with i2c */ + { "bq27500", BQ27500 }, {}, }; -static struct i2c_driver bq27200_battery_driver = { +static struct i2c_driver bq27x00_battery_driver = { .driver = { - .name = "bq27200-battery", + .name = "bq27x00-battery", }, - .probe = bq27200_battery_probe, - .remove = bq27200_battery_remove, - .id_table = bq27200_id, + .probe = bq27x00_battery_probe, + .remove = bq27x00_battery_remove, + .id_table = bq27x00_id, }; static int __init bq27x00_battery_init(void) { int ret; - ret = i2c_add_driver(&bq27200_battery_driver); + ret = i2c_add_driver(&bq27x00_battery_driver); if (ret) - printk(KERN_ERR "Unable to register BQ27200 driver\n"); + printk(KERN_ERR "Unable to register BQ27x00 driver\n"); return ret; } @@ -372,7 +461,7 @@ module_init(bq27x00_battery_init); static void __exit bq27x00_battery_exit(void) { - i2c_del_driver(&bq27200_battery_driver); + i2c_del_driver(&bq27x00_battery_driver); } module_exit(bq27x00_battery_exit); diff --git a/drivers/power/da9030_battery.c b/drivers/power/da9030_battery.c index 3364198134a..a2e71f7b27f 100644 --- a/drivers/power/da9030_battery.c +++ b/drivers/power/da9030_battery.c @@ -509,7 +509,7 @@ static int da9030_battery_probe(struct platform_device *pdev) charger->master = pdev->dev.parent; - /* 10 seconds between monotor runs unless platfrom defines other + /* 10 seconds between monitor runs unless platform defines other interval */ charger->interval = msecs_to_jiffies( (pdata->batmon_interval ? : 10) * 1000); diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c index 6ea3cb5837c..23eed356a85 100644 --- a/drivers/power/wm97xx_battery.c +++ b/drivers/power/wm97xx_battery.c @@ -26,7 +26,7 @@ static DEFINE_MUTEX(bat_lock); static struct work_struct bat_work; -struct mutex work_lock; +static struct mutex work_lock; static int bat_status = POWER_SUPPLY_STATUS_UNKNOWN; static struct wm97xx_batt_info *gpdata; static enum power_supply_property *prop; @@ -203,7 +203,7 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev) goto err2; ret = request_irq(gpio_to_irq(pdata->charge_gpio), wm97xx_chrg_irq, IRQF_DISABLED, - "AC Detect", 0); + "AC Detect", dev); if (ret) goto err2; props++; /* POWER_SUPPLY_PROP_STATUS */ diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index 262f62eec83..834b4844182 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -27,6 +27,17 @@ config REGULATOR_DEBUG help Say yes here to enable debugging support. +config REGULATOR_DUMMY + bool "Provide a dummy regulator if regulator lookups fail" + help + If this option is enabled then when a regulator lookup fails + and the board has not specified that it has provided full + constraints, the regulator core will provide an always + enabled dummy regulator, allowing consumer + drivers to continue. + + A warning will be generated when this substitution is done. + config REGULATOR_FIXED_VOLTAGE tristate "Fixed voltage regulator support" help @@ -69,6 +80,13 @@ config REGULATOR_MAX1586 regulator via I2C bus. The provided regulator is suitable for PXA27x chips to control VCC_CORE and VCC_USIM voltages. +config REGULATOR_MAX8649 + tristate "Maxim 8649 voltage regulator" + depends on I2C + help + This driver controls a Maxim 8649 voltage output regulator via + I2C bus. + config REGULATOR_MAX8660 tristate "Maxim 8660/8661 voltage regulator" depends on I2C @@ -91,19 +109,26 @@ config REGULATOR_WM831X of PMIC devices.
config REGULATOR_WM8350 - tristate "Wolfson Microelectroncis WM8350 AudioPlus PMIC" + tristate "Wolfson Microelectronics WM8350 AudioPlus PMIC" depends on MFD_WM8350 help This driver provides support for the voltage and current regulators of the WM8350 AudioPlus PMIC. config REGULATOR_WM8400 - tristate "Wolfson Microelectroncis WM8400 AudioPlus PMIC" + tristate "Wolfson Microelectronics WM8400 AudioPlus PMIC" depends on MFD_WM8400 help This driver provides support for the voltage regulators of the WM8400 AudioPlus PMIC. +config REGULATOR_WM8994 + tristate "Wolfson Microelectronics WM8994 CODEC" + depends on MFD_WM8994 + help + This driver provides support for the voltage regulators on the + WM8994 CODEC. + config REGULATOR_DA903X tristate "Support regulators on Dialog Semiconductor DA9030/DA9034 PMIC" depends on PMIC_DA903X diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index b3c806c7941..e845b66ad59 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -9,15 +9,18 @@ obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o obj-$(CONFIG_REGULATOR_BQ24022) += bq24022.o +obj-$(CONFIG_REGULATOR_DUMMY) += dummy.o obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o +obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o obj-$(CONFIG_REGULATOR_WM8350) += wm8350-regulator.o obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o +obj-$(CONFIG_REGULATOR_WM8994) += wm8994-regulator.o obj-$(CONFIG_REGULATOR_DA903X) += da903x.o obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c index b349db4504b..7de950959ed 100644 --- a/drivers/regulator/ab3100.c +++ b/drivers/regulator/ab3100.c @@ -561,7 +561,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = { * for all the different regulators. 
*/ -static int __init ab3100_regulators_probe(struct platform_device *pdev) +static int __devinit ab3100_regulators_probe(struct platform_device *pdev) { struct ab3100_platform_data *plfdata = pdev->dev.platform_data; struct ab3100 *ab3100 = platform_get_drvdata(pdev); @@ -641,7 +641,7 @@ static int __init ab3100_regulators_probe(struct platform_device *pdev) return 0; } -static int __exit ab3100_regulators_remove(struct platform_device *pdev) +static int __devexit ab3100_regulators_remove(struct platform_device *pdev) { int i; @@ -659,7 +659,7 @@ static struct platform_driver ab3100_regulators_driver = { .owner = THIS_MODULE, }, .probe = ab3100_regulators_probe, - .remove = __exit_p(ab3100_regulators_remove), + .remove = __devexit_p(ab3100_regulators_remove), }; static __init int ab3100_regulators_init(void) diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index b60a4c9f8f1..c7bbe30010f 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -19,10 +19,13 @@ #include <linux/err.h> #include <linux/mutex.h> #include <linux/suspend.h> +#include <linux/delay.h> #include <linux/regulator/consumer.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> +#include "dummy.h" + #define REGULATOR_VERSION "0.5" static DEFINE_MUTEX(regulator_list_mutex); @@ -1084,6 +1087,13 @@ overflow_err: return NULL; } +static int _regulator_get_enable_time(struct regulator_dev *rdev) +{ + if (!rdev->desc->ops->enable_time) + return 0; + return rdev->desc->ops->enable_time(rdev); +} + /* Internal regulator request function */ static struct regulator *_regulator_get(struct device *dev, const char *id, int exclusive) @@ -1115,6 +1125,22 @@ static struct regulator *_regulator_get(struct device *dev, const char *id, goto found; } } + +#ifdef CONFIG_REGULATOR_DUMMY + if (!devname) + devname = "deviceless"; + + /* If the board didn't flag that it was fully constrained then + * substitute in a dummy regulator so consumers can continue. + */ + if (!has_full_constraints) { + pr_warning("%s supply %s not found, using dummy regulator\n", + devname, id); + rdev = dummy_regulator_rdev; + goto found; + } +#endif + mutex_unlock(&regulator_list_mutex); return regulator; @@ -1251,7 +1277,7 @@ static int _regulator_can_change_status(struct regulator_dev *rdev) /* locks held by regulator_enable() */ static int _regulator_enable(struct regulator_dev *rdev) { - int ret; + int ret, delay; /* do we need to enable the supply regulator first */ if (rdev->supply) { @@ -1275,13 +1301,34 @@ static int _regulator_enable(struct regulator_dev *rdev) if (!_regulator_can_change_status(rdev)) return -EPERM; - if (rdev->desc->ops->enable) { - ret = rdev->desc->ops->enable(rdev); - if (ret < 0) - return ret; - } else { + if (!rdev->desc->ops->enable) return -EINVAL; + + /* Query before enabling in case the enable time is + * configuration dependent. */ + ret = _regulator_get_enable_time(rdev); + if (ret >= 0) { + delay = ret; + } else { + printk(KERN_WARNING + "%s: enable_time() failed for %s: %d\n", + __func__, rdev_get_name(rdev), + ret); + delay = 0; } + + /* Allow the regulator to ramp; it would be useful + * to extend this for bulk operations so that the + * regulators can ramp together.
*/ + ret = rdev->desc->ops->enable(rdev); + if (ret < 0) + return ret; + + if (delay >= 1000) + mdelay(delay / 1000); + else if (delay) + udelay(delay); + } else if (ret < 0) { printk(KERN_ERR "%s: is_enabled() failed for %s: %d\n", __func__, rdev_get_name(rdev), ret); @@ -1341,6 +1388,9 @@ static int _regulator_disable(struct regulator_dev *rdev) __func__, rdev_get_name(rdev)); return ret; } + + _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE, + NULL); } /* decrease our supplies ref count and disable if required */ @@ -1399,8 +1449,8 @@ static int _regulator_force_disable(struct regulator_dev *rdev) return ret; } /* notify other consumers that power has been forced off */ - _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE, - NULL); + _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE | + REGULATOR_EVENT_DISABLE, NULL); } /* decrease our supplies ref count and disable if required */ @@ -1434,9 +1484,9 @@ EXPORT_SYMBOL_GPL(regulator_force_disable); static int _regulator_is_enabled(struct regulator_dev *rdev) { - /* sanity check */ + /* If we don't know then assume that the regulator is always on */ if (!rdev->desc->ops->is_enabled) - return -EINVAL; + return 1; return rdev->desc->ops->is_enabled(rdev); } @@ -2451,8 +2501,15 @@ EXPORT_SYMBOL_GPL(regulator_get_init_drvdata); static int __init regulator_init(void) { + int ret; + printk(KERN_INFO "regulator: core version %s\n", REGULATOR_VERSION); - return class_register(&regulator_class); + + ret = class_register(&regulator_class); + + regulator_dummy_init(); + + return ret; } /* init early to allow our consumers to complete system booting */ diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c new file mode 100644 index 00000000000..c7410bde7b5 --- /dev/null +++ b/drivers/regulator/dummy.c @@ -0,0 +1,66 @@ +/* + * dummy.c + * + * Copyright 2010 Wolfson Microelectronics PLC. + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This is useful for systems with mixed controllable and + * non-controllable regulators, as well as for allowing testing on + * systems with no controllable regulators.
+ */ + +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/machine.h> + +#include "dummy.h" + +struct regulator_dev *dummy_regulator_rdev; + +static struct regulator_init_data dummy_initdata; + +static struct regulator_ops dummy_ops; + +static struct regulator_desc dummy_desc = { + .name = "dummy", + .id = -1, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .ops = &dummy_ops, +}; + +static struct platform_device *dummy_pdev; + +void __init regulator_dummy_init(void) +{ + int ret; + + dummy_pdev = platform_device_alloc("reg-dummy", -1); + if (!dummy_pdev) { + pr_err("Failed to allocate dummy regulator device\n"); + return; + } + + ret = platform_device_add(dummy_pdev); + if (ret != 0) { + pr_err("Failed to register dummy regulator device: %d\n", ret); + platform_device_put(dummy_pdev); + return; + } + + dummy_regulator_rdev = regulator_register(&dummy_desc, NULL, + &dummy_initdata, NULL); + if (IS_ERR(dummy_regulator_rdev)) { + ret = PTR_ERR(dummy_regulator_rdev); + pr_err("Failed to register regulator: %d\n", ret); + platform_device_unregister(dummy_pdev); + return; + } +} diff --git a/drivers/regulator/dummy.h b/drivers/regulator/dummy.h new file mode 100644 index 00000000000..3921c0e2424 --- /dev/null +++ b/drivers/regulator/dummy.h @@ -0,0 +1,31 @@ +/* + * dummy.h + * + * Copyright 2010 Wolfson Microelectronics PLC. + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This is useful for systems with mixed controllable and + * non-controllable regulators, as well as for allowing testing on + * systems with no controllable regulators. 
+ */ + +#ifndef _DUMMY_H +#define _DUMMY_H + +struct regulator_dev; + +extern struct regulator_dev *dummy_regulator_rdev; + +#ifdef CONFIG_REGULATOR_DUMMY +void __init regulator_dummy_init(void); +#else +static inline void regulator_dummy_init(void) { } +#endif + +#endif diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c index f9f516a3028..d11f7622430 100644 --- a/drivers/regulator/fixed.c +++ b/drivers/regulator/fixed.c @@ -24,14 +24,16 @@ #include <linux/regulator/driver.h> #include <linux/regulator/fixed.h> #include <linux/gpio.h> +#include <linux/delay.h> struct fixed_voltage_data { struct regulator_desc desc; struct regulator_dev *dev; int microvolts; int gpio; - unsigned enable_high:1; - unsigned is_enabled:1; + unsigned startup_delay; + bool enable_high; + bool is_enabled; }; static int fixed_voltage_is_enabled(struct regulator_dev *dev) @@ -47,7 +49,7 @@ static int fixed_voltage_enable(struct regulator_dev *dev) if (gpio_is_valid(data->gpio)) { gpio_set_value_cansleep(data->gpio, data->enable_high); - data->is_enabled = 1; + data->is_enabled = true; } return 0; @@ -59,12 +61,19 @@ static int fixed_voltage_disable(struct regulator_dev *dev) if (gpio_is_valid(data->gpio)) { gpio_set_value_cansleep(data->gpio, !data->enable_high); - data->is_enabled = 0; + data->is_enabled = false; } return 0; } +static int fixed_voltage_enable_time(struct regulator_dev *dev) +{ + struct fixed_voltage_data *data = rdev_get_drvdata(dev); + + return data->startup_delay; +} + static int fixed_voltage_get_voltage(struct regulator_dev *dev) { struct fixed_voltage_data *data = rdev_get_drvdata(dev); @@ -87,11 +96,12 @@ static struct regulator_ops fixed_voltage_ops = { .is_enabled = fixed_voltage_is_enabled, .enable = fixed_voltage_enable, .disable = fixed_voltage_disable, + .enable_time = fixed_voltage_enable_time, .get_voltage = fixed_voltage_get_voltage, .list_voltage = fixed_voltage_list_voltage, }; -static int regulator_fixed_voltage_probe(struct platform_device *pdev) +static int __devinit reg_fixed_voltage_probe(struct platform_device *pdev) { struct fixed_voltage_config *config = pdev->dev.platform_data; struct fixed_voltage_data *drvdata; @@ -117,6 +127,7 @@ static int regulator_fixed_voltage_probe(struct platform_device *pdev) drvdata->microvolts = config->microvolts; drvdata->gpio = config->gpio; + drvdata->startup_delay = config->startup_delay; if (gpio_is_valid(config->gpio)) { drvdata->enable_high = config->enable_high; @@ -163,7 +174,7 @@ static int regulator_fixed_voltage_probe(struct platform_device *pdev) /* Regulator without GPIO control is considered * always enabled */ - drvdata->is_enabled = 1; + drvdata->is_enabled = true; } drvdata->dev = regulator_register(&drvdata->desc, &pdev->dev, @@ -191,7 +202,7 @@ err: return ret; } -static int regulator_fixed_voltage_remove(struct platform_device *pdev) +static int __devexit reg_fixed_voltage_remove(struct platform_device *pdev) { struct fixed_voltage_data *drvdata = platform_get_drvdata(pdev); @@ -205,10 +216,11 @@ static int regulator_fixed_voltage_remove(struct platform_device *pdev) } static struct platform_driver regulator_fixed_voltage_driver = { - .probe = regulator_fixed_voltage_probe, - .remove = regulator_fixed_voltage_remove, + .probe = reg_fixed_voltage_probe, + .remove = __devexit_p(reg_fixed_voltage_remove), .driver = { .name = "reg-fixed-voltage", + .owner = THIS_MODULE, }, }; diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c index 4f33a0f4a17..f5532ed7927 100644 --- 
a/drivers/regulator/lp3971.c +++ b/drivers/regulator/lp3971.c @@ -54,7 +54,7 @@ static int lp3971_set_bits(struct lp3971 *lp3971, u8 reg, u16 mask, u16 val); #define LP3971_BUCK2_BASE 0x29 #define LP3971_BUCK3_BASE 0x32 -const static int buck_base_addr[] = { +static const int buck_base_addr[] = { LP3971_BUCK1_BASE, LP3971_BUCK2_BASE, LP3971_BUCK3_BASE, @@ -63,7 +63,7 @@ const static int buck_base_addr[] = { #define LP3971_BUCK_TARGET_VOL1_REG(x) (buck_base_addr[x]) #define LP3971_BUCK_TARGET_VOL2_REG(x) (buck_base_addr[x]+1) -const static int buck_voltage_map[] = { +static const int buck_voltage_map[] = { 0, 800, 850, 900, 950, 1000, 1050, 1100, 1150, 1200, 1250, 1300, 1350, 1400, 1450, 1500, 1550, 1600, 1650, 1700, 1800, 1900, 2500, 2800, @@ -96,17 +96,17 @@ const static int buck_voltage_map[] = { #define LDO_VOL_CONTR_SHIFT(x) ((x & 1) << 2) #define LDO_VOL_CONTR_MASK 0x0f -const static int ldo45_voltage_map[] = { +static const int ldo45_voltage_map[] = { 1000, 1050, 1100, 1150, 1200, 1250, 1300, 1350, 1400, 1500, 1800, 1900, 2500, 2800, 3000, 3300, }; -const static int ldo123_voltage_map[] = { +static const int ldo123_voltage_map[] = { 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500, 2600, 2700, 2800, 2900, 3000, 3100, 3200, 3300, }; -const static int *ldo_voltage_map[] = { +static const int *ldo_voltage_map[] = { ldo123_voltage_map, /* LDO1 */ ldo123_voltage_map, /* LDO2 */ ldo123_voltage_map, /* LDO3 */ @@ -431,20 +431,20 @@ static int lp3971_set_bits(struct lp3971 *lp3971, u8 reg, u16 mask, u16 val) return ret; } -static int setup_regulators(struct lp3971 *lp3971, - struct lp3971_platform_data *pdata) +static int __devinit setup_regulators(struct lp3971 *lp3971, + struct lp3971_platform_data *pdata) { int i, err; - int num_regulators = pdata->num_regulators; - lp3971->num_regulators = num_regulators; - lp3971->rdev = kzalloc(sizeof(struct regulator_dev *) * num_regulators, - GFP_KERNEL); + + lp3971->num_regulators = pdata->num_regulators; + lp3971->rdev = kcalloc(pdata->num_regulators, + sizeof(struct regulator_dev *), GFP_KERNEL); /* Instantiate the regulators */ - for (i = 0; i < num_regulators; i++) { - int id = pdata->regulators[i].id; - lp3971->rdev[i] = regulator_register(&regulators[id], - lp3971->dev, pdata->regulators[i].initdata, lp3971); + for (i = 0; i < pdata->num_regulators; i++) { + struct lp3971_regulator_subdev *reg = &pdata->regulators[i]; + lp3971->rdev[i] = regulator_register(&regulators[reg->id], + lp3971->dev, reg->initdata, lp3971); if (IS_ERR(lp3971->rdev[i])) { err = PTR_ERR(lp3971->rdev[i]); @@ -455,10 +455,10 @@ static int setup_regulators(struct lp3971 *lp3971, } return 0; + error: - for (i = 0; i < num_regulators; i++) - if (lp3971->rdev[i]) - regulator_unregister(lp3971->rdev[i]); + while (--i >= 0) + regulator_unregister(lp3971->rdev[i]); kfree(lp3971->rdev); lp3971->rdev = NULL; return err; @@ -472,15 +472,17 @@ static int __devinit lp3971_i2c_probe(struct i2c_client *i2c, int ret; u16 val; - lp3971 = kzalloc(sizeof(struct lp3971), GFP_KERNEL); - if (lp3971 == NULL) { - ret = -ENOMEM; - goto err; + if (!pdata) { + dev_dbg(&i2c->dev, "No platform init data supplied\n"); + return -ENODEV; } + lp3971 = kzalloc(sizeof(struct lp3971), GFP_KERNEL); + if (lp3971 == NULL) + return -ENOMEM; + lp3971->i2c = i2c; lp3971->dev = &i2c->dev; - i2c_set_clientdata(i2c, lp3971); mutex_init(&lp3971->io_lock); @@ -493,19 +495,15 @@ static int __devinit lp3971_i2c_probe(struct i2c_client *i2c, goto err_detect; } - if (pdata) { - ret = setup_regulators(lp3971, pdata); - if (ret <
0) - goto err_detect; - } else - dev_warn(lp3971->dev, "No platform init data supplied\n"); + ret = setup_regulators(lp3971, pdata); + if (ret < 0) + goto err_detect; + i2c_set_clientdata(i2c, lp3971); return 0; err_detect: - i2c_set_clientdata(i2c, NULL); kfree(lp3971); -err: return ret; } @@ -513,11 +511,13 @@ static int __devexit lp3971_i2c_remove(struct i2c_client *i2c) { struct lp3971 *lp3971 = i2c_get_clientdata(i2c); int i; + + i2c_set_clientdata(i2c, NULL); + for (i = 0; i < lp3971->num_regulators; i++) - if (lp3971->rdev[i]) - regulator_unregister(lp3971->rdev[i]); + regulator_unregister(lp3971->rdev[i]); + kfree(lp3971->rdev); - i2c_set_clientdata(i2c, NULL); kfree(lp3971); return 0; diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c index 2c082d3ef48..a49fc952c9a 100644 --- a/drivers/regulator/max1586.c +++ b/drivers/regulator/max1586.c @@ -179,8 +179,8 @@ static struct regulator_desc max1586_reg[] = { }, }; -static int max1586_pmic_probe(struct i2c_client *client, - const struct i2c_device_id *i2c_id) +static int __devinit max1586_pmic_probe(struct i2c_client *client, + const struct i2c_device_id *i2c_id) { struct regulator_dev **rdev; struct max1586_platform_data *pdata = client->dev.platform_data; @@ -235,7 +235,7 @@ out: return ret; } -static int max1586_pmic_remove(struct i2c_client *client) +static int __devexit max1586_pmic_remove(struct i2c_client *client) { struct regulator_dev **rdev = i2c_get_clientdata(client); int i; @@ -257,9 +257,10 @@ MODULE_DEVICE_TABLE(i2c, max1586_id); static struct i2c_driver max1586_pmic_driver = { .probe = max1586_pmic_probe, - .remove = max1586_pmic_remove, + .remove = __devexit_p(max1586_pmic_remove), .driver = { .name = "max1586", + .owner = THIS_MODULE, }, .id_table = max1586_id, }; diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c new file mode 100644 index 00000000000..3ebdf698c64 --- /dev/null +++ b/drivers/regulator/max8649.c @@ -0,0 +1,408 @@ +/* + * Regulators driver for Maxim max8649 + * + * Copyright (C) 2009-2010 Marvell International Ltd. + * Haojian Zhuang <haojian.zhuang@marvell.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/platform_device.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/max8649.h> + +#define MAX8649_DCDC_VMIN 750000 /* uV */ +#define MAX8649_DCDC_VMAX 1380000 /* uV */ +#define MAX8649_DCDC_STEP 10000 /* uV */ +#define MAX8649_VOL_MASK 0x3f + +/* Registers */ +#define MAX8649_MODE0 0x00 +#define MAX8649_MODE1 0x01 +#define MAX8649_MODE2 0x02 +#define MAX8649_MODE3 0x03 +#define MAX8649_CONTROL 0x04 +#define MAX8649_SYNC 0x05 +#define MAX8649_RAMP 0x06 +#define MAX8649_CHIP_ID1 0x08 +#define MAX8649_CHIP_ID2 0x09 + +/* Bits */ +#define MAX8649_EN_PD (1 << 7) +#define MAX8649_VID0_PD (1 << 6) +#define MAX8649_VID1_PD (1 << 5) +#define MAX8649_VID_MASK (3 << 5) + +#define MAX8649_FORCE_PWM (1 << 7) +#define MAX8649_SYNC_EXTCLK (1 << 6) + +#define MAX8649_EXT_MASK (3 << 6) + +#define MAX8649_RAMP_MASK (7 << 5) +#define MAX8649_RAMP_DOWN (1 << 1) + +struct max8649_regulator_info { + struct regulator_dev *regulator; + struct i2c_client *i2c; + struct device *dev; + struct mutex io_lock; + + int vol_reg; + unsigned mode:2; /* bit[1:0] = VID1, VID0 */ + unsigned extclk_freq:2; + unsigned extclk:1; + unsigned ramp_timing:3; + unsigned ramp_down:1; +}; + +/* I2C operations */ + +static inline int max8649_read_device(struct i2c_client *i2c, + int reg, int bytes, void *dest) +{ + unsigned char data; + int ret; + + data = (unsigned char)reg; + ret = i2c_master_send(i2c, &data, 1); + if (ret < 0) + return ret; + ret = i2c_master_recv(i2c, dest, bytes); + if (ret < 0) + return ret; + return 0; +} + +static inline int max8649_write_device(struct i2c_client *i2c, + int reg, int bytes, void *src) +{ + unsigned char buf[bytes + 1]; + int ret; + + buf[0] = (unsigned char)reg; + memcpy(&buf[1], src, bytes); + + ret = i2c_master_send(i2c, buf, bytes + 1); + if (ret < 0) + return ret; + return 0; +} + +static int max8649_reg_read(struct i2c_client *i2c, int reg) +{ + struct max8649_regulator_info *info = i2c_get_clientdata(i2c); + unsigned char data; + int ret; + + mutex_lock(&info->io_lock); + ret = max8649_read_device(i2c, reg, 1, &data); + mutex_unlock(&info->io_lock); + + if (ret < 0) + return ret; + return (int)data; +} + +static int max8649_set_bits(struct i2c_client *i2c, int reg, + unsigned char mask, unsigned char data) +{ + struct max8649_regulator_info *info = i2c_get_clientdata(i2c); + unsigned char value; + int ret; + + mutex_lock(&info->io_lock); + ret = max8649_read_device(i2c, reg, 1, &value); + if (ret < 0) + goto out; + value &= ~mask; + value |= data; + ret = max8649_write_device(i2c, reg, 1, &value); +out: + mutex_unlock(&info->io_lock); + return ret; +} + +static inline int check_range(int min_uV, int max_uV) +{ + if ((min_uV < MAX8649_DCDC_VMIN) || (max_uV > MAX8649_DCDC_VMAX) + || (min_uV > max_uV)) + return -EINVAL; + return 0; +} + +static int max8649_list_voltage(struct regulator_dev *rdev, unsigned index) +{ + return (MAX8649_DCDC_VMIN + index * MAX8649_DCDC_STEP); +} + +static int max8649_get_voltage(struct regulator_dev *rdev) +{ + struct max8649_regulator_info *info = rdev_get_drvdata(rdev); + unsigned char data; + int ret; + + ret = max8649_reg_read(info->i2c, info->vol_reg); + if (ret < 0) + return ret; + data = (unsigned char)ret & MAX8649_VOL_MASK; + return max8649_list_voltage(rdev, data); +} + +static int max8649_set_voltage(struct regulator_dev *rdev, + int min_uV, int max_uV) +{ + struct max8649_regulator_info *info = 
rdev_get_drvdata(rdev); + unsigned char data, mask; + + if (check_range(min_uV, max_uV)) { + dev_err(info->dev, "invalid voltage range (%d, %d) uV\n", + min_uV, max_uV); + return -EINVAL; + } + data = (min_uV - MAX8649_DCDC_VMIN + MAX8649_DCDC_STEP - 1) + / MAX8649_DCDC_STEP; + mask = MAX8649_VOL_MASK; + + return max8649_set_bits(info->i2c, info->vol_reg, mask, data); +} + +/* EN_PD means pulldown on EN input */ +static int max8649_enable(struct regulator_dev *rdev) +{ + struct max8649_regulator_info *info = rdev_get_drvdata(rdev); + return max8649_set_bits(info->i2c, MAX8649_CONTROL, MAX8649_EN_PD, 0); +} + +/* + * Disable by applying the internal pulldown resistor on the EN input pin. + * An external pulldown on the EN pin would be preferable. + */ +static int max8649_disable(struct regulator_dev *rdev) +{ + struct max8649_regulator_info *info = rdev_get_drvdata(rdev); + return max8649_set_bits(info->i2c, MAX8649_CONTROL, MAX8649_EN_PD, + MAX8649_EN_PD); +} + +static int max8649_is_enabled(struct regulator_dev *rdev) +{ + struct max8649_regulator_info *info = rdev_get_drvdata(rdev); + int ret; + + ret = max8649_reg_read(info->i2c, MAX8649_CONTROL); + if (ret < 0) + return ret; + return !((unsigned char)ret & MAX8649_EN_PD); +} + +static int max8649_enable_time(struct regulator_dev *rdev) +{ + struct max8649_regulator_info *info = rdev_get_drvdata(rdev); + int voltage, rate, ret; + + /* get voltage */ + ret = max8649_reg_read(info->i2c, info->vol_reg); + if (ret < 0) + return ret; + ret &= MAX8649_VOL_MASK; + voltage = max8649_list_voltage(rdev, (unsigned char)ret); /* uV */ + + /* get rate */ + ret = max8649_reg_read(info->i2c, MAX8649_RAMP); + if (ret < 0) + return ret; + ret = (ret & MAX8649_RAMP_MASK) >> 5; + rate = (32 * 1000) >> ret; /* uV/us */ + + return (voltage / rate); +} + +static int max8649_set_mode(struct regulator_dev *rdev, unsigned int mode) +{ + struct max8649_regulator_info *info = rdev_get_drvdata(rdev); + + switch (mode) { + case REGULATOR_MODE_FAST: + max8649_set_bits(info->i2c, info->vol_reg, MAX8649_FORCE_PWM, + MAX8649_FORCE_PWM); + break; + case REGULATOR_MODE_NORMAL: + max8649_set_bits(info->i2c, info->vol_reg, + MAX8649_FORCE_PWM, 0); + break; + default: + return -EINVAL; + } + return 0; +} + +static unsigned int max8649_get_mode(struct regulator_dev *rdev) +{ + struct max8649_regulator_info *info = rdev_get_drvdata(rdev); + int ret; + + ret = max8649_reg_read(info->i2c, info->vol_reg); + if (ret & MAX8649_FORCE_PWM) + return REGULATOR_MODE_FAST; + return REGULATOR_MODE_NORMAL; +} + +static struct regulator_ops max8649_dcdc_ops = { + .set_voltage = max8649_set_voltage, + .get_voltage = max8649_get_voltage, + .list_voltage = max8649_list_voltage, + .enable = max8649_enable, + .disable = max8649_disable, + .is_enabled = max8649_is_enabled, + .enable_time = max8649_enable_time, + .set_mode = max8649_set_mode, + .get_mode = max8649_get_mode, + +}; + +static struct regulator_desc dcdc_desc = { + .name = "max8649", + .ops = &max8649_dcdc_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = 1 << 6, + .owner = THIS_MODULE, +}; + +static int __devinit max8649_regulator_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct max8649_platform_data *pdata = client->dev.platform_data; + struct max8649_regulator_info *info = NULL; + unsigned char data; + int ret; + + info = kzalloc(sizeof(struct max8649_regulator_info), GFP_KERNEL); + if (!info) { + dev_err(&client->dev, "Not enough memory\n"); + return -ENOMEM; + } + + info->i2c = client; + info->dev = &client->dev; +
mutex_init(&info->io_lock); + i2c_set_clientdata(client, info); + + info->mode = pdata->mode; + switch (info->mode) { + case 0: + info->vol_reg = MAX8649_MODE0; + break; + case 1: + info->vol_reg = MAX8649_MODE1; + break; + case 2: + info->vol_reg = MAX8649_MODE2; + break; + case 3: + info->vol_reg = MAX8649_MODE3; + break; + default: + break; + } + + ret = max8649_reg_read(info->i2c, MAX8649_CHIP_ID1); + if (ret < 0) { + dev_err(info->dev, "Failed to detect ID of MAX8649:%d\n", + ret); + goto out; + } + dev_info(info->dev, "Detected MAX8649 (ID:%x)\n", ret); + + /* enable VID0 & VID1 */ + max8649_set_bits(info->i2c, MAX8649_CONTROL, MAX8649_VID_MASK, 0); + + /* enable/disable external clock synchronization */ + info->extclk = pdata->extclk; + data = (info->extclk) ? MAX8649_SYNC_EXTCLK : 0; + max8649_set_bits(info->i2c, info->vol_reg, MAX8649_SYNC_EXTCLK, data); + if (info->extclk) { + /* set external clock frequency */ + info->extclk_freq = pdata->extclk_freq; + max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK, + info->extclk_freq); + } + + if (pdata->ramp_timing) { + info->ramp_timing = pdata->ramp_timing; + max8649_set_bits(info->i2c, MAX8649_RAMP, MAX8649_RAMP_MASK, + info->ramp_timing << 5); + } + + info->ramp_down = pdata->ramp_down; + if (info->ramp_down) { + max8649_set_bits(info->i2c, MAX8649_RAMP, MAX8649_RAMP_DOWN, + MAX8649_RAMP_DOWN); + } + + info->regulator = regulator_register(&dcdc_desc, &client->dev, + pdata->regulator, info); + if (IS_ERR(info->regulator)) { + dev_err(info->dev, "failed to register regulator %s\n", + dcdc_desc.name); + ret = PTR_ERR(info->regulator); + goto out; + } + + dev_info(info->dev, "Max8649 regulator device is detected.\n"); + return 0; +out: + kfree(info); + return ret; +} + +static int __devexit max8649_regulator_remove(struct i2c_client *client) +{ + struct max8649_regulator_info *info = i2c_get_clientdata(client); + + if (info) { + if (info->regulator) + regulator_unregister(info->regulator); + kfree(info); + } + i2c_set_clientdata(client, NULL); + + return 0; +} + +static const struct i2c_device_id max8649_id[] = { + { "max8649", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, max8649_id); + +static struct i2c_driver max8649_driver = { + .probe = max8649_regulator_probe, + .remove = __devexit_p(max8649_regulator_remove), + .driver = { + .name = "max8649", + }, + .id_table = max8649_id, +}; + +static int __init max8649_init(void) +{ + return i2c_add_driver(&max8649_driver); +} +subsys_initcall(max8649_init); + +static void __exit max8649_exit(void) +{ + i2c_del_driver(&max8649_driver); +} +module_exit(max8649_exit); + +/* Module information */ +MODULE_DESCRIPTION("MAXIM 8649 voltage regulator driver"); +MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c index acc2fb7b608..f12f1bb6213 100644 --- a/drivers/regulator/max8660.c +++ b/drivers/regulator/max8660.c @@ -345,8 +345,8 @@ static struct regulator_desc max8660_reg[] = { }, }; -static int max8660_probe(struct i2c_client *client, - const struct i2c_device_id *i2c_id) +static int __devinit max8660_probe(struct i2c_client *client, + const struct i2c_device_id *i2c_id) { struct regulator_dev **rdev; struct max8660_platform_data *pdata = client->dev.platform_data; @@ -354,7 +354,7 @@ static int max8660_probe(struct i2c_client *client, int boot_on, i, id, ret = -EINVAL; if (pdata->num_subdevs > MAX8660_V_END) { - dev_err(&client->dev, "Too much regulators found!\n"); + 
dev_err(&client->dev, "Too many regulators found!\n"); goto out; } @@ -462,7 +462,7 @@ out: return ret; } -static int max8660_remove(struct i2c_client *client) +static int __devexit max8660_remove(struct i2c_client *client) { struct regulator_dev **rdev = i2c_get_clientdata(client); int i; @@ -485,9 +485,10 @@ MODULE_DEVICE_TABLE(i2c, max8660_id); static struct i2c_driver max8660_driver = { .probe = max8660_probe, - .remove = max8660_remove, + .remove = __devexit_p(max8660_remove), .driver = { .name = "max8660", + .owner = THIS_MODULE, }, .id_table = max8660_id, }; diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c index 39c49530004..f7b81845a19 100644 --- a/drivers/regulator/mc13783-regulator.c +++ b/drivers/regulator/mc13783-regulator.c @@ -2,6 +2,7 @@ * Regulator Driver for Freescale MC13783 PMIC * * Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> + * Copyright 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -16,11 +17,44 @@ #include <linux/init.h> #include <linux/err.h> -#define MC13783_REG_SWITCHERS4 28 -#define MC13783_REG_SWITCHERS4_PLLEN (1 << 18) - #define MC13783_REG_SWITCHERS5 29 #define MC13783_REG_SWITCHERS5_SW3EN (1 << 20) +#define MC13783_REG_SWITCHERS5_SW3VSEL 18 +#define MC13783_REG_SWITCHERS5_SW3VSEL_M (3 << 18) + +#define MC13783_REG_REGULATORSETTING0 30 +#define MC13783_REG_REGULATORSETTING0_VIOLOVSEL 2 +#define MC13783_REG_REGULATORSETTING0_VDIGVSEL 4 +#define MC13783_REG_REGULATORSETTING0_VGENVSEL 6 +#define MC13783_REG_REGULATORSETTING0_VRFDIGVSEL 9 +#define MC13783_REG_REGULATORSETTING0_VRFREFVSEL 11 +#define MC13783_REG_REGULATORSETTING0_VRFCPVSEL 13 +#define MC13783_REG_REGULATORSETTING0_VSIMVSEL 14 +#define MC13783_REG_REGULATORSETTING0_VESIMVSEL 15 +#define MC13783_REG_REGULATORSETTING0_VCAMVSEL 16 + +#define MC13783_REG_REGULATORSETTING0_VIOLOVSEL_M (3 << 2) +#define MC13783_REG_REGULATORSETTING0_VDIGVSEL_M (3 << 4) +#define MC13783_REG_REGULATORSETTING0_VGENVSEL_M (7 << 6) +#define MC13783_REG_REGULATORSETTING0_VRFDIGVSEL_M (3 << 9) +#define MC13783_REG_REGULATORSETTING0_VRFREFVSEL_M (3 << 11) +#define MC13783_REG_REGULATORSETTING0_VRFCPVSEL_M (1 << 13) +#define MC13783_REG_REGULATORSETTING0_VSIMVSEL_M (1 << 14) +#define MC13783_REG_REGULATORSETTING0_VESIMVSEL_M (1 << 15) +#define MC13783_REG_REGULATORSETTING0_VCAMVSEL_M (7 << 16) + +#define MC13783_REG_REGULATORSETTING1 31 +#define MC13783_REG_REGULATORSETTING1_VVIBVSEL 0 +#define MC13783_REG_REGULATORSETTING1_VRF1VSEL 2 +#define MC13783_REG_REGULATORSETTING1_VRF2VSEL 4 +#define MC13783_REG_REGULATORSETTING1_VMMC1VSEL 6 +#define MC13783_REG_REGULATORSETTING1_VMMC2VSEL 9 + +#define MC13783_REG_REGULATORSETTING1_VVIBVSEL_M (3 << 0) +#define MC13783_REG_REGULATORSETTING1_VRF1VSEL_M (3 << 2) +#define MC13783_REG_REGULATORSETTING1_VRF2VSEL_M (3 << 4) +#define MC13783_REG_REGULATORSETTING1_VMMC1VSEL_M (7 << 6) +#define MC13783_REG_REGULATORSETTING1_VMMC2VSEL_M (7 << 9) #define MC13783_REG_REGULATORMODE0 32 #define MC13783_REG_REGULATORMODE0_VAUDIOEN (1 << 0) @@ -48,19 +82,107 @@ #define MC13783_REG_POWERMISC_GPO2EN (1 << 8) #define MC13783_REG_POWERMISC_GPO3EN (1 << 10) #define MC13783_REG_POWERMISC_GPO4EN (1 << 12) +#define MC13783_REG_POWERMISC_PWGT1SPIEN (1 << 15) +#define MC13783_REG_POWERMISC_PWGT2SPIEN (1 << 16) + +#define MC13783_REG_POWERMISC_PWGTSPI_M (3 << 15) + struct mc13783_regulator { struct 
regulator_desc desc; int reg; int enable_bit; + int vsel_reg; + int vsel_shift; + int vsel_mask; + int const *voltages; +}; + +/* Voltage Values */ +static const int const mc13783_sw3_val[] = { + 5000000, 5000000, 5000000, 5500000, +}; + +static const int const mc13783_vaudio_val[] = { + 2775000, +}; + +static const int const mc13783_viohi_val[] = { + 2775000, +}; + +static const int const mc13783_violo_val[] = { + 1200000, 1300000, 1500000, 1800000, +}; + +static const int const mc13783_vdig_val[] = { + 1200000, 1300000, 1500000, 1800000, +}; + +static const int const mc13783_vgen_val[] = { + 1200000, 1300000, 1500000, 1800000, + 1100000, 2000000, 2775000, 2400000, +}; + +static const int const mc13783_vrfdig_val[] = { + 1200000, 1500000, 1800000, 1875000, +}; + +static const int const mc13783_vrfref_val[] = { + 2475000, 2600000, 2700000, 2775000, +}; + +static const int const mc13783_vrfcp_val[] = { + 2700000, 2775000, +}; + +static const int const mc13783_vsim_val[] = { + 1800000, 2900000, 3000000, +}; + +static const int const mc13783_vesim_val[] = { + 1800000, 2900000, +}; + +static const int const mc13783_vcam_val[] = { + 1500000, 1800000, 2500000, 2550000, + 2600000, 2750000, 2800000, 3000000, +}; + +static const int const mc13783_vrfbg_val[] = { + 1250000, +}; + +static const int const mc13783_vvib_val[] = { + 1300000, 1800000, 2000000, 3000000, +}; + +static const int const mc13783_vmmc_val[] = { + 1600000, 1800000, 2000000, 2600000, + 2700000, 2800000, 2900000, 3000000, +}; + +static const int const mc13783_vrf_val[] = { + 1500000, 1875000, 2700000, 2775000, +}; + +static const int const mc13783_gpo_val[] = { + 3100000, +}; + +static const int const mc13783_pwgtdrv_val[] = { + 5500000, }; static struct regulator_ops mc13783_regulator_ops; +static struct regulator_ops mc13783_fixed_regulator_ops; +static struct regulator_ops mc13783_gpo_regulator_ops; -#define MC13783_DEFINE(prefix, _name, _reg) \ +#define MC13783_DEFINE(prefix, _name, _reg, _vsel_reg, _voltages) \ [MC13783_ ## prefix ## _ ## _name] = { \ .desc = { \ .name = #prefix "_" #_name, \ + .n_voltages = ARRAY_SIZE(_voltages), \ .ops = &mc13783_regulator_ops, \ .type = REGULATOR_VOLTAGE, \ .id = MC13783_ ## prefix ## _ ## _name, \ @@ -68,40 +190,92 @@ static struct regulator_ops mc13783_regulator_ops; }, \ .reg = MC13783_REG_ ## _reg, \ .enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN, \ + .vsel_reg = MC13783_REG_ ## _vsel_reg, \ + .vsel_shift = MC13783_REG_ ## _vsel_reg ## _ ## _name ## VSEL,\ + .vsel_mask = MC13783_REG_ ## _vsel_reg ## _ ## _name ## VSEL_M,\ + .voltages = _voltages, \ + } + +#define MC13783_FIXED_DEFINE(prefix, _name, _reg, _voltages) \ + [MC13783_ ## prefix ## _ ## _name] = { \ + .desc = { \ + .name = #prefix "_" #_name, \ + .n_voltages = ARRAY_SIZE(_voltages), \ + .ops = &mc13783_fixed_regulator_ops, \ + .type = REGULATOR_VOLTAGE, \ + .id = MC13783_ ## prefix ## _ ## _name, \ + .owner = THIS_MODULE, \ + }, \ + .reg = MC13783_REG_ ## _reg, \ + .enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN, \ + .voltages = _voltages, \ } -#define MC13783_DEFINE_SW(_name, _reg) MC13783_DEFINE(SW, _name, _reg) -#define MC13783_DEFINE_REGU(_name, _reg) MC13783_DEFINE(REGU, _name, _reg) +#define MC13783_GPO_DEFINE(prefix, _name, _reg, _voltages) \ + [MC13783_ ## prefix ## _ ## _name] = { \ + .desc = { \ + .name = #prefix "_" #_name, \ + .n_voltages = ARRAY_SIZE(_voltages), \ + .ops = &mc13783_gpo_regulator_ops, \ + .type = REGULATOR_VOLTAGE, \ + .id = MC13783_ ## prefix ## _ ## _name, \ + .owner = THIS_MODULE, 
\ + }, \ + .reg = MC13783_REG_ ## _reg, \ + .enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN, \ + .voltages = _voltages, \ + } + +#define MC13783_DEFINE_SW(_name, _reg, _vsel_reg, _voltages) \ + MC13783_DEFINE(SW, _name, _reg, _vsel_reg, _voltages) +#define MC13783_DEFINE_REGU(_name, _reg, _vsel_reg, _voltages) \ + MC13783_DEFINE(REGU, _name, _reg, _vsel_reg, _voltages) static struct mc13783_regulator mc13783_regulators[] = { - MC13783_DEFINE_SW(SW3, SWITCHERS5), - MC13783_DEFINE_SW(PLL, SWITCHERS4), - - MC13783_DEFINE_REGU(VAUDIO, REGULATORMODE0), - MC13783_DEFINE_REGU(VIOHI, REGULATORMODE0), - MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0), - MC13783_DEFINE_REGU(VDIG, REGULATORMODE0), - MC13783_DEFINE_REGU(VGEN, REGULATORMODE0), - MC13783_DEFINE_REGU(VRFDIG, REGULATORMODE0), - MC13783_DEFINE_REGU(VRFREF, REGULATORMODE0), - MC13783_DEFINE_REGU(VRFCP, REGULATORMODE0), - MC13783_DEFINE_REGU(VSIM, REGULATORMODE1), - MC13783_DEFINE_REGU(VESIM, REGULATORMODE1), - MC13783_DEFINE_REGU(VCAM, REGULATORMODE1), - MC13783_DEFINE_REGU(VRFBG, REGULATORMODE1), - MC13783_DEFINE_REGU(VVIB, REGULATORMODE1), - MC13783_DEFINE_REGU(VRF1, REGULATORMODE1), - MC13783_DEFINE_REGU(VRF2, REGULATORMODE1), - MC13783_DEFINE_REGU(VMMC1, REGULATORMODE1), - MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1), - MC13783_DEFINE_REGU(GPO1, POWERMISC), - MC13783_DEFINE_REGU(GPO2, POWERMISC), - MC13783_DEFINE_REGU(GPO3, POWERMISC), - MC13783_DEFINE_REGU(GPO4, POWERMISC), + MC13783_DEFINE_SW(SW3, SWITCHERS5, SWITCHERS5, mc13783_sw3_val), + + MC13783_FIXED_DEFINE(REGU, VAUDIO, REGULATORMODE0, mc13783_vaudio_val), + MC13783_FIXED_DEFINE(REGU, VIOHI, REGULATORMODE0, mc13783_viohi_val), + MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0, REGULATORSETTING0, \ + mc13783_violo_val), + MC13783_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0, \ + mc13783_vdig_val), + MC13783_DEFINE_REGU(VGEN, REGULATORMODE0, REGULATORSETTING0, \ + mc13783_vgen_val), + MC13783_DEFINE_REGU(VRFDIG, REGULATORMODE0, REGULATORSETTING0, \ + mc13783_vrfdig_val), + MC13783_DEFINE_REGU(VRFREF, REGULATORMODE0, REGULATORSETTING0, \ + mc13783_vrfref_val), + MC13783_DEFINE_REGU(VRFCP, REGULATORMODE0, REGULATORSETTING0, \ + mc13783_vrfcp_val), + MC13783_DEFINE_REGU(VSIM, REGULATORMODE1, REGULATORSETTING0, \ + mc13783_vsim_val), + MC13783_DEFINE_REGU(VESIM, REGULATORMODE1, REGULATORSETTING0, \ + mc13783_vesim_val), + MC13783_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0, \ + mc13783_vcam_val), + MC13783_FIXED_DEFINE(REGU, VRFBG, REGULATORMODE1, mc13783_vrfbg_val), + MC13783_DEFINE_REGU(VVIB, REGULATORMODE1, REGULATORSETTING1, \ + mc13783_vvib_val), + MC13783_DEFINE_REGU(VRF1, REGULATORMODE1, REGULATORSETTING1, \ + mc13783_vrf_val), + MC13783_DEFINE_REGU(VRF2, REGULATORMODE1, REGULATORSETTING1, \ + mc13783_vrf_val), + MC13783_DEFINE_REGU(VMMC1, REGULATORMODE1, REGULATORSETTING1, \ + mc13783_vmmc_val), + MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1, REGULATORSETTING1, \ + mc13783_vmmc_val), + MC13783_GPO_DEFINE(REGU, GPO1, POWERMISC, mc13783_gpo_val), + MC13783_GPO_DEFINE(REGU, GPO2, POWERMISC, mc13783_gpo_val), + MC13783_GPO_DEFINE(REGU, GPO3, POWERMISC, mc13783_gpo_val), + MC13783_GPO_DEFINE(REGU, GPO4, POWERMISC, mc13783_gpo_val), + MC13783_GPO_DEFINE(REGU, PWGT1SPI, POWERMISC, mc13783_pwgtdrv_val), + MC13783_GPO_DEFINE(REGU, PWGT2SPI, POWERMISC, mc13783_pwgtdrv_val), }; struct mc13783_regulator_priv { struct mc13783 *mc13783; + u32 powermisc_pwgt_state; struct regulator_dev *regulators[]; }; @@ -154,10 +328,241 @@ static int mc13783_regulator_is_enabled(struct 
regulator_dev *rdev) return (val & mc13783_regulators[id].enable_bit) != 0; } +static int mc13783_regulator_list_voltage(struct regulator_dev *rdev, + unsigned selector) +{ + int id = rdev_get_id(rdev); + + if (selector >= mc13783_regulators[id].desc.n_voltages) + return -EINVAL; + + return mc13783_regulators[id].voltages[selector]; +} + +static int mc13783_get_best_voltage_index(struct regulator_dev *rdev, + int min_uV, int max_uV) +{ + int reg_id = rdev_get_id(rdev); + int i; + int bestmatch; + int bestindex; + + /* + * Locate the minimum voltage fitting the criteria on + * this regulator. The switchable voltages are not + * in strict falling order so we need to check them + * all for the best match. + */ + bestmatch = INT_MAX; + bestindex = -1; + for (i = 0; i < mc13783_regulators[reg_id].desc.n_voltages; i++) { + if (mc13783_regulators[reg_id].voltages[i] >= min_uV && + mc13783_regulators[reg_id].voltages[i] < bestmatch) { + bestmatch = mc13783_regulators[reg_id].voltages[i]; + bestindex = i; + } + } + + if (bestindex < 0 || bestmatch > max_uV) { + dev_warn(&rdev->dev, "no possible value for %d<=x<=%d uV\n", + min_uV, max_uV); + return -EINVAL; + } + return bestindex; +} + +static int mc13783_regulator_set_voltage(struct regulator_dev *rdev, + int min_uV, int max_uV) +{ + struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev); + int value, id = rdev_get_id(rdev); + int ret; + + dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n", + __func__, id, min_uV, max_uV); + + /* Find the best index */ + value = mc13783_get_best_voltage_index(rdev, min_uV, max_uV); + dev_dbg(rdev_get_dev(rdev), "%s best value: %d \n", __func__, value); + if (value < 0) + return value; + + mc13783_lock(priv->mc13783); + ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].vsel_reg, + mc13783_regulators[id].vsel_mask, + value << mc13783_regulators[id].vsel_shift); + mc13783_unlock(priv->mc13783); + + return ret; +} + +static int mc13783_regulator_get_voltage(struct regulator_dev *rdev) +{ + struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev); + int ret, id = rdev_get_id(rdev); + unsigned int val; + + dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); + + mc13783_lock(priv->mc13783); + ret = mc13783_reg_read(priv->mc13783, + mc13783_regulators[id].vsel_reg, &val); + mc13783_unlock(priv->mc13783); + + if (ret) + return ret; + + val = (val & mc13783_regulators[id].vsel_mask) + >> mc13783_regulators[id].vsel_shift; + + dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val); + + BUG_ON(val < 0 || val > mc13783_regulators[id].desc.n_voltages); + + return mc13783_regulators[id].voltages[val]; +} + static struct regulator_ops mc13783_regulator_ops = { .enable = mc13783_regulator_enable, .disable = mc13783_regulator_disable, .is_enabled = mc13783_regulator_is_enabled, + .list_voltage = mc13783_regulator_list_voltage, + .set_voltage = mc13783_regulator_set_voltage, + .get_voltage = mc13783_regulator_get_voltage, +}; + +static int mc13783_fixed_regulator_set_voltage(struct regulator_dev *rdev, + int min_uV, int max_uV) +{ + int id = rdev_get_id(rdev); + + dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n", + __func__, id, min_uV, max_uV); + + if (min_uV > mc13783_regulators[id].voltages[0] && + max_uV < mc13783_regulators[id].voltages[0]) + return 0; + else + return -EINVAL; +} + +static int mc13783_fixed_regulator_get_voltage(struct regulator_dev *rdev) +{ + int id = rdev_get_id(rdev); + + dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); + + return 
mc13783_regulators[id].voltages[0]; +} + +static struct regulator_ops mc13783_fixed_regulator_ops = { + .enable = mc13783_regulator_enable, + .disable = mc13783_regulator_disable, + .is_enabled = mc13783_regulator_is_enabled, + .list_voltage = mc13783_regulator_list_voltage, + .set_voltage = mc13783_fixed_regulator_set_voltage, + .get_voltage = mc13783_fixed_regulator_get_voltage, +}; + +int mc13783_powermisc_rmw(struct mc13783_regulator_priv *priv, u32 mask, + u32 val) +{ + struct mc13783 *mc13783 = priv->mc13783; + int ret; + u32 valread; + + BUG_ON(val & ~mask); + + ret = mc13783_reg_read(mc13783, MC13783_REG_POWERMISC, &valread); + if (ret) + return ret; + + /* Update the stored state for Power Gates. */ + priv->powermisc_pwgt_state = + (priv->powermisc_pwgt_state & ~mask) | val; + priv->powermisc_pwgt_state &= MC13783_REG_POWERMISC_PWGTSPI_M; + + /* Construct the new register value */ + valread = (valread & ~mask) | val; + /* Overwrite the PWGTxEN with the stored version */ + valread = (valread & ~MC13783_REG_POWERMISC_PWGTSPI_M) | + priv->powermisc_pwgt_state; + + return mc13783_reg_write(mc13783, MC13783_REG_POWERMISC, valread); +} + +static int mc13783_gpo_regulator_enable(struct regulator_dev *rdev) +{ + struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev); + int id = rdev_get_id(rdev); + int ret; + u32 en_val = mc13783_regulators[id].enable_bit; + + dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); + + /* Power Gate enable value is 0 */ + if (id == MC13783_REGU_PWGT1SPI || + id == MC13783_REGU_PWGT2SPI) + en_val = 0; + + mc13783_lock(priv->mc13783); + ret = mc13783_powermisc_rmw(priv, mc13783_regulators[id].enable_bit, + en_val); + mc13783_unlock(priv->mc13783); + + return ret; +} + +static int mc13783_gpo_regulator_disable(struct regulator_dev *rdev) +{ + struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev); + int id = rdev_get_id(rdev); + int ret; + u32 dis_val = 0; + + dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); + + /* Power Gate disable value is 1 */ + if (id == MC13783_REGU_PWGT1SPI || + id == MC13783_REGU_PWGT2SPI) + dis_val = mc13783_regulators[id].enable_bit; + + mc13783_lock(priv->mc13783); + ret = mc13783_powermisc_rmw(priv, mc13783_regulators[id].enable_bit, + dis_val); + mc13783_unlock(priv->mc13783); + + return ret; +} + +static int mc13783_gpo_regulator_is_enabled(struct regulator_dev *rdev) +{ + struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev); + int ret, id = rdev_get_id(rdev); + unsigned int val; + + mc13783_lock(priv->mc13783); + ret = mc13783_reg_read(priv->mc13783, mc13783_regulators[id].reg, &val); + mc13783_unlock(priv->mc13783); + + if (ret) + return ret; + + /* Power Gates state is stored in powermisc_pwgt_state + * where the meaning of bits is negated */ + val = (val & ~MC13783_REG_POWERMISC_PWGTSPI_M) | + (priv->powermisc_pwgt_state ^ MC13783_REG_POWERMISC_PWGTSPI_M); + + return (val & mc13783_regulators[id].enable_bit) != 0; +} + +static struct regulator_ops mc13783_gpo_regulator_ops = { + .enable = mc13783_gpo_regulator_enable, + .disable = mc13783_gpo_regulator_disable, + .is_enabled = mc13783_gpo_regulator_is_enabled, + .list_voltage = mc13783_regulator_list_voltage, + .set_voltage = mc13783_fixed_regulator_set_voltage, + .get_voltage = mc13783_fixed_regulator_get_voltage, }; static int __devinit mc13783_regulator_probe(struct platform_device *pdev) diff --git a/drivers/regulator/pcap-regulator.c b/drivers/regulator/pcap-regulator.c index 33d7d899e03..29d0566379a 100644 --- 
a/drivers/regulator/pcap-regulator.c +++ b/drivers/regulator/pcap-regulator.c @@ -288,16 +288,18 @@ static int __devexit pcap_regulator_remove(struct platform_device *pdev) struct regulator_dev *rdev = platform_get_drvdata(pdev); regulator_unregister(rdev); + platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver pcap_regulator_driver = { .driver = { - .name = "pcap-regulator", + .name = "pcap-regulator", + .owner = THIS_MODULE, }, - .probe = pcap_regulator_probe, - .remove = __devexit_p(pcap_regulator_remove), + .probe = pcap_regulator_probe, + .remove = __devexit_p(pcap_regulator_remove), }; static int __init pcap_regulator_init(void) diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c index 07fda0a75ad..1f183543bdb 100644 --- a/drivers/regulator/tps65023-regulator.c +++ b/drivers/regulator/tps65023-regulator.c @@ -457,8 +457,8 @@ static struct regulator_ops tps65023_ldo_ops = { .list_voltage = tps65023_ldo_list_voltage, }; -static -int tps_65023_probe(struct i2c_client *client, const struct i2c_device_id *id) +static int __devinit tps_65023_probe(struct i2c_client *client, + const struct i2c_device_id *id) { static int desc_id; const struct tps_info *info = (void *)id->driver_data; @@ -466,6 +466,7 @@ int tps_65023_probe(struct i2c_client *client, const struct i2c_device_id *id) struct regulator_dev *rdev; struct tps_pmic *tps; int i; + int error; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; @@ -475,7 +476,6 @@ int tps_65023_probe(struct i2c_client *client, const struct i2c_device_id *id) * coming from the board-evm file. */ init_data = client->dev.platform_data; - if (!init_data) return -EIO; @@ -502,21 +502,12 @@ int tps_65023_probe(struct i2c_client *client, const struct i2c_device_id *id) /* Register the regulators */ rdev = regulator_register(&tps->desc[i], &client->dev, - init_data, tps); + init_data, tps); if (IS_ERR(rdev)) { dev_err(&client->dev, "failed to register %s\n", id->name); - - /* Unregister */ - while (i) - regulator_unregister(tps->rdev[--i]); - - tps->client = NULL; - - /* clear the client data in i2c */ - i2c_set_clientdata(client, NULL); - kfree(tps); - return PTR_ERR(rdev); + error = PTR_ERR(rdev); + goto fail; } /* Save regulator for cleanup */ @@ -526,6 +517,13 @@ int tps_65023_probe(struct i2c_client *client, const struct i2c_device_id *id) i2c_set_clientdata(client, tps); return 0; + + fail: + while (--i >= 0) + regulator_unregister(tps->rdev[i]); + + kfree(tps); + return error; } /** @@ -539,13 +537,12 @@ static int __devexit tps_65023_remove(struct i2c_client *client) struct tps_pmic *tps = i2c_get_clientdata(client); int i; + /* clear the client data in i2c */ + i2c_set_clientdata(client, NULL); + for (i = 0; i < TPS65023_NUM_REGULATOR; i++) regulator_unregister(tps->rdev[i]); - tps->client = NULL; - - /* clear the client data in i2c */ - i2c_set_clientdata(client, NULL); kfree(tps); return 0; diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c index f8a6dfbef75..c2a9539acd7 100644 --- a/drivers/regulator/tps6507x-regulator.c +++ b/drivers/regulator/tps6507x-regulator.c @@ -538,8 +538,8 @@ static struct regulator_ops tps6507x_ldo_ops = { .list_voltage = tps6507x_ldo_list_voltage, }; -static -int tps_6507x_probe(struct i2c_client *client, const struct i2c_device_id *id) +static int __devinit tps_6507x_probe(struct i2c_client *client, + const struct i2c_device_id *id) { static int desc_id; const struct tps_info *info 
= (void *)id->driver_data; @@ -547,6 +547,7 @@ int tps_6507x_probe(struct i2c_client *client, const struct i2c_device_id *id) struct regulator_dev *rdev; struct tps_pmic *tps; int i; + int error; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) @@ -557,7 +558,6 @@ int tps_6507x_probe(struct i2c_client *client, const struct i2c_device_id *id) * coming from the board-evm file. */ init_data = client->dev.platform_data; - if (!init_data) return -EIO; @@ -586,18 +586,8 @@ int tps_6507x_probe(struct i2c_client *client, const struct i2c_device_id *id) if (IS_ERR(rdev)) { dev_err(&client->dev, "failed to register %s\n", id->name); - - /* Unregister */ - while (i) - regulator_unregister(tps->rdev[--i]); - - tps->client = NULL; - - /* clear the client data in i2c */ - i2c_set_clientdata(client, NULL); - - kfree(tps); - return PTR_ERR(rdev); + error = PTR_ERR(rdev); + goto fail; } /* Save regulator for cleanup */ @@ -607,6 +597,13 @@ int tps_6507x_probe(struct i2c_client *client, const struct i2c_device_id *id) i2c_set_clientdata(client, tps); return 0; + +fail: + while (--i >= 0) + regulator_unregister(tps->rdev[i]); + + kfree(tps); + return error; } /** @@ -620,13 +617,12 @@ static int __devexit tps_6507x_remove(struct i2c_client *client) struct tps_pmic *tps = i2c_get_clientdata(client); int i; + /* clear the client data in i2c */ + i2c_set_clientdata(client, NULL); + for (i = 0; i < TPS6507X_NUM_REGULATOR; i++) regulator_unregister(tps->rdev[i]); - tps->client = NULL; - - /* clear the client data in i2c */ - i2c_set_clientdata(client, NULL); kfree(tps); return 0; diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c index 7e674859bd5..9729d760fb4 100644 --- a/drivers/regulator/twl-regulator.c +++ b/drivers/regulator/twl-regulator.c @@ -519,19 +519,19 @@ static struct twlreg_info twl_regs[] = { /* 6030 REG with base as PMC Slave Misc : 0x0030 */ /* Turnon-delay and remap configuration values for 6030 are not verified since the specification is not public */ - TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1, 0, 0x08), - TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 2, 0, 0x08), - TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 3, 0, 0x08), - TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 4, 0, 0x08), - TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 5, 0, 0x08), - TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 7, 0, 0x08), - TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0, 0x08), - TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0, 0x08), - TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0, 0x08), - TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0, 0x08) + TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1, 0, 0x21), + TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 2, 0, 0x21), + TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 3, 0, 0x21), + TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 4, 0, 0x21), + TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 5, 0, 0x21), + TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 7, 0, 0x21), + TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0, 0x21), + TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0, 0x21), + TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0, 0x21), + TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0, 0x21) }; -static int twlreg_probe(struct platform_device *pdev) +static int __devinit twlreg_probe(struct platform_device *pdev) { int i; struct twlreg_info *info; diff --git a/drivers/regulator/virtual.c b/drivers/regulator/virtual.c index addc032c84b..d96cecaac73 100644 --- a/drivers/regulator/virtual.c +++ b/drivers/regulator/virtual.c @@ -19,7 +19,7 @@ struct virtual_consumer_data { struct mutex lock; struct regulator *regulator; - int enabled; + bool enabled; 
int min_uV; int max_uV; int min_uA; @@ -49,7 +49,7 @@ static void update_voltage_constraints(struct device *dev, dev_dbg(dev, "Enabling regulator\n"); ret = regulator_enable(data->regulator); if (ret == 0) - data->enabled = 1; + data->enabled = true; else dev_err(dev, "regulator_enable() failed: %d\n", ret); @@ -59,7 +59,7 @@ static void update_voltage_constraints(struct device *dev, dev_dbg(dev, "Disabling regulator\n"); ret = regulator_disable(data->regulator); if (ret == 0) - data->enabled = 0; + data->enabled = false; else dev_err(dev, "regulator_disable() failed: %d\n", ret); @@ -89,7 +89,7 @@ static void update_current_limit_constraints(struct device *dev, dev_dbg(dev, "Enabling regulator\n"); ret = regulator_enable(data->regulator); if (ret == 0) - data->enabled = 1; + data->enabled = true; else dev_err(dev, "regulator_enable() failed: %d\n", ret); @@ -99,7 +99,7 @@ static void update_current_limit_constraints(struct device *dev, dev_dbg(dev, "Disabling regulator\n"); ret = regulator_disable(data->regulator); if (ret == 0) - data->enabled = 0; + data->enabled = false; else dev_err(dev, "regulator_disable() failed: %d\n", ret); @@ -270,24 +270,28 @@ static DEVICE_ATTR(min_microamps, 0666, show_min_uA, set_min_uA); static DEVICE_ATTR(max_microamps, 0666, show_max_uA, set_max_uA); static DEVICE_ATTR(mode, 0666, show_mode, set_mode); -static struct device_attribute *attributes[] = { - &dev_attr_min_microvolts, - &dev_attr_max_microvolts, - &dev_attr_min_microamps, - &dev_attr_max_microamps, - &dev_attr_mode, +static struct attribute *regulator_virtual_attributes[] = { + &dev_attr_min_microvolts.attr, + &dev_attr_max_microvolts.attr, + &dev_attr_min_microamps.attr, + &dev_attr_max_microamps.attr, + &dev_attr_mode.attr, + NULL }; -static int regulator_virtual_consumer_probe(struct platform_device *pdev) +static const struct attribute_group regulator_virtual_attr_group = { + .attrs = regulator_virtual_attributes, +}; + +static int __devinit regulator_virtual_probe(struct platform_device *pdev) { char *reg_id = pdev->dev.platform_data; struct virtual_consumer_data *drvdata; - int ret, i; + int ret; drvdata = kzalloc(sizeof(struct virtual_consumer_data), GFP_KERNEL); - if (drvdata == NULL) { + if (drvdata == NULL) return -ENOMEM; - } mutex_init(&drvdata->lock); @@ -299,13 +303,12 @@ static int regulator_virtual_consumer_probe(struct platform_device *pdev) goto err; } - for (i = 0; i < ARRAY_SIZE(attributes); i++) { - ret = device_create_file(&pdev->dev, attributes[i]); - if (ret != 0) { - dev_err(&pdev->dev, "Failed to create attr %d: %d\n", - i, ret); - goto err_regulator; - } + ret = sysfs_create_group(&pdev->dev.kobj, + ®ulator_virtual_attr_group); + if (ret != 0) { + dev_err(&pdev->dev, + "Failed to create attribute group: %d\n", ret); + goto err_regulator; } drvdata->mode = regulator_get_mode(drvdata->regulator); @@ -317,37 +320,36 @@ static int regulator_virtual_consumer_probe(struct platform_device *pdev) err_regulator: regulator_put(drvdata->regulator); err: - for (i = 0; i < ARRAY_SIZE(attributes); i++) - device_remove_file(&pdev->dev, attributes[i]); kfree(drvdata); return ret; } -static int regulator_virtual_consumer_remove(struct platform_device *pdev) +static int __devexit regulator_virtual_remove(struct platform_device *pdev) { struct virtual_consumer_data *drvdata = platform_get_drvdata(pdev); - int i; - for (i = 0; i < ARRAY_SIZE(attributes); i++) - device_remove_file(&pdev->dev, attributes[i]); + sysfs_remove_group(&pdev->dev.kobj, ®ulator_virtual_attr_group); + if 
(drvdata->enabled) regulator_disable(drvdata->regulator); regulator_put(drvdata->regulator); kfree(drvdata); + platform_set_drvdata(pdev, NULL); + return 0; } static struct platform_driver regulator_virtual_consumer_driver = { - .probe = regulator_virtual_consumer_probe, - .remove = regulator_virtual_consumer_remove, + .probe = regulator_virtual_probe, + .remove = __devexit_p(regulator_virtual_remove), .driver = { .name = "reg-virt-consumer", + .owner = THIS_MODULE, }, }; - static int __init regulator_virtual_consumer_init(void) { return platform_driver_register(®ulator_virtual_consumer_driver); diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c index 0a6577577e8..6e18e56d850 100644 --- a/drivers/regulator/wm831x-dcdc.c +++ b/drivers/regulator/wm831x-dcdc.c @@ -600,6 +600,8 @@ static __devexit int wm831x_buckv_remove(struct platform_device *pdev) struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev); struct wm831x *wm831x = dcdc->wm831x; + platform_set_drvdata(pdev, NULL); + wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "HC"), dcdc); wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc); regulator_unregister(dcdc->regulator); @@ -615,6 +617,7 @@ static struct platform_driver wm831x_buckv_driver = { .remove = __devexit_p(wm831x_buckv_remove), .driver = { .name = "wm831x-buckv", + .owner = THIS_MODULE, }, }; @@ -769,6 +772,8 @@ static __devexit int wm831x_buckp_remove(struct platform_device *pdev) struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev); struct wm831x *wm831x = dcdc->wm831x; + platform_set_drvdata(pdev, NULL); + wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc); regulator_unregister(dcdc->regulator); kfree(dcdc); @@ -781,6 +786,7 @@ static struct platform_driver wm831x_buckp_driver = { .remove = __devexit_p(wm831x_buckp_remove), .driver = { .name = "wm831x-buckp", + .owner = THIS_MODULE, }, }; @@ -895,6 +901,8 @@ static __devexit int wm831x_boostp_remove(struct platform_device *pdev) struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev); struct wm831x *wm831x = dcdc->wm831x; + platform_set_drvdata(pdev, NULL); + wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc); regulator_unregister(dcdc->regulator); kfree(dcdc); @@ -907,6 +915,7 @@ static struct platform_driver wm831x_boostp_driver = { .remove = __devexit_p(wm831x_boostp_remove), .driver = { .name = "wm831x-boostp", + .owner = THIS_MODULE, }, }; @@ -979,6 +988,8 @@ static __devexit int wm831x_epe_remove(struct platform_device *pdev) { struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev); + platform_set_drvdata(pdev, NULL); + regulator_unregister(dcdc->regulator); kfree(dcdc); @@ -990,6 +1001,7 @@ static struct platform_driver wm831x_epe_driver = { .remove = __devexit_p(wm831x_epe_remove), .driver = { .name = "wm831x-epe", + .owner = THIS_MODULE, }, }; diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c index 48857008758..ca0f6b6c384 100644 --- a/drivers/regulator/wm831x-isink.c +++ b/drivers/regulator/wm831x-isink.c @@ -222,6 +222,8 @@ static __devexit int wm831x_isink_remove(struct platform_device *pdev) struct wm831x_isink *isink = platform_get_drvdata(pdev); struct wm831x *wm831x = isink->wm831x; + platform_set_drvdata(pdev, NULL); + wm831x_free_irq(wm831x, platform_get_irq(pdev, 0), isink); regulator_unregister(isink->regulator); @@ -235,6 +237,7 @@ static struct platform_driver wm831x_isink_driver = { .remove = __devexit_p(wm831x_isink_remove), .driver = { .name = "wm831x-isink", + .owner = THIS_MODULE, }, }; 
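The virtual-consumer conversion above replaces a hand-rolled device_create_file() loop with a single attribute group, which also removes the asymmetric cleanup in the error path. A minimal sketch of that pattern for a hypothetical platform driver follows; the names (example_*, foo) are illustrative only and not part of the patch.

	static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
				char *buf)
	{
		return sprintf(buf, "%d\n", 42);	/* placeholder value */
	}
	static DEVICE_ATTR(foo, 0444, foo_show, NULL);

	static struct attribute *example_attributes[] = {
		&dev_attr_foo.attr,
		NULL				/* list must be NULL-terminated */
	};

	static const struct attribute_group example_attr_group = {
		.attrs = example_attributes,
	};

	static int __devinit example_probe(struct platform_device *pdev)
	{
		/* one call creates every attribute, or fails as a whole */
		return sysfs_create_group(&pdev->dev.kobj, &example_attr_group);
	}

	static int __devexit example_remove(struct platform_device *pdev)
	{
		sysfs_remove_group(&pdev->dev.kobj, &example_attr_group);
		return 0;
	}

Compared with per-file device_create_file() calls, the group either registers completely or not at all, so the probe error path only has to undo what came before it.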
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c index 61e02ac2fda..d2406c1519a 100644 --- a/drivers/regulator/wm831x-ldo.c +++ b/drivers/regulator/wm831x-ldo.c @@ -371,6 +371,8 @@ static __devexit int wm831x_gp_ldo_remove(struct platform_device *pdev) struct wm831x_ldo *ldo = platform_get_drvdata(pdev); struct wm831x *wm831x = ldo->wm831x; + platform_set_drvdata(pdev, NULL); + wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), ldo); regulator_unregister(ldo->regulator); kfree(ldo); @@ -383,6 +385,7 @@ static struct platform_driver wm831x_gp_ldo_driver = { .remove = __devexit_p(wm831x_gp_ldo_remove), .driver = { .name = "wm831x-ldo", + .owner = THIS_MODULE, }, }; @@ -640,6 +643,7 @@ static struct platform_driver wm831x_aldo_driver = { .remove = __devexit_p(wm831x_aldo_remove), .driver = { .name = "wm831x-aldo", + .owner = THIS_MODULE, }, }; @@ -811,6 +815,7 @@ static struct platform_driver wm831x_alive_ldo_driver = { .remove = __devexit_p(wm831x_alive_ldo_remove), .driver = { .name = "wm831x-alive-ldo", + .owner = THIS_MODULE, }, }; diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c index e7b89e704af..94227dd6ba7 100644 --- a/drivers/regulator/wm8350-regulator.c +++ b/drivers/regulator/wm8350-regulator.c @@ -290,6 +290,51 @@ static int wm8350_isink_is_enabled(struct regulator_dev *rdev) return -EINVAL; } +static int wm8350_isink_enable_time(struct regulator_dev *rdev) +{ + struct wm8350 *wm8350 = rdev_get_drvdata(rdev); + int isink = rdev_get_id(rdev); + int reg; + + switch (isink) { + case WM8350_ISINK_A: + reg = wm8350_reg_read(wm8350, WM8350_CSA_FLASH_CONTROL); + break; + case WM8350_ISINK_B: + reg = wm8350_reg_read(wm8350, WM8350_CSB_FLASH_CONTROL); + break; + default: + return -EINVAL; + } + + if (reg & WM8350_CS1_FLASH_MODE) { + switch (reg & WM8350_CS1_ON_RAMP_MASK) { + case 0: + return 0; + case 1: + return 1950; + case 2: + return 3910; + case 3: + return 7800; + } + } else { + switch (reg & WM8350_CS1_ON_RAMP_MASK) { + case 0: + return 0; + case 1: + return 250000; + case 2: + return 500000; + case 3: + return 1000000; + } + } + + return -EINVAL; +} + + int wm8350_isink_set_flash(struct wm8350 *wm8350, int isink, u16 mode, u16 trigger, u16 duration, u16 on_ramp, u16 off_ramp, u16 drive) @@ -1221,6 +1266,7 @@ static struct regulator_ops wm8350_isink_ops = { .enable = wm8350_isink_enable, .disable = wm8350_isink_disable, .is_enabled = wm8350_isink_is_enabled, + .enable_time = wm8350_isink_enable_time, }; static struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = { diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c index d9a2c988c6e..924c7eb29ee 100644 --- a/drivers/regulator/wm8400-regulator.c +++ b/drivers/regulator/wm8400-regulator.c @@ -317,14 +317,17 @@ static struct regulator_desc regulators[] = { static int __devinit wm8400_regulator_probe(struct platform_device *pdev) { + struct wm8400 *wm8400 = container_of(pdev, struct wm8400, regulators[pdev->id]); struct regulator_dev *rdev; rdev = regulator_register(®ulators[pdev->id], &pdev->dev, - pdev->dev.platform_data, dev_get_drvdata(&pdev->dev)); + pdev->dev.platform_data, wm8400); if (IS_ERR(rdev)) return PTR_ERR(rdev); + platform_set_drvdata(pdev, rdev); + return 0; } @@ -332,6 +335,7 @@ static int __devexit wm8400_regulator_remove(struct platform_device *pdev) { struct regulator_dev *rdev = platform_get_drvdata(pdev); + platform_set_drvdata(pdev, NULL); regulator_unregister(rdev); return 0; @@ -370,7 +374,6 
@@ int wm8400_register_regulator(struct device *dev, int reg, wm8400->regulators[reg].id = reg; wm8400->regulators[reg].dev.parent = dev; wm8400->regulators[reg].dev.platform_data = initdata; - dev_set_drvdata(&wm8400->regulators[reg].dev, wm8400); return platform_device_register(&wm8400->regulators[reg]); } diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c new file mode 100644 index 00000000000..95454a4637b --- /dev/null +++ b/drivers/regulator/wm8994-regulator.c @@ -0,0 +1,307 @@ +/* + * wm8994-regulator.c -- Regulator driver for the WM8994 + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/init.h> +#include <linux/bitops.h> +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/regulator/driver.h> +#include <linux/gpio.h> + +#include <linux/mfd/wm8994/core.h> +#include <linux/mfd/wm8994/registers.h> +#include <linux/mfd/wm8994/pdata.h> + +struct wm8994_ldo { + int enable; + bool is_enabled; + struct regulator_dev *regulator; + struct wm8994 *wm8994; +}; + +#define WM8994_LDO1_MAX_SELECTOR 0x7 +#define WM8994_LDO2_MAX_SELECTOR 0x3 + +static int wm8994_ldo_enable(struct regulator_dev *rdev) +{ + struct wm8994_ldo *ldo = rdev_get_drvdata(rdev); + + /* If we have no soft control assume that the LDO is always enabled. */ + if (!ldo->enable) + return 0; + + gpio_set_value(ldo->enable, 1); + ldo->is_enabled = true; + + return 0; +} + +static int wm8994_ldo_disable(struct regulator_dev *rdev) +{ + struct wm8994_ldo *ldo = rdev_get_drvdata(rdev); + + /* If we have no soft control assume that the LDO is always enabled. */ + if (!ldo->enable) + return -EINVAL; + + gpio_set_value(ldo->enable, 0); + ldo->is_enabled = false; + + return 0; +} + +static int wm8994_ldo_is_enabled(struct regulator_dev *rdev) +{ + struct wm8994_ldo *ldo = rdev_get_drvdata(rdev); + + return ldo->is_enabled; +} + +static int wm8994_ldo_enable_time(struct regulator_dev *rdev) +{ + /* 3ms is fairly conservative but this shouldn't be too performance + * critical; can be tweaked per-system if required. 
*/ + return 3000; +} + +static int wm8994_ldo1_list_voltage(struct regulator_dev *rdev, + unsigned int selector) +{ + if (selector > WM8994_LDO1_MAX_SELECTOR) + return -EINVAL; + + return (selector * 100000) + 2400000; +} + +static int wm8994_ldo1_get_voltage(struct regulator_dev *rdev) +{ + struct wm8994_ldo *ldo = rdev_get_drvdata(rdev); + int val; + + val = wm8994_reg_read(ldo->wm8994, WM8994_LDO_1); + if (val < 0) + return val; + + val = (val & WM8994_LDO1_VSEL_MASK) >> WM8994_LDO1_VSEL_SHIFT; + + return wm8994_ldo1_list_voltage(rdev, val); +} + +static int wm8994_ldo1_set_voltage(struct regulator_dev *rdev, + int min_uV, int max_uV) +{ + struct wm8994_ldo *ldo = rdev_get_drvdata(rdev); + int selector, v; + + selector = (min_uV - 2400000) / 100000; + v = wm8994_ldo1_list_voltage(rdev, selector); + if (v < 0 || v > max_uV) + return -EINVAL; + + selector <<= WM8994_LDO1_VSEL_SHIFT; + + return wm8994_set_bits(ldo->wm8994, WM8994_LDO_1, + WM8994_LDO1_VSEL_MASK, selector); +} + +static struct regulator_ops wm8994_ldo1_ops = { + .enable = wm8994_ldo_enable, + .disable = wm8994_ldo_disable, + .is_enabled = wm8994_ldo_is_enabled, + .enable_time = wm8994_ldo_enable_time, + + .list_voltage = wm8994_ldo1_list_voltage, + .get_voltage = wm8994_ldo1_get_voltage, + .set_voltage = wm8994_ldo1_set_voltage, +}; + +static int wm8994_ldo2_list_voltage(struct regulator_dev *rdev, + unsigned int selector) +{ + if (selector > WM8994_LDO2_MAX_SELECTOR) + return -EINVAL; + + return (selector * 100000) + 900000; +} + +static int wm8994_ldo2_get_voltage(struct regulator_dev *rdev) +{ + struct wm8994_ldo *ldo = rdev_get_drvdata(rdev); + int val; + + val = wm8994_reg_read(ldo->wm8994, WM8994_LDO_2); + if (val < 0) + return val; + + val = (val & WM8994_LDO2_VSEL_MASK) >> WM8994_LDO2_VSEL_SHIFT; + + return wm8994_ldo2_list_voltage(rdev, val); +} + +static int wm8994_ldo2_set_voltage(struct regulator_dev *rdev, + int min_uV, int max_uV) +{ + struct wm8994_ldo *ldo = rdev_get_drvdata(rdev); + int selector, v; + + selector = (min_uV - 900000) / 100000; + v = wm8994_ldo2_list_voltage(rdev, selector); + if (v < 0 || v > max_uV) + return -EINVAL; + + selector <<= WM8994_LDO2_VSEL_SHIFT; + + return wm8994_set_bits(ldo->wm8994, WM8994_LDO_2, + WM8994_LDO2_VSEL_MASK, selector); +} + +static struct regulator_ops wm8994_ldo2_ops = { + .enable = wm8994_ldo_enable, + .disable = wm8994_ldo_disable, + .is_enabled = wm8994_ldo_is_enabled, + .enable_time = wm8994_ldo_enable_time, + + .list_voltage = wm8994_ldo2_list_voltage, + .get_voltage = wm8994_ldo2_get_voltage, + .set_voltage = wm8994_ldo2_set_voltage, +}; + +static struct regulator_desc wm8994_ldo_desc[] = { + { + .name = "LDO1", + .id = 1, + .type = REGULATOR_VOLTAGE, + .n_voltages = WM8994_LDO1_MAX_SELECTOR + 1, + .ops = &wm8994_ldo1_ops, + .owner = THIS_MODULE, + }, + { + .name = "LDO2", + .id = 2, + .type = REGULATOR_VOLTAGE, + .n_voltages = WM8994_LDO2_MAX_SELECTOR + 1, + .ops = &wm8994_ldo2_ops, + .owner = THIS_MODULE, + }, +}; + +static __devinit int wm8994_ldo_probe(struct platform_device *pdev) +{ + struct wm8994 *wm8994 = dev_get_drvdata(pdev->dev.parent); + struct wm8994_pdata *pdata = wm8994->dev->platform_data; + int id = pdev->id % ARRAY_SIZE(pdata->ldo); + struct wm8994_ldo *ldo; + int ret; + + dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1); + + if (!pdata) + return -ENODEV; + + ldo = kzalloc(sizeof(struct wm8994_ldo), GFP_KERNEL); + if (ldo == NULL) { + dev_err(&pdev->dev, "Unable to allocate private data\n"); + return -ENOMEM; + } + + ldo->wm8994 = wm8994; + + 
ldo->is_enabled = true; + + if (pdata->ldo[id].enable && gpio_is_valid(pdata->ldo[id].enable)) { + ldo->enable = pdata->ldo[id].enable; + + ret = gpio_request(ldo->enable, "WM8994 LDO enable"); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to get enable GPIO: %d\n", + ret); + goto err; + } + + ret = gpio_direction_output(ldo->enable, ldo->is_enabled); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to set GPIO up: %d\n", + ret); + goto err_gpio; + } + } + + ldo->regulator = regulator_register(&wm8994_ldo_desc[id], &pdev->dev, + pdata->ldo[id].init_data, ldo); + if (IS_ERR(ldo->regulator)) { + ret = PTR_ERR(ldo->regulator); + dev_err(wm8994->dev, "Failed to register LDO%d: %d\n", + id + 1, ret); + goto err_gpio; + } + + platform_set_drvdata(pdev, ldo); + + return 0; + +err_gpio: + if (gpio_is_valid(ldo->enable)) + gpio_free(ldo->enable); +err: + kfree(ldo); + return ret; +} + +static __devexit int wm8994_ldo_remove(struct platform_device *pdev) +{ + struct wm8994_ldo *ldo = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + + regulator_unregister(ldo->regulator); + if (gpio_is_valid(ldo->enable)) + gpio_free(ldo->enable); + kfree(ldo); + + return 0; +} + +static struct platform_driver wm8994_ldo_driver = { + .probe = wm8994_ldo_probe, + .remove = __devexit_p(wm8994_ldo_remove), + .driver = { + .name = "wm8994-ldo", + .owner = THIS_MODULE, + }, +}; + +static int __init wm8994_ldo_init(void) +{ + int ret; + + ret = platform_driver_register(&wm8994_ldo_driver); + if (ret != 0) + pr_err("Failed to register Wm8994 GP LDO driver: %d\n", ret); + + return ret; +} +subsys_initcall(wm8994_ldo_init); + +static void __exit wm8994_ldo_exit(void) +{ + platform_driver_unregister(&wm8994_ldo_driver); +} +module_exit(wm8994_ldo_exit); + +/* Module information */ +MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); +MODULE_DESCRIPTION("WM8994 LDO driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:wm8994-ldo"); diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c index d935b2d04f9..ae0251ef6f4 100644 --- a/drivers/serial/68328serial.c +++ b/drivers/serial/68328serial.c @@ -153,8 +153,6 @@ static int baud_table[] = { 0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800, 9600, 19200, 38400, 57600, 115200, 0 }; -#define BAUD_TABLE_SIZE (sizeof(baud_table)/sizeof(baud_table[0])) - /* Sets or clears DTR/RTS on the requested line */ static inline void m68k_rtsdtr(struct m68k_serial *ss, int set) { @@ -1406,10 +1404,10 @@ static void m68328_set_baud(void) USTCNT = ustcnt & ~USTCNT_TXEN; again: - for (i = 0; i < sizeof(baud_table) / sizeof(baud_table[0]); i++) + for (i = 0; i < ARRAY_SIZE(baud_table); i++) if (baud_table[i] == m68328_console_baud) break; - if (i >= sizeof(baud_table) / sizeof(baud_table[0])) { + if (i >= ARRAY_SIZE(baud_table)) { m68328_console_baud = 9600; goto again; } @@ -1435,7 +1433,7 @@ int m68328_console_setup(struct console *cp, char *arg) if (arg) n = simple_strtoul(arg,NULL,0); - for (i = 0; i < BAUD_TABLE_SIZE; i++) + for (i = 0; i < ARRAY_SIZE(baud_table); i++) if (baud_table[i] == n) break; if (i < BAUD_TABLE_SIZE) { diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index a81ff7bc5fa..7c4ebe6ee18 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c @@ -2690,6 +2690,15 @@ static void __init serial8250_isa_init_ports(void) } } +static void +serial8250_init_fixed_type_port(struct uart_8250_port *up, unsigned int type) +{ + up->port.type = type; + up->port.fifosize = uart_config[type].fifo_size; + 
up->capabilities = uart_config[type].flags; + up->tx_loadsz = uart_config[type].tx_loadsz; +} + static void __init serial8250_register_ports(struct uart_driver *drv, struct device *dev) { @@ -2706,6 +2715,10 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev) struct uart_8250_port *up = &serial8250_ports[i]; up->port.dev = dev; + + if (up->port.flags & UPF_FIXED_TYPE) + serial8250_init_fixed_type_port(up, up->port.type); + uart_add_one_port(drv, &up->port); } } @@ -3118,12 +3131,8 @@ int serial8250_register_port(struct uart_port *port) if (port->dev) uart->port.dev = port->dev; - if (port->flags & UPF_FIXED_TYPE) { - uart->port.type = port->type; - uart->port.fifosize = uart_config[port->type].fifo_size; - uart->capabilities = uart_config[port->type].flags; - uart->tx_loadsz = uart_config[port->type].tx_loadsz; - } + if (port->flags & UPF_FIXED_TYPE) + serial8250_init_fixed_type_port(uart, port->type); set_io_from_upio(&uart->port); /* Possibly override default I/O functions. */ diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c index b28af13c45a..01c012da4e2 100644 --- a/drivers/serial/8250_pci.c +++ b/drivers/serial/8250_pci.c @@ -760,7 +760,8 @@ static int pci_netmos_init(struct pci_dev *dev) /* subdevice 0x00PS means <P> parallel, <S> serial */ unsigned int num_serial = dev->subsystem_device & 0xf; - if (dev->device == PCI_DEVICE_ID_NETMOS_9901) + if ((dev->device == PCI_DEVICE_ID_NETMOS_9901) || + (dev->device == PCI_DEVICE_ID_NETMOS_9865)) return 0; if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM && dev->subsystem_device == 0x0299) @@ -1479,6 +1480,7 @@ enum pci_board_num_t { pbn_b0_bt_1_115200, pbn_b0_bt_2_115200, + pbn_b0_bt_4_115200, pbn_b0_bt_8_115200, pbn_b0_bt_1_460800, @@ -1703,6 +1705,12 @@ static struct pciserial_board pci_boards[] __devinitdata = { .base_baud = 115200, .uart_offset = 8, }, + [pbn_b0_bt_4_115200] = { + .flags = FL_BASE0|FL_BASE_BARS, + .num_ports = 4, + .base_baud = 115200, + .uart_offset = 8, + }, [pbn_b0_bt_8_115200] = { .flags = FL_BASE0|FL_BASE_BARS, .num_ports = 8, @@ -3191,6 +3199,15 @@ static struct pci_device_id serial_pci_tbl[] = { 0x1208, 0x0004, 0, 0, pbn_b0_4_921600 }, + { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF2, + 0x1204, 0x0004, 0, 0, + pbn_b0_4_921600 }, + { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF2, + 0x1208, 0x0004, 0, 0, + pbn_b0_4_921600 }, + { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF3, + 0x1208, 0x0004, 0, 0, + pbn_b0_4_921600 }, /* * Dell Remote Access Card 4 - Tim_T_Murphy@Dell.com */ @@ -3649,6 +3666,18 @@ static struct pci_device_id serial_pci_tbl[] = { 0, 0, pbn_b0_1_115200 }, /* + * Best Connectivity PCI Multi I/O cards + */ + + { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, + 0xA000, 0x1000, + 0, 0, pbn_b0_1_115200 }, + + { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865, + 0xA000, 0x3004, + 0, 0, pbn_b0_bt_4_115200 }, + + /* * These entries match devices with class COMMUNICATION_SERIAL, * COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL */ diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index 888a0ce91c4..746e07033dc 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig @@ -1418,42 +1418,37 @@ config SERIAL_BFIN_SPORT To compile this driver as a module, choose M here: the module will be called bfin_sport_uart. 
-choice - prompt "Baud rate for Blackfin SPORT UART" - depends on SERIAL_BFIN_SPORT - default SERIAL_SPORT_BAUD_RATE_57600 - help - Choose a baud rate for the SPORT UART, other uart settings are - 8 bit, 1 stop bit, no parity, no flow control. - -config SERIAL_SPORT_BAUD_RATE_115200 - bool "115200" - -config SERIAL_SPORT_BAUD_RATE_57600 - bool "57600" +config SERIAL_BFIN_SPORT_CONSOLE + bool "Console on Blackfin sport emulated uart" + depends on SERIAL_BFIN_SPORT=y + select SERIAL_CORE_CONSOLE -config SERIAL_SPORT_BAUD_RATE_38400 - bool "38400" +config SERIAL_BFIN_SPORT0_UART + bool "Enable UART over SPORT0" + depends on SERIAL_BFIN_SPORT && !(BF542 || BF542M || BF544 || BF544M) + help + Enable UART over SPORT0 -config SERIAL_SPORT_BAUD_RATE_19200 - bool "19200" +config SERIAL_BFIN_SPORT1_UART + bool "Enable UART over SPORT1" + depends on SERIAL_BFIN_SPORT + help + Enable UART over SPORT1 -config SERIAL_SPORT_BAUD_RATE_9600 - bool "9600" -endchoice +config SERIAL_BFIN_SPORT2_UART + bool "Enable UART over SPORT2" + depends on SERIAL_BFIN_SPORT && (BF54x || BF538 || BF539) + help + Enable UART over SPORT2 -config SPORT_BAUD_RATE - int - depends on SERIAL_BFIN_SPORT - default 115200 if (SERIAL_SPORT_BAUD_RATE_115200) - default 57600 if (SERIAL_SPORT_BAUD_RATE_57600) - default 38400 if (SERIAL_SPORT_BAUD_RATE_38400) - default 19200 if (SERIAL_SPORT_BAUD_RATE_19200) - default 9600 if (SERIAL_SPORT_BAUD_RATE_9600) +config SERIAL_BFIN_SPORT3_UART + bool "Enable UART over SPORT3" + depends on SERIAL_BFIN_SPORT && (BF54x || BF538 || BF539) + help + Enable UART over SPORT3 config SERIAL_TIMBERDALE tristate "Support for timberdale UART" - depends on MFD_TIMBERDALE select SERIAL_CORE ---help--- Add support for UART controller on timberdale. diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c index 9d948bccafa..2c9bf9b6832 100644 --- a/drivers/serial/atmel_serial.c +++ b/drivers/serial/atmel_serial.c @@ -1213,6 +1213,24 @@ static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser) return ret; } +#ifdef CONFIG_CONSOLE_POLL +static int atmel_poll_get_char(struct uart_port *port) +{ + while (!(UART_GET_CSR(port) & ATMEL_US_RXRDY)) + cpu_relax(); + + return UART_GET_CHAR(port); +} + +static void atmel_poll_put_char(struct uart_port *port, unsigned char ch) +{ + while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY)) + cpu_relax(); + + UART_PUT_CHAR(port, ch); +} +#endif + static struct uart_ops atmel_pops = { .tx_empty = atmel_tx_empty, .set_mctrl = atmel_set_mctrl, @@ -1232,6 +1250,10 @@ static struct uart_ops atmel_pops = { .config_port = atmel_config_port, .verify_port = atmel_verify_port, .pm = atmel_serial_pm, +#ifdef CONFIG_CONSOLE_POLL + .poll_get_char = atmel_poll_get_char, + .poll_put_char = atmel_poll_put_char, +#endif }; /* diff --git a/drivers/serial/bcm63xx_uart.c b/drivers/serial/bcm63xx_uart.c index 37ad0c44993..a1a0e55d080 100644 --- a/drivers/serial/bcm63xx_uart.c +++ b/drivers/serial/bcm63xx_uart.c @@ -35,7 +35,7 @@ #include <bcm63xx_regs.h> #include <bcm63xx_io.h> -#define BCM63XX_NR_UARTS 1 +#define BCM63XX_NR_UARTS 2 static struct uart_port ports[BCM63XX_NR_UARTS]; @@ -784,7 +784,7 @@ static struct uart_driver bcm_uart_driver = { .dev_name = "ttyS", .major = TTY_MAJOR, .minor = 64, - .nr = 1, + .nr = BCM63XX_NR_UARTS, .cons = BCM63XX_CONSOLE, }; @@ -826,11 +826,12 @@ static int __devinit bcm_uart_probe(struct platform_device *pdev) port->dev = &pdev->dev; port->fifosize = 16; port->uartclk = clk_get_rate(clk) / 2; + port->line = pdev->id; 
clk_put(clk); ret = uart_add_one_port(&bcm_uart_driver, port); if (ret) { - kfree(port); + ports[pdev->id].membase = 0; return ret; } platform_set_drvdata(pdev, port); diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c index 50abb7e557f..fcf273e3f48 100644 --- a/drivers/serial/bfin_5xx.c +++ b/drivers/serial/bfin_5xx.c @@ -14,6 +14,7 @@ #include <linux/module.h> #include <linux/ioport.h> +#include <linux/io.h> #include <linux/init.h> #include <linux/console.h> #include <linux/sysrq.h> @@ -237,7 +238,8 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart) #if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \ defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE) - if (kgdb_connected && kgdboc_port_line == uart->port.line) + if (kgdb_connected && kgdboc_port_line == uart->port.line + && kgdboc_break_enabled) if (ch == 0x3) {/* Ctrl + C */ kgdb_breakpoint(); return; @@ -488,6 +490,7 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart) { int x_pos, pos; + dma_disable_irq(uart->tx_dma_channel); dma_disable_irq(uart->rx_dma_channel); spin_lock_bh(&uart->port.lock); @@ -521,6 +524,7 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart) } spin_unlock_bh(&uart->port.lock); + dma_enable_irq(uart->tx_dma_channel); dma_enable_irq(uart->rx_dma_channel); mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES); @@ -746,15 +750,6 @@ static int bfin_serial_startup(struct uart_port *port) Status interrupt.\n"); } - if (uart->cts_pin >= 0) { - gpio_request(uart->cts_pin, DRIVER_NAME); - gpio_direction_output(uart->cts_pin, 1); - } - if (uart->rts_pin >= 0) { - gpio_request(uart->rts_pin, DRIVER_NAME); - gpio_direction_output(uart->rts_pin, 0); - } - /* CTS RTS PINs are negative assertive. */ UART_PUT_MCR(uart, ACTS); UART_SET_IER(uart, EDSSI); @@ -801,10 +796,6 @@ static void bfin_serial_shutdown(struct uart_port *port) gpio_free(uart->rts_pin); #endif #ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS - if (uart->cts_pin >= 0) - gpio_free(uart->cts_pin); - if (uart->rts_pin >= 0) - gpio_free(uart->rts_pin); if (UART_GET_IER(uart) && EDSSI) free_irq(uart->status_irq, uart); #endif @@ -1409,8 +1400,7 @@ static int bfin_serial_remove(struct platform_device *dev) continue; uart_remove_one_port(&bfin_serial_reg, &bfin_serial_ports[i].port); bfin_serial_ports[i].port.dev = NULL; -#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \ - defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS) +#if defined(CONFIG_SERIAL_BFIN_CTSRTS) gpio_free(bfin_serial_ports[i].cts_pin); gpio_free(bfin_serial_ports[i].rts_pin); #endif diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c index 088bb35475f..7c72888fbf9 100644 --- a/drivers/serial/bfin_sport_uart.c +++ b/drivers/serial/bfin_sport_uart.c @@ -1,27 +1,11 @@ /* - * File: linux/drivers/serial/bfin_sport_uart.c + * Blackfin On-Chip Sport Emulated UART Driver * - * Based on: drivers/serial/bfin_5xx.c by Aubrey Li. - * Author: Roy Huang <roy.huang@analog.com> + * Copyright 2006-2009 Analog Devices Inc. * - * Created: Nov 22, 2006 - * Copyright: (c) 2006-2007 Analog Devices Inc. - * Description: this driver enable SPORTs on Blackfin emulate UART. + * Enter bugs at http://blackfin.uclinux.org/ * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see the file COPYING, or write - * to the Free Software Foundation, Inc., - * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * Licensed under the GPL-2 or later. */ /* @@ -29,39 +13,18 @@ * http://www.analog.com/UploadedFiles/Application_Notes/399447663EE191.pdf * This application note describe how to implement a UART on a Sharc DSP, * but this driver is implemented on Blackfin Processor. + * Transmit Frame Sync is not used by this driver to transfer data out. */ -/* After reset, there is a prelude of low level pulse when transmit data first - * time. No addtional pulse in following transmit. - * According to document: - * The SPORTs are ready to start transmitting or receiving data no later than - * three serial clock cycles after they are enabled in the SPORTx_TCR1 or - * SPORTx_RCR1 register. No serial clock cycles are lost from this point on. - * The first internal frame sync will occur one frame sync delay after the - * SPORTs are ready. External frame syncs can occur as soon as the SPORT is - * ready. - */ +/* #define DEBUG */ -/* Thanks to Axel Alatalo <axel@rubico.se> for fixing sport rx bug. Sometimes - * sport receives data incorrectly. The following is Axel's words. - * As EE-191, sport rx samples 3 times of the UART baudrate and takes the - * middle smaple of every 3 samples as the data bit. For a 8-N-1 UART setting, - * 30 samples will be required for a byte. If transmitter sends a 1/3 bit short - * byte due to buadrate drift, then the 30th sample of a byte, this sample is - * also the third sample of the stop bit, will happens on the immediately - * following start bit which will be thrown away and missed. Thus since parts - * of the startbit will be missed and the receiver will begin to drift, the - * effect accumulates over time until synchronization is lost. - * If only require 2 samples of the stopbit (by sampling in total 29 samples), - * then a to short byte as in the case above will be tolerated. Then the 1/3 - * early startbit will trigger a framesync since the last read is complete - * after only 2/3 stopbit and framesync is active during the last 1/3 looking - * for a possible early startbit. 
*/ - -//#define DEBUG +#define DRV_NAME "bfin-sport-uart" +#define DEVICE_NAME "ttySS" +#define pr_fmt(fmt) DRV_NAME ": " fmt #include <linux/module.h> #include <linux/ioport.h> +#include <linux/io.h> #include <linux/init.h> #include <linux/console.h> #include <linux/sysrq.h> @@ -75,23 +38,36 @@ #include "bfin_sport_uart.h" +#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART unsigned short bfin_uart_pin_req_sport0[] = {P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, \ P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0}; - +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART unsigned short bfin_uart_pin_req_sport1[] = {P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, \ P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0}; - -#define DRV_NAME "bfin-sport-uart" +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART +unsigned short bfin_uart_pin_req_sport2[] = + {P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS, \ + P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0}; +#endif +#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART +unsigned short bfin_uart_pin_req_sport3[] = + {P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS, \ + P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0}; +#endif struct sport_uart_port { struct uart_port port; - char *name; - - int tx_irq; - int rx_irq; int err_irq; + unsigned short csize; + unsigned short rxmask; + unsigned short txmask1; + unsigned short txmask2; + unsigned char stopb; +/* unsigned char parib; */ }; static void sport_uart_tx_chars(struct sport_uart_port *up); @@ -99,36 +75,42 @@ static void sport_stop_tx(struct uart_port *port); static inline void tx_one_byte(struct sport_uart_port *up, unsigned int value) { - pr_debug("%s value:%x\n", __func__, value); - /* Place a Start and Stop bit */ + pr_debug("%s value:%x, mask1=0x%x, mask2=0x%x\n", __func__, value, + up->txmask1, up->txmask2); + + /* Place Start and Stop bits */ __asm__ __volatile__ ( - "R2 = b#01111111100;" - "R3 = b#10000000001;" - "%0 <<= 2;" - "%0 = %0 & R2;" - "%0 = %0 | R3;" - : "=d"(value) - : "d"(value) - : "ASTAT", "R2", "R3" + "%[val] <<= 1;" + "%[val] = %[val] & %[mask1];" + "%[val] = %[val] | %[mask2];" + : [val]"+d"(value) + : [mask1]"d"(up->txmask1), [mask2]"d"(up->txmask2) + : "ASTAT" ); pr_debug("%s value:%x\n", __func__, value); SPORT_PUT_TX(up, value); } -static inline unsigned int rx_one_byte(struct sport_uart_port *up) +static inline unsigned char rx_one_byte(struct sport_uart_port *up) { - unsigned int value, extract; + unsigned int value; + unsigned char extract; u32 tmp_mask1, tmp_mask2, tmp_shift, tmp; - value = SPORT_GET_RX32(up); - pr_debug("%s value:%x\n", __func__, value); + if ((up->csize + up->stopb) > 7) + value = SPORT_GET_RX32(up); + else + value = SPORT_GET_RX(up); + + pr_debug("%s value:%x, cs=%d, mask=0x%x\n", __func__, value, + up->csize, up->rxmask); - /* Extract 8 bits data */ + /* Extract data */ __asm__ __volatile__ ( "%[extr] = 0;" - "%[mask1] = 0x1801(Z);" - "%[mask2] = 0x0300(Z);" + "%[mask1] = %[rxmask];" + "%[mask2] = 0x0200(Z);" "%[shift] = 0;" "LSETUP(.Lloop_s, .Lloop_e) LC0 = %[lc];" ".Lloop_s:" @@ -138,9 +120,9 @@ static inline unsigned int rx_one_byte(struct sport_uart_port *up) "%[mask1] = %[mask1] - %[mask2];" ".Lloop_e:" "%[shift] += 1;" - : [val]"=d"(value), [extr]"=d"(extract), [shift]"=d"(tmp_shift), [tmp]"=d"(tmp), - [mask1]"=d"(tmp_mask1), [mask2]"=d"(tmp_mask2) - : "d"(value), [lc]"a"(8) + : [extr]"=&d"(extract), [shift]"=&d"(tmp_shift), [tmp]"=&d"(tmp), + 
[mask1]"=&d"(tmp_mask1), [mask2]"=&d"(tmp_mask2) + : [val]"d"(value), [rxmask]"d"(up->rxmask), [lc]"a"(up->csize) : "ASTAT", "LB0", "LC0", "LT0" ); @@ -148,29 +130,28 @@ static inline unsigned int rx_one_byte(struct sport_uart_port *up) return extract; } -static int sport_uart_setup(struct sport_uart_port *up, int sclk, int baud_rate) +static int sport_uart_setup(struct sport_uart_port *up, int size, int baud_rate) { - int tclkdiv, tfsdiv, rclkdiv; + int tclkdiv, rclkdiv; + unsigned int sclk = get_sclk(); - /* Set TCR1 and TCR2 */ - SPORT_PUT_TCR1(up, (LATFS | ITFS | TFSR | TLSBIT | ITCLK)); - SPORT_PUT_TCR2(up, 10); + /* Set TCR1 and TCR2, TFSR is not enabled for uart */ + SPORT_PUT_TCR1(up, (ITFS | TLSBIT | ITCLK)); + SPORT_PUT_TCR2(up, size + 1); pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up)); /* Set RCR1 and RCR2 */ SPORT_PUT_RCR1(up, (RCKFE | LARFS | LRFS | RFSR | IRCLK)); - SPORT_PUT_RCR2(up, 28); + SPORT_PUT_RCR2(up, (size + 1) * 2 - 1); pr_debug("%s RCR1:%x, RCR2:%x\n", __func__, SPORT_GET_RCR1(up), SPORT_GET_RCR2(up)); - tclkdiv = sclk/(2 * baud_rate) - 1; - tfsdiv = 12; - rclkdiv = sclk/(2 * baud_rate * 3) - 1; + tclkdiv = sclk / (2 * baud_rate) - 1; + rclkdiv = sclk / (2 * baud_rate * 2) - 1; SPORT_PUT_TCLKDIV(up, tclkdiv); - SPORT_PUT_TFSDIV(up, tfsdiv); SPORT_PUT_RCLKDIV(up, rclkdiv); SSYNC(); - pr_debug("%s sclk:%d, baud_rate:%d, tclkdiv:%d, tfsdiv:%d, rclkdiv:%d\n", - __func__, sclk, baud_rate, tclkdiv, tfsdiv, rclkdiv); + pr_debug("%s sclk:%d, baud_rate:%d, tclkdiv:%d, rclkdiv:%d\n", + __func__, sclk, baud_rate, tclkdiv, rclkdiv); return 0; } @@ -181,23 +162,29 @@ static irqreturn_t sport_uart_rx_irq(int irq, void *dev_id) struct tty_struct *tty = up->port.state->port.tty; unsigned int ch; - do { + spin_lock(&up->port.lock); + + while (SPORT_GET_STAT(up) & RXNE) { ch = rx_one_byte(up); up->port.icount.rx++; - if (uart_handle_sysrq_char(&up->port, ch)) - ; - else + if (!uart_handle_sysrq_char(&up->port, ch)) tty_insert_flip_char(tty, ch, TTY_NORMAL); - } while (SPORT_GET_STAT(up) & RXNE); + } tty_flip_buffer_push(tty); + spin_unlock(&up->port.lock); + return IRQ_HANDLED; } static irqreturn_t sport_uart_tx_irq(int irq, void *dev_id) { - sport_uart_tx_chars(dev_id); + struct sport_uart_port *up = dev_id; + + spin_lock(&up->port.lock); + sport_uart_tx_chars(up); + spin_unlock(&up->port.lock); return IRQ_HANDLED; } @@ -208,6 +195,8 @@ static irqreturn_t sport_uart_err_irq(int irq, void *dev_id) struct tty_struct *tty = up->port.state->port.tty; unsigned int stat = SPORT_GET_STAT(up); + spin_lock(&up->port.lock); + /* Overflow in RX FIFO */ if (stat & ROVF) { up->port.icount.overrun++; @@ -216,15 +205,16 @@ static irqreturn_t sport_uart_err_irq(int irq, void *dev_id) } /* These should not happen */ if (stat & (TOVF | TUVF | RUVF)) { - printk(KERN_ERR "SPORT Error:%s %s %s\n", - (stat & TOVF)?"TX overflow":"", - (stat & TUVF)?"TX underflow":"", - (stat & RUVF)?"RX underflow":""); + pr_err("SPORT Error:%s %s %s\n", + (stat & TOVF) ? "TX overflow" : "", + (stat & TUVF) ? "TX underflow" : "", + (stat & RUVF) ? 
"RX underflow" : ""); SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN); SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) & ~RSPEN); } SSYNC(); + spin_unlock(&up->port.lock); return IRQ_HANDLED; } @@ -232,60 +222,37 @@ static irqreturn_t sport_uart_err_irq(int irq, void *dev_id) static int sport_startup(struct uart_port *port) { struct sport_uart_port *up = (struct sport_uart_port *)port; - char buffer[20]; - int retval; + int ret; pr_debug("%s enter\n", __func__); - snprintf(buffer, 20, "%s rx", up->name); - retval = request_irq(up->rx_irq, sport_uart_rx_irq, IRQF_SAMPLE_RANDOM, buffer, up); - if (retval) { - printk(KERN_ERR "Unable to request interrupt %s\n", buffer); - return retval; + ret = request_irq(up->port.irq, sport_uart_rx_irq, 0, + "SPORT_UART_RX", up); + if (ret) { + dev_err(port->dev, "unable to request SPORT RX interrupt\n"); + return ret; } - snprintf(buffer, 20, "%s tx", up->name); - retval = request_irq(up->tx_irq, sport_uart_tx_irq, IRQF_SAMPLE_RANDOM, buffer, up); - if (retval) { - printk(KERN_ERR "Unable to request interrupt %s\n", buffer); + ret = request_irq(up->port.irq+1, sport_uart_tx_irq, 0, + "SPORT_UART_TX", up); + if (ret) { + dev_err(port->dev, "unable to request SPORT TX interrupt\n"); goto fail1; } - snprintf(buffer, 20, "%s err", up->name); - retval = request_irq(up->err_irq, sport_uart_err_irq, IRQF_SAMPLE_RANDOM, buffer, up); - if (retval) { - printk(KERN_ERR "Unable to request interrupt %s\n", buffer); + ret = request_irq(up->err_irq, sport_uart_err_irq, 0, + "SPORT_UART_STATUS", up); + if (ret) { + dev_err(port->dev, "unable to request SPORT status interrupt\n"); goto fail2; } - if (port->line) { - if (peripheral_request_list(bfin_uart_pin_req_sport1, DRV_NAME)) - goto fail3; - } else { - if (peripheral_request_list(bfin_uart_pin_req_sport0, DRV_NAME)) - goto fail3; - } - - sport_uart_setup(up, get_sclk(), port->uartclk); - - /* Enable receive interrupt */ - SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) | RSPEN)); - SSYNC(); - return 0; + fail2: + free_irq(up->port.irq+1, up); + fail1: + free_irq(up->port.irq, up); - -fail3: - printk(KERN_ERR DRV_NAME - ": Requesting Peripherals failed\n"); - - free_irq(up->err_irq, up); -fail2: - free_irq(up->tx_irq, up); -fail1: - free_irq(up->rx_irq, up); - - return retval; - + return ret; } static void sport_uart_tx_chars(struct sport_uart_port *up) @@ -344,20 +311,17 @@ static void sport_set_mctrl(struct uart_port *port, unsigned int mctrl) static void sport_stop_tx(struct uart_port *port) { struct sport_uart_port *up = (struct sport_uart_port *)port; - unsigned int stat; pr_debug("%s enter\n", __func__); - stat = SPORT_GET_STAT(up); - while(!(stat & TXHRE)) { - udelay(1); - stat = SPORT_GET_STAT(up); - } /* Although the hold register is empty, last byte is still in shift - * register and not sent out yet. If baud rate is lower than default, - * delay should be longer. For example, if the baud rate is 9600, - * the delay must be at least 2ms by experience */ - udelay(500); + * register and not sent out yet. So, put a dummy data into TX FIFO. + * Then, sport tx stops when last byte is shift out and the dummy + * data is moved into the shift register. 
+ */ + SPORT_PUT_TX(up, 0xffff); + while (!(SPORT_GET_STAT(up) & TXHRE)) + cpu_relax(); SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN)); SSYNC(); @@ -370,6 +334,7 @@ static void sport_start_tx(struct uart_port *port) struct sport_uart_port *up = (struct sport_uart_port *)port; pr_debug("%s enter\n", __func__); + /* Write data into SPORT FIFO before enable SPROT to transmit */ sport_uart_tx_chars(up); @@ -403,37 +368,24 @@ static void sport_shutdown(struct uart_port *port) { struct sport_uart_port *up = (struct sport_uart_port *)port; - pr_debug("%s enter\n", __func__); + dev_dbg(port->dev, "%s enter\n", __func__); /* Disable sport */ SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN)); SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) & ~RSPEN)); SSYNC(); - if (port->line) { - peripheral_free_list(bfin_uart_pin_req_sport1); - } else { - peripheral_free_list(bfin_uart_pin_req_sport0); - } - - free_irq(up->rx_irq, up); - free_irq(up->tx_irq, up); + free_irq(up->port.irq, up); + free_irq(up->port.irq+1, up); free_irq(up->err_irq, up); } -static void sport_set_termios(struct uart_port *port, - struct ktermios *termios, struct ktermios *old) -{ - pr_debug("%s enter, c_cflag:%08x\n", __func__, termios->c_cflag); - uart_update_timeout(port, CS8 ,port->uartclk); -} - static const char *sport_type(struct uart_port *port) { struct sport_uart_port *up = (struct sport_uart_port *)port; pr_debug("%s enter\n", __func__); - return up->name; + return up->port.type == PORT_BFIN_SPORT ? "BFIN-SPORT-UART" : NULL; } static void sport_release_port(struct uart_port *port) @@ -461,6 +413,110 @@ static int sport_verify_port(struct uart_port *port, struct serial_struct *ser) return 0; } +static void sport_set_termios(struct uart_port *port, + struct ktermios *termios, struct ktermios *old) +{ + struct sport_uart_port *up = (struct sport_uart_port *)port; + unsigned long flags; + int i; + + pr_debug("%s enter, c_cflag:%08x\n", __func__, termios->c_cflag); + + switch (termios->c_cflag & CSIZE) { + case CS8: + up->csize = 8; + break; + case CS7: + up->csize = 7; + break; + case CS6: + up->csize = 6; + break; + case CS5: + up->csize = 5; + break; + default: + pr_warning("requested word length not supported\n"); + } + + if (termios->c_cflag & CSTOPB) { + up->stopb = 1; + } + if (termios->c_cflag & PARENB) { + pr_warning("PAREN bits is not supported yet\n"); + /* up->parib = 1; */ + } + + port->read_status_mask = OE; + if (termios->c_iflag & INPCK) + port->read_status_mask |= (FE | PE); + if (termios->c_iflag & (BRKINT | PARMRK)) + port->read_status_mask |= BI; + + /* + * Characters to ignore + */ + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= FE | PE; + if (termios->c_iflag & IGNBRK) { + port->ignore_status_mask |= BI; + /* + * If we're ignoring parity and break indicators, + * ignore overruns too (for real raw support). 
+ */ + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= OE; + } + + /* RX extract mask */ + up->rxmask = 0x01 | (((up->csize + up->stopb) * 2 - 1) << 0x8); + /* TX masks, 8 bit data and 1 bit stop for example: + * mask1 = b#0111111110 + * mask2 = b#1000000000 + */ + for (i = 0, up->txmask1 = 0; i < up->csize; i++) + up->txmask1 |= (1<<i); + up->txmask2 = (1<<i); + if (up->stopb) { + ++i; + up->txmask2 |= (1<<i); + } + up->txmask1 <<= 1; + up->txmask2 <<= 1; + /* uart baud rate */ + port->uartclk = uart_get_baud_rate(port, termios, old, 0, get_sclk()/16); + + spin_lock_irqsave(&up->port.lock, flags); + + /* Disable UART */ + SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN); + SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) & ~RSPEN); + + sport_uart_setup(up, up->csize + up->stopb, port->uartclk); + + /* driver TX line high after config, one dummy data is + * necessary to stop sport after shift one byte + */ + SPORT_PUT_TX(up, 0xffff); + SPORT_PUT_TX(up, 0xffff); + SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN)); + SSYNC(); + while (!(SPORT_GET_STAT(up) & TXHRE)) + cpu_relax(); + SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN); + SSYNC(); + + /* Port speed changed, update the per-port timeout. */ + uart_update_timeout(port, termios->c_cflag, port->uartclk); + + /* Enable sport rx */ + SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) | RSPEN); + SSYNC(); + + spin_unlock_irqrestore(&up->port.lock, flags); +} + struct uart_ops sport_uart_ops = { .tx_empty = sport_tx_empty, .set_mctrl = sport_set_mctrl, @@ -480,138 +536,319 @@ struct uart_ops sport_uart_ops = { .verify_port = sport_verify_port, }; -static struct sport_uart_port sport_uart_ports[] = { - { /* SPORT 0 */ - .name = "SPORT0", - .tx_irq = IRQ_SPORT0_TX, - .rx_irq = IRQ_SPORT0_RX, - .err_irq= IRQ_SPORT0_ERROR, - .port = { - .type = PORT_BFIN_SPORT, - .iotype = UPIO_MEM, - .membase = (void __iomem *)SPORT0_TCR1, - .mapbase = SPORT0_TCR1, - .irq = IRQ_SPORT0_RX, - .uartclk = CONFIG_SPORT_BAUD_RATE, - .fifosize = 8, - .ops = &sport_uart_ops, - .line = 0, - }, - }, { /* SPORT 1 */ - .name = "SPORT1", - .tx_irq = IRQ_SPORT1_TX, - .rx_irq = IRQ_SPORT1_RX, - .err_irq= IRQ_SPORT1_ERROR, - .port = { - .type = PORT_BFIN_SPORT, - .iotype = UPIO_MEM, - .membase = (void __iomem *)SPORT1_TCR1, - .mapbase = SPORT1_TCR1, - .irq = IRQ_SPORT1_RX, - .uartclk = CONFIG_SPORT_BAUD_RATE, - .fifosize = 8, - .ops = &sport_uart_ops, - .line = 1, - }, +#define BFIN_SPORT_UART_MAX_PORTS 4 + +static struct sport_uart_port *bfin_sport_uart_ports[BFIN_SPORT_UART_MAX_PORTS]; + +#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE +static int __init +sport_uart_console_setup(struct console *co, char *options) +{ + struct sport_uart_port *up; + int baud = 57600; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + /* Check whether an invalid uart number has been specified */ + if (co->index < 0 || co->index >= BFIN_SPORT_UART_MAX_PORTS) + return -ENODEV; + + up = bfin_sport_uart_ports[co->index]; + if (!up) + return -ENODEV; + + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(&up->port, co, baud, parity, bits, flow); +} + +static void sport_uart_console_putchar(struct uart_port *port, int ch) +{ + struct sport_uart_port *up = (struct sport_uart_port *)port; + + while (SPORT_GET_STAT(up) & TXF) + barrier(); + + tx_one_byte(up, ch); +} + +/* + * Interrupts are disabled on entering + */ +static void +sport_uart_console_write(struct console *co, const char *s, unsigned int count) +{ + struct sport_uart_port *up = 
bfin_sport_uart_ports[co->index]; + unsigned long flags; + + spin_lock_irqsave(&up->port.lock, flags); + + if (SPORT_GET_TCR1(up) & TSPEN) + uart_console_write(&up->port, s, count, sport_uart_console_putchar); + else { + /* dummy data to start sport */ + while (SPORT_GET_STAT(up) & TXF) + barrier(); + SPORT_PUT_TX(up, 0xffff); + /* Enable transmit, then an interrupt will generated */ + SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN)); + SSYNC(); + + uart_console_write(&up->port, s, count, sport_uart_console_putchar); + + /* Although the hold register is empty, last byte is still in shift + * register and not sent out yet. So, put a dummy data into TX FIFO. + * Then, sport tx stops when last byte is shift out and the dummy + * data is moved into the shift register. + */ + while (SPORT_GET_STAT(up) & TXF) + barrier(); + SPORT_PUT_TX(up, 0xffff); + while (!(SPORT_GET_STAT(up) & TXHRE)) + barrier(); + + /* Stop sport tx transfer */ + SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN)); + SSYNC(); } + + spin_unlock_irqrestore(&up->port.lock, flags); +} + +static struct uart_driver sport_uart_reg; + +static struct console sport_uart_console = { + .name = DEVICE_NAME, + .write = sport_uart_console_write, + .device = uart_console_device, + .setup = sport_uart_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &sport_uart_reg, }; +#define SPORT_UART_CONSOLE (&sport_uart_console) +#else +#define SPORT_UART_CONSOLE NULL +#endif /* CONFIG_SERIAL_BFIN_SPORT_CONSOLE */ + + static struct uart_driver sport_uart_reg = { .owner = THIS_MODULE, - .driver_name = "SPORT-UART", - .dev_name = "ttySS", + .driver_name = DRV_NAME, + .dev_name = DEVICE_NAME, .major = 204, .minor = 84, - .nr = ARRAY_SIZE(sport_uart_ports), - .cons = NULL, + .nr = BFIN_SPORT_UART_MAX_PORTS, + .cons = SPORT_UART_CONSOLE, }; -static int sport_uart_suspend(struct platform_device *dev, pm_message_t state) +#ifdef CONFIG_PM +static int sport_uart_suspend(struct device *dev) { - struct sport_uart_port *sport = platform_get_drvdata(dev); + struct sport_uart_port *sport = dev_get_drvdata(dev); - pr_debug("%s enter\n", __func__); + dev_dbg(dev, "%s enter\n", __func__); if (sport) uart_suspend_port(&sport_uart_reg, &sport->port); return 0; } -static int sport_uart_resume(struct platform_device *dev) +static int sport_uart_resume(struct device *dev) { - struct sport_uart_port *sport = platform_get_drvdata(dev); + struct sport_uart_port *sport = dev_get_drvdata(dev); - pr_debug("%s enter\n", __func__); + dev_dbg(dev, "%s enter\n", __func__); if (sport) uart_resume_port(&sport_uart_reg, &sport->port); return 0; } -static int sport_uart_probe(struct platform_device *dev) +static struct dev_pm_ops bfin_sport_uart_dev_pm_ops = { + .suspend = sport_uart_suspend, + .resume = sport_uart_resume, +}; +#endif + +static int __devinit sport_uart_probe(struct platform_device *pdev) { - pr_debug("%s enter\n", __func__); - sport_uart_ports[dev->id].port.dev = &dev->dev; - uart_add_one_port(&sport_uart_reg, &sport_uart_ports[dev->id].port); - platform_set_drvdata(dev, &sport_uart_ports[dev->id]); + struct resource *res; + struct sport_uart_port *sport; + int ret = 0; - return 0; + dev_dbg(&pdev->dev, "%s enter\n", __func__); + + if (pdev->id < 0 || pdev->id >= BFIN_SPORT_UART_MAX_PORTS) { + dev_err(&pdev->dev, "Wrong sport uart platform device id.\n"); + return -ENOENT; + } + + if (bfin_sport_uart_ports[pdev->id] == NULL) { + bfin_sport_uart_ports[pdev->id] = + kmalloc(sizeof(struct sport_uart_port), GFP_KERNEL); + sport = 
bfin_sport_uart_ports[pdev->id]; + if (!sport) { + dev_err(&pdev->dev, + "Fail to kmalloc sport_uart_port\n"); + return -ENOMEM; + } + + ret = peripheral_request_list( + (unsigned short *)pdev->dev.platform_data, DRV_NAME); + if (ret) { + dev_err(&pdev->dev, + "Fail to request SPORT peripherals\n"); + goto out_error_free_mem; + } + + spin_lock_init(&sport->port.lock); + sport->port.fifosize = SPORT_TX_FIFO_SIZE, + sport->port.ops = &sport_uart_ops; + sport->port.line = pdev->id; + sport->port.iotype = UPIO_MEM; + sport->port.flags = UPF_BOOT_AUTOCONF; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res == NULL) { + dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n"); + ret = -ENOENT; + goto out_error_free_peripherals; + } + + sport->port.membase = ioremap(res->start, + res->end - res->start); + if (!sport->port.membase) { + dev_err(&pdev->dev, "Cannot map sport IO\n"); + ret = -ENXIO; + goto out_error_free_peripherals; + } + + sport->port.irq = platform_get_irq(pdev, 0); + if (sport->port.irq < 0) { + dev_err(&pdev->dev, "No sport RX/TX IRQ specified\n"); + ret = -ENOENT; + goto out_error_unmap; + } + + sport->err_irq = platform_get_irq(pdev, 1); + if (sport->err_irq < 0) { + dev_err(&pdev->dev, "No sport status IRQ specified\n"); + ret = -ENOENT; + goto out_error_unmap; + } + } + +#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE + if (!is_early_platform_device(pdev)) { +#endif + sport = bfin_sport_uart_ports[pdev->id]; + sport->port.dev = &pdev->dev; + dev_set_drvdata(&pdev->dev, sport); + ret = uart_add_one_port(&sport_uart_reg, &sport->port); +#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE + } +#endif + if (!ret) + return 0; + + if (sport) { +out_error_unmap: + iounmap(sport->port.membase); +out_error_free_peripherals: + peripheral_free_list( + (unsigned short *)pdev->dev.platform_data); +out_error_free_mem: + kfree(sport); + bfin_sport_uart_ports[pdev->id] = NULL; + } + + return ret; } -static int sport_uart_remove(struct platform_device *dev) +static int __devexit sport_uart_remove(struct platform_device *pdev) { - struct sport_uart_port *sport = platform_get_drvdata(dev); + struct sport_uart_port *sport = platform_get_drvdata(pdev); - pr_debug("%s enter\n", __func__); - platform_set_drvdata(dev, NULL); + dev_dbg(&pdev->dev, "%s enter\n", __func__); + dev_set_drvdata(&pdev->dev, NULL); - if (sport) + if (sport) { uart_remove_one_port(&sport_uart_reg, &sport->port); + iounmap(sport->port.membase); + peripheral_free_list( + (unsigned short *)pdev->dev.platform_data); + kfree(sport); + bfin_sport_uart_ports[pdev->id] = NULL; + } return 0; } static struct platform_driver sport_uart_driver = { .probe = sport_uart_probe, - .remove = sport_uart_remove, - .suspend = sport_uart_suspend, - .resume = sport_uart_resume, + .remove = __devexit_p(sport_uart_remove), .driver = { .name = DRV_NAME, +#ifdef CONFIG_PM + .pm = &bfin_sport_uart_dev_pm_ops, +#endif }, }; +#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE +static __initdata struct early_platform_driver early_sport_uart_driver = { + .class_str = DRV_NAME, + .pdrv = &sport_uart_driver, + .requested_id = EARLY_PLATFORM_ID_UNSET, +}; + +static int __init sport_uart_rs_console_init(void) +{ + early_platform_driver_register(&early_sport_uart_driver, DRV_NAME); + + early_platform_driver_probe(DRV_NAME, BFIN_SPORT_UART_MAX_PORTS, 0); + + register_console(&sport_uart_console); + + return 0; +} +console_initcall(sport_uart_rs_console_init); +#endif + static int __init sport_uart_init(void) { int ret; - pr_debug("%s enter\n", __func__); + pr_info("Serial: 
Blackfin uart over sport driver\n"); + ret = uart_register_driver(&sport_uart_reg); - if (ret != 0) { - printk(KERN_ERR "Failed to register %s:%d\n", + if (ret) { + pr_err("failed to register %s:%d\n", sport_uart_reg.driver_name, ret); return ret; } ret = platform_driver_register(&sport_uart_driver); - if (ret != 0) { - printk(KERN_ERR "Failed to register sport uart driver:%d\n", ret); + if (ret) { + pr_err("failed to register sport uart driver:%d\n", ret); uart_unregister_driver(&sport_uart_reg); } - - pr_debug("%s exit\n", __func__); return ret; } +module_init(sport_uart_init); static void __exit sport_uart_exit(void) { - pr_debug("%s enter\n", __func__); platform_driver_unregister(&sport_uart_driver); uart_unregister_driver(&sport_uart_reg); } - -module_init(sport_uart_init); module_exit(sport_uart_exit); +MODULE_AUTHOR("Sonic Zhang, Roy Huang"); +MODULE_DESCRIPTION("Blackfin serial over SPORT driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/serial/bfin_sport_uart.h b/drivers/serial/bfin_sport_uart.h index 671d41cc1a3..abe03614e4d 100644 --- a/drivers/serial/bfin_sport_uart.h +++ b/drivers/serial/bfin_sport_uart.h @@ -1,29 +1,23 @@ /* - * File: linux/drivers/serial/bfin_sport_uart.h + * Blackfin On-Chip Sport Emulated UART Driver * - * Based on: include/asm-blackfin/mach-533/bfin_serial_5xx.h - * Author: Roy Huang <roy.huang>analog.com> + * Copyright 2006-2008 Analog Devices Inc. * - * Created: Nov 22, 2006 - * Copyright: (C) Analog Device Inc. - * Description: this driver enable SPORTs on Blackfin emulate UART. + * Enter bugs at http://blackfin.uclinux.org/ * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, see the file COPYING, or write - * to the Free Software Foundation, Inc., - * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * Licensed under the GPL-2 or later. */ +/* + * This driver and the hardware supported are in term of EE-191 of ADI. + * http://www.analog.com/UploadedFiles/Application_Notes/399447663EE191.pdf + * This application note describe how to implement a UART on a Sharc DSP, + * but this driver is implemented on Blackfin Processor. + * Transmit Frame Sync is not used by this driver to transfer data out. 
+ */ + +#ifndef _BFIN_SPORT_UART_H +#define _BFIN_SPORT_UART_H #define OFFSET_TCR1 0x00 /* Transmit Configuration 1 Register */ #define OFFSET_TCR2 0x04 /* Transmit Configuration 2 Register */ @@ -61,3 +55,7 @@ #define SPORT_PUT_RCLKDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RCLKDIV), v) #define SPORT_PUT_RFSDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RFSDIV), v) #define SPORT_PUT_STAT(sport, v) bfin_write16(((sport)->port.membase + OFFSET_STAT), v) + +#define SPORT_TX_FIFO_SIZE 8 + +#endif /* _BFIN_SPORT_UART_H */ diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c index 0028b6f89ce..53a46822705 100644 --- a/drivers/serial/icom.c +++ b/drivers/serial/icom.c @@ -751,7 +751,6 @@ static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port) trace(icom_port, "FID_STATUS", status); count = cpu_to_le16(icom_port->statStg->rcv[rcv_buff].leLength); - count = tty_buffer_request_room(tty, count); trace(icom_port, "RCV_COUNT", count); trace(icom_port, "REAL_COUNT", count); @@ -1654,4 +1653,6 @@ MODULE_DESCRIPTION("IBM iSeries Serial IOA driver"); MODULE_SUPPORTED_DEVICE ("IBM iSeries 2745, 2771, 2772, 2742, 2793 and 2805 Communications adapters"); MODULE_LICENSE("GPL"); - +MODULE_FIRMWARE("icom_call_setup.bin"); +MODULE_FIRMWARE("icom_res_dce.bin"); +MODULE_FIRMWARE("icom_asc.bin"); diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c index 60d665a17a8..d00fcf8e6c7 100644 --- a/drivers/serial/imx.c +++ b/drivers/serial/imx.c @@ -1279,7 +1279,7 @@ static int serial_imx_probe(struct platform_device *pdev) sport->use_irda = 1; #endif - if (pdata->init) { + if (pdata && pdata->init) { ret = pdata->init(pdev); if (ret) goto clkput; @@ -1292,7 +1292,7 @@ static int serial_imx_probe(struct platform_device *pdev) return 0; deinit: - if (pdata->exit) + if (pdata && pdata->exit) pdata->exit(pdev); clkput: clk_put(sport->clk); @@ -1321,7 +1321,7 @@ static int serial_imx_remove(struct platform_device *pdev) clk_disable(sport->clk); - if (pdata->exit) + if (pdata && pdata->exit) pdata->exit(pdev); iounmap(sport->port.membase); diff --git a/drivers/serial/ioc3_serial.c b/drivers/serial/ioc3_serial.c index 85dc0410ac1..23ba6b40b3a 100644 --- a/drivers/serial/ioc3_serial.c +++ b/drivers/serial/ioc3_serial.c @@ -1411,8 +1411,7 @@ static int receive_chars(struct uart_port *the_port) read_count = do_read(the_port, ch, MAX_CHARS); if (read_count > 0) { flip = 1; - read_room = tty_buffer_request_room(tty, read_count); - tty_insert_flip_string(tty, ch, read_room); + read_room = tty_insert_flip_string(tty, ch, read_count); the_port->icount.rx += read_count; } spin_unlock_irqrestore(&the_port->lock, pflags); diff --git a/drivers/serial/jsm/jsm_driver.c b/drivers/serial/jsm/jsm_driver.c index 108c3e0471f..12cb5e446a4 100644 --- a/drivers/serial/jsm/jsm_driver.c +++ b/drivers/serial/jsm/jsm_driver.c @@ -179,6 +179,7 @@ static int __devinit jsm_probe_one(struct pci_dev *pdev, const struct pci_device return 0; out_free_irq: + jsm_remove_uart_port(brd); free_irq(brd->irq, brd); out_iounmap: iounmap(brd->re_map_membase); diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c index cd95e215550..5673ca9dfdc 100644 --- a/drivers/serial/jsm/jsm_tty.c +++ b/drivers/serial/jsm/jsm_tty.c @@ -432,7 +432,7 @@ int __devinit jsm_tty_init(struct jsm_board *brd) int jsm_uart_port_init(struct jsm_board *brd) { - int i; + int i, rc; unsigned int line; struct jsm_channel *ch; @@ -467,8 +467,11 @@ int jsm_uart_port_init(struct jsm_board *brd) } else set_bit(line, linemap); 
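The imx.c hunks in this series make every pdata->init()/pdata->exit() call conditional because platform data is optional for that driver. The same pattern in isolation, with a hypothetical foo_platform_data type standing in for the real structure:

#include <linux/platform_device.h>

struct foo_platform_data {			/* hypothetical board-support hooks */
	int (*init)(struct platform_device *pdev);
	void (*exit)(struct platform_device *pdev);
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_platform_data *pdata = pdev->dev.platform_data;
	int ret = 0;

	/* Boards that need no extra setup register the device without
	 * platform data, so pdata may legitimately be NULL here. */
	if (pdata && pdata->init)
		ret = pdata->init(pdev);

	return ret;
}

static int foo_remove(struct platform_device *pdev)
{
	struct foo_platform_data *pdata = pdev->dev.platform_data;

	if (pdata && pdata->exit)
		pdata->exit(pdev);

	return 0;
}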
brd->channels[i]->uart_port.line = line; - if (uart_add_one_port (&jsm_uart_driver, &brd->channels[i]->uart_port)) - printk(KERN_INFO "jsm: add device failed\n"); + rc = uart_add_one_port (&jsm_uart_driver, &brd->channels[i]->uart_port); + if (rc){ + printk(KERN_INFO "jsm: Port %d failed. Aborting...\n", i); + return rc; + } else printk(KERN_INFO "jsm: Port %d added\n", i); } diff --git a/drivers/serial/msm_serial.c b/drivers/serial/msm_serial.c index b05c5aa02cb..ecdc0facf7e 100644 --- a/drivers/serial/msm_serial.c +++ b/drivers/serial/msm_serial.c @@ -691,6 +691,7 @@ static int __init msm_serial_probe(struct platform_device *pdev) struct msm_port *msm_port; struct resource *resource; struct uart_port *port; + int irq; if (unlikely(pdev->id < 0 || pdev->id >= UART_NR)) return -ENXIO; @@ -711,9 +712,10 @@ static int __init msm_serial_probe(struct platform_device *pdev) return -ENXIO; port->mapbase = resource->start; - port->irq = platform_get_irq(pdev, 0); - if (unlikely(port->irq < 0)) + irq = platform_get_irq(pdev, 0); + if (unlikely(irq < 0)) return -ENXIO; + port->irq = irq; platform_set_drvdata(pdev, port); diff --git a/drivers/serial/timbuart.c b/drivers/serial/timbuart.c index 34b31da01d0..7bf10264a6a 100644 --- a/drivers/serial/timbuart.c +++ b/drivers/serial/timbuart.c @@ -421,7 +421,7 @@ static struct uart_driver timbuart_driver = { static int timbuart_probe(struct platform_device *dev) { - int err; + int err, irq; struct timbuart_port *uart; struct resource *iomem; @@ -453,11 +453,12 @@ static int timbuart_probe(struct platform_device *dev) uart->port.mapbase = iomem->start; uart->port.membase = NULL; - uart->port.irq = platform_get_irq(dev, 0); - if (uart->port.irq < 0) { + irq = platform_get_irq(dev, 0); + if (irq < 0) { err = -EINVAL; goto err_register; } + uart->port.irq = irq; tasklet_init(&uart->tasklet, timbuart_tasklet, (unsigned long)uart); diff --git a/drivers/staging/usbip/vhci_sysfs.c b/drivers/staging/usbip/vhci_sysfs.c index d8992d10d55..f6e34e03c8e 100644 --- a/drivers/staging/usbip/vhci_sysfs.c +++ b/drivers/staging/usbip/vhci_sysfs.c @@ -144,7 +144,7 @@ static int valid_args(__u32 rhport, enum usb_device_speed speed) case USB_SPEED_LOW: case USB_SPEED_FULL: case USB_SPEED_HIGH: - case USB_SPEED_VARIABLE: + case USB_SPEED_WIRELESS: break; default: usbip_uerr("speed %d\n", speed); diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig index 4f5bb5698f5..6a58cb1330c 100644 --- a/drivers/usb/Kconfig +++ b/drivers/usb/Kconfig @@ -21,6 +21,7 @@ config USB_ARCH_HAS_HCD default y if USB_ARCH_HAS_EHCI default y if PCMCIA && !M32R # sl811_cs default y if ARM # SL-811 + default y if BLACKFIN # SL-811 default y if SUPERH # r8a66597-hcd default PCI @@ -39,6 +40,7 @@ config USB_ARCH_HAS_OHCI default y if ARCH_PNX4008 && I2C default y if MFD_TC6393XB default y if ARCH_W90X900 + default y if ARCH_DAVINCI_DA8XX # PPC: default y if STB03xxx default y if PPC_MPC52xx diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile index be3c9b80bc9..80b4008c89b 100644 --- a/drivers/usb/Makefile +++ b/drivers/usb/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_USB_U132_HCD) += host/ obj-$(CONFIG_USB_R8A66597_HCD) += host/ obj-$(CONFIG_USB_HWA_HCD) += host/ obj-$(CONFIG_USB_ISP1760_HCD) += host/ +obj-$(CONFIG_USB_IMX21_HCD) += host/ obj-$(CONFIG_USB_C67X00_HCD) += c67x00/ diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index 56802d2e994..c89990f5e01 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c @@ -5,6 +5,7 @@ * Copyright (C) 2004 David Woodhouse, Duncan 
Sands, Roman Kagan * Copyright (C) 2005 Duncan Sands, Roman Kagan (rkagan % mail ! ru) * Copyright (C) 2007 Simon Arlott + * Copyright (C) 2009 Simon Arlott * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free @@ -43,7 +44,7 @@ #include "usbatm.h" #define DRIVER_AUTHOR "Roman Kagan, David Woodhouse, Duncan Sands, Simon Arlott" -#define DRIVER_VERSION "0.3" +#define DRIVER_VERSION "0.4" #define DRIVER_DESC "Conexant AccessRunner ADSL USB modem driver" static const char cxacru_driver_name[] = "cxacru"; @@ -52,6 +53,7 @@ static const char cxacru_driver_name[] = "cxacru"; #define CXACRU_EP_DATA 0x02 /* Bulk in/out */ #define CMD_PACKET_SIZE 64 /* Should be maxpacket(ep)? */ +#define CMD_MAX_CONFIG ((CMD_PACKET_SIZE / 4 - 1) / 2) /* Addresses */ #define PLLFCLK_ADDR 0x00350068 @@ -105,6 +107,26 @@ enum cxacru_cm_request { CM_REQUEST_MAX, }; +/* commands for interaction with the flash memory + * + * read: response is the contents of the first 60 bytes of flash memory + * write: request contains the 60 bytes of data to write to flash memory + * response is the contents of the first 60 bytes of flash memory + * + * layout: PP PP VV VV MM MM MM MM MM MM ?? ?? SS SS SS SS SS SS SS SS + * SS SS SS SS SS SS SS SS 00 00 00 00 00 00 00 00 00 00 00 00 + * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + * + * P: le16 USB Product ID + * V: le16 USB Vendor ID + * M: be48 MAC Address + * S: le16 ASCII Serial Number + */ +enum cxacru_cm_flash { + CM_FLASH_READ = 0xa1, + CM_FLASH_WRITE = 0xa2 +}; + /* reply codes to the commands above */ enum cxacru_cm_status { CM_STATUS_UNDEFINED, @@ -196,23 +218,32 @@ static DEVICE_ATTR(_name, S_IRUGO, cxacru_sysfs_show_##_name, NULL) static DEVICE_ATTR(_name, S_IWUSR | S_IRUGO, \ cxacru_sysfs_show_##_name, cxacru_sysfs_store_##_name) +#define CXACRU_SET_INIT(_name) \ +static DEVICE_ATTR(_name, S_IWUSR, \ + NULL, cxacru_sysfs_store_##_name) + #define CXACRU_ATTR_INIT(_value, _type, _name) \ static ssize_t cxacru_sysfs_show_##_name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ - struct usb_interface *intf = to_usb_interface(dev); \ - struct usbatm_data *usbatm_instance = usb_get_intfdata(intf); \ - struct cxacru_data *instance = usbatm_instance->driver_data; \ + struct cxacru_data *instance = to_usbatm_driver_data(\ + to_usb_interface(dev)); \ +\ + if (instance == NULL) \ + return -ENODEV; \ +\ return cxacru_sysfs_showattr_##_type(instance->card_info[_value], buf); \ } \ CXACRU__ATTR_INIT(_name) #define CXACRU_ATTR_CREATE(_v, _t, _name) CXACRU_DEVICE_CREATE_FILE(_name) #define CXACRU_CMD_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name) +#define CXACRU_SET_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name) #define CXACRU__ATTR_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name) #define CXACRU_ATTR_REMOVE(_v, _t, _name) CXACRU_DEVICE_REMOVE_FILE(_name) #define CXACRU_CMD_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name) +#define CXACRU_SET_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name) #define CXACRU__ATTR_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name) static ssize_t cxacru_sysfs_showattr_u32(u32 value, char *buf) @@ -267,12 +298,12 @@ static ssize_t cxacru_sysfs_showattr_LINE(u32 value, char *buf) static ssize_t cxacru_sysfs_showattr_MODU(u32 value, char *buf) { static char *str[] = { - NULL, + "", "ANSI T1.413", "ITU-T G.992.1 (G.DMT)", "ITU-T G.992.2 (G.LITE)" }; - if (unlikely(value >= ARRAY_SIZE(str) || str[value] == NULL)) + if (unlikely(value >= 
ARRAY_SIZE(str))) return snprintf(buf, PAGE_SIZE, "%u\n", value); return snprintf(buf, PAGE_SIZE, "%s\n", str[value]); } @@ -288,22 +319,28 @@ static ssize_t cxacru_sysfs_showattr_MODU(u32 value, char *buf) static ssize_t cxacru_sysfs_show_mac_address(struct device *dev, struct device_attribute *attr, char *buf) { - struct usb_interface *intf = to_usb_interface(dev); - struct usbatm_data *usbatm_instance = usb_get_intfdata(intf); - struct atm_dev *atm_dev = usbatm_instance->atm_dev; + struct cxacru_data *instance = to_usbatm_driver_data( + to_usb_interface(dev)); - return snprintf(buf, PAGE_SIZE, "%pM\n", atm_dev->esi); + if (instance == NULL || instance->usbatm->atm_dev == NULL) + return -ENODEV; + + return snprintf(buf, PAGE_SIZE, "%pM\n", + instance->usbatm->atm_dev->esi); } static ssize_t cxacru_sysfs_show_adsl_state(struct device *dev, struct device_attribute *attr, char *buf) { - struct usb_interface *intf = to_usb_interface(dev); - struct usbatm_data *usbatm_instance = usb_get_intfdata(intf); - struct cxacru_data *instance = usbatm_instance->driver_data; - u32 value = instance->card_info[CXINF_LINE_STARTABLE]; static char *str[] = { "running", "stopped" }; + struct cxacru_data *instance = to_usbatm_driver_data( + to_usb_interface(dev)); + u32 value; + + if (instance == NULL) + return -ENODEV; + + value = instance->card_info[CXINF_LINE_STARTABLE]; if (unlikely(value >= ARRAY_SIZE(str))) return snprintf(buf, PAGE_SIZE, "%u\n", value); return snprintf(buf, PAGE_SIZE, "%s\n", str[value]); } @@ -312,9 +349,8 @@ static ssize_t cxacru_sysfs_show_adsl_state(struct device *dev, static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - struct usb_interface *intf = to_usb_interface(dev); - struct usbatm_data *usbatm_instance = usb_get_intfdata(intf); - struct cxacru_data *instance = usbatm_instance->driver_data; + struct cxacru_data *instance = to_usbatm_driver_data( + to_usb_interface(dev)); int ret; int poll = -1; char str_cmd[8]; @@ -328,13 +364,16 @@ static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev, return -EINVAL; ret = 0; + if (instance == NULL) + return -ENODEV; + if (mutex_lock_interruptible(&instance->adsl_state_serialize)) return -ERESTARTSYS; if (!strcmp(str_cmd, "stop") || !strcmp(str_cmd, "restart")) { ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_STOP, NULL, 0, NULL, 0); if (ret < 0) { - atm_err(usbatm_instance, "change adsl state:" + atm_err(instance->usbatm, "change adsl state:" " CHIP_ADSL_LINE_STOP returned %d\n", ret); ret = -EIO; @@ -354,7 +393,7 @@ static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev, if (!strcmp(str_cmd, "start") || !strcmp(str_cmd, "restart")) { ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_START, NULL, 0, NULL, 0); if (ret < 0) { - atm_err(usbatm_instance, "change adsl state:" + atm_err(instance->usbatm, "change adsl state:" " CHIP_ADSL_LINE_START returned %d\n", ret); ret = -EIO; @@ -407,6 +446,72 @@ static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev, return ret; } +/* CM_REQUEST_CARD_DATA_GET times out, so no show attribute */ + +static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct cxacru_data *instance = to_usbatm_driver_data( + to_usb_interface(dev)); + int len = strlen(buf); + int ret, pos, num; + __le32 data[CMD_PACKET_SIZE / 4]; + + if (!capable(CAP_NET_ADMIN)) + return -EACCES; + + if (instance == NULL) + return -ENODEV; + + pos = 0; + 
num = 0; + while (pos < len) { + int tmp; + u32 index; + u32 value; + + ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp); + if (ret < 2) + return -EINVAL; + if (index < 0 || index > 0x7f) + return -EINVAL; + pos += tmp; + + /* skip trailing newline */ + if (buf[pos] == '\n' && pos == len-1) + pos++; + + data[num * 2 + 1] = cpu_to_le32(index); + data[num * 2 + 2] = cpu_to_le32(value); + num++; + + /* send config values when data buffer is full + * or no more data + */ + if (pos >= len || num >= CMD_MAX_CONFIG) { + char log[CMD_MAX_CONFIG * 12 + 1]; /* %02x=%08x */ + + data[0] = cpu_to_le32(num); + ret = cxacru_cm(instance, CM_REQUEST_CARD_DATA_SET, + (u8 *) data, 4 + num * 8, NULL, 0); + if (ret < 0) { + atm_err(instance->usbatm, + "set card data returned %d\n", ret); + return -EIO; + } + + for (tmp = 0; tmp < num; tmp++) + snprintf(log + tmp*12, 13, " %02x=%08x", + le32_to_cpu(data[tmp * 2 + 1]), + le32_to_cpu(data[tmp * 2 + 2])); + atm_info(instance->usbatm, "config%s\n", log); + num = 0; + } + } + + return len; +} + /* * All device attributes are included in CXACRU_ALL_FILES * so that the same list can be used multiple times: @@ -442,7 +547,8 @@ CXACRU_ATTR_##_action(CXINF_MODULATION, MODU, modulation); \ CXACRU_ATTR_##_action(CXINF_ADSL_HEADEND, u32, adsl_headend); \ CXACRU_ATTR_##_action(CXINF_ADSL_HEADEND_ENVIRONMENT, u32, adsl_headend_environment); \ CXACRU_ATTR_##_action(CXINF_CONTROLLER_VERSION, u32, adsl_controller_version); \ -CXACRU_CMD_##_action( adsl_state); +CXACRU_CMD_##_action( adsl_state); \ +CXACRU_SET_##_action( adsl_config); CXACRU_ALL_FILES(INIT); @@ -596,7 +702,7 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ len = ret / 4; for (offb = 0; offb < len; ) { int l = le32_to_cpu(buf[offb++]); - if (l > stride || l > (len - offb) / 2) { + if (l < 0 || l > stride || l > (len - offb) / 2) { if (printk_ratelimit()) usb_err(instance->usbatm, "invalid data length from cm %#x: %d\n", cm, l); @@ -649,9 +755,6 @@ static int cxacru_atm_start(struct usbatm_data *usbatm_instance, { struct cxacru_data *instance = usbatm_instance->driver_data; struct usb_interface *intf = usbatm_instance->usb_intf; - /* - struct atm_dev *atm_dev = usbatm_instance->atm_dev; - */ int ret; int start_polling = 1; @@ -697,6 +800,9 @@ static int cxacru_atm_start(struct usbatm_data *usbatm_instance, mutex_unlock(&instance->poll_state_serialize); mutex_unlock(&instance->adsl_state_serialize); + printk(KERN_INFO "%s%d: %s %pM\n", atm_dev->type, atm_dev->number, + usbatm_instance->description, atm_dev->esi); + if (start_polling) cxacru_poll_status(&instance->poll_work.work); return 0; @@ -873,11 +979,9 @@ cleanup: static void cxacru_upload_firmware(struct cxacru_data *instance, const struct firmware *fw, - const struct firmware *bp, - const struct firmware *cf) + const struct firmware *bp) { int ret; - int off; struct usbatm_data *usbatm = instance->usbatm; struct usb_device *usb_dev = usbatm->usb_dev; __le16 signature[] = { usb_dev->descriptor.idVendor, @@ -911,6 +1015,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance, } /* Firmware */ + usb_info(usbatm, "loading firmware\n"); ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, FW_ADDR, fw->data, fw->size); if (ret) { usb_err(usbatm, "Firmware upload failed: %d\n", ret); @@ -919,6 +1024,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance, /* Boot ROM patch */ if (instance->modem_type->boot_rom_patch) { + usb_info(usbatm, "loading boot ROM patch\n"); ret = cxacru_fw(usb_dev, 
FW_WRITE_MEM, 0x2, 0x0, BR_ADDR, bp->data, bp->size); if (ret) { usb_err(usbatm, "Boot ROM patching failed: %d\n", ret); @@ -933,6 +1039,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance, return; } + usb_info(usbatm, "starting device\n"); if (instance->modem_type->boot_rom_patch) { val = cpu_to_le32(BR_ADDR); ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_STACK_ADDR, (u8 *) &val, 4); @@ -958,26 +1065,6 @@ static void cxacru_upload_firmware(struct cxacru_data *instance, usb_err(usbatm, "modem failed to initialize: %d\n", ret); return; } - - /* Load config data (le32), doing one packet at a time */ - if (cf) - for (off = 0; off < cf->size / 4; ) { - __le32 buf[CMD_PACKET_SIZE / 4 - 1]; - int i, len = min_t(int, cf->size / 4 - off, CMD_PACKET_SIZE / 4 / 2 - 1); - buf[0] = cpu_to_le32(len); - for (i = 0; i < len; i++, off++) { - buf[i * 2 + 1] = cpu_to_le32(off); - memcpy(buf + i * 2 + 2, cf->data + off * 4, 4); - } - ret = cxacru_cm(instance, CM_REQUEST_CARD_DATA_SET, - (u8 *) buf, len, NULL, 0); - if (ret < 0) { - usb_err(usbatm, "load config data failed: %d\n", ret); - return; - } - } - - msleep_interruptible(4000); } static int cxacru_find_firmware(struct cxacru_data *instance, @@ -1003,7 +1090,7 @@ static int cxacru_find_firmware(struct cxacru_data *instance, static int cxacru_heavy_init(struct usbatm_data *usbatm_instance, struct usb_interface *usb_intf) { - const struct firmware *fw, *bp, *cf; + const struct firmware *fw, *bp; struct cxacru_data *instance = usbatm_instance->driver_data; int ret = cxacru_find_firmware(instance, "fw", &fw); @@ -1021,13 +1108,8 @@ static int cxacru_heavy_init(struct usbatm_data *usbatm_instance, } } - if (cxacru_find_firmware(instance, "cf", &cf)) /* optional */ - cf = NULL; - - cxacru_upload_firmware(instance, fw, bp, cf); + cxacru_upload_firmware(instance, fw, bp); - if (cf) - release_firmware(cf); if (instance->modem_type->boot_rom_patch) release_firmware(bp); release_firmware(fw); diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c index fbea8563df1..9b53e8df464 100644 --- a/drivers/usb/atm/usbatm.c +++ b/drivers/usb/atm/usbatm.c @@ -1333,6 +1333,7 @@ void usbatm_usb_disconnect(struct usb_interface *intf) if (instance->atm_dev) { sysfs_remove_link(&instance->atm_dev->class_dev.kobj, "device"); atm_dev_deregister(instance->atm_dev); + instance->atm_dev = NULL; } usbatm_put_instance(instance); /* taken in usbatm_usb_probe */ @@ -1348,7 +1349,7 @@ static int __init usbatm_usb_init(void) { dbg("%s: driver version %s", __func__, DRIVER_VERSION); - if (sizeof(struct usbatm_control) > sizeof(((struct sk_buff *) 0)->cb)) { + if (sizeof(struct usbatm_control) > FIELD_SIZEOF(struct sk_buff, cb)) { printk(KERN_ERR "%s unusable with this kernel!\n", usbatm_driver_name); return -EIO; } diff --git a/drivers/usb/atm/usbatm.h b/drivers/usb/atm/usbatm.h index f6f4508a9d4..0863f85fcc2 100644 --- a/drivers/usb/atm/usbatm.h +++ b/drivers/usb/atm/usbatm.h @@ -204,4 +204,19 @@ struct usbatm_data { struct urb *urbs[0]; }; +static inline void *to_usbatm_driver_data(struct usb_interface *intf) +{ + struct usbatm_data *usbatm_instance; + + if (intf == NULL) + return NULL; + + usbatm_instance = usb_get_intfdata(intf); + + if (usbatm_instance == NULL) /* set NULL before unbind() */ + return NULL; + + return usbatm_instance->driver_data; /* set NULL after unbind() */ +} + #endif /* _USBATM_H_ */ diff --git a/drivers/usb/c67x00/c67x00-drv.c b/drivers/usb/c67x00/c67x00-drv.c index 5633bc5c8bf..029ee4a8a1f 100644 --- 
a/drivers/usb/c67x00/c67x00-drv.c +++ b/drivers/usb/c67x00/c67x00-drv.c @@ -137,13 +137,13 @@ static int __devinit c67x00_drv_probe(struct platform_device *pdev) if (!c67x00) return -ENOMEM; - if (!request_mem_region(res->start, res->end - res->start + 1, + if (!request_mem_region(res->start, resource_size(res), pdev->name)) { dev_err(&pdev->dev, "Memory region busy\n"); ret = -EBUSY; goto request_mem_failed; } - c67x00->hpi.base = ioremap(res->start, res->end - res->start + 1); + c67x00->hpi.base = ioremap(res->start, resource_size(res)); if (!c67x00->hpi.base) { dev_err(&pdev->dev, "Unable to map HPI registers\n"); ret = -EIO; @@ -182,7 +182,7 @@ static int __devinit c67x00_drv_probe(struct platform_device *pdev) request_irq_failed: iounmap(c67x00->hpi.base); map_failed: - release_mem_region(res->start, res->end - res->start + 1); + release_mem_region(res->start, resource_size(res)); request_mem_failed: kfree(c67x00); @@ -208,7 +208,7 @@ static int __devexit c67x00_drv_remove(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res) - release_mem_region(res->start, res->end - res->start + 1); + release_mem_region(res->start, resource_size(res)); kfree(c67x00); diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 34d4eb98829..975d556b478 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -170,6 +170,7 @@ static void acm_write_done(struct acm *acm, struct acm_wb *wb) { wb->use = 0; acm->transmitting--; + usb_autopm_put_interface_async(acm->control); } /* @@ -211,9 +212,12 @@ static int acm_write_start(struct acm *acm, int wbn) } dbg("%s susp_count: %d", __func__, acm->susp_count); + usb_autopm_get_interface_async(acm->control); if (acm->susp_count) { - acm->delayed_wb = wb; - schedule_work(&acm->waker); + if (!acm->delayed_wb) + acm->delayed_wb = wb; + else + usb_autopm_put_interface_async(acm->control); spin_unlock_irqrestore(&acm->write_lock, flags); return 0; /* A white lie */ } @@ -424,7 +428,6 @@ next_buffer: throttled = acm->throttle; spin_unlock_irqrestore(&acm->throttle_lock, flags); if (!throttled) { - tty_buffer_request_room(tty, buf->size); tty_insert_flip_string(tty, buf->base, buf->size); tty_flip_buffer_push(tty); } else { @@ -534,23 +537,6 @@ static void acm_softint(struct work_struct *work) tty_kref_put(tty); } -static void acm_waker(struct work_struct *waker) -{ - struct acm *acm = container_of(waker, struct acm, waker); - int rv; - - rv = usb_autopm_get_interface(acm->control); - if (rv < 0) { - dev_err(&acm->dev->dev, "Autopm failure in %s\n", __func__); - return; - } - if (acm->delayed_wb) { - acm_start_wb(acm, acm->delayed_wb); - acm->delayed_wb = NULL; - } - usb_autopm_put_interface(acm->control); -} - /* * TTY handlers */ @@ -566,7 +552,7 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp) acm = acm_table[tty->index]; if (!acm || !acm->dev) - goto err_out; + goto out; else rv = 0; @@ -582,8 +568,9 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp) mutex_lock(&acm->mutex); if (acm->port.count++) { + mutex_unlock(&acm->mutex); usb_autopm_put_interface(acm->control); - goto done; + goto out; } acm->ctrlurb->dev = acm->dev; @@ -612,18 +599,18 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp) set_bit(ASYNCB_INITIALIZED, &acm->port.flags); rv = tty_port_block_til_ready(&acm->port, tty, filp); tasklet_schedule(&acm->urb_task); -done: + mutex_unlock(&acm->mutex); -err_out: +out: mutex_unlock(&open_mutex); return rv; full_bailout: 
usb_kill_urb(acm->ctrlurb); bail_out: - usb_autopm_put_interface(acm->control); acm->port.count--; mutex_unlock(&acm->mutex); + usb_autopm_put_interface(acm->control); early_bail: mutex_unlock(&open_mutex); tty_port_tty_set(&acm->port, NULL); @@ -1023,7 +1010,7 @@ static int acm_probe(struct usb_interface *intf, case USB_CDC_CALL_MANAGEMENT_TYPE: call_management_function = buffer[3]; call_interface_num = buffer[4]; - if ((call_management_function & 3) != 3) + if ((quirks & NOT_A_MODEM) == 0 && (call_management_function & 3) != 3) dev_err(&intf->dev, "This device cannot do calls on its own. It is not a modem.\n"); break; default: @@ -1178,7 +1165,6 @@ made_compressed_probe: acm->urb_task.func = acm_rx_tasklet; acm->urb_task.data = (unsigned long) acm; INIT_WORK(&acm->work, acm_softint); - INIT_WORK(&acm->waker, acm_waker); init_waitqueue_head(&acm->drain_wait); spin_lock_init(&acm->throttle_lock); spin_lock_init(&acm->write_lock); @@ -1343,7 +1329,6 @@ static void stop_data_traffic(struct acm *acm) tasklet_enable(&acm->urb_task); cancel_work_sync(&acm->work); - cancel_work_sync(&acm->waker); } static void acm_disconnect(struct usb_interface *intf) @@ -1435,6 +1420,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message) static int acm_resume(struct usb_interface *intf) { struct acm *acm = usb_get_intfdata(intf); + struct acm_wb *wb; int rv = 0; int cnt; @@ -1449,6 +1435,21 @@ static int acm_resume(struct usb_interface *intf) mutex_lock(&acm->mutex); if (acm->port.count) { rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO); + + spin_lock_irq(&acm->write_lock); + if (acm->delayed_wb) { + wb = acm->delayed_wb; + acm->delayed_wb = NULL; + spin_unlock_irq(&acm->write_lock); + acm_start_wb(acm, wb); + } else { + spin_unlock_irq(&acm->write_lock); + } + + /* + * delayed error checking because we must + * do the write path at all cost + */ if (rv < 0) goto err_out; @@ -1460,6 +1461,23 @@ err_out: return rv; } +static int acm_reset_resume(struct usb_interface *intf) +{ + struct acm *acm = usb_get_intfdata(intf); + struct tty_struct *tty; + + mutex_lock(&acm->mutex); + if (acm->port.count) { + tty = tty_port_tty_get(&acm->port); + if (tty) { + tty_hangup(tty); + tty_kref_put(tty); + } + } + mutex_unlock(&acm->mutex); + return acm_resume(intf); +} + #endif /* CONFIG_PM */ #define NOKIA_PCSUITE_ACM_INFO(x) \ @@ -1471,7 +1489,7 @@ err_out: * USB driver structure. */ -static struct usb_device_id acm_ids[] = { +static const struct usb_device_id acm_ids[] = { /* quirky and broken devices */ { USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ @@ -1576,6 +1594,11 @@ static struct usb_device_id acm_ids[] = { /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! 
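The NOT_A_MODEM handling above follows the same scheme as the other cdc-acm quirks: the quirk bits are carried in the .driver_info field of the (now const) device ID table and read back in probe(). A stripped-down sketch of that arrangement, with made-up vendor/product IDs and a single hypothetical quirk bit:

#include <linux/module.h>
#include <linux/usb.h>

#define FOO_QUIRK_NOT_A_MODEM	1	/* hypothetical quirk flag */

static const struct usb_device_id foo_ids[] = {
	{ USB_DEVICE(0x1234, 0x5678),	/* made-up VID/PID that needs the quirk */
	  .driver_info = FOO_QUIRK_NOT_A_MODEM },
	{ USB_DEVICE(0x1234, 0x9abc) },	/* made-up VID/PID, no quirks */
	{ }				/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, foo_ids);

static int foo_probe(struct usb_interface *intf,
		     const struct usb_device_id *id)
{
	unsigned long quirks = id->driver_info;

	if (quirks & FOO_QUIRK_NOT_A_MODEM)
		dev_info(&intf->dev, "skipping call-management sanity check\n");

	return 0;
}

Because the matched table entry is handed to probe(), per-device behaviour can be selected without string comparisons or extra lookup tables.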
*/ + /* Support Lego NXT using pbLua firmware */ + { USB_DEVICE(0x0694, 0xff00), + .driver_info = NOT_A_MODEM, + }, + /* control interfaces with various AT-command sets */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, USB_CDC_ACM_PROTO_AT_V25TER) }, @@ -1602,6 +1625,7 @@ static struct usb_driver acm_driver = { #ifdef CONFIG_PM .suspend = acm_suspend, .resume = acm_resume, + .reset_resume = acm_reset_resume, #endif .id_table = acm_ids, #ifdef CONFIG_PM diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h index c4a0ee8ffcc..4a8e87ec6ce 100644 --- a/drivers/usb/class/cdc-acm.h +++ b/drivers/usb/class/cdc-acm.h @@ -112,7 +112,6 @@ struct acm { struct mutex mutex; struct usb_cdc_line_coding line; /* bits, stop, parity */ struct work_struct work; /* work queue entry for line discipline waking up */ - struct work_struct waker; wait_queue_head_t drain_wait; /* close processing */ struct tasklet_struct urb_task; /* rx processing */ spinlock_t throttle_lock; /* synchronize throtteling and read callback */ @@ -137,3 +136,4 @@ struct acm { #define NO_UNION_NORMAL 1 #define SINGLE_RX_URB 2 #define NO_CAP_LINE 4 +#define NOT_A_MODEM 8 diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 3e564bfe17d..18aafcb08fc 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -31,7 +31,7 @@ #define DRIVER_AUTHOR "Oliver Neukum" #define DRIVER_DESC "USB Abstract Control Model driver for USB WCM Device Management" -static struct usb_device_id wdm_ids[] = { +static const struct usb_device_id wdm_ids[] = { { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS | USB_DEVICE_ID_MATCH_INT_SUBCLASS, diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index 9bc112ee780..93b5f85d7ce 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c @@ -163,7 +163,6 @@ struct usblp { unsigned char used; /* True if open */ unsigned char present; /* True if not disconnected */ unsigned char bidir; /* interface is bidirectional */ - unsigned char sleeping; /* interface is suspended */ unsigned char no_paper; /* Paper Out happened */ unsigned char *device_id_string; /* IEEE 1284 DEVICE ID string (ptr) */ /* first 2 bytes are (big-endian) length */ @@ -191,7 +190,6 @@ static void usblp_dump(struct usblp *usblp) { dbg("quirks=%d", usblp->quirks); dbg("used=%d", usblp->used); dbg("bidir=%d", usblp->bidir); - dbg("sleeping=%d", usblp->sleeping); dbg("device_id_string=\"%s\"", usblp->device_id_string ? 
usblp->device_id_string + 2 : @@ -376,7 +374,7 @@ static int usblp_check_status(struct usblp *usblp, int err) static int handle_bidir (struct usblp *usblp) { - if (usblp->bidir && usblp->used && !usblp->sleeping) { + if (usblp->bidir && usblp->used) { if (usblp_submit_read(usblp) < 0) return -EIO; } @@ -503,11 +501,6 @@ static long usblp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) goto done; } - if (usblp->sleeping) { - retval = -ENODEV; - goto done; - } - dbg("usblp_ioctl: cmd=0x%x (%c nr=%d len=%d dir=%d)", cmd, _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd), _IOC_DIR(cmd) ); @@ -914,8 +907,6 @@ static int usblp_wtest(struct usblp *usblp, int nonblock) return 0; } spin_unlock_irqrestore(&usblp->lock, flags); - if (usblp->sleeping) - return -ENODEV; if (nonblock) return -EAGAIN; return 1; @@ -968,8 +959,6 @@ static int usblp_rtest(struct usblp *usblp, int nonblock) return 0; } spin_unlock_irqrestore(&usblp->lock, flags); - if (usblp->sleeping) - return -ENODEV; if (nonblock) return -EAGAIN; return 1; @@ -1377,12 +1366,10 @@ static void usblp_disconnect(struct usb_interface *intf) mutex_unlock (&usblp_mutex); } -static int usblp_suspend (struct usb_interface *intf, pm_message_t message) +static int usblp_suspend(struct usb_interface *intf, pm_message_t message) { struct usblp *usblp = usb_get_intfdata (intf); - /* we take no more IO */ - usblp->sleeping = 1; usblp_unlink_urbs(usblp); #if 0 /* XXX Do we want this? What if someone is reading, should we fail? */ /* not strictly necessary, but just in case */ @@ -1393,18 +1380,17 @@ static int usblp_suspend (struct usb_interface *intf, pm_message_t message) return 0; } -static int usblp_resume (struct usb_interface *intf) +static int usblp_resume(struct usb_interface *intf) { struct usblp *usblp = usb_get_intfdata (intf); int r; - usblp->sleeping = 0; r = handle_bidir (usblp); return r; } -static struct usb_device_id usblp_ids [] = { +static const struct usb_device_id usblp_ids[] = { { USB_DEVICE_INFO(7, 1, 1) }, { USB_DEVICE_INFO(7, 1, 2) }, { USB_DEVICE_INFO(7, 1, 3) }, diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 7c5f4e32c92..8588c0937a8 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c @@ -48,7 +48,7 @@ */ #define USBTMC_MAX_READS_TO_CLEAR_BULK_IN 100 -static struct usb_device_id usbtmc_devices[] = { +static const struct usb_device_id usbtmc_devices[] = { { USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 0), }, { USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 1), }, { 0, } /* terminating entry */ diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig index ad925946f86..97a819c23ef 100644 --- a/drivers/usb/core/Kconfig +++ b/drivers/usb/core/Kconfig @@ -91,8 +91,8 @@ config USB_DYNAMIC_MINORS If you are unsure about this, say N here. 
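With the Kconfig change below, USB_SUSPEND is layered on the generic runtime PM core (PM_RUNTIME), but the driver-facing interface stays the same: a driver declares supports_autosuspend and brackets its I/O with the usb_autopm helpers. A minimal sketch, with hypothetical foo_* names:

#include <linux/usb.h>

static int foo_do_io(struct usb_interface *intf)
{
	int ret;

	/* Resume the device if needed and hold off autosuspend while busy. */
	ret = usb_autopm_get_interface(intf);
	if (ret < 0)
		return ret;

	/* ... submit URBs or synchronous transfers here ... */

	/* Drop the usage count; the core may now autosuspend the device. */
	usb_autopm_put_interface(intf);
	return 0;
}

static struct usb_driver foo_driver = {
	.name			= "foo",
	.supports_autosuspend	= 1,	/* opt in to autosuspend */
	/* .probe, .disconnect, .suspend, .resume, .id_table, ... */
};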
config USB_SUSPEND - bool "USB selective suspend/resume and wakeup" - depends on USB && PM + bool "USB runtime power management (suspend/resume and wakeup)" + depends on USB && PM_RUNTIME help If you say Y here, you can use driver calls or the sysfs "power/level" file to suspend or resume individual USB diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c index 355dffcc23b..c83c975152a 100644 --- a/drivers/usb/core/devices.c +++ b/drivers/usb/core/devices.c @@ -118,6 +118,7 @@ static const char *format_endpt = */ static DECLARE_WAIT_QUEUE_HEAD(deviceconndiscwq); +/* guarded by usbfs_mutex */ static unsigned int conndiscevcnt; /* this struct stores the poll state for <mountpoint>/devices pollers */ @@ -156,7 +157,9 @@ static const struct class_info clas_info[] = void usbfs_conn_disc_event(void) { + mutex_lock(&usbfs_mutex); conndiscevcnt++; + mutex_unlock(&usbfs_mutex); wake_up(&deviceconndiscwq); } @@ -629,42 +632,29 @@ static ssize_t usb_device_read(struct file *file, char __user *buf, static unsigned int usb_device_poll(struct file *file, struct poll_table_struct *wait) { - struct usb_device_status *st = file->private_data; + struct usb_device_status *st; unsigned int mask = 0; - lock_kernel(); + mutex_lock(&usbfs_mutex); + st = file->private_data; if (!st) { st = kmalloc(sizeof(struct usb_device_status), GFP_KERNEL); - - /* we may have dropped BKL - - * need to check for having lost the race */ - if (file->private_data) { - kfree(st); - st = file->private_data; - goto lost_race; - } - /* we haven't lost - check for allocation failure now */ if (!st) { - unlock_kernel(); + mutex_unlock(&usbfs_mutex); return POLLIN; } - /* - * need to prevent the module from being unloaded, since - * proc_unregister does not call the release method and - * we would have a memory leak - */ st->lastev = conndiscevcnt; file->private_data = st; mask = POLLIN; } -lost_race: + if (file->f_mode & FMODE_READ) poll_wait(file, &deviceconndiscwq, wait); if (st->lastev != conndiscevcnt) mask |= POLLIN; st->lastev = conndiscevcnt; - unlock_kernel(); + mutex_unlock(&usbfs_mutex); return mask; } @@ -685,7 +675,7 @@ static loff_t usb_device_lseek(struct file *file, loff_t offset, int orig) { loff_t ret; - lock_kernel(); + mutex_lock(&file->f_dentry->d_inode->i_mutex); switch (orig) { case 0: @@ -701,7 +691,7 @@ static loff_t usb_device_lseek(struct file *file, loff_t offset, int orig) ret = -EINVAL; } - unlock_kernel(); + mutex_unlock(&file->f_dentry->d_inode->i_mutex); return ret; } diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index a678186f218..e909ff7b909 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -122,7 +122,7 @@ static loff_t usbdev_lseek(struct file *file, loff_t offset, int orig) { loff_t ret; - lock_kernel(); + mutex_lock(&file->f_dentry->d_inode->i_mutex); switch (orig) { case 0: @@ -138,7 +138,7 @@ static loff_t usbdev_lseek(struct file *file, loff_t offset, int orig) ret = -EINVAL; } - unlock_kernel(); + mutex_unlock(&file->f_dentry->d_inode->i_mutex); return ret; } @@ -310,7 +310,8 @@ static struct async *async_getpending(struct dev_state *ps, static void snoop_urb(struct usb_device *udev, void __user *userurb, int pipe, unsigned length, - int timeout_or_status, enum snoop_when when) + int timeout_or_status, enum snoop_when when, + unsigned char *data, unsigned data_len) { static const char *types[] = {"isoc", "int", "ctrl", "bulk"}; static const char *dirs[] = {"out", "in"}; @@ -344,6 +345,11 @@ static void snoop_urb(struct usb_device *udev, 
"status %d\n", ep, t, d, length, timeout_or_status); } + + if (data && data_len > 0) { + print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, 32, 1, + data, data_len, 1); + } } #define AS_CONTINUATION 1 @@ -410,7 +416,9 @@ static void async_completed(struct urb *urb) } snoop(&urb->dev->dev, "urb complete\n"); snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length, - as->status, COMPLETE); + as->status, COMPLETE, + ((urb->transfer_flags & URB_DIR_MASK) == USB_DIR_OUT) ? + NULL : urb->transfer_buffer, urb->actual_length); if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET && as->status != -ENOENT) cancel_bulk_urbs(ps, as->bulk_addr); @@ -653,20 +661,20 @@ static int usbdev_open(struct inode *inode, struct file *file) const struct cred *cred = current_cred(); int ret; - lock_kernel(); - /* Protect against simultaneous removal or release */ - mutex_lock(&usbfs_mutex); - ret = -ENOMEM; ps = kmalloc(sizeof(struct dev_state), GFP_KERNEL); if (!ps) - goto out; + goto out_free_ps; ret = -ENODEV; + /* Protect against simultaneous removal or release */ + mutex_lock(&usbfs_mutex); + /* usbdev device-node */ if (imajor(inode) == USB_DEVICE_MAJOR) dev = usbdev_lookup_by_devt(inode->i_rdev); + #ifdef CONFIG_USB_DEVICEFS /* procfs file */ if (!dev) { @@ -678,13 +686,19 @@ static int usbdev_open(struct inode *inode, struct file *file) dev = NULL; } #endif - if (!dev || dev->state == USB_STATE_NOTATTACHED) - goto out; + mutex_unlock(&usbfs_mutex); + + if (!dev) + goto out_free_ps; + + usb_lock_device(dev); + if (dev->state == USB_STATE_NOTATTACHED) + goto out_unlock_device; + ret = usb_autoresume_device(dev); if (ret) - goto out; + goto out_unlock_device; - ret = 0; ps->dev = dev; ps->file = file; spin_lock_init(&ps->lock); @@ -702,15 +716,16 @@ static int usbdev_open(struct inode *inode, struct file *file) smp_wmb(); list_add_tail(&ps->list, &dev->filelist); file->private_data = ps; + usb_unlock_device(dev); snoop(&dev->dev, "opened by process %d: %s\n", task_pid_nr(current), current->comm); - out: - if (ret) { - kfree(ps); - usb_put_dev(dev); - } - mutex_unlock(&usbfs_mutex); - unlock_kernel(); + return ret; + + out_unlock_device: + usb_unlock_device(dev); + usb_put_dev(dev); + out_free_ps: + kfree(ps); return ret; } @@ -724,10 +739,7 @@ static int usbdev_release(struct inode *inode, struct file *file) usb_lock_device(dev); usb_hub_release_all_ports(dev, ps); - /* Protect against simultaneous open */ - mutex_lock(&usbfs_mutex); list_del_init(&ps->list); - mutex_unlock(&usbfs_mutex); for (ifnum = 0; ps->ifclaimed && ifnum < 8*sizeof(ps->ifclaimed); ifnum++) { @@ -770,6 +782,13 @@ static int proc_control(struct dev_state *ps, void __user *arg) if (!tbuf) return -ENOMEM; tmo = ctrl.timeout; + snoop(&dev->dev, "control urb: bRequestType=%02x " + "bRequest=%02x wValue=%04x " + "wIndex=%04x wLength=%04x\n", + ctrl.bRequestType, ctrl.bRequest, + __le16_to_cpup(&ctrl.wValue), + __le16_to_cpup(&ctrl.wIndex), + __le16_to_cpup(&ctrl.wLength)); if (ctrl.bRequestType & 0x80) { if (ctrl.wLength && !access_ok(VERIFY_WRITE, ctrl.data, ctrl.wLength)) { @@ -777,15 +796,15 @@ static int proc_control(struct dev_state *ps, void __user *arg) return -EINVAL; } pipe = usb_rcvctrlpipe(dev, 0); - snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT); + snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT, NULL, 0); usb_unlock_device(dev); i = usb_control_msg(dev, pipe, ctrl.bRequest, ctrl.bRequestType, ctrl.wValue, ctrl.wIndex, tbuf, ctrl.wLength, tmo); usb_lock_device(dev); - snoop_urb(dev, NULL, pipe, 
max(i, 0), min(i, 0), COMPLETE); - + snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE, + tbuf, i); if ((i > 0) && ctrl.wLength) { if (copy_to_user(ctrl.data, tbuf, i)) { free_page((unsigned long)tbuf); @@ -800,14 +819,15 @@ static int proc_control(struct dev_state *ps, void __user *arg) } } pipe = usb_sndctrlpipe(dev, 0); - snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT); + snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT, + tbuf, ctrl.wLength); usb_unlock_device(dev); i = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ctrl.bRequest, ctrl.bRequestType, ctrl.wValue, ctrl.wIndex, tbuf, ctrl.wLength, tmo); usb_lock_device(dev); - snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE); + snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE, NULL, 0); } free_page((unsigned long)tbuf); if (i < 0 && i != -EPIPE) { @@ -853,12 +873,12 @@ static int proc_bulk(struct dev_state *ps, void __user *arg) kfree(tbuf); return -EINVAL; } - snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT); + snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, NULL, 0); usb_unlock_device(dev); i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo); usb_lock_device(dev); - snoop_urb(dev, NULL, pipe, len2, i, COMPLETE); + snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, tbuf, len2); if (!i && len2) { if (copy_to_user(bulk.data, tbuf, len2)) { @@ -873,12 +893,12 @@ static int proc_bulk(struct dev_state *ps, void __user *arg) return -EFAULT; } } - snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT); + snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, tbuf, len1); usb_unlock_device(dev); i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo); usb_lock_device(dev); - snoop_urb(dev, NULL, pipe, len2, i, COMPLETE); + snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, NULL, 0); } kfree(tbuf); if (i < 0) @@ -1097,6 +1117,13 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb, is_in = 0; uurb->endpoint &= ~USB_DIR_IN; } + snoop(&ps->dev->dev, "control urb: bRequestType=%02x " + "bRequest=%02x wValue=%04x " + "wIndex=%04x wLength=%04x\n", + dr->bRequestType, dr->bRequest, + __le16_to_cpup(&dr->wValue), + __le16_to_cpup(&dr->wIndex), + __le16_to_cpup(&dr->wLength)); break; case USBDEVFS_URB_TYPE_BULK: @@ -1104,13 +1131,25 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb, case USB_ENDPOINT_XFER_CONTROL: case USB_ENDPOINT_XFER_ISOC: return -EINVAL; - /* allow single-shot interrupt transfers, at bogus rates */ + case USB_ENDPOINT_XFER_INT: + /* allow single-shot interrupt transfers */ + uurb->type = USBDEVFS_URB_TYPE_INTERRUPT; + goto interrupt_urb; } uurb->number_of_packets = 0; if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE) return -EINVAL; break; + case USBDEVFS_URB_TYPE_INTERRUPT: + if (!usb_endpoint_xfer_int(&ep->desc)) + return -EINVAL; + interrupt_urb: + uurb->number_of_packets = 0; + if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE) + return -EINVAL; + break; + case USBDEVFS_URB_TYPE_ISO: /* arbitrary limit */ if (uurb->number_of_packets < 1 || @@ -1143,14 +1182,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb, uurb->buffer_length = totlen; break; - case USBDEVFS_URB_TYPE_INTERRUPT: - uurb->number_of_packets = 0; - if (!usb_endpoint_xfer_int(&ep->desc)) - return -EINVAL; - if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE) - return -EINVAL; - break; - default: return -EINVAL; } @@ -1236,7 +1267,9 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb, } } snoop_urb(ps->dev, as->userurb, as->urb->pipe, - 
as->urb->transfer_buffer_length, 0, SUBMIT); + as->urb->transfer_buffer_length, 0, SUBMIT, + is_in ? NULL : as->urb->transfer_buffer, + uurb->buffer_length); async_newpending(as); if (usb_endpoint_xfer_bulk(&ep->desc)) { @@ -1274,7 +1307,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb, dev_printk(KERN_DEBUG, &ps->dev->dev, "usbfs: usb_submit_urb returned %d\n", ret); snoop_urb(ps->dev, as->userurb, as->urb->pipe, - 0, ret, COMPLETE); + 0, ret, COMPLETE, NULL, 0); async_removepending(as); free_async(as); return ret; @@ -1628,7 +1661,10 @@ static int proc_ioctl(struct dev_state *ps, struct usbdevfs_ioctl *ctl) if (driver == NULL || driver->ioctl == NULL) { retval = -ENOTTY; } else { + /* keep API that guarantees BKL */ + lock_kernel(); retval = driver->ioctl(intf, ctl->ioctl_code, buf); + unlock_kernel(); if (retval == -ENOIOCTLCMD) retval = -ENOTTY; } @@ -1711,6 +1747,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd, if (!(file->f_mode & FMODE_WRITE)) return -EPERM; + usb_lock_device(dev); if (!connected(ps)) { usb_unlock_device(dev); @@ -1877,9 +1914,7 @@ static long usbdev_ioctl(struct file *file, unsigned int cmd, { int ret; - lock_kernel(); ret = usbdev_do_ioctl(file, cmd, (void __user *)arg); - unlock_kernel(); return ret; } @@ -1890,9 +1925,7 @@ static long usbdev_compat_ioctl(struct file *file, unsigned int cmd, { int ret; - lock_kernel(); ret = usbdev_do_ioctl(file, cmd, compat_ptr(arg)); - unlock_kernel(); return ret; } diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index f2f055eb683..a7037bf8168 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -25,7 +25,7 @@ #include <linux/device.h> #include <linux/usb.h> #include <linux/usb/quirks.h> -#include <linux/workqueue.h> +#include <linux/pm_runtime.h> #include "hcd.h" #include "usb.h" @@ -221,7 +221,7 @@ static int usb_probe_device(struct device *dev) { struct usb_device_driver *udriver = to_usb_device_driver(dev->driver); struct usb_device *udev = to_usb_device(dev); - int error = -ENODEV; + int error = 0; dev_dbg(dev, "%s\n", __func__); @@ -230,18 +230,23 @@ static int usb_probe_device(struct device *dev) /* The device should always appear to be in use * unless the driver suports autosuspend. */ - udev->pm_usage_cnt = !(udriver->supports_autosuspend); + if (!udriver->supports_autosuspend) + error = usb_autoresume_device(udev); - error = udriver->probe(udev); + if (!error) + error = udriver->probe(udev); return error; } /* called from driver core with dev locked */ static int usb_unbind_device(struct device *dev) { + struct usb_device *udev = to_usb_device(dev); struct usb_device_driver *udriver = to_usb_device_driver(dev->driver); - udriver->disconnect(to_usb_device(dev)); + udriver->disconnect(udev); + if (!udriver->supports_autosuspend) + usb_autosuspend_device(udev); return 0; } @@ -274,60 +279,62 @@ static int usb_probe_interface(struct device *dev) intf->needs_binding = 0; if (usb_device_is_owned(udev)) - return -ENODEV; + return error; if (udev->authorized == 0) { dev_err(&intf->dev, "Device is not authorized for usage\n"); - return -ENODEV; + return error; } id = usb_match_id(intf, driver->id_table); if (!id) id = usb_match_dynamic_id(intf, driver); - if (id) { - dev_dbg(dev, "%s - got id\n", __func__); - - error = usb_autoresume_device(udev); - if (error) - return error; + if (!id) + return error; - /* Interface "power state" doesn't correspond to any hardware - * state whatsoever. 
We use it to record when it's bound to - * a driver that may start I/0: it's not frozen/quiesced. - */ - mark_active(intf); - intf->condition = USB_INTERFACE_BINDING; + dev_dbg(dev, "%s - got id\n", __func__); - /* The interface should always appear to be in use - * unless the driver suports autosuspend. - */ - atomic_set(&intf->pm_usage_cnt, !driver->supports_autosuspend); - - /* Carry out a deferred switch to altsetting 0 */ - if (intf->needs_altsetting0) { - error = usb_set_interface(udev, intf->altsetting[0]. - desc.bInterfaceNumber, 0); - if (error < 0) - goto err; + error = usb_autoresume_device(udev); + if (error) + return error; - intf->needs_altsetting0 = 0; - } + intf->condition = USB_INTERFACE_BINDING; - error = driver->probe(intf, id); - if (error) + /* Bound interfaces are initially active. They are + * runtime-PM-enabled only if the driver has autosuspend support. + * They are sensitive to their children's power states. + */ + pm_runtime_set_active(dev); + pm_suspend_ignore_children(dev, false); + if (driver->supports_autosuspend) + pm_runtime_enable(dev); + + /* Carry out a deferred switch to altsetting 0 */ + if (intf->needs_altsetting0) { + error = usb_set_interface(udev, intf->altsetting[0]. + desc.bInterfaceNumber, 0); + if (error < 0) goto err; - - intf->condition = USB_INTERFACE_BOUND; - usb_autosuspend_device(udev); + intf->needs_altsetting0 = 0; } + error = driver->probe(intf, id); + if (error) + goto err; + + intf->condition = USB_INTERFACE_BOUND; + usb_autosuspend_device(udev); return error; -err: - mark_quiesced(intf); + err: intf->needs_remote_wakeup = 0; intf->condition = USB_INTERFACE_UNBOUND; usb_cancel_queued_reset(intf); + + /* Unbound interfaces are always runtime-PM-disabled and -suspended */ + pm_runtime_disable(dev); + pm_runtime_set_suspended(dev); + usb_autosuspend_device(udev); return error; } @@ -377,9 +384,17 @@ static int usb_unbind_interface(struct device *dev) usb_set_intfdata(intf, NULL); intf->condition = USB_INTERFACE_UNBOUND; - mark_quiesced(intf); intf->needs_remote_wakeup = 0; + /* Unbound interfaces are always runtime-PM-disabled and -suspended */ + pm_runtime_disable(dev); + pm_runtime_set_suspended(dev); + + /* Undo any residual pm_autopm_get_interface_* calls */ + for (r = atomic_read(&intf->pm_usage_cnt); r > 0; --r) + usb_autopm_put_interface_no_suspend(intf); + atomic_set(&intf->pm_usage_cnt, 0); + if (!error) usb_autosuspend_device(udev); @@ -410,7 +425,6 @@ int usb_driver_claim_interface(struct usb_driver *driver, struct usb_interface *iface, void *priv) { struct device *dev = &iface->dev; - struct usb_device *udev = interface_to_usbdev(iface); int retval = 0; if (dev->driver) @@ -420,11 +434,16 @@ int usb_driver_claim_interface(struct usb_driver *driver, usb_set_intfdata(iface, priv); iface->needs_binding = 0; - usb_pm_lock(udev); iface->condition = USB_INTERFACE_BOUND; - mark_active(iface); - atomic_set(&iface->pm_usage_cnt, !driver->supports_autosuspend); - usb_pm_unlock(udev); + + /* Bound interfaces are initially active. They are + * runtime-PM-enabled only if the driver has autosuspend support. + * They are sensitive to their children's power states. 
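usb_probe_interface() now seeds the interface's runtime PM state explicitly: mark it active, keep it sensitive to its children, and enable runtime PM only when the driver supports autosuspend; unbinding reverses all of this. The same bookkeeping on a bare struct device, shown as a sketch (the foo_* names are hypothetical):

#include <linux/device.h>
#include <linux/pm_runtime.h>

static void foo_bind(struct device *dev, bool supports_autosuspend)
{
	/* A freshly bound device is powered, so tell the runtime PM core. */
	pm_runtime_set_active(dev);
	pm_suspend_ignore_children(dev, false);
	if (supports_autosuspend)
		pm_runtime_enable(dev);
}

static void foo_unbind(struct device *dev)
{
	/* Unbound devices stay runtime-PM-disabled and are marked suspended. */
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
}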
+ */ + pm_runtime_set_active(dev); + pm_suspend_ignore_children(dev, false); + if (driver->supports_autosuspend) + pm_runtime_enable(dev); /* if interface was already added, bind now; else let * the future device_add() bind it, bypassing probe() @@ -691,9 +710,6 @@ static int usb_uevent(struct device *dev, struct kobj_uevent_env *env) { struct usb_device *usb_dev; - /* driver is often null here; dev_dbg() would oops */ - pr_debug("usb %s: uevent\n", dev_name(dev)); - if (is_usb_device(dev)) { usb_dev = to_usb_device(dev); } else if (is_usb_interface(dev)) { @@ -705,6 +721,7 @@ static int usb_uevent(struct device *dev, struct kobj_uevent_env *env) } if (usb_dev->devnum < 0) { + /* driver is often null here; dev_dbg() would oops */ pr_debug("usb %s: already deleted?\n", dev_name(dev)); return -ENODEV; } @@ -983,7 +1000,6 @@ static void do_unbind_rebind(struct usb_device *udev, int action) } } -/* Caller has locked udev's pm_mutex */ static int usb_suspend_device(struct usb_device *udev, pm_message_t msg) { struct usb_device_driver *udriver; @@ -1007,7 +1023,6 @@ static int usb_suspend_device(struct usb_device *udev, pm_message_t msg) return status; } -/* Caller has locked udev's pm_mutex */ static int usb_resume_device(struct usb_device *udev, pm_message_t msg) { struct usb_device_driver *udriver; @@ -1041,27 +1056,20 @@ static int usb_resume_device(struct usb_device *udev, pm_message_t msg) return status; } -/* Caller has locked intf's usb_device's pm mutex */ static int usb_suspend_interface(struct usb_device *udev, struct usb_interface *intf, pm_message_t msg) { struct usb_driver *driver; int status = 0; - /* with no hardware, USB interfaces only use FREEZE and ON states */ - if (udev->state == USB_STATE_NOTATTACHED || !is_active(intf)) - goto done; - - /* This can happen; see usb_driver_release_interface() */ - if (intf->condition == USB_INTERFACE_UNBOUND) + if (udev->state == USB_STATE_NOTATTACHED || + intf->condition == USB_INTERFACE_UNBOUND) goto done; driver = to_usb_driver(intf->dev.driver); if (driver->suspend) { status = driver->suspend(intf, msg); - if (status == 0) - mark_quiesced(intf); - else if (!(msg.event & PM_EVENT_AUTO)) + if (status && !(msg.event & PM_EVENT_AUTO)) dev_err(&intf->dev, "%s error %d\n", "suspend", status); } else { @@ -1069,7 +1077,6 @@ static int usb_suspend_interface(struct usb_device *udev, intf->needs_binding = 1; dev_warn(&intf->dev, "no %s for driver %s?\n", "suspend", driver->name); - mark_quiesced(intf); } done: @@ -1077,14 +1084,13 @@ static int usb_suspend_interface(struct usb_device *udev, return status; } -/* Caller has locked intf's usb_device's pm_mutex */ static int usb_resume_interface(struct usb_device *udev, struct usb_interface *intf, pm_message_t msg, int reset_resume) { struct usb_driver *driver; int status = 0; - if (udev->state == USB_STATE_NOTATTACHED || is_active(intf)) + if (udev->state == USB_STATE_NOTATTACHED) goto done; /* Don't let autoresume interfere with unbinding */ @@ -1135,90 +1141,11 @@ static int usb_resume_interface(struct usb_device *udev, done: dev_vdbg(&intf->dev, "%s: status %d\n", __func__, status); - if (status == 0 && intf->condition == USB_INTERFACE_BOUND) - mark_active(intf); /* Later we will unbind the driver and/or reprobe, if necessary */ return status; } -#ifdef CONFIG_USB_SUSPEND - -/* Internal routine to check whether we may autosuspend a device. 
*/ -static int autosuspend_check(struct usb_device *udev, int reschedule) -{ - int i; - struct usb_interface *intf; - unsigned long suspend_time, j; - - /* For autosuspend, fail fast if anything is in use or autosuspend - * is disabled. Also fail if any interfaces require remote wakeup - * but it isn't available. - */ - if (udev->pm_usage_cnt > 0) - return -EBUSY; - if (udev->autosuspend_delay < 0 || udev->autosuspend_disabled) - return -EPERM; - - suspend_time = udev->last_busy + udev->autosuspend_delay; - if (udev->actconfig) { - for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { - intf = udev->actconfig->interface[i]; - if (!is_active(intf)) - continue; - if (atomic_read(&intf->pm_usage_cnt) > 0) - return -EBUSY; - if (intf->needs_remote_wakeup && - !udev->do_remote_wakeup) { - dev_dbg(&udev->dev, "remote wakeup needed " - "for autosuspend\n"); - return -EOPNOTSUPP; - } - - /* Don't allow autosuspend if the device will need - * a reset-resume and any of its interface drivers - * doesn't include support. - */ - if (udev->quirks & USB_QUIRK_RESET_RESUME) { - struct usb_driver *driver; - - driver = to_usb_driver(intf->dev.driver); - if (!driver->reset_resume || - intf->needs_remote_wakeup) - return -EOPNOTSUPP; - } - } - } - - /* If everything is okay but the device hasn't been idle for long - * enough, queue a delayed autosuspend request. If the device - * _has_ been idle for long enough and the reschedule flag is set, - * likewise queue a delayed (1 second) autosuspend request. - */ - j = jiffies; - if (time_before(j, suspend_time)) - reschedule = 1; - else - suspend_time = j + HZ; - if (reschedule) { - if (!timer_pending(&udev->autosuspend.timer)) { - queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend, - round_jiffies_up_relative(suspend_time - j)); - } - return -EAGAIN; - } - return 0; -} - -#else - -static inline int autosuspend_check(struct usb_device *udev, int reschedule) -{ - return 0; -} - -#endif /* CONFIG_USB_SUSPEND */ - /** * usb_suspend_both - suspend a USB device and its interfaces * @udev: the usb_device to suspend @@ -1230,27 +1157,12 @@ static inline int autosuspend_check(struct usb_device *udev, int reschedule) * all the interfaces which were suspended are resumed so that they remain * in the same state as the device. * - * If an autosuspend is in progress the routine checks first to make sure - * that neither the device itself or any of its active interfaces is in use - * (pm_usage_cnt is greater than 0). If they are, the autosuspend fails. - * - * If the suspend succeeds, the routine recursively queues an autosuspend - * request for @udev's parent device, thereby propagating the change up - * the device tree. If all of the parent's children are now suspended, - * the parent will autosuspend in turn. - * - * The suspend method calls are subject to mutual exclusion under control - * of @udev's pm_mutex. Many of these calls are also under the protection - * of @udev's device lock (including all requests originating outside the - * USB subsystem), but autosuspend requests generated by a child device or - * interface driver may not be. Usbcore will insure that the method calls - * do not arrive during bind, unbind, or reset operations. However, drivers - * must be prepared to handle suspend calls arriving at unpredictable times. - * The only way to block such calls is to do an autoresume (preventing - * autosuspends) while holding @udev's device lock (preventing outside - * suspends). - * - * The caller must hold @udev->pm_mutex. 
+ * Autosuspend requests originating from a child device or an interface + * driver may be made without the protection of @udev's device lock, but + * all other suspend calls will hold the lock. Usbcore will insure that + * method calls do not arrive during bind, unbind, or reset operations. + * However drivers must be prepared to handle suspend calls arriving at + * unpredictable times. * * This routine can run only in process context. */ @@ -1259,20 +1171,11 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) int status = 0; int i = 0; struct usb_interface *intf; - struct usb_device *parent = udev->parent; if (udev->state == USB_STATE_NOTATTACHED || udev->state == USB_STATE_SUSPENDED) goto done; - udev->do_remote_wakeup = device_may_wakeup(&udev->dev); - - if (msg.event & PM_EVENT_AUTO) { - status = autosuspend_check(udev, 0); - if (status < 0) - goto done; - } - /* Suspend all the interfaces and then udev itself */ if (udev->actconfig) { for (; i < udev->actconfig->desc.bNumInterfaces; i++) { @@ -1287,35 +1190,21 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) /* If the suspend failed, resume interfaces that did get suspended */ if (status != 0) { - pm_message_t msg2; - - msg2.event = msg.event ^ (PM_EVENT_SUSPEND | PM_EVENT_RESUME); + msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME); while (--i >= 0) { intf = udev->actconfig->interface[i]; - usb_resume_interface(udev, intf, msg2, 0); + usb_resume_interface(udev, intf, msg, 0); } - /* Try another autosuspend when the interfaces aren't busy */ - if (msg.event & PM_EVENT_AUTO) - autosuspend_check(udev, status == -EBUSY); - - /* If the suspend succeeded then prevent any more URB submissions, - * flush any outstanding URBs, and propagate the suspend up the tree. + /* If the suspend succeeded then prevent any more URB submissions + * and flush any outstanding URBs. */ } else { - cancel_delayed_work(&udev->autosuspend); udev->can_submit = 0; for (i = 0; i < 16; ++i) { usb_hcd_flush_endpoint(udev, udev->ep_out[i]); usb_hcd_flush_endpoint(udev, udev->ep_in[i]); } - - /* If this is just a FREEZE or a PRETHAW, udev might - * not really be suspended. Only true suspends get - * propagated up the device tree. - */ - if (parent && udev->state == USB_STATE_SUSPENDED) - usb_autosuspend_device(parent); } done: @@ -1332,23 +1221,12 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg) * the resume method for @udev and then calls the resume methods for all * the interface drivers in @udev. * - * Before starting the resume, the routine calls itself recursively for - * the parent device of @udev, thereby propagating the change up the device - * tree and assuring that @udev will be able to resume. If the parent is - * unable to resume successfully, the routine fails. - * - * The resume method calls are subject to mutual exclusion under control - * of @udev's pm_mutex. Many of these calls are also under the protection - * of @udev's device lock (including all requests originating outside the - * USB subsystem), but autoresume requests generated by a child device or - * interface driver may not be. Usbcore will insure that the method calls - * do not arrive during bind, unbind, or reset operations. However, drivers - * must be prepared to handle resume calls arriving at unpredictable times. - * The only way to block such calls is to do an autoresume (preventing - * other autoresumes) while holding @udev's device lock (preventing outside - * resumes). 
- * - * The caller must hold @udev->pm_mutex. + * Autoresume requests originating from a child device or an interface + * driver may be made without the protection of @udev's device lock, but + * all other resume calls will hold the lock. Usbcore will insure that + * method calls do not arrive during bind, unbind, or reset operations. + * However drivers must be prepared to handle resume calls arriving at + * unpredictable times. * * This routine can run only in process context. */ @@ -1357,48 +1235,18 @@ static int usb_resume_both(struct usb_device *udev, pm_message_t msg) int status = 0; int i; struct usb_interface *intf; - struct usb_device *parent = udev->parent; - cancel_delayed_work(&udev->autosuspend); if (udev->state == USB_STATE_NOTATTACHED) { status = -ENODEV; goto done; } udev->can_submit = 1; - /* Propagate the resume up the tree, if necessary */ - if (udev->state == USB_STATE_SUSPENDED) { - if (parent) { - status = usb_autoresume_device(parent); - if (status == 0) { - status = usb_resume_device(udev, msg); - if (status || udev->state == - USB_STATE_NOTATTACHED) { - usb_autosuspend_device(parent); - - /* It's possible usb_resume_device() - * failed after the port was - * unsuspended, causing udev to be - * logically disconnected. We don't - * want usb_disconnect() to autosuspend - * the parent again, so tell it that - * udev disconnected while still - * suspended. */ - if (udev->state == - USB_STATE_NOTATTACHED) - udev->discon_suspended = 1; - } - } - } else { - - /* We can't progagate beyond the USB subsystem, - * so if a root hub's controller is suspended - * then we're stuck. */ - status = usb_resume_device(udev, msg); - } - } else if (udev->reset_resume) + /* Resume the device */ + if (udev->state == USB_STATE_SUSPENDED || udev->reset_resume) status = usb_resume_device(udev, msg); + /* Resume the interfaces */ if (status == 0 && udev->actconfig) { for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { intf = udev->actconfig->interface[i]; @@ -1414,55 +1262,94 @@ static int usb_resume_both(struct usb_device *udev, pm_message_t msg) return status; } -#ifdef CONFIG_USB_SUSPEND +/* The device lock is held by the PM core */ +int usb_suspend(struct device *dev, pm_message_t msg) +{ + struct usb_device *udev = to_usb_device(dev); -/* Internal routine to adjust a device's usage counter and change - * its autosuspend state. - */ -static int usb_autopm_do_device(struct usb_device *udev, int inc_usage_cnt) + do_unbind_rebind(udev, DO_UNBIND); + udev->do_remote_wakeup = device_may_wakeup(&udev->dev); + return usb_suspend_both(udev, msg); +} + +/* The device lock is held by the PM core */ +int usb_resume(struct device *dev, pm_message_t msg) { - int status = 0; + struct usb_device *udev = to_usb_device(dev); + int status; - usb_pm_lock(udev); - udev->pm_usage_cnt += inc_usage_cnt; - WARN_ON(udev->pm_usage_cnt < 0); - if (inc_usage_cnt) - udev->last_busy = jiffies; - if (inc_usage_cnt >= 0 && udev->pm_usage_cnt > 0) { - if (udev->state == USB_STATE_SUSPENDED) - status = usb_resume_both(udev, PMSG_AUTO_RESUME); - if (status != 0) - udev->pm_usage_cnt -= inc_usage_cnt; - else if (inc_usage_cnt) + /* For PM complete calls, all we do is rebind interfaces */ + if (msg.event == PM_EVENT_ON) { + if (udev->state != USB_STATE_NOTATTACHED) + do_unbind_rebind(udev, DO_REBIND); + status = 0; + + /* For all other calls, take the device back to full power and + * tell the PM core in case it was autosuspended previously. 
+ */ + } else { + status = usb_resume_both(udev, msg); + if (status == 0) { + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); udev->last_busy = jiffies; - } else if (inc_usage_cnt <= 0 && udev->pm_usage_cnt <= 0) { - status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND); + } } - usb_pm_unlock(udev); + + /* Avoid PM error messages for devices disconnected while suspended + * as we'll display regular disconnect messages just a bit later. + */ + if (status == -ENODEV) + status = 0; return status; } -/* usb_autosuspend_work - callback routine to autosuspend a USB device */ -void usb_autosuspend_work(struct work_struct *work) -{ - struct usb_device *udev = - container_of(work, struct usb_device, autosuspend.work); +#endif /* CONFIG_PM */ + +#ifdef CONFIG_USB_SUSPEND - usb_autopm_do_device(udev, 0); +/** + * usb_enable_autosuspend - allow a USB device to be autosuspended + * @udev: the USB device which may be autosuspended + * + * This routine allows @udev to be autosuspended. An autosuspend won't + * take place until the autosuspend_delay has elapsed and all the other + * necessary conditions are satisfied. + * + * The caller must hold @udev's device lock. + */ +int usb_enable_autosuspend(struct usb_device *udev) +{ + if (udev->autosuspend_disabled) { + udev->autosuspend_disabled = 0; + usb_autosuspend_device(udev); + } + return 0; } +EXPORT_SYMBOL_GPL(usb_enable_autosuspend); -/* usb_autoresume_work - callback routine to autoresume a USB device */ -void usb_autoresume_work(struct work_struct *work) +/** + * usb_disable_autosuspend - prevent a USB device from being autosuspended + * @udev: the USB device which may not be autosuspended + * + * This routine prevents @udev from being autosuspended and wakes it up + * if it is already autosuspended. + * + * The caller must hold @udev's device lock. + */ +int usb_disable_autosuspend(struct usb_device *udev) { - struct usb_device *udev = - container_of(work, struct usb_device, autoresume); + int rc = 0; - /* Wake it up, let the drivers do their thing, and then put it - * back to sleep. - */ - if (usb_autopm_do_device(udev, 1) == 0) - usb_autopm_do_device(udev, -1); + if (!udev->autosuspend_disabled) { + rc = usb_autoresume_device(udev); + if (rc == 0) + udev->autosuspend_disabled = 1; + } + return rc; } +EXPORT_SYMBOL_GPL(usb_disable_autosuspend); /** * usb_autosuspend_device - delayed autosuspend of a USB device and its interfaces @@ -1472,15 +1359,11 @@ void usb_autoresume_work(struct work_struct *work) * @udev and wants to allow it to autosuspend. Examples would be when * @udev's device file in usbfs is closed or after a configuration change. * - * @udev's usage counter is decremented. If it or any of the usage counters - * for an active interface is greater than 0, no autosuspend request will be - * queued. (If an interface driver does not support autosuspend then its - * usage counter is permanently positive.) Furthermore, if an interface - * driver requires remote-wakeup capability during autosuspend but remote - * wakeup is disabled, the autosuspend will fail. + * @udev's usage counter is decremented; if it drops to 0 and all the + * interfaces are inactive then a delayed autosuspend will be attempted. + * The attempt may fail (see autosuspend_check()). * - * Often the caller will hold @udev's device lock, but this is not - * necessary. + * The caller must hold @udev's device lock. * * This routine can run only in process context. 
*/ @@ -1488,9 +1371,11 @@ void usb_autosuspend_device(struct usb_device *udev) { int status; - status = usb_autopm_do_device(udev, -1); - dev_vdbg(&udev->dev, "%s: cnt %d\n", - __func__, udev->pm_usage_cnt); + udev->last_busy = jiffies; + status = pm_runtime_put_sync(&udev->dev); + dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n", + __func__, atomic_read(&udev->dev.power.usage_count), + status); } /** @@ -1500,17 +1385,22 @@ void usb_autosuspend_device(struct usb_device *udev) * This routine should be called when a core subsystem thinks @udev may * be ready to autosuspend. * - * @udev's usage counter left unchanged. If it or any of the usage counters - * for an active interface is greater than 0, or autosuspend is not allowed - * for any other reason, no autosuspend request will be queued. + * @udev's usage counter left unchanged. If it is 0 and all the interfaces + * are inactive then an autosuspend will be attempted. The attempt may + * fail or be delayed. + * + * The caller must hold @udev's device lock. * * This routine can run only in process context. */ void usb_try_autosuspend_device(struct usb_device *udev) { - usb_autopm_do_device(udev, 0); - dev_vdbg(&udev->dev, "%s: cnt %d\n", - __func__, udev->pm_usage_cnt); + int status; + + status = pm_runtime_idle(&udev->dev); + dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n", + __func__, atomic_read(&udev->dev.power.usage_count), + status); } /** @@ -1519,16 +1409,15 @@ void usb_try_autosuspend_device(struct usb_device *udev) * * This routine should be called when a core subsystem wants to use @udev * and needs to guarantee that it is not suspended. No autosuspend will - * occur until usb_autosuspend_device is called. (Note that this will not - * prevent suspend events originating in the PM core.) Examples would be - * when @udev's device file in usbfs is opened or when a remote-wakeup + * occur until usb_autosuspend_device() is called. (Note that this will + * not prevent suspend events originating in the PM core.) Examples would + * be when @udev's device file in usbfs is opened or when a remote-wakeup * request is received. * * @udev's usage counter is incremented to prevent subsequent autosuspends. * However if the autoresume fails then the usage counter is re-decremented. * - * Often the caller will hold @udev's device lock, but this is not - * necessary (and attempting it might cause deadlock). + * The caller must hold @udev's device lock. * * This routine can run only in process context. */ @@ -1536,42 +1425,14 @@ int usb_autoresume_device(struct usb_device *udev) { int status; - status = usb_autopm_do_device(udev, 1); - dev_vdbg(&udev->dev, "%s: status %d cnt %d\n", - __func__, status, udev->pm_usage_cnt); - return status; -} - -/* Internal routine to adjust an interface's usage counter and change - * its device's autosuspend state. 
- */ -static int usb_autopm_do_interface(struct usb_interface *intf, - int inc_usage_cnt) -{ - struct usb_device *udev = interface_to_usbdev(intf); - int status = 0; - - usb_pm_lock(udev); - if (intf->condition == USB_INTERFACE_UNBOUND) - status = -ENODEV; - else { - atomic_add(inc_usage_cnt, &intf->pm_usage_cnt); - udev->last_busy = jiffies; - if (inc_usage_cnt >= 0 && - atomic_read(&intf->pm_usage_cnt) > 0) { - if (udev->state == USB_STATE_SUSPENDED) - status = usb_resume_both(udev, - PMSG_AUTO_RESUME); - if (status != 0) - atomic_sub(inc_usage_cnt, &intf->pm_usage_cnt); - else - udev->last_busy = jiffies; - } else if (inc_usage_cnt <= 0 && - atomic_read(&intf->pm_usage_cnt) <= 0) { - status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND); - } - } - usb_pm_unlock(udev); + status = pm_runtime_get_sync(&udev->dev); + if (status < 0) + pm_runtime_put_sync(&udev->dev); + dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n", + __func__, atomic_read(&udev->dev.power.usage_count), + status); + if (status > 0) + status = 0; return status; } @@ -1585,34 +1446,25 @@ static int usb_autopm_do_interface(struct usb_interface *intf, * closed. * * The routine decrements @intf's usage counter. When the counter reaches - * 0, a delayed autosuspend request for @intf's device is queued. When - * the delay expires, if @intf->pm_usage_cnt is still <= 0 along with all - * the other usage counters for the sibling interfaces and @intf's - * usb_device, the device and all its interfaces will be autosuspended. - * - * Note that @intf->pm_usage_cnt is owned by the interface driver. The - * core will not change its value other than the increment and decrement - * in usb_autopm_get_interface and usb_autopm_put_interface. The driver - * may use this simple counter-oriented discipline or may set the value - * any way it likes. + * 0, a delayed autosuspend request for @intf's device is attempted. The + * attempt may fail (see autosuspend_check()). * * If the driver has set @intf->needs_remote_wakeup then autosuspend will * take place only if the device's remote-wakeup facility is enabled. * - * Suspend method calls queued by this routine can arrive at any time - * while @intf is resumed and its usage counter is equal to 0. They are - * not protected by the usb_device's lock but only by its pm_mutex. - * Drivers must provide their own synchronization. - * * This routine can run only in process context. */ void usb_autopm_put_interface(struct usb_interface *intf) { - int status; + struct usb_device *udev = interface_to_usbdev(intf); + int status; - status = usb_autopm_do_interface(intf, -1); - dev_vdbg(&intf->dev, "%s: status %d cnt %d\n", - __func__, status, atomic_read(&intf->pm_usage_cnt)); + udev->last_busy = jiffies; + atomic_dec(&intf->pm_usage_cnt); + status = pm_runtime_put_sync(&intf->dev); + dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n", + __func__, atomic_read(&intf->dev.power.usage_count), + status); } EXPORT_SYMBOL_GPL(usb_autopm_put_interface); @@ -1620,11 +1472,11 @@ EXPORT_SYMBOL_GPL(usb_autopm_put_interface); * usb_autopm_put_interface_async - decrement a USB interface's PM-usage counter * @intf: the usb_interface whose counter should be decremented * - * This routine does essentially the same thing as - * usb_autopm_put_interface(): it decrements @intf's usage counter and - * queues a delayed autosuspend request if the counter is <= 0. The - * difference is that it does not acquire the device's pm_mutex; - * callers must handle all synchronization issues themselves. 
+ * This routine does much the same thing as usb_autopm_put_interface(): + * It decrements @intf's usage counter and schedules a delayed + * autosuspend request if the counter is <= 0. The difference is that it + * does not perform any synchronization; callers should hold a private + * lock and handle all synchronization issues themselves. * * Typically a driver would call this routine during an URB's completion * handler, if no more URBs were pending. @@ -1634,28 +1486,58 @@ EXPORT_SYMBOL_GPL(usb_autopm_put_interface); void usb_autopm_put_interface_async(struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); + unsigned long last_busy; int status = 0; - if (intf->condition == USB_INTERFACE_UNBOUND) { - status = -ENODEV; - } else { - udev->last_busy = jiffies; - atomic_dec(&intf->pm_usage_cnt); - if (udev->autosuspend_disabled || udev->autosuspend_delay < 0) - status = -EPERM; - else if (atomic_read(&intf->pm_usage_cnt) <= 0 && - !timer_pending(&udev->autosuspend.timer)) { - queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend, + last_busy = udev->last_busy; + udev->last_busy = jiffies; + atomic_dec(&intf->pm_usage_cnt); + pm_runtime_put_noidle(&intf->dev); + + if (!udev->autosuspend_disabled) { + /* Optimization: Don't schedule a delayed autosuspend if + * the timer is already running and the expiration time + * wouldn't change. + * + * We have to use the interface's timer. Attempts to + * schedule a suspend for the device would fail because + * the interface is still active. + */ + if (intf->dev.power.timer_expires == 0 || + round_jiffies_up(last_busy) != + round_jiffies_up(jiffies)) { + status = pm_schedule_suspend(&intf->dev, + jiffies_to_msecs( round_jiffies_up_relative( - udev->autosuspend_delay)); + udev->autosuspend_delay))); } } - dev_vdbg(&intf->dev, "%s: status %d cnt %d\n", - __func__, status, atomic_read(&intf->pm_usage_cnt)); + dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n", + __func__, atomic_read(&intf->dev.power.usage_count), + status); } EXPORT_SYMBOL_GPL(usb_autopm_put_interface_async); /** + * usb_autopm_put_interface_no_suspend - decrement a USB interface's PM-usage counter + * @intf: the usb_interface whose counter should be decremented + * + * This routine decrements @intf's usage counter but does not carry out an + * autosuspend. + * + * This routine can run in atomic context. + */ +void usb_autopm_put_interface_no_suspend(struct usb_interface *intf) +{ + struct usb_device *udev = interface_to_usbdev(intf); + + udev->last_busy = jiffies; + atomic_dec(&intf->pm_usage_cnt); + pm_runtime_put_noidle(&intf->dev); +} +EXPORT_SYMBOL_GPL(usb_autopm_put_interface_no_suspend); + +/** * usb_autopm_get_interface - increment a USB interface's PM-usage counter * @intf: the usb_interface whose counter should be incremented * @@ -1667,25 +1549,8 @@ EXPORT_SYMBOL_GPL(usb_autopm_put_interface_async); * or @intf is unbound. A typical example would be a character-device * driver when its device file is opened. * - * - * The routine increments @intf's usage counter. (However if the - * autoresume fails then the counter is re-decremented.) So long as the - * counter is greater than 0, autosuspend will not be allowed for @intf - * or its usb_device. When the driver is finished using @intf it should - * call usb_autopm_put_interface() to decrement the usage counter and - * queue a delayed autosuspend request (if the counter is <= 0). - * - * - * Note that @intf->pm_usage_cnt is owned by the interface driver. 
The - * core will not change its value other than the increment and decrement - * in usb_autopm_get_interface and usb_autopm_put_interface. The driver - * may use this simple counter-oriented discipline or may set the value - * any way it likes. - * - * Resume method calls generated by this routine can arrive at any time - * while @intf is suspended. They are not protected by the usb_device's - * lock but only by its pm_mutex. Drivers must provide their own - * synchronization. + * @intf's usage counter is incremented to prevent subsequent autosuspends. + * However if the autoresume fails then the counter is re-decremented. * * This routine can run only in process context. */ @@ -1693,9 +1558,16 @@ int usb_autopm_get_interface(struct usb_interface *intf) { int status; - status = usb_autopm_do_interface(intf, 1); - dev_vdbg(&intf->dev, "%s: status %d cnt %d\n", - __func__, status, atomic_read(&intf->pm_usage_cnt)); + status = pm_runtime_get_sync(&intf->dev); + if (status < 0) + pm_runtime_put_sync(&intf->dev); + else + atomic_inc(&intf->pm_usage_cnt); + dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n", + __func__, atomic_read(&intf->dev.power.usage_count), + status); + if (status > 0) + status = 0; return status; } EXPORT_SYMBOL_GPL(usb_autopm_get_interface); @@ -1705,149 +1577,207 @@ EXPORT_SYMBOL_GPL(usb_autopm_get_interface); * @intf: the usb_interface whose counter should be incremented * * This routine does much the same thing as - * usb_autopm_get_interface(): it increments @intf's usage counter and - * queues an autoresume request if the result is > 0. The differences - * are that it does not acquire the device's pm_mutex (callers must - * handle all synchronization issues themselves), and it does not - * autoresume the device directly (it only queues a request). After a - * successful call, the device will generally not yet be resumed. + * usb_autopm_get_interface(): It increments @intf's usage counter and + * queues an autoresume request if the device is suspended. The + * differences are that it does not perform any synchronization (callers + * should hold a private lock and handle all synchronization issues + * themselves), and it does not autoresume the device directly (it only + * queues a request). After a successful call, the device may not yet be + * resumed. * * This routine can run in atomic context. */ int usb_autopm_get_interface_async(struct usb_interface *intf) { - struct usb_device *udev = interface_to_usbdev(intf); - int status = 0; + int status = 0; + enum rpm_status s; - if (intf->condition == USB_INTERFACE_UNBOUND) - status = -ENODEV; - else { + /* Don't request a resume unless the interface is already suspending + * or suspended. Doing so would force a running suspend timer to be + * cancelled. 
+ */ + pm_runtime_get_noresume(&intf->dev); + s = ACCESS_ONCE(intf->dev.power.runtime_status); + if (s == RPM_SUSPENDING || s == RPM_SUSPENDED) + status = pm_request_resume(&intf->dev); + + if (status < 0 && status != -EINPROGRESS) + pm_runtime_put_noidle(&intf->dev); + else atomic_inc(&intf->pm_usage_cnt); - if (atomic_read(&intf->pm_usage_cnt) > 0 && - udev->state == USB_STATE_SUSPENDED) - queue_work(ksuspend_usb_wq, &udev->autoresume); - } - dev_vdbg(&intf->dev, "%s: status %d cnt %d\n", - __func__, status, atomic_read(&intf->pm_usage_cnt)); + dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n", + __func__, atomic_read(&intf->dev.power.usage_count), + status); + if (status > 0) + status = 0; return status; } EXPORT_SYMBOL_GPL(usb_autopm_get_interface_async); -#else - -void usb_autosuspend_work(struct work_struct *work) -{} - -void usb_autoresume_work(struct work_struct *work) -{} - -#endif /* CONFIG_USB_SUSPEND */ - /** - * usb_external_suspend_device - external suspend of a USB device and its interfaces - * @udev: the usb_device to suspend - * @msg: Power Management message describing this state transition + * usb_autopm_get_interface_no_resume - increment a USB interface's PM-usage counter + * @intf: the usb_interface whose counter should be incremented * - * This routine handles external suspend requests: ones not generated - * internally by a USB driver (autosuspend) but rather coming from the user - * (via sysfs) or the PM core (system sleep). The suspend will be carried - * out regardless of @udev's usage counter or those of its interfaces, - * and regardless of whether or not remote wakeup is enabled. Of course, - * interface drivers still have the option of failing the suspend (if - * there are unsuspended children, for example). + * This routine increments @intf's usage counter but does not carry out an + * autoresume. * - * The caller must hold @udev's device lock. + * This routine can run in atomic context. */ -int usb_external_suspend_device(struct usb_device *udev, pm_message_t msg) +void usb_autopm_get_interface_no_resume(struct usb_interface *intf) { - int status; + struct usb_device *udev = interface_to_usbdev(intf); - do_unbind_rebind(udev, DO_UNBIND); - usb_pm_lock(udev); - status = usb_suspend_both(udev, msg); - usb_pm_unlock(udev); - return status; + udev->last_busy = jiffies; + atomic_inc(&intf->pm_usage_cnt); + pm_runtime_get_noresume(&intf->dev); } +EXPORT_SYMBOL_GPL(usb_autopm_get_interface_no_resume); -/** - * usb_external_resume_device - external resume of a USB device and its interfaces - * @udev: the usb_device to resume - * @msg: Power Management message describing this state transition - * - * This routine handles external resume requests: ones not generated - * internally by a USB driver (autoresume) but rather coming from the user - * (via sysfs), the PM core (system resume), or the device itself (remote - * wakeup). @udev's usage counter is unaffected. - * - * The caller must hold @udev's device lock. - */ -int usb_external_resume_device(struct usb_device *udev, pm_message_t msg) +/* Internal routine to check whether we may autosuspend a device. 
*/ +static int autosuspend_check(struct usb_device *udev) { - int status; + int i; + struct usb_interface *intf; + unsigned long suspend_time, j; - usb_pm_lock(udev); - status = usb_resume_both(udev, msg); - udev->last_busy = jiffies; - usb_pm_unlock(udev); - if (status == 0) - do_unbind_rebind(udev, DO_REBIND); + /* Fail if autosuspend is disabled, or any interfaces are in use, or + * any interface drivers require remote wakeup but it isn't available. + */ + udev->do_remote_wakeup = device_may_wakeup(&udev->dev); + if (udev->actconfig) { + for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { + intf = udev->actconfig->interface[i]; - /* Now that the device is awake, we can start trying to autosuspend - * it again. */ - if (status == 0) - usb_try_autosuspend_device(udev); - return status; + /* We don't need to check interfaces that are + * disabled for runtime PM. Either they are unbound + * or else their drivers don't support autosuspend + * and so they are permanently active. + */ + if (intf->dev.power.disable_depth) + continue; + if (atomic_read(&intf->dev.power.usage_count) > 0) + return -EBUSY; + if (intf->needs_remote_wakeup && + !udev->do_remote_wakeup) { + dev_dbg(&udev->dev, "remote wakeup needed " + "for autosuspend\n"); + return -EOPNOTSUPP; + } + + /* Don't allow autosuspend if the device will need + * a reset-resume and any of its interface drivers + * doesn't include support or needs remote wakeup. + */ + if (udev->quirks & USB_QUIRK_RESET_RESUME) { + struct usb_driver *driver; + + driver = to_usb_driver(intf->dev.driver); + if (!driver->reset_resume || + intf->needs_remote_wakeup) + return -EOPNOTSUPP; + } + } + } + + /* If everything is okay but the device hasn't been idle for long + * enough, queue a delayed autosuspend request. + */ + j = ACCESS_ONCE(jiffies); + suspend_time = udev->last_busy + udev->autosuspend_delay; + if (time_before(j, suspend_time)) { + pm_schedule_suspend(&udev->dev, jiffies_to_msecs( + round_jiffies_up_relative(suspend_time - j))); + return -EAGAIN; + } + return 0; } -int usb_suspend(struct device *dev, pm_message_t msg) +static int usb_runtime_suspend(struct device *dev) { - struct usb_device *udev; - - udev = to_usb_device(dev); + int status = 0; - /* If udev is already suspended, we can skip this suspend and - * we should also skip the upcoming system resume. High-speed - * root hubs are an exception; they need to resume whenever the - * system wakes up in order for USB-PERSIST port handover to work - * properly. + /* A USB device can be suspended if it passes the various autosuspend + * checks. Runtime suspend for a USB device means suspending all the + * interfaces and then the device itself. */ - if (udev->state == USB_STATE_SUSPENDED) { - if (udev->parent || udev->speed != USB_SPEED_HIGH) - udev->skip_sys_resume = 1; - return 0; + if (is_usb_device(dev)) { + struct usb_device *udev = to_usb_device(dev); + + if (autosuspend_check(udev) != 0) + return -EAGAIN; + + status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND); + + /* If an interface fails the suspend, adjust the last_busy + * time so that we don't get another suspend attempt right + * away. + */ + if (status) { + udev->last_busy = jiffies + + (udev->autosuspend_delay == 0 ? + HZ/2 : 0); + } + + /* Prevent the parent from suspending immediately after */ + else if (udev->parent) { + udev->parent->last_busy = jiffies; + } } - udev->skip_sys_resume = 0; - return usb_external_suspend_device(udev, msg); + /* Runtime suspend for a USB interface doesn't mean anything. 
*/ + return status; } -int usb_resume(struct device *dev, pm_message_t msg) +static int usb_runtime_resume(struct device *dev) { - struct usb_device *udev; - int status; + /* Runtime resume for a USB device means resuming both the device + * and all its interfaces. + */ + if (is_usb_device(dev)) { + struct usb_device *udev = to_usb_device(dev); + int status; - udev = to_usb_device(dev); + status = usb_resume_both(udev, PMSG_AUTO_RESUME); + udev->last_busy = jiffies; + return status; + } - /* If udev->skip_sys_resume is set then udev was already suspended - * when the system sleep started, so we don't want to resume it - * during this system wakeup. - */ - if (udev->skip_sys_resume) - return 0; - status = usb_external_resume_device(udev, msg); + /* Runtime resume for a USB interface doesn't mean anything. */ + return 0; +} - /* Avoid PM error messages for devices disconnected while suspended - * as we'll display regular disconnect messages just a bit later. +static int usb_runtime_idle(struct device *dev) +{ + /* An idle USB device can be suspended if it passes the various + * autosuspend checks. An idle interface can be suspended at + * any time. */ - if (status == -ENODEV) - return 0; - return status; + if (is_usb_device(dev)) { + struct usb_device *udev = to_usb_device(dev); + + if (autosuspend_check(udev) != 0) + return 0; + } + + pm_runtime_suspend(dev); + return 0; } -#endif /* CONFIG_PM */ +static struct dev_pm_ops usb_bus_pm_ops = { + .runtime_suspend = usb_runtime_suspend, + .runtime_resume = usb_runtime_resume, + .runtime_idle = usb_runtime_idle, +}; + +#else + +#define usb_bus_pm_ops (*(struct dev_pm_ops *) NULL) + +#endif /* CONFIG_USB_SUSPEND */ struct bus_type usb_bus_type = { .name = "usb", .match = usb_device_match, .uevent = usb_uevent, + .pm = &usb_bus_pm_ops, }; diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c index bfc6c2eea64..c3536f151f0 100644 --- a/drivers/usb/core/file.c +++ b/drivers/usb/core/file.c @@ -34,7 +34,6 @@ static int usb_open(struct inode * inode, struct file * file) int err = -ENODEV; const struct file_operations *old_fops, *new_fops = NULL; - lock_kernel(); down_read(&minor_rwsem); c = usb_minors[minor]; @@ -53,7 +52,6 @@ static int usb_open(struct inode * inode, struct file * file) fops_put(old_fops); done: up_read(&minor_rwsem); - unlock_kernel(); return err; } diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 80995ef0868..2f8cedda800 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -39,6 +39,7 @@ #include <linux/platform_device.h> #include <linux/workqueue.h> #include <linux/mutex.h> +#include <linux/pm_runtime.h> #include <linux/usb.h> @@ -141,7 +142,7 @@ static const u8 usb3_rh_dev_descriptor[18] = { 0x09, /* __u8 bMaxPacketSize0; 2^9 = 512 Bytes */ 0x6b, 0x1d, /* __le16 idVendor; Linux Foundation */ - 0x02, 0x00, /* __le16 idProduct; device 0x0002 */ + 0x03, 0x00, /* __le16 idProduct; device 0x0003 */ KERNEL_VER, KERNEL_REL, /* __le16 bcdDevice */ 0x03, /* __u8 iManufacturer; */ @@ -1670,11 +1671,16 @@ int usb_hcd_alloc_bandwidth(struct usb_device *udev, } } for (i = 0; i < num_intfs; ++i) { + struct usb_host_interface *first_alt; + int iface_num; + + first_alt = &new_config->intf_cache[i]->altsetting[0]; + iface_num = first_alt->desc.bInterfaceNumber; /* Set up endpoints for alternate interface setting 0 */ - alt = usb_find_alt_setting(new_config, i, 0); + alt = usb_find_alt_setting(new_config, iface_num, 0); if (!alt) /* No alt setting 0? Pick the first setting. 
*/ - alt = &new_config->intf_cache[i]->altsetting[0]; + alt = first_alt; for (j = 0; j < alt->desc.bNumEndpoints; j++) { ret = hcd->driver->add_endpoint(hcd, udev, &alt->endpoint[j]); @@ -1853,6 +1859,10 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg) return status; } +#endif /* CONFIG_PM */ + +#ifdef CONFIG_USB_SUSPEND + /* Workqueue routine for root-hub remote wakeup */ static void hcd_resume_work(struct work_struct *work) { @@ -1860,8 +1870,7 @@ static void hcd_resume_work(struct work_struct *work) struct usb_device *udev = hcd->self.root_hub; usb_lock_device(udev); - usb_mark_last_busy(udev); - usb_external_resume_device(udev, PMSG_REMOTE_RESUME); + usb_remote_wakeup(udev); usb_unlock_device(udev); } @@ -1880,12 +1889,12 @@ void usb_hcd_resume_root_hub (struct usb_hcd *hcd) spin_lock_irqsave (&hcd_root_hub_lock, flags); if (hcd->rh_registered) - queue_work(ksuspend_usb_wq, &hcd->wakeup_work); + queue_work(pm_wq, &hcd->wakeup_work); spin_unlock_irqrestore (&hcd_root_hub_lock, flags); } EXPORT_SYMBOL_GPL(usb_hcd_resume_root_hub); -#endif +#endif /* CONFIG_USB_SUSPEND */ /*-------------------------------------------------------------------------*/ @@ -2030,7 +2039,7 @@ struct usb_hcd *usb_create_hcd (const struct hc_driver *driver, init_timer(&hcd->rh_timer); hcd->rh_timer.function = rh_timer_func; hcd->rh_timer.data = (unsigned long) hcd; -#ifdef CONFIG_PM +#ifdef CONFIG_USB_SUSPEND INIT_WORK(&hcd->wakeup_work, hcd_resume_work); #endif mutex_init(&hcd->bandwidth_mutex); @@ -2230,7 +2239,7 @@ void usb_remove_hcd(struct usb_hcd *hcd) hcd->rh_registered = 0; spin_unlock_irq (&hcd_root_hub_lock); -#ifdef CONFIG_PM +#ifdef CONFIG_USB_SUSPEND cancel_work_sync(&hcd->wakeup_work); #endif diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h index bbe2b924aae..a3cdb09734a 100644 --- a/drivers/usb/core/hcd.h +++ b/drivers/usb/core/hcd.h @@ -80,7 +80,7 @@ struct usb_hcd { struct timer_list rh_timer; /* drives root-hub polling */ struct urb *status_urb; /* the current status urb */ -#ifdef CONFIG_PM +#ifdef CONFIG_USB_SUSPEND struct work_struct wakeup_work; /* for remote wakeup */ #endif @@ -248,7 +248,7 @@ struct hc_driver { /* xHCI specific functions */ /* Called by usb_alloc_dev to alloc HC device structures */ int (*alloc_dev)(struct usb_hcd *, struct usb_device *); - /* Called by usb_release_dev to free HC device structures */ + /* Called by usb_disconnect to free HC device structures */ void (*free_dev)(struct usb_hcd *, struct usb_device *); /* Bandwidth computation functions */ @@ -286,6 +286,7 @@ struct hc_driver { */ int (*update_hub_device)(struct usb_hcd *, struct usb_device *hdev, struct usb_tt *tt, gfp_t mem_flags); + int (*reset_device)(struct usb_hcd *, struct usb_device *); }; extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb); @@ -463,16 +464,20 @@ extern int usb_find_interface_driver(struct usb_device *dev, #define usb_endpoint_out(ep_dir) (!((ep_dir) & USB_DIR_IN)) #ifdef CONFIG_PM -extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd); extern void usb_root_hub_lost_power(struct usb_device *rhdev); extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg); extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg); +#endif /* CONFIG_PM */ + +#ifdef CONFIG_USB_SUSPEND +extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd); #else static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd) { return; } -#endif /* CONFIG_PM */ +#endif /* CONFIG_USB_SUSPEND */ + /* * USB device fs stuff diff --git 
a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 20ecb4cec8d..0940ccd6f4f 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -22,6 +22,7 @@ #include <linux/kthread.h> #include <linux/mutex.h> #include <linux/freezer.h> +#include <linux/pm_runtime.h> #include <asm/uaccess.h> #include <asm/byteorder.h> @@ -71,7 +72,6 @@ struct usb_hub { unsigned mA_per_port; /* current for each child */ - unsigned init_done:1; unsigned limited_power:1; unsigned quiescing:1; unsigned disconnected:1; @@ -820,7 +820,6 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) } init3: hub->quiescing = 0; - hub->init_done = 1; status = usb_submit_urb(hub->urb, GFP_NOIO); if (status < 0) @@ -861,11 +860,6 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type) int i; cancel_delayed_work_sync(&hub->init_work); - if (!hub->init_done) { - hub->init_done = 1; - usb_autopm_put_interface_no_suspend( - to_usb_interface(hub->intfdev)); - } /* khubd and related activity won't re-trigger */ hub->quiescing = 1; @@ -1224,6 +1218,9 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id) desc = intf->cur_altsetting; hdev = interface_to_usbdev(intf); + /* Hubs have proper suspend/resume support */ + usb_enable_autosuspend(hdev); + if (hdev->level == MAX_TOPO_LEVEL) { dev_err(&intf->dev, "Unsupported bus topology: hub nested too deep\n"); @@ -1402,10 +1399,8 @@ static void recursively_mark_NOTATTACHED(struct usb_device *udev) if (udev->children[i]) recursively_mark_NOTATTACHED(udev->children[i]); } - if (udev->state == USB_STATE_SUSPENDED) { - udev->discon_suspended = 1; + if (udev->state == USB_STATE_SUSPENDED) udev->active_duration -= jiffies; - } udev->state = USB_STATE_NOTATTACHED; } @@ -1448,11 +1443,11 @@ void usb_set_device_state(struct usb_device *udev, || new_state == USB_STATE_SUSPENDED) ; /* No change to wakeup settings */ else if (new_state == USB_STATE_CONFIGURED) - device_init_wakeup(&udev->dev, + device_set_wakeup_capable(&udev->dev, (udev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_WAKEUP)); else - device_init_wakeup(&udev->dev, 0); + device_set_wakeup_capable(&udev->dev, 0); } if (udev->state == USB_STATE_SUSPENDED && new_state != USB_STATE_SUSPENDED) @@ -1529,31 +1524,15 @@ static void update_address(struct usb_device *udev, int devnum) udev->devnum = devnum; } -#ifdef CONFIG_USB_SUSPEND - -static void usb_stop_pm(struct usb_device *udev) +static void hub_free_dev(struct usb_device *udev) { - /* Synchronize with the ksuspend thread to prevent any more - * autosuspend requests from being submitted, and decrement - * the parent's count of unsuspended children. 
- */ - usb_pm_lock(udev); - if (udev->parent && !udev->discon_suspended) - usb_autosuspend_device(udev->parent); - usb_pm_unlock(udev); + struct usb_hcd *hcd = bus_to_hcd(udev->bus); - /* Stop any autosuspend or autoresume requests already submitted */ - cancel_delayed_work_sync(&udev->autosuspend); - cancel_work_sync(&udev->autoresume); + /* Root hubs aren't real devices, so don't free HCD resources */ + if (hcd->driver->free_dev && udev->parent) + hcd->driver->free_dev(hcd, udev); } -#else - -static inline void usb_stop_pm(struct usb_device *udev) -{ } - -#endif - /** * usb_disconnect - disconnect a device (usbcore-internal) * @pdev: pointer to device being disconnected @@ -1622,7 +1601,7 @@ void usb_disconnect(struct usb_device **pdev) *pdev = NULL; spin_unlock_irq(&device_state_lock); - usb_stop_pm(udev); + hub_free_dev(udev); put_device(&udev->dev); } @@ -1799,9 +1778,18 @@ int usb_new_device(struct usb_device *udev) { int err; - /* Increment the parent's count of unsuspended children */ - if (udev->parent) - usb_autoresume_device(udev->parent); + if (udev->parent) { + /* Initialize non-root-hub device wakeup to disabled; + * device (un)configuration controls wakeup capable + * sysfs power/wakeup controls wakeup enabled/disabled + */ + device_init_wakeup(&udev->dev, 0); + device_set_wakeup_enable(&udev->dev, 1); + } + + /* Tell the runtime-PM framework the device is active */ + pm_runtime_set_active(&udev->dev); + pm_runtime_enable(&udev->dev); usb_detect_quirks(udev); err = usb_enumerate_device(udev); /* Read descriptors */ @@ -1833,7 +1821,8 @@ int usb_new_device(struct usb_device *udev) fail: usb_set_device_state(udev, USB_STATE_NOTATTACHED); - usb_stop_pm(udev); + pm_runtime_disable(&udev->dev); + pm_runtime_set_suspended(&udev->dev); return err; } @@ -1982,7 +1971,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1, if (!(portstatus & USB_PORT_STAT_RESET) && (portstatus & USB_PORT_STAT_ENABLE)) { if (hub_is_wusb(hub)) - udev->speed = USB_SPEED_VARIABLE; + udev->speed = USB_SPEED_WIRELESS; else if (portstatus & USB_PORT_STAT_HIGH_SPEED) udev->speed = USB_SPEED_HIGH; else if (portstatus & USB_PORT_STAT_LOW_SPEED) @@ -2008,7 +1997,9 @@ static int hub_port_reset(struct usb_hub *hub, int port1, struct usb_device *udev, unsigned int delay) { int i, status; + struct usb_hcd *hcd; + hcd = bus_to_hcd(udev->bus); /* Block EHCI CF initialization during the port reset. * Some companion controllers don't like it when they mix. */ @@ -2036,6 +2027,14 @@ static int hub_port_reset(struct usb_hub *hub, int port1, /* TRSTRCY = 10 ms; plus some extra */ msleep(10 + 40); update_address(udev, 0); + if (hcd->driver->reset_device) { + status = hcd->driver->reset_device(hcd, udev); + if (status < 0) { + dev_err(&udev->dev, "Cannot reset " + "HCD device state\n"); + break; + } + } /* FALL THROUGH */ case -ENOTCONN: case -ENODEV: @@ -2381,14 +2380,17 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg) } /* caller has locked udev */ -static int remote_wakeup(struct usb_device *udev) +int usb_remote_wakeup(struct usb_device *udev) { int status = 0; if (udev->state == USB_STATE_SUSPENDED) { dev_dbg(&udev->dev, "usb %sresume\n", "wakeup-"); - usb_mark_last_busy(udev); - status = usb_external_resume_device(udev, PMSG_REMOTE_RESUME); + status = usb_autoresume_device(udev); + if (status == 0) { + /* Let the drivers do their thing, then... 
*/ + usb_autosuspend_device(udev); + } } return status; } @@ -2425,11 +2427,6 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg) return status; } -static inline int remote_wakeup(struct usb_device *udev) -{ - return 0; -} - #endif static int hub_suspend(struct usb_interface *intf, pm_message_t msg) @@ -2496,11 +2493,6 @@ EXPORT_SYMBOL_GPL(usb_root_hub_lost_power); #else /* CONFIG_PM */ -static inline int remote_wakeup(struct usb_device *udev) -{ - return 0; -} - #define hub_suspend NULL #define hub_resume NULL #define hub_reset_resume NULL @@ -2645,14 +2637,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, mutex_lock(&usb_address0_mutex); - if ((hcd->driver->flags & HCD_USB3) && udev->config) { - /* FIXME this will need special handling by the xHCI driver. */ - dev_dbg(&udev->dev, - "xHCI reset of configured device " - "not supported yet.\n"); - retval = -EINVAL; - goto fail; - } else if (!udev->config && oldspeed == USB_SPEED_SUPER) { + if (!udev->config && oldspeed == USB_SPEED_SUPER) { /* Don't reset USB 3.0 devices during an initial setup */ usb_set_device_state(udev, USB_STATE_DEFAULT); } else { @@ -2678,7 +2663,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, */ switch (udev->speed) { case USB_SPEED_SUPER: - case USB_SPEED_VARIABLE: /* fixed at 512 */ + case USB_SPEED_WIRELESS: /* fixed at 512 */ udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512); break; case USB_SPEED_HIGH: /* fixed at 64 */ @@ -2706,7 +2691,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, case USB_SPEED_SUPER: speed = "super"; break; - case USB_SPEED_VARIABLE: + case USB_SPEED_WIRELESS: speed = "variable"; type = "Wireless "; break; @@ -3006,7 +2991,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1, /* For a suspended device, treat this as a * remote wakeup event. */ - status = remote_wakeup(udev); + status = usb_remote_wakeup(udev); #endif } else { @@ -3192,6 +3177,7 @@ loop_disable: loop: usb_ep0_reinit(udev); release_address(udev); + hub_free_dev(udev); usb_put_dev(udev); if ((status == -ENOTCONN) || (status == -ENOTSUPP)) break; @@ -3259,7 +3245,7 @@ static void hub_events(void) * disconnected while waiting for the lock to succeed. */ usb_lock_device(hdev); if (unlikely(hub->disconnected)) - goto loop2; + goto loop_disconnected; /* If the hub has died, clean up after it */ if (hdev->state == USB_STATE_NOTATTACHED) { @@ -3352,7 +3338,7 @@ static void hub_events(void) msleep(10); usb_lock_device(udev); - ret = remote_wakeup(hdev-> + ret = usb_remote_wakeup(hdev-> children[i-1]); usb_unlock_device(udev); if (ret < 0) @@ -3419,7 +3405,7 @@ static void hub_events(void) * kick_khubd() and allow autosuspend. 
*/ usb_autopm_put_interface(intf); - loop2: + loop_disconnected: usb_unlock_device(hdev); kref_put(&hub->kref, hub_release); @@ -3446,7 +3432,7 @@ static int hub_thread(void *__unused) return 0; } -static struct usb_device_id hub_id_table [] = { +static const struct usb_device_id hub_id_table[] = { { .match_flags = USB_DEVICE_ID_MATCH_DEV_CLASS, .bDeviceClass = USB_CLASS_HUB}, { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS, diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index df73574a9cc..cd220277c6c 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -1316,7 +1316,7 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate) alt = usb_altnum_to_altsetting(iface, alternate); if (!alt) { - dev_warn(&dev->dev, "selecting invalid altsetting %d", + dev_warn(&dev->dev, "selecting invalid altsetting %d\n", alternate); return -EINVAL; } @@ -1471,7 +1471,7 @@ int usb_reset_configuration(struct usb_device *dev) /* If not, reinstate the old alternate settings */ if (retval < 0) { reset_old_alts: - for (; i >= 0; i--) { + for (i--; i >= 0; i--) { struct usb_interface *intf = config->interface[i]; struct usb_host_interface *alt; @@ -1843,7 +1843,6 @@ free_interfaces: intf->dev.dma_mask = dev->dev.dma_mask; INIT_WORK(&intf->reset_ws, __usb_queue_reset_device); device_initialize(&intf->dev); - mark_quiesced(intf); dev_set_name(&intf->dev, "%d-%s:%d.%d", dev->bus->busnum, dev->devpath, configuration, alt->desc.bInterfaceNumber); diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index ab93918d920..f073c5cb4e7 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -103,10 +103,19 @@ void usb_detect_quirks(struct usb_device *udev) dev_dbg(&udev->dev, "USB quirks for this device: %x\n", udev->quirks); - /* By default, disable autosuspend for all non-hubs */ #ifdef CONFIG_USB_SUSPEND - if (udev->descriptor.bDeviceClass != USB_CLASS_HUB) - udev->autosuspend_disabled = 1; + + /* By default, disable autosuspend for all devices. The hub driver + * will enable it for hubs. + */ + usb_disable_autosuspend(udev); + + /* Autosuspend can also be disabled if the initial autosuspend_delay + * is negative. + */ + if (udev->autosuspend_delay < 0) + usb_autoresume_device(udev); + #endif /* For the present, all devices default to USB-PERSIST enabled */ @@ -120,6 +129,7 @@ void usb_detect_quirks(struct usb_device *udev) * for all devices. It will affect things like hub resets * and EMF-related port disables. 
*/ - udev->persist_enabled = 1; + if (!(udev->quirks & USB_QUIRK_RESET_MORPHS)) + udev->persist_enabled = 1; #endif /* CONFIG_PM */ } diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c index 5f3908f6e2d..43c002e3a9a 100644 --- a/drivers/usb/core/sysfs.c +++ b/drivers/usb/core/sysfs.c @@ -115,7 +115,7 @@ show_speed(struct device *dev, struct device_attribute *attr, char *buf) case USB_SPEED_HIGH: speed = "480"; break; - case USB_SPEED_VARIABLE: + case USB_SPEED_WIRELESS: speed = "480"; break; case USB_SPEED_SUPER: @@ -191,6 +191,36 @@ show_quirks(struct device *dev, struct device_attribute *attr, char *buf) static DEVICE_ATTR(quirks, S_IRUGO, show_quirks, NULL); static ssize_t +show_avoid_reset_quirk(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct usb_device *udev; + + udev = to_usb_device(dev); + return sprintf(buf, "%d\n", !!(udev->quirks & USB_QUIRK_RESET_MORPHS)); +} + +static ssize_t +set_avoid_reset_quirk(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct usb_device *udev = to_usb_device(dev); + int config; + + if (sscanf(buf, "%d", &config) != 1 || config < 0 || config > 1) + return -EINVAL; + usb_lock_device(udev); + if (config) + udev->quirks |= USB_QUIRK_RESET_MORPHS; + else + udev->quirks &= ~USB_QUIRK_RESET_MORPHS; + usb_unlock_device(udev); + return count; +} + +static DEVICE_ATTR(avoid_reset_quirk, S_IRUGO | S_IWUSR, + show_avoid_reset_quirk, set_avoid_reset_quirk); + +static ssize_t show_urbnum(struct device *dev, struct device_attribute *attr, char *buf) { struct usb_device *udev; @@ -226,9 +256,10 @@ set_persist(struct device *dev, struct device_attribute *attr, if (sscanf(buf, "%d", &value) != 1) return -EINVAL; - usb_pm_lock(udev); + + usb_lock_device(udev); udev->persist_enabled = !!value; - usb_pm_unlock(udev); + usb_unlock_device(udev); return count; } @@ -315,20 +346,34 @@ set_autosuspend(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct usb_device *udev = to_usb_device(dev); - int value; + int value, old_delay; + int rc; if (sscanf(buf, "%d", &value) != 1 || value >= INT_MAX/HZ || value <= - INT_MAX/HZ) return -EINVAL; value *= HZ; + usb_lock_device(udev); + old_delay = udev->autosuspend_delay; udev->autosuspend_delay = value; - if (value >= 0) - usb_try_autosuspend_device(udev); - else { - if (usb_autoresume_device(udev) == 0) + + if (old_delay < 0) { /* Autosuspend wasn't allowed */ + if (value >= 0) usb_autosuspend_device(udev); + } else { /* Autosuspend was allowed */ + if (value < 0) { + rc = usb_autoresume_device(udev); + if (rc < 0) { + count = rc; + udev->autosuspend_delay = old_delay; + } + } else { + usb_try_autosuspend_device(udev); + } } + + usb_unlock_device(udev); return count; } @@ -356,34 +401,25 @@ set_level(struct device *dev, struct device_attribute *attr, struct usb_device *udev = to_usb_device(dev); int len = count; char *cp; - int rc = 0; - int old_autosuspend_disabled; + int rc; cp = memchr(buf, '\n', count); if (cp) len = cp - buf; usb_lock_device(udev); - old_autosuspend_disabled = udev->autosuspend_disabled; - /* Setting the flags without calling usb_pm_lock is a subject to - * races, but who cares... 
- */ if (len == sizeof on_string - 1 && - strncmp(buf, on_string, len) == 0) { - udev->autosuspend_disabled = 1; - rc = usb_external_resume_device(udev, PMSG_USER_RESUME); + strncmp(buf, on_string, len) == 0) + rc = usb_disable_autosuspend(udev); - } else if (len == sizeof auto_string - 1 && - strncmp(buf, auto_string, len) == 0) { - udev->autosuspend_disabled = 0; - rc = usb_external_resume_device(udev, PMSG_USER_RESUME); + else if (len == sizeof auto_string - 1 && + strncmp(buf, auto_string, len) == 0) + rc = usb_enable_autosuspend(udev); - } else + else rc = -EINVAL; - if (rc) - udev->autosuspend_disabled = old_autosuspend_disabled; usb_unlock_device(udev); return (rc < 0 ? rc : count); } @@ -558,6 +594,7 @@ static struct attribute *dev_attrs[] = { &dev_attr_version.attr, &dev_attr_maxchild.attr, &dev_attr_quirks.attr, + &dev_attr_avoid_reset_quirk.attr, &dev_attr_authorized.attr, &dev_attr_remove.attr, NULL, diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index e7cae133469..27080561a1c 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c @@ -387,6 +387,13 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) { unsigned int orig_flags = urb->transfer_flags; unsigned int allowed; + static int pipetypes[4] = { + PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT + }; + + /* Check that the pipe's type matches the endpoint's type */ + if (usb_pipetype(urb->pipe) != pipetypes[xfertype]) + return -EPIPE; /* The most suitable error code :-) */ /* enforce simple/standard policy */ allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP | @@ -430,7 +437,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) case USB_ENDPOINT_XFER_INT: /* too small? */ switch (dev->speed) { - case USB_SPEED_VARIABLE: + case USB_SPEED_WIRELESS: if (urb->interval < 6) return -EINVAL; break; @@ -446,7 +453,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) if (urb->interval > (1 << 15)) return -EINVAL; max = 1 << 15; - case USB_SPEED_VARIABLE: + case USB_SPEED_WIRELESS: if (urb->interval > 16) return -EINVAL; break; @@ -473,7 +480,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) default: return -EINVAL; } - if (dev->speed != USB_SPEED_VARIABLE) { + if (dev->speed != USB_SPEED_WIRELESS) { /* Round down to a power of 2, no more than max */ urb->interval = min(max, 1 << ilog2(urb->interval)); } diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 0daff0d968b..1297e9b16a5 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -49,9 +49,6 @@ const char *usbcore_name = "usbcore"; static int nousb; /* Disable USB when built into kernel image */ -/* Workqueue for autosuspend and for remote wakeup of root hubs */ -struct workqueue_struct *ksuspend_usb_wq; - #ifdef CONFIG_USB_SUSPEND static int usb_autosuspend_delay = 2; /* Default delay value, * in seconds */ @@ -228,9 +225,6 @@ static void usb_release_dev(struct device *dev) hcd = bus_to_hcd(udev->bus); usb_destroy_configuration(udev); - /* Root hubs aren't real devices, so don't free HCD resources */ - if (hcd->driver->free_dev && udev->parent) - hcd->driver->free_dev(hcd, udev); usb_put_hcd(hcd); kfree(udev->product); kfree(udev->manufacturer); @@ -264,23 +258,6 @@ static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env) #ifdef CONFIG_PM -static int ksuspend_usb_init(void) -{ - /* This workqueue is supposed to be both freezable and - * singlethreaded. Its job doesn't justify running on more - * than one CPU. 
- */ - ksuspend_usb_wq = create_freezeable_workqueue("ksuspend_usbd"); - if (!ksuspend_usb_wq) - return -ENOMEM; - return 0; -} - -static void ksuspend_usb_cleanup(void) -{ - destroy_workqueue(ksuspend_usb_wq); -} - /* USB device Power-Management thunks. * There's no need to distinguish here between quiescing a USB device * and powering it down; the generic_suspend() routine takes care of @@ -296,7 +273,7 @@ static int usb_dev_prepare(struct device *dev) static void usb_dev_complete(struct device *dev) { /* Currently used only for rebinding interfaces */ - usb_resume(dev, PMSG_RESUME); /* Message event is meaningless */ + usb_resume(dev, PMSG_ON); /* FIXME: change to PMSG_COMPLETE */ } static int usb_dev_suspend(struct device *dev) @@ -342,9 +319,7 @@ static const struct dev_pm_ops usb_device_pm_ops = { #else -#define ksuspend_usb_init() 0 -#define ksuspend_usb_cleanup() do {} while (0) -#define usb_device_pm_ops (*(struct dev_pm_ops *)0) +#define usb_device_pm_ops (*(struct dev_pm_ops *) NULL) #endif /* CONFIG_PM */ @@ -472,9 +447,6 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent, INIT_LIST_HEAD(&dev->filelist); #ifdef CONFIG_PM - mutex_init(&dev->pm_mutex); - INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work); - INIT_WORK(&dev->autoresume, usb_autoresume_work); dev->autosuspend_delay = usb_autosuspend_delay * HZ; dev->connect_time = jiffies; dev->active_duration = -jiffies; @@ -1117,9 +1089,6 @@ static int __init usb_init(void) if (retval) goto out; - retval = ksuspend_usb_init(); - if (retval) - goto out; retval = bus_register(&usb_bus_type); if (retval) goto bus_register_failed; @@ -1159,7 +1128,7 @@ major_init_failed: bus_notifier_failed: bus_unregister(&usb_bus_type); bus_register_failed: - ksuspend_usb_cleanup(); + usb_debugfs_cleanup(); out: return retval; } @@ -1181,7 +1150,6 @@ static void __exit usb_exit(void) usb_hub_cleanup(); bus_unregister_notifier(&usb_bus_type, &usb_bus_nb); bus_unregister(&usb_bus_type); - ksuspend_usb_cleanup(); usb_debugfs_cleanup(); } diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index 4c36c7f512a..cd882203ad3 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h @@ -55,24 +55,8 @@ extern void usb_major_cleanup(void); extern int usb_suspend(struct device *dev, pm_message_t msg); extern int usb_resume(struct device *dev, pm_message_t msg); -extern void usb_autosuspend_work(struct work_struct *work); -extern void usb_autoresume_work(struct work_struct *work); extern int usb_port_suspend(struct usb_device *dev, pm_message_t msg); extern int usb_port_resume(struct usb_device *dev, pm_message_t msg); -extern int usb_external_suspend_device(struct usb_device *udev, - pm_message_t msg); -extern int usb_external_resume_device(struct usb_device *udev, - pm_message_t msg); - -static inline void usb_pm_lock(struct usb_device *udev) -{ - mutex_lock_nested(&udev->pm_mutex, udev->level); -} - -static inline void usb_pm_unlock(struct usb_device *udev) -{ - mutex_unlock(&udev->pm_mutex); -} #else @@ -86,9 +70,6 @@ static inline int usb_port_resume(struct usb_device *udev, pm_message_t msg) return 0; } -static inline void usb_pm_lock(struct usb_device *udev) {} -static inline void usb_pm_unlock(struct usb_device *udev) {} - #endif #ifdef CONFIG_USB_SUSPEND @@ -96,6 +77,7 @@ static inline void usb_pm_unlock(struct usb_device *udev) {} extern void usb_autosuspend_device(struct usb_device *udev); extern void usb_try_autosuspend_device(struct usb_device *udev); extern int usb_autoresume_device(struct usb_device *udev); 
+extern int usb_remote_wakeup(struct usb_device *dev); #else @@ -106,9 +88,13 @@ static inline int usb_autoresume_device(struct usb_device *udev) return 0; } +static inline int usb_remote_wakeup(struct usb_device *udev) +{ + return 0; +} + #endif -extern struct workqueue_struct *ksuspend_usb_wq; extern struct bus_type usb_bus_type; extern struct device_type usb_device_type; extern struct device_type usb_if_device_type; @@ -138,23 +124,6 @@ static inline int is_usb_device_driver(struct device_driver *drv) for_devices; } -/* Interfaces and their "power state" are owned by usbcore */ - -static inline void mark_active(struct usb_interface *f) -{ - f->is_active = 1; -} - -static inline void mark_quiesced(struct usb_interface *f) -{ - f->is_active = 0; -} - -static inline int is_active(const struct usb_interface *f) -{ - return f->is_active; -} - /* for labeling diagnostics */ extern const char *usbcore_name; diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c index 2958a1271b2..6e98a369784 100644 --- a/drivers/usb/early/ehci-dbgp.c +++ b/drivers/usb/early/ehci-dbgp.c @@ -66,8 +66,6 @@ static struct ehci_dev ehci_dev; #define USB_DEBUG_DEVNUM 127 -#define DBGP_DATA_TOGGLE 0x8800 - #ifdef DBGP_DEBUG #define dbgp_printk printk static void dbgp_ehci_status(char *str) @@ -88,11 +86,6 @@ static inline void dbgp_ehci_status(char *str) { } static inline void dbgp_printk(const char *fmt, ...) { } #endif -static inline u32 dbgp_pid_update(u32 x, u32 tok) -{ - return ((x ^ DBGP_DATA_TOGGLE) & 0xffff00) | (tok & 0xff); -} - static inline u32 dbgp_len_update(u32 x, u32 len) { return (x & ~0x0f) | (len & 0x0f); @@ -136,6 +129,19 @@ static inline u32 dbgp_len_update(u32 x, u32 len) #define DBGP_MAX_PACKET 8 #define DBGP_TIMEOUT (250 * 1000) +#define DBGP_LOOPS 1000 + +static inline u32 dbgp_pid_write_update(u32 x, u32 tok) +{ + static int data0 = USB_PID_DATA1; + data0 ^= USB_PID_DATA_TOGGLE; + return (x & 0xffff0000) | (data0 << 8) | (tok & 0xff); +} + +static inline u32 dbgp_pid_read_update(u32 x, u32 tok) +{ + return (x & 0xffff0000) | (USB_PID_DATA0 << 8) | (tok & 0xff); +} static int dbgp_wait_until_complete(void) { @@ -180,7 +186,7 @@ static int dbgp_wait_until_done(unsigned ctrl) { u32 pids, lpid; int ret; - int loop = 3; + int loop = DBGP_LOOPS; retry: writel(ctrl | DBGP_GO, &ehci_debug->control); @@ -197,6 +203,8 @@ retry: */ if (ret == -DBGP_TIMEOUT && !dbgp_not_safe) dbgp_not_safe = 1; + if (ret == -DBGP_ERR_BAD && --loop > 0) + goto retry; return ret; } @@ -245,12 +253,20 @@ static inline void dbgp_get_data(void *buf, int size) bytes[i] = (hi >> (8*(i - 4))) & 0xff; } -static int dbgp_out(u32 addr, const char *bytes, int size) +static int dbgp_bulk_write(unsigned devnum, unsigned endpoint, + const char *bytes, int size) { + int ret; + u32 addr; u32 pids, ctrl; + if (size > DBGP_MAX_PACKET) + return -1; + + addr = DBGP_EPADDR(devnum, endpoint); + pids = readl(&ehci_debug->pids); - pids = dbgp_pid_update(pids, USB_PID_OUT); + pids = dbgp_pid_write_update(pids, USB_PID_OUT); ctrl = readl(&ehci_debug->control); ctrl = dbgp_len_update(ctrl, size); @@ -260,34 +276,7 @@ static int dbgp_out(u32 addr, const char *bytes, int size) dbgp_set_data(bytes, size); writel(addr, &ehci_debug->address); writel(pids, &ehci_debug->pids); - return dbgp_wait_until_done(ctrl); -} - -static int dbgp_bulk_write(unsigned devnum, unsigned endpoint, - const char *bytes, int size) -{ - int ret; - int loops = 5; - u32 addr; - if (size > DBGP_MAX_PACKET) - return -1; - - addr = DBGP_EPADDR(devnum, 
endpoint); -try_again: - if (loops--) { - ret = dbgp_out(addr, bytes, size); - if (ret == -DBGP_ERR_BAD) { - int try_loops = 3; - do { - /* Emit a dummy packet to re-sync communication - * with the debug device */ - if (dbgp_out(addr, "12345678", 8) >= 0) { - udelay(2); - goto try_again; - } - } while (try_loops--); - } - } + ret = dbgp_wait_until_done(ctrl); return ret; } @@ -304,7 +293,7 @@ static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data, addr = DBGP_EPADDR(devnum, endpoint); pids = readl(&ehci_debug->pids); - pids = dbgp_pid_update(pids, USB_PID_IN); + pids = dbgp_pid_read_update(pids, USB_PID_IN); ctrl = readl(&ehci_debug->control); ctrl = dbgp_len_update(ctrl, size); @@ -362,7 +351,6 @@ static int dbgp_control_msg(unsigned devnum, int requesttype, return dbgp_bulk_read(devnum, 0, data, size); } - /* Find a PCI capability */ static u32 __init find_cap(u32 num, u32 slot, u32 func, int cap) { diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index ee411206c69..7460cd797f4 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -812,6 +812,16 @@ config USB_CDC_COMPOSITE Say "y" to link the driver statically, or "m" to build a dynamically linked module. +config USB_G_NOKIA + tristate "Nokia composite gadget" + depends on PHONET + help + The Nokia composite gadget provides support for acm, obex + and phonet in only one composite gadget driver. + + It's only really useful for N900 hardware. If you're building + a kernel for N900, say Y or M here. If unsure, say N. + config USB_G_MULTI tristate "Multifunction Composite Gadget (EXPERIMENTAL)" depends on BLOCK && NET diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile index 2e2c047262b..43b51da8d72 100644 --- a/drivers/usb/gadget/Makefile +++ b/drivers/usb/gadget/Makefile @@ -43,6 +43,7 @@ g_mass_storage-objs := mass_storage.o g_printer-objs := printer.o g_cdc-objs := cdc2.o g_multi-objs := multi.o +g_nokia-objs := nokia.o obj-$(CONFIG_USB_ZERO) += g_zero.o obj-$(CONFIG_USB_AUDIO) += g_audio.o @@ -55,4 +56,5 @@ obj-$(CONFIG_USB_G_PRINTER) += g_printer.o obj-$(CONFIG_USB_MIDI_GADGET) += g_midi.o obj-$(CONFIG_USB_CDC_COMPOSITE) += g_cdc.o obj-$(CONFIG_USB_G_MULTI) += g_multi.o +obj-$(CONFIG_USB_G_NOKIA) += g_nokia.o diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c index 043e04db2a0..12ac9cd32a0 100644 --- a/drivers/usb/gadget/at91_udc.c +++ b/drivers/usb/gadget/at91_udc.c @@ -1656,9 +1656,7 @@ static int __init at91udc_probe(struct platform_device *pdev) if (!res) return -ENXIO; - if (!request_mem_region(res->start, - res->end - res->start + 1, - driver_name)) { + if (!request_mem_region(res->start, resource_size(res), driver_name)) { DBG("someone's using UDC memory\n"); return -EBUSY; } @@ -1699,7 +1697,7 @@ static int __init at91udc_probe(struct platform_device *pdev) udc->ep[3].maxpacket = 64; } - udc->udp_baseaddr = ioremap(res->start, res->end - res->start + 1); + udc->udp_baseaddr = ioremap(res->start, resource_size(res)); if (!udc->udp_baseaddr) { retval = -ENOMEM; goto fail0a; @@ -1781,7 +1779,7 @@ fail0a: if (cpu_is_at91rm9200()) gpio_free(udc->board.pullup_pin); fail0: - release_mem_region(res->start, res->end - res->start + 1); + release_mem_region(res->start, resource_size(res)); DBG("%s probe failed, %d\n", driver_name, retval); return retval; } @@ -1813,7 +1811,7 @@ static int __exit at91udc_remove(struct platform_device *pdev) gpio_free(udc->board.pullup_pin); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - 
release_mem_region(res->start, res->end - res->start + 1); + release_mem_region(res->start, resource_size(res)); clk_put(udc->iclk); clk_put(udc->fclk); diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c index 4e970cf0e29..f79bdfe4bed 100644 --- a/drivers/usb/gadget/atmel_usba_udc.c +++ b/drivers/usb/gadget/atmel_usba_udc.c @@ -320,7 +320,7 @@ static inline void usba_cleanup_debugfs(struct usba_udc *udc) static int vbus_is_present(struct usba_udc *udc) { if (gpio_is_valid(udc->vbus_pin)) - return gpio_get_value(udc->vbus_pin); + return gpio_get_value(udc->vbus_pin) ^ udc->vbus_pin_inverted; /* No Vbus detection: Assume always present */ return 1; @@ -1763,7 +1763,7 @@ static irqreturn_t usba_vbus_irq(int irq, void *devid) if (!udc->driver) goto out; - vbus = gpio_get_value(udc->vbus_pin); + vbus = vbus_is_present(udc); if (vbus != udc->vbus_prev) { if (vbus) { toggle_bias(1); @@ -1914,14 +1914,14 @@ static int __init usba_udc_probe(struct platform_device *pdev) udc->vbus_pin = -ENODEV; ret = -ENOMEM; - udc->regs = ioremap(regs->start, regs->end - regs->start + 1); + udc->regs = ioremap(regs->start, resource_size(regs)); if (!udc->regs) { dev_err(&pdev->dev, "Unable to map I/O memory, aborting.\n"); goto err_map_regs; } dev_info(&pdev->dev, "MMIO registers at 0x%08lx mapped at %p\n", (unsigned long)regs->start, udc->regs); - udc->fifo = ioremap(fifo->start, fifo->end - fifo->start + 1); + udc->fifo = ioremap(fifo->start, resource_size(fifo)); if (!udc->fifo) { dev_err(&pdev->dev, "Unable to map FIFO, aborting.\n"); goto err_map_fifo; @@ -2000,6 +2000,7 @@ static int __init usba_udc_probe(struct platform_device *pdev) if (gpio_is_valid(pdata->vbus_pin)) { if (!gpio_request(pdata->vbus_pin, "atmel_usba_udc")) { udc->vbus_pin = pdata->vbus_pin; + udc->vbus_pin_inverted = pdata->vbus_pin_inverted; ret = request_irq(gpio_to_irq(udc->vbus_pin), usba_vbus_irq, 0, diff --git a/drivers/usb/gadget/atmel_usba_udc.h b/drivers/usb/gadget/atmel_usba_udc.h index f7baea307f0..88a2e07a11a 100644 --- a/drivers/usb/gadget/atmel_usba_udc.h +++ b/drivers/usb/gadget/atmel_usba_udc.h @@ -323,6 +323,7 @@ struct usba_udc { struct platform_device *pdev; int irq; int vbus_pin; + int vbus_pin_inverted; struct clk *pclk; struct clk *hclk; diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c index cd0914ec898..65a5f94cbc0 100644 --- a/drivers/usb/gadget/epautoconf.c +++ b/drivers/usb/gadget/epautoconf.c @@ -265,16 +265,24 @@ struct usb_ep * __init usb_ep_autoconfig ( return ep; } - } else if (gadget_is_sh (gadget) && USB_ENDPOINT_XFER_INT == type) { - /* single buffering is enough; maybe 8 byte fifo is too */ - ep = find_ep (gadget, "ep3in-bulk"); - if (ep && ep_matches (gadget, ep, desc)) - return ep; - - } else if (gadget_is_mq11xx (gadget) && USB_ENDPOINT_XFER_INT == type) { - ep = find_ep (gadget, "ep1-bulk"); +#ifdef CONFIG_BLACKFIN + } else if (gadget_is_musbhsfc(gadget) || gadget_is_musbhdrc(gadget)) { + if ((USB_ENDPOINT_XFER_BULK == type) || + (USB_ENDPOINT_XFER_ISOC == type)) { + if (USB_DIR_IN & desc->bEndpointAddress) + ep = find_ep (gadget, "ep5in"); + else + ep = find_ep (gadget, "ep6out"); + } else if (USB_ENDPOINT_XFER_INT == type) { + if (USB_DIR_IN & desc->bEndpointAddress) + ep = find_ep(gadget, "ep1in"); + else + ep = find_ep(gadget, "ep2out"); + } else + ep = NULL; if (ep && ep_matches (gadget, ep, desc)) return ep; +#endif } /* Second, look at endpoints until an unclaimed one looks usable */ diff --git a/drivers/usb/gadget/ether.c 
b/drivers/usb/gadget/ether.c index 141372b6e7a..400f80372d9 100644 --- a/drivers/usb/gadget/ether.c +++ b/drivers/usb/gadget/ether.c @@ -259,7 +259,7 @@ static struct usb_configuration rndis_config_driver = { /*-------------------------------------------------------------------------*/ -#ifdef USB_ETH_EEM +#ifdef CONFIG_USB_ETH_EEM static int use_eem = 1; #else static int use_eem; diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c index d10353d46b8..e49c7325dce 100644 --- a/drivers/usb/gadget/f_acm.c +++ b/drivers/usb/gadget/f_acm.c @@ -702,14 +702,6 @@ acm_unbind(struct usb_configuration *c, struct usb_function *f) /* Some controllers can't support CDC ACM ... */ static inline bool can_support_cdc(struct usb_configuration *c) { - /* SH3 doesn't support multiple interfaces */ - if (gadget_is_sh(c->cdev->gadget)) - return false; - - /* sa1100 doesn't have a third interrupt endpoint */ - if (gadget_is_sa1100(c->cdev->gadget)) - return false; - /* everything else is *probably* fine ... */ return true; } diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c index ecf5bdd0ae0..2fff530efc1 100644 --- a/drivers/usb/gadget/f_ecm.c +++ b/drivers/usb/gadget/f_ecm.c @@ -497,12 +497,9 @@ static int ecm_set_alt(struct usb_function *f, unsigned intf, unsigned alt) struct net_device *net; /* Enable zlps by default for ECM conformance; - * override for musb_hdrc (avoids txdma ovhead) - * and sa1100 (can't). + * override for musb_hdrc (avoids txdma ovhead). */ - ecm->port.is_zlp_ok = !( - gadget_is_sa1100(cdev->gadget) - || gadget_is_musbhdrc(cdev->gadget) + ecm->port.is_zlp_ok = !(gadget_is_musbhdrc(cdev->gadget) ); ecm->port.cdc_filter = DEFAULT_FILTER; DBG(cdev, "activate ecm\n"); diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c index a37640eba43..5a3cdd08f1d 100644 --- a/drivers/usb/gadget/f_mass_storage.c +++ b/drivers/usb/gadget/f_mass_storage.c @@ -368,7 +368,7 @@ struct fsg_common { struct task_struct *thread_task; /* Callback function to call when thread exits. */ - void (*thread_exits)(struct fsg_common *common); + int (*thread_exits)(struct fsg_common *common); /* Gadget's private data. */ void *private_data; @@ -392,8 +392,12 @@ struct fsg_config { const char *lun_name_format; const char *thread_name; - /* Callback function to call when thread exits. */ - void (*thread_exits)(struct fsg_common *common); + /* Callback function to call when thread exits. If no + * callback is set or it returns value lower then zero MSF + * will force eject all LUNs it operates on (including those + * marked as non-removable or with prevent_medium_removal flag + * set). */ + int (*thread_exits)(struct fsg_common *common); /* Gadget's private data. */ void *private_data; @@ -614,7 +618,12 @@ static int fsg_setup(struct usb_function *f, return -EDOM; VDBG(fsg, "get max LUN\n"); *(u8 *) req->buf = fsg->common->nluns - 1; - return 1; + + /* Respond with data/status */ + req->length = min((u16)1, w_length); + fsg->common->ep0req_name = + ctrl->bRequestType & USB_DIR_IN ? 
"ep0-in" : "ep0-out"; + return ep0_queue(fsg->common); } VDBG(fsg, @@ -1041,7 +1050,7 @@ static void invalidate_sub(struct fsg_lun *curlun) unsigned long rc; rc = invalidate_mapping_pages(inode->i_mapping, 0, -1); - VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc); + VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc); } static int do_verify(struct fsg_common *common) @@ -2524,14 +2533,6 @@ static void handle_exception(struct fsg_common *common) case FSG_STATE_CONFIG_CHANGE: rc = do_set_config(common, new_config); - if (common->ep0_req_tag != exception_req_tag) - break; - if (rc != 0) { /* STALL on errors */ - DBG(common, "ep0 set halt\n"); - usb_ep_set_halt(common->ep0); - } else { /* Complete the status stage */ - ep0_queue(common); - } break; case FSG_STATE_EXIT: @@ -2615,8 +2616,20 @@ static int fsg_main_thread(void *common_) common->thread_task = NULL; spin_unlock_irq(&common->lock); - if (common->thread_exits) - common->thread_exits(common); + if (!common->thread_exits || common->thread_exits(common) < 0) { + struct fsg_lun *curlun = common->luns; + unsigned i = common->nluns; + + down_write(&common->filesem); + for (; i--; ++curlun) { + if (!fsg_lun_is_open(curlun)) + continue; + + fsg_lun_close(curlun); + curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT; + } + up_write(&common->filesem); + } /* Let the unbind and cleanup routines know the thread has exited */ complete_and_exit(&common->thread_notifier, 0); @@ -2763,10 +2776,7 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common, if (cfg->release != 0xffff) { i = cfg->release; } else { - /* The sa1100 controller is not supported */ - i = gadget_is_sa1100(gadget) - ? -1 - : usb_gadget_controller_number(gadget); + i = usb_gadget_controller_number(gadget); if (i >= 0) { i = 0x0300 + i; } else { @@ -2791,8 +2801,7 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common, * disable stalls. */ common->can_stall = cfg->can_stall && - !(gadget_is_sh(common->gadget) || - gadget_is_at91(common->gadget)); + !(gadget_is_at91(common->gadget)); spin_lock_init(&common->lock); @@ -2852,7 +2861,6 @@ error_release: /* Call fsg_common_release() directly, ref might be not * initialised */ fsg_common_release(&common->ref); - complete(&common->thread_notifier); return ERR_PTR(rc); } diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c index 95dae4c1ea4..a30e60c7f12 100644 --- a/drivers/usb/gadget/f_rndis.c +++ b/drivers/usb/gadget/f_rndis.c @@ -769,10 +769,6 @@ rndis_unbind(struct usb_configuration *c, struct usb_function *f) /* Some controllers can't support RNDIS ... */ static inline bool can_support_rndis(struct usb_configuration *c) { - /* only two endpoints on sa1100 */ - if (gadget_is_sa1100(c->cdev->gadget)) - return false; - /* everything else is *presumably* fine */ return true; } diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c index 29dfb0277ff..b49d86e3e45 100644 --- a/drivers/usb/gadget/file_storage.c +++ b/drivers/usb/gadget/file_storage.c @@ -1448,7 +1448,7 @@ static void invalidate_sub(struct fsg_lun *curlun) unsigned long rc; rc = invalidate_mapping_pages(inode->i_mapping, 0, -1); - VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc); + VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc); } static int do_verify(struct fsg_dev *fsg) @@ -3208,15 +3208,11 @@ static int __init check_parameters(struct fsg_dev *fsg) * halt bulk endpoints correctly. If one of them is present, * disable stalls. 
*/ - if (gadget_is_sh(fsg->gadget) || gadget_is_at91(fsg->gadget)) + if (gadget_is_at91(fsg->gadget)) mod_data.can_stall = 0; if (mod_data.release == 0xffff) { // Parameter wasn't set - /* The sa1100 controller is not supported */ - if (gadget_is_sa1100(fsg->gadget)) - gcnum = -1; - else - gcnum = usb_gadget_controller_number(fsg->gadget); + gcnum = usb_gadget_controller_number(fsg->gadget); if (gcnum >= 0) mod_data.release = 0x0300 + gcnum; else { diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c index 7881f12413c..3537d51073b 100644 --- a/drivers/usb/gadget/fsl_qe_udc.c +++ b/drivers/usb/gadget/fsl_qe_udc.c @@ -2749,7 +2749,7 @@ static int __devexit qe_udc_remove(struct of_device *ofdev) } /*-------------------------------------------------------------------------*/ -static struct of_device_id __devinitdata qe_udc_match[] = { +static const struct of_device_id qe_udc_match[] __devinitconst = { { .compatible = "fsl,mpc8323-qe-usb", .data = (void *)PORT_QE, diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h index f2d270b202f..1edbc12fff1 100644 --- a/drivers/usb/gadget/gadget_chips.h +++ b/drivers/usb/gadget/gadget_chips.h @@ -45,46 +45,18 @@ #define gadget_is_goku(g) 0 #endif -/* SH3 UDC -- not yet ported 2.4 --> 2.6 */ -#ifdef CONFIG_USB_GADGET_SUPERH -#define gadget_is_sh(g) !strcmp("sh_udc", (g)->name) -#else -#define gadget_is_sh(g) 0 -#endif - -/* not yet stable on 2.6 (would help "original Zaurus") */ -#ifdef CONFIG_USB_GADGET_SA1100 -#define gadget_is_sa1100(g) !strcmp("sa1100_udc", (g)->name) -#else -#define gadget_is_sa1100(g) 0 -#endif - #ifdef CONFIG_USB_GADGET_LH7A40X #define gadget_is_lh7a40x(g) !strcmp("lh7a40x_udc", (g)->name) #else #define gadget_is_lh7a40x(g) 0 #endif -/* handhelds.org tree (?) */ -#ifdef CONFIG_USB_GADGET_MQ11XX -#define gadget_is_mq11xx(g) !strcmp("mq11xx_udc", (g)->name) -#else -#define gadget_is_mq11xx(g) 0 -#endif - #ifdef CONFIG_USB_GADGET_OMAP #define gadget_is_omap(g) !strcmp("omap_udc", (g)->name) #else #define gadget_is_omap(g) 0 #endif -/* not yet ported 2.4 --> 2.6 */ -#ifdef CONFIG_USB_GADGET_N9604 -#define gadget_is_n9604(g) !strcmp("n9604_udc", (g)->name) -#else -#define gadget_is_n9604(g) 0 -#endif - /* various unstable versions available */ #ifdef CONFIG_USB_GADGET_PXA27X #define gadget_is_pxa27x(g) !strcmp("pxa27x_udc", (g)->name) @@ -122,14 +94,6 @@ #define gadget_is_fsl_usb2(g) 0 #endif -/* Mentor high speed function controller */ -/* from Montavista kernel (?) */ -#ifdef CONFIG_USB_GADGET_MUSBHSFC -#define gadget_is_musbhsfc(g) !strcmp("musbhsfc_udc", (g)->name) -#else -#define gadget_is_musbhsfc(g) 0 -#endif - /* Mentor high speed "dual role" controller, in peripheral role */ #ifdef CONFIG_USB_GADGET_MUSB_HDRC #define gadget_is_musbhdrc(g) !strcmp("musb_hdrc", (g)->name) @@ -143,13 +107,6 @@ #define gadget_is_langwell(g) 0 #endif -/* from Montavista kernel (?) 
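gadget_chips.h above drops the helpers for out-of-tree or never-ported controllers (sh, sa1100, mq11xx, n9604, musbhsfc, mpc8272), so usb_gadget_controller_number() no longer carries codes for them. As the printer and file-storage hunks show, that number is mostly used to synthesize a default bcdDevice; a small illustrative sketch of the idiom (the 0x0299 fallback is an arbitrary value, not from the patch).

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include "gadget_chips.h"

static void sketch_set_bcddevice(struct usb_gadget *gadget,
				 struct usb_device_descriptor *desc)
{
	int gcnum = usb_gadget_controller_number(gadget);

	if (gcnum >= 0)
		desc->bcdDevice = cpu_to_le16(0x0200 + gcnum);
	else
		/* Unrecognized UDC: advertise an obviously synthetic rev. */
		desc->bcdDevice = cpu_to_le16(0x0299);
}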
*/ -#ifdef CONFIG_USB_GADGET_MPC8272 -#define gadget_is_mpc8272(g) !strcmp("mpc8272_udc", (g)->name) -#else -#define gadget_is_mpc8272(g) 0 -#endif - #ifdef CONFIG_USB_GADGET_M66592 #define gadget_is_m66592(g) !strcmp("m66592_udc", (g)->name) #else @@ -203,20 +160,12 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget) return 0x02; else if (gadget_is_pxa(gadget)) return 0x03; - else if (gadget_is_sh(gadget)) - return 0x04; - else if (gadget_is_sa1100(gadget)) - return 0x05; else if (gadget_is_goku(gadget)) return 0x06; - else if (gadget_is_mq11xx(gadget)) - return 0x07; else if (gadget_is_omap(gadget)) return 0x08; else if (gadget_is_lh7a40x(gadget)) return 0x09; - else if (gadget_is_n9604(gadget)) - return 0x10; else if (gadget_is_pxa27x(gadget)) return 0x11; else if (gadget_is_s3c2410(gadget)) @@ -225,12 +174,8 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget) return 0x13; else if (gadget_is_imx(gadget)) return 0x14; - else if (gadget_is_musbhsfc(gadget)) - return 0x15; else if (gadget_is_musbhdrc(gadget)) return 0x16; - else if (gadget_is_mpc8272(gadget)) - return 0x17; else if (gadget_is_atmel_usba(gadget)) return 0x18; else if (gadget_is_fsl_usb2(gadget)) @@ -265,10 +210,6 @@ static inline bool gadget_supports_altsettings(struct usb_gadget *gadget) if (gadget_is_pxa27x(gadget)) return false; - /* SH3 hardware just doesn't do altsettings */ - if (gadget_is_sh(gadget)) - return false; - /* Everything else is *presumably* fine ... */ return true; } diff --git a/drivers/usb/gadget/gmidi.c b/drivers/usb/gadget/gmidi.c index 5f6a2e0a935..04f6224b7e0 100644 --- a/drivers/usb/gadget/gmidi.c +++ b/drivers/usb/gadget/gmidi.c @@ -618,11 +618,6 @@ gmidi_set_config(struct gmidi_device *dev, unsigned number, gfp_t gfp_flags) } #endif - if (gadget_is_sa1100(gadget) && dev->config) { - /* tx fifo is full, but we can't clear it...*/ - ERROR(dev, "can't change configurations\n"); - return -ESPIPE; - } gmidi_reset_config(dev); switch (number) { diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c index 112bb40a427..e8edc640381 100644 --- a/drivers/usb/gadget/goku_udc.c +++ b/drivers/usb/gadget/goku_udc.c @@ -1859,7 +1859,7 @@ done: /*-------------------------------------------------------------------------*/ -static struct pci_device_id pci_ids [] = { { +static const struct pci_device_id pci_ids[] = { { .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe), .class_mask = ~0, .vendor = 0x102f, /* Toshiba */ diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c index bf0f6520c6d..de8a8380350 100644 --- a/drivers/usb/gadget/inode.c +++ b/drivers/usb/gadget/inode.c @@ -194,7 +194,7 @@ enum ep_state { }; struct ep_data { - struct semaphore lock; + struct mutex lock; enum ep_state state; atomic_t count; struct dev_data *dev; @@ -298,10 +298,10 @@ get_ready_ep (unsigned f_flags, struct ep_data *epdata) int val; if (f_flags & O_NONBLOCK) { - if (down_trylock (&epdata->lock) != 0) + if (!mutex_trylock(&epdata->lock)) goto nonblock; if (epdata->state != STATE_EP_ENABLED) { - up (&epdata->lock); + mutex_unlock(&epdata->lock); nonblock: val = -EAGAIN; } else @@ -309,7 +309,8 @@ nonblock: return val; } - if ((val = down_interruptible (&epdata->lock)) < 0) + val = mutex_lock_interruptible(&epdata->lock); + if (val < 0) return val; switch (epdata->state) { @@ -323,7 +324,7 @@ nonblock: // FALLTHROUGH case STATE_EP_UNBOUND: /* clean disconnect */ val = -ENODEV; - up (&epdata->lock); + mutex_unlock(&epdata->lock); } return val; } @@ -393,7 
+394,7 @@ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) if (likely (data->ep != NULL)) usb_ep_set_halt (data->ep); spin_unlock_irq (&data->dev->lock); - up (&data->lock); + mutex_unlock(&data->lock); return -EBADMSG; } @@ -411,7 +412,7 @@ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) value = -EFAULT; free1: - up (&data->lock); + mutex_unlock(&data->lock); kfree (kbuf); return value; } @@ -436,7 +437,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) if (likely (data->ep != NULL)) usb_ep_set_halt (data->ep); spin_unlock_irq (&data->dev->lock); - up (&data->lock); + mutex_unlock(&data->lock); return -EBADMSG; } @@ -455,7 +456,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) VDEBUG (data->dev, "%s write %zu IN, status %d\n", data->name, len, (int) value); free1: - up (&data->lock); + mutex_unlock(&data->lock); kfree (kbuf); return value; } @@ -466,7 +467,8 @@ ep_release (struct inode *inode, struct file *fd) struct ep_data *data = fd->private_data; int value; - if ((value = down_interruptible(&data->lock)) < 0) + value = mutex_lock_interruptible(&data->lock); + if (value < 0) return value; /* clean up if this can be reopened */ @@ -476,7 +478,7 @@ ep_release (struct inode *inode, struct file *fd) data->hs_desc.bDescriptorType = 0; usb_ep_disable(data->ep); } - up (&data->lock); + mutex_unlock(&data->lock); put_ep (data); return 0; } @@ -507,7 +509,7 @@ static long ep_ioctl(struct file *fd, unsigned code, unsigned long value) } else status = -ENODEV; spin_unlock_irq (&data->dev->lock); - up (&data->lock); + mutex_unlock(&data->lock); return status; } @@ -673,7 +675,7 @@ fail: value = -ENODEV; spin_unlock_irq(&epdata->dev->lock); - up(&epdata->lock); + mutex_unlock(&epdata->lock); if (unlikely(value)) { kfree(priv); @@ -765,7 +767,8 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) u32 tag; int value, length = len; - if ((value = down_interruptible (&data->lock)) < 0) + value = mutex_lock_interruptible(&data->lock); + if (value < 0) return value; if (data->state != STATE_EP_READY) { @@ -854,7 +857,7 @@ fail: data->desc.bDescriptorType = 0; data->hs_desc.bDescriptorType = 0; } - up (&data->lock); + mutex_unlock(&data->lock); return value; fail0: value = -EINVAL; @@ -870,7 +873,7 @@ ep_open (struct inode *inode, struct file *fd) struct ep_data *data = inode->i_private; int value = -EBUSY; - if (down_interruptible (&data->lock) != 0) + if (mutex_lock_interruptible(&data->lock) != 0) return -EINTR; spin_lock_irq (&data->dev->lock); if (data->dev->state == STATE_DEV_UNBOUND) @@ -885,7 +888,7 @@ ep_open (struct inode *inode, struct file *fd) DBG (data->dev, "%s state %d\n", data->name, data->state); spin_unlock_irq (&data->dev->lock); - up (&data->lock); + mutex_unlock(&data->lock); return value; } @@ -1631,7 +1634,7 @@ static int activate_ep_files (struct dev_data *dev) if (!data) goto enomem0; data->state = STATE_EP_DISABLED; - init_MUTEX (&data->lock); + mutex_init(&data->lock); init_waitqueue_head (&data->wait); strncpy (data->name, ep->name, sizeof (data->name) - 1); diff --git a/drivers/usb/gadget/mass_storage.c b/drivers/usb/gadget/mass_storage.c index 19619fbf20a..705cc1f7632 100644 --- a/drivers/usb/gadget/mass_storage.c +++ b/drivers/usb/gadget/mass_storage.c @@ -135,6 +135,12 @@ FSG_MODULE_PARAMETERS(/* no prefix */, mod_data); static unsigned long msg_registered = 0; static void msg_cleanup(void); +static int msg_thread_exits(struct fsg_common *common) 
+{ + msg_cleanup(); + return 0; +} + static int __init msg_do_config(struct usb_configuration *c) { struct fsg_common *common; @@ -147,7 +153,7 @@ static int __init msg_do_config(struct usb_configuration *c) } fsg_config_from_params(&config, &mod_data); - config.thread_exits = (void(*)(struct fsg_common*))&msg_cleanup; + config.thread_exits = msg_thread_exits; common = fsg_common_init(0, c->cdev, &config); if (IS_ERR(common)) return PTR_ERR(common); diff --git a/drivers/usb/gadget/nokia.c b/drivers/usb/gadget/nokia.c new file mode 100644 index 00000000000..7d6b66a8572 --- /dev/null +++ b/drivers/usb/gadget/nokia.c @@ -0,0 +1,259 @@ +/* + * nokia.c -- Nokia Composite Gadget Driver + * + * Copyright (C) 2008-2010 Nokia Corporation + * Contact: Felipe Balbi <felipe.balbi@nokia.com> + * + * This gadget driver borrows from serial.c which is: + * + * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com) + * Copyright (C) 2008 by David Brownell + * Copyright (C) 2008 by Nokia Corporation + * + * This software is distributed under the terms of the GNU General + * Public License ("GPL") as published by the Free Software Foundation, + * version 2 of that License. + */ + +#include <linux/kernel.h> +#include <linux/utsname.h> +#include <linux/device.h> + +#include "u_serial.h" +#include "u_ether.h" +#include "u_phonet.h" +#include "gadget_chips.h" + +/* Defines */ + +#define NOKIA_VERSION_NUM 0x0211 +#define NOKIA_LONG_NAME "N900 (PC-Suite Mode)" + +/*-------------------------------------------------------------------------*/ + +/* + * Kbuild is not very cooperative with respect to linking separately + * compiled library objects into one module. So for now we won't use + * separate compilation ... ensuring init/exit sections work to shrink + * the runtime footprint, and giving us at least some parts of what + * a "gcc --combine ... part1.c part2.c part3.c ... " build would. 
+ */ +#include "composite.c" +#include "usbstring.c" +#include "config.c" +#include "epautoconf.c" + +#include "u_serial.c" +#include "f_acm.c" +#include "f_ecm.c" +#include "f_obex.c" +#include "f_serial.c" +#include "f_phonet.c" +#include "u_ether.c" + +/*-------------------------------------------------------------------------*/ + +#define NOKIA_VENDOR_ID 0x0421 /* Nokia */ +#define NOKIA_PRODUCT_ID 0x01c8 /* Nokia Gadget */ + +/* string IDs are assigned dynamically */ + +#define STRING_MANUFACTURER_IDX 0 +#define STRING_PRODUCT_IDX 1 +#define STRING_DESCRIPTION_IDX 2 + +static char manufacturer_nokia[] = "Nokia"; +static const char product_nokia[] = NOKIA_LONG_NAME; +static const char description_nokia[] = "PC-Suite Configuration"; + +static struct usb_string strings_dev[] = { + [STRING_MANUFACTURER_IDX].s = manufacturer_nokia, + [STRING_PRODUCT_IDX].s = NOKIA_LONG_NAME, + [STRING_DESCRIPTION_IDX].s = description_nokia, + { } /* end of list */ +}; + +static struct usb_gadget_strings stringtab_dev = { + .language = 0x0409, /* en-us */ + .strings = strings_dev, +}; + +static struct usb_gadget_strings *dev_strings[] = { + &stringtab_dev, + NULL, +}; + +static struct usb_device_descriptor device_desc = { + .bLength = USB_DT_DEVICE_SIZE, + .bDescriptorType = USB_DT_DEVICE, + .bcdUSB = __constant_cpu_to_le16(0x0200), + .bDeviceClass = USB_CLASS_COMM, + .idVendor = __constant_cpu_to_le16(NOKIA_VENDOR_ID), + .idProduct = __constant_cpu_to_le16(NOKIA_PRODUCT_ID), + /* .iManufacturer = DYNAMIC */ + /* .iProduct = DYNAMIC */ + .bNumConfigurations = 1, +}; + +/*-------------------------------------------------------------------------*/ + +/* Module */ +MODULE_DESCRIPTION("Nokia composite gadget driver for N900"); +MODULE_AUTHOR("Felipe Balbi"); +MODULE_LICENSE("GPL"); + +/*-------------------------------------------------------------------------*/ + +static u8 hostaddr[ETH_ALEN]; + +static int __init nokia_bind_config(struct usb_configuration *c) +{ + int status = 0; + + status = phonet_bind_config(c); + if (status) + printk(KERN_DEBUG "could not bind phonet config\n"); + + status = obex_bind_config(c, 0); + if (status) + printk(KERN_DEBUG "could not bind obex config %d\n", 0); + + status = obex_bind_config(c, 1); + if (status) + printk(KERN_DEBUG "could not bind obex config %d\n", 0); + + status = acm_bind_config(c, 2); + if (status) + printk(KERN_DEBUG "could not bind acm config\n"); + + status = ecm_bind_config(c, hostaddr); + if (status) + printk(KERN_DEBUG "could not bind ecm config\n"); + + return status; +} + +static struct usb_configuration nokia_config_500ma_driver = { + .label = "Bus Powered", + .bind = nokia_bind_config, + .bConfigurationValue = 1, + /* .iConfiguration = DYNAMIC */ + .bmAttributes = USB_CONFIG_ATT_ONE, + .bMaxPower = 250, /* 500mA */ +}; + +static struct usb_configuration nokia_config_100ma_driver = { + .label = "Self Powered", + .bind = nokia_bind_config, + .bConfigurationValue = 2, + /* .iConfiguration = DYNAMIC */ + .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER, + .bMaxPower = 50, /* 100 mA */ +}; + +static int __init nokia_bind(struct usb_composite_dev *cdev) +{ + int gcnum; + struct usb_gadget *gadget = cdev->gadget; + int status; + + status = gphonet_setup(cdev->gadget); + if (status < 0) + goto err_phonet; + + status = gserial_setup(cdev->gadget, 3); + if (status < 0) + goto err_serial; + + status = gether_setup(cdev->gadget, hostaddr); + if (status < 0) + goto err_ether; + + status = usb_string_id(cdev); + if (status < 0) + goto err_usb; + 
strings_dev[STRING_MANUFACTURER_IDX].id = status; + + device_desc.iManufacturer = status; + + status = usb_string_id(cdev); + if (status < 0) + goto err_usb; + strings_dev[STRING_PRODUCT_IDX].id = status; + + device_desc.iProduct = status; + + /* config description */ + status = usb_string_id(cdev); + if (status < 0) + goto err_usb; + strings_dev[STRING_DESCRIPTION_IDX].id = status; + + nokia_config_500ma_driver.iConfiguration = status; + nokia_config_100ma_driver.iConfiguration = status; + + /* set up other descriptors */ + gcnum = usb_gadget_controller_number(gadget); + if (gcnum >= 0) + device_desc.bcdDevice = cpu_to_le16(NOKIA_VERSION_NUM); + else { + /* this should only work with hw that supports altsettings + * and several endpoints, anything else, panic. + */ + pr_err("nokia_bind: controller '%s' not recognized\n", + gadget->name); + goto err_usb; + } + + /* finaly register the configuration */ + status = usb_add_config(cdev, &nokia_config_500ma_driver); + if (status < 0) + goto err_usb; + + status = usb_add_config(cdev, &nokia_config_100ma_driver); + if (status < 0) + goto err_usb; + + dev_info(&gadget->dev, "%s\n", NOKIA_LONG_NAME); + + return 0; + +err_usb: + gether_cleanup(); +err_ether: + gserial_cleanup(); +err_serial: + gphonet_cleanup(); +err_phonet: + return status; +} + +static int __exit nokia_unbind(struct usb_composite_dev *cdev) +{ + gphonet_cleanup(); + gserial_cleanup(); + gether_cleanup(); + + return 0; +} + +static struct usb_composite_driver nokia_driver = { + .name = "g_nokia", + .dev = &device_desc, + .strings = dev_strings, + .bind = nokia_bind, + .unbind = __exit_p(nokia_unbind), +}; + +static int __init nokia_init(void) +{ + return usb_composite_register(&nokia_driver); +} +module_init(nokia_init); + +static void __exit nokia_cleanup(void) +{ + usb_composite_unregister(&nokia_driver); +} +module_exit(nokia_cleanup); + diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c index 2d867fd2241..6b8bf8c781c 100644 --- a/drivers/usb/gadget/printer.c +++ b/drivers/usb/gadget/printer.c @@ -949,12 +949,6 @@ printer_set_config(struct printer_dev *dev, unsigned number) int result = 0; struct usb_gadget *gadget = dev->gadget; - if (gadget_is_sa1100(gadget) && dev->config) { - /* tx fifo is full, but we can't clear it...*/ - INFO(dev, "can't change configurations\n"); - return -ESPIPE; - } - switch (number) { case DEV_CONFIG_VALUE: result = 0; @@ -1033,12 +1027,6 @@ set_interface(struct printer_dev *dev, unsigned number) { int result = 0; - if (gadget_is_sa1100(dev->gadget) && dev->interface < 0) { - /* tx fifo is full, but we can't clear it...*/ - INFO(dev, "can't change interfaces\n"); - return -ESPIPE; - } - /* Free the current interface */ switch (dev->interface) { case PRINTER_INTERFACE: @@ -1392,12 +1380,6 @@ printer_bind(struct usb_gadget *gadget) goto fail; } - if (gadget_is_sa1100(gadget)) { - /* hardware can't write zero length packets. 
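nokia_bind() above registers two configurations for the same function set; note that bMaxPower is expressed in 2 mA units, so 250 advertises 500 mA and 50 advertises 100 mA. A hypothetical extra configuration written in the same style as the driver above, purely to illustrate the unit and usb_add_config() (not part of the Nokia gadget).

static int __init sketch_bind_config(struct usb_configuration *c)
{
	/* bind functions to the configuration here */
	return 0;
}

static struct usb_configuration sketch_config = {
	.label			= "Sketch",
	.bind			= sketch_bind_config,
	.bConfigurationValue	= 3,
	.bmAttributes		= USB_CONFIG_ATT_ONE,
	.bMaxPower		= 100,	/* 100 * 2 mA = 200 mA */
};

/* In the composite bind() one would then call:
 *	status = usb_add_config(cdev, &sketch_config);
 */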
*/ - ERROR(dev, "SA1100 controller is unsupport by this driver\n"); - goto fail; - } - gcnum = usb_gadget_controller_number(gadget); if (gcnum >= 0) { device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum); diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c index adda1208a1e..05b892c3d68 100644 --- a/drivers/usb/gadget/pxa27x_udc.c +++ b/drivers/usb/gadget/pxa27x_udc.c @@ -742,13 +742,17 @@ static void ep_del_request(struct pxa_ep *ep, struct pxa27x_request *req) * @ep: pxa physical endpoint * @req: pxa request * @status: usb request status sent to gadget API + * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held * - * Context: ep->lock held + * Context: ep->lock held if flags not NULL, else ep->lock released * * Retire a pxa27x usb request. Endpoint must be locked. */ -static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status) +static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status, + unsigned long *pflags) { + unsigned long flags; + ep_del_request(ep, req); if (likely(req->req.status == -EINPROGRESS)) req->req.status = status; @@ -760,38 +764,48 @@ static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status) &req->req, status, req->req.actual, req->req.length); + if (pflags) + spin_unlock_irqrestore(&ep->lock, *pflags); + local_irq_save(flags); req->req.complete(&req->udc_usb_ep->usb_ep, &req->req); + local_irq_restore(flags); + if (pflags) + spin_lock_irqsave(&ep->lock, *pflags); } /** * ep_end_out_req - Ends endpoint OUT request * @ep: physical endpoint * @req: pxa request + * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held * - * Context: ep->lock held + * Context: ep->lock held or released (see req_done()) * * Ends endpoint OUT request (completes usb request). */ -static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req) +static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req, + unsigned long *pflags) { inc_ep_stats_reqs(ep, !USB_DIR_IN); - req_done(ep, req, 0); + req_done(ep, req, 0, pflags); } /** * ep0_end_out_req - Ends control endpoint OUT request (ends data stage) * @ep: physical endpoint * @req: pxa request + * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held * - * Context: ep->lock held + * Context: ep->lock held or released (see req_done()) * * Ends control endpoint OUT request (completes usb request), and puts * control endpoint into idle state */ -static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req) +static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req, + unsigned long *pflags) { set_ep0state(ep->dev, OUT_STATUS_STAGE); - ep_end_out_req(ep, req); + ep_end_out_req(ep, req, pflags); ep0_idle(ep->dev); } @@ -799,31 +813,35 @@ static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req) * ep_end_in_req - Ends endpoint IN request * @ep: physical endpoint * @req: pxa request + * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held * - * Context: ep->lock held + * Context: ep->lock held or released (see req_done()) * * Ends endpoint IN request (completes usb request). 
*/ -static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req) +static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req, + unsigned long *pflags) { inc_ep_stats_reqs(ep, USB_DIR_IN); - req_done(ep, req, 0); + req_done(ep, req, 0, pflags); } /** * ep0_end_in_req - Ends control endpoint IN request (ends data stage) * @ep: physical endpoint * @req: pxa request + * @pflags: flags of previous spinlock_irq_save() or NULL if no lock held * - * Context: ep->lock held + * Context: ep->lock held or released (see req_done()) * * Ends control endpoint IN request (completes usb request), and puts * control endpoint into status state */ -static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req) +static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req, + unsigned long *pflags) { set_ep0state(ep->dev, IN_STATUS_STAGE); - ep_end_in_req(ep, req); + ep_end_in_req(ep, req, pflags); } /** @@ -831,19 +849,22 @@ static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req) * @ep: pxa endpoint * @status: usb request status * - * Context: ep->lock held + * Context: ep->lock released * * Dequeues all requests on an endpoint. As a side effect, interrupts will be * disabled on that endpoint (because no more requests). */ static void nuke(struct pxa_ep *ep, int status) { - struct pxa27x_request *req; + struct pxa27x_request *req; + unsigned long flags; + spin_lock_irqsave(&ep->lock, flags); while (!list_empty(&ep->queue)) { req = list_entry(ep->queue.next, struct pxa27x_request, queue); - req_done(ep, req, status); + req_done(ep, req, status, &flags); } + spin_unlock_irqrestore(&ep->lock, flags); } /** @@ -1123,6 +1144,7 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req, int rc = 0; int is_first_req; unsigned length; + int recursion_detected; req = container_of(_req, struct pxa27x_request, req); udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep); @@ -1152,6 +1174,7 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req, return -EMSGSIZE; spin_lock_irqsave(&ep->lock, flags); + recursion_detected = ep->in_handle_ep; is_first_req = list_empty(&ep->queue); ep_dbg(ep, "queue req %p(first=%s), len %d buf %p\n", @@ -1161,12 +1184,12 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req, if (!ep->enabled) { _req->status = -ESHUTDOWN; rc = -ESHUTDOWN; - goto out; + goto out_locked; } if (req->in_use) { ep_err(ep, "refusing to queue req %p (already queued)\n", req); - goto out; + goto out_locked; } length = _req->length; @@ -1174,12 +1197,13 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req, _req->actual = 0; ep_add_request(ep, req); + spin_unlock_irqrestore(&ep->lock, flags); if (is_ep0(ep)) { switch (dev->ep0state) { case WAIT_ACK_SET_CONF_INTERF: if (length == 0) { - ep_end_in_req(ep, req); + ep_end_in_req(ep, req, NULL); } else { ep_err(ep, "got a request of %d bytes while" "in state WAIT_ACK_SET_CONF_INTERF\n", @@ -1192,12 +1216,12 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req, case IN_DATA_STAGE: if (!ep_is_full(ep)) if (write_ep0_fifo(ep, req)) - ep0_end_in_req(ep, req); + ep0_end_in_req(ep, req, NULL); break; case OUT_DATA_STAGE: if ((length == 0) || !epout_has_pkt(ep)) if (read_ep0_fifo(ep, req)) - ep0_end_out_req(ep, req); + ep0_end_out_req(ep, req, NULL); break; default: ep_err(ep, "odd state %s to send me a request\n", @@ -1207,12 +1231,15 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req, break; } } else { - 
handle_ep(ep); + if (!recursion_detected) + handle_ep(ep); } out: - spin_unlock_irqrestore(&ep->lock, flags); return rc; +out_locked: + spin_unlock_irqrestore(&ep->lock, flags); + goto out; } /** @@ -1242,13 +1269,14 @@ static int pxa_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) /* make sure it's actually queued on this endpoint */ list_for_each_entry(req, &ep->queue, queue) { if (&req->req == _req) { - req_done(ep, req, -ECONNRESET); rc = 0; break; } } spin_unlock_irqrestore(&ep->lock, flags); + if (!rc) + req_done(ep, req, -ECONNRESET, NULL); return rc; } @@ -1445,7 +1473,6 @@ static int pxa_ep_disable(struct usb_ep *_ep) { struct pxa_ep *ep; struct udc_usb_ep *udc_usb_ep; - unsigned long flags; if (!_ep) return -EINVAL; @@ -1455,10 +1482,8 @@ static int pxa_ep_disable(struct usb_ep *_ep) if (!ep || is_ep0(ep) || !list_empty(&ep->queue)) return -EINVAL; - spin_lock_irqsave(&ep->lock, flags); ep->enabled = 0; nuke(ep, -ESHUTDOWN); - spin_unlock_irqrestore(&ep->lock, flags); pxa_ep_fifo_flush(_ep); udc_usb_ep->pxa_ep = NULL; @@ -1907,8 +1932,10 @@ static void handle_ep0_ctrl_req(struct pxa_udc *udc, } u; int i; int have_extrabytes = 0; + unsigned long flags; nuke(ep, -EPROTO); + spin_lock_irqsave(&ep->lock, flags); /* * In the PXA320 manual, in the section about Back-to-Back setup @@ -1947,10 +1974,13 @@ static void handle_ep0_ctrl_req(struct pxa_udc *udc, /* Tell UDC to enter Data Stage */ ep_write_UDCCSR(ep, UDCCSR0_SA | UDCCSR0_OPC); + spin_unlock_irqrestore(&ep->lock, flags); i = udc->driver->setup(&udc->gadget, &u.r); + spin_lock_irqsave(&ep->lock, flags); if (i < 0) goto stall; out: + spin_unlock_irqrestore(&ep->lock, flags); return; stall: ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n", @@ -2055,13 +2085,13 @@ static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq) if (req && !ep_is_full(ep)) completed = write_ep0_fifo(ep, req); if (completed) - ep0_end_in_req(ep, req); + ep0_end_in_req(ep, req, NULL); break; case OUT_DATA_STAGE: /* SET_DESCRIPTOR */ if (epout_has_pkt(ep) && req) completed = read_ep0_fifo(ep, req); if (completed) - ep0_end_out_req(ep, req); + ep0_end_out_req(ep, req, NULL); break; case STALL: ep_write_UDCCSR(ep, UDCCSR0_FST); @@ -2091,7 +2121,7 @@ static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq) * Tries to transfer all pending request data into the endpoint and/or * transfer all pending data in the endpoint into usb requests. * - * Is always called when in_interrupt() or with ep->lock held. + * Is always called when in_interrupt() and with ep->lock released. 
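pxa_ep_queue() above samples recursion_detected and, together with the in_handle_ep bit documented in the pxa27x_udc.h hunk further below, keeps the complete() -> pxa_ep_queue() -> handle_ep() chain from re-entering handle_ep(). A minimal sketch of that reentrancy guard, with invented names:

#include <linux/spinlock.h>

struct sketch_fifo_ep {
	spinlock_t	lock;
	unsigned	in_service:1;
};

static void sketch_service(struct sketch_fifo_ep *ep)
{
	unsigned long flags;

	spin_lock_irqsave(&ep->lock, flags);
	if (ep->in_service)
		goto already_running;	/* nested call: the outer pass
					 * will pick up the new work */
	ep->in_service = 1;

	/* ... move FIFO data, dropping/retaking ep->lock around any
	 * completion callbacks as in the giveback sketch earlier ... */

	ep->in_service = 0;
already_running:
	spin_unlock_irqrestore(&ep->lock, flags);
}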
*/ static void handle_ep(struct pxa_ep *ep) { @@ -2100,10 +2130,17 @@ static void handle_ep(struct pxa_ep *ep) u32 udccsr; int is_in = ep->dir_in; int loop = 0; + unsigned long flags; + + spin_lock_irqsave(&ep->lock, flags); + if (ep->in_handle_ep) + goto recursion_detected; + ep->in_handle_ep = 1; do { completed = 0; udccsr = udc_ep_readl(ep, UDCCSR); + if (likely(!list_empty(&ep->queue))) req = list_entry(ep->queue.next, struct pxa27x_request, queue); @@ -2122,15 +2159,22 @@ static void handle_ep(struct pxa_ep *ep) if (unlikely(is_in)) { if (likely(!ep_is_full(ep))) completed = write_fifo(ep, req); - if (completed) - ep_end_in_req(ep, req); } else { if (likely(epout_has_pkt(ep))) completed = read_fifo(ep, req); - if (completed) - ep_end_out_req(ep, req); + } + + if (completed) { + if (is_in) + ep_end_in_req(ep, req, &flags); + else + ep_end_out_req(ep, req, &flags); } } while (completed); + + ep->in_handle_ep = 0; +recursion_detected: + spin_unlock_irqrestore(&ep->lock, flags); } /** @@ -2218,9 +2262,13 @@ static void irq_handle_data(int irq, struct pxa_udc *udc) continue; udc_writel(udc, UDCISR0, UDCISR_INT(i, UDCISR_INT_MASK)); - ep = &udc->pxa_ep[i]; - ep->stats.irqs++; - handle_ep(ep); + + WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep)); + if (i < ARRAY_SIZE(udc->pxa_ep)) { + ep = &udc->pxa_ep[i]; + ep->stats.irqs++; + handle_ep(ep); + } } for (i = 16; udcisr1 != 0 && i < 24; udcisr1 >>= 2, i++) { @@ -2228,9 +2276,12 @@ static void irq_handle_data(int irq, struct pxa_udc *udc) if (!(udcisr1 & UDCISR_INT_MASK)) continue; - ep = &udc->pxa_ep[i]; - ep->stats.irqs++; - handle_ep(ep); + WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep)); + if (i < ARRAY_SIZE(udc->pxa_ep)) { + ep = &udc->pxa_ep[i]; + ep->stats.irqs++; + handle_ep(ep); + } } } @@ -2439,7 +2490,7 @@ static int __init pxa_udc_probe(struct platform_device *pdev) } retval = -ENOMEM; - udc->regs = ioremap(regs->start, regs->end - regs->start + 1); + udc->regs = ioremap(regs->start, resource_size(regs)); if (!udc->regs) { dev_err(&pdev->dev, "Unable to map UDC I/O memory\n"); goto err_map; diff --git a/drivers/usb/gadget/pxa27x_udc.h b/drivers/usb/gadget/pxa27x_udc.h index e25225e2658..ff61e4866e8 100644 --- a/drivers/usb/gadget/pxa27x_udc.h +++ b/drivers/usb/gadget/pxa27x_udc.h @@ -318,6 +318,11 @@ struct udc_usb_ep { * @queue: requests queue * @lock: lock to pxa_ep data (queues and stats) * @enabled: true when endpoint enabled (not stopped by gadget layer) + * @in_handle_ep: number of recursions of handle_ep() function + * Prevents deadlocks or infinite recursions of types : + * irq->handle_ep()->req_done()->req.complete()->pxa_ep_queue()->handle_ep() + * or + * pxa_ep_queue()->handle_ep()->req_done()->req.complete()->pxa_ep_queue() * @idx: endpoint index (1 => epA, 2 => epB, ..., 24 => epX) * @name: endpoint name (for trace/debug purpose) * @dir_in: 1 if IN endpoint, 0 if OUT endpoint @@ -346,6 +351,7 @@ struct pxa_ep { spinlock_t lock; /* Protects this structure */ /* (queues, stats) */ unsigned enabled:1; + unsigned in_handle_ep:1; unsigned idx:5; char *name; diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c index 5fc80a10415..7e5bf593d38 100644 --- a/drivers/usb/gadget/s3c-hsotg.c +++ b/drivers/usb/gadget/s3c-hsotg.c @@ -317,7 +317,8 @@ static void s3c_hsotg_init_fifo(struct s3c_hsotg *hsotg) * * Allocate a new USB request structure appropriate for the specified endpoint */ -struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep, gfp_t flags) +static struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep 
*ep, + gfp_t flags) { struct s3c_hsotg_req *req; @@ -373,7 +374,7 @@ static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg, req->dma = DMA_ADDR_INVALID; hs_req->mapped = 0; } else { - dma_sync_single(hsotg->dev, req->dma, req->length, dir); + dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir); } } @@ -755,7 +756,7 @@ static int s3c_hsotg_map_dma(struct s3c_hsotg *hsotg, hs_req->mapped = 1; req->dma = dma; } else { - dma_sync_single(hsotg->dev, req->dma, req->length, dir); + dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir); hs_req->mapped = 0; } @@ -1460,7 +1461,7 @@ static u32 s3c_hsotg_read_frameno(struct s3c_hsotg *hsotg) * as the actual data should be sent to the memory directly and we turn * on the completion interrupts to get notifications of transfer completion. */ -void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg) +static void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg) { u32 grxstsr = readl(hsotg->regs + S3C_GRXSTSP); u32 epnum, status, size; @@ -3094,7 +3095,7 @@ static void s3c_hsotg_gate(struct platform_device *pdev, bool on) local_irq_restore(flags); } -struct s3c_hsotg_plat s3c_hsotg_default_pdata; +static struct s3c_hsotg_plat s3c_hsotg_default_pdata; static int __devinit s3c_hsotg_probe(struct platform_device *pdev) { diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c index 2fc02bd9584..84ca195c2d1 100644 --- a/drivers/usb/gadget/u_ether.c +++ b/drivers/usb/gadget/u_ether.c @@ -746,6 +746,10 @@ static const struct net_device_ops eth_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; +static struct device_type gadget_type = { + .name = "gadget", +}; + /** * gether_setup - initialize one ethernet-over-usb link * @g: gadget to associated with these links @@ -808,6 +812,7 @@ int __init gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN]) dev->gadget = g; SET_NETDEV_DEV(net, &g->dev); + SET_NETDEV_DEVTYPE(net, &gadget_type); status = register_netdev(net); if (status < 0) { diff --git a/drivers/usb/gadget/u_ether.h b/drivers/usb/gadget/u_ether.h index fd55f450bc0..3c8c0c9f9d7 100644 --- a/drivers/usb/gadget/u_ether.h +++ b/drivers/usb/gadget/u_ether.h @@ -93,13 +93,6 @@ static inline bool can_support_ecm(struct usb_gadget *gadget) if (!gadget_supports_altsettings(gadget)) return false; - /* SA1100 can do ECM, *without* status endpoint ... but we'll - * only use it in non-ECM mode for backwards compatibility - * (and since we currently require a status endpoint) - */ - if (gadget_is_sa1100(gadget)) - return false; - /* Everything else is *presumably* fine ... but this is a bit * chancy, so be **CERTAIN** there are no hardware issues with * your controller. Add it above if it can't handle CDC. 
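s3c-hsotg above replaces the removed dma_sync_single() with dma_sync_single_for_cpu(); its counterpart dma_sync_single_for_device() hands the buffer back to the hardware. A sketch of the streaming-DMA ownership idiom these calls belong to; dev, buf and len are assumed to be supplied by the caller.

#include <linux/dma-mapping.h>

static int sketch_dma_roundtrip(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... the device DMAs into the buffer ... */

	/* Give the CPU a coherent view before reading the data. */
	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
	/* ... CPU inspects buf ... */

	/* Return ownership to the device for the next transfer. */
	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);

	/* ... when finished ... */
	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
	return 0;
}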
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c index 2d772401b7a..fac81ee193d 100644 --- a/drivers/usb/gadget/zero.c +++ b/drivers/usb/gadget/zero.c @@ -297,12 +297,10 @@ static int __init zero_bind(struct usb_composite_dev *cdev) */ if (loopdefault) { loopback_add(cdev, autoresume != 0); - if (!gadget_is_sh(gadget)) - sourcesink_add(cdev, autoresume != 0); + sourcesink_add(cdev, autoresume != 0); } else { sourcesink_add(cdev, autoresume != 0); - if (!gadget_is_sh(gadget)) - loopback_add(cdev, autoresume != 0); + loopback_add(cdev, autoresume != 0); } gcnum = usb_gadget_controller_number(gadget); diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig index 2678a1624fc..8d3df0397de 100644 --- a/drivers/usb/host/Kconfig +++ b/drivers/usb/host/Kconfig @@ -399,3 +399,14 @@ config USB_HWA_HCD To compile this driver a module, choose M here: the module will be called "hwa-hc". + +config USB_IMX21_HCD + tristate "iMX21 HCD support" + depends on USB && ARM && MACH_MX21 + help + This driver enables support for the on-chip USB host in the + iMX21 processor. + + To compile this driver as a module, choose M here: the + module will be called "imx21-hcd". + diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile index f58b2494c44..4e0c67f1f51 100644 --- a/drivers/usb/host/Makefile +++ b/drivers/usb/host/Makefile @@ -32,3 +32,5 @@ obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o +obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o + diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c index 87c1b7c34c0..51bd0edf544 100644 --- a/drivers/usb/host/ehci-atmel.c +++ b/drivers/usb/host/ehci-atmel.c @@ -149,7 +149,7 @@ static int __init ehci_atmel_drv_probe(struct platform_device *pdev) goto fail_request_resource; } hcd->rsrc_start = res->start; - hcd->rsrc_len = res->end - res->start + 1; + hcd->rsrc_len = resource_size(res); if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, driver->description)) { diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c index dbfb482a94e..e3a74e75e82 100644 --- a/drivers/usb/host/ehci-au1xxx.c +++ b/drivers/usb/host/ehci-au1xxx.c @@ -121,6 +121,7 @@ static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev) { struct usb_hcd *hcd; struct ehci_hcd *ehci; + struct resource *res; int ret; if (usb_disabled()) @@ -144,8 +145,9 @@ static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev) if (!hcd) return -ENOMEM; - hcd->rsrc_start = pdev->resource[0].start; - hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + hcd->rsrc_start = res->start; + hcd->rsrc_len = resource_size(res); if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { pr_debug("request_mem_region failed"); diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c index 991174937db..0e26aa13f15 100644 --- a/drivers/usb/host/ehci-fsl.c +++ b/drivers/usb/host/ehci-fsl.c @@ -1,5 +1,6 @@ /* - * Copyright (c) 2005 MontaVista Software + * Copyright 2005-2009 MontaVista Software, Inc. + * Copyright 2008 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -17,17 +18,20 @@ * * Ported to 834x by Randy Vinson <rvinson@mvista.com> using code provided * by Hunter Wu. 
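Several hunks above (at91_udc, atmel_usba_udc, pxa27x_udc, ehci-atmel and ehci-au1xxx among them) replace the open-coded "res->end - res->start + 1" with resource_size(). A typical probe-time usage sketch; "sketch-dev" and sketch_probe() are illustrative names, not from the patch.

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int sketch_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	if (!request_mem_region(res->start, resource_size(res), "sketch-dev"))
		return -EBUSY;

	regs = ioremap(res->start, resource_size(res));
	if (!regs) {
		release_mem_region(res->start, resource_size(res));
		return -ENOMEM;
	}

	/* ... use regs; undo with iounmap() + release_mem_region() ... */
	return 0;
}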
+ * Power Management support by Dave Liu <daveliu@freescale.com>, + * Jerry Huang <Chang-Ming.Huang@freescale.com> and + * Anton Vorontsov <avorontsov@ru.mvista.com>. */ +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/pm.h> #include <linux/platform_device.h> #include <linux/fsl_devices.h> #include "ehci-fsl.h" -/* FIXME: Power Management is un-ported so temporarily disable it */ -#undef CONFIG_PM - - /* configure so an HC device and id are always provided */ /* always called with process context; sleeping is OK */ @@ -40,8 +44,8 @@ * Allocates basic resources for this USB host controller. * */ -int usb_hcd_fsl_probe(const struct hc_driver *driver, - struct platform_device *pdev) +static int usb_hcd_fsl_probe(const struct hc_driver *driver, + struct platform_device *pdev) { struct fsl_usb2_platform_data *pdata; struct usb_hcd *hcd; @@ -147,7 +151,8 @@ int usb_hcd_fsl_probe(const struct hc_driver *driver, * Reverses the effect of usb_hcd_fsl_probe(). * */ -void usb_hcd_fsl_remove(struct usb_hcd *hcd, struct platform_device *pdev) +static void usb_hcd_fsl_remove(struct usb_hcd *hcd, + struct platform_device *pdev) { usb_remove_hcd(hcd); iounmap(hcd->regs); @@ -284,10 +289,81 @@ static int ehci_fsl_setup(struct usb_hcd *hcd) return retval; } +struct ehci_fsl { + struct ehci_hcd ehci; + +#ifdef CONFIG_PM + /* Saved USB PHY settings, need to restore after deep sleep. */ + u32 usb_ctrl; +#endif +}; + +#ifdef CONFIG_PM + +static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + + return container_of(ehci, struct ehci_fsl, ehci); +} + +static int ehci_fsl_drv_suspend(struct device *dev) +{ + struct usb_hcd *hcd = dev_get_drvdata(dev); + struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd); + void __iomem *non_ehci = hcd->regs; + + if (!fsl_deep_sleep()) + return 0; + + ehci_fsl->usb_ctrl = in_be32(non_ehci + FSL_SOC_USB_CTRL); + return 0; +} + +static int ehci_fsl_drv_resume(struct device *dev) +{ + struct usb_hcd *hcd = dev_get_drvdata(dev); + struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd); + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + void __iomem *non_ehci = hcd->regs; + + if (!fsl_deep_sleep()) + return 0; + + usb_root_hub_lost_power(hcd->self.root_hub); + + /* Restore USB PHY settings and enable the controller. 
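ehci-fsl above enlarges hcd_priv_size to sizeof(struct ehci_fsl) and recovers the wrapper with container_of() in hcd_to_ehci_fsl(). The same idiom in isolation, with invented sketch_* types:

#include <linux/kernel.h>
#include <linux/types.h>

struct sketch_core {
	int	running;
};

/* Wrapper allocated by the framework once it is told the private size is
 * sizeof(struct sketch_priv); container_of() works for any member offset. */
struct sketch_priv {
	struct sketch_core	core;
	u32			saved_ctrl;
};

static struct sketch_priv *core_to_priv(struct sketch_core *core)
{
	return container_of(core, struct sketch_priv, core);
}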
*/ + out_be32(non_ehci + FSL_SOC_USB_CTRL, ehci_fsl->usb_ctrl); + + ehci_reset(ehci); + ehci_fsl_reinit(ehci); + + return 0; +} + +static int ehci_fsl_drv_restore(struct device *dev) +{ + struct usb_hcd *hcd = dev_get_drvdata(dev); + + usb_root_hub_lost_power(hcd->self.root_hub); + return 0; +} + +static struct dev_pm_ops ehci_fsl_pm_ops = { + .suspend = ehci_fsl_drv_suspend, + .resume = ehci_fsl_drv_resume, + .restore = ehci_fsl_drv_restore, +}; + +#define EHCI_FSL_PM_OPS (&ehci_fsl_pm_ops) +#else +#define EHCI_FSL_PM_OPS NULL +#endif /* CONFIG_PM */ + static const struct hc_driver ehci_fsl_hc_driver = { .description = hcd_name, .product_desc = "Freescale On-Chip EHCI Host Controller", - .hcd_priv_size = sizeof(struct ehci_hcd), + .hcd_priv_size = sizeof(struct ehci_fsl), /* * generic hardware linkage @@ -354,6 +430,7 @@ static struct platform_driver ehci_fsl_driver = { .remove = ehci_fsl_drv_remove, .shutdown = usb_hcd_platform_shutdown, .driver = { - .name = "fsl-ehci", + .name = "fsl-ehci", + .pm = EHCI_FSL_PM_OPS, }, }; diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c index 35c56f40bdb..23cd917088b 100644 --- a/drivers/usb/host/ehci-mxc.c +++ b/drivers/usb/host/ehci-mxc.c @@ -162,6 +162,17 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev) goto err_ioremap; } + /* call platform specific init function */ + if (pdata->init) { + ret = pdata->init(pdev); + if (ret) { + dev_err(dev, "platform init failed\n"); + goto err_init; + } + /* platforms need some time to settle changed IO settings */ + mdelay(10); + } + /* enable clocks */ priv->usbclk = clk_get(dev, "usb"); if (IS_ERR(priv->usbclk)) { @@ -192,18 +203,6 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev) if (ret < 0) goto err_init; - /* call platform specific init function */ - if (pdata->init) { - ret = pdata->init(pdev); - if (ret) { - dev_err(dev, "platform init failed\n"); - goto err_init; - } - } - - /* most platforms need some time to settle changed IO settings */ - mdelay(10); - /* Initialize the transceiver */ if (pdata->otg) { pdata->otg->io_priv = hcd->regs + ULPI_VIEWPORT_OFFSET; diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c index 74d07f4e8b7..f0282d6bb7a 100644 --- a/drivers/usb/host/ehci-omap.c +++ b/drivers/usb/host/ehci-omap.c @@ -26,10 +26,9 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * - * TODO (last updated Feb 23rd, 2009): + * TODO (last updated Feb 12, 2010): * - add kernel-doc * - enable AUTOIDLE - * - move DPLL5 programming to clock fw * - add suspend/resume * - move workarounds to board-files */ @@ -37,6 +36,7 @@ #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/gpio.h> +#include <linux/regulator/consumer.h> #include <plat/usb.h> /* @@ -178,6 +178,11 @@ struct ehci_hcd_omap { void __iomem *uhh_base; void __iomem *tll_base; void __iomem *ehci_base; + + /* Regulators for USB PHYs. + * Each PHY can have a seperate regulator. 
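The EHCI_FSL_PM_OPS definitions a few hunks up are the standard way to keep a platform driver buildable with and without CONFIG_PM: one branch provides a real dev_pm_ops table, the other provides NULL, and the .pm field absorbs the difference. A skeletal sketch of the same pattern with hypothetical foo_* names, not taken from the patch:

#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM
static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev)  { return 0; }

static const struct dev_pm_ops foo_pm_ops = {
        .suspend = foo_suspend,
        .resume  = foo_resume,
};
#define FOO_PM_OPS (&foo_pm_ops)
#else
#define FOO_PM_OPS NULL         /* no PM callbacks in !CONFIG_PM builds */
#endif

static struct platform_driver foo_driver = {
        .driver = {
                .name   = "foo",
                .pm     = FOO_PM_OPS,
        },
};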
+ */ + struct regulator *regulator[OMAP3_HS_USB_PORTS]; }; /*-------------------------------------------------------------------------*/ @@ -546,6 +551,8 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) int irq = platform_get_irq(pdev, 0); int ret = -ENODEV; + int i; + char supply[7]; if (!pdata) { dev_dbg(&pdev->dev, "missing platform_data\n"); @@ -613,6 +620,21 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) goto err_tll_ioremap; } + /* get ehci regulator and enable */ + for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) { + if (omap->port_mode[i] != EHCI_HCD_OMAP_MODE_PHY) { + omap->regulator[i] = NULL; + continue; + } + snprintf(supply, sizeof(supply), "hsusb%d", i); + omap->regulator[i] = regulator_get(omap->dev, supply); + if (IS_ERR(omap->regulator[i])) + dev_dbg(&pdev->dev, + "failed to get ehci port%d regulator\n", i); + else + regulator_enable(omap->regulator[i]); + } + ret = omap_start_ehc(omap, hcd); if (ret) { dev_dbg(&pdev->dev, "failed to start ehci\n"); @@ -622,13 +644,12 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) omap->ehci->regs = hcd->regs + HC_LENGTH(readl(&omap->ehci->caps->hc_capbase)); + dbg_hcs_params(omap->ehci, "reset"); + dbg_hcc_params(omap->ehci, "reset"); + /* cache this readonly data; minimize chip reads */ omap->ehci->hcs_params = readl(&omap->ehci->caps->hcs_params); - /* SET 1 micro-frame Interrupt interval */ - writel(readl(&omap->ehci->regs->command) | (1 << 16), - &omap->ehci->regs->command); - ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED); if (ret) { dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret); @@ -641,6 +662,12 @@ err_add_hcd: omap_stop_ehc(omap, hcd); err_start: + for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) { + if (omap->regulator[i]) { + regulator_disable(omap->regulator[i]); + regulator_put(omap->regulator[i]); + } + } iounmap(omap->tll_base); err_tll_ioremap: @@ -674,13 +701,21 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev) { struct ehci_hcd_omap *omap = platform_get_drvdata(pdev); struct usb_hcd *hcd = ehci_to_hcd(omap->ehci); + int i; usb_remove_hcd(hcd); omap_stop_ehc(omap, hcd); iounmap(hcd->regs); + for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) { + if (omap->regulator[i]) { + regulator_disable(omap->regulator[i]); + regulator_put(omap->regulator[i]); + } + } iounmap(omap->tll_base); iounmap(omap->uhh_base); usb_put_hcd(hcd); + kfree(omap); return 0; } diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c index 1d283e1b2b8..0f87dc72820 100644 --- a/drivers/usb/host/ehci-orion.c +++ b/drivers/usb/host/ehci-orion.c @@ -222,14 +222,14 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev) goto err1; } - if (!request_mem_region(res->start, res->end - res->start + 1, + if (!request_mem_region(res->start, resource_size(res), ehci_orion_hc_driver.description)) { dev_dbg(&pdev->dev, "controller already in use\n"); err = -EBUSY; goto err1; } - regs = ioremap(res->start, res->end - res->start + 1); + regs = ioremap(res->start, resource_size(res)); if (regs == NULL) { dev_dbg(&pdev->dev, "error mapping memory\n"); err = -EFAULT; @@ -244,7 +244,7 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev) } hcd->rsrc_start = res->start; - hcd->rsrc_len = res->end - res->start + 1; + hcd->rsrc_len = resource_size(res); hcd->regs = regs; ehci = hcd_to_ehci(hcd); @@ -287,7 +287,7 @@ err4: err3: iounmap(regs); err2: - release_mem_region(res->start, res->end - res->start + 1); + release_mem_region(res->start, 
resource_size(res)); err1: dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), err); diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c index 36f96da129f..8df33b8a634 100644 --- a/drivers/usb/host/ehci-ppc-of.c +++ b/drivers/usb/host/ehci-ppc-of.c @@ -134,21 +134,21 @@ ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match) hcd->rsrc_len = res.end - res.start + 1; if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { - printk(KERN_ERR __FILE__ ": request_mem_region failed\n"); + printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__); rv = -EBUSY; goto err_rmr; } irq = irq_of_parse_and_map(dn, 0); if (irq == NO_IRQ) { - printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n"); + printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__); rv = -EBUSY; goto err_irq; } hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); if (!hcd->regs) { - printk(KERN_ERR __FILE__ ": ioremap failed\n"); + printk(KERN_ERR "%s: ioremap failed\n", __FILE__); rv = -ENOMEM; goto err_ioremap; } @@ -161,9 +161,9 @@ ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match) ehci->ohci_hcctrl_reg = ioremap(res.start + OHCI_HCCTRL_OFFSET, OHCI_HCCTRL_LEN); else - pr_debug(__FILE__ ": no ohci offset in fdt\n"); + pr_debug("%s: no ohci offset in fdt\n", __FILE__); if (!ehci->ohci_hcctrl_reg) { - pr_debug(__FILE__ ": ioremap for ohci hcctrl failed\n"); + pr_debug("%s: ioremap for ohci hcctrl failed\n", __FILE__); } else { ehci->has_amcc_usb23 = 1; } @@ -241,7 +241,7 @@ static int ehci_hcd_ppc_of_remove(struct of_device *op) else release_mem_region(res.start, 0x4); else - pr_debug(__FILE__ ": no ohci offset in fdt\n"); + pr_debug("%s: no ohci offset in fdt\n", __FILE__); of_node_put(np); } @@ -264,7 +264,7 @@ static int ehci_hcd_ppc_of_shutdown(struct of_device *op) } -static struct of_device_id ehci_hcd_ppc_of_match[] = { +static const struct of_device_id ehci_hcd_ppc_of_match[] = { { .compatible = "usb-ehci", }, diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 1e391e624c8..39340ae00ac 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -510,6 +510,8 @@ static int disable_periodic (struct ehci_hcd *ehci) ehci_writel(ehci, cmd, &ehci->regs->command); /* posted write ... */ + free_cached_itd_list(ehci); + ehci->next_uframe = -1; return 0; } @@ -2322,9 +2324,13 @@ restart: * No need to check for activity unless the * frame is current. 
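The ehci-ppc-of hunks above (and the ehci-xilinx-of ones below) stop splicing __FILE__ into the printk format string and pass it as a "%s" argument instead; both forms print the same text, but the second keeps the format string identical across files so the literals can be shared. A trivial stand-alone illustration, with DRV_FILE standing in for __FILE__:

#include <stdio.h>

#define DRV_FILE "ehci-foo.c"   /* stands in for __FILE__ */

int main(void)
{
        /* old style: the file name is pasted into the format string itself */
        printf(DRV_FILE ": request_mem_region failed\n");
        /* new style: constant format string, file name passed as an argument */
        printf("%s: request_mem_region failed\n", DRV_FILE);
        return 0;
}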
*/ - if (frame == clock_frame && live && - (q.sitd->hw_results & - SITD_ACTIVE(ehci))) { + if (((frame == clock_frame) || + (((frame + 1) % ehci->periodic_size) + == clock_frame)) + && live + && (q.sitd->hw_results & + SITD_ACTIVE(ehci))) { + incomplete = true; q_p = &q.sitd->sitd_next; hw_p = &q.sitd->hw_next; diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c index a5861531ad3..f603bb2c0a8 100644 --- a/drivers/usb/host/ehci-xilinx-of.c +++ b/drivers/usb/host/ehci-xilinx-of.c @@ -177,21 +177,21 @@ ehci_hcd_xilinx_of_probe(struct of_device *op, const struct of_device_id *match) hcd->rsrc_len = res.end - res.start + 1; if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { - printk(KERN_ERR __FILE__ ": request_mem_region failed\n"); + printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__); rv = -EBUSY; goto err_rmr; } irq = irq_of_parse_and_map(dn, 0); if (irq == NO_IRQ) { - printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n"); + printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__); rv = -EBUSY; goto err_irq; } hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); if (!hcd->regs) { - printk(KERN_ERR __FILE__ ": ioremap failed\n"); + printk(KERN_ERR "%s: ioremap failed\n", __FILE__); rv = -ENOMEM; goto err_ioremap; } @@ -281,7 +281,7 @@ static int ehci_hcd_xilinx_of_shutdown(struct of_device *op) } -static struct of_device_id ehci_hcd_xilinx_of_match[] = { +static const struct of_device_id ehci_hcd_xilinx_of_match[] = { {.compatible = "xlnx,xps-usb-host-1.00.a",}, {}, }; diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c index 78e7c3cfcb7..5dcfb3de994 100644 --- a/drivers/usb/host/fhci-hcd.c +++ b/drivers/usb/host/fhci-hcd.c @@ -433,7 +433,7 @@ static int fhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, return -ENOMEM; /* allocate the private part of the URB */ - urb_priv->tds = kzalloc(size * sizeof(struct td), mem_flags); + urb_priv->tds = kcalloc(size, sizeof(*urb_priv->tds), mem_flags); if (!urb_priv->tds) { kfree(urb_priv); return -ENOMEM; @@ -805,7 +805,7 @@ static int __devexit of_fhci_remove(struct of_device *ofdev) return fhci_remove(&ofdev->dev); } -static struct of_device_id of_fhci_match[] = { +static const struct of_device_id of_fhci_match[] = { { .compatible = "fsl,mpc8323-qe-usb", }, {}, }; diff --git a/drivers/usb/host/imx21-dbg.c b/drivers/usb/host/imx21-dbg.c new file mode 100644 index 00000000000..512f647448c --- /dev/null +++ b/drivers/usb/host/imx21-dbg.c @@ -0,0 +1,527 @@ +/* + * Copyright (c) 2009 by Martin Fuzzey + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
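The fhci-hcd hunk above swaps kzalloc(size * sizeof(struct td), ...) for kcalloc(size, sizeof(*urb_priv->tds), ...); besides the sizeof(*ptr) style, kcalloc() fails cleanly when the element-count multiplication would overflow instead of silently under-allocating. A small user-space model of that check (checked_calloc is a made-up name):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Toy model of the overflow check kcalloc() performs before allocating. */
static void *checked_calloc(size_t n, size_t size)
{
        if (size != 0 && n > SIZE_MAX / size)
                return NULL;                    /* n * size would overflow */
        return calloc(n, size);                 /* zeroed, like kcalloc() */
}

int main(void)
{
        void *ok  = checked_calloc(16, sizeof(uint64_t));
        void *bad = checked_calloc(SIZE_MAX / 4, 16);

        printf("ok=%p bad=%p\n", ok, bad);      /* bad is NULL */
        free(ok);
        return 0;
}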
+ */ + +/* this file is part of imx21-hcd.c */ + +#ifndef DEBUG + +static inline void create_debug_files(struct imx21 *imx21) { } +static inline void remove_debug_files(struct imx21 *imx21) { } +static inline void debug_urb_submitted(struct imx21 *imx21, struct urb *urb) {} +static inline void debug_urb_completed(struct imx21 *imx21, struct urb *urb, + int status) {} +static inline void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb) {} +static inline void debug_urb_queued_for_etd(struct imx21 *imx21, + struct urb *urb) {} +static inline void debug_urb_queued_for_dmem(struct imx21 *imx21, + struct urb *urb) {} +static inline void debug_etd_allocated(struct imx21 *imx21) {} +static inline void debug_etd_freed(struct imx21 *imx21) {} +static inline void debug_dmem_allocated(struct imx21 *imx21, int size) {} +static inline void debug_dmem_freed(struct imx21 *imx21, int size) {} +static inline void debug_isoc_submitted(struct imx21 *imx21, + int frame, struct td *td) {} +static inline void debug_isoc_completed(struct imx21 *imx21, + int frame, struct td *td, int cc, int len) {} + +#else + +#include <linux/debugfs.h> +#include <linux/seq_file.h> + +static const char *dir_labels[] = { + "TD 0", + "OUT", + "IN", + "TD 1" +}; + +static const char *speed_labels[] = { + "Full", + "Low" +}; + +static const char *format_labels[] = { + "Control", + "ISO", + "Bulk", + "Interrupt" +}; + +static inline struct debug_stats *stats_for_urb(struct imx21 *imx21, + struct urb *urb) +{ + return usb_pipeisoc(urb->pipe) ? + &imx21->isoc_stats : &imx21->nonisoc_stats; +} + +static void debug_urb_submitted(struct imx21 *imx21, struct urb *urb) +{ + stats_for_urb(imx21, urb)->submitted++; +} + +static void debug_urb_completed(struct imx21 *imx21, struct urb *urb, int st) +{ + if (st) + stats_for_urb(imx21, urb)->completed_failed++; + else + stats_for_urb(imx21, urb)->completed_ok++; +} + +static void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb) +{ + stats_for_urb(imx21, urb)->unlinked++; +} + +static void debug_urb_queued_for_etd(struct imx21 *imx21, struct urb *urb) +{ + stats_for_urb(imx21, urb)->queue_etd++; +} + +static void debug_urb_queued_for_dmem(struct imx21 *imx21, struct urb *urb) +{ + stats_for_urb(imx21, urb)->queue_dmem++; +} + +static inline void debug_etd_allocated(struct imx21 *imx21) +{ + imx21->etd_usage.maximum = max( + ++(imx21->etd_usage.value), + imx21->etd_usage.maximum); +} + +static inline void debug_etd_freed(struct imx21 *imx21) +{ + imx21->etd_usage.value--; +} + +static inline void debug_dmem_allocated(struct imx21 *imx21, int size) +{ + imx21->dmem_usage.value += size; + imx21->dmem_usage.maximum = max( + imx21->dmem_usage.value, + imx21->dmem_usage.maximum); +} + +static inline void debug_dmem_freed(struct imx21 *imx21, int size) +{ + imx21->dmem_usage.value -= size; +} + + +static void debug_isoc_submitted(struct imx21 *imx21, + int frame, struct td *td) +{ + struct debug_isoc_trace *trace = &imx21->isoc_trace[ + imx21->isoc_trace_index++]; + + imx21->isoc_trace_index %= ARRAY_SIZE(imx21->isoc_trace); + trace->schedule_frame = td->frame; + trace->submit_frame = frame; + trace->request_len = td->len; + trace->td = td; +} + +static inline void debug_isoc_completed(struct imx21 *imx21, + int frame, struct td *td, int cc, int len) +{ + struct debug_isoc_trace *trace, *trace_failed; + int i; + int found = 0; + + trace = imx21->isoc_trace; + for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++) { + if (trace->td == td) { + trace->done_frame = frame; + 
trace->done_len = len; + trace->cc = cc; + trace->td = NULL; + found = 1; + break; + } + } + + if (found && cc) { + trace_failed = &imx21->isoc_trace_failed[ + imx21->isoc_trace_index_failed++]; + + imx21->isoc_trace_index_failed %= ARRAY_SIZE( + imx21->isoc_trace_failed); + *trace_failed = *trace; + } +} + + +static char *format_ep(struct usb_host_endpoint *ep, char *buf, int bufsize) +{ + if (ep) + snprintf(buf, bufsize, "ep_%02x (type:%02X kaddr:%p)", + ep->desc.bEndpointAddress, + usb_endpoint_type(&ep->desc), + ep); + else + snprintf(buf, bufsize, "none"); + return buf; +} + +static char *format_etd_dword0(u32 value, char *buf, int bufsize) +{ + snprintf(buf, bufsize, + "addr=%d ep=%d dir=%s speed=%s format=%s halted=%d", + value & 0x7F, + (value >> DW0_ENDPNT) & 0x0F, + dir_labels[(value >> DW0_DIRECT) & 0x03], + speed_labels[(value >> DW0_SPEED) & 0x01], + format_labels[(value >> DW0_FORMAT) & 0x03], + (value >> DW0_HALTED) & 0x01); + return buf; +} + +static int debug_status_show(struct seq_file *s, void *v) +{ + struct imx21 *imx21 = s->private; + int etds_allocated = 0; + int etds_sw_busy = 0; + int etds_hw_busy = 0; + int dmem_blocks = 0; + int queued_for_etd = 0; + int queued_for_dmem = 0; + unsigned int dmem_bytes = 0; + int i; + struct etd_priv *etd; + u32 etd_enable_mask; + unsigned long flags; + struct imx21_dmem_area *dmem; + struct ep_priv *ep_priv; + + spin_lock_irqsave(&imx21->lock, flags); + + etd_enable_mask = readl(imx21->regs + USBH_ETDENSET); + for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) { + if (etd->alloc) + etds_allocated++; + if (etd->urb) + etds_sw_busy++; + if (etd_enable_mask & (1<<i)) + etds_hw_busy++; + } + + list_for_each_entry(dmem, &imx21->dmem_list, list) { + dmem_bytes += dmem->size; + dmem_blocks++; + } + + list_for_each_entry(ep_priv, &imx21->queue_for_etd, queue) + queued_for_etd++; + + list_for_each_entry(etd, &imx21->queue_for_dmem, queue) + queued_for_dmem++; + + spin_unlock_irqrestore(&imx21->lock, flags); + + seq_printf(s, + "Frame: %d\n" + "ETDs allocated: %d/%d (max=%d)\n" + "ETDs in use sw: %d\n" + "ETDs in use hw: %d\n" + "DMEM alocated: %d/%d (max=%d)\n" + "DMEM blocks: %d\n" + "Queued waiting for ETD: %d\n" + "Queued waiting for DMEM: %d\n", + readl(imx21->regs + USBH_FRMNUB) & 0xFFFF, + etds_allocated, USB_NUM_ETD, imx21->etd_usage.maximum, + etds_sw_busy, + etds_hw_busy, + dmem_bytes, DMEM_SIZE, imx21->dmem_usage.maximum, + dmem_blocks, + queued_for_etd, + queued_for_dmem); + + return 0; +} + +static int debug_dmem_show(struct seq_file *s, void *v) +{ + struct imx21 *imx21 = s->private; + struct imx21_dmem_area *dmem; + unsigned long flags; + char ep_text[40]; + + spin_lock_irqsave(&imx21->lock, flags); + + list_for_each_entry(dmem, &imx21->dmem_list, list) + seq_printf(s, + "%04X: size=0x%X " + "ep=%s\n", + dmem->offset, dmem->size, + format_ep(dmem->ep, ep_text, sizeof(ep_text))); + + spin_unlock_irqrestore(&imx21->lock, flags); + + return 0; +} + +static int debug_etd_show(struct seq_file *s, void *v) +{ + struct imx21 *imx21 = s->private; + struct etd_priv *etd; + char buf[60]; + u32 dword; + int i, j; + unsigned long flags; + + spin_lock_irqsave(&imx21->lock, flags); + + for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) { + int state = -1; + struct urb_priv *urb_priv; + if (etd->urb) { + urb_priv = etd->urb->hcpriv; + if (urb_priv) + state = urb_priv->state; + } + + seq_printf(s, + "etd_num: %d\n" + "ep: %s\n" + "alloc: %d\n" + "len: %d\n" + "busy sw: %d\n" + "busy hw: %d\n" + "urb state: %d\n" + "current 
urb: %p\n", + + i, + format_ep(etd->ep, buf, sizeof(buf)), + etd->alloc, + etd->len, + etd->urb != NULL, + (readl(imx21->regs + USBH_ETDENSET) & (1 << i)) > 0, + state, + etd->urb); + + for (j = 0; j < 4; j++) { + dword = etd_readl(imx21, i, j); + switch (j) { + case 0: + format_etd_dword0(dword, buf, sizeof(buf)); + break; + case 2: + snprintf(buf, sizeof(buf), + "cc=0X%02X", dword >> DW2_COMPCODE); + break; + default: + *buf = 0; + break; + } + seq_printf(s, + "dword %d: submitted=%08X cur=%08X [%s]\n", + j, + etd->submitted_dwords[j], + dword, + buf); + } + seq_printf(s, "\n"); + } + + spin_unlock_irqrestore(&imx21->lock, flags); + + return 0; +} + +static void debug_statistics_show_one(struct seq_file *s, + const char *name, struct debug_stats *stats) +{ + seq_printf(s, "%s:\n" + "submitted URBs: %lu\n" + "completed OK: %lu\n" + "completed failed: %lu\n" + "unlinked: %lu\n" + "queued for ETD: %lu\n" + "queued for DMEM: %lu\n\n", + name, + stats->submitted, + stats->completed_ok, + stats->completed_failed, + stats->unlinked, + stats->queue_etd, + stats->queue_dmem); +} + +static int debug_statistics_show(struct seq_file *s, void *v) +{ + struct imx21 *imx21 = s->private; + unsigned long flags; + + spin_lock_irqsave(&imx21->lock, flags); + + debug_statistics_show_one(s, "nonisoc", &imx21->nonisoc_stats); + debug_statistics_show_one(s, "isoc", &imx21->isoc_stats); + seq_printf(s, "unblock kludge triggers: %lu\n", imx21->debug_unblocks); + spin_unlock_irqrestore(&imx21->lock, flags); + + return 0; +} + +static void debug_isoc_show_one(struct seq_file *s, + const char *name, int index, struct debug_isoc_trace *trace) +{ + seq_printf(s, "%s %d:\n" + "cc=0X%02X\n" + "scheduled frame %d (%d)\n" + "submittted frame %d (%d)\n" + "completed frame %d (%d)\n" + "requested length=%d\n" + "completed length=%d\n\n", + name, index, + trace->cc, + trace->schedule_frame, trace->schedule_frame & 0xFFFF, + trace->submit_frame, trace->submit_frame & 0xFFFF, + trace->done_frame, trace->done_frame & 0xFFFF, + trace->request_len, + trace->done_len); +} + +static int debug_isoc_show(struct seq_file *s, void *v) +{ + struct imx21 *imx21 = s->private; + struct debug_isoc_trace *trace; + unsigned long flags; + int i; + + spin_lock_irqsave(&imx21->lock, flags); + + trace = imx21->isoc_trace_failed; + for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace_failed); i++, trace++) + debug_isoc_show_one(s, "isoc failed", i, trace); + + trace = imx21->isoc_trace; + for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++) + debug_isoc_show_one(s, "isoc", i, trace); + + spin_unlock_irqrestore(&imx21->lock, flags); + + return 0; +} + +static int debug_status_open(struct inode *inode, struct file *file) +{ + return single_open(file, debug_status_show, inode->i_private); +} + +static int debug_dmem_open(struct inode *inode, struct file *file) +{ + return single_open(file, debug_dmem_show, inode->i_private); +} + +static int debug_etd_open(struct inode *inode, struct file *file) +{ + return single_open(file, debug_etd_show, inode->i_private); +} + +static int debug_statistics_open(struct inode *inode, struct file *file) +{ + return single_open(file, debug_statistics_show, inode->i_private); +} + +static int debug_isoc_open(struct inode *inode, struct file *file) +{ + return single_open(file, debug_isoc_show, inode->i_private); +} + +static const struct file_operations debug_status_fops = { + .open = debug_status_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations 
debug_dmem_fops = { + .open = debug_dmem_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations debug_etd_fops = { + .open = debug_etd_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations debug_statistics_fops = { + .open = debug_statistics_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations debug_isoc_fops = { + .open = debug_isoc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static void create_debug_files(struct imx21 *imx21) +{ + imx21->debug_root = debugfs_create_dir(dev_name(imx21->dev), NULL); + if (!imx21->debug_root) + goto failed_create_rootdir; + + if (!debugfs_create_file("status", S_IRUGO, + imx21->debug_root, imx21, &debug_status_fops)) + goto failed_create; + + if (!debugfs_create_file("dmem", S_IRUGO, + imx21->debug_root, imx21, &debug_dmem_fops)) + goto failed_create; + + if (!debugfs_create_file("etd", S_IRUGO, + imx21->debug_root, imx21, &debug_etd_fops)) + goto failed_create; + + if (!debugfs_create_file("statistics", S_IRUGO, + imx21->debug_root, imx21, &debug_statistics_fops)) + goto failed_create; + + if (!debugfs_create_file("isoc", S_IRUGO, + imx21->debug_root, imx21, &debug_isoc_fops)) + goto failed_create; + + return; + +failed_create: + debugfs_remove_recursive(imx21->debug_root); + +failed_create_rootdir: + imx21->debug_root = NULL; +} + + +static void remove_debug_files(struct imx21 *imx21) +{ + if (imx21->debug_root) { + debugfs_remove_recursive(imx21->debug_root); + imx21->debug_root = NULL; + } +} + +#endif + diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c new file mode 100644 index 00000000000..213e270e1c2 --- /dev/null +++ b/drivers/usb/host/imx21-hcd.c @@ -0,0 +1,1789 @@ +/* + * USB Host Controller Driver for IMX21 + * + * Copyright (C) 2006 Loping Dog Embedded Systems + * Copyright (C) 2009 Martin Fuzzey + * Originally written by Jay Monkman <jtm@lopingdog.com> + * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + + + /* + * The i.MX21 USB hardware contains + * * 32 transfer descriptors (called ETDs) + * * 4Kb of Data memory + * + * The data memory is shared between the host and fuction controlers + * (but this driver only supports the host controler) + * + * So setting up a transfer involves: + * * Allocating a ETD + * * Fill in ETD with appropriate information + * * Allocating data memory (and putting the offset in the ETD) + * * Activate the ETD + * * Get interrupt when done. + * + * An ETD is assigned to each active endpoint. 
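The comment above lists the per-transfer steps. Condensed into code, using the helpers this file defines further down (alloc_etd, alloc_dmem, setup_etd_dword0, etd_writel, activate_etd) and with all error handling and the X/Y double-buffer programming left out, the flow is roughly the following sketch; schedule_nonisoc_etd() later in the file is the real implementation:

/* Illustrative sketch only, not a function from the driver. */
static void imx21_submit_sketch(struct imx21 *imx21, struct urb *urb,
                                u8 dir, u16 maxpacket)
{
        int etd_num = alloc_etd(imx21);                   /* reserve one of the 32 ETDs */
        int dmem = alloc_dmem(imx21, maxpacket, urb->ep); /* reserve on-chip data memory */

        setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket); /* describe the endpoint */
        etd_writel(imx21, etd_num, 1, dmem);              /* point the ETD at its buffer */
        activate_etd(imx21, etd_num, urb->transfer_dma, dir);  /* go; completion arrives by IRQ */
}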
+ * + * Low resource (ETD and Data memory) situations are handled differently for + * isochronous and non insosynchronous transactions : + * + * Non ISOC transfers are queued if either ETDs or Data memory are unavailable + * + * ISOC transfers use 2 ETDs per endpoint to achieve double buffering. + * They allocate both ETDs and Data memory during URB submission + * (and fail if unavailable). + */ + +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/platform_device.h> +#include <linux/usb.h> + +#include "../core/hcd.h" +#include "imx21-hcd.h" + +#ifdef DEBUG +#define DEBUG_LOG_FRAME(imx21, etd, event) \ + (etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB) +#else +#define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0) +#endif + +static const char hcd_name[] = "imx21-hcd"; + +static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd) +{ + return (struct imx21 *)hcd->hcd_priv; +} + + +/* =========================================== */ +/* Hardware access helpers */ +/* =========================================== */ + +static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask) +{ + void __iomem *reg = imx21->regs + offset; + writel(readl(reg) | mask, reg); +} + +static inline void clear_register_bits(struct imx21 *imx21, + u32 offset, u32 mask) +{ + void __iomem *reg = imx21->regs + offset; + writel(readl(reg) & ~mask, reg); +} + +static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask) +{ + void __iomem *reg = imx21->regs + offset; + + if (readl(reg) & mask) + writel(mask, reg); +} + +static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask) +{ + void __iomem *reg = imx21->regs + offset; + + if (!(readl(reg) & mask)) + writel(mask, reg); +} + +static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value) +{ + writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword)); +} + +static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword) +{ + return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword)); +} + +static inline int wrap_frame(int counter) +{ + return counter & 0xFFFF; +} + +static inline int frame_after(int frame, int after) +{ + /* handle wrapping like jiffies time_afer */ + return (s16)((s16)after - (s16)frame) < 0; +} + +static int imx21_hc_get_frame(struct usb_hcd *hcd) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + + return wrap_frame(readl(imx21->regs + USBH_FRMNUB)); +} + + +#include "imx21-dbg.c" + +/* =========================================== */ +/* ETD management */ +/* =========================================== */ + +static int alloc_etd(struct imx21 *imx21) +{ + int i; + struct etd_priv *etd = imx21->etd; + + for (i = 0; i < USB_NUM_ETD; i++, etd++) { + if (etd->alloc == 0) { + memset(etd, 0, sizeof(imx21->etd[0])); + etd->alloc = 1; + debug_etd_allocated(imx21); + return i; + } + } + return -1; +} + +static void disactivate_etd(struct imx21 *imx21, int num) +{ + int etd_mask = (1 << num); + struct etd_priv *etd = &imx21->etd[num]; + + writel(etd_mask, imx21->regs + USBH_ETDENCLR); + clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask); + writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR); + clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask); + + etd->active_count = 0; + + DEBUG_LOG_FRAME(imx21, etd, disactivated); +} + +static void reset_etd(struct imx21 *imx21, int num) +{ + struct etd_priv *etd = imx21->etd + num; + int i; + + disactivate_etd(imx21, num); + + for (i = 0; i < 4; i++) + etd_writel(imx21, num, i, 
0); + etd->urb = NULL; + etd->ep = NULL; + etd->td = NULL;; +} + +static void free_etd(struct imx21 *imx21, int num) +{ + if (num < 0) + return; + + if (num >= USB_NUM_ETD) { + dev_err(imx21->dev, "BAD etd=%d!\n", num); + return; + } + if (imx21->etd[num].alloc == 0) { + dev_err(imx21->dev, "ETD %d already free!\n", num); + return; + } + + debug_etd_freed(imx21); + reset_etd(imx21, num); + memset(&imx21->etd[num], 0, sizeof(imx21->etd[0])); +} + + +static void setup_etd_dword0(struct imx21 *imx21, + int etd_num, struct urb *urb, u8 dir, u16 maxpacket) +{ + etd_writel(imx21, etd_num, 0, + ((u32) usb_pipedevice(urb->pipe)) << DW0_ADDRESS | + ((u32) usb_pipeendpoint(urb->pipe) << DW0_ENDPNT) | + ((u32) dir << DW0_DIRECT) | + ((u32) ((urb->dev->speed == USB_SPEED_LOW) ? + 1 : 0) << DW0_SPEED) | + ((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT) | + ((u32) maxpacket << DW0_MAXPKTSIZ)); +} + +static void activate_etd(struct imx21 *imx21, + int etd_num, dma_addr_t dma, u8 dir) +{ + u32 etd_mask = 1 << etd_num; + struct etd_priv *etd = &imx21->etd[etd_num]; + + clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask); + set_register_bits(imx21, USBH_ETDDONEEN, etd_mask); + clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask); + clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask); + + if (dma) { + set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask); + clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask); + clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask); + writel(dma, imx21->regs + USB_ETDSMSA(etd_num)); + set_register_bits(imx21, USB_ETDDMAEN, etd_mask); + } else { + if (dir != TD_DIR_IN) { + /* need to set for ZLP */ + set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask); + set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask); + } + } + + DEBUG_LOG_FRAME(imx21, etd, activated); + +#ifdef DEBUG + if (!etd->active_count) { + int i; + etd->activated_frame = readl(imx21->regs + USBH_FRMNUB); + etd->disactivated_frame = -1; + etd->last_int_frame = -1; + etd->last_req_frame = -1; + + for (i = 0; i < 4; i++) + etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i); + } +#endif + + etd->active_count = 1; + writel(etd_mask, imx21->regs + USBH_ETDENSET); +} + +/* =========================================== */ +/* Data memory management */ +/* =========================================== */ + +static int alloc_dmem(struct imx21 *imx21, unsigned int size, + struct usb_host_endpoint *ep) +{ + unsigned int offset = 0; + struct imx21_dmem_area *area; + struct imx21_dmem_area *tmp; + + size += (~size + 1) & 0x3; /* Round to 4 byte multiple */ + + if (size > DMEM_SIZE) { + dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n", + size, DMEM_SIZE); + return -EINVAL; + } + + list_for_each_entry(tmp, &imx21->dmem_list, list) { + if ((size + offset) < offset) + goto fail; + if ((size + offset) <= tmp->offset) + break; + offset = tmp->size + tmp->offset; + if ((offset + size) > DMEM_SIZE) + goto fail; + } + + area = kmalloc(sizeof(struct imx21_dmem_area), GFP_ATOMIC); + if (area == NULL) + return -ENOMEM; + + area->ep = ep; + area->offset = offset; + area->size = size; + list_add_tail(&area->list, &tmp->list); + debug_dmem_allocated(imx21, size); + return offset; + +fail: + return -ENOMEM; +} + +/* Memory now available for a queued ETD - activate it */ +static void activate_queued_etd(struct imx21 *imx21, + struct etd_priv *etd, u32 dmem_offset) +{ + struct urb_priv *urb_priv = etd->urb->hcpriv; + int etd_num = etd - &imx21->etd[0]; + u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD; + u8 dir = (etd_readl(imx21, 
etd_num, 2) >> DW2_DIRPID) & 0x03; + + dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n", + etd_num); + etd_writel(imx21, etd_num, 1, + ((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset); + + urb_priv->active = 1; + activate_etd(imx21, etd_num, etd->dma_handle, dir); +} + +static void free_dmem(struct imx21 *imx21, int offset) +{ + struct imx21_dmem_area *area; + struct etd_priv *etd, *tmp; + int found = 0; + + list_for_each_entry(area, &imx21->dmem_list, list) { + if (area->offset == offset) { + debug_dmem_freed(imx21, area->size); + list_del(&area->list); + kfree(area); + found = 1; + break; + } + } + + if (!found) { + dev_err(imx21->dev, + "Trying to free unallocated DMEM %d\n", offset); + return; + } + + /* Try again to allocate memory for anything we've queued */ + list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) { + offset = alloc_dmem(imx21, etd->dmem_size, etd->ep); + if (offset >= 0) { + list_del(&etd->queue); + activate_queued_etd(imx21, etd, (u32)offset); + } + } +} + +static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep) +{ + struct imx21_dmem_area *area, *tmp; + + list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) { + if (area->ep == ep) { + dev_err(imx21->dev, + "Active DMEM %d for disabled ep=%p\n", + area->offset, ep); + list_del(&area->list); + kfree(area); + } + } +} + + +/* =========================================== */ +/* End handling */ +/* =========================================== */ +static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb); + +/* Endpoint now idle - release it's ETD(s) or asssign to queued request */ +static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv) +{ + int etd_num; + int i; + + for (i = 0; i < NUM_ISO_ETDS; i++) { + etd_num = ep_priv->etd[i]; + if (etd_num < 0) + continue; + + ep_priv->etd[i] = -1; + if (list_empty(&imx21->queue_for_etd)) { + free_etd(imx21, etd_num); + continue; + } + + dev_dbg(imx21->dev, + "assigning idle etd %d for queued request\n", etd_num); + ep_priv = list_first_entry(&imx21->queue_for_etd, + struct ep_priv, queue); + list_del(&ep_priv->queue); + reset_etd(imx21, etd_num); + ep_priv->waiting_etd = 0; + ep_priv->etd[i] = etd_num; + + if (list_empty(&ep_priv->ep->urb_list)) { + dev_err(imx21->dev, "No urb for queued ep!\n"); + continue; + } + schedule_nonisoc_etd(imx21, list_first_entry( + &ep_priv->ep->urb_list, struct urb, urb_list)); + } +} + +static void urb_done(struct usb_hcd *hcd, struct urb *urb, int status) +__releases(imx21->lock) +__acquires(imx21->lock) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + struct ep_priv *ep_priv = urb->ep->hcpriv; + struct urb_priv *urb_priv = urb->hcpriv; + + debug_urb_completed(imx21, urb, status); + dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status); + + kfree(urb_priv->isoc_td); + kfree(urb->hcpriv); + urb->hcpriv = NULL; + usb_hcd_unlink_urb_from_ep(hcd, urb); + spin_unlock(&imx21->lock); + usb_hcd_giveback_urb(hcd, urb, status); + spin_lock(&imx21->lock); + if (list_empty(&ep_priv->ep->urb_list)) + ep_idle(imx21, ep_priv); +} + +/* =========================================== */ +/* ISOC Handling ... 
*/ +/* =========================================== */ + +static void schedule_isoc_etds(struct usb_hcd *hcd, + struct usb_host_endpoint *ep) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + struct ep_priv *ep_priv = ep->hcpriv; + struct etd_priv *etd; + struct urb_priv *urb_priv; + struct td *td; + int etd_num; + int i; + int cur_frame; + u8 dir; + + for (i = 0; i < NUM_ISO_ETDS; i++) { +too_late: + if (list_empty(&ep_priv->td_list)) + break; + + etd_num = ep_priv->etd[i]; + if (etd_num < 0) + break; + + etd = &imx21->etd[etd_num]; + if (etd->urb) + continue; + + td = list_entry(ep_priv->td_list.next, struct td, list); + list_del(&td->list); + urb_priv = td->urb->hcpriv; + + cur_frame = imx21_hc_get_frame(hcd); + if (frame_after(cur_frame, td->frame)) { + dev_dbg(imx21->dev, "isoc too late frame %d > %d\n", + cur_frame, td->frame); + urb_priv->isoc_status = -EXDEV; + td->urb->iso_frame_desc[ + td->isoc_index].actual_length = 0; + td->urb->iso_frame_desc[td->isoc_index].status = -EXDEV; + if (--urb_priv->isoc_remaining == 0) + urb_done(hcd, td->urb, urb_priv->isoc_status); + goto too_late; + } + + urb_priv->active = 1; + etd->td = td; + etd->ep = td->ep; + etd->urb = td->urb; + etd->len = td->len; + + debug_isoc_submitted(imx21, cur_frame, td); + + dir = usb_pipeout(td->urb->pipe) ? TD_DIR_OUT : TD_DIR_IN; + setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size); + etd_writel(imx21, etd_num, 1, etd->dmem_offset); + etd_writel(imx21, etd_num, 2, + (TD_NOTACCESSED << DW2_COMPCODE) | + ((td->frame & 0xFFFF) << DW2_STARTFRM)); + etd_writel(imx21, etd_num, 3, + (TD_NOTACCESSED << DW3_COMPCODE0) | + (td->len << DW3_PKTLEN0)); + + activate_etd(imx21, etd_num, td->data, dir); + } +} + +static void isoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + int etd_mask = 1 << etd_num; + struct urb_priv *urb_priv = urb->hcpriv; + struct etd_priv *etd = imx21->etd + etd_num; + struct td *td = etd->td; + struct usb_host_endpoint *ep = etd->ep; + int isoc_index = td->isoc_index; + unsigned int pipe = urb->pipe; + int dir_in = usb_pipein(pipe); + int cc; + int bytes_xfrd; + + disactivate_etd(imx21, etd_num); + + cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf; + bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff; + + /* Input doesn't always fill the buffer, don't generate an error + * when this happens. 
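schedule_isoc_etds() above decides that a TD is too late with frame_after(), and the frame counter it compares (USBH_FRMNUB) is only 16 bits wide, hence the wrap_frame()/signed-16-bit trick defined earlier in the file. A stand-alone illustration of that comparison in plain C, not driver code:

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as the driver's wrap_frame()/frame_after(): the hardware
 * frame number is 16 bits, so frame_after(a, b) -- "frame a comes after
 * frame b" -- is decided with signed 16-bit subtraction, much like
 * jiffies' time_after(). */
static int wrap_frame(int counter)
{
        return counter & 0xFFFF;
}

static int frame_after(int frame, int after)
{
        return (int16_t)((int16_t)after - (int16_t)frame) < 0;
}

int main(void)
{
        printf("%d\n", frame_after(wrap_frame(0x10003), 0xFFFE)); /* 1: frame 3 is after 0xFFFE across the wrap */
        printf("%d\n", frame_after(0x0010, 0x0020));              /* 0: frame 0x10 is not after 0x20 */
        return 0;
}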
+ */ + if (dir_in && (cc == TD_DATAUNDERRUN)) + cc = TD_CC_NOERROR; + + if (cc == TD_NOTACCESSED) + bytes_xfrd = 0; + + debug_isoc_completed(imx21, + imx21_hc_get_frame(hcd), td, cc, bytes_xfrd); + if (cc) { + urb_priv->isoc_status = -EXDEV; + dev_dbg(imx21->dev, + "bad iso cc=0x%X frame=%d sched frame=%d " + "cnt=%d len=%d urb=%p etd=%d index=%d\n", + cc, imx21_hc_get_frame(hcd), td->frame, + bytes_xfrd, td->len, urb, etd_num, isoc_index); + } + + if (dir_in) + clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask); + + urb->actual_length += bytes_xfrd; + urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd; + urb->iso_frame_desc[isoc_index].status = cc_to_error[cc]; + + etd->td = NULL; + etd->urb = NULL; + etd->ep = NULL; + + if (--urb_priv->isoc_remaining == 0) + urb_done(hcd, urb, urb_priv->isoc_status); + + schedule_isoc_etds(hcd, ep); +} + +static struct ep_priv *alloc_isoc_ep( + struct imx21 *imx21, struct usb_host_endpoint *ep) +{ + struct ep_priv *ep_priv; + int i; + + ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC); + if (ep_priv == NULL) + return NULL; + + /* Allocate the ETDs */ + for (i = 0; i < NUM_ISO_ETDS; i++) { + ep_priv->etd[i] = alloc_etd(imx21); + if (ep_priv->etd[i] < 0) { + int j; + dev_err(imx21->dev, "isoc: Couldn't allocate etd\n"); + for (j = 0; j < i; j++) + free_etd(imx21, ep_priv->etd[j]); + goto alloc_etd_failed; + } + imx21->etd[ep_priv->etd[i]].ep = ep; + } + + INIT_LIST_HEAD(&ep_priv->td_list); + ep_priv->ep = ep; + ep->hcpriv = ep_priv; + return ep_priv; + +alloc_etd_failed: + kfree(ep_priv); + return NULL; +} + +static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd, + struct usb_host_endpoint *ep, + struct urb *urb, gfp_t mem_flags) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + struct urb_priv *urb_priv; + unsigned long flags; + struct ep_priv *ep_priv; + struct td *td = NULL; + int i; + int ret; + int cur_frame; + u16 maxpacket; + + urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags); + if (urb_priv == NULL) + return -ENOMEM; + + urb_priv->isoc_td = kzalloc( + sizeof(struct td) * urb->number_of_packets, mem_flags); + if (urb_priv->isoc_td == NULL) { + ret = -ENOMEM; + goto alloc_td_failed; + } + + spin_lock_irqsave(&imx21->lock, flags); + + if (ep->hcpriv == NULL) { + ep_priv = alloc_isoc_ep(imx21, ep); + if (ep_priv == NULL) { + ret = -ENOMEM; + goto alloc_ep_failed; + } + } else { + ep_priv = ep->hcpriv; + } + + ret = usb_hcd_link_urb_to_ep(hcd, urb); + if (ret) + goto link_failed; + + urb->status = -EINPROGRESS; + urb->actual_length = 0; + urb->error_count = 0; + urb->hcpriv = urb_priv; + urb_priv->ep = ep; + + /* allocate data memory for largest packets if not already done */ + maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)); + for (i = 0; i < NUM_ISO_ETDS; i++) { + struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]]; + + if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) { + /* not sure if this can really occur.... 
*/ + dev_err(imx21->dev, "increasing isoc buffer %d->%d\n", + etd->dmem_size, maxpacket); + ret = -EMSGSIZE; + goto alloc_dmem_failed; + } + + if (etd->dmem_size == 0) { + etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep); + if (etd->dmem_offset < 0) { + dev_dbg(imx21->dev, "failed alloc isoc dmem\n"); + ret = -EAGAIN; + goto alloc_dmem_failed; + } + etd->dmem_size = maxpacket; + } + } + + /* calculate frame */ + cur_frame = imx21_hc_get_frame(hcd); + if (urb->transfer_flags & URB_ISO_ASAP) { + if (list_empty(&ep_priv->td_list)) + urb->start_frame = cur_frame + 5; + else + urb->start_frame = list_entry( + ep_priv->td_list.prev, + struct td, list)->frame + urb->interval; + } + urb->start_frame = wrap_frame(urb->start_frame); + if (frame_after(cur_frame, urb->start_frame)) { + dev_dbg(imx21->dev, + "enqueue: adjusting iso start %d (cur=%d) asap=%d\n", + urb->start_frame, cur_frame, + (urb->transfer_flags & URB_ISO_ASAP) != 0); + urb->start_frame = wrap_frame(cur_frame + 1); + } + + /* set up transfers */ + td = urb_priv->isoc_td; + for (i = 0; i < urb->number_of_packets; i++, td++) { + td->ep = ep; + td->urb = urb; + td->len = urb->iso_frame_desc[i].length; + td->isoc_index = i; + td->frame = wrap_frame(urb->start_frame + urb->interval * i); + td->data = urb->transfer_dma + urb->iso_frame_desc[i].offset; + list_add_tail(&td->list, &ep_priv->td_list); + } + + urb_priv->isoc_remaining = urb->number_of_packets; + dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n", + urb->number_of_packets, urb->start_frame, td->frame); + + debug_urb_submitted(imx21, urb); + schedule_isoc_etds(hcd, ep); + + spin_unlock_irqrestore(&imx21->lock, flags); + return 0; + +alloc_dmem_failed: + usb_hcd_unlink_urb_from_ep(hcd, urb); + +link_failed: +alloc_ep_failed: + spin_unlock_irqrestore(&imx21->lock, flags); + kfree(urb_priv->isoc_td); + +alloc_td_failed: + kfree(urb_priv); + return ret; +} + +static void dequeue_isoc_urb(struct imx21 *imx21, + struct urb *urb, struct ep_priv *ep_priv) +{ + struct urb_priv *urb_priv = urb->hcpriv; + struct td *td, *tmp; + int i; + + if (urb_priv->active) { + for (i = 0; i < NUM_ISO_ETDS; i++) { + int etd_num = ep_priv->etd[i]; + if (etd_num != -1 && imx21->etd[etd_num].urb == urb) { + struct etd_priv *etd = imx21->etd + etd_num; + + reset_etd(imx21, etd_num); + if (etd->dmem_size) + free_dmem(imx21, etd->dmem_offset); + etd->dmem_size = 0; + } + } + } + + list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) { + if (td->urb == urb) { + dev_vdbg(imx21->dev, "removing td %p\n", td); + list_del(&td->list); + } + } +} + +/* =========================================== */ +/* NON ISOC Handling ... 
*/ +/* =========================================== */ + +static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb) +{ + unsigned int pipe = urb->pipe; + struct urb_priv *urb_priv = urb->hcpriv; + struct ep_priv *ep_priv = urb_priv->ep->hcpriv; + int state = urb_priv->state; + int etd_num = ep_priv->etd[0]; + struct etd_priv *etd; + int dmem_offset; + u32 count; + u16 etd_buf_size; + u16 maxpacket; + u8 dir; + u8 bufround; + u8 datatoggle; + u8 interval = 0; + u8 relpolpos = 0; + + if (etd_num < 0) { + dev_err(imx21->dev, "No valid ETD\n"); + return; + } + if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num)) + dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num); + + etd = &imx21->etd[etd_num]; + maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe)); + if (!maxpacket) + maxpacket = 8; + + if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) { + if (state == US_CTRL_SETUP) { + dir = TD_DIR_SETUP; + etd->dma_handle = urb->setup_dma; + bufround = 0; + count = 8; + datatoggle = TD_TOGGLE_DATA0; + } else { /* US_CTRL_ACK */ + dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT; + etd->dma_handle = urb->transfer_dma; + bufround = 0; + count = 0; + datatoggle = TD_TOGGLE_DATA1; + } + } else { + dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN; + bufround = (dir == TD_DIR_IN) ? 1 : 0; + etd->dma_handle = urb->transfer_dma; + if (usb_pipebulk(pipe) && (state == US_BULK0)) + count = 0; + else + count = urb->transfer_buffer_length; + + if (usb_pipecontrol(pipe)) { + datatoggle = TD_TOGGLE_DATA1; + } else { + if (usb_gettoggle( + urb->dev, + usb_pipeendpoint(urb->pipe), + usb_pipeout(urb->pipe))) + datatoggle = TD_TOGGLE_DATA1; + else + datatoggle = TD_TOGGLE_DATA0; + } + } + + etd->urb = urb; + etd->ep = urb_priv->ep; + etd->len = count; + + if (usb_pipeint(pipe)) { + interval = urb->interval; + relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff; + } + + /* Write ETD to device memory */ + setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket); + + etd_writel(imx21, etd_num, 2, + (u32) interval << DW2_POLINTERV | + ((u32) relpolpos << DW2_RELPOLPOS) | + ((u32) dir << DW2_DIRPID) | + ((u32) bufround << DW2_BUFROUND) | + ((u32) datatoggle << DW2_DATATOG) | + ((u32) TD_NOTACCESSED << DW2_COMPCODE)); + + /* DMA will always transfer buffer size even if TOBYCNT in DWORD3 + is smaller. Make sure we don't overrun the buffer! + */ + if (count && count < maxpacket) + etd_buf_size = count; + else + etd_buf_size = maxpacket; + + etd_writel(imx21, etd_num, 3, + ((u32) (etd_buf_size - 1) << DW3_BUFSIZE) | (u32) count); + + if (!count) + etd->dma_handle = 0; + + /* allocate x and y buffer space at once */ + etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket; + dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep); + if (dmem_offset < 0) { + /* Setup everything we can in HW and update when we get DMEM */ + etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16); + + dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num); + debug_urb_queued_for_dmem(imx21, urb); + list_add_tail(&etd->queue, &imx21->queue_for_dmem); + return; + } + + etd_writel(imx21, etd_num, 1, + (((u32) dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) | + (u32) dmem_offset); + + urb_priv->active = 1; + + /* enable the ETD to kick off transfer */ + dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n", + etd_num, count, dir != TD_DIR_IN ? 
"out" : "in"); + activate_etd(imx21, etd_num, etd->dma_handle, dir); + +} + +static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + struct etd_priv *etd = &imx21->etd[etd_num]; + u32 etd_mask = 1 << etd_num; + struct urb_priv *urb_priv = urb->hcpriv; + int dir; + u16 xbufaddr; + int cc; + u32 bytes_xfrd; + int etd_done; + + disactivate_etd(imx21, etd_num); + + dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3; + xbufaddr = etd_readl(imx21, etd_num, 1) & 0xffff; + cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf; + bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff); + + /* save toggle carry */ + usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), + usb_pipeout(urb->pipe), + (etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1); + + if (dir == TD_DIR_IN) { + clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask); + clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask); + } + free_dmem(imx21, xbufaddr); + + urb->error_count = 0; + if (!(urb->transfer_flags & URB_SHORT_NOT_OK) + && (cc == TD_DATAUNDERRUN)) + cc = TD_CC_NOERROR; + + if (cc != 0) + dev_vdbg(imx21->dev, "cc is 0x%x\n", cc); + + etd_done = (cc_to_error[cc] != 0); /* stop if error */ + + switch (usb_pipetype(urb->pipe)) { + case PIPE_CONTROL: + switch (urb_priv->state) { + case US_CTRL_SETUP: + if (urb->transfer_buffer_length > 0) + urb_priv->state = US_CTRL_DATA; + else + urb_priv->state = US_CTRL_ACK; + break; + case US_CTRL_DATA: + urb->actual_length += bytes_xfrd; + urb_priv->state = US_CTRL_ACK; + break; + case US_CTRL_ACK: + etd_done = 1; + break; + default: + dev_err(imx21->dev, + "Invalid pipe state %d\n", urb_priv->state); + etd_done = 1; + break; + } + break; + + case PIPE_BULK: + urb->actual_length += bytes_xfrd; + if ((urb_priv->state == US_BULK) + && (urb->transfer_flags & URB_ZERO_PACKET) + && urb->transfer_buffer_length > 0 + && ((urb->transfer_buffer_length % + usb_maxpacket(urb->dev, urb->pipe, + usb_pipeout(urb->pipe))) == 0)) { + /* need a 0-packet */ + urb_priv->state = US_BULK0; + } else { + etd_done = 1; + } + break; + + case PIPE_INTERRUPT: + urb->actual_length += bytes_xfrd; + etd_done = 1; + break; + } + + if (!etd_done) { + dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state); + schedule_nonisoc_etd(imx21, urb); + } else { + struct usb_host_endpoint *ep = urb->ep; + + urb_done(hcd, urb, cc_to_error[cc]); + etd->urb = NULL; + + if (!list_empty(&ep->urb_list)) { + urb = list_first_entry(&ep->urb_list, + struct urb, urb_list); + dev_vdbg(imx21->dev, "next URB %p\n", urb); + schedule_nonisoc_etd(imx21, urb); + } + } +} + +static struct ep_priv *alloc_ep(void) +{ + int i; + struct ep_priv *ep_priv; + + ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC); + if (!ep_priv) + return NULL; + + for (i = 0; i < NUM_ISO_ETDS; ++i) + ep_priv->etd[i] = -1; + + return ep_priv; +} + +static int imx21_hc_urb_enqueue(struct usb_hcd *hcd, + struct urb *urb, gfp_t mem_flags) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + struct usb_host_endpoint *ep = urb->ep; + struct urb_priv *urb_priv; + struct ep_priv *ep_priv; + struct etd_priv *etd; + int ret; + unsigned long flags; + int new_ep = 0; + + dev_vdbg(imx21->dev, + "enqueue urb=%p ep=%p len=%d " + "buffer=%p dma=%08X setupBuf=%p setupDma=%08X\n", + urb, ep, + urb->transfer_buffer_length, + urb->transfer_buffer, urb->transfer_dma, + urb->setup_packet, urb->setup_dma); + + if (usb_pipeisoc(urb->pipe)) + return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags); + + urb_priv = 
kzalloc(sizeof(struct urb_priv), mem_flags); + if (!urb_priv) + return -ENOMEM; + + spin_lock_irqsave(&imx21->lock, flags); + + ep_priv = ep->hcpriv; + if (ep_priv == NULL) { + ep_priv = alloc_ep(); + if (!ep_priv) { + ret = -ENOMEM; + goto failed_alloc_ep; + } + ep->hcpriv = ep_priv; + ep_priv->ep = ep; + new_ep = 1; + } + + ret = usb_hcd_link_urb_to_ep(hcd, urb); + if (ret) + goto failed_link; + + urb->status = -EINPROGRESS; + urb->actual_length = 0; + urb->error_count = 0; + urb->hcpriv = urb_priv; + urb_priv->ep = ep; + + switch (usb_pipetype(urb->pipe)) { + case PIPE_CONTROL: + urb_priv->state = US_CTRL_SETUP; + break; + case PIPE_BULK: + urb_priv->state = US_BULK; + break; + } + + debug_urb_submitted(imx21, urb); + if (ep_priv->etd[0] < 0) { + if (ep_priv->waiting_etd) { + dev_dbg(imx21->dev, + "no ETD available already queued %p\n", + ep_priv); + debug_urb_queued_for_etd(imx21, urb); + goto out; + } + ep_priv->etd[0] = alloc_etd(imx21); + if (ep_priv->etd[0] < 0) { + dev_dbg(imx21->dev, + "no ETD available queueing %p\n", ep_priv); + debug_urb_queued_for_etd(imx21, urb); + list_add_tail(&ep_priv->queue, &imx21->queue_for_etd); + ep_priv->waiting_etd = 1; + goto out; + } + } + + /* Schedule if no URB already active for this endpoint */ + etd = &imx21->etd[ep_priv->etd[0]]; + if (etd->urb == NULL) { + DEBUG_LOG_FRAME(imx21, etd, last_req); + schedule_nonisoc_etd(imx21, urb); + } + +out: + spin_unlock_irqrestore(&imx21->lock, flags); + return 0; + +failed_link: +failed_alloc_ep: + spin_unlock_irqrestore(&imx21->lock, flags); + kfree(urb_priv); + return ret; +} + +static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, + int status) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + unsigned long flags; + struct usb_host_endpoint *ep; + struct ep_priv *ep_priv; + struct urb_priv *urb_priv = urb->hcpriv; + int ret = -EINVAL; + + dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n", + urb, usb_pipeisoc(urb->pipe), status); + + spin_lock_irqsave(&imx21->lock, flags); + + ret = usb_hcd_check_unlink_urb(hcd, urb, status); + if (ret) + goto fail; + ep = urb_priv->ep; + ep_priv = ep->hcpriv; + + debug_urb_unlinked(imx21, urb); + + if (usb_pipeisoc(urb->pipe)) { + dequeue_isoc_urb(imx21, urb, ep_priv); + schedule_isoc_etds(hcd, ep); + } else if (urb_priv->active) { + int etd_num = ep_priv->etd[0]; + if (etd_num != -1) { + disactivate_etd(imx21, etd_num); + free_dmem(imx21, etd_readl(imx21, etd_num, 1) & 0xffff); + imx21->etd[etd_num].urb = NULL; + } + } + + urb_done(hcd, urb, status); + + spin_unlock_irqrestore(&imx21->lock, flags); + return 0; + +fail: + spin_unlock_irqrestore(&imx21->lock, flags); + return ret; +} + +/* =========================================== */ +/* Interrupt dispatch */ +/* =========================================== */ + +static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof) +{ + int etd_num; + int enable_sof_int = 0; + unsigned long flags; + + spin_lock_irqsave(&imx21->lock, flags); + + for (etd_num = 0; etd_num < USB_NUM_ETD; etd_num++) { + u32 etd_mask = 1 << etd_num; + u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask; + u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask; + struct etd_priv *etd = &imx21->etd[etd_num]; + + + if (done) { + DEBUG_LOG_FRAME(imx21, etd, last_int); + } else { +/* + * Kludge warning! 
+ * + * When multiple transfers are using the bus we sometimes get into a state + * where the transfer has completed (the CC field of the ETD is != 0x0F), + * the ETD has self disabled but the ETDDONESTAT flag is not set + * (and hence no interrupt occurs). + * This causes the transfer in question to hang. + * The kludge below checks for this condition at each SOF and processes any + * blocked ETDs (after an arbitary 10 frame wait) + * + * With a single active transfer the usbtest test suite will run for days + * without the kludge. + * With other bus activity (eg mass storage) even just test1 will hang without + * the kludge. + */ + u32 dword0; + int cc; + + if (etd->active_count && !enabled) /* suspicious... */ + enable_sof_int = 1; + + if (!sof || enabled || !etd->active_count) + continue; + + cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE; + if (cc == TD_NOTACCESSED) + continue; + + if (++etd->active_count < 10) + continue; + + dword0 = etd_readl(imx21, etd_num, 0); + dev_dbg(imx21->dev, + "unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n", + etd_num, dword0 & 0x7F, + (dword0 >> DW0_ENDPNT) & 0x0F, + cc); + +#ifdef DEBUG + dev_dbg(imx21->dev, + "frame: act=%d disact=%d" + " int=%d req=%d cur=%d\n", + etd->activated_frame, + etd->disactivated_frame, + etd->last_int_frame, + etd->last_req_frame, + readl(imx21->regs + USBH_FRMNUB)); + imx21->debug_unblocks++; +#endif + etd->active_count = 0; +/* End of kludge */ + } + + if (etd->ep == NULL || etd->urb == NULL) { + dev_dbg(imx21->dev, + "Interrupt for unexpected etd %d" + " ep=%p urb=%p\n", + etd_num, etd->ep, etd->urb); + disactivate_etd(imx21, etd_num); + continue; + } + + if (usb_pipeisoc(etd->urb->pipe)) + isoc_etd_done(hcd, etd->urb, etd_num); + else + nonisoc_etd_done(hcd, etd->urb, etd_num); + } + + /* only enable SOF interrupt if it may be needed for the kludge */ + if (enable_sof_int) + set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT); + else + clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT); + + + spin_unlock_irqrestore(&imx21->lock, flags); +} + +static irqreturn_t imx21_irq(struct usb_hcd *hcd) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + u32 ints = readl(imx21->regs + USBH_SYSISR); + + if (ints & USBH_SYSIEN_HERRINT) + dev_dbg(imx21->dev, "Scheduling error\n"); + + if (ints & USBH_SYSIEN_SORINT) + dev_dbg(imx21->dev, "Scheduling overrun\n"); + + if (ints & (USBH_SYSISR_DONEINT | USBH_SYSISR_SOFINT)) + process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT); + + writel(ints, imx21->regs + USBH_SYSISR); + return IRQ_HANDLED; +} + +static void imx21_hc_endpoint_disable(struct usb_hcd *hcd, + struct usb_host_endpoint *ep) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + unsigned long flags; + struct ep_priv *ep_priv; + int i; + + if (ep == NULL) + return; + + spin_lock_irqsave(&imx21->lock, flags); + ep_priv = ep->hcpriv; + dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv); + + if (!list_empty(&ep->urb_list)) + dev_dbg(imx21->dev, "ep's URB list is not empty\n"); + + if (ep_priv != NULL) { + for (i = 0; i < NUM_ISO_ETDS; i++) { + if (ep_priv->etd[i] > -1) + dev_dbg(imx21->dev, "free etd %d for disable\n", + ep_priv->etd[i]); + + free_etd(imx21, ep_priv->etd[i]); + } + kfree(ep_priv); + ep->hcpriv = NULL; + } + + for (i = 0; i < USB_NUM_ETD; i++) { + if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) { + dev_err(imx21->dev, + "Active etd %d for disabled ep=%p!\n", i, ep); + free_etd(imx21, i); + } + } + free_epdmem(imx21, ep); + spin_unlock_irqrestore(&imx21->lock, flags); +} + +/* 
=========================================== */ +/* Hub handling */ +/* =========================================== */ + +static int get_hub_descriptor(struct usb_hcd *hcd, + struct usb_hub_descriptor *desc) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + desc->bDescriptorType = 0x29; /* HUB descriptor */ + desc->bHubContrCurrent = 0; + + desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA) + & USBH_ROOTHUBA_NDNSTMPRT_MASK; + desc->bDescLength = 9; + desc->bPwrOn2PwrGood = 0; + desc->wHubCharacteristics = (__force __u16) cpu_to_le16( + 0x0002 | /* No power switching */ + 0x0010 | /* No over current protection */ + 0); + + desc->bitmap[0] = 1 << 1; + desc->bitmap[1] = ~0; + return 0; +} + +static int imx21_hc_hub_status_data(struct usb_hcd *hcd, char *buf) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + int ports; + int changed = 0; + int i; + unsigned long flags; + + spin_lock_irqsave(&imx21->lock, flags); + ports = readl(imx21->regs + USBH_ROOTHUBA) + & USBH_ROOTHUBA_NDNSTMPRT_MASK; + if (ports > 7) { + ports = 7; + dev_err(imx21->dev, "ports %d > 7\n", ports); + } + for (i = 0; i < ports; i++) { + if (readl(imx21->regs + USBH_PORTSTAT(i)) & + (USBH_PORTSTAT_CONNECTSC | + USBH_PORTSTAT_PRTENBLSC | + USBH_PORTSTAT_PRTSTATSC | + USBH_PORTSTAT_OVRCURIC | + USBH_PORTSTAT_PRTRSTSC)) { + + changed = 1; + buf[0] |= 1 << (i + 1); + } + } + spin_unlock_irqrestore(&imx21->lock, flags); + + if (changed) + dev_info(imx21->dev, "Hub status changed\n"); + return changed; +} + +static int imx21_hc_hub_control(struct usb_hcd *hcd, + u16 typeReq, + u16 wValue, u16 wIndex, char *buf, u16 wLength) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + int rc = 0; + u32 status_write = 0; + + switch (typeReq) { + case ClearHubFeature: + dev_dbg(imx21->dev, "ClearHubFeature\n"); + switch (wValue) { + case C_HUB_OVER_CURRENT: + dev_dbg(imx21->dev, " OVER_CURRENT\n"); + break; + case C_HUB_LOCAL_POWER: + dev_dbg(imx21->dev, " LOCAL_POWER\n"); + break; + default: + dev_dbg(imx21->dev, " unknown\n"); + rc = -EINVAL; + break; + } + break; + + case ClearPortFeature: + dev_dbg(imx21->dev, "ClearPortFeature\n"); + switch (wValue) { + case USB_PORT_FEAT_ENABLE: + dev_dbg(imx21->dev, " ENABLE\n"); + status_write = USBH_PORTSTAT_CURCONST; + break; + case USB_PORT_FEAT_SUSPEND: + dev_dbg(imx21->dev, " SUSPEND\n"); + status_write = USBH_PORTSTAT_PRTOVRCURI; + break; + case USB_PORT_FEAT_POWER: + dev_dbg(imx21->dev, " POWER\n"); + status_write = USBH_PORTSTAT_LSDEVCON; + break; + case USB_PORT_FEAT_C_ENABLE: + dev_dbg(imx21->dev, " C_ENABLE\n"); + status_write = USBH_PORTSTAT_PRTENBLSC; + break; + case USB_PORT_FEAT_C_SUSPEND: + dev_dbg(imx21->dev, " C_SUSPEND\n"); + status_write = USBH_PORTSTAT_PRTSTATSC; + break; + case USB_PORT_FEAT_C_CONNECTION: + dev_dbg(imx21->dev, " C_CONNECTION\n"); + status_write = USBH_PORTSTAT_CONNECTSC; + break; + case USB_PORT_FEAT_C_OVER_CURRENT: + dev_dbg(imx21->dev, " C_OVER_CURRENT\n"); + status_write = USBH_PORTSTAT_OVRCURIC; + break; + case USB_PORT_FEAT_C_RESET: + dev_dbg(imx21->dev, " C_RESET\n"); + status_write = USBH_PORTSTAT_PRTRSTSC; + break; + default: + dev_dbg(imx21->dev, " unknown\n"); + rc = -EINVAL; + break; + } + + break; + + case GetHubDescriptor: + dev_dbg(imx21->dev, "GetHubDescriptor\n"); + rc = get_hub_descriptor(hcd, (void *)buf); + break; + + case GetHubStatus: + dev_dbg(imx21->dev, " GetHubStatus\n"); + *(__le32 *) buf = 0; + break; + + case GetPortStatus: + dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n", + wIndex, USBH_PORTSTAT(wIndex - 1)); + *(__le32 *) buf = 
readl(imx21->regs + + USBH_PORTSTAT(wIndex - 1)); + break; + + case SetHubFeature: + dev_dbg(imx21->dev, "SetHubFeature\n"); + switch (wValue) { + case C_HUB_OVER_CURRENT: + dev_dbg(imx21->dev, " OVER_CURRENT\n"); + break; + + case C_HUB_LOCAL_POWER: + dev_dbg(imx21->dev, " LOCAL_POWER\n"); + break; + default: + dev_dbg(imx21->dev, " unknown\n"); + rc = -EINVAL; + break; + } + + break; + + case SetPortFeature: + dev_dbg(imx21->dev, "SetPortFeature\n"); + switch (wValue) { + case USB_PORT_FEAT_SUSPEND: + dev_dbg(imx21->dev, " SUSPEND\n"); + status_write = USBH_PORTSTAT_PRTSUSPST; + break; + case USB_PORT_FEAT_POWER: + dev_dbg(imx21->dev, " POWER\n"); + status_write = USBH_PORTSTAT_PRTPWRST; + break; + case USB_PORT_FEAT_RESET: + dev_dbg(imx21->dev, " RESET\n"); + status_write = USBH_PORTSTAT_PRTRSTST; + break; + default: + dev_dbg(imx21->dev, " unknown\n"); + rc = -EINVAL; + break; + } + break; + + default: + dev_dbg(imx21->dev, " unknown\n"); + rc = -EINVAL; + break; + } + + if (status_write) + writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1)); + return rc; +} + +/* =========================================== */ +/* Host controller management */ +/* =========================================== */ + +static int imx21_hc_reset(struct usb_hcd *hcd) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + unsigned long timeout; + unsigned long flags; + + spin_lock_irqsave(&imx21->lock, flags); + + /* Reset the Host controler modules */ + writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH | + USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC, + imx21->regs + USBOTG_RST_CTRL); + + /* Wait for reset to finish */ + timeout = jiffies + HZ; + while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) { + if (time_after(jiffies, timeout)) { + spin_unlock_irqrestore(&imx21->lock, flags); + dev_err(imx21->dev, "timeout waiting for reset\n"); + return -ETIMEDOUT; + } + spin_unlock_irq(&imx21->lock); + schedule_timeout(1); + spin_lock_irq(&imx21->lock); + } + spin_unlock_irqrestore(&imx21->lock, flags); + return 0; +} + +static int __devinit imx21_hc_start(struct usb_hcd *hcd) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + unsigned long flags; + int i, j; + u32 hw_mode = USBOTG_HWMODE_CRECFG_HOST; + u32 usb_control = 0; + + hw_mode |= ((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) & + USBOTG_HWMODE_HOSTXCVR_MASK); + hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) & + USBOTG_HWMODE_OTGXCVR_MASK); + + if (imx21->pdata->host1_txenoe) + usb_control |= USBCTRL_HOST1_TXEN_OE; + + if (!imx21->pdata->host1_xcverless) + usb_control |= USBCTRL_HOST1_BYP_TLL; + + if (imx21->pdata->otg_ext_xcvr) + usb_control |= USBCTRL_OTC_RCV_RXDP; + + + spin_lock_irqsave(&imx21->lock, flags); + + writel((USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN), + imx21->regs + USBOTG_CLK_CTRL); + writel(hw_mode, imx21->regs + USBOTG_HWMODE); + writel(usb_control, imx21->regs + USBCTRL); + writel(USB_MISCCONTROL_SKPRTRY | USB_MISCCONTROL_ARBMODE, + imx21->regs + USB_MISCCONTROL); + + /* Clear the ETDs */ + for (i = 0; i < USB_NUM_ETD; i++) + for (j = 0; j < 4; j++) + etd_writel(imx21, i, j, 0); + + /* Take the HC out of reset */ + writel(USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL | USBH_HOST_CTRL_CTLBLKSR_1, + imx21->regs + USBH_HOST_CTRL); + + /* Enable ports */ + if (imx21->pdata->enable_otg_host) + writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST, + imx21->regs + USBH_PORTSTAT(0)); + + if (imx21->pdata->enable_host1) + writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST, + imx21->regs + USBH_PORTSTAT(1)); + + if 
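The ClearPortFeature handling above relies on the change bits of USBH_PORTSTAT being write-one-to-clear, which is why each case only selects a bit for the single writel() at the end. A toy register model of that semantic (deliberately simplified; the real register also gives the low bits set/clear side effects, which this toy ignores):

#include <stdio.h>
#include <stdint.h>

static uint32_t portstat = (1u << 16) | (1u << 8);   /* CONNECTSC pending, port powered */

static void portstat_write(uint32_t val)
{
    uint32_t w1c = 0x1fu << 16;       /* the five change bits, 16..20 */
    portstat &= ~(val & w1c);         /* writing 1 clears a change flag */
    portstat |= val & ~w1c;           /* other bits behave as plain R/W in this toy */
}

int main(void)
{
    printf("before: 0x%08X\n", (unsigned)portstat);
    portstat_write(1u << 16);         /* status_write for ClearPortFeature(C_CONNECTION) */
    printf("after : 0x%08X\n", (unsigned)portstat);
    return 0;
}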
(imx21->pdata->enable_host2) + writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST, + imx21->regs + USBH_PORTSTAT(2)); + + + hcd->state = HC_STATE_RUNNING; + + /* Enable host controller interrupts */ + set_register_bits(imx21, USBH_SYSIEN, + USBH_SYSIEN_HERRINT | + USBH_SYSIEN_DONEINT | USBH_SYSIEN_SORINT); + set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT); + + spin_unlock_irqrestore(&imx21->lock, flags); + + return 0; +} + +static void imx21_hc_stop(struct usb_hcd *hcd) +{ + struct imx21 *imx21 = hcd_to_imx21(hcd); + unsigned long flags; + + spin_lock_irqsave(&imx21->lock, flags); + + writel(0, imx21->regs + USBH_SYSIEN); + clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT); + clear_register_bits(imx21, USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN, + USBOTG_CLK_CTRL); + spin_unlock_irqrestore(&imx21->lock, flags); +} + +/* =========================================== */ +/* Driver glue */ +/* =========================================== */ + +static struct hc_driver imx21_hc_driver = { + .description = hcd_name, + .product_desc = "IMX21 USB Host Controller", + .hcd_priv_size = sizeof(struct imx21), + + .flags = HCD_USB11, + .irq = imx21_irq, + + .reset = imx21_hc_reset, + .start = imx21_hc_start, + .stop = imx21_hc_stop, + + /* I/O requests */ + .urb_enqueue = imx21_hc_urb_enqueue, + .urb_dequeue = imx21_hc_urb_dequeue, + .endpoint_disable = imx21_hc_endpoint_disable, + + /* scheduling support */ + .get_frame_number = imx21_hc_get_frame, + + /* Root hub support */ + .hub_status_data = imx21_hc_hub_status_data, + .hub_control = imx21_hc_hub_control, + +}; + +static struct mx21_usbh_platform_data default_pdata = { + .host_xcvr = MX21_USBXCVR_TXDIF_RXDIF, + .otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF, + .enable_host1 = 1, + .enable_host2 = 1, + .enable_otg_host = 1, + +}; + +static int imx21_remove(struct platform_device *pdev) +{ + struct usb_hcd *hcd = platform_get_drvdata(pdev); + struct imx21 *imx21 = hcd_to_imx21(hcd); + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + remove_debug_files(imx21); + usb_remove_hcd(hcd); + + if (res != NULL) { + clk_disable(imx21->clk); + clk_put(imx21->clk); + iounmap(imx21->regs); + release_mem_region(res->start, resource_size(res)); + } + + kfree(hcd); + return 0; +} + + +static int imx21_probe(struct platform_device *pdev) +{ + struct usb_hcd *hcd; + struct imx21 *imx21; + struct resource *res; + int ret; + int irq; + + printk(KERN_INFO "%s\n", imx21_hc_driver.product_desc); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return -ENXIO; + + hcd = usb_create_hcd(&imx21_hc_driver, + &pdev->dev, dev_name(&pdev->dev)); + if (hcd == NULL) { + dev_err(&pdev->dev, "Cannot create hcd (%s)\n", + dev_name(&pdev->dev)); + return -ENOMEM; + } + + imx21 = hcd_to_imx21(hcd); + imx21->dev = &pdev->dev; + imx21->pdata = pdev->dev.platform_data; + if (!imx21->pdata) + imx21->pdata = &default_pdata; + + spin_lock_init(&imx21->lock); + INIT_LIST_HEAD(&imx21->dmem_list); + INIT_LIST_HEAD(&imx21->queue_for_etd); + INIT_LIST_HEAD(&imx21->queue_for_dmem); + create_debug_files(imx21); + + res = request_mem_region(res->start, resource_size(res), hcd_name); + if (!res) { + ret = -EBUSY; + goto failed_request_mem; + } + + imx21->regs = ioremap(res->start, resource_size(res)); + if (imx21->regs == NULL) { + dev_err(imx21->dev, "Cannot map registers\n"); + ret = -ENOMEM; + goto failed_ioremap; + } + + /* Enable clocks source */ + imx21->clk = 
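imx21_probe() uses the usual goto-based unwind: resources are acquired in order and released in reverse from labelled error exits (the labels follow below). A small user-space illustration of the same shape, with malloc/fopen standing in for usb_create_hcd/ioremap/clk_get:

#include <stdio.h>
#include <stdlib.h>

static int fake_probe(void)
{
    int ret;

    char *priv = malloc(64);                /* usb_create_hcd() analogue */
    if (!priv)
        return -1;

    FILE *regs = fopen("/dev/null", "r");   /* ioremap() analogue */
    if (!regs) {
        ret = -2;
        goto failed_ioremap;
    }

    FILE *clk = fopen("/dev/zero", "r");    /* clk_get() analogue */
    if (!clk) {
        ret = -3;
        goto failed_clock_get;
    }

    fclose(clk);                            /* pretend the rest of probe succeeded */
    fclose(regs);
    free(priv);
    return 0;

failed_clock_get:
    fclose(regs);
failed_ioremap:
    free(priv);
    return ret;                             /* each label only undoes what was acquired */
}

int main(void)
{
    printf("fake_probe() = %d\n", fake_probe());
    return 0;
}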
clk_get(imx21->dev, NULL); + if (IS_ERR(imx21->clk)) { + dev_err(imx21->dev, "no clock found\n"); + ret = PTR_ERR(imx21->clk); + goto failed_clock_get; + } + + ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000)); + if (ret) + goto failed_clock_set; + ret = clk_enable(imx21->clk); + if (ret) + goto failed_clock_enable; + + dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n", + (readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF); + + ret = usb_add_hcd(hcd, irq, IRQF_DISABLED); + if (ret != 0) { + dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret); + goto failed_add_hcd; + } + + return 0; + +failed_add_hcd: + clk_disable(imx21->clk); +failed_clock_enable: +failed_clock_set: + clk_put(imx21->clk); +failed_clock_get: + iounmap(imx21->regs); +failed_ioremap: + release_mem_region(res->start, res->end - res->start); +failed_request_mem: + remove_debug_files(imx21); + usb_put_hcd(hcd); + return ret; +} + +static struct platform_driver imx21_hcd_driver = { + .driver = { + .name = (char *)hcd_name, + }, + .probe = imx21_probe, + .remove = imx21_remove, + .suspend = NULL, + .resume = NULL, +}; + +static int __init imx21_hcd_init(void) +{ + return platform_driver_register(&imx21_hcd_driver); +} + +static void __exit imx21_hcd_cleanup(void) +{ + platform_driver_unregister(&imx21_hcd_driver); +} + +module_init(imx21_hcd_init); +module_exit(imx21_hcd_cleanup); + +MODULE_DESCRIPTION("i.MX21 USB Host controller"); +MODULE_AUTHOR("Martin Fuzzey"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:imx21-hcd"); diff --git a/drivers/usb/host/imx21-hcd.h b/drivers/usb/host/imx21-hcd.h new file mode 100644 index 00000000000..1b0d913780a --- /dev/null +++ b/drivers/usb/host/imx21-hcd.h @@ -0,0 +1,436 @@ +/* + * Macros and prototypes for i.MX21 + * + * Copyright (C) 2006 Loping Dog Embedded Systems + * Copyright (C) 2009 Martin Fuzzey + * Originally written by Jay Monkman <jtm@lopingdog.com> + * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef __LINUX_IMX21_HCD_H__ +#define __LINUX_IMX21_HCD_H__ + +#include <mach/mx21-usbhost.h> + +#define NUM_ISO_ETDS 2 +#define USB_NUM_ETD 32 +#define DMEM_SIZE 4096 + +/* Register definitions */ +#define USBOTG_HWMODE 0x00 +#define USBOTG_HWMODE_ANASDBEN (1 << 14) +#define USBOTG_HWMODE_OTGXCVR_SHIFT 6 +#define USBOTG_HWMODE_OTGXCVR_MASK (3 << 6) +#define USBOTG_HWMODE_OTGXCVR_TD_RD (0 << 6) +#define USBOTG_HWMODE_OTGXCVR_TS_RD (2 << 6) +#define USBOTG_HWMODE_OTGXCVR_TD_RS (1 << 6) +#define USBOTG_HWMODE_OTGXCVR_TS_RS (3 << 6) +#define USBOTG_HWMODE_HOSTXCVR_SHIFT 4 +#define USBOTG_HWMODE_HOSTXCVR_MASK (3 << 4) +#define USBOTG_HWMODE_HOSTXCVR_TD_RD (0 << 4) +#define USBOTG_HWMODE_HOSTXCVR_TS_RD (2 << 4) +#define USBOTG_HWMODE_HOSTXCVR_TD_RS (1 << 4) +#define USBOTG_HWMODE_HOSTXCVR_TS_RS (3 << 4) +#define USBOTG_HWMODE_CRECFG_MASK (3 << 0) +#define USBOTG_HWMODE_CRECFG_HOST (1 << 0) +#define USBOTG_HWMODE_CRECFG_FUNC (2 << 0) +#define USBOTG_HWMODE_CRECFG_HNP (3 << 0) + +#define USBOTG_CINT_STAT 0x04 +#define USBOTG_CINT_STEN 0x08 +#define USBOTG_ASHNPINT (1 << 5) +#define USBOTG_ASFCINT (1 << 4) +#define USBOTG_ASHCINT (1 << 3) +#define USBOTG_SHNPINT (1 << 2) +#define USBOTG_FCINT (1 << 1) +#define USBOTG_HCINT (1 << 0) + +#define USBOTG_CLK_CTRL 0x0c +#define USBOTG_CLK_CTRL_FUNC (1 << 2) +#define USBOTG_CLK_CTRL_HST (1 << 1) +#define USBOTG_CLK_CTRL_MAIN (1 << 0) + +#define USBOTG_RST_CTRL 0x10 +#define USBOTG_RST_RSTI2C (1 << 15) +#define USBOTG_RST_RSTCTRL (1 << 5) +#define USBOTG_RST_RSTFC (1 << 4) +#define USBOTG_RST_RSTFSKE (1 << 3) +#define USBOTG_RST_RSTRH (1 << 2) +#define USBOTG_RST_RSTHSIE (1 << 1) +#define USBOTG_RST_RSTHC (1 << 0) + +#define USBOTG_FRM_INTVL 0x14 +#define USBOTG_FRM_REMAIN 0x18 +#define USBOTG_HNP_CSR 0x1c +#define USBOTG_HNP_ISR 0x2c +#define USBOTG_HNP_IEN 0x30 + +#define USBOTG_I2C_TXCVR_REG(x) (0x100 + (x)) +#define USBOTG_I2C_XCVR_DEVAD 0x118 +#define USBOTG_I2C_SEQ_OP_REG 0x119 +#define USBOTG_I2C_SEQ_RD_STARTAD 0x11a +#define USBOTG_I2C_OP_CTRL_REG 0x11b +#define USBOTG_I2C_SCLK_TO_SCK_HPER 0x11e +#define USBOTG_I2C_MASTER_INT_REG 0x11f + +#define USBH_HOST_CTRL 0x80 +#define USBH_HOST_CTRL_HCRESET (1 << 31) +#define USBH_HOST_CTRL_SCHDOVR(x) ((x) << 16) +#define USBH_HOST_CTRL_RMTWUEN (1 << 4) +#define USBH_HOST_CTRL_HCUSBSTE_RESET (0 << 2) +#define USBH_HOST_CTRL_HCUSBSTE_RESUME (1 << 2) +#define USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL (2 << 2) +#define USBH_HOST_CTRL_HCUSBSTE_SUSPEND (3 << 2) +#define USBH_HOST_CTRL_CTLBLKSR_1 (0 << 0) +#define USBH_HOST_CTRL_CTLBLKSR_2 (1 << 0) +#define USBH_HOST_CTRL_CTLBLKSR_3 (2 << 0) +#define USBH_HOST_CTRL_CTLBLKSR_4 (3 << 0) + +#define USBH_SYSISR 0x88 +#define USBH_SYSISR_PSCINT (1 << 6) +#define USBH_SYSISR_FMOFINT (1 << 5) +#define USBH_SYSISR_HERRINT (1 << 4) +#define USBH_SYSISR_RESDETINT (1 << 3) +#define USBH_SYSISR_SOFINT (1 << 2) +#define USBH_SYSISR_DONEINT (1 << 1) +#define USBH_SYSISR_SORINT (1 << 0) + +#define USBH_SYSIEN 0x8c +#define USBH_SYSIEN_PSCINT (1 << 6) +#define USBH_SYSIEN_FMOFINT (1 << 5) +#define USBH_SYSIEN_HERRINT (1 << 4) +#define USBH_SYSIEN_RESDETINT (1 << 3) +#define USBH_SYSIEN_SOFINT (1 << 2) +#define USBH_SYSIEN_DONEINT (1 << 1) +#define USBH_SYSIEN_SORINT (1 << 0) + +#define USBH_XBUFSTAT 0x98 +#define USBH_YBUFSTAT 0x9c +#define USBH_XYINTEN 0xa0 +#define USBH_XFILLSTAT 0xa8 +#define USBH_YFILLSTAT 0xac +#define USBH_ETDENSET 0xc0 +#define USBH_ETDENCLR 0xc4 +#define USBH_IMMEDINT 0xcc +#define USBH_ETDDONESTAT 0xd0 +#define USBH_ETDDONEEN 0xd4 +#define 
USBH_FRMNUB 0xe0 +#define USBH_LSTHRESH 0xe4 + +#define USBH_ROOTHUBA 0xe8 +#define USBH_ROOTHUBA_PWRTOGOOD_MASK (0xff) +#define USBH_ROOTHUBA_PWRTOGOOD_SHIFT (24) +#define USBH_ROOTHUBA_NOOVRCURP (1 << 12) +#define USBH_ROOTHUBA_OVRCURPM (1 << 11) +#define USBH_ROOTHUBA_DEVTYPE (1 << 10) +#define USBH_ROOTHUBA_PWRSWTMD (1 << 9) +#define USBH_ROOTHUBA_NOPWRSWT (1 << 8) +#define USBH_ROOTHUBA_NDNSTMPRT_MASK (0xff) + +#define USBH_ROOTHUBB 0xec +#define USBH_ROOTHUBB_PRTPWRCM(x) (1 << ((x) + 16)) +#define USBH_ROOTHUBB_DEVREMOVE(x) (1 << (x)) + +#define USBH_ROOTSTAT 0xf0 +#define USBH_ROOTSTAT_CLRRMTWUE (1 << 31) +#define USBH_ROOTSTAT_OVRCURCHG (1 << 17) +#define USBH_ROOTSTAT_DEVCONWUE (1 << 15) +#define USBH_ROOTSTAT_OVRCURI (1 << 1) +#define USBH_ROOTSTAT_LOCPWRS (1 << 0) + +#define USBH_PORTSTAT(x) (0xf4 + ((x) * 4)) +#define USBH_PORTSTAT_PRTRSTSC (1 << 20) +#define USBH_PORTSTAT_OVRCURIC (1 << 19) +#define USBH_PORTSTAT_PRTSTATSC (1 << 18) +#define USBH_PORTSTAT_PRTENBLSC (1 << 17) +#define USBH_PORTSTAT_CONNECTSC (1 << 16) +#define USBH_PORTSTAT_LSDEVCON (1 << 9) +#define USBH_PORTSTAT_PRTPWRST (1 << 8) +#define USBH_PORTSTAT_PRTRSTST (1 << 4) +#define USBH_PORTSTAT_PRTOVRCURI (1 << 3) +#define USBH_PORTSTAT_PRTSUSPST (1 << 2) +#define USBH_PORTSTAT_PRTENABST (1 << 1) +#define USBH_PORTSTAT_CURCONST (1 << 0) + +#define USB_DMAREV 0x800 +#define USB_DMAINTSTAT 0x804 +#define USB_DMAINTSTAT_EPERR (1 << 1) +#define USB_DMAINTSTAT_ETDERR (1 << 0) + +#define USB_DMAINTEN 0x808 +#define USB_DMAINTEN_EPERRINTEN (1 << 1) +#define USB_DMAINTEN_ETDERRINTEN (1 << 0) + +#define USB_ETDDMAERSTAT 0x80c +#define USB_EPDMAERSTAT 0x810 +#define USB_ETDDMAEN 0x820 +#define USB_EPDMAEN 0x824 +#define USB_ETDDMAXTEN 0x828 +#define USB_EPDMAXTEN 0x82c +#define USB_ETDDMAENXYT 0x830 +#define USB_EPDMAENXYT 0x834 +#define USB_ETDDMABST4EN 0x838 +#define USB_EPDMABST4EN 0x83c + +#define USB_MISCCONTROL 0x840 +#define USB_MISCCONTROL_ISOPREVFRM (1 << 3) +#define USB_MISCCONTROL_SKPRTRY (1 << 2) +#define USB_MISCCONTROL_ARBMODE (1 << 1) +#define USB_MISCCONTROL_FILTCC (1 << 0) + +#define USB_ETDDMACHANLCLR 0x848 +#define USB_EPDMACHANLCLR 0x84c +#define USB_ETDSMSA(x) (0x900 + ((x) * 4)) +#define USB_EPSMSA(x) (0x980 + ((x) * 4)) +#define USB_ETDDMABUFPTR(x) (0xa00 + ((x) * 4)) +#define USB_EPDMABUFPTR(x) (0xa80 + ((x) * 4)) + +#define USB_ETD_DWORD(x, w) (0x200 + ((x) * 16) + ((w) * 4)) +#define DW0_ADDRESS 0 +#define DW0_ENDPNT 7 +#define DW0_DIRECT 11 +#define DW0_SPEED 13 +#define DW0_FORMAT 14 +#define DW0_MAXPKTSIZ 16 +#define DW0_HALTED 27 +#define DW0_TOGCRY 28 +#define DW0_SNDNAK 30 + +#define DW1_XBUFSRTAD 0 +#define DW1_YBUFSRTAD 16 + +#define DW2_RTRYDELAY 0 +#define DW2_POLINTERV 0 +#define DW2_STARTFRM 0 +#define DW2_RELPOLPOS 8 +#define DW2_DIRPID 16 +#define DW2_BUFROUND 18 +#define DW2_DELAYINT 19 +#define DW2_DATATOG 22 +#define DW2_ERRORCNT 24 +#define DW2_COMPCODE 28 + +#define DW3_TOTBYECNT 0 +#define DW3_PKTLEN0 0 +#define DW3_COMPCODE0 12 +#define DW3_PKTLEN1 16 +#define DW3_BUFSIZE 21 +#define DW3_COMPCODE1 28 + +#define USBCTRL 0x600 +#define USBCTRL_I2C_WU_INT_STAT (1 << 27) +#define USBCTRL_OTG_WU_INT_STAT (1 << 26) +#define USBCTRL_HOST_WU_INT_STAT (1 << 25) +#define USBCTRL_FNT_WU_INT_STAT (1 << 24) +#define USBCTRL_I2C_WU_INT_EN (1 << 19) +#define USBCTRL_OTG_WU_INT_EN (1 << 18) +#define USBCTRL_HOST_WU_INT_EN (1 << 17) +#define USBCTRL_FNT_WU_INT_EN (1 << 16) +#define USBCTRL_OTC_RCV_RXDP (1 << 13) +#define USBCTRL_HOST1_BYP_TLL (1 << 12) +#define USBCTRL_OTG_BYP_VAL(x) ((x) << 
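Most of the interesting registers above are parameterized: each root-hub port gets a USBH_PORTSTAT slot and each ETD is four consecutive dwords starting at 0x200. A compilable snippet that reuses those macros to compute offsets and pull the completion code out of dword 2:

#include <stdio.h>
#include <stdint.h>

/* Same macros as imx21-hcd.h: per-port and per-ETD registers are computed. */
#define USBH_PORTSTAT(x)     (0xf4 + ((x) * 4))
#define USB_ETD_DWORD(x, w)  (0x200 + ((x) * 16) + ((w) * 4))
#define DW2_COMPCODE         28            /* completion code: dword 2, bits 31..28 */
#define TD_NOTACCESSED       0x0F

int main(void)
{
    printf("port 1 status register at 0x%03x\n", USBH_PORTSTAT(1));
    printf("ETD 5, dword 2 at 0x%03x\n", USB_ETD_DWORD(5, 2));   /* 4 dwords per ETD */

    uint32_t dword2 = 0xF0000000;          /* a raw value as read back from the ETD */
    unsigned cc = (unsigned)(dword2 >> DW2_COMPCODE);
    printf("completion code 0x%X%s\n", cc,
           cc == TD_NOTACCESSED ? " (not yet accessed by hardware)" : "");
    return 0;
}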
10) +#define USBCTRL_HOST1_BYP_VAL(x) ((x) << 8) +#define USBCTRL_OTG_PWR_MASK (1 << 6) +#define USBCTRL_HOST1_PWR_MASK (1 << 5) +#define USBCTRL_HOST2_PWR_MASK (1 << 4) +#define USBCTRL_USB_BYP (1 << 2) +#define USBCTRL_HOST1_TXEN_OE (1 << 1) + + +/* Values in TD blocks */ +#define TD_DIR_SETUP 0 +#define TD_DIR_OUT 1 +#define TD_DIR_IN 2 +#define TD_FORMAT_CONTROL 0 +#define TD_FORMAT_ISO 1 +#define TD_FORMAT_BULK 2 +#define TD_FORMAT_INT 3 +#define TD_TOGGLE_CARRY 0 +#define TD_TOGGLE_DATA0 2 +#define TD_TOGGLE_DATA1 3 + +/* control transfer states */ +#define US_CTRL_SETUP 2 +#define US_CTRL_DATA 1 +#define US_CTRL_ACK 0 + +/* bulk transfer main state and 0-length packet */ +#define US_BULK 1 +#define US_BULK0 0 + +/*ETD format description*/ +#define IMX_FMT_CTRL 0x0 +#define IMX_FMT_ISO 0x1 +#define IMX_FMT_BULK 0x2 +#define IMX_FMT_INT 0x3 + +static char fmt_urb_to_etd[4] = { +/*PIPE_ISOCHRONOUS*/ IMX_FMT_ISO, +/*PIPE_INTERRUPT*/ IMX_FMT_INT, +/*PIPE_CONTROL*/ IMX_FMT_CTRL, +/*PIPE_BULK*/ IMX_FMT_BULK +}; + +/* condition (error) CC codes and mapping (OHCI like) */ + +#define TD_CC_NOERROR 0x00 +#define TD_CC_CRC 0x01 +#define TD_CC_BITSTUFFING 0x02 +#define TD_CC_DATATOGGLEM 0x03 +#define TD_CC_STALL 0x04 +#define TD_DEVNOTRESP 0x05 +#define TD_PIDCHECKFAIL 0x06 +/*#define TD_UNEXPECTEDPID 0x07 - reserved, not active on MX2*/ +#define TD_DATAOVERRUN 0x08 +#define TD_DATAUNDERRUN 0x09 +#define TD_BUFFEROVERRUN 0x0C +#define TD_BUFFERUNDERRUN 0x0D +#define TD_SCHEDULEOVERRUN 0x0E +#define TD_NOTACCESSED 0x0F + +static const int cc_to_error[16] = { + /* No Error */ 0, + /* CRC Error */ -EILSEQ, + /* Bit Stuff */ -EPROTO, + /* Data Togg */ -EILSEQ, + /* Stall */ -EPIPE, + /* DevNotResp */ -ETIMEDOUT, + /* PIDCheck */ -EPROTO, + /* UnExpPID */ -EPROTO, + /* DataOver */ -EOVERFLOW, + /* DataUnder */ -EREMOTEIO, + /* (for hw) */ -EIO, + /* (for hw) */ -EIO, + /* BufferOver */ -ECOMM, + /* BuffUnder */ -ENOSR, + /* (for HCD) */ -ENOSPC, + /* (for HCD) */ -EALREADY +}; + +/* HCD data associated with a usb core URB */ +struct urb_priv { + struct urb *urb; + struct usb_host_endpoint *ep; + int active; + int state; + struct td *isoc_td; + int isoc_remaining; + int isoc_status; +}; + +/* HCD data associated with a usb core endpoint */ +struct ep_priv { + struct usb_host_endpoint *ep; + struct list_head td_list; + struct list_head queue; + int etd[NUM_ISO_ETDS]; + int waiting_etd; +}; + +/* isoc packet */ +struct td { + struct list_head list; + struct urb *urb; + struct usb_host_endpoint *ep; + dma_addr_t data; + unsigned long buf_addr; + int len; + int frame; + int isoc_index; +}; + +/* HCD data associated with a hardware ETD */ +struct etd_priv { + struct usb_host_endpoint *ep; + struct urb *urb; + struct td *td; + struct list_head queue; + dma_addr_t dma_handle; + int alloc; + int len; + int dmem_size; + int dmem_offset; + int active_count; +#ifdef DEBUG + int activated_frame; + int disactivated_frame; + int last_int_frame; + int last_req_frame; + u32 submitted_dwords[4]; +#endif +}; + +/* Hardware data memory info */ +struct imx21_dmem_area { + struct usb_host_endpoint *ep; + unsigned int offset; + unsigned int size; + struct list_head list; +}; + +#ifdef DEBUG +struct debug_usage_stats { + unsigned int value; + unsigned int maximum; +}; + +struct debug_stats { + unsigned long submitted; + unsigned long completed_ok; + unsigned long completed_failed; + unsigned long unlinked; + unsigned long queue_etd; + unsigned long queue_dmem; +}; + +struct debug_isoc_trace { + int schedule_frame; + int 
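cc_to_error[] above translates the 4-bit hardware completion code into the status reported in the URB. A trimmed, stand-alone version of the same lookup (only a few entries are shown; the omitted ones are non-zero in the real table):

#include <errno.h>
#include <stdio.h>

static const int cc_to_error_demo[16] = {
    [0x00] = 0,             /* no error */
    [0x01] = -EILSEQ,       /* CRC error */
    [0x02] = -EPROTO,       /* bit stuffing */
    [0x04] = -EPIPE,        /* stall */
    [0x05] = -ETIMEDOUT,    /* device not responding */
    [0x0F] = -EALREADY,     /* TD_NOTACCESSED: hardware never got to it */
};

int main(void)
{
    int cc = 0x04;          /* e.g. the DW2_COMPCODE field of a completed ETD */
    printf("cc 0x%02X -> urb status %d\n", cc, cc_to_error_demo[cc]);
    return 0;
}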
submit_frame; + int request_len; + int done_frame; + int done_len; + int cc; + struct td *td; +}; +#endif + +/* HCD data structure */ +struct imx21 { + spinlock_t lock; + struct device *dev; + struct mx21_usbh_platform_data *pdata; + struct list_head dmem_list; + struct list_head queue_for_etd; /* eps queued due to etd shortage */ + struct list_head queue_for_dmem; /* etds queued due to dmem shortage */ + struct etd_priv etd[USB_NUM_ETD]; + struct clk *clk; + void __iomem *regs; +#ifdef DEBUG + struct dentry *debug_root; + struct debug_stats nonisoc_stats; + struct debug_stats isoc_stats; + struct debug_usage_stats etd_usage; + struct debug_usage_stats dmem_usage; + struct debug_isoc_trace isoc_trace[20]; + struct debug_isoc_trace isoc_trace_failed[20]; + unsigned long debug_unblocks; + int isoc_trace_index; + int isoc_trace_index_failed; +#endif +}; + +#endif diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c index 42971657fde..217fb517020 100644 --- a/drivers/usb/host/isp1362-hcd.c +++ b/drivers/usb/host/isp1362-hcd.c @@ -1257,7 +1257,7 @@ static int isp1362_urb_enqueue(struct usb_hcd *hcd, /* avoid all allocations within spinlocks: request or endpoint */ if (!hep->hcpriv) { - ep = kcalloc(1, sizeof *ep, mem_flags); + ep = kzalloc(sizeof *ep, mem_flags); if (!ep) return -ENOMEM; } @@ -2719,24 +2719,11 @@ static int __init isp1362_probe(struct platform_device *pdev) } irq = irq_res->start; -#ifdef CONFIG_USB_HCD_DMA - if (pdev->dev.dma_mask) { - struct resource *dma_res = platform_get_resource(pdev, IORESOURCE_MEM, 2); - - if (!dma_res) { - retval = -ENODEV; - goto err1; - } - isp1362_hcd->data_dma = dma_res->start; - isp1362_hcd->max_dma_size = resource_len(dma_res); - } -#else if (pdev->dev.dma_mask) { DBG(1, "won't do DMA"); retval = -ENODEV; goto err1; } -#endif if (!request_mem_region(addr->start, resource_len(addr), hcd_name)) { retval = -EBUSY; diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c index 27b8f7cb447..9f01293600b 100644 --- a/drivers/usb/host/isp1760-hcd.c +++ b/drivers/usb/host/isp1760-hcd.c @@ -17,7 +17,9 @@ #include <linux/debugfs.h> #include <linux/uaccess.h> #include <linux/io.h> +#include <linux/mm.h> #include <asm/unaligned.h> +#include <asm/cacheflush.h> #include "../core/hcd.h" #include "isp1760-hcd.h" @@ -904,6 +906,14 @@ __acquires(priv->lock) status = 0; } + if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) { + void *ptr; + for (ptr = urb->transfer_buffer; + ptr < urb->transfer_buffer + urb->transfer_buffer_length; + ptr += PAGE_SIZE) + flush_dcache_page(virt_to_page(ptr)); + } + /* complete() can reenter this HCD */ usb_hcd_unlink_urb_from_ep(priv_to_hcd(priv), urb); spin_unlock(&priv->lock); diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c index 1c9f977a5c9..4293cfd28d6 100644 --- a/drivers/usb/host/isp1760-if.c +++ b/drivers/usb/host/isp1760-if.c @@ -109,7 +109,7 @@ static int of_isp1760_remove(struct of_device *dev) return 0; } -static struct of_device_id of_isp1760_match[] = { +static const struct of_device_id of_isp1760_match[] = { { .compatible = "nxp,usb-isp1760", }, diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c new file mode 100644 index 00000000000..4aa08d36d07 --- /dev/null +++ b/drivers/usb/host/ohci-da8xx.c @@ -0,0 +1,456 @@ +/* + * OHCI HCD (Host Controller Driver) for USB. + * + * TI DA8xx (OMAP-L1x) Bus Glue + * + * Derived from: ohci-omap.c and ohci-s3c2410.c + * Copyright (C) 2008-2009 MontaVista Software, Inc. 
<source@mvista.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#include <linux/interrupt.h> +#include <linux/jiffies.h> +#include <linux/platform_device.h> +#include <linux/clk.h> + +#include <mach/da8xx.h> +#include <mach/usb.h> + +#ifndef CONFIG_ARCH_DAVINCI_DA8XX +#error "This file is DA8xx bus glue. Define CONFIG_ARCH_DAVINCI_DA8XX." +#endif + +#define CFGCHIP2 DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP2_REG) + +static struct clk *usb11_clk; +static struct clk *usb20_clk; + +/* Over-current indicator change bitmask */ +static volatile u16 ocic_mask; + +static void ohci_da8xx_clock(int on) +{ + u32 cfgchip2; + + cfgchip2 = __raw_readl(CFGCHIP2); + if (on) { + clk_enable(usb11_clk); + + /* + * If USB 1.1 reference clock is sourced from USB 2.0 PHY, we + * need to enable the USB 2.0 module clocking, start its PHY, + * and not allow it to stop the clock during USB 2.0 suspend. + */ + if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX)) { + clk_enable(usb20_clk); + + cfgchip2 &= ~(CFGCHIP2_RESET | CFGCHIP2_PHYPWRDN); + cfgchip2 |= CFGCHIP2_PHY_PLLON; + __raw_writel(cfgchip2, CFGCHIP2); + + pr_info("Waiting for USB PHY clock good...\n"); + while (!(__raw_readl(CFGCHIP2) & CFGCHIP2_PHYCLKGD)) + cpu_relax(); + } + + /* Enable USB 1.1 PHY */ + cfgchip2 |= CFGCHIP2_USB1SUSPENDM; + } else { + clk_disable(usb11_clk); + if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX)) + clk_disable(usb20_clk); + + /* Disable USB 1.1 PHY */ + cfgchip2 &= ~CFGCHIP2_USB1SUSPENDM; + } + __raw_writel(cfgchip2, CFGCHIP2); +} + +/* + * Handle the port over-current indicator change. + */ +static void ohci_da8xx_ocic_handler(struct da8xx_ohci_root_hub *hub, + unsigned port) +{ + ocic_mask |= 1 << port; + + /* Once over-current is detected, the port needs to be powered down */ + if (hub->get_oci(port) > 0) + hub->set_power(port, 0); +} + +static int ohci_da8xx_init(struct usb_hcd *hcd) +{ + struct device *dev = hcd->self.controller; + struct da8xx_ohci_root_hub *hub = dev->platform_data; + struct ohci_hcd *ohci = hcd_to_ohci(hcd); + int result; + u32 rh_a; + + dev_dbg(dev, "starting USB controller\n"); + + ohci_da8xx_clock(1); + + /* + * DA8xx only have 1 port connected to the pins but the HC root hub + * register A reports 2 ports, thus we'll have to override it... + */ + ohci->num_ports = 1; + + result = ohci_init(ohci); + if (result < 0) + return result; + + /* + * Since we're providing a board-specific root hub port power control + * and over-current reporting, we have to override the HC root hub A + * register's default value, so that ohci_hub_control() could return + * the correct hub descriptor... + */ + rh_a = ohci_readl(ohci, &ohci->regs->roothub.a); + if (hub->set_power) { + rh_a &= ~RH_A_NPS; + rh_a |= RH_A_PSM; + } + if (hub->get_oci) { + rh_a &= ~RH_A_NOCP; + rh_a |= RH_A_OCPM; + } + rh_a &= ~RH_A_POTPGT; + rh_a |= hub->potpgt << 24; + ohci_writel(ohci, rh_a, &ohci->regs->roothub.a); + + return result; +} + +static void ohci_da8xx_stop(struct usb_hcd *hcd) +{ + ohci_stop(hcd); + ohci_da8xx_clock(0); +} + +static int ohci_da8xx_start(struct usb_hcd *hcd) +{ + struct ohci_hcd *ohci = hcd_to_ohci(hcd); + int result; + + result = ohci_run(ohci); + if (result < 0) + ohci_da8xx_stop(hcd); + + return result; +} + +/* + * Update the status data from the hub with the over-current indicator change. 
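ohci_da8xx_init() rewrites the root hub's register A so the generic OHCI hub code reports board-controlled power switching and over-current handling. The bit manipulation, extracted into a plain function for illustration (bit positions follow the standard OHCI HcRhDescriptorA layout):

#include <stdio.h>
#include <stdint.h>

#define RH_A_PSM     (1u << 8)      /* per-port power switching */
#define RH_A_NPS     (1u << 9)      /* no power switching (always on) */
#define RH_A_OCPM    (1u << 11)     /* per-port over-current reporting */
#define RH_A_NOCP    (1u << 12)     /* no over-current protection */
#define RH_A_POTPGT  (0xffu << 24)  /* power-on to power-good time */

/* Same bit surgery as ohci_da8xx_init(): advertise the board-level power and
 * over-current callbacks and install the board's POTPGT value. */
static uint32_t override_rh_a(uint32_t rh_a, int has_set_power,
                              int has_get_oci, uint8_t potpgt)
{
    if (has_set_power) {
        rh_a &= ~RH_A_NPS;
        rh_a |= RH_A_PSM;
    }
    if (has_get_oci) {
        rh_a &= ~RH_A_NOCP;
        rh_a |= RH_A_OCPM;
    }
    rh_a &= ~RH_A_POTPGT;
    rh_a |= (uint32_t)potpgt << 24;
    return rh_a;
}

int main(void)
{
    uint32_t reset_default = RH_A_NPS | RH_A_NOCP | 0x2;   /* 2 ports, no switching */
    printf("rh_a 0x%08X -> 0x%08X\n", (unsigned)reset_default,
           (unsigned)override_rh_a(reset_default, 1, 1, 10));
    return 0;
}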
+ */ +static int ohci_da8xx_hub_status_data(struct usb_hcd *hcd, char *buf) +{ + int length = ohci_hub_status_data(hcd, buf); + + /* See if we have OCIC bit set on port 1 */ + if (ocic_mask & (1 << 1)) { + dev_dbg(hcd->self.controller, "over-current indicator change " + "on port 1\n"); + + if (!length) + length = 1; + + buf[0] |= 1 << 1; + } + return length; +} + +/* + * Look at the control requests to the root hub and see if we need to override. + */ +static int ohci_da8xx_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, + u16 wIndex, char *buf, u16 wLength) +{ + struct device *dev = hcd->self.controller; + struct da8xx_ohci_root_hub *hub = dev->platform_data; + int temp; + + switch (typeReq) { + case GetPortStatus: + /* Check the port number */ + if (wIndex != 1) + break; + + dev_dbg(dev, "GetPortStatus(%u)\n", wIndex); + + temp = roothub_portstatus(hcd_to_ohci(hcd), wIndex - 1); + + /* The port power status (PPS) bit defaults to 1 */ + if (hub->get_power && hub->get_power(wIndex) == 0) + temp &= ~RH_PS_PPS; + + /* The port over-current indicator (POCI) bit is always 0 */ + if (hub->get_oci && hub->get_oci(wIndex) > 0) + temp |= RH_PS_POCI; + + /* The over-current indicator change (OCIC) bit is 0 too */ + if (ocic_mask & (1 << wIndex)) + temp |= RH_PS_OCIC; + + put_unaligned(cpu_to_le32(temp), (__le32 *)buf); + return 0; + case SetPortFeature: + temp = 1; + goto check_port; + case ClearPortFeature: + temp = 0; + +check_port: + /* Check the port number */ + if (wIndex != 1) + break; + + switch (wValue) { + case USB_PORT_FEAT_POWER: + dev_dbg(dev, "%sPortFeature(%u): %s\n", + temp ? "Set" : "Clear", wIndex, "POWER"); + + if (!hub->set_power) + return -EPIPE; + + return hub->set_power(wIndex, temp) ? -EPIPE : 0; + case USB_PORT_FEAT_C_OVER_CURRENT: + dev_dbg(dev, "%sPortFeature(%u): %s\n", + temp ? "Set" : "Clear", wIndex, + "C_OVER_CURRENT"); + + if (temp) + ocic_mask |= 1 << wIndex; + else + ocic_mask &= ~(1 << wIndex); + return 0; + } + } + + return ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength); +} + +static const struct hc_driver ohci_da8xx_hc_driver = { + .description = hcd_name, + .product_desc = "DA8xx OHCI", + .hcd_priv_size = sizeof(struct ohci_hcd), + + /* + * generic hardware linkage + */ + .irq = ohci_irq, + .flags = HCD_USB11 | HCD_MEMORY, + + /* + * basic lifecycle operations + */ + .reset = ohci_da8xx_init, + .start = ohci_da8xx_start, + .stop = ohci_da8xx_stop, + .shutdown = ohci_shutdown, + + /* + * managing i/o requests and associated device resources + */ + .urb_enqueue = ohci_urb_enqueue, + .urb_dequeue = ohci_urb_dequeue, + .endpoint_disable = ohci_endpoint_disable, + + /* + * scheduling support + */ + .get_frame_number = ohci_get_frame, + + /* + * root hub support + */ + .hub_status_data = ohci_da8xx_hub_status_data, + .hub_control = ohci_da8xx_hub_control, + +#ifdef CONFIG_PM + .bus_suspend = ohci_bus_suspend, + .bus_resume = ohci_bus_resume, +#endif + .start_port_reset = ohci_start_port_reset, +}; + +/*-------------------------------------------------------------------------*/ + + +/** + * usb_hcd_da8xx_probe - initialize DA8xx-based HCDs + * Context: !in_interrupt() + * + * Allocates basic resources for this USB host controller, and + * then invokes the start() method for the HCD associated with it + * through the hotplug entry's driver_data. 
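The over-current bookkeeping in this glue is a small state machine around ocic_mask: the board callback sets a per-port bit, GetPortStatus reports it as an over-current indicator change, and ClearPortFeature(C_OVER_CURRENT) clears it. A stand-alone sketch of that lifecycle (DEMO_RH_PS_OCIC is only an illustrative constant, not the kernel's definition):

#include <stdio.h>
#include <stdint.h>

#define DEMO_RH_PS_OCIC (1u << 19)   /* over-current indicator change bit */

static uint16_t ocic_mask;           /* one bit per root hub port */

static void over_current_event(unsigned port)     /* board callback fires */
{
    ocic_mask |= 1u << port;         /* the driver also powers the port down here */
}

static uint32_t report_port_status(unsigned port, uint32_t hw_status)
{
    if (ocic_mask & (1u << port))    /* GetPortStatus adds the pending change */
        hw_status |= DEMO_RH_PS_OCIC;
    return hw_status;
}

static void clear_c_over_current(unsigned port)   /* ClearPortFeature */
{
    ocic_mask &= ~(1u << port);
}

int main(void)
{
    over_current_event(1);
    printf("port 1: 0x%08X\n", (unsigned)report_port_status(1, 0));
    clear_c_over_current(1);
    printf("port 1: 0x%08X\n", (unsigned)report_port_status(1, 0));
    return 0;
}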
+ */ +static int usb_hcd_da8xx_probe(const struct hc_driver *driver, + struct platform_device *pdev) +{ + struct da8xx_ohci_root_hub *hub = pdev->dev.platform_data; + struct usb_hcd *hcd; + struct resource *mem; + int error, irq; + + if (hub == NULL) + return -ENODEV; + + usb11_clk = clk_get(&pdev->dev, "usb11"); + if (IS_ERR(usb11_clk)) + return PTR_ERR(usb11_clk); + + usb20_clk = clk_get(&pdev->dev, "usb20"); + if (IS_ERR(usb20_clk)) { + error = PTR_ERR(usb20_clk); + goto err0; + } + + hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev)); + if (!hcd) { + error = -ENOMEM; + goto err1; + } + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + error = -ENODEV; + goto err2; + } + hcd->rsrc_start = mem->start; + hcd->rsrc_len = mem->end - mem->start + 1; + + if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { + dev_dbg(&pdev->dev, "request_mem_region failed\n"); + error = -EBUSY; + goto err2; + } + + hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); + if (!hcd->regs) { + dev_err(&pdev->dev, "ioremap failed\n"); + error = -ENOMEM; + goto err3; + } + + ohci_hcd_init(hcd_to_ohci(hcd)); + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + error = -ENODEV; + goto err4; + } + error = usb_add_hcd(hcd, irq, IRQF_DISABLED); + if (error) + goto err4; + + if (hub->ocic_notify) { + error = hub->ocic_notify(ohci_da8xx_ocic_handler); + if (!error) + return 0; + } + + usb_remove_hcd(hcd); +err4: + iounmap(hcd->regs); +err3: + release_mem_region(hcd->rsrc_start, hcd->rsrc_len); +err2: + usb_put_hcd(hcd); +err1: + clk_put(usb20_clk); +err0: + clk_put(usb11_clk); + return error; +} + +/** + * usb_hcd_da8xx_remove - shutdown processing for DA8xx-based HCDs + * @dev: USB Host Controller being removed + * Context: !in_interrupt() + * + * Reverses the effect of usb_hcd_da8xx_probe(), first invoking + * the HCD's stop() method. It is always called from a thread + * context, normally "rmmod", "apmd", or something similar. + */ +static inline void +usb_hcd_da8xx_remove(struct usb_hcd *hcd, struct platform_device *pdev) +{ + struct da8xx_ohci_root_hub *hub = pdev->dev.platform_data; + + hub->ocic_notify(NULL); + usb_remove_hcd(hcd); + iounmap(hcd->regs); + release_mem_region(hcd->rsrc_start, hcd->rsrc_len); + usb_put_hcd(hcd); + clk_put(usb20_clk); + clk_put(usb11_clk); +} + +static int ohci_hcd_da8xx_drv_probe(struct platform_device *dev) +{ + return usb_hcd_da8xx_probe(&ohci_da8xx_hc_driver, dev); +} + +static int ohci_hcd_da8xx_drv_remove(struct platform_device *dev) +{ + struct usb_hcd *hcd = platform_get_drvdata(dev); + + usb_hcd_da8xx_remove(hcd, dev); + platform_set_drvdata(dev, NULL); + + return 0; +} + +#ifdef CONFIG_PM +static int ohci_da8xx_suspend(struct platform_device *dev, pm_message_t message) +{ + struct usb_hcd *hcd = platform_get_drvdata(dev); + struct ohci_hcd *ohci = hcd_to_ohci(hcd); + + if (time_before(jiffies, ohci->next_statechange)) + msleep(5); + ohci->next_statechange = jiffies; + + ohci_da8xx_clock(0); + hcd->state = HC_STATE_SUSPENDED; + dev->dev.power.power_state = PMSG_SUSPEND; + return 0; +} + +static int ohci_da8xx_resume(struct platform_device *dev) +{ + struct usb_hcd *hcd = platform_get_drvdata(dev); + struct ohci_hcd *ohci = hcd_to_ohci(hcd); + + if (time_before(jiffies, ohci->next_statechange)) + msleep(5); + ohci->next_statechange = jiffies; + + ohci_da8xx_clock(1); + dev->dev.power.power_state = PMSG_ON; + usb_hcd_resume_root_hub(hcd); + return 0; +} +#endif + +/* + * Driver definition to register with platform structure. 
+ */ +static struct platform_driver ohci_hcd_da8xx_driver = { + .probe = ohci_hcd_da8xx_drv_probe, + .remove = ohci_hcd_da8xx_drv_remove, + .shutdown = usb_hcd_platform_shutdown, +#ifdef CONFIG_PM + .suspend = ohci_da8xx_suspend, + .resume = ohci_da8xx_resume, +#endif + .driver = { + .owner = THIS_MODULE, + .name = "ohci", + }, +}; diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c index 811f5dfdc58..8ad2441b028 100644 --- a/drivers/usb/host/ohci-dbg.c +++ b/drivers/usb/host/ohci-dbg.c @@ -53,13 +53,13 @@ urb_print(struct urb * urb, char * str, int small, int status) int i, len; if (usb_pipecontrol (pipe)) { - printk (KERN_DEBUG __FILE__ ": setup(8):"); + printk (KERN_DEBUG "%s: setup(8):", __FILE__); for (i = 0; i < 8 ; i++) printk (" %02x", ((__u8 *) urb->setup_packet) [i]); printk ("\n"); } if (urb->transfer_buffer_length > 0 && urb->transfer_buffer) { - printk (KERN_DEBUG __FILE__ ": data(%d/%d):", + printk (KERN_DEBUG "%s: data(%d/%d):", __FILE__, urb->actual_length, urb->transfer_buffer_length); len = usb_pipeout (pipe)? diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index 24eb7478191..afe59be2364 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -1051,6 +1051,11 @@ MODULE_LICENSE ("GPL"); #define PLATFORM_DRIVER usb_hcd_pnx4008_driver #endif +#ifdef CONFIG_ARCH_DAVINCI_DA8XX +#include "ohci-da8xx.c" +#define PLATFORM_DRIVER ohci_hcd_da8xx_driver +#endif + #if defined(CONFIG_CPU_SUBTYPE_SH7720) || \ defined(CONFIG_CPU_SUBTYPE_SH7721) || \ defined(CONFIG_CPU_SUBTYPE_SH7763) || \ diff --git a/drivers/usb/host/ohci-lh7a404.c b/drivers/usb/host/ohci-lh7a404.c index de42283149c..18d39f0463e 100644 --- a/drivers/usb/host/ohci-lh7a404.c +++ b/drivers/usb/host/ohci-lh7a404.c @@ -28,8 +28,8 @@ extern int usb_disabled(void); static void lh7a404_start_hc(struct platform_device *dev) { - printk(KERN_DEBUG __FILE__ - ": starting LH7A404 OHCI USB Controller\n"); + printk(KERN_DEBUG "%s: starting LH7A404 OHCI USB Controller\n", + __FILE__); /* * Now, carefully enable the USB clock, and take @@ -39,14 +39,13 @@ static void lh7a404_start_hc(struct platform_device *dev) udelay(1000); USBH_CMDSTATUS = OHCI_HCR; - printk(KERN_DEBUG __FILE__ - ": Clock to USB host has been enabled \n"); + printk(KERN_DEBUG "%s: Clock to USB host has been enabled \n", __FILE__); } static void lh7a404_stop_hc(struct platform_device *dev) { - printk(KERN_DEBUG __FILE__ - ": stopping LH7A404 OHCI USB Controller\n"); + printk(KERN_DEBUG "%s: stopping LH7A404 OHCI USB Controller\n", + __FILE__); CSC_PWRCNT &= ~CSC_PWRCNT_USBH_EN; /* Disable clock */ } diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c index 2769326da42..cd74bbdd007 100644 --- a/drivers/usb/host/ohci-pnx4008.c +++ b/drivers/usb/host/ohci-pnx4008.c @@ -327,7 +327,7 @@ static int __devinit usb_hcd_pnx4008_probe(struct platform_device *pdev) } i2c_adap = i2c_get_adapter(2); memset(&i2c_info, 0, sizeof(struct i2c_board_info)); - strlcpy(i2c_info.name, "isp1301_pnx", I2C_NAME_SIZE); + strlcpy(i2c_info.type, "isp1301_pnx", I2C_NAME_SIZE); isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info, normal_i2c); i2c_put_adapter(i2c_adap); @@ -411,7 +411,7 @@ out3: out2: clk_put(usb_clk); out1: - i2c_unregister_client(isp1301_i2c_client); + i2c_unregister_device(isp1301_i2c_client); isp1301_i2c_client = NULL; out_i2c_driver: i2c_del_driver(&isp1301_driver); @@ -430,7 +430,7 @@ static int usb_hcd_pnx4008_remove(struct platform_device *pdev) pnx4008_unset_usb_bits(); 
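The printk conversions in these OHCI glue files pass __FILE__ as a %s argument instead of building it into the format string. One practical effect, shown with plain printf, is that the format string stays a short fixed literal while the path travels as ordinary data (the paths below are just examples):

#include <stdio.h>

int main(void)
{
    const char *file = "drivers/usb/host/ohci-dbg.c";

    /* New form from the patch: the file name is plain data. */
    printf("%s: request_mem_region failed\n", file);

    /* Old form: __FILE__ was concatenated into the format string itself,
     * shown here with a literal so the example still compiles. */
    printf("drivers/usb/host/ohci-dbg.c" ": request_mem_region failed\n");
    return 0;
}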
clk_disable(usb_clk); clk_put(usb_clk); - i2c_unregister_client(isp1301_i2c_client); + i2c_unregister_device(isp1301_i2c_client); isp1301_i2c_client = NULL; i2c_del_driver(&isp1301_driver); diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c index 68a30171029..103263c230c 100644 --- a/drivers/usb/host/ohci-ppc-of.c +++ b/drivers/usb/host/ohci-ppc-of.c @@ -114,21 +114,21 @@ ohci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match) hcd->rsrc_len = res.end - res.start + 1; if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { - printk(KERN_ERR __FILE__ ": request_mem_region failed\n"); + printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__); rv = -EBUSY; goto err_rmr; } irq = irq_of_parse_and_map(dn, 0); if (irq == NO_IRQ) { - printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n"); + printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__); rv = -EBUSY; goto err_irq; } hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); if (!hcd->regs) { - printk(KERN_ERR __FILE__ ": ioremap failed\n"); + printk(KERN_ERR "%s: ioremap failed\n", __FILE__); rv = -ENOMEM; goto err_ioremap; } @@ -169,7 +169,7 @@ ohci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match) } else release_mem_region(res.start, 0x4); } else - pr_debug(__FILE__ ": cannot get ehci offset from fdt\n"); + pr_debug("%s: cannot get ehci offset from fdt\n", __FILE__); } iounmap(hcd->regs); @@ -212,7 +212,7 @@ static int ohci_hcd_ppc_of_shutdown(struct of_device *op) } -static struct of_device_id ohci_hcd_ppc_of_match[] = { +static const struct of_device_id ohci_hcd_ppc_of_match[] = { #ifdef CONFIG_USB_OHCI_HCD_PPC_OF_BE { .name = "usb", diff --git a/drivers/usb/host/ohci-ppc-soc.c b/drivers/usb/host/ohci-ppc-soc.c index cd3398b675b..89e670e38c1 100644 --- a/drivers/usb/host/ohci-ppc-soc.c +++ b/drivers/usb/host/ohci-ppc-soc.c @@ -41,14 +41,14 @@ static int usb_hcd_ppc_soc_probe(const struct hc_driver *driver, res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) { - pr_debug(__FILE__ ": no irq\n"); + pr_debug("%s: no irq\n", __FILE__); return -ENODEV; } irq = res->start; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { - pr_debug(__FILE__ ": no reg addr\n"); + pr_debug("%s: no reg addr\n", __FILE__); return -ENODEV; } @@ -59,14 +59,14 @@ static int usb_hcd_ppc_soc_probe(const struct hc_driver *driver, hcd->rsrc_len = res->end - res->start + 1; if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { - pr_debug(__FILE__ ": request_mem_region failed\n"); + pr_debug("%s: request_mem_region failed\n", __FILE__); retval = -EBUSY; goto err1; } hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); if (!hcd->regs) { - pr_debug(__FILE__ ": ioremap failed\n"); + pr_debug("%s: ioremap failed\n", __FILE__); retval = -ENOMEM; goto err2; } diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c index e4bbe8e188e..d8eb3bdafab 100644 --- a/drivers/usb/host/ohci-sa1111.c +++ b/drivers/usb/host/ohci-sa1111.c @@ -31,8 +31,8 @@ static void sa1111_start_hc(struct sa1111_dev *dev) { unsigned int usb_rst = 0; - printk(KERN_DEBUG __FILE__ - ": starting SA-1111 OHCI USB Controller\n"); + printk(KERN_DEBUG "%s: starting SA-1111 OHCI USB Controller\n", + __FILE__); #ifdef CONFIG_SA1100_BADGE4 if (machine_is_badge4()) { @@ -65,8 +65,8 @@ static void sa1111_start_hc(struct sa1111_dev *dev) static void sa1111_stop_hc(struct sa1111_dev *dev) { unsigned int usb_rst; - printk(KERN_DEBUG __FILE__ - ": stopping SA-1111 
OHCI USB Controller\n"); + printk(KERN_DEBUG "%s: stopping SA-1111 OHCI USB Controller\n", + __FILE__); /* * Put the USB host controller into reset. diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c index 5b22a4d1c9e..e11cc3aa4b8 100644 --- a/drivers/usb/host/sl811-hcd.c +++ b/drivers/usb/host/sl811-hcd.c @@ -51,6 +51,7 @@ #include <asm/irq.h> #include <asm/system.h> #include <asm/byteorder.h> +#include <asm/unaligned.h> #include "../core/hcd.h" #include "sl811.h" @@ -1272,12 +1273,12 @@ sl811h_hub_control( sl811h_hub_descriptor(sl811, (struct usb_hub_descriptor *) buf); break; case GetHubStatus: - *(__le32 *) buf = cpu_to_le32(0); + put_unaligned_le32(0, buf); break; case GetPortStatus: if (wIndex != 1) goto error; - *(__le32 *) buf = cpu_to_le32(sl811->port1); + put_unaligned_le32(sl811->port1, buf); #ifndef VERBOSE if (*(u16*)(buf+2)) /* only if wPortChange is interesting */ diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c index 99cd00fd351..09197067fe6 100644 --- a/drivers/usb/host/uhci-hcd.c +++ b/drivers/usb/host/uhci-hcd.c @@ -735,6 +735,7 @@ static void uhci_stop(struct usb_hcd *hcd) uhci_hc_died(uhci); uhci_scan_schedule(uhci); spin_unlock_irq(&uhci->lock); + synchronize_irq(hcd->irq); del_timer_sync(&uhci->fsbr_timer); release_uhci(uhci); diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c index 33128d52f21..105fa8b025b 100644 --- a/drivers/usb/host/xhci-dbg.c +++ b/drivers/usb/host/xhci-dbg.c @@ -406,6 +406,25 @@ static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma) } } +char *xhci_get_slot_state(struct xhci_hcd *xhci, + struct xhci_container_ctx *ctx) +{ + struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx); + + switch (GET_SLOT_STATE(slot_ctx->dev_state)) { + case 0: + return "enabled/disabled"; + case 1: + return "default"; + case 2: + return "addressed"; + case 3: + return "configured"; + default: + return "reserved"; + } +} + void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) { /* Fields are 32 bits wide, DMA addresses are in bytes */ diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h index ecc131c3fe3..78c4edac1db 100644 --- a/drivers/usb/host/xhci-ext-caps.h +++ b/drivers/usb/host/xhci-ext-caps.h @@ -101,12 +101,15 @@ static inline int xhci_find_next_cap_offset(void __iomem *base, int ext_offset) next = readl(base + ext_offset); - if (ext_offset == XHCI_HCC_PARAMS_OFFSET) + if (ext_offset == XHCI_HCC_PARAMS_OFFSET) { /* Find the first extended capability */ next = XHCI_HCC_EXT_CAPS(next); - else + ext_offset = 0; + } else { /* Find the next extended capability */ next = XHCI_EXT_CAPS_NEXT(next); + } + if (!next) return 0; /* diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c index 5e92c72df64..4cb69e0af83 100644 --- a/drivers/usb/host/xhci-hcd.c +++ b/drivers/usb/host/xhci-hcd.c @@ -1007,7 +1007,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, * for usb_set_interface() and usb_set_configuration() claim). 
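The sl811 change above replaces a direct __le32 store into the hub-control buffer with put_unaligned_le32(), since that buffer is not guaranteed to be 32-bit aligned. A byte-wise little-endian store shows what the helper amounts to (the kernel's implementation varies by architecture):

#include <stdio.h>
#include <stdint.h>

/* Store a 32-bit value little-endian one byte at a time, so the destination
 * needs no particular alignment. */
static void store_le32(uint32_t val, void *p)
{
    uint8_t *b = p;
    b[0] = val & 0xff;
    b[1] = (val >> 8) & 0xff;
    b[2] = (val >> 16) & 0xff;
    b[3] = (val >> 24) & 0xff;
}

int main(void)
{
    char buf[7] = { 0 };
    store_le32(0x12345678, buf + 1);      /* deliberately misaligned destination */
    for (int i = 1; i <= 4; i++)
        printf("%02x ", (unsigned char)buf[i]);
    printf("\n");                         /* prints: 78 56 34 12 */
    return 0;
}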
*/ if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id], - udev, ep, GFP_KERNEL) < 0) { + udev, ep, GFP_NOIO) < 0) { dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n", __func__, ep->desc.bEndpointAddress); return -ENOMEM; @@ -1181,6 +1181,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, ret = xhci_queue_evaluate_context(xhci, in_ctx->dma, udev->slot_id); if (ret < 0) { + if (command) + list_del(&command->cmd_list); spin_unlock_irqrestore(&xhci->lock, flags); xhci_dbg(xhci, "FIXME allocate a new ring segment\n"); return -ENOMEM; @@ -1264,30 +1266,13 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) xhci_zero_in_ctx(xhci, virt_dev); /* Install new rings and free or cache any old rings */ for (i = 1; i < 31; ++i) { - int rings_cached; - if (!virt_dev->eps[i].new_ring) continue; /* Only cache or free the old ring if it exists. * It may not if this is the first add of an endpoint. */ if (virt_dev->eps[i].ring) { - rings_cached = virt_dev->num_rings_cached; - if (rings_cached < XHCI_MAX_RINGS_CACHED) { - virt_dev->num_rings_cached++; - rings_cached = virt_dev->num_rings_cached; - virt_dev->ring_cache[rings_cached] = - virt_dev->eps[i].ring; - xhci_dbg(xhci, "Cached old ring, " - "%d ring%s cached\n", - rings_cached, - (rings_cached > 1) ? "s" : ""); - } else { - xhci_ring_free(xhci, virt_dev->eps[i].ring); - xhci_dbg(xhci, "Ring cache full (%d rings), " - "freeing ring\n", - virt_dev->num_rings_cached); - } + xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); } virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; virt_dev->eps[i].new_ring = NULL; @@ -1458,6 +1443,131 @@ void xhci_endpoint_reset(struct usb_hcd *hcd, } /* + * This submits a Reset Device Command, which will set the device state to 0, + * set the device address to 0, and disable all the endpoints except the default + * control endpoint. The USB core should come back and call + * xhci_address_device(), and then re-set up the configuration. If this is + * called because of a usb_reset_and_verify_device(), then the old alternate + * settings will be re-installed through the normal bandwidth allocation + * functions. + * + * Wait for the Reset Device command to finish. Remove all structures + * associated with the endpoints that were disabled. Clear the input device + * structure? Cache the rings? Reset the control endpoint 0 max packet size? + */ +int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev) +{ + int ret, i; + unsigned long flags; + struct xhci_hcd *xhci; + unsigned int slot_id; + struct xhci_virt_device *virt_dev; + struct xhci_command *reset_device_cmd; + int timeleft; + int last_freed_endpoint; + + ret = xhci_check_args(hcd, udev, NULL, 0, __func__); + if (ret <= 0) + return ret; + xhci = hcd_to_xhci(hcd); + slot_id = udev->slot_id; + virt_dev = xhci->devs[slot_id]; + if (!virt_dev) { + xhci_dbg(xhci, "%s called with invalid slot ID %u\n", + __func__, slot_id); + return -EINVAL; + } + + xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); + /* Allocate the command structure that holds the struct completion. + * Assume we're in process context, since the normal device reset + * process has to wait for the device anyway. Storage devices are + * reset as part of error handling, so use GFP_NOIO instead of + * GFP_KERNEL. 
+ */ + reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO); + if (!reset_device_cmd) { + xhci_dbg(xhci, "Couldn't allocate command structure.\n"); + return -ENOMEM; + } + + /* Attempt to submit the Reset Device command to the command ring */ + spin_lock_irqsave(&xhci->lock, flags); + reset_device_cmd->command_trb = xhci->cmd_ring->enqueue; + list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list); + ret = xhci_queue_reset_device(xhci, slot_id); + if (ret) { + xhci_dbg(xhci, "FIXME: allocate a command ring segment\n"); + list_del(&reset_device_cmd->cmd_list); + spin_unlock_irqrestore(&xhci->lock, flags); + goto command_cleanup; + } + xhci_ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + + /* Wait for the Reset Device command to finish */ + timeleft = wait_for_completion_interruptible_timeout( + reset_device_cmd->completion, + USB_CTRL_SET_TIMEOUT); + if (timeleft <= 0) { + xhci_warn(xhci, "%s while waiting for reset device command\n", + timeleft == 0 ? "Timeout" : "Signal"); + spin_lock_irqsave(&xhci->lock, flags); + /* The timeout might have raced with the event ring handler, so + * only delete from the list if the item isn't poisoned. + */ + if (reset_device_cmd->cmd_list.next != LIST_POISON1) + list_del(&reset_device_cmd->cmd_list); + spin_unlock_irqrestore(&xhci->lock, flags); + ret = -ETIME; + goto command_cleanup; + } + + /* The Reset Device command can't fail, according to the 0.95/0.96 spec, + * unless we tried to reset a slot ID that wasn't enabled, + * or the device wasn't in the addressed or configured state. + */ + ret = reset_device_cmd->status; + switch (ret) { + case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */ + case COMP_CTX_STATE: /* 0.96 completion code for same thing */ + xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n", + slot_id, + xhci_get_slot_state(xhci, virt_dev->out_ctx)); + xhci_info(xhci, "Not freeing device rings.\n"); + /* Don't treat this as an error. May change my mind later. */ + ret = 0; + goto command_cleanup; + case COMP_SUCCESS: + xhci_dbg(xhci, "Successful reset device command.\n"); + break; + default: + if (xhci_is_vendor_info_code(xhci, ret)) + break; + xhci_warn(xhci, "Unknown completion code %u for " + "reset device command.\n", ret); + ret = -EINVAL; + goto command_cleanup; + } + + /* Everything but endpoint 0 is disabled, so free or cache the rings. */ + last_freed_endpoint = 1; + for (i = 1; i < 31; ++i) { + if (!virt_dev->eps[i].ring) + continue; + xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); + last_freed_endpoint = i; + } + xhci_dbg(xhci, "Output context after successful reset device cmd:\n"); + xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint); + ret = 0; + +command_cleanup: + xhci_free_command(xhci, reset_device_cmd); + return ret; +} + +/* * At this point, the struct usb_device is about to go away, the device has * disconnected, and all traffic has been stopped and the endpoints have been * disabled. Free any HC data structures associated with that device. 
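After the Reset Device command completes, xhci_reset_device() sorts the completion code into three outcomes: a slot that was not in a resettable state is quietly ignored, success means every ring except endpoint 0's gets freed or cached, and anything else is an error unless it is a vendor code. A sketch of that decision, using made-up DEMO_COMP_* values rather than the real constants from xhci.h:

#include <stdio.h>

/* Illustrative stand-ins for the COMP_* codes in xhci.h (real values differ). */
enum { DEMO_COMP_SUCCESS = 1, DEMO_COMP_EBADSLT = 2, DEMO_COMP_CTX_STATE = 3 };

/* Outcome of the completion handling:
 *   0  -> nothing to do, keep the rings
 *   1  -> success, free or cache every ring except endpoint 0's
 *  -1  -> unexpected code (vendor-specific codes are checked separately) */
static int handle_reset_completion(int status)
{
    switch (status) {
    case DEMO_COMP_EBADSLT:     /* slot not enabled (0.95 controllers) */
    case DEMO_COMP_CTX_STATE:   /* device not addressed or configured (0.96) */
        return 0;
    case DEMO_COMP_SUCCESS:
        return 1;
    default:
        return -1;
    }
}

int main(void)
{
    printf("%d %d %d\n", handle_reset_completion(DEMO_COMP_EBADSLT),
           handle_reset_completion(DEMO_COMP_SUCCESS),
           handle_reset_completion(99));
    return 0;
}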
@@ -1694,7 +1804,7 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, xhci_warn(xhci, "Cannot update hub desc for unknown device.\n"); return -EINVAL; } - config_cmd = xhci_alloc_command(xhci, true, mem_flags); + config_cmd = xhci_alloc_command(xhci, true, true, mem_flags); if (!config_cmd) { xhci_dbg(xhci, "Could not allocate xHCI command structure.\n"); return -ENOMEM; diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index eac5b53aa9e..208b805b80e 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -129,6 +129,50 @@ static u32 xhci_port_state_to_neutral(u32 state) return (state & XHCI_PORT_RO) | (state & XHCI_PORT_RWS); } +static void xhci_disable_port(struct xhci_hcd *xhci, u16 wIndex, + u32 __iomem *addr, u32 port_status) +{ + /* Write 1 to disable the port */ + xhci_writel(xhci, port_status | PORT_PE, addr); + port_status = xhci_readl(xhci, addr); + xhci_dbg(xhci, "disable port, actual port %d status = 0x%x\n", + wIndex, port_status); +} + +static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue, + u16 wIndex, u32 __iomem *addr, u32 port_status) +{ + char *port_change_bit; + u32 status; + + switch (wValue) { + case USB_PORT_FEAT_C_RESET: + status = PORT_RC; + port_change_bit = "reset"; + break; + case USB_PORT_FEAT_C_CONNECTION: + status = PORT_CSC; + port_change_bit = "connect"; + break; + case USB_PORT_FEAT_C_OVER_CURRENT: + status = PORT_OCC; + port_change_bit = "over-current"; + break; + case USB_PORT_FEAT_C_ENABLE: + status = PORT_PEC; + port_change_bit = "enable/disable"; + break; + default: + /* Should never happen */ + return; + } + /* Change bits are all write 1 to clear */ + xhci_writel(xhci, port_status | status, addr); + port_status = xhci_readl(xhci, addr); + xhci_dbg(xhci, "clear port %s change, actual port %d status = 0x%x\n", + port_change_bit, wIndex, port_status); +} + int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength) { @@ -138,7 +182,6 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u32 temp, status; int retval = 0; u32 __iomem *addr; - char *port_change_bit; ports = HCS_MAX_PORTS(xhci->hcs_params1); @@ -229,26 +272,18 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, temp = xhci_port_state_to_neutral(temp); switch (wValue) { case USB_PORT_FEAT_C_RESET: - status = PORT_RC; - port_change_bit = "reset"; - break; case USB_PORT_FEAT_C_CONNECTION: - status = PORT_CSC; - port_change_bit = "connect"; - break; case USB_PORT_FEAT_C_OVER_CURRENT: - status = PORT_OCC; - port_change_bit = "over-current"; + case USB_PORT_FEAT_C_ENABLE: + xhci_clear_port_change_bit(xhci, wValue, wIndex, + addr, temp); + break; + case USB_PORT_FEAT_ENABLE: + xhci_disable_port(xhci, wIndex, addr, temp); break; default: goto error; } - /* Change bits are all write 1 to clear */ - xhci_writel(xhci, temp | status, addr); - temp = xhci_readl(xhci, addr); - xhci_dbg(xhci, "clear port %s change, actual port %d status = 0x%x\n", - port_change_bit, wIndex, temp); - temp = xhci_readl(xhci, addr); /* unblock any posted writes */ break; default: error: diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index bffcef7a554..49f7d72f8b1 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -198,6 +198,31 @@ fail: return 0; } +void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev, + unsigned int ep_index) +{ + int rings_cached; + + rings_cached = 
virt_dev->num_rings_cached; + if (rings_cached < XHCI_MAX_RINGS_CACHED) { + virt_dev->num_rings_cached++; + rings_cached = virt_dev->num_rings_cached; + virt_dev->ring_cache[rings_cached] = + virt_dev->eps[ep_index].ring; + xhci_dbg(xhci, "Cached old ring, " + "%d ring%s cached\n", + rings_cached, + (rings_cached > 1) ? "s" : ""); + } else { + xhci_ring_free(xhci, virt_dev->eps[ep_index].ring); + xhci_dbg(xhci, "Ring cache full (%d rings), " + "freeing ring\n", + virt_dev->num_rings_cached); + } + virt_dev->eps[ep_index].ring = NULL; +} + /* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue * pointers to the beginning of the ring. */ @@ -242,6 +267,8 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, void xhci_free_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) { + if (!ctx) + return; dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma); kfree(ctx); } @@ -427,7 +454,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud case USB_SPEED_LOW: slot_ctx->dev_info |= (u32) SLOT_SPEED_LS; break; - case USB_SPEED_VARIABLE: + case USB_SPEED_WIRELESS: xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); return -EINVAL; break; @@ -471,7 +498,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud case USB_SPEED_LOW: ep0_ctx->ep_info2 |= MAX_PACKET(8); break; - case USB_SPEED_VARIABLE: + case USB_SPEED_WIRELESS: xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); return -EINVAL; break; @@ -819,7 +846,8 @@ static void scratchpad_free(struct xhci_hcd *xhci) } struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, - bool allocate_completion, gfp_t mem_flags) + bool allocate_in_ctx, bool allocate_completion, + gfp_t mem_flags) { struct xhci_command *command; @@ -827,11 +855,14 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, if (!command) return NULL; - command->in_ctx = - xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags); - if (!command->in_ctx) { - kfree(command); - return NULL; + if (allocate_in_ctx) { + command->in_ctx = + xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, + mem_flags); + if (!command->in_ctx) { + kfree(command); + return NULL; + } } if (allocate_completion) { diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index e097008d6fb..417d37aff8d 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -139,6 +139,7 @@ static const struct hc_driver xhci_pci_hc_driver = { .reset_bandwidth = xhci_reset_bandwidth, .address_device = xhci_address_device, .update_hub_device = xhci_update_hub_device, + .reset_device = xhci_reset_device, /* * scheduling support diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index ee7bc7ecbc5..6ba841bca4a 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -953,6 +953,17 @@ bandwidth_change: case TRB_TYPE(TRB_RESET_EP): handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue); break; + case TRB_TYPE(TRB_RESET_DEV): + xhci_dbg(xhci, "Completed reset device command.\n"); + slot_id = TRB_TO_SLOT_ID( + xhci->cmd_ring->dequeue->generic.field[3]); + virt_dev = xhci->devs[slot_id]; + if (virt_dev) + handle_cmd_in_cmd_wait_list(xhci, virt_dev, event); + else + xhci_warn(xhci, "Reset device command completion " + "for disabled slot %u\n", slot_id); + break; default: /* Skip over unknown commands on the event ring */ xhci->error_bitmask |= 1 << 6; @@ -1080,6 +1091,20 @@ static 
int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci, return 0; } +int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code) +{ + if (trb_comp_code >= 224 && trb_comp_code <= 255) { + /* Vendor defined "informational" completion code, + * treat as not-an-error. + */ + xhci_dbg(xhci, "Vendor defined info completion code %u\n", + trb_comp_code); + xhci_dbg(xhci, "Treating code as success.\n"); + return 1; + } + return 0; +} + /* * If this function returns an error condition, it means it got a Transfer * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address. @@ -1196,13 +1221,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, status = -ENOSR; break; default: - if (trb_comp_code >= 224 && trb_comp_code <= 255) { - /* Vendor defined "informational" completion code, - * treat as not-an-error. - */ - xhci_dbg(xhci, "Vendor defined info completion code %u\n", - trb_comp_code); - xhci_dbg(xhci, "Treating code as success.\n"); + if (xhci_is_vendor_info_code(xhci, trb_comp_code)) { status = 0; break; } @@ -2181,6 +2200,14 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, false); } +/* Queue a reset device command TRB */ +int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id) +{ + return queue_command(xhci, 0, 0, 0, + TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id), + false); +} + /* Queue a configure endpoint command TRB */ int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed) diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 877813505ef..e5eb09b2f38 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1210,6 +1210,8 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst); void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci); void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring); void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep); +char *xhci_get_slot_state(struct xhci_hcd *xhci, + struct xhci_container_ctx *ctx); /* xHCI memory management */ void xhci_mem_cleanup(struct xhci_hcd *xhci); @@ -1233,8 +1235,12 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_device *udev, struct usb_host_endpoint *ep, gfp_t mem_flags); void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring); +void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev, + unsigned int ep_index); struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, - bool allocate_completion, gfp_t mem_flags); + bool allocate_in_ctx, bool allocate_completion, + gfp_t mem_flags); void xhci_free_command(struct xhci_hcd *xhci, struct xhci_command *command); @@ -1264,6 +1270,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status); int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep); +int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev); int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); @@ -1272,6 +1279,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb); struct xhci_segment *trb_in_td(struct xhci_segment *start_seg, union xhci_trb 
*start_trb, union xhci_trb *end_trb, dma_addr_t suspect_dma); +int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code); void xhci_ring_cmd_db(struct xhci_hcd *xhci); void *xhci_setup_one_noop(struct xhci_hcd *xhci); void xhci_handle_event(struct xhci_hcd *xhci); @@ -1293,6 +1301,7 @@ int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id); int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id, unsigned int ep_index); +int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id); void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, struct xhci_td *cur_td, struct xhci_dequeue_state *state); diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c index eca355dccf6..e192e8f7c56 100644 --- a/drivers/usb/image/mdc800.c +++ b/drivers/usb/image/mdc800.c @@ -967,7 +967,7 @@ static const struct file_operations mdc800_device_ops = -static struct usb_device_id mdc800_table [] = { +static const struct usb_device_id mdc800_table[] = { { USB_DEVICE(MDC800_VENDOR_ID, MDC800_PRODUCT_ID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c index 459a7287fe0..3a6bcd5fee0 100644 --- a/drivers/usb/image/microtek.c +++ b/drivers/usb/image/microtek.c @@ -155,7 +155,7 @@ static int mts_usb_probe(struct usb_interface *intf, const struct usb_device_id *id); static void mts_usb_disconnect(struct usb_interface *intf); -static struct usb_device_id mts_usb_ids []; +static const struct usb_device_id mts_usb_ids[]; static struct usb_driver mts_usb_driver = { .name = "microtekX6", @@ -656,7 +656,7 @@ static struct scsi_host_template mts_scsi_host_template = { /* The entries of microtek_table must correspond, line-by-line to the entries of mts_supported_products[]. */ -static struct usb_device_id mts_usb_ids [] = +static const struct usb_device_id mts_usb_ids[] = { { USB_DEVICE(0x4ce, 0x0300) }, { USB_DEVICE(0x5da, 0x0094) }, diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig index abe3aa67ed0..55660eaf947 100644 --- a/drivers/usb/misc/Kconfig +++ b/drivers/usb/misc/Kconfig @@ -87,17 +87,6 @@ config USB_LCD To compile this driver as a module, choose M here: the module will be called usblcd. -config USB_BERRY_CHARGE - tristate "USB BlackBerry recharge support" - depends on USB - help - Say Y here if you want to connect a BlackBerry device to your - computer's USB port and have it automatically switch to "recharge" - mode. - - To compile this driver as a module, choose M here: the - module will be called berry_charge. - config USB_LED tristate "USB LED driver support" depends on USB @@ -242,17 +231,3 @@ config USB_ISIGHTFW driver beforehand. Tools for doing so are available at http://bersace03.free.fr -config USB_VST - tristate "USB VST driver" - depends on USB - help - This driver is intended for Vernier Software Technologies - bulk usb devices such as their Ocean-Optics spectrometers or - Labquest. - It is a bulk channel driver with configurable read and write - timeouts. - - To compile this driver as a module, choose M here: the - module will be called vstusb. 
- - diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile index 0826aab8303..717703e8142 100644 --- a/drivers/usb/misc/Makefile +++ b/drivers/usb/misc/Makefile @@ -5,7 +5,6 @@ obj-$(CONFIG_USB_ADUTUX) += adutux.o obj-$(CONFIG_USB_APPLEDISPLAY) += appledisplay.o -obj-$(CONFIG_USB_BERRY_CHARGE) += berry_charge.o obj-$(CONFIG_USB_CYPRESS_CY7C63)+= cypress_cy7c63.o obj-$(CONFIG_USB_CYTHERM) += cytherm.o obj-$(CONFIG_USB_EMI26) += emi26.o @@ -23,7 +22,6 @@ obj-$(CONFIG_USB_TEST) += usbtest.o obj-$(CONFIG_USB_TRANCEVIBRATOR) += trancevibrator.o obj-$(CONFIG_USB_USS720) += uss720.o obj-$(CONFIG_USB_SEVSEG) += usbsevseg.o -obj-$(CONFIG_USB_VST) += vstusb.o obj-$(CONFIG_USB_SISUSBVGA) += sisusbvga/ diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c index 20352654201..d240de097c6 100644 --- a/drivers/usb/misc/adutux.c +++ b/drivers/usb/misc/adutux.c @@ -38,7 +38,7 @@ static int debug = 1; #define dbg(lvl, format, arg...) \ do { \ if (debug >= lvl) \ - printk(KERN_DEBUG __FILE__ " : " format " \n", ## arg); \ + printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg); \ } while (0) @@ -56,7 +56,7 @@ MODULE_PARM_DESC(debug, "Debug enabled or not"); #define ADU_PRODUCT_ID 0x0064 /* table of devices that work with this driver */ -static struct usb_device_id device_table [] = { +static const struct usb_device_id device_table[] = { { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID) }, /* ADU100 */ { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+20) }, /* ADU120 */ { USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+30) }, /* ADU130 */ @@ -132,8 +132,8 @@ static void adu_debug_data(int level, const char *function, int size, if (debug < level) return; - printk(KERN_DEBUG __FILE__": %s - length = %d, data = ", - function, size); + printk(KERN_DEBUG "%s: %s - length = %d, data = ", + __FILE__, function, size); for (i = 0; i < size; ++i) printk("%.2x ", data[i]); printk("\n"); diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c index 1eb9e4162cc..4d2952f1fb1 100644 --- a/drivers/usb/misc/appledisplay.c +++ b/drivers/usb/misc/appledisplay.c @@ -57,7 +57,7 @@ .bInterfaceProtocol = 0x00 /* table of devices that work with this driver */ -static struct usb_device_id appledisplay_table [] = { +static const struct usb_device_id appledisplay_table[] = { { APPLEDISPLAY_DEVICE(0x9218) }, { APPLEDISPLAY_DEVICE(0x9219) }, { APPLEDISPLAY_DEVICE(0x921c) }, @@ -179,7 +179,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd) return pdata->msgdata[1]; } -static struct backlight_ops appledisplay_bl_data = { +static const struct backlight_ops appledisplay_bl_data = { .get_brightness = appledisplay_bl_get_brightness, .update_status = appledisplay_bl_update_status, }; @@ -283,6 +283,7 @@ static int appledisplay_probe(struct usb_interface *iface, &appledisplay_bl_data); if (IS_ERR(pdata->bd)) { dev_err(&iface->dev, "Backlight registration failed\n"); + retval = PTR_ERR(pdata->bd); goto error; } diff --git a/drivers/usb/misc/berry_charge.c b/drivers/usb/misc/berry_charge.c deleted file mode 100644 index c05a85bc592..00000000000 --- a/drivers/usb/misc/berry_charge.c +++ /dev/null @@ -1,183 +0,0 @@ -/* - * USB BlackBerry charging module - * - * Copyright (C) 2007 Greg Kroah-Hartman <gregkh@suse.de> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation, version 2. 
- * - * Information on how to switch configs was taken by the bcharge.cc file - * created by the barry.sf.net project. - * - * bcharge.cc has the following copyright: - * Copyright (C) 2006, Net Direct Inc. (http://www.netdirect.ca/) - * and is released under the GPLv2. - * - * - */ - -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/usb.h> - -#define RIM_VENDOR 0x0fca -#define BLACKBERRY 0x0001 -#define BLACKBERRY_PEARL_DUAL 0x0004 -#define BLACKBERRY_PEARL 0x0006 - -static int debug; -static int pearl_dual_mode = 1; - -#ifdef dbg -#undef dbg -#endif -#define dbg(dev, format, arg...) \ - if (debug) \ - dev_printk(KERN_DEBUG , dev , format , ## arg) - -static struct usb_device_id id_table [] = { - { USB_DEVICE(RIM_VENDOR, BLACKBERRY) }, - { USB_DEVICE(RIM_VENDOR, BLACKBERRY_PEARL) }, - { USB_DEVICE(RIM_VENDOR, BLACKBERRY_PEARL_DUAL) }, - { }, /* Terminating entry */ -}; -MODULE_DEVICE_TABLE(usb, id_table); - -static int magic_charge(struct usb_device *udev) -{ - char *dummy_buffer = kzalloc(2, GFP_KERNEL); - int retval; - - if (!dummy_buffer) - return -ENOMEM; - - /* send two magic commands and then set the configuration. The device - * will then reset itself with the new power usage and should start - * charging. */ - - /* Note, with testing, it only seems that the first message is really - * needed (at least for the 8700c), but to be safe, we emulate what - * other operating systems seem to be sending to their device. We - * really need to get some specs for this device to be sure about what - * is going on here. - */ - dbg(&udev->dev, "Sending first magic command\n"); - retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), - 0xa5, 0xc0, 0, 1, dummy_buffer, 2, 100); - if (retval != 2) { - dev_err(&udev->dev, "First magic command failed: %d.\n", - retval); - goto exit; - } - - dbg(&udev->dev, "Sending second magic command\n"); - retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), - 0xa2, 0x40, 0, 1, dummy_buffer, 0, 100); - if (retval != 0) { - dev_err(&udev->dev, "Second magic command failed: %d.\n", - retval); - goto exit; - } - - dbg(&udev->dev, "Calling set_configuration\n"); - retval = usb_driver_set_configuration(udev, 1); - if (retval) - dev_err(&udev->dev, "Set Configuration failed :%d.\n", retval); - -exit: - kfree(dummy_buffer); - return retval; -} - -static int magic_dual_mode(struct usb_device *udev) -{ - char *dummy_buffer = kzalloc(2, GFP_KERNEL); - int retval; - - if (!dummy_buffer) - return -ENOMEM; - - /* send magic command so that the Blackberry Pearl device exposes - * two interfaces: both the USB mass-storage one and one which can - * be used for database access. 
*/ - dbg(&udev->dev, "Sending magic pearl command\n"); - retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), - 0xa9, 0xc0, 1, 1, dummy_buffer, 2, 100); - dbg(&udev->dev, "Magic pearl command returned %d\n", retval); - - dbg(&udev->dev, "Calling set_configuration\n"); - retval = usb_driver_set_configuration(udev, 1); - if (retval) - dev_err(&udev->dev, "Set Configuration failed :%d.\n", retval); - - kfree(dummy_buffer); - return retval; -} - -static int berry_probe(struct usb_interface *intf, - const struct usb_device_id *id) -{ - struct usb_device *udev = interface_to_usbdev(intf); - - if (udev->bus_mA < 500) { - dbg(&udev->dev, "Not enough power to charge available\n"); - return -ENODEV; - } - - dbg(&udev->dev, "Power is set to %dmA\n", - udev->actconfig->desc.bMaxPower * 2); - - /* check the power usage so we don't try to enable something that is - * already enabled */ - if ((udev->actconfig->desc.bMaxPower * 2) == 500) { - dbg(&udev->dev, "device is already charging, power is " - "set to %dmA\n", udev->actconfig->desc.bMaxPower * 2); - return -ENODEV; - } - - /* turn the power on */ - magic_charge(udev); - - if ((le16_to_cpu(udev->descriptor.idProduct) == BLACKBERRY_PEARL) && - (pearl_dual_mode)) - magic_dual_mode(udev); - - /* we don't really want to bind to the device, userspace programs can - * handle the syncing just fine, so get outta here. */ - return -ENODEV; -} - -static void berry_disconnect(struct usb_interface *intf) -{ -} - -static struct usb_driver berry_driver = { - .name = "berry_charge", - .probe = berry_probe, - .disconnect = berry_disconnect, - .id_table = id_table, -}; - -static int __init berry_init(void) -{ - return usb_register(&berry_driver); -} - -static void __exit berry_exit(void) -{ - usb_deregister(&berry_driver); -} - -module_init(berry_init); -module_exit(berry_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@suse.de>"); -module_param(debug, bool, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(debug, "Debug enabled or not"); -module_param(pearl_dual_mode, bool, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(pearl_dual_mode, "Change Blackberry Pearl to run in dual mode"); diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c index 5720bfef6a3..1547d8cac5f 100644 --- a/drivers/usb/misc/cypress_cy7c63.c +++ b/drivers/usb/misc/cypress_cy7c63.c @@ -56,7 +56,7 @@ /* table of devices that work with this driver */ -static struct usb_device_id cypress_table [] = { +static const struct usb_device_id cypress_table[] = { { USB_DEVICE(CYPRESS_VENDOR_ID, CYPRESS_PRODUCT_ID) }, { } }; diff --git a/drivers/usb/misc/cytherm.c b/drivers/usb/misc/cytherm.c index 4fb3c38b924..b9cbbbda824 100644 --- a/drivers/usb/misc/cytherm.c +++ b/drivers/usb/misc/cytherm.c @@ -27,7 +27,7 @@ #define USB_SKEL_VENDOR_ID 0x04b4 #define USB_SKEL_PRODUCT_ID 0x0002 -static struct usb_device_id id_table [] = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) }, { } }; diff --git a/drivers/usb/misc/emi26.c b/drivers/usb/misc/emi26.c index 879a980ca8c..a6521c95f68 100644 --- a/drivers/usb/misc/emi26.c +++ b/drivers/usb/misc/emi26.c @@ -245,7 +245,7 @@ wraperr: return err; } -static struct usb_device_id id_table [] = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(EMI26_VENDOR_ID, EMI26_PRODUCT_ID) }, { USB_DEVICE(EMI26_VENDOR_ID, EMI26B_PRODUCT_ID) }, { } /* Terminating entry */ diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c index 59860b32853..fc15ad4c313 100644 --- 
a/drivers/usb/misc/emi62.c +++ b/drivers/usb/misc/emi62.c @@ -259,7 +259,7 @@ wraperr: return err; } -static __devinitdata struct usb_device_id id_table [] = { +static const struct usb_device_id id_table[] __devinitconst = { { USB_DEVICE(EMI62_VENDOR_ID, EMI62_PRODUCT_ID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c index 9d0675ed0d4..1edb6d36189 100644 --- a/drivers/usb/misc/ftdi-elan.c +++ b/drivers/usb/misc/ftdi-elan.c @@ -86,7 +86,7 @@ static struct list_head ftdi_static_list; #define USB_FTDI_ELAN_VENDOR_ID 0x0403 #define USB_FTDI_ELAN_PRODUCT_ID 0xd6ea /* table of devices that work with this driver*/ -static struct usb_device_id ftdi_elan_table[] = { +static const struct usb_device_id ftdi_elan_table[] = { {USB_DEVICE(USB_FTDI_ELAN_VENDOR_ID, USB_FTDI_ELAN_PRODUCT_ID)}, { /* Terminating entry */ } }; @@ -623,9 +623,12 @@ static void ftdi_elan_status_work(struct work_struct *work) */ static int ftdi_elan_open(struct inode *inode, struct file *file) { - int subminor = iminor(inode); - struct usb_interface *interface = usb_find_interface(&ftdi_elan_driver, - subminor); + int subminor; + struct usb_interface *interface; + + subminor = iminor(inode); + interface = usb_find_interface(&ftdi_elan_driver, subminor); + if (!interface) { printk(KERN_ERR "can't find device for minor %d\n", subminor); return -ENODEV; diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c index 1337a9ce80b..a54c3cb804c 100644 --- a/drivers/usb/misc/idmouse.c +++ b/drivers/usb/misc/idmouse.c @@ -48,7 +48,7 @@ #define ID_CHERRY 0x0010 /* device ID table */ -static struct usb_device_id idmouse_table[] = { +static const struct usb_device_id idmouse_table[] = { {USB_DEVICE(ID_SIEMENS, ID_IDMOUSE)}, /* Siemens ID Mouse (Professional) */ {USB_DEVICE(ID_SIEMENS, ID_CHERRY )}, /* Cherry FingerTIP ID Board */ {} /* terminating null entry */ diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c index e75bb87ee92..d3c85236388 100644 --- a/drivers/usb/misc/iowarrior.c +++ b/drivers/usb/misc/iowarrior.c @@ -139,7 +139,7 @@ static int usb_set_report(struct usb_interface *intf, unsigned char type, /* driver registration */ /*---------------------*/ /* table of devices that work with this driver */ -static struct usb_device_id iowarrior_ids[] = { +static const struct usb_device_id iowarrior_ids[] = { {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW40)}, {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24)}, {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV1)}, @@ -602,10 +602,12 @@ static int iowarrior_open(struct inode *inode, struct file *file) dbg("%s", __func__); + lock_kernel(); subminor = iminor(inode); interface = usb_find_interface(&iowarrior_driver, subminor); if (!interface) { + unlock_kernel(); err("%s - error, can't find device for minor %d", __func__, subminor); return -ENODEV; @@ -615,6 +617,7 @@ static int iowarrior_open(struct inode *inode, struct file *file) dev = usb_get_intfdata(interface); if (!dev) { mutex_unlock(&iowarrior_open_disc_lock); + unlock_kernel(); return -ENODEV; } @@ -641,6 +644,7 @@ static int iowarrior_open(struct inode *inode, struct file *file) out: mutex_unlock(&dev->mutex); + unlock_kernel(); return retval; } diff --git a/drivers/usb/misc/isight_firmware.c b/drivers/usb/misc/isight_firmware.c index b897f6554ec..06e990adc6c 100644 --- a/drivers/usb/misc/isight_firmware.c +++ b/drivers/usb/misc/isight_firmware.c @@ -26,7 +26,7 @@ #include 
<linux/errno.h> #include <linux/module.h> -static struct usb_device_id id_table[] = { +static const struct usb_device_id id_table[] = { {USB_DEVICE(0x05ac, 0x8300)}, {}, }; @@ -112,6 +112,8 @@ out: return ret; } +MODULE_FIRMWARE("isight.fw"); + static void isight_firmware_disconnect(struct usb_interface *intf) { } diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c index 90f130126c1..dd41d871004 100644 --- a/drivers/usb/misc/ldusb.c +++ b/drivers/usb/misc/ldusb.c @@ -69,7 +69,7 @@ #endif /* table of devices that work with this driver */ -static struct usb_device_id ld_usb_table [] = { +static const struct usb_device_id ld_usb_table[] = { { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY) }, { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY) }, { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY) }, @@ -798,7 +798,7 @@ static int __init ld_usb_init(void) /* register this driver with the USB subsystem */ retval = usb_register(&ld_usb_driver); if (retval) - err("usb_register failed for the "__FILE__" driver. Error number %d\n", retval); + err("usb_register failed for the %s driver. Error number %d\n", __FILE__, retval); return retval; } diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c index faa6d623de7..8547bf9e317 100644 --- a/drivers/usb/misc/legousbtower.c +++ b/drivers/usb/misc/legousbtower.c @@ -95,8 +95,11 @@ /* Use our own dbg macro */ #undef dbg -#define dbg(lvl, format, arg...) do { if (debug >= lvl) printk(KERN_DEBUG __FILE__ ": " format "\n", ## arg); } while (0) - +#define dbg(lvl, format, arg...) \ +do { \ + if (debug >= lvl) \ + printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg); \ +} while (0) /* Version Information */ #define DRIVER_VERSION "v0.96" @@ -192,7 +195,7 @@ struct tower_get_version_reply { /* table of devices that work with this driver */ -static struct usb_device_id tower_table [] = { +static const struct usb_device_id tower_table[] = { { USB_DEVICE(LEGO_USB_TOWER_VENDOR_ID, LEGO_USB_TOWER_PRODUCT_ID) }, { } /* Terminating entry */ }; @@ -302,7 +305,7 @@ static inline void lego_usb_tower_debug_data (int level, const char *function, i if (debug < level) return; - printk (KERN_DEBUG __FILE__": %s - length = %d, data = ", function, size); + printk (KERN_DEBUG "%s: %s - length = %d, data = ", __FILE__, function, size); for (i = 0; i < size; ++i) { printk ("%.2x ", data[i]); } @@ -1055,7 +1058,7 @@ static int __init lego_usb_tower_init(void) /* register this driver with the USB subsystem */ result = usb_register(&tower_driver); if (result < 0) { - err("usb_register failed for the "__FILE__" driver. Error number %d", result); + err("usb_register failed for the %s driver. 
Error number %d", __FILE__, result); retval = -1; goto exit; } diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c index 32d0199d0c3..a85771b1563 100644 --- a/drivers/usb/misc/rio500.c +++ b/drivers/usb/misc/rio500.c @@ -78,10 +78,13 @@ static int open_rio(struct inode *inode, struct file *file) { struct rio_usb_data *rio = &rio_instance; + /* against disconnect() */ + lock_kernel(); mutex_lock(&(rio->lock)); if (rio->isopen || !rio->present) { mutex_unlock(&(rio->lock)); + unlock_kernel(); return -EBUSY; } rio->isopen = 1; @@ -91,6 +94,7 @@ static int open_rio(struct inode *inode, struct file *file) mutex_unlock(&(rio->lock)); dev_info(&rio->rio_dev->dev, "Rio opened.\n"); + unlock_kernel(); return 0; } @@ -115,7 +119,6 @@ static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg) int retries; int retval=0; - lock_kernel(); mutex_lock(&(rio->lock)); /* Sanity check to make sure rio is connected, powered, etc */ if (rio->present == 0 || rio->rio_dev == NULL) { @@ -254,7 +257,6 @@ static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg) err_out: mutex_unlock(&(rio->lock)); - unlock_kernel(); return retval; } @@ -489,6 +491,7 @@ static void disconnect_rio(struct usb_interface *intf) struct rio_usb_data *rio = usb_get_intfdata (intf); usb_set_intfdata (intf, NULL); + lock_kernel(); if (rio) { usb_deregister_dev(intf, &usb_rio_class); @@ -498,6 +501,7 @@ static void disconnect_rio(struct usb_interface *intf) /* better let it finish - the release will do whats needed */ rio->rio_dev = NULL; mutex_unlock(&(rio->lock)); + unlock_kernel(); return; } kfree(rio->ibuf); @@ -508,9 +512,10 @@ static void disconnect_rio(struct usb_interface *intf) rio->present = 0; mutex_unlock(&(rio->lock)); } + unlock_kernel(); } -static struct usb_device_id rio_table [] = { +static const struct usb_device_id rio_table[] = { { USB_DEVICE(0x0841, 1) }, /* Rio 500 */ { } /* Terminating entry */ }; diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c index 8b37a4b9839..aae95a009bd 100644 --- a/drivers/usb/misc/sisusbvga/sisusb.c +++ b/drivers/usb/misc/sisusbvga/sisusb.c @@ -250,7 +250,7 @@ sisusb_bulkout_msg(struct sisusb_usb_data *sisusb, int index, unsigned int pipe, sisusb->urbstatus[index] |= SU_URB_BUSY; /* Submit URB */ - retval = usb_submit_urb(urb, GFP_ATOMIC); + retval = usb_submit_urb(urb, GFP_KERNEL); /* If OK, and if timeout > 0, wait for completion */ if ((retval == 0) && timeout) { @@ -306,7 +306,7 @@ sisusb_bulkin_msg(struct sisusb_usb_data *sisusb, unsigned int pipe, void *data, urb->actual_length = 0; sisusb->completein = 0; - retval = usb_submit_urb(urb, GFP_ATOMIC); + retval = usb_submit_urb(urb, GFP_KERNEL); if (retval == 0) { wait_event_timeout(sisusb->wait_q, sisusb->completein, timeout); if (!sisusb->completein) { @@ -2416,21 +2416,28 @@ sisusb_open(struct inode *inode, struct file *file) struct usb_interface *interface; int subminor = iminor(inode); - if (!(interface = usb_find_interface(&sisusb_driver, subminor))) + lock_kernel(); + if (!(interface = usb_find_interface(&sisusb_driver, subminor))) { + unlock_kernel(); return -ENODEV; + } - if (!(sisusb = usb_get_intfdata(interface))) + if (!(sisusb = usb_get_intfdata(interface))) { + unlock_kernel(); return -ENODEV; + } mutex_lock(&sisusb->lock); if (!sisusb->present || !sisusb->ready) { mutex_unlock(&sisusb->lock); + unlock_kernel(); return -ENODEV; } if (sisusb->isopen) { mutex_unlock(&sisusb->lock); + unlock_kernel(); return -EBUSY; } @@ -2439,11 +2446,13 
@@ sisusb_open(struct inode *inode, struct file *file) if (sisusb_init_gfxdevice(sisusb, 0)) { mutex_unlock(&sisusb->lock); dev_err(&sisusb->sisusb_dev->dev, "Failed to initialize device\n"); + unlock_kernel(); return -EIO; } } else { mutex_unlock(&sisusb->lock); dev_err(&sisusb->sisusb_dev->dev, "Device not attached to USB 2.0 hub\n"); + unlock_kernel(); return -EIO; } } @@ -2456,6 +2465,7 @@ sisusb_open(struct inode *inode, struct file *file) file->private_data = sisusb; mutex_unlock(&sisusb->lock); + unlock_kernel(); return 0; } @@ -3238,7 +3248,7 @@ static void sisusb_disconnect(struct usb_interface *intf) kref_put(&sisusb->kref, sisusb_delete); } -static struct usb_device_id sisusb_table [] = { +static const struct usb_device_id sisusb_table[] = { { USB_DEVICE(0x0711, 0x0550) }, { USB_DEVICE(0x0711, 0x0900) }, { USB_DEVICE(0x0711, 0x0901) }, diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c index 2e14102955c..5da28eaee31 100644 --- a/drivers/usb/misc/trancevibrator.c +++ b/drivers/usb/misc/trancevibrator.c @@ -33,7 +33,7 @@ #define TRANCEVIBRATOR_VENDOR_ID 0x0b49 /* ASCII Corporation */ #define TRANCEVIBRATOR_PRODUCT_ID 0x064f /* Trance Vibrator */ -static struct usb_device_id id_table [] = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(TRANCEVIBRATOR_VENDOR_ID, TRANCEVIBRATOR_PRODUCT_ID) }, { }, }; diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c index 4fb120357c5..90aede90553 100644 --- a/drivers/usb/misc/usblcd.c +++ b/drivers/usb/misc/usblcd.c @@ -30,7 +30,7 @@ #define IOCTL_GET_DRV_VERSION 2 -static struct usb_device_id id_table [] = { +static const struct usb_device_id id_table[] = { { .idVendor = 0x10D2, .match_flags = USB_DEVICE_ID_MATCH_VENDOR, }, { }, }; @@ -74,10 +74,12 @@ static int lcd_open(struct inode *inode, struct file *file) struct usb_interface *interface; int subminor, r; + lock_kernel(); subminor = iminor(inode); interface = usb_find_interface(&lcd_driver, subminor); if (!interface) { + unlock_kernel(); err ("USBLCD: %s - error, can't find device for minor %d", __func__, subminor); return -ENODEV; @@ -87,6 +89,7 @@ static int lcd_open(struct inode *inode, struct file *file) dev = usb_get_intfdata(interface); if (!dev) { mutex_unlock(&open_disc_mutex); + unlock_kernel(); return -ENODEV; } @@ -98,11 +101,13 @@ static int lcd_open(struct inode *inode, struct file *file) r = usb_autopm_get_interface(interface); if (r < 0) { kref_put(&dev->kref, lcd_delete); + unlock_kernel(); return r; } /* save our object in the file's private structure */ file->private_data = dev; + unlock_kernel(); return 0; } diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c index 06cb71942dc..63da2c3c838 100644 --- a/drivers/usb/misc/usbled.c +++ b/drivers/usb/misc/usbled.c @@ -24,7 +24,7 @@ #define PRODUCT_ID 0x1223 /* table of devices that work with this driver */ -static struct usb_device_id id_table [] = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(VENDOR_ID, PRODUCT_ID) }, { }, }; diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c index 3db255537e7..a9555cb901a 100644 --- a/drivers/usb/misc/usbsevseg.c +++ b/drivers/usb/misc/usbsevseg.c @@ -27,7 +27,7 @@ #define MAXLEN 6 /* table of devices that work with this driver */ -static struct usb_device_id id_table[] = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(VENDOR_ID, PRODUCT_ID) }, { }, }; diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index 3dab0c0b196..a21cce6f740 100644 
--- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -1580,10 +1580,6 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf) return -ERESTARTSYS; /* FIXME: What if a system sleep starts while a test is running? */ - if (!intf->is_active) { - mutex_unlock(&dev->lock); - return -EHOSTUNREACH; - } /* some devices, like ez-usb default devices, need a non-default * altsetting to have any active endpoints. some tests change @@ -2101,7 +2097,7 @@ static struct usbtest_info generic_info = { #endif -static struct usb_device_id id_table [] = { +static const struct usb_device_id id_table[] = { /*-------------------------------------------------------------*/ diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c index 9a6c27a0179..f56fed53f2d 100644 --- a/drivers/usb/misc/uss720.c +++ b/drivers/usb/misc/uss720.c @@ -770,7 +770,7 @@ static void uss720_disconnect(struct usb_interface *intf) } /* table of cables that work through this driver */ -static struct usb_device_id uss720_table [] = { +static const struct usb_device_id uss720_table[] = { { USB_DEVICE(0x047e, 0x1001) }, { USB_DEVICE(0x0557, 0x2001) }, { USB_DEVICE(0x0729, 0x1284) }, diff --git a/drivers/usb/misc/vstusb.c b/drivers/usb/misc/vstusb.c deleted file mode 100644 index f26ea8dc157..00000000000 --- a/drivers/usb/misc/vstusb.c +++ /dev/null @@ -1,783 +0,0 @@ -/***************************************************************************** - * File: drivers/usb/misc/vstusb.c - * - * Purpose: Support for the bulk USB Vernier Spectrophotometers - * - * Author: Johnnie Peters - * Axian Consulting - * Beaverton, OR, USA 97005 - * - * Modified by: EQware Engineering, Inc. - * Oregon City, OR, USA 97045 - * - * Copyright: 2007, 2008 - * Vernier Software & Technology - * Beaverton, OR, USA 97005 - * - * Web: www.vernier.com - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - *****************************************************************************/ -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/mutex.h> -#include <linux/uaccess.h> -#include <linux/usb.h> - -#include <linux/usb/vstusb.h> - -#define DRIVER_VERSION "VST USB Driver Version 1.5" -#define DRIVER_DESC "Vernier Software Technology Bulk USB Driver" - -#ifdef CONFIG_USB_DYNAMIC_MINORS - #define VSTUSB_MINOR_BASE 0 -#else - #define VSTUSB_MINOR_BASE 199 -#endif - -#define USB_VENDOR_OCEANOPTICS 0x2457 -#define USB_VENDOR_VERNIER 0x08F7 /* Vernier Software & Technology */ - -#define USB_PRODUCT_USB2000 0x1002 -#define USB_PRODUCT_ADC1000_FW 0x1003 /* firmware download (renumerates) */ -#define USB_PRODUCT_ADC1000 0x1004 -#define USB_PRODUCT_HR2000_FW 0x1009 /* firmware download (renumerates) */ -#define USB_PRODUCT_HR2000 0x100A -#define USB_PRODUCT_HR4000_FW 0x1011 /* firmware download (renumerates) */ -#define USB_PRODUCT_HR4000 0x1012 -#define USB_PRODUCT_USB650 0x1014 /* "Red Tide" */ -#define USB_PRODUCT_QE65000 0x1018 -#define USB_PRODUCT_USB4000 0x1022 -#define USB_PRODUCT_USB325 0x1024 /* "Vernier Spectrometer" */ - -#define USB_PRODUCT_LABPRO 0x0001 -#define USB_PRODUCT_LABQUEST 0x0005 - -#define VST_MAXBUFFER (64*1024) - -static struct usb_device_id id_table[] = { - { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB2000)}, - { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_HR4000)}, - { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB650)}, - { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB4000)}, - { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB325)}, - { USB_DEVICE(USB_VENDOR_VERNIER, USB_PRODUCT_LABQUEST)}, - { USB_DEVICE(USB_VENDOR_VERNIER, USB_PRODUCT_LABPRO)}, - {}, -}; - -MODULE_DEVICE_TABLE(usb, id_table); - -struct vstusb_device { - struct kref kref; - struct mutex lock; - struct usb_device *usb_dev; - char present; - char isopen; - struct usb_anchor submitted; - int rd_pipe; - int rd_timeout_ms; - int wr_pipe; - int wr_timeout_ms; -}; -#define to_vst_dev(d) container_of(d, struct vstusb_device, kref) - -static struct usb_driver vstusb_driver; - -static void vstusb_delete(struct kref *kref) -{ - struct vstusb_device *vstdev = to_vst_dev(kref); - - usb_put_dev(vstdev->usb_dev); - kfree(vstdev); -} - -static int vstusb_open(struct inode *inode, struct file *file) -{ - struct vstusb_device *vstdev; - struct usb_interface *interface; - - interface = usb_find_interface(&vstusb_driver, iminor(inode)); - - if (!interface) { - printk(KERN_ERR KBUILD_MODNAME - ": %s - error, can't find device for minor %d\n", - __func__, iminor(inode)); - return -ENODEV; - } - - vstdev = usb_get_intfdata(interface); - - if (!vstdev) - return -ENODEV; - - /* lock this device */ - mutex_lock(&vstdev->lock); - - /* can only open one time */ - if ((!vstdev->present) || (vstdev->isopen)) { - mutex_unlock(&vstdev->lock); - return -EBUSY; - } - - /* increment our usage count */ - kref_get(&vstdev->kref); - - vstdev->isopen = 1; - - /* save device in the file's private structure */ - file->private_data = vstdev; - - dev_dbg(&vstdev->usb_dev->dev, "%s: opened\n", __func__); - - mutex_unlock(&vstdev->lock); - - return 0; -} - -static int vstusb_release(struct inode *inode, struct file *file) -{ - struct vstusb_device *vstdev; - - vstdev = file->private_data; - - if (vstdev == NULL) - return -ENODEV; - - mutex_lock(&vstdev->lock); - - vstdev->isopen = 0; - - dev_dbg(&vstdev->usb_dev->dev, "%s: 
released\n", __func__); - - mutex_unlock(&vstdev->lock); - - kref_put(&vstdev->kref, vstusb_delete); - - return 0; -} - -static void usb_api_blocking_completion(struct urb *urb) -{ - struct completion *completeit = urb->context; - - complete(completeit); -} - -static int vstusb_fill_and_send_urb(struct urb *urb, - struct usb_device *usb_dev, - unsigned int pipe, void *data, - unsigned int len, struct completion *done) -{ - struct usb_host_endpoint *ep; - struct usb_host_endpoint **hostep; - unsigned int pipend; - - int status; - - hostep = usb_pipein(pipe) ? usb_dev->ep_in : usb_dev->ep_out; - pipend = usb_pipeendpoint(pipe); - ep = hostep[pipend]; - - if (!ep || (len == 0)) - return -EINVAL; - - if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) - == USB_ENDPOINT_XFER_INT) { - pipe = (pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30); - usb_fill_int_urb(urb, usb_dev, pipe, data, len, - (usb_complete_t)usb_api_blocking_completion, - NULL, ep->desc.bInterval); - } else - usb_fill_bulk_urb(urb, usb_dev, pipe, data, len, - (usb_complete_t)usb_api_blocking_completion, - NULL); - - init_completion(done); - urb->context = done; - urb->actual_length = 0; - status = usb_submit_urb(urb, GFP_KERNEL); - - return status; -} - -static int vstusb_complete_urb(struct urb *urb, struct completion *done, - int timeout, int *actual_length) -{ - unsigned long expire; - int status; - - expire = timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT; - if (!wait_for_completion_interruptible_timeout(done, expire)) { - usb_kill_urb(urb); - status = urb->status == -ENOENT ? -ETIMEDOUT : urb->status; - - dev_dbg(&urb->dev->dev, - "%s timed out on ep%d%s len=%d/%d, urb status = %d\n", - current->comm, - usb_pipeendpoint(urb->pipe), - usb_pipein(urb->pipe) ? "in" : "out", - urb->actual_length, - urb->transfer_buffer_length, - urb->status); - - } else { - if (signal_pending(current)) { - /* if really an error */ - if (urb->status && !((urb->status == -ENOENT) || - (urb->status == -ECONNRESET) || - (urb->status == -ESHUTDOWN))) { - status = -EINTR; - usb_kill_urb(urb); - } else { - status = 0; - } - - dev_dbg(&urb->dev->dev, - "%s: signal pending on ep%d%s len=%d/%d," - "urb status = %d\n", - current->comm, - usb_pipeendpoint(urb->pipe), - usb_pipein(urb->pipe) ? 
"in" : "out", - urb->actual_length, - urb->transfer_buffer_length, - urb->status); - - } else { - status = urb->status; - } - } - - if (actual_length) - *actual_length = urb->actual_length; - - return status; -} - -static ssize_t vstusb_read(struct file *file, char __user *buffer, - size_t count, loff_t *ppos) -{ - struct vstusb_device *vstdev; - int cnt = -1; - void *buf; - int retval = 0; - - struct urb *urb; - struct usb_device *dev; - unsigned int pipe; - int timeout; - - DECLARE_COMPLETION_ONSTACK(done); - - vstdev = file->private_data; - - if (vstdev == NULL) - return -ENODEV; - - /* verify that we actually want to read some data */ - if ((count == 0) || (count > VST_MAXBUFFER)) - return -EINVAL; - - /* lock this object */ - if (mutex_lock_interruptible(&vstdev->lock)) - return -ERESTARTSYS; - - /* anyone home */ - if (!vstdev->present) { - mutex_unlock(&vstdev->lock); - printk(KERN_ERR KBUILD_MODNAME - ": %s: device not present\n", __func__); - return -ENODEV; - } - - /* pull out the necessary data */ - dev = vstdev->usb_dev; - pipe = usb_rcvbulkpipe(dev, vstdev->rd_pipe); - timeout = vstdev->rd_timeout_ms; - - buf = kmalloc(count, GFP_KERNEL); - if (buf == NULL) { - mutex_unlock(&vstdev->lock); - return -ENOMEM; - } - - urb = usb_alloc_urb(0, GFP_KERNEL); - if (!urb) { - kfree(buf); - mutex_unlock(&vstdev->lock); - return -ENOMEM; - } - - usb_anchor_urb(urb, &vstdev->submitted); - retval = vstusb_fill_and_send_urb(urb, dev, pipe, buf, count, &done); - mutex_unlock(&vstdev->lock); - if (retval) { - usb_unanchor_urb(urb); - dev_err(&dev->dev, "%s: error %d filling and sending urb %d\n", - __func__, retval, pipe); - goto exit; - } - - retval = vstusb_complete_urb(urb, &done, timeout, &cnt); - if (retval) { - dev_err(&dev->dev, "%s: error %d completing urb %d\n", - __func__, retval, pipe); - goto exit; - } - - if (copy_to_user(buffer, buf, cnt)) { - dev_err(&dev->dev, "%s: can't copy_to_user\n", __func__); - retval = -EFAULT; - } else { - retval = cnt; - dev_dbg(&dev->dev, "%s: read %d bytes from pipe %d\n", - __func__, cnt, pipe); - } - -exit: - usb_free_urb(urb); - kfree(buf); - return retval; -} - -static ssize_t vstusb_write(struct file *file, const char __user *buffer, - size_t count, loff_t *ppos) -{ - struct vstusb_device *vstdev; - int cnt = -1; - void *buf; - int retval = 0; - - struct urb *urb; - struct usb_device *dev; - unsigned int pipe; - int timeout; - - DECLARE_COMPLETION_ONSTACK(done); - - vstdev = file->private_data; - - if (vstdev == NULL) - return -ENODEV; - - /* verify that we actually have some data to write */ - if ((count == 0) || (count > VST_MAXBUFFER)) - return retval; - - /* lock this object */ - if (mutex_lock_interruptible(&vstdev->lock)) - return -ERESTARTSYS; - - /* anyone home */ - if (!vstdev->present) { - mutex_unlock(&vstdev->lock); - printk(KERN_ERR KBUILD_MODNAME - ": %s: device not present\n", __func__); - return -ENODEV; - } - - /* pull out the necessary data */ - dev = vstdev->usb_dev; - pipe = usb_sndbulkpipe(dev, vstdev->wr_pipe); - timeout = vstdev->wr_timeout_ms; - - buf = kmalloc(count, GFP_KERNEL); - if (buf == NULL) { - mutex_unlock(&vstdev->lock); - return -ENOMEM; - } - - urb = usb_alloc_urb(0, GFP_KERNEL); - if (!urb) { - kfree(buf); - mutex_unlock(&vstdev->lock); - return -ENOMEM; - } - - if (copy_from_user(buf, buffer, count)) { - mutex_unlock(&vstdev->lock); - dev_err(&dev->dev, "%s: can't copy_from_user\n", __func__); - retval = -EFAULT; - goto exit; - } - - usb_anchor_urb(urb, &vstdev->submitted); - retval = 
vstusb_fill_and_send_urb(urb, dev, pipe, buf, count, &done); - mutex_unlock(&vstdev->lock); - if (retval) { - usb_unanchor_urb(urb); - dev_err(&dev->dev, "%s: error %d filling and sending urb %d\n", - __func__, retval, pipe); - goto exit; - } - - retval = vstusb_complete_urb(urb, &done, timeout, &cnt); - if (retval) { - dev_err(&dev->dev, "%s: error %d completing urb %d\n", - __func__, retval, pipe); - goto exit; - } else { - retval = cnt; - dev_dbg(&dev->dev, "%s: sent %d bytes to pipe %d\n", - __func__, cnt, pipe); - } - -exit: - usb_free_urb(urb); - kfree(buf); - return retval; -} - -static long vstusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - int retval = 0; - int cnt = -1; - void __user *data = (void __user *)arg; - struct vstusb_args usb_data; - - struct vstusb_device *vstdev; - void *buffer = NULL; /* must be initialized. buffer is - * referenced on exit but not all - * ioctls allocate it */ - - struct urb *urb = NULL; /* must be initialized. urb is - * referenced on exit but not all - * ioctls allocate it */ - struct usb_device *dev; - unsigned int pipe; - int timeout; - - DECLARE_COMPLETION_ONSTACK(done); - - vstdev = file->private_data; - - if (_IOC_TYPE(cmd) != VST_IOC_MAGIC) { - dev_warn(&vstdev->usb_dev->dev, - "%s: ioctl command %x, bad ioctl magic %x, " - "expected %x\n", __func__, cmd, - _IOC_TYPE(cmd), VST_IOC_MAGIC); - return -EINVAL; - } - - if (vstdev == NULL) - return -ENODEV; - - if (copy_from_user(&usb_data, data, sizeof(struct vstusb_args))) { - dev_err(&vstdev->usb_dev->dev, "%s: can't copy_from_user\n", - __func__); - return -EFAULT; - } - - /* lock this object */ - if (mutex_lock_interruptible(&vstdev->lock)) { - retval = -ERESTARTSYS; - goto exit; - } - - /* anyone home */ - if (!vstdev->present) { - mutex_unlock(&vstdev->lock); - dev_err(&vstdev->usb_dev->dev, "%s: device not present\n", - __func__); - retval = -ENODEV; - goto exit; - } - - /* pull out the necessary data */ - dev = vstdev->usb_dev; - - switch (cmd) { - - case IOCTL_VSTUSB_CONFIG_RW: - - vstdev->rd_pipe = usb_data.rd_pipe; - vstdev->rd_timeout_ms = usb_data.rd_timeout_ms; - vstdev->wr_pipe = usb_data.wr_pipe; - vstdev->wr_timeout_ms = usb_data.wr_timeout_ms; - - mutex_unlock(&vstdev->lock); - - dev_dbg(&dev->dev, "%s: setting pipes/timeouts, " - "rdpipe = %d, rdtimeout = %d, " - "wrpipe = %d, wrtimeout = %d\n", __func__, - vstdev->rd_pipe, vstdev->rd_timeout_ms, - vstdev->wr_pipe, vstdev->wr_timeout_ms); - break; - - case IOCTL_VSTUSB_SEND_PIPE: - - if ((usb_data.count == 0) || (usb_data.count > VST_MAXBUFFER)) { - mutex_unlock(&vstdev->lock); - retval = -EINVAL; - goto exit; - } - - buffer = kmalloc(usb_data.count, GFP_KERNEL); - if (buffer == NULL) { - mutex_unlock(&vstdev->lock); - retval = -ENOMEM; - goto exit; - } - - urb = usb_alloc_urb(0, GFP_KERNEL); - if (!urb) { - mutex_unlock(&vstdev->lock); - retval = -ENOMEM; - goto exit; - } - - timeout = usb_data.timeout_ms; - - pipe = usb_sndbulkpipe(dev, usb_data.pipe); - - if (copy_from_user(buffer, usb_data.buffer, usb_data.count)) { - dev_err(&dev->dev, "%s: can't copy_from_user\n", - __func__); - mutex_unlock(&vstdev->lock); - retval = -EFAULT; - goto exit; - } - - usb_anchor_urb(urb, &vstdev->submitted); - retval = vstusb_fill_and_send_urb(urb, dev, pipe, buffer, - usb_data.count, &done); - mutex_unlock(&vstdev->lock); - if (retval) { - usb_unanchor_urb(urb); - dev_err(&dev->dev, - "%s: error %d filling and sending urb %d\n", - __func__, retval, pipe); - goto exit; - } - - retval = vstusb_complete_urb(urb, &done, 
timeout, &cnt); - if (retval) { - dev_err(&dev->dev, "%s: error %d completing urb %d\n", - __func__, retval, pipe); - } - - break; - case IOCTL_VSTUSB_RECV_PIPE: - - if ((usb_data.count == 0) || (usb_data.count > VST_MAXBUFFER)) { - mutex_unlock(&vstdev->lock); - retval = -EINVAL; - goto exit; - } - - buffer = kmalloc(usb_data.count, GFP_KERNEL); - if (buffer == NULL) { - mutex_unlock(&vstdev->lock); - retval = -ENOMEM; - goto exit; - } - - urb = usb_alloc_urb(0, GFP_KERNEL); - if (!urb) { - mutex_unlock(&vstdev->lock); - retval = -ENOMEM; - goto exit; - } - - timeout = usb_data.timeout_ms; - - pipe = usb_rcvbulkpipe(dev, usb_data.pipe); - - usb_anchor_urb(urb, &vstdev->submitted); - retval = vstusb_fill_and_send_urb(urb, dev, pipe, buffer, - usb_data.count, &done); - mutex_unlock(&vstdev->lock); - if (retval) { - usb_unanchor_urb(urb); - dev_err(&dev->dev, - "%s: error %d filling and sending urb %d\n", - __func__, retval, pipe); - goto exit; - } - - retval = vstusb_complete_urb(urb, &done, timeout, &cnt); - if (retval) { - dev_err(&dev->dev, "%s: error %d completing urb %d\n", - __func__, retval, pipe); - goto exit; - } - - if (copy_to_user(usb_data.buffer, buffer, cnt)) { - dev_err(&dev->dev, "%s: can't copy_to_user\n", - __func__); - retval = -EFAULT; - goto exit; - } - - usb_data.count = cnt; - if (copy_to_user(data, &usb_data, sizeof(struct vstusb_args))) { - dev_err(&dev->dev, "%s: can't copy_to_user\n", - __func__); - retval = -EFAULT; - } else { - dev_dbg(&dev->dev, "%s: recv %zd bytes from pipe %d\n", - __func__, usb_data.count, usb_data.pipe); - } - - break; - - default: - mutex_unlock(&vstdev->lock); - dev_warn(&dev->dev, "ioctl_vstusb: invalid ioctl cmd %x\n", - cmd); - return -EINVAL; - break; - } -exit: - usb_free_urb(urb); - kfree(buffer); - return retval; -} - -static const struct file_operations vstusb_fops = { - .owner = THIS_MODULE, - .read = vstusb_read, - .write = vstusb_write, - .unlocked_ioctl = vstusb_ioctl, - .compat_ioctl = vstusb_ioctl, - .open = vstusb_open, - .release = vstusb_release, -}; - -static struct usb_class_driver usb_vstusb_class = { - .name = "usb/vstusb%d", - .fops = &vstusb_fops, - .minor_base = VSTUSB_MINOR_BASE, -}; - -static int vstusb_probe(struct usb_interface *intf, - const struct usb_device_id *id) -{ - struct usb_device *dev = interface_to_usbdev(intf); - struct vstusb_device *vstdev; - int i; - int retval = 0; - - /* allocate memory for our device state and intialize it */ - - vstdev = kzalloc(sizeof(*vstdev), GFP_KERNEL); - if (vstdev == NULL) - return -ENOMEM; - - /* must do usb_get_dev() prior to kref_init() since the kref_put() - * release function will do a usb_put_dev() */ - usb_get_dev(dev); - kref_init(&vstdev->kref); - mutex_init(&vstdev->lock); - - i = dev->descriptor.bcdDevice; - - dev_dbg(&intf->dev, "Version %1d%1d.%1d%1d found at address %d\n", - (i & 0xF000) >> 12, (i & 0xF00) >> 8, - (i & 0xF0) >> 4, (i & 0xF), dev->devnum); - - vstdev->present = 1; - vstdev->isopen = 0; - vstdev->usb_dev = dev; - init_usb_anchor(&vstdev->submitted); - - usb_set_intfdata(intf, vstdev); - retval = usb_register_dev(intf, &usb_vstusb_class); - if (retval) { - dev_err(&intf->dev, - "%s: Not able to get a minor for this device.\n", - __func__); - usb_set_intfdata(intf, NULL); - kref_put(&vstdev->kref, vstusb_delete); - return retval; - } - - /* let the user know what node this device is now attached to */ - dev_info(&intf->dev, - "VST USB Device #%d now attached to major %d minor %d\n", - (intf->minor - VSTUSB_MINOR_BASE), USB_MAJOR, 
intf->minor); - - dev_info(&intf->dev, "%s, %s\n", DRIVER_DESC, DRIVER_VERSION); - - return retval; -} - -static void vstusb_disconnect(struct usb_interface *intf) -{ - struct vstusb_device *vstdev = usb_get_intfdata(intf); - - usb_deregister_dev(intf, &usb_vstusb_class); - usb_set_intfdata(intf, NULL); - - if (vstdev) { - - mutex_lock(&vstdev->lock); - vstdev->present = 0; - - usb_kill_anchored_urbs(&vstdev->submitted); - - mutex_unlock(&vstdev->lock); - - kref_put(&vstdev->kref, vstusb_delete); - } - -} - -static int vstusb_suspend(struct usb_interface *intf, pm_message_t message) -{ - struct vstusb_device *vstdev = usb_get_intfdata(intf); - int time; - if (!vstdev) - return 0; - - mutex_lock(&vstdev->lock); - time = usb_wait_anchor_empty_timeout(&vstdev->submitted, 1000); - if (!time) - usb_kill_anchored_urbs(&vstdev->submitted); - mutex_unlock(&vstdev->lock); - - return 0; -} - -static int vstusb_resume(struct usb_interface *intf) -{ - return 0; -} - -static struct usb_driver vstusb_driver = { - .name = "vstusb", - .probe = vstusb_probe, - .disconnect = vstusb_disconnect, - .suspend = vstusb_suspend, - .resume = vstusb_resume, - .id_table = id_table, -}; - -static int __init vstusb_init(void) -{ - int rc; - - rc = usb_register(&vstusb_driver); - if (rc) - printk(KERN_ERR "%s: failed to register (%d)", __func__, rc); - - return rc; -} - -static void __exit vstusb_exit(void) -{ - usb_deregister(&vstusb_driver); -} - -module_init(vstusb_init); -module_exit(vstusb_exit); - -MODULE_AUTHOR("Dennis O'Brien/Stephen Ware"); -MODULE_DESCRIPTION(DRIVER_VERSION); -MODULE_LICENSE("GPL"); diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c index 385ec052016..6dd44bc1f5f 100644 --- a/drivers/usb/mon/mon_bin.c +++ b/drivers/usb/mon/mon_bin.c @@ -460,8 +460,8 @@ static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb, char ev_type, int status) { const struct usb_endpoint_descriptor *epd = &urb->ep->desc; - unsigned long flags; struct timeval ts; + unsigned long flags; unsigned int urb_length; unsigned int offset; unsigned int length; @@ -600,10 +600,13 @@ static void mon_bin_complete(void *data, struct urb *urb, int status) static void mon_bin_error(void *data, struct urb *urb, int error) { struct mon_reader_bin *rp = data; + struct timeval ts; unsigned long flags; unsigned int offset; struct mon_bin_hdr *ep; + do_gettimeofday(&ts); + spin_lock_irqsave(&rp->b_lock, flags); offset = mon_buff_area_alloc(rp, PKT_SIZE); @@ -623,6 +626,8 @@ static void mon_bin_error(void *data, struct urb *urb, int error) ep->devnum = urb->dev->devnum; ep->busnum = urb->dev->bus->busnum; ep->id = (unsigned long) urb; + ep->ts_sec = ts.tv_sec; + ep->ts_usec = ts.tv_usec; ep->status = error; ep->flag_setup = '-'; diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c index 047568ff223..31c11888ec6 100644 --- a/drivers/usb/mon/mon_text.c +++ b/drivers/usb/mon/mon_text.c @@ -180,7 +180,7 @@ static inline unsigned int mon_get_timestamp(void) unsigned int stamp; do_gettimeofday(&tval); - stamp = tval.tv_sec & 0xFFFF; /* 2^32 = 4294967296. Limit to 4096s. */ + stamp = tval.tv_sec & 0xFFF; /* 2^32 = 4294967296. Limit to 4096s. 
*/ stamp = stamp * 1000000 + tval.tv_usec; return stamp; } @@ -273,12 +273,12 @@ static void mon_text_error(void *data, struct urb *urb, int error) ep->type = 'E'; ep->id = (unsigned long) urb; - ep->busnum = 0; + ep->busnum = urb->dev->bus->busnum; ep->devnum = urb->dev->devnum; ep->epnum = usb_endpoint_num(&urb->ep->desc); ep->xfertype = usb_endpoint_type(&urb->ep->desc); ep->is_in = usb_urb_dir_in(urb); - ep->tstamp = 0; + ep->tstamp = mon_get_timestamp(); ep->length = 0; ep->status = error; diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c index ad26e656966..bcee1339d4f 100644 --- a/drivers/usb/musb/blackfin.c +++ b/drivers/usb/musb/blackfin.c @@ -30,7 +30,6 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src) void __iomem *fifo = hw_ep->fifo; void __iomem *epio = hw_ep->regs; u8 epnum = hw_ep->epnum; - u16 dma_reg = 0; prefetch((u8 *)src); @@ -42,15 +41,17 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src) dump_fifo_data(src, len); if (!ANOMALY_05000380 && epnum != 0) { - flush_dcache_range((unsigned int)src, - (unsigned int)(src + len)); + u16 dma_reg; + + flush_dcache_range((unsigned long)src, + (unsigned long)(src + len)); /* Setup DMA address register */ - dma_reg = (u16) ((u32) src & 0xFFFF); + dma_reg = (u32)src; bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg); SSYNC(); - dma_reg = (u16) (((u32) src >> 16) & 0xFFFF); + dma_reg = (u32)src >> 16; bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg); SSYNC(); @@ -79,12 +80,9 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src) SSYNC(); if (unlikely((unsigned long)src & 0x01)) - outsw_8((unsigned long)fifo, src, - len & 0x01 ? (len >> 1) + 1 : len >> 1); + outsw_8((unsigned long)fifo, src, (len + 1) >> 1); else - outsw((unsigned long)fifo, src, - len & 0x01 ? (len >> 1) + 1 : len >> 1); - + outsw((unsigned long)fifo, src, (len + 1) >> 1); } } /* @@ -94,19 +92,19 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst) { void __iomem *fifo = hw_ep->fifo; u8 epnum = hw_ep->epnum; - u16 dma_reg = 0; if (ANOMALY_05000467 && epnum != 0) { + u16 dma_reg; - invalidate_dcache_range((unsigned int)dst, - (unsigned int)(dst + len)); + invalidate_dcache_range((unsigned long)dst, + (unsigned long)(dst + len)); /* Setup DMA address register */ - dma_reg = (u16) ((u32) dst & 0xFFFF); + dma_reg = (u32)dst; bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg); SSYNC(); - dma_reg = (u16) (((u32) dst >> 16) & 0xFFFF); + dma_reg = (u32)dst >> 16; bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg); SSYNC(); diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c index a44a450c860..3c69a76ec39 100644 --- a/drivers/usb/musb/cppi_dma.c +++ b/drivers/usb/musb/cppi_dma.c @@ -1191,8 +1191,13 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id) bd = tx_ch->head; + /* + * If Head is null then this could mean that a abort interrupt + * that needs to be acknowledged. + */ if (NULL == bd) { DBG(1, "null BD\n"); + tx_ram->tx_complete = 0; continue; } @@ -1412,15 +1417,6 @@ static int cppi_channel_abort(struct dma_channel *channel) if (cppi_ch->transmit) { struct cppi_tx_stateram __iomem *tx_ram; - int enabled; - - /* mask interrupts raised to signal teardown complete. 
*/ - enabled = musb_readl(tibase, DAVINCI_TXCPPI_INTENAB_REG) - & (1 << cppi_ch->index); - if (enabled) - musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG, - (1 << cppi_ch->index)); - /* REVISIT put timeouts on these controller handshakes */ cppi_dump_tx(6, cppi_ch, " (teardown)"); @@ -1435,7 +1431,6 @@ static int cppi_channel_abort(struct dma_channel *channel) do { value = musb_readl(&tx_ram->tx_complete, 0); } while (0xFFFFFFFC != value); - musb_writel(&tx_ram->tx_complete, 0, 0xFFFFFFFC); /* FIXME clean up the transfer state ... here? * the completion routine should get called with @@ -1448,23 +1443,15 @@ static int cppi_channel_abort(struct dma_channel *channel) musb_writew(regs, MUSB_TXCSR, value); musb_writew(regs, MUSB_TXCSR, value); - /* While we scrub the TX state RAM, ensure that we clean - * up any interrupt that's currently asserted: + /* * 1. Write to completion Ptr value 0x1(bit 0 set) * (write back mode) - * 2. Write to completion Ptr value 0x0(bit 0 cleared) - * (compare mode) - * Value written is compared(for bits 31:2) and when - * equal, interrupt is deasserted. + * 2. Wait for abort interrupt and then put the channel in + * compare mode by writing 1 to the tx_complete register. */ cppi_reset_tx(tx_ram, 1); - musb_writel(&tx_ram->tx_complete, 0, 0); - - /* re-enable interrupt */ - if (enabled) - musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG, - (1 << cppi_ch->index)); - + cppi_ch->head = 0; + musb_writel(&tx_ram->tx_complete, 0, 1); cppi_dump_tx(5, cppi_ch, " (done teardown)"); /* REVISIT tx side _should_ clean up the same way diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 738efd8063b..b4bbf8f2c23 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -557,6 +557,69 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, handled = IRQ_HANDLED; } + + if (int_usb & MUSB_INTR_SUSPEND) { + DBG(1, "SUSPEND (%s) devctl %02x power %02x\n", + otg_state_string(musb), devctl, power); + handled = IRQ_HANDLED; + + switch (musb->xceiv->state) { +#ifdef CONFIG_USB_MUSB_OTG + case OTG_STATE_A_PERIPHERAL: + /* We also come here if the cable is removed, since + * this silicon doesn't report ID-no-longer-grounded. + * + * We depend on T(a_wait_bcon) to shut us down, and + * hope users don't do anything dicey during this + * undesired detour through A_WAIT_BCON. + */ + musb_hnp_stop(musb); + usb_hcd_resume_root_hub(musb_to_hcd(musb)); + musb_root_disconnect(musb); + musb_platform_try_idle(musb, jiffies + + msecs_to_jiffies(musb->a_wait_bcon + ? 
: OTG_TIME_A_WAIT_BCON)); + + break; +#endif + case OTG_STATE_B_IDLE: + if (!musb->is_active) + break; + case OTG_STATE_B_PERIPHERAL: + musb_g_suspend(musb); + musb->is_active = is_otg_enabled(musb) + && musb->xceiv->gadget->b_hnp_enable; + if (musb->is_active) { +#ifdef CONFIG_USB_MUSB_OTG + musb->xceiv->state = OTG_STATE_B_WAIT_ACON; + DBG(1, "HNP: Setting timer for b_ase0_brst\n"); + mod_timer(&musb->otg_timer, jiffies + + msecs_to_jiffies( + OTG_TIME_B_ASE0_BRST)); +#endif + } + break; + case OTG_STATE_A_WAIT_BCON: + if (musb->a_wait_bcon != 0) + musb_platform_try_idle(musb, jiffies + + msecs_to_jiffies(musb->a_wait_bcon)); + break; + case OTG_STATE_A_HOST: + musb->xceiv->state = OTG_STATE_A_SUSPEND; + musb->is_active = is_otg_enabled(musb) + && musb->xceiv->host->b_hnp_enable; + break; + case OTG_STATE_B_HOST: + /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */ + DBG(1, "REVISIT: SUSPEND as B_HOST\n"); + break; + default: + /* "should not happen" */ + musb->is_active = 0; + break; + } + } + if (int_usb & MUSB_INTR_CONNECT) { struct usb_hcd *hcd = musb_to_hcd(musb); @@ -625,10 +688,61 @@ b_host: } #endif /* CONFIG_USB_MUSB_HDRC_HCD */ + if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) { + DBG(1, "DISCONNECT (%s) as %s, devctl %02x\n", + otg_state_string(musb), + MUSB_MODE(musb), devctl); + handled = IRQ_HANDLED; + + switch (musb->xceiv->state) { +#ifdef CONFIG_USB_MUSB_HDRC_HCD + case OTG_STATE_A_HOST: + case OTG_STATE_A_SUSPEND: + usb_hcd_resume_root_hub(musb_to_hcd(musb)); + musb_root_disconnect(musb); + if (musb->a_wait_bcon != 0 && is_otg_enabled(musb)) + musb_platform_try_idle(musb, jiffies + + msecs_to_jiffies(musb->a_wait_bcon)); + break; +#endif /* HOST */ +#ifdef CONFIG_USB_MUSB_OTG + case OTG_STATE_B_HOST: + /* REVISIT this behaves for "real disconnect" + * cases; make sure the other transitions from + * from B_HOST act right too. The B_HOST code + * in hnp_stop() is currently not used... + */ + musb_root_disconnect(musb); + musb_to_hcd(musb)->self.is_b_host = 0; + musb->xceiv->state = OTG_STATE_B_PERIPHERAL; + MUSB_DEV_MODE(musb); + musb_g_disconnect(musb); + break; + case OTG_STATE_A_PERIPHERAL: + musb_hnp_stop(musb); + musb_root_disconnect(musb); + /* FALLTHROUGH */ + case OTG_STATE_B_WAIT_ACON: + /* FALLTHROUGH */ +#endif /* OTG */ +#ifdef CONFIG_USB_GADGET_MUSB_HDRC + case OTG_STATE_B_PERIPHERAL: + case OTG_STATE_B_IDLE: + musb_g_disconnect(musb); + break; +#endif /* GADGET */ + default: + WARNING("unhandled DISCONNECT transition (%s)\n", + otg_state_string(musb)); + break; + } + } + /* mentor saves a bit: bus reset and babble share the same irq. * only host sees babble; only peripheral sees bus reset. */ if (int_usb & MUSB_INTR_RESET) { + handled = IRQ_HANDLED; if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) { /* * Looks like non-HS BABBLE can be ignored, but @@ -641,7 +755,7 @@ b_host: DBG(1, "BABBLE devctl: %02x\n", devctl); else { ERR("Stopping host session -- babble\n"); - musb_writeb(mbase, MUSB_DEVCTL, 0); + musb_writeb(musb->mregs, MUSB_DEVCTL, 0); } } else if (is_peripheral_capable()) { DBG(1, "BUS RESET as %s\n", otg_state_string(musb)); @@ -686,29 +800,7 @@ b_host: otg_state_string(musb)); } } - - handled = IRQ_HANDLED; } - schedule_work(&musb->irq_work); - - return handled; -} - -/* - * Interrupt Service Routine to record USB "global" interrupts. 
- * Since these do not happen often and signify things of - * paramount importance, it seems OK to check them individually; - * the order of the tests is specified in the manual - * - * @param musb instance pointer - * @param int_usb register contents - * @param devctl - * @param power - */ -static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb, - u8 devctl, u8 power) -{ - irqreturn_t handled = IRQ_NONE; #if 0 /* REVISIT ... this would be for multiplexing periodic endpoints, or @@ -755,117 +847,7 @@ static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb, } #endif - if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) { - DBG(1, "DISCONNECT (%s) as %s, devctl %02x\n", - otg_state_string(musb), - MUSB_MODE(musb), devctl); - handled = IRQ_HANDLED; - - switch (musb->xceiv->state) { -#ifdef CONFIG_USB_MUSB_HDRC_HCD - case OTG_STATE_A_HOST: - case OTG_STATE_A_SUSPEND: - usb_hcd_resume_root_hub(musb_to_hcd(musb)); - musb_root_disconnect(musb); - if (musb->a_wait_bcon != 0 && is_otg_enabled(musb)) - musb_platform_try_idle(musb, jiffies - + msecs_to_jiffies(musb->a_wait_bcon)); - break; -#endif /* HOST */ -#ifdef CONFIG_USB_MUSB_OTG - case OTG_STATE_B_HOST: - /* REVISIT this behaves for "real disconnect" - * cases; make sure the other transitions from - * from B_HOST act right too. The B_HOST code - * in hnp_stop() is currently not used... - */ - musb_root_disconnect(musb); - musb_to_hcd(musb)->self.is_b_host = 0; - musb->xceiv->state = OTG_STATE_B_PERIPHERAL; - MUSB_DEV_MODE(musb); - musb_g_disconnect(musb); - break; - case OTG_STATE_A_PERIPHERAL: - musb_hnp_stop(musb); - musb_root_disconnect(musb); - /* FALLTHROUGH */ - case OTG_STATE_B_WAIT_ACON: - /* FALLTHROUGH */ -#endif /* OTG */ -#ifdef CONFIG_USB_GADGET_MUSB_HDRC - case OTG_STATE_B_PERIPHERAL: - case OTG_STATE_B_IDLE: - musb_g_disconnect(musb); - break; -#endif /* GADGET */ - default: - WARNING("unhandled DISCONNECT transition (%s)\n", - otg_state_string(musb)); - break; - } - - schedule_work(&musb->irq_work); - } - - if (int_usb & MUSB_INTR_SUSPEND) { - DBG(1, "SUSPEND (%s) devctl %02x power %02x\n", - otg_state_string(musb), devctl, power); - handled = IRQ_HANDLED; - - switch (musb->xceiv->state) { -#ifdef CONFIG_USB_MUSB_OTG - case OTG_STATE_A_PERIPHERAL: - /* We also come here if the cable is removed, since - * this silicon doesn't report ID-no-longer-grounded. - * - * We depend on T(a_wait_bcon) to shut us down, and - * hope users don't do anything dicey during this - * undesired detour through A_WAIT_BCON. - */ - musb_hnp_stop(musb); - usb_hcd_resume_root_hub(musb_to_hcd(musb)); - musb_root_disconnect(musb); - musb_platform_try_idle(musb, jiffies - + msecs_to_jiffies(musb->a_wait_bcon - ? 
: OTG_TIME_A_WAIT_BCON)); - break; -#endif - case OTG_STATE_B_PERIPHERAL: - musb_g_suspend(musb); - musb->is_active = is_otg_enabled(musb) - && musb->xceiv->gadget->b_hnp_enable; - if (musb->is_active) { -#ifdef CONFIG_USB_MUSB_OTG - musb->xceiv->state = OTG_STATE_B_WAIT_ACON; - DBG(1, "HNP: Setting timer for b_ase0_brst\n"); - mod_timer(&musb->otg_timer, jiffies - + msecs_to_jiffies( - OTG_TIME_B_ASE0_BRST)); -#endif - } - break; - case OTG_STATE_A_WAIT_BCON: - if (musb->a_wait_bcon != 0) - musb_platform_try_idle(musb, jiffies - + msecs_to_jiffies(musb->a_wait_bcon)); - break; - case OTG_STATE_A_HOST: - musb->xceiv->state = OTG_STATE_A_SUSPEND; - musb->is_active = is_otg_enabled(musb) - && musb->xceiv->host->b_hnp_enable; - break; - case OTG_STATE_B_HOST: - /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */ - DBG(1, "REVISIT: SUSPEND as B_HOST\n"); - break; - default: - /* "should not happen" */ - musb->is_active = 0; - break; - } - schedule_work(&musb->irq_work); - } - + schedule_work(&musb->irq_work); return handled; } @@ -1095,6 +1077,36 @@ static struct fifo_cfg __initdata mode_4_cfg[] = { { .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, }, }; +/* mode 5 - fits in 8KB */ +static struct fifo_cfg __initdata mode_5_cfg[] = { +{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, }, +{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, }, +{ .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 32, }, +{ .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 32, }, +{ .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 32, }, +{ .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 32, }, +{ .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 32, }, +{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 32, }, +{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 32, }, +{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 32, }, +{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 32, }, +{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 32, }, +{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 32, }, +{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 32, }, +{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 32, }, +{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 32, }, +{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 512, }, +{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, }, +{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, }, +}; /* * configure a fifo; for non-shared endpoints, this may be called @@ -1210,6 +1222,10 @@ static int __init ep_config_from_table(struct musb *musb) cfg = mode_4_cfg; n = ARRAY_SIZE(mode_4_cfg); break; + case 5: + cfg = mode_5_cfg; + n = ARRAY_SIZE(mode_5_cfg); + break; } printk(KERN_DEBUG "%s: setup fifo_mode %d\n", @@ -1314,9 +1330,6 @@ enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, }; */ static int __init musb_core_init(u16 musb_type, struct musb *musb) { -#ifdef MUSB_AHB_ID - u32 data; -#endif u8 reg; char *type; char aInfo[90], aRevision[32], aDate[12]; @@ -1328,23 +1341,17 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb) reg = musb_read_configdata(mbase); strcpy(aInfo, (reg & 
MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8"); - if (reg & MUSB_CONFIGDATA_DYNFIFO) + if (reg & MUSB_CONFIGDATA_DYNFIFO) { strcat(aInfo, ", dyn FIFOs"); + musb->dyn_fifo = true; + } if (reg & MUSB_CONFIGDATA_MPRXE) { strcat(aInfo, ", bulk combine"); -#ifdef C_MP_RX musb->bulk_combine = true; -#else - strcat(aInfo, " (X)"); /* no driver support */ -#endif } if (reg & MUSB_CONFIGDATA_MPTXE) { strcat(aInfo, ", bulk split"); -#ifdef C_MP_TX musb->bulk_split = true; -#else - strcat(aInfo, " (X)"); /* no driver support */ -#endif } if (reg & MUSB_CONFIGDATA_HBRXE) { strcat(aInfo, ", HB-ISO Rx"); @@ -1360,20 +1367,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb) printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n", musb_driver_name, reg, aInfo); -#ifdef MUSB_AHB_ID - data = musb_readl(mbase, 0x404); - sprintf(aDate, "%04d-%02x-%02x", (data & 0xffff), - (data >> 16) & 0xff, (data >> 24) & 0xff); - /* FIXME ID2 and ID3 are unused */ - data = musb_readl(mbase, 0x408); - printk(KERN_DEBUG "ID2=%lx\n", (long unsigned)data); - data = musb_readl(mbase, 0x40c); - printk(KERN_DEBUG "ID3=%lx\n", (long unsigned)data); - reg = musb_readb(mbase, 0x400); - musb_type = ('M' == reg) ? MUSB_CONTROLLER_MHDRC : MUSB_CONTROLLER_HDRC; -#else aDate[0] = 0; -#endif if (MUSB_CONTROLLER_MHDRC == musb_type) { musb->is_multipoint = 1; type = "M"; @@ -1404,21 +1398,10 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb) musb->nr_endpoints = 1; musb->epmask = 1; - if (reg & MUSB_CONFIGDATA_DYNFIFO) { - if (musb->config->dyn_fifo) - status = ep_config_from_table(musb); - else { - ERR("reconfigure software for Dynamic FIFOs\n"); - status = -ENODEV; - } - } else { - if (!musb->config->dyn_fifo) - status = ep_config_from_hw(musb); - else { - ERR("reconfigure software for static FIFOs\n"); - return -ENODEV; - } - } + if (musb->dyn_fifo) + status = ep_config_from_table(musb); + else + status = ep_config_from_hw(musb); if (status < 0) return status; @@ -1587,11 +1570,6 @@ irqreturn_t musb_interrupt(struct musb *musb) ep_num++; } - /* finish handling "global" interrupts after handling fifos */ - if (musb->int_usb) - retval |= musb_stage2_irq(musb, - musb->int_usb, devctl, power); - return retval; } @@ -1696,7 +1674,7 @@ musb_vbus_store(struct device *dev, struct device_attribute *attr, unsigned long val; if (sscanf(buf, "%lu", &val) < 1) { - printk(KERN_ERR "Invalid VBUS timeout ms value\n"); + dev_err(dev, "Invalid VBUS timeout ms value\n"); return -EINVAL; } @@ -1746,7 +1724,7 @@ musb_srp_store(struct device *dev, struct device_attribute *attr, if (sscanf(buf, "%hu", &srp) != 1 || (srp != 1)) { - printk(KERN_ERR "SRP: Value must be 1\n"); + dev_err(dev, "SRP: Value must be 1\n"); return -EINVAL; } @@ -1759,6 +1737,19 @@ static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store); #endif /* CONFIG_USB_GADGET_MUSB_HDRC */ +static struct attribute *musb_attributes[] = { + &dev_attr_mode.attr, + &dev_attr_vbus.attr, +#ifdef CONFIG_USB_GADGET_MUSB_HDRC + &dev_attr_srp.attr, +#endif + NULL +}; + +static const struct attribute_group musb_attr_group = { + .attrs = musb_attributes, +}; + #endif /* sysfs */ /* Only used to provide driver mode change events */ @@ -1833,11 +1824,7 @@ static void musb_free(struct musb *musb) */ #ifdef CONFIG_SYSFS - device_remove_file(musb->controller, &dev_attr_mode); - device_remove_file(musb->controller, &dev_attr_vbus); -#ifdef CONFIG_USB_GADGET_MUSB_HDRC - device_remove_file(musb->controller, &dev_attr_srp); -#endif + sysfs_remove_group(&musb->controller->kobj, 
&musb_attr_group); #endif #ifdef CONFIG_USB_GADGET_MUSB_HDRC @@ -2017,22 +2004,10 @@ bad_config: musb->irq_wake = 0; } - pr_info("%s: USB %s mode controller at %p using %s, IRQ %d\n", - musb_driver_name, - ({char *s; - switch (musb->board_mode) { - case MUSB_HOST: s = "Host"; break; - case MUSB_PERIPHERAL: s = "Peripheral"; break; - default: s = "OTG"; break; - }; s; }), - ctrl, - (is_dma_capable() && musb->dma_controller) - ? "DMA" : "PIO", - musb->nIrq); - /* host side needs more setup */ if (is_host_enabled(musb)) { struct usb_hcd *hcd = musb_to_hcd(musb); + u8 busctl; otg_set_host(musb->xceiv, &hcd->self); @@ -2040,6 +2015,13 @@ bad_config: hcd->self.otg_port = 1; musb->xceiv->host = &hcd->self; hcd->power_budget = 2 * (plat->power ? : 250); + + /* program PHY to use external vBus if required */ + if (plat->extvbus) { + busctl = musb_readb(musb->mregs, MUSB_ULPI_BUSCONTROL); + busctl |= MUSB_ULPI_USE_EXTVBUS; + musb_writeb(musb->mregs, MUSB_ULPI_BUSCONTROL, busctl); + } } /* For the host-only role, we can activate right away. @@ -2079,26 +2061,26 @@ bad_config: } #ifdef CONFIG_SYSFS - status = device_create_file(dev, &dev_attr_mode); - status = device_create_file(dev, &dev_attr_vbus); -#ifdef CONFIG_USB_GADGET_MUSB_HDRC - status = device_create_file(dev, &dev_attr_srp); -#endif /* CONFIG_USB_GADGET_MUSB_HDRC */ - status = 0; + status = sysfs_create_group(&musb->controller->kobj, &musb_attr_group); #endif if (status) goto fail2; + dev_info(dev, "USB %s mode controller at %p using %s, IRQ %d\n", + ({char *s; + switch (musb->board_mode) { + case MUSB_HOST: s = "Host"; break; + case MUSB_PERIPHERAL: s = "Peripheral"; break; + default: s = "OTG"; break; + }; s; }), + ctrl, + (is_dma_capable() && musb->dma_controller) + ? "DMA" : "PIO", + musb->nIrq); + return 0; fail2: -#ifdef CONFIG_SYSFS - device_remove_file(musb->controller, &dev_attr_mode); - device_remove_file(musb->controller, &dev_attr_vbus); -#ifdef CONFIG_USB_GADGET_MUSB_HDRC - device_remove_file(musb->controller, &dev_attr_srp); -#endif -#endif musb_platform_exit(musb); fail: dev_err(musb->controller, @@ -2127,6 +2109,7 @@ static int __init musb_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; int irq = platform_get_irq(pdev, 0); + int status; struct resource *iomem; void __iomem *base; @@ -2134,7 +2117,7 @@ static int __init musb_probe(struct platform_device *pdev) if (!iomem || irq == 0) return -ENODEV; - base = ioremap(iomem->start, iomem->end - iomem->start + 1); + base = ioremap(iomem->start, resource_size(iomem)); if (!base) { dev_err(dev, "ioremap failed\n"); return -ENOMEM; @@ -2144,7 +2127,12 @@ static int __init musb_probe(struct platform_device *pdev) /* clobbered by use_dma=n */ orig_dma_mask = dev->dma_mask; #endif - return musb_init_controller(dev, irq, base); + + status = musb_init_controller(dev, irq, base); + if (status < 0) + iounmap(base); + + return status; } static int __exit musb_remove(struct platform_device *pdev) @@ -2173,6 +2161,148 @@ static int __exit musb_remove(struct platform_device *pdev) #ifdef CONFIG_PM +static struct musb_context_registers musb_context; + +void musb_save_context(struct musb *musb) +{ + int i; + void __iomem *musb_base = musb->mregs; + + if (is_host_enabled(musb)) { + musb_context.frame = musb_readw(musb_base, MUSB_FRAME); + musb_context.testmode = musb_readb(musb_base, MUSB_TESTMODE); + } + musb_context.power = musb_readb(musb_base, MUSB_POWER); + musb_context.intrtxe = musb_readw(musb_base, MUSB_INTRTXE); + musb_context.intrrxe = musb_readw(musb_base, 
MUSB_INTRRXE); + musb_context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE); + musb_context.index = musb_readb(musb_base, MUSB_INDEX); + musb_context.devctl = musb_readb(musb_base, MUSB_DEVCTL); + + for (i = 0; i < MUSB_C_NUM_EPS; ++i) { + musb_writeb(musb_base, MUSB_INDEX, i); + musb_context.index_regs[i].txmaxp = + musb_readw(musb_base, 0x10 + MUSB_TXMAXP); + musb_context.index_regs[i].txcsr = + musb_readw(musb_base, 0x10 + MUSB_TXCSR); + musb_context.index_regs[i].rxmaxp = + musb_readw(musb_base, 0x10 + MUSB_RXMAXP); + musb_context.index_regs[i].rxcsr = + musb_readw(musb_base, 0x10 + MUSB_RXCSR); + + if (musb->dyn_fifo) { + musb_context.index_regs[i].txfifoadd = + musb_read_txfifoadd(musb_base); + musb_context.index_regs[i].rxfifoadd = + musb_read_rxfifoadd(musb_base); + musb_context.index_regs[i].txfifosz = + musb_read_txfifosz(musb_base); + musb_context.index_regs[i].rxfifosz = + musb_read_rxfifosz(musb_base); + } + if (is_host_enabled(musb)) { + musb_context.index_regs[i].txtype = + musb_readb(musb_base, 0x10 + MUSB_TXTYPE); + musb_context.index_regs[i].txinterval = + musb_readb(musb_base, 0x10 + MUSB_TXINTERVAL); + musb_context.index_regs[i].rxtype = + musb_readb(musb_base, 0x10 + MUSB_RXTYPE); + musb_context.index_regs[i].rxinterval = + musb_readb(musb_base, 0x10 + MUSB_RXINTERVAL); + + musb_context.index_regs[i].txfunaddr = + musb_read_txfunaddr(musb_base, i); + musb_context.index_regs[i].txhubaddr = + musb_read_txhubaddr(musb_base, i); + musb_context.index_regs[i].txhubport = + musb_read_txhubport(musb_base, i); + + musb_context.index_regs[i].rxfunaddr = + musb_read_rxfunaddr(musb_base, i); + musb_context.index_regs[i].rxhubaddr = + musb_read_rxhubaddr(musb_base, i); + musb_context.index_regs[i].rxhubport = + musb_read_rxhubport(musb_base, i); + } + } + + musb_writeb(musb_base, MUSB_INDEX, musb_context.index); + + musb_platform_save_context(musb, &musb_context); +} + +void musb_restore_context(struct musb *musb) +{ + int i; + void __iomem *musb_base = musb->mregs; + void __iomem *ep_target_regs; + + musb_platform_restore_context(musb, &musb_context); + + if (is_host_enabled(musb)) { + musb_writew(musb_base, MUSB_FRAME, musb_context.frame); + musb_writeb(musb_base, MUSB_TESTMODE, musb_context.testmode); + } + musb_writeb(musb_base, MUSB_POWER, musb_context.power); + musb_writew(musb_base, MUSB_INTRTXE, musb_context.intrtxe); + musb_writew(musb_base, MUSB_INTRRXE, musb_context.intrrxe); + musb_writeb(musb_base, MUSB_INTRUSBE, musb_context.intrusbe); + musb_writeb(musb_base, MUSB_DEVCTL, musb_context.devctl); + + for (i = 0; i < MUSB_C_NUM_EPS; ++i) { + musb_writeb(musb_base, MUSB_INDEX, i); + musb_writew(musb_base, 0x10 + MUSB_TXMAXP, + musb_context.index_regs[i].txmaxp); + musb_writew(musb_base, 0x10 + MUSB_TXCSR, + musb_context.index_regs[i].txcsr); + musb_writew(musb_base, 0x10 + MUSB_RXMAXP, + musb_context.index_regs[i].rxmaxp); + musb_writew(musb_base, 0x10 + MUSB_RXCSR, + musb_context.index_regs[i].rxcsr); + + if (musb->dyn_fifo) { + musb_write_txfifosz(musb_base, + musb_context.index_regs[i].txfifosz); + musb_write_rxfifosz(musb_base, + musb_context.index_regs[i].rxfifosz); + musb_write_txfifoadd(musb_base, + musb_context.index_regs[i].txfifoadd); + musb_write_rxfifoadd(musb_base, + musb_context.index_regs[i].rxfifoadd); + } + + if (is_host_enabled(musb)) { + musb_writeb(musb_base, 0x10 + MUSB_TXTYPE, + musb_context.index_regs[i].txtype); + musb_writeb(musb_base, 0x10 + MUSB_TXINTERVAL, + musb_context.index_regs[i].txinterval); + musb_writeb(musb_base, 0x10 + MUSB_RXTYPE, 
+ musb_context.index_regs[i].rxtype); + musb_writeb(musb_base, 0x10 + MUSB_RXINTERVAL, + + musb_context.index_regs[i].rxinterval); + musb_write_txfunaddr(musb_base, i, + musb_context.index_regs[i].txfunaddr); + musb_write_txhubaddr(musb_base, i, + musb_context.index_regs[i].txhubaddr); + musb_write_txhubport(musb_base, i, + musb_context.index_regs[i].txhubport); + + ep_target_regs = + musb_read_target_reg_base(i, musb_base); + + musb_write_rxfunaddr(ep_target_regs, + musb_context.index_regs[i].rxfunaddr); + musb_write_rxhubaddr(ep_target_regs, + musb_context.index_regs[i].rxhubaddr); + musb_write_rxhubport(ep_target_regs, + musb_context.index_regs[i].rxhubport); + } + } + + musb_writeb(musb_base, MUSB_INDEX, musb_context.index); +} + static int musb_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -2194,6 +2324,8 @@ static int musb_suspend(struct device *dev) */ } + musb_save_context(musb); + if (musb->set_clock) musb->set_clock(musb->clock, 0); else @@ -2215,6 +2347,8 @@ static int musb_resume_noirq(struct device *dev) else clk_enable(musb->clock); + musb_restore_context(musb); + /* for static cmos like DaVinci, register values were preserved * unless for some reason the whole soc powered down or the USB * module got reset through the PSC (vs just being disabled). diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index 5514c7ee85b..d849fb81c13 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h @@ -52,6 +52,15 @@ struct musb; struct musb_hw_ep; struct musb_ep; +/* Helper defines for struct musb->hwvers */ +#define MUSB_HWVERS_MAJOR(x) ((x >> 10) & 0x1f) +#define MUSB_HWVERS_MINOR(x) (x & 0x3ff) +#define MUSB_HWVERS_RC 0x8000 +#define MUSB_HWVERS_1300 0x52C +#define MUSB_HWVERS_1400 0x590 +#define MUSB_HWVERS_1800 0x720 +#define MUSB_HWVERS_1900 0x784 +#define MUSB_HWVERS_2000 0x800 #include "musb_debug.h" #include "musb_dma.h" @@ -322,13 +331,6 @@ struct musb { struct clk *clock; irqreturn_t (*isr)(int, void *); struct work_struct irq_work; -#define MUSB_HWVERS_MAJOR(x) ((x >> 10) & 0x1f) -#define MUSB_HWVERS_MINOR(x) (x & 0x3ff) -#define MUSB_HWVERS_RC 0x8000 -#define MUSB_HWVERS_1300 0x52C -#define MUSB_HWVERS_1400 0x590 -#define MUSB_HWVERS_1800 0x720 -#define MUSB_HWVERS_2000 0x800 u16 hwvers; /* this hub status bit is reserved by USB 2.0 and not seen by usbcore */ @@ -411,22 +413,15 @@ struct musb { unsigned hb_iso_rx:1; /* high bandwidth iso rx? */ unsigned hb_iso_tx:1; /* high bandwidth iso tx? */ + unsigned dyn_fifo:1; /* dynamic FIFO supported? 
*/ -#ifdef C_MP_TX - unsigned bulk_split:1; + unsigned bulk_split:1; #define can_bulk_split(musb,type) \ - (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split) -#else -#define can_bulk_split(musb, type) 0 -#endif + (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split) -#ifdef C_MP_RX - unsigned bulk_combine:1; + unsigned bulk_combine:1; #define can_bulk_combine(musb,type) \ - (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine) -#else -#define can_bulk_combine(musb, type) 0 -#endif + (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine) #ifdef CONFIG_USB_GADGET_MUSB_HDRC /* is_suspended means USB B_PERIPHERAL suspend */ @@ -461,6 +456,45 @@ struct musb { #endif }; +#ifdef CONFIG_PM +struct musb_csr_regs { + /* FIFO registers */ + u16 txmaxp, txcsr, rxmaxp, rxcsr; + u16 rxfifoadd, txfifoadd; + u8 txtype, txinterval, rxtype, rxinterval; + u8 rxfifosz, txfifosz; + u8 txfunaddr, txhubaddr, txhubport; + u8 rxfunaddr, rxhubaddr, rxhubport; +}; + +struct musb_context_registers { + +#if defined(CONFIG_ARCH_OMAP34XX) || defined(CONFIG_ARCH_OMAP2430) + u32 otg_sysconfig, otg_forcestandby; +#endif + u8 power; + u16 intrtxe, intrrxe; + u8 intrusbe; + u16 frame; + u8 index, testmode; + + u8 devctl, misc; + + struct musb_csr_regs index_regs[MUSB_C_NUM_EPS]; +}; + +#if defined(CONFIG_ARCH_OMAP34XX) || defined(CONFIG_ARCH_OMAP2430) +extern void musb_platform_save_context(struct musb *musb, + struct musb_context_registers *musb_context); +extern void musb_platform_restore_context(struct musb *musb, + struct musb_context_registers *musb_context); +#else +#define musb_platform_save_context(m, x) do {} while (0) +#define musb_platform_restore_context(m, x) do {} while (0) +#endif + +#endif + static inline void musb_set_vbus(struct musb *musb, int is_on) { musb->board_set_vbus(musb, is_on); } diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index cbcf14a236e..a9f288cd70e 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c @@ -895,7 +895,14 @@ static int musb_gadget_enable(struct usb_ep *ep, /* REVISIT if can_bulk_split(), use by updating "tmp"; * likewise high bandwidth periodic tx */ - musb_writew(regs, MUSB_TXMAXP, tmp); + /* Set TXMAXP with the FIFO size of the endpoint + * to disable double buffering mode. Currently, it seems that double + * buffering has a problem if the musb RTL revision number is < 2.0. + */ + if (musb->hwvers < MUSB_HWVERS_2000) + musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx); + else + musb_writew(regs, MUSB_TXMAXP, tmp); csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; if (musb_readw(regs, MUSB_TXCSR) @@ -925,7 +932,13 @@ static int musb_gadget_enable(struct usb_ep *ep, /* REVISIT if can_bulk_combine() use by updating "tmp" * likewise high bandwidth periodic rx */ - musb_writew(regs, MUSB_RXMAXP, tmp); + /* Set RXMAXP with the FIFO size of the endpoint + * to disable double buffering mode. 
+ */ + if (musb->hwvers < MUSB_HWVERS_2000) + musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_rx); + else + musb_writew(regs, MUSB_RXMAXP, tmp); /* force shared fifo to OUT-only mode */ if (hw_ep->is_shared_fifo) { @@ -1697,8 +1710,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver) return -EINVAL; /* driver must be initialized to support peripheral mode */ - if (!musb || !(musb->board_mode == MUSB_OTG - || musb->board_mode != MUSB_OTG)) { + if (!musb) { DBG(1, "%s, no dev??\n", __func__); return -ENODEV; } diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 74c4c3698f1..3421cf9858b 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -605,8 +605,14 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep) musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg); musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg); /* NOTE: bulk combining rewrites high bits of maxpacket */ - musb_writew(ep->regs, MUSB_RXMAXP, - qh->maxpacket | ((qh->hb_mult - 1) << 11)); + /* Set RXMAXP with the FIFO size of the endpoint + * to disable double buffer mode. + */ + if (musb->hwvers < MUSB_HWVERS_2000) + musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx); + else + musb_writew(ep->regs, MUSB_RXMAXP, + qh->maxpacket | ((qh->hb_mult - 1) << 11)); ep->rx_reinit = 0; } @@ -1771,6 +1777,9 @@ static int musb_schedule( int best_end, epnum; struct musb_hw_ep *hw_ep = NULL; struct list_head *head = NULL; + u8 toggle; + u8 txtype; + struct urb *urb = next_urb(qh); /* use fixed hardware for control and bulk */ if (qh->type == USB_ENDPOINT_XFER_CONTROL) { @@ -1809,6 +1818,27 @@ static int musb_schedule( diff -= (qh->maxpacket * qh->hb_mult); if (diff >= 0 && best_diff > diff) { + + /* + * Mentor controller has a bug in that if we schedule + * a BULK Tx transfer on an endpoint that had earlier + * handled ISOC then the BULK transfer has to start on + * a zero toggle. If the BULK transfer starts on a 1 + * toggle then this transfer will fail as the mentor + * controller starts the Bulk transfer on a 0 toggle + * irrespective of the programming of the toggle bits + * in the TXCSR register. Check for this condition + * while allocating the EP for a Tx Bulk transfer. If + * so skip this EP. 
+ */ + hw_ep = musb->endpoints + epnum; + toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in); + txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE) + >> 4) & 0x3; + if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) && + toggle && (txtype == USB_ENDPOINT_XFER_ISOC)) + continue; + best_diff = diff; best_end = epnum; } diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h index 473a94ef905..292894a2c24 100644 --- a/drivers/usb/musb/musb_regs.h +++ b/drivers/usb/musb/musb_regs.h @@ -72,6 +72,10 @@ #define MUSB_DEVCTL_HR 0x02 #define MUSB_DEVCTL_SESSION 0x01 +/* MUSB ULPI VBUSCONTROL */ +#define MUSB_ULPI_USE_EXTVBUS 0x01 +#define MUSB_ULPI_USE_EXTVBUSIND 0x02 + /* TESTMODE */ #define MUSB_TEST_FORCE_HOST 0x80 #define MUSB_TEST_FIFO_ACCESS 0x40 @@ -246,6 +250,7 @@ /* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */ #define MUSB_HWVERS 0x6C /* 8 bit */ +#define MUSB_ULPI_BUSCONTROL 0x70 /* 8 bit */ #define MUSB_EPINFO 0x78 /* 8 bit */ #define MUSB_RAMINFO 0x79 /* 8 bit */ @@ -321,6 +326,26 @@ static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off) musb_writew(mbase, MUSB_RXFIFOADD, c_off); } +static inline u8 musb_read_txfifosz(void __iomem *mbase) +{ + return musb_readb(mbase, MUSB_TXFIFOSZ); +} + +static inline u16 musb_read_txfifoadd(void __iomem *mbase) +{ + return musb_readw(mbase, MUSB_TXFIFOADD); +} + +static inline u8 musb_read_rxfifosz(void __iomem *mbase) +{ + return musb_readb(mbase, MUSB_RXFIFOSZ); +} + +static inline u16 musb_read_rxfifoadd(void __iomem *mbase) +{ + return musb_readw(mbase, MUSB_RXFIFOADD); +} + static inline u8 musb_read_configdata(void __iomem *mbase) { musb_writeb(mbase, MUSB_INDEX, 0); @@ -376,6 +401,36 @@ static inline void musb_write_txhubport(void __iomem *mbase, u8 epnum, qh_h_port_reg); } +static inline u8 musb_read_rxfunaddr(void __iomem *mbase, u8 epnum) +{ + return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXFUNCADDR)); +} + +static inline u8 musb_read_rxhubaddr(void __iomem *mbase, u8 epnum) +{ + return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXHUBADDR)); +} + +static inline u8 musb_read_rxhubport(void __iomem *mbase, u8 epnum) +{ + return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXHUBPORT)); +} + +static inline u8 musb_read_txfunaddr(void __iomem *mbase, u8 epnum) +{ + return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR)); +} + +static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum) +{ + return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR)); +} + +static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum) +{ + return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT)); +} + #else /* CONFIG_BLACKFIN */ #define USB_BASE USB_FADDR @@ -455,6 +510,22 @@ static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off) { } +static inline u8 musb_read_txfifosz(void __iomem *mbase) +{ +} + +static inline u16 musb_read_txfifoadd(void __iomem *mbase) +{ +} + +static inline u8 musb_read_rxfifosz(void __iomem *mbase) +{ +} + +static inline u16 musb_read_rxfifoadd(void __iomem *mbase) +{ +} + static inline u8 musb_read_configdata(void __iomem *mbase) { return 0; @@ -462,7 +533,11 @@ static inline u8 musb_read_configdata(void __iomem *mbase) static inline u16 musb_read_hwvers(void __iomem *mbase) { - return 0; + /* + * This register is invisible on Blackfin, actually the MUSB + * RTL version of Blackfin is 1.9, so just harcode its value. 
+ */ + return MUSB_HWVERS_1900; } static inline void __iomem *musb_read_target_reg_base(u8 i, void __iomem *mbase) @@ -500,6 +575,30 @@ static inline void musb_write_txhubport(void __iomem *mbase, u8 epnum, { } +static inline u8 musb_read_rxfunaddr(void __iomem *mbase, u8 epnum) +{ +} + +static inline u8 musb_read_rxhubaddr(void __iomem *mbase, u8 epnum) +{ +} + +static inline u8 musb_read_rxhubport(void __iomem *mbase, u8 epnum) +{ +} + +static inline u8 musb_read_txfunaddr(void __iomem *mbase, u8 epnum) +{ +} + +static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum) +{ +} + +static inline void musb_read_txhubport(void __iomem *mbase, u8 epnum) +{ +} + #endif /* CONFIG_BLACKFIN */ #endif /* __MUSB_REGS_H__ */ diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c index a237550f91b..2fa7d5c00f3 100644 --- a/drivers/usb/musb/musbhsdma.c +++ b/drivers/usb/musb/musbhsdma.c @@ -250,20 +250,39 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data) u8 bchannel; u8 int_hsdma; - u32 addr; + u32 addr, count; u16 csr; spin_lock_irqsave(&musb->lock, flags); int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR); - if (!int_hsdma) - goto done; #ifdef CONFIG_BLACKFIN /* Clear DMA interrupt flags */ musb_writeb(mbase, MUSB_HSDMA_INTR, int_hsdma); #endif + if (!int_hsdma) { + DBG(2, "spurious DMA irq\n"); + + for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) { + musb_channel = (struct musb_dma_channel *) + &(controller->channel[bchannel]); + channel = &musb_channel->channel; + if (channel->status == MUSB_DMA_STATUS_BUSY) { + count = musb_read_hsdma_count(mbase, bchannel); + + if (count == 0) + int_hsdma |= (1 << bchannel); + } + } + + DBG(2, "int_hsdma = 0x%x\n", int_hsdma); + + if (!int_hsdma) + goto done; + } + for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) { if (int_hsdma & (1 << bchannel)) { musb_channel = (struct musb_dma_channel *) diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h index 1299d92dc83..613f95a058f 100644 --- a/drivers/usb/musb/musbhsdma.h +++ b/drivers/usb/musb/musbhsdma.h @@ -55,6 +55,10 @@ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDRESS), \ addr) +#define musb_read_hsdma_count(mbase, bchannel) \ + musb_readl(mbase, \ + MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT)) + #define musb_write_hsdma_count(mbase, bchannel, len) \ musb_writel(mbase, \ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT), \ @@ -96,6 +100,19 @@ static inline void musb_write_hsdma_addr(void __iomem *mbase, ((u16)(((u32) dma_addr >> 16) & 0xFFFF))); } +static inline u32 musb_read_hsdma_count(void __iomem *mbase, u8 bchannel) +{ + u32 count = musb_readw(mbase, + MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH)); + + count = count << 16; + + count |= musb_readw(mbase, + MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW)); + + return count; +} + static inline void musb_write_hsdma_count(void __iomem *mbase, u8 bchannel, u32 len) { diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index 83beeac5e7b..3fe16867b5a 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c @@ -220,7 +220,7 @@ int __init musb_platform_init(struct musb *musb) musb_platform_resume(musb); - l = omap_readl(OTG_SYSCONFIG); + l = musb_readl(musb->mregs, OTG_SYSCONFIG); l &= ~ENABLEWAKEUP; /* disable wakeup */ l &= ~NOSTDBY; /* remove possible nostdby */ l |= SMARTSTDBY; /* enable smart standby */ @@ -233,17 +233,19 @@ int __init musb_platform_init(struct musb *musb) */ if 
(!cpu_is_omap3430()) l |= AUTOIDLE; /* enable auto idle */ - omap_writel(l, OTG_SYSCONFIG); + musb_writel(musb->mregs, OTG_SYSCONFIG, l); - l = omap_readl(OTG_INTERFSEL); + l = musb_readl(musb->mregs, OTG_INTERFSEL); l |= ULPI_12PIN; - omap_writel(l, OTG_INTERFSEL); + musb_writel(musb->mregs, OTG_INTERFSEL, l); pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, " "sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n", - omap_readl(OTG_REVISION), omap_readl(OTG_SYSCONFIG), - omap_readl(OTG_SYSSTATUS), omap_readl(OTG_INTERFSEL), - omap_readl(OTG_SIMENABLE)); + musb_readl(musb->mregs, OTG_REVISION), + musb_readl(musb->mregs, OTG_SYSCONFIG), + musb_readl(musb->mregs, OTG_SYSSTATUS), + musb_readl(musb->mregs, OTG_INTERFSEL), + musb_readl(musb->mregs, OTG_SIMENABLE)); omap_vbus_power(musb, musb->board_mode == MUSB_HOST, 1); @@ -255,6 +257,22 @@ int __init musb_platform_init(struct musb *musb) return 0; } +#ifdef CONFIG_PM +void musb_platform_save_context(struct musb *musb, + struct musb_context_registers *musb_context) +{ + musb_context->otg_sysconfig = musb_readl(musb->mregs, OTG_SYSCONFIG); + musb_context->otg_forcestandby = musb_readl(musb->mregs, OTG_FORCESTDBY); +} + +void musb_platform_restore_context(struct musb *musb, + struct musb_context_registers *musb_context) +{ + musb_writel(musb->mregs, OTG_SYSCONFIG, musb_context->otg_sysconfig); + musb_writel(musb->mregs, OTG_FORCESTDBY, musb_context->otg_forcestandby); +} +#endif + int musb_platform_suspend(struct musb *musb) { u32 l; @@ -263,13 +281,13 @@ int musb_platform_suspend(struct musb *musb) return 0; /* in any role */ - l = omap_readl(OTG_FORCESTDBY); + l = musb_readl(musb->mregs, OTG_FORCESTDBY); l |= ENABLEFORCE; /* enable MSTANDBY */ - omap_writel(l, OTG_FORCESTDBY); + musb_writel(musb->mregs, OTG_FORCESTDBY, l); - l = omap_readl(OTG_SYSCONFIG); + l = musb_readl(musb->mregs, OTG_SYSCONFIG); l |= ENABLEWAKEUP; /* enable wakeup */ - omap_writel(l, OTG_SYSCONFIG); + musb_writel(musb->mregs, OTG_SYSCONFIG, l); otg_set_suspend(musb->xceiv, 1); @@ -295,13 +313,13 @@ static int musb_platform_resume(struct musb *musb) else clk_enable(musb->clock); - l = omap_readl(OTG_SYSCONFIG); + l = musb_readl(musb->mregs, OTG_SYSCONFIG); l &= ~ENABLEWAKEUP; /* disable wakeup */ - omap_writel(l, OTG_SYSCONFIG); + musb_writel(musb->mregs, OTG_SYSCONFIG, l); - l = omap_readl(OTG_FORCESTDBY); + l = musb_readl(musb->mregs, OTG_FORCESTDBY); l &= ~ENABLEFORCE; /* disable MSTANDBY */ - omap_writel(l, OTG_FORCESTDBY); + musb_writel(musb->mregs, OTG_FORCESTDBY, l); return 0; } diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h index fbede7798ae..40b3c02ae9f 100644 --- a/drivers/usb/musb/omap2430.h +++ b/drivers/usb/musb/omap2430.h @@ -10,47 +10,43 @@ #ifndef __MUSB_OMAP243X_H__ #define __MUSB_OMAP243X_H__ -#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430) -#include <mach/hardware.h> #include <plat/usb.h> /* * OMAP2430-specific definitions */ -#define MENTOR_BASE_OFFSET 0 -#if defined(CONFIG_ARCH_OMAP2430) -#define OMAP_HSOTG_BASE (OMAP243X_HS_BASE) -#elif defined(CONFIG_ARCH_OMAP3430) -#define OMAP_HSOTG_BASE (OMAP34XX_HSUSB_OTG_BASE) -#endif -#define OMAP_HSOTG(offset) (OMAP_HSOTG_BASE + 0x400 + (offset)) -#define OTG_REVISION OMAP_HSOTG(0x0) -#define OTG_SYSCONFIG OMAP_HSOTG(0x4) +#define OTG_REVISION 0x400 + +#define OTG_SYSCONFIG 0x404 # define MIDLEMODE 12 /* bit position */ # define FORCESTDBY (0 << MIDLEMODE) # define NOSTDBY (1 << MIDLEMODE) # define SMARTSTDBY (2 << MIDLEMODE) + # define SIDLEMODE 3 /* bit position */ 
# define FORCEIDLE (0 << SIDLEMODE) # define NOIDLE (1 << SIDLEMODE) # define SMARTIDLE (2 << SIDLEMODE) + # define ENABLEWAKEUP (1 << 2) # define SOFTRST (1 << 1) # define AUTOIDLE (1 << 0) -#define OTG_SYSSTATUS OMAP_HSOTG(0x8) + +#define OTG_SYSSTATUS 0x408 # define RESETDONE (1 << 0) -#define OTG_INTERFSEL OMAP_HSOTG(0xc) + +#define OTG_INTERFSEL 0x40c # define EXTCP (1 << 2) -# define PHYSEL 0 /* bit position */ +# define PHYSEL 0 /* bit position */ # define UTMI_8BIT (0 << PHYSEL) # define ULPI_12PIN (1 << PHYSEL) # define ULPI_8PIN (2 << PHYSEL) -#define OTG_SIMENABLE OMAP_HSOTG(0x10) + +#define OTG_SIMENABLE 0x410 # define TM1 (1 << 0) -#define OTG_FORCESTDBY OMAP_HSOTG(0x14) -# define ENABLEFORCE (1 << 0) -#endif /* CONFIG_ARCH_OMAP2430 */ +#define OTG_FORCESTDBY 0x414 +# define ENABLEFORCE (1 << 0) #endif /* __MUSB_OMAP243X_H__ */ diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c index 88b587c703e..ab776a8d98c 100644 --- a/drivers/usb/musb/tusb6010.c +++ b/drivers/usb/musb/tusb6010.c @@ -1118,7 +1118,7 @@ int __init musb_platform_init(struct musb *musb) } musb->sync = mem->start; - sync = ioremap(mem->start, mem->end - mem->start + 1); + sync = ioremap(mem->start, resource_size(mem)); if (!sync) { pr_debug("ioremap for sync failed\n"); ret = -ENOMEM; diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c index e13c77052e5..1c868096bd6 100644 --- a/drivers/usb/musb/tusb6010_omap.c +++ b/drivers/usb/musb/tusb6010_omap.c @@ -648,7 +648,7 @@ void dma_controller_destroy(struct dma_controller *c) } } - if (!tusb_dma->multichannel && tusb_dma && tusb_dma->ch >= 0) + if (tusb_dma && !tusb_dma->multichannel && tusb_dma->ch >= 0) omap_free_dma(tusb_dma->ch); kfree(tusb_dma); diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c index 2be9f2fa41f..3e4e9f434d7 100644 --- a/drivers/usb/otg/twl4030-usb.c +++ b/drivers/usb/otg/twl4030-usb.c @@ -36,7 +36,7 @@ #include <linux/i2c/twl.h> #include <linux/regulator/consumer.h> #include <linux/err.h> - +#include <linux/notifier.h> /* Register defines */ @@ -236,15 +236,6 @@ #define PMBR1 0x0D #define GPIO_USB_4PIN_ULPI_2430C (3 << 0) - - -enum linkstat { - USB_LINK_UNKNOWN = 0, - USB_LINK_NONE, - USB_LINK_VBUS, - USB_LINK_ID, -}; - struct twl4030_usb { struct otg_transceiver otg; struct device *dev; @@ -347,10 +338,10 @@ twl4030_usb_clear_bits(struct twl4030_usb *twl, u8 reg, u8 bits) /*-------------------------------------------------------------------------*/ -static enum linkstat twl4030_usb_linkstat(struct twl4030_usb *twl) +static enum usb_xceiv_events twl4030_usb_linkstat(struct twl4030_usb *twl) { int status; - int linkstat = USB_LINK_UNKNOWN; + int linkstat = USB_EVENT_NONE; /* * For ID/VBUS sensing, see manual section 15.4.8 ... 
@@ -368,11 +359,11 @@ static enum linkstat twl4030_usb_linkstat(struct twl4030_usb *twl) dev_err(twl->dev, "USB link status err %d\n", status); else if (status & (BIT(7) | BIT(2))) { if (status & BIT(2)) - linkstat = USB_LINK_ID; + linkstat = USB_EVENT_ID; else - linkstat = USB_LINK_VBUS; + linkstat = USB_EVENT_VBUS; } else - linkstat = USB_LINK_NONE; + linkstat = USB_EVENT_NONE; dev_dbg(twl->dev, "HW_CONDITIONS 0x%02x/%d; link %d\n", status, status, linkstat); @@ -383,7 +374,7 @@ static enum linkstat twl4030_usb_linkstat(struct twl4030_usb *twl) spin_lock_irq(&twl->lock); twl->linkstat = linkstat; - if (linkstat == USB_LINK_ID) { + if (linkstat == USB_EVENT_ID) { twl->otg.default_a = true; twl->otg.state = OTG_STATE_A_IDLE; } else { @@ -564,7 +555,7 @@ static ssize_t twl4030_usb_vbus_show(struct device *dev, spin_lock_irqsave(&twl->lock, flags); ret = sprintf(buf, "%s\n", - (twl->linkstat == USB_LINK_VBUS) ? "on" : "off"); + (twl->linkstat == USB_EVENT_VBUS) ? "on" : "off"); spin_unlock_irqrestore(&twl->lock, flags); return ret; @@ -576,17 +567,8 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl) struct twl4030_usb *twl = _twl; int status; -#ifdef CONFIG_LOCKDEP - /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which - * we don't want and can't tolerate. Although it might be - * friendlier not to borrow this thread context... - */ - local_irq_enable(); -#endif - status = twl4030_usb_linkstat(twl); - if (status != USB_LINK_UNKNOWN) { - + if (status >= 0) { /* FIXME add a set_power() method so that B-devices can * configure the charger appropriately. It's not always * correct to consume VBUS power, and how much current to @@ -598,12 +580,13 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl) * USB_LINK_VBUS state. musb_hdrc won't care until it * starts to handle softconnect right. */ - if (status == USB_LINK_NONE) + if (status == USB_EVENT_NONE) twl4030_phy_suspend(twl, 0); else twl4030_phy_resume(twl); - twl4030charger_usb_en(status == USB_LINK_VBUS); + blocking_notifier_call_chain(&twl->otg.notifier, status, + twl->otg.gadget); } sysfs_notify(&twl->dev->kobj, NULL, "vbus"); @@ -693,6 +676,8 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev) if (device_create_file(&pdev->dev, &dev_attr_vbus)) dev_warn(&pdev->dev, "could not create sysfs file\n"); + BLOCKING_INIT_NOTIFIER_HEAD(&twl->otg.notifier); + /* Our job is to use irqs and status from the power module * to keep the transceiver disabled when nothing's connected. * @@ -702,7 +687,7 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev) * need both handles, otherwise just one suffices. */ twl->irq_enabled = true; - status = request_irq(twl->irq, twl4030_usb_irq, + status = request_threaded_irq(twl->irq, NULL, twl4030_usb_irq, IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, "twl4030_usb", twl); if (status < 0) { diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig index c480ea4c19f..c78b255e3f8 100644 --- a/drivers/usb/serial/Kconfig +++ b/drivers/usb/serial/Kconfig @@ -472,6 +472,17 @@ config USB_SERIAL_OTI6858 To compile this driver as a module, choose M here: the module will be called oti6858. +config USB_SERIAL_QCAUX + tristate "USB Qualcomm Auxiliary Serial Port Driver" + ---help--- + Say Y here if you want to use the auxiliary serial ports provided + by many modems based on Qualcomm chipsets. These ports often use + a proprietary protocol called DM and cannot be used for AT- or + PPP-based communication. 
+ + To compile this driver as a module, choose M here: the + module will be called moto_modem. If unsure, choose N. + config USB_SERIAL_QUALCOMM tristate "USB Qualcomm Serial modem" help @@ -600,6 +611,14 @@ config USB_SERIAL_OPTICON To compile this driver as a module, choose M here: the module will be called opticon. +config USB_SERIAL_VIVOPAY_SERIAL + tristate "USB ViVOpay serial interface driver" + help + Say Y here if you want to use a ViVOtech ViVOpay USB device. + + To compile this driver as a module, choose M here: the + module will be called vivopay-serial. + config USB_SERIAL_DEBUG tristate "USB Debugging Device" help diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile index 66619beb6cc..83c9e431a56 100644 --- a/drivers/usb/serial/Makefile +++ b/drivers/usb/serial/Makefile @@ -45,6 +45,7 @@ obj-$(CONFIG_USB_SERIAL_OPTICON) += opticon.o obj-$(CONFIG_USB_SERIAL_OPTION) += option.o obj-$(CONFIG_USB_SERIAL_OTI6858) += oti6858.o obj-$(CONFIG_USB_SERIAL_PL2303) += pl2303.o +obj-$(CONFIG_USB_SERIAL_QCAUX) += qcaux.o obj-$(CONFIG_USB_SERIAL_QUALCOMM) += qcserial.o obj-$(CONFIG_USB_SERIAL_SAFE) += safe_serial.o obj-$(CONFIG_USB_SERIAL_SIEMENS_MPI) += siemens_mpi.o @@ -55,4 +56,5 @@ obj-$(CONFIG_USB_SERIAL_TI) += ti_usb_3410_5052.o obj-$(CONFIG_USB_SERIAL_VISOR) += visor.o obj-$(CONFIG_USB_SERIAL_WHITEHEAT) += whiteheat.o obj-$(CONFIG_USB_SERIAL_XIRCOM) += keyspan_pda.o +obj-$(CONFIG_USB_SERIAL_VIVOPAY_SERIAL) += vivopay-serial.o diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c index b10ac840941..365db1097bf 100644 --- a/drivers/usb/serial/aircable.c +++ b/drivers/usb/serial/aircable.c @@ -78,7 +78,7 @@ static int debug; #define DRIVER_DESC "AIRcable USB Driver" /* ID table that will be registered with USB core */ -static struct usb_device_id id_table [] = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(AIRCABLE_VID, AIRCABLE_USB_PID) }, { }, }; @@ -468,10 +468,6 @@ static void aircable_read_bulk_callback(struct urb *urb) if (status) { dbg("%s - urb status = %d", __func__, status); - if (!port->port.count) { - dbg("%s - port is closed, exiting.", __func__); - return; - } if (status == -EPROTO) { dbg("%s - caught -EPROTO, resubmitting the urb", __func__); @@ -530,23 +526,19 @@ static void aircable_read_bulk_callback(struct urb *urb) } tty_kref_put(tty); - /* Schedule the next read _if_ we are still open */ - if (port->port.count) { - usb_fill_bulk_urb(port->read_urb, port->serial->dev, - usb_rcvbulkpipe(port->serial->dev, - port->bulk_in_endpointAddress), - port->read_urb->transfer_buffer, - port->read_urb->transfer_buffer_length, - aircable_read_bulk_callback, port); - - result = usb_submit_urb(urb, GFP_ATOMIC); - if (result) - dev_err(&urb->dev->dev, - "%s - failed resubmitting read urb, error %d\n", - __func__, result); - } - - return; + /* Schedule the next read */ + usb_fill_bulk_urb(port->read_urb, port->serial->dev, + usb_rcvbulkpipe(port->serial->dev, + port->bulk_in_endpointAddress), + port->read_urb->transfer_buffer, + port->read_urb->transfer_buffer_length, + aircable_read_bulk_callback, port); + + result = usb_submit_urb(urb, GFP_ATOMIC); + if (result && result != -EPERM) + dev_err(&urb->dev->dev, + "%s - failed resubmitting read urb, error %d\n", + __func__, result); } /* Based on ftdi_sio.c throttle */ diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c index a9c2dec8e3f..547c9448c28 100644 --- a/drivers/usb/serial/ark3116.c +++ b/drivers/usb/serial/ark3116.c @@ -50,7 +50,7 @@ static int debug; 
/* usb timeout of 1 second */ #define ARK_TIMEOUT (1*HZ) -static struct usb_device_id id_table [] = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(0x6547, 0x0232) }, { USB_DEVICE(0x18ec, 0x3118) }, /* USB to IrDA adapter */ { }, @@ -733,7 +733,6 @@ static void ark3116_read_bulk_callback(struct urb *urb) tty = tty_port_tty_get(&port->port); if (tty) { - tty_buffer_request_room(tty, urb->actual_length + 1); /* overrun is special, not associated with a char */ if (unlikely(lsr & UART_LSR_OE)) tty_insert_flip_char(tty, 0, TTY_OVERRUN); diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c index a0467bc6162..1295e44e3f1 100644 --- a/drivers/usb/serial/belkin_sa.c +++ b/drivers/usb/serial/belkin_sa.c @@ -103,7 +103,7 @@ static int belkin_sa_tiocmset(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear); -static struct usb_device_id id_table_combined [] = { +static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(BELKIN_SA_VID, BELKIN_SA_PID) }, { USB_DEVICE(BELKIN_OLD_VID, BELKIN_OLD_PID) }, { USB_DEVICE(PERACOM_VID, PERACOM_PID) }, diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 59eff721fcc..9f4fed1968b 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -22,6 +22,7 @@ #include <linux/usb.h> #include <linux/usb/serial.h> #include <linux/serial.h> +#include <asm/unaligned.h> #define DEFAULT_BAUD_RATE 9600 #define DEFAULT_TIMEOUT 1000 @@ -70,7 +71,7 @@ static int debug; -static struct usb_device_id id_table [] = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(0x4348, 0x5523) }, { USB_DEVICE(0x1a86, 0x7523) }, { }, @@ -392,16 +393,22 @@ static void ch341_break_ctl(struct tty_struct *tty, int break_state) struct usb_serial_port *port = tty->driver_data; int r; uint16_t reg_contents; - uint8_t break_reg[2]; + uint8_t *break_reg; dbg("%s()", __func__); + break_reg = kmalloc(2, GFP_KERNEL); + if (!break_reg) { + dev_err(&port->dev, "%s - kmalloc failed\n", __func__); + return; + } + r = ch341_control_in(port->serial->dev, CH341_REQ_READ_REG, - ch341_break_reg, 0, break_reg, sizeof(break_reg)); + ch341_break_reg, 0, break_reg, 2); if (r < 0) { - printk(KERN_WARNING "%s: USB control read error whilst getting" - " break register contents.\n", __FILE__); - return; + dev_err(&port->dev, "%s - USB control read error (%d)\n", + __func__, r); + goto out; } dbg("%s - initial ch341 break register contents - reg1: %x, reg2: %x", __func__, break_reg[0], break_reg[1]); @@ -416,12 +423,14 @@ static void ch341_break_ctl(struct tty_struct *tty, int break_state) } dbg("%s - New ch341 break register contents - reg1: %x, reg2: %x", __func__, break_reg[0], break_reg[1]); - reg_contents = (uint16_t)break_reg[0] | ((uint16_t)break_reg[1] << 8); + reg_contents = get_unaligned_le16(break_reg); r = ch341_control_out(port->serial->dev, CH341_REQ_WRITE_REG, ch341_break_reg, reg_contents); if (r < 0) - printk(KERN_WARNING "%s: USB control write error whilst setting" - " break register contents.\n", __FILE__); + dev_err(&port->dev, "%s - USB control write error (%d)\n", + __func__, r); +out: + kfree(break_reg); } static int ch341_tiocmset(struct tty_struct *tty, struct file *file, diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index bd254ec97d1..507382b0a9e 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -55,7 +55,7 @@ static int cp210x_carrier_raised(struct usb_serial_port *p); static int debug; -static struct usb_device_id 
id_table [] = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */ { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */ @@ -91,11 +91,12 @@ static struct usb_device_id id_table [] = { { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */ { USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */ { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */ + { USB_DEVICE(0x10C4, 0x81E8) }, /* Zephyr Bioharness */ { USB_DEVICE(0x10C4, 0x81F2) }, /* C1007 HF band RFID controller */ { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */ { USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Comander 2 */ { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demostration module */ - { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */ + { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesys ETRX2USB */ { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */ @@ -612,7 +613,7 @@ static void cp210x_set_termios(struct tty_struct *tty, baud); if (cp210x_set_config_single(port, CP210X_SET_BAUDDIV, ((BAUD_RATE_GEN_FREQ + baud/2) / baud))) { - dbg("Baud rate requested not supported by device\n"); + dbg("Baud rate requested not supported by device"); baud = tty_termios_baud_rate(old_termios); } } diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c index b0f6402a91c..f744ab7a3b1 100644 --- a/drivers/usb/serial/cyberjack.c +++ b/drivers/usb/serial/cyberjack.c @@ -70,7 +70,7 @@ static void cyberjack_read_int_callback(struct urb *urb); static void cyberjack_read_bulk_callback(struct urb *urb); static void cyberjack_write_bulk_callback(struct urb *urb); -static struct usb_device_id id_table [] = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(CYBERJACK_VENDOR_ID, CYBERJACK_PRODUCT_ID) }, { } /* Terminating entry */ }; @@ -391,11 +391,10 @@ static void cyberjack_read_bulk_callback(struct urb *urb) tty = tty_port_tty_get(&port->port); if (!tty) { - dbg("%s - ignoring since device not open\n", __func__); + dbg("%s - ignoring since device not open", __func__); return; } if (urb->actual_length) { - tty_buffer_request_room(tty, urb->actual_length); tty_insert_flip_string(tty, data, urb->actual_length); tty_flip_buffer_push(tty); } diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c index a591ebec0f8..baf74b44e6e 100644 --- a/drivers/usb/serial/cypress_m8.c +++ b/drivers/usb/serial/cypress_m8.c @@ -66,17 +66,15 @@ #include <linux/serial.h> #include <linux/delay.h> #include <linux/uaccess.h> +#include <asm/unaligned.h> #include "cypress_m8.h" -#ifdef CONFIG_USB_SERIAL_DEBUG - static int debug = 1; -#else - static int debug; -#endif +static int debug; static int stats; static int interval; +static int unstable_bauds; /* * Version Information @@ -89,24 +87,24 @@ static int interval; #define CYPRESS_BUF_SIZE 1024 #define CYPRESS_CLOSING_WAIT (30*HZ) -static struct usb_device_id id_table_earthmate [] = { +static const struct usb_device_id id_table_earthmate[] = { { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) }, { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) }, { } /* Terminating entry */ }; -static struct usb_device_id id_table_cyphidcomrs232 [] 
= { +static const struct usb_device_id id_table_cyphidcomrs232[] = { { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, { } /* Terminating entry */ }; -static struct usb_device_id id_table_nokiaca42v2 [] = { +static const struct usb_device_id id_table_nokiaca42v2[] = { { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) }, { } /* Terminating entry */ }; -static struct usb_device_id id_table_combined [] = { +static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) }, { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) }, { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, @@ -295,6 +293,9 @@ static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate) struct cypress_private *priv; priv = usb_get_serial_port_data(port); + if (unstable_bauds) + return new_rate; + /* * The general purpose firmware for the Cypress M8 allows for * a maximum speed of 57600bps (I have no idea whether DeLorme @@ -344,7 +345,8 @@ static int cypress_serial_control(struct tty_struct *tty, { int new_baudrate = 0, retval = 0, tries = 0; struct cypress_private *priv; - __u8 feature_buffer[5]; + u8 *feature_buffer; + const unsigned int feature_len = 5; unsigned long flags; dbg("%s", __func__); @@ -354,17 +356,18 @@ static int cypress_serial_control(struct tty_struct *tty, if (!priv->comm_is_ok) return -ENODEV; + feature_buffer = kcalloc(feature_len, sizeof(u8), GFP_KERNEL); + if (!feature_buffer) + return -ENOMEM; + switch (cypress_request_type) { case CYPRESS_SET_CONFIG: - new_baudrate = priv->baud_rate; /* 0 means 'Hang up' so doesn't change the true bit rate */ - if (baud_rate == 0) - new_baudrate = priv->baud_rate; - /* Change of speed ? */ - else if (baud_rate != priv->baud_rate) { + new_baudrate = priv->baud_rate; + if (baud_rate && baud_rate != priv->baud_rate) { dbg("%s - baud rate is changing", __func__); retval = analyze_baud_rate(port, baud_rate); - if (retval >= 0) { + if (retval >= 0) { new_baudrate = retval; dbg("%s - New baud rate set to %d", __func__, new_baudrate); @@ -373,9 +376,8 @@ static int cypress_serial_control(struct tty_struct *tty, dbg("%s - baud rate is being sent as %d", __func__, new_baudrate); - memset(feature_buffer, 0, sizeof(feature_buffer)); /* fill the feature_buffer with new configuration */ - *((u_int32_t *)feature_buffer) = new_baudrate; + put_unaligned_le32(new_baudrate, feature_buffer); feature_buffer[4] |= data_bits; /* assign data bits in 2 bit space ( max 3 ) */ /* 1 bit gap */ feature_buffer[4] |= (stop_bits << 3); /* assign stop bits in 1 bit space */ @@ -397,15 +399,15 @@ static int cypress_serial_control(struct tty_struct *tty, HID_REQ_SET_REPORT, USB_DIR_OUT | USB_RECIP_INTERFACE | USB_TYPE_CLASS, 0x0300, 0, feature_buffer, - sizeof(feature_buffer), 500); + feature_len, 500); if (tries++ >= 3) break; - } while (retval != sizeof(feature_buffer) && + } while (retval != feature_len && retval != -ENODEV); - if (retval != sizeof(feature_buffer)) { + if (retval != feature_len) { dev_err(&port->dev, "%s - failed sending serial " "line settings - %d\n", __func__, retval); cypress_set_dead(port); @@ -425,43 +427,42 @@ static int cypress_serial_control(struct tty_struct *tty, /* Not implemented for this device, and if we try to do it we're likely to crash the hardware. 
*/ - return -ENOTTY; + retval = -ENOTTY; + goto out; } dbg("%s - retreiving serial line settings", __func__); - /* set initial values in feature buffer */ - memset(feature_buffer, 0, sizeof(feature_buffer)); - do { retval = usb_control_msg(port->serial->dev, usb_rcvctrlpipe(port->serial->dev, 0), HID_REQ_GET_REPORT, USB_DIR_IN | USB_RECIP_INTERFACE | USB_TYPE_CLASS, 0x0300, 0, feature_buffer, - sizeof(feature_buffer), 500); + feature_len, 500); if (tries++ >= 3) break; - } while (retval != sizeof(feature_buffer) + } while (retval != feature_len && retval != -ENODEV); - if (retval != sizeof(feature_buffer)) { + if (retval != feature_len) { dev_err(&port->dev, "%s - failed to retrieve serial " "line settings - %d\n", __func__, retval); cypress_set_dead(port); - return retval; + goto out; } else { spin_lock_irqsave(&priv->lock, flags); /* store the config in one byte, and later use bit masks to check values */ priv->current_config = feature_buffer[4]; - priv->baud_rate = *((u_int32_t *)feature_buffer); + priv->baud_rate = get_unaligned_le32(feature_buffer); spin_unlock_irqrestore(&priv->lock, flags); } } spin_lock_irqsave(&priv->lock, flags); ++priv->cmd_count; spin_unlock_irqrestore(&priv->lock, flags); - +out: + kfree(feature_buffer); return retval; } /* cypress_serial_control */ @@ -690,7 +691,6 @@ static void cypress_dtr_rts(struct usb_serial_port *port, int on) { struct cypress_private *priv = usb_get_serial_port_data(port); /* drop dtr and rts */ - priv = usb_get_serial_port_data(port); spin_lock_irq(&priv->lock); if (on == 0) priv->line_control = 0; @@ -1307,13 +1307,9 @@ static void cypress_read_int_callback(struct urb *urb) spin_unlock_irqrestore(&priv->lock, flags); /* process read if there is data other than line status */ - if (tty && (bytes > i)) { - bytes = tty_buffer_request_room(tty, bytes); - for (; i < bytes ; ++i) { - dbg("pushing byte number %d - %d - %c", i, data[i], - data[i]); - tty_insert_flip_char(tty, data[i], tty_flag); - } + if (tty && bytes > i) { + tty_insert_flip_string_fixed_flag(tty, data + i, + bytes - i, tty_flag); tty_flip_buffer_push(tty); } @@ -1325,9 +1321,9 @@ static void cypress_read_int_callback(struct urb *urb) continue_read: tty_kref_put(tty); - /* Continue trying to always read... unless the port has closed. 
*/ + /* Continue trying to always read */ - if (port->port.count > 0 && priv->comm_is_ok) { + if (priv->comm_is_ok) { usb_fill_int_urb(port->interrupt_in_urb, port->serial->dev, usb_rcvintpipe(port->serial->dev, port->interrupt_in_endpointAddress), @@ -1336,7 +1332,7 @@ continue_read: cypress_read_int_callback, port, priv->read_urb_interval); result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC); - if (result) { + if (result && result != -EPERM) { dev_err(&urb->dev->dev, "%s - failed resubmitting " "read urb, error %d\n", __func__, result); @@ -1650,3 +1646,5 @@ module_param(stats, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(stats, "Enable statistics or not"); module_param(interval, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(interval, "Overrides interrupt interval"); +module_param(unstable_bauds, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(unstable_bauds, "Allow unstable baud rates"); diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c index 68e80be6b9e..68b0aa5e516 100644 --- a/drivers/usb/serial/digi_acceleport.c +++ b/drivers/usb/serial/digi_acceleport.c @@ -470,18 +470,18 @@ static int digi_read_oob_callback(struct urb *urb); static int debug; -static struct usb_device_id id_table_combined [] = { +static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(DIGI_VENDOR_ID, DIGI_2_ID) }, { USB_DEVICE(DIGI_VENDOR_ID, DIGI_4_ID) }, { } /* Terminating entry */ }; -static struct usb_device_id id_table_2 [] = { +static const struct usb_device_id id_table_2[] = { { USB_DEVICE(DIGI_VENDOR_ID, DIGI_2_ID) }, { } /* Terminating entry */ }; -static struct usb_device_id id_table_4 [] = { +static const struct usb_device_id id_table_4[] = { { USB_DEVICE(DIGI_VENDOR_ID, DIGI_4_ID) }, { } /* Terminating entry */ }; @@ -1262,10 +1262,10 @@ static void digi_write_bulk_callback(struct urb *urb) return; } - /* try to send any buffered data on this port, if it is open */ + /* try to send any buffered data on this port */ spin_lock(&priv->dp_port_lock); priv->dp_write_urb_in_use = 0; - if (port->port.count && priv->dp_out_buf_len > 0) { + if (priv->dp_out_buf_len > 0) { *((unsigned char *)(port->write_urb->transfer_buffer)) = (unsigned char)DIGI_CMD_SEND_DATA; *((unsigned char *)(port->write_urb->transfer_buffer) + 1) @@ -1288,7 +1288,7 @@ static void digi_write_bulk_callback(struct urb *urb) schedule_work(&priv->dp_wakeup_work); spin_unlock(&priv->dp_port_lock); - if (ret) + if (ret && ret != -EPERM) dev_err(&port->dev, "%s: usb_submit_urb failed, ret=%d, port=%d\n", __func__, ret, priv->dp_port_num); @@ -1353,8 +1353,7 @@ static int digi_open(struct tty_struct *tty, struct usb_serial_port *port) struct digi_port *priv = usb_get_serial_port_data(port); struct ktermios not_termios; - dbg("digi_open: TOP: port=%d, open_count=%d", - priv->dp_port_num, port->port.count); + dbg("digi_open: TOP: port=%d", priv->dp_port_num); /* be sure the device is started up */ if (digi_startup_device(port->serial) != 0) @@ -1393,8 +1392,7 @@ static void digi_close(struct usb_serial_port *port) unsigned char buf[32]; struct digi_port *priv = usb_get_serial_port_data(port); - dbg("digi_close: TOP: port=%d, open_count=%d", - priv->dp_port_num, port->port.count); + dbg("digi_close: TOP: port=%d", priv->dp_port_num); mutex_lock(&port->serial->disc_mutex); /* if disconnected, just clear flags */ @@ -1629,7 +1627,7 @@ static void digi_read_bulk_callback(struct urb *urb) /* continue read */ urb->dev = port->serial->dev; ret = usb_submit_urb(urb, GFP_ATOMIC); - if (ret != 0) { + if 
(ret != 0 && ret != -EPERM) { dev_err(&port->dev, "%s: failed resubmitting urb, ret=%d, port=%d\n", __func__, ret, priv->dp_port_num); @@ -1658,12 +1656,11 @@ static int digi_read_inb_callback(struct urb *urb) int port_status = ((unsigned char *)urb->transfer_buffer)[2]; unsigned char *data = ((unsigned char *)urb->transfer_buffer) + 3; int flag, throttled; - int i; int status = urb->status; /* do not process callbacks on closed ports */ /* but do continue the read chain */ - if (port->port.count == 0) + if (urb->status == -ENOENT) return 0; /* short/multiple packet check */ @@ -1705,17 +1702,9 @@ static int digi_read_inb_callback(struct urb *urb) /* data length is len-1 (one byte of len is port_status) */ --len; - - len = tty_buffer_request_room(tty, len); if (len > 0) { - /* Hot path */ - if (flag == TTY_NORMAL) - tty_insert_flip_string(tty, data, len); - else { - for (i = 0; i < len; i++) - tty_insert_flip_char(tty, - data[i], flag); - } + tty_insert_flip_string_fixed_flag(tty, data, len, + flag); tty_flip_buffer_push(tty); } } @@ -1776,8 +1765,7 @@ static int digi_read_oob_callback(struct urb *urb) tty = tty_port_tty_get(&port->port); rts = 0; - if (port->port.count) - rts = tty->termios->c_cflag & CRTSCTS; + rts = tty->termios->c_cflag & CRTSCTS; if (opcode == DIGI_CMD_READ_INPUT_SIGNALS) { spin_lock(&priv->dp_port_lock); diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c index 7dd0e3eadbe..5f740a1eaca 100644 --- a/drivers/usb/serial/empeg.c +++ b/drivers/usb/serial/empeg.c @@ -93,7 +93,7 @@ static void empeg_init_termios(struct tty_struct *tty); static void empeg_write_bulk_callback(struct urb *urb); static void empeg_read_bulk_callback(struct urb *urb); -static struct usb_device_id id_table [] = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(EMPEG_VENDOR_ID, EMPEG_PRODUCT_ID) }, { } /* Terminating entry */ }; @@ -346,7 +346,6 @@ static void empeg_read_bulk_callback(struct urb *urb) tty = tty_port_tty_get(&port->port); if (urb->actual_length) { - tty_buffer_request_room(tty, urb->actual_length); tty_insert_flip_string(tty, data, urb->actual_length); tty_flip_buffer_push(tty); bytes_in += urb->actual_length; diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 7638828e731..6af0dfa5f5a 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -33,12 +33,12 @@ #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/spinlock.h> +#include <linux/mutex.h> #include <linux/uaccess.h> #include <linux/usb.h> #include <linux/serial.h> @@ -88,10 +88,10 @@ struct ftdi_private { unsigned int latency; /* latency setting in use */ spinlock_t tx_lock; /* spinlock for transmit state */ - unsigned long tx_bytes; unsigned long tx_outstanding_bytes; unsigned long tx_outstanding_urbs; unsigned short max_packet_size; + struct mutex cfg_lock; /* Avoid mess by parallel calls of config ioctl() */ }; /* struct ftdi_sio_quirk is used by devices requiring special attention. 
*/ @@ -614,6 +614,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) }, { USB_DEVICE(TTI_VID, TTI_QL355P_PID) }, { USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) }, + { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) }, { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) }, @@ -737,6 +738,10 @@ static struct usb_device_id id_table_combined [] = { .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) }, { USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) }, + { USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) }, + { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) }, + { USB_DEVICE(FTDI_VID, MJSG_HD_RADIO_PID) }, + { USB_DEVICE(FTDI_VID, MJSG_XM_RADIO_PID) }, { }, /* Optional parameter entry */ { } /* Terminating entry */ }; @@ -812,7 +817,7 @@ static struct usb_serial_driver ftdi_sio_device = { .name = "ftdi_sio", }, .description = "FTDI USB Serial Device", - .usb_driver = &ftdi_driver , + .usb_driver = &ftdi_driver, .id_table = id_table_combined, .num_ports = 1, .probe = ftdi_sio_probe, @@ -828,8 +833,8 @@ static struct usb_serial_driver ftdi_sio_device = { .chars_in_buffer = ftdi_chars_in_buffer, .read_bulk_callback = ftdi_read_bulk_callback, .write_bulk_callback = ftdi_write_bulk_callback, - .tiocmget = ftdi_tiocmget, - .tiocmset = ftdi_tiocmset, + .tiocmget = ftdi_tiocmget, + .tiocmset = ftdi_tiocmset, .ioctl = ftdi_ioctl, .set_termios = ftdi_set_termios, .break_ctl = ftdi_break_ctl, @@ -935,7 +940,6 @@ static int update_mctrl(struct usb_serial_port *port, unsigned int set, unsigned int clear) { struct ftdi_private *priv = usb_get_serial_port_data(port); - char *buf; unsigned urb_value; int rv; @@ -944,10 +948,6 @@ static int update_mctrl(struct usb_serial_port *port, unsigned int set, return 0; /* no change */ } - buf = kmalloc(1, GFP_NOIO); - if (!buf) - return -ENOMEM; - clear &= ~set; /* 'set' takes precedence over 'clear' */ urb_value = 0; if (clear & TIOCM_DTR) @@ -963,9 +963,7 @@ static int update_mctrl(struct usb_serial_port *port, unsigned int set, FTDI_SIO_SET_MODEM_CTRL_REQUEST, FTDI_SIO_SET_MODEM_CTRL_REQUEST_TYPE, urb_value, priv->interface, - buf, 0, WDR_TIMEOUT); - - kfree(buf); + NULL, 0, WDR_TIMEOUT); if (rv < 0) { dbg("%s Error from MODEM_CTRL urb: DTR %s, RTS %s", __func__, @@ -1124,16 +1122,11 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty, static int change_speed(struct tty_struct *tty, struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); - char *buf; __u16 urb_value; __u16 urb_index; __u32 urb_index_value; int rv; - buf = kmalloc(1, GFP_NOIO); - if (!buf) - return -ENOMEM; - urb_index_value = get_ftdi_divisor(tty, port); urb_value = (__u16)urb_index_value; urb_index = (__u16)(urb_index_value >> 16); @@ -1146,9 +1139,7 @@ static int change_speed(struct tty_struct *tty, struct usb_serial_port *port) FTDI_SIO_SET_BAUDRATE_REQUEST, FTDI_SIO_SET_BAUDRATE_REQUEST_TYPE, urb_value, urb_index, - buf, 0, WDR_SHORT_TIMEOUT); - - kfree(buf); + NULL, 0, WDR_SHORT_TIMEOUT); return rv; } @@ -1156,8 +1147,7 @@ static int write_latency_timer(struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); struct usb_device *udev = port->serial->dev; - char buf[1]; - int rv = 0; + int rv; int l = priv->latency; if (priv->flags & ASYNC_LOW_LATENCY) @@ -1170,8 +1160,7 @@ static int write_latency_timer(struct usb_serial_port *port) FTDI_SIO_SET_LATENCY_TIMER_REQUEST, 
FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE, l, priv->interface, - buf, 0, WDR_TIMEOUT); - + NULL, 0, WDR_TIMEOUT); if (rv < 0) dev_err(&port->dev, "Unable to write latency timer: %i\n", rv); return rv; @@ -1181,24 +1170,29 @@ static int read_latency_timer(struct usb_serial_port *port) { struct ftdi_private *priv = usb_get_serial_port_data(port); struct usb_device *udev = port->serial->dev; - unsigned short latency = 0; - int rv = 0; - + unsigned char *buf; + int rv; dbg("%s", __func__); + buf = kmalloc(1, GFP_KERNEL); + if (!buf) + return -ENOMEM; + rv = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), FTDI_SIO_GET_LATENCY_TIMER_REQUEST, FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE, 0, priv->interface, - (char *) &latency, 1, WDR_TIMEOUT); - - if (rv < 0) { + buf, 1, WDR_TIMEOUT); + if (rv < 0) dev_err(&port->dev, "Unable to read latency timer: %i\n", rv); - return -EIO; - } - return latency; + else + priv->latency = buf[0]; + + kfree(buf); + + return rv; } static int get_serial_info(struct usb_serial_port *port, @@ -1229,7 +1223,7 @@ static int set_serial_info(struct tty_struct *tty, if (copy_from_user(&new_serial, newinfo, sizeof(new_serial))) return -EFAULT; - lock_kernel(); + mutex_lock(&priv->cfg_lock); old_priv = *priv; /* Do error checking and permission checking */ @@ -1237,7 +1231,7 @@ static int set_serial_info(struct tty_struct *tty, if (!capable(CAP_SYS_ADMIN)) { if (((new_serial.flags & ~ASYNC_USR_MASK) != (priv->flags & ~ASYNC_USR_MASK))) { - unlock_kernel(); + mutex_unlock(&priv->cfg_lock); return -EPERM; } priv->flags = ((priv->flags & ~ASYNC_USR_MASK) | @@ -1248,7 +1242,7 @@ static int set_serial_info(struct tty_struct *tty, if ((new_serial.baud_base != priv->baud_base) && (new_serial.baud_base < 9600)) { - unlock_kernel(); + mutex_unlock(&priv->cfg_lock); return -EINVAL; } @@ -1278,11 +1272,11 @@ check_and_exit: (priv->flags & ASYNC_SPD_MASK)) || (((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) && (old_priv.custom_divisor != priv->custom_divisor))) { - unlock_kernel(); + mutex_unlock(&priv->cfg_lock); change_speed(tty, port); } else - unlock_kernel(); + mutex_unlock(&priv->cfg_lock); return 0; } /* set_serial_info */ @@ -1338,20 +1332,20 @@ static void ftdi_determine_type(struct usb_serial_port *port) __func__); } } else if (version < 0x200) { - /* Old device. Assume its the original SIO. */ + /* Old device. Assume it's the original SIO. */ priv->chip_type = SIO; priv->baud_base = 12000000 / 16; priv->write_offset = 1; } else if (version < 0x400) { - /* Assume its an FT8U232AM (or FT8U245AM) */ + /* Assume it's an FT8U232AM (or FT8U245AM) */ /* (It might be a BM because of the iSerialNumber bug, * but it will still work as an AM device.) 
*/ priv->chip_type = FT8U232AM; } else if (version < 0x600) { - /* Assume its an FT232BM (or FT245BM) */ + /* Assume it's an FT232BM (or FT245BM) */ priv->chip_type = FT232BM; } else { - /* Assume its an FT232R */ + /* Assume it's an FT232R */ priv->chip_type = FT232RL; } dev_info(&udev->dev, "Detected %s\n", ftdi_chip_name[priv->chip_type]); @@ -1371,7 +1365,7 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port) struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc; unsigned num_endpoints; - int i = 0; + int i; num_endpoints = interface->cur_altsetting->desc.bNumEndpoints; dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints); @@ -1423,7 +1417,7 @@ static ssize_t store_latency_timer(struct device *dev, struct usb_serial_port *port = to_usb_serial_port(dev); struct ftdi_private *priv = usb_get_serial_port_data(port); int v = simple_strtoul(valbuf, NULL, 10); - int rv = 0; + int rv; priv->latency = v; rv = write_latency_timer(port); @@ -1440,9 +1434,8 @@ static ssize_t store_event_char(struct device *dev, struct usb_serial_port *port = to_usb_serial_port(dev); struct ftdi_private *priv = usb_get_serial_port_data(port); struct usb_device *udev = port->serial->dev; - char buf[1]; int v = simple_strtoul(valbuf, NULL, 10); - int rv = 0; + int rv; dbg("%s: setting event char = %i", __func__, v); @@ -1451,8 +1444,7 @@ static ssize_t store_event_char(struct device *dev, FTDI_SIO_SET_EVENT_CHAR_REQUEST, FTDI_SIO_SET_EVENT_CHAR_REQUEST_TYPE, v, priv->interface, - buf, 0, WDR_TIMEOUT); - + NULL, 0, WDR_TIMEOUT); if (rv < 0) { dbg("Unable to write event character: %i", rv); return -EIO; @@ -1551,9 +1543,9 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port) kref_init(&priv->kref); spin_lock_init(&priv->tx_lock); + mutex_init(&priv->cfg_lock); init_waitqueue_head(&priv->delta_msr_wait); - /* This will push the characters through immediately rather - than queue a task to deliver them */ + priv->flags = ASYNC_LOW_LATENCY; if (quirk && quirk->port_probe) @@ -1585,7 +1577,8 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port) ftdi_determine_type(port); ftdi_set_max_packet_size(port); - read_latency_timer(port); + if (read_latency_timer(port) < 0) + priv->latency = 16; create_sysfs_attrs(port); return 0; } @@ -1630,8 +1623,6 @@ static int ftdi_NDI_device_setup(struct usb_serial *serial) { struct usb_device *udev = serial->dev; int latency = ndi_latency_timer; - int rv = 0; - char buf[1]; if (latency == 0) latency = 1; @@ -1641,10 +1632,11 @@ static int ftdi_NDI_device_setup(struct usb_serial *serial) dbg("%s setting NDI device latency to %d", __func__, latency); dev_info(&udev->dev, "NDI device with a latency value of %d", latency); - rv = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + /* FIXME: errors are not returned */ + usb_control_msg(udev, usb_sndctrlpipe(udev, 0), FTDI_SIO_SET_LATENCY_TIMER_REQUEST, FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE, - latency, 0, buf, 0, WDR_TIMEOUT); + latency, 0, NULL, 0, WDR_TIMEOUT); return 0; } @@ -1720,7 +1712,7 @@ static int ftdi_submit_read_urb(struct usb_serial_port *port, gfp_t mem_flags) urb->transfer_buffer_length, ftdi_read_bulk_callback, port); result = usb_submit_urb(urb, mem_flags); - if (result) + if (result && result != -EPERM) dev_err(&port->dev, "%s - failed submitting read urb, error %d\n", __func__, result); @@ -1732,16 +1724,10 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port) struct usb_device *dev = port->serial->dev; struct ftdi_private 
*priv = usb_get_serial_port_data(port); unsigned long flags; - - int result = 0; - char buf[1]; /* Needed for the usb_control_msg I think */ + int result; dbg("%s", __func__); - spin_lock_irqsave(&priv->tx_lock, flags); - priv->tx_bytes = 0; - spin_unlock_irqrestore(&priv->tx_lock, flags); - write_latency_timer(port); /* No error checking for this (will get errors later anyway) */ @@ -1749,7 +1735,7 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port) usb_control_msg(dev, usb_sndctrlpipe(dev, 0), FTDI_SIO_RESET_REQUEST, FTDI_SIO_RESET_REQUEST_TYPE, FTDI_SIO_RESET_SIO, - priv->interface, buf, 0, WDR_TIMEOUT); + priv->interface, NULL, 0, WDR_TIMEOUT); /* Termios defaults are set by usb_serial_init. We don't change port->tty->termios - this would lose speed settings, etc. @@ -1777,7 +1763,6 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port) static void ftdi_dtr_rts(struct usb_serial_port *port, int on) { struct ftdi_private *priv = usb_get_serial_port_data(port); - char buf[1]; mutex_lock(&port->serial->disc_mutex); if (!port->serial->disconnected) { @@ -1786,7 +1771,7 @@ static void ftdi_dtr_rts(struct usb_serial_port *port, int on) usb_sndctrlpipe(port->serial->dev, 0), FTDI_SIO_SET_FLOW_CTRL_REQUEST, FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE, - 0, priv->interface, buf, 0, + 0, priv->interface, NULL, 0, WDR_TIMEOUT) < 0) { dev_err(&port->dev, "error from flowcontrol urb\n"); } @@ -1847,7 +1832,7 @@ static int ftdi_write(struct tty_struct *tty, struct usb_serial_port *port, spin_lock_irqsave(&priv->tx_lock, flags); if (priv->tx_outstanding_urbs > URB_UPPER_LIMIT) { spin_unlock_irqrestore(&priv->tx_lock, flags); - dbg("%s - write limit hit\n", __func__); + dbg("%s - write limit hit", __func__); return 0; } priv->tx_outstanding_urbs++; @@ -1927,7 +1912,6 @@ static int ftdi_write(struct tty_struct *tty, struct usb_serial_port *port, } else { spin_lock_irqsave(&priv->tx_lock, flags); priv->tx_outstanding_bytes += count; - priv->tx_bytes += count; spin_unlock_irqrestore(&priv->tx_lock, flags); } @@ -2154,8 +2138,7 @@ static void ftdi_break_ctl(struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; struct ftdi_private *priv = usb_get_serial_port_data(port); - __u16 urb_value = 0; - char buf[1]; + __u16 urb_value; /* break_state = -1 to turn on break, and 0 to turn off break */ /* see drivers/char/tty_io.c to see it used */ @@ -2171,7 +2154,7 @@ static void ftdi_break_ctl(struct tty_struct *tty, int break_state) FTDI_SIO_SET_DATA_REQUEST, FTDI_SIO_SET_DATA_REQUEST_TYPE, urb_value , priv->interface, - buf, 0, WDR_TIMEOUT) < 0) { + NULL, 0, WDR_TIMEOUT) < 0) { dev_err(&port->dev, "%s FAILED to enable/disable break state " "(state was %d)\n", __func__, break_state); } @@ -2195,7 +2178,6 @@ static void ftdi_set_termios(struct tty_struct *tty, struct ktermios *termios = tty->termios; unsigned int cflag = termios->c_cflag; __u16 urb_value; /* will hold the new flags */ - char buf[1]; /* Perhaps I should dynamically alloc this? 
*/ /* Added for xon/xoff support */ unsigned int iflag = termios->c_iflag; @@ -2246,12 +2228,10 @@ static void ftdi_set_termios(struct tty_struct *tty, } if (cflag & CSIZE) { switch (cflag & CSIZE) { - case CS5: urb_value |= 5; dbg("Setting CS5"); break; - case CS6: urb_value |= 6; dbg("Setting CS6"); break; case CS7: urb_value |= 7; dbg("Setting CS7"); break; case CS8: urb_value |= 8; dbg("Setting CS8"); break; default: - dev_err(&port->dev, "CSIZE was set but not CS5-CS8\n"); + dev_err(&port->dev, "CSIZE was set but not CS7-CS8\n"); } } @@ -2263,7 +2243,7 @@ static void ftdi_set_termios(struct tty_struct *tty, FTDI_SIO_SET_DATA_REQUEST, FTDI_SIO_SET_DATA_REQUEST_TYPE, urb_value , priv->interface, - buf, 0, WDR_SHORT_TIMEOUT) < 0) { + NULL, 0, WDR_SHORT_TIMEOUT) < 0) { dev_err(&port->dev, "%s FAILED to set " "databits/stopbits/parity\n", __func__); } @@ -2275,7 +2255,7 @@ static void ftdi_set_termios(struct tty_struct *tty, FTDI_SIO_SET_FLOW_CTRL_REQUEST, FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE, 0, priv->interface, - buf, 0, WDR_TIMEOUT) < 0) { + NULL, 0, WDR_TIMEOUT) < 0) { dev_err(&port->dev, "%s error from disable flowcontrol urb\n", __func__); @@ -2301,7 +2281,7 @@ static void ftdi_set_termios(struct tty_struct *tty, FTDI_SIO_SET_FLOW_CTRL_REQUEST, FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE, 0 , (FTDI_SIO_RTS_CTS_HS | priv->interface), - buf, 0, WDR_TIMEOUT) < 0) { + NULL, 0, WDR_TIMEOUT) < 0) { dev_err(&port->dev, "urb failed to set to rts/cts flow control\n"); } @@ -2333,7 +2313,7 @@ static void ftdi_set_termios(struct tty_struct *tty, FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE, urb_value , (FTDI_SIO_XON_XOFF_HS | priv->interface), - buf, 0, WDR_TIMEOUT) < 0) { + NULL, 0, WDR_TIMEOUT) < 0) { dev_err(&port->dev, "urb failed to set to " "xon/xoff flow control\n"); } @@ -2347,7 +2327,7 @@ static void ftdi_set_termios(struct tty_struct *tty, FTDI_SIO_SET_FLOW_CTRL_REQUEST, FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE, 0, priv->interface, - buf, 0, WDR_TIMEOUT) < 0) { + NULL, 0, WDR_TIMEOUT) < 0) { dev_err(&port->dev, "urb failed to clear flow control\n"); } @@ -2361,21 +2341,22 @@ static int ftdi_tiocmget(struct tty_struct *tty, struct file *file) { struct usb_serial_port *port = tty->driver_data; struct ftdi_private *priv = usb_get_serial_port_data(port); - unsigned char buf[2]; + unsigned char *buf; + int len; int ret; dbg("%s TIOCMGET", __func__); + + buf = kmalloc(2, GFP_KERNEL); + if (!buf) + return -ENOMEM; + /* + * The 8U232AM returns a two byte value (the SIO a 1 byte value) in + * the same format as the data returned from the in point. + */ switch (priv->chip_type) { case SIO: - /* Request the status from the device */ - ret = usb_control_msg(port->serial->dev, - usb_rcvctrlpipe(port->serial->dev, 0), - FTDI_SIO_GET_MODEM_STATUS_REQUEST, - FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE, - 0, 0, - buf, 1, WDR_TIMEOUT); - if (ret < 0) - return ret; + len = 1; break; case FT8U232AM: case FT232BM: @@ -2383,27 +2364,30 @@ static int ftdi_tiocmget(struct tty_struct *tty, struct file *file) case FT232RL: case FT2232H: case FT4232H: - /* the 8U232AM returns a two byte value (the sio is a 1 byte - value) - in the same format as the data returned from the in - point */ - ret = usb_control_msg(port->serial->dev, - usb_rcvctrlpipe(port->serial->dev, 0), - FTDI_SIO_GET_MODEM_STATUS_REQUEST, - FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE, - 0, priv->interface, - buf, 2, WDR_TIMEOUT); - if (ret < 0) - return ret; + len = 2; break; default: - return -EFAULT; + ret = -EFAULT; + goto out; } - return (buf[0] & FTDI_SIO_DSR_MASK ? 
TIOCM_DSR : 0) | + ret = usb_control_msg(port->serial->dev, + usb_rcvctrlpipe(port->serial->dev, 0), + FTDI_SIO_GET_MODEM_STATUS_REQUEST, + FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE, + 0, priv->interface, + buf, len, WDR_TIMEOUT); + if (ret < 0) + goto out; + + ret = (buf[0] & FTDI_SIO_DSR_MASK ? TIOCM_DSR : 0) | (buf[0] & FTDI_SIO_CTS_MASK ? TIOCM_CTS : 0) | (buf[0] & FTDI_SIO_RI_MASK ? TIOCM_RI : 0) | (buf[0] & FTDI_SIO_RLSD_MASK ? TIOCM_CD : 0) | priv->last_dtr_rts; +out: + kfree(buf); + return ret; } static int ftdi_tiocmset(struct tty_struct *tty, struct file *file, @@ -2508,8 +2492,7 @@ void ftdi_unthrottle(struct tty_struct *tty) port->throttled = port->throttle_req = 0; spin_unlock_irqrestore(&port->lock, flags); - /* Resubmit urb if throttled and open. */ - if (was_throttled && test_bit(ASYNCB_INITIALIZED, &port->port.flags)) + if (was_throttled) ftdi_submit_read_urb(port, GFP_KERNEL); } diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h index b0e0d64f822..ff9bf80327a 100644 --- a/drivers/usb/serial/ftdi_sio.h +++ b/drivers/usb/serial/ftdi_sio.h @@ -28,13 +28,13 @@ #define FTDI_SIO_SET_FLOW_CTRL 2 /* Set flow control register */ #define FTDI_SIO_SET_BAUD_RATE 3 /* Set baud rate */ #define FTDI_SIO_SET_DATA 4 /* Set the data characteristics of the port */ -#define FTDI_SIO_GET_MODEM_STATUS 5 /* Retrieve current value of modern status register */ +#define FTDI_SIO_GET_MODEM_STATUS 5 /* Retrieve current value of modem status register */ #define FTDI_SIO_SET_EVENT_CHAR 6 /* Set the event character */ #define FTDI_SIO_SET_ERROR_CHAR 7 /* Set the error character */ #define FTDI_SIO_SET_LATENCY_TIMER 9 /* Set the latency timer */ #define FTDI_SIO_GET_LATENCY_TIMER 10 /* Get the latency timer */ -/* Interface indicies for FT2232, FT2232H and FT4232H devices*/ +/* Interface indices for FT2232, FT2232H and FT4232H devices */ #define INTERFACE_A 1 #define INTERFACE_B 2 #define INTERFACE_C 3 @@ -270,7 +270,7 @@ typedef enum { * BmRequestType: 0100 0000b * bRequest: FTDI_SIO_SET_FLOW_CTRL * wValue: Xoff/Xon - * wIndex: Protocol/Port - hIndex is protocl / lIndex is port + * wIndex: Protocol/Port - hIndex is protocol / lIndex is port * wLength: 0 * Data: None * diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index c8951aeed98..0727e198503 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -22,7 +22,7 @@ #define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */ #define FTDI_8U2232C_PID 0x6010 /* Dual channel device */ #define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */ -#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */ +#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */ #define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */ @@ -49,7 +49,7 @@ #define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8 #define LMI_LM3S_EVAL_BOARD_PID 0xbcd9 -#define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmBH */ +#define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmbH */ /* OpenDCC (www.opendcc.de) product id */ #define FTDI_OPENDCC_PID 0xBFD8 @@ -185,7 +185,7 @@ #define FTDI_ELV_TFD128_PID 0xE0EC /* ELV Temperatur-Feuchte-Datenlogger TFD 128 */ #define FTDI_ELV_FM3RX_PID 0xE0ED /* ELV Messwertuebertragung FM3 RX */ #define FTDI_ELV_WS777_PID 0xE0EE /* Conrad WS 777 */ -#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */ +#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Energy monitor EM 1010 PC */ #define 
FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */ #define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */ #define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */ @@ -212,8 +212,8 @@ * drivers, or possibly the Comedi drivers in some cases. */ #define FTDI_ELV_CLI7000_PID 0xFB59 /* Computer-Light-Interface (CLI 7000) */ #define FTDI_ELV_PPS7330_PID 0xFB5C /* Processor-Power-Supply (PPS 7330) */ -#define FTDI_ELV_TFM100_PID 0xFB5D /* Temperartur-Feuchte Messgeraet (TFM 100) */ -#define FTDI_ELV_UDF77_PID 0xFB5E /* USB DCF Funkurh (UDF 77) */ +#define FTDI_ELV_TFM100_PID 0xFB5D /* Temperatur-Feuchte-Messgeraet (TFM 100) */ +#define FTDI_ELV_UDF77_PID 0xFB5E /* USB DCF Funkuhr (UDF 77) */ #define FTDI_ELV_UIO88_PID 0xFB5F /* USB-I/O Interface (UIO 88) */ /* @@ -320,7 +320,7 @@ /* * 4N-GALAXY.DE PIDs for CAN-USB, USB-RS232, USB-RS422, USB-RS485, - * USB-TTY activ, USB-TTY passiv. Some PIDs are used by several devices + * USB-TTY aktiv, USB-TTY passiv. Some PIDs are used by several devices * and I'm not entirely sure which are used by which. */ #define FTDI_4N_GALAXY_DE_1_PID 0xF3C0 @@ -330,10 +330,10 @@ * Linx Technologies product ids */ #define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */ -#define LINX_MASTERDEVEL2_PID 0xF449 /* Linx Master Development 2.0 */ -#define LINX_FUTURE_0_PID 0xF44A /* Linx future device */ -#define LINX_FUTURE_1_PID 0xF44B /* Linx future device */ -#define LINX_FUTURE_2_PID 0xF44C /* Linx future device */ +#define LINX_MASTERDEVEL2_PID 0xF449 /* Linx Master Development 2.0 */ +#define LINX_FUTURE_0_PID 0xF44A /* Linx future device */ +#define LINX_FUTURE_1_PID 0xF44B /* Linx future device */ +#define LINX_FUTURE_2_PID 0xF44C /* Linx future device */ /* * Oceanic product ids @@ -494,6 +494,13 @@ #define RATOC_PRODUCT_ID_USB60F 0xb020 /* + * Contec products (http://www.contec.com) + * Submitted by Daniel Sangorrin + */ +#define CONTEC_VID 0x06CE /* Vendor ID */ +#define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */ + +/* * Definitions for B&B Electronics products. 
*/ #define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */ @@ -642,7 +649,7 @@ #define FALCOM_TWIST_PID 0x0001 /* Falcom Twist USB GPRS modem */ #define FALCOM_SAMBA_PID 0x0005 /* Falcom Samba USB GPRS modem */ -/* Larsen and Brusgaard AltiTrack/USBtrack */ +/* Larsen and Brusgaard AltiTrack/USBtrack */ #define LARSENBRUSGAARD_VID 0x0FD8 #define LB_ALTITRACK_PID 0x0001 @@ -971,7 +978,7 @@ #define ALTI2_N3_PID 0x6001 /* Neptune 3 */ /* - * Dresden Elektronic Sensor Terminal Board + * Dresden Elektronik Sensor Terminal Board */ #define DE_VID 0x1cf1 /* Vendor ID */ #define STB_PID 0x0001 /* Sensor Terminal Board */ @@ -1002,3 +1009,11 @@ #define EVO_8U232AM_PID 0x02FF /* Evolution robotics RCM2 (FT232AM)*/ #define EVO_HYBRID_PID 0x0302 /* Evolution robotics RCM4 PID (FT232BM)*/ #define EVO_RCM4_PID 0x0303 /* Evolution robotics RCM4 PID */ + +/* + * MJS Gadgets HD Radio / XM Radio / Sirius Radio interfaces (using VID 0x0403) + */ +#define MJSG_GENERIC_PID 0x9378 +#define MJSG_SR_RADIO_PID 0x9379 +#define MJSG_XM_RADIO_PID 0x937A +#define MJSG_HD_RADIO_PID 0x937C diff --git a/drivers/usb/serial/funsoft.c b/drivers/usb/serial/funsoft.c index d30f736d2cc..e21ce9ddfc6 100644 --- a/drivers/usb/serial/funsoft.c +++ b/drivers/usb/serial/funsoft.c @@ -18,7 +18,7 @@ static int debug; -static struct usb_device_id id_table [] = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1404, 0xcddc) }, { }, }; diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c index 5ac900e5a50..a42b29a695b 100644 --- a/drivers/usb/serial/garmin_gps.c +++ b/drivers/usb/serial/garmin_gps.c @@ -210,7 +210,7 @@ static unsigned char const PRIVATE_REQ[] -static struct usb_device_id id_table [] = { +static const struct usb_device_id id_table[] = { /* the same device id seems to be used by all usb enabled GPS devices */ { USB_DEVICE(GARMIN_VENDOR_ID, 3) }, @@ -271,7 +271,6 @@ static void send_to_tty(struct usb_serial_port *port, usb_serial_debug_data(debug, &port->dev, __func__, actual_length, data); - tty_buffer_request_room(tty, actual_length); tty_insert_flip_string(tty, data, actual_length); tty_flip_buffer_push(tty); } diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index 83443d6306d..89fac36684c 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c @@ -20,6 +20,7 @@ #include <linux/usb/serial.h> #include <linux/uaccess.h> #include <linux/kfifo.h> +#include <linux/serial.h> static int debug; @@ -41,7 +42,7 @@ static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. 
*/ /* we want to look at all devices, as the vendor/product id can change * depending on the command line argument */ -static struct usb_device_id generic_serial_ids[] = { +static const struct usb_device_id generic_serial_ids[] = { {.driver_info = 42}, {} }; @@ -194,7 +195,7 @@ static int usb_serial_multi_urb_write(struct tty_struct *tty, if (port->urbs_in_flight > port->serial->type->max_in_flight_urbs) { spin_unlock_irqrestore(&port->lock, flags); - dbg("%s - write limit hit\n", __func__); + dbg("%s - write limit hit", __func__); return bwrite; } port->tx_bytes_flight += towrite; @@ -585,7 +586,7 @@ int usb_serial_generic_resume(struct usb_serial *serial) for (i = 0; i < serial->num_ports; i++) { port = serial->port[i]; - if (!port->port.count) + if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) continue; if (port->read_urb) { diff --git a/drivers/usb/serial/hp4x.c b/drivers/usb/serial/hp4x.c index 43132927513..809379159b0 100644 --- a/drivers/usb/serial/hp4x.c +++ b/drivers/usb/serial/hp4x.c @@ -29,7 +29,7 @@ #define HP_VENDOR_ID 0x03f0 #define HP49GP_PRODUCT_ID 0x0121 -static struct usb_device_id id_table [] = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(HP_VENDOR_ID, HP49GP_PRODUCT_ID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c index b97960ac92f..3ef8df0ef88 100644 --- a/drivers/usb/serial/io_edgeport.c +++ b/drivers/usb/serial/io_edgeport.c @@ -364,42 +364,6 @@ static void update_edgeport_E2PROM(struct edgeport_serial *edge_serial) release_firmware(fw); } - -/************************************************************************ - * * - * Get string descriptor from device * - * * - ************************************************************************/ -static int get_string(struct usb_device *dev, int Id, char *string, int buflen) -{ - struct usb_string_descriptor StringDesc; - struct usb_string_descriptor *pStringDesc; - - dbg("%s - USB String ID = %d", __func__, Id); - - if (!usb_get_descriptor(dev, USB_DT_STRING, Id, - &StringDesc, sizeof(StringDesc))) - return 0; - - pStringDesc = kmalloc(StringDesc.bLength, GFP_KERNEL); - if (!pStringDesc) - return 0; - - if (!usb_get_descriptor(dev, USB_DT_STRING, Id, - pStringDesc, StringDesc.bLength)) { - kfree(pStringDesc); - return 0; - } - - unicode_to_ascii(string, buflen, - pStringDesc->wData, pStringDesc->bLength/2); - - kfree(pStringDesc); - dbg("%s - USB String %s", __func__, string); - return strlen(string); -} - - #if 0 /************************************************************************ * @@ -2007,7 +1971,7 @@ static void process_rcvd_status(struct edgeport_serial *edge_serial, return; case IOSP_EXT_STATUS_RX_CHECK_RSP: - dbg("%s ========== Port %u CHECK_RSP Sequence = %02x =============\n", __func__, edge_serial->rxPort, byte3); + dbg("%s ========== Port %u CHECK_RSP Sequence = %02x =============", __func__, edge_serial->rxPort, byte3); /* Port->RxCheckRsp = true; */ return; } @@ -2075,7 +2039,7 @@ static void process_rcvd_status(struct edgeport_serial *edge_serial, break; default: - dbg("%s - Unrecognized IOSP status code %u\n", __func__, code); + dbg("%s - Unrecognized IOSP status code %u", __func__, code); break; } return; @@ -2091,18 +2055,13 @@ static void edge_tty_recv(struct device *dev, struct tty_struct *tty, { int cnt; - do { - cnt = tty_buffer_request_room(tty, length); - if (cnt < length) { - dev_err(dev, "%s - dropping data, %d bytes lost\n", - __func__, length - cnt); - if (cnt == 0) - break; - } - 
tty_insert_flip_string(tty, data, cnt); - data += cnt; - length -= cnt; - } while (length > 0); + cnt = tty_insert_flip_string(tty, data, length); + if (cnt < length) { + dev_err(dev, "%s - dropping data, %d bytes lost\n", + __func__, length - cnt); + } + data += cnt; + length -= cnt; tty_flip_buffer_push(tty); } @@ -2530,7 +2489,7 @@ static int calc_baud_rate_divisor(int baudrate, int *divisor) *divisor = custom; - dbg("%s - Baud %d = %d\n", __func__, baudrate, custom); + dbg("%s - Baud %d = %d", __func__, baudrate, custom); return 0; } @@ -2915,7 +2874,7 @@ static void load_application_firmware(struct edgeport_serial *edge_serial) break; case EDGE_DOWNLOAD_FILE_NONE: - dbg ("No download file specified, skipping download\n"); + dbg("No download file specified, skipping download"); return; default: @@ -2997,10 +2956,12 @@ static int edge_startup(struct usb_serial *serial) usb_set_serial_data(serial, edge_serial); /* get the name for the device from the device */ - i = get_string(dev, dev->descriptor.iManufacturer, + i = usb_string(dev, dev->descriptor.iManufacturer, &edge_serial->name[0], MAX_NAME_LEN+1); + if (i < 0) + i = 0; edge_serial->name[i++] = ' '; - get_string(dev, dev->descriptor.iProduct, + usb_string(dev, dev->descriptor.iProduct, &edge_serial->name[i], MAX_NAME_LEN+2 - i); dev_info(&serial->dev->dev, "%s detected\n", edge_serial->name); diff --git a/drivers/usb/serial/io_tables.h b/drivers/usb/serial/io_tables.h index 9241d314751..feb56a4ca79 100644 --- a/drivers/usb/serial/io_tables.h +++ b/drivers/usb/serial/io_tables.h @@ -14,7 +14,7 @@ #ifndef IO_TABLES_H #define IO_TABLES_H -static struct usb_device_id edgeport_2port_id_table [] = { +static const struct usb_device_id edgeport_2port_id_table[] = { { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2I) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_421) }, @@ -23,7 +23,7 @@ static struct usb_device_id edgeport_2port_id_table [] = { { } }; -static struct usb_device_id edgeport_4port_id_table [] = { +static const struct usb_device_id edgeport_4port_id_table[] = { { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_RAPIDPORT_4) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4T) }, @@ -37,7 +37,7 @@ static struct usb_device_id edgeport_4port_id_table [] = { { } }; -static struct usb_device_id edgeport_8port_id_table [] = { +static const struct usb_device_id edgeport_8port_id_table[] = { { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_16_DUAL_CPU) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8I) }, @@ -47,7 +47,7 @@ static struct usb_device_id edgeport_8port_id_table [] = { { } }; -static struct usb_device_id Epic_port_id_table [] = { +static const struct usb_device_id Epic_port_id_table[] = { { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0202) }, { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0203) }, { USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0310) }, @@ -60,7 +60,7 @@ static struct usb_device_id Epic_port_id_table [] = { }; /* Devices that this driver supports */ -static struct usb_device_id id_table_combined [] = { +static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_RAPIDPORT_4) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4T) }, diff --git 
a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index d4cc0f7af40..aa876f71f22 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c @@ -134,7 +134,7 @@ struct edgeport_serial { /* Devices that this driver supports */ -static struct usb_device_id edgeport_1port_id_table [] = { +static const struct usb_device_id edgeport_1port_id_table[] = { { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) }, @@ -154,7 +154,7 @@ static struct usb_device_id edgeport_1port_id_table [] = { { } }; -static struct usb_device_id edgeport_2port_id_table [] = { +static const struct usb_device_id edgeport_2port_id_table[] = { { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2C) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2I) }, @@ -177,7 +177,7 @@ static struct usb_device_id edgeport_2port_id_table [] = { }; /* Devices that this driver supports */ -static struct usb_device_id id_table_combined [] = { +static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) }, @@ -413,11 +413,18 @@ static int write_boot_mem(struct edgeport_serial *serial, { int status = 0; int i; - __u8 temp; + u8 *temp; /* Must do a read before write */ if (!serial->TiReadI2C) { - status = read_boot_mem(serial, 0, 1, &temp); + temp = kmalloc(1, GFP_KERNEL); + if (!temp) { + dev_err(&serial->serial->dev->dev, + "%s - out of memory\n", __func__); + return -ENOMEM; + } + status = read_boot_mem(serial, 0, 1, temp); + kfree(temp); if (status) return status; } @@ -935,37 +942,47 @@ static int build_i2c_fw_hdr(__u8 *header, struct device *dev) static int i2c_type_bootmode(struct edgeport_serial *serial) { int status; - __u8 data; + u8 *data; + + data = kmalloc(1, GFP_KERNEL); + if (!data) { + dev_err(&serial->serial->dev->dev, + "%s - out of memory\n", __func__); + return -ENOMEM; + } /* Try to read type 2 */ status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ, - DTK_ADDR_SPACE_I2C_TYPE_II, 0, &data, 0x01); + DTK_ADDR_SPACE_I2C_TYPE_II, 0, data, 0x01); if (status) dbg("%s - read 2 status error = %d", __func__, status); else - dbg("%s - read 2 data = 0x%x", __func__, data); - if ((!status) && (data == UMP5152 || data == UMP3410)) { + dbg("%s - read 2 data = 0x%x", __func__, *data); + if ((!status) && (*data == UMP5152 || *data == UMP3410)) { dbg("%s - ROM_TYPE_II", __func__); serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II; - return 0; + goto out; } /* Try to read type 3 */ status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ, - DTK_ADDR_SPACE_I2C_TYPE_III, 0, &data, 0x01); + DTK_ADDR_SPACE_I2C_TYPE_III, 0, data, 0x01); if (status) dbg("%s - read 3 status error = %d", __func__, status); else - dbg("%s - read 2 data = 0x%x", __func__, data); - if ((!status) && (data == UMP5152 || data == UMP3410)) { + dbg("%s - read 2 data = 0x%x", __func__, *data); + if ((!status) && (*data == UMP5152 || *data == UMP3410)) { dbg("%s - ROM_TYPE_III", __func__); serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_III; - return 0; + goto out; } dbg("%s - Unknown", __func__); serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II; - return -ENODEV; + status = -ENODEV; +out: + kfree(data); + return 
status; } static int bulk_xfer(struct usb_serial *serial, void *buffer, @@ -1113,7 +1130,7 @@ static int download_fw(struct edgeport_serial *serial) I2C_DESC_TYPE_FIRMWARE_BASIC, rom_desc); if (start_address != 0) { struct ti_i2c_firmware_rec *firmware_version; - __u8 record; + u8 *record; dbg("%s - Found Type FIRMWARE (Type 2) record", __func__); @@ -1165,6 +1182,15 @@ static int download_fw(struct edgeport_serial *serial) OperationalMajorVersion, OperationalMinorVersion); + record = kmalloc(1, GFP_KERNEL); + if (!record) { + dev_err(dev, "%s - out of memory.\n", + __func__); + kfree(firmware_version); + kfree(rom_desc); + kfree(ti_manuf_desc); + return -ENOMEM; + } /* In order to update the I2C firmware we must * change the type 2 record to type 0xF2. This * will force the UMP to come up in Boot Mode. @@ -1177,13 +1203,14 @@ static int download_fw(struct edgeport_serial *serial) * firmware will update the record type from * 0xf2 to 0x02. */ - record = I2C_DESC_TYPE_FIRMWARE_BLANK; + *record = I2C_DESC_TYPE_FIRMWARE_BLANK; /* Change the I2C Firmware record type to 0xf2 to trigger an update */ status = write_rom(serial, start_address, - sizeof(record), &record); + sizeof(*record), record); if (status) { + kfree(record); kfree(firmware_version); kfree(rom_desc); kfree(ti_manuf_desc); @@ -1196,19 +1223,21 @@ static int download_fw(struct edgeport_serial *serial) */ status = read_rom(serial, start_address, - sizeof(record), - &record); + sizeof(*record), + record); if (status) { + kfree(record); kfree(firmware_version); kfree(rom_desc); kfree(ti_manuf_desc); return status; } - if (record != I2C_DESC_TYPE_FIRMWARE_BLANK) { + if (*record != I2C_DESC_TYPE_FIRMWARE_BLANK) { dev_err(dev, "%s - error resetting device\n", __func__); + kfree(record); kfree(firmware_version); kfree(rom_desc); kfree(ti_manuf_desc); @@ -1226,6 +1255,7 @@ static int download_fw(struct edgeport_serial *serial) __func__, status); /* return an error on purpose. 
*/ + kfree(record); kfree(firmware_version); kfree(rom_desc); kfree(ti_manuf_desc); @@ -1686,7 +1716,7 @@ static void edge_interrupt_callback(struct urb *urb) case TIUMP_INTERRUPT_CODE_MSR: /* MSR */ /* Copy MSR from UMP */ msr = data[1]; - dbg("%s - ===== Port %u MSR Status = %02x ======\n", + dbg("%s - ===== Port %u MSR Status = %02x ======", __func__, port_number, msr); handle_new_msr(edge_port, msr); break; @@ -1790,7 +1820,6 @@ static void edge_tty_recv(struct device *dev, struct tty_struct *tty, { int queued; - tty_buffer_request_room(tty, length); queued = tty_insert_flip_string(tty, data, length); if (queued < length) dev_err(dev, "%s - dropping data, %d bytes lost\n", diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c index d6231c38813..3fea9298eb1 100644 --- a/drivers/usb/serial/ipaq.c +++ b/drivers/usb/serial/ipaq.c @@ -747,7 +747,6 @@ static void ipaq_read_bulk_callback(struct urb *urb) tty = tty_port_tty_get(&port->port); if (tty && urb->actual_length) { - tty_buffer_request_room(tty, urb->actual_length); tty_insert_flip_string(tty, data, urb->actual_length); tty_flip_buffer_push(tty); bytes_in += urb->actual_length; diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c index 727d323f092..e1d07840cee 100644 --- a/drivers/usb/serial/ipw.c +++ b/drivers/usb/serial/ipw.c @@ -134,7 +134,7 @@ enum { #define IPW_WANTS_TO_SEND 0x30 -static struct usb_device_id usb_ipw_ids[] = { +static const struct usb_device_id usb_ipw_ids[] = { { USB_DEVICE(IPW_VID, IPW_PID) }, { }, }; @@ -172,7 +172,6 @@ static void ipw_read_bulk_callback(struct urb *urb) tty = tty_port_tty_get(&port->port); if (tty && urb->actual_length) { - tty_buffer_request_room(tty, urb->actual_length); tty_insert_flip_string(tty, data, urb->actual_length); tty_flip_buffer_push(tty); } diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c index 95d8d26b9a4..4a0f5197423 100644 --- a/drivers/usb/serial/ir-usb.c +++ b/drivers/usb/serial/ir-usb.c @@ -100,7 +100,7 @@ static u8 ir_baud; static u8 ir_xbof; static u8 ir_add_bof; -static struct usb_device_id ir_id_table[] = { +static const struct usb_device_id ir_id_table[] = { { USB_DEVICE(0x050f, 0x0180) }, /* KC Technology, KC-180 */ { USB_DEVICE(0x08e9, 0x0100) }, /* XTNDAccess */ { USB_DEVICE(0x09c4, 0x0011) }, /* ACTiSys ACT-IR2000U */ @@ -445,11 +445,6 @@ static void ir_read_bulk_callback(struct urb *urb) dbg("%s - port %d", __func__, port->number); - if (!port->port.count) { - dbg("%s - port closed.", __func__); - return; - } - switch (status) { case 0: /* Successful */ /* @@ -462,10 +457,8 @@ static void ir_read_bulk_callback(struct urb *urb) usb_serial_debug_data(debug, &port->dev, __func__, urb->actual_length, data); tty = tty_port_tty_get(&port->port); - if (tty_buffer_request_room(tty, urb->actual_length - 1)) { - tty_insert_flip_string(tty, data+1, urb->actual_length - 1); - tty_flip_buffer_push(tty); - } + tty_insert_flip_string(tty, data+1, urb->actual_length - 1); + tty_flip_buffer_push(tty); tty_kref_put(tty); /* diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c index e6e02b178d2..43f13cf2f01 100644 --- a/drivers/usb/serial/iuu_phoenix.c +++ b/drivers/usb/serial/iuu_phoenix.c @@ -43,7 +43,7 @@ static int debug; #define DRIVER_VERSION "v0.11" #define DRIVER_DESC "Infinity USB Unlimited Phoenix driver" -static struct usb_device_id id_table[] = { +static const struct usb_device_id id_table[] = { {USB_DEVICE(IUU_USB_VENDOR_ID, IUU_USB_PRODUCT_ID)}, {} /* Terminating entry */ }; diff --git 
a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index f8c4b07033f..297163c3c61 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c @@ -464,13 +464,9 @@ static void usa26_indat_callback(struct urb *urb) /* Resubmit urb so we continue receiving */ urb->dev = port->serial->dev; - if (port->port.count) { - err = usb_submit_urb(urb, GFP_ATOMIC); - if (err != 0) - dbg("%s - resubmit read urb failed. (%d)", - __func__, err); - } - return; + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err != 0) + dbg("%s - resubmit read urb failed. (%d)", __func__, err); } /* Outdat handling is common for all devices */ @@ -483,8 +479,7 @@ static void usa2x_outdat_callback(struct urb *urb) p_priv = usb_get_serial_port_data(port); dbg("%s - urb %d", __func__, urb == p_priv->out_urbs[1]); - if (port->port.count) - usb_serial_port_softint(port); + usb_serial_port_softint(port); } static void usa26_inack_callback(struct urb *urb) @@ -615,12 +610,10 @@ static void usa28_indat_callback(struct urb *urb) /* Resubmit urb so we continue receiving */ urb->dev = port->serial->dev; - if (port->port.count) { - err = usb_submit_urb(urb, GFP_ATOMIC); - if (err != 0) - dbg("%s - resubmit read urb failed. (%d)", - __func__, err); - } + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err != 0) + dbg("%s - resubmit read urb failed. (%d)", + __func__, err); p_priv->in_flip ^= 1; urb = p_priv->in_urbs[p_priv->in_flip]; @@ -856,12 +849,9 @@ static void usa49_indat_callback(struct urb *urb) /* Resubmit urb so we continue receiving */ urb->dev = port->serial->dev; - if (port->port.count) { - err = usb_submit_urb(urb, GFP_ATOMIC); - if (err != 0) - dbg("%s - resubmit read urb failed. (%d)", - __func__, err); - } + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err != 0) + dbg("%s - resubmit read urb failed. (%d)", __func__, err); } static void usa49wg_indat_callback(struct urb *urb) @@ -904,11 +894,7 @@ static void usa49wg_indat_callback(struct urb *urb) /* no error on any byte */ i++; for (x = 1; x < len ; ++x) - if (port->port.count) - tty_insert_flip_char(tty, - data[i++], 0); - else - i++; + tty_insert_flip_char(tty, data[i++], 0); } else { /* * some bytes had errors, every byte has status @@ -922,14 +908,12 @@ static void usa49wg_indat_callback(struct urb *urb) if (stat & RXERROR_PARITY) flag |= TTY_PARITY; /* XXX should handle break (0x10) */ - if (port->port.count) - tty_insert_flip_char(tty, + tty_insert_flip_char(tty, data[i+1], flag); i += 2; } } - if (port->port.count) - tty_flip_buffer_push(tty); + tty_flip_buffer_push(tty); tty_kref_put(tty); } } @@ -1013,13 +997,9 @@ static void usa90_indat_callback(struct urb *urb) /* Resubmit urb so we continue receiving */ urb->dev = port->serial->dev; - if (port->port.count) { - err = usb_submit_urb(urb, GFP_ATOMIC); - if (err != 0) - dbg("%s - resubmit read urb failed. (%d)", - __func__, err); - } - return; + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err != 0) + dbg("%s - resubmit read urb failed. 
(%d)", __func__, err); } @@ -2418,8 +2398,7 @@ static int keyspan_usa90_send_setup(struct usb_serial *serial, msg.portEnabled = 0; /* Sending intermediate configs */ else { - if (port->port.count) - msg.portEnabled = 1; + msg.portEnabled = 1; msg.txBreak = (p_priv->break_on); } diff --git a/drivers/usb/serial/keyspan.h b/drivers/usb/serial/keyspan.h index 30771e5b397..bf3297ddd18 100644 --- a/drivers/usb/serial/keyspan.h +++ b/drivers/usb/serial/keyspan.h @@ -456,7 +456,7 @@ static const struct keyspan_device_details *keyspan_devices[] = { NULL, }; -static struct usb_device_id keyspan_ids_combined[] = { +static const struct usb_device_id keyspan_ids_combined[] = { { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_pre_product_id) }, { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_pre_product_id) }, { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19w_pre_product_id) }, @@ -497,7 +497,7 @@ static struct usb_driver keyspan_driver = { }; /* usb_device_id table for the pre-firmware download keyspan devices */ -static struct usb_device_id keyspan_pre_ids[] = { +static const struct usb_device_id keyspan_pre_ids[] = { { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_pre_product_id) }, { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_pre_product_id) }, { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19qi_pre_product_id) }, @@ -513,7 +513,7 @@ static struct usb_device_id keyspan_pre_ids[] = { { } /* Terminating entry */ }; -static struct usb_device_id keyspan_1port_ids[] = { +static const struct usb_device_id keyspan_1port_ids[] = { { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_product_id) }, { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_product_id) }, { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19qi_product_id) }, @@ -524,7 +524,7 @@ static struct usb_device_id keyspan_1port_ids[] = { { } /* Terminating entry */ }; -static struct usb_device_id keyspan_2port_ids[] = { +static const struct usb_device_id keyspan_2port_ids[] = { { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28_product_id) }, { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28x_product_id) }, { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xa_product_id) }, @@ -532,7 +532,7 @@ static struct usb_device_id keyspan_2port_ids[] = { { } /* Terminating entry */ }; -static struct usb_device_id keyspan_4port_ids[] = { +static const struct usb_device_id keyspan_4port_ids[] = { { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49w_product_id) }, { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wlc_product_id)}, { USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wg_product_id)}, diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c index 1296a097f5c..185fe9a7d4e 100644 --- a/drivers/usb/serial/keyspan_pda.c +++ b/drivers/usb/serial/keyspan_pda.c @@ -125,7 +125,7 @@ struct keyspan_pda_private { #define ENTREGRA_VENDOR_ID 0x1645 #define ENTREGRA_FAKE_ID 0x8093 -static struct usb_device_id id_table_combined [] = { +static const struct usb_device_id id_table_combined[] = { #ifdef KEYSPAN { USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_FAKE_ID) }, #endif @@ -147,20 +147,20 @@ static struct usb_driver keyspan_pda_driver = { .no_dynamic_id = 1, }; -static struct usb_device_id id_table_std [] = { +static const struct usb_device_id id_table_std[] = { { USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) }, { } /* Terminating entry */ }; #ifdef KEYSPAN -static struct usb_device_id id_table_fake [] = { +static const struct usb_device_id id_table_fake[] = { { USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_FAKE_ID) }, { } /* Terminating entry */ }; #endif #ifdef XIRCOM -static struct usb_device_id 
id_table_fake_xircom [] = { +static const struct usb_device_id id_table_fake_xircom[] = { { USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) }, { USB_DEVICE(ENTREGRA_VENDOR_ID, ENTREGRA_FAKE_ID) }, { } @@ -429,13 +429,20 @@ static int keyspan_pda_get_modem_info(struct usb_serial *serial, unsigned char *value) { int rc; - unsigned char data; + u8 *data; + + data = kmalloc(1, GFP_KERNEL); + if (!data) + return -ENOMEM; + rc = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), 3, /* get pins */ USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN, - 0, 0, &data, 1, 2000); + 0, 0, data, 1, 2000); if (rc >= 0) - *value = data; + *value = *data; + + kfree(data); return rc; } @@ -543,7 +550,14 @@ static int keyspan_pda_write(struct tty_struct *tty, device how much room it really has. This is done only on scheduler time, since usb_control_msg() sleeps. */ if (count > priv->tx_room && !in_interrupt()) { - unsigned char room; + u8 *room; + + room = kmalloc(1, GFP_KERNEL); + if (!room) { + rc = -ENOMEM; + goto exit; + } + rc = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), 6, /* write_room */ @@ -551,9 +565,14 @@ static int keyspan_pda_write(struct tty_struct *tty, | USB_DIR_IN, 0, /* value: 0 means "remaining room" */ 0, /* index */ - &room, + room, 1, 2000); + if (rc > 0) { + dbg(" roomquery says %d", *room); + priv->tx_room = *room; + } + kfree(room); if (rc < 0) { dbg(" roomquery failed"); goto exit; @@ -563,8 +582,6 @@ static int keyspan_pda_write(struct tty_struct *tty, rc = -EIO; /* device didn't return any data */ goto exit; } - dbg(" roomquery says %d", room); - priv->tx_room = room; } if (count > priv->tx_room) { /* we're about to completely fill the Tx buffer, so @@ -684,18 +701,22 @@ static int keyspan_pda_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_serial *serial = port->serial; - unsigned char room; + u8 *room; int rc = 0; struct keyspan_pda_private *priv; /* find out how much room is in the Tx ring */ + room = kmalloc(1, GFP_KERNEL); + if (!room) + return -ENOMEM; + rc = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), 6, /* write_room */ USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_IN, 0, /* value */ 0, /* index */ - &room, + room, 1, 2000); if (rc < 0) { @@ -708,8 +729,8 @@ static int keyspan_pda_open(struct tty_struct *tty, goto error; } priv = usb_get_serial_port_data(port); - priv->tx_room = room; - priv->tx_throttled = room ? 0 : 1; + priv->tx_room = *room; + priv->tx_throttled = *room ? 0 : 1; /*Start reading from the device*/ port->interrupt_in_urb->dev = serial->dev; @@ -718,8 +739,8 @@ static int keyspan_pda_open(struct tty_struct *tty, dbg("%s - usb_submit_urb(read int) failed", __func__); goto error; } - error: + kfree(room); return rc; } static void keyspan_pda_close(struct usb_serial_port *port) @@ -789,6 +810,13 @@ static int keyspan_pda_fake_startup(struct usb_serial *serial) return 1; } +#ifdef KEYSPAN +MODULE_FIRMWARE("keyspan_pda/keyspan_pda.fw"); +#endif +#ifdef XIRCOM +MODULE_FIRMWARE("keyspan_pda/xircom_pgs.fw"); +#endif + static int keyspan_pda_startup(struct usb_serial *serial) { diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c index 3a7873806f4..8eef91ba4b1 100644 --- a/drivers/usb/serial/kl5kusb105.c +++ b/drivers/usb/serial/kl5kusb105.c @@ -94,7 +94,7 @@ static int klsi_105_tiocmset(struct tty_struct *tty, struct file *file, /* * All of the device info needed for the KLSI converters. 
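The keyspan_pda hunks above (and the kl5kusb105, kobil and mct_u232 changes that follow) all replace on-stack buffers passed to usb_control_msg() with kmalloc()ed ones, because the USB core may hand the transfer buffer to the DMA engine and stack memory is not DMA-safe. A minimal sketch of that pattern, using a hypothetical vendor request code (0x06) rather than any particular device's protocol:

#include <linux/slab.h>
#include <linux/usb.h>

/* Read one status byte with a vendor control transfer.  The buffer is
 * heap-allocated because usb_control_msg() may DMA into it; an automatic
 * (stack) variable must never be used here. */
static int example_read_status(struct usb_device *udev, u8 *status)
{
	u8 *buf;
	int rc;

	buf = kmalloc(1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			     0x06, /* hypothetical vendor request */
			     USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_IN,
			     0, 0, buf, 1, 2000);
	if (rc >= 0)
		*status = *buf;	/* copy out before freeing */

	kfree(buf);
	return rc;
}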
*/ -static struct usb_device_id id_table [] = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(PALMCONNECT_VID, PALMCONNECT_PID) }, { USB_DEVICE(KLSI_VID, KLSI_KL5KUSB105D_PID) }, { } /* Terminating entry */ @@ -212,10 +212,19 @@ static int klsi_105_get_line_state(struct usb_serial_port *port, unsigned long *line_state_p) { int rc; - __u8 status_buf[KLSI_STATUSBUF_LEN] = { -1, -1}; + u8 *status_buf; __u16 status; dev_info(&port->serial->dev->dev, "sending SIO Poll request\n"); + + status_buf = kmalloc(KLSI_STATUSBUF_LEN, GFP_KERNEL); + if (!status_buf) { + dev_err(&port->dev, "%s - out of memory for status buffer.\n", + __func__); + return -ENOMEM; + } + status_buf[0] = 0xff; + status_buf[1] = 0xff; rc = usb_control_msg(port->serial->dev, usb_rcvctrlpipe(port->serial->dev, 0), KL5KUSB105A_SIO_POLL, @@ -236,6 +245,8 @@ static int klsi_105_get_line_state(struct usb_serial_port *port, *line_state_p = klsi_105_status2linestate(status); } + + kfree(status_buf); return rc; } @@ -364,7 +375,7 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port) int rc; int i; unsigned long line_state; - struct klsi_105_port_settings cfg; + struct klsi_105_port_settings *cfg; unsigned long flags; dbg("%s port %d", __func__, port->number); @@ -376,12 +387,18 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port) * Then read the modem line control and store values in * priv->line_state. */ - cfg.pktlen = 5; - cfg.baudrate = kl5kusb105a_sio_b9600; - cfg.databits = kl5kusb105a_dtb_8; - cfg.unknown1 = 0; - cfg.unknown2 = 1; - klsi_105_chg_port_settings(port, &cfg); + cfg = kmalloc(sizeof(*cfg), GFP_KERNEL); + if (!cfg) { + dev_err(&port->dev, "%s - out of memory for config buffer.\n", + __func__); + return -ENOMEM; + } + cfg->pktlen = 5; + cfg->baudrate = kl5kusb105a_sio_b9600; + cfg->databits = kl5kusb105a_dtb_8; + cfg->unknown1 = 0; + cfg->unknown2 = 1; + klsi_105_chg_port_settings(port, cfg); /* set up termios structure */ spin_lock_irqsave(&priv->lock, flags); @@ -391,11 +408,11 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port) priv->termios.c_lflag = tty->termios->c_lflag; for (i = 0; i < NCCS; i++) priv->termios.c_cc[i] = tty->termios->c_cc[i]; - priv->cfg.pktlen = cfg.pktlen; - priv->cfg.baudrate = cfg.baudrate; - priv->cfg.databits = cfg.databits; - priv->cfg.unknown1 = cfg.unknown1; - priv->cfg.unknown2 = cfg.unknown2; + priv->cfg.pktlen = cfg->pktlen; + priv->cfg.baudrate = cfg->baudrate; + priv->cfg.databits = cfg->databits; + priv->cfg.unknown1 = cfg->unknown1; + priv->cfg.unknown2 = cfg->unknown2; spin_unlock_irqrestore(&priv->lock, flags); /* READ_ON and urb submission */ @@ -441,6 +458,7 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port) retval = rc; exit: + kfree(cfg); return retval; } /* klsi_105_open */ @@ -681,7 +699,6 @@ static void klsi_105_read_bulk_callback(struct urb *urb) bytes_sent = urb->actual_length - 2; } - tty_buffer_request_room(tty, bytes_sent); tty_insert_flip_string(tty, data + 2, bytes_sent); tty_flip_buffer_push(tty); tty_kref_put(tty); @@ -714,10 +731,17 @@ static void klsi_105_set_termios(struct tty_struct *tty, unsigned int old_iflag = old_termios->c_iflag; unsigned int cflag = tty->termios->c_cflag; unsigned int old_cflag = old_termios->c_cflag; - struct klsi_105_port_settings cfg; + struct klsi_105_port_settings *cfg; unsigned long flags; speed_t baud; + cfg = kmalloc(sizeof(*cfg), GFP_KERNEL); + if (!cfg) { + dev_err(&port->dev, "%s - out of 
memory for config buffer.\n", + __func__); + return; + } + /* lock while we are modifying the settings */ spin_lock_irqsave(&priv->lock, flags); @@ -793,11 +817,11 @@ static void klsi_105_set_termios(struct tty_struct *tty, case CS5: dbg("%s - 5 bits/byte not supported", __func__); spin_unlock_irqrestore(&priv->lock, flags); - return ; + goto err; case CS6: dbg("%s - 6 bits/byte not supported", __func__); spin_unlock_irqrestore(&priv->lock, flags); - return ; + goto err; case CS7: priv->cfg.databits = kl5kusb105a_dtb_7; break; @@ -856,11 +880,13 @@ static void klsi_105_set_termios(struct tty_struct *tty, #endif ; } - memcpy(&cfg, &priv->cfg, sizeof(cfg)); + memcpy(cfg, &priv->cfg, sizeof(*cfg)); spin_unlock_irqrestore(&priv->lock, flags); /* now commit changes to device */ - klsi_105_chg_port_settings(port, &cfg); + klsi_105_chg_port_settings(port, cfg); +err: + kfree(cfg); } /* klsi_105_set_termios */ diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c index 45ea694b3ae..c113a2a0e10 100644 --- a/drivers/usb/serial/kobil_sct.c +++ b/drivers/usb/serial/kobil_sct.c @@ -86,7 +86,7 @@ static void kobil_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old); static void kobil_init_termios(struct tty_struct *tty); -static struct usb_device_id id_table [] = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_ADAPTER_B_PRODUCT_ID) }, { USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_ADAPTER_K_PRODUCT_ID) }, { USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_USBTWIN_PRODUCT_ID) }, @@ -388,7 +388,6 @@ static void kobil_read_int_callback(struct urb *urb) */ /* END DEBUG */ - tty_buffer_request_room(tty, urb->actual_length); tty_insert_flip_string(tty, data, urb->actual_length); tty_flip_buffer_push(tty); } @@ -624,7 +623,6 @@ static void kobil_set_termios(struct tty_struct *tty, unsigned short urb_val = 0; int c_cflag = tty->termios->c_cflag; speed_t speed; - void *settings; priv = usb_get_serial_port_data(port); if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID || @@ -647,25 +645,13 @@ static void kobil_set_termios(struct tty_struct *tty, } urb_val |= (c_cflag & CSTOPB) ? SUSBCR_SPASB_2StopBits : SUSBCR_SPASB_1StopBit; - - settings = kzalloc(50, GFP_KERNEL); - if (!settings) - return; - - sprintf(settings, "%d ", speed); - if (c_cflag & PARENB) { - if (c_cflag & PARODD) { + if (c_cflag & PARODD) urb_val |= SUSBCR_SPASB_OddParity; - strcat(settings, "Odd Parity"); - } else { + else urb_val |= SUSBCR_SPASB_EvenParity; - strcat(settings, "Even Parity"); - } - } else { + } else urb_val |= SUSBCR_SPASB_NoParity; - strcat(settings, "No Parity"); - } tty->termios->c_cflag &= ~CMSPAR; tty_encode_baud_rate(tty, speed, speed); @@ -675,11 +661,10 @@ static void kobil_set_termios(struct tty_struct *tty, USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT, urb_val, 0, - settings, + NULL, 0, KOBIL_TIMEOUT ); - kfree(settings); } static int kobil_ioctl(struct tty_struct *tty, struct file *file, diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c index cd009cb280a..2849f8c3201 100644 --- a/drivers/usb/serial/mct_u232.c +++ b/drivers/usb/serial/mct_u232.c @@ -75,6 +75,7 @@ #include <linux/module.h> #include <linux/spinlock.h> #include <linux/uaccess.h> +#include <asm/unaligned.h> #include <linux/usb.h> #include <linux/usb/serial.h> #include "mct_u232.h" @@ -110,7 +111,7 @@ static void mct_u232_unthrottle(struct tty_struct *tty); /* * All of the device info needed for the MCT USB-RS232 converter. 
*/ -static struct usb_device_id id_table_combined [] = { +static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(MCT_U232_VID, MCT_U232_PID) }, { USB_DEVICE(MCT_U232_VID, MCT_U232_SITECOM_PID) }, { USB_DEVICE(MCT_U232_VID, MCT_U232_DU_H3SP_PID) }, @@ -231,19 +232,22 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial, static int mct_u232_set_baud_rate(struct tty_struct *tty, struct usb_serial *serial, struct usb_serial_port *port, speed_t value) { - __le32 divisor; + unsigned int divisor; int rc; - unsigned char zero_byte = 0; + unsigned char *buf; unsigned char cts_enable_byte = 0; speed_t speed; - divisor = cpu_to_le32(mct_u232_calculate_baud_rate(serial, value, - &speed)); + buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + divisor = mct_u232_calculate_baud_rate(serial, value, &speed); + put_unaligned_le32(cpu_to_le32(divisor), buf); rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), MCT_U232_SET_BAUD_RATE_REQUEST, MCT_U232_SET_REQUEST_TYPE, - 0, 0, &divisor, MCT_U232_SET_BAUD_RATE_SIZE, + 0, 0, buf, MCT_U232_SET_BAUD_RATE_SIZE, WDR_TIMEOUT); if (rc < 0) /*FIXME: What value speed results */ dev_err(&port->dev, "Set BAUD RATE %d failed (error = %d)\n", @@ -269,10 +273,11 @@ static int mct_u232_set_baud_rate(struct tty_struct *tty, a device which is not asserting 'CTS'. */ + buf[0] = 0; rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), MCT_U232_SET_UNKNOWN1_REQUEST, MCT_U232_SET_REQUEST_TYPE, - 0, 0, &zero_byte, MCT_U232_SET_UNKNOWN1_SIZE, + 0, 0, buf, MCT_U232_SET_UNKNOWN1_SIZE, WDR_TIMEOUT); if (rc < 0) dev_err(&port->dev, "Sending USB device request code %d " @@ -284,30 +289,40 @@ static int mct_u232_set_baud_rate(struct tty_struct *tty, dbg("set_baud_rate: send second control message, data = %02X", cts_enable_byte); + buf[0] = cts_enable_byte; rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), MCT_U232_SET_CTS_REQUEST, MCT_U232_SET_REQUEST_TYPE, - 0, 0, &cts_enable_byte, MCT_U232_SET_CTS_SIZE, + 0, 0, buf, MCT_U232_SET_CTS_SIZE, WDR_TIMEOUT); if (rc < 0) dev_err(&port->dev, "Sending USB device request code %d " "failed (error = %d)\n", MCT_U232_SET_CTS_REQUEST, rc); + kfree(buf); return rc; } /* mct_u232_set_baud_rate */ static int mct_u232_set_line_ctrl(struct usb_serial *serial, unsigned char lcr) { int rc; + unsigned char *buf; + + buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + buf[0] = lcr; rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), MCT_U232_SET_LINE_CTRL_REQUEST, MCT_U232_SET_REQUEST_TYPE, - 0, 0, &lcr, MCT_U232_SET_LINE_CTRL_SIZE, + 0, 0, buf, MCT_U232_SET_LINE_CTRL_SIZE, WDR_TIMEOUT); if (rc < 0) dev_err(&serial->dev->dev, "Set LINE CTRL 0x%x failed (error = %d)\n", lcr, rc); dbg("set_line_ctrl: 0x%x", lcr); + kfree(buf); return rc; } /* mct_u232_set_line_ctrl */ @@ -315,23 +330,31 @@ static int mct_u232_set_modem_ctrl(struct usb_serial *serial, unsigned int control_state) { int rc; - unsigned char mcr = MCT_U232_MCR_NONE; + unsigned char mcr; + unsigned char *buf; + + buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + mcr = MCT_U232_MCR_NONE; if (control_state & TIOCM_DTR) mcr |= MCT_U232_MCR_DTR; if (control_state & TIOCM_RTS) mcr |= MCT_U232_MCR_RTS; + buf[0] = mcr; rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), MCT_U232_SET_MODEM_CTRL_REQUEST, MCT_U232_SET_REQUEST_TYPE, - 0, 0, &mcr, MCT_U232_SET_MODEM_CTRL_SIZE, + 0, 0, buf, 
MCT_U232_SET_MODEM_CTRL_SIZE, WDR_TIMEOUT); if (rc < 0) dev_err(&serial->dev->dev, "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc); dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr); + kfree(buf); return rc; } /* mct_u232_set_modem_ctrl */ @@ -339,17 +362,27 @@ static int mct_u232_get_modem_stat(struct usb_serial *serial, unsigned char *msr) { int rc; + unsigned char *buf; + + buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL); + if (buf == NULL) { + *msr = 0; + return -ENOMEM; + } rc = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), MCT_U232_GET_MODEM_STAT_REQUEST, MCT_U232_GET_REQUEST_TYPE, - 0, 0, msr, MCT_U232_GET_MODEM_STAT_SIZE, + 0, 0, buf, MCT_U232_GET_MODEM_STAT_SIZE, WDR_TIMEOUT); if (rc < 0) { dev_err(&serial->dev->dev, "Get MODEM STATus failed (error = %d)\n", rc); *msr = 0; + } else { + *msr = buf[0]; } dbg("get_modem_stat: 0x%x", *msr); + kfree(buf); return rc; } /* mct_u232_get_modem_stat */ diff --git a/drivers/usb/serial/mct_u232.h b/drivers/usb/serial/mct_u232.h index 07b6bec31dc..7417d5ce1e2 100644 --- a/drivers/usb/serial/mct_u232.h +++ b/drivers/usb/serial/mct_u232.h @@ -73,6 +73,8 @@ #define MCT_U232_SET_CTS_REQUEST 12 #define MCT_U232_SET_CTS_SIZE 1 +#define MCT_U232_MAX_SIZE 4 /* of MCT_XXX_SIZE */ + /* * Baud rate (divisor) * Actually, there are two of them, MCT website calls them "Philips solution" diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 763e32a44be..0d47f2c4d59 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -81,12 +81,15 @@ struct moschip_serial { static int debug; +static struct usb_serial_driver moschip7720_2port_driver; + #define USB_VENDOR_ID_MOSCHIP 0x9710 #define MOSCHIP_DEVICE_ID_7720 0x7720 #define MOSCHIP_DEVICE_ID_7715 0x7715 -static struct usb_device_id moschip_port_id_table[] = { +static const struct usb_device_id moschip_port_id_table[] = { { USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7720) }, + { USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7715) }, { } /* terminating entry */ }; MODULE_DEVICE_TABLE(usb, moschip_port_id_table); @@ -106,7 +109,7 @@ static void mos7720_interrupt_callback(struct urb *urb) __u8 sp1; __u8 sp2; - dbg("%s", " : Entering\n"); + dbg(" : Entering"); switch (status) { case 0: @@ -186,6 +189,75 @@ exit: } /* + * mos7715_interrupt_callback + * this is the 7715's callback function for when we have received data on + * the interrupt endpoint. 
+ */ +static void mos7715_interrupt_callback(struct urb *urb) +{ + int result; + int length; + int status = urb->status; + __u8 *data; + __u8 iir; + + switch (status) { + case 0: + /* success */ + break; + case -ECONNRESET: + case -ENOENT: + case -ESHUTDOWN: + /* this urb is terminated, clean up */ + dbg("%s - urb shutting down with status: %d", __func__, + status); + return; + default: + dbg("%s - nonzero urb status received: %d", __func__, + status); + goto exit; + } + + length = urb->actual_length; + data = urb->transfer_buffer; + + /* Structure of data from 7715 device: + * Byte 1: IIR serial Port + * Byte 2: unused + * Byte 2: DSR parallel port + * Byte 4: FIFO status for both */ + + if (unlikely(length != 4)) { + dbg("Wrong data !!!"); + return; + } + + iir = data[0]; + if (!(iir & 0x01)) { /* serial port interrupt pending */ + switch (iir & 0x0f) { + case SERIAL_IIR_RLS: + dbg("Serial Port: Receiver status error or address " + "bit detected in 9-bit mode\n"); + break; + case SERIAL_IIR_CTI: + dbg("Serial Port: Receiver time out"); + break; + case SERIAL_IIR_MS: + dbg("Serial Port: Modem status change"); + break; + } + } + +exit: + result = usb_submit_urb(urb, GFP_ATOMIC); + if (result) + dev_err(&urb->dev->dev, + "%s - Error %d submitting control urb\n", + __func__, result); + return; +} + +/* * mos7720_bulk_in_callback * this is the callback function for when we have received data on the * bulk in endpoint. @@ -206,7 +278,7 @@ static void mos7720_bulk_in_callback(struct urb *urb) mos7720_port = urb->context; if (!mos7720_port) { - dbg("%s", "NULL mos7720_port pointer \n"); + dbg("NULL mos7720_port pointer"); return ; } @@ -218,7 +290,6 @@ static void mos7720_bulk_in_callback(struct urb *urb) tty = tty_port_tty_get(&port->port); if (tty && urb->actual_length) { - tty_buffer_request_room(tty, urb->actual_length); tty_insert_flip_string(tty, data, urb->actual_length); tty_flip_buffer_push(tty); } @@ -275,17 +346,15 @@ static void mos7720_bulk_out_data_callback(struct urb *urb) * this function will be used for sending command to device */ static int send_mos_cmd(struct usb_serial *serial, __u8 request, __u16 value, - __u16 index, void *data) + __u16 index, u8 *data) { int status; - unsigned int pipe; + u8 *buf; u16 product = le16_to_cpu(serial->dev->descriptor.idProduct); - __u8 requesttype; - __u16 size = 0x0000; if (value < MOS_MAX_PORT) { if (product == MOSCHIP_DEVICE_ID_7715) - value = value*0x100+0x100; + value = 0x0200; /* identifies the 7715's serial port */ else value = value*0x100+0x200; } else { @@ -298,27 +367,58 @@ static int send_mos_cmd(struct usb_serial *serial, __u8 request, __u16 value, } if (request == MOS_WRITE) { - request = (__u8)MOS_WRITE; - requesttype = (__u8)0x40; - value = value + (__u16)*((unsigned char *)data); - data = NULL; - pipe = usb_sndctrlpipe(serial->dev, 0); + value = value + *data; + status = usb_control_msg(serial->dev, + usb_sndctrlpipe(serial->dev, 0), MOS_WRITE, + 0x40, value, index, NULL, 0, MOS_WDR_TIMEOUT); } else { - request = (__u8)MOS_READ; - requesttype = (__u8)0xC0; - size = 0x01; - pipe = usb_rcvctrlpipe(serial->dev, 0); + buf = kmalloc(1, GFP_KERNEL); + if (!buf) { + status = -ENOMEM; + goto out; + } + status = usb_control_msg(serial->dev, + usb_rcvctrlpipe(serial->dev, 0), MOS_READ, + 0xc0, value, index, buf, 1, MOS_WDR_TIMEOUT); + *data = *buf; + kfree(buf); } - - status = usb_control_msg(serial->dev, pipe, request, requesttype, - value, index, data, size, MOS_WDR_TIMEOUT); - +out: if (status < 0) - dbg("Command Write failed Value 
%x index %x\n", value, index); + dbg("Command Write failed Value %x index %x", value, index); return status; } + +/* + * mos77xx_probe + * this function installs the appropriate read interrupt endpoint callback + * depending on whether the device is a 7720 or 7715, thus avoiding costly + * run-time checks in the high-frequency callback routine itself. + */ +static int mos77xx_probe(struct usb_serial *serial, + const struct usb_device_id *id) +{ + if (id->idProduct == MOSCHIP_DEVICE_ID_7715) + moschip7720_2port_driver.read_int_callback = + mos7715_interrupt_callback; + else + moschip7720_2port_driver.read_int_callback = + mos7720_interrupt_callback; + + return 0; +} + +static int mos77xx_calc_num_ports(struct usb_serial *serial) +{ + u16 product = le16_to_cpu(serial->dev->descriptor.idProduct); + if (product == MOSCHIP_DEVICE_ID_7715) + return 1; + + return 2; +} + static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port) { struct usb_serial *serial; @@ -390,7 +490,7 @@ static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port) */ port_number = port->number - port->serial->minor; send_mos_cmd(port->serial, MOS_READ, port_number, UART_LSR, &data); - dbg("SS::%p LSR:%x\n", mos7720_port, data); + dbg("SS::%p LSR:%x", mos7720_port, data); dbg("Check:Sending Command .........."); @@ -729,7 +829,7 @@ static void mos7720_throttle(struct tty_struct *tty) struct moschip_port *mos7720_port; int status; - dbg("%s- port %d\n", __func__, port->number); + dbg("%s- port %d", __func__, port->number); mos7720_port = usb_get_serial_port_data(port); @@ -1208,7 +1308,7 @@ static void mos7720_set_termios(struct tty_struct *tty, return; } - dbg("%s\n", "setting termios - ASPIRE"); + dbg("setting termios - ASPIRE"); cflag = tty->termios->c_cflag; @@ -1226,7 +1326,7 @@ static void mos7720_set_termios(struct tty_struct *tty, change_port_settings(tty, mos7720_port, old_termios); if (!port->read_urb) { - dbg("%s", "URB KILLED !!!!!\n"); + dbg("URB KILLED !!!!!"); return; } @@ -1495,6 +1595,7 @@ static int mos7720_startup(struct usb_serial *serial) struct usb_device *dev; int i; char data; + u16 product = le16_to_cpu(serial->dev->descriptor.idProduct); dbg("%s: Entering ..........", __func__); @@ -1514,6 +1615,29 @@ static int mos7720_startup(struct usb_serial *serial) usb_set_serial_data(serial, mos7720_serial); + /* + * The 7715 uses the first bulk in/out endpoint pair for the parallel + * port, and the second for the serial port. Because the usbserial core + * assumes both pairs are serial ports, we must engage in a bit of + * subterfuge and swap the pointers for ports 0 and 1 in order to make + * port 0 point to the serial port. However, both moschip devices use a + * single interrupt-in endpoint for both ports (as mentioned a little + * further down), and this endpoint was assigned to port 0. So after + * the swap, we must copy the interrupt endpoint elements from port 1 + * (as newly assigned) to port 0, and null out port 1 pointers. 
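The read-path hunks in this series (mos7720_bulk_in_callback above; mos7840, navman, opticon, visor and others below) drop the preceding tty_buffer_request_room() call: tty_insert_flip_string() sizes the flip buffer itself and returns how many bytes it actually accepted, so the extra request is redundant. A minimal sketch of the resulting completion-handler pattern, with error handling and URB resubmission omitted:

#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/usb/serial.h>

static void example_bulk_in_complete(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	unsigned char *data = urb->transfer_buffer;
	struct tty_struct *tty;

	tty = tty_port_tty_get(&port->port);
	if (tty && urb->actual_length) {
		/* tty_insert_flip_string() allocates room as needed and
		 * returns the number of bytes it copied. */
		tty_insert_flip_string(tty, data, urb->actual_length);
		tty_flip_buffer_push(tty);
	}
	tty_kref_put(tty);	/* safe even if tty is NULL */
}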
+ */ + if (product == MOSCHIP_DEVICE_ID_7715) { + struct usb_serial_port *tmp = serial->port[0]; + serial->port[0] = serial->port[1]; + serial->port[1] = tmp; + serial->port[0]->interrupt_in_urb = tmp->interrupt_in_urb; + serial->port[0]->interrupt_in_buffer = tmp->interrupt_in_buffer; + serial->port[0]->interrupt_in_endpointAddress = + tmp->interrupt_in_endpointAddress; + serial->port[1]->interrupt_in_urb = NULL; + serial->port[1]->interrupt_in_buffer = NULL; + } + /* we set up the pointers to the endpoints in the mos7720_open * * function, as the structures aren't created yet. */ @@ -1529,7 +1653,7 @@ static int mos7720_startup(struct usb_serial *serial) /* Initialize all port interrupt end point to port 0 int * endpoint. Our device has only one interrupt endpoint - * comman to all ports */ + * common to all ports */ serial->port[i]->interrupt_in_endpointAddress = serial->port[0]->interrupt_in_endpointAddress; @@ -1584,11 +1708,12 @@ static struct usb_serial_driver moschip7720_2port_driver = { .description = "Moschip 2 port adapter", .usb_driver = &usb_driver, .id_table = moschip_port_id_table, - .num_ports = 2, + .calc_num_ports = mos77xx_calc_num_ports, .open = mos7720_open, .close = mos7720_close, .throttle = mos7720_throttle, .unthrottle = mos7720_unthrottle, + .probe = mos77xx_probe, .attach = mos7720_startup, .release = mos7720_release, .ioctl = mos7720_ioctl, @@ -1600,7 +1725,7 @@ static struct usb_serial_driver moschip7720_2port_driver = { .chars_in_buffer = mos7720_chars_in_buffer, .break_ctl = mos7720_break, .read_bulk_callback = mos7720_bulk_in_callback, - .read_int_callback = mos7720_interrupt_callback, + .read_int_callback = NULL /* dynamically assigned in probe() */ }; static int __init moschip7720_init(void) diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index 2cfe2451ed9..2fda1c0182b 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -181,7 +181,7 @@ #define URB_TRANSFER_BUFFER_SIZE 32 /* URB Size */ -static struct usb_device_id moschip_port_id_table[] = { +static const struct usb_device_id moschip_port_id_table[] = { {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)}, @@ -198,7 +198,7 @@ static struct usb_device_id moschip_port_id_table[] = { {} /* terminating entry */ }; -static __devinitdata struct usb_device_id moschip_id_table_combined[] = { +static const struct usb_device_id moschip_id_table_combined[] __devinitconst = { {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)}, @@ -283,12 +283,19 @@ static int mos7840_get_reg_sync(struct usb_serial_port *port, __u16 reg, { struct usb_device *dev = port->serial->dev; int ret = 0; + u8 *buf; + + buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL); + if (!buf) + return -ENOMEM; ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ, - MCS_RD_RTYPE, 0, reg, val, VENDOR_READ_LENGTH, + MCS_RD_RTYPE, 0, reg, buf, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); + *val = buf[0]; dbg("mos7840_get_reg_sync offset is %x, return val %x", reg, *val); - *val = (*val) & 0x00ff; + + kfree(buf); return ret; } @@ -341,6 +348,11 @@ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg, struct usb_device *dev = port->serial->dev; int ret = 0; __u16 Wval; + u8 *buf; + + buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL); + if 
(!buf) + return -ENOMEM; /* dbg("application number is %4x", (((__u16)port->number - (__u16)(port->serial->minor))+1)<<8); */ @@ -364,9 +376,11 @@ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg, } } ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ, - MCS_RD_RTYPE, Wval, reg, val, VENDOR_READ_LENGTH, + MCS_RD_RTYPE, Wval, reg, buf, VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); - *val = (*val) & 0x00ff; + *val = buf[0]; + + kfree(buf); return ret; } @@ -750,7 +764,6 @@ static void mos7840_bulk_in_callback(struct urb *urb) if (urb->actual_length) { tty = tty_port_tty_get(&mos7840_port->port->port); if (tty) { - tty_buffer_request_room(tty, urb->actual_length); tty_insert_flip_string(tty, data, urb->actual_length); dbg(" %s ", data); tty_flip_buffer_push(tty); diff --git a/drivers/usb/serial/moto_modem.c b/drivers/usb/serial/moto_modem.c index 99bd00f5188..cf1718394e1 100644 --- a/drivers/usb/serial/moto_modem.c +++ b/drivers/usb/serial/moto_modem.c @@ -21,7 +21,7 @@ #include <linux/usb.h> #include <linux/usb/serial.h> -static struct usb_device_id id_table [] = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(0x05c6, 0x3197) }, /* unknown Motorola phone */ { USB_DEVICE(0x0c44, 0x0022) }, /* unknown Mororola phone */ { USB_DEVICE(0x22b8, 0x2a64) }, /* Motorola KRZR K1m */ diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c index 5ceaa4c6be0..04a6cbbed2c 100644 --- a/drivers/usb/serial/navman.c +++ b/drivers/usb/serial/navman.c @@ -22,7 +22,7 @@ static int debug; -static struct usb_device_id id_table [] = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */ { }, }; @@ -66,7 +66,6 @@ static void navman_read_int_callback(struct urb *urb) tty = tty_port_tty_get(&port->port); if (tty && urb->actual_length) { - tty_buffer_request_room(tty, urb->actual_length); tty_insert_flip_string(tty, data, urb->actual_length); tty_flip_buffer_push(tty); } diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c index 062265038bf..89c724c0ac0 100644 --- a/drivers/usb/serial/omninet.c +++ b/drivers/usb/serial/omninet.c @@ -75,7 +75,7 @@ static void omninet_disconnect(struct usb_serial *serial); static void omninet_release(struct usb_serial *serial); static int omninet_attach(struct usb_serial *serial); -static struct usb_device_id id_table[] = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) }, { USB_DEVICE(ZYXEL_VENDOR_ID, BT_IGNITIONPRO_ID) }, { } /* Terminating entry */ @@ -218,8 +218,8 @@ static void omninet_read_bulk_callback(struct urb *urb) if (debug && header->oh_xxx != 0x30) { if (urb->actual_length) { - printk(KERN_DEBUG __FILE__ - ": omninet_read %d: ", header->oh_len); + printk(KERN_DEBUG "%s: omninet_read %d: ", + __FILE__, header->oh_len); for (i = 0; i < (header->oh_len + OMNINET_HEADERLEN); i++) printk("%.2x ", data[i]); @@ -332,7 +332,7 @@ static void omninet_write_bulk_callback(struct urb *urb) struct usb_serial_port *port = urb->context; int status = urb->status; - dbg("%s - port %0x\n", __func__, port->number); + dbg("%s - port %0x", __func__, port->number); port->write_urb_busy = 0; if (status) { diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c index 4cdb975caa8..f37476e2268 100644 --- a/drivers/usb/serial/opticon.c +++ b/drivers/usb/serial/opticon.c @@ -22,7 +22,7 @@ static int debug; -static struct usb_device_id id_table[] = { +static const struct usb_device_id id_table[] = { { 
USB_DEVICE(0x065a, 0x0009) }, { }, }; @@ -55,7 +55,6 @@ static void opticon_bulk_callback(struct urb *urb) int status = urb->status; struct tty_struct *tty; int result; - int available_room = 0; int data_length; dbg("%s - port %d", __func__, port->number); @@ -96,13 +95,9 @@ static void opticon_bulk_callback(struct urb *urb) /* real data, send it to the tty layer */ tty = tty_port_tty_get(&port->port); if (tty) { - available_room = tty_buffer_request_room(tty, - data_length); - if (available_room) { - tty_insert_flip_string(tty, data, - available_room); - tty_flip_buffer_push(tty); - } + tty_insert_flip_string(tty, data, + data_length); + tty_flip_buffer_push(tty); tty_kref_put(tty); } } else { @@ -217,7 +212,7 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port, spin_lock_irqsave(&priv->lock, flags); if (priv->outstanding_urbs > URB_UPPER_LIMIT) { spin_unlock_irqrestore(&priv->lock, flags); - dbg("%s - write limit hit\n", __func__); + dbg("%s - write limit hit", __func__); return 0; } priv->outstanding_urbs++; @@ -288,7 +283,7 @@ static int opticon_write_room(struct tty_struct *tty) spin_lock_irqsave(&priv->lock, flags); if (priv->outstanding_urbs > URB_UPPER_LIMIT * 2 / 3) { spin_unlock_irqrestore(&priv->lock, flags); - dbg("%s - write limit hit\n", __func__); + dbg("%s - write limit hit", __func__); return 0; } spin_unlock_irqrestore(&priv->lock, flags); diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 6e94a6711f0..847b805d63a 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -336,15 +336,42 @@ static int option_resume(struct usb_serial *serial); #define AIRPLUS_VENDOR_ID 0x1011 #define AIRPLUS_PRODUCT_MCD650 0x3198 +/* Longcheer/Longsung vendor ID; makes whitelabel devices that + * many other vendors like 4G Systems, Alcatel, ChinaBird, + * Mobidata, etc sell under their own brand names. + */ +#define LONGCHEER_VENDOR_ID 0x1c9e + /* 4G Systems products */ -#define FOUR_G_SYSTEMS_VENDOR_ID 0x1c9e +/* This is the 4G XS Stick W14 a.k.a. 
Mobilcom Debitel Surf-Stick * + * It seems to contain a Qualcomm QSC6240/6290 chipset */ #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603 /* Haier products */ #define HAIER_VENDOR_ID 0x201e #define HAIER_PRODUCT_CE100 0x2009 -static struct usb_device_id option_ids[] = { +/* some devices interfaces need special handling due to a number of reasons */ +enum option_blacklist_reason { + OPTION_BLACKLIST_NONE = 0, + OPTION_BLACKLIST_SENDSETUP = 1, + OPTION_BLACKLIST_RESERVED_IF = 2 +}; + +struct option_blacklist_info { + const u32 infolen; /* number of interface numbers on blacklist */ + const u8 *ifaceinfo; /* pointer to the array holding the numbers */ + enum option_blacklist_reason reason; +}; + +static const u8 four_g_w14_no_sendsetup[] = { 0, 1 }; +static const struct option_blacklist_info four_g_w14_blacklist = { + .infolen = ARRAY_SIZE(four_g_w14_no_sendsetup), + .ifaceinfo = four_g_w14_no_sendsetup, + .reason = OPTION_BLACKLIST_SENDSETUP +}; + +static const struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_LIGHT) }, @@ -644,7 +671,9 @@ static struct usb_device_id option_ids[] = { { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) }, { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, - { USB_DEVICE(FOUR_G_SYSTEMS_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14) }, + { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), + .driver_info = (kernel_ulong_t)&four_g_w14_blacklist + }, { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) }, { } /* Terminating entry */ }; @@ -709,6 +738,7 @@ struct option_intf_private { spinlock_t susp_lock; unsigned int suspended:1; int in_flight; + struct option_blacklist_info *blacklist_info; }; struct option_port_private { @@ -778,9 +808,27 @@ static int option_probe(struct usb_serial *serial, if (!data) return -ENOMEM; spin_lock_init(&data->susp_lock); + data->blacklist_info = (struct option_blacklist_info*) id->driver_info; return 0; } +static enum option_blacklist_reason is_blacklisted(const u8 ifnum, + const struct option_blacklist_info *blacklist) +{ + const u8 *info; + int i; + + if (blacklist) { + info = blacklist->ifaceinfo; + + for (i = 0; i < blacklist->infolen; i++) { + if (info[i] == ifnum) + return blacklist->reason; + } + } + return OPTION_BLACKLIST_NONE; +} + static void option_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old_termios) { @@ -921,7 +969,6 @@ static void option_indat_callback(struct urb *urb) } else { tty = tty_port_tty_get(&port->port); if (urb->actual_length) { - tty_buffer_request_room(tty, urb->actual_length); tty_insert_flip_string(tty, data, urb->actual_length); tty_flip_buffer_push(tty); } else @@ -929,9 +976,9 @@ static void option_indat_callback(struct urb *urb) tty_kref_put(tty); /* Resubmit urb so we continue receiving */ - if (port->port.count && status != -ESHUTDOWN) { + if (status != -ESHUTDOWN) { err = usb_submit_urb(urb, GFP_ATOMIC); - if (err) + if (err && err != -EPERM) printk(KERN_ERR "%s: resubmit read urb failed. 
" "(%d)", __func__, err); else @@ -985,7 +1032,7 @@ static void option_instat_callback(struct urb *urb) (struct usb_ctrlrequest *)urb->transfer_buffer; if (!req_pkt) { - dbg("%s: NULL req_pkt\n", __func__); + dbg("%s: NULL req_pkt", __func__); return; } if ((req_pkt->bRequestType == 0xA1) && @@ -1211,11 +1258,19 @@ static void option_setup_urbs(struct usb_serial *serial) static int option_send_setup(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; + struct option_intf_private *intfdata = + (struct option_intf_private *) serial->private; struct option_port_private *portdata; int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber; int val = 0; dbg("%s", __func__); + if (is_blacklisted(ifNum, intfdata->blacklist_info) == + OPTION_BLACKLIST_SENDSETUP) { + dbg("No send_setup on blacklisted interface #%d\n", ifNum); + return -EIO; + } + portdata = usb_get_serial_port_data(port); if (portdata->dtr_state) @@ -1401,7 +1456,7 @@ static int option_resume(struct usb_serial *serial) for (i = 0; i < serial->num_ports; i++) { port = serial->port[i]; if (!port->interrupt_in_urb) { - dbg("%s: No interrupt URB for port %d\n", __func__, i); + dbg("%s: No interrupt URB for port %d", __func__, i); continue; } err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO); diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c index c644e26394b..deeacdea05d 100644 --- a/drivers/usb/serial/oti6858.c +++ b/drivers/usb/serial/oti6858.c @@ -58,7 +58,7 @@ #define OTI6858_AUTHOR "Tomasz Michal Lukaszewski <FIXME@FIXME>" #define OTI6858_VERSION "0.1" -static struct usb_device_id id_table [] = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(OTI6858_VENDOR_ID, OTI6858_PRODUCT_ID) }, { } }; @@ -302,7 +302,7 @@ void send_data(struct work_struct *work) struct usb_serial_port *port = priv->port; int count = 0, result; unsigned long flags; - unsigned char allow; + u8 *allow; dbg("%s(port = %d)", __func__, port->number); @@ -321,13 +321,20 @@ void send_data(struct work_struct *work) count = port->bulk_out_size; if (count != 0) { + allow = kmalloc(1, GFP_KERNEL); + if (!allow) { + dev_err(&port->dev, "%s(): kmalloc failed\n", + __func__); + return; + } result = usb_control_msg(port->serial->dev, usb_rcvctrlpipe(port->serial->dev, 0), OTI6858_REQ_T_CHECK_TXBUFF, OTI6858_REQ_CHECK_TXBUFF, - count, 0, &allow, 1, 100); - if (result != 1 || allow != 0) + count, 0, allow, 1, 100); + if (result != 1 || *allow != 0) count = 0; + kfree(allow); } if (count == 0) { @@ -578,9 +585,6 @@ static int oti6858_open(struct tty_struct *tty, struct usb_serial_port *port) usb_clear_halt(serial->dev, port->write_urb->pipe); usb_clear_halt(serial->dev, port->read_urb->pipe); - if (port->port.count != 1) - return 0; - buf = kmalloc(OTI6858_CTRL_PKT_SIZE, GFP_KERNEL); if (buf == NULL) { dev_err(&port->dev, "%s(): out of memory!\n", __func__); @@ -927,10 +931,6 @@ static void oti6858_read_bulk_callback(struct urb *urb) spin_unlock_irqrestore(&priv->lock, flags); if (status != 0) { - if (!port->port.count) { - dbg("%s(): port is closed, exiting", __func__); - return; - } /* if (status == -EPROTO) { * PL2303 mysteriously fails with -EPROTO reschedule @@ -954,14 +954,12 @@ static void oti6858_read_bulk_callback(struct urb *urb) } tty_kref_put(tty); - /* schedule the interrupt urb if we are still open */ - if (port->port.count != 0) { - port->interrupt_in_urb->dev = port->serial->dev; - result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC); - if (result != 0) { - dev_err(&port->dev, 
"%s(): usb_submit_urb() failed," - " error %d\n", __func__, result); - } + /* schedule the interrupt urb */ + port->interrupt_in_urb->dev = port->serial->dev; + result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC); + if (result != 0 && result != -EPERM) { + dev_err(&port->dev, "%s(): usb_submit_urb() failed," + " error %d\n", __func__, result); } } diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 9ec1a49e236..73d5f346d3e 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -50,7 +50,7 @@ struct pl2303_buf { char *buf_put; }; -static struct usb_device_id id_table [] = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) }, { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) }, @@ -451,7 +451,6 @@ static void pl2303_send(struct usb_serial_port *port) port->write_urb->transfer_buffer); port->write_urb->transfer_buffer_length = count; - port->write_urb->dev = port->serial->dev; result = usb_submit_urb(port->write_urb, GFP_ATOMIC); if (result) { dev_err(&port->dev, "%s - failed submitting write urb," @@ -769,7 +768,6 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port) pl2303_set_termios(tty, port, &tmp_termios); dbg("%s - submitting read urb", __func__); - port->read_urb->dev = serial->dev; result = usb_submit_urb(port->read_urb, GFP_KERNEL); if (result) { dev_err(&port->dev, "%s - failed submitting read urb," @@ -779,7 +777,6 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port) } dbg("%s - submitting interrupt urb", __func__); - port->interrupt_in_urb->dev = serial->dev; result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); if (result) { dev_err(&port->dev, "%s - failed submitting interrupt urb," @@ -895,10 +892,23 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg) static int pl2303_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg) { + struct serial_struct ser; struct usb_serial_port *port = tty->driver_data; dbg("%s (%d) cmd = 0x%04x", __func__, port->number, cmd); switch (cmd) { + case TIOCGSERIAL: + memset(&ser, 0, sizeof ser); + ser.type = PORT_16654; + ser.line = port->serial->minor; + ser.port = port->number; + ser.baud_base = 460800; + + if (copy_to_user((void __user *)arg, &ser, sizeof ser)) + return -EFAULT; + + return 0; + case TIOCMIWAIT: dbg("%s (%d) TIOCMIWAIT", __func__, port->number); return wait_modem_info(port, arg); @@ -1042,7 +1052,6 @@ static void pl2303_push_data(struct tty_struct *tty, tty_flag = TTY_FRAME; dbg("%s - tty_flag = %d", __func__, tty_flag); - tty_buffer_request_room(tty, urb->actual_length + 1); /* overrun is special, not associated with a char */ if (line_status & UART_OVERRUN_ERROR) tty_insert_flip_char(tty, 0, TTY_OVERRUN); @@ -1072,16 +1081,11 @@ static void pl2303_read_bulk_callback(struct urb *urb) if (status) { dbg("%s - urb status = %d", __func__, status); - if (!port->port.count) { - dbg("%s - port is closed, exiting.", __func__); - return; - } if (status == -EPROTO) { /* PL2303 mysteriously fails with -EPROTO reschedule * the read */ dbg("%s - caught -EPROTO, resubmitting the urb", __func__); - urb->dev = port->serial->dev; result = usb_submit_urb(urb, GFP_ATOMIC); if (result) dev_err(&urb->dev->dev, "%s - failed" @@ -1108,15 +1112,10 @@ static void pl2303_read_bulk_callback(struct urb *urb) } tty_kref_put(tty); /* Schedule the next read _if_ we are still 
open */ - if (port->port.count) { - urb->dev = port->serial->dev; - result = usb_submit_urb(urb, GFP_ATOMIC); - if (result) - dev_err(&urb->dev->dev, "%s - failed resubmitting" - " read urb, error %d\n", __func__, result); - } - - return; + result = usb_submit_urb(urb, GFP_ATOMIC); + if (result && result != -EPERM) + dev_err(&urb->dev->dev, "%s - failed resubmitting" + " read urb, error %d\n", __func__, result); } static void pl2303_write_bulk_callback(struct urb *urb) @@ -1146,7 +1145,6 @@ static void pl2303_write_bulk_callback(struct urb *urb) dbg("%s - nonzero write bulk status received: %d", __func__, status); port->write_urb->transfer_buffer_length = 1; - port->write_urb->dev = port->serial->dev; result = usb_submit_urb(port->write_urb, GFP_ATOMIC); if (result) dev_err(&urb->dev->dev, "%s - failed resubmitting write" diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c new file mode 100644 index 00000000000..0b936206171 --- /dev/null +++ b/drivers/usb/serial/qcaux.c @@ -0,0 +1,96 @@ +/* + * Qualcomm USB Auxiliary Serial Port driver + * + * Copyright (C) 2008 Greg Kroah-Hartman <greg@kroah.com> + * Copyright (C) 2010 Dan Williams <dcbw@redhat.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Devices listed here usually provide a CDC ACM port on which normal modem + * AT commands and PPP can be used. But when that port is in-use by PPP it + * cannot be used simultaneously for status or signal strength. Instead, the + * ports here can be queried for that information using the Qualcomm DM + * protocol. + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/tty.h> +#include <linux/module.h> +#include <linux/usb.h> +#include <linux/usb/serial.h> + +/* NOTE: for now, only use this driver for devices that provide a CDC-ACM port + * for normal AT commands, but also provide secondary USB interfaces for the + * QCDM-capable ports. Devices that do not provide a CDC-ACM port should + * probably be driven by option.ko. 
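The pl2303 hunk above adds a TIOCGSERIAL case so user space can read back the driver's fixed port parameters. A small user-space sketch of querying it; /dev/ttyUSB0 is an assumed device node:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

int main(void)
{
	struct serial_struct ser;
	int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);

	if (fd < 0 || ioctl(fd, TIOCGSERIAL, &ser) < 0) {
		perror("TIOCGSERIAL");
		return 1;
	}
	printf("type=%d line=%d port=%u baud_base=%d\n",
	       ser.type, ser.line, ser.port, ser.baud_base);
	close(fd);
	return 0;
}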
+ */ + +/* UTStarcom/Pantech/Curitel devices */ +#define UTSTARCOM_VENDOR_ID 0x106c +#define UTSTARCOM_PRODUCT_PC5740 0x3701 +#define UTSTARCOM_PRODUCT_PC5750 0x3702 /* aka Pantech PX-500 */ +#define UTSTARCOM_PRODUCT_UM150 0x3711 +#define UTSTARCOM_PRODUCT_UM175_V1 0x3712 +#define UTSTARCOM_PRODUCT_UM175_V2 0x3714 +#define UTSTARCOM_PRODUCT_UM175_ALLTEL 0x3715 + +/* CMOTECH devices */ +#define CMOTECH_VENDOR_ID 0x16d8 +#define CMOTECH_PRODUCT_CDU550 0x5553 +#define CMOTECH_PRODUCT_CDX650 0x6512 + +static struct usb_device_id id_table[] = { + { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5740, 0xff, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5750, 0xff, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM150, 0xff, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_V1, 0xff, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_V2, 0xff, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_ALLTEL, 0xff, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU550, 0xff, 0xff, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDX650, 0xff, 0xff, 0x00) }, + { }, +}; +MODULE_DEVICE_TABLE(usb, id_table); + +static struct usb_driver qcaux_driver = { + .name = "qcaux", + .probe = usb_serial_probe, + .disconnect = usb_serial_disconnect, + .id_table = id_table, + .no_dynamic_id = 1, +}; + +static struct usb_serial_driver qcaux_device = { + .driver = { + .owner = THIS_MODULE, + .name = "qcaux", + }, + .id_table = id_table, + .num_ports = 1, +}; + +static int __init qcaux_init(void) +{ + int retval; + + retval = usb_serial_register(&qcaux_device); + if (retval) + return retval; + retval = usb_register(&qcaux_driver); + if (retval) + usb_serial_deregister(&qcaux_device); + return retval; +} + +static void __exit qcaux_exit(void) +{ + usb_deregister(&qcaux_driver); + usb_serial_deregister(&qcaux_device); +} + +module_init(qcaux_init); +module_exit(qcaux_exit); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 7528b8d57f1..310ff6ec656 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -21,7 +21,7 @@ static int debug; -static struct usb_device_id id_table[] = { +static const struct usb_device_id id_table[] = { {USB_DEVICE(0x05c6, 0x9211)}, /* Acer Gobi QDL device */ {USB_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ {USB_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ diff --git a/drivers/usb/serial/siemens_mpi.c b/drivers/usb/serial/siemens_mpi.c index 951ea0c6ba7..cb8195cabfd 100644 --- a/drivers/usb/serial/siemens_mpi.c +++ b/drivers/usb/serial/siemens_mpi.c @@ -22,7 +22,7 @@ #define DRIVER_DESC "Driver for Siemens USB/MPI adapter" -static struct usb_device_id id_table[] = { +static const struct usb_device_id id_table[] = { /* Vendor and product id for 6ES7-972-0CB20-0XA0 */ { USB_DEVICE(0x908, 0x0004) }, { }, diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 3eb6143bb64..34e6f894cba 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c @@ -226,7 +226,7 @@ static const struct sierra_iface_info direct_ip_interface_blacklist = { .ifaceinfo = direct_ip_non_serial_ifaces, }; -static struct usb_device_id id_table [] = { +static const struct usb_device_id id_table[] = 
{ { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */ { USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */ { USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */ @@ -304,16 +304,6 @@ static struct usb_device_id id_table [] = { }; MODULE_DEVICE_TABLE(usb, id_table); -static struct usb_driver sierra_driver = { - .name = "sierra", - .probe = usb_serial_probe, - .disconnect = usb_serial_disconnect, - .suspend = usb_serial_suspend, - .resume = usb_serial_resume, - .id_table = id_table, - .no_dynamic_id = 1, - .supports_autosuspend = 1, -}; struct sierra_port_private { spinlock_t lock; /* lock the structure */ @@ -477,7 +467,7 @@ static void sierra_outdat_callback(struct urb *urb) static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count) { - struct sierra_port_private *portdata = usb_get_serial_port_data(port); + struct sierra_port_private *portdata; struct sierra_intf_private *intfdata; struct usb_serial *serial = port->serial; unsigned long flags; @@ -604,14 +594,15 @@ static void sierra_indat_callback(struct urb *urb) } else { if (urb->actual_length) { tty = tty_port_tty_get(&port->port); - - tty_buffer_request_room(tty, urb->actual_length); - tty_insert_flip_string(tty, data, urb->actual_length); - tty_flip_buffer_push(tty); - - tty_kref_put(tty); - usb_serial_debug_data(debug, &port->dev, __func__, - urb->actual_length, data); + if (tty) { + tty_insert_flip_string(tty, data, + urb->actual_length); + tty_flip_buffer_push(tty); + + tty_kref_put(tty); + usb_serial_debug_data(debug, &port->dev, + __func__, urb->actual_length, data); + } } else { dev_dbg(&port->dev, "%s: empty read urb" " received\n", __func__); @@ -619,10 +610,10 @@ static void sierra_indat_callback(struct urb *urb) } /* Resubmit urb so we continue receiving */ - if (port->port.count && status != -ESHUTDOWN && status != -EPERM) { + if (status != -ESHUTDOWN && status != -EPERM) { usb_mark_last_busy(port->serial->dev); err = usb_submit_urb(urb, GFP_ATOMIC); - if (err) + if (err && err != -EPERM) dev_err(&port->dev, "resubmit read urb failed." "(%d)\n", err); } @@ -681,11 +672,11 @@ static void sierra_instat_callback(struct urb *urb) dev_dbg(&port->dev, "%s: error %d\n", __func__, status); /* Resubmit urb so we continue receiving IRQ data */ - if (port->port.count && status != -ESHUTDOWN && status != -ENOENT) { + if (status != -ESHUTDOWN && status != -ENOENT) { usb_mark_last_busy(serial->dev); urb->dev = serial->dev; err = usb_submit_urb(urb, GFP_ATOMIC); - if (err) + if (err && err != -EPERM) dev_err(&port->dev, "%s: resubmit intr urb " "failed. (%d)\n", __func__, err); } @@ -1061,11 +1052,31 @@ static int sierra_resume(struct usb_serial *serial) return ec ? 
-EIO : 0; } + +static int sierra_reset_resume(struct usb_interface *intf) +{ + struct usb_serial *serial = usb_get_intfdata(intf); + dev_err(&serial->dev->dev, "%s\n", __func__); + return usb_serial_resume(intf); +} #else #define sierra_suspend NULL #define sierra_resume NULL +#define sierra_reset_resume NULL #endif +static struct usb_driver sierra_driver = { + .name = "sierra", + .probe = usb_serial_probe, + .disconnect = usb_serial_disconnect, + .suspend = usb_serial_suspend, + .resume = usb_serial_resume, + .reset_resume = sierra_reset_resume, + .id_table = id_table, + .no_dynamic_id = 1, + .supports_autosuspend = 1, +}; + static struct usb_serial_driver sierra_device = { .driver = { .owner = THIS_MODULE, diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c index 1e58220403d..5d39191e724 100644 --- a/drivers/usb/serial/spcp8x5.c +++ b/drivers/usb/serial/spcp8x5.c @@ -45,7 +45,7 @@ static int debug; #define SPCP8x5_835_VID 0x04fc #define SPCP8x5_835_PID 0x0231 -static struct usb_device_id id_table [] = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(SPCP8x5_PHILIPS_VID , SPCP8x5_PHILIPS_PID)}, { USB_DEVICE(SPCP8x5_INTERMATIC_VID, SPCP8x5_INTERMATIC_PID)}, { USB_DEVICE(SPCP8x5_835_VID, SPCP8x5_835_PID)}, @@ -609,7 +609,7 @@ static void spcp8x5_set_termios(struct tty_struct *tty, if (i < 0) dev_err(&port->dev, "Set UART format %#x failed (error = %d)\n", uartdata, i); - dbg("0x21:0x40:0:0 %d\n", i); + dbg("0x21:0x40:0:0 %d", i); if (cflag & CRTSCTS) { /* enable hardware flow control */ @@ -677,7 +677,6 @@ static void spcp8x5_read_bulk_callback(struct urb *urb) struct tty_struct *tty; unsigned char *data = urb->transfer_buffer; unsigned long flags; - int i; int result = urb->status; u8 status; char tty_flag; @@ -687,8 +686,6 @@ static void spcp8x5_read_bulk_callback(struct urb *urb) /* check the urb status */ if (result) { - if (!port->port.count) - return; if (result == -EPROTO) { /* spcp8x5 mysteriously fails with -EPROTO */ /* reschedule the read */ @@ -726,26 +723,20 @@ static void spcp8x5_read_bulk_callback(struct urb *urb) tty = tty_port_tty_get(&port->port); if (tty && urb->actual_length) { - tty_buffer_request_room(tty, urb->actual_length + 1); /* overrun is special, not associated with a char */ if (status & UART_OVERRUN_ERROR) tty_insert_flip_char(tty, 0, TTY_OVERRUN); - for (i = 0; i < urb->actual_length; ++i) - tty_insert_flip_char(tty, data[i], tty_flag); + tty_insert_flip_string_fixed_flag(tty, data, + urb->actual_length, tty_flag); tty_flip_buffer_push(tty); } tty_kref_put(tty); - /* Schedule the next read _if_ we are still open */ - if (port->port.count) { - urb->dev = port->serial->dev; - result = usb_submit_urb(urb , GFP_ATOMIC); - if (result) - dev_dbg(&port->dev, "failed submitting read urb %d\n", - result); - } - - return; + /* Schedule the next read */ + urb->dev = port->serial->dev; + result = usb_submit_urb(urb , GFP_ATOMIC); + if (result) + dev_dbg(&port->dev, "failed submitting read urb %d\n", result); } /* get data from ring buffer and then write to usb bus */ diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c index b282c0f2d8e..72398888858 100644 --- a/drivers/usb/serial/symbolserial.c +++ b/drivers/usb/serial/symbolserial.c @@ -21,7 +21,7 @@ static int debug; -static struct usb_device_id id_table[] = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(0x05e0, 0x0600) }, { }, }; @@ -51,7 +51,6 @@ static void symbol_int_callback(struct urb *urb) int status = urb->status; struct 
tty_struct *tty; int result; - int available_room = 0; int data_length; dbg("%s - port %d", __func__, port->number); @@ -89,13 +88,8 @@ static void symbol_int_callback(struct urb *urb) */ tty = tty_port_tty_get(&port->port); if (tty) { - available_room = tty_buffer_request_room(tty, - data_length); - if (available_room) { - tty_insert_flip_string(tty, &data[1], - available_room); - tty_flip_buffer_push(tty); - } + tty_insert_flip_string(tty, &data[1], data_length); + tty_flip_buffer_push(tty); tty_kref_put(tty); } } else { diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 1e9dc882169..0afe5c71c17 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -1271,14 +1271,13 @@ static void ti_recv(struct device *dev, struct tty_struct *tty, int cnt; do { - cnt = tty_buffer_request_room(tty, length); + cnt = tty_insert_flip_string(tty, data, length); if (cnt < length) { dev_err(dev, "%s - dropping data, %d bytes lost\n", __func__, length - cnt); if (cnt == 0) break; } - tty_insert_flip_string(tty, data, cnt); tty_flip_buffer_push(tty); data += cnt; length -= cnt; diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 33c85f7084f..3873660d821 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c @@ -358,10 +358,6 @@ static int serial_write(struct tty_struct *tty, const unsigned char *buf, dbg("%s - port %d, %d byte(s)", __func__, port->number, count); - /* count is managed under the mutex lock for the tty so cannot - drop to zero until after the last close completes */ - WARN_ON(!port->port.count); - /* pass on to the driver specific version of this function */ retval = port->serial->type->write(tty, port, buf, count); @@ -373,7 +369,6 @@ static int serial_write_room(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; dbg("%s - port %d", __func__, port->number); - WARN_ON(!port->port.count); /* pass on to the driver specific version of this function */ return port->serial->type->write_room(tty); } @@ -381,7 +376,7 @@ static int serial_write_room(struct tty_struct *tty) static int serial_chars_in_buffer(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; - dbg("%s = port %d", __func__, port->number); + dbg("%s - port %d", __func__, port->number); /* if the device was unplugged then any remaining characters fell out of the connector ;) */ @@ -396,7 +391,6 @@ static void serial_throttle(struct tty_struct *tty) struct usb_serial_port *port = tty->driver_data; dbg("%s - port %d", __func__, port->number); - WARN_ON(!port->port.count); /* pass on to the driver specific version of this function */ if (port->serial->type->throttle) port->serial->type->throttle(tty); @@ -407,7 +401,6 @@ static void serial_unthrottle(struct tty_struct *tty) struct usb_serial_port *port = tty->driver_data; dbg("%s - port %d", __func__, port->number); - WARN_ON(!port->port.count); /* pass on to the driver specific version of this function */ if (port->serial->type->unthrottle) port->serial->type->unthrottle(tty); @@ -421,8 +414,6 @@ static int serial_ioctl(struct tty_struct *tty, struct file *file, dbg("%s - port %d, cmd 0x%.4x", __func__, port->number, cmd); - WARN_ON(!port->port.count); - /* pass on to the driver specific version of this function if it is available */ if (port->serial->type->ioctl) { @@ -437,7 +428,6 @@ static void serial_set_termios(struct tty_struct *tty, struct ktermios *old) struct usb_serial_port *port = 
tty->driver_data; dbg("%s - port %d", __func__, port->number); - WARN_ON(!port->port.count); /* pass on to the driver specific version of this function if it is available */ if (port->serial->type->set_termios) @@ -452,7 +442,6 @@ static int serial_break(struct tty_struct *tty, int break_state) dbg("%s - port %d", __func__, port->number); - WARN_ON(!port->port.count); /* pass on to the driver specific version of this function if it is available */ if (port->serial->type->break_ctl) @@ -513,7 +502,6 @@ static int serial_tiocmget(struct tty_struct *tty, struct file *file) dbg("%s - port %d", __func__, port->number); - WARN_ON(!port->port.count); if (port->serial->type->tiocmget) return port->serial->type->tiocmget(tty, file); return -EINVAL; @@ -526,7 +514,6 @@ static int serial_tiocmset(struct tty_struct *tty, struct file *file, dbg("%s - port %d", __func__, port->number); - WARN_ON(!port->port.count); if (port->serial->type->tiocmset) return port->serial->type->tiocmset(tty, file, set, clear); return -EINVAL; diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c index 7b5bfc4edd3..252cc2d993b 100644 --- a/drivers/usb/serial/usb_debug.c +++ b/drivers/usb/serial/usb_debug.c @@ -29,7 +29,7 @@ static char USB_DEBUG_BRK[USB_DEBUG_BRK_SIZE] = { 0xff, }; -static struct usb_device_id id_table [] = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(0x0525, 0x127a) }, { }, }; diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c index ad1f9232292..094942707c7 100644 --- a/drivers/usb/serial/visor.c +++ b/drivers/usb/serial/visor.c @@ -368,7 +368,7 @@ static int visor_write(struct tty_struct *tty, struct usb_serial_port *port, spin_lock_irqsave(&priv->lock, flags); if (priv->outstanding_urbs > URB_UPPER_LIMIT) { spin_unlock_irqrestore(&priv->lock, flags); - dbg("%s - write limit hit\n", __func__); + dbg("%s - write limit hit", __func__); return 0; } priv->outstanding_urbs++; @@ -446,7 +446,7 @@ static int visor_write_room(struct tty_struct *tty) spin_lock_irqsave(&priv->lock, flags); if (priv->outstanding_urbs > URB_UPPER_LIMIT * 2 / 3) { spin_unlock_irqrestore(&priv->lock, flags); - dbg("%s - write limit hit\n", __func__); + dbg("%s - write limit hit", __func__); return 0; } spin_unlock_irqrestore(&priv->lock, flags); @@ -503,13 +503,9 @@ static void visor_read_bulk_callback(struct urb *urb) if (urb->actual_length) { tty = tty_port_tty_get(&port->port); if (tty) { - available_room = tty_buffer_request_room(tty, - urb->actual_length); - if (available_room) { - tty_insert_flip_string(tty, data, - available_room); - tty_flip_buffer_push(tty); - } + tty_insert_flip_string(tty, data, + urb->actual_length); + tty_flip_buffer_push(tty); tty_kref_put(tty); } spin_lock(&priv->lock); @@ -807,10 +803,14 @@ static int clie_3_5_startup(struct usb_serial *serial) { struct device *dev = &serial->dev->dev; int result; - u8 data; + u8 *data; dbg("%s", __func__); + data = kmalloc(1, GFP_KERNEL); + if (!data) + return -ENOMEM; + /* * Note that PEG-300 series devices expect the following two calls. 
*/ @@ -818,36 +818,42 @@ static int clie_3_5_startup(struct usb_serial *serial) /* get the config number */ result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), USB_REQ_GET_CONFIGURATION, USB_DIR_IN, - 0, 0, &data, 1, 3000); + 0, 0, data, 1, 3000); if (result < 0) { dev_err(dev, "%s: get config number failed: %d\n", __func__, result); - return result; + goto out; } if (result != 1) { dev_err(dev, "%s: get config number bad return length: %d\n", __func__, result); - return -EIO; + result = -EIO; + goto out; } /* get the interface number */ result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), USB_REQ_GET_INTERFACE, USB_DIR_IN | USB_RECIP_INTERFACE, - 0, 0, &data, 1, 3000); + 0, 0, data, 1, 3000); if (result < 0) { dev_err(dev, "%s: get interface number failed: %d\n", __func__, result); - return result; + goto out; } if (result != 1) { dev_err(dev, "%s: get interface number bad return length: %d\n", __func__, result); - return -EIO; + result = -EIO; + goto out; } - return generic_startup(serial); + result = generic_startup(serial); +out: + kfree(data); + + return result; } static int treo_attach(struct usb_serial *serial) diff --git a/drivers/usb/serial/vivopay-serial.c b/drivers/usb/serial/vivopay-serial.c new file mode 100644 index 00000000000..f719d00972f --- /dev/null +++ b/drivers/usb/serial/vivopay-serial.c @@ -0,0 +1,76 @@ +/* + * Copyright (C) 2001-2005 Greg Kroah-Hartman (greg@kroah.com) + * Copyright (C) 2009 Outpost Embedded, LLC + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/tty.h> +#include <linux/module.h> +#include <linux/usb.h> +#include <linux/usb/serial.h> + + +#define DRIVER_VERSION "v1.0" +#define DRIVER_DESC "ViVOpay USB Serial Driver" + +#define VIVOPAY_VENDOR_ID 0x1d5f + + +static struct usb_device_id id_table [] = { + /* ViVOpay 8800 */ + { USB_DEVICE(VIVOPAY_VENDOR_ID, 0x1004) }, + { }, +}; + +MODULE_DEVICE_TABLE(usb, id_table); + +static struct usb_driver vivopay_serial_driver = { + .name = "vivopay-serial", + .probe = usb_serial_probe, + .disconnect = usb_serial_disconnect, + .id_table = id_table, + .no_dynamic_id = 1, +}; + +static struct usb_serial_driver vivopay_serial_device = { + .driver = { + .owner = THIS_MODULE, + .name = "vivopay-serial", + }, + .id_table = id_table, + .usb_driver = &vivopay_serial_driver, + .num_ports = 1, +}; + +static int __init vivopay_serial_init(void) +{ + int retval; + retval = usb_serial_register(&vivopay_serial_device); + if (retval) + goto failed_usb_serial_register; + retval = usb_register(&vivopay_serial_driver); + if (retval) + goto failed_usb_register; + printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":" + DRIVER_DESC "\n"); + return 0; +failed_usb_register: + usb_serial_deregister(&vivopay_serial_device); +failed_usb_serial_register: + return retval; +} + +static void __exit vivopay_serial_exit(void) +{ + usb_deregister(&vivopay_serial_driver); + usb_serial_deregister(&vivopay_serial_device); +} + +module_init(vivopay_serial_init); +module_exit(vivopay_serial_exit); + +MODULE_AUTHOR("Forest Bond <forest.bond@outpostembedded.com>"); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c index 1093d2eb046..12ed8209ca7 100644 --- a/drivers/usb/serial/whiteheat.c +++ b/drivers/usb/serial/whiteheat.c @@ -111,17 +111,17 @@ static int debug; separate ID tables, and then a third table that combines them just for the purpose of exporting the autoloading 
information. */ -static struct usb_device_id id_table_std [] = { +static const struct usb_device_id id_table_std[] = { { USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_WHITE_HEAT_ID) }, { } /* Terminating entry */ }; -static struct usb_device_id id_table_prerenumeration [] = { +static const struct usb_device_id id_table_prerenumeration[] = { { USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_FAKE_WHITE_HEAT_ID) }, { } /* Terminating entry */ }; -static struct usb_device_id id_table_combined [] = { +static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_WHITE_HEAT_ID) }, { USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_FAKE_WHITE_HEAT_ID) }, { } /* Terminating entry */ @@ -1492,21 +1492,9 @@ static void rx_data_softint(struct work_struct *work) wrap = list_entry(tmp, struct whiteheat_urb_wrap, list); urb = wrap->urb; - if (tty && urb->actual_length) { - int len = tty_buffer_request_room(tty, - urb->actual_length); - /* This stuff can go away now I suspect */ - if (unlikely(len < urb->actual_length)) { - spin_lock_irqsave(&info->lock, flags); - list_add(tmp, &info->rx_urb_q); - spin_unlock_irqrestore(&info->lock, flags); - tty_flip_buffer_push(tty); - schedule_work(&info->rx_work); - goto out; - } - tty_insert_flip_string(tty, urb->transfer_buffer, len); - sent += len; - } + if (tty && urb->actual_length) + sent += tty_insert_flip_string(tty, + urb->transfer_buffer, urb->actual_length); urb->dev = port->serial->dev; result = usb_submit_urb(urb, GFP_ATOMIC); diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c index 80e65f29921..198bb3ed95b 100644 --- a/drivers/usb/storage/onetouch.c +++ b/drivers/usb/storage/onetouch.c @@ -202,7 +202,7 @@ static int onetouch_connect_input(struct us_data *ss) goto fail1; onetouch->data = usb_buffer_alloc(udev, ONETOUCH_PKT_LEN, - GFP_ATOMIC, &onetouch->data_dma); + GFP_KERNEL, &onetouch->data_dma); if (!onetouch->data) goto fail1; diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index aadc16b5eed..4cc035562cc 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -133,7 +133,7 @@ static int slave_configure(struct scsi_device *sdev) if (us->fflags & US_FL_MAX_SECTORS_MIN) max_sectors = PAGE_CACHE_SIZE >> 9; - if (queue_max_sectors(sdev->request_queue) > max_sectors) + if (queue_max_hw_sectors(sdev->request_queue) > max_sectors) blk_queue_max_hw_sectors(sdev->request_queue, max_sectors); } else if (sdev->type == TYPE_TAPE) { @@ -484,7 +484,7 @@ static ssize_t show_max_sectors(struct device *dev, struct device_attribute *att { struct scsi_device *sdev = to_scsi_device(dev); - return sprintf(buf, "%u\n", queue_max_sectors(sdev->request_queue)); + return sprintf(buf, "%u\n", queue_max_hw_sectors(sdev->request_queue)); } /* Input routine for the sysfs max_sectors file */ @@ -494,9 +494,9 @@ static ssize_t store_max_sectors(struct device *dev, struct device_attribute *at struct scsi_device *sdev = to_scsi_device(dev); unsigned short ms; - if (sscanf(buf, "%hu", &ms) > 0 && ms <= SCSI_DEFAULT_MAX_SECTORS) { + if (sscanf(buf, "%hu", &ms) > 0) { blk_queue_max_hw_sectors(sdev->request_queue, ms); - return strlen(buf); + return count; } return -EINVAL; } @@ -539,7 +539,7 @@ struct scsi_host_template usb_stor_host_template = { .slave_configure = slave_configure, /* lots of sg segments can be handled */ - .sg_tablesize = SG_ALL, + .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS, /* limit the total size of a transfer to 120 KB */ .max_sectors = 
240, diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c index b62a28814eb..bd3f415893d 100644 --- a/drivers/usb/storage/shuttle_usbat.c +++ b/drivers/usb/storage/shuttle_usbat.c @@ -1628,10 +1628,10 @@ static int usbat_hp8200e_transport(struct scsi_cmnd *srb, struct us_data *us) return USB_STOR_TRANSPORT_ERROR; } - if ( (result = usbat_multiple_write(us, - registers, data, 7)) != USB_STOR_TRANSPORT_GOOD) { + result = usbat_multiple_write(us, registers, data, 7); + + if (result != USB_STOR_TRANSPORT_GOOD) return result; - } /* * Write the 12-byte command header. @@ -1643,12 +1643,11 @@ static int usbat_hp8200e_transport(struct scsi_cmnd *srb, struct us_data *us) * AT SPEED 4 IS UNRELIABLE!!! */ - if ((result = usbat_write_block(us, - USBAT_ATA, srb->cmnd, 12, - (srb->cmnd[0]==GPCMD_BLANK ? 75 : 10), 0) != - USB_STOR_TRANSPORT_GOOD)) { + result = usbat_write_block(us, USBAT_ATA, srb->cmnd, 12, + srb->cmnd[0] == GPCMD_BLANK ? 75 : 10, 0); + + if (result != USB_STOR_TRANSPORT_GOOD) return result; - } /* If there is response data to be read in then do it here. */ diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c index cc313d16d72..468038126e5 100644 --- a/drivers/usb/storage/transport.c +++ b/drivers/usb/storage/transport.c @@ -47,6 +47,8 @@ #include <linux/errno.h> #include <linux/slab.h> +#include <linux/usb/quirks.h> + #include <scsi/scsi.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_device.h> @@ -1297,6 +1299,10 @@ int usb_stor_port_reset(struct us_data *us) { int result; + /*for these devices we must use the class specific method */ + if (us->pusb_dev->quirks & USB_QUIRK_RESET_MORPHS) + return -EPERM; + result = usb_lock_device_for_reset(us->pusb_dev, us->pusb_intf); if (result < 0) US_DEBUGP("unable to lock device for reset: %d\n", result); diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 49575fba375..98b549b1cab 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1147,8 +1147,8 @@ UNUSUAL_DEV( 0x0af0, 0x7401, 0x0000, 0x0000, 0 ), /* Reported by Jan Dumon <j.dumon@option.com> - * This device (wrongly) has a vendor-specific device descriptor. - * The entry is needed so usb-storage can bind to it's mass-storage + * These devices (wrongly) have a vendor-specific device descriptor. 
+ * These entries are needed so usb-storage can bind to their mass-storage * interface as an interface driver */ UNUSUAL_DEV( 0x0af0, 0x7501, 0x0000, 0x0000, "Option", @@ -1156,6 +1156,90 @@ UNUSUAL_DEV( 0x0af0, 0x7501, 0x0000, 0x0000, US_SC_DEVICE, US_PR_DEVICE, NULL, 0 ), +UNUSUAL_DEV( 0x0af0, 0x7701, 0x0000, 0x0000, + "Option", + "GI 0451 SD-Card", + US_SC_DEVICE, US_PR_DEVICE, NULL, + 0 ), + +UNUSUAL_DEV( 0x0af0, 0x7706, 0x0000, 0x0000, + "Option", + "GI 0451 SD-Card", + US_SC_DEVICE, US_PR_DEVICE, NULL, + 0 ), + +UNUSUAL_DEV( 0x0af0, 0x7901, 0x0000, 0x0000, + "Option", + "GI 0452 SD-Card", + US_SC_DEVICE, US_PR_DEVICE, NULL, + 0 ), + +UNUSUAL_DEV( 0x0af0, 0x7A01, 0x0000, 0x0000, + "Option", + "GI 0461 SD-Card", + US_SC_DEVICE, US_PR_DEVICE, NULL, + 0 ), + +UNUSUAL_DEV( 0x0af0, 0x7A05, 0x0000, 0x0000, + "Option", + "GI 0461 SD-Card", + US_SC_DEVICE, US_PR_DEVICE, NULL, + 0 ), + +UNUSUAL_DEV( 0x0af0, 0x8300, 0x0000, 0x0000, + "Option", + "GI 033x SD-Card", + US_SC_DEVICE, US_PR_DEVICE, NULL, + 0 ), + +UNUSUAL_DEV( 0x0af0, 0x8302, 0x0000, 0x0000, + "Option", + "GI 033x SD-Card", + US_SC_DEVICE, US_PR_DEVICE, NULL, + 0 ), + +UNUSUAL_DEV( 0x0af0, 0x8304, 0x0000, 0x0000, + "Option", + "GI 033x SD-Card", + US_SC_DEVICE, US_PR_DEVICE, NULL, + 0 ), + +UNUSUAL_DEV( 0x0af0, 0xc100, 0x0000, 0x0000, + "Option", + "GI 070x SD-Card", + US_SC_DEVICE, US_PR_DEVICE, NULL, + 0 ), + +UNUSUAL_DEV( 0x0af0, 0xd057, 0x0000, 0x0000, + "Option", + "GI 1505 SD-Card", + US_SC_DEVICE, US_PR_DEVICE, NULL, + 0 ), + +UNUSUAL_DEV( 0x0af0, 0xd058, 0x0000, 0x0000, + "Option", + "GI 1509 SD-Card", + US_SC_DEVICE, US_PR_DEVICE, NULL, + 0 ), + +UNUSUAL_DEV( 0x0af0, 0xd157, 0x0000, 0x0000, + "Option", + "GI 1515 SD-Card", + US_SC_DEVICE, US_PR_DEVICE, NULL, + 0 ), + +UNUSUAL_DEV( 0x0af0, 0xd257, 0x0000, 0x0000, + "Option", + "GI 1215 SD-Card", + US_SC_DEVICE, US_PR_DEVICE, NULL, + 0 ), + +UNUSUAL_DEV( 0x0af0, 0xd357, 0x0000, 0x0000, + "Option", + "GI 1505 SD-Card", + US_SC_DEVICE, US_PR_DEVICE, NULL, + 0 ), + /* Reported by Ben Efros <ben@pc-doctor.com> */ UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000, "Seagate", diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c index b1e579c5c97..61522787f39 100644 --- a/drivers/usb/usb-skeleton.c +++ b/drivers/usb/usb-skeleton.c @@ -28,7 +28,7 @@ #define USB_SKEL_PRODUCT_ID 0xfff0 /* table of devices that work with this driver */ -static struct usb_device_id skel_table[] = { +static const struct usb_device_id skel_table[] = { { USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c index 25eae405f62..51a8e0d5789 100644 --- a/drivers/usb/wusbcore/cbaf.c +++ b/drivers/usb/wusbcore/cbaf.c @@ -641,7 +641,7 @@ static void cbaf_disconnect(struct usb_interface *iface) kzfree(cbaf); } -static struct usb_device_id cbaf_id_table[] = { +static const struct usb_device_id cbaf_id_table[] = { { USB_INTERFACE_INFO(0xef, 0x03, 0x01), }, { }, }; diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c index dced419f7ab..1c918286159 100644 --- a/drivers/usb/wusbcore/devconnect.c +++ b/drivers/usb/wusbcore/devconnect.c @@ -868,7 +868,7 @@ static struct usb_wireless_cap_descriptor wusb_cap_descr_default = { * reference that we'll drop. * * First we need to determine if the device is a WUSB device (else we - * ignore it). For that we use the speed setting (USB_SPEED_VARIABLE) + * ignore it). 
For that we use the speed setting (USB_SPEED_WIRELESS) * [FIXME: maybe we'd need something more definitive]. If so, we track * it's usb_busd and from there, the WUSB HC. * diff --git a/drivers/usb/wusbcore/mmc.c b/drivers/usb/wusbcore/mmc.c index 3b52161e6e9..2d827397e30 100644 --- a/drivers/usb/wusbcore/mmc.c +++ b/drivers/usb/wusbcore/mmc.c @@ -263,7 +263,7 @@ int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid) { int result = 0; - if (memcmp(chid, &wusb_ckhdid_zero, sizeof(chid)) == 0) + if (memcmp(chid, &wusb_ckhdid_zero, sizeof(*chid)) == 0) chid = NULL; mutex_lock(&wusbhc->mutex); diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 3681c6a8821..b0a3fa00706 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -3025,6 +3025,20 @@ static int fbcon_fb_unregistered(struct fb_info *info) return 0; } +static void fbcon_remap_all(int idx) +{ + int i; + for (i = first_fb_vc; i <= last_fb_vc; i++) + set_con2fb_map(i, idx, 0); + + if (con_is_bound(&fb_con)) { + printk(KERN_INFO "fbcon: Remapping primary device, " + "fb%i, to tty %i-%i\n", idx, + first_fb_vc + 1, last_fb_vc + 1); + info_idx = idx; + } +} + #ifdef CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY static void fbcon_select_primary(struct fb_info *info) { @@ -3225,6 +3239,10 @@ static int fbcon_event_notify(struct notifier_block *self, caps = event->data; fbcon_get_requirement(info, caps); break; + case FB_EVENT_REMAP_ALL_CONSOLE: + idx = info->node; + fbcon_remap_all(idx); + break; } done: return ret; diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c index 99bbd282ce6..a15b44e9c00 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c @@ -1513,7 +1513,6 @@ register_framebuffer(struct fb_info *fb_info) fb_info->fix.id, registered_fb[i]->fix.id); unregister_framebuffer(registered_fb[i]); - break; } } } diff --git a/drivers/xen/events.c b/drivers/xen/events.c index ce602dd09bc..2f8413794d0 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -649,9 +649,13 @@ void xen_evtchn_do_upcall(struct pt_regs *regs) int bit_idx = __ffs(pending_bits); int port = (word_idx * BITS_PER_LONG) + bit_idx; int irq = evtchn_to_irq[port]; + struct irq_desc *desc; - if (irq != -1) - handle_irq(irq, regs); + if (irq != -1) { + desc = irq_to_desc(irq); + if (desc) + generic_handle_irq_desc(irq, desc); + } } } |
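The tty receive paths touched above (spcp8x5, symbolserial, ti_usb_3410_5052, visor, whiteheat) all drop the tty_buffer_request_room()/per-character copy pattern in favour of a single tty_insert_flip_string() call. A minimal sketch of the resulting callback shape, assuming a generic usb-serial port; example_read_bulk_callback() and its locals are illustrative, not code from any of the drivers above:

#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>

static void example_read_bulk_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	unsigned char *data = urb->transfer_buffer;
	struct tty_struct *tty;
	int result;

	if (urb->status)
		return;			/* error handling elided in this sketch */

	tty = tty_port_tty_get(&port->port);
	if (tty && urb->actual_length) {
		/* the flip buffers grow as needed; no request_room step */
		tty_insert_flip_string(tty, data, urb->actual_length);
		tty_flip_buffer_push(tty);
	}
	tty_kref_put(tty);		/* safe to call with NULL */

	/* resubmit unconditionally; close/disconnect kill the urb instead */
	urb->dev = port->serial->dev;
	result = usb_submit_urb(urb, GFP_ATOMIC);
	if (result)
		dev_dbg(&port->dev, "failed submitting read urb %d\n", result);
}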
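The clie_3_5_startup() change in visor.c swaps a stack variable for a kmalloc()ed one because buffers handed to usb_control_msg() must be DMA-able, and stack memory is not. A hedged sketch of the same rule in isolation; example_get_config() is a made-up helper, not the driver function:

#include <linux/slab.h>
#include <linux/usb.h>

static int example_get_config(struct usb_device *udev)
{
	u8 *data;
	int result;

	data = kmalloc(1, GFP_KERNEL);	/* kmalloc()ed memory is DMA-able */
	if (!data)
		return -ENOMEM;

	result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				 USB_REQ_GET_CONFIGURATION, USB_DIR_IN,
				 0, 0, data, 1, 3000);
	if (result == 1)
		result = *data;		/* configuration number */
	else if (result >= 0)
		result = -EIO;		/* short or empty transfer */

	kfree(data);
	return result;
}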
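Several hunks above constify the usb_device_id tables; a table referenced only by .id_table and MODULE_DEVICE_TABLE() is never written at run time, so it can live in read-only data. Illustrative example with made-up IDs:

#include <linux/module.h>
#include <linux/usb.h>

static const struct usb_device_id example_id_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },	/* made-up VID/PID */
	{ }				/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_id_table);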
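The wusbcore/mmc.c hunk fixes the usual sizeof-of-a-pointer mistake: with chid being a pointer, sizeof(chid) compares only 4 or 8 bytes instead of the whole CHID. A small sketch of the corrected comparison; the 16-byte struct below is a stand-in, not the real wusb_ckhdid definition:

#include <linux/string.h>
#include <linux/types.h>

struct example_ckhdid {
	u8 data[16];			/* stand-in for the real CHID layout */
};

static const struct example_ckhdid example_ckhdid_zero;	/* all zeroes */

static bool example_chid_is_zero(const struct example_ckhdid *chid)
{
	/* sizeof(*chid) covers all 16 bytes; sizeof(chid) would only
	 * cover the pointer itself */
	return memcmp(chid, &example_ckhdid_zero, sizeof(*chid)) == 0;
}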